diff --git a/.features/skill/MEMORY.md b/.features/skill/MEMORY.md
index fdf9b92..b68ebd7 100644
--- a/.features/skill/MEMORY.md
+++ b/.features/skill/MEMORY.md
@@ -18,6 +18,8 @@ Skill 系统支持两种来源:官方 skills (`./skills/`) 和用户 skills (`
## 最近重要事项
+- 2026-04-18: `create_robot_project` 改为自动加载 `skills/autoload/{SKILLS_SUBDIR}` 下所有 skill,并跳过已显式传入的同名 skill
+- 2026-04-18: `/api/v1/skill/list` 的官方库改为同时读取 `skills/common` 和 `skills/{SKILLS_SUBDIR}`,并按目录顺序去重
- 2026-04-18: `_extract_skills_to_robot` 改为通过环境变量 `SKILLS_SUBDIR` 选择官方 skills 子目录,默认使用 `skills/common`
- 2025-02-11: 初始化 skill 功能 memory
diff --git a/routes/skill_manager.py b/routes/skill_manager.py
index 49158c4..9f52a1a 100644
--- a/routes/skill_manager.py
+++ b/routes/skill_manager.py
@@ -10,7 +10,7 @@ from typing import List, Optional
from dataclasses import dataclass
from fastapi import APIRouter, HTTPException, Query, UploadFile, File, Form
from pydantic import BaseModel
-from utils.settings import SKILLS_DIR
+from utils.settings import SKILLS_DIR, SKILLS_SUBDIR
import aiofiles
logger = logging.getLogger('app')
@@ -428,27 +428,39 @@ def get_official_skills(base_dir: str) -> List[SkillItem]:
List of SkillItem objects
"""
skills = []
+ skill_names = set()
+
# Use SKILLS_DIR from settings, relative to base_dir
if os.path.isabs(SKILLS_DIR):
- official_skills_dir = SKILLS_DIR
+ skills_root_dir = SKILLS_DIR
else:
- official_skills_dir = os.path.join(base_dir, SKILLS_DIR)
+ skills_root_dir = os.path.join(base_dir, SKILLS_DIR)
- if not os.path.exists(official_skills_dir):
- logger.warning(f"Official skills directory not found: {official_skills_dir}")
- return skills
+ official_skills_dirs = [
+ os.path.join(skills_root_dir, "common"),
+ os.path.join(skills_root_dir, SKILLS_SUBDIR),
+ ]
- for skill_name in os.listdir(official_skills_dir):
- skill_path = os.path.join(official_skills_dir, skill_name)
- if os.path.isdir(skill_path):
- metadata = get_skill_metadata_legacy(skill_path)
- if metadata:
- skills.append(SkillItem(
- name=metadata['name'],
- description=metadata['description'],
- user_skill=False
- ))
- logger.debug(f"Found official skill: {metadata['name']}")
+ for official_skills_dir in official_skills_dirs:
+ if not os.path.exists(official_skills_dir):
+ logger.warning(f"Official skills directory not found: {official_skills_dir}")
+ continue
+
+ for skill_name in os.listdir(official_skills_dir):
+ if skill_name in skill_names:
+ continue
+
+ skill_path = os.path.join(official_skills_dir, skill_name)
+ if os.path.isdir(skill_path):
+ metadata = get_skill_metadata_legacy(skill_path)
+ if metadata:
+ skills.append(SkillItem(
+ name=metadata['name'],
+ description=metadata['description'],
+ user_skill=False
+ ))
+ skill_names.add(skill_name)
+ logger.debug(f"Found official skill: {metadata['name']} from {official_skills_dir}")
return skills
@@ -499,7 +511,7 @@ async def list_skills(
SkillListResponse containing all skills
Notes:
- - Official skills are read from the /skills directory
+ - Official skills are read from /skills/common and /skills/{SKILLS_SUBDIR}
- User skills are read from /projects/uploads/{bot_id}/skills directory
- User skills are marked with user_skill: true
"""
diff --git a/skills_autoload/rag-retrieve/.claude-plugin/plugin.json b/skills/autoload/onprem/rag-retrieve/.claude-plugin/plugin.json
similarity index 100%
rename from skills_autoload/rag-retrieve/.claude-plugin/plugin.json
rename to skills/autoload/onprem/rag-retrieve/.claude-plugin/plugin.json
diff --git a/skills_autoload/rag-retrieve/README.md b/skills/autoload/onprem/rag-retrieve/README.md
similarity index 100%
rename from skills_autoload/rag-retrieve/README.md
rename to skills/autoload/onprem/rag-retrieve/README.md
diff --git a/skills_autoload/rag-retrieve/hooks/hook-backup.md b/skills/autoload/onprem/rag-retrieve/hooks/hook-backup.md
similarity index 100%
rename from skills_autoload/rag-retrieve/hooks/hook-backup.md
rename to skills/autoload/onprem/rag-retrieve/hooks/hook-backup.md
diff --git a/skills_autoload/rag-retrieve/hooks/pre_prompt.py b/skills/autoload/onprem/rag-retrieve/hooks/pre_prompt.py
similarity index 100%
rename from skills_autoload/rag-retrieve/hooks/pre_prompt.py
rename to skills/autoload/onprem/rag-retrieve/hooks/pre_prompt.py
diff --git a/skills_autoload/rag-retrieve/hooks/retrieval-policy.md b/skills/autoload/onprem/rag-retrieve/hooks/retrieval-policy.md
similarity index 100%
rename from skills_autoload/rag-retrieve/hooks/retrieval-policy.md
rename to skills/autoload/onprem/rag-retrieve/hooks/retrieval-policy.md
diff --git a/skills_autoload/rag-retrieve/mcp_common.py b/skills/autoload/onprem/rag-retrieve/mcp_common.py
similarity index 100%
rename from skills_autoload/rag-retrieve/mcp_common.py
rename to skills/autoload/onprem/rag-retrieve/mcp_common.py
diff --git a/skills_autoload/rag-retrieve/rag_retrieve_server.py b/skills/autoload/onprem/rag-retrieve/rag_retrieve_server.py
similarity index 100%
rename from skills_autoload/rag-retrieve/rag_retrieve_server.py
rename to skills/autoload/onprem/rag-retrieve/rag_retrieve_server.py
diff --git a/skills_autoload/rag-retrieve/rag_retrieve_tools.json b/skills/autoload/onprem/rag-retrieve/rag_retrieve_tools.json
similarity index 100%
rename from skills_autoload/rag-retrieve/rag_retrieve_tools.json
rename to skills/autoload/onprem/rag-retrieve/rag_retrieve_tools.json
diff --git a/skills/autoload/support/rag-retrieve/.claude-plugin/plugin.json b/skills/autoload/support/rag-retrieve/.claude-plugin/plugin.json
new file mode 100644
index 0000000..a929aa6
--- /dev/null
+++ b/skills/autoload/support/rag-retrieve/.claude-plugin/plugin.json
@@ -0,0 +1,22 @@
+{
+ "name": "rag-retrieve",
+ "description": "Provides RAG and table RAG retrieval tools through a PrePrompt hook and MCP server.",
+ "hooks": {
+ "PrePrompt": [
+ {
+ "type": "command",
+ "command": "python hooks/pre_prompt.py"
+ }
+ ]
+ },
+ "mcpServers": {
+ "rag_retrieve": {
+ "transport": "stdio",
+ "command": "python",
+ "args": [
+ "./rag_retrieve_server.py",
+ "{bot_id}"
+ ]
+ }
+ }
+}
diff --git a/skills/autoload/support/rag-retrieve/README.md b/skills/autoload/support/rag-retrieve/README.md
new file mode 100644
index 0000000..acf13ac
--- /dev/null
+++ b/skills/autoload/support/rag-retrieve/README.md
@@ -0,0 +1,99 @@
+# RAG Retrieve
+
+An example autoload skill that demonstrates how to integrate `rag-retrieve` and `table-rag-retrieve` through Claude Plugins hooks and an MCP server.
+
+## Overview
+
+This skill uses a `PrePrompt` hook to inject retrieval guidance into the prompt, and starts an MCP server that exposes retrieval capabilities for the current bot.
+
+### PrePrompt Hook
+Runs when the system prompt is loaded and injects retrieval policy content.
+- File: `hooks/pre_prompt.py`
+- Purpose: load retrieval instructions and add them to the prompt context
+
+### MCP Server
+Provides retrieval tools over stdio for the current `bot_id`.
+- File: `rag_retrieve_server.py`
+- Purpose: expose `rag-retrieve` and related retrieval tools to the agent
+
+## Directory Structure
+
+```text
+rag-retrieve/
+├── README.md # Skill documentation
+├── .claude-plugin/
+│ └── plugin.json # Hook and MCP server configuration
+├── hooks/
+│ ├── pre_prompt.py # PrePrompt hook script
+│ └── retrieval-policy.md # Retrieval policy injected into the prompt
+├── mcp_common.py # Shared MCP utilities
+├── rag_retrieve_server.py # MCP server entrypoint
+└── rag_retrieve_tools.json # Tool definitions
+```
+
+## `plugin.json` Format
+
+```json
+{
+ "name": "rag-retrieve",
+ "description": "rag-retrieve and table-rag-retrieve",
+ "hooks": {
+ "PrePrompt": [
+ {
+ "type": "command",
+ "command": "python hooks/pre_prompt.py"
+ }
+ ]
+ },
+ "mcpServers": {
+ "rag_retrieve": {
+ "transport": "stdio",
+ "command": "python",
+ "args": [
+        "./rag_retrieve_server.py",
+ "{bot_id}"
+ ]
+ }
+ }
+}
+```
+
+## Hook Script Behavior
+
+The hook script runs as a subprocess, receives input through environment variables, and writes the injected content to stdout.
+
+### Available Environment Variables
+
+| Environment Variable | Description | Applies To |
+|----------------------|-------------|------------|
+| `ASSISTANT_ID` | Bot ID | All hooks |
+| `USER_IDENTIFIER` | User identifier | All hooks |
+| `SESSION_ID` | Session ID | All hooks |
+| `LANGUAGE` | Language code | All hooks |
+| `HOOK_TYPE` | Hook type | All hooks |
+
+### PrePrompt Example
+
+```python
+#!/usr/bin/env python3
+import os
+import sys
+
+
+def main():
+ user_identifier = os.environ.get('USER_IDENTIFIER', '')
+ bot_id = os.environ.get('ASSISTANT_ID', '')
+
+ print(f"## Retrieval Context\n\nUser: {user_identifier}\nBot: {bot_id}")
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
+```
+
+## Example Use Cases
+
+1. **Prompt-time retrieval guidance**: inject retrieval rules before the model starts reasoning
+2. **Bot-specific retrieval setup**: start the MCP server with the current `bot_id`
+3. **Unified retrieval access**: expose RAG and table RAG tools through a single skill
diff --git a/skills/autoload/support/rag-retrieve/hooks/hook-backup.md b/skills/autoload/support/rag-retrieve/hooks/hook-backup.md
new file mode 100644
index 0000000..c90e84f
--- /dev/null
+++ b/skills/autoload/support/rag-retrieve/hooks/hook-backup.md
@@ -0,0 +1,55 @@
+# Retrieval Policy
+
+### 1. Retrieval Order and Tool Selection
+- Follow this section for source choice, tool choice, query rewrite, `top_k`, fallback, result handling, and citations.
+- Use this default retrieval order and execute it sequentially: skill-enabled knowledge retrieval tools > `rag_retrieve` / `table_rag_retrieve`.
+- Do NOT answer from model knowledge first.
+- Do NOT bypass the retrieval flow and inspect local filesystem documents on your own.
+- Do NOT use local filesystem retrieval as a fallback knowledge source.
+- Local filesystem documents are not a recommended retrieval source here because file formats are inconsistent and have not been normalized or parsed for reliable knowledge lookup.
+- Knowledge must be retrieved through the supported knowledge tools only: skill-enabled retrieval scripts, `table_rag_retrieve`, and `rag_retrieve`.
+- When a suitable skill-enabled knowledge retrieval tool is available, use it first.
+- If no suitable skill-enabled retrieval tool is available, or if its result is insufficient, continue with `rag_retrieve` or `table_rag_retrieve`.
+- Use `table_rag_retrieve` first for values, prices, quantities, inventory, specifications, rankings, comparisons, summaries, extraction, lists, tables, name lookup, historical coverage, mixed questions, and unclear cases.
+- Use `rag_retrieve` first only for clearly pure concept, definition, workflow, policy, or explanation questions without structured data needs.
+- After each retrieval step, evaluate sufficiency before moving to the next source. Do NOT run these retrieval sources in parallel.
+
+### 2. Query Preparation
+- Do NOT pass the raw user question unless it already works well for retrieval.
+- Rewrite for recall: extract entity, time scope, attributes, and intent.
+- Add useful variants: synonyms, aliases, abbreviations, related titles, historical names, and category terms.
+- Expand list-style, extraction, overview, historical, roster, timeline, and archive queries more aggressively.
+- Preserve meaning. Do NOT introduce unrelated topics.
+
+### 3. Retrieval Breadth (`top_k`)
+- Apply `top_k` only to `rag_retrieve`. Use the smallest sufficient value, then expand only if coverage is insufficient.
+- Use `30` for simple fact lookup.
+- Use `50` for moderate synthesis, comparison, summarization, or disambiguation.
+- Use `100` for broad recall, such as comprehensive analysis, scattered knowledge, multiple entities or periods, or list / catalog / timeline / roster / overview requests.
+- Raise `top_k` when keyword branches are many or results are too few, repetitive, incomplete, sparse, or too narrow.
+- Use this expansion order: `30 -> 50 -> 100`. If unsure, use `100`.
+
+### 4. Result Evaluation
+- Treat results as insufficient if they are empty, start with `Error:`, say `no excel files found`, are off-topic, miss the core entity or scope, or provide no usable evidence.
+- Also treat results as insufficient when they cover only part of the request, or when full-list, historical, comparison, or mixed data + explanation requests return only partial or truncated coverage.
+
+### 5. Fallback and Sequential Retry
+- If the first retrieval result is insufficient, call the next supported retrieval source in the default order before replying.
+- `table_rag_retrieve` now performs an internal fallback to `rag_retrieve` when it returns `no excel files found`, but this does NOT change the higher-level retrieval order.
+- If `table_rag_retrieve` is insufficient or empty, continue with `rag_retrieve`.
+- If `rag_retrieve` is insufficient or empty, continue with `table_rag_retrieve`.
+- Say no relevant information was found only after all applicable skill-enabled retrieval tools, `rag_retrieve`, and `table_rag_retrieve` have been tried and still do not provide enough evidence.
+- Do NOT reply that no relevant information was found before the supported knowledge retrieval flow has been exhausted.
+
+### 6. Table RAG Result Handling
+- Follow all `[INSTRUCTION]` and `[EXTRA_INSTRUCTION]` content in `table_rag_retrieve` results.
+- If results are truncated, explicitly tell the user total matches (`N+M`), displayed count (`N`), and omitted count (`M`).
+- Cite data sources using filenames from `file_ref_table`.
+
+### 7. Citation Requirements for Retrieved Knowledge
+- When using knowledge from `rag_retrieve` or `table_rag_retrieve`, you MUST generate `` tags.
+- Follow the citation format returned by each tool.
+- Place citations immediately after the paragraph or bullet list that uses the knowledge.
+- Do NOT collect citations at the end.
+- Use 1-2 citations per paragraph or bullet list when possible.
+- If learned knowledge is used, include at least 1 ``.
diff --git a/skills/autoload/support/rag-retrieve/hooks/pre_prompt.py b/skills/autoload/support/rag-retrieve/hooks/pre_prompt.py
new file mode 100644
index 0000000..11f445d
--- /dev/null
+++ b/skills/autoload/support/rag-retrieve/hooks/pre_prompt.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+"""
+PrePrompt Hook - retrieval policy loader
+
+Runs when the system prompt is loaded and reads the sibling
+retrieval-policy.md as the retrieval policy content to inject.
+"""
+import sys
+from pathlib import Path
+
+
+def main():
+ prompt_file = Path(__file__).parent / "retrieval-policy.md"
+ if prompt_file.exists():
+ print(prompt_file.read_text(encoding="utf-8"))
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/skills/autoload/support/rag-retrieve/hooks/retrieval-policy.md b/skills/autoload/support/rag-retrieve/hooks/retrieval-policy.md
new file mode 100644
index 0000000..5ef9572
--- /dev/null
+++ b/skills/autoload/support/rag-retrieve/hooks/retrieval-policy.md
@@ -0,0 +1,80 @@
+# Retrieval Policy
+
+## 0. Task Classification
+
+Classify the request before acting:
+- **Knowledge retrieval** (facts, summaries, comparisons, prices, lists, timelines, extraction, etc.): follow this policy strictly.
+- **Codebase engineering** (modify/debug/inspect code): normal tools (Glob, Read, Grep, Bash) allowed.
+- **Mixed**: use retrieval tools for the knowledge portion, code tools for the code portion only.
+- **Uncertain**: default to knowledge retrieval.
+
+## 1. Critical Enforcement
+
+For knowledge retrieval tasks, **this policy overrides generic codebase exploration behavior**.
+
+- **Prohibited tools**: `Glob`, `Read`, `LS`, Bash (`ls`, `find`, `cat`, `head`, `tail`, `grep`, etc.) — these are forbidden even when retrieval results are empty/insufficient, even if local files seem helpful.
+- **Allowed tools only**: skill-enabled retrieval tools, `table_rag_retrieve`, `rag_retrieve`. No other source for factual answering.
+- Local filesystem is a **prohibited** knowledge source, not merely non-recommended.
+- Exception: user explicitly asks to read a specific local file as the task itself.
+
+## 2. Retrieval Order and Tool Selection
+
+Execute **sequentially, one at a time**. Do NOT run in parallel. Do NOT probe filesystem first.
+
+1. **Skill-enabled retrieval tools** (use first when available)
+2. **`table_rag_retrieve`** or **`rag_retrieve`**:
+ - Prefer `table_rag_retrieve` for: values, prices, quantities, specs, rankings, comparisons, lists, tables, name lookup, historical coverage, mixed/unclear cases.
+ - Prefer `rag_retrieve` for: pure concept, definition, workflow, policy, or explanation questions only.
+
+- Do NOT answer from model knowledge first.
+- After each step, evaluate sufficiency before proceeding.
+
+## 3. Query Preparation
+
+- Do NOT pass raw user question unless it already works well for retrieval.
+- Rewrite for recall: extract entity, time scope, attributes, intent. Add synonyms, aliases, abbreviations, historical names, category terms.
+- Expand list/extraction/overview/timeline queries more aggressively. Preserve meaning.
+
+## 4. Retrieval Breadth (`top_k`)
+
+- Apply `top_k` only to `rag_retrieve`. Use smallest sufficient value, expand if insufficient.
+- `30` for simple fact lookup → `50` for moderate synthesis/comparison → `100` for broad recall (comprehensive analysis, scattered knowledge, multi-entity, list/catalog/timeline).
+- Expansion order: `30 → 50 → 100`. If unsure, use `100`.
+
+## 5. Result Evaluation
+
+Treat as insufficient if: empty, `Error:`, `no excel files found`, off-topic, missing core entity/scope, no usable evidence, partial coverage, or truncated results.
+
+## 6. Fallback and Sequential Retry
+
+On insufficient results, follow this sequence:
+
+1. Rewrite query, retry same tool (once)
+2. Switch to next retrieval source in default order
+3. For `rag_retrieve`, expand `top_k`: `30 → 50 → 100`
+4. `table_rag_retrieve` insufficient → try `rag_retrieve`; `rag_retrieve` insufficient → try `table_rag_retrieve`
+
+- `table_rag_retrieve` internally falls back to `rag_retrieve` on `no excel files found`, but this does NOT change the higher-level order.
+- Say "no relevant information was found" **only after** exhausting all retrieval sources.
+- Do NOT switch to local filesystem inspection at any point.
+
+## 7. Table RAG Result Handling
+
+- Follow all `[INSTRUCTION]` and `[EXTRA_INSTRUCTION]` in results.
+- If truncated: tell user total (`N+M`), displayed (`N`), omitted (`M`).
+- Cite sources using filenames from `file_ref_table`.
+
+## 8. Citation Requirements
+
+- MUST generate `` tags when using retrieval results.
+- Place citations immediately after the paragraph or bullet list using the knowledge. Do NOT collect at end.
+- 1-2 citations per paragraph/bullet. At least 1 citation when using retrieved knowledge.
+
+## 9. Pre-Reply Self-Check
+
+Before replying to a knowledge retrieval task, verify:
+- Used only whitelisted retrieval tools — no local filesystem inspection?
+- Exhausted retrieval flow before concluding "not found"?
+- Citations placed immediately after each relevant paragraph?
+
+If any answer is "no", correct the process first.
diff --git a/skills/autoload/support/rag-retrieve/mcp_common.py b/skills/autoload/support/rag-retrieve/mcp_common.py
new file mode 100644
index 0000000..5bf5935
--- /dev/null
+++ b/skills/autoload/support/rag-retrieve/mcp_common.py
@@ -0,0 +1,251 @@
+#!/usr/bin/env python3
+"""
+MCP服务器通用工具函数
+提供路径处理、文件验证、请求处理等公共功能
+"""
+
+import json
+import os
+import sys
+import asyncio
+from typing import Any, Dict, List, Optional, Union
+import re
+
+def get_allowed_directory():
+ """获取允许访问的目录"""
+ # 优先使用命令行参数传入的dataset_dir
+ if len(sys.argv) > 1:
+ dataset_dir = sys.argv[1]
+ return os.path.abspath(dataset_dir)
+
+ # 从环境变量读取项目数据目录
+ project_dir = os.getenv("PROJECT_DATA_DIR", "./projects/data")
+ return os.path.abspath(project_dir)
+
+
+def resolve_file_path(file_path: str, default_subfolder: str = "default") -> str:
+ """
+ 解析文件路径,支持 folder/document.txt 和 document.txt 两种格式
+
+ Args:
+ file_path: 输入的文件路径
+ default_subfolder: 当只传入文件名时使用的默认子文件夹名称
+
+ Returns:
+ 解析后的完整文件路径
+ """
+ # 如果路径包含文件夹分隔符,直接使用
+ if '/' in file_path or '\\' in file_path:
+ clean_path = file_path.replace('\\', '/')
+
+ # 移除 projects/ 前缀(如果存在)
+ if clean_path.startswith('projects/'):
+ clean_path = clean_path[9:] # 移除 'projects/' 前缀
+ elif clean_path.startswith('./projects/'):
+ clean_path = clean_path[11:] # 移除 './projects/' 前缀
+ else:
+ # 如果只有文件名,添加默认子文件夹
+ clean_path = f"{default_subfolder}/{file_path}"
+
+ # 获取允许的目录
+ project_data_dir = get_allowed_directory()
+
+ # 尝试在项目目录中查找文件
+ full_path = os.path.join(project_data_dir, clean_path.lstrip('./'))
+ if os.path.exists(full_path):
+ return full_path
+
+ # 如果直接路径不存在,尝试递归查找
+ found = find_file_in_project(clean_path, project_data_dir)
+ if found:
+ return found
+
+ # 如果是纯文件名且在default子文件夹中不存在,尝试在根目录查找
+ if '/' not in file_path and '\\' not in file_path:
+ root_path = os.path.join(project_data_dir, file_path)
+ if os.path.exists(root_path):
+ return root_path
+
+ raise FileNotFoundError(f"File not found: {file_path} (searched in {project_data_dir})")
+
+
+def find_file_in_project(filename: str, project_dir: str) -> Optional[str]:
+ """在项目目录中递归查找文件"""
+ # 如果filename包含路径,只搜索指定的路径
+ if '/' in filename:
+ parts = filename.split('/')
+ target_file = parts[-1]
+ search_dir = os.path.join(project_dir, *parts[:-1])
+
+ if os.path.exists(search_dir):
+ target_path = os.path.join(search_dir, target_file)
+ if os.path.exists(target_path):
+ return target_path
+ else:
+ # 纯文件名,递归搜索整个项目目录
+ for root, dirs, files in os.walk(project_dir):
+ if filename in files:
+ return os.path.join(root, filename)
+ return None
+
+
+def load_tools_from_json(tools_file_name: str) -> List[Dict[str, Any]]:
+ """从 JSON 文件加载工具定义"""
+ try:
+ tools_file = os.path.join(os.path.dirname(__file__), tools_file_name)
+ if os.path.exists(tools_file):
+ with open(tools_file, 'r', encoding='utf-8') as f:
+ return json.load(f)
+ else:
+ # 如果 JSON 文件不存在,使用默认定义
+ return []
+ except Exception as e:
+ print(f"Warning: Unable to load tool definition JSON file: {str(e)}")
+ return []
+
+
+def create_error_response(request_id: Any, code: int, message: str) -> Dict[str, Any]:
+ """创建标准化的错误响应"""
+ return {
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "error": {
+ "code": code,
+ "message": message
+ }
+ }
+
+
+def create_success_response(request_id: Any, result: Any) -> Dict[str, Any]:
+ """创建标准化的成功响应"""
+ return {
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": result
+ }
+
+
+def create_initialize_response(request_id: Any, server_name: str, server_version: str = "1.0.0") -> Dict[str, Any]:
+ """创建标准化的初始化响应"""
+ return {
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": {
+ "protocolVersion": "2024-11-05",
+ "capabilities": {
+ "tools": {}
+ },
+ "serverInfo": {
+ "name": server_name,
+ "version": server_version
+ }
+ }
+ }
+
+
+def create_ping_response(request_id: Any) -> Dict[str, Any]:
+ """创建标准化的ping响应"""
+ return {
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": {
+ "pong": True
+ }
+ }
+
+
+def create_tools_list_response(request_id: Any, tools: List[Dict[str, Any]]) -> Dict[str, Any]:
+ """创建标准化的工具列表响应"""
+ return {
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": {
+ "tools": tools
+ }
+ }
+
+
+def is_regex_pattern(pattern: str) -> bool:
+ """检测字符串是否为正则表达式模式"""
+ # 检查 /pattern/ 格式
+ if pattern.startswith('/') and pattern.endswith('/') and len(pattern) > 2:
+ return True
+
+ # 检查 r"pattern" 或 r'pattern' 格式
+ if pattern.startswith(('r"', "r'")) and pattern.endswith(('"', "'")) and len(pattern) > 3:
+ return True
+
+ # 检查是否包含正则特殊字符
+ regex_chars = {'*', '+', '?', '|', '(', ')', '[', ']', '{', '}', '^', '$', '\\', '.'}
+ return any(char in pattern for char in regex_chars)
+
+
+def compile_pattern(pattern: str) -> Union[re.Pattern, str, None]:
+ """编译正则表达式模式,如果不是正则则返回原字符串"""
+ if not is_regex_pattern(pattern):
+ return pattern
+
+ try:
+ # 处理 /pattern/ 格式
+ if pattern.startswith('/') and pattern.endswith('/'):
+ regex_body = pattern[1:-1]
+ return re.compile(regex_body)
+
+ # 处理 r"pattern" 或 r'pattern' 格式
+ if pattern.startswith(('r"', "r'")) and pattern.endswith(('"', "'")):
+ regex_body = pattern[2:-1]
+ return re.compile(regex_body)
+
+ # 直接编译包含正则字符的字符串
+ return re.compile(pattern)
+ except re.error as e:
+ # 如果编译失败,返回None表示无效的正则
+ print(f"Warning: Regular expression '{pattern}' compilation failed: {e}")
+ return None
+
+
+async def handle_mcp_streaming(request_handler):
+ """处理MCP请求的标准主循环"""
+ try:
+ while True:
+ # Read from stdin
+ line = await asyncio.get_event_loop().run_in_executor(None, sys.stdin.readline)
+ if not line:
+ break
+
+ line = line.strip()
+ if not line:
+ continue
+
+ try:
+ request = json.loads(line)
+ response = await request_handler(request)
+
+ # Write to stdout
+ sys.stdout.write(json.dumps(response, ensure_ascii=False) + "\n")
+ sys.stdout.flush()
+
+ except json.JSONDecodeError:
+ error_response = {
+ "jsonrpc": "2.0",
+ "error": {
+ "code": -32700,
+ "message": "Parse error"
+ }
+ }
+ sys.stdout.write(json.dumps(error_response, ensure_ascii=False) + "\n")
+ sys.stdout.flush()
+
+ except Exception as e:
+ error_response = {
+ "jsonrpc": "2.0",
+ "error": {
+ "code": -32603,
+ "message": f"Internal error: {str(e)}"
+ }
+ }
+ sys.stdout.write(json.dumps(error_response, ensure_ascii=False) + "\n")
+ sys.stdout.flush()
+
+ except KeyboardInterrupt:
+ pass
diff --git a/skills/autoload/support/rag-retrieve/rag_retrieve_server.py b/skills/autoload/support/rag-retrieve/rag_retrieve_server.py
new file mode 100644
index 0000000..1ef7a2d
--- /dev/null
+++ b/skills/autoload/support/rag-retrieve/rag_retrieve_server.py
@@ -0,0 +1,351 @@
+#!/usr/bin/env python3
+"""
+RAG检索MCP服务器
+调用本地RAG API进行文档检索
+"""
+
+import asyncio
+import hashlib
+import json
+import re
+import sys
+import os
+from typing import Any, Dict, List
+
+try:
+ import requests
+except ImportError:
+ print("Error: requests module is required. Please install it with: pip install requests")
+ sys.exit(1)
+
+from mcp_common import (
+ create_error_response,
+ create_success_response,
+ create_initialize_response,
+ create_ping_response,
+ create_tools_list_response,
+ load_tools_from_json,
+ handle_mcp_streaming
+)
+BACKEND_HOST = os.getenv("BACKEND_HOST", "https://api-dev.gptbase.ai")
+MASTERKEY = os.getenv("MASTERKEY", "master")
+
+# Citation instruction prefixes injected into tool results
+DOCUMENT_CITATION_INSTRUCTIONS = """
+When using the retrieved knowledge below, you MUST add XML citation tags for factual claims.
+
+## Document Knowledge
+Format: ``
+- Use `file` attribute with the UUID from document markers
+- Use `filename` attribute with the actual filename from document markers
+- Use `page` attribute (singular) with the page number
+- `page` MUST be 0-based and must match the `pages:` values shown in the learned knowledge context
+
+## Web Page Knowledge
+Format: ``
+- Use `url` attribute with the web page URL from the source metadata
+- Do not use `file`, `filename`, or `page` attributes for web sources
+- If content is grounded in a web source, prefer a web citation with `url` over a file citation
+
+## Placement Rules
+- Citations MUST appear IMMEDIATELY AFTER the paragraph or bullet list that uses the knowledge
+- NEVER collect all citations and place them at the end of your response
+- Limit to 1-2 citations per paragraph/bullet list
+- If your answer uses learned knowledge, you MUST generate at least 1 `` in the response
+
+
+"""
+
+TABLE_CITATION_INSTRUCTIONS = """
+When using the retrieved table knowledge below, you MUST add XML citation tags for factual claims.
+
+Format: ``
+- Parse `__src`: `F1S2R5` = file_ref F1, sheet 2, row 5
+- Look up file_id in `file_ref_table`
+- Combine same-sheet rows into one citation: `rows=[2, 4, 6]`
+- MANDATORY: Create SEPARATE citation for EACH (file, sheet) combination
+- NEVER put on the same line as a bullet point or table row
+- Citations MUST be on separate lines AFTER the complete list/table
+- NEVER include the `__src` column in your response - it is internal metadata only
+- Citations MUST appear IMMEDIATELY AFTER the paragraph or bullet list that uses the knowledge
+- NEVER collect all citations and place them at the end of your response
+
+
+"""
+
+def rag_retrieve(query: str, top_k: int = 100) -> Dict[str, Any]:
+ """调用RAG检索API"""
+ try:
+ bot_id = ""
+ if len(sys.argv) > 1:
+ bot_id = sys.argv[1]
+
+ url = f"{BACKEND_HOST}/v1/rag_retrieve/{bot_id}"
+ if not url:
+ return {
+ "content": [
+ {
+ "type": "text",
+ "text": "Error: RAG API URL not provided. Please provide URL as command line argument."
+ }
+ ]
+ }
+
+ # 获取masterkey并生成认证token
+ masterkey = MASTERKEY
+ token_input = f"{masterkey}:{bot_id}"
+ auth_token = hashlib.md5(token_input.encode()).hexdigest()
+
+ headers = {
+ "content-type": "application/json",
+ "authorization": f"Bearer {auth_token}"
+ }
+ data = {
+ "query": query,
+ "top_k": top_k
+ }
+
+ # 发送POST请求
+ response = requests.post(url, json=data, headers=headers, timeout=30)
+
+ if response.status_code != 200:
+ return {
+ "content": [
+ {
+ "type": "text",
+ "text": f"Error: RAG API returned status code {response.status_code}. Response: {response.text}"
+ }
+ ]
+ }
+
+ # 解析响应
+ try:
+ response_data = response.json()
+ except json.JSONDecodeError as e:
+ return {
+ "content": [
+ {
+ "type": "text",
+ "text": f"Error: Failed to parse API response as JSON. Error: {str(e)}, Raw response: {response.text}"
+ }
+ ]
+ }
+
+ # 提取markdown字段
+ if "markdown" in response_data:
+ markdown_content = response_data["markdown"]
+ return {
+ "content": [
+ {
+ "type": "text",
+ "text": DOCUMENT_CITATION_INSTRUCTIONS + markdown_content
+ }
+ ]
+ }
+ else:
+ return {
+ "content": [
+ {
+ "type": "text",
+ "text": f"Error: 'markdown' field not found in API response. Response: {json.dumps(response_data, indent=2, ensure_ascii=False)}"
+ }
+ ]
+ }
+
+ except requests.exceptions.RequestException as e:
+ return {
+ "content": [
+ {
+ "type": "text",
+ "text": f"Error: Failed to connect to RAG API. {str(e)}"
+ }
+ ]
+ }
+ except Exception as e:
+ return {
+ "content": [
+ {
+ "type": "text",
+ "text": f"Error: {str(e)}"
+ }
+ ]
+ }
+
+
+def table_rag_retrieve(query: str) -> Dict[str, Any]:
+ """调用Table RAG检索API"""
+ try:
+ bot_id = ""
+ if len(sys.argv) > 1:
+ bot_id = sys.argv[1]
+
+ url = f"{BACKEND_HOST}/v1/table_rag_retrieve/{bot_id}"
+
+ masterkey = MASTERKEY
+ token_input = f"{masterkey}:{bot_id}"
+ auth_token = hashlib.md5(token_input.encode()).hexdigest()
+
+ headers = {
+ "content-type": "application/json",
+ "authorization": f"Bearer {auth_token}"
+ }
+ data = {
+ "query": query,
+ }
+
+ response = requests.post(url, json=data, headers=headers, timeout=300)
+
+ if response.status_code != 200:
+ return {
+ "content": [
+ {
+ "type": "text",
+ "text": f"Error: Table RAG API returned status code {response.status_code}. Response: {response.text}"
+ }
+ ]
+ }
+
+ try:
+ response_data = response.json()
+ except json.JSONDecodeError as e:
+ return {
+ "content": [
+ {
+ "type": "text",
+ "text": f"Error: Failed to parse API response as JSON. Error: {str(e)}, Raw response: {response.text}"
+ }
+ ]
+ }
+
+ if "markdown" in response_data:
+ markdown_content = response_data["markdown"]
+ if re.search(r"^no excel files found", markdown_content, re.IGNORECASE):
+ rag_result = rag_retrieve(query)
+ content = rag_result.get("content", [])
+ if content and content[0].get("type") == "text":
+ content[0]["text"] = "No table_rag_retrieve results were found. The content below is the fallback result from rag_retrieve:\n\n" + content[0]["text"]
+ return rag_result
+ return {
+ "content": [
+ {
+ "type": "text",
+ "text": TABLE_CITATION_INSTRUCTIONS + markdown_content
+ }
+ ]
+ }
+ else:
+ return {
+ "content": [
+ {
+ "type": "text",
+ "text": f"Error: 'markdown' field not found in API response. Response: {json.dumps(response_data, indent=2, ensure_ascii=False)}"
+ }
+ ]
+ }
+
+ except requests.exceptions.RequestException as e:
+ return {
+ "content": [
+ {
+ "type": "text",
+ "text": f"Error: Failed to connect to Table RAG API. {str(e)}"
+ }
+ ]
+ }
+ except Exception as e:
+ return {
+ "content": [
+ {
+ "type": "text",
+ "text": f"Error: {str(e)}"
+ }
+ ]
+ }
+
+
+async def handle_request(request: Dict[str, Any]) -> Dict[str, Any]:
+ """Handle MCP request"""
+ try:
+ method = request.get("method")
+ params = request.get("params", {})
+ request_id = request.get("id")
+
+ if method == "initialize":
+ return create_initialize_response(request_id, "rag-retrieve")
+
+ elif method == "ping":
+ return create_ping_response(request_id)
+
+ elif method == "tools/list":
+ # 从 JSON 文件加载工具定义
+ tools = load_tools_from_json("rag_retrieve_tools.json")
+ if not tools:
+ # 如果 JSON 文件不存在,使用默认定义
+ tools = [
+ {
+ "name": "rag_retrieve",
+ "description": "调用RAG检索API,根据查询内容检索相关文档。返回包含相关内容的markdown格式结果。",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "query": {
+ "type": "string",
+ "description": "检索查询内容"
+ }
+ },
+ "required": ["query"]
+ }
+ }
+ ]
+ return create_tools_list_response(request_id, tools)
+
+ elif method == "tools/call":
+ tool_name = params.get("name")
+ arguments = params.get("arguments", {})
+
+ if tool_name == "rag_retrieve":
+ query = arguments.get("query", "")
+ top_k = arguments.get("top_k", 100)
+
+ if not query:
+ return create_error_response(request_id, -32602, "Missing required parameter: query")
+
+ result = rag_retrieve(query, top_k)
+
+ return {
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": result
+ }
+
+ elif tool_name == "table_rag_retrieve":
+ query = arguments.get("query", "")
+
+ if not query:
+ return create_error_response(request_id, -32602, "Missing required parameter: query")
+
+ result = table_rag_retrieve(query)
+
+ return {
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": result
+ }
+
+ else:
+ return create_error_response(request_id, -32601, f"Unknown tool: {tool_name}")
+
+ else:
+ return create_error_response(request_id, -32601, f"Unknown method: {method}")
+
+ except Exception as e:
+ return create_error_response(request.get("id"), -32603, f"Internal error: {str(e)}")
+
+
+async def main():
+ """Main entry point."""
+ await handle_mcp_streaming(handle_request)
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/skills/autoload/support/rag-retrieve/rag_retrieve_tools.json b/skills/autoload/support/rag-retrieve/rag_retrieve_tools.json
new file mode 100644
index 0000000..0db0f70
--- /dev/null
+++ b/skills/autoload/support/rag-retrieve/rag_retrieve_tools.json
@@ -0,0 +1,35 @@
+[
+ {
+ "name": "rag_retrieve",
+ "description": "Retrieve relevant documents from the knowledge base. Returns markdown results. Use this tool first only for clearly pure concept, definition, workflow, policy, or explanation questions without structured data needs. If the result is insufficient, try table_rag_retrieve before replying with no result.",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "query": {
+ "type": "string",
+ "description": "Retrieval query content. Rewrite the query when needed to improve recall."
+ },
+ "top_k": {
+ "type": "integer",
+ "description": "Number of top results to retrieve. Choose dynamically based on retrieval breadth and coverage needs.",
+ "default": 100
+ }
+ },
+ "required": ["query"]
+ }
+ },
+ {
+ "name": "table_rag_retrieve",
+ "description": "Retrieve relevant table data from Excel or spreadsheet files in the knowledge base. Returns markdown results. Use this tool first for structured data, lists, statistics, extraction, mixed questions, and unclear cases. If the result is insufficient, try rag_retrieve before replying with no result.",
+ "inputSchema": {
+ "type": "object",
+ "properties": {
+ "query": {
+ "type": "string",
+ "description": "Retrieval query content for table data. Rewrite the query when needed to improve recall."
+ }
+ },
+ "required": ["query"]
+ }
+ }
+]
diff --git a/skills_developing/catalog-search-agent/.claude-plugin/plugin.json b/skills/developing/catalog-search-agent/.claude-plugin/plugin.json
similarity index 100%
rename from skills_developing/catalog-search-agent/.claude-plugin/plugin.json
rename to skills/developing/catalog-search-agent/.claude-plugin/plugin.json
diff --git a/skills_developing/catalog-search-agent/README.md b/skills/developing/catalog-search-agent/README.md
similarity index 100%
rename from skills_developing/catalog-search-agent/README.md
rename to skills/developing/catalog-search-agent/README.md
diff --git a/skills_developing/catalog-search-agent/skills/catalog-search-agent-0123.zip b/skills/developing/catalog-search-agent/skills/catalog-search-agent-0123.zip
similarity index 100%
rename from skills_developing/catalog-search-agent/skills/catalog-search-agent-0123.zip
rename to skills/developing/catalog-search-agent/skills/catalog-search-agent-0123.zip
diff --git a/skills_developing/catalog-search-agent/skills/catalog-search-agent-embedding.zip b/skills/developing/catalog-search-agent/skills/catalog-search-agent-embedding.zip
similarity index 100%
rename from skills_developing/catalog-search-agent/skills/catalog-search-agent-embedding.zip
rename to skills/developing/catalog-search-agent/skills/catalog-search-agent-embedding.zip
diff --git a/skills_developing/catalog-search-agent/skills/catalog-search-agent/SKILL.md b/skills/developing/catalog-search-agent/skills/catalog-search-agent/SKILL.md
similarity index 100%
rename from skills_developing/catalog-search-agent/skills/catalog-search-agent/SKILL.md
rename to skills/developing/catalog-search-agent/skills/catalog-search-agent/SKILL.md
diff --git a/skills_developing/catalog-search-agent/skills/catalog-search-agent/scripts/multi_keyword_search.py b/skills/developing/catalog-search-agent/skills/catalog-search-agent/scripts/multi_keyword_search.py
similarity index 100%
rename from skills_developing/catalog-search-agent/skills/catalog-search-agent/scripts/multi_keyword_search.py
rename to skills/developing/catalog-search-agent/skills/catalog-search-agent/scripts/multi_keyword_search.py
diff --git a/skills_developing/catalog-search-agent/skills/catalog-search-agent/scripts/requirements.txt b/skills/developing/catalog-search-agent/skills/catalog-search-agent/scripts/requirements.txt
similarity index 100%
rename from skills_developing/catalog-search-agent/skills/catalog-search-agent/scripts/requirements.txt
rename to skills/developing/catalog-search-agent/skills/catalog-search-agent/scripts/requirements.txt
diff --git a/skills_developing/managing-scripts/SKILL.md b/skills/developing/managing-scripts/SKILL.md
similarity index 100%
rename from skills_developing/managing-scripts/SKILL.md
rename to skills/developing/managing-scripts/SKILL.md
diff --git a/skills_developing/rag-retrieve-cli/Retrieval_Policy.md b/skills/developing/rag-retrieve-cli/Retrieval_Policy.md
similarity index 100%
rename from skills_developing/rag-retrieve-cli/Retrieval_Policy.md
rename to skills/developing/rag-retrieve-cli/Retrieval_Policy.md
diff --git a/skills_developing/rag-retrieve-cli/SKILL.md b/skills/developing/rag-retrieve-cli/SKILL.md
similarity index 100%
rename from skills_developing/rag-retrieve-cli/SKILL.md
rename to skills/developing/rag-retrieve-cli/SKILL.md
diff --git a/skills_developing/rag-retrieve-cli/scripts/rag_retrieve.py b/skills/developing/rag-retrieve-cli/scripts/rag_retrieve.py
similarity index 100%
rename from skills_developing/rag-retrieve-cli/scripts/rag_retrieve.py
rename to skills/developing/rag-retrieve-cli/scripts/rag_retrieve.py
diff --git a/skills_developing/rag-retrieve-cli/skill.yaml b/skills/developing/rag-retrieve-cli/skill.yaml
similarity index 100%
rename from skills_developing/rag-retrieve-cli/skill.yaml
rename to skills/developing/rag-retrieve-cli/skill.yaml
diff --git a/skills_developing/user-context-loader/.claude-plugin/plugin.json b/skills/developing/user-context-loader/.claude-plugin/plugin.json
similarity index 100%
rename from skills_developing/user-context-loader/.claude-plugin/plugin.json
rename to skills/developing/user-context-loader/.claude-plugin/plugin.json
diff --git a/skills_developing/user-context-loader/README.md b/skills/developing/user-context-loader/README.md
similarity index 100%
rename from skills_developing/user-context-loader/README.md
rename to skills/developing/user-context-loader/README.md
diff --git a/skills_developing/user-context-loader/hooks/memory_prompt.md b/skills/developing/user-context-loader/hooks/memory_prompt.md
similarity index 100%
rename from skills_developing/user-context-loader/hooks/memory_prompt.md
rename to skills/developing/user-context-loader/hooks/memory_prompt.md
diff --git a/skills_developing/user-context-loader/hooks/post_agent.py b/skills/developing/user-context-loader/hooks/post_agent.py
similarity index 100%
rename from skills_developing/user-context-loader/hooks/post_agent.py
rename to skills/developing/user-context-loader/hooks/post_agent.py
diff --git a/skills_developing/user-context-loader/hooks/pre_memory_prompt.py b/skills/developing/user-context-loader/hooks/pre_memory_prompt.py
similarity index 100%
rename from skills_developing/user-context-loader/hooks/pre_memory_prompt.py
rename to skills/developing/user-context-loader/hooks/pre_memory_prompt.py
diff --git a/skills_developing/user-context-loader/hooks/pre_prompt.py b/skills/developing/user-context-loader/hooks/pre_prompt.py
similarity index 100%
rename from skills_developing/user-context-loader/hooks/pre_prompt.py
rename to skills/developing/user-context-loader/hooks/pre_prompt.py
diff --git a/skills_developing/user-context-loader/hooks/pre_save.py b/skills/developing/user-context-loader/hooks/pre_save.py
similarity index 100%
rename from skills_developing/user-context-loader/hooks/pre_save.py
rename to skills/developing/user-context-loader/hooks/pre_save.py
diff --git a/utils/multi_project_manager.py b/utils/multi_project_manager.py
index f68f3ba..250777a 100644
--- a/utils/multi_project_manager.py
+++ b/utils/multi_project_manager.py
@@ -322,10 +322,22 @@ def create_robot_project(dataset_ids: List[str], bot_id: str, force_rebuild: boo
"""
skills = list(skills or [])
- has_rag_retrieve = any(re.search(r"rag-retrieve", skill) for skill in skills)
- if dataset_ids and not has_rag_retrieve:
- skills.append("@skills_autoload/rag-retrieve")
- logger.info("Auto loaded skill '@skills_autoload/rag-retrieve' because dataset_ids is not empty")
+ existing_skill_names = {Path(skill.lstrip("@")).name for skill in skills}
+ if os.path.isabs(settings.SKILLS_DIR):
+ autoload_skills_dir = Path(settings.SKILLS_DIR) / "autoload" / settings.SKILLS_SUBDIR
+ else:
+ autoload_skills_dir = project_path.parent / settings.SKILLS_DIR / "autoload" / settings.SKILLS_SUBDIR
+
+ if autoload_skills_dir.exists():
+ for item in sorted(autoload_skills_dir.iterdir()):
+ if not item.is_dir() or item.name in existing_skill_names:
+ continue
+ skill_path = f"@skills/autoload/{settings.SKILLS_SUBDIR}/{item.name}"
+ skills.append(skill_path)
+ existing_skill_names.add(item.name)
+ logger.info(f"Auto loaded skill '{skill_path}' from {autoload_skills_dir}")
+ else:
+ logger.warning(f"Autoload skills directory does not exist: {autoload_skills_dir}")
logger.info(f"Ensuring robot project exists: {bot_id}, skills: {skills}")
@@ -382,13 +394,13 @@ def _extract_skills_to_robot(bot_id: str, skills: List[str], project_path: Path)
- 如果是简单名称(如 "rag-retrieve"),从以下目录按优先级顺序查找:
1. projects/uploads/{bot_id}/skills/
2. skills/common/
3. skills/{SKILLS_SUBDIR}/
- - 如果是以 @ 开头的仓库相对路径(如 "@skills_autoload/rag-retrieve"),则从仓库根目录直接解析
+ - 如果是以 @ 开头的仓库相对路径(如 "@skills/autoload/support/rag-retrieve"),则从仓库根目录直接解析
- 搜索目录优先级:先搜索 projects/uploads/{bot_id}/skills/,再搜索 skills/{SKILLS_SUBDIR}/
+ 搜索目录优先级:先搜索 projects/uploads/{bot_id}/skills/,再搜索 skills/common,最后搜索 skills/{SKILLS_SUBDIR}
Args:
bot_id: 机器人 ID
- skills: 技能文件名列表(如 ["rag-retrieve", "@skills_autoload/rag-retrieve", "projects/uploads/{bot_id}/skills/rag-retrieve"])
+ skills: 技能文件名列表(如 ["rag-retrieve", "@skills/autoload/support/rag-retrieve", "projects/uploads/{bot_id}/skills/rag-retrieve"])
project_path: 项目路径
"""
# skills 源目录(按优先级顺序)