diff --git a/.features/skill/MEMORY.md b/.features/skill/MEMORY.md
index c3e470f..b8a76ba 100644
--- a/.features/skill/MEMORY.md
+++ b/.features/skill/MEMORY.md
@@ -23,6 +23,7 @@ Skill 系统支持两种来源:官方 skills (`./skills/`) 和用户 skills (`
 
 - 2026-04-16: 为 `auto-daily-summary` 和 `competitor-news-intel` 新增 Python CLI 脚本 MVP,统一采用 `argparse + JSON stdout` 模式
 - 2026-04-16: 新增 6 个纯 `SKILL.md` 型业务 skill:`market-academic-insight`、`financial-report-generator`、`contract-document-generator`、`sales-decision-report`、`auto-daily-summary`、`competitor-news-intel`
+- 2026-04-19: `create_robot_project` 的 autoload 去重和 stale 清理补强,autoload 目录也纳入 managed 清理,避免 `rag-retrieve-only` 场景下旧的 `rag-retrieve` 残留
 - 2026-04-18: `create_robot_project` 改为自动加载 `skills/autoload/{SKILLS_SUBDIR}` 下所有 skill,并跳过已显式传入的同名 skill
 - 2026-04-18: `/api/v1/skill/list` 的官方库改为同时读取 `skills/common` 和 `skills/{SKILLS_SUBDIR}`,并按目录顺序去重
 - 2026-04-18: `_extract_skills_to_robot` 改为通过环境变量 `SKILLS_SUBDIR` 选择官方 skills 子目录,默认使用 `skills/common`
@@ -37,7 +38,7 @@ Skill 系统支持两种来源:官方 skills (`./skills/`) 和用户 skills (`
 - ⚠️ `auto-daily-summary` 需要特别注意中文分句、action 边界截断、risk 窗口裁剪,否则容易把整句/整段吞进去
 - ⚠️ `competitor-news-intel` 的 payload 校验应按命令拆分(collect/analyze/run),不要共用一套最小校验
 - ⚠️ `competitor-news-intel` 的 `collect/run` 依赖 `BAIDU_API_KEY`;无该环境变量时应返回稳定错误 JSON,不要静默降级
-
+- ⚠️ `create_robot_project` 的 autoload 去重是“包含匹配”,只要传入的 skill 字符串里包含 autoload skill 名,就不会重复自动加载
 - ⚠️ `_extract_skills_to_robot` 只会从 `skills/{SKILLS_SUBDIR}` 读取官方 skills,默认是 `common`
 - ⚠️ 执行脚本必须使用绝对路径
 
diff --git a/utils/multi_project_manager.py b/utils/multi_project_manager.py
index 2714b03..a4c4137 100644
--- a/utils/multi_project_manager.py
+++ b/utils/multi_project_manager.py
@@ -322,8 +322,15 @@ def create_robot_project(dataset_ids: List[str], bot_id: str, force_rebuild: boo
         str: 机器人项目目录路径
     """
 
+    def _skill_matches_autoload(skill: str, autoload_skill_name: str) -> bool:
+        normalized_skill = Path(skill.lstrip("@")).name.lower()
+        normalized_autoload_skill_name = autoload_skill_name.lower()
+        if re.search(re.escape(normalized_autoload_skill_name), normalized_skill):
+            return True
+        autoload_prefix = normalized_autoload_skill_name.split("-")[0]
+        return bool(autoload_prefix and re.search(re.escape(autoload_prefix), normalized_skill))
+
     skills = list(skills or [])
-    existing_skill_names = {Path(skill.lstrip("@")).name for skill in skills}
     if os.path.isabs(settings.SKILLS_DIR):
         autoload_skills_dir = Path(settings.SKILLS_DIR) / "autoload" / settings.SKILLS_SUBDIR
     else:
@@ -331,11 +338,10 @@ def create_robot_project(dataset_ids: List[str], bot_id: str, force_rebuild: boo
 
     if autoload_skills_dir.exists():
         for item in sorted(autoload_skills_dir.iterdir()):
-            if not item.is_dir() or item.name in existing_skill_names:
+            if not item.is_dir() or any(_skill_matches_autoload(skill, item.name) for skill in skills):
                 continue
             skill_path = f"@skills/autoload/{settings.SKILLS_SUBDIR}/{item.name}"
             skills.append(skill_path)
-            existing_skill_names.add(item.name)
             logger.info(f"Auto loaded skill '{skill_path}' from {autoload_skills_dir}")
     else:
         logger.warning(f"Autoload skills directory does not exist: {autoload_skills_dir}")
@@ -408,6 +414,7 @@ def _extract_skills_to_robot(bot_id: str, skills: List[str], project_path: Path)
     # skills 源目录(按优先级顺序)
     repo_root = Path(__file__).resolve().parent.parent
     official_skills_dir = repo_root / "skills" / settings.SKILLS_SUBDIR
+    autoload_skills_dir = repo_root / "skills" / "autoload" / settings.SKILLS_SUBDIR
     if not official_skills_dir.exists():
         logger.warning(f"Official skills directory does not exist: {official_skills_dir}")
     skills_source_dirs = [
@@ -415,12 +422,13 @@ def _extract_skills_to_robot(bot_id: str, skills: List[str], project_path: Path)
         repo_root / "skills" / "common",
         official_skills_dir,
     ]
+    managed_skill_dirs = [*skills_source_dirs, autoload_skills_dir]
     skills_target_dir = project_path / "robot" / bot_id / "skills"
     skills_target_dir.mkdir(parents=True, exist_ok=True)
     logger.info(f"Copying skills to {skills_target_dir}")
 
     managed_skill_names = set()
-    for base_dir in skills_source_dirs:
+    for base_dir in managed_skill_dirs:
        if not base_dir.exists():
             continue
         for item in base_dir.iterdir():