add skills

朱潮 2025-12-22 23:47:12 +08:00
parent 95786d559e
commit aaad9df20a
10 changed files with 1248 additions and 49 deletions

.vscode/launch.json (vendored, 3 changes)

@@ -9,7 +9,8 @@
"console": "integratedTerminal",
"cwd": "${workspaceFolder}",
"envFile": "${workspaceFolder}/.env",
"python": "${workspaceFolder}/.venv/bin/python"
"python": "${workspaceFolder}/.venv/bin/python",
"justMyCode": false
}
]
}


@@ -4,6 +4,7 @@ import time
from typing import Any, Dict
from langchain.chat_models import init_chat_model
# from deepagents import create_deep_agent
from deepagents_cli.agent import create_cli_agent
from langchain.agents import create_agent
from langchain.agents.middleware import SummarizationMiddleware
from langchain_mcp_adapters.client import MultiServerMCPClient
@@ -117,7 +118,6 @@ async def init_agent(config: AgentConfig):
# Detect the provider, or use the one explicitly specified
model_provider, base_url = detect_provider(config.model_name, config.model_server)
# Build the model parameters
model_kwargs = {
"model": config.model_name,
@@ -130,39 +130,48 @@ async def init_agent(config: AgentConfig):
model_kwargs.update(config.generate_cfg)
llm_instance = init_chat_model(**model_kwargs)
# Build the middleware list
middleware = []
# Only add GuidelineMiddleware when enable_thinking is True
if config.enable_thinking:
middleware.append(GuidelineMiddleware(llm_instance, config, system_prompt))
# Add the middleware that limits tool output length
tool_output_middleware = ToolOutputLengthMiddleware(
max_length=getattr(config.generate_cfg, 'tool_output_max_length', None) if config.generate_cfg else None or TOOL_OUTPUT_MAX_LENGTH,
truncation_strategy=getattr(config.generate_cfg, 'tool_output_truncation_strategy', 'smart') if config.generate_cfg else 'smart',
tool_filters=getattr(config.generate_cfg, 'tool_output_filters', None) if config.generate_cfg else None, # optionally restrict to specific tools
exclude_tools=getattr(config.generate_cfg, 'tool_output_exclude', []) if config.generate_cfg else [], # tools to exclude
preserve_code_blocks=getattr(config.generate_cfg, 'preserve_code_blocks', True) if config.generate_cfg else True,
preserve_json=getattr(config.generate_cfg, 'preserve_json', True) if config.generate_cfg else True
)
middleware.append(tool_output_middleware)
if checkpointer:
summarization_middleware = SummarizationMiddleware(
if config.robot_type == "deep_agent":
# Create the agent with DeepAgentX
agent, composite_backend = create_cli_agent(
model=llm_instance,
max_tokens_before_summary=SUMMARIZATION_MAX_TOKENS,
messages_to_keep=20, # keep the 20 most recent messages after summarization
summary_prompt="请简洁地总结以上对话的要点,包括重要的用户信息、讨论过的话题和关键结论。"
assistant_id=config.bot_id,
tools=mcp_tools,
auto_approve=True,
)
middleware.append(summarization_middleware)
else:
# Build the middleware list
middleware = []
# Only add GuidelineMiddleware when enable_thinking is True
if config.enable_thinking:
middleware.append(GuidelineMiddleware(llm_instance, config, system_prompt))
agent = create_agent(
model=llm_instance,
system_prompt=system_prompt,
tools=mcp_tools,
middleware=middleware,
checkpointer=checkpointer # pass the checkpointer to enable persistence
)
# Add the middleware that limits tool output length
tool_output_middleware = ToolOutputLengthMiddleware(
max_length=getattr(config.generate_cfg, 'tool_output_max_length', None) if config.generate_cfg else None or TOOL_OUTPUT_MAX_LENGTH,
truncation_strategy=getattr(config.generate_cfg, 'tool_output_truncation_strategy', 'smart') if config.generate_cfg else 'smart',
tool_filters=getattr(config.generate_cfg, 'tool_output_filters', None) if config.generate_cfg else None, # optionally restrict to specific tools
exclude_tools=getattr(config.generate_cfg, 'tool_output_exclude', []) if config.generate_cfg else [], # tools to exclude
preserve_code_blocks=getattr(config.generate_cfg, 'preserve_code_blocks', True) if config.generate_cfg else True,
preserve_json=getattr(config.generate_cfg, 'preserve_json', True) if config.generate_cfg else True
)
middleware.append(tool_output_middleware)
if checkpointer:
summarization_middleware = SummarizationMiddleware(
model=llm_instance,
max_tokens_before_summary=SUMMARIZATION_MAX_TOKENS,
messages_to_keep=20, # keep the 20 most recent messages after summarization
summary_prompt="请简洁地总结以上对话的要点,包括重要的用户信息、讨论过的话题和关键结论。"
)
middleware.append(summarization_middleware)
agent = create_agent(
model=llm_instance,
system_prompt=system_prompt,
tools=mcp_tools,
middleware=middleware,
checkpointer=checkpointer # pass the checkpointer to enable persistence
)
# If a cache key exists, add the agent to the cache
if cache_key:
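
For orientation, a minimal sketch (not part of the commit) of the agent-selection branch this hunk introduces; it reuses only the names shown in the diff (create_cli_agent, create_agent, config.robot_type) and elides the model, middleware, and caching setup. The wrapper name build_agent is hypothetical.

from deepagents_cli.agent import create_cli_agent
from langchain.agents import create_agent

def build_agent(config, llm_instance, mcp_tools, system_prompt, middleware, checkpointer):
    # Hypothetical wrapper illustrating the new branch inside init_agent
    if config.robot_type == "deep_agent":
        # deepagents-cli builds the agent itself; no local system prompt or middleware is passed
        agent, composite_backend = create_cli_agent(
            model=llm_instance,
            assistant_id=config.bot_id,
            tools=mcp_tools,
            auto_approve=True,
        )
    else:
        # Standard LangChain agent with the locally assembled middleware stack
        agent = create_agent(
            model=llm_instance,
            system_prompt=system_prompt,
            tools=mcp_tools,
            middleware=middleware,
            checkpointer=checkpointer,  # pass the checkpointer to enable persistence
        )
    return agent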


@@ -98,7 +98,9 @@ async def load_system_prompt_async(project_dir: str, language: str = None, syste
datetime_str = format_datetime_by_language(language) if language else format_datetime_by_language('en')
# If a {language} placeholder is present, just use system_prompt directly
if robot_type == "general_agent" or robot_type == "catalog_agent":
if robot_type == "deep_agent":
return None
elif robot_type == "general_agent" or robot_type == "catalog_agent":
"""
优先使用项目目录的README.md没有才使用默认的system_prompt_{robot_type}.md
"""


@@ -12,7 +12,7 @@ from fastapi.middleware.cors import CORSMiddleware
from routes.file_manager import router as file_manager_router
import logging
from utils.symlink_utils import setup_project_directories
from utils.log_util.logger import init_with_fastapi
@@ -25,6 +25,9 @@ init_with_fastapi(app)
logger = logging.getLogger('app')
# Setup project directories and symbolic links
setup_project_directories()
# Mount the public folder as a static file service
app.mount("/public", StaticFiles(directory="public"), name="static")

poetry.lock (generated, 1090 changes)

File diff suppressed because it is too large


@@ -98,8 +98,18 @@
- If the user has not named a specific device or room but uses a referring expression such as "this device", "that room", or "it", infer the intended device or room from the most recent chat history
- Give priority to the device information from the most recent query, such as the devices of the most recently queried room, the device ID, etc.
- If the context contains multiple devices, confirm with the user which device to operate
2. **If the user has explicitly confirmed**: directly call the 【设备控制】 (device control) tool to perform the operation.
3. **If the user has not confirmed and this is a new request**: send the confirmation prompt "About to [operation] [device name] [specific parameters] for you, is that confirmed?" and execute only after the user confirms.
2. **Confirmation flow for air-conditioner temperature adjustment**
- If the user says something like "a bit hot", "turn it down", or "too hot", they want the temperature lowered:
1. First query the current room temperature
2. By default lower the temperature by 1 degree (current temperature - 1)
3. Reply format: "The room temperature is currently xx degrees; is adjusting it to xx degrees OK?"
- If the user says something like "a bit cold", "turn it up", or "too cold", they want the temperature raised:
1. First query the current room temperature
2. By default raise the temperature by 1 degree (current temperature + 1)
3. Reply format: "The room temperature is currently xx degrees; is adjusting it to xx degrees OK?"
- If the user specifies an exact temperature (e.g. "set it to 25 degrees"), use that temperature directly
3. **If the user has explicitly confirmed**: directly call the 【设备控制】 (device control) tool to perform the operation.
4. **If the user has not confirmed and this is a new request**: send the confirmation prompt "About to [operation] [device name] [specific parameters] for you, is that confirmed?" and execute only after the user confirms.
4. Query personnel information / wowtalk account / personnel location
- **Condition**: the user intends to find a person, an employee, a colleague, or a room location.
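
A minimal Python sketch of the temperature-adjustment flow described in the prompt above (not part of the commit); query_room_temperature, wants_cooler, ask_user, and set_ac_temperature are hypothetical stand-ins for the real query and device-control tools.

from typing import Optional

# Hypothetical helpers standing in for the actual query / device-control tools.
def query_room_temperature() -> int: return 26
def wants_cooler(text: str) -> bool: return any(k in text for k in ("hot", "turn it down"))
def ask_user(prompt: str) -> bool: print(prompt); return True
def set_ac_temperature(target: int) -> None: print(f"AC set to {target} degrees")

def adjust_ac_temperature(user_request: str, target: Optional[int] = None) -> str:
    """Sketch of the prompt's adjustment rules; not production code."""
    current = query_room_temperature()  # step 1: query the current room temperature
    if target is None:
        # step 2: default to +/- 1 degree when no explicit temperature was given
        target = current - 1 if wants_cooler(user_request) else current + 1
    # step 3: ask in the required reply format and execute only after confirmation
    confirmation = f"The room temperature is currently {current} degrees; is adjusting it to {target} degrees OK?"
    if ask_user(confirmation):
        set_ac_temperature(target)
        return f"Adjusted to {target} degrees."
    return "No change made."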


@@ -27,11 +27,12 @@ dependencies = [
"chardet>=5.0.0",
"psutil (>=7.1.3,<8.0.0)",
"uvloop (>=0.22.1,<0.23.0)",
"deepagents (>=0.3.0,<0.4.0)",
"deepagents (>=0.2.8,<0.4.0)",
"langchain-mcp-adapters (>=0.2.1,<0.3.0)",
"langchain-openai (>=1.1.1,<2.0.0)",
"cachetools (>=6.2.4,<7.0.0)",
"langgraph-checkpoint-sqlite (>=3.0.1,<4.0.0)",
"deepagents-cli (>=0.0.11,<0.0.12)",
]
[tool.poetry.requires-plugins]


@@ -84,6 +84,11 @@ from .multi_project_manager import (
generate_robot_readme
)
from .symlink_utils import (
setup_deepagents_symlink,
setup_project_directories
)
__all__ = [
# file_utils
'download_file',
@@ -152,4 +157,8 @@ __all__ = [
'get_unique_folder_name',
'copy_dataset_folder',
'generate_robot_readme',
# symlink_utils
'setup_deepagents_symlink',
'setup_project_directories',
]


@@ -367,7 +367,8 @@ def format_messages_to_chat_history(messages: List[Dict[str, str]]) -> str:
def create_project_directory(dataset_ids: Optional[List[str]], bot_id: str, robot_type: str = "general_agent") -> Optional[str]:
"""Shared logic for creating a project directory."""
# Only create the directory when robot_type == "catalog_agent" and dataset_ids is not empty
if robot_type != "catalog_agent" or not dataset_ids or len(dataset_ids) == 0:
if robot_type == "general_agent":
return None
try:

utils/symlink_utils.py (new file, 95 lines)

@@ -0,0 +1,95 @@
#!/usr/bin/env python3
"""
Utilities for managing symbolic links and directory setup.
"""
import os
import logging
from pathlib import Path

logger = logging.getLogger(__name__)


def setup_deepagents_symlink():
    """
    Create a symbolic link from projects/robot to ~/.deepagents
    if it doesn't already exist.
    """
    try:
        # Get paths
        project_root = Path(__file__).parent.parent
        robot_dir = project_root / "projects" / "robot"
        deepagents_dir = Path.home() / ".deepagents"

        # Create robot directory if it doesn't exist
        robot_dir.mkdir(parents=True, exist_ok=True)

        # If ~/.deepagents already exists and is not a symlink, backup and remove it
        if deepagents_dir.exists() and not deepagents_dir.is_symlink():
            backup_dir = deepagents_dir.parent / f"{deepagents_dir.name}.backup"
            logger.warning("~/.deepagents directory exists but is not a symlink.")
            logger.warning(f"Creating backup at {backup_dir}")
            try:
                # Create backup
                import shutil
                if backup_dir.exists():
                    shutil.rmtree(backup_dir)
                shutil.move(str(deepagents_dir), str(backup_dir))
                logger.info(f"Successfully backed up existing directory to {backup_dir}")
            except Exception as backup_error:
                logger.error(f"Failed to backup existing directory: {backup_error}")
                logger.error("Please manually remove or backup ~/.deepagents to proceed")
                return False

        # If ~/.deepagents is already a symlink pointing to the right place, do nothing
        if deepagents_dir.is_symlink():
            target = deepagents_dir.resolve()
            if target == robot_dir.resolve():
                logger.info(f"~/.deepagents already points to {robot_dir}")
                return True
            else:
                # Remove existing symlink pointing elsewhere
                deepagents_dir.unlink()
                logger.info(f"Removed existing symlink pointing to {target}")

        # Create the symbolic link
        os.symlink(robot_dir, deepagents_dir, target_is_directory=True)
        logger.info(f"Created symbolic link: {deepagents_dir} -> {robot_dir}")
        return True

    except Exception as e:
        logger.error(f"Failed to create symbolic link: {e}")
        return False


def setup_project_directories():
    """
    Set up all necessary directories and symbolic links for the project.
    """
    logger.info("Setting up project directories...")

    # Setup ~/.deepagents symlink
    symlink_success = setup_deepagents_symlink()

    if symlink_success:
        logger.info("Project directories setup completed successfully")
    else:
        logger.warning("Project directories setup completed with warnings")

    return symlink_success


if __name__ == "__main__":
    # Set up basic logging for standalone testing
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    # Test the function
    success = setup_deepagents_symlink()
    if success:
        print("✅ Symbolic link setup successful")
    else:
        print("❌ Symbolic link setup failed")