diff --git a/agent/deep_assistant.py b/agent/deep_assistant.py index cc8e27a..99158b5 100644 --- a/agent/deep_assistant.py +++ b/agent/deep_assistant.py @@ -45,10 +45,11 @@ from langchain_core.language_models import BaseChatModel from langgraph.pregel import Pregel # 新版本导入:MemoryMiddleware 和 SkillsMiddleware 已迁移到 deepagents.middleware from deepagents.middleware import MemoryMiddleware, SkillsMiddleware -from langchain.agents.middleware import AgentMiddleware +from langchain.agents.middleware import AgentMiddleware, ModelRequest, ModelResponse +from langchain.agents.middleware import HumanInTheLoopMiddleware, InterruptOnConfig, TodoListMiddleware +from langchain_core.messages import AIMessage, HumanMessage from langgraph.types import Checkpointer from deepagents_cli.config import settings, get_default_coding_instructions -from langchain.agents.middleware import HumanInTheLoopMiddleware, InterruptOnConfig, TodoListMiddleware from deepagents.middleware.filesystem import FilesystemMiddleware from deepagents.middleware.patch_tool_calls import PatchToolCallsMiddleware from langchain_anthropic.middleware import AnthropicPromptCachingMiddleware @@ -63,6 +64,51 @@ from .custom_filesystem_middleware import CustomFilesystemMiddleware # _global_checkpointer = MemorySaver() logger = logging.getLogger('app') + + +class EmptyResponseRetryMiddleware(AgentMiddleware): + """当模型返回空内容(无文字、无工具调用)时自动重试""" + + MAX_RETRIES = 5 + + def _is_empty_response(self, result: ModelResponse) -> bool: + """判断是否为空响应""" + if not result.result: + return True + msg = result.result[0] + if not isinstance(msg, AIMessage): + return False + content = msg.content or "" + has_text = bool(content.strip()) if isinstance(content, str) else bool(content) + return not has_text and len(msg.tool_calls) == 0 + + def wrap_model_call(self, request, handler): + result = handler(request) + retries = 0 + while self._is_empty_response(result) and retries < self.MAX_RETRIES: + retries += 1 + logger.warning(f"Empty response 
detected, retrying ({retries}/{self.MAX_RETRIES})") + retry_messages = list(request.messages) + [ + HumanMessage(content="Please continue your response.") + ] + request = request.override(messages=retry_messages) + result = handler(request) + return result + + async def awrap_model_call(self, request, handler): + result = await handler(request) + retries = 0 + while self._is_empty_response(result) and retries < self.MAX_RETRIES: + retries += 1 + logger.warning(f"Empty response detected, retrying ({retries}/{self.MAX_RETRIES})") + retry_messages = list(request.messages) + [ + HumanMessage(content="Please continue your response.") + ] + request = request.override(messages=retry_messages) + result = await handler(request) + return result + + # Utility functions def read_system_prompt(): """读取通用的无状态系统prompt""" @@ -220,6 +266,8 @@ async def init_agent(config: AgentConfig): # 构建中间件列表 middleware = [] + # 添加空响应重试中间件(最先执行,最外层包裹) + middleware.append(EmptyResponseRetryMiddleware()) # 首先添加 ToolUseCleanupMiddleware 来清理孤立的 tool_use middleware.append(ToolUseCleanupMiddleware()) # 添加工具输出长度控制中间件 @@ -407,22 +455,22 @@ def create_custom_cli_agent( # Use LocalShellBackend for filesystem + shell execution backend = LocalShellBackend( root_dir=workspace_root, - virtual_mode=True, + virtual_mode=False, inherit_env=True, env=final_shell_env, ) else: # No shell access - use plain FilesystemBackend - backend = FilesystemBackend(root_dir=workspace_root, virtual_mode=True) + backend = FilesystemBackend(root_dir=workspace_root, virtual_mode=False) # Set up composite backend with routing (参考新版本实现) large_results_backend = FilesystemBackend( root_dir=tempfile.mkdtemp(prefix="deepagents_large_results_"), - virtual_mode=True, + virtual_mode=False, ) conversation_history_backend = FilesystemBackend( root_dir=tempfile.mkdtemp(prefix="deepagents_conversation_history_"), - virtual_mode=True, + virtual_mode=False, ) composite_backend = CompositeBackend( default=backend, @@ -438,7 +486,7 @@ def 
create_custom_cli_agent( agent_middleware.append( CustomSkillsMiddleware( - backend=FilesystemBackend(root_dir=workspace_root, virtual_mode=True), + backend=FilesystemBackend(root_dir=workspace_root, virtual_mode=False), sources=skills_sources, ) ) diff --git a/agent/logging_handler.py b/agent/logging_handler.py index de0d3db..26aa6b1 100644 --- a/agent/logging_handler.py +++ b/agent/logging_handler.py @@ -41,16 +41,20 @@ class LoggingCallbackHandler(BaseCallbackHandler): if hasattr(response, 'generations') and response.generations: for gen_idx, generation_list in enumerate(response.generations): for msg_idx, generation in enumerate(generation_list): - if hasattr(generation, 'text'): - output_list = generation.text.split("\n") - for i, output in enumerate(output_list): - if output.strip(): - self.logger.info(f"{output}") - elif hasattr(generation, 'message'): - output_list = generation.message.split("\n") - for i, output in enumerate(output_list): - if output.strip(): - self.logger.info(f"{output}") + # ChatGeneration: 使用 text 属性获取内容 + if hasattr(generation, 'text') and generation.text: + for line in generation.text.split("\n"): + if line.strip(): + self.logger.info(f"  {line}") + # 如果有 message 属性,输出额外信息(tool_calls 等) + if hasattr(generation, 'message') and generation.message: + msg = generation.message + content = msg.content if hasattr(msg, 'content') else '' + if not content or (isinstance(content, str) and not content.strip()): + self.logger.info(f"  [EMPTY content]") + tool_calls = msg.tool_calls if hasattr(msg, 'tool_calls') else [] + if tool_calls: + self.logger.info(f"  [tool_calls: {[tc.get('name', '') for tc in tool_calls]}]") def on_llm_error( self, error: Exception, **kwargs: Any diff --git a/plans/deepagents-upgrade.md b/plans/deepagents-upgrade.md index af56e9f..fc9a060 100644 --- a/plans/deepagents-upgrade.md +++ b/plans/deepagents-upgrade.md @@ -60,7 +60,7 @@ from deepagents.backends import LocalShellBackend # 创建 backend,支持自定义环境变量 backend = 
LocalShellBackend( root_dir=workspace_root, - virtual_mode=True, + virtual_mode=False, env={"ASSISTANT_ID": "xxx", "USER_IDENTIFIER": "yyy"}, # 自定义环境变量 inherit_env=True, # 继承父进程环境变量 ) @@ -92,7 +92,7 @@ backend = LocalShellBackend( ```python # 当前实现 composite_backend = CompositeBackend( - default=FilesystemBackend(root_dir=workspace_root, virtual_mode=True), + default=FilesystemBackend(root_dir=workspace_root, virtual_mode=False), routes={}, ) ``` @@ -120,14 +120,14 @@ from deepagents.backends import LocalShellBackend # 创建带自定义环境变量的 backend shell_backend = LocalShellBackend( root_dir=workspace_root, - virtual_mode=True, + virtual_mode=False, env=shell_env, inherit_env=True, ) # 或使用 CompositeBackend 路由 composite_backend = CompositeBackend( - default=FilesystemBackend(root_dir=workspace_root, virtual_mode=True), + default=FilesystemBackend(root_dir=workspace_root, virtual_mode=False), routes={ "/shell/": shell_backend, # shell 命令路由 }, @@ -209,7 +209,7 @@ if enable_shell: final_shell_env = shell_env or {} shell_backend = LocalShellBackend( root_dir=workspace_root, - virtual_mode=True, + virtual_mode=False, env=final_shell_env, inherit_env=True, # 继承 os.environ ) diff --git a/routes/chat.py b/routes/chat.py index 40cf8ee..b7169b4 100644 --- a/routes/chat.py +++ b/routes/chat.py @@ -512,7 +512,7 @@ async def chat_completions(request: ChatRequest, authorization: Optional[str] = project_dir = create_project_directory(request.dataset_ids, bot_id, request.skills) # 收集额外参数作为 generate_cfg - exclude_fields = {'messages', 'model', 'model_server', 'dataset_ids', 'language', 'tool_response', 'system_prompt', 'mcp_settings' ,'stream', 'robot_type', 'bot_id', 'user_identifier', 'session_id', 'enable_thinking', 'skills', 'enable_memory', 'n', 'shell_env'} + exclude_fields = {'messages', 'model', 'model_server', 'dataset_ids', 'language', 'tool_response', 'system_prompt', 'mcp_settings' ,'stream', 'robot_type', 'bot_id', 'user_identifier', 'session_id', 'enable_thinking', 'skills', 
'enable_memory', 'n', 'shell_env', 'max_tokens'} generate_cfg = {k: v for k, v in request.model_dump().items() if k not in exclude_fields} # 处理消息 messages = process_messages(request.messages, request.language) @@ -666,7 +666,7 @@ async def chat_warmup_v2(request: ChatRequestV2, authorization: Optional[str] = messages = process_messages(empty_messages, request.language or "ja") # 收集额外参数作为 generate_cfg - exclude_fields = {'messages', 'stream', 'tool_response', 'bot_id', 'language', 'user_identifier', 'session_id', 'n', 'model', 'model_server', 'api_key', 'shell_env'} + exclude_fields = {'messages', 'stream', 'tool_response', 'bot_id', 'language', 'user_identifier', 'session_id', 'n', 'model', 'model_server', 'api_key', 'shell_env', 'max_tokens'} generate_cfg = {k: v for k, v in request.model_dump().items() if k not in exclude_fields} # 从请求中提取 model/model_server/api_key,优先级高于 bot_config(排除 "whatever" 和空值) req_data = request.model_dump() @@ -773,7 +773,7 @@ async def chat_completions_v2(request: ChatRequestV2, authorization: Optional[st # 处理消息 messages = process_messages(request.messages, request.language) # 收集额外参数作为 generate_cfg - exclude_fields = {'messages', 'dataset_ids', 'language', 'tool_response', 'system_prompt', 'mcp_settings', 'stream', 'robot_type', 'bot_id', 'user_identifier', 'session_id', 'enable_thinking', 'skills', 'enable_memory', 'n', 'model', 'model_server', 'api_key', 'shell_env'} + exclude_fields = {'messages', 'dataset_ids', 'language', 'tool_response', 'system_prompt', 'mcp_settings', 'stream', 'robot_type', 'bot_id', 'user_identifier', 'session_id', 'enable_thinking', 'skills', 'enable_memory', 'n', 'model', 'model_server', 'api_key', 'shell_env', 'max_tokens'} generate_cfg = {k: v for k, v in request.model_dump().items() if k not in exclude_fields} # 从请求中提取 model/model_server/api_key,优先级高于 bot_config(排除 "whatever" 和空值) req_data = request.model_dump()