diff --git a/agent/agent_config.py b/agent/agent_config.py
index c51202e..8351be8 100644
--- a/agent/agent_config.py
+++ b/agent/agent_config.py
@@ -147,7 +147,7 @@ class AgentConfig:
 
 
     @classmethod
-    async def from_v2_request(cls, request, bot_config: Dict, project_dir: Optional[str] = None, messages: Optional[List] = None, generate_cfg: Optional[Dict] = None):
+    async def from_v2_request(cls, request, bot_config: Dict, project_dir: Optional[str] = None, messages: Optional[List] = None, generate_cfg: Optional[Dict] = None, model_name: Optional[str] = None, model_server: Optional[str] = None, api_key: Optional[str] = None):
         """从v2请求创建配置"""
         # 延迟导入避免循环依赖
         from .logging_handler import LoggingCallbackHandler
@@ -175,17 +175,11 @@ class AgentConfig:
         enable_thinking = bot_config.get("enable_thinking", False)
         enable_memori = bot_config.get("enable_memory", False)
 
-        # generate_cfg 中的 model/model_server 为最高优先级(排除 "whatever" 和空值)
-        _gen_model = (generate_cfg or {}).get("model") or ""
-        _gen_model_server = (generate_cfg or {}).get("model_server") or ""
-        model_name = _gen_model if _gen_model and _gen_model != "whatever" else bot_config.get("model", "qwen/qwen3-next-80b-a3b-instruct")
-        model_server = _gen_model_server if _gen_model_server and _gen_model_server != "whatever" else bot_config.get("model_server", "")
-
         config = cls(
             bot_id=request.bot_id,
-            api_key=bot_config.get("api_key"),
-            model_name=model_name,
-            model_server=model_server,
+            api_key=api_key or bot_config.get("api_key"),
+            model_name=model_name or bot_config.get("model", "qwen/qwen3-next-80b-a3b-instruct"),
+            model_server=model_server or bot_config.get("model_server", ""),
             language=language,
             system_prompt=system_prompt,
             mcp_settings=bot_config.get("mcp_settings", []),
diff --git a/routes/chat.py b/routes/chat.py
index 869b9eb..06dd624 100644
--- a/routes/chat.py
+++ b/routes/chat.py
@@ -628,11 +628,19 @@ async def chat_warmup_v2(request: ChatRequestV2, authorization: Optional[str] =
     messages = process_messages(empty_messages, request.language or "ja")
 
     # 收集额外参数作为 generate_cfg
-    exclude_fields = {'messages', 'stream', 'tool_response', 'bot_id', 'language', 'user_identifier', 'session_id', 'n'}
+    exclude_fields = {'messages', 'stream', 'tool_response', 'bot_id', 'language', 'user_identifier', 'session_id', 'n', 'model', 'model_server', 'api_key'}
     generate_cfg = {k: v for k, v in request.model_dump().items() if k not in exclude_fields}
+    # 从请求中提取 model/model_server/api_key,优先级高于 bot_config(排除 "whatever" 和空值)
+    req_data = request.model_dump()
+    req_model = req_data.get("model") or ""
+    req_model_server = req_data.get("model_server") or ""
+    req_api_key = req_data.get("api_key") or ""
+    model_name = req_model if req_model and req_model != "whatever" else None
+    model_server = req_model_server if req_model_server and req_model_server != "whatever" else None
+    api_key = req_api_key if req_api_key and req_api_key != "whatever" else None
 
     # 创建 AgentConfig 对象
-    config = await AgentConfig.from_v2_request(request, bot_config, project_dir, messages, generate_cfg)
+    config = await AgentConfig.from_v2_request(request, bot_config, project_dir, messages, generate_cfg, model_name=model_name, model_server=model_server, api_key=api_key)
 
     # 预热 mcp_tools 缓存
     logger.info(f"Warming up mcp_tools for bot_id: {bot_id}")
@@ -727,10 +735,18 @@ async def chat_completions_v2(request: ChatRequestV2, authorization: Optional[st
     # 处理消息
     messages = process_messages(request.messages, request.language)
     # 收集额外参数作为 generate_cfg
-    exclude_fields = {'messages', 'dataset_ids', 'language', 'tool_response', 'system_prompt', 'mcp_settings', 'stream', 'robot_type', 'bot_id', 'user_identifier', 'session_id', 'enable_thinking', 'skills', 'enable_memory', 'n'}
+    exclude_fields = {'messages', 'dataset_ids', 'language', 'tool_response', 'system_prompt', 'mcp_settings', 'stream', 'robot_type', 'bot_id', 'user_identifier', 'session_id', 'enable_thinking', 'skills', 'enable_memory', 'n', 'model', 'model_server', 'api_key'}
     generate_cfg = {k: v for k, v in request.model_dump().items() if k not in exclude_fields}
+    # 从请求中提取 model/model_server/api_key,优先级高于 bot_config(排除 "whatever" 和空值)
+    req_data = request.model_dump()
+    req_model = req_data.get("model") or ""
+    req_model_server = req_data.get("model_server") or ""
+    req_api_key = req_data.get("api_key") or ""
+    model_name = req_model if req_model and req_model != "whatever" else None
+    model_server = req_model_server if req_model_server and req_model_server != "whatever" else None
+    api_key = req_api_key if req_api_key and req_api_key != "whatever" else None
     # 创建 AgentConfig 对象
-    config = await AgentConfig.from_v2_request(request, bot_config, project_dir, messages, generate_cfg)
+    config = await AgentConfig.from_v2_request(request, bot_config, project_dir, messages, generate_cfg, model_name=model_name, model_server=model_server, api_key=api_key)
 
     # 调用公共的agent创建和响应生成逻辑
     return await create_agent_and_generate_response(config)