From a97ff5a1855fe4320910856f07a1f8fad533fdc0 Mon Sep 17 00:00:00 2001
From: 朱潮
Date: Mon, 15 Dec 2025 11:57:31 +0800
Subject: [PATCH] Optimize the thinking step
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .vscode/settings.json         |  2 +-
 agent/guideline_middleware.py | 41 +++++++++++++++++++++++++----------
 agent/prompt_loader.py        |  3 +--
 prompt/guideline_prompt.md    |  5 -----
 routes/chat.py                |  3 ---
 utils/fastapi_utils.py        |  2 +-
 6 files changed, 32 insertions(+), 24 deletions(-)

diff --git a/.vscode/settings.json b/.vscode/settings.json
index b80c1e2..6b96370 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,5 +1,5 @@
 {
-    "python.languageServer": "Pylance",
+    "python.languageServer": "None",
     "python.analysis.indexing": true,
     "python.analysis.autoSearchPaths": true,
     "python.analysis.diagnosticMode": "workspace",
diff --git a/agent/guideline_middleware.py b/agent/guideline_middleware.py
index d80ae11..36df44c 100644
--- a/agent/guideline_middleware.py
+++ b/agent/guideline_middleware.py
@@ -4,6 +4,8 @@ from agent.prompt_loader import load_guideline_prompt
 from utils.fastapi_utils import (extract_block_from_system_prompt, format_messages_to_chat_history, get_user_last_message_content)
 from langchain.chat_models import BaseChatModel
 from langgraph.runtime import Runtime
+
+from langchain_core.messages import SystemMessage
 from typing import Any, Callable
 from langchain_core.callbacks import BaseCallbackHandler
 from langchain_core.outputs import LLMResult
@@ -48,13 +50,10 @@ Action: Provide concise, friendly, and personified natural responses.
 
     def get_guideline_prompt(self, messages: list[dict[str, Any]]) -> str:
         ## Process terms
         terms_analysis = self.get_term_analysis(messages)
-        guideline_prompt = ""
-
         if self.guidelines:
             chat_history = format_messages_to_chat_history(messages)
-            query_text = get_user_last_message_content(messages)
-            guideline_prompt = load_guideline_prompt(chat_history, query_text, self.guidelines, self.tool_description, self.scenarios, terms_analysis, self.language, self.user_identifier)
+            guideline_prompt = load_guideline_prompt(chat_history, self.guidelines, self.tool_description, self.scenarios, terms_analysis, self.language, self.user_identifier)
 
         return guideline_prompt
 
@@ -90,20 +89,30 @@ Action: Provide concise, friendly, and personified natural responses.
     def before_agent(self, state: AgentState, runtime: Runtime) -> dict[str, Any] | None:
         if not self.guidelines:
             return None
-
+
         guideline_prompt = self.get_guideline_prompt(convert_to_openai_messages(state['messages']))
-
+
+        # Prepare the full message list
+        messages = state['messages'].copy()
+
+        # Add the guideline prompt to the message list as a system message
+        system_message = SystemMessage(content=guideline_prompt)
+
+        messages = [system_message, messages[-1]]
+
         # Invoke the model with the callback handler
         response = self.model.invoke(
-            guideline_prompt,
+            messages,
             config={"metadata": {"message_tag": "THINK"}}
         )
         response.additional_kwargs["message_tag"] = "THINK"
         response.content = f"{response.content}"
-        messages = state['messages']+[response]
+
+        # Append the response to the original message list
+        final_messages = state['messages'] + [response]
 
         return {
-            "messages": messages
+            "messages": final_messages
         }
 
     async def abefore_agent(self, state: AgentState, runtime: Runtime) -> dict[str, Any] | None:
@@ -112,17 +121,25 @@
         guideline_prompt = self.get_guideline_prompt(convert_to_openai_messages(state['messages']))
 
+        # Prepare the full message list
+        messages = state['messages'].copy()
+
+        # Add the guideline prompt to the message list as a system message
+        system_message = SystemMessage(content=guideline_prompt)
+        messages = [system_message, messages[-1]]
+
         # Invoke the model with the callback handler
         response = await self.model.ainvoke(
-            guideline_prompt,
+            messages,
             config={"metadata": {"message_tag": "THINK"}}
         )
         response.additional_kwargs["message_tag"] = "THINK"
         response.content = f"{response.content}"
-        messages = state['messages']+[response]
+        # Append the response to the original message list
+        final_messages = state['messages'] + [response]
 
         return {
-            "messages": messages
+            "messages": final_messages
         }
 
     def wrap_model_call(
diff --git a/agent/prompt_loader.py b/agent/prompt_loader.py
index 5dd046a..68e4186 100644
--- a/agent/prompt_loader.py
+++ b/agent/prompt_loader.py
@@ -244,7 +244,7 @@ async def load_mcp_settings_async(project_dir: str, mcp_settings: list=None, bot
     return merged_settings
 
 
-def load_guideline_prompt(chat_history: str, last_message: str, guidelines_text: str, tools: str, scenarios: str, terms: str, language: str, user_identifier: str = "") -> str:
+def load_guideline_prompt(chat_history: str, guidelines_text: str, tools: str, scenarios: str, terms: str, language: str, user_identifier: str = "") -> str:
     """
     Load and process the guideline prompt
 
@@ -275,7 +275,6 @@ def load_guideline_prompt(chat_history: str, last_message: str, guidelines_text
     # Replace placeholders in the template
     system_prompt = guideline_template.format(
         chat_history=chat_history,
-        last_message=last_message,
         guidelines_text=guidelines_text,
         terms=terms,
         tools=tools,
diff --git a/prompt/guideline_prompt.md b/prompt/guideline_prompt.md
index 776885e..9d1d868 100644
--- a/prompt/guideline_prompt.md
+++ b/prompt/guideline_prompt.md
@@ -28,11 +28,6 @@
 {chat_history}
 ```
 
-## 用户最新问题 (User's Last Message)
-```
-{last_message}
-```
-
 ## 工具列表 (Tools)
 ```
 {tools}
diff --git a/routes/chat.py b/routes/chat.py
index dad7eae..1e25bca 100644
--- a/routes/chat.py
+++ b/routes/chat.py
@@ -13,7 +13,6 @@ from utils import (
 )
 from agent.sharded_agent_manager import init_global_sharded_agent_manager
 from utils.api_models import ChatRequestV2
-from agent.prompt_loader import load_guideline_prompt
 from utils.fastapi_utils import (
     process_messages, format_messages_to_chat_history, create_project_directory,
     extract_api_key_from_auth, generate_v2_auth_token, fetch_bot_config,
@@ -31,8 +30,6 @@ agent_manager = init_global_sharded_agent_manager(
 )
 
 
-
-
 def append_user_last_message(messages: list, content: str) -> bool:
     """Append content to the last user message
 
diff --git a/utils/fastapi_utils.py b/utils/fastapi_utils.py
index c3427fd..cf78233 100644
--- a/utils/fastapi_utils.py
+++ b/utils/fastapi_utils.py
@@ -357,7 +357,7 @@ def format_messages_to_chat_history(messages: List[Dict[str, str]]) -> str:
                 arguments = tool_call.get('function').get('arguments')
                 chat_history.append(f"{function_name} call: {arguments}")
 
-    recent_chat_history = chat_history[-15:] if len(chat_history) > 15 else chat_history
+    recent_chat_history = chat_history[-16:-1] if len(chat_history) > 16 else chat_history[:-1]
 
     return "\n".join(recent_chat_history)
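
For orientation (an editor's sketch, not part of the patch): the reworked `before_agent`/`abefore_agent` no longer pass the rendered guideline prompt to the model as a bare string. They build a two-message input — the guideline prompt as a `SystemMessage` plus only the latest message from state — then append the THINK-tagged response to the original history. A minimal runnable approximation of that flow is below; `FakeListChatModel` stands in for the middleware's real model and the prompt/history text is placeholder content (all assumptions, not from the patch).

```python
# Sketch of the THINK-step message flow introduced by this patch.
# FakeListChatModel is a langchain-core test double standing in for self.model.
from langchain_core.language_models.fake_chat_models import FakeListChatModel
from langchain_core.messages import HumanMessage, SystemMessage

model = FakeListChatModel(responses=["Thought: greeting. Action: reply warmly."])

# Placeholder conversation state and guideline prompt.
history = [HumanMessage(content="Hi there!")]
guideline_prompt = "Analyze the user's intent against the guidelines."

# Same shape as the patch: [system prompt, last message] only.
think_input = [SystemMessage(content=guideline_prompt), history[-1]]

response = model.invoke(
    think_input,
    config={"metadata": {"message_tag": "THINK"}},
)
response.additional_kwargs["message_tag"] = "THINK"

# As in the patch, the tagged THINK response is appended to the original history.
final_messages = history + [response]
print([type(m).__name__ for m in final_messages])  # ['HumanMessage', 'AIMessage']
```

The `message_tag` metadata mirrors how the middleware labels the call so downstream callbacks can route THINK output separately; the fake model ignores it but accepts the same `invoke` signature.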