Optimize thinking (优化思考)
This commit is contained in:
parent c32ecdfeb6
commit a97ff5a185
.vscode/settings.json (vendored): 2 lines changed
@@ -1,5 +1,5 @@
 {
-    "python.languageServer": "Pylance",
+    "python.languageServer": "None",
     "python.analysis.indexing": true,
     "python.analysis.autoSearchPaths": true,
     "python.analysis.diagnosticMode": "workspace",
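For what it's worth, "None" is a valid value for python.languageServer in VS Code and switches language-server features off for this workspace outright; with it set, the python.analysis.* keys that follow should have no effect, since they configure Pylance.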
@ -4,6 +4,8 @@ from agent.prompt_loader import load_guideline_prompt
|
|||||||
from utils.fastapi_utils import (extract_block_from_system_prompt, format_messages_to_chat_history, get_user_last_message_content)
|
from utils.fastapi_utils import (extract_block_from_system_prompt, format_messages_to_chat_history, get_user_last_message_content)
|
||||||
from langchain.chat_models import BaseChatModel
|
from langchain.chat_models import BaseChatModel
|
||||||
from langgraph.runtime import Runtime
|
from langgraph.runtime import Runtime
|
||||||
|
|
||||||
|
from langchain_core.messages import SystemMessage
|
||||||
from typing import Any, Callable
|
from typing import Any, Callable
|
||||||
from langchain_core.callbacks import BaseCallbackHandler
|
from langchain_core.callbacks import BaseCallbackHandler
|
||||||
from langchain_core.outputs import LLMResult
|
from langchain_core.outputs import LLMResult
|
||||||
@@ -48,13 +50,10 @@ Action: Provide concise, friendly, and personified natural responses.
     def get_guideline_prompt(self, messages: list[dict[str, Any]]) -> str:
         ## Process terms
         terms_analysis = self.get_term_analysis(messages)
-
         guideline_prompt = ""
-
         if self.guidelines:
             chat_history = format_messages_to_chat_history(messages)
-            query_text = get_user_last_message_content(messages)
-            guideline_prompt = load_guideline_prompt(chat_history, query_text, self.guidelines, self.tool_description, self.scenarios, terms_analysis, self.language, self.user_identifier)
+            guideline_prompt = load_guideline_prompt(chat_history, self.guidelines, self.tool_description, self.scenarios, terms_analysis, self.language, self.user_identifier)

         return guideline_prompt
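The net effect of this hunk: the guideline prompt no longer reserves a slot for the user's latest question. query_text (via get_user_last_message_content) is dropped, and load_guideline_prompt loses its last_message parameter (see the prompt_loader hunk further down); the latest user turn instead reaches the model as a separate message in the before_agent hunks below.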
@@ -93,17 +92,27 @@ Action: Provide concise, friendly, and personified natural responses.

         guideline_prompt = self.get_guideline_prompt(convert_to_openai_messages(state['messages']))

+        # Prepare the full message list
+        messages = state['messages'].copy()
+
+        # Add guideline_prompt to the message list as a system message
+        system_message = SystemMessage(content=guideline_prompt)
+
+        messages = [system_message, messages[-1]]
+
         # Call the model with the callback handler
         response = self.model.invoke(
-            guideline_prompt,
+            messages,
             config={"metadata": {"message_tag": "THINK"}}
         )

         response.additional_kwargs["message_tag"] = "THINK"
         response.content = f"<think>{response.content}</think>"
-        messages = state['messages']+[response]
+
+        # Append the response to the original message list
+        final_messages = state['messages'] + [response]
         return {
-            "messages": messages
+            "messages": final_messages
         }

     async def abefore_agent(self, state: AgentState, runtime: Runtime) -> dict[str, Any] | None:
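Why the call site can switch from a string to a list: LangChain chat models accept either a plain string, which gets wrapped as a single human message, or a list of messages. The old code therefore sent the entire guideline prompt as one human turn; the new code promotes it to a real SystemMessage and pairs it with only the latest user message. A minimal sketch of the pattern, with an invented history and the model call left commented out, assuming only langchain_core:

```
from langchain_core.messages import HumanMessage, SystemMessage

# Invented stand-in history; in the agent this is state['messages'].
history = [
    HumanMessage(content="How do I reset my password?"),
    HumanMessage(content="Where is the settings page?"),
]
guideline_prompt = "You are a helpful assistant. Follow the guidelines below..."

# New pattern: system prompt plus the latest user turn only.
messages = [SystemMessage(content=guideline_prompt), history[-1]]

# response = model.invoke(messages, config={"metadata": {"message_tag": "THINK"}})
# response.content = f"<think>{response.content}</think>"
print([m.type for m in messages])  # ['system', 'human']
```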
@@ -112,17 +121,25 @@ Action: Provide concise, friendly, and personified natural responses.

         guideline_prompt = self.get_guideline_prompt(convert_to_openai_messages(state['messages']))

+        # Prepare the full message list
+        messages = state['messages'].copy()
+
+        # Add guideline_prompt to the message list as a system message
+        system_message = SystemMessage(content=guideline_prompt)
+        messages = [system_message, messages[-1]]
+
         # Call the model with the callback handler
         response = await self.model.ainvoke(
-            guideline_prompt,
+            messages,
             config={"metadata": {"message_tag": "THINK"}}
         )
         response.additional_kwargs["message_tag"] = "THINK"
         response.content = f"<think>{response.content}</think>"

-        messages = state['messages']+[response]
+        # Append the response to the original message list
+        final_messages = state['messages'] + [response]
         return {
-            "messages": messages
+            "messages": final_messages
         }

     def wrap_model_call(
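The async hunk mirrors the sync one exactly. One small observation that applies to both: messages = state['messages'].copy() is immediately overwritten by messages = [system_message, messages[-1]], so the copy only serves to read the last element; [system_message, state['messages'][-1]] would do the same without the intermediate list.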
@@ -244,7 +244,7 @@ async def load_mcp_settings_async(project_dir: str, mcp_settings: list=None, bot
     return merged_settings


-def load_guideline_prompt(chat_history: str, last_message: str, guidelines_text: str, tools: str, scenarios: str, terms: str, language: str, user_identifier: str = "") -> str:
+def load_guideline_prompt(chat_history: str, guidelines_text: str, tools: str, scenarios: str, terms: str, language: str, user_identifier: str = "") -> str:
     """
     Load and process the guideline prompt

@@ -275,7 +275,6 @@ def load_guideline_prompt(chat_history: str, last_message: str, guidelines_text:
     # Replace the placeholders in the template
     system_prompt = guideline_template.format(
         chat_history=chat_history,
-        last_message=last_message,
         guidelines_text=guidelines_text,
         terms=terms,
         tools=tools,
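A coupling worth spelling out, sketched below with a made-up two-line template: str.format raises KeyError for any placeholder that lacks a matching keyword argument, so dropping last_message= from this call only works because the {last_message} block is also deleted from the template in the next hunk.

```
# Made-up miniature template with the same two placeholders.
template = "History:\n{chat_history}\n\nLast message:\n{last_message}"

try:
    template.format(chat_history="user: hi")
except KeyError as err:
    print(f"unfilled placeholder: {err}")  # unfilled placeholder: 'last_message'
```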
@@ -28,11 +28,6 @@
 {chat_history}
 ```

-## 用户最新问题 (User's Last Message)
-```
-{last_message}
-```
-
 ## 工具列表 (Tools)
 ```
 {tools}
@ -13,7 +13,6 @@ from utils import (
|
|||||||
)
|
)
|
||||||
from agent.sharded_agent_manager import init_global_sharded_agent_manager
|
from agent.sharded_agent_manager import init_global_sharded_agent_manager
|
||||||
from utils.api_models import ChatRequestV2
|
from utils.api_models import ChatRequestV2
|
||||||
from agent.prompt_loader import load_guideline_prompt
|
|
||||||
from utils.fastapi_utils import (
|
from utils.fastapi_utils import (
|
||||||
process_messages, format_messages_to_chat_history,
|
process_messages, format_messages_to_chat_history,
|
||||||
create_project_directory, extract_api_key_from_auth, generate_v2_auth_token, fetch_bot_config,
|
create_project_directory, extract_api_key_from_auth, generate_v2_auth_token, fetch_bot_config,
|
||||||
@ -31,8 +30,6 @@ agent_manager = init_global_sharded_agent_manager(
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def append_user_last_message(messages: list, content: str) -> bool:
|
def append_user_last_message(messages: list, content: str) -> bool:
|
||||||
"""向最后一条用户消息追加内容
|
"""向最后一条用户消息追加内容
|
||||||
|
|
||||||
|
|||||||
@ -357,7 +357,7 @@ def format_messages_to_chat_history(messages: List[Dict[str, str]]) -> str:
|
|||||||
arguments = tool_call.get('function').get('arguments')
|
arguments = tool_call.get('function').get('arguments')
|
||||||
chat_history.append(f"{function_name} call: {arguments}")
|
chat_history.append(f"{function_name} call: {arguments}")
|
||||||
|
|
||||||
recent_chat_history = chat_history[-15:] if len(chat_history) > 15 else chat_history
|
recent_chat_history = chat_history[-16:-1] if len(chat_history) > 16 else chat_history[:-1]
|
||||||
return "\n".join(recent_chat_history)
|
return "\n".join(recent_chat_history)
|
||||||
|
|
||||||
|
|
||||||
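The slice change merits a worked check, since off-by-one mistakes hide easily here. Both branches now exclude the final history entry, which is the latest user message that the agent sends separately, and keep at most the 15 entries before it. A quick sketch with synthetic entries:

```
# Synthetic entries; the real list holds formatted "role: content" strings.
chat_history = [f"msg{i}" for i in range(20)]

# New behaviour: drop the last entry, keep up to 15 before it.
recent = chat_history[-16:-1] if len(chat_history) > 16 else chat_history[:-1]
assert recent == [f"msg{i}" for i in range(4, 19)]  # msg4..msg18, 15 items

# Old behaviour kept the last entry too: chat_history[-15:] -> msg5..msg19.
```

At exactly 16 entries the else branch also returns 15 items, so the two branches agree at the boundary.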