diff --git a/agent/guideline_middleware.py b/agent/guideline_middleware.py
index f142447..d80ae11 100644
--- a/agent/guideline_middleware.py
+++ b/agent/guideline_middleware.py
@@ -95,15 +95,12 @@ Action: Provide concise, friendly, and personified natural responses.
# 使用回调处理器调用模型
response = self.model.invoke(
- guideline_prompt,
- config={"callbacks": [BaseCallbackHandler()]}
+ guideline_prompt,
+ config={"metadata": {"message_tag": "THINK"}}
)
- # 提取<think>与</think>之间的内容作为thinking
-
- match = re.search(r'<think>(.*?)</think>', response.content, re.DOTALL)
- response.additional_kwargs["thinking"] = match.group(1).strip() if match else response.content
-
+ response.additional_kwargs["message_tag"] = "THINK"
+ response.content = f"<think>{response.content}</think>"
messages = state['messages']+[response]
return {
"messages": messages
@@ -118,12 +115,10 @@ Action: Provide concise, friendly, and personified natural responses.
# 使用回调处理器调用模型
response = await self.model.ainvoke(
guideline_prompt,
- config={"callbacks": [BaseCallbackHandler()]}
+ config={"metadata": {"message_tag": "THINK"}}
)
-
- # 提取<think>与</think>之间的内容作为thinking
- match = re.search(r'<think>(.*?)</think>', response.content, re.DOTALL)
- response.additional_kwargs["thinking"] = match.group(1).strip() if match else response.content
+ response.additional_kwargs["message_tag"] = "THINK"
+ response.content = f"<think>{response.content}</think>"
messages = state['messages']+[response]
return {
diff --git a/prompt/guideline_prompt.md b/prompt/guideline_prompt.md
index a8aaa91..776885e 100644
--- a/prompt/guideline_prompt.md
+++ b/prompt/guideline_prompt.md
@@ -56,9 +56,8 @@
**语言要求**:所有用户交互和结果输出,必须使用[{language}]
## 输出格式
-按顺序输出执行步骤, 用<think>和</think>标签包裹,输出格式如下:
+按顺序输出执行步骤, 输出格式如下:
-<think>
### 🎯 核心目标
[简洁描述用户的主要目标]
@@ -70,5 +69,3 @@
### ⚡ 计划阶段
**执行步骤**:
[按1,2,3...n顺序列出执行步骤]
-
-</think>
diff --git a/routes/chat.py b/routes/chat.py
index 5fb843a..9aaca48 100644
--- a/routes/chat.py
+++ b/routes/chat.py
@@ -148,6 +148,7 @@ async def enhanced_generate_stream_response(
async for msg, metadata in agent.astream({"messages": messages}, stream_mode="messages"):
new_content = ""
+ print(metadata)
if isinstance(msg, AIMessageChunk):
# 判断是否有工具调用
if msg.tool_call_chunks: # 检查工具调用块
@@ -160,23 +161,20 @@ async def enhanced_generate_stream_response(
elif len(msg.content) > 0:
preamble_completed.set()
await output_queue.put(("preamble_done", None))
- if message_tag != "ANSWER":
- message_tag = "ANSWER"
- new_content = f"[{message_tag}]\n{msg.text}"
- elif message_tag == "ANSWER":
- new_content = msg.text
+ meta_message_tag = metadata.get("message_tag", "ANSWER")
+ if meta_message_tag != message_tag:
+ message_tag = meta_message_tag
+ new_content = f"[{meta_message_tag}]\n"
+ new_content += msg.text
elif message_tag == "TOOL_CALL" and \
(
("finish_reason" in msg.response_metadata and msg.response_metadata["finish_reason"] == "tool_calls") or \
("stop_reason" in msg.response_metadata and msg.response_metadata["stop_reason"] == "tool_use")
):
new_content = f"[{message_tag}] {function_name}\n{tool_args}"
- message_tag = "TOOL_CALL"
elif isinstance(msg, ToolMessage) and len(msg.content) > 0:
message_tag = "TOOL_RESPONSE"
new_content = f"[{message_tag}] {msg.name}\n{msg.text}"
- elif isinstance(msg, AIMessage) and msg.additional_kwargs and "thinking" in msg.additional_kwargs:
- new_content = "[THINK]\n" + msg.additional_kwargs["thinking"] + "\n"
# 只有当有新内容时才发送chunk
if new_content:
@@ -326,10 +324,10 @@ async def create_agent_and_generate_response(
response_text = ""
for msg in append_messages:
if isinstance(msg,AIMessage):
- if msg.additional_kwargs and "thinking" in msg.additional_kwargs:
- response_text += "[THINK]\n"+msg.additional_kwargs["thinking"]+ "\n"
- elif len(msg.text)>0:
- response_text += "[ANSWER]\n"+msg.text+ "\n"
+ if len(msg.text)>0:
+ meta_message_tag = msg.additional_kwargs.get("message_tag", "ANSWER")
+ output_text = msg.text.replace("<think>","").replace("</think>","") if meta_message_tag == "THINK" else msg.text
+ response_text += f"[{meta_message_tag}]\n"+output_text+ "\n"
if len(msg.tool_calls)>0:
response_text += "".join([f"[TOOL_CALL] {tool['name']}\n{json.dumps(tool["args"]) if isinstance(tool["args"],dict) else tool["args"]}\n" for tool in msg.tool_calls])
elif isinstance(msg,ToolMessage) and tool_response: