diff --git a/agent/guideline_middleware.py b/agent/guideline_middleware.py
index 9e0eb9e..f142447 100644
--- a/agent/guideline_middleware.py
+++ b/agent/guideline_middleware.py
@@ -8,15 +8,10 @@ from typing import Any, Callable
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
import logging
+import re
logger = logging.getLogger('app')
-class ThinkingCallbackHandler(BaseCallbackHandler):
-    """Custom callback handler that converts the model response content into thinking format."""
-    def on_llm_end(self, response: LLMResult, **kwargs) -> None:
-        """Process the response after the LLM call finishes and convert the content into thinking format."""
-        logger.info("Successfully converted response content to thinking format")
-
class GuidelineMiddleware(AgentMiddleware):
    def __init__(self, bot_id: str, model: BaseChatModel, prompt: str, robot_type: str, language: str, user_identifier: str):
        self.model = model
@@ -98,17 +93,17 @@ Action: Provide concise, friendly, and personified natural responses.
        guideline_prompt = self.get_guideline_prompt(convert_to_openai_messages(state['messages']))
-        # Create the callback handler instance
-        thinking_handler = ThinkingCallbackHandler()
-
        # Invoke the model with the callback handler
        response = self.model.invoke(
            guideline_prompt,
-            config={"callbacks": [thinking_handler]}
+            config={"callbacks": [BaseCallbackHandler()]}
        )
-        response.additional_kwargs["thinking"] = response.content
-
+
+        # Extract the content between <think> and </think> as the thinking text
+        match = re.search(r'<think>(.*?)</think>', response.content, re.DOTALL)
+        response.additional_kwargs["thinking"] = match.group(1).strip() if match else response.content
+
        messages = state['messages'] + [response]
        return {
            "messages": messages
@@ -123,16 +118,12 @@ Action: Provide concise, friendly, and personified natural responses.
        # Invoke the model with the callback handler
        response = await self.model.ainvoke(
            guideline_prompt,
-            config={"callbacks": [ThinkingCallbackHandler()]}
+            config={"callbacks": [BaseCallbackHandler()]}
        )
        # Extract the content between <think> and </think> as the thinking text
-        import re
        match = re.search(r'<think>(.*?)</think>', response.content, re.DOTALL)
-        if match:
-            response.additional_kwargs["thinking"] = match.group(1).strip()
-        else:
-            response.additional_kwargs["thinking"] = response.content
+        response.additional_kwargs["thinking"] = match.group(1).strip() if match else response.content
        messages = state['messages'] + [response]
        return {
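
Note: the <think>/</think> delimiters in the extraction regex above are an assumption inferred from the surrounding comment; adjust them to whatever tags the model actually emits. A minimal standalone sketch of the same extraction step, for reference only (the helper name is hypothetical, not part of the patch):

    import re

    def extract_thinking(content: str) -> str:
        """Return the text between <think> and </think>, or the full content if no tags are found."""
        match = re.search(r'<think>(.*?)</think>', content, re.DOTALL)
        return match.group(1).strip() if match else content

    # e.g. extract_thinking("<think>step-by-step reasoning</think>Final answer") == "step-by-step reasoning"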