From 7cc05c2c24ea27d34590d55c04381447bc202cec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9C=B1=E6=BD=AE?= Date: Tue, 25 Nov 2025 20:13:29 +0800 Subject: [PATCH] add guideline --- fastapi_app.py | 155 +++++++++++++++++++++++++++++++------ prompt/guideline_prompt.md | 16 ++-- 2 files changed, 136 insertions(+), 35 deletions(-) diff --git a/fastapi_app.py b/fastapi_app.py index b4c736f..2557065 100644 --- a/fastapi_app.py +++ b/fastapi_app.py @@ -1154,6 +1154,38 @@ async def call_guideline_llm(chat_history: str, guidelines_text: str, model_name return "" +def _get_optimal_batch_size(guidelines_count: int) -> int: + """根据guidelines数量决定最优批次数量(并发数)""" + if guidelines_count <= 10: + return 1 + elif guidelines_count <= 20: + return 2 + elif guidelines_count <= 30: + return 3 + else: + return 5 + + +async def process_guideline_batch( + guidelines_batch: List[str], + chat_history: str, + model_name: str, + api_key: str, + model_server: str +) -> str: + """处理单个guideline批次""" + try: + # 调用LLM分析这批guidelines + batch_guidelines_text = "\n".join(guidelines_batch) + batch_analysis = await call_guideline_llm(chat_history, batch_guidelines_text, model_name, api_key, model_server) + + return batch_analysis + except Exception as e: + print(f"Error processing guideline batch: {e}") + return "" + + + async def create_agent_and_generate_response( bot_id: str, api_key: str, @@ -1176,35 +1208,110 @@ async def create_agent_and_generate_response( # 1. 从system_prompt提取guideline内容 guidelines_text = extract_guidelines_from_system_prompt(system_prompt) + print(f"guidelines_text: {guidelines_text}") - # 2. 如果有guideline内容,调用LLM进行分析 + # 2. 
如果有guideline内容,进行并发处理 guideline_analysis = "" if guidelines_text: - chat_history = format_messages_to_chat_history(messages) - guideline_analysis = await call_guideline_llm(chat_history, guidelines_text, model_name, api_key, model_server) - print(f"Guideline analysis result: {guideline_analysis}") + # 按换行符分割guidelines + guidelines_list = [g.strip() for g in guidelines_text.split('\n') if g.strip()] + guidelines_count = len(guidelines_list) - # 将分析结果添加到最后一个消息的内容中 - if guideline_analysis and messages: - last_message = messages[-1] - if last_message.get('role') == 'user': - messages[-1]['content'] += f"\n\nActive Guidelines:\n{guideline_analysis}\nPlease follow these guidelines in your response." - print(messages[-1]['content']) + if guidelines_count > 0: + # 获取最优批次数量(并发数) + batch_count = _get_optimal_batch_size(guidelines_count) - # 从全局管理器获取或创建助手实例 - agent = await agent_manager.get_or_create_agent( - bot_id=bot_id, - project_dir=project_dir, - model_name=model_name, - api_key=api_key, - model_server=model_server, - generate_cfg=generate_cfg, - language=language, - system_prompt=system_prompt, - mcp_settings=mcp_settings, - robot_type=robot_type, - user_identifier=user_identifier - ) + # 计算每个批次应该包含多少条guideline + guidelines_per_batch = max(1, guidelines_count // batch_count) + + # 分批处理guidelines + batches = [] + for i in range(0, guidelines_count, guidelines_per_batch): + batch = guidelines_list[i:i + guidelines_per_batch] + batches.append(batch) + + # 确保批次数量不超过要求的并发数 + while len(batches) > batch_count: + # 将最后一个批次合并到倒数第二个批次 + batches[-2].extend(batches[-1]) + batches.pop() + + print(f"Processing {guidelines_count} guidelines in {len(batches)} batches with {batch_count} concurrent batches") + + # 准备chat_history + chat_history = format_messages_to_chat_history(messages) + + # 并发执行所有任务:guideline批次处理 + agent创建 + import asyncio + tasks = [] + + # 添加所有guideline批次任务 + for batch in batches: + task = process_guideline_batch( + guidelines_batch=batch, + 
chat_history=chat_history, + model_name=model_name, + api_key=api_key, + model_server=model_server + ) + tasks.append(task) + + # 添加agent创建任务 + agent_task = agent_manager.get_or_create_agent( + bot_id=bot_id, + project_dir=project_dir, + model_name=model_name, + api_key=api_key, + model_server=model_server, + generate_cfg=generate_cfg, + language=language, + system_prompt=system_prompt, + mcp_settings=mcp_settings, + robot_type=robot_type, + user_identifier=user_identifier + ) + tasks.append(agent_task) + + # 等待所有任务完成 + all_results = await asyncio.gather(*tasks, return_exceptions=True) + + # 处理结果:最后一个结果是agent,前面的是guideline批次结果 + agent = all_results[-1] # agent创建的结果 + batch_results = all_results[:-1] # guideline批次的结果 + + # 合并guideline分析结果 + valid_results = [] + for i, result in enumerate(batch_results): + if isinstance(result, Exception): + print(f"Guideline batch {i} failed: {result}") + continue + if result and result.strip(): + valid_results.append(result.strip()) + + if valid_results: + guideline_analysis = "\n\n".join(valid_results) + print(f"Merged guideline analysis result: {guideline_analysis}") + + # 将分析结果添加到最后一个消息的内容中 + if guideline_analysis and messages: + last_message = messages[-1] + if last_message.get('role') == 'user': + messages[-1]['content'] += f"\n\nActive Guidelines:\n{guideline_analysis}\nPlease follow these guidelines in your response." + else: + # 3. 
从全局管理器获取或创建助手实例 + agent = await agent_manager.get_or_create_agent( + bot_id=bot_id, + project_dir=project_dir, + model_name=model_name, + api_key=api_key, + model_server=model_server, + generate_cfg=generate_cfg, + language=language, + system_prompt=system_prompt, + mcp_settings=mcp_settings, + robot_type=robot_type, + user_identifier=user_identifier + ) # 根据stream参数决定返回流式还是非流式响应 if stream: diff --git a/prompt/guideline_prompt.md b/prompt/guideline_prompt.md index d162587..e3e6cb4 100644 --- a/prompt/guideline_prompt.md +++ b/prompt/guideline_prompt.md @@ -13,9 +13,7 @@ to the most recent state of an interaction between yourself (an AI agent) and a Examples of Guideline Match Evaluations: ``` - 示例 1: 旅游咨询 - - Example #1: ### + Example #1: travel consultation - **Chat History**: user: Hi, I'm planning a trip to Italy next month. What can I do there? ai_agent: That sounds exciting! I can help you with that. Do you prefer exploring cities or enjoying scenic landscapes? @@ -52,9 +50,8 @@ Examples of Guideline Match Evaluations: ] } ``` - 示例 2: 课程咨询 - Example #2: ### + Example #2: Course Consultation - **Chat History**: user:Hi, I'm interested in your Python programming course, but I'm not sure if I'm ready for it. ai_agent:Happy to help! Could you share a bit about your background or experience with programming so far? @@ -93,9 +90,7 @@ Examples of Guideline Match Evaluations: } ``` - 示例 3: 登录问题 - - Example #3: ### + Example #3: Login issue - **Chat History**: user:I'm having trouble logging into my account. ai_agent:I'm sorry to hear that. Can you tell me what happens when you try to log in? @@ -119,9 +114,7 @@ Examples of Guideline Match Evaluations: ] } - 示例 4: 退货政策 - - Example #4: ### + Example #4: Return Policy - **Chat History**: user: Hi, I'm thinking about ordering this coat, but I need to know — what's your return policy? ai_agent: You can return items within 30 days either in-store or using our prepaid return label. 
@@ -152,6 +145,7 @@ Guidelines List: {guidelines_text} OUTPUT FORMAT: +The JSON-formatted content must be wrapped between "```json" and "```" markers. Please specify the applicability of each guideline: ```json {{