add guideline
This commit is contained in:
parent 277fcca3d1
commit 7cc05c2c24
155 fastapi_app.py
@@ -1154,6 +1154,38 @@ async def call_guideline_llm(chat_history: str, guidelines_text: str, model_name
        return ""


def _get_optimal_batch_size(guidelines_count: int) -> int:
    """Decide the optimal number of batches (concurrency level) based on the number of guidelines."""
    if guidelines_count <= 10:
        return 1
    elif guidelines_count <= 20:
        return 2
    elif guidelines_count <= 30:
        return 3
    else:
        return 5


async def process_guideline_batch(
    guidelines_batch: List[str],
    chat_history: str,
    model_name: str,
    api_key: str,
    model_server: str
) -> str:
    """Process a single batch of guidelines."""
    try:
        # Call the LLM to analyze this batch of guidelines
        batch_guidelines_text = "\n".join(guidelines_batch)
        batch_analysis = await call_guideline_llm(chat_history, batch_guidelines_text, model_name, api_key, model_server)

        return batch_analysis
    except Exception as e:
        print(f"Error processing guideline batch: {e}")
        return ""


async def create_agent_and_generate_response(
    bot_id: str,
    api_key: str,
@@ -1176,35 +1208,110 @@ async def create_agent_and_generate_response(

    # 1. Extract the guideline content from the system_prompt
    guidelines_text = extract_guidelines_from_system_prompt(system_prompt)
    print(f"guidelines_text: {guidelines_text}")

    # 2. If there is guideline content, call the LLM to analyze it
    # 2. If there is guideline content, process it concurrently
    guideline_analysis = ""
    if guidelines_text:
        chat_history = format_messages_to_chat_history(messages)
        guideline_analysis = await call_guideline_llm(chat_history, guidelines_text, model_name, api_key, model_server)
        print(f"Guideline analysis result: {guideline_analysis}")
        # Split the guidelines on newlines
        guidelines_list = [g.strip() for g in guidelines_text.split('\n') if g.strip()]
        guidelines_count = len(guidelines_list)

    # Append the analysis result to the content of the last message
    if guideline_analysis and messages:
        last_message = messages[-1]
        if last_message.get('role') == 'user':
            messages[-1]['content'] += f"\n\nActive Guidelines:\n{guideline_analysis}\nPlease follow these guidelines in your response."
            print(messages[-1]['content'])
        if guidelines_count > 0:
            # Get the optimal number of batches (the concurrency level)
            batch_count = _get_optimal_batch_size(guidelines_count)

    # Get or create the agent instance from the global manager
    agent = await agent_manager.get_or_create_agent(
        bot_id=bot_id,
        project_dir=project_dir,
        model_name=model_name,
        api_key=api_key,
        model_server=model_server,
        generate_cfg=generate_cfg,
        language=language,
        system_prompt=system_prompt,
        mcp_settings=mcp_settings,
        robot_type=robot_type,
        user_identifier=user_identifier
    )
            # Calculate how many guidelines each batch should contain
            guidelines_per_batch = max(1, guidelines_count // batch_count)

            # Split the guidelines into batches
            batches = []
            for i in range(0, guidelines_count, guidelines_per_batch):
                batch = guidelines_list[i:i + guidelines_per_batch]
                batches.append(batch)

            # Make sure the number of batches does not exceed the requested concurrency
            while len(batches) > batch_count:
                # Merge the last batch into the second-to-last batch
                batches[-2].extend(batches[-1])
                batches.pop()
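            # Illustrative walk-through (not part of the original code): with 23 guidelines,
            # batch_count is 3 and guidelines_per_batch is 7, so the slicing above yields
            # batches of 7/7/7/2; this loop then folds the trailing batch of 2 into the
            # previous one, leaving 3 batches of 7/7/9.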

            print(f"Processing {guidelines_count} guidelines in {len(batches)} batches with {batch_count} concurrent batches")

            # Prepare the chat_history
            chat_history = format_messages_to_chat_history(messages)

            # Run all tasks concurrently: guideline batch processing + agent creation
            import asyncio
            tasks = []

            # Add all the guideline batch tasks
            for batch in batches:
                task = process_guideline_batch(
                    guidelines_batch=batch,
                    chat_history=chat_history,
                    model_name=model_name,
                    api_key=api_key,
                    model_server=model_server
                )
                tasks.append(task)

            # Add the agent creation task
            agent_task = agent_manager.get_or_create_agent(
                bot_id=bot_id,
                project_dir=project_dir,
                model_name=model_name,
                api_key=api_key,
                model_server=model_server,
                generate_cfg=generate_cfg,
                language=language,
                system_prompt=system_prompt,
                mcp_settings=mcp_settings,
                robot_type=robot_type,
                user_identifier=user_identifier
            )
            tasks.append(agent_task)

            # Wait for all tasks to complete
            all_results = await asyncio.gather(*tasks, return_exceptions=True)
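            # Note (added for clarity): asyncio.gather preserves the order of the tasks it is
            # given, so the agent task appended last can safely be read back from all_results[-1]
            # below; with return_exceptions=True, failed guideline batches come back as Exception
            # objects instead of raising here.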

            # Process the results: the last one is the agent, the preceding ones are the guideline batch results
            agent = all_results[-1]  # result of the agent creation
            batch_results = all_results[:-1]  # results of the guideline batches

            # Merge the guideline analysis results
            valid_results = []
            for i, result in enumerate(batch_results):
                if isinstance(result, Exception):
                    print(f"Guideline batch {i} failed: {result}")
                    continue
                if result and result.strip():
                    valid_results.append(result.strip())

            if valid_results:
                guideline_analysis = "\n\n".join(valid_results)
                print(f"Merged guideline analysis result: {guideline_analysis}")

            # Append the analysis result to the content of the last message
            if guideline_analysis and messages:
                last_message = messages[-1]
                if last_message.get('role') == 'user':
                    messages[-1]['content'] += f"\n\nActive Guidelines:\n{guideline_analysis}\nPlease follow these guidelines in your response."
    else:
        # 3. Get or create the agent instance from the global manager
        agent = await agent_manager.get_or_create_agent(
            bot_id=bot_id,
            project_dir=project_dir,
            model_name=model_name,
            api_key=api_key,
            model_server=model_server,
            generate_cfg=generate_cfg,
            language=language,
            system_prompt=system_prompt,
            mcp_settings=mcp_settings,
            robot_type=robot_type,
            user_identifier=user_identifier
        )

    # Decide whether to return a streaming or non-streaming response based on the stream parameter
    if stream:
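Below is a minimal, self-contained sketch (not part of this commit) of the split-and-gather pattern used above; `fake_llm_call` and `fake_create_agent` are illustrative stand-ins for `process_guideline_batch` / `call_guideline_llm` and `agent_manager.get_or_create_agent`:

```python
import asyncio
from typing import List


def _get_optimal_batch_size(guidelines_count: int) -> int:
    # Same thresholds as the helper added in this commit.
    if guidelines_count <= 10:
        return 1
    elif guidelines_count <= 20:
        return 2
    elif guidelines_count <= 30:
        return 3
    return 5


async def fake_llm_call(batch: List[str]) -> str:
    # Stand-in for analyzing one guideline batch.
    await asyncio.sleep(0.01)
    return f"analyzed {len(batch)} guidelines"


async def fake_create_agent() -> str:
    # Stand-in for agent creation, awaited alongside the batches.
    await asyncio.sleep(0.01)
    return "agent"


async def main() -> None:
    guidelines = [f"guideline {i}" for i in range(37)]
    batch_count = _get_optimal_batch_size(len(guidelines))        # 5
    per_batch = max(1, len(guidelines) // batch_count)            # 7
    batches = [guidelines[i:i + per_batch]
               for i in range(0, len(guidelines), per_batch)]     # sizes 7/7/7/7/7/2
    while len(batches) > batch_count:                             # fold the tail: 7/7/7/7/9
        batches[-2].extend(batches.pop())

    tasks = [fake_llm_call(b) for b in batches] + [fake_create_agent()]
    results = await asyncio.gather(*tasks, return_exceptions=True)
    agent, batch_results = results[-1], list(results[:-1])
    print(agent, batch_results)


asyncio.run(main())
```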
@@ -13,9 +13,7 @@ to the most recent state of an interaction between yourself (an AI agent) and a

Examples of Guideline Match Evaluations:
```
Example 1: Travel consultation

Example #1: ###
Example #1: travel consultation
- **Chat History**:
user: Hi, I'm planning a trip to Italy next month. What can I do there?
ai_agent: That sounds exciting! I can help you with that. Do you prefer exploring cities or enjoying scenic landscapes?
@@ -52,9 +50,8 @@ Examples of Guideline Match Evaluations:
]
}
```
Example 2: Course consultation

Example #2: ###
Example #2: Course Consultation
- **Chat History**:
user: Hi, I'm interested in your Python programming course, but I'm not sure if I'm ready for it.
ai_agent: Happy to help! Could you share a bit about your background or experience with programming so far?
@@ -93,9 +90,7 @@ Examples of Guideline Match Evaluations:
}
```

Example 3: Login issue

Example #3: ###
Example #3: Login issue
- **Chat History**:
user: I'm having trouble logging into my account.
ai_agent: I'm sorry to hear that. Can you tell me what happens when you try to log in?
@@ -119,9 +114,7 @@ Examples of Guideline Match Evaluations:
]
}

Example 4: Return policy

Example #4: ###
Example #4: Return Policy
- **Chat History**:
user: Hi, I'm thinking about ordering this coat, but I need to know — what's your return policy?
ai_agent: You can return items within 30 days either in-store or using our prepaid return label.
@@ -152,6 +145,7 @@ Guidelines List:
{guidelines_text}

OUTPUT FORMAT:
The content in JSON format needs to be wrapped in "```json" and "```".
Please specify the applicability of each guideline:
```json
{{