朱潮 2025-11-28 17:42:58 +08:00
parent 5e26d88d18
commit 13e2ba56a7
4 changed files with 9 additions and 4 deletions

View File

@@ -138,3 +138,6 @@ If no guidelines apply, return an empty checks array:
 "checks": []
 }
 ```
+Rationale Text Language:
+{language}
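The new `{language}` placeholder is substituted later in this commit via `get_language_text(language)`, whose definition is outside the diff. A minimal sketch of the assumed helper, mapping a language code to the display text that lands on the `Rationale Text Language:` line:

```python
# Hypothetical sketch: get_language_text is called in this commit but not
# defined in it. Assumed behavior: map a language code to the display text
# injected into the {language} placeholder.
def get_language_text(language: str) -> str:
    language_map = {
        "zh": "Chinese",
        "en": "English",
    }
    # Assumed fallback when the code is missing or unrecognized.
    return language_map.get(language, "English")
```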

View File

@@ -70,7 +70,7 @@ For substantive requests and questions:
 Remember to wrap your entire response in ```json ... ``` tags.
-Preamble LANGAGE:
+Preamble Text Language:
 {language}
 You will now be given the current state of the interaction to which you must generate the next preamble message.

View File

@@ -144,6 +144,7 @@ async def process_guidelines_and_terms(
     guidelines_batch=batch,
     chat_history=chat_history,
     terms=terms_analysis,
+    language=language,
     model_name=model_name,
     api_key=api_key,
     model_server=model_server
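Only the call site is visible in this hunk, so `process_guidelines_and_terms` itself must already accept (or gain elsewhere) a `language` parameter to forward here. A sketch of the assumed threading; the signature, batching logic, and batch size are guesses, not the repository's actual code:

```python
# Hypothetical sketch of how `language` is assumed to thread through;
# the real signature and batching logic are outside this hunk.
async def process_guidelines_and_terms(
    guidelines: list[str],
    chat_history: str,
    terms_analysis: str,
    language: str,
    model_name: str,
    api_key: str,
    model_server: str,
) -> list[str]:
    results = []
    batch_size = 10  # assumed; not visible in the diff
    for i in range(0, len(guidelines), batch_size):
        results.append(await process_guideline_batch(
            guidelines_batch=guidelines[i:i + batch_size],
            chat_history=chat_history,
            terms=terms_analysis,
            language=language,  # new in this commit
            model_name=model_name,
            api_key=api_key,
            model_server=model_server,
        ))
    return results
```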

View File

@@ -551,7 +551,7 @@ async def call_preamble_llm(chat_history: str, last_message: str, preamble_choic
-async def call_guideline_llm(chat_history: str, guidelines_text: str, terms:str, model_name: str, api_key: str, model_server: str) -> str:
+async def call_guideline_llm(chat_history: str, guidelines_text: str, terms:str, language:str, model_name: str, api_key: str, model_server: str) -> str:
     """Call the large language model to run guideline analysis.
     Args:
@@ -573,7 +573,7 @@ async def call_guideline_llm(chat_history: str, guidelines_text: str, terms:str,
         return ""
     # Replace the placeholders in the template
-    system_prompt = guideline_template.replace('{chat_history}', chat_history).replace('{guidelines_text}', guidelines_text).replace('{terms}', terms)
+    system_prompt = guideline_template.replace('{chat_history}', chat_history).replace('{guidelines_text}', guidelines_text).replace('{terms}', terms).replace('{language}', get_language_text(language))
     # Configure the LLM
     llm_config = {
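The template is filled with chained `str.replace` calls rather than `str.format`, which matters here: the guideline template contains literal JSON braces (the `"checks": []` example above), and `str.format` would choke on them as malformed replacement fields. A small illustration of the difference:

```python
# The guideline template embeds a literal JSON object, so str.format
# would try to parse its braces as replacement fields and raise.
template = '{\n  "checks": []\n}\nRationale Text Language:\n{language}'

# template.format(language="Chinese")  # raises at runtime
filled = template.replace('{language}', 'Chinese')  # only the exact token changes
```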
@@ -616,6 +616,7 @@ async def process_guideline_batch(
     guidelines_batch: List[str],
     chat_history: str,
     terms: str,
+    language: str,
     model_name: str,
     api_key: str,
     model_server: str
@@ -628,7 +629,7 @@ async def process_guideline_batch(
         # Call the LLM to analyze this batch of guidelines
         batch_guidelines_text = "\n".join(guidelines_batch)
         logger.info(f"Start processing guideline batch on attempt {attempt + 1}")
-        batch_analysis = await call_guideline_llm(chat_history, batch_guidelines_text, terms, model_name, api_key, model_server)
+        batch_analysis = await call_guideline_llm(chat_history, batch_guidelines_text, terms, language, model_name, api_key, model_server)
         # Extract the content wrapped in ```json and ``` from the response
         json_pattern = r'```json\s*\n(.*?)\n```'
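The extraction pattern's `(.*?)` has to span multiple lines, so the `re.search` call (outside this hunk) presumably passes `re.DOTALL`; without it, any multi-line JSON body would fail to match. A sketch of the assumed extraction step; the helper name and fallback are illustrative, not the repository's code:

```python
import json
import re

def extract_json_block(llm_response: str) -> dict:
    """Pull the JSON payload out of a fenced code block (illustrative helper)."""
    json_pattern = r'```json\s*\n(.*?)\n```'
    # re.DOTALL is assumed: it lets (.*?) match across newlines, which a
    # multi-line JSON body requires.
    match = re.search(json_pattern, llm_response, re.DOTALL)
    if match is None:
        return {}  # assumed fallback; the real retry/error handling isn't shown
    return json.loads(match.group(1))
```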