diff --git a/prompt/guideline_prompt.md b/prompt/guideline_prompt.md
index 2990a11..9381337 100644
--- a/prompt/guideline_prompt.md
+++ b/prompt/guideline_prompt.md
@@ -138,3 +138,6 @@ If no guidelines apply, return an empty checks array:
   "checks": []
 }
 ```
+
+Rationale Text Language:
+{language}
diff --git a/prompt/preamble_prompt.md b/prompt/preamble_prompt.md
index df1ee01..08e5487 100644
--- a/prompt/preamble_prompt.md
+++ b/prompt/preamble_prompt.md
@@ -70,7 +70,7 @@ For substantive requests and questions:
 
 Remember to wrap your entire response in ```json ... ``` tags.
 
-Preamble LANGAGE:
+Preamble Text Language:
 {language}
 
 You will now be given the current state of the interaction to which you must generate the next preamble message.
diff --git a/routes/chat.py b/routes/chat.py
index 37a2050..112a9e2 100644
--- a/routes/chat.py
+++ b/routes/chat.py
@@ -144,6 +144,7 @@ async def process_guidelines_and_terms(
             guidelines_batch=batch,
             chat_history=chat_history,
             terms=terms_analysis,
+            language=language,
             model_name=model_name,
             api_key=api_key,
             model_server=model_server
diff --git a/utils/fastapi_utils.py b/utils/fastapi_utils.py
index dd372e6..c9c1c09 100644
--- a/utils/fastapi_utils.py
+++ b/utils/fastapi_utils.py
@@ -551,7 +551,7 @@ async def call_preamble_llm(chat_history: str, last_message: str, preamble_choic
 
 
 
-async def call_guideline_llm(chat_history: str, guidelines_text: str, terms: str, model_name: str, api_key: str, model_server: str) -> str:
+async def call_guideline_llm(chat_history: str, guidelines_text: str, terms: str, language: str, model_name: str, api_key: str, model_server: str) -> str:
     """Call the large language model to run the guideline analysis.
 
     Args:
@@ -573,7 +573,7 @@ async def call_guideline_llm(chat_history: str, guidelines_text: str, terms: str,
         return ""
 
     # Replace the placeholders in the template
-    system_prompt = guideline_template.replace('{chat_history}', chat_history).replace('{guidelines_text}', guidelines_text).replace('{terms}', terms)
+    system_prompt = guideline_template.replace('{chat_history}', chat_history).replace('{guidelines_text}', guidelines_text).replace('{terms}', terms).replace('{language}', get_language_text(language))
 
     # Configure the LLM
     llm_config = {
@@ -616,6 +616,7 @@ async def process_guideline_batch(
     guidelines_batch: List[str],
     chat_history: str,
     terms: str,
+    language: str,
     model_name: str,
     api_key: str,
     model_server: str
@@ -628,7 +629,7 @@ async def process_guideline_batch(
         # Call the LLM to analyze this batch of guidelines
         batch_guidelines_text = "\n".join(guidelines_batch)
         logger.info(f"Start processing guideline batch on attempt {attempt + 1}")
-        batch_analysis = await call_guideline_llm(chat_history, batch_guidelines_text, terms, model_name, api_key, model_server)
+        batch_analysis = await call_guideline_llm(chat_history, batch_guidelines_text, terms, language, model_name, api_key, model_server)
 
         # Extract the content wrapped in ```json and ``` from the response
         json_pattern = r'```json\s*\n(.*?)\n```'
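
Note for reviewers: the new `.replace('{language}', get_language_text(language))` call assumes a `get_language_text` helper in `utils/fastapi_utils.py` that maps a language code to the display text injected into the prompt templates; that helper is not part of this diff. A minimal sketch of what such a helper might look like, where the codes, names, and English fallback are all assumptions:

```python
# Hypothetical sketch only -- the real get_language_text is not shown in
# this diff; the codes, names, and fallback below are assumptions.
LANGUAGE_TEXT = {
    "en": "English",
    "zh": "Chinese (Simplified)",
    "ja": "Japanese",
}

def get_language_text(language: str) -> str:
    """Map a language code to the human-readable name substituted for the
    {language} placeholder in guideline_prompt.md and preamble_prompt.md."""
    return LANGUAGE_TEXT.get(language, "English")  # assumed default
```

Chained `str.replace` calls are used here rather than `str.format`, presumably because the templates contain literal braces (for example the JSON output example in guideline_prompt.md) that `format` would misinterpret as replacement fields.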