commit 13e2ba56a7
parent 5e26d88d18

    language
@@ -138,3 +138,6 @@ If no guidelines apply, return an empty checks array:
   "checks": []
 }
 ```
+
+Rationale Text Language:
+{language}
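For orientation, here is a minimal sketch of how the new tail of this template renders once `{language}` is substituted; the `'English'` value is an illustrative stand-in for whatever `get_language_text(language)` (used later in this diff) returns:

```python
# Minimal sketch: rendering the new template tail. The template text is
# copied from this hunk; 'English' is an illustrative stand-in for the
# value returned by get_language_text(language).
template_tail = (
    '  "checks": []\n'
    '}\n'
    '```\n'
    '\n'
    'Rationale Text Language:\n'
    '{language}\n'
)

print(template_tail.replace('{language}', 'English'))
```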
@@ -70,7 +70,7 @@ For substantive requests and questions:
 
 Remember to wrap your entire response in ```json ... ``` tags.
 
-Preamble LANGAGE:
+Preamble Text Language:
 {language}
 
 You will now be given the current state of the interaction to which you must generate the next preamble message.
@@ -144,6 +144,7 @@ async def process_guidelines_and_terms(
             guidelines_batch=batch,
             chat_history=chat_history,
             terms=terms_analysis,
+            language=language,
             model_name=model_name,
             api_key=api_key,
             model_server=model_server
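The surrounding fan-out code is not part of this diff; the following is a hypothetical sketch of how `process_guidelines_and_terms` might dispatch batches while threading the new `language` argument through. The `asyncio.gather` fan-out, the batch splitting, and `run_batches` itself are assumptions; only the keyword arguments mirror this hunk.

```python
import asyncio
from typing import List

# Hypothetical sketch of the call site around this hunk: split guidelines
# into batches and fan them out concurrently, passing language through to
# process_guideline_batch (defined later in this file). The gather-based
# fan-out and the batch size are assumptions, not code from this commit.
async def run_batches(guidelines: List[str], chat_history: str,
                      terms_analysis: str, language: str, model_name: str,
                      api_key: str, model_server: str, batch_size: int = 10):
    batches = [guidelines[i:i + batch_size]
               for i in range(0, len(guidelines), batch_size)]
    return await asyncio.gather(*(
        process_guideline_batch(
            guidelines_batch=batch,
            chat_history=chat_history,
            terms=terms_analysis,
            language=language,  # the new parameter added in this commit
            model_name=model_name,
            api_key=api_key,
            model_server=model_server,
        )
        for batch in batches
    ))
```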
@@ -551,7 +551,7 @@ async def call_preamble_llm(chat_history: str, last_message: str, preamble_choic
 
 
 
-async def call_guideline_llm(chat_history: str, guidelines_text: str, terms:str, model_name: str, api_key: str, model_server: str) -> str:
+async def call_guideline_llm(chat_history: str, guidelines_text: str, terms:str, language:str, model_name: str, api_key: str, model_server: str) -> str:
     """Call the large language model to run the guideline analysis
 
     Args:
@@ -573,7 +573,7 @@ async def call_guideline_llm(chat_history: str, guidelines_text: str, terms:str,
         return ""
 
     # Replace the placeholders in the template
-    system_prompt = guideline_template.replace('{chat_history}', chat_history).replace('{guidelines_text}', guidelines_text).replace('{terms}', terms)
+    system_prompt = guideline_template.replace('{chat_history}', chat_history).replace('{guidelines_text}', guidelines_text).replace('{terms}', terms).replace('{language}', get_language_text(language))
 
     # Configure the LLM
     llm_config = {
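`get_language_text` is referenced here but not defined anywhere in this diff; below is a minimal sketch of such a helper, assuming it maps a language code to the human-readable name injected into the prompt (the mapping table and the English fallback are assumptions):

```python
# Hypothetical helper: map a language code to the text injected into the
# prompt. Neither the mapping nor the English fallback appears in this
# commit; both are illustrative assumptions.
_LANGUAGE_TEXT = {
    "en": "English",
    "zh": "Chinese (Simplified)",
    "ja": "Japanese",
}

def get_language_text(language: str) -> str:
    return _LANGUAGE_TEXT.get(language, "English")
```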
@@ -616,6 +616,7 @@ async def process_guideline_batch(
     guidelines_batch: List[str],
     chat_history: str,
     terms: str,
+    language: str,
     model_name: str,
     api_key: str,
     model_server: str
@@ -628,7 +629,7 @@ async def process_guideline_batch(
             # Call the LLM to analyze this batch of guidelines
             batch_guidelines_text = "\n".join(guidelines_batch)
             logger.info(f"Start processed guideline batch on attempt {attempt + 1}")
-            batch_analysis = await call_guideline_llm(chat_history, batch_guidelines_text, terms, model_name, api_key, model_server)
+            batch_analysis = await call_guideline_llm(chat_history, batch_guidelines_text, terms, language, model_name, api_key, model_server)
 
             # Extract the content wrapped in ```json ... ``` from the response
             json_pattern = r'```json\s*\n(.*?)\n```'
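The hunk cuts off before `json_pattern` is applied; here is a minimal sketch of the extraction step that presumably follows, assuming `re.search` with `re.DOTALL` (required for `.*?` to span a multi-line JSON body) and a `json.loads` on the captured group:

```python
import json
import re

# Minimal sketch of using the pattern from this hunk. The re.DOTALL flag
# and the empty-checks fallback are assumptions about the code that
# follows the hunk; only json_pattern itself comes from the diff.
json_pattern = r'```json\s*\n(.*?)\n```'

def extract_checks(batch_analysis: str) -> dict:
    match = re.search(json_pattern, batch_analysis, re.DOTALL)
    if match is None:
        return {"checks": []}  # mirrors the template's empty-checks shape
    return json.loads(match.group(1))
```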