From 633d81b3fb2f89f3f7d947dc4b0e7293742605d3 Mon Sep 17 00:00:00 2001
From: shaohuzhang1
Date: Fri, 22 Mar 2024 18:03:36 +0800
Subject: [PATCH] fix: remove models that cannot be downloaded from the
 selectable model list
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../ollama_model_provider/ollama_model_provider.py | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py b/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py
index f965727c..19396953 100644
--- a/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py
+++ b/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py
@@ -76,15 +76,7 @@ model_dict = {
     'llama2-chinese:13b': ModelInfo(
         'llama2-chinese:13b',
         '由于Llama2本身的中文对齐较弱,我们采用中文指令集,对meta-llama/Llama-2-13b-chat-hf进行LoRA微调,使其具备较强的中文对话能力。',
-        ModelTypeConst.LLM, ollama_llm_model_credential),
-    'llama2-chinese:13b-maxkb': ModelInfo(
-        'llama2-chinese:13b-maxkb',
-        '由于Llama2本身的中文对齐较弱,我们采用中文指令集,对meta-llama/Llama-2-13b-chat-hf进行LoRA微调,使其具备较强的中文对话能力。fi2cloud专用',
-        ModelTypeConst.LLM, ollama_llm_model_credential),
-    'baichuan2:13b-chat': ModelInfo(
-        'baichuan2:13b-chat',
-        'Baichuan 2 是百川智能推出的新一代开源大语言模型,采用 2.6 万亿 Tokens 的高质量语料训练,在权威的中文和英文 benchmark 上均取得同尺寸最好的效果',
-        ModelTypeConst.LLM, ollama_llm_model_credential),
+        ModelTypeConst.LLM, ollama_llm_model_credential)
 }
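
Note (not part of the patch): the two removed tags were offered in the selectable model list even though they cannot be pulled from the public Ollama library, which is what this fix addresses. Below is a minimal sketch of how a maintainer might sanity-check a tag before adding it back to model_dict. The tag_is_downloadable helper, the registry URL layout, and the requests dependency are illustrative assumptions, not code from this repository.

    import requests

    def tag_is_downloadable(name: str, tag: str) -> bool:
        # Assumption: the public Ollama library is served as an OCI-style
        # registry at registry.ollama.ai; a 200 on the manifest URL is taken
        # to mean the tag can actually be pulled.
        url = f'https://registry.ollama.ai/v2/library/{name}/manifests/{tag}'
        try:
            return requests.head(url, timeout=10, allow_redirects=True).status_code == 200
        except requests.RequestException:
            return False

    # Example: 'llama2-chinese:13b' stays in model_dict, while a private tag
    # such as 'llama2-chinese:13b-maxkb' would be expected to fail this check.
    print(tag_is_downloadable('llama2-chinese', '13b'))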