Merge upstream changes

commit 6f15ebba05

apps/common/config/tokenizer_manage_config.py (new file, +24)
@@ -0,0 +1,24 @@
+# coding=utf-8
+"""
+@project: maxkb
+@Author:虎
+@file: tokenizer_manage_config.py
+@date:2024/4/28 10:17
+@desc:
+"""
+
+
+class TokenizerManage:
+    tokenizer = None
+
+    @staticmethod
+    def get_tokenizer():
+        from transformers import GPT2TokenizerFast
+        if TokenizerManage.tokenizer is None:
+            TokenizerManage.tokenizer = GPT2TokenizerFast.from_pretrained(
+                'gpt2',
+                cache_dir="/opt/maxkb/model/tokenizer",
+                local_files_only=True,
+                resume_download=False,
+                force_download=False)
+        return TokenizerManage.tokenizer
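
Note: `from_pretrained` runs only on the first call; later calls return the cached class attribute, so every provider shares one tokenizer instance. Because `local_files_only=True` is set, the gpt2 tokenizer files must already exist under /opt/maxkb/model/tokenizer or loading will fail. A minimal usage sketch, not part of the commit, assuming those files are in place:

    # Sketch: exercising the lazy singleton added above.
    from common.config.tokenizer_manage_config import TokenizerManage

    t1 = TokenizerManage.get_tokenizer()
    t2 = TokenizerManage.get_tokenizer()
    assert t1 is t2                        # loaded once, reused process-wide
    print(len(t1.encode("hello world")))   # GPT-2 BPE token count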

@@ -19,6 +19,7 @@ from common.util.file_util import get_file_content
 from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \
     ModelInfo, \
     ModelTypeConst, ValidCode
+from setting.models_provider.impl.azure_model_provider.model.azure_chat_model import AzureChatModel
 from smartdoc.conf import PROJECT_DIR
 
 
@@ -119,8 +120,8 @@ class AzureModelProvider(IModelProvider):
 
     def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> AzureChatOpenAI:
         model_info: ModelInfo = model_dict.get(model_name)
-        azure_chat_open_ai = AzureChatOpenAI(
-            openai_api_base=model_credential.get('api_base'),
+        azure_chat_open_ai = AzureChatModel(
+            azure_endpoint=model_credential.get('api_base'),
             openai_api_version=model_info.api_version if model_name in model_dict else model_credential.get(
                 'api_version'),
             deployment_name=model_credential.get('deployment_name'),
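
Two changes land here: the provider now constructs the token-counting subclass, and the endpoint keyword moves from the deprecated `openai_api_base` to `azure_endpoint`, which current langchain-openai releases expect. A hedged sketch of building the subclass directly; every value is a placeholder and the exact credential keywords are assumptions, not taken from this commit:

    # Sketch only; placeholder values, assumed kwargs.
    model = AzureChatModel(
        azure_endpoint='https://<resource>.openai.azure.com',
        openai_api_version='2024-02-01',
        deployment_name='<deployment>',
        openai_api_key='<key>')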

azure_chat_model.py (new file, +24)
@@ -0,0 +1,24 @@
+# coding=utf-8
+"""
+@project: maxkb
+@Author:虎
+@file: azure_chat_model.py
+@date:2024/4/28 11:45
+@desc:
+"""
+from typing import List
+
+from langchain_core.messages import BaseMessage, get_buffer_string
+from langchain_openai import AzureChatOpenAI
+
+from common.config.tokenizer_manage_config import TokenizerManage
+
+
+class AzureChatModel(AzureChatOpenAI):
+    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+        tokenizer = TokenizerManage.get_tokenizer()
+        return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+    def get_num_tokens(self, text: str) -> int:
+        tokenizer = TokenizerManage.get_tokenizer()
+        return len(tokenizer.encode(text))
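
The same two overrides recur in the Qwen, Spark, and Zhipu subclasses below: each replaces the provider's default token accounting with a local GPT-2 approximation, so the numbers are estimates rather than the provider's exact counts. A short sketch, not part of the commit, of what `get_buffer_string` feeds the tokenizer:

    # Each message is rendered with its role prefix, e.g. "Human: Hi",
    # so the prefix is tokenized along with the content.
    from langchain_core.messages import AIMessage, HumanMessage, get_buffer_string

    msgs = [HumanMessage(content="Hi"), AIMessage(content="Hello!")]
    print([get_buffer_string([m]) for m in msgs])  # ['Human: Hi', 'AI: Hello!']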

@@ -11,19 +11,7 @@ from typing import List
 from langchain_community.chat_models import ChatOpenAI
 from langchain_core.messages import BaseMessage, get_buffer_string
 
-
-class TokenizerManage:
-    tokenizer = None
-
-    @staticmethod
-    def get_tokenizer():
-        from transformers import GPT2TokenizerFast
-        if TokenizerManage.tokenizer is None:
-            TokenizerManage.tokenizer = GPT2TokenizerFast.from_pretrained('gpt2',
-                                                                          cache_dir="/opt/maxkb/model/tokenizer",
-                                                                          resume_download=False,
-                                                                          force_download=False)
-        return TokenizerManage.tokenizer
+from common.config.tokenizer_manage_config import TokenizerManage
 
 
 class KimiChatModel(ChatOpenAI):

@@ -11,19 +11,7 @@ from typing import List
 from langchain_community.chat_models import ChatOpenAI
 from langchain_core.messages import BaseMessage, get_buffer_string
 
-
-class TokenizerManage:
-    tokenizer = None
-
-    @staticmethod
-    def get_tokenizer():
-        from transformers import GPT2TokenizerFast
-        if TokenizerManage.tokenizer is None:
-            TokenizerManage.tokenizer = GPT2TokenizerFast.from_pretrained('gpt2',
-                                                                          cache_dir="/opt/maxkb/model/tokenizer",
-                                                                          resume_download=False,
-                                                                          force_download=False)
-        return TokenizerManage.tokenizer
+from common.config.tokenizer_manage_config import TokenizerManage
 
 
 class OllamaChatModel(ChatOpenAI):

@@ -74,9 +74,49 @@ model_dict = {
         'llama2-chinese:13b',
         '由于Llama2本身的中文对齐较弱,我们采用中文指令集,对meta-llama/Llama-2-13b-chat-hf进行LoRA微调,使其具备较强的中文对话能力。',
         ModelTypeConst.LLM, ollama_llm_model_credential),
-    'qwen-14b': ModelInfo(
+    'llama3:8b': ModelInfo(
+        'llama3:8b',
+        'Meta Llama 3:迄今为止最有能力的公开产品LLM。80亿参数。',
+        ModelTypeConst.LLM, ollama_llm_model_credential),
+    'llama3:70b': ModelInfo(
+        'llama3:70b',
+        'Meta Llama 3:迄今为止最有能力的公开产品LLM。700亿参数。',
+        ModelTypeConst.LLM, ollama_llm_model_credential),
+    'qwen:0.5b': ModelInfo(
+        'qwen:0.5b',
+        'qwen 1.5 0.5b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。5亿参数。',
+        ModelTypeConst.LLM, ollama_llm_model_credential),
+    'qwen:1.8b': ModelInfo(
+        'qwen:1.8b',
+        'qwen 1.5 1.8b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。18亿参数。',
+        ModelTypeConst.LLM, ollama_llm_model_credential),
+    'qwen:4b': ModelInfo(
+        'qwen:4b',
+        'qwen 1.5 4b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。40亿参数。',
+        ModelTypeConst.LLM, ollama_llm_model_credential),
+    'qwen:7b': ModelInfo(
+        'qwen:7b',
+        'qwen 1.5 7b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。70亿参数。',
+        ModelTypeConst.LLM, ollama_llm_model_credential),
+    'qwen:14b': ModelInfo(
         'qwen:14b',
-        'qwen 1.5 14b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。',
+        'qwen 1.5 14b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。140亿参数。',
         ModelTypeConst.LLM, ollama_llm_model_credential),
+    'qwen:32b': ModelInfo(
+        'qwen:32b',
+        'qwen 1.5 32b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。320亿参数。',
+        ModelTypeConst.LLM, ollama_llm_model_credential),
+    'qwen:72b': ModelInfo(
+        'qwen:72b',
+        'qwen 1.5 72b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。720亿参数。',
+        ModelTypeConst.LLM, ollama_llm_model_credential),
+    'qwen:110b': ModelInfo(
+        'qwen:110b',
+        'qwen 1.5 110b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。1100亿参数。',
+        ModelTypeConst.LLM, ollama_llm_model_credential),
+    'phi3': ModelInfo(
+        'phi3',
+        'Phi-3 Mini是Microsoft的3.8B参数,轻量级,最先进的开放模型。',
+        ModelTypeConst.LLM, ollama_llm_model_credential),
 }
 
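
Model names absent from `model_dict` remain usable: the reworked select in the front-end change below is `allow-create`, and providers guard dictionary lookups (see the `model_name in model_dict` check in the Azure hunk above). A hedged sketch of registering one more entry, with the `ModelInfo` signature inferred from the entries above; the model and its description are illustrative only:

    # Illustrative entry, not part of the commit; signature inferred as
    # ModelInfo(name, description, type_const, credential_form).
    model_dict['mistral:7b'] = ModelInfo(
        'mistral:7b',
        'Mistral 7B(示例描述,仅作说明)',
        ModelTypeConst.LLM, ollama_llm_model_credential)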

@@ -11,19 +11,7 @@ from typing import List
 from langchain_core.messages import BaseMessage, get_buffer_string
 from langchain_openai import ChatOpenAI
 
-
-class TokenizerManage:
-    tokenizer = None
-
-    @staticmethod
-    def get_tokenizer():
-        from transformers import GPT2TokenizerFast
-        if TokenizerManage.tokenizer is None:
-            TokenizerManage.tokenizer = GPT2TokenizerFast.from_pretrained('gpt2',
-                                                                          cache_dir="/opt/maxkb/model/tokenizer",
-                                                                          resume_download=False,
-                                                                          force_download=False)
-        return TokenizerManage.tokenizer
+from common.config.tokenizer_manage_config import TokenizerManage
 
 
 class OpenAIChatModel(ChatOpenAI):

@@ -57,12 +57,39 @@ class OpenAILLMModelCredential(BaseForm, BaseModelCredential):
 openai_llm_model_credential = OpenAILLMModelCredential()
 
 model_dict = {
-    'gpt-3.5-turbo': ModelInfo('gpt-3.5-turbo', '', ModelTypeConst.LLM, openai_llm_model_credential,
+    'gpt-3.5-turbo': ModelInfo('gpt-3.5-turbo', '最新的gpt-3.5-turbo,随OpenAI调整而更新', ModelTypeConst.LLM,
+                               openai_llm_model_credential,
                                ),
-    'gpt-3.5-turbo-0613': ModelInfo('gpt-3.5-turbo-0613', '', ModelTypeConst.LLM, openai_llm_model_credential,
+    'gpt-3.5-turbo-0125': ModelInfo('gpt-3.5-turbo-0125',
+                                    '2024年1月25日的gpt-3.5-turbo快照,支持上下文长度16,385 tokens', ModelTypeConst.LLM,
+                                    openai_llm_model_credential,
+                                    ),
+    'gpt-3.5-turbo-1106': ModelInfo('gpt-3.5-turbo-1106',
+                                    '2023年11月6日的gpt-3.5-turbo快照,支持上下文长度16,385 tokens', ModelTypeConst.LLM,
+                                    openai_llm_model_credential,
+                                    ),
+    'gpt-3.5-turbo-0613': ModelInfo('gpt-3.5-turbo-0613',
+                                    '[Legacy] 2023年6月13日的gpt-3.5-turbo快照,将于2024年6月13日弃用',
+                                    ModelTypeConst.LLM, openai_llm_model_credential,
+                                    ),
+    'gpt-4': ModelInfo('gpt-4', '最新的gpt-4,随OpenAI调整而更新', ModelTypeConst.LLM, openai_llm_model_credential,
+                       ),
+    'gpt-4-turbo': ModelInfo('gpt-4-turbo', '最新的gpt-4-turbo,随OpenAI调整而更新', ModelTypeConst.LLM,
+                             openai_llm_model_credential,
+                             ),
+    'gpt-4-turbo-preview': ModelInfo('gpt-4-turbo-preview', '最新的gpt-4-turbo-preview,随OpenAI调整而更新',
+                                     ModelTypeConst.LLM, openai_llm_model_credential,
+                                     ),
+    'gpt-4-turbo-2024-04-09': ModelInfo('gpt-4-turbo-2024-04-09',
+                                        '2024年4月9日的gpt-4-turbo快照,支持上下文长度128,000 tokens',
+                                        ModelTypeConst.LLM, openai_llm_model_credential,
+                                        ),
+    'gpt-4-0125-preview': ModelInfo('gpt-4-0125-preview', '2024年1月25日的gpt-4-turbo快照,支持上下文长度128,000 tokens',
+                                    ModelTypeConst.LLM, openai_llm_model_credential,
+                                    ),
+    'gpt-4-1106-preview': ModelInfo('gpt-4-1106-preview', '2023年11月6日的gpt-4-turbo快照,支持上下文长度128,000 tokens',
+                                    ModelTypeConst.LLM, openai_llm_model_credential,
                                     ),
-    'gpt-4': ModelInfo('gpt-4', '', ModelTypeConst.LLM, openai_llm_model_credential,
-                       )
 }
 
 
qwen_chat_model.py (new file, +24)
@@ -0,0 +1,24 @@
+# coding=utf-8
+"""
+@project: maxkb
+@Author:虎
+@file: qwen_chat_model.py
+@date:2024/4/28 11:44
+@desc:
+"""
+from typing import List
+
+from langchain_community.chat_models import ChatTongyi
+from langchain_core.messages import BaseMessage, get_buffer_string
+
+from common.config.tokenizer_manage_config import TokenizerManage
+
+
+class QwenChatModel(ChatTongyi):
+    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+        tokenizer = TokenizerManage.get_tokenizer()
+        return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+    def get_num_tokens(self, text: str) -> int:
+        tokenizer = TokenizerManage.get_tokenizer()
+        return len(tokenizer.encode(text))

@@ -18,6 +18,7 @@ from common.forms import BaseForm
 from common.util.file_util import get_file_content
 from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, BaseModelCredential, \
     ModelInfo, IModelProvider, ValidCode
+from setting.models_provider.impl.qwen_model_provider.model.qwen_chat_model import QwenChatModel
 from smartdoc.conf import PROJECT_DIR
 
 
@@ -66,7 +67,7 @@ class QwenModelProvider(IModelProvider):
         return 3
 
     def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> ChatTongyi:
-        chat_tong_yi = ChatTongyi(
+        chat_tong_yi = QwenChatModel(
             model_name=model_name,
             dashscope_api_key=model_credential.get('api_key')
         )
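
Because `QwenChatModel` subclasses `ChatTongyi`, the annotated return type is still satisfied and callers pick up the overridden token counters transparently. A sketch with placeholder values; the model name is an assumption, not from this commit:

    # Sketch only.
    chat = QwenChatModel(model_name='qwen-turbo', dashscope_api_key='<key>')
    assert isinstance(chat, ChatTongyi)
    print(chat.get_num_tokens("你好"))  # local GPT-2 estimate, not DashScope's count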

@@ -18,19 +18,7 @@ from langchain.schema.output import ChatGenerationChunk
 from langchain.schema.runnable import RunnableConfig
 from langchain_community.chat_models import QianfanChatEndpoint
 
-
-class TokenizerManage:
-    tokenizer = None
-
-    @staticmethod
-    def get_tokenizer():
-        from transformers import GPT2TokenizerFast
-        if TokenizerManage.tokenizer is None:
-            TokenizerManage.tokenizer = GPT2TokenizerFast.from_pretrained('gpt2',
-                                                                          cache_dir="/opt/maxkb/model/tokenizer",
-                                                                          resume_download=False,
-                                                                          force_download=False)
-        return TokenizerManage.tokenizer
+from common.config.tokenizer_manage_config import TokenizerManage
 
 
 class QianfanChatModel(QianfanChatEndpoint):

@@ -12,11 +12,21 @@ from typing import List, Optional, Any, Iterator
 from langchain_community.chat_models import ChatSparkLLM
 from langchain_community.chat_models.sparkllm import _convert_message_to_dict, _convert_delta_to_message_chunk
 from langchain_core.callbacks import CallbackManagerForLLMRun
-from langchain_core.messages import BaseMessage, AIMessageChunk
+from langchain_core.messages import BaseMessage, AIMessageChunk, get_buffer_string
 from langchain_core.outputs import ChatGenerationChunk
 
+from common.config.tokenizer_manage_config import TokenizerManage
+
 
 class XFChatSparkLLM(ChatSparkLLM):
+    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+        tokenizer = TokenizerManage.get_tokenizer()
+        return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+    def get_num_tokens(self, text: str) -> int:
+        tokenizer = TokenizerManage.get_tokenizer()
+        return len(tokenizer.encode(text))
+
     def _stream(
         self,
         messages: List[BaseMessage],

zhipu_chat_model.py (new file, +24)
@@ -0,0 +1,24 @@
+# coding=utf-8
+"""
+@project: maxkb
+@Author:虎
+@file: zhipu_chat_model.py
+@date:2024/4/28 11:42
+@desc:
+"""
+from typing import List
+
+from langchain_community.chat_models import ChatZhipuAI
+from langchain_core.messages import BaseMessage, get_buffer_string
+
+from common.config.tokenizer_manage_config import TokenizerManage
+
+
+class ZhipuChatModel(ChatZhipuAI):
+    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+        tokenizer = TokenizerManage.get_tokenizer()
+        return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+    def get_num_tokens(self, text: str) -> int:
+        tokenizer = TokenizerManage.get_tokenizer()
+        return len(tokenizer.encode(text))

@@ -18,6 +18,7 @@ from common.forms import BaseForm
 from common.util.file_util import get_file_content
 from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, BaseModelCredential, \
     ModelInfo, IModelProvider, ValidCode
+from setting.models_provider.impl.zhipu_model_provider.model.zhipu_chat_model import ZhipuChatModel
 from smartdoc.conf import PROJECT_DIR
 
 
@@ -66,7 +67,7 @@ class ZhiPuModelProvider(IModelProvider):
         return 3
 
     def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> ChatZhipuAI:
-        zhipuai_chat = ChatZhipuAI(
+        zhipuai_chat = ZhipuChatModel(
             temperature=0.5,
             api_key=model_credential.get('api_key'),
             model=model_name

@@ -12,25 +12,47 @@
       </div>
       <div class="flex-center avatar">
         <el-tooltip effect="dark" :content="$t('layout.topbar.github')" placement="top">
-          <AppIcon iconName="app-github" class="cursor color-secondary mr-8 ml-8" style="font-size: 20px"
-            @click="toUrl('https://github.com/1Panel-dev/MaxKB')"></AppIcon>
+          <AppIcon
+            iconName="app-github"
+            class="cursor color-secondary mr-8 ml-8"
+            style="font-size: 20px"
+            @click="toUrl('https://github.com/1Panel-dev/MaxKB')"
+          ></AppIcon>
         </el-tooltip>
         <el-tooltip effect="dark" :content="$t('layout.topbar.handbook')" placement="top">
-          <AppIcon iconName="app-reading" class="cursor color-secondary mr-8 ml-8" style="font-size: 20px"
-            @click="toUrl('https://github.com/1Panel-dev/MaxKB/wiki')"></AppIcon>
+          <AppIcon
+            iconName="app-reading"
+            class="cursor color-secondary mr-8 ml-8"
+            style="font-size: 20px"
+            @click="toUrl('https://github.com/1Panel-dev/MaxKB/wiki')"
+          ></AppIcon>
         </el-tooltip>
         <el-tooltip effect="dark" :content="$t('layout.topbar.forum')" placement="top">
-          <AppIcon iconName="app-help" class="cursor color-secondary mr-8 ml-8" style="font-size: 20px"
-            @click="toUrl('https://bbs.fit2cloud.com/c/mk/11')"></AppIcon>
+          <AppIcon
+            iconName="app-help"
+            class="cursor color-secondary mr-8 ml-8"
+            style="font-size: 20px"
+            @click="toUrl('https://bbs.fit2cloud.com/c/mk/11')"
+          ></AppIcon>
         </el-tooltip>
-        <el-dropdown trigger="click" type="primary">
+        <el-dropdown v-if="false" trigger="click" type="primary">
          <template #dropdown>
             <el-dropdown-menu>
-              <el-dropdown-item v-for="(lang, index) in langList" :key="index" :value="lang.value"
-                @click="changeLang(lang.value)">{{ lang.label }}</el-dropdown-item>
+              <el-dropdown-item
+                v-for="(lang, index) in langList"
+                :key="index"
+                :value="lang.value"
+                @click="changeLang(lang.value)"
+                >{{ lang.label }}</el-dropdown-item
+              >
             </el-dropdown-menu>
           </template>
-          <AppIcon iconName="app-translate" class="cursor color-secondary mr-16 ml-8" style="font-size: 20px" @click="">
+          <AppIcon
+            iconName="app-translate"
+            class="cursor color-secondary mr-16 ml-8"
+            style="font-size: 20px"
+            @click=""
+          >
           </AppIcon>
         </el-dropdown>
         <Avatar></Avatar>
@@ -41,15 +63,15 @@
 import TopMenu from './top-menu/index.vue'
 import Avatar from './avatar/index.vue'
 import { useRouter } from 'vue-router'
-import { langList } from '@/locales/index';
-import { useLocale } from '@/locales/useLocale';
+import { langList } from '@/locales/index'
+import { useLocale } from '@/locales/useLocale'
 const router = useRouter()
 const defaultTitle = import.meta.env.VITE_APP_TITLE
 
-const { changeLocale } = useLocale();
+const { changeLocale } = useLocale()
 const changeLang = (lang: string) => {
-  changeLocale(lang);
-};
+  changeLocale(lang)
+}
 function toUrl(url: string) {
   window.open(url, '_blank')
 }

@@ -81,11 +81,8 @@
           </div>
           <el-tooltip effect="dark" placement="right">
             <template #content>
-              <p>为供应商的 LLM 模型,支持自定义输入</p>
-              <p>
-                下拉选项是 OpenAI
-                常用的一些大语言模型如:gpt-3.5-turbo-0613、gpt-3.5-turbo、gpt-4 等
-              </p>
+              <p>若下拉选项没有列出想要添加的LLM模型,自定义输入模型名称后回车即可</p>
+              <p>注意,基础模型需要与供应商的模型名称一致</p>
             </template>
             <AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
           </el-tooltip>
@@ -101,12 +98,21 @@
             allow-create
             default-first-option
           >
-            <el-option
-              v-for="item in base_model_list"
-              :key="item.name"
-              :label="item.name"
-              :value="item.name"
-            ></el-option>
+            <el-option v-for="item in base_model_list" :key="item.name" :value="item.name">
+              <template #default>
+                <div class="flex align-center" style="display: inline-flex">
+                  <div class="flex-between mr-4">
+                    <span>{{ item.name }} </span>
+                  </div>
+                  <el-tooltip effect="dark" placement="right" v-if="item.desc">
+                    <template #content>
+                      <p>{{ item.desc }}</p>
+                    </template>
+                    <AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
+                  </el-tooltip>
+                </div>
+              </template>
+            </el-option>
           </el-select>
         </el-form-item>
       </template>
@@ -204,6 +210,7 @@ const list_base_model = (model_type: any) => {
     )
   }
 }
+
 const close = () => {
   base_form_data.value = { name: '', model_type: '', model_name: '' }
   credential_form_data.value = {}

@@ -75,11 +75,8 @@
           </div>
           <el-tooltip effect="dark" placement="right">
             <template #content>
-              <p>为供应商的 LLM 模型,支持自定义输入</p>
-              <p>
-                下拉选项是 OpenAI
-                常用的一些大语言模型如:gpt-3.5-turbo-0613、gpt-3.5-turbo、gpt-4 等
-              </p>
+              <p>若下拉选项没有列出想要添加的LLM模型,自定义输入模型名称后回车即可</p>
+              <p>注意,基础模型需要与供应商的模型名称一致</p>
             </template>
             <AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
           </el-tooltip>
@@ -95,12 +92,21 @@
             allow-create
             default-first-option
           >
-            <el-option
-              v-for="item in base_model_list"
-              :key="item.name"
-              :label="item.name"
-              :value="item.name"
-            ></el-option>
+            <el-option v-for="item in base_model_list" :key="item.name" :value="item.name">
+              <template #default>
+                <div class="flex align-center" style="display: inline-flex">
+                  <div class="flex-between mr-4">
+                    <span>{{ item.name }} </span>
+                  </div>
+                  <el-tooltip effect="dark" placement="right" v-if="item.desc">
+                    <template #content>
+                      <p>{{ item.desc }}</p>
+                    </template>
+                    <AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
+                  </el-tooltip>
+                </div>
+              </template>
+            </el-option>
           </el-select>
         </el-form-item>
       </template>