refactor: image model get_num_tokens override
parent bf279898b9
commit c46b7ab094
@@ -2,12 +2,11 @@
 
 from typing import Dict
 
-from langchain_community.chat_models import ChatOpenAI
-
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
+class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -18,6 +17,7 @@ class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
             openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
         return chat_tong_yi

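Each image model in this commit adds stream_usage=True alongside streaming=True, which achieves what the commented-out stream_options={"include_usage": True} line was aiming at: in recent langchain-openai releases the flag asks the OpenAI-compatible endpoint to attach token usage metadata to the stream. A minimal usage sketch, assuming a DashScope-compatible API key; the model_type value, model name, and credential keys below are illustrative, not taken from this diff:

# Illustrative only: model_type, model name and credential keys are
# assumptions that mirror the kwargs visible in the hunks above.
model = QwenVLChatModel.new_instance(
    model_type='IMAGE',
    model_name='qwen-vl-max',
    model_credential={'api_key': 'sk-...'},
)

usage = None
for chunk in model.stream('Describe this image.'):
    # With streaming=True and stream_usage=True, a trailing chunk carries
    # usage_metadata (prompt/completion token counts) when the provider supports it.
    if chunk.usage_metadata:
        usage = chunk.usage_metadata
print(usage)
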
@@ -1,7 +1,7 @@
-from typing import Dict
+from typing import Dict, List
 
+from langchain_core.messages import BaseMessage, get_buffer_string
 from langchain_openai import AzureChatOpenAI
-from langchain_openai.chat_models import ChatOpenAI
 
 from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
@@ -26,3 +26,17 @@ class AzureOpenAIImage(MaxKBBaseModel, AzureChatOpenAI):
             streaming=True,
             **optional_params,
         )
+
+    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+        try:
+            return super().get_num_tokens_from_messages(messages)
+        except Exception as e:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+    def get_num_tokens(self, text: str) -> int:
+        try:
+            return super().get_num_tokens(text)
+        except Exception as e:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return len(tokenizer.encode(text))

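The remaining image models in this commit switch their base class from ChatOpenAI to BaseChatOpenAI, imported from setting.models_provider.impl.base_chat_open_ai, a module that is not part of this diff. A minimal sketch of what such a shared base could look like, assuming it centralizes the same try/except token-counting fallback that AzureOpenAIImage gains above; the class body is an assumption for illustration, not the repository's actual implementation:

from typing import List

from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_openai.chat_models import ChatOpenAI

from common.config.tokenizer_manage_config import TokenizerManage


class BaseChatOpenAI(ChatOpenAI):
    """Hypothetical sketch of a shared OpenAI-compatible chat model: prefer
    the upstream token counter, and fall back to the locally configured
    tokenizer when it fails (for example on multimodal image messages)."""

    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
        try:
            # tiktoken-based counting from langchain_openai
            return super().get_num_tokens_from_messages(messages)
        except Exception:
            # fall back to MaxKB's local tokenizer
            tokenizer = TokenizerManage.get_tokenizer()
            return sum(len(tokenizer.encode(get_buffer_string([m]))) for m in messages)

    def get_num_tokens(self, text: str) -> int:
        try:
            return super().get_num_tokens(text)
        except Exception:
            tokenizer = TokenizerManage.get_tokenizer()
            return len(tokenizer.encode(text))
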
@@ -1,15 +1,8 @@
 from typing import Dict
 from urllib.parse import urlparse, ParseResult
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
-
-
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
 def get_base_url(url: str):
@@ -20,7 +13,7 @@ def get_base_url(url: str):
     return result_url[:-1] if result_url.endswith("/") else result_url
 
 
-class OllamaImage(MaxKBBaseModel, ChatOpenAI):
+class OllamaImage(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -34,5 +27,6 @@ class OllamaImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )

@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
-
-
-class OpenAIImage(MaxKBBaseModel, ChatOpenAI):
+class OpenAIImage(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class OpenAIImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )

@@ -2,12 +2,11 @@
 
 from typing import Dict
 
-from langchain_community.chat_models import ChatOpenAI
-
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
+class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -18,6 +17,7 @@ class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
             openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
         return chat_tong_yi

@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
-
-
-class TencentVision(MaxKBBaseModel, ChatOpenAI):
+class TencentVision(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class TencentVision(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )

@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
-
-
-class VolcanicEngineImage(MaxKBBaseModel, ChatOpenAI):
+class VolcanicEngineImage(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class VolcanicEngineImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_base=model_credential.get('api_base'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )

@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
-
-
-class XinferenceImage(MaxKBBaseModel, ChatOpenAI):
+class XinferenceImage(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class XinferenceImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )

@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
-
-
-class ZhiPuImage(MaxKBBaseModel, ChatOpenAI):
+class ZhiPuImage(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class ZhiPuImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_base='https://open.bigmodel.cn/api/paas/v4',
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
        )