refactor: add is_cache_model method to various model classes to standardize cache behavior

This commit is contained in:
wxg0103 2025-07-08 17:11:18 +08:00
parent fce2f50a01
commit 521fff2818
34 changed files with 139 additions and 16 deletions

View File

@@ -8,6 +8,10 @@ from models_provider.impl.base_chat_open_ai import BaseChatOpenAI
class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)

View File

@@ -19,6 +19,10 @@ class AliyunBaiLianSpeechToText(MaxKBBaseModel, BaseSpeechToText):
self.api_key = kwargs.get('api_key')
self.model = kwargs.get('model')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {}

View File

@@ -13,7 +13,6 @@ from models_provider.base_model_provider import MaxKBBaseModel
from models_provider.impl.base_tti import BaseTextToImage
class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage):
api_key: str
model_name: str
@@ -25,6 +24,10 @@ class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage):
self.model_name = kwargs.get('model_name')
self.params = kwargs.get('params')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {'size': '1024*1024', 'style': '<auto>', 'n': 1}}

View File

@@ -20,6 +20,10 @@ class AliyunBaiLianTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
self.model = kwargs.get('model')
self.params = kwargs.get('params')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {'voice': 'longxiaochun', 'speech_rate': 1.0}}
@@ -52,5 +56,3 @@ class AliyunBaiLianTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
raise Exception(audio)
return audio
def is_cache_model(self):
return False

View File

@@ -13,6 +13,10 @@ def custom_get_token_ids(text: str):
class AnthropicImage(MaxKBBaseModel, ChatAnthropic):
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)

View File

@@ -14,6 +14,10 @@ def custom_get_token_ids(text: str):
class AzureOpenAIImage(MaxKBBaseModel, AzureChatOpenAI):
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)

View File

@@ -25,6 +25,10 @@ class AzureOpenAISpeechToText(MaxKBBaseModel, BaseSpeechToText):
self.api_base = kwargs.get('api_base')
self.api_version = kwargs.get('api_version')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {}

View File

@@ -27,6 +27,10 @@ class AzureOpenAITextToImage(MaxKBBaseModel, BaseTextToImage):
self.model = kwargs.get('model')
self.params = kwargs.get('params')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}}

View File

@@ -28,6 +28,10 @@ class AzureOpenAITextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
self.model = kwargs.get('model')
self.params = kwargs.get('params')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {'voice': 'alloy'}}

View File

@@ -13,6 +13,10 @@ def custom_get_token_ids(text: str):
class GeminiImage(MaxKBBaseModel, ChatGoogleGenerativeAI):
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)

View File

@@ -22,6 +22,10 @@ class GeminiSpeechToText(MaxKBBaseModel, BaseSpeechToText):
super().__init__(**kwargs)
self.api_key = kwargs.get('api_key')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {}

View File

@@ -15,6 +15,10 @@ def get_base_url(url: str):
class OllamaImage(MaxKBBaseModel, BaseChatOpenAI):
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
api_base = model_credential.get('api_base', '')

View File

@@ -6,6 +6,10 @@ from models_provider.impl.base_chat_open_ai import BaseChatOpenAI
class OpenAIImage(MaxKBBaseModel, BaseChatOpenAI):
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)

View File

@@ -19,6 +19,10 @@ class OpenAISpeechToText(MaxKBBaseModel, BaseSpeechToText):
api_key: str
model: str
@staticmethod
def is_cache_model():
return False
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.api_key = kwargs.get('api_key')

View File

@@ -25,6 +25,10 @@ class OpenAITextToImage(MaxKBBaseModel, BaseTextToImage):
self.model = kwargs.get('model')
self.params = kwargs.get('params')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}}

View File

@@ -26,6 +26,10 @@ class OpenAITextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
self.model = kwargs.get('model')
self.params = kwargs.get('params')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {'voice': 'alloy'}}

View File

@@ -6,6 +6,10 @@ from models_provider.impl.base_chat_open_ai import BaseChatOpenAI
class RegoloImage(MaxKBBaseModel, BaseChatOpenAI):
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)

View File

@@ -25,6 +25,10 @@ class RegoloTextToImage(MaxKBBaseModel, BaseTextToImage):
self.model = kwargs.get('model')
self.params = kwargs.get('params')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}}
@@ -38,9 +42,6 @@ class RegoloTextToImage(MaxKBBaseModel, BaseTextToImage):
**optional_params,
)
def is_cache_model(self):
return False
def check_auth(self):
chat = OpenAI(api_key=self.api_key, base_url=self.api_base)
response_list = chat.models.with_raw_response.list()

View File

@@ -6,6 +6,10 @@ from models_provider.impl.base_chat_open_ai import BaseChatOpenAI
class SiliconCloudImage(MaxKBBaseModel, BaseChatOpenAI):
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)

View File

@@ -38,6 +38,10 @@ class SiliconCloudSpeechToText(MaxKBBaseModel, BaseSpeechToText):
**optional_params,
)
@staticmethod
def is_cache_model():
return False
def check_auth(self):
client = OpenAI(
base_url=self.api_base,
@@ -56,4 +60,3 @@ class SiliconCloudSpeechToText(MaxKBBaseModel, BaseSpeechToText):
buffer.name = "file.mp3" # this is the important line
res = client.audio.transcriptions.create(model=self.model, language="zh", file=buffer)
return res.text

View File

@@ -25,6 +25,10 @@ class SiliconCloudTextToImage(MaxKBBaseModel, BaseTextToImage):
self.model = kwargs.get('model')
self.params = kwargs.get('params')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}}
@@ -38,9 +42,6 @@ class SiliconCloudTextToImage(MaxKBBaseModel, BaseTextToImage):
**optional_params,
)
def is_cache_model(self):
return False
def check_auth(self):
chat = OpenAI(api_key=self.api_key, base_url=self.api_base)
response_list = chat.models.with_raw_response.list()

View File

@@ -18,3 +18,7 @@ class TencentVision(MaxKBBaseModel, BaseChatOpenAI):
stream_usage=True,
extra_body=optional_params
)
@staticmethod
def is_cache_model():
return False

View File

@@ -18,3 +18,7 @@ class VolcanicEngineImage(MaxKBBaseModel, BaseChatOpenAI):
stream_usage=True,
extra_body=optional_params
)
@staticmethod
def is_cache_model():
return False

View File

@@ -200,6 +200,10 @@ class VolcanicEngineSpeechToText(MaxKBBaseModel, BaseSpeechToText):
self.volcanic_app_id = kwargs.get('volcanic_app_id')
self.volcanic_cluster = kwargs.get('volcanic_cluster')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {}

View File

@@ -37,7 +37,6 @@ req_key_dict = {
}
def sign(key, msg):
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
@@ -127,6 +126,10 @@ class VolcanicEngineTextToImage(MaxKBBaseModel, BaseTextToImage):
self.model_version = kwargs.get('model_version')
self.params = kwargs.get('params')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {}}

View File

@@ -94,3 +94,7 @@ class XFSparkImage(MaxKBBaseModel, ChatSparkLLM):
if run_manager:
run_manager.on_llm_new_token(str(chunk.content), chunk=cg_chunk)
yield cg_chunk
@staticmethod
def is_cache_model():
return False

View File

@@ -29,7 +29,6 @@ ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
class XFSparkSpeechToText(MaxKBBaseModel, BaseSpeechToText):
spark_app_id: str
spark_api_key: str
@@ -43,6 +42,10 @@ class XFSparkSpeechToText(MaxKBBaseModel, BaseSpeechToText):
self.spark_api_key = kwargs.get('spark_api_key')
self.spark_api_secret = kwargs.get('spark_api_secret')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {}

View File

@@ -23,7 +23,6 @@ from common.utils.common import _remove_empty_lines
from models_provider.base_model_provider import MaxKBBaseModel
from models_provider.impl.base_tts import BaseTextToSpeech
STATUS_FIRST_FRAME = 0 # 第一帧的标识
STATUS_CONTINUE_FRAME = 1 # 中间帧标识
STATUS_LAST_FRAME = 2 # 最后一帧的标识
@@ -48,6 +47,10 @@ class XFSparkTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
self.spark_api_secret = kwargs.get('spark_api_secret')
self.params = kwargs.get('params')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {'vcn': 'xiaoyan', 'speed': 50}}

View File

@@ -9,6 +9,10 @@ from models_provider.impl.base_chat_open_ai import BaseChatOpenAI
class XinferenceImage(MaxKBBaseModel, BaseChatOpenAI):
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)

View File

@@ -23,6 +23,10 @@ class XInferenceSpeechToText(MaxKBBaseModel, BaseSpeechToText):
self.api_key = kwargs.get('api_key')
self.api_base = kwargs.get('api_base')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {}

View File

@@ -5,7 +5,7 @@ from openai import OpenAI
from common.config.tokenizer_manage_config import TokenizerManage
from common.utils.common import bytes_to_uploaded_file
#from dataset.serializers.file_serializers import FileSerializer
# from dataset.serializers.file_serializers import FileSerializer
from models_provider.base_model_provider import MaxKBBaseModel
from models_provider.impl.base_tti import BaseTextToImage
@@ -28,6 +28,10 @@ class XinferenceTextToImage(MaxKBBaseModel, BaseTextToImage):
self.model = kwargs.get('model')
self.params = kwargs.get('params')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}}
@@ -57,7 +61,7 @@ class XinferenceTextToImage(MaxKBBaseModel, BaseTextToImage):
meta = {
'debug': True,
}
#file_url = FileSerializer(data={'file': file, 'meta': meta}).upload()
#file_urls.append(f'http://localhost:8080{file_url}')
# file_url = FileSerializer(data={'file': file, 'meta': meta}).upload()
# file_urls.append(f'http://localhost:8080{file_url}')
return file_urls

View File

@@ -27,6 +27,10 @@ class XInferenceTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
self.model = kwargs.get('model')
self.params = kwargs.get('params')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {'voice': '中文女'}}

View File

@@ -6,6 +6,10 @@ from models_provider.impl.base_chat_open_ai import BaseChatOpenAI
class ZhiPuImage(MaxKBBaseModel, BaseChatOpenAI):
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)

View File

@@ -26,6 +26,10 @@ class ZhiPuTextToImage(MaxKBBaseModel, BaseTextToImage):
self.model = kwargs.get('model')
self.params = kwargs.get('params')
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {'size': '1024x1024'}}