From 2faabbe392b5df70a5200f4b49258e3c12dbb1a1 Mon Sep 17 00:00:00 2001 From: wxg0103 <727495428@qq.com> Date: Fri, 21 Mar 2025 11:02:43 +0800 Subject: [PATCH] refactor(bailian): match 'qwen-omni-turbo' by substring so dated model variants also enable streaming --- .../impl/aliyun_bai_lian_model_provider/model/llm.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py index 6484e649..22328238 100644 --- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py @@ -5,6 +5,7 @@ from typing import Dict from setting.models_provider.base_model_provider import MaxKBBaseModel from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI + class BaiLianChatModel(MaxKBBaseModel, BaseChatOpenAI): @staticmethod def is_cache_model(): @@ -13,7 +14,7 @@ class BaiLianChatModel(MaxKBBaseModel, BaseChatOpenAI): @staticmethod def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs): optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) - if model_name == 'qwen-omni-turbo': + if 'qwen-omni-turbo' in model_name: optional_params['streaming'] = True return BaiLianChatModel( model=model_name,