maxkb add log

朱潮 2025-08-25 22:49:43 +08:00
parent f359db8beb
commit 0c9da8e2eb
2 changed files with 15 additions and 6 deletions

View File

@@ -120,7 +120,7 @@ class MaxKBMinerUConfig(MinerUConfig):
 else:
     model_id = self.vision_model_id
-logger.debug(f"MaxKB: Calling model {model_id} with {len(messages)} messages")
+logger.info(f"MaxKB: Calling model {model_id} with {len(messages)} messages, use_llm={use_llm}, model_type={model_type}")
 # Check if this is a vision request (has images)
 has_images = False
@@ -171,6 +171,7 @@ class MaxKBMinerUConfig(MinerUConfig):
 combined_prompt += msg.get('content', '')
 if image_path:
+    logger.info(f"MaxKB: Calling vision_completion with model_id={model_id}, image_path={image_path[:100] if len(image_path) > 100 else image_path}")
     response_text = await maxkb_model_client.vision_completion(
         model_id=model_id,
         image_path=image_path,
@@ -179,6 +180,7 @@ class MaxKBMinerUConfig(MinerUConfig):
 )
 else:
     # Fallback to text completion
+    logger.info(f"MaxKB: Falling back to chat_completion for vision model {model_id} (no image content)")
     response_text = await maxkb_model_client.chat_completion(
         model_id=model_id,
         messages=messages,
@@ -186,6 +188,7 @@ class MaxKBMinerUConfig(MinerUConfig):
 )
 else:
     # Regular text completion
+    logger.info(f"MaxKB: Calling chat_completion with model_id={model_id}")
     response_text = await maxkb_model_client.chat_completion(
         model_id=model_id,
         messages=messages,
@@ -210,7 +213,7 @@ class MaxKBMinerUConfig(MinerUConfig):
 return MockResponse(response_text)
 except Exception as e:
-    logger.error(f"MaxKB model call failed: {str(e)}")
+    logger.error(f"MaxKB model call failed for model_id={model_id}, use_llm={use_llm}: {str(e)}")
     # Return a mock response with error message
     class MockResponse:
         def __init__(self, content):
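The hunks above show only fragments of the call path. As a minimal sketch of how the vision/chat routing and the MockResponse fallback fit together, assuming the caller only reads a `content` attribute and using placeholder names (`call_model`, `client`) that do not appear in the diff:

```python
import logging

logger = logging.getLogger("maxkb.mineru")


class MockResponse:
    """Minimal response wrapper; assumes callers only read .content (sketch, not the file's actual class)."""
    def __init__(self, content):
        self.content = content


async def call_model(client, model_id, messages, image_path=None, use_llm=False):
    """Route to a vision or chat completion and wrap the returned text (illustrative only)."""
    try:
        if image_path:
            logger.info(f"Calling vision_completion with model_id={model_id}")
            text = await client.vision_completion(model_id=model_id, image_path=image_path)
        else:
            # Fallback / regular path: plain chat completion
            logger.info(f"Calling chat_completion with model_id={model_id}")
            text = await client.chat_completion(model_id=model_id, messages=messages)
        return MockResponse(text)
    except Exception as e:
        logger.error(f"Model call failed for model_id={model_id}, use_llm={use_llm}: {e}")
        return MockResponse(str(e))
```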

View File

@@ -67,7 +67,7 @@ class MaxKBModelClient:
 ).first()
 if model:
-    self.logger.info(f"Using default model: {model.name} (ID: {model.id})")
+    self.logger.info(f"Using default LLM model: {model.name} (ID: {model.id}, model_name: {model.model_name})")
 if not model:
     raise ValueError(f"No LLM model available (requested: {model_id})")
@@ -144,7 +144,7 @@
 ).first()
 if model:
-    self.logger.info(f"Using default model: {model.name} (ID: {model.id})")
+    self.logger.info(f"Using default vision model: {model.name} (ID: {model.id}, model_name: {model.model_name})")
 if not model:
     raise ValueError(f"No vision model available (requested: {model_id})")
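Both hunks above follow the same lookup pattern: use the requested model if it exists, otherwise fall back to a default of the right type, log which record was chosen, and raise if nothing is available. A self-contained sketch of that pattern with a stand-in record type (the real MaxKB ORM model and its filter conditions are not visible in this diff):

```python
import logging
from dataclasses import dataclass
from typing import Optional, Sequence

logger = logging.getLogger("maxkb.model_client")


@dataclass
class ModelRecord:
    # Stand-in for the MaxKB model table row; field names are assumptions.
    id: str
    name: str
    model_name: str
    model_type: str  # e.g. "LLM" or "VISION"


def resolve_model(models: Sequence[ModelRecord],
                  model_id: Optional[str],
                  model_type: str) -> ModelRecord:
    """Pick the requested model, else fall back to the first model of the given type."""
    model = next((m for m in models if m.id == model_id), None)
    if not model:
        model = next((m for m in models if m.model_type == model_type), None)
        if model:
            logger.info(f"Using default {model_type} model: {model.name} "
                        f"(ID: {model.id}, model_name: {model.model_name})")
    if not model:
        raise ValueError(f"No {model_type} model available (requested: {model_id})")
    return model
```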
@@ -183,6 +183,7 @@ class MaxKBModelClient:
     Model response text
 """
 try:
+    self.logger.info(f"Calling chat completion with model_id: {model_id}")
     # Get the model instance
     llm_model = await self.get_llm_model(model_id)
@@ -207,7 +208,7 @@
 return str(response)
 except Exception as e:
-    self.logger.error(f"Chat completion failed: {str(e)}")
+    self.logger.error(f"Chat completion failed for model {model_id}: {str(e)}")
     # Return error JSON instead of an empty string
     import json
     return json.dumps({
@@ -230,6 +231,7 @@
     Model response text
 """
 try:
+    self.logger.info(f"Calling vision completion with model_id: {model_id}")
     # Get the vision model instance
     vision_model = await self.get_vision_model(model_id)
@@ -242,6 +244,10 @@
     "title": "No Model",
     "description": "Vision model not available"
 })
+else:
+    # Log actual model name if available
+    actual_model_name = getattr(vision_model, 'model_name', 'unknown')
+    self.logger.info(f"Vision model instance created with actual model_name: {actual_model_name}")
 # Read the image and convert it to base64
 import base64
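The context lines above stop right after the base64 import. A minimal sketch of reading an image file and producing a base64 data URL, which is the kind of payload vision models commonly expect (the exact message format MaxKB builds is not shown in this diff):

```python
import base64
import mimetypes


def image_to_data_url(image_path: str) -> str:
    """Read an image file and return a base64 data URL (format assumed, not taken from the diff)."""
    mime, _ = mimetypes.guess_type(image_path)
    with open(image_path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")
    return f"data:{mime or 'image/png'};base64,{encoded}"
```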
@@ -292,7 +298,7 @@
 return str(response)
 except Exception as e:
-    self.logger.error(f"Vision completion failed: {str(e)}")
+    self.logger.error(f"Vision completion failed for model {model_id}: {str(e)}")
     # Return error JSON instead of an empty string
     import json
     return json.dumps({
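This last hunk is cut off after `return json.dumps({`. Judging by the "title" and "description" keys visible earlier in the same file, the error fallback presumably returns a small JSON object rather than an empty string; a sketch of that shape, with illustrative field values:

```python
import json


def error_payload(kind: str, detail: str) -> str:
    # Return error JSON instead of an empty string, mirroring the pattern
    # visible earlier in the diff; the exact fields used on failure are assumed.
    return json.dumps({
        "title": kind,          # e.g. "Error"
        "description": detail,  # e.g. the exception message
    })
```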