refactor: replace print statements with logging for improved error tracking

This commit is contained in:
CaptainB 2025-06-25 16:18:09 +08:00
parent 13dc156a58
commit 3aa0847506
9 changed files with 11 additions and 12 deletions

View File

@@ -8,6 +8,7 @@
"""
import asyncio
import json
import logging
import re
import time
from functools import reduce
@@ -125,7 +126,7 @@ def mcp_response_generator(chat_model, message_list, mcp_servers):
except StopAsyncIteration:
break
except Exception as e:
print(f'exception: {e}')
logging.getLogger("max_kb").error(f'Exception: {e}')
finally:
loop.close()

View File

@@ -23,7 +23,6 @@ class BaseImageGenerateNode(IImageGenerateNode):
model_params_setting,
chat_record_id,
**kwargs) -> NodeResult:
print(model_params_setting)
application = self.workflow_manage.work_flow_post_handler.chat_info.application
workspace_id = self.workflow_manage.get_body().get('workspace_id')
tti_model = get_model_instance_by_model_workspace_id(model_id, workspace_id,
@@ -35,7 +34,6 @@ class BaseImageGenerateNode(IImageGenerateNode):
message_list = self.generate_message_list(question, history_message)
self.context['message_list'] = message_list
self.context['dialogue_type'] = dialogue_type
print(message_list)
image_urls = tti_model.generate_image(question, negative_prompt)
# 保存图片
file_urls = []

View File

@@ -25,7 +25,6 @@ class XlsParseTableHandle(BaseParseTableHandle):
for sheet in sheets:
# 获取合并单元格的范围信息
merged_cells = sheet.merged_cells
print(merged_cells)
data = []
paragraphs = []
# 获取第一行作为标题行

View File

@@ -87,7 +87,7 @@ class XlsxParseTableHandle(BaseParseTableHandle):
if len(image_dict) > 0:
save_image(image_dict.values())
except Exception as e:
print(f'{e}')
logging.getLogger("max_kb").error(f'Exception: {e}')
image_dict = {}
md_tables = ''
# 如果未指定 sheet_name则使用第一个工作表

View File

@@ -160,7 +160,6 @@ class BaseService(object):
def _check(self):
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f"{now} Check service status: {self.name} -> ", end='')
if self.process:
try:
self.process.wait(1) # 不wait子进程可能无法回收
@@ -168,9 +167,9 @@ class BaseService(object):
pass
if self.is_running:
print(f'running at {self.pid}')
logging.debug(f"{now} Check service status: {self.name} -> running at {self.pid}")
else:
print(f'stopped at {self.pid}')
logging.debug(f"{now} Check service status: {self.name} -> stopped at {self.pid}")
def _restart(self):
if self.retry > self.max_retry:

View File

@@ -217,7 +217,7 @@ class CeleryThreadTaskFileHandler(CeleryThreadingLoggerHandler):
self.thread_id_fd_mapper[thread_id] = f
def handle_task_end(self, task_id):
print('handle_task_end')
logging.getLogger('max_kb').info('handle_task_end')
ident_id = self.task_id_thread_id_mapper.get(task_id, '')
f = self.thread_id_fd_mapper.pop(ident_id, None)
if f and not f.closed:

View File

@@ -65,7 +65,7 @@ def add_celery_logger_handler(sender=None, logger=None, loglevel=None, format=No
@task_revoked.connect
def on_task_revoked(request, terminated, signum, expired, **kwargs):
print('task_revoked', terminated)
logging.getLogger('max_kb').info('task_revoked', terminated)
@task_prerun.connect

View File

@@ -62,7 +62,7 @@ def get_celery_status():
active_queue_worker = set([n.split('@')[0] for n in active_nodes if n])
# Celery Worker 数量: 2
if len(active_queue_worker) < 2:
print("Not all celery worker worked")
logging.getLogger('max_kb').info("Not all celery worker worked")
return False
else:
return True

View File

@@ -6,6 +6,8 @@
@date: 2024/3/19 16:29
@desc:
"""
import logging
from django.core.mail.backends.smtp import EmailBackend
from django.db.models import QuerySet
from rest_framework import serializers
@@ -45,7 +47,7 @@ class EmailSettingSerializer(serializers.Serializer):
self.data.get("email_use_ssl")
).open()
except Exception as e:
print(e)
logging.getLogger("max_kb").error(f'Exception: {e}')
raise AppApiException(1004, _('Email verification failed'))
def update_or_save(self):