add SUMMARIZATION_MESSAGES_TO_KEEP

This commit is contained in:
朱潮 2025-12-23 22:31:26 +08:00
parent 9162b4258d
commit 06102559ef
2 changed files with 6 additions and 4 deletions

View File

@@ -13,7 +13,7 @@ from utils.fastapi_utils import detect_provider
 from .guideline_middleware import GuidelineMiddleware
 from .tool_output_length_middleware import ToolOutputLengthMiddleware
 from .tool_use_cleanup_middleware import ToolUseCleanupMiddleware
-from utils.settings import SUMMARIZATION_MAX_TOKENS, TOOL_OUTPUT_MAX_LENGTH, MCP_HTTP_TIMEOUT, MCP_SSE_READ_TIMEOUT
+from utils.settings import SUMMARIZATION_MAX_TOKENS, SUMMARIZATION_MESSAGES_TO_KEEP, TOOL_OUTPUT_MAX_LENGTH, MCP_HTTP_TIMEOUT, MCP_SSE_READ_TIMEOUT
 from agent.agent_config import AgentConfig
 from agent.prompt_loader import load_system_prompt_async, load_mcp_settings_async
 from agent.agent_memory_cache import get_memory_cache_manager
@@ -185,7 +185,7 @@ async def init_agent(config: AgentConfig):
     summarization_middleware = SummarizationMiddleware(
         model=llm_instance,
         max_tokens_before_summary=SUMMARIZATION_MAX_TOKENS,
-        messages_to_keep=20,
+        messages_to_keep=SUMMARIZATION_MESSAGES_TO_KEEP,
         summary_prompt="请简洁地总结以上对话的要点,包括重要的用户信息、讨论过的话题和关键结论。"
     )
     middleware.append(summarization_middleware)

View File

@@ -3,15 +3,16 @@ import os
 # LLM Token Settings
 MAX_CONTEXT_TOKENS = int(os.getenv("MAX_CONTEXT_TOKENS", 262144))
 MAX_OUTPUT_TOKENS = int(os.getenv("MAX_OUTPUT_TOKENS", 8000))
+# Summarization Settings
 SUMMARIZATION_MAX_TOKENS = MAX_CONTEXT_TOKENS - MAX_OUTPUT_TOKENS - 1000
+SUMMARIZATION_MESSAGES_TO_KEEP = int(os.getenv("SUMMARIZATION_MESSAGES_TO_KEEP", 20))
 # Agent Cache Settings
 AGENT_CACHE_MAX_SIZE = int(os.getenv("AGENT_CACHE_MAX_SIZE", 20))
 AGENT_CACHE_TTL = int(os.getenv("AGENT_CACHE_TTL", 180))
 AGENT_CACHE_AUTO_RENEW = os.getenv("AGENT_CACHE_AUTO_RENEW", "true") == "true"
 # API Settings
 BACKEND_HOST = os.getenv("BACKEND_HOST", "https://api-dev.gptbase.ai")
 MASTERKEY = os.getenv("MASTERKEY", "master")
@@ -33,6 +34,7 @@ TOOL_OUTPUT_TRUNCATION_STRATEGY = os.getenv("TOOL_OUTPUT_TRUNCATION_STRATEGY", "
 # THINKING ENABLE
 DEFAULT_THINKING_ENABLE = os.getenv("DEFAULT_THINKING_ENABLE", "true") == "true"
 # MCP Tool Timeout Settings
 MCP_HTTP_TIMEOUT = int(os.getenv("MCP_HTTP_TIMEOUT", 60)) # HTTP 请求超时(秒)
 MCP_SSE_READ_TIMEOUT = int(os.getenv("MCP_SSE_READ_TIMEOUT", 300)) # SSE 读取超时(秒)