refactor: migrate from Memori to Mem0 for long-term memory

Replace Memori with Mem0 for memory management:
- Delete memori_config.py, memori_manager.py, memori_middleware.py
- Add mem0_config.py, mem0_manager.py, mem0_middleware.py
- Update environment variables (MEMORI_* -> MEM0_*; see the settings sketch below)
- Integrate Mem0 with LangGraph middleware
- Add sync connection pool for Mem0 in DBPoolManager
- Move checkpoint message prep to config creation

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
朱潮 2026-01-20 21:15:30 +08:00
parent 5c041cdebe
commit f694101747
23 changed files with 1606 additions and 1423 deletions
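The renamed settings (MEM0_ENABLED, MEM0_SEMANTIC_SEARCH_TOP_K, MEM0_EMBEDDING_MODEL) are imported throughout the diffs below. As a minimal sketch of what the new block in utils/settings.py plausibly looks like (the names come from those imports; the env-var defaults and the sentence-transformers prefix are assumptions, not the committed values):

# utils/settings.py (sketch; not the committed file)
import os

MEM0_ENABLED = os.getenv("MEM0_ENABLED", "false").lower() == "true"
MEM0_SEMANTIC_SEARCH_TOP_K = int(os.getenv("MEM0_SEMANTIC_SEARCH_TOP_K", "20"))
# Embedding model for the pgvector store; agent/mem0_manager.py assumes 384-dim vectors
MEM0_EMBEDDING_MODEL = os.getenv(
    "MEM0_EMBEDDING_MODEL",
    "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
)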

View File

@ -41,11 +41,13 @@ class AgentConfig:
logging_handler: Optional['LoggingCallbackHandler'] = None
# Memori long-term memory configuration
enable_memori: bool = False
memori_semantic_search_top_k: int = 5
memori_semantic_search_threshold: float = 0.7
memori_inject_to_system_prompt: bool = True
# Mem0 long-term memory configuration
enable_memori: bool = False # name kept for API compatibility; Mem0 is used under the hood
memori_semantic_search_top_k: int = 20
_mem0_context: Optional[str] = None # memory context recalled by Mem0, shared between middlewares
# Checkpointer session history
_session_history: Optional[List] = field(default_factory=list) # chat history read from the checkpointer
def to_dict(self) -> Dict[str, Any]:
"""转换为字典格式,用于传递给需要**kwargs的函数"""
@ -70,8 +72,6 @@ class AgentConfig:
'messages': self.messages,
'enable_memori': self.enable_memori,
'memori_semantic_search_top_k': self.memori_semantic_search_top_k,
'memori_semantic_search_threshold': self.memori_semantic_search_threshold,
'memori_inject_to_system_prompt': self.memori_inject_to_system_prompt,
}
def safe_print(self):
@ -82,16 +82,18 @@ class AgentConfig:
logger.info(f"config={json.dumps(safe_dict, ensure_ascii=False)}")
@classmethod
def from_v1_request(cls, request, api_key: str, project_dir: Optional[str] = None, generate_cfg: Optional[Dict] = None, messages: Optional[List] = None):
async def from_v1_request(cls, request, api_key: str, project_dir: Optional[str] = None, generate_cfg: Optional[Dict] = None, messages: Optional[List] = None):
"""从v1请求创建配置"""
# 延迟导入避免循环依赖
from .logging_handler import LoggingCallbackHandler
from utils.fastapi_utils import get_preamble_text
from utils.settings import (
MEMORI_ENABLED,
MEMORI_SEMANTIC_SEARCH_TOP_K,
MEMORI_SEMANTIC_SEARCH_THRESHOLD,
MEM0_ENABLED,
MEM0_SEMANTIC_SEARCH_TOP_K,
)
from .checkpoint_utils import prepare_checkpoint_message
from .checkpoint_manager import get_checkpointer_manager
if messages is None:
messages = []
@ -102,10 +104,10 @@ class AgentConfig:
preamble_text, system_prompt = get_preamble_text(request.language, request.system_prompt)
enable_thinking = request.enable_thinking and "<guidelines>" in request.system_prompt
# Read the Memori settings from the request, falling back to the global config
# Read the Mem0 settings from the request, falling back to the global config
enable_memori = getattr(request, 'enable_memori', None)
if enable_memori is None:
enable_memori = MEMORI_ENABLED
enable_memori = MEM0_ENABLED
config = cls(
bot_id=request.bot_id,
@ -129,24 +131,36 @@ class AgentConfig:
preamble_text=preamble_text,
dataset_ids=request.dataset_ids,
enable_memori=enable_memori,
memori_semantic_search_top_k=getattr(request, 'memori_semantic_search_top_k', None) or MEMORI_SEMANTIC_SEARCH_TOP_K,
memori_semantic_search_threshold=getattr(request, 'memori_semantic_search_threshold', None) or MEMORI_SEMANTIC_SEARCH_THRESHOLD,
memori_semantic_search_top_k=getattr(request, 'memori_semantic_search_top_k', None) or MEM0_SEMANTIC_SEARCH_TOP_K,
)
# Prepare checkpoint messages as early as possible, when the config is created
if config.session_id:
try:
manager = get_checkpointer_manager()
checkpointer = manager.checkpointer
if checkpointer:
await prepare_checkpoint_message(config, checkpointer)
except Exception as e:
logger.warning(f"Failed to load checkpointer: {e}")
config.safe_print()
return config
@classmethod
def from_v2_request(cls, request, bot_config: Dict, project_dir: Optional[str] = None, messages: Optional[List] = None):
async def from_v2_request(cls, request, bot_config: Dict, project_dir: Optional[str] = None, messages: Optional[List] = None):
"""从v2请求创建配置"""
# 延迟导入避免循环依赖
from .logging_handler import LoggingCallbackHandler
from utils.fastapi_utils import get_preamble_text
from utils.settings import (
MEMORI_ENABLED,
MEMORI_SEMANTIC_SEARCH_TOP_K,
MEMORI_SEMANTIC_SEARCH_THRESHOLD,
MEM0_ENABLED,
MEM0_SEMANTIC_SEARCH_TOP_K,
)
from .checkpoint_utils import prepare_checkpoint_message
from .checkpoint_manager import get_checkpointer_manager
if messages is None:
messages = []
language = request.language or bot_config.get("language", "zh")
@ -156,10 +170,10 @@ class AgentConfig:
robot_type = "deep_agent"
enable_thinking = request.enable_thinking and "<guidelines>" in bot_config.get("system_prompt")
# Read the Memori settings from the request or the backend config
# Read the Mem0 settings from the request or the backend config
enable_memori = getattr(request, 'enable_memori', None)
if enable_memori is None:
enable_memori = bot_config.get("enable_memori", MEMORI_ENABLED)
enable_memori = bot_config.get("enable_memori", MEM0_ENABLED)
config = cls(
@ -184,9 +198,19 @@ class AgentConfig:
preamble_text=preamble_text,
dataset_ids=bot_config.get("dataset_ids", []), # dataset_ids come from the backend config
enable_memori=enable_memori,
memori_semantic_search_top_k=bot_config.get("memori_semantic_search_top_k", MEMORI_SEMANTIC_SEARCH_TOP_K),
memori_semantic_search_threshold=bot_config.get("memori_semantic_search_threshold", MEMORI_SEMANTIC_SEARCH_THRESHOLD),
memori_semantic_search_top_k=bot_config.get("memori_semantic_search_top_k", MEM0_SEMANTIC_SEARCH_TOP_K),
)
# Prepare checkpoint messages as early as possible, when the config is created
if config.session_id:
try:
manager = get_checkpointer_manager()
checkpointer = manager.checkpointer
if checkpointer:
await prepare_checkpoint_message(config, checkpointer)
except Exception as e:
logger.warning(f"Failed to load checkpointer: {e}")
config.safe_print()
return config

View File

@ -7,101 +7,89 @@ from langgraph.checkpoint.memory import MemorySaver
logger = logging.getLogger('app')
async def check_checkpoint_history(checkpointer: MemorySaver, thread_id: str) -> bool:
async def get_checkpoint_history(checkpointer: MemorySaver, thread_id: str) -> List:
"""
检查指定的 thread_id checkpointer 中是否已有历史记录
checkpointer 获取指定 thread_id 的历史聊天记录
Args:
checkpointer: MemorySaver 实例
thread_id: 线程ID通常是 session_id
Returns:
bool: True 表示有历史记录False 表示没有
List[Dict]: 历史消息列表如果没有历史记录或出错则返回空列表
"""
if not checkpointer or not thread_id:
logger.debug(f"No checkpointer or thread_id: checkpointer={bool(checkpointer)}, thread_id={thread_id}")
return False
try:
# Build the config
config = {"configurable": {"thread_id": thread_id}}
# Debug info: inspect the checkpointer type
logger.debug(f"Checkpointer type: {type(checkpointer)}")
logger.debug(f"Checkpointer dir: {[attr for attr in dir(checkpointer) if not attr.startswith('_')]}")
latest_checkpoint = await checkpointer.aget_tuple(config)
logger.debug(f"aget_tuple result: {latest_checkpoint}")
if latest_checkpoint is not None:
logger.info(f"Found latest checkpoint for thread_id: {thread_id}")
# Unpack the checkpoint tuple
return True
except Exception as e:
import traceback
logger.error(f"Error checking checkpoint history for thread_id {thread_id}: {e}")
logger.error(f"Full traceback: {traceback.format_exc()}")
# Be conservative on errors and return False
return False
def prepare_messages_for_agent(
messages: List[Dict[str, Any]],
has_history: bool
) -> List[Dict[str, Any]]:
"""
Prepare the messages to send to the agent, based on whether history exists
Args:
messages: the full message list
has_history: whether history already exists
Returns:
List[Dict]: the messages to send to the agent
"""
if not messages:
return []
# If there is history, send only the last user message
try:
config = {"configurable": {"thread_id": thread_id}}
checkpoint_tuple = await checkpointer.aget_tuple(config)
if checkpoint_tuple is None or checkpoint_tuple.checkpoint is None:
logger.debug(f"No checkpoint found for thread_id: {thread_id}")
return []
# Extract the message history from the checkpoint
checkpoint_data = checkpoint_tuple.checkpoint
# Messages in a LangGraph checkpoint usually live in channel_values['messages']
if "channel_values" not in checkpoint_data:
logger.debug(f"No channel_values in checkpoint for thread_id: {thread_id}")
return []
channel_values = checkpoint_data["channel_values"]
if isinstance(channel_values, dict) and "messages" in channel_values:
history_messages = channel_values["messages"]
converted = history_messages
logger.info(f"Loaded {len(converted)} messages from checkpoint for thread_id: {thread_id}")
return converted
elif isinstance(channel_values, list):
# In some cases channel_values is the message list itself
converted = channel_values
logger.info(f"Loaded {len(converted)} messages from checkpoint for thread_id: {thread_id}")
return converted
else:
logger.debug(f"Unexpected channel_values format: {type(channel_values)}")
return []
except Exception as e:
import traceback
logger.error(f"Error getting checkpoint history for thread_id {thread_id}: {e}")
logger.error(f"Full traceback: {traceback.format_exc()}")
return []
async def prepare_checkpoint_message(config, checkpointer):
"""
Prepare checkpoint-related messages
1. Fetch and filter the history (dropping messages that contain "<think>")
2. Decide which messages to send based on whether history exists
"""
if not config.session_id or not checkpointer or len(config.messages) == 0:
logger.debug("No session_id/checkpointer or empty messages, skipping checkpoint")
return
# Fetch the history
history = await get_checkpoint_history(checkpointer, config.session_id)
has_history = len(history) > 0
# Process the history: filter it and keep the 20 most recent entries
if has_history:
# Find the last user message
for msg in reversed(messages):
if msg.get('role') == 'user':
logger.info(f"Has history, sending only last user message: {msg.get('content', '')[:50]}...")
return [msg]
filtered_history = [
h for h in history
if getattr(h, "type", None) in ("human", "ai")
and "<think>" not in str(getattr(h, "content", "")).lower()
]
logger.info(f"Filtered {len(filtered_history)} human/ai messages from history")
config._session_history = filtered_history[-20:]
# If no user message is found (should not happen in theory), fall back to sending everything
logger.warning("No user message found in messages")
return messages
# With no history, send all messages
logger.info(f"No history, sending all {len(messages)} messages")
return messages
def update_agent_config_for_checkpoint(
config_messages: List[Dict[str, Any]],
has_history: bool
) -> List[Dict[str, Any]]:
"""
Update the messages in AgentConfig: decide what to send based on whether history exists
This can be called before invoking the agent, to avoid reprocessing the message history
Args:
config_messages: the original message list from AgentConfig
has_history: whether history already exists
Returns:
List[Dict]: the updated message list
"""
return prepare_messages_for_agent(config_messages, has_history)
async def prepare_checkpoint_message(config, checkpointer):
# With a checkpointer present, check whether history exists
if config.session_id and checkpointer and len(config.messages) > 0:
has_history = await check_checkpoint_history(checkpointer, config.session_id)
config.messages = prepare_messages_for_agent(config.messages, has_history)
logger.info(f"Session {config.session_id}: has_history={has_history}, sending {len(config.messages)} messages")
# Decide what to send: with history, only the last user message; otherwise everything
if has_history:
last_user_msg = next((m for m in reversed(config.messages) if m.get('role') == 'user'), None)
if last_user_msg:
config.messages = [last_user_msg]
logger.info(f"Has history, sending last user message: {last_user_msg.get('content', '')[:50]}...")
else:
logger.debug(f"No session_id provided, skipping checkpoint check")
logger.info(f"No history, sending all {len(config.messages)} messages")

View File

@ -1,12 +1,13 @@
"""
Global PostgreSQL connection pool manager
Shared by checkpoint and chat_history
Shared by checkpoint, chat_history, and mem0
"""
import asyncio
import logging
from typing import Optional
from psycopg_pool import AsyncConnectionPool
from psycopg2 import pool as psycopg2_pool
from utils.settings import (
CHECKPOINT_DB_URL,
@ -24,14 +25,16 @@ class DBPoolManager:
Global PostgreSQL connection pool manager
Main responsibilities:
1. Manage connections with psycopg_pool.AsyncConnectionPool
2. Shared by CheckpointerManager and ChatHistoryManager
3. Automatically clean up old checkpoint data
4. Graceful shutdown
1. Manage async connections with psycopg_pool.AsyncConnectionPool
2. Manage sync connections with psycopg2.pool.SimpleConnectionPool (for Mem0)
3. Shared by CheckpointerManager, ChatHistoryManager, and Mem0Manager
4. Automatically clean up old checkpoint data
5. Graceful shutdown
"""
def __init__(self):
self._pool: Optional[AsyncConnectionPool] = None
self._sync_pool: Optional[psycopg2_pool.SimpleConnectionPool] = None # synchronous pool
self._initialized = False
self._closed = False
# Cleanup scheduler task
@ -49,26 +52,58 @@ class DBPoolManager:
)
try:
# Create the psycopg connection pool
# 1. Create the async psycopg connection pool
self._pool = AsyncConnectionPool(
CHECKPOINT_DB_URL,
min_size=1,
max_size=CHECKPOINT_POOL_SIZE,
open=False,
)
# Open the pool
await self._pool.open()
# 2. Create the synchronous psycopg2 pool (for Mem0)
self._sync_pool = self._create_sync_pool(CHECKPOINT_DB_URL, CHECKPOINT_POOL_SIZE)
self._initialized = True
logger.info("PostgreSQL connection pool initialized successfully")
except Exception as e:
logger.error(f"Failed to initialize PostgreSQL connection pool: {e}")
raise
def _create_sync_pool(self, db_url: str, pool_size: int) -> psycopg2_pool.SimpleConnectionPool:
"""创建同步连接池(供 Mem0 使用)"""
# 解析连接 URL: postgresql://user:password@host:port/database
url_parts = db_url.replace("postgresql://", "").split("/")
conn_part = url_parts[0] if len(url_parts) > 1 else ""
dbname = url_parts[1] if len(url_parts) > 1 else "postgres"
if "@" in conn_part:
auth_part, host_part = conn_part.split("@")
user, password = auth_part.split(":") if ":" in auth_part else (auth_part, "")
else:
user = ""
password = ""
host_part = conn_part
if ":" in host_part:
host, port = host_part.split(":")
port = int(port)
else:
host = host_part
port = 5432
return psycopg2_pool.SimpleConnectionPool(
1, pool_size,
user=user,
password=password,
host=host,
port=port,
database=dbname
)
@property
def pool(self) -> AsyncConnectionPool:
"""获取连接池"""
"""获取异步连接池"""
if self._closed:
raise RuntimeError("DBPoolManager is closed")
@ -77,6 +112,20 @@ class DBPoolManager:
return self._pool
@property
def sync_pool(self) -> psycopg2_pool.SimpleConnectionPool:
"""获取同步连接池(供 Mem0 使用)"""
if self._closed:
raise RuntimeError("DBPoolManager is closed")
if not self._initialized:
raise RuntimeError("DBPoolManager not initialized, call initialize() first")
if self._sync_pool is None:
raise RuntimeError("Sync pool not available")
return self._sync_pool
async def close(self) -> None:
"""关闭连接池"""
if self._closed:
@ -93,10 +142,16 @@ class DBPoolManager:
logger.info("Closing DBPoolManager...")
# Close the async pool
if self._pool is not None:
await self._pool.close()
self._pool = None
# Close the sync pool
if self._sync_pool is not None:
self._sync_pool.closeall()
self._sync_pool = None
self._closed = True
self._initialized = False
logger.info("DBPoolManager closed")

View File

@ -25,19 +25,13 @@ from utils.settings import (
TOOL_OUTPUT_MAX_LENGTH,
MCP_HTTP_TIMEOUT,
MCP_SSE_READ_TIMEOUT,
MEMORI_ENABLED,
MEMORI_API_KEY,
MEMORI_SEMANTIC_SEARCH_TOP_K,
MEMORI_SEMANTIC_SEARCH_THRESHOLD,
MEMORI_INJECT_TO_SYSTEM_PROMPT,
)
from agent.agent_config import AgentConfig
from .memori_manager import get_memori_manager
from .memori_middleware import create_memori_middleware
from .memori_config import MemoriConfig
from .mem0_manager import get_mem0_manager
from .mem0_middleware import create_mem0_middleware
from .mem0_config import Mem0Config
from agent.prompt_loader import load_system_prompt_async, load_mcp_settings_async
from agent.agent_memory_cache import get_memory_cache_manager
from .checkpoint_utils import prepare_checkpoint_message
from .checkpoint_manager import get_checkpointer_manager
from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver
from langgraph.checkpoint.memory import InMemorySaver
@ -180,13 +174,11 @@ async def init_agent(config: AgentConfig):
checkpointer = None
create_start = time.time()
# Fetch the checkpointer from the pool (this had to happen before Memori initialization)
# Fetch the checkpointer from the pool; prepare_checkpoint_message is now called in from_v1/from_v2_request
if config.session_id:
try:
manager = get_checkpointer_manager()
checkpointer = manager.checkpointer
if checkpointer:
await prepare_checkpoint_message(config, checkpointer)
except Exception as e:
logger.warning(f"Failed to load checkpointer: {e}")
@ -205,33 +197,34 @@ async def init_agent(config: AgentConfig):
)
middleware.append(tool_output_middleware)
# Add the Memori memory middleware (if enabled)
# Add the Mem0 memory middleware (if enabled)
if config.enable_memori:
try:
# Make sure we have a user_identifier
if not config.user_identifier:
logger.warning("Memori enabled but user_identifier is missing, skipping Memori")
logger.warning("Mem0 enabled but user_identifier is missing, skipping Mem0")
else:
# Get the global MemoriManager (initialized in fastapi_app.py)
memori_manager = get_memori_manager()
# Get the global Mem0Manager (initialized in fastapi_app.py)
mem0_manager = get_mem0_manager()
# Create the Memori middleware
memori_middleware = create_memori_middleware(
# Create the Mem0 middleware, passing the existing llm_instance and config
mem0_middleware = create_mem0_middleware(
bot_id=config.bot_id,
user_identifier=config.user_identifier,
session_id=config.session_id or "default",
agent_config=config, # pass the AgentConfig so middlewares can share data
enabled=config.enable_memori,
semantic_search_top_k=config.memori_semantic_search_top_k,
semantic_search_threshold=config.memori_semantic_search_threshold,
memori_manager=memori_manager,
mem0_manager=mem0_manager,
llm_instance=llm_instance, # reuse the existing LLM instance
)
if memori_middleware:
middleware.append(memori_middleware)
logger.info("Memori middleware added to agent")
if mem0_middleware:
middleware.append(mem0_middleware)
logger.info("Mem0 middleware added to agent")
except Exception as e:
logger.error(f"Failed to create Memori middleware: {e}, continuing without Memori")
logger.error(f"Failed to create Mem0 middleware: {e}, continuing without Mem0")
if config.robot_type == "deep_agent":
@ -257,8 +250,8 @@ async def init_agent(config: AgentConfig):
if config.session_id:
summarization_middleware = SummarizationMiddleware(
model=llm_instance,
max_tokens_before_summary=SUMMARIZATION_MAX_TOKENS,
messages_to_keep=SUMMARIZATION_MESSAGES_TO_KEEP,
trigger=('tokens', SUMMARIZATION_MAX_TOKENS),
keep=('messages', SUMMARIZATION_MESSAGES_TO_KEEP),
summary_prompt="请简洁地总结以上对话的要点,包括重要的用户信息、讨论过的话题和关键结论。"
)
middleware.append(summarization_middleware)

View File

@ -20,6 +20,7 @@ logger = logging.getLogger('app')
class GuidelineMiddleware(AgentMiddleware):
def __init__(self, model:BaseChatModel, config:AgentConfig, prompt: str):
self.model = model
self.config = config # keep the full config so _mem0_context is accessible
self.bot_id = config.bot_id
processed_system_prompt, guidelines, tool_description, scenarios, terms_list = extract_block_from_system_prompt(prompt)
@ -34,7 +35,7 @@ class GuidelineMiddleware(AgentMiddleware):
self.robot_type = config.robot_type
self.terms_list = terms_list
self.messages = config._origin_messages
self.messages = config.messages
if self.robot_type == "general_agent":
if not self.guidelines:
@ -52,13 +53,25 @@ Action: Provide concise, friendly, and personified natural responses.
- **Knowledge Base Retrieval**: For knowledge queries/other inquiries, prioritize searching the knowledge base rag_retrieve-rag_retrieve
"""
def get_guideline_prompt(self, messages: list[dict[str, Any]]) -> str:
## Handle terms
terms_analysis = self.get_term_analysis(messages)
def get_guideline_prompt(self, config: AgentConfig) -> str:
"""生成 guideline 提示词
Args:
config: AgentConfig 对象包含 _session_history _mem0_context
Returns:
str: 生成的 guideline 提示词
"""
messages = convert_to_openai_messages(config._session_history)
memory_text = config._mem0_context
# 处理terms修改 self.processed_system_prompt
self.get_term_analysis(messages)
guideline_prompt = ""
if self.guidelines:
chat_history = format_messages_to_chat_history(messages)
guideline_prompt = load_guideline_prompt(chat_history, self.guidelines, self.tool_description, self.scenarios, terms_analysis, self.language, self.user_identifier)
guideline_prompt = load_guideline_prompt(chat_history, memory_text, self.guidelines, self.tool_description, self.scenarios, self.language, self.user_identifier)
return guideline_prompt
@ -95,8 +108,7 @@ Action: Provide concise, friendly, and personified natural responses.
if not self.guidelines:
return None
guideline_prompt = self.get_guideline_prompt(convert_to_openai_messages(self.messages))
guideline_prompt = self.get_guideline_prompt(self.config)
# Build the full message list
messages = state['messages'].copy()
@ -115,20 +127,17 @@ Action: Provide concise, friendly, and personified natural responses.
response.content = f"<think>{response.content}</think>"
# Append the response to the original message list
final_messages = state['messages'] + [response]
return {
"messages": final_messages
}
state['messages'] = state['messages'] + [response]
return state
async def abefore_agent(self, state: AgentState, runtime: Runtime) -> dict[str, Any] | None:
if not self.guidelines:
return None
guideline_prompt = self.get_guideline_prompt(convert_to_openai_messages(self.messages))
# Build the full message list
# Build the full message list
messages = state['messages'].copy()
guideline_prompt = self.get_guideline_prompt(self.config)
# Add guideline_prompt to the message list as a system message
system_message = SystemMessage(content=guideline_prompt)
messages = [system_message,messages[-1]]
@ -142,10 +151,8 @@ Action: Provide concise, friendly, and personified natural responses.
response.content = f"<think>{response.content}</think>"
# Append the response to the original message list
final_messages = state['messages'] + [response]
return {
"messages": final_messages
}
state['messages'] = state['messages'] + [response]
return state
def wrap_model_call(
self,

View File

@ -1,29 +1,26 @@
"""
Memori configuration dataclass
Manages the configuration parameters for the Memori long-term memory system
Mem0 configuration dataclass
Manages the configuration parameters for the Mem0 long-term memory system
"""
from dataclasses import dataclass
from typing import Optional
from typing import TYPE_CHECKING, Optional
# Avoid circular imports
if TYPE_CHECKING:
from langchain_core.language_models import BaseChatModel
@dataclass
class MemoriConfig:
"""Memori 长期记忆配置类"""
class Mem0Config:
"""Mem0 长期记忆配置类"""
# Feature flag
enabled: bool = False
# API configuration
api_key: Optional[str] = None
# Semantic search configuration
semantic_search_top_k: int = 5
semantic_search_threshold: float = 0.7
semantic_search_embeddings_limit: int = 1000
semantic_search_top_k: int = 20
# Memory injection configuration
inject_memory_to_system_prompt: bool = True
memory_prompt_template: str = (
"\n\n=== 相关记忆 ===\n"
"以下是从历史对话中检索到的相关信息,可以帮助你更好地回答用户问题:\n"
@ -31,27 +28,26 @@ class MemoriConfig:
"==================\n"
)
# Augmentation configuration
augmentation_enabled: bool = True
augmentation_wait_timeout: Optional[float] = None # None means run asynchronously in the background
# Multi-tenancy configuration
entity_id: Optional[str] = None # user identifier
process_id: Optional[str] = None # bot identifier
user_id: Optional[str] = None # user identifier
agent_id: Optional[str] = None # bot identifier
session_id: Optional[str] = None # session identifier
# LLM instance (used by Mem0 for memory extraction and augmentation)
llm_instance: Optional["BaseChatModel"] = None # LangChain LLM instance
def get_attribution_tuple(self) -> tuple[str, str]:
"""获取 attribution 所需的元组 (entity_id, process_id)
"""获取 attribution 所需的元组 (user_id, agent_id)
Returns:
(entity_id, process_id) 元组
(user_id, agent_id) 元组
"""
if not self.entity_id or not self.process_id:
raise ValueError("entity_id and process_id are required for attribution")
return (self.entity_id, self.process_id)
if not self.user_id or not self.agent_id:
raise ValueError("user_id and agent_id are required for attribution")
return (self.user_id, self.agent_id)
def is_enabled(self) -> bool:
"""检查 Memori 功能是否启用
"""检查 Mem0 功能是否启用
Returns:
bool: 是否启用
@ -73,15 +69,15 @@ class MemoriConfig:
memory_text = "\n".join(f"- {m}" for m in memories)
return self.memory_prompt_template.format(memories=memory_text)
def with_session(self, session_id: str) -> "MemoriConfig":
def with_session(self, session_id: str) -> "Mem0Config":
"""创建带有新 session_id 的配置副本
Args:
session_id: 新的会话 ID
Returns:
新的 MemoriConfig 实例
新的 Mem0Config 实例
"""
new_config = MemoriConfig(**self.__dict__)
new_config = Mem0Config(**self.__dict__)
new_config.session_id = session_id
return new_config

agent/mem0_manager.py (new file, 441 lines)
View File

@ -0,0 +1,441 @@
"""
Mem0 connection and instance manager
Manages the creation, caching, and lifecycle of Mem0 client instances
"""
import logging
from typing import Any, Dict, List, Optional
import json_repair
from psycopg2 import pool
from .mem0_config import Mem0Config
from utils.settings import MEM0_EMBEDDING_MODEL
logger = logging.getLogger("app")
# Monkey patch: replace mem0's remove_code_blocks with json_repair
def _remove_code_blocks_with_repair(content: str) -> str:
"""
Replacement for mem0's remove_code_blocks, built on json_repair
json_repair.loads automatically handles:
- stripping code-block markers (```json, ```)
- repairing broken JSON (trailing commas, comments, single quotes, etc.)
"""
import re
content_stripped = content.strip()
try:
# json_repair.loads strips code fences and repairs the JSON automatically
result = json_repair.loads(content_stripped)
if isinstance(result, (dict, list)):
import json
return json.dumps(result, ensure_ascii=False)
# An empty string (non-JSON input) means falling back to the original content
if result == "" and content_stripped != "":
# Try plain code-fence stripping as a degraded fallback
pattern = r"^```[a-zA-Z0-9]*\n([\s\S]*?)\n```$"
match = re.match(pattern, content_stripped)
if match:
return match.group(1).strip()
return content_stripped
return str(result)
except Exception:
# If parsing fails, try plain code-fence stripping as a degraded fallback
pattern = r"^```[a-zA-Z0-9]*\n([\s\S]*?)\n```$"
match = re.match(pattern, content_stripped)
if match:
return match.group(1).strip()
return content_stripped
# Apply the monkey patch (before or after mem0 is imported)
try:
import sys
import mem0.memory.utils as mem0_utils
mem0_utils.remove_code_blocks = _remove_code_blocks_with_repair
# If mem0.memory.main is already imported, patch its local reference too
if 'mem0.memory.main' in sys.modules:
import mem0.memory.main
mem0.memory.main.remove_code_blocks = _remove_code_blocks_with_repair
logger.info("Successfully patched mem0.memory.main.remove_code_blocks with json_repair")
else:
logger.info("Successfully patched mem0.memory.utils.remove_code_blocks with json_repair")
except ImportError:
# mem0 not importable yet; the patch will take effect when it is first imported
pass
except Exception as e:
logger.warning(f"Failed to patch mem0 remove_code_blocks: {e}")
class Mem0Manager:
"""
Mem0 connection and instance manager
Main responsibilities:
1. Manage the creation and caching of Mem0 instances
2. Support multi-tenant isolation (user_id + agent_id)
3. Use the shared synchronous connection pool provided by DBPoolManager
4. Expose memory recall and storage interfaces
"""
def __init__(
self,
sync_pool: Optional[pool.SimpleConnectionPool] = None,
):
"""初始化 Mem0Manager
Args:
sync_pool: PostgreSQL 同步连接池 DBPoolManager 共享
"""
self._sync_pool = sync_pool
# 缓存 Mem0 实例: key = f"{user_id}:{agent_id}"
self._instances: Dict[str, Any] = {}
self._initialized = False
async def initialize(self) -> None:
"""初始化 Mem0Manager
创建数据库表结构如果不存在
"""
if self._initialized:
return
logger.info("Initializing Mem0Manager...")
try:
# Mem0 creates its tables automatically; just verify the connection here
if self._sync_pool:
logger.info("Mem0Manager initialized successfully")
else:
logger.warning("No database configuration provided for Mem0")
self._initialized = True
except Exception as e:
logger.error(f"Failed to initialize Mem0Manager: {e}")
# Do not raise; the system can run without Mem0
def _get_connection_pool(self) -> Optional[pool.SimpleConnectionPool]:
"""获取同步数据库连接池Mem0 需要)
Returns:
psycopg2.pool 连接池
"""
return self._sync_pool
async def get_mem0(
self,
user_id: str,
agent_id: str,
session_id: str,
config: Optional[Mem0Config] = None,
) -> Any:
"""获取或创建 Mem0 实例
Args:
user_id: 用户 ID对应 entity_id
agent_id: Agent/Bot ID对应 process_id
session_id: 会话 ID
config: Mem0 配置
Returns:
Mem0 实例
"""
# 缓存键包含 LLM 实例 ID以确保不同 LLM 使用不同实例
llm_suffix = ""
if config and config.llm_instance is not None:
llm_suffix = f":{id(config.llm_instance)}"
cache_key = f"{user_id}:{agent_id}{llm_suffix}"
# Check the cache
if cache_key in self._instances:
return self._instances[cache_key]
# Create a new instance
mem0_instance = await self._create_mem0_instance(
user_id=user_id,
agent_id=agent_id,
session_id=session_id,
config=config,
)
# Cache the instance
self._instances[cache_key] = mem0_instance
return mem0_instance
async def _create_mem0_instance(
self,
user_id: str,
agent_id: str,
session_id: str,
config: Optional[Mem0Config] = None,
) -> Any:
"""创建新的 Mem0 实例
Args:
user_id: 用户 ID
agent_id: Agent/Bot ID
session_id: 会话 ID
config: Mem0 配置包含 LLM 实例
Returns:
Mem0 Memory 实例
"""
try:
from mem0 import Memory
except ImportError:
logger.error("mem0 package not installed")
raise RuntimeError("mem0 package is required but not installed")
# Get the synchronous connection pool
connection_pool = self._get_connection_pool()
if not connection_pool:
raise ValueError("Database connection pool not available")
# Configure Mem0 to use pgvector
config_dict = {
"vector_store": {
"provider": "pgvector",
"config": {
"connection_pool": connection_pool,
"collection_name": f"mem0_{agent_id}".replace("-", "_")[:50], # 按 agent_id 隔离
"embedding_model_dims": 384, # paraphrase-multilingual-MiniLM-L12-v2 的维度
}
},
"embedder": {
"provider": "huggingface",
"config": {
"model": MEM0_EMBEDDING_MODEL,
"embedding_dims":384
}
}
}
# Attach the LangChain LLM config (if one was provided)
if config and config.llm_instance is not None:
config_dict["llm"] = {
"provider": "langchain",
"config": {"model": config.llm_instance}
}
logger.info(
f"Configured LangChain LLM for Mem0: {type(config.llm_instance).__name__}"
)
# Create the Mem0 instance
mem = Memory.from_config(config_dict)
logger.info(
f"Created Mem0 instance: user={user_id}, agent={agent_id}"
)
return mem
async def recall_memories(
self,
query: str,
user_id: str,
agent_id: str,
config: Optional[Mem0Config] = None,
) -> List[Dict[str, Any]]:
"""召回相关记忆(用户级别,跨会话共享)
Args:
query: 查询文本
user_id: 用户 ID
agent_id: Agent/Bot ID
config: Mem0 配置
Returns:
记忆列表每个记忆包含 content, similarity 等字段
"""
try:
mem = await self.get_mem0(user_id, agent_id, "default", config)
# Run a semantic search via search() (filtered by the agent_id parameter)
limit = config.semantic_search_top_k if config else 20
results = mem.search(
query=query,
limit=limit,
user_id=user_id,
agent_id=agent_id,
)
# Normalize into a unified format
memories = []
for result in results["results"]:
# Mem0 results may be strings or dicts
content = result.get("memory", "")
score = result.get("score", 0.0)
result_metadata = result.get("metadata", {})
memory = {
"content": content,
"similarity": score,
"metadata": result_metadata,
"fact_type": result_metadata.get("category", "fact"),
}
memories.append(memory)
logger.info(f"Recalled {len(memories)} memories for user={user_id}, query: {query[:50]}...")
return memories
except Exception as e:
logger.error(f"Failed to recall memories: {e}")
return []
async def add_memory(
self,
text: str,
user_id: str,
agent_id: str,
metadata: Optional[Dict[str, Any]] = None,
config: Optional[Mem0Config] = None,
) -> Dict[str, Any]:
"""添加新记忆(用户级别,跨会话共享)
Args:
text: 记忆文本
user_id: 用户 ID
agent_id: Agent/Bot ID
metadata: 额外的元数据
config: Mem0 配置包含 LLM 实例用于记忆提取
Returns:
添加的记忆结果
"""
try:
mem = await self.get_mem0(user_id, agent_id, "default", config)
# Add the memory (scoped by the agent_id parameter)
result = mem.add(
text,
user_id=user_id,
agent_id=agent_id,
metadata=metadata or {}
)
logger.info(f"Added memory for user={user_id}, agent={agent_id}: {text[:50]}...")
return result
except Exception as e:
logger.error(f"Failed to add memory: {e}")
return {}
async def get_all_memories(
self,
user_id: str,
agent_id: str,
) -> List[Dict[str, Any]]:
"""获取用户的所有记忆(用户级别)
Args:
user_id: 用户 ID
agent_id: Agent/Bot ID
Returns:
记忆列表
"""
try:
mem = await self.get_mem0(user_id, agent_id, "default")
# Fetch all memories
memories = mem.get_all(user_id=user_id)
# Filter by agent_id
filtered_memories = [
m for m in memories
if m.get("metadata", {}).get("agent_id") == agent_id
]
return filtered_memories
except Exception as e:
logger.error(f"Failed to get all memories: {e}")
return []
def clear_cache(self, user_id: Optional[str] = None, agent_id: Optional[str] = None) -> None:
"""清除缓存的 Mem0 实例
Args:
user_id: 用户 ID如果为 None清除所有
agent_id: Agent ID如果为 None清除所有
"""
if user_id is None and agent_id is None:
self._instances.clear()
logger.info("Cleared all Mem0 instances from cache")
else:
keys_to_remove = []
for key in self._instances:
# Key format: "user_id:agent_id:llm_instance_id" or "user_id:agent_id"
parts = key.split(":")
if len(parts) >= 2:
u_id = parts[0]
a_id = parts[1]
if user_id and u_id != user_id:
continue
if agent_id and a_id != agent_id:
continue
keys_to_remove.append(key)
for key in keys_to_remove:
del self._instances[key]
logger.info(f"Cleared {len(keys_to_remove)} Mem0 instances from cache")
async def close(self) -> None:
"""关闭管理器并清理资源"""
logger.info("Closing Mem0Manager...")
# Clear the cached instances
self._instances.clear()
# Note: do not close the shared sync pool (it is owned by DBPoolManager)
self._initialized = False
logger.info("Mem0Manager closed")
# Global singleton
_global_manager: Optional[Mem0Manager] = None
def get_mem0_manager() -> Mem0Manager:
"""获取全局 Mem0Manager 单例
Returns:
Mem0Manager 实例
"""
global _global_manager
if _global_manager is None:
_global_manager = Mem0Manager()
return _global_manager
async def init_global_mem0(
sync_pool: pool.SimpleConnectionPool,
) -> Mem0Manager:
"""初始化全局 Mem0Manager
Args:
sync_pool: PostgreSQL 同步连接池 DBPoolManager.sync_pool 获取
Returns:
Mem0Manager 实例
"""
manager = get_mem0_manager()
manager._sync_pool = sync_pool
await manager.initialize()
return manager
async def close_global_mem0() -> None:
"""关闭全局 Mem0Manager"""
global _global_manager
if _global_manager is not None:
await _global_manager.close()
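To illustrate the remove_code_blocks patch at the top of this file, a small sketch with hypothetical inputs (per the comments above, json_repair strips code fences and repairs trailing commas; non-JSON input falls through to the raw text):

from agent.mem0_manager import _remove_code_blocks_with_repair

fenced = '```json\n{"facts": ["likes tea",]}\n```'  # fenced, with a trailing comma
print(_remove_code_blocks_with_repair(fenced))      # -> {"facts": ["likes tea"]}
print(_remove_code_blocks_with_repair("not json"))  # -> not json (fallback path)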

agent/mem0_middleware.py (new file, 381 lines)
View File

@ -0,0 +1,381 @@
"""
Mem0 agent middleware
An AgentMiddleware that implements memory recall and storage
"""
import logging
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
from langchain.agents.middleware import AgentMiddleware, AgentState, ModelRequest
from langgraph.runtime import Runtime
from .mem0_config import Mem0Config
from .mem0_manager import Mem0Manager, get_mem0_manager
logger = logging.getLogger("app")
# Avoid circular imports
if TYPE_CHECKING:
from langchain_core.language_models import BaseChatModel
from agent.agent_config import AgentConfig  # for the "AgentConfig" annotations below
class Mem0Middleware(AgentMiddleware):
"""
Mem0 memory middleware
Responsibilities:
1. before_agent: recall relevant memories and inject them into the context
2. after_agent: extract and store new memories asynchronously in the background
"""
def __init__(
self,
mem0_manager: Mem0Manager,
config: Mem0Config,
agent_config: "AgentConfig",
):
"""初始化 Mem0Middleware
Args:
mem0_manager: Mem0Manager 实例
config: Mem0Config 配置
agent_config: AgentConfig 实例用于中间件间传递数据
"""
self.mem0_manager = mem0_manager
self.config = config
self.agent_config = agent_config
def _extract_user_query(self, state: AgentState) -> str:
"""从状态中提取用户查询(最后一条 HumanMessage
Args:
state: Agent 状态
Returns:
用户查询文本
"""
from langchain_core.messages import HumanMessage
messages = state.get("messages", [])
if not messages:
return ""
# Find the last HumanMessage
for msg in reversed(messages):
if isinstance(msg, HumanMessage):
return str(msg.content) if msg.content else ""
return ""
def _extract_agent_response(self, state: AgentState) -> str:
"""从状态中提取 Agent 响应(最后一条 AIMessage
Args:
state: Agent 状态
Returns:
Agent 响应文本
"""
from langchain_core.messages import AIMessage
messages = state.get("messages", [])
if not messages:
return ""
# Find the last AIMessage
for msg in reversed(messages):
if isinstance(msg, AIMessage):
return str(msg.content) if msg.content else ""
return ""
def _format_memories(self, memories: List[Dict[str, Any]]) -> str:
"""格式化记忆列表为文本
Args:
memories: 记忆列表
Returns:
格式化的记忆文本
"""
if not memories:
return ""
lines = []
for i, memory in enumerate(memories, 1):
content = memory.get("content", "")
fact_type = memory.get("fact_type", "fact")
lines.append(f"{i}. [{fact_type}] {content}")
return "\n".join(lines)
def before_agent(self, state: AgentState, runtime: Runtime) -> Dict[str, Any] | None:
"""Agent 执行前:召回相关记忆(同步版本)
Args:
state: Agent 状态
runtime: 运行时上下文
Returns:
更新后的状态或 None
"""
if not self.config.is_enabled():
return None
try:
import asyncio
# Extract the user query
query = self._extract_user_query(state)
if not query:
return None
# Get the attribution parameters
user_id, agent_id = self.config.get_attribution_tuple()
# Recall memories (sync path: run the coroutine on a fresh event loop)
# Note: _recall_memories_async takes only (query, user_id, agent_id)
memories = asyncio.run(self._recall_memories_async(query, user_id, agent_id))
if memories:
# Format the memories and assemble the memory_prompt
memory_text = self._format_memories(memories)
memory_prompt = self.config.get_memory_prompt([memory_text])
self.agent_config._mem0_context = memory_prompt
logger.info(f"Recalled {len(memories)} memories for context")
else:
self.agent_config._mem0_context = None
return state
except Exception as e:
logger.error(f"Error in Mem0Middleware.before_agent: {e}")
return None
async def abefore_agent(self, state: AgentState, runtime: Runtime) -> Dict[str, Any] | None:
"""Agent 执行前:召回相关记忆(异步版本)
Args:
state: Agent 状态
runtime: 运行时上下文
Returns:
更新后的状态或 None
"""
if not self.config.is_enabled():
return None
try:
# Extract the user query
query = self._extract_user_query(state)
if not query:
logger.debug("No user query found, skipping memory recall")
return None
# Get the attribution parameters
user_id, agent_id = self.config.get_attribution_tuple()
# Recall memories (user-level, cross-session)
memories = await self._recall_memories_async(query, user_id, agent_id)
if memories:
# Format the memories and assemble the memory_prompt
memory_text = self._format_memories(memories)
memory_prompt = self.config.get_memory_prompt([memory_text])
self.agent_config._mem0_context = memory_prompt
logger.info(f"Recalled {len(memories)} memories for context")
else:
self.agent_config._mem0_context = None
return state
except Exception as e:
logger.error(f"Error in Mem0Middleware.abefore_agent: {e}")
return None
async def _recall_memories_async(
self, query: str, user_id: str, agent_id: str
) -> List[Dict[str, Any]]:
"""异步召回记忆
Args:
query: 查询文本
user_id: 用户 ID
agent_id: Agent/Bot ID
Returns:
记忆列表
"""
return await self.mem0_manager.recall_memories(
query=query,
user_id=user_id,
agent_id=agent_id,
config=self.config,
)
def after_agent(self, state: AgentState, runtime: Runtime) -> None:
"""Agent 执行后:触发记忆增强(同步版本)
Args:
state: Agent 状态
runtime: 运行时上下文
"""
if not self.config.is_enabled():
return
try:
import asyncio
# Kick off the background augmentation task
asyncio.create_task(self._trigger_augmentation_async(state, runtime))
except Exception as e:
logger.error(f"Error in Mem0Middleware.after_agent: {e}")
async def aafter_agent(self, state: AgentState, runtime: Runtime) -> None:
"""Agent 执行后:触发记忆增强(异步版本)
Args:
state: Agent 状态
runtime: 运行时上下文
"""
if not self.config.is_enabled():
return
try:
await self._trigger_augmentation_async(state, runtime)
except Exception as e:
logger.error(f"Error in Mem0Middleware.aafter_agent: {e}")
async def _trigger_augmentation_async(self, state: AgentState, runtime: Runtime) -> None:
"""触发记忆增强任务
从对话中提取信息并存储到 Mem0用户级别跨会话
Args:
state: Agent 状态
runtime: 运行时上下文
"""
try:
# Get the attribution parameters
user_id, agent_id = self.config.get_attribution_tuple()
# Extract the user query and the agent response
user_query = self._extract_user_query(state)
agent_response = self._extract_agent_response(state)
# Store the conversation as a memory (user-level)
if user_query and agent_response:
conversation_text = f"User: {user_query}\nAssistant: {agent_response}"
await self.mem0_manager.add_memory(
text=conversation_text,
user_id=user_id,
agent_id=agent_id,
metadata={"type": "conversation"},
config=self.config,
)
logger.debug(f"Stored conversation as memory for user={user_id}, agent={agent_id}")
except Exception as e:
logger.error(f"Error in _trigger_augmentation_async: {e}")
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], Any],
) -> Any:
"""包装模型调用,注入记忆到系统提示词(同步版本)
Args:
request: 模型请求
handler: 原始处理器
Returns:
模型响应
"""
# Read the pre-assembled memory prompt from agent_config
memory_prompt = self.agent_config._mem0_context
if not memory_prompt:
return handler(request)
# Get the current system prompt
current_system_prompt = ""
if request.system_message:
current_system_prompt = request.system_message.content if hasattr(request.system_message, "content") else str(request.system_message)
# Amend the system prompt
new_system_prompt = current_system_prompt + memory_prompt
return handler(request.override(system_prompt=new_system_prompt))
async def awrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], Any],
) -> Any:
"""包装模型调用,注入记忆到系统提示词(异步版本)
Args:
request: 模型请求
handler: 原始处理器
Returns:
模型响应
"""
# Read the pre-assembled memory prompt from agent_config
memory_prompt = self.agent_config._mem0_context
if not memory_prompt:
return await handler(request)
# Get the current system prompt
current_system_prompt = ""
if request.system_message:
current_system_prompt = request.system_message.content if hasattr(request.system_message, "content") else str(request.system_message)
# Amend the system prompt
new_system_prompt = current_system_prompt + memory_prompt
return await handler(request.override(system_prompt=new_system_prompt))
def create_mem0_middleware(
bot_id: str,
user_identifier: str,
session_id: str,
agent_config: "AgentConfig",
enabled: bool = True,
semantic_search_top_k: int = 20,
mem0_manager: Optional[Mem0Manager] = None,
llm_instance: Optional["BaseChatModel"] = None,
) -> Optional[Mem0Middleware]:
"""创建 Mem0Middleware 的工厂函数
Args:
bot_id: Bot ID
user_identifier: 用户标识
session_id: 会话 ID
agent_config: AgentConfig 实例用于中间件间传递数据
enabled: 是否启用
semantic_search_top_k: 语义搜索返回数量
mem0_manager: Mem0Manager 实例如果为 None使用全局实例
llm_instance: LangChain LLM 实例用于 Mem0 的记忆提取和增强
Returns:
Mem0Middleware 实例或 None
"""
if not enabled:
return None
# Use the provided manager, or fall back to the global one
manager = mem0_manager or get_mem0_manager()
# Build the config
config = Mem0Config(
enabled=True,
user_id=user_identifier,
agent_id=bot_id,
session_id=session_id,
semantic_search_top_k=semantic_search_top_k,
llm_instance=llm_instance,
)
return Mem0Middleware(mem0_manager=manager, config=config, agent_config=agent_config)
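For reference, a sketch of what _format_memories produces for two recalled memories (the values are illustrative; fact_type is whatever category Mem0 stored in the result metadata):

memories = [
    {"content": "User prefers replies in Chinese", "fact_type": "preference"},
    {"content": "User is evaluating pgvector for vector search", "fact_type": "fact"},
]
# Mem0Middleware._format_memories(memories) yields:
# 1. [preference] User prefers replies in Chinese
# 2. [fact] User is evaluating pgvector for vector search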

View File

@ -1,389 +0,0 @@
"""
Memori connection and instance manager
Manages the creation, caching, and lifecycle of Memori client instances
"""
import asyncio
import logging
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional
from psycopg_pool import AsyncConnectionPool
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, Session
from .memori_config import MemoriConfig
logger = logging.getLogger("app")
class MemoriManager:
"""
Memori connection and instance manager
Main responsibilities:
1. Manage the creation and caching of Memori instances
2. Support multi-tenant isolation (entity_id + process_id)
3. Handle database connections and session management
4. Expose memory recall and storage interfaces
"""
def __init__(
self,
db_pool: Optional[AsyncConnectionPool] = None,
db_url: Optional[str] = None,
api_key: Optional[str] = None,
):
"""初始化 MemoriManager
Args:
db_pool: PostgreSQL 异步连接池 Checkpointer 共享
db_url: 数据库连接 URL如果不使用连接池
api_key: Memori API 密钥用于高级增强功能
"""
self._db_pool = db_pool
self._db_url = db_url
self._api_key = api_key
# Cache of Memori instances: key = f"{entity_id}:{process_id}"
self._instances: Dict[str, Any] = {}
self._sync_engines: Dict[str, Any] = {}
self._initialized = False
@property
def db_url(self) -> Optional[str]:
"""获取数据库 URL"""
if self._db_url:
return self._db_url
# Fall back to settings
from utils.settings import CHECKPOINT_DB_URL
return CHECKPOINT_DB_URL
async def initialize(self) -> None:
"""初始化 MemoriManager
创建数据库表结构如果不存在
"""
if self._initialized:
return
logger.info("Initializing MemoriManager...")
try:
# Create the first Memori instance to initialize the schema
if self._db_pool or self._db_url:
db_url = self._db_url or getattr(self._db_pool, "_url", None)
if db_url:
await self._build_schema(db_url)
self._initialized = True
logger.info("MemoriManager initialized successfully")
except Exception as e:
logger.error(f"Failed to initialize MemoriManager: {e}")
# Do not raise; the system can run without Memori
async def _build_schema(self, db_url: str) -> None:
"""构建 Memori 数据库表结构
Args:
db_url: 数据库连接 URL
"""
try:
from memori import Memori
# Create a synchronous engine for initialization
engine = create_engine(db_url)
SessionLocal = sessionmaker(bind=engine)
# Create a Memori instance and build the schema
mem = Memori(conn=SessionLocal)
mem.config.storage.build()
logger.info("Memori schema built successfully")
except ImportError:
logger.warning("memori package not available, skipping schema build")
except Exception as e:
logger.error(f"Failed to build Memori schema: {e}")
def _get_sync_session(self, db_url: str) -> Session:
"""获取同步数据库会话Memori 需要)
Args:
db_url: 数据库连接 URL
Returns:
SQLAlchemy Session
"""
if db_url not in self._sync_engines:
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine(db_url, pool_pre_ping=True)
self._sync_engines[db_url] = sessionmaker(bind=engine)
return self._sync_engines[db_url]()
async def get_memori(
self,
entity_id: str,
process_id: str,
session_id: str,
config: Optional[MemoriConfig] = None,
) -> Any:
"""获取或创建 Memori 实例
Args:
entity_id: 实体 ID通常是 user_identifier
process_id: 进程 ID通常是 bot_id
session_id: 会话 ID
config: Memori 配置
Returns:
Memori 实例
"""
cache_key = f"{entity_id}:{process_id}"
# Check the cache
if cache_key in self._instances:
memori_instance = self._instances[cache_key]
# Update the session
memori_instance.config.session_id = session_id
return memori_instance
# Create a new instance
memori_instance = await self._create_memori_instance(
entity_id=entity_id,
process_id=process_id,
session_id=session_id,
config=config,
)
# Cache the instance
self._instances[cache_key] = memori_instance
return memori_instance
async def _create_memori_instance(
self,
entity_id: str,
process_id: str,
session_id: str,
config: Optional[MemoriConfig] = None,
) -> Any:
"""创建新的 Memori 实例
Args:
entity_id: 实体 ID
process_id: 进程 ID
session_id: 会话 ID
config: Memori 配置
Returns:
Memori 实例
"""
try:
from memori import Memori
except ImportError:
logger.error("memori package not installed")
raise RuntimeError("memori package is required but not installed")
# Get the database connection URL
db_url = self.db_url
if not db_url:
raise ValueError("Database URL not available")
# Create a synchronous session (Memori currently needs a sync connection)
session_factory = self._get_sync_session(db_url)
# Create the Memori instance
mem = Memori(conn=session_factory)
# Set the API key (if provided)
if self._api_key or (config and config.api_key):
api_key = config.api_key if config else self._api_key
mem.config.api_key = api_key
# Set the attribution
mem.attribution(entity_id=entity_id, process_id=process_id)
# Set the session
mem.config.session_id = session_id
# Configure the recall parameters
if config:
mem.config.recall_facts_limit = config.semantic_search_top_k
mem.config.recall_relevance_threshold = config.semantic_search_threshold
mem.config.recall_embeddings_limit = config.semantic_search_embeddings_limit
logger.info(
f"Created Memori instance: entity={entity_id}, process={process_id}, session={session_id}"
)
return mem
async def recall_memories(
self,
query: str,
entity_id: str,
process_id: str,
session_id: str,
config: Optional[MemoriConfig] = None,
) -> List[Dict[str, Any]]:
"""召回相关记忆
Args:
query: 查询文本
entity_id: 实体 ID
process_id: 进程 ID
session_id: 会话 ID
config: Memori 配置
Returns:
记忆列表每个记忆包含 content, similarity 等字段
"""
try:
mem = await self.get_memori(entity_id, process_id, session_id, config)
# Run a semantic search via recall
results = mem.recall(query=query, limit=config.semantic_search_top_k if config else 5)
# Normalize into a unified format
memories = []
for result in results:
memory = {
"content": result.get("content", ""),
"similarity": result.get("similarity", 0.0),
"fact_type": result.get("fact_type", "unknown"),
"created_at": result.get("created_at"),
}
# Filter out low-relevance memories
threshold = config.semantic_search_threshold if config else 0.7
if memory["similarity"] >= threshold:
memories.append(memory)
logger.info(f"Recalled {len(memories)} memories for query: {query[:50]}...")
return memories
except Exception as e:
logger.error(f"Failed to recall memories: {e}")
return []
async def wait_for_augmentation(
self,
entity_id: str,
process_id: str,
session_id: str,
timeout: Optional[float] = None,
) -> None:
"""等待后台增强任务完成
Args:
entity_id: 实体 ID
process_id: 进程 ID
session_id: 会话 ID
timeout: 超时时间
"""
try:
mem = await self.get_memori(entity_id, process_id, session_id)
if timeout:
# 在线程池中运行同步的 wait()
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, lambda: mem.augmentation.wait(timeout=timeout))
else:
# Wait indefinitely
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, mem.augmentation.wait)
except Exception as e:
logger.error(f"Failed to wait for augmentation: {e}")
def clear_cache(self, entity_id: Optional[str] = None, process_id: Optional[str] = None) -> None:
"""清除缓存的 Memori 实例
Args:
entity_id: 实体 ID如果为 None清除所有
process_id: 进程 ID如果为 None清除所有
"""
if entity_id is None and process_id is None:
self._instances.clear()
logger.info("Cleared all Memori instances from cache")
else:
keys_to_remove = []
for key in self._instances:
e_id, p_id = key.split(":")
if entity_id and e_id != entity_id:
continue
if process_id and p_id != process_id:
continue
keys_to_remove.append(key)
for key in keys_to_remove:
del self._instances[key]
logger.info(f"Cleared {len(keys_to_remove)} Memori instances from cache")
async def close(self) -> None:
"""关闭管理器并清理资源"""
logger.info("Closing MemoriManager...")
# Clear the cached instances
self._instances.clear()
# Dispose of the synchronous engines
for engine in self._sync_engines.values():
try:
engine.dispose()
except Exception as e:
logger.error(f"Error closing engine: {e}")
self._sync_engines.clear()
self._initialized = False
logger.info("MemoriManager closed")
# Global singleton
_global_manager: Optional[MemoriManager] = None
def get_memori_manager() -> MemoriManager:
"""获取全局 MemoriManager 单例
Returns:
MemoriManager 实例
"""
global _global_manager
if _global_manager is None:
_global_manager = MemoriManager()
return _global_manager
async def init_global_memori(
db_pool: Optional[AsyncConnectionPool] = None,
db_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> MemoriManager:
"""初始化全局 MemoriManager
Args:
db_pool: PostgreSQL 连接池
db_url: 数据库连接 URL
api_key: Memori API 密钥
Returns:
MemoriManager 实例
"""
manager = get_memori_manager()
manager._db_pool = db_pool
manager._db_url = db_url
manager._api_key = api_key
await manager.initialize()
return manager
async def close_global_memori() -> None:
"""关闭全局 MemoriManager"""
global _global_manager
if _global_manager is not None:
await _global_manager.close()

View File

@ -1,342 +0,0 @@
"""
Memori agent middleware
An AgentMiddleware that implements memory recall and storage
"""
import asyncio
import logging
from typing import Any, Dict, List, Optional
from langchain.agents.middleware import AgentMiddleware, AgentState
from langgraph.runtime import Runtime
from .memori_config import MemoriConfig
from .memori_manager import MemoriManager, get_memori_manager
logger = logging.getLogger("app")
class MemoriMiddleware(AgentMiddleware):
"""
Memori memory middleware
Responsibilities:
1. before_agent: recall relevant memories and inject them into the context
2. after_agent: extract and store new memories asynchronously in the background
"""
def __init__(
self,
memori_manager: MemoriManager,
config: MemoriConfig,
):
"""初始化 MemoriMiddleware
Args:
memori_manager: MemoriManager 实例
config: MemoriConfig 配置
"""
self.memori_manager = memori_manager
self.config = config
def _extract_user_query(self, state: AgentState) -> str:
"""从状态中提取用户查询
Args:
state: Agent 状态
Returns:
用户查询文本
"""
messages = state.get("messages", [])
if not messages:
return ""
# Get the last message
last_message = messages[-1]
# Try to read the content
content = getattr(last_message, "content", None)
if content is None:
content = last_message.get("content", "") if isinstance(last_message, dict) else ""
return str(content) if content else ""
def _format_memories(self, memories: List[Dict[str, Any]]) -> str:
"""格式化记忆列表为文本
Args:
memories: 记忆列表
Returns:
格式化的记忆文本
"""
if not memories:
return ""
lines = []
for i, memory in enumerate(memories, 1):
content = memory.get("content", "")
similarity = memory.get("similarity", 0.0)
fact_type = memory.get("fact_type", "fact")
# similarity score available here for debugging
lines.append(f"{i}. [{fact_type}] {content}")
return "\n".join(lines)
def _inject_memory_context(self, state: AgentState, memory_text: str) -> AgentState:
"""将记忆上下文注入到状态中
Args:
state: 原始状态
memory_text: 记忆文本
Returns:
更新后的状态
"""
if not memory_text or not self.config.inject_memory_to_system_prompt:
return state
# Build the memory prompt
memory_prompt = self.config.get_memory_prompt([memory_text])
# Check for a system message
messages = state.get("messages", [])
if not messages:
return state
# Append the memory context after the system message
from langchain_core.messages import SystemMessage
# Find the system message
system_message = None
for msg in messages:
if hasattr(msg, "type") and msg.type == "system":
system_message = msg
break
elif isinstance(msg, dict) and msg.get("role") == "system":
system_message = msg
break
if system_message:
# Amend the existing system message
if hasattr(system_message, "content"):
original_content = system_message.content
system_message.content = original_content + memory_prompt
elif isinstance(system_message, dict):
original_content = system_message.get("content", "")
system_message["content"] = original_content + memory_prompt
else:
# Add a new system message
new_messages = list(messages)
new_messages.insert(0, SystemMessage(content=memory_prompt))
state = {**state, "messages": new_messages}
return state
def before_agent(self, state: AgentState, runtime: Runtime) -> Dict[str, Any] | None:
"""Agent 执行前:召回相关记忆(同步版本)
Args:
state: Agent 状态
runtime: 运行时上下文
Returns:
更新后的状态或 None
"""
if not self.config.is_enabled():
return None
try:
# Extract the user query
query = self._extract_user_query(state)
if not query:
return None
# Get the attribution parameters
entity_id, process_id = self.config.get_attribution_tuple()
session_id = self.config.session_id or runtime.config.get("configurable", {}).get("thread_id", "default")
# Recall memories (sync path, run on a fresh event loop)
memories = asyncio.run(self._recall_memories_async(query, entity_id, process_id, session_id))
if memories:
# Format the memories
memory_text = self._format_memories(memories)
# Inject into the state
updated_state = self._inject_memory_context(state, memory_text)
logger.info(f"Injected {len(memories)} memories into context")
return updated_state
return None
except Exception as e:
logger.error(f"Error in MemoriMiddleware.before_agent: {e}")
return None
async def abefore_agent(self, state: AgentState, runtime: Runtime) -> Dict[str, Any] | None:
"""Agent 执行前:召回相关记忆(异步版本)
Args:
state: Agent 状态
runtime: 运行时上下文
Returns:
更新后的状态或 None
"""
if not self.config.is_enabled():
return None
try:
# Extract the user query
query = self._extract_user_query(state)
if not query:
logger.debug("No user query found, skipping memory recall")
return None
# Get the attribution parameters
entity_id, process_id = self.config.get_attribution_tuple()
session_id = self.config.session_id or runtime.config.get("configurable", {}).get("thread_id", "default")
# Recall memories
memories = await self._recall_memories_async(query, entity_id, process_id, session_id)
if memories:
# Format the memories
memory_text = self._format_memories(memories)
# Inject into the state
updated_state = self._inject_memory_context(state, memory_text)
logger.info(f"Injected {len(memories)} memories into context (similarity > {self.config.semantic_search_threshold})")
return updated_state
return None
except Exception as e:
logger.error(f"Error in MemoriMiddleware.abefore_agent: {e}")
return None
async def _recall_memories_async(
self, query: str, entity_id: str, process_id: str, session_id: str
) -> List[Dict[str, Any]]:
"""异步召回记忆
Args:
query: 查询文本
entity_id: 实体 ID
process_id: 进程 ID
session_id: 会话 ID
Returns:
记忆列表
"""
return await self.memori_manager.recall_memories(
query=query,
entity_id=entity_id,
process_id=process_id,
session_id=session_id,
config=self.config,
)
def after_agent(self, state: AgentState, runtime: Runtime) -> None:
"""Agent 执行后:触发记忆增强(同步版本)
Args:
state: Agent 状态
runtime: 运行时上下文
"""
if not self.config.is_enabled() or not self.config.augmentation_enabled:
return
try:
# Kick off the background augmentation task
asyncio.create_task(self._trigger_augmentation_async(state, runtime))
except Exception as e:
logger.error(f"Error in MemoriMiddleware.after_agent: {e}")
async def aafter_agent(self, state: AgentState, runtime: Runtime) -> None:
"""Agent 执行后:触发记忆增强(异步版本)
注意Memori 的增强会自动在后台执行这里主要是记录日志
Args:
state: Agent 状态
runtime: 运行时上下文
"""
if not self.config.is_enabled() or not self.config.augmentation_enabled:
return
try:
# If a wait timeout is configured, wait for augmentation to finish
if self.config.augmentation_wait_timeout is not None:
entity_id, process_id = self.config.get_attribution_tuple()
session_id = self.config.session_id or runtime.config.get("configurable", {}).get("thread_id", "default")
await self.memori_manager.wait_for_augmentation(
entity_id=entity_id,
process_id=process_id,
session_id=session_id,
timeout=self.config.augmentation_wait_timeout,
)
except Exception as e:
logger.error(f"Error in MemoriMiddleware.aafter_agent: {e}")
async def _trigger_augmentation_async(self, state: AgentState, runtime: Runtime) -> None:
"""触发记忆增强任务
注意Memori LLM 客户端注册后会自动捕获对话并进行增强
这里不需要手动触发只是确保会话正确设置
Args:
state: Agent 状态
runtime: 运行时上下文
"""
# Memori's augmentation is automatic; this mainly ensures the configuration is correct
# Manual triggering could be implemented here if needed
pass
def create_memori_middleware(
bot_id: str,
user_identifier: str,
session_id: str,
enabled: bool = True,
semantic_search_top_k: int = 5,
semantic_search_threshold: float = 0.7,
memori_manager: Optional[MemoriManager] = None,
) -> Optional[MemoriMiddleware]:
"""创建 MemoriMiddleware 的工厂函数
Args:
bot_id: Bot ID
user_identifier: 用户标识
session_id: 会话 ID
enabled: 是否启用
semantic_search_top_k: 语义搜索返回数量
semantic_search_threshold: 语义搜索相似度阈值
memori_manager: MemoriManager 实例如果为 None使用全局实例
Returns:
MemoriMiddleware 实例或 None
"""
if not enabled:
return None
# Use the provided manager, or fall back to the global one
manager = memori_manager or get_memori_manager()
# Build the config
config = MemoriConfig(
enabled=True,
entity_id=user_identifier,
process_id=bot_id,
session_id=session_id,
semantic_search_top_k=semantic_search_top_k,
semantic_search_threshold=semantic_search_threshold,
)
return MemoriMiddleware(memori_manager=manager, config=config)

View File

@ -253,17 +253,18 @@ async def load_mcp_settings_async(project_dir: str, mcp_settings: list=None, bot
return merged_settings
def load_guideline_prompt(chat_history:str, guidelines_text: str, tools: str, scenarios: str, terms: str, language: str, user_identifier: str = "") -> str:
def load_guideline_prompt(chat_history:str, memory_text: str, guidelines_text: str, tools: str, scenarios: str, language: str, user_identifier: str = "") -> str:
"""
Load and render the guideline prompt
Args:
chat_history: the chat history
memory_text: the memory text
guidelines_text: the guidelines text
terms: the terms text
tools: the tool description text
scenarios: the scenario description text
language: language code ('zh', 'en', 'jp')
user_identifier: the user identifier (defaults to empty)
datetime_str: the datetime string (defaults to empty)
Returns:
str: the rendered guideline prompt
@ -285,12 +286,12 @@ def load_guideline_prompt(chat_history:str, guidelines_text: str, tools: str, sc
system_prompt = guideline_template.format(
chat_history=chat_history,
guidelines_text=guidelines_text,
terms=terms,
tools=tools,
scenarios=scenarios,
language=language_display,
user_identifier=user_identifier,
datetime=datetime_str
datetime=datetime_str,
memory_text=memory_text
)
return system_prompt

drop_mem0_tables.py (new file, 27 lines)
View File

@ -0,0 +1,27 @@
#!/usr/bin/env python
"""删除所有 mem0 相关的表"""
import psycopg2
from utils.settings import CHECKPOINT_DB_URL
conn = psycopg2.connect(CHECKPOINT_DB_URL)
cursor = conn.cursor()
# 查找所有 mem0 开头的表
cursor.execute("""
SELECT tablename FROM pg_tables
WHERE schemaname = 'public' AND tablename LIKE 'mem0_%'
""")
tables = cursor.fetchall()
print('找到的表:', tables)
# 删除每个表
for (table,) in tables:
cursor.execute(f'DROP TABLE IF EXISTS {table} CASCADE')
print(f'已删除: {table}')
conn.commit()
cursor.close()
conn.close()
print('删除完成')
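
Since the table name is interpolated with an f-string, a slightly more defensive variant would quote it through psycopg2's `sql` module; a sketch, not part of the commit (the names come from `pg_tables`, so the practical risk here is low):

```python
from psycopg2 import sql

# Quote the identifier instead of interpolating it into the SQL string.
cursor.execute(
    sql.SQL("DROP TABLE IF EXISTS {} CASCADE").format(sql.Identifier(table))
)
```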


@@ -6,6 +6,58 @@ import multiprocessing
import sys
from contextlib import asynccontextmanager
# ========== Monkey patch: must run before all other imports ==========
# Replace mem0's remove_code_blocks with a json_repair-based version.
# This has to happen before any mem0 module is imported.
import logging

_patch_logger = logging.getLogger('app')

try:
    import json_repair
    import re

    def _remove_code_blocks_with_repair(content: str) -> str:
        """Drop-in replacement for mem0's remove_code_blocks, backed by json_repair."""
        content_stripped = content.strip()
        try:
            result = json_repair.loads(content_stripped)
            if isinstance(result, (dict, list)):
                import json
                return json.dumps(result, ensure_ascii=False)
            if result == "" and content_stripped != "":
                pattern = r"^```[a-zA-Z0-9]*\n([\s\S]*?)\n```$"
                match = re.match(pattern, content_stripped)
                if match:
                    return match.group(1).strip()
                return content_stripped
            return str(result)
        except Exception:
            pattern = r"^```[a-zA-Z0-9]*\n([\s\S]*?)\n```$"
            match = re.match(pattern, content_stripped)
            if match:
                return match.group(1).strip()
            return content_stripped

    # Patch mem0.memory.utils (the source of the function)
    import mem0.memory.utils
    mem0.memory.utils.remove_code_blocks = _remove_code_blocks_with_repair

    # Patch mem0.memory.main as well: `from ... import` gives it a local
    # reference, which must be rebound if that module is already imported.
    import sys
    if 'mem0.memory.main' in sys.modules:
        import mem0.memory.main
        mem0.memory.main.remove_code_blocks = _remove_code_blocks_with_repair
        _patch_logger.info("Successfully patched mem0.memory.main.remove_code_blocks")
    else:
        # Not imported yet: patching utils is enough, because main will bind
        # the already-patched function when it is first imported.
        _patch_logger.info("Successfully patched mem0.memory.utils.remove_code_blocks with json_repair")
except ImportError:
    pass  # json_repair or mem0 is not installed
except Exception as e:
    _patch_logger.warning(f"Failed to patch mem0 remove_code_blocks: {e}")
# ========== End Monkey patch ==========
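
The two-step patch is needed because `from x import y` copies the function object into the importing module's namespace, so rebinding it in `mem0.memory.utils` alone does not update a `mem0.memory.main` that was imported earlier. A standalone illustration of that binding behavior (not from this codebase):

```python
import sys
import types

# Simulate module a with a function f, and module b doing `from a import f`.
a = types.ModuleType("a")
a.f = lambda: "original"
sys.modules["a"] = a

b = types.ModuleType("b")
exec("from a import f", b.__dict__)

a.f = lambda: "patched"   # rebind in the source module only
print(a.f())  # -> patched
print(b.f())  # -> original: b kept its own reference and must be patched separately
```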
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
@@ -40,11 +92,11 @@ async def lifespan(app: FastAPI):
        init_chat_history_manager,
        close_chat_history_manager
    )
    from agent.memori_manager import (
        init_global_memori,
        close_global_memori
    from agent.mem0_manager import (
        init_global_mem0,
        close_global_mem0
    )
    from utils.settings import CHECKPOINT_CLEANUP_ENABLED, MEMORI_ENABLED, MEMORI_API_KEY, CHECKPOINT_DB_URL
    from utils.settings import CHECKPOINT_CLEANUP_ENABLED, MEM0_ENABLED

    # 1. Initialize the shared database connection pool
    db_pool_manager = await init_global_db_pool()
@@ -58,17 +110,13 @@ async def lifespan(app: FastAPI):
    await init_chat_history_manager(db_pool_manager.pool)
    logger.info("Chat history manager initialized")

    # 4. Initialize the Memori long-term memory system (if enabled)
    if MEMORI_ENABLED:
    # 4. Initialize the Mem0 long-term memory system (if enabled)
    if MEM0_ENABLED:
        try:
            await init_global_memori(
                db_pool=db_pool_manager.pool,
                db_url=CHECKPOINT_DB_URL,
                api_key=MEMORI_API_KEY
            )
            logger.info("Memori long-term memory initialized")
            await init_global_mem0(sync_pool=db_pool_manager.sync_pool)
            logger.info("Mem0 long-term memory initialized")
        except Exception as e:
            logger.warning(f"Memori initialization failed (continuing without): {e}")
            logger.warning(f"Mem0 initialization failed (continuing without): {e}")

    # 5. Start the checkpoint cleanup scheduler
    if CHECKPOINT_CLEANUP_ENABLED:
@@ -86,13 +134,13 @@ async def lifespan(app: FastAPI):
    # Clean up on shutdown (in reverse order)
    logger.info("Shutting down...")

    # Close Memori
    if MEMORI_ENABLED:
    # Close Mem0
    if MEM0_ENABLED:
        try:
            await close_global_memori()
            logger.info("Memori long-term memory closed")
            await close_global_mem0()
            logger.info("Mem0 long-term memory closed")
        except Exception as e:
            logger.warning(f"Memori close failed (non-fatal): {e}")
            logger.warning(f"Mem0 close failed (non-fatal): {e}")

    await close_chat_history_manager()
    logger.info("Chat history manager closed")
    await close_global_checkpointer()


@@ -153,7 +153,6 @@ class AgentConfig:
    enable_memori: bool = False
    memori_api_key: Optional[str] = None
    memori_semantic_search_top_k: int = 5
    memori_semantic_search_threshold: float = 0.7
```

### Integration point changes

@@ -176,7 +175,6 @@ async def init_agent(config: AgentConfig) -> tuple[agent, checkpointer]:
    memori_config = MemoriConfig(
        semantic_search_top_k=config.memori_semantic_search_top_k,
        semantic_search_threshold=config.memori_semantic_search_threshold
    )
    memori_middleware = MemoriMiddleware(memori, memori_config)
@@ -265,7 +263,6 @@ MemoriMiddleware → extracts new memories in the background (non-blocking)
- [ ] **Configuration controls**
  - [ ] the `enable_memori` switch toggles the feature
  - [ ] configurable number of recalled memories (`semantic_search_top_k`)
  - [ ] configurable relevance threshold (`semantic_search_threshold`)

### Non-functional requirements

@@ -381,7 +378,6 @@ qwen-agent/
MEMORI_ENABLED = os.getenv("MEMORI_ENABLED", "true") == "true"
MEMORI_API_KEY = os.getenv("MEMORI_API_KEY", "")
MEMORI_SEMANTIC_SEARCH_TOP_K = int(os.getenv("MEMORI_SEMANTIC_SEARCH_TOP_K", "5"))
MEMORI_SEMANTIC_SEARCH_THRESHOLD = float(os.getenv("MEMORI_SEMANTIC_SEARCH_THRESHOLD", "0.7"))
MEMORI_EMBEDDING_MODEL = os.getenv("MEMORI_EMBEDDING_MODEL", "paraphrase-multilingual-MiniLM-L12-v2")
```
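
The MEMORI_* settings above are superseded by MEM0_* equivalents; `MEM0_ENABLED` and `MEM0_SEMANTIC_SEARCH_TOP_K` match the names imported elsewhere in this commit, while the defaults shown below are assumptions:

```python
# utils/settings.py (sketch; defaults are assumptions)
import os

MEM0_ENABLED = os.getenv("MEM0_ENABLED", "true") == "true"
MEM0_SEMANTIC_SEARCH_TOP_K = int(os.getenv("MEM0_SEMANTIC_SEARCH_TOP_K", "20"))
```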

poetry.lock (generated)

@@ -268,6 +268,18 @@ files = [
{file = "attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11"},
]
[[package]]
name = "backoff"
version = "2.2.1"
description = "Function decoration for backoff and retry"
optional = false
python-versions = ">=3.7,<4.0"
groups = ["main"]
files = [
{file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"},
{file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"},
]
[[package]]
name = "beautifulsoup4"
version = "4.14.3"
@@ -291,26 +303,6 @@ charset-normalizer = ["charset-normalizer"]
html5lib = ["html5lib"]
lxml = ["lxml"]
[[package]]
name = "botocore"
version = "1.42.30"
description = "Low-level, data-driven core of boto 3."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "botocore-1.42.30-py3-none-any.whl", hash = "sha256:97070a438cac92430bb7b65f8ebd7075224f4a289719da4ee293d22d1e98db02"},
{file = "botocore-1.42.30.tar.gz", hash = "sha256:9bf1662b8273d5cc3828a49f71ca85abf4e021011c1f0a71f41a2ea5769a5116"},
]
[package.dependencies]
jmespath = ">=0.7.1,<2.0.0"
python-dateutil = ">=2.1,<3.0.0"
urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""}
[package.extras]
crt = ["awscrt (==0.29.2)"]
[[package]]
name = "bracex"
version = "2.6"
@@ -798,58 +790,6 @@ files = [
{file = "et_xmlfile-2.0.0.tar.gz", hash = "sha256:dab3f4764309081ce75662649be815c4c9081e88f0837825f90fd28317d4da54"},
]
[[package]]
name = "faiss-cpu"
version = "1.11.0.post1"
description = "A library for efficient similarity search and clustering of dense vectors."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "faiss_cpu-1.11.0.post1-cp310-cp310-macosx_13_0_x86_64.whl", hash = "sha256:e079d44ea22919f6477fea553b05854c68838ab553e1c6b1237437a8becdf89d"},
{file = "faiss_cpu-1.11.0.post1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:4ded0c91cb67f462ae00a4d339718ea2fbb23eedbf260c3a07de77c32c23205a"},
{file = "faiss_cpu-1.11.0.post1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78812f4d7ff9d3773f50009efcf294f3da787cd8c835c1fc41d997a58100f7b5"},
{file = "faiss_cpu-1.11.0.post1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:76b133d746ddb3e6d39e6de62ff717cf4d45110d4af101a62d6a4fed4cd1d4d1"},
{file = "faiss_cpu-1.11.0.post1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9443bc89447f9988f2288477584d2f1c59424a5e9f9a202e4ada8708df816db1"},
{file = "faiss_cpu-1.11.0.post1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6acc20021b69bd30d3cb5cadb4f8dc1c338aec887cd5411b0982e8a3e48b3d7f"},
{file = "faiss_cpu-1.11.0.post1-cp310-cp310-win_amd64.whl", hash = "sha256:9dccf67d4087f9b0f937d4dccd1183929ebb6fe7622b75cba51b53e4f0055a0c"},
{file = "faiss_cpu-1.11.0.post1-cp311-cp311-macosx_13_0_x86_64.whl", hash = "sha256:2c8c384e65cc1b118d2903d9f3a27cd35f6c45337696fc0437f71e05f732dbc0"},
{file = "faiss_cpu-1.11.0.post1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:36af46945274ed14751b788673125a8a4900408e4837a92371b0cad5708619ea"},
{file = "faiss_cpu-1.11.0.post1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b15412b22a05865433aecfdebf7664b9565bd49b600d23a0a27c74a5526893e"},
{file = "faiss_cpu-1.11.0.post1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:81c169ea74213b2c055b8240befe7e9b42a1f3d97cda5238b3b401035ce1a18b"},
{file = "faiss_cpu-1.11.0.post1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0794eb035c6075e931996cf2b2703fbb3f47c8c34bc2d727819ddc3e5e486a31"},
{file = "faiss_cpu-1.11.0.post1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:18d2221014813dc9a4236e47f9c4097a71273fbf17c3fe66243e724e2018a67a"},
{file = "faiss_cpu-1.11.0.post1-cp311-cp311-win_amd64.whl", hash = "sha256:3ce8a8984a7dcc689fd192c69a476ecd0b2611c61f96fe0799ff432aa73ff79c"},
{file = "faiss_cpu-1.11.0.post1-cp311-cp311-win_arm64.whl", hash = "sha256:8384e05afb7c7968e93b81566759f862e744c0667b175086efb3d8b20949b39f"},
{file = "faiss_cpu-1.11.0.post1-cp312-cp312-macosx_13_0_x86_64.whl", hash = "sha256:68f6ce2d9c510a5765af2f5711bd76c2c37bd598af747f3300224bdccf45378c"},
{file = "faiss_cpu-1.11.0.post1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:b940c530a8236cc0b9fd9d6e87b3d70b9c6c216bc2baf2649356c908902e52c9"},
{file = "faiss_cpu-1.11.0.post1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fafae1dcbcba3856a0bb82ffb0c3cae5922bdd6566fdd3b7feb2425cf4fca247"},
{file = "faiss_cpu-1.11.0.post1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5d1262702c19aba2d23144b73f4b5730ca988c1f4e43ecec87edf25171cafe3d"},
{file = "faiss_cpu-1.11.0.post1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:925feb69c06bfcc7f28869c99ab172f123e4b9d97a7e1353316fcc2748696f5b"},
{file = "faiss_cpu-1.11.0.post1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:00a837581b675f099c80c8c46908648dcf944a8992dd21e3887c61c6b110fe5f"},
{file = "faiss_cpu-1.11.0.post1-cp312-cp312-win_amd64.whl", hash = "sha256:8bbaef5b56d1b0c01357ee6449d464ea4e52732fdb53a40bb5b9d77923af905f"},
{file = "faiss_cpu-1.11.0.post1-cp312-cp312-win_arm64.whl", hash = "sha256:57f85dbefe590f8399a95c07e839ee64373cfcc6db5dd35232a41137e3deefeb"},
{file = "faiss_cpu-1.11.0.post1-cp313-cp313-macosx_13_0_x86_64.whl", hash = "sha256:caedaddfbfe365e3f1a57d5151cf94ea7b73c0e4789caf68eae05e0e10ca9fbf"},
{file = "faiss_cpu-1.11.0.post1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:202d11f1d973224ca0bde13e7ee8b862b6de74287e626f9f8820b360e6253d12"},
{file = "faiss_cpu-1.11.0.post1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f6086e25ef680301350d6db72db7315e3531582cf896a7ee3f26295b1da73c44"},
{file = "faiss_cpu-1.11.0.post1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b93131842996efbbf76f07dba1775d3a5f355f74b9ba34334f1149aef046b37f"},
{file = "faiss_cpu-1.11.0.post1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f26e3e93f537b2e1633212a1b0a7dab74d77825366ed575ca434dac2fa14cea6"},
{file = "faiss_cpu-1.11.0.post1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7f4b0e03cd758d03012d88aa4a70e673d10b66f31f7c122adc0c8c323cad2e33"},
{file = "faiss_cpu-1.11.0.post1-cp313-cp313-win_amd64.whl", hash = "sha256:bc53fe59b546dbab63144dc19dcee534ad7a213db617b37aa4d0e33c26f9bbaf"},
{file = "faiss_cpu-1.11.0.post1-cp313-cp313-win_arm64.whl", hash = "sha256:9cebb720cd57afdbe9dd7ed8a689c65dc5cf1bad475c5aa6fa0d0daea890beb6"},
{file = "faiss_cpu-1.11.0.post1-cp39-cp39-macosx_13_0_x86_64.whl", hash = "sha256:3663059682589a42e3c4da0f3915492c466c886954cf9280273f92257bcfa0b4"},
{file = "faiss_cpu-1.11.0.post1-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:0348794ae91fb1454f2cddf7a9c7de23510f2a63e60c0fba0ae73bc7bf23a060"},
{file = "faiss_cpu-1.11.0.post1-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8baf46be73b4fce99f4620d99a52cdb01f7823a849f00064f02802f554d8b59f"},
{file = "faiss_cpu-1.11.0.post1-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:388a590ab2847e421ba2702ff2774835287f137fb77e24e679f0063c1c10a96f"},
{file = "faiss_cpu-1.11.0.post1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dc12b3f89cf48be3f2a20b37f310c3f1a7a5708fdf705f88d639339a24bb590b"},
{file = "faiss_cpu-1.11.0.post1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:773fa45aa98a210ab4e2c17c1b5fb45f6d7e9acb4979c9a0b320b678984428ac"},
{file = "faiss_cpu-1.11.0.post1-cp39-cp39-win_amd64.whl", hash = "sha256:6240c4b1551eedc07e76813c2e14a1583a1db6c319a92a3934bf212d0e4c7791"},
]
[package.dependencies]
numpy = ">=1.25.0,<3.0"
packaging = "*"
[[package]]
name = "fastapi"
version = "0.116.1"
@@ -1064,6 +1004,69 @@ test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask[dataframe,test]", "moto
test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard ; python_version < \"3.14\""]
tqdm = ["tqdm"]
[[package]]
name = "greenlet"
version = "3.3.0"
description = "Lightweight in-process concurrent programming"
optional = false
python-versions = ">=3.10"
groups = ["main"]
markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""
files = [
{file = "greenlet-3.3.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:6f8496d434d5cb2dce025773ba5597f71f5410ae499d5dd9533e0653258cdb3d"},
{file = "greenlet-3.3.0-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b96dc7eef78fd404e022e165ec55327f935b9b52ff355b067eb4a0267fc1cffb"},
{file = "greenlet-3.3.0-cp310-cp310-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:73631cd5cccbcfe63e3f9492aaa664d278fda0ce5c3d43aeda8e77317e38efbd"},
{file = "greenlet-3.3.0-cp310-cp310-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b299a0cb979f5d7197442dccc3aee67fce53500cd88951b7e6c35575701c980b"},
{file = "greenlet-3.3.0-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7dee147740789a4632cace364816046e43310b59ff8fb79833ab043aefa72fd5"},
{file = "greenlet-3.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:39b28e339fc3c348427560494e28d8a6f3561c8d2bcf7d706e1c624ed8d822b9"},
{file = "greenlet-3.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b3c374782c2935cc63b2a27ba8708471de4ad1abaa862ffdb1ef45a643ddbb7d"},
{file = "greenlet-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:b49e7ed51876b459bd645d83db257f0180e345d3f768a35a85437a24d5a49082"},
{file = "greenlet-3.3.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e29f3018580e8412d6aaf5641bb7745d38c85228dacf51a73bd4e26ddf2a6a8e"},
{file = "greenlet-3.3.0-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a687205fb22794e838f947e2194c0566d3812966b41c78709554aa883183fb62"},
{file = "greenlet-3.3.0-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4243050a88ba61842186cb9e63c7dfa677ec146160b0efd73b855a3d9c7fcf32"},
{file = "greenlet-3.3.0-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:670d0f94cd302d81796e37299bcd04b95d62403883b24225c6b5271466612f45"},
{file = "greenlet-3.3.0-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6cb3a8ec3db4a3b0eb8a3c25436c2d49e3505821802074969db017b87bc6a948"},
{file = "greenlet-3.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2de5a0b09eab81fc6a382791b995b1ccf2b172a9fec934747a7a23d2ff291794"},
{file = "greenlet-3.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4449a736606bd30f27f8e1ff4678ee193bc47f6ca810d705981cfffd6ce0d8c5"},
{file = "greenlet-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:7652ee180d16d447a683c04e4c5f6441bae7ba7b17ffd9f6b3aff4605e9e6f71"},
{file = "greenlet-3.3.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:b01548f6e0b9e9784a2c99c5651e5dc89ffcbe870bc5fb2e5ef864e9cc6b5dcb"},
{file = "greenlet-3.3.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:349345b770dc88f81506c6861d22a6ccd422207829d2c854ae2af8025af303e3"},
{file = "greenlet-3.3.0-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e8e18ed6995e9e2c0b4ed264d2cf89260ab3ac7e13555b8032b25a74c6d18655"},
{file = "greenlet-3.3.0-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c024b1e5696626890038e34f76140ed1daf858e37496d33f2af57f06189e70d7"},
{file = "greenlet-3.3.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:047ab3df20ede6a57c35c14bf5200fcf04039d50f908270d3f9a7a82064f543b"},
{file = "greenlet-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2d9ad37fc657b1102ec880e637cccf20191581f75c64087a549e66c57e1ceb53"},
{file = "greenlet-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:83cd0e36932e0e7f36a64b732a6f60c2fc2df28c351bae79fbaf4f8092fe7614"},
{file = "greenlet-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a7a34b13d43a6b78abf828a6d0e87d3385680eaf830cd60d20d52f249faabf39"},
{file = "greenlet-3.3.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:a1e41a81c7e2825822f4e068c48cb2196002362619e2d70b148f20a831c00739"},
{file = "greenlet-3.3.0-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9f515a47d02da4d30caaa85b69474cec77b7929b2e936ff7fb853d42f4bf8808"},
{file = "greenlet-3.3.0-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7d2d9fd66bfadf230b385fdc90426fcd6eb64db54b40c495b72ac0feb5766c54"},
{file = "greenlet-3.3.0-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30a6e28487a790417d036088b3bcb3f3ac7d8babaa7d0139edbaddebf3af9492"},
{file = "greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:087ea5e004437321508a8d6f20efc4cfec5e3c30118e1417ea96ed1d93950527"},
{file = "greenlet-3.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ab97cf74045343f6c60a39913fa59710e4bd26a536ce7ab2397adf8b27e67c39"},
{file = "greenlet-3.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5375d2e23184629112ca1ea89a53389dddbffcf417dad40125713d88eb5f96e8"},
{file = "greenlet-3.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:9ee1942ea19550094033c35d25d20726e4f1c40d59545815e1128ac58d416d38"},
{file = "greenlet-3.3.0-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:60c2ef0f578afb3c8d92ea07ad327f9a062547137afe91f38408f08aacab667f"},
{file = "greenlet-3.3.0-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a5d554d0712ba1de0a6c94c640f7aeba3f85b3a6e1f2899c11c2c0428da9365"},
{file = "greenlet-3.3.0-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3a898b1e9c5f7307ebbde4102908e6cbfcb9ea16284a3abe15cab996bee8b9b3"},
{file = "greenlet-3.3.0-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:dcd2bdbd444ff340e8d6bdf54d2f206ccddbb3ccfdcd3c25bf4afaa7b8f0cf45"},
{file = "greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5773edda4dc00e173820722711d043799d3adb4f01731f40619e07ea2750b955"},
{file = "greenlet-3.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ac0549373982b36d5fd5d30beb8a7a33ee541ff98d2b502714a09f1169f31b55"},
{file = "greenlet-3.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d198d2d977460358c3b3a4dc844f875d1adb33817f0613f663a656f463764ccc"},
{file = "greenlet-3.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:73f51dd0e0bdb596fb0417e475fa3c5e32d4c83638296e560086b8d7da7c4170"},
{file = "greenlet-3.3.0-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:d6ed6f85fae6cdfdb9ce04c9bf7a08d666cfcfb914e7d006f44f840b46741931"},
{file = "greenlet-3.3.0-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d9125050fcf24554e69c4cacb086b87b3b55dc395a8b3ebe6487b045b2614388"},
{file = "greenlet-3.3.0-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:87e63ccfa13c0a0f6234ed0add552af24cc67dd886731f2261e46e241608bee3"},
{file = "greenlet-3.3.0-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2662433acbca297c9153a4023fe2161c8dcfdcc91f10433171cf7e7d94ba2221"},
{file = "greenlet-3.3.0-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3c6e9b9c1527a78520357de498b0e709fb9e2f49c3a513afd5a249007261911b"},
{file = "greenlet-3.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:286d093f95ec98fdd92fcb955003b8a3d054b4e2cab3e2707a5039e7b50520fd"},
{file = "greenlet-3.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c10513330af5b8ae16f023e8ddbfb486ab355d04467c4679c5cfe4659975dd9"},
{file = "greenlet-3.3.0.tar.gz", hash = "sha256:a82bb225a4e9e4d653dd2fb7b8b2d36e4fb25bc0165422a11e48b88e9e6f78fb"},
]
[package.extras]
docs = ["Sphinx", "furo"]
test = ["objgraph", "psutil", "setuptools"]
[[package]]
name = "grpcio"
version = "1.76.0"
@@ -1141,6 +1144,73 @@ typing-extensions = ">=4.12,<5.0"
[package.extras]
protobuf = ["grpcio-tools (>=1.76.0)"]
[[package]]
name = "grpcio-tools"
version = "1.71.2"
description = "Protobuf code generator for gRPC"
optional = false
python-versions = ">=3.9"
groups = ["main"]
markers = "python_version >= \"3.13\""
files = [
{file = "grpcio_tools-1.71.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:ab8a28c2e795520d6dc6ffd7efaef4565026dbf9b4f5270de2f3dd1ce61d2318"},
{file = "grpcio_tools-1.71.2-cp310-cp310-macosx_10_14_universal2.whl", hash = "sha256:654ecb284a592d39a85556098b8c5125163435472a20ead79b805cf91814b99e"},
{file = "grpcio_tools-1.71.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:b49aded2b6c890ff690d960e4399a336c652315c6342232c27bd601b3705739e"},
{file = "grpcio_tools-1.71.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7811a6fc1c4b4e5438e5eb98dbd52c2dc4a69d1009001c13356e6636322d41a"},
{file = "grpcio_tools-1.71.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:393a9c80596aa2b3f05af854e23336ea8c295593bbb35d9adae3d8d7943672bd"},
{file = "grpcio_tools-1.71.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:823e1f23c12da00f318404c4a834bb77cd150d14387dee9789ec21b335249e46"},
{file = "grpcio_tools-1.71.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9bfbea79d6aec60f2587133ba766ede3dc3e229641d1a1e61d790d742a3d19eb"},
{file = "grpcio_tools-1.71.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:32f3a67b10728835b5ffb63fbdbe696d00e19a27561b9cf5153e72dbb93021ba"},
{file = "grpcio_tools-1.71.2-cp310-cp310-win32.whl", hash = "sha256:7fcf9d92c710bfc93a1c0115f25e7d49a65032ff662b38b2f704668ce0a938df"},
{file = "grpcio_tools-1.71.2-cp310-cp310-win_amd64.whl", hash = "sha256:914b4275be810290266e62349f2d020bb7cc6ecf9edb81da3c5cddb61a95721b"},
{file = "grpcio_tools-1.71.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:0acb8151ea866be5b35233877fbee6445c36644c0aa77e230c9d1b46bf34b18b"},
{file = "grpcio_tools-1.71.2-cp311-cp311-macosx_10_14_universal2.whl", hash = "sha256:b28f8606f4123edb4e6da281547465d6e449e89f0c943c376d1732dc65e6d8b3"},
{file = "grpcio_tools-1.71.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:cbae6f849ad2d1f5e26cd55448b9828e678cb947fa32c8729d01998238266a6a"},
{file = "grpcio_tools-1.71.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4d1027615cfb1e9b1f31f2f384251c847d68c2f3e025697e5f5c72e26ed1316"},
{file = "grpcio_tools-1.71.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9bac95662dc69338edb9eb727cc3dd92342131b84b12b3e8ec6abe973d4cbf1b"},
{file = "grpcio_tools-1.71.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c50250c7248055040f89eb29ecad39d3a260a4b6d3696af1575945f7a8d5dcdc"},
{file = "grpcio_tools-1.71.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6ab1ad955e69027ef12ace4d700c5fc36341bdc2f420e87881e9d6d02af3d7b8"},
{file = "grpcio_tools-1.71.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dd75dde575781262b6b96cc6d0b2ac6002b2f50882bf5e06713f1bf364ee6e09"},
{file = "grpcio_tools-1.71.2-cp311-cp311-win32.whl", hash = "sha256:9a3cb244d2bfe0d187f858c5408d17cb0e76ca60ec9a274c8fd94cc81457c7fc"},
{file = "grpcio_tools-1.71.2-cp311-cp311-win_amd64.whl", hash = "sha256:00eb909997fd359a39b789342b476cbe291f4dd9c01ae9887a474f35972a257e"},
{file = "grpcio_tools-1.71.2-cp312-cp312-linux_armv7l.whl", hash = "sha256:bfc0b5d289e383bc7d317f0e64c9dfb59dc4bef078ecd23afa1a816358fb1473"},
{file = "grpcio_tools-1.71.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:b4669827716355fa913b1376b1b985855d5cfdb63443f8d18faf210180199006"},
{file = "grpcio_tools-1.71.2-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:d4071f9b44564e3f75cdf0f05b10b3e8c7ea0ca5220acbf4dc50b148552eef2f"},
{file = "grpcio_tools-1.71.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a28eda8137d587eb30081384c256f5e5de7feda34776f89848b846da64e4be35"},
{file = "grpcio_tools-1.71.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b19c083198f5eb15cc69c0a2f2c415540cbc636bfe76cea268e5894f34023b40"},
{file = "grpcio_tools-1.71.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:784c284acda0d925052be19053d35afbf78300f4d025836d424cf632404f676a"},
{file = "grpcio_tools-1.71.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:381e684d29a5d052194e095546eef067201f5af30fd99b07b5d94766f44bf1ae"},
{file = "grpcio_tools-1.71.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3e4b4801fabd0427fc61d50d09588a01b1cfab0ec5e8a5f5d515fbdd0891fd11"},
{file = "grpcio_tools-1.71.2-cp312-cp312-win32.whl", hash = "sha256:84ad86332c44572305138eafa4cc30040c9a5e81826993eae8227863b700b490"},
{file = "grpcio_tools-1.71.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e1108d37eecc73b1c4a27350a6ed921b5dda25091700c1da17cfe30761cd462"},
{file = "grpcio_tools-1.71.2-cp313-cp313-linux_armv7l.whl", hash = "sha256:b0f0a8611614949c906e25c225e3360551b488d10a366c96d89856bcef09f729"},
{file = "grpcio_tools-1.71.2-cp313-cp313-macosx_10_14_universal2.whl", hash = "sha256:7931783ea7ac42ac57f94c5047d00a504f72fbd96118bf7df911bb0e0435fc0f"},
{file = "grpcio_tools-1.71.2-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:d188dc28e069aa96bb48cb11b1338e47ebdf2e2306afa58a8162cc210172d7a8"},
{file = "grpcio_tools-1.71.2-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f36c4b3cc42ad6ef67430639174aaf4a862d236c03c4552c4521501422bfaa26"},
{file = "grpcio_tools-1.71.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4bd9ed12ce93b310f0cef304176049d0bc3b9f825e9c8c6a23e35867fed6affd"},
{file = "grpcio_tools-1.71.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7ce27e76dd61011182d39abca38bae55d8a277e9b7fe30f6d5466255baccb579"},
{file = "grpcio_tools-1.71.2-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:dcc17bf59b85c3676818f2219deacac0156492f32ca165e048427d2d3e6e1157"},
{file = "grpcio_tools-1.71.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:706360c71bdd722682927a1fb517c276ccb816f1e30cb71f33553e5817dc4031"},
{file = "grpcio_tools-1.71.2-cp313-cp313-win32.whl", hash = "sha256:bcf751d5a81c918c26adb2d6abcef71035c77d6eb9dd16afaf176ee096e22c1d"},
{file = "grpcio_tools-1.71.2-cp313-cp313-win_amd64.whl", hash = "sha256:b1581a1133552aba96a730178bc44f6f1a071f0eb81c5b6bc4c0f89f5314e2b8"},
{file = "grpcio_tools-1.71.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:344aa8973850bc36fd0ce81aa6443bd5ab41dc3a25903b36cd1e70f71ceb53c9"},
{file = "grpcio_tools-1.71.2-cp39-cp39-macosx_10_14_universal2.whl", hash = "sha256:4d32450a4c8a97567b32154379d97398b7eba090bce756aff57aef5d80d8c953"},
{file = "grpcio_tools-1.71.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:f596dbc1e46f9e739e09af553bf3c3321be3d603e579f38ffa9f2e0e4a25f4f7"},
{file = "grpcio_tools-1.71.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7723ff599104188cb870d01406b65e67e2493578347cc13d50e9dc372db36ef"},
{file = "grpcio_tools-1.71.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:948b018b6b69641b10864a3f19dd3c2b7ca3dfce4460eb836ab28b058e7deb3e"},
{file = "grpcio_tools-1.71.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0dd058c06ce95a99f78851c05db30af507227878013d46a8339e44fb24855ff7"},
{file = "grpcio_tools-1.71.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b3312bdd5952bba2ef8e4314b2e2f886fa23b2f6d605cd56097605ae65d30515"},
{file = "grpcio_tools-1.71.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:085de63843946b967ae561e7dd832fa03147f01282f462a0a0cbe1571d9ee986"},
{file = "grpcio_tools-1.71.2-cp39-cp39-win32.whl", hash = "sha256:c1ff5f79f49768d4c561508b62878f27198b3420a87390e0c51969b8dbfcfca8"},
{file = "grpcio_tools-1.71.2-cp39-cp39-win_amd64.whl", hash = "sha256:c3e02b345cf96673dcf77599a61482f68c318a62c9cde20a5ae0882619ff8c98"},
{file = "grpcio_tools-1.71.2.tar.gz", hash = "sha256:b5304d65c7569b21270b568e404a5a843cf027c66552a6a0978b23f137679c09"},
]
[package.dependencies]
grpcio = ">=1.71.2"
protobuf = ">=5.26.1,<6.0dev"
setuptools = "*"
[[package]]
name = "grpclib"
version = "0.4.8"
@@ -1259,6 +1329,7 @@ files = [
[package.dependencies]
anyio = "*"
certifi = "*"
h2 = {version = ">=3,<5", optional = true, markers = "extra == \"http2\""}
httpcore = "==1.*"
idna = "*"
@@ -1504,18 +1575,6 @@ files = [
{file = "jiter-0.11.1.tar.gz", hash = "sha256:849dcfc76481c0ea0099391235b7ca97d7279e0fa4c86005457ac7c88e8b76dc"},
]
[[package]]
name = "jmespath"
version = "1.0.1"
description = "JSON Matching Expressions"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"},
{file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"},
]
[[package]]
name = "joblib"
version = "1.5.2"
@@ -1528,6 +1587,18 @@ files = [
{file = "joblib-1.5.2.tar.gz", hash = "sha256:3faa5c39054b2f03ca547da9b2f52fde67c06240c31853f306aea97f13647b55"},
]
[[package]]
name = "json-repair"
version = "0.29.10"
description = "A package to repair broken json strings"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "json_repair-0.29.10-py3-none-any.whl", hash = "sha256:750eacc3c0228a72b512654855515c1a88174b641b7b834f769a01f49a0e65ca"},
{file = "json_repair-0.29.10.tar.gz", hash = "sha256:8050f9db6e6a42f843e21b3fe8410308b0f6085bfd81506343552522b6b707f8"},
]
[[package]]
name = "jsonpatch"
version = "1.33"
@@ -2015,28 +2086,33 @@ files = [
]
[[package]]
name = "memori"
version = "3.1.3"
description = "Memori Python SDK"
name = "mem0ai"
version = "0.1.116"
description = "Long-term memory for AI Agents"
optional = false
python-versions = ">=3.10"
python-versions = "<4.0,>=3.9"
groups = ["main"]
files = [
{file = "memori-3.1.3-py3-none-any.whl", hash = "sha256:81934d61ecf0574ba948b9012f2cde52912f3d30887ad504408ecfa171734e36"},
{file = "memori-3.1.3.tar.gz", hash = "sha256:9f936c15ba0a684a1bc62619d7558af6a6e99171bf1ef1e6eb19bb952a9f6d94"},
{file = "mem0ai-0.1.116-py3-none-any.whl", hash = "sha256:245b08f1e615e057ebacc52462ab729a7282abe05e8d4957236d893b3d32a990"},
{file = "mem0ai-0.1.116.tar.gz", hash = "sha256:c33e08c5464f96b1cf109893dba5d394d8cc5788a8400d85cb1ceed696ee3204"},
]
[package.dependencies]
aiohttp = ">=3.9.0"
botocore = ">=1.34.0"
faiss-cpu = ">=1.7.0"
grpcio = ">=1.60.0"
numpy = ">=1.24.0"
protobuf = ">=4.25.0,<6.0.0"
psycopg = {version = ">=3.1.0", extras = ["binary"]}
pyfiglet = ">=0.8.0"
requests = ">=2.32.5"
sentence-transformers = ">=3.0.0"
openai = ">=1.33.0"
posthog = ">=3.5.0"
protobuf = ">=5.29.0,<6.0.0"
pydantic = ">=2.7.3"
pytz = ">=2024.1"
qdrant-client = ">=1.9.1"
sqlalchemy = ">=2.0.31"
[package.extras]
dev = ["isort (>=5.13.2)", "pytest (>=8.2.2)", "ruff (>=0.6.5)"]
extras = ["boto3 (>=1.34.0)", "elasticsearch (>=8.0.0)", "langchain-community (>=0.0.0)", "langchain-memgraph (>=0.1.0)", "opensearch-py (>=2.0.0)", "sentence-transformers (>=5.0.0)"]
graph = ["langchain-aws (>=0.2.23)", "langchain-neo4j (>=0.4.0)", "neo4j (>=5.23.1)", "rank-bm25 (>=0.2.2)"]
llms = ["google-genai (>=1.0.0)", "google-generativeai (>=0.3.0)", "groq (>=0.3.0)", "litellm (>=0.1.0)", "ollama (>=0.1.0)", "together (>=0.2.10)", "vertexai (>=0.1.0)"]
test = ["pytest (>=8.2.2)", "pytest-asyncio (>=0.23.7)", "pytest-mock (>=3.14.0)"]
vector-stores = ["azure-search-documents (>=11.4.0b8)", "chromadb (>=0.4.24)", "faiss-cpu (>=1.7.4)", "pinecone (<=7.3.0)", "pinecone-text (>=0.10.0)", "psycopg (>=3.2.8)", "pymochow (>=2.2.9)", "pymongo (>=4.13.2)", "upstash-vector (>=0.1.0)", "vecs (>=0.4.0)", "weaviate-client (>=4.4.0,<4.15.0)"]
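
For orientation, mem0ai's core API (per its public documentation) looks roughly like the sketch below; this is generic library usage, not code from this commit, and the vector-store/LLM configuration used by the new mem0_manager.py is not shown in this diff:

```python
from mem0 import Memory

# Generic mem0ai usage sketch; provider configuration is an assumption.
m = Memory()  # production code would typically use Memory.from_config({...})
m.add("The user prefers concise answers", user_id="user-42")
results = m.search("how should replies be phrased?", user_id="user-42", limit=5)
```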
[[package]]
name = "modal"
@@ -3010,6 +3086,73 @@ files = [
dev = ["pre-commit", "tox"]
testing = ["coverage", "pytest", "pytest-benchmark"]
[[package]]
name = "portalocker"
version = "2.10.1"
description = "Wraps the portalocker recipe for easy usage"
optional = false
python-versions = ">=3.8"
groups = ["main"]
markers = "python_version >= \"3.13\""
files = [
{file = "portalocker-2.10.1-py3-none-any.whl", hash = "sha256:53a5984ebc86a025552264b459b46a2086e269b21823cb572f8f28ee759e45bf"},
{file = "portalocker-2.10.1.tar.gz", hash = "sha256:ef1bf844e878ab08aee7e40184156e1151f228f103aa5c6bd0724cc330960f8f"},
]
[package.dependencies]
pywin32 = {version = ">=226", markers = "platform_system == \"Windows\""}
[package.extras]
docs = ["sphinx (>=1.7.1)"]
redis = ["redis"]
tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)", "types-redis"]
[[package]]
name = "portalocker"
version = "3.2.0"
description = "Wraps the portalocker recipe for easy usage"
optional = false
python-versions = ">=3.9"
groups = ["main"]
markers = "python_version == \"3.12\""
files = [
{file = "portalocker-3.2.0-py3-none-any.whl", hash = "sha256:3cdc5f565312224bc570c49337bd21428bba0ef363bbcf58b9ef4a9f11779968"},
{file = "portalocker-3.2.0.tar.gz", hash = "sha256:1f3002956a54a8c3730586c5c77bf18fae4149e07eaf1c29fc3faf4d5a3f89ac"},
]
[package.dependencies]
pywin32 = {version = ">=226", markers = "platform_system == \"Windows\""}
[package.extras]
docs = ["portalocker[tests]"]
redis = ["redis"]
tests = ["coverage-conditional-plugin (>=0.9.0)", "portalocker[redis]", "pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-rerunfailures (>=15.0)", "pytest-timeout (>=2.1.0)", "sphinx (>=6.0.0)", "types-pywin32 (>=310.0.0.20250429)", "types-redis"]
[[package]]
name = "posthog"
version = "7.6.0"
description = "Integrate PostHog into any python application."
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "posthog-7.6.0-py3-none-any.whl", hash = "sha256:c4dd78cf77c4fecceb965f86066e5ac37886ef867d68ffe75a1db5d681d7d9ad"},
{file = "posthog-7.6.0.tar.gz", hash = "sha256:941dfd278ee427c9b14640f09b35b5bb52a71bdf028d7dbb7307e1838fd3002e"},
]
[package.dependencies]
backoff = ">=1.10.0"
distro = ">=1.5.0"
python-dateutil = ">=2.2"
requests = ">=2.7,<3.0"
six = ">=1.5"
typing-extensions = ">=4.2.0"
[package.extras]
dev = ["django-stubs", "lxml", "mypy", "mypy-baseline", "packaging", "pre-commit", "pydantic", "ruff", "setuptools", "tomli", "tomli_w", "twine", "types-mock", "types-python-dateutil", "types-requests", "types-setuptools", "types-six", "wheel"]
langchain = ["langchain (>=0.2.0)"]
test = ["anthropic (>=0.72)", "coverage", "django", "freezegun (==1.5.1)", "google-genai", "langchain-anthropic (>=1.0)", "langchain-community (>=0.4)", "langchain-core (>=1.0)", "langchain-openai (>=1.0)", "langgraph (>=1.0)", "mock (>=2.0.0)", "openai (>=2.0)", "parameterized (>=0.8.1)", "pydantic", "pytest", "pytest-asyncio", "pytest-timeout"]
[[package]]
name = "prompt-toolkit"
version = "3.0.52"
@@ -3224,7 +3367,6 @@ files = [
]
[package.dependencies]
psycopg-binary = {version = "3.3.2", optional = true, markers = "implementation_name != \"pypy\" and extra == \"binary\""}
typing-extensions = {version = ">=4.6", markers = "python_version < \"3.13\""}
tzdata = {version = "*", markers = "sys_platform == \"win32\""}
@@ -3236,72 +3378,6 @@ docs = ["Sphinx (>=5.0)", "furo (==2022.6.21)", "sphinx-autobuild (>=2021.3.14)"
pool = ["psycopg-pool"]
test = ["anyio (>=4.0)", "mypy (>=1.19.0) ; implementation_name != \"pypy\"", "pproxy (>=2.7)", "pytest (>=6.2.5)", "pytest-cov (>=3.0)", "pytest-randomly (>=3.5)"]
[[package]]
name = "psycopg-binary"
version = "3.3.2"
description = "PostgreSQL database adapter for Python -- C optimisation distribution"
optional = false
python-versions = ">=3.10"
groups = ["main"]
markers = "implementation_name != \"pypy\""
files = [
{file = "psycopg_binary-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0768c5f32934bb52a5df098317eca9bdcf411de627c5dca2ee57662b64b54b41"},
{file = "psycopg_binary-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:09b3014013f05cd89828640d3a1db5f829cc24ad8fa81b6e42b2c04685a0c9d4"},
{file = "psycopg_binary-3.3.2-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:3789d452a9d17a841c7f4f97bbcba51a21f957ea35641a4c98507520e6b6a068"},
{file = "psycopg_binary-3.3.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:44e89938d36acc4495735af70a886d206a5bfdc80258f95b69b52f68b2968d9e"},
{file = "psycopg_binary-3.3.2-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90ed9da805e52985b0202aed4f352842c907c6b4fc6c7c109c6e646c32e2f43b"},
{file = "psycopg_binary-3.3.2-cp310-cp310-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c3a9ccdfee4ae59cf9bf1822777e763bc097ed208f4901e21537fca1070e1391"},
{file = "psycopg_binary-3.3.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:de9173f8cc0efd88ac2a89b3b6c287a9a0011cdc2f53b2a12c28d6fd55f9f81c"},
{file = "psycopg_binary-3.3.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:0611f4822674f3269e507a307236efb62ae5a828fcfc923ac85fe22ca19fd7c8"},
{file = "psycopg_binary-3.3.2-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:522b79c7db547767ca923e441c19b97a2157f2f494272a119c854bba4804e186"},
{file = "psycopg_binary-3.3.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1ea41c0229f3f5a3844ad0857a83a9f869aa7b840448fa0c200e6bcf85d33d19"},
{file = "psycopg_binary-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:8ea05b499278790a8fa0ff9854ab0de2542aca02d661ddff94e830df971ff640"},
{file = "psycopg_binary-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:94503b79f7da0b65c80d0dbb2f81dd78b300319ec2435d5e6dcf9622160bc2fa"},
{file = "psycopg_binary-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:07a5f030e0902ec3e27d0506ceb01238c0aecbc73ecd7fa0ee55f86134600b5b"},
{file = "psycopg_binary-3.3.2-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e09d0d93d35c134704a2cb2b15f81ffc8174fd602f3e08f7b1a3d8896156cf0"},
{file = "psycopg_binary-3.3.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:649c1d33bedda431e0c1df646985fbbeb9274afa964e1aef4be053c0f23a2924"},
{file = "psycopg_binary-3.3.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c5774272f754605059521ff037a86e680342e3847498b0aa86b0f3560c70963c"},
{file = "psycopg_binary-3.3.2-cp311-cp311-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d391b70c9cc23f6e1142729772a011f364199d2c5ddc0d596f5f43316fbf982d"},
{file = "psycopg_binary-3.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f3f601f32244a677c7b029ec39412db2772ad04a28bc2cbb4b1f0931ed0ffad7"},
{file = "psycopg_binary-3.3.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:0ae60e910531cfcc364a8f615a7941cac89efeb3f0fffe0c4824a6d11461eef7"},
{file = "psycopg_binary-3.3.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7c43a773dd1a481dbb2fe64576aa303d80f328cce0eae5e3e4894947c41d1da7"},
{file = "psycopg_binary-3.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5a327327f1188b3fbecac41bf1973a60b86b2eb237db10dc945bd3dc97ec39e4"},
{file = "psycopg_binary-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:136c43f185244893a527540307167f5d3ef4e08786508afe45d6f146228f5aa9"},
{file = "psycopg_binary-3.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a9387ab615f929e71ef0f4a8a51e986fa06236ccfa9f3ec98a88f60fbf230634"},
{file = "psycopg_binary-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3ff7489df5e06c12d1829544eaec64970fe27fe300f7cf04c8495fe682064688"},
{file = "psycopg_binary-3.3.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:9742580ecc8e1ac45164e98d32ca6df90da509c2d3ff26be245d94c430f92db4"},
{file = "psycopg_binary-3.3.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d45acedcaa58619355f18e0f42af542fcad3fd84ace4b8355d3a5dea23318578"},
{file = "psycopg_binary-3.3.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d88f32ff8c47cb7f4e7e7a9d1747dcee6f3baa19ed9afa9e5694fd2fb32b61ed"},
{file = "psycopg_binary-3.3.2-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:59d0163c4617a2c577cb34afbed93d7a45b8c8364e54b2bd2020ff25d5f5f860"},
{file = "psycopg_binary-3.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e750afe74e6c17b2c7046d2c3e3173b5a3f6080084671c8aa327215323df155b"},
{file = "psycopg_binary-3.3.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f26f113013c4dcfbfe9ced57b5bad2035dda1a7349f64bf726021968f9bccad3"},
{file = "psycopg_binary-3.3.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:8309ee4569dced5e81df5aa2dcd48c7340c8dee603a66430f042dfbd2878edca"},
{file = "psycopg_binary-3.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c6464150e25b68ae3cb04c4e57496ea11ebfaae4d98126aea2f4702dd43e3c12"},
{file = "psycopg_binary-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:716a586f99bbe4f710dc58b40069fcb33c7627e95cc6fc936f73c9235e07f9cf"},
{file = "psycopg_binary-3.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fc5a189e89cbfff174588665bb18d28d2d0428366cc9dae5864afcaa2e57380b"},
{file = "psycopg_binary-3.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:083c2e182be433f290dc2c516fd72b9b47054fcd305cce791e0a50d9e93e06f2"},
{file = "psycopg_binary-3.3.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:ac230e3643d1c436a2dfb59ca84357dfc6862c9f372fc5dbd96bafecae581f9f"},
{file = "psycopg_binary-3.3.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d8c899a540f6c7585cee53cddc929dd4d2db90fd828e37f5d4017b63acbc1a5d"},
{file = "psycopg_binary-3.3.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:50ff10ab8c0abdb5a5451b9315538865b50ba64c907742a1385fdf5f5772b73e"},
{file = "psycopg_binary-3.3.2-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:23d2594af848c1fd3d874a9364bef50730124e72df7bb145a20cb45e728c50ed"},
{file = "psycopg_binary-3.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ea4fe6b4ead3bbbe27244ea224fcd1f53cb119afc38b71a2f3ce570149a03e30"},
{file = "psycopg_binary-3.3.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:742ce48cde825b8e52fb1a658253d6d1ff66d152081cbc76aa45e2986534858d"},
{file = "psycopg_binary-3.3.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:e22bf6b54df994aff37ab52695d635f1ef73155e781eee1f5fa75bc08b58c8da"},
{file = "psycopg_binary-3.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8db9034cde3bcdafc66980f0130813f5c5d19e74b3f2a19fb3cfbc25ad113121"},
{file = "psycopg_binary-3.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:df65174c7cf6b05ea273ce955927d3270b3a6e27b0b12762b009ce6082b8d3fc"},
{file = "psycopg_binary-3.3.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:9ca24062cd9b2270e4d77576042e9cc2b1d543f09da5aba1f1a3d016cea28390"},
{file = "psycopg_binary-3.3.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c749770da0947bc972e512f35366dd4950c0e34afad89e60b9787a37e97cb443"},
{file = "psycopg_binary-3.3.2-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:03b7cd73fb8c45d272a34ae7249713e32492891492681e3cf11dff9531cf37e9"},
{file = "psycopg_binary-3.3.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:43b130e3b6edcb5ee856c7167ccb8561b473308c870ed83978ae478613764f1c"},
{file = "psycopg_binary-3.3.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7c1feba5a8c617922321aef945865334e468337b8fc5c73074f5e63143013b5a"},
{file = "psycopg_binary-3.3.2-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cabb2a554d9a0a6bf84037d86ca91782f087dfff2a61298d0b00c19c0bc43f6d"},
{file = "psycopg_binary-3.3.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:74bc306c4b4df35b09bc8cecf806b271e1c5d708f7900145e4e54a2e5dedfed0"},
{file = "psycopg_binary-3.3.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:d79b0093f0fbf7a962d6a46ae292dc056c65d16a8ee9361f3cfbafd4c197ab14"},
{file = "psycopg_binary-3.3.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:1586e220be05547c77afc326741dd41cc7fba38a81f9931f616ae98865439678"},
{file = "psycopg_binary-3.3.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:458696a5fa5dad5b6fb5d5862c22454434ce4fe1cf66ca6c0de5f904cbc1ae3e"},
{file = "psycopg_binary-3.3.2-cp314-cp314-win_amd64.whl", hash = "sha256:04bb2de4ba69d6f8395b446ede795e8884c040ec71d01dd07ac2b2d18d4153d1"},
]
[[package]]
name = "psycopg-pool"
version = "3.3.0"
@@ -3555,18 +3631,6 @@ gcp-secret-manager = ["google-cloud-secret-manager (>=2.23.1)"]
toml = ["tomli (>=2.0.1)"]
yaml = ["pyyaml (>=6.0.1)"]
[[package]]
name = "pyfiglet"
version = "1.0.4"
description = "Pure-python FIGlet implementation"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "pyfiglet-1.0.4-py3-none-any.whl", hash = "sha256:65b57b7a8e1dff8a67dc8e940a117238661d5e14c3e49121032bd404d9b2b39f"},
{file = "pyfiglet-1.0.4.tar.gz", hash = "sha256:db9c9940ed1bf3048deff534ed52ff2dafbbc2cd7610b17bb5eca1df6d4278ef"},
]
[[package]]
name = "pygments"
version = "2.19.2"
@@ -3665,7 +3729,7 @@ description = "Python for Window Extensions"
optional = false
python-versions = "*"
groups = ["main"]
markers = "sys_platform == \"win32\""
markers = "sys_platform == \"win32\" or platform_system == \"Windows\""
files = [
{file = "pywin32-311-cp310-cp310-win32.whl", hash = "sha256:d03ff496d2a0cd4a5893504789d4a15399133fe82517455e78bad62efbb7f0a3"},
{file = "pywin32-311-cp310-cp310-win_amd64.whl", hash = "sha256:797c2772017851984b97180b0bebe4b620bb86328e8a884bb626156295a63b3b"},
@@ -3772,6 +3836,58 @@ files = [
{file = "pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f"},
]
[[package]]
name = "qdrant-client"
version = "1.12.1"
description = "Client library for the Qdrant vector search engine"
optional = false
python-versions = ">=3.8"
groups = ["main"]
markers = "python_version >= \"3.13\""
files = [
{file = "qdrant_client-1.12.1-py3-none-any.whl", hash = "sha256:b2d17ce18e9e767471368380dd3bbc4a0e3a0e2061fedc9af3542084b48451e0"},
{file = "qdrant_client-1.12.1.tar.gz", hash = "sha256:35e8e646f75b7b883b3d2d0ee4c69c5301000bba41c82aa546e985db0f1aeb72"},
]
[package.dependencies]
grpcio = ">=1.41.0"
grpcio-tools = ">=1.41.0"
httpx = {version = ">=0.20.0", extras = ["http2"]}
numpy = {version = ">=1.26", markers = "python_version >= \"3.12\""}
portalocker = ">=2.7.0,<3.0.0"
pydantic = ">=1.10.8"
urllib3 = ">=1.26.14,<3"
[package.extras]
fastembed = ["fastembed (==0.3.6) ; python_version < \"3.13\""]
fastembed-gpu = ["fastembed-gpu (==0.3.6) ; python_version < \"3.13\""]
[[package]]
name = "qdrant-client"
version = "1.16.2"
description = "Client library for the Qdrant vector search engine"
optional = false
python-versions = ">=3.10"
groups = ["main"]
markers = "python_version == \"3.12\""
files = [
{file = "qdrant_client-1.16.2-py3-none-any.whl", hash = "sha256:442c7ef32ae0f005e88b5d3c0783c63d4912b97ae756eb5e052523be682f17d3"},
{file = "qdrant_client-1.16.2.tar.gz", hash = "sha256:ca4ef5f9be7b5eadeec89a085d96d5c723585a391eb8b2be8192919ab63185f0"},
]
[package.dependencies]
grpcio = ">=1.41.0"
httpx = {version = ">=0.20.0", extras = ["http2"]}
numpy = {version = ">=1.26", markers = "python_version == \"3.12\""}
portalocker = ">=2.7.0,<4.0"
protobuf = ">=3.20.0"
pydantic = ">=1.10.8,<2.0.dev0 || >2.2.0"
urllib3 = ">=1.26.14,<3"
[package.extras]
fastembed = ["fastembed (>=0.7,<0.8)"]
fastembed-gpu = ["fastembed-gpu (>=0.7,<0.8)"]
[[package]]
name = "referencing"
version = "0.37.0"
@@ -4362,6 +4478,28 @@ onnx-gpu = ["optimum[onnxruntime-gpu] (>=1.23.1)"]
openvino = ["optimum-intel[openvino] (>=1.20.0)"]
train = ["accelerate (>=0.20.3)", "datasets"]
[[package]]
name = "setuptools"
version = "80.9.0"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
optional = false
python-versions = ">=3.9"
groups = ["main"]
markers = "python_version >= \"3.13\""
files = [
{file = "setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922"},
{file = "setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c"},
]
[package.extras]
check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""]
core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
enabler = ["pytest-enabler (>=2.2)"]
test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"]
[[package]]
name = "shellingham"
version = "1.5.4"
@@ -4410,6 +4548,97 @@ files = [
{file = "soupsieve-2.8.1.tar.gz", hash = "sha256:4cf733bc50fa805f5df4b8ef4740fc0e0fa6218cf3006269afd3f9d6d80fd350"},
]
[[package]]
name = "sqlalchemy"
version = "2.0.45"
description = "Database Abstraction Library"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "sqlalchemy-2.0.45-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c64772786d9eee72d4d3784c28f0a636af5b0a29f3fe26ff11f55efe90c0bd85"},
{file = "sqlalchemy-2.0.45-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7ae64ebf7657395824a19bca98ab10eb9a3ecb026bf09524014f1bb81cb598d4"},
{file = "sqlalchemy-2.0.45-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f02325709d1b1a1489f23a39b318e175a171497374149eae74d612634b234c0"},
{file = "sqlalchemy-2.0.45-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d2c3684fca8a05f0ac1d9a21c1f4a266983a7ea9180efb80ffeb03861ecd01a0"},
{file = "sqlalchemy-2.0.45-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:040f6f0545b3b7da6b9317fc3e922c9a98fc7243b2a1b39f78390fc0942f7826"},
{file = "sqlalchemy-2.0.45-cp310-cp310-win32.whl", hash = "sha256:830d434d609fe7bfa47c425c445a8b37929f140a7a44cdaf77f6d34df3a7296a"},
{file = "sqlalchemy-2.0.45-cp310-cp310-win_amd64.whl", hash = "sha256:0209d9753671b0da74da2cfbb9ecf9c02f72a759e4b018b3ab35f244c91842c7"},
{file = "sqlalchemy-2.0.45-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e90a344c644a4fa871eb01809c32096487928bd2038bf10f3e4515cb688cc56"},
{file = "sqlalchemy-2.0.45-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8c8b41b97fba5f62349aa285654230296829672fc9939cd7f35aab246d1c08b"},
{file = "sqlalchemy-2.0.45-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:12c694ed6468333a090d2f60950e4250b928f457e4962389553d6ba5fe9951ac"},
{file = "sqlalchemy-2.0.45-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f7d27a1d977a1cfef38a0e2e1ca86f09c4212666ce34e6ae542f3ed0a33bc606"},
{file = "sqlalchemy-2.0.45-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d62e47f5d8a50099b17e2bfc1b0c7d7ecd8ba6b46b1507b58cc4f05eefc3bb1c"},
{file = "sqlalchemy-2.0.45-cp311-cp311-win32.whl", hash = "sha256:3c5f76216e7b85770d5bb5130ddd11ee89f4d52b11783674a662c7dd57018177"},
{file = "sqlalchemy-2.0.45-cp311-cp311-win_amd64.whl", hash = "sha256:a15b98adb7f277316f2c276c090259129ee4afca783495e212048daf846654b2"},
{file = "sqlalchemy-2.0.45-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b3ee2aac15169fb0d45822983631466d60b762085bc4535cd39e66bea362df5f"},
{file = "sqlalchemy-2.0.45-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba547ac0b361ab4f1608afbc8432db669bd0819b3e12e29fb5fa9529a8bba81d"},
{file = "sqlalchemy-2.0.45-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:215f0528b914e5c75ef2559f69dca86878a3beeb0c1be7279d77f18e8d180ed4"},
{file = "sqlalchemy-2.0.45-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:107029bf4f43d076d4011f1afb74f7c3e2ea029ec82eb23d8527d5e909e97aa6"},
{file = "sqlalchemy-2.0.45-cp312-cp312-win32.whl", hash = "sha256:0c9f6ada57b58420a2c0277ff853abe40b9e9449f8d7d231763c6bc30f5c4953"},
{file = "sqlalchemy-2.0.45-cp312-cp312-win_amd64.whl", hash = "sha256:8defe5737c6d2179c7997242d6473587c3beb52e557f5ef0187277009f73e5e1"},
{file = "sqlalchemy-2.0.45-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fe187fc31a54d7fd90352f34e8c008cf3ad5d064d08fedd3de2e8df83eb4a1cf"},
{file = "sqlalchemy-2.0.45-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:672c45cae53ba88e0dad74b9027dddd09ef6f441e927786b05bec75d949fbb2e"},
{file = "sqlalchemy-2.0.45-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:470daea2c1ce73910f08caf10575676a37159a6d16c4da33d0033546bddebc9b"},
{file = "sqlalchemy-2.0.45-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9c6378449e0940476577047150fd09e242529b761dc887c9808a9a937fe990c8"},
{file = "sqlalchemy-2.0.45-cp313-cp313-win32.whl", hash = "sha256:4b6bec67ca45bc166c8729910bd2a87f1c0407ee955df110d78948f5b5827e8a"},
{file = "sqlalchemy-2.0.45-cp313-cp313-win_amd64.whl", hash = "sha256:afbf47dc4de31fa38fd491f3705cac5307d21d4bb828a4f020ee59af412744ee"},
{file = "sqlalchemy-2.0.45-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:83d7009f40ce619d483d26ac1b757dfe3167b39921379a8bd1b596cf02dab4a6"},
{file = "sqlalchemy-2.0.45-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d8a2ca754e5415cde2b656c27900b19d50ba076aa05ce66e2207623d3fe41f5a"},
{file = "sqlalchemy-2.0.45-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f46ec744e7f51275582e6a24326e10c49fbdd3fc99103e01376841213028774"},
{file = "sqlalchemy-2.0.45-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:883c600c345123c033c2f6caca18def08f1f7f4c3ebeb591a63b6fceffc95cce"},
{file = "sqlalchemy-2.0.45-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2c0b74aa79e2deade948fe8593654c8ef4228c44ba862bb7c9585c8e0db90f33"},
{file = "sqlalchemy-2.0.45-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8a420169cef179d4c9064365f42d779f1e5895ad26ca0c8b4c0233920973db74"},
{file = "sqlalchemy-2.0.45-cp314-cp314-win32.whl", hash = "sha256:e50dcb81a5dfe4b7b4a4aa8f338116d127cb209559124f3694c70d6cd072b68f"},
{file = "sqlalchemy-2.0.45-cp314-cp314-win_amd64.whl", hash = "sha256:4748601c8ea959e37e03d13dcda4a44837afcd1b21338e637f7c935b8da06177"},
{file = "sqlalchemy-2.0.45-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd337d3526ec5298f67d6a30bbbe4ed7e5e68862f0bf6dd21d289f8d37b7d60b"},
{file = "sqlalchemy-2.0.45-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9a62b446b7d86a3909abbcd1cd3cc550a832f99c2bc37c5b22e1925438b9367b"},
{file = "sqlalchemy-2.0.45-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5964f832431b7cdfaaa22a660b4c7eb1dfcd6ed41375f67fd3e3440fd95cb3cc"},
{file = "sqlalchemy-2.0.45-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee580ab50e748208754ae8980cec79ec205983d8cf8b3f7c39067f3d9f2c8e22"},
{file = "sqlalchemy-2.0.45-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13e27397a7810163440c6bfed6b3fe46f1bfb2486eb540315a819abd2c004128"},
{file = "sqlalchemy-2.0.45-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ed3635353e55d28e7f4a95c8eda98a5cdc0a0b40b528433fbd41a9ae88f55b3d"},
{file = "sqlalchemy-2.0.45-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:db6834900338fb13a9123307f0c2cbb1f890a8656fcd5e5448ae3ad5bbe8d312"},
{file = "sqlalchemy-2.0.45-cp38-cp38-win32.whl", hash = "sha256:1d8b4a7a8c9b537509d56d5cd10ecdcfbb95912d72480c8861524efecc6a3fff"},
{file = "sqlalchemy-2.0.45-cp38-cp38-win_amd64.whl", hash = "sha256:ebd300afd2b62679203435f596b2601adafe546cb7282d5a0cd3ed99e423720f"},
{file = "sqlalchemy-2.0.45-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d29b2b99d527dbc66dd87c3c3248a5dd789d974a507f4653c969999fc7c1191b"},
{file = "sqlalchemy-2.0.45-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:59a8b8bd9c6bedf81ad07c8bd5543eedca55fe9b8780b2b628d495ba55f8db1e"},
{file = "sqlalchemy-2.0.45-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd93c6f5d65f254ceabe97548c709e073d6da9883343adaa51bf1a913ce93f8e"},
{file = "sqlalchemy-2.0.45-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6d0beadc2535157070c9c17ecf25ecec31e13c229a8f69196d7590bde8082bf1"},
{file = "sqlalchemy-2.0.45-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e057f928ffe9c9b246a55b469c133b98a426297e1772ad24ce9f0c47d123bd5b"},
{file = "sqlalchemy-2.0.45-cp39-cp39-win32.whl", hash = "sha256:c1c2091b1489435ff85728fafeb990f073e64f6f5e81d5cd53059773e8521eb6"},
{file = "sqlalchemy-2.0.45-cp39-cp39-win_amd64.whl", hash = "sha256:56ead1f8dfb91a54a28cd1d072c74b3d635bcffbd25e50786533b822d4f2cde2"},
{file = "sqlalchemy-2.0.45-py3-none-any.whl", hash = "sha256:5225a288e4c8cc2308dbdd874edad6e7d0fd38eac1e9e5f23503425c8eee20d0"},
{file = "sqlalchemy-2.0.45.tar.gz", hash = "sha256:1632a4bda8d2d25703fdad6363058d882541bdaaee0e5e3ddfa0cd3229efce88"},
]
[package.dependencies]
greenlet = {version = ">=1", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""}
typing-extensions = ">=4.6.0"
[package.extras]
aiomysql = ["aiomysql (>=0.2.0)", "greenlet (>=1)"]
aioodbc = ["aioodbc", "greenlet (>=1)"]
aiosqlite = ["aiosqlite", "greenlet (>=1)", "typing_extensions (!=3.10.0.1)"]
asyncio = ["greenlet (>=1)"]
asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (>=1)"]
mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"]
mssql = ["pyodbc"]
mssql-pymssql = ["pymssql"]
mssql-pyodbc = ["pyodbc"]
mypy = ["mypy (>=0.910)"]
mysql = ["mysqlclient (>=1.4.0)"]
mysql-connector = ["mysql-connector-python"]
oracle = ["cx_oracle (>=8)"]
oracle-oracledb = ["oracledb (>=1.0.1)"]
postgresql = ["psycopg2 (>=2.7)"]
postgresql-asyncpg = ["asyncpg", "greenlet (>=1)"]
postgresql-pg8000 = ["pg8000 (>=1.29.1)"]
postgresql-psycopg = ["psycopg (>=3.0.7)"]
postgresql-psycopg2binary = ["psycopg2-binary"]
postgresql-psycopg2cffi = ["psycopg2cffi"]
postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"]
pymysql = ["pymysql"]
sqlcipher = ["sqlcipher3_binary"]
[[package]]
name = "sse-starlette"
version = "3.0.2"
@ -5820,4 +6049,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
[metadata]
lock-version = "2.1"
python-versions = ">=3.12,<4.0"
content-hash = "962eb510d7d1004932e5036ab0fdb47311de42151d77c3fcc5a56771c7df3ccb"
content-hash = "3fe7331ac9104a8421b06b68ec3e316d8520e37c62fed52f5badc22551e90c18"

View File

@ -21,13 +21,18 @@
### 3. Planning Phase
- **Step formulation**: Only list the detailed tool-call steps; there is no need to draft the wording of the reply to the user, and the number of steps should match actual needs.
Follow the thinking framework above to carry out a complete analysis, making sure you understand the goal, analyze the problem, and formulate a plan
Follow the thinking framework above to carry out a complete analysis, making sure you understand the goal, analyze the problem, and formulate a plan
## Chat History
```
{chat_history}
```
## Memory Text
```
{memory_text}
```
## Tools
```
{tools}
@ -57,7 +62,7 @@
[A concise description of the user's main goal]
### 📋 Situation Analysis
- **Key information**: [important information extracted from the chat history]
- **Key information**: [important information extracted from the chat history and the memory text]
- **Constraints**: [rules and limits that must be respected]
- **Available resources**: [tools and resources that can be leveraged]
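
The planning template now carries a {memory_text} placeholder alongside {chat_history} and {tools}. Elsewhere in this commit the placeholders are filled with chained str.replace calls; below is a minimal, self-contained sketch of that pattern (the sample values are illustrative, not from the repo):

```python
# Fill the planning-template placeholders with plain string replacement,
# one placeholder at a time, mirroring the pattern used in this commit.
template = "## Chat History\n{chat_history}\n\n## Memory Text\n{memory_text}\n\n## Tools\n{tools}"

prompt = (
    template
    .replace("{chat_history}", "user: plan my Tokyo trip")
    .replace("{memory_text}", "User prefers morning flights")
    .replace("{tools}", "search_flights, book_hotel")
)
print(prompt)
```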

View File

@ -32,8 +32,9 @@ dependencies = [
"cachetools (>=6.2.4,<7.0.0)",
"langgraph-checkpoint-postgres (>=2.0.0,<3.0.0)",
"deepagents-cli (>=0.0.11,<0.0.12)",
"memori (>=3.1.0,<4.0.0)",
"mem0ai (>=0.1.50,<0.3.0)",
"psycopg2-binary (>=2.9.11,<3.0.0)",
"json-repair (>=0.29.0,<0.30.0)",
]
[tool.poetry.requires-plugins]

View File

@ -390,7 +390,7 @@ async def chat_completions(request: ChatRequest, authorization: Optional[str] =
# Process the messages
messages = process_messages(request.messages, request.language)
# Create the AgentConfig object
config = AgentConfig.from_v1_request(request, api_key, project_dir, generate_cfg, messages)
config = await AgentConfig.from_v1_request(request, api_key, project_dir, generate_cfg, messages)
# Invoke the shared agent creation and response generation logic
return await create_agent_and_generate_response(config)
@ -445,7 +445,7 @@ async def chat_warmup_v1(request: ChatRequest, authorization: Optional[str] = He
messages = process_messages(empty_messages, request.language or "ja")
# Create the AgentConfig object
config = AgentConfig.from_v1_request(request, api_key, project_dir, generate_cfg, messages)
config = await AgentConfig.from_v1_request(request, api_key, project_dir, generate_cfg, messages)
# Warm up the mcp_tools cache
logger.info(f"Warming up mcp_tools for bot_id: {bot_id}")
@ -542,7 +542,7 @@ async def chat_warmup_v2(request: ChatRequestV2, authorization: Optional[str] =
messages = process_messages(empty_messages, request.language or "ja")
# Create the AgentConfig object
config = AgentConfig.from_v2_request(request, bot_config, project_dir, messages)
config = await AgentConfig.from_v2_request(request, bot_config, project_dir, messages)
# Warm up the mcp_tools cache
logger.info(f"Warming up mcp_tools for bot_id: {bot_id}")
@ -640,7 +640,7 @@ async def chat_completions_v2(request: ChatRequestV2, authorization: Optional[st
# Process the messages
messages = process_messages(request.messages, request.language)
# Create the AgentConfig object
config = AgentConfig.from_v2_request(request, bot_config, project_dir, messages)
config = await AgentConfig.from_v2_request(request, bot_config, project_dir, messages)
# Invoke the shared agent creation and response generation logic
return await create_agent_and_generate_response(config)
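
All four call sites above change the same way because the config factories became coroutines (checkpoint message prep now happens inside config creation). A minimal sketch of the new calling convention; handle_chat and its arguments are illustrative, not actual route code:

```python
from agent.agent_config import AgentConfig

async def handle_chat(request, api_key: str):
    # from_v1_request must now be awaited; without `await` it returns a
    # coroutine object rather than an AgentConfig, so later attribute
    # access (config.bot_id, ...) would fail.
    config = await AgentConfig.from_v1_request(request, api_key)
    return config
```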

View File

@ -1,75 +0,0 @@
"""
Memori configuration tests
"""
import pytest
from agent.memori_config import MemoriConfig
class TestMemoriConfig:
"""测试 MemoriConfig 配置类"""
def test_default_values(self):
"""测试默认值"""
config = MemoriConfig()
assert config.enabled is False
assert config.api_key is None
assert config.semantic_search_top_k == 5
assert config.semantic_search_threshold == 0.7
assert config.inject_memory_to_system_prompt is True
def test_enabled_check(self):
"""测试 is_enabled 方法"""
config = MemoriConfig(enabled=False)
assert config.is_enabled() is False
config.enabled = True
assert config.is_enabled() is True
def test_get_attribution_tuple(self):
"""测试 get_attribution_tuple 方法"""
config = MemoriConfig(
entity_id="user_123",
process_id="bot_456"
)
entity_id, process_id = config.get_attribution_tuple()
assert entity_id == "user_123"
assert process_id == "bot_456"
def test_get_attribution_tuple_missing_values(self):
"""测试缺少 entity_id 或 process_id 时抛出异常"""
config = MemoriConfig(entity_id="user_123")
with pytest.raises(ValueError, match="entity_id and process_id are required"):
config.get_attribution_tuple()
def test_get_memory_prompt(self):
"""测试 get_memory_prompt 方法"""
config = MemoriConfig()
memories = ["User likes coffee", "User prefers dark mode"]
prompt = config.get_memory_prompt(memories)
assert "相关记忆" in prompt
assert "User likes coffee" in prompt
assert "User prefers dark mode" in prompt
def test_get_memory_prompt_empty(self):
"""测试空记忆列表"""
config = MemoriConfig()
prompt = config.get_memory_prompt([])
assert prompt == ""
def test_with_session(self):
"""测试 with_session 方法"""
config = MemoriConfig(
entity_id="user_123",
process_id="bot_456",
session_id="session_789"
)
new_config = config.with_session("new_session_999")
assert new_config.session_id == "new_session_999"
assert new_config.entity_id == "user_123"
assert new_config.process_id == "bot_456"
# The original config is unaffected
assert config.session_id == "session_789"
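
The commit adds agent/mem0_config.py in place of the module tested above, but its tests are not part of this excerpt. Purely as a hypothetical sketch of a successor test, assuming Mem0Config mirrors the deleted MemoriConfig's shape with the new top-k default of 20 (the class name, fields, and defaults are all guesses):

```python
# Hypothetical: agent.mem0_config's real API may differ from this sketch.
from agent.mem0_config import Mem0Config  # assumed class name


def test_default_values():
    config = Mem0Config()
    assert config.enabled is False             # assumed, mirroring MEM0_ENABLED gating
    assert config.semantic_search_top_k == 20  # assumed, mirroring MEM0_SEMANTIC_SEARCH_TOP_K
```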

View File

@ -1,65 +0,0 @@
"""
Memori manager tests
"""
import pytest
from agent.memori_manager import MemoriManager, get_memori_manager
class TestMemoriManager:
"""测试 MemoriManager 类"""
def test_singleton(self):
"""测试全局单例模式"""
manager1 = get_memori_manager()
manager2 = get_memori_manager()
assert manager1 is manager2
def test_initialization_state(self):
"""测试初始状态"""
manager = MemoriManager()
assert manager._initialized is False
assert manager._instances == {}
assert manager._sync_engines == {}
def test_clear_cache_all(self):
"""测试清除所有缓存"""
manager = MemoriManager()
manager._instances["key1"] = "value1"
manager._instances["key2"] = "value2"
manager.clear_cache()
assert manager._instances == {}
def test_clear_cache_selective(self):
"""测试选择性清除缓存"""
manager = MemoriManager()
manager._instances["user1:bot1"] = "value1"
manager._instances["user1:bot2"] = "value2"
manager._instances["user2:bot1"] = "value3"
# Clear only user1's cache
manager.clear_cache(entity_id="user1")
assert "user1:bot1" not in manager._instances
assert "user1:bot2" not in manager._instances
assert "user2:bot1" in manager._instances
# Clear only bot1's cache
manager.clear_cache(process_id="bot1")
assert "user2:bot1" not in manager._instances
def test_missing_db_url_and_pool(self):
"""测试没有提供 db_url 和 pool 的情况"""
import pytest
config = {
"entity_id": "user_123",
"process_id": "bot_456",
"session_id": "session_789",
}
# Would need to mock _create_memori_instance or provide a db_url
# Only the initialization logic is tested here
manager = MemoriManager()
assert manager._db_url is None
assert manager._db_pool is None

View File

@ -1,120 +0,0 @@
"""
Memori middleware tests
"""
import pytest
from unittest.mock import Mock, AsyncMock, patch
from langchain.agents.middleware import AgentState
from langgraph.runtime import Runtime
from agent.memori_middleware import MemoriMiddleware, create_memori_middleware
from agent.memori_config import MemoriConfig
class TestMemoriMiddleware:
"""测试 MemoriMiddleware 类"""
def test_extract_user_query_empty(self):
"""测试空状态"""
config = MemoriConfig(
enabled=True,
entity_id="user_123",
process_id="bot_456",
)
manager = Mock()
middleware = MemoriMiddleware(manager, config)
state: AgentState = {"messages": []}
query = middleware._extract_user_query(state)
assert query == ""
def test_extract_user_query_from_message(self):
"""测试从消息中提取查询"""
config = MemoriConfig(
enabled=True,
entity_id="user_123",
process_id="bot_456",
)
manager = Mock()
middleware = MemoriMiddleware(manager, config)
# Create a mock message
mock_message = Mock()
mock_message.content = "What is the weather today?"
state: AgentState = {"messages": [mock_message]}
query = middleware._extract_user_query(state)
assert query == "What is the weather today?"
def test_extract_user_query_from_dict(self):
"""测试从字典消息中提取查询"""
config = MemoriConfig(
enabled=True,
entity_id="user_123",
process_id="bot_456",
)
manager = Mock()
middleware = MemoriMiddleware(manager, config)
state: AgentState = {"messages": [{"content": "Hello, world!"}]}
query = middleware._extract_user_query(state)
assert query == "Hello, world!"
def test_format_memories(self):
"""测试记忆格式化"""
config = MemoriConfig(
enabled=True,
entity_id="user_123",
process_id="bot_456",
)
manager = Mock()
middleware = MemoriMiddleware(manager, config)
memories = [
{"content": "User likes coffee", "similarity": 0.9, "fact_type": "preference"},
{"content": "User lives in Tokyo", "similarity": 0.8, "fact_type": "fact"},
]
formatted = middleware._format_memories(memories)
assert "User likes coffee" in formatted
assert "User lives in Tokyo" in formatted
assert "[preference]" in formatted
assert "[fact]" in formatted
def test_format_memories_empty(self):
"""测试空记忆列表"""
config = MemoriConfig(
enabled=True,
entity_id="user_123",
process_id="bot_456",
)
manager = Mock()
middleware = MemoriMiddleware(manager, config)
formatted = middleware._format_memories([])
assert formatted == ""
def test_create_memori_middleware_disabled(self):
"""测试创建中间件(禁用状态)"""
middleware = create_memori_middleware(
bot_id="bot_456",
user_identifier="user_123",
session_id="session_789",
enabled=False,
)
assert middleware is None
def test_create_memori_middleware_enabled(self):
"""测试创建中间件(启用状态)"""
middleware = create_memori_middleware(
bot_id="bot_456",
user_identifier="user_123",
session_id="session_789",
enabled=True,
semantic_search_top_k=10,
semantic_search_threshold=0.8,
)
assert middleware is not None
assert isinstance(middleware, MemoriMiddleware)
assert middleware.config.semantic_search_top_k == 10
assert middleware.config.semantic_search_threshold == 0.8

View File

@ -8,7 +8,7 @@ from typing import List, Dict, Optional, Union, Any
import aiohttp
from fastapi import HTTPException
import logging
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage, convert_to_openai_messages
from langchain.chat_models import init_chat_model
from utils.settings import MASTERKEY, BACKEND_HOST
from agent.agent_config import AgentConfig
@ -591,7 +591,7 @@ async def call_preamble_llm(config: AgentConfig) -> str:
language = config.language
preamble_choices_text = config.preamble_text
last_message = get_user_last_message_content(config.messages)
chat_history = format_messages_to_chat_history(config.messages)
chat_history = format_messages_to_chat_history(convert_to_openai_messages(config._session_history))
# Replace the placeholders in the template
system_prompt = preamble_template.replace('{preamble_choices_text}', preamble_choices_text).replace('{chat_history}', chat_history).replace('{last_message}', last_message).replace('{language}', get_language_text(language))
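
convert_to_openai_messages is a langchain_core helper that normalizes LangChain message objects into OpenAI-style role/content dicts, which is the shape the chat-history formatter consumes here. A self-contained illustration:

```python
from langchain_core.messages import AIMessage, HumanMessage, convert_to_openai_messages

# Session history as stored by the checkpointer: LangChain message objects.
session_history = [
    HumanMessage(content="What's the weather today?"),
    AIMessage(content="Sunny, around 22°C."),
]

# Roughly: [{'role': 'user', 'content': "What's the weather today?"},
#           {'role': 'assistant', 'content': 'Sunny, around 22°C.'}]
print(convert_to_openai_messages(session_history))
```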

View File

@ -68,42 +68,24 @@ CHECKPOINT_CLEANUP_INTERVAL_HOURS = int(os.getenv("CHECKPOINT_CLEANUP_INTERVAL_H
# ============================================================
# Memori long-term memory configuration
# Mem0 long-term memory configuration
# ============================================================
# Memori feature switch (global)
MEMORI_ENABLED = os.getenv("MEMORI_ENABLED", "true") == "true"
# Memori API key (for advanced augmentation features)
MEMORI_API_KEY = os.getenv("MEMORI_API_KEY", "")
# Mem0 feature switch (global)
MEM0_ENABLED = os.getenv("MEM0_ENABLED", "true") == "true"
# Semantic search configuration
# Number of memories to recall
MEMORI_SEMANTIC_SEARCH_TOP_K = int(os.getenv("MEMORI_SEMANTIC_SEARCH_TOP_K", "5"))
# Relevance threshold (0.0 - 1.0)
MEMORI_SEMANTIC_SEARCH_THRESHOLD = float(os.getenv("MEMORI_SEMANTIC_SEARCH_THRESHOLD", "0.7"))
# Search embeddings limit
MEMORI_SEMANTIC_SEARCH_EMBEDDINGS_LIMIT = int(os.getenv("MEMORI_SEMANTIC_SEARCH_EMBEDDINGS_LIMIT", "1000"))
MEM0_SEMANTIC_SEARCH_TOP_K = int(os.getenv("MEM0_SEMANTIC_SEARCH_TOP_K", "20"))
# Memory injection configuration
# Whether to inject memories into the system prompt
MEMORI_INJECT_TO_SYSTEM_PROMPT = os.getenv("MEMORI_INJECT_TO_SYSTEM_PROMPT", "true") == "true"
# Augmentation configuration
# Whether to enable background augmentation
MEMORI_AUGMENTATION_ENABLED = os.getenv("MEMORI_AUGMENTATION_ENABLED", "true") == "true"
# Augmentation wait timeout (None means run asynchronously in the background)
MEMORI_AUGMENTATION_WAIT_TIMEOUT = os.getenv("MEMORI_AUGMENTATION_WAIT_TIMEOUT")
if MEMORI_AUGMENTATION_WAIT_TIMEOUT:
    MEMORI_AUGMENTATION_WAIT_TIMEOUT = float(MEMORI_AUGMENTATION_WAIT_TIMEOUT)
else:
    MEMORI_AUGMENTATION_WAIT_TIMEOUT = None
MEM0_INJECT_TO_SYSTEM_PROMPT = os.getenv("MEM0_INJECT_TO_SYSTEM_PROMPT", "true") == "true"
# Embedding model (multilingual support)
MEMORI_EMBEDDING_MODEL = os.getenv(
    "MEMORI_EMBEDDING_MODEL",
    "paraphrase-multilingual-MiniLM-L12-v2"
# Use a local sentence-transformers model
MEM0_EMBEDDING_MODEL = os.getenv(
    "MEM0_EMBEDDING_MODEL",
    "./models/gte-tiny"
)
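
How these settings feed into Mem0 lives in mem0_manager.py, which is not part of this excerpt. The sketch below is an assumption-laden illustration built on the public mem0ai API (Memory.from_config and search(..., limit=...) exist in mem0ai, but the embedder provider name and config schema here are guesses, not the repo's actual wiring):

```python
import os

from mem0 import Memory  # provided by the mem0ai dependency pinned above

top_k = int(os.getenv("MEM0_SEMANTIC_SEARCH_TOP_K", "20"))

# Assumed config schema: point the embedder at the local model configured above.
memory = Memory.from_config({
    "embedder": {
        "provider": "huggingface",
        "config": {"model": os.getenv("MEM0_EMBEDDING_MODEL", "./models/gte-tiny")},
    },
})

# Recall up to top_k memories for this user; each hit carries the memory text.
hits = memory.search("user preferences", user_id="user_123", limit=top_k)
```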