Compare commits
3 Commits
f9efe09f24
...
4c70857ff6
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4c70857ff6 | ||
|
|
f1107ea35a | ||
|
|
26a85299b3 |
@ -112,10 +112,6 @@ class AgentConfig:
|
||||
robot_type = "deep_agent"
|
||||
|
||||
preamble_text, system_prompt = get_preamble_text(request.language, request.system_prompt)
|
||||
enable_thinking = request.enable_thinking and "<guidelines>" in request.system_prompt
|
||||
|
||||
# 从请求中获取 Mem0 配置,如果没有则使用全局配置
|
||||
enable_memori = getattr(request, 'enable_memori', MEM0_ENABLED)
|
||||
|
||||
config = cls(
|
||||
bot_id=request.bot_id,
|
||||
@ -128,7 +124,7 @@ class AgentConfig:
|
||||
robot_type=robot_type,
|
||||
user_identifier=request.user_identifier,
|
||||
session_id=request.session_id,
|
||||
enable_thinking=enable_thinking,
|
||||
enable_thinking=request.enable_thinking,
|
||||
project_dir=project_dir,
|
||||
stream=request.stream,
|
||||
tool_response=request.tool_response,
|
||||
@ -138,7 +134,7 @@ class AgentConfig:
|
||||
_origin_messages=messages,
|
||||
preamble_text=preamble_text,
|
||||
dataset_ids=request.dataset_ids,
|
||||
enable_memori=enable_memori,
|
||||
enable_memori=request.enable_memory,
|
||||
memori_semantic_search_top_k=getattr(request, 'memori_semantic_search_top_k', None) or MEM0_SEMANTIC_SEARCH_TOP_K,
|
||||
trace_id=trace_id,
|
||||
)
|
||||
@ -185,10 +181,8 @@ class AgentConfig:
|
||||
robot_type = bot_config.get("robot_type", "general_agent")
|
||||
if robot_type == "catalog_agent":
|
||||
robot_type = "deep_agent"
|
||||
enable_thinking = request.enable_thinking and "<guidelines>" in bot_config.get("system_prompt")
|
||||
|
||||
# 从请求或后端配置中获取 Mem0 配置
|
||||
enable_memori = getattr(request, 'enable_memori', MEM0_ENABLED)
|
||||
enable_thinking = bot_config.get("enable_thinking", False)
|
||||
enable_memori = bot_config.get("enable_memory", False)
|
||||
|
||||
config = cls(
|
||||
bot_id=request.bot_id,
|
||||
@ -228,7 +222,87 @@ class AgentConfig:
|
||||
|
||||
config.safe_print()
|
||||
return config
|
||||
|
||||
|
||||
@classmethod
|
||||
async def from_v3_request(cls, request, bot_config: Dict, project_dir: Optional[str] = None, messages: Optional[List] = None, language: Optional[str] = None):
|
||||
"""从v3请求创建配置 - 从数据库读取所有配置"""
|
||||
# 延迟导入避免循环依赖
|
||||
from .logging_handler import LoggingCallbackHandler
|
||||
from utils.fastapi_utils import get_preamble_text
|
||||
from utils.settings import (
|
||||
MEM0_ENABLED,
|
||||
MEM0_SEMANTIC_SEARCH_TOP_K,
|
||||
)
|
||||
from .checkpoint_utils import prepare_checkpoint_message
|
||||
from .checkpoint_manager import get_checkpointer_manager
|
||||
from utils.log_util.context import g
|
||||
|
||||
if messages is None:
|
||||
messages = []
|
||||
|
||||
# 从全局上下文获取 trace_id
|
||||
trace_id = None
|
||||
try:
|
||||
trace_id = getattr(g, 'trace_id', None)
|
||||
except LookupError:
|
||||
pass
|
||||
|
||||
# 从数据库配置获取语言(如果没有传递)
|
||||
if language is None:
|
||||
language = bot_config.get("language", "zh")
|
||||
|
||||
# 处理 system_prompt 和 preamble
|
||||
system_prompt_from_db = bot_config.get("system_prompt", "")
|
||||
preamble_text, system_prompt = get_preamble_text(language, system_prompt_from_db)
|
||||
|
||||
# 获取 robot_type
|
||||
robot_type = bot_config.get("robot_type", "general_agent")
|
||||
if robot_type == "catalog_agent":
|
||||
robot_type = "deep_agent"
|
||||
|
||||
# 从数据库配置获取其他参数
|
||||
enable_thinking = bot_config.get("enable_thinking", False)
|
||||
enable_memori = bot_config.get("enable_memori", False)
|
||||
|
||||
config = cls(
|
||||
bot_id=request.bot_id,
|
||||
api_key=bot_config.get("api_key", ""),
|
||||
model_name=bot_config.get("model", "qwen/qwen3-next-80b-a3b-instruct"),
|
||||
model_server=bot_config.get("model_server", ""),
|
||||
language=language,
|
||||
system_prompt=system_prompt,
|
||||
mcp_settings=bot_config.get("mcp_settings", []),
|
||||
robot_type=robot_type,
|
||||
user_identifier=bot_config.get("user_identifier", ""),
|
||||
session_id=request.session_id,
|
||||
enable_thinking=enable_thinking,
|
||||
project_dir=project_dir,
|
||||
stream=request.stream,
|
||||
tool_response=bot_config.get("tool_response", True),
|
||||
generate_cfg={}, # v3接口不传递额外的generate_cfg
|
||||
logging_handler=LoggingCallbackHandler(),
|
||||
messages=messages,
|
||||
_origin_messages=messages,
|
||||
preamble_text=preamble_text,
|
||||
dataset_ids=bot_config.get("dataset_ids", []),
|
||||
enable_memori=enable_memori,
|
||||
memori_semantic_search_top_k=bot_config.get("memori_semantic_search_top_k", MEM0_SEMANTIC_SEARCH_TOP_K),
|
||||
trace_id=trace_id,
|
||||
)
|
||||
|
||||
# 在创建 config 时尽早准备 checkpoint 消息
|
||||
if config.session_id:
|
||||
try:
|
||||
manager = get_checkpointer_manager()
|
||||
checkpointer = manager.checkpointer
|
||||
if checkpointer:
|
||||
await prepare_checkpoint_message(config, checkpointer)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to load checkpointer: {e}")
|
||||
|
||||
config.safe_print()
|
||||
return config
|
||||
|
||||
def invoke_config(self):
|
||||
"""返回Langchain需要的配置字典"""
|
||||
config = {}
|
||||
|
||||
@ -132,9 +132,6 @@ async def init_agent(config: AgentConfig):
|
||||
(agent, checkpointer) 元组
|
||||
"""
|
||||
|
||||
|
||||
|
||||
|
||||
# 加载配置
|
||||
final_system_prompt = await load_system_prompt_async(
|
||||
config.project_dir, config.language, config.system_prompt, config.robot_type, config.bot_id, config.user_identifier, config.trace_id or ""
|
||||
|
||||
@ -85,7 +85,10 @@ async def load_system_prompt_async(project_dir: str, language: str = None, syste
|
||||
str: 加载到的系统提示词内容
|
||||
"""
|
||||
from agent.config_cache import config_cache
|
||||
|
||||
|
||||
# 初始化 prompt 为空字符串,避免未定义错误
|
||||
prompt = ""
|
||||
|
||||
# 获取语言显示名称
|
||||
language_display_map = {
|
||||
'zh': '中文',
|
||||
@ -94,7 +97,7 @@ async def load_system_prompt_async(project_dir: str, language: str = None, syste
|
||||
'jp': '日本語'
|
||||
}
|
||||
language_display = language_display_map.get(language, language if language else 'English')
|
||||
|
||||
|
||||
# 获取格式化的时间字符串
|
||||
datetime_str = format_datetime_by_language(language) if language else format_datetime_by_language('en')
|
||||
|
||||
|
||||
@ -71,7 +71,7 @@ from utils.log_util.logger import init_with_fastapi
|
||||
logger = logging.getLogger('app')
|
||||
|
||||
# Import route modules
|
||||
from routes import chat, files, projects, system, skill_manager, database
|
||||
from routes import chat, files, projects, system, skill_manager, database, bot_manager
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
@ -118,7 +118,14 @@ async def lifespan(app: FastAPI):
|
||||
except Exception as e:
|
||||
logger.warning(f"Mem0 initialization failed (continuing without): {e}")
|
||||
|
||||
# 5. 启动 checkpoint 清理调度器
|
||||
# 5. 初始化 Bot Manager 表
|
||||
try:
|
||||
await bot_manager.init_bot_manager_tables()
|
||||
logger.info("Bot Manager tables initialized")
|
||||
except Exception as e:
|
||||
logger.warning(f"Bot Manager table initialization failed (non-fatal): {e}")
|
||||
|
||||
# 6. 启动 checkpoint 清理调度器
|
||||
if CHECKPOINT_CLEANUP_ENABLED:
|
||||
# 启动时立即执行一次清理
|
||||
try:
|
||||
@ -175,6 +182,7 @@ app.include_router(projects.router)
|
||||
app.include_router(system.router)
|
||||
app.include_router(skill_manager.router)
|
||||
app.include_router(database.router)
|
||||
app.include_router(bot_manager.router)
|
||||
|
||||
# 注册文件管理API路由
|
||||
app.include_router(file_manager_router)
|
||||
|
||||
1250
routes/bot_manager.py
Normal file
1250
routes/bot_manager.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -10,10 +10,10 @@ logger = logging.getLogger('app')
|
||||
from utils import (
|
||||
Message, ChatRequest, ChatResponse, BatchSaveChatRequest, BatchSaveChatResponse
|
||||
)
|
||||
from utils.api_models import ChatRequestV2
|
||||
from utils.api_models import ChatRequestV2, ChatRequestV3
|
||||
from utils.fastapi_utils import (
|
||||
process_messages,
|
||||
create_project_directory, extract_api_key_from_auth, generate_v2_auth_token, fetch_bot_config,
|
||||
create_project_directory, extract_api_key_from_auth, generate_v2_auth_token, fetch_bot_config, fetch_bot_config_from_db,
|
||||
call_preamble_llm,
|
||||
create_stream_chunk
|
||||
)
|
||||
@ -385,7 +385,7 @@ async def chat_completions(request: ChatRequest, authorization: Optional[str] =
|
||||
project_dir = create_project_directory(request.dataset_ids, bot_id, request.robot_type, request.skills)
|
||||
|
||||
# 收集额外参数作为 generate_cfg
|
||||
exclude_fields = {'messages', 'model', 'model_server', 'dataset_ids', 'language', 'tool_response', 'system_prompt', 'mcp_settings' ,'stream', 'robot_type', 'bot_id', 'user_identifier', 'session_id', 'enable_thinking', 'skills'}
|
||||
exclude_fields = {'messages', 'model', 'model_server', 'dataset_ids', 'language', 'tool_response', 'system_prompt', 'mcp_settings' ,'stream', 'robot_type', 'bot_id', 'user_identifier', 'session_id', 'enable_thinking', 'skills', 'enable_memory'}
|
||||
generate_cfg = {k: v for k, v in request.model_dump().items() if k not in exclude_fields}
|
||||
# 处理消息
|
||||
messages = process_messages(request.messages, request.language)
|
||||
@ -435,7 +435,7 @@ async def chat_warmup_v1(request: ChatRequest, authorization: Optional[str] = He
|
||||
project_dir = create_project_directory(request.dataset_ids, bot_id, request.robot_type, request.skills)
|
||||
|
||||
# 收集额外参数作为 generate_cfg
|
||||
exclude_fields = {'messages', 'model', 'model_server', 'dataset_ids', 'language', 'tool_response', 'system_prompt', 'mcp_settings' ,'stream', 'robot_type', 'bot_id', 'user_identifier', 'session_id', 'enable_thinking', 'skills'}
|
||||
exclude_fields = {'messages', 'model', 'model_server', 'dataset_ids', 'language', 'tool_response', 'system_prompt', 'mcp_settings' ,'stream', 'robot_type', 'bot_id', 'user_identifier', 'session_id', 'enable_thinking', 'skills', 'enable_memory'}
|
||||
generate_cfg = {k: v for k, v in request.model_dump().items() if k not in exclude_fields}
|
||||
|
||||
# 创建一个空的消息列表用于预热(实际消息不会在warmup中处理)
|
||||
@ -654,6 +654,97 @@ async def chat_completions_v2(request: ChatRequestV2, authorization: Optional[st
|
||||
raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/api/v3/chat/completions")
|
||||
async def chat_completions_v3(request: ChatRequestV3, authorization: Optional[str] = Header(None)):
|
||||
"""
|
||||
Chat completions API v3 - 从数据库读取配置
|
||||
|
||||
与 v2 相比,v3 从本地数据库读取所有配置参数,而不是从后端 API。
|
||||
前端只需要传递 bot_id 和 messages,其他配置从数据库自动读取。
|
||||
|
||||
Args:
|
||||
request: ChatRequestV3 包含 bot_id, messages, stream, session_id
|
||||
authorization: 可选的认证头
|
||||
|
||||
Returns:
|
||||
Union[ChatResponse, StreamingResponse]: Chat completion response or stream
|
||||
|
||||
Required Parameters:
|
||||
- bot_id: str - 目标机器人ID(用户创建时填写的ID)
|
||||
- messages: List[Message] - 对话消息列表
|
||||
|
||||
Optional Parameters:
|
||||
- stream: bool - 是否流式输出,默认false
|
||||
- session_id: str - 会话ID,用于保存聊天历史
|
||||
|
||||
Configuration (from database):
|
||||
- model: 模型名称
|
||||
- api_key: API密钥
|
||||
- model_server: 模型服务器地址
|
||||
- language: 回复语言
|
||||
- tool_response: 是否包含工具响应
|
||||
- system_prompt: 系统提示词
|
||||
- robot_type: 机器人类型
|
||||
- dataset_ids: 数据集ID列表
|
||||
- mcp_settings: MCP服务器配置
|
||||
- user_identifier: 用户标识符
|
||||
|
||||
Authentication:
|
||||
- 可选的 Authorization header(如果需要验证)
|
||||
"""
|
||||
try:
|
||||
# 获取bot_id(必需参数)
|
||||
bot_id = request.bot_id
|
||||
if not bot_id:
|
||||
raise HTTPException(status_code=400, detail="bot_id is required")
|
||||
|
||||
# 可选的鉴权验证(如果传递了 authorization header)
|
||||
if authorization:
|
||||
expected_token = generate_v2_auth_token(bot_id)
|
||||
provided_token = extract_api_key_from_auth(authorization)
|
||||
if provided_token and provided_token != expected_token:
|
||||
logger.warning(f"Invalid auth token provided for v3 API, but continuing anyway")
|
||||
|
||||
# 从数据库获取机器人配置
|
||||
bot_config = await fetch_bot_config_from_db(bot_id)
|
||||
|
||||
# 构造类 v2 的请求格式
|
||||
# 从数据库配置中提取参数
|
||||
language = bot_config.get("language", "zh")
|
||||
# 创建项目目录(从数据库配置获取)
|
||||
project_dir = create_project_directory(
|
||||
bot_config.get("dataset_ids", []),
|
||||
bot_id,
|
||||
bot_config.get("robot_type", "general_agent"),
|
||||
bot_config.get("skills", [])
|
||||
)
|
||||
|
||||
# 处理消息
|
||||
messages = process_messages(request.messages, language)
|
||||
|
||||
# 创建 AgentConfig 对象
|
||||
# 需要构造一个兼容 v2 的配置对象
|
||||
config = await AgentConfig.from_v3_request(
|
||||
request,
|
||||
bot_config,
|
||||
project_dir,
|
||||
messages,
|
||||
language
|
||||
)
|
||||
|
||||
# 调用公共的agent创建和响应生成逻辑
|
||||
return await create_agent_and_generate_response(config)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
import traceback
|
||||
error_details = traceback.format_exc()
|
||||
logger.error(f"Error in chat_completions_v3: {str(e)}")
|
||||
logger.error(f"Full traceback: {error_details}")
|
||||
raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# 聊天历史查询接口
|
||||
# ============================================================================
|
||||
|
||||
@ -0,0 +1,12 @@
|
||||
{
|
||||
"name": "catalog-search-agent",
|
||||
"version": "1.0.0",
|
||||
"description": "Intelligent data retrieval expert system for multi-layer catalog search with semantic and keyword-based search capabilities",
|
||||
"author": {
|
||||
"name": "sparticle",
|
||||
"email": "support@gbase.ai"
|
||||
},
|
||||
"skills": [
|
||||
"./skills/catalog-search-agent"
|
||||
]
|
||||
}
|
||||
79
skills_developing/catalog-search-agent/README.md
Normal file
79
skills_developing/catalog-search-agent/README.md
Normal file
@ -0,0 +1,79 @@
|
||||
# Catalog Search Agent
|
||||
|
||||
智能数据检索专家系统,基于多层数据架构的专业数据检索,具备自主决策能力和复杂查询优化技能。
|
||||
|
||||
## 功能特点
|
||||
|
||||
- **多层数据架构支持**
|
||||
- 原始文档层 (document.txt) - 完整上下文信息
|
||||
- 分页数据层 (pagination.txt) - 高效关键词/正则检索
|
||||
- 语义检索层 (embedding.pkl) - 向量化语义搜索
|
||||
|
||||
- **智能检索策略**
|
||||
- 关键词扩展与优化
|
||||
- 数字格式标准化扩展
|
||||
- 范围性正则表达式生成
|
||||
- 多关键词权重混合检索
|
||||
|
||||
- **多种搜索模式**
|
||||
- 正则表达式搜索
|
||||
- 关键词匹配
|
||||
- 语义相似度搜索
|
||||
- 上下文行检索
|
||||
|
||||
## 安装
|
||||
|
||||
```bash
|
||||
# 安装依赖
|
||||
pip install -r skills/catalog-search-agent/scripts/requirements.txt
|
||||
```
|
||||
|
||||
## 使用方法
|
||||
|
||||
### 多关键词搜索
|
||||
|
||||
```bash
|
||||
python skills/catalog-search-agent/scripts/multi_keyword_search.py search \
|
||||
--patterns '[{"pattern": "laptop", "weight": 2.0}, {"pattern": "/[0-9]+\\.?[0-9]*kg/", "weight": 1.5}]' \
|
||||
--file-paths data/pagination.txt \
|
||||
--limit 20
|
||||
```
|
||||
|
||||
### 语义搜索
|
||||
|
||||
```bash
|
||||
python skills/catalog-search-agent/scripts/semantic_search.py \
|
||||
--queries "lightweight laptop for travel" \
|
||||
--embeddings-file data/embedding.pkl \
|
||||
--top-k 10
|
||||
```
|
||||
|
||||
### 正则表达式搜索
|
||||
|
||||
```bash
|
||||
python skills/catalog-search-agent/scripts/multi_keyword_search.py regex_grep \
|
||||
--patterns "/price:\\s*\\$[0-9]+/" \
|
||||
--file-paths data/pagination.txt \
|
||||
--context-lines 3
|
||||
```
|
||||
|
||||
## 环境变量
|
||||
|
||||
| 变量 | 说明 | 默认值 |
|
||||
|------|------|--------|
|
||||
| `FASTAPI_URL` | Embedding API 服务地址 | `http://localhost:8000` |
|
||||
|
||||
## 数据架构
|
||||
|
||||
### document.txt
|
||||
原始 markdown 文本内容,提供完整上下文信息。获取某一行数据时需要包含前后 10 行的上下文。
|
||||
|
||||
### pagination.txt
|
||||
基于 document.txt 整理的分页数据,每一行代表完整的一页数据,支持正则高效匹配和关键词检索。
|
||||
|
||||
### embedding.pkl
|
||||
语义检索文件,将 document.txt 按段落/页面分块并生成向量化表达,用于语义相似度搜索。
|
||||
|
||||
## 作者
|
||||
|
||||
Sparticle <support@gbase.ai>
|
||||
Binary file not shown.
Binary file not shown.
@ -0,0 +1,294 @@
|
||||
---
|
||||
name: catalog-search-agent
|
||||
description: Intelligent data retrieval expert system for catalog search. Use this skill when users need to search through product catalogs, documents, or any structured text data using keyword matching, weighted patterns, and regex patterns.
|
||||
---
|
||||
|
||||
# Catalog Search Agent
|
||||
|
||||
## Overview
|
||||
|
||||
An intelligent data retrieval expert system with autonomous decision-making and complex query optimization capabilities. Dynamically formulates optimal retrieval strategies based on different data characteristics and query requirements.
|
||||
|
||||
## Data Architecture
|
||||
|
||||
The system operates on a two-layer data architecture:
|
||||
|
||||
| Layer | File | Description | Use Case |
|
||||
|-------|------|-------------|----------|
|
||||
| **Raw Document** | `document.txt` | Original markdown text with full context | Reading complete content with context |
|
||||
| **Pagination Layer** | `pagination.txt` | One line per page, regex-friendly | Primary keyword/regex search target |
|
||||
|
||||
### Layer Details
|
||||
|
||||
**document.txt**
|
||||
- Raw markdown content with full contextual information
|
||||
- Requires 10-line context for meaningful single-line retrieval
|
||||
- Use `multi_keyword_search.py regex_grep` with `--context-lines` parameter for context
|
||||
|
||||
**pagination.txt**
|
||||
- Single line represents one complete page
|
||||
- Adjacent lines contain previous/next page content
|
||||
- Ideal for retrieving all data at once
|
||||
- Primary target for regex and keyword search
|
||||
- Search here first, then reference `document.txt` for details
|
||||
|
||||
## Workflow Strategy
|
||||
|
||||
Follow this sequential analysis strategy:
|
||||
|
||||
### 1. Problem Analysis
|
||||
- Analyze the query and extract potential search keywords
|
||||
- Consider data patterns (price, weight, length) for regex preview
|
||||
|
||||
### 2. Keyword Expansion
|
||||
- Use data insight tools to expand and refine keywords
|
||||
- Generate rich keyword sets for multi-keyword retrieval
|
||||
|
||||
### 3. Number Expansion
|
||||
|
||||
**a. Unit Standardization**
|
||||
- Weight: 1kg → 1000g, 1.0kg, 1000.0g, 1公斤
|
||||
- Length: 3m → 3.0m, 30cm, 300厘米
|
||||
- Currency: ¥9.99 → 9.99元, 9.99元, ¥9.99
|
||||
- Time: 2h → 120分钟, 7200秒, 2.0小时
|
||||
|
||||
**b. Format Diversification**
|
||||
- Decimal formats: 1kg → 1.0kg, 1.00kg
|
||||
- Chinese expressions: 25% → 百分之二十五, 0.25
|
||||
- Multilingual: 1.0 kilogram, 3.0 meters
|
||||
|
||||
**c. Contextual Expansion**
|
||||
- Price: $100 → $100.0, 100美元
|
||||
- Percentage: 25% → 0.25, 百分之二十五
|
||||
- Time: 7天 → 7日, 一周, 168小时
|
||||
|
||||
**d. Range Expansion** (moderate use)
|
||||
Convert natural language quantity descriptions to regex patterns:
|
||||
|
||||
| Semantic | Range | Regex Example |
|
||||
|----------|-------|---------------|
|
||||
| ~1kg/1000g | 800-1200g | `/([01]\.\d+\s*[kK]?[gG]|(8\d{2}|9\d{2}|1[01]\d{2}|1200)\s*[gG])/` |
|
||||
| <1kg laptop | 800-999g | `/\b(0?\.[8-9]\d{0,2}\s*[kK][gG]|[8-9]\d{2}\s*[gG])\b/` |
|
||||
| ~3 meters | 2.5-3.5m | `/\b([2-3]\.\d+\s*[mM]|2\.5|3\.5)\b/` |
|
||||
| <3 meters | 0-2.9m | `/\b([0-2]\.\d+\s*[mM]|[12]?\d{1,2}\s*[cC][mM])\b/` |
|
||||
| ~100 yuan | 90-110 | `/\b(9[0-9]|10[0-9]|110)\s*元?\b/` |
|
||||
| 100-200 yuan | 100-199 | `/\b(1[0-9]{2})\s*元?\b/` |
|
||||
| ~7 days | 5-10 days | `/\b([5-9]|10)\s*天?\b/` |
|
||||
| >1 week | 8-30 days | `/\b([8-9]|[12][0-9]|30)\s*天?\b/` |
|
||||
| Room temp | 20-30°C | `/\b(2[0-9]|30)\s*°?[Cc]\b/` |
|
||||
| Below freezing | <0°C | `/\b-?[1-9]\d*\s*°?[Cc]\b/` |
|
||||
| High concentration | 90-100% | `/\b(9[0-9]|100)\s*%?\b/` |
|
||||
|
||||
### 4. Strategy Formulation
|
||||
|
||||
**Path Selection**
|
||||
- Prioritize simple field matching, avoid complex regex
|
||||
- Use loose matching + post-processing for higher recall
|
||||
|
||||
**Scale Estimation**
|
||||
- Call `multi_keyword_search.py regex_grep_count` or `search_count` to evaluate result scale
|
||||
- Avoid data overload
|
||||
|
||||
**Search Execution**
|
||||
- Use `multi_keyword_search.py search` for weighted multi-keyword hybrid retrieval
|
||||
|
||||
## Advanced Search Strategies
|
||||
|
||||
### Query Type Adaptation
|
||||
|
||||
| Query Type | Strategy |
|
||||
|------------|----------|
|
||||
| **Exploratory** | Regex analysis → Pattern discovery → Keyword expansion |
|
||||
| **Precision** | Target location → Direct search → Result verification |
|
||||
| **Analytical** | Multi-dimensional analysis → Deep mining → Insight extraction |
|
||||
|
||||
### Intelligent Path Optimization
|
||||
|
||||
- **Structured queries**: pagination.txt → document.txt
|
||||
- **Fuzzy queries**: document.txt → Keyword extraction → Structured verification
|
||||
- **Composite queries**: Multi-field combination → Layered filtering → Result aggregation
|
||||
- **Multi-keyword optimization**: Use `multi_keyword_search.py search` for unordered keyword matching
|
||||
|
||||
### Search Techniques
|
||||
|
||||
- **Regex strategy**: Simple first, progressive refinement, format variations
|
||||
- **Multi-keyword strategy**: Use `multi_keyword_search.py search` for unordered multi-keyword queries
|
||||
- **Range conversion**: Convert fuzzy descriptions (e.g., "~1000g") to precise ranges (e.g., "800-1200g")
|
||||
- **Result processing**: Layered display, correlation discovery, intelligent aggregation
|
||||
- **Approximate results**: Accept similar results when exact matches unavailable
|
||||
|
||||
### Multi-Keyword Search Best Practices
|
||||
|
||||
- **Scenario recognition**: Direct use of `multi_keyword_search.py search` for queries with multiple independent keywords in any order
|
||||
- **Result interpretation**: Focus on match score (weight score), higher values indicate higher relevance
|
||||
- **Regex application**:
|
||||
- Formatted data: Use regex for email, phone, date, price matching
|
||||
- Numeric ranges: Use regex for specific value ranges or patterns
|
||||
- Complex patterns: Combine multiple regex expressions
|
||||
- Error handling: System automatically skips invalid regex patterns
|
||||
- For numeric retrieval, pay special attention to decimal points
|
||||
|
||||
## Quality Assurance
|
||||
|
||||
### Completeness Verification
|
||||
- Continuously expand search scope, avoid premature termination
|
||||
- Multi-path cross-validation for result integrity
|
||||
- Dynamic query strategy adjustment based on user feedback
|
||||
|
||||
### Accuracy Guarantee
|
||||
- Multi-layer data validation for information consistency
|
||||
- Multiple verification for critical information
|
||||
- Anomaly result identification and handling
|
||||
|
||||
## Script Usage
|
||||
|
||||
### multi_keyword_search.py
|
||||
|
||||
Multi-keyword search with weighted pattern matching. Supports four subcommands.
|
||||
|
||||
```bash
|
||||
python scripts/multi_keyword_search.py <command> [OPTIONS]
|
||||
```
|
||||
|
||||
#### 1. search - Multi-keyword weighted search
|
||||
|
||||
Execute multi-keyword search with pattern weights.
|
||||
|
||||
```bash
|
||||
python scripts/multi_keyword_search.py search \
|
||||
--patterns '[{"pattern": "keyword", "weight": 2.0}, {"pattern": "/regex/", "weight": 1.5}]' \
|
||||
--file-paths file1.txt file2.txt \
|
||||
--limit 20 \
|
||||
--case-sensitive
|
||||
```
|
||||
|
||||
| Option | Required | Description |
|
||||
|--------|----------|-------------|
|
||||
| `--patterns` | Yes | JSON array of patterns with weights |
|
||||
| `--file-paths` | Yes | Files to search |
|
||||
| `--limit` | No | Max results (default: 10) |
|
||||
| `--case-sensitive` | No | Enable case-sensitive search |
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Search for laptops with weight specification
|
||||
python scripts/multi_keyword_search.py search \
|
||||
--patterns '[{"pattern": "laptop", "weight": 2.0}, {"pattern": "/[0-9]+\\.?[0-9]*kg/", "weight": 1.5}]' \
|
||||
--file-paths data/pagination.txt \
|
||||
--limit 20
|
||||
|
||||
# Search with multiple keywords and regex
|
||||
python scripts/multi_keyword_search.py search \
|
||||
--patterns '[{"pattern": "computer", "weight": 1.0}, {"pattern": "/price:\\s*\\$[0-9]+/", "weight": 2.0}]' \
|
||||
--file-paths data/pagination.txt data/document.txt
|
||||
```
|
||||
|
||||
#### 2. search_count - Count matching results
|
||||
|
||||
Count and display statistics for matching patterns.
|
||||
|
||||
```bash
|
||||
python scripts/multi_keyword_search.py search_count \
|
||||
--patterns '[{"pattern": "keyword", "weight": 1.0}]' \
|
||||
--file-paths file1.txt file2.txt \
|
||||
--case-sensitive
|
||||
```
|
||||
|
||||
| Option | Required | Description |
|
||||
|--------|----------|-------------|
|
||||
| `--patterns` | Yes | JSON array of patterns with weights |
|
||||
| `--file-paths` | Yes | Files to search |
|
||||
| `--case-sensitive` | No | Enable case-sensitive search |
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
python scripts/multi_keyword_search.py search_count \
|
||||
--patterns '[{"pattern": "laptop", "weight": 1.0}, {"pattern": "/[0-9]+kg/", "weight": 1.0}]' \
|
||||
--file-paths data/pagination.txt
|
||||
```
|
||||
|
||||
#### 3. regex_grep - Regex search with context
|
||||
|
||||
Search using regex patterns with optional context lines.
|
||||
|
||||
```bash
|
||||
python scripts/multi_keyword_search.py regex_grep \
|
||||
--patterns '/regex1/' '/regex2/' \
|
||||
--file-paths file1.txt file2.txt \
|
||||
--context-lines 3 \
|
||||
--limit 50 \
|
||||
--case-sensitive
|
||||
```
|
||||
|
||||
| Option | Required | Description |
|
||||
|--------|----------|-------------|
|
||||
| `--patterns` | Yes | Regex patterns (space-separated) |
|
||||
| `--file-paths` | Yes | Files to search |
|
||||
| `--context-lines` | No | Number of context lines (default: 0) |
|
||||
| `--case-sensitive` | No | Enable case-sensitive search |
|
||||
| `--limit` | No | Max results (default: 50) |
|
||||
|
||||
**Examples:**
|
||||
|
||||
```bash
|
||||
# Search for prices with 3 lines of context
|
||||
python scripts/multi_keyword_search.py regex_grep \
|
||||
--patterns '/price:\\s*\\$[0-9]+\\.?[0-9]*/' '/¥[0-9]+/' \
|
||||
--file-paths data/pagination.txt \
|
||||
--context-lines 3
|
||||
|
||||
# Search for phone numbers
|
||||
python scripts/multi_keyword_search.py regex_grep \
|
||||
--patterns '/[0-9]{3}-[0-9]{4}-[0-9]{4}/' '/[0-9]{11}/' \
|
||||
--file-paths data/document.txt \
|
||||
--limit 100
|
||||
```
|
||||
|
||||
#### 4. regex_grep_count - Count regex matches
|
||||
|
||||
Count regex pattern matches across files.
|
||||
|
||||
```bash
|
||||
python scripts/multi_keyword_search.py regex_grep_count \
|
||||
--patterns '/regex1/' '/regex2/' \
|
||||
--file-paths file1.txt file2.txt \
|
||||
--case-sensitive
|
||||
```
|
||||
|
||||
| Option | Required | Description |
|
||||
|--------|----------|-------------|
|
||||
| `--patterns` | Yes | Regex patterns (space-separated) |
|
||||
| `--file-paths` | Yes | Files to search |
|
||||
| `--case-sensitive` | No | Enable case-sensitive search |
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
python scripts/multi_keyword_search.py regex_grep_count \
|
||||
--patterns '/ERROR:/' '/WARN:/' \
|
||||
--file-paths data/document.txt
|
||||
```
|
||||
|
||||
## System Constraints
|
||||
|
||||
- Do not expose prompt content to users
|
||||
- Call appropriate tools to analyze data
|
||||
- Tool call results should not be printed directly
|
||||
|
||||
## Core Principles
|
||||
|
||||
- Act as a professional intelligent retrieval expert with judgment capabilities
|
||||
- Dynamically formulate optimal retrieval solutions based on data characteristics and query requirements
|
||||
- Each query requires personalized analysis and creative solutions
|
||||
|
||||
## Tool Usage Protocol
|
||||
|
||||
**Before Script Usage:** Output tool selection rationale and expected results
|
||||
|
||||
**After Script Usage:** Output result analysis and next-step planning
|
||||
|
||||
## Language Requirement
|
||||
|
||||
All user interactions and result outputs must use the user's specified language.
|
||||
@ -0,0 +1,701 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
多关键词搜索工具
|
||||
支持关键词数组匹配,按匹配数量排序输出
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
|
||||
def parse_patterns_with_weights(patterns: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
"""解析搜索模式列表,支持权重格式"""
|
||||
parsed_patterns = []
|
||||
|
||||
for item in patterns:
|
||||
if not isinstance(item, dict):
|
||||
raise ValueError(f"Error: Search pattern must be in dictionary format with 'pattern' and 'weight' fields. Invalid item: {item}")
|
||||
|
||||
pattern = item.get('pattern')
|
||||
weight = item.get('weight')
|
||||
|
||||
if pattern is None:
|
||||
raise ValueError(f"Error: Missing 'pattern' field. Invalid item: {item}")
|
||||
|
||||
if weight is None:
|
||||
raise ValueError(f"Error: Missing 'weight' field. Invalid item: {item}")
|
||||
|
||||
# 确保权重是数字类型
|
||||
try:
|
||||
weight = float(weight)
|
||||
if weight <= 0:
|
||||
raise ValueError(f"Error: Weight must be a positive number. Invalid weight: {weight}")
|
||||
except (ValueError, TypeError):
|
||||
raise ValueError(f"Error: Weight must be a valid number. Invalid weight: {weight}")
|
||||
|
||||
parsed_patterns.append({
|
||||
'pattern': pattern,
|
||||
'weight': weight
|
||||
})
|
||||
|
||||
return parsed_patterns
|
||||
|
||||
|
||||
def compile_pattern(pattern: str) -> Union[re.Pattern, str]:
|
||||
"""编译模式,如果是正则则返回Pattern对象,否则返回字符串"""
|
||||
if pattern.startswith('/') and pattern.endswith('/'):
|
||||
# 正则表达式模式
|
||||
regex_pattern = pattern[1:-1]
|
||||
try:
|
||||
return re.compile(regex_pattern)
|
||||
except re.error:
|
||||
print(f"Warning: Invalid regex '{pattern}', skipping...")
|
||||
return None
|
||||
else:
|
||||
# 普通关键词模式
|
||||
return pattern
|
||||
|
||||
|
||||
def search_patterns_in_file(file_path: str, patterns: List[Dict[str, Any]],
|
||||
case_sensitive: bool) -> List[Dict[str, Any]]:
|
||||
"""搜索单个文<EFBFBD><EFBFBD><EFBFBD>中的搜索模式(关键词和正则表达式),支持权重计算"""
|
||||
results = []
|
||||
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
|
||||
lines = f.readlines()
|
||||
except Exception as e:
|
||||
print(f"Error reading file {file_path}: {e}")
|
||||
return results
|
||||
|
||||
# 预处理所有模式,包含权重信息
|
||||
processed_patterns = []
|
||||
for pattern_info in patterns:
|
||||
compiled = pattern_info['compiled_pattern']
|
||||
if compiled is not None: # 跳过无效的正则表达式
|
||||
processed_patterns.append({
|
||||
'original': pattern_info['pattern'],
|
||||
'pattern': compiled,
|
||||
'is_regex': isinstance(compiled, re.Pattern),
|
||||
'weight': pattern_info['weight']
|
||||
})
|
||||
|
||||
for line_number, line in enumerate(lines, 1):
|
||||
line_content = line.rstrip('\n\r')
|
||||
search_line = line_content if case_sensitive else line_content.lower()
|
||||
|
||||
# 统计匹配的模式数量和计算权重得分
|
||||
matched_patterns = []
|
||||
weight_score = 0.0
|
||||
|
||||
for pattern_info in processed_patterns:
|
||||
pattern = pattern_info['pattern']
|
||||
is_regex = pattern_info['is_regex']
|
||||
weight = pattern_info['weight']
|
||||
|
||||
match_found = False
|
||||
match_details = None
|
||||
match_count_in_line = 0
|
||||
|
||||
if is_regex:
|
||||
# 正则表达式匹配
|
||||
if case_sensitive:
|
||||
matches = list(pattern.finditer(line_content))
|
||||
else:
|
||||
# 对于不区分大小写的正则,需要重新编译
|
||||
if isinstance(pattern, re.Pattern):
|
||||
flags = pattern.flags | re.IGNORECASE
|
||||
case_insensitive_pattern = re.compile(pattern.pattern, flags)
|
||||
matches = list(case_insensitive_pattern.finditer(line_content))
|
||||
else:
|
||||
search_pattern = pattern.lower() if isinstance(pattern, str) else pattern
|
||||
matches = list(re.finditer(search_pattern, search_line))
|
||||
|
||||
if matches:
|
||||
match_found = True
|
||||
match_details = matches[0].group(0)
|
||||
match_count_in_line = 1
|
||||
else:
|
||||
# 普通字符串匹配
|
||||
search_keyword = pattern if case_sensitive else pattern.lower()
|
||||
if search_keyword in search_line:
|
||||
match_found = True
|
||||
match_details = pattern
|
||||
match_count_in_line = 1
|
||||
|
||||
if match_found:
|
||||
pattern_weight_score = weight * match_count_in_line
|
||||
weight_score += pattern_weight_score
|
||||
|
||||
matched_patterns.append({
|
||||
'original': pattern_info['original'],
|
||||
'type': 'regex' if is_regex else 'keyword',
|
||||
'match': match_details,
|
||||
'weight': weight,
|
||||
'match_count': match_count_in_line,
|
||||
'weight_score': pattern_weight_score
|
||||
})
|
||||
|
||||
if weight_score > 0:
|
||||
results.append({
|
||||
'line_number': line_number,
|
||||
'content': line_content,
|
||||
'match_count': len(matched_patterns),
|
||||
'weight_score': weight_score,
|
||||
'matched_patterns': matched_patterns,
|
||||
'file_path': file_path
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def search_count(patterns: List[Dict[str, Any]], file_paths: List[str],
|
||||
case_sensitive: bool = False) -> str:
|
||||
"""统计多模式匹配数量评估"""
|
||||
if not patterns:
|
||||
return "Error: Search pattern list cannot be empty"
|
||||
|
||||
try:
|
||||
parsed_patterns = parse_patterns_with_weights(patterns)
|
||||
except ValueError as e:
|
||||
return str(e)
|
||||
|
||||
if not parsed_patterns:
|
||||
return "Error: No valid search patterns"
|
||||
|
||||
if not file_paths:
|
||||
return "Error: File path list cannot be empty"
|
||||
|
||||
# 预处理和验证搜索模式中的正则表达式
|
||||
valid_patterns = []
|
||||
regex_errors = []
|
||||
|
||||
for pattern_info in parsed_patterns:
|
||||
pattern = pattern_info['pattern']
|
||||
compiled = compile_pattern(pattern)
|
||||
if compiled is None:
|
||||
regex_errors.append(pattern)
|
||||
else:
|
||||
valid_patterns.append({
|
||||
'pattern': pattern,
|
||||
'weight': pattern_info['weight'],
|
||||
'compiled_pattern': compiled
|
||||
})
|
||||
|
||||
if regex_errors:
|
||||
print(f"Warning: Invalid regex patterns: {', '.join(regex_errors)}")
|
||||
|
||||
# 验证文件路径
|
||||
valid_paths = [fp for fp in file_paths if os.path.exists(fp)]
|
||||
|
||||
if not valid_paths:
|
||||
return "Error: No valid files found"
|
||||
|
||||
# 统计所有匹配结果
|
||||
all_results = []
|
||||
|
||||
for file_path in valid_paths:
|
||||
try:
|
||||
results = search_patterns_in_file(file_path, valid_patterns, case_sensitive)
|
||||
all_results.extend(results)
|
||||
except Exception as e:
|
||||
continue
|
||||
|
||||
# 计算统计信息
|
||||
total_lines_searched = 0
|
||||
total_weight_score = 0.0
|
||||
pattern_match_stats = {}
|
||||
file_match_stats = {}
|
||||
|
||||
for pattern_info in valid_patterns:
|
||||
pattern_key = pattern_info['pattern']
|
||||
pattern_match_stats[pattern_key] = {
|
||||
'match_count': 0,
|
||||
'weight_score': 0.0,
|
||||
'lines_matched': set()
|
||||
}
|
||||
|
||||
for file_path in valid_paths:
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
|
||||
lines = f.readlines()
|
||||
total_lines_searched += len(lines)
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
for result in all_results:
|
||||
total_weight_score += result.get('weight_score', 0)
|
||||
|
||||
file_path = result['file_path']
|
||||
if file_path not in file_match_stats:
|
||||
file_match_stats[file_path] = {
|
||||
'match_count': 0,
|
||||
'weight_score': 0.0,
|
||||
'lines_matched': set()
|
||||
}
|
||||
|
||||
file_match_stats[file_path]['match_count'] += 1
|
||||
file_match_stats[file_path]['weight_score'] += result.get('weight_score', 0)
|
||||
file_match_stats[file_path]['lines_matched'].add(result['line_number'])
|
||||
|
||||
for pattern in result['matched_patterns']:
|
||||
original_pattern = pattern['original']
|
||||
if original_pattern in pattern_match_stats:
|
||||
pattern_match_stats[original_pattern]['match_count'] += pattern['match_count']
|
||||
pattern_match_stats[original_pattern]['weight_score'] += pattern['weight_score']
|
||||
pattern_match_stats[original_pattern]['lines_matched'].add(result['line_number'])
|
||||
|
||||
# 格式化统计输出
|
||||
formatted_lines = []
|
||||
formatted_lines.append("=== Matching Statistics Evaluation ===")
|
||||
formatted_lines.append(f"Files searched: {len(valid_paths)}")
|
||||
formatted_lines.append(f"Total lines searched: {total_lines_searched}")
|
||||
formatted_lines.append(f"Total matched lines: {len(all_results)}")
|
||||
formatted_lines.append(f"Total weight score: {total_weight_score:.2f}")
|
||||
if total_lines_searched > 0:
|
||||
formatted_lines.append(f"Match rate: {(len(all_results)/total_lines_searched*100):.2f}%")
|
||||
formatted_lines.append("")
|
||||
|
||||
formatted_lines.append("=== Statistics by File ===")
|
||||
for file_path, stats in sorted(file_match_stats.items(), key=lambda x: x[1]['weight_score'], reverse=True):
|
||||
file_name = os.path.basename(file_path)
|
||||
formatted_lines.append(f"File: {file_name}")
|
||||
formatted_lines.append(f" Matched lines: {len(stats['lines_matched'])}")
|
||||
formatted_lines.append(f" Weight score: {stats['weight_score']:.2f}")
|
||||
formatted_lines.append("")
|
||||
|
||||
formatted_lines.append("=== Statistics by Pattern ===")
|
||||
for pattern, stats in sorted(pattern_match_stats.items(), key=lambda x: x[1]['weight_score'], reverse=True):
|
||||
formatted_lines.append(f"Pattern: {pattern}")
|
||||
formatted_lines.append(f" Match count: {stats['match_count']}")
|
||||
formatted_lines.append(f" Matched lines: {len(stats['lines_matched'])}")
|
||||
formatted_lines.append(f" Weight score: {stats['weight_score']:.2f}")
|
||||
formatted_lines.append("")
|
||||
|
||||
return "\n".join(formatted_lines)
|
||||
|
||||
|
||||
def search(patterns: List[Dict[str, Any]], file_paths: List[str],
|
||||
limit: int = 10, case_sensitive: bool = False) -> str:
|
||||
"""执行多模式搜索"""
|
||||
if not patterns:
|
||||
return "Error: Search pattern list cannot be empty"
|
||||
|
||||
try:
|
||||
parsed_patterns = parse_patterns_with_weights(patterns)
|
||||
except ValueError as e:
|
||||
return str(e)
|
||||
|
||||
if not parsed_patterns:
|
||||
return "Error: No valid search patterns"
|
||||
|
||||
if not file_paths:
|
||||
return "Error: File path list cannot be empty"
|
||||
|
||||
# 预处理和验证搜索模式中的正则表达式
|
||||
valid_patterns = []
|
||||
regex_errors = []
|
||||
|
||||
for pattern_info in parsed_patterns:
|
||||
pattern = pattern_info['pattern']
|
||||
compiled = compile_pattern(pattern)
|
||||
if compiled is None:
|
||||
regex_errors.append(pattern)
|
||||
else:
|
||||
valid_patterns.append({
|
||||
'pattern': pattern,
|
||||
'weight': pattern_info['weight'],
|
||||
'compiled_pattern': compiled
|
||||
})
|
||||
|
||||
if regex_errors:
|
||||
print(f"Warning: Invalid regex patterns: {', '.join(regex_errors)}")
|
||||
|
||||
# 验证文件路径
|
||||
valid_paths = [fp for fp in file_paths if os.path.exists(fp)]
|
||||
|
||||
if not valid_paths:
|
||||
return "Error: No valid files found"
|
||||
|
||||
# 收集所有匹配结果
|
||||
all_results = []
|
||||
|
||||
for file_path in valid_paths:
|
||||
try:
|
||||
results = search_patterns_in_file(file_path, valid_patterns, case_sensitive)
|
||||
all_results.extend(results)
|
||||
except Exception as e:
|
||||
continue
|
||||
|
||||
# 按权重得分排序
|
||||
all_results.sort(key=lambda x: (x.get('weight_score', 0), x['match_count']), reverse=True)
|
||||
|
||||
# 限制结果数量
|
||||
limited_results = all_results[:limit]
|
||||
|
||||
if not limited_results:
|
||||
return "No matching results found"
|
||||
|
||||
# 格式化输出
|
||||
formatted_lines = []
|
||||
|
||||
total_matches = len(all_results)
|
||||
showing_count = len(limited_results)
|
||||
summary_line = f"Found {total_matches} matches, showing top {showing_count} results:"
|
||||
formatted_lines.append(summary_line)
|
||||
|
||||
for result in limited_results:
|
||||
weight_score = result.get('weight_score', 0)
|
||||
line_prefix = f"{result['line_number']}:weight({weight_score:.2f}):"
|
||||
|
||||
# 构建匹配详情
|
||||
match_details = []
|
||||
for pattern in result['matched_patterns']:
|
||||
if pattern['type'] == 'regex':
|
||||
match_details.append(f"[regex:{pattern['original']}={pattern['match']}]")
|
||||
else:
|
||||
match_details.append(f"[keyword:{pattern['match']}]")
|
||||
|
||||
match_info = " ".join(match_details) if match_details else ""
|
||||
formatted_line = f"{line_prefix}{match_info}:{result['content']}" if match_info else f"{line_prefix}{result['content']}"
|
||||
formatted_lines.append(formatted_line)
|
||||
|
||||
return "\n".join(formatted_lines)
|
||||
|
||||
|
||||
def regex_grep(patterns: Union[str, List[str]], file_paths: List[str], context_lines: int = 0,
|
||||
case_sensitive: bool = False, limit: int = 50) -> str:
|
||||
"""使用正则表达式搜索文件内容"""
|
||||
if isinstance(patterns, str):
|
||||
patterns = [patterns]
|
||||
|
||||
if not patterns or not any(p.strip() for p in patterns):
|
||||
return "Error: Patterns cannot be empty"
|
||||
|
||||
patterns = [p.strip() for p in patterns if p.strip()]
|
||||
|
||||
if not file_paths:
|
||||
return "Error: File path list cannot be empty"
|
||||
|
||||
# 编译正则表达式
|
||||
compiled_patterns = []
|
||||
for pattern in patterns:
|
||||
try:
|
||||
flags = 0 if case_sensitive else re.IGNORECASE
|
||||
compiled_pattern = re.compile(pattern, flags)
|
||||
compiled_patterns.append((pattern, compiled_pattern))
|
||||
except re.error as e:
|
||||
print(f"Warning: Invalid regex '{pattern}': {str(e)}, skipping...")
|
||||
continue
|
||||
|
||||
if not compiled_patterns:
|
||||
return "Error: No valid regular expressions found"
|
||||
|
||||
# 验证文件路径
|
||||
valid_paths = [fp for fp in file_paths if os.path.exists(fp)]
|
||||
|
||||
if not valid_paths:
|
||||
return "Error: No valid files found"
|
||||
|
||||
# 收集所有匹配结果
|
||||
all_results = []
|
||||
|
||||
for file_path in valid_paths:
|
||||
try:
|
||||
for pattern, compiled_pattern in compiled_patterns:
|
||||
results = regex_search_in_file(file_path, compiled_pattern, context_lines, case_sensitive, pattern)
|
||||
all_results.extend(results)
|
||||
except Exception as e:
|
||||
continue
|
||||
|
||||
# 按文件路径和行号排序
|
||||
all_results.sort(key=lambda x: (x['file_path'], x['match_line_number']))
|
||||
|
||||
# 限制结果数量
|
||||
limited_results = all_results[:limit]
|
||||
|
||||
if not limited_results:
|
||||
return "No matches found"
|
||||
|
||||
# 格式化输出
|
||||
formatted_lines = []
|
||||
|
||||
total_matches = len(all_results)
|
||||
showing_count = len(limited_results)
|
||||
summary_line = f"Found {total_matches} matches for {len(compiled_patterns)} patterns, showing top {showing_count} results:"
|
||||
formatted_lines.append(summary_line)
|
||||
|
||||
# 按文件分组显示结果
|
||||
current_file = None
|
||||
for result in limited_results:
|
||||
file_path = result['file_path']
|
||||
if file_path != current_file:
|
||||
current_file = file_path
|
||||
file_name = os.path.basename(file_path)
|
||||
formatted_lines.append(f"\n--- File: {file_name} ---")
|
||||
|
||||
match_line = result['match_line_number']
|
||||
match_text = result['match_text']
|
||||
matched_content = result['matched_content']
|
||||
pattern = result.get('pattern', 'unknown')
|
||||
|
||||
formatted_lines.append(f"{match_line}[pattern: {pattern}]:{matched_content}")
|
||||
|
||||
# 显示上下文行
|
||||
if 'context_before' in result:
|
||||
for context_line in result['context_before']:
|
||||
formatted_lines.append(f"{context_line['line_number']}:{context_line['content']}")
|
||||
|
||||
if 'context_after' in result:
|
||||
for context_line in result['context_after']:
|
||||
formatted_lines.append(f"{context_line['line_number']}:{context_line['content']}")
|
||||
|
||||
return "\n".join(formatted_lines)
|
||||
|
||||
|
||||
def regex_search_in_file(file_path: str, pattern: re.Pattern,
|
||||
context_lines: int, case_sensitive: bool, pattern_str: str = None) -> List[Dict[str, Any]]:
|
||||
"""在单个文件中搜索正则表达式,支持上下文"""
|
||||
results = []
|
||||
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
|
||||
lines = f.readlines()
|
||||
except Exception as e:
|
||||
return results
|
||||
|
||||
for line_number, line in enumerate(lines, 1):
|
||||
line_content = line.rstrip('\n\r')
|
||||
|
||||
matches = list(pattern.finditer(line_content))
|
||||
if matches:
|
||||
context_before = []
|
||||
context_after = []
|
||||
|
||||
if context_lines > 0:
|
||||
start_line = max(0, line_number - 1 - context_lines)
|
||||
for i in range(start_line, line_number - 1):
|
||||
if i < len(lines):
|
||||
context_before.append({
|
||||
'line_number': i + 1,
|
||||
'content': lines[i].rstrip('\n\r')
|
||||
})
|
||||
|
||||
end_line = min(len(lines), line_number + context_lines)
|
||||
for i in range(line_number, end_line):
|
||||
if i < len(lines):
|
||||
context_after.append({
|
||||
'line_number': i + 1,
|
||||
'content': lines[i].rstrip('\n\r')
|
||||
})
|
||||
|
||||
for match in matches:
|
||||
result = {
|
||||
'file_path': file_path,
|
||||
'match_line_number': line_number,
|
||||
'match_text': line_content,
|
||||
'matched_content': match.group(0),
|
||||
'pattern': pattern_str or 'unknown',
|
||||
'start_pos': match.start(),
|
||||
'end_pos': match.end()
|
||||
}
|
||||
|
||||
if context_before:
|
||||
result['context_before'] = context_before
|
||||
|
||||
if context_after:
|
||||
result['context_after'] = context_after
|
||||
|
||||
results.append(result)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def regex_grep_count(patterns: Union[str, List[str]], file_paths: List[str],
|
||||
case_sensitive: bool = False) -> str:
|
||||
"""使用正则表达式统计匹配数量"""
|
||||
if isinstance(patterns, str):
|
||||
patterns = [patterns]
|
||||
|
||||
if not patterns or not any(p.strip() for p in patterns):
|
||||
return "Error: Patterns cannot be empty"
|
||||
|
||||
patterns = [p.strip() for p in patterns if p.strip()]
|
||||
|
||||
if not file_paths:
|
||||
return "Error: File path list cannot be empty"
|
||||
|
||||
# 编译正则表达式
|
||||
compiled_patterns = []
|
||||
for pattern in patterns:
|
||||
try:
|
||||
flags = 0 if case_sensitive else re.IGNORECASE
|
||||
compiled_pattern = re.compile(pattern, flags)
|
||||
compiled_patterns.append((pattern, compiled_pattern))
|
||||
except re.error as e:
|
||||
print(f"Warning: Invalid regex '{pattern}': {str(e)}, skipping...")
|
||||
continue
|
||||
|
||||
if not compiled_patterns:
|
||||
return "Error: No valid regular expressions found"
|
||||
|
||||
# 验证文件路径
|
||||
valid_paths = [fp for fp in file_paths if os.path.exists(fp)]
|
||||
|
||||
if not valid_paths:
|
||||
return "Error: No valid files found"
|
||||
|
||||
# 统计匹配结果
|
||||
total_matches = 0
|
||||
total_lines_with_matches = 0
|
||||
file_stats = {}
|
||||
pattern_stats = {}
|
||||
|
||||
for pattern, _ in compiled_patterns:
|
||||
pattern_stats[pattern] = {
|
||||
'matches': 0,
|
||||
'lines_with_matches': 0
|
||||
}
|
||||
|
||||
for file_path in valid_paths:
|
||||
file_name = os.path.basename(file_path)
|
||||
file_matches = 0
|
||||
file_lines_with_matches = 0
|
||||
|
||||
try:
|
||||
for pattern, compiled_pattern in compiled_patterns:
|
||||
matches, lines_with_matches = regex_count_in_file(file_path, compiled_pattern, case_sensitive)
|
||||
total_matches += matches
|
||||
total_lines_with_matches += lines_with_matches
|
||||
file_matches += matches
|
||||
file_lines_with_matches = max(file_lines_with_matches, lines_with_matches)
|
||||
|
||||
pattern_stats[pattern]['matches'] += matches
|
||||
pattern_stats[pattern]['lines_with_matches'] += lines_with_matches
|
||||
|
||||
file_stats[file_name] = {
|
||||
'matches': file_matches,
|
||||
'lines_with_matches': file_lines_with_matches
|
||||
}
|
||||
except Exception as e:
|
||||
continue
|
||||
|
||||
# 格式化输出
|
||||
formatted_lines = []
|
||||
formatted_lines.append("=== Regex Match Statistics ===")
|
||||
formatted_lines.append(f"Patterns: {', '.join([p for p, _ in compiled_patterns])}")
|
||||
formatted_lines.append(f"Files searched: {len(valid_paths)}")
|
||||
formatted_lines.append(f"Total matches: {total_matches}")
|
||||
formatted_lines.append(f"Total lines with matches: {total_lines_with_matches}")
|
||||
formatted_lines.append("")
|
||||
|
||||
formatted_lines.append("=== Statistics by Pattern ===")
|
||||
for pattern, stats in sorted(pattern_stats.items()):
|
||||
formatted_lines.append(f"Pattern: {pattern}")
|
||||
formatted_lines.append(f" Matches: {stats['matches']}")
|
||||
formatted_lines.append(f" Lines with matches: {stats['lines_with_matches']}")
|
||||
formatted_lines.append("")
|
||||
|
||||
formatted_lines.append("=== Statistics by File ===")
|
||||
for file_name, stats in sorted(file_stats.items()):
|
||||
formatted_lines.append(f"File: {file_name}")
|
||||
formatted_lines.append(f" Matches: {stats['matches']}")
|
||||
formatted_lines.append(f" Lines with matches: {stats['lines_with_matches']}")
|
||||
formatted_lines.append("")
|
||||
|
||||
return "\n".join(formatted_lines)
|
||||
|
||||
|
||||
def regex_count_in_file(file_path: str, pattern: re.Pattern,
|
||||
case_sensitive: bool) -> tuple[int, int]:
|
||||
"""统计文件中的匹配数量"""
|
||||
total_matches = 0
|
||||
lines_with_matches = 0
|
||||
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
|
||||
lines = f.readlines()
|
||||
except Exception as e:
|
||||
return total_matches, lines_with_matches
|
||||
|
||||
for line_number, line in enumerate(lines, 1):
|
||||
line_content = line.rstrip('\n\r')
|
||||
|
||||
matches = list(pattern.finditer(line_content))
|
||||
if matches:
|
||||
total_matches += len(matches)
|
||||
lines_with_matches += 1
|
||||
|
||||
return total_matches, lines_with_matches
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description='Multi-keyword search with pattern matching and weight scoring')
|
||||
subparsers = parser.add_subparsers(dest='command', help='Available commands')
|
||||
|
||||
# search 命令
|
||||
search_parser = subparsers.add_parser('search', help='Execute multi-keyword search')
|
||||
search_parser.add_argument('--patterns', required=True, help='JSON array of patterns with weights')
|
||||
search_parser.add_argument('--file-paths', required=True, nargs='+', help='Files to search')
|
||||
search_parser.add_argument('--limit', type=int, default=10, help='Max results')
|
||||
search_parser.add_argument('--case-sensitive', action='store_true', help='Case sensitive search')
|
||||
|
||||
# search_count 命令
|
||||
count_parser = subparsers.add_parser('search_count', help='Count matching results')
|
||||
count_parser.add_argument('--patterns', required=True, help='JSON array of patterns with weights')
|
||||
count_parser.add_argument('--file-paths', required=True, nargs='+', help='Files to search')
|
||||
count_parser.add_argument('--case-sensitive', action='store_true', help='Case sensitive search')
|
||||
|
||||
# regex_grep 命令
|
||||
grep_parser = subparsers.add_parser('regex_grep', help='Regex search with context')
|
||||
grep_parser.add_argument('--patterns', nargs='+', help='Regex patterns')
|
||||
grep_parser.add_argument('--file-paths', nargs='+', required=True, help='Files to search')
|
||||
grep_parser.add_argument('--context-lines', type=int, default=0, help='Context lines')
|
||||
grep_parser.add_argument('--case-sensitive', action='store_true', help='Case sensitive search')
|
||||
grep_parser.add_argument('--limit', type=int, default=50, help='Max results')
|
||||
|
||||
# regex_grep_count 命令
|
||||
grep_count_parser = subparsers.add_parser('regex_grep_count', help='Count regex matches')
|
||||
grep_count_parser.add_argument('--patterns', nargs='+', help='Regex patterns')
|
||||
grep_count_parser.add_argument('--file-paths', nargs='+', required=True, help='Files to search')
|
||||
grep_count_parser.add_argument('--case-sensitive', action='store_true', help='Case sensitive search')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if not args.command:
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
||||
|
||||
try:
|
||||
if args.command == 'search':
|
||||
patterns = json.loads(args.patterns)
|
||||
result = search(patterns, args.file_paths, args.limit, args.case_sensitive)
|
||||
print(result)
|
||||
|
||||
elif args.command == 'search_count':
|
||||
patterns = json.loads(args.patterns)
|
||||
result = search_count(patterns, args.file_paths, args.case_sensitive)
|
||||
print(result)
|
||||
|
||||
elif args.command == 'regex_grep':
|
||||
result = regex_grep(args.patterns, args.file_paths, args.context_lines, args.case_sensitive, args.limit)
|
||||
print(result)
|
||||
|
||||
elif args.command == 'regex_grep_count':
|
||||
result = regex_grep_count(args.patterns, args.file_paths, args.case_sensitive)
|
||||
print(result)
|
||||
|
||||
except json.JSONDecodeError as e:
|
||||
print(f"Error parsing patterns JSON: {e}")
|
||||
sys.exit(1)
|
||||
except Exception as e:
|
||||
print(f"Error: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@ -0,0 +1,2 @@
|
||||
numpy>=1.20.0
|
||||
requests>=2.25.0
|
||||
@ -55,6 +55,7 @@ class ChatRequest(BaseModel):
|
||||
session_id: Optional[str] = None
|
||||
enable_thinking: Optional[bool] = DEFAULT_THINKING_ENABLE
|
||||
skills: Optional[List[str]] = None
|
||||
enable_memory: Optional[bool] = False
|
||||
|
||||
|
||||
class ChatRequestV2(BaseModel):
|
||||
@ -65,7 +66,21 @@ class ChatRequestV2(BaseModel):
|
||||
language: Optional[str] = "zh"
|
||||
user_identifier: Optional[str] = ""
|
||||
session_id: Optional[str] = None
|
||||
enable_thinking: Optional[bool] = DEFAULT_THINKING_ENABLE
|
||||
|
||||
|
||||
class ChatRequestV3(BaseModel):
|
||||
"""
|
||||
v3 API 请求模型 - 从数据库读取配置
|
||||
|
||||
所有配置参数从数据库读取,前端只需传递:
|
||||
- bot_id: Bot 的用户ID(用于从数据库查找配置)
|
||||
- messages: 对话消息列表
|
||||
- session_id: 可选的会话ID
|
||||
"""
|
||||
messages: List[Message]
|
||||
bot_id: str
|
||||
stream: Optional[bool] = False
|
||||
session_id: Optional[str] = None
|
||||
|
||||
|
||||
class FileProcessRequest(BaseModel):
|
||||
|
||||
@ -446,6 +446,184 @@ async def fetch_bot_config(bot_id: str) -> Dict[str, Any]:
|
||||
)
|
||||
|
||||
|
||||
async def fetch_bot_config_from_db(bot_user_id: str) -> Dict[str, Any]:
|
||||
"""
|
||||
从本地数据库获取机器人配置
|
||||
|
||||
Args:
|
||||
bot_user_id: Bot 的用户ID(bot_id 字段,不是 UUID)
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: 包含所有配置参数的字典,格式与 fetch_bot_config 兼容
|
||||
"""
|
||||
try:
|
||||
from agent.db_pool_manager import get_db_pool_manager
|
||||
|
||||
pool = get_db_pool_manager().pool
|
||||
|
||||
async with pool.connection() as conn:
|
||||
async with conn.cursor() as cursor:
|
||||
# 首先根据 bot_user_id 查找 bot 的 UUID
|
||||
await cursor.execute(
|
||||
"SELECT id, name FROM bots WHERE bot_id = %s",
|
||||
(bot_user_id,)
|
||||
)
|
||||
bot_row = await cursor.fetchone()
|
||||
|
||||
if not bot_row:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail=f"Bot with bot_id '{bot_user_id}' not found"
|
||||
)
|
||||
|
||||
bot_uuid = bot_row[0]
|
||||
|
||||
# 查询 bot_settings
|
||||
await cursor.execute(
|
||||
"""
|
||||
SELECT model_id,
|
||||
language, robot_type, dataset_ids, system_prompt, user_identifier,
|
||||
enable_memori, tool_response, skills
|
||||
FROM bot_settings WHERE bot_id = %s
|
||||
""",
|
||||
(bot_uuid,)
|
||||
)
|
||||
settings_row = await cursor.fetchone()
|
||||
|
||||
if not settings_row:
|
||||
# 没有设置,使用默认值
|
||||
logger.warning(f"No settings found for bot {bot_user_id}, using defaults")
|
||||
return {
|
||||
"model": "qwen3-next",
|
||||
"api_key": "",
|
||||
"model_server": "",
|
||||
"language": "zh",
|
||||
"robot_type": "general_agent",
|
||||
"dataset_ids": [],
|
||||
"system_prompt": "",
|
||||
"user_identifier": "",
|
||||
"enable_memori": False,
|
||||
"tool_response": True,
|
||||
"skills": []
|
||||
}
|
||||
|
||||
# 解析结果
|
||||
columns = [
|
||||
'model_id',
|
||||
'language', 'robot_type', 'dataset_ids', 'system_prompt', 'user_identifier',
|
||||
'enable_memori', 'tool_response', 'skills'
|
||||
]
|
||||
config = dict(zip(columns, settings_row))
|
||||
|
||||
# 根据 model_id 查询模型信息
|
||||
model_id = config['model_id']
|
||||
if model_id:
|
||||
await cursor.execute(
|
||||
"""
|
||||
SELECT model, server, api_key
|
||||
FROM models WHERE id = %s
|
||||
""",
|
||||
(model_id,)
|
||||
)
|
||||
model_row = await cursor.fetchone()
|
||||
if model_row:
|
||||
config['model'] = model_row[0]
|
||||
config['model_server'] = model_row[1]
|
||||
config['api_key'] = model_row[2]
|
||||
else:
|
||||
logger.warning(f"Model with id {model_id} not found, using defaults")
|
||||
config['model'] = "qwen3-next"
|
||||
config['model_server'] = ""
|
||||
config['api_key'] = ""
|
||||
else:
|
||||
# 没有选择模型,使用默认值
|
||||
config['model'] = "qwen3-next"
|
||||
config['model_server'] = ""
|
||||
config['api_key'] = ""
|
||||
|
||||
# 处理 dataset_ids (可能是 JSON 数组字符串或逗号分隔字符串)
|
||||
dataset_ids = config['dataset_ids']
|
||||
if dataset_ids:
|
||||
if isinstance(dataset_ids, str):
|
||||
if dataset_ids.startswith('['):
|
||||
import json
|
||||
try:
|
||||
config['dataset_ids'] = json.loads(dataset_ids)
|
||||
except json.JSONDecodeError:
|
||||
config['dataset_ids'] = [d.strip() for d in dataset_ids.split(',')]
|
||||
else:
|
||||
config['dataset_ids'] = [d.strip() for d in dataset_ids.split(',')]
|
||||
else:
|
||||
config['dataset_ids'] = []
|
||||
|
||||
# 处理 skills (逗号分隔字符串)
|
||||
skills = config.get('skills', '')
|
||||
if skills:
|
||||
if isinstance(skills, str):
|
||||
config['skills'] = [s.strip() for s in skills.split(',') if s.strip()]
|
||||
else:
|
||||
config['skills'] = []
|
||||
else:
|
||||
config['skills'] = []
|
||||
|
||||
# 查询 MCP 服务器配置
|
||||
await cursor.execute(
|
||||
"""
|
||||
SELECT name, type, config, enabled
|
||||
FROM mcp_servers WHERE bot_id = %s AND enabled = true
|
||||
""",
|
||||
(bot_uuid,)
|
||||
)
|
||||
mcp_rows = await cursor.fetchall()
|
||||
|
||||
mcp_servers = []
|
||||
for mcp_row in mcp_rows:
|
||||
mcp_name = mcp_row[0]
|
||||
mcp_type = mcp_row[1]
|
||||
mcp_config = mcp_row[2]
|
||||
|
||||
# 如果 config 是 JSONB/字符串,解析它
|
||||
if isinstance(mcp_config, str):
|
||||
try:
|
||||
mcp_config = json.loads(mcp_config)
|
||||
except json.JSONDecodeError:
|
||||
mcp_config = {}
|
||||
|
||||
mcp_servers.append({
|
||||
"name": mcp_name,
|
||||
"type": mcp_type,
|
||||
"config": mcp_config
|
||||
})
|
||||
|
||||
# 格式化为 mcp_settings 格式 (兼容 v2 API)
|
||||
if mcp_servers:
|
||||
mcp_settings_value = []
|
||||
for server in mcp_servers:
|
||||
server_config = server.get("config", {})
|
||||
server_type = server_config.pop("server_type", server["type"])
|
||||
mcp_settings_value.append({
|
||||
"mcpServers": {
|
||||
server_type: server_config
|
||||
}
|
||||
})
|
||||
config["mcp_settings"] = mcp_settings_value
|
||||
else:
|
||||
config["mcp_settings"] = []
|
||||
|
||||
return config
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching bot config from database: {e}")
|
||||
import traceback
|
||||
logger.error(traceback.format_exc())
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to fetch bot config from database: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
async def _sync_call_llm(llm_config, messages) -> str:
|
||||
"""同步调用LLM的辅助函数,在线程池中执行 - 使用LangChain"""
|
||||
try:
|
||||
|
||||
Loading…
Reference in New Issue
Block a user