#!/usr/bin/env python3
"""
Semantic search MCP server.

Performs semantic similarity search over embedding vectors.
Follows the implementation style of multi_keyword_search_server.py.
"""

import asyncio
import json
import os
import pickle
import sys
from typing import Any, Dict, List, Optional, Union

import numpy as np
from sentence_transformers import SentenceTransformer, util

from mcp_common import (
    get_allowed_directory,
    load_tools_from_json,
    resolve_file_path,
    find_file_in_project,
    create_error_response,
    create_success_response,
    create_initialize_response,
    create_ping_response,
    create_tools_list_response,
    handle_mcp_streaming
)

# Model is loaded lazily on first use
embedder = None


def get_model(model_name_or_path='sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2'):
    """Return the embedding model instance (lazily loaded).

    Args:
        model_name_or_path (str): Model name or local path.
            - Can be a HuggingFace model name.
            - Can be a local model path.
    """
    global embedder
    if embedder is None:
        # Prefer a local copy of the model if one exists
        local_model_path = "./models/paraphrase-multilingual-MiniLM-L12-v2"

        # Read the device configuration from the environment; default to CPU
        device = os.environ.get('SENTENCE_TRANSFORMER_DEVICE', 'cpu')
        if device not in ['cpu', 'cuda', 'mps']:
            # Log to stderr so stdout stays free for the MCP message stream
            print(f"Warning: unsupported device type '{device}', using CPU instead", file=sys.stderr)
            device = 'cpu'

        # Check whether the local model exists
        if os.path.exists(local_model_path):
            print(f"Using local model: {local_model_path}", file=sys.stderr)
            embedder = SentenceTransformer(local_model_path, device=device)
        else:
            print(f"Local model not found, using HuggingFace model: {model_name_or_path}", file=sys.stderr)
            embedder = SentenceTransformer(model_name_or_path, device=device)

    return embedder
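
# Usage note (illustrative; the server filename below is hypothetical):
# the encoding device can be selected via the environment before launch, e.g.
#   SENTENCE_TRANSFORMER_DEVICE=cuda python semantic_search_server.py
# Any value other than 'cpu', 'cuda', or 'mps' falls back to CPU.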


def semantic_search(queries: Union[str, List[str]], embeddings_file: str, top_k: int = 20) -> Dict[str, Any]:
    """Run a semantic search; supports multiple queries."""
    # Normalize the query input
    if isinstance(queries, str):
        queries = [queries]

    # Validate the query list
    if not queries or not any(q.strip() for q in queries):
        return {
            "content": [
                {
                    "type": "text",
                    "text": "Error: Queries cannot be empty"
                }
            ]
        }

    # Drop empty queries
    queries = [q.strip() for q in queries if q.strip()]

    # Validate the embeddings file path
    try:
        # Resolve the file path; supports both folder/document.txt and document.txt forms
        resolved_embeddings_file = resolve_file_path(embeddings_file)

        # Load the embedding data
        with open(resolved_embeddings_file, 'rb') as f:
            embedding_data = pickle.load(f)

        # Support both the new and the old data layout
        if 'chunks' in embedding_data:
            # New layout (uses 'chunks')
            sentences = embedding_data['chunks']
            sentence_embeddings = embedding_data['embeddings']
            # Take the model path from embedding_data if present
            model_path = embedding_data.get('model_path', 'sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2')
            model = get_model(model_path)
        else:
            # Old layout (uses 'sentences')
            sentences = embedding_data['sentences']
            sentence_embeddings = embedding_data['embeddings']
            model = get_model()
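
        # Sketch of the assumed pickle layout; only the keys read above
        # ('chunks', 'sentences', 'embeddings', 'model_path') are confirmed:
        #   {
        #       'chunks': ["chunk one", "chunk two", ...],    # new layout
        #       'embeddings': array/tensor, shape (n_items, dim),
        #       'model_path': 'optional model name or path',  # new layout
        #   }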

        # Encode all queries in one batch
        query_embeddings = model.encode(queries, convert_to_tensor=True)

        # Compute similarities for each query
        all_results = []
        for i, query in enumerate(queries):
            query_embedding = query_embeddings[i:i+1]  # keep the 2-D shape
            cos_scores = util.cos_sim(query_embedding, sentence_embeddings)[0]

            # Take the top_k results
            top_results = np.argsort(-cos_scores.cpu().numpy())[:top_k]

            # Collect the results
            for j, idx in enumerate(top_results):
                sentence = sentences[idx]
                score = cos_scores[idx].item()
                all_results.append({
                    'query': query,
                    'rank': j + 1,
                    'content': sentence,
                    'similarity_score': score,
                    'file_path': embeddings_file
                })

        if not all_results:
            return {
                "content": [
                    {
                        "type": "text",
                        "text": "No matching results found"
                    }
                ]
            }

        # Sort all results by similarity score
        all_results.sort(key=lambda x: x['similarity_score'], reverse=True)

        # Format the output
        formatted_lines = []
        formatted_lines.append(f"Found {len(all_results)} results for {len(queries)} queries:")
        formatted_lines.append("")

        for i, result in enumerate(all_results):
            formatted_lines.append(f"#{i+1} [query: '{result['query']}'] [similarity:{result['similarity_score']:.4f}]: {result['content']}")

        formatted_output = "\n".join(formatted_lines)

        return {
            "content": [
                {
                    "type": "text",
                    "text": formatted_output
                }
            ]
        }

    except FileNotFoundError:
        return {
            "content": [
                {
                    "type": "text",
                    "text": f"Error: embeddings file {embeddings_file} not found"
                }
            ]
        }
    except Exception as e:
        return {
            "content": [
                {
                    "type": "text",
                    "text": f"Search error: {str(e)}"
                }
            ]
        }

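
# Example of the text block produced by semantic_search (illustrative values):
#   Found 40 results for 2 queries:
#
#   #1 [query: 'device setup'] [similarity:0.8132]: ...matching chunk text...
#   #2 [query: 'model path'] [similarity:0.7991]: ...matching chunk text...
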
def get_model_info() -> Dict[str, Any]:
    """Return information about the current model."""
    try:
        # Check the local model path
        local_model_path = "./models/paraphrase-multilingual-MiniLM-L12-v2"
        device = os.environ.get('SENTENCE_TRANSFORMER_DEVICE', 'cpu')
        status = "loaded" if embedder is not None else "not loaded yet (loads lazily on first search)"

        if os.path.exists(local_model_path):
            return {
                "content": [
                    {
                        "type": "text",
                        "text": f"✅ Using local model: {local_model_path}\n"
                                f"Model status: {status}\n"
                                f"Device: {device}\n"
                                "Note: avoids downloading from HuggingFace, which speeds up responses"
                    }
                ]
            }
        else:
            return {
                "content": [
                    {
                        "type": "text",
                        "text": f"⚠️ Local model not found: {local_model_path}\n"
                                "Will use the HuggingFace model: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2\n"
                                "Tip: download the model locally for faster responses\n"
                                f"Device: {device}"
                    }
                ]
            }
    except Exception as e:
        return {
            "content": [
                {
                    "type": "text",
                    "text": f"❌ Failed to get model info: {str(e)}"
                }
            ]
        }


async def handle_request(request: Dict[str, Any]) -> Dict[str, Any]:
    """Handle an MCP request."""
    try:
        method = request.get("method")
        params = request.get("params", {})
        request_id = request.get("id")

        if method == "initialize":
            return create_initialize_response(request_id, "semantic-search")

        elif method == "ping":
            return create_ping_response(request_id)

        elif method == "tools/list":
            # Load the tool definitions from the JSON file
            tools = load_tools_from_json("semantic_search_tools.json")
            return create_tools_list_response(request_id, tools)

        elif method == "tools/call":
            tool_name = params.get("name")
            arguments = params.get("arguments", {})

            if tool_name == "semantic_search":
                queries = arguments.get("queries", [])
                # Backward compatibility with the old 'query' parameter
                if not queries and "query" in arguments:
                    queries = arguments.get("query", "")
                embeddings_file = arguments.get("embeddings_file", "")
                top_k = arguments.get("top_k", 20)

                result = semantic_search(queries, embeddings_file, top_k)

                return {
                    "jsonrpc": "2.0",
                    "id": request_id,
                    "result": result
                }

            elif tool_name == "get_model_info":
                result = get_model_info()

                return {
                    "jsonrpc": "2.0",
                    "id": request_id,
                    "result": result
                }

            else:
                return create_error_response(request_id, -32601, f"Unknown tool: {tool_name}")

        else:
            return create_error_response(request_id, -32601, f"Unknown method: {method}")

    except Exception as e:
        return create_error_response(request.get("id"), -32603, f"Internal error: {str(e)}")

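
# Example tools/call request for the semantic_search tool (illustrative
# values; the embeddings file name is hypothetical):
#   {
#       "jsonrpc": "2.0",
#       "id": 1,
#       "method": "tools/call",
#       "params": {
#           "name": "semantic_search",
#           "arguments": {
#               "queries": ["how to configure the device"],
#               "embeddings_file": "docs/embeddings.pkl",
#               "top_k": 5
#           }
#       }
#   }
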
async def main():
    """Main entry point."""
    await handle_mcp_streaming(handle_request)


if __name__ == "__main__":
    asyncio.run(main())