#!/usr/bin/env python3
"""
API data models and response schemas.
"""

from typing import Dict, List, Optional, Any, AsyncGenerator

from pydantic import BaseModel, Field, field_validator, ConfigDict


class Message(BaseModel):
    """A single chat message exchanged with the model API."""

    # Speaker of the message. create_chat_response in this module emits
    # "assistant"; other roles (e.g. "user", "system") are presumably
    # accepted too — confirm with the chat endpoint.
    role: str
    # Plain-text body of the message.
    content: str


class DatasetRequest(BaseModel):
    """Request payload for dataset creation."""

    system_prompt: Optional[str] = None
    mcp_settings: Optional[List[Dict]] = None
    files: Optional[Dict[str, List[str]]] = Field(default=None, description="Files organized by key groups. Each key maps to a list of file paths (supports zip files)")
    unique_id: Optional[str] = None

    @field_validator('files', mode='before')
    @classmethod
    def validate_files(cls, v):
        """Validate dict format with key-grouped files"""
        # Guard-clause style: bail out on None, reject non-dicts up front,
        # then verify every key is a string and every value is a list of
        # string paths. First malformed entry raises.
        if v is None:
            return None
        if not isinstance(v, dict):
            raise ValueError(f"Files must be a dict with key groups, got {type(v)}")
        for key, value in v.items():
            if not isinstance(key, str):
                raise ValueError(f"Key in files dict must be string, got {type(key)}")
            if not isinstance(value, list):
                raise ValueError(f"Value in files dict must be list, got {type(value)} for key '{key}'")
            for item in value:
                if not isinstance(item, str):
                    raise ValueError(f"File paths must be strings, got {type(item)} in key '{key}'")
        return v


class ChatRequest(BaseModel):
    """Request body for the chat endpoint (OpenAI-style)."""

    # Conversation history; ordering convention defined by the endpoint.
    messages: List[Message]
    # Model identifier; defaults to "qwen3-next".
    model: str = "qwen3-next"
    # Model server address; empty string presumably means "use default" —
    # confirm with the endpoint that consumes it.
    model_server: str = ""
    # Optional project/session identifier.
    unique_id: Optional[str] = None
    # Whether to stream the response (assumed from the flag name — confirm).
    stream: Optional[bool] = False


class FileProcessRequest(BaseModel):
    """Request payload for processing files under a project id."""

    unique_id: str
    files: Optional[Dict[str, List[str]]] = Field(default=None, description="Files organized by key groups. Each key maps to a list of file paths (supports zip files)")
    system_prompt: Optional[str] = None
    mcp_settings: Optional[List[Dict]] = None

    # Unknown extra fields are accepted and kept.
    model_config = ConfigDict(extra='allow')

    @field_validator('files', mode='before')
    @classmethod
    def validate_files(cls, v):
        """Validate dict format with key-grouped files"""
        # None passes through untouched; anything else must be a dict of
        # str -> list[str]. Checks run key by key so the first offending
        # entry produces the error.
        if v is None:
            return None
        if not isinstance(v, dict):
            raise ValueError(f"Files must be a dict with key groups, got {type(v)}")
        for key, value in v.items():
            if not isinstance(key, str):
                raise ValueError(f"Key in files dict must be string, got {type(key)}")
            if not isinstance(value, list):
                raise ValueError(f"Value in files dict must be list, got {type(value)} for key '{key}'")
            for item in value:
                if not isinstance(item, str):
                    raise ValueError(f"File paths must be strings, got {type(item)} in key '{key}'")
        return v


class DatasetResponse(BaseModel):
    """Response returned after a dataset operation."""

    success: bool
    message: str
    # Echo of the request's project identifier, when available.
    unique_id: Optional[str] = None
    # Textual description of the resulting dataset layout
    # (exact format defined by the producer).
    dataset_structure: Optional[str] = None


class ChatCompletionResponse(BaseModel):
    """OpenAI-compatible chat completion payload.

    Matches the dict shape produced by create_chat_response in this module.
    """

    # Completion identifier, e.g. "chatcmpl-<hex>".
    id: str
    object: str = "chat.completion"
    # Unix timestamp (seconds) of creation.
    created: int
    model: str
    # One dict per choice: index / message / finish_reason.
    choices: List[Dict[str, Any]]
    # Token accounting: prompt_tokens / completion_tokens / total_tokens.
    usage: Optional[Dict[str, int]] = None


class ChatResponse(BaseModel):
    """Minimal chat response: choices plus optional usage statistics."""

    choices: List[Dict]
    usage: Optional[Dict] = None


class FileProcessResponse(BaseModel):
    """Result of a file-processing request."""

    success: bool
    message: str
    # Project identifier the files were processed under.
    unique_id: str
    # Identifiers of the processed files (paths or names — confirm
    # with the producing endpoint).
    processed_files: List[str]


class ErrorResponse(BaseModel):
    """OpenAI-style error envelope: {"error": {"message", "type"[, "code"]}}."""

    error: Dict[str, Any]

    @classmethod
    def create(cls, message: str, error_type: str = "invalid_request_error", code: Optional[str] = None) -> "ErrorResponse":
        """Build an ErrorResponse.

        Args:
            message: Human-readable error description.
            error_type: Machine-readable error category.
            code: Optional error code; included whenever it is not None.

        Returns:
            A populated ErrorResponse instance.
        """
        error_data: Dict[str, Any] = {
            "message": message,
            "type": error_type
        }
        # `is not None` rather than truthiness so an explicitly-passed
        # empty-string code is preserved instead of silently dropped.
        if code is not None:
            error_data["code"] = code
        return cls(error=error_data)


class HealthCheckResponse(BaseModel):
    """Payload for the health-check endpoint."""

    status: str = "healthy"
    # Timestamp string; format (presumably ISO-8601) set by the endpoint — confirm.
    timestamp: str
    version: str = "1.0.0"


class SystemStatusResponse(BaseModel):
    """Aggregate system status snapshot."""

    status: str
    # Count of projects; relationship to the two lists below is defined
    # by the producing endpoint.
    projects_count: int
    total_projects: List[str]
    active_projects: List[str]
    # Free-form system metrics/details.
    system_info: Dict[str, Any]


class CacheStatusResponse(BaseModel):
    """Cache inventory: cached project ids plus cache metadata."""

    cached_projects: List[str]
    # Free-form cache details (shape defined by the cache backend).
    cache_info: Dict[str, Any]


class ProjectStatusResponse(BaseModel):
    """Detailed status snapshot for a single project."""

    unique_id: str
    project_exists: bool
    # Filesystem path of the project when it exists.
    project_path: Optional[str] = None
    processed_files_count: int
    # Per-file metadata, keyed per file (value shape defined by producer).
    processed_files: Dict[str, Dict]
    document_files_count: int
    document_files: List[str]
    has_system_prompt: bool
    has_mcp_settings: bool
    readme_exists: bool
    log_file_exists: bool
    # Textual description of the dataset layout, when generated.
    dataset_structure: Optional[str] = None
    # Populated when status collection itself failed.
    error: Optional[str] = None


class ProjectListResponse(BaseModel):
    """List of project identifiers plus their count."""

    projects: List[str]
    # Presumably len(projects) — set by the producer, not validated here.
    count: int


class ProjectStatsResponse(BaseModel):
    """Aggregated count/size statistics for one project."""

    unique_id: str
    total_processed_files: int
    total_document_files: int
    # Total document size; units (bytes, presumably, given the *_mb
    # companion field) are set by the producer — confirm.
    total_document_size: int
    total_document_size_mb: float
    has_system_prompt: bool
    has_mcp_settings: bool
    has_readme: bool
    # One dict per document file (shape defined by the producing endpoint).
    document_files_detail: List[Dict[str, Any]]
    embedding_files_count: int
    embedding_files_detail: List[Dict[str, Any]]


class ProjectActionResponse(BaseModel):
    """Outcome of an action performed on a project."""

    success: bool
    message: str
    # Project the action targeted.
    unique_id: str
    # Name of the action performed (vocabulary defined by the API routes).
    action: str


# Utility functions for creating responses
def create_success_response(message: str, **kwargs) -> Dict[str, Any]:
    """Build a standardized success payload.

    Extra keyword arguments are merged into the result and may override
    the "success"/"message" keys (same semantics as a **-spread literal).
    """
    payload: Dict[str, Any] = {"success": True, "message": message}
    payload.update(kwargs)
    return payload


def create_error_response(message: str, error_type: str = "error", **kwargs) -> Dict[str, Any]:
    """Build a standardized error payload.

    Extra keyword arguments are merged into the result and may override
    the base keys (same semantics as a **-spread literal).
    """
    payload: Dict[str, Any] = {
        "success": False,
        "error": error_type,
        "message": message,
    }
    payload.update(kwargs)
    return payload


class QueueTaskRequest(BaseModel):
    """Queue task request model (submit a processing job to the queue)."""

    unique_id: str
    files: Optional[Dict[str, List[str]]] = Field(default=None, description="Files organized by key groups. Each key maps to a list of file paths (supports zip files)")
    system_prompt: Optional[str] = None
    mcp_settings: Optional[List[Dict]] = None
    priority: Optional[int] = Field(default=0, description="Task priority (higher number = higher priority)")
    delay: Optional[int] = Field(default=0, description="Delay execution by N seconds")

    # Unknown extra fields are accepted and kept.
    model_config = ConfigDict(extra='allow')

    @field_validator('files', mode='before')
    @classmethod
    def validate_files(cls, v):
        """Validate dict format with key-grouped files"""
        # Guard-clause style: None passes through; anything else must be
        # a dict of str -> list[str]. First malformed entry raises.
        if v is None:
            return None
        if not isinstance(v, dict):
            raise ValueError(f"Files must be a dict with key groups, got {type(v)}")
        for key, value in v.items():
            if not isinstance(key, str):
                raise ValueError(f"Key in files dict must be string, got {type(key)}")
            if not isinstance(value, list):
                raise ValueError(f"Value in files dict must be list, got {type(value)} for key '{key}'")
            for item in value:
                if not isinstance(item, str):
                    raise ValueError(f"File paths must be strings, got {type(item)} in key '{key}'")
        return v


class QueueTaskResponse(BaseModel):
    """Queue task response model."""

    success: bool
    message: str
    unique_id: str
    # Identifier of the enqueued task, when enqueueing succeeded.
    task_id: Optional[str] = None
    task_status: Optional[str] = None
    estimated_processing_time: Optional[int] = None  # seconds


class QueueStatusResponse(BaseModel):
    """Queue status response model."""

    success: bool
    message: str
    # Aggregate queue metrics (shape defined by the queue backend).
    queue_stats: Dict[str, Any]
    # One dict per task still waiting to run.
    pending_tasks: List[Dict[str, Any]]


class TaskStatusResponse(BaseModel):
    """Task status response model."""

    success: bool
    message: str
    task_id: str
    task_status: Optional[str] = None
    # Result payload once the task finished (shape defined by the worker).
    task_result: Optional[Dict[str, Any]] = None
    # Error description when the task failed.
    error: Optional[str] = None


def create_chat_response(
    messages: "List[Message]",
    model: str,
    content: str,
    usage: Optional[Dict[str, int]] = None
) -> Dict[str, Any]:
    """Create an OpenAI-style chat completion response dict.

    Args:
        messages: Conversation history. Currently unused by this builder;
            kept for interface compatibility with existing callers.
            (Annotation is a forward-reference string so this function does
            not depend on Message's definition order.)
        model: Model name echoed back in the payload.
        content: Assistant reply text placed in the single choice.
        usage: Token-accounting dict. Only None triggers the zeroed
            defaults; an explicitly-passed empty dict is preserved.

    Returns:
        Dict matching the ChatCompletionResponse schema.
    """
    import time
    import uuid

    return {
        # Short random id in the OpenAI "chatcmpl-" style.
        "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
        "object": "chat.completion",
        "created": int(time.time()),
        "model": model,
        "choices": [
            {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": content
                },
                "finish_reason": "stop"
            }
        ],
        # `is not None` (not `or`) so a falsy-but-explicit usage dict
        # passes through instead of being replaced by zeroed counters.
        "usage": usage if usage is not None else {
            "prompt_tokens": 0,
            "completion_tokens": 0,
            "total_tokens": 0
        }
    }