feat(deep-agent): add custom workspace_root support for shell commands
- Add create_custom_cli_agent function to support custom workspace_root
- Set shell workspace to ~/.deepagents/{bot_id} for deep_agent type
- Pass system_prompt to create_custom_cli_agent for proper context
- Fix duplicate <env> tag in system_prompt_deep_agent.md
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
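For context, a minimal usage sketch of the new helper, mirroring the call added in init_agent below (the "bot-123" id, llm_instance, and system_prompt are illustrative placeholders, not values from this commit):

    from pathlib import Path

    # Per-bot workspace under the user's home directory, e.g. ~/.deepagents/bot-123
    workspace_root = str(Path.home() / ".deepagents" / "bot-123")
    agent, composite_backend = create_custom_cli_agent(
        model=llm_instance,            # chat model instance or model string
        assistant_id="bot-123",
        system_prompt=system_prompt,   # optional; a default is generated when None
        auto_approve=True,
        enable_shell=False,
        workspace_root=workspace_root,
    )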
This commit is contained in:
parent 49a0447f9f
commit 7c9e270a66
@ -2,13 +2,18 @@ import json
import logging
import time
import copy
from pathlib import Path
from typing import Any, Dict
from langchain.chat_models import init_chat_model
# from deepagents import create_deep_agent
from deepagents import create_deep_agent
from deepagents.backends import CompositeBackend
from deepagents.backends.filesystem import FilesystemBackend
from deepagents.backends.sandbox import SandboxBackendProtocol
from deepagents_cli.agent import create_cli_agent
from langchain.agents import create_agent
from langchain.agents.middleware import SummarizationMiddleware
from langchain_mcp_adapters.client import MultiServerMCPClient
from utils.fastapi_utils import detect_provider
from .guideline_middleware import GuidelineMiddleware
from .tool_output_length_middleware import ToolOutputLengthMiddleware
@ -19,6 +24,14 @@ from agent.prompt_loader import load_system_prompt_async, load_mcp_settings_asyn
from agent.agent_memory_cache import get_memory_cache_manager
from .checkpoint_utils import prepare_checkpoint_message
from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver
from langgraph.checkpoint.memory import InMemorySaver
from langchain.tools import BaseTool
from langchain_core.language_models import BaseChatModel
from langgraph.pregel import Pregel
from deepagents_cli.shell import ShellMiddleware
from deepagents_cli.agent_memory import AgentMemoryMiddleware
from deepagents_cli.skills import SkillsMiddleware
from deepagents_cli.config import settings, get_default_coding_instructions
import os

# Global MemorySaver instance
@ -148,13 +161,15 @@ async def init_agent(config: AgentConfig):
    checkpointer = None
    create_start = time.time()
    if config.robot_type == "deep_agent":
        # Create the agent with DeepAgentX
        agent, composite_backend = create_cli_agent(
        # Create the agent with DeepAgentX, using a custom workspace_root
        workspace_root = str(Path.home() / ".deepagents" / config.bot_id)
        agent, composite_backend = create_custom_cli_agent(
            model=llm_instance,
            assistant_id=config.bot_id,
            system_prompt=system_prompt,
            tools=mcp_tools,
            auto_approve=True,
            enable_shell=False,
            workspace_root=workspace_root
        )
    else:
        # Build the middleware list
@ -198,4 +213,164 @@ async def init_agent(config: AgentConfig):
        checkpointer=checkpointer
    )
    logger.info(f"create {config.robot_type} elapsed: {time.time() - create_start:.3f}s")
    return agent, checkpointer
    return agent, checkpointer


def create_custom_cli_agent(
    model: str | BaseChatModel,
    assistant_id: str,
    *,
    tools: list[BaseTool] | None = None,
    sandbox: SandboxBackendProtocol | None = None,
    sandbox_type: str | None = None,
    system_prompt: str | None = None,
    auto_approve: bool = False,
    enable_memory: bool = True,
    enable_skills: bool = True,
    enable_shell: bool = True,
    workspace_root: str | None = None,
) -> tuple[Pregel, CompositeBackend]:
    """Create a CLI-configured agent with a custom workspace_root for shell commands.

    This is a custom version of create_cli_agent that allows specifying a custom
    workspace_root for shell commands instead of using Path.cwd().

    Args:
        model: LLM model to use (e.g., "anthropic:claude-sonnet-4-5-20250929")
        assistant_id: Agent identifier for memory/state storage
        tools: Additional tools to provide to the agent (default: empty list)
        sandbox: Optional sandbox backend for remote execution (e.g., ModalBackend).
            If None, uses local filesystem + shell.
        sandbox_type: Type of sandbox provider ("modal", "runloop", "daytona").
            Used for system prompt generation.
        system_prompt: Override the default system prompt. If None, generates one
            based on sandbox_type and assistant_id.
        auto_approve: If True, automatically approves all tool calls without human
            confirmation. Useful for automated workflows.
        enable_memory: Enable AgentMemoryMiddleware for persistent memory
        enable_skills: Enable SkillsMiddleware for custom agent skills
        enable_shell: Enable ShellMiddleware for local shell execution (local mode only)
        workspace_root: Working directory for shell commands. If None, uses Path.cwd().

    Returns:
        2-tuple of (agent_graph, composite_backend):
        - agent_graph: Configured LangGraph Pregel instance ready for execution
        - composite_backend: CompositeBackend for file operations
    """
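    # Usage sketch (assumption: a shell-enabled agent whose commands should run
    # under a per-assistant directory instead of the process cwd; the "bot-123"
    # id and `llm` model instance are illustrative, not part of this module):
    #
    #     workspace = str(Path.home() / ".deepagents" / "bot-123")
    #     agent, backend = create_custom_cli_agent(
    #         model=llm,
    #         assistant_id="bot-123",
    #         auto_approve=True,
    #         enable_shell=True,        # ShellMiddleware rooted at `workspace`
    #         workspace_root=workspace,
    #     )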
    if tools is None:
        tools = []

    # Setup agent directory for persistent memory (if enabled)
    if enable_memory or enable_skills:
        agent_dir = settings.ensure_agent_dir(assistant_id)
        agent_md = agent_dir / "agent.md"
        if not agent_md.exists():
            source_content = get_default_coding_instructions()
            agent_md.write_text(source_content)

    # Skills directories (if enabled)
    skills_dir = None
    if enable_skills:
        skills_dir = settings.ensure_user_skills_dir(assistant_id)

    # Build middleware stack based on enabled features
    agent_middleware = []

    # CONDITIONAL SETUP: Local vs Remote Sandbox
    if sandbox is None:
        # ========== LOCAL MODE ==========
        composite_backend = CompositeBackend(
            default=FilesystemBackend(root_dir=workspace_root),  # Root for local file operations
            routes={},  # No virtualization - use real paths
        )

        # Add memory middleware
        if enable_memory:
            agent_middleware.append(
                AgentMemoryMiddleware(settings=settings, assistant_id=assistant_id)
            )

        # Add skills middleware
        if enable_skills:
            agent_middleware.append(
                SkillsMiddleware(
                    skills_dir=skills_dir,
                    assistant_id=assistant_id
                )
            )

        # Add shell middleware (only in local mode)
        if enable_shell:
            # Create environment for shell commands
            # Restore user's original LANGSMITH_PROJECT so their code traces separately
            shell_env = os.environ.copy()
            if settings.user_langchain_project:
                shell_env["LANGSMITH_PROJECT"] = settings.user_langchain_project

            # Use custom workspace_root if provided, otherwise use current directory
            shell_workspace = workspace_root if workspace_root is not None else str(Path.cwd())

            agent_middleware.append(
                ShellMiddleware(
                    workspace_root=shell_workspace,
                    env=shell_env,
                )
            )
    else:
        # ========== REMOTE SANDBOX MODE ==========
        composite_backend = CompositeBackend(
            default=sandbox,  # Remote sandbox (ModalBackend, etc.)
            routes={},  # No virtualization
        )

        # Add memory middleware
        if enable_memory:
            agent_middleware.append(
                AgentMemoryMiddleware(settings=settings, assistant_id=assistant_id)
            )

        # Add skills middleware
        if enable_skills:
            agent_middleware.append(
                SkillsMiddleware(
                    skills_dir=skills_dir,
                    assistant_id=assistant_id,
                )
            )

        # Note: Shell middleware not used in sandbox mode
        # File operations and the execute tool are provided by the sandbox backend

    # Get or use custom system prompt
    if system_prompt is None:
        # Import get_system_prompt from deepagents_cli.agent
        from deepagents_cli.agent import get_system_prompt as _get_system_prompt
        system_prompt = _get_system_prompt(assistant_id=assistant_id, sandbox_type=sandbox_type)

    # Import InterruptOnConfig
    from langchain.agents.middleware import InterruptOnConfig

    # Configure interrupt_on based on auto_approve setting
    if auto_approve:
        # No interrupts - all tools run automatically
        interrupt_on = {}
    else:
        # Full HITL for destructive operations - import from deepagents_cli.agent
        from deepagents_cli.agent import _add_interrupt_on
        interrupt_on = _add_interrupt_on()

    # Import config
    from deepagents_cli.config import config

    # Create the agent
    agent = create_deep_agent(
        model=model,
        system_prompt=system_prompt,
        tools=tools,
        backend=composite_backend,
        middleware=agent_middleware,
        interrupt_on=interrupt_on,
        checkpointer=InMemorySaver(),
    ).with_config(config)
    return agent, composite_backend
@ -1,5 +1,4 @@
<env>
<env>
Working directory: {agent_dir_path}
Current User: {user_identifier}
Current Time: {datetime}