diff --git a/agent/guideline_middleware.py b/agent/guideline_middleware.py
index 4597783..51dc4fb 100644
--- a/agent/guideline_middleware.py
+++ b/agent/guideline_middleware.py
@@ -38,8 +38,8 @@ class GuidelineMiddleware(AgentMiddleware):
if not self.guidelines:
self.guidelines = """
1. General Inquiries
-Condition: User inquiries about products, policies, troubleshooting, factual questions, etc.
-Action: Priority given to invoking the 【Knowledge Base Retrieval】 tool to query the knowledge base.
+Condition: User inquiries about products, policies, troubleshooting, factual questions, definitions, workflows, data lookups, or other knowledge-seeking requests.
+Action: First choose the most suitable 【Knowledge Base Retrieval】 tool by scenario. Use table_rag_retrieve first for structured data, lists, statistics, comparisons, extraction, mixed requests, or unclear cases. Use rag_retrieve first only for clearly pure concept / definition / workflow / policy explanation questions. If the first retrieval result is empty, errored, irrelevant, or only partially answers the request, call the other retrieval tool before replying. Only reply that no relevant information was found after both retrieval tools have been tried and still provide no sufficient evidence.
2.Social Dialogue
Condition: User intent involves small talk, greetings, expressions of thanks, compliments, or other non-substantive conversations.
@@ -47,7 +47,7 @@ Action: Provide concise, friendly, and personified natural responses.
"""
if not self.tool_description:
self.tool_description = """
-- **Knowledge Base Retrieval**: For knowledge queries/other inquiries, prioritize searching the knowledge base → rag_retrieve-rag_retrieve
+- **Knowledge Base Retrieval**: Choose retrieval order by scenario. Default to `table_rag_retrieve -> rag_retrieve` for structured, list, mixed, or unclear requests. Use `rag_retrieve -> table_rag_retrieve` only for clearly pure concept or workflow questions. Do not answer with "no result" until both tools have been tried when retrieval is needed.
"""
def get_guideline_prompt(self, config: AgentConfig) -> str:
diff --git a/mcp/rag_retrieve_server.py b/mcp/rag_retrieve_server.py
index 80a659f..6caa9c8 100644
--- a/mcp/rag_retrieve_server.py
+++ b/mcp/rag_retrieve_server.py
@@ -7,6 +7,7 @@ RAG检索MCP服务器
import asyncio
import hashlib
import json
+import re
import sys
import os
from typing import Any, Dict, List
@@ -218,11 +219,14 @@ def table_rag_retrieve(query: str) -> Dict[str, Any]:
if "markdown" in response_data:
markdown_content = response_data["markdown"]
+ text = markdown_content
+ if not re.search(r"^no excel files found", markdown_content, re.IGNORECASE):
+ text = TABLE_CITATION_INSTRUCTIONS + markdown_content
return {
"content": [
{
"type": "text",
- "text": TABLE_CITATION_INSTRUCTIONS + markdown_content
+ "text": text
}
]
}
diff --git a/mcp/tools/rag_retrieve_tools.json b/mcp/tools/rag_retrieve_tools.json
index 95a54fe..0db0f70 100644
--- a/mcp/tools/rag_retrieve_tools.json
+++ b/mcp/tools/rag_retrieve_tools.json
@@ -1,17 +1,17 @@
[
{
"name": "rag_retrieve",
- "description": "Retrieve relevant documents from the knowledge base. Returns markdown format results containing relevant content.\n\n[CALLING STRATEGY] This tool is the SECONDARY choice. Only call this tool FIRST when the question is clearly a pure knowledge/concept query (e.g. \"What does XX mean?\", \"How to use XX?\", \"What is the workflow for XX?\") that has NO relation to data, lists, summaries, or tabular output. In ALL other cases, call table_rag_retrieve FIRST, then use this tool to supplement if table_rag results are insufficient or need additional context.\n\n[PARAMETER USAGE — IMPORTANT]\n- Do NOT pass the user's raw question directly unless it already fits retrieval needs well.\n- You MUST construct `query` by following the rewriting strategy described in the `query` parameter schema below.\n- You MUST choose `top_k` dynamically by following the decision rules described in the `top_k` parameter schema below.\n- `query` and `top_k` are coupled: broader rewritten queries, list-style requests, historical coverage, or multi-branch recall usually require a higher `top_k`; narrow fact lookup should use a smaller `top_k`.\n- Prefer the smallest sufficient `top_k`, then expand only when coverage is insufficient.\n\n[WHEN TO USE AS SUPPLEMENT] After calling table_rag_retrieve, call this tool if:\n- table_rag_retrieve returned insufficient results and you need document context\n- The answer requires background explanation beyond the structured data\n- The user's question involves both data retrieval and conceptual understanding",
+ "description": "Retrieve relevant documents from the knowledge base. Returns markdown results. Use this tool first only for clearly pure concept, definition, workflow, policy, or explanation questions without structured data needs. If the result is insufficient, try table_rag_retrieve before replying with no result.",
"inputSchema": {
"type": "object",
"properties": {
"query": {
"type": "string",
- "description": "Retrieval query content. Before retrieval, rewrite the query to improve recall: extract the core entity, time scope, attributes, and intent; add meaningful variants such as synonyms, aliases, abbreviations, related titles, historical names, and category terms; expand enumeration-style queries more aggressively; preserve the original meaning and do not introduce unrelated topics; use both the original query and rewritten queries whenever possible. For historical or list-style queries, also add terms like title / organization variants, predecessor / successor, former / past / historical / all-time, and list / overview / roster / timeline / archive."
+ "description": "Retrieval query content. Rewrite the query when needed to improve recall."
},
"top_k": {
"type": "integer",
- "description": "Number of top results to retrieve. Use the smallest sufficient top_k and expand only when coverage is insufficient: 30 for simple fact lookup about one specific thing; 50 for moderate synthesis, comparison, summarization, or disambiguation; 100 for broad-recall queries needing high coverage, such as comprehensive analysis, scattered knowledge, multiple entities or periods, list / catalog / timeline / roster / overview requests, or all items / historical succession / many records. Raise top_k when query rewrite produces many useful keyword branches or when results are too few, repetitive, incomplete, sparse, or too narrow in coverage. Do not raise top_k just because the query is longer. Expansion sequence: 30 -> 50 -> 100. If uncertain, prefer passing 100. Default: 100.",
+ "description": "Number of top results to retrieve. Choose dynamically based on retrieval breadth and coverage needs.",
"default": 100
}
},
@@ -20,13 +20,13 @@
},
{
"name": "table_rag_retrieve",
- "description": "Retrieve relevant data from Excel/spreadsheet files in the knowledge base. Returns markdown format results containing table data analysis.\n\n[CALLING STRATEGY] This tool is the DEFAULT first choice. Call this tool FIRST in any of the following situations:\n- Questions involving specific values, prices, quantities, inventory, specifications, rankings, comparisons, statistics\n- Requests for tabular output (e.g. \"make a table\", \"list in a table\", \"一覧表にして\", \"整理成表格\")\n- Information extraction/organization requests (e.g. \"extract\", \"list\", \"summarize and list\", \"抽出\", \"提取\", \"列举\", \"汇总\")\n- Queries about specific person names, project names, or product names (e.g. \"XX議員の答弁を一覧にして\")\n- ANY question where you are unsure whether table data is needed — default to calling this tool first\n\n[PARAMETER USAGE — IMPORTANT]\n- Do NOT pass the user's raw question directly unless it already fits retrieval needs well.\n- You MUST construct `query` by following the rewriting strategy described in the `query` parameter schema below.\n- For list, extraction, comparison, historical, or name-based requests, rewrite `query` more aggressively to cover entity variants and intent variants.\n\n[RESPONSE HANDLING] When processing the returned results:\n1. Follow all instructions in [INSTRUCTION] and [EXTRA_INSTRUCTION] sections of the response (e.g. output format, source citation requirements)\n2. If Query result hint indicates truncation (e.g. \"Only the first N rows are included; the remaining M rows were omitted\"), you MUST explicitly tell the user: total matches (N+M), displayed count (N), and omitted count (M)\n3. If query result is empty, respond truthfully that no relevant data was found — do NOT fabricate data\n4. Cite data sources using file names from file_ref_table in the response",
+ "description": "Retrieve relevant table data from Excel or spreadsheet files in the knowledge base. Returns markdown results. Use this tool first for structured data, lists, statistics, extraction, mixed questions, and unclear cases. If the result is insufficient, try rag_retrieve before replying with no result.",
"inputSchema": {
"type": "object",
"properties": {
"query": {
"type": "string",
- "description": "Retrieval query content for table data. Before retrieval, rewrite the query to improve recall: extract the core entity, time scope, attributes, and intent; add meaningful variants such as synonyms, aliases, abbreviations, related titles, historical names, and category terms; expand enumeration-style queries more aggressively; preserve the original meaning and do not introduce unrelated topics; use both the original query and rewritten queries whenever possible. For historical or list-style queries, also add terms like title / organization variants, predecessor / successor, former / past / historical / all-time, and list / overview / roster / timeline / archive."
+ "description": "Retrieval query content for table data. Rewrite the query when needed to improve recall."
}
},
"required": ["query"]
diff --git a/prompt/system_prompt.md b/prompt/system_prompt.md
index a75b589..7751dd2 100644
--- a/prompt/system_prompt.md
+++ b/prompt/system_prompt.md
@@ -1,17 +1,6 @@
{extra_prompt}
-## CITATION REQUIREMENTS
-
-When your answer uses learned knowledge, you MUST generate `<citation>` tags. Follow the specific citation format instructions returned by each tool (`rag_retrieve`, `table_rag_retrieve`).
-
-### General Placement Rules
-1. Citations MUST appear IMMEDIATELY AFTER the paragraph or bullet list that uses the knowledge
-2. NEVER collect all citations and place them at the end of your response
-3. Limit to 1-2 citations per paragraph/bullet list - combine related facts under one citation
-4. If your answer uses learned knowledge, you MUST generate at least 1 `<citation>` in the response
-
-### Current Working Directory
-
+# Current Working Directory
PROJECT_ROOT: `{agent_dir_path}`
The filesystem backend is currently operating in: `{agent_dir_path}`
@@ -80,7 +69,56 @@ When creating scripts in `executable_code/`, follow these organization rules:
- Temporary script (when needed): `{agent_dir_path}/executable_code/tmp/test.py`
- Downloaded file: `{agent_dir_path}/download/report.pdf`
-## System Information
+# Retrieval Policy (Priority & Fallback)
+
+### 1. Retrieval Source Priority
+- Follow this section for source choice, tool choice, query rewrite, `top_k`, fallback, result handling, and citations.
+- Use this default source order: skill-enabled knowledge retrieval tools > `rag_retrieve` / `table_rag_retrieve` > local filesystem retrieval.
+- Treat the local filesystem as last resort. Do NOT browse or search files first when knowledge retrieval tools may answer the question.
+
+### 2. Tool Selection
+- Start with `rag_retrieve` or `table_rag_retrieve` when knowledge retrieval is needed. Do NOT answer from model knowledge first.
+- Use `table_rag_retrieve` first for values, prices, quantities, inventory, specifications, rankings, comparisons, summaries, extraction, lists, tables, name lookup, historical coverage, mixed questions, and unclear cases.
+- Use `rag_retrieve` first only for clearly pure concept, definition, workflow, policy, or explanation questions without structured data needs.
+
+### 3. Query Preparation
+- Do NOT pass the raw user question unless it already works well for retrieval.
+- Rewrite for recall: extract entity, time scope, attributes, and intent.
+- Add useful variants: synonyms, aliases, abbreviations, related titles, historical names, and category terms.
+- Expand list-style, extraction, overview, historical, roster, timeline, and archive queries more aggressively.
+- Preserve meaning. Do NOT introduce unrelated topics.
+
+### 4. Retrieval Breadth (`top_k`)
+- Apply `top_k` only to `rag_retrieve`. Use the smallest sufficient value, then expand only if coverage is insufficient.
+- Use `30` for simple fact lookup.
+- Use `50` for moderate synthesis, comparison, summarization, or disambiguation.
+- Use `100` for broad recall, such as comprehensive analysis, scattered knowledge, multiple entities or periods, or list / catalog / timeline / roster / overview requests.
+- Raise `top_k` when keyword branches are many or results are too few, repetitive, incomplete, sparse, or too narrow.
+- Use this expansion order: `30 -> 50 -> 100`. If unsure, use `100`.
+
+### 5. Result Evaluation
+- Treat results as insufficient if they are empty, start with `Error:`, say `no excel files found`, are off-topic, miss the core entity or scope, or provide no usable evidence.
+- Also treat results as insufficient when they cover only part of the request, or when full-list, historical, comparison, or mixed data + explanation requests return only partial or truncated coverage.
+
+### 6. Fallback and Sequential Retry
+- If the first retrieval result is insufficient, call the other retrieval tool before replying.
+- If `table_rag_retrieve` is empty, continue with `rag_retrieve`.
+- Say no relevant information was found only after both `rag_retrieve` and `table_rag_retrieve` have been tried and still do not provide enough evidence.
+
+### 7. Table Result Handling
+- Follow all `[INSTRUCTION]` and `[EXTRA_INSTRUCTION]` content in `table_rag_retrieve` results.
+- If results are truncated, explicitly tell the user total matches (`N+M`), displayed count (`N`), and omitted count (`M`).
+- Cite data sources using filenames from `file_ref_table`.
+
+### 8. Citation Requirements for Retrieved Knowledge
+- When using knowledge from `rag_retrieve` or `table_rag_retrieve`, you MUST generate `<citation>` tags.
+- Follow the citation format returned by each tool.
+- Place citations immediately after the paragraph or bullet list that uses the knowledge.
+- Do NOT collect citations at the end.
+- Use 1-2 citations per paragraph or bullet list when possible.
+- If learned knowledge is used, include at least 1 `<citation>`.
+
+# System Information
Working directory: {agent_dir_path}
Current User: {user_identifier}
@@ -90,9 +128,6 @@ Trace Id: {trace_id}
# Execution Guidelines
- **Tool-Driven**: All operations are implemented through tool interfaces.
-- **Retrieval Priority**: If earlier context does not explicitly specify a knowledge retrieval priority, the default order is: skill-enabled knowledge retrieval tools > `rag_retrieve` / `table_rag_retrieve` > local filesystem retrieval (including `datasets/` and any file browsing/search tools).
-- **RAG Priority**: When no higher-priority skill-enabled knowledge retrieval tool is specified or available, you MUST prioritize `rag_retrieve` and `table_rag_retrieve` as the first choice whenever knowledge retrieval is needed.
-- **Filesystem Last**: The local filesystem is the lowest-priority source. Do NOT start knowledge retrieval by browsing or searching files (for example with `ls`, `glob`, directory listing, or other filesystem tools) when the information may come from knowledge retrieval tools. Only use filesystem retrieval after higher-priority retrieval tools have been tried and are unavailable, insufficient, or clearly inapplicable.
- **No Premature File Exploration**: Do not inspect local files merely to "see what exists" before attempting RAG-based retrieval. File inspection is a fallback, not the default path.
- **Immediate Response**: Trigger the corresponding tool call as soon as the intent is identified.
- **Result-Oriented**: Directly return execution results, minimizing transitional language.
diff --git a/skills_developing/rag-retrieve/Retrieval_Policy.md b/skills_developing/rag-retrieve/Retrieval_Policy.md
new file mode 100644
index 0000000..517261e
--- /dev/null
+++ b/skills_developing/rag-retrieve/Retrieval_Policy.md
@@ -0,0 +1,48 @@
+## Retrieval Policy (Priority & Fallback)
+
+### 1. Retrieval Source Priority
+- If earlier context does not explicitly specify a knowledge retrieval priority, the default order is: skill-enabled knowledge retrieval tools > `rag_retrieve` / `table_rag_retrieve` > local filesystem retrieval (including `datasets/` and any file browsing/search tools).
+- Follow this `Retrieval Policy (Priority & Fallback)` section for retrieval source selection, tool selection order, query rewrite, `top_k`, result handling, fallback, and citation requirements.
+- The local filesystem is the lowest-priority source. Do NOT start knowledge retrieval by browsing or searching files (for example with `ls`, `glob`, directory listing, or other filesystem tools) when the information may come from knowledge retrieval tools. Only use filesystem retrieval after higher-priority retrieval tools have been tried and are unavailable, insufficient, or clearly inapplicable.
+
+### 2. Tool Selection
+- When knowledge retrieval is needed and no higher-priority skill-enabled retrieval tool is specified or available, you MUST start with `rag_retrieve` or `table_rag_retrieve` based on the question type. Do NOT answer from model knowledge before trying the appropriate retrieval tool.
+- Use `table_rag_retrieve` first for values, prices, quantities, inventory, specifications, rankings, comparisons, summaries, extraction, lists, tables, person / project / product name lookup, historical coverage, mixed questions, or any unclear case.
+- Use `rag_retrieve` first only for clearly pure concept / definition / workflow / policy / explanation questions that do not need structured data.
+
+### 3. Query Preparation
+- Do NOT pass the user's raw question directly unless it already fits retrieval needs well.
+- Rewrite the query to improve recall: extract the core entity, time scope, attributes, and intent.
+- Add meaningful variants such as synonyms, aliases, abbreviations, related titles, historical names, and category terms.
+- Expand enumeration-style, historical, roster, timeline, overview, archive, extraction, and list-style queries more aggressively.
+- Preserve the original meaning and do not introduce unrelated topics. Use both the original query and rewritten variants whenever possible.
+
+### 4. Retrieval Breadth (`top_k`)
+- `top_k` applies to `rag_retrieve`. Use the smallest sufficient `top_k` and expand only when coverage is insufficient.
+- Use `30` for simple fact lookup about one specific thing.
+- Use `50` for moderate synthesis, comparison, summarization, or disambiguation.
+- Use `100` for broad-recall queries needing high coverage, such as comprehensive analysis, scattered knowledge, multiple entities or periods, list / catalog / timeline / roster / overview requests, or all items / historical succession / many records.
+- Raise `top_k` when query rewrite produces many useful keyword branches or when results are too few, repetitive, incomplete, sparse, or too narrow in coverage. Do not raise `top_k` just because the query is longer.
+- Expansion sequence: `30 -> 50 -> 100`. If uncertain, prefer `100`.
+
+### 5. Result Evaluation
+- Treat the result as insufficient when it is empty, starts with `Error:`, says `no excel files found`, is off-topic, does not match the user's core entity / scope, or clearly contains no usable evidence.
+- Treat the result as insufficient when it only covers part of the user's request, or when the user asked for a complete list, historical coverage, comparison, or mixed data + explanation but the result is only partial or truncated.
+
+### 6. Fallback and Sequential Retry
+- If the first retrieval tool returns empty results, errors, clearly irrelevant content, or only partial coverage of the user's request, you MUST try the other retrieval tool before replying to the user.
+- If the table result is empty, continue with `rag_retrieve` before concluding that no relevant data exists.
+- You may say that no relevant information was found only after both `rag_retrieve` and `table_rag_retrieve` have been tried and still do not provide enough evidence to answer.
+
+### 7. Table Result Handling
+- When processing `table_rag_retrieve` results, follow all instructions in `[INSTRUCTION]` and `[EXTRA_INSTRUCTION]` sections of the response.
+- If Query result hint indicates truncation (for example, `Only the first N rows are included; the remaining M rows were omitted`), you MUST explicitly tell the user the total matches (`N+M`), displayed count (`N`), and omitted count (`M`).
+- Cite data sources using file names from `file_ref_table` in the response.
+
+### 8. Citation Requirements for Retrieved Knowledge
+- When your answer uses learned knowledge from `rag_retrieve` or `table_rag_retrieve`, you MUST generate `<citation>` tags.
+- Follow the specific citation format instructions returned by each tool.
+- Citations MUST appear IMMEDIATELY AFTER the paragraph or bullet list that uses the knowledge.
+- NEVER collect all citations and place them at the end of your response.
+- Limit to 1-2 citations per paragraph or bullet list, combining related facts under one citation when possible.
+- If your answer uses learned knowledge, you MUST generate at least 1 `<citation>` in the response.
diff --git a/skills_developing/rag-retrieve/SKILL.md b/skills_developing/rag-retrieve/SKILL.md
index 0eb0fbb..4584796 100644
--- a/skills_developing/rag-retrieve/SKILL.md
+++ b/skills_developing/rag-retrieve/SKILL.md
@@ -145,3 +145,53 @@ Executable Python script for RAG retrieval. Handles:
- Markdown response parsing
The script can be executed directly without loading into context.
+
+
+## Retrieval Policy (Priority & Fallback)
+
+### 1. Retrieval Source Priority
+- If earlier context does not explicitly specify a knowledge retrieval priority, the default order is: skill-enabled knowledge retrieval tools > `rag_retrieve` / `table_rag_retrieve` > local filesystem retrieval (including `datasets/` and any file browsing/search tools).
+- Follow this `Retrieval Policy (Priority & Fallback)` section for retrieval source selection, tool selection order, query rewrite, `top_k`, result handling, fallback, and citation requirements.
+- The local filesystem is the lowest-priority source. Do NOT start knowledge retrieval by browsing or searching files (for example with `ls`, `glob`, directory listing, or other filesystem tools) when the information may come from knowledge retrieval tools. Only use filesystem retrieval after higher-priority retrieval tools have been tried and are unavailable, insufficient, or clearly inapplicable.
+
+### 2. Tool Selection
+- When knowledge retrieval is needed and no higher-priority skill-enabled retrieval tool is specified or available, you MUST start with `rag_retrieve` or `table_rag_retrieve` based on the question type. Do NOT answer from model knowledge before trying the appropriate retrieval tool.
+- Use `table_rag_retrieve` first for values, prices, quantities, inventory, specifications, rankings, comparisons, summaries, extraction, lists, tables, person / project / product name lookup, historical coverage, mixed questions, or any unclear case.
+- Use `rag_retrieve` first only for clearly pure concept / definition / workflow / policy / explanation questions that do not need structured data.
+
+### 3. Query Preparation
+- Do NOT pass the user's raw question directly unless it already fits retrieval needs well.
+- Rewrite the query to improve recall: extract the core entity, time scope, attributes, and intent.
+- Add meaningful variants such as synonyms, aliases, abbreviations, related titles, historical names, and category terms.
+- Expand enumeration-style, historical, roster, timeline, overview, archive, extraction, and list-style queries more aggressively.
+- Preserve the original meaning and do not introduce unrelated topics. Use both the original query and rewritten variants whenever possible.
+
+### 4. Retrieval Breadth (`top_k`)
+- `top_k` applies to `rag_retrieve`. Use the smallest sufficient `top_k` and expand only when coverage is insufficient.
+- Use `30` for simple fact lookup about one specific thing.
+- Use `50` for moderate synthesis, comparison, summarization, or disambiguation.
+- Use `100` for broad-recall queries needing high coverage, such as comprehensive analysis, scattered knowledge, multiple entities or periods, list / catalog / timeline / roster / overview requests, or all items / historical succession / many records.
+- Raise `top_k` when query rewrite produces many useful keyword branches or when results are too few, repetitive, incomplete, sparse, or too narrow in coverage. Do not raise `top_k` just because the query is longer.
+- Expansion sequence: `30 -> 50 -> 100`. If uncertain, prefer `100`.
+
+### 5. Result Evaluation
+- Treat the result as insufficient when it is empty, starts with `Error:`, says `no excel files found`, is off-topic, does not match the user's core entity / scope, or clearly contains no usable evidence.
+- Treat the result as insufficient when it only covers part of the user's request, or when the user asked for a complete list, historical coverage, comparison, or mixed data + explanation but the result is only partial or truncated.
+
+### 6. Fallback and Sequential Retry
+- If the first retrieval tool returns empty results, errors, clearly irrelevant content, or only partial coverage of the user's request, you MUST try the other retrieval tool before replying to the user.
+- If the table result is empty, continue with `rag_retrieve` before concluding that no relevant data exists.
+- You may say that no relevant information was found only after both `rag_retrieve` and `table_rag_retrieve` have been tried and still do not provide enough evidence to answer.
+
+### 7. Table Result Handling
+- When processing `table_rag_retrieve` results, follow all instructions in `[INSTRUCTION]` and `[EXTRA_INSTRUCTION]` sections of the response.
+- If Query result hint indicates truncation (for example, `Only the first N rows are included; the remaining M rows were omitted`), you MUST explicitly tell the user the total matches (`N+M`), displayed count (`N`), and omitted count (`M`).
+- Cite data sources using file names from `file_ref_table` in the response.
+
+### 8. Citation Requirements for Retrieved Knowledge
+- When your answer uses learned knowledge from `rag_retrieve` or `table_rag_retrieve`, you MUST generate `<citation>` tags.
+- Follow the specific citation format instructions returned by each tool.
+- Citations MUST appear IMMEDIATELY AFTER the paragraph or bullet list that uses the knowledge.
+- NEVER collect all citations and place them at the end of your response.
+- Limit to 1-2 citations per paragraph or bullet list, combining related facts under one citation when possible.
+- If your answer uses learned knowledge, you MUST generate at least 1 `<citation>` in the response.