智能代理 #

概述 #

智能代理(Agent)是能够自主决策、使用工具、执行多步骤任务的 AI 系统。与简单的查询引擎不同,代理可以根据用户问题动态选择和执行操作。

text
┌─────────────────────────────────────────────────────────────┐
│                    智能代理架构                              │
├─────────────────────────────────────────────────────────────┤
│                                                             │
│   用户问题                                                   │
│      │                                                       │
│      ▼                                                       │
│   ┌─────────────────────────────────────────────────────┐  │
│   │                    Agent                            │  │
│   │                                                     │  │
│   │   ┌─────────────────────────────────────────────┐  │  │
│   │   │              推理循环                        │  │  │
│   │   │                                             │  │  │
│   │   │  1. 分析问题                                │  │  │
│   │   │  2. 选择工具                                │  │  │
│   │   │  3. 执行操作                                │  │  │
│   │   │  4. 观察结果                                │  │  │
│   │   │  5. 继续或结束                              │  │  │
│   │   │                                             │  │  │
│   │   └─────────────────────────────────────────────┘  │  │
│   │                                                     │  │
│   │   ┌─────────┐ ┌─────────┐ ┌─────────┐            │  │
│   │   │ Tool 1  │ │ Tool 2  │ │ Tool 3  │            │  │
│   │   └─────────┘ └─────────┘ └─────────┘            │  │
│   │                                                     │  │
│   └─────────────────────────────────────────────────────┘  │
│                           │                                  │
│                           ▼                                  │
│   最终回答                                                   │
│                                                             │
└─────────────────────────────────────────────────────────────┘

基本用法 #

ReAct 代理 #

python
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.agent import ReActAgent
from llama_index.core.tools import QueryEngineTool

# Load documents from ./data and build an in-memory vector index.
documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)

# Wrap the index's query engine as a tool the agent can invoke.
query_engine_tool = QueryEngineTool.from_defaults(
    query_engine=index.as_query_engine(),
    name="knowledge_base",
    description="用于查询知识库中的信息",
)

# Build a ReAct (reason + act) agent over the single tool.
# verbose=True prints intermediate reasoning and tool-call steps.
agent = ReActAgent.from_tools(
    [query_engine_tool],
    verbose=True,
)

response = agent.chat("文档的主要内容是什么?")
print(response)

OpenAI Functions 代理 #

python
from llama_index.core import VectorStoreIndex
from llama_index.core.agent import FunctionCallingAgent
from llama_index.core.tools import QueryEngineTool
from llama_index.llms.openai import OpenAI

llm = OpenAI(model="gpt-4o")
# NOTE(review): `documents` is assumed to be loaded earlier (see the ReAct
# example above) — this snippet does not load it itself.
index = VectorStoreIndex.from_documents(documents)

query_engine_tool = QueryEngineTool.from_defaults(
    query_engine=index.as_query_engine(),
    name="knowledge_base",
    description="查询知识库",
)

# Function-calling agent: uses the LLM's native tool/function-calling API
# instead of ReAct-style text prompting.
agent = FunctionCallingAgent.from_tools(
    [query_engine_tool],
    llm=llm,
    verbose=True,
)

response = agent.chat("你的问题")

工具定义 #

查询引擎工具 #

python
from llama_index.core.tools import QueryEngineTool

# return_direct=False: the tool's output is fed back to the agent for further
# reasoning; True would return the tool output to the user verbatim.
tool = QueryEngineTool.from_defaults(
    query_engine=query_engine,
    name="search_tool",
    description="搜索知识库中的相关信息",
    return_direct=False,
)

自定义函数工具 #

python
from llama_index.core.tools import FunctionTool

def multiply(a: int, b: int) -> int:
    """将两个数字相乘"""
    # Docstring is kept verbatim: FunctionTool uses it as the tool description.
    product = a * b
    return product

def add(a: int, b: int) -> int:
    """将两个数字相加"""
    # Docstring is kept verbatim: FunctionTool uses it as the tool description.
    total = a + b
    return total

# FunctionTool infers the tool name, argument schema, and description from
# the function signature and docstring.
multiply_tool = FunctionTool.from_defaults(fn=multiply)
add_tool = FunctionTool.from_defaults(fn=add)

agent = ReActAgent.from_tools(
    [multiply_tool, add_tool],
    verbose=True,
)

# The agent is expected to chain the two tools: multiply first, then add.
response = agent.chat("计算 5 乘以 3,然后加上 10")
print(response)

带参数验证的工具 #

python
from llama_index.core.tools import FunctionTool
from pydantic import BaseModel, Field

# Pydantic schema describing the calculator arguments; pass it to
# FunctionTool via fn_schema so arguments are validated before the call.
class CalculatorInput(BaseModel):
    a: int = Field(description="第一个数字")
    b: int = Field(description="第二个数字")

def divide(a: int, b: int) -> float:
    """将两个数字相除"""
    # Reject a zero divisor explicitly instead of letting ZeroDivisionError leak.
    if not b:
        raise ValueError("除数不能为零")
    quotient = a / b
    return quotient

# Wire the pydantic model (CalculatorInput, defined above) into the tool so
# the section's promised "parameter validation" actually happens: invalid
# arguments are rejected by pydantic before `divide` is called. The original
# snippet defined CalculatorInput but never used it.
divide_tool = FunctionTool.from_defaults(
    fn=divide,
    name="divide",
    description="将两个数字相除",
    fn_schema=CalculatorInput,
)

异步工具 #

python
from llama_index.core.tools import FunctionTool
import asyncio

async def async_search(query: str) -> str:
    """异步搜索"""
    # Simulated I/O latency; replace with a real async call in practice.
    await asyncio.sleep(1)
    result = f"搜索结果: {query}"
    return result

async_search_tool = FunctionTool.from_defaults(fn=async_search)

工具规范 #

ToolMetadata #

python
from llama_index.core.tools import ToolMetadata, FunctionTool

def my_function(query: str, count: int = 5) -> str:
    """搜索函数

    Args:
        query: 搜索关键词
        count: 返回结果数量

    Returns:
        搜索结果字符串
    """
    message = f"找到 {count} 个关于 '{query}' 的结果"
    return message

from pydantic import BaseModel, Field


# fn_schema must be a pydantic BaseModel CLASS describing the tool arguments.
# The original passed my_function.__annotations__ (a plain dict of type
# annotations), which is not a valid schema and breaks argument validation.
class SearchInput(BaseModel):
    query: str = Field(description="搜索关键词")
    count: int = Field(default=5, description="返回结果数量")


metadata = ToolMetadata(
    name="search",
    description="搜索相关内容",
    fn_schema=SearchInput,
)

tool = FunctionTool(fn=my_function, metadata=metadata)

动态工具 #

python
from llama_index.core.tools import ToolMetadata, FunctionTool
from typing import Callable, Any

def create_dynamic_tool(
    name: str,
    description: str,
    fn: Callable,
) -> FunctionTool:
    """Factory: wrap an arbitrary callable as a FunctionTool with the given metadata."""
    tool = FunctionTool.from_defaults(
        fn=fn,
        name=name,
        description=description,
    )
    return tool

def get_weather(city: str) -> str:
    # Demo stub: returns a canned report; the f-string output is unchanged.
    report = f"{city} 的天气:晴朗,25°C"
    return report

# Build a weather tool at runtime via the factory above.
weather_tool = create_dynamic_tool(
    name="get_weather",
    description="获取指定城市的天气信息",
    fn=get_weather,
)

代理类型 #

ReActAgent #

基于推理-行动循环的代理:

python
from llama_index.core.agent import ReActAgent

# max_iterations caps the reason/act loop to avoid unbounded tool-calling.
agent = ReActAgent.from_tools(
    tools=[tool1, tool2],
    llm=llm,
    verbose=True,
    max_iterations=10,
)

response = agent.chat("你的问题")

FunctionCallingAgent #

基于函数调用的代理(需要支持 Function Calling 的 LLM):

python
from llama_index.core.agent import FunctionCallingAgent
from llama_index.llms.openai import OpenAI

llm = OpenAI(model="gpt-4o")

# Relies on the LLM's native function/tool-calling API; the chosen model
# must support it.
agent = FunctionCallingAgent.from_tools(
    tools=[tool1, tool2],
    llm=llm,
    verbose=True,
)

response = agent.chat("你的问题")

OpenAIAgent #

OpenAI 专用代理:

python
from llama_index.core.agent import AgentRunner
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI

llm = OpenAI(model="gpt-4o")

# OpenAI-specific agent; system_prompt sets the assistant persona.
# NOTE(review): AgentRunner is imported in this snippet but never used.
agent = OpenAIAgent.from_tools(
    tools=[tool1, tool2],
    llm=llm,
    verbose=True,
    system_prompt="你是一个专业的助手",
)

response = agent.chat("你的问题")

多代理协作 #

代理作为工具 #

python
from llama_index.core.agent import ReActAgent
from llama_index.core.tools import AgentTool

# Sub-agent that performs search/research with a pre-existing search_tool.
research_agent = ReActAgent.from_tools(
    [search_tool],
    name="research_agent",
    description="负责搜索和研究信息",
)

# Sub-agent that performs analysis with a pre-existing analysis_tool.
analysis_agent = ReActAgent.from_tools(
    [analysis_tool],
    name="analysis_agent",
    description="负责分析数据",
)

# Wrap each sub-agent as a tool under DISTINCT names. The original rebound
# `analysis_tool` here, shadowing the underlying tool that analysis_agent
# was built from — a latent bug if the snippet is ever reordered or reused.
# NOTE(review): confirm `AgentTool.from_agent` exists in the installed
# llama_index version; some versions expose agent-as-tool differently
# (e.g. via QueryEngineTool over the agent).
research_agent_tool = AgentTool.from_agent(research_agent)
analysis_agent_tool = AgentTool.from_agent(analysis_agent)

# Coordinator delegates to the sub-agents through their tool wrappers.
coordinator_agent = ReActAgent.from_tools(
    [research_agent_tool, analysis_agent_tool],
    verbose=True,
)

response = coordinator_agent.chat("研究并分析 Python 的最新发展")

记忆管理 #

基本记忆 #

python
from llama_index.core.memory import ChatMemoryBuffer

# Conversation buffer trimmed to roughly 4096 tokens of history.
memory = ChatMemoryBuffer.from_defaults(token_limit=4096)

agent = ReActAgent.from_tools(
    tools=[tool1, tool2],
    memory=memory,
    verbose=True,
)

# The second call can refer back to the first thanks to the shared memory.
response1 = agent.chat("第一个问题")
response2 = agent.chat("基于之前的回答,继续...")

自定义记忆 #

python
from llama_index.core.memory import BaseMemory
from llama_index.core.chat_engine.types import ChatMessage
from typing import List

class CustomMemory(BaseMemory):
    """Minimal in-process chat memory backed by a plain Python list.

    NOTE(review): llama_index's BaseMemory is pydantic-based in recent
    versions; assigning plain attributes in __init__ like this may conflict
    with pydantic field handling, and BaseMemory may require additional
    members (e.g. a `from_defaults` classmethod) — confirm against the
    installed version.
    """

    def __init__(self, token_limit: int = 4096):
        # token_limit is stored but not enforced in this simple example.
        self.token_limit = token_limit
        self.messages: List[ChatMessage] = []

    def get(self, input_str: str) -> List[ChatMessage]:
        # Return the full history regardless of the current input.
        return self.messages

    def put(self, message: ChatMessage) -> None:
        # Append one message to the history.
        self.messages.append(message)

    def set(self, messages: List[ChatMessage]) -> None:
        # Replace the entire history.
        self.messages = messages

    def reset(self) -> None:
        # Clear the history.
        self.messages = []

    def get_all(self) -> List[ChatMessage]:
        return self.messages

memory = CustomMemory()

流式输出 #

python
agent = ReActAgent.from_tools(
    tools=[tool1, tool2],
    verbose=True,
)

# stream_chat returns a streaming response; response_gen yields text tokens
# as they are produced by the LLM.
response = agent.stream_chat("你的问题")

for token in response.response_gen:
    print(token, end="", flush=True)
print()

自定义提示词 #

python
from llama_index.core.agent import ReActAgent
from llama_index.core import PromptTemplate

system_prompt = """你是一个专业的 AI 助手。

你可以使用以下工具:
{tools}

请根据用户问题选择合适的工具来完成任务。
"""

# NOTE(review): confirm this llama_index version's ReActAgent.from_tools
# accepts a `system_prompt` kwarg; some versions customize the ReAct prompt
# via a `context` argument or agent.update_prompts() instead.
agent = ReActAgent.from_tools(
    tools=[tool1, tool2],
    system_prompt=system_prompt,
    verbose=True,
)

完整示例 #

python
import os
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.agent import ReActAgent
from llama_index.core.tools import QueryEngineTool, FunctionTool

# NOTE: load the real key from the environment in production; never commit keys.
os.environ["OPENAI_API_KEY"] = "sk-your-key"

# Global defaults used by index construction and by the agent below.
Settings.llm = OpenAI(model="gpt-4o-mini")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")

documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)

query_engine_tool = QueryEngineTool.from_defaults(
    query_engine=index.as_query_engine(),
    name="knowledge_base",
    description="查询知识库中的信息",
)

def get_current_time() -> str:
    """获取当前时间"""
    # Local import keeps the snippet self-contained, as in the original.
    from datetime import datetime

    now = datetime.now()
    return now.strftime("%Y-%m-%d %H:%M:%S")

def calculate(expression: str) -> str:
    """计算数学表达式"""
    # SECURITY: the original used eval(), which executes arbitrary Python on
    # agent/LLM-supplied input (e.g. "__import__('os')..."). Evaluate only
    # arithmetic via the AST instead; results for valid arithmetic are
    # unchanged, and the "计算错误: ..." error format is preserved.
    import ast
    import operator

    # Allowed operator nodes -> implementations; anything else is rejected.
    ops = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.FloorDiv: operator.floordiv,
        ast.Mod: operator.mod,
        ast.Pow: operator.pow,
        ast.USub: operator.neg,
        ast.UAdd: operator.pos,
    }

    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in ops:
            return ops[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in ops:
            return ops[type(node.op)](_eval(node.operand))
        raise ValueError("不支持的表达式")

    try:
        result = _eval(ast.parse(expression, mode="eval"))
        return str(result)
    except Exception as e:
        return f"计算错误: {str(e)}"

# Register the plain functions as tools; name/schema/description are
# inferred from each function's signature and docstring.
time_tool = FunctionTool.from_defaults(fn=get_current_time)
calc_tool = FunctionTool.from_defaults(fn=calculate)

agent = ReActAgent.from_tools(
    [query_engine_tool, time_tool, calc_tool],
    verbose=True,
)

print("\n=== 智能代理系统 ===")
print("输入 'quit' 退出\n")

# Simple REPL: one agent.chat() call per question; 'quit' exits, empty
# input is skipped.
while True:
    question = input("问题: ").strip()
    if question.lower() == "quit":
        break
    
    if not question:
        continue
    
    print()
    response = agent.chat(question)
    print(f"\n回答: {response}\n")

下一步 #

掌握智能代理后,接下来学习 评估与优化 了解如何评估 RAG 应用质量!

最后更新:2026-03-30