检索器 #

概述 #

检索器(Retriever)负责从索引中检索相关节点,是查询引擎的核心组件。理解检索器可以帮助你定制检索逻辑,提高检索质量。

text
┌─────────────────────────────────────────────────────────────┐
│                    检索器架构                                │
├─────────────────────────────────────────────────────────────┤
│                                                             │
│   Query                                                     │
│      │                                                       │
│      ▼                                                       │
│   ┌─────────────────────────────────────────────────────┐  │
│   │                  Retriever                          │  │
│   │                                                     │  │
│   │   ┌───────────────────────────────────────────┐    │  │
│   │   │              Index                         │    │  │
│   │   │  Node1  Node2  Node3  Node4  Node5  ...   │    │  │
│   │   └───────────────────────────────────────────┘    │  │
│   │                    │                                │  │
│   │                    ▼                                │  │
│   │   ┌───────────────────────────────────────────┐    │  │
│   │   │         Retrieved Nodes                    │    │  │
│   │   │  Node2 (0.95)  Node5 (0.87)  Node1 (0.72) │    │  │
│   │   └───────────────────────────────────────────┘    │  │
│   │                                                     │  │
│   └─────────────────────────────────────────────────────┘  │
│                                                             │
└─────────────────────────────────────────────────────────────┘

基本用法 #

从索引创建检索器 #

python
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader

documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)

retriever = index.as_retriever()

nodes = retriever.retrieve("你的查询")

for node in nodes:
    print(f"相似度: {node.score:.4f}")
    print(f"内容: {node.node.text[:100]}...")
    print("---")

配置检索数量 #

python
retriever = index.as_retriever(
    similarity_top_k=5
)

nodes = retriever.retrieve("查询内容")
print(f"检索到 {len(nodes)} 个节点")

异步检索 #

python
import asyncio

async def async_retrieve():
    """Retrieve nodes asynchronously and print each node's similarity score."""
    r = index.as_retriever()
    results = await r.aretrieve("查询内容")

    for item in results:
        print(f"相似度: {item.score:.4f}")
asyncio.run(async_retrieve())

检索器类型 #

VectorIndexRetriever #

基于向量相似度的检索器:

python
from llama_index.core import VectorStoreIndex
from llama_index.core.retrievers import VectorIndexRetriever

index = VectorStoreIndex.from_documents(documents)

retriever = VectorIndexRetriever(
    index=index,
    similarity_top_k=5,
    vector_store_query_mode="default",
)

nodes = retriever.retrieve("查询内容")

SummaryIndexRetriever #

遍历所有节点的检索器:

python
from llama_index.core import SummaryIndex
from llama_index.core.retrievers import SummaryIndexRetriever

index = SummaryIndex.from_documents(documents)

retriever = SummaryIndexRetriever(index=index)

nodes = retriever.retrieve("查询内容")

TreeRetriever #

树形结构检索器:

python
from llama_index.core import TreeIndex
from llama_index.core.retrievers import TreeRetriever

index = TreeIndex.from_documents(documents)

retriever = TreeRetriever(
    index=index,
    child_branch_factor=2,
)

nodes = retriever.retrieve("查询内容")

KeywordTableRetriever #

关键词匹配检索器:

python
from llama_index.core import KeywordTableIndex
from llama_index.core.retrievers import KeywordTableRetriever

index = KeywordTableIndex.from_documents(documents)

retriever = KeywordTableRetriever(index=index)

nodes = retriever.retrieve("Python 编程")

高级检索器 #

RouterRetriever #

根据查询自动选择检索器:

python
from llama_index.core import VectorStoreIndex, KeywordTableIndex
from llama_index.core.retrievers import (
    VectorIndexRetriever,
    KeywordTableRetriever,
    RouterRetriever,
)
from llama_index.core.tools import RetrieverTool

vector_index = VectorStoreIndex.from_documents(documents)
keyword_index = KeywordTableIndex.from_documents(documents)

vector_retriever = VectorIndexRetriever(
    index=vector_index,
    similarity_top_k=3,
)

keyword_retriever = KeywordTableRetriever(
    index=keyword_index,
)

retriever = RouterRetriever(
    retrievers=[
        RetrieverTool.from_defaults(
            retriever=vector_retriever,
            description="适合语义搜索和自然语言查询",
        ),
        RetrieverTool.from_defaults(
            retriever=keyword_retriever,
            description="适合关键词搜索和精确匹配",
        ),
    ],
)

nodes = retriever.retrieve("查询内容")

AutoMergingRetriever #

自动合并相关节点:

python
from llama_index.core import VectorStoreIndex
from llama_index.core.retrievers import AutoMergingRetriever
from llama_index.core.node_parser import HierarchicalNodeParser

parser = HierarchicalNodeParser.from_defaults(
    chunk_sizes=[2048, 512, 128]
)
nodes = parser.get_nodes_from_documents(documents)

index = VectorStoreIndex(nodes)

retriever = AutoMergingRetriever(
    index.as_retriever(similarity_top_k=10),
    storage_context=index.storage_context,
)

nodes = retriever.retrieve("查询内容")

RecursiveRetriever #

递归检索,支持嵌套索引:

python
from llama_index.core import VectorStoreIndex, Document
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.schema import IndexNode

doc = Document(text="主文档内容...")
index = VectorStoreIndex.from_documents([doc])

all_nodes_dict = {node.node_id: node for node in index.docstore.docs.values()}

retriever = RecursiveRetriever(
    "vector",
    retriever_dict={"vector": index.as_retriever()},
    node_dict=all_nodes_dict,
)

nodes = retriever.retrieve("查询内容")

BM25Retriever #

基于 BM25 算法的检索器:

bash
pip install llama-index-retrievers-bm25 rank-bm25
python
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.retrievers.bm25 import BM25Retriever

documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)

bm25_retriever = BM25Retriever.from_defaults(
    nodes=list(index.docstore.docs.values()),
    similarity_top_k=5,
)

nodes = bm25_retriever.retrieve("查询内容")

混合检索 #

向量 + BM25 混合检索 #

python
from llama_index.core import VectorStoreIndex
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.core.retrievers import QueryFusionRetriever

index = VectorStoreIndex.from_documents(documents)

vector_retriever = VectorIndexRetriever(
    index=index,
    similarity_top_k=5,
)

bm25_retriever = BM25Retriever.from_defaults(
    nodes=list(index.docstore.docs.values()),
    similarity_top_k=5,
)

fusion_retriever = QueryFusionRetriever(
    retrievers=[vector_retriever, bm25_retriever],
    similarity_top_k=5,
    num_queries=1,
    mode="reciprocal_rerank",
)

nodes = fusion_retriever.retrieve("查询内容")

QueryFusionRetriever #

多检索器融合:

python
from llama_index.core.retrievers import QueryFusionRetriever

fusion_retriever = QueryFusionRetriever(
    retrievers=[retriever1, retriever2, retriever3],
    similarity_top_k=10,
    num_queries=1,
    mode="reciprocal_rerank",
    use_async=True,
)

nodes = fusion_retriever.retrieve("查询内容")

融合模式 #

python
fusion_retriever = QueryFusionRetriever(
    retrievers=[retriever1, retriever2],
    mode="reciprocal_rerank",
)

fusion_retriever = QueryFusionRetriever(
    retrievers=[retriever1, retriever2],
    mode="simple",
)

fusion_retriever = QueryFusionRetriever(
    retrievers=[retriever1, retriever2],
    mode="dist_based",
)

元数据过滤 #

基本过滤 #

python
from llama_index.core.vector_stores import MetadataFilters, MetadataFilter

filters = MetadataFilters(
    filters=[
        MetadataFilter(key="category", value="tech"),
    ]
)

retriever = index.as_retriever(
    similarity_top_k=5,
    filters=filters,
)

nodes = retriever.retrieve("查询内容")

多条件过滤 #

python
from llama_index.core.vector_stores import MetadataFilters, MetadataFilter, FilterOperator

filters = MetadataFilters(
    filters=[
        MetadataFilter(key="category", value="tech"),
        MetadataFilter(key="year", value=2024, operator=FilterOperator.GTE),
        MetadataFilter(key="author", value="admin", operator=FilterOperator.NE),
    ]
)

retriever = index.as_retriever(filters=filters)

动态过滤 #

python
from llama_index.core import VectorStoreIndex
from llama_index.core.vector_stores import MetadataFilters, MetadataFilter

def get_retriever_with_filter(category: str):
    """Return a retriever limited to nodes whose ``category`` metadata equals *category*."""
    category_filter = MetadataFilter(key="category", value=category)
    metadata_filters = MetadataFilters(filters=[category_filter])
    return index.as_retriever(filters=metadata_filters)

tech_retriever = get_retriever_with_filter("tech")
news_retriever = get_retriever_with_filter("news")

自定义检索器 #

继承 BaseRetriever #

python
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.schema import NodeWithScore, QueryBundle
from typing import List

class CustomRetriever(BaseRetriever):
    """Retriever that over-fetches candidates, then keeps only long-enough nodes.

    Fetches ``top_k * 2`` candidates from the wrapped index's default
    retriever, drops any node whose text is 100 characters or shorter,
    and returns at most ``top_k`` of the survivors.
    """

    def __init__(self, index, top_k: int = 3):
        # BaseRetriever.__init__ sets up internal state (e.g. the callback
        # manager) that the public retrieve() entry point relies on; without
        # this call, retrieve() raises AttributeError.
        super().__init__()
        self.index = index       # index providing the underlying retriever
        self.top_k = top_k       # maximum number of nodes to return

    def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        # Over-fetch so that filtering still leaves enough candidates.
        base_retriever = self.index.as_retriever(similarity_top_k=self.top_k * 2)
        candidates = base_retriever.retrieve(query_bundle)

        # Keep only nodes with substantial text, capped at top_k.
        filtered_nodes = [
            node for node in candidates
            if len(node.node.text) > 100
        ][:self.top_k]

        return filtered_nodes

custom_retriever = CustomRetriever(index, top_k=5)
nodes = custom_retriever.retrieve("查询内容")

组合检索器 #

python
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.schema import NodeWithScore, QueryBundle
from typing import List

class CombinedRetriever(BaseRetriever):
    """Union of several retrievers, deduplicated and ranked by score.

    Runs every wrapped retriever on the same query, drops duplicate nodes
    (first occurrence wins), sorts by score descending, and returns the
    top ``top_k`` results.
    """

    def __init__(self, retrievers: List[BaseRetriever], top_k: int = 5):
        # BaseRetriever.__init__ sets up internal state (e.g. the callback
        # manager) that the public retrieve() entry point relies on; without
        # this call, retrieve() raises AttributeError.
        super().__init__()
        self.retrievers = retrievers  # retrievers whose results are merged
        self.top_k = top_k            # maximum number of nodes to return

    def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        all_nodes: List[NodeWithScore] = []
        seen_ids = set()

        for retriever in self.retrievers:
            nodes = retriever.retrieve(query_bundle)
            for node in nodes:
                if node.node.node_id not in seen_ids:
                    all_nodes.append(node)
                    seen_ids.add(node.node.node_id)

        # Nodes without a score sort as 0, i.e. they fall to the end.
        all_nodes.sort(key=lambda x: x.score or 0, reverse=True)
        return all_nodes[:self.top_k]

combined_retriever = CombinedRetriever([retriever1, retriever2])
nodes = combined_retriever.retrieve("查询内容")

检索结果处理 #

重排序 #

python
from llama_index.core import VectorStoreIndex
# SentenceTransformerRerank is exported from llama_index.core.postprocessor
# (the path llama_index.postprocessor.sentence_transformer_rerank does not
# exist in current llama-index releases).
from llama_index.core.postprocessor import SentenceTransformerRerank

index = VectorStoreIndex.from_documents(documents)
# Over-fetch (top_k=10) so the reranker has candidates to reorder.
retriever = index.as_retriever(similarity_top_k=10)

nodes = retriever.retrieve("查询内容")

# Cross-encoder reranking: rescores each (query, node) pair and keeps top_n.
reranker = SentenceTransformerRerank(
    model="cross-encoder/ms-marco-MiniLM-L-6-v2",
    top_n=3,
)
reranked_nodes = reranker.postprocess_nodes(nodes, query_str="查询内容")

for node in reranked_nodes:
    print(f"分数: {node.score:.4f}")

去重 #

python
def deduplicate_nodes(nodes):
    """Drop duplicate nodes by node_id, keeping first occurrences in order."""
    seen_ids = set()
    result = []

    for candidate in nodes:
        node_id = candidate.node.node_id
        if node_id in seen_ids:
            continue
        seen_ids.add(node_id)
        result.append(candidate)

    return result

nodes = retriever.retrieve("查询内容")
unique_nodes = deduplicate_nodes(nodes)

相似度过滤 #

python
def filter_by_similarity(nodes, threshold=0.7):
    """Keep nodes whose similarity score is at least *threshold*.

    Nodes without a score (``score is None``) are dropped.  An explicit
    ``is not None`` check is used instead of truthiness so that a valid
    score of 0.0 is not silently discarded when the threshold permits it.
    """
    return [node for node in nodes if node.score is not None and node.score >= threshold]

nodes = retriever.retrieve("查询内容")
filtered_nodes = filter_by_similarity(nodes, threshold=0.8)

完整示例 #

python
import os
from llama_index.core import (
    VectorStoreIndex,
    SimpleDirectoryReader,
    Settings,
)
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.vector_stores import MetadataFilters, MetadataFilter

os.environ["OPENAI_API_KEY"] = "sk-your-key"

Settings.llm = OpenAI(model="gpt-4o-mini")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")

documents = SimpleDirectoryReader("./data").load_data()
print(f"加载了 {len(documents)} 个文档")

index = VectorStoreIndex.from_documents(documents)

retriever = VectorIndexRetriever(
    index=index,
    similarity_top_k=5,
)

print("\n=== 检索测试 ===\n")

queries = [
    "LlamaIndex 是什么?",
    "如何使用索引?",
    "查询引擎的功能",
]

for query in queries:
    print(f"查询: {query}")
    nodes = retriever.retrieve(query)
    
    print(f"检索到 {len(nodes)} 个节点:")
    for i, node in enumerate(nodes):
        print(f"  [{i+1}] 相似度: {node.score:.4f}")
        print(f"      内容: {node.node.text[:80]}...")
    print()

下一步 #

掌握检索器后,接下来学习 响应合成器 了解如何生成高质量回答!

最后更新:2026-03-30