Integrations #

Overview #

Pinecone integrates seamlessly with a wide range of AI frameworks and tools, making it easy to build powerful AI applications.

text
┌─────────────────────────────────────────────────────────────┐
│               Pinecone integration ecosystem                │
├─────────────────────────────────────────────────────────────┤
│                                                             │
│  ┌─────────────────────────────────────────────────────┐   │
│  │                  Application layer                  │   │
│  │ RAG apps · Recommenders · Semantic search · Chatbots│   │
│  └─────────────────────────────────────────────────────┘   │
│                              │                              │
│  ┌─────────────────────────────────────────────────────┐   │
│  │                   Framework layer                   │   │
│  │      LangChain · LlamaIndex · Haystack · DSPy       │   │
│  └─────────────────────────────────────────────────────┘   │
│                              │                              │
│  ┌─────────────────────────────────────────────────────┐   │
│  │                     Model layer                     │   │
│  │    OpenAI · Cohere · HuggingFace · Local models     │   │
│  └─────────────────────────────────────────────────────┘   │
│                              │                              │
│  ┌─────────────────────────────────────────────────────┐   │
│  │                    Storage layer                    │   │
│  │              Pinecone vector database               │   │
│  └─────────────────────────────────────────────────────┘   │
│                                                             │
└─────────────────────────────────────────────────────────────┘

Integrating with OpenAI #

Install dependencies #

bash
pip install pinecone openai

Basic integration #

python
import os
from openai import OpenAI
from pinecone import Pinecone, ServerlessSpec

openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
pc = Pinecone(api_key=os.getenv("PINECONE_API_KEY"))

def get_embedding(text, model="text-embedding-3-small"):
    response = openai_client.embeddings.create(
        model=model,
        input=text
    )
    return response.data[0].embedding

# text-embedding-3-small produces 1536-dimensional vectors,
# so the index dimension must match
def create_index_if_not_exists(name, dimension=1536):
    if name not in pc.list_indexes().names():
        pc.create_index(
            name=name,
            dimension=dimension,
            metric="cosine",
            spec=ServerlessSpec(cloud="aws", region="us-east-1")
        )
    return pc.Index(name)

index = create_index_if_not_exists("openai-demo")
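
Serverless index creation is asynchronous, so a freshly created index may briefly reject requests. A minimal sketch that polls `describe_index` until the index reports ready (the one-second poll interval is an arbitrary choice):

python
import time

def wait_until_ready(name, poll_seconds=1):
    # describe_index exposes a status dict with a "ready" flag
    while not pc.describe_index(name).status["ready"]:
        time.sleep(poll_seconds)

wait_until_ready("openai-demo")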

Indexing documents #

python
def index_documents(documents):
    vectors = []
    
    for doc in documents:
        embedding = get_embedding(doc["content"])
        vectors.append((
            doc["id"],
            embedding,
            {
                "title": doc["title"],
                "content": doc["content"][:1000],
                "source": doc.get("source", ""),
                "category": doc.get("category", "")
            }
        ))
    
    index.upsert(vectors=vectors)
    print(f"已索引 {len(vectors)} 个文档")

documents = [
    {
        "id": "doc-1",
        "title": "Introduction to Pinecone",
        "content": "Pinecone is a fully managed vector database...",
        "category": "database"
    },
    {
        "id": "doc-2",
        "title": "How vector search works",
        "content": "Vector search retrieves results by semantic similarity...",
        "category": "technology"
    }
]

index_documents(documents)
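
Embedding one document per API call is slow for large corpora. The OpenAI embeddings endpoint accepts a list of inputs and returns results in input order, and Pinecone upserts work well in batches; here is a hedged sketch of a batched variant (the batch size of 100 is an assumption, not a hard limit):

python
def index_documents_batched(documents, batch_size=100):
    for start in range(0, len(documents), batch_size):
        batch = documents[start:start + batch_size]
        # One embeddings call for the whole batch
        response = openai_client.embeddings.create(
            model="text-embedding-3-small",
            input=[doc["content"] for doc in batch]
        )
        vectors = [
            (doc["id"], item.embedding,
             {"title": doc["title"], "content": doc["content"][:1000]})
            for doc, item in zip(batch, response.data)
        ]
        index.upsert(vectors=vectors)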

Semantic search #

python
def semantic_search(query, top_k=5, filters=None):
    query_embedding = get_embedding(query)
    
    query_params = {
        "vector": query_embedding,
        "top_k": top_k,
        "include_metadata": True
    }
    
    if filters:
        query_params["filter"] = filters
    
    results = index.query(**query_params)
    
    return [
        {
            "id": match.id,
            "score": match.score,
            "title": match.metadata.get("title"),
            "content": match.metadata.get("content")
        }
        for match in results.matches
    ]

results = semantic_search("什么是向量数据库", top_k=3)

for r in results:
    print(f"Score: {r['score']:.4f}")
    print(f"Title: {r['title']}")
    print(f"Content: {r['content'][:100]}...")
    print("---")

RAG application #

python
def rag_query(query, top_k=5):
    query_embedding = get_embedding(query)
    
    results = index.query(
        vector=query_embedding,
        top_k=top_k,
        include_metadata=True
    )
    
    context = "\n\n".join([
        f"标题: {match.metadata.get('title')}\n内容: {match.metadata.get('content')}"
        for match in results.matches
    ])
    
    prompt = f"""基于以下上下文回答问题。如果上下文中没有相关信息,请说明。

上下文:
{context}

问题:{query}

请提供详细、准确的回答:"""

    response = openai_client.chat.completions.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": "你是一个专业的AI助手,基于提供的上下文回答问题。"},
            {"role": "user", "content": prompt}
        ],
        temperature=0.7
    )
    
    return {
        "answer": response.choices[0].message.content,
        "sources": [
            {
                "id": match.id,
                "title": match.metadata.get("title"),
                "score": match.score
            }
            for match in results.matches
        ]
    }

result = rag_query("What are Pinecone's advantages?")
print("Answer:", result["answer"])
print("\nSources:")
for source in result["sources"]:
    print(f"  - {source['title']} (score: {source['score']:.4f})")

Integrating with LangChain #

Install dependencies #

bash
pip install langchain langchain-openai langchain-pinecone langchain-community pypdf

Basic integration #

python
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_pinecone import PineconeVectorStore
from pinecone import Pinecone
import os

pc = Pinecone(api_key=os.getenv("PINECONE_API_KEY"))
index = pc.Index("langchain-demo")

embeddings = OpenAIEmbeddings(
    model="text-embedding-3-small",
    openai_api_key=os.getenv("OPENAI_API_KEY")
)

vectorstore = PineconeVectorStore(
    index=index,
    embedding=embeddings,
    text_key="content"
)

Document processing #

python
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader, PyPDFLoader

def load_and_split_documents(file_path, chunk_size=1000, chunk_overlap=200):
    if file_path.endswith(".pdf"):
        loader = PyPDFLoader(file_path)
    else:
        loader = TextLoader(file_path)
    
    documents = loader.load()
    
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap
    )
    
    chunks = text_splitter.split_documents(documents)
    
    return chunks

chunks = load_and_split_documents("document.pdf")

vectorstore.add_documents(chunks)

print(f"已添加 {len(chunks)} 个文档块")

Similarity search #

python
def similarity_search(query, k=4):
    results = vectorstore.similarity_search(query, k=k)
    
    for i, doc in enumerate(results):
        print(f"结果 {i+1}:")
        print(f"内容: {doc.page_content[:200]}...")
        print(f"元数据: {doc.metadata}")
        print("---")

similarity_search("什么是向量数据库")

RAG chain #

python
from langchain.chains import RetrievalQA
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-4",
    temperature=0,
    openai_api_key=os.getenv("OPENAI_API_KEY")
)

qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vectorstore.as_retriever(
        search_kwargs={"k": 4}
    ),
    return_source_documents=True
)

def ask_question(question):
    result = qa_chain.invoke({"query": question})
    
    print("Answer:", result["result"])
    print("\nSource documents:")
    for doc in result["source_documents"]:
        print(f"  - {doc.metadata.get('source', 'Unknown')}")

ask_question("What are Pinecone's main features?")

Conversational RAG #

python
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory(
    memory_key="chat_history",
    return_messages=True
)

qa_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=vectorstore.as_retriever(),
    memory=memory,
    return_source_documents=True
)

def chat(question):
    result = qa_chain.invoke({"question": question})
    return result["answer"]

print(chat("What is Pinecone?"))
print(chat("What are its advantages?"))
print(chat("How do I get started?"))

Integrating with LlamaIndex #

Install dependencies #

bash
pip install llama-index llama-index-vector-stores-pinecone llama-index-embeddings-openai

Basic integration #

python
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.embeddings.openai import OpenAIEmbedding
from pinecone import Pinecone
import os

pc = Pinecone(api_key=os.getenv("PINECONE_API_KEY"))
pinecone_index = pc.Index("llamaindex-demo")

Settings.embed_model = OpenAIEmbedding(
    model="text-embedding-3-small",
    api_key=os.getenv("OPENAI_API_KEY")
)

# add_sparse_vector enables hybrid (dense + sparse) retrieval;
# note that Pinecone only supports sparse values on dotproduct indexes
vector_store = PineconeVectorStore(
    pinecone_index=pinecone_index,
    add_sparse_vector=True
)

Building the index #

python
from llama_index.core import StorageContext

documents = SimpleDirectoryReader("./documents").load_data()

storage_context = StorageContext.from_defaults(
    vector_store=vector_store
)

index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context
)

Query engine #

python
query_engine = index.as_query_engine(
    similarity_top_k=5,
    response_mode="compact"
)

response = query_engine.query("什么是向量数据库?")

print("回答:", response.response)
print("\n来源:")
for node in response.source_nodes:
    print(f"  - Score: {node.score:.4f}")
    print(f"    Content: {node.text[:100]}...")

Chat engine #

python
chat_engine = index.as_chat_engine(
    chat_mode="context",
    verbose=True
)

response = chat_engine.chat("Pinecone 有什么特点?")
print(response)

response = chat_engine.chat("它如何与其他数据库比较?")
print(response)

Other integrations #

Integrating with Cohere #

python
import os

import cohere
from pinecone import Pinecone

co = cohere.Client(os.getenv("COHERE_API_KEY"))
pc = Pinecone(api_key=os.getenv("PINECONE_API_KEY"))

def get_cohere_embedding(text, input_type="search_query"):
    # Cohere v3 embed models require input_type:
    # "search_document" when indexing, "search_query" when querying
    response = co.embed(
        texts=[text],
        model="embed-english-v3.0",
        input_type=input_type
    )
    return response.embeddings[0]

def search_with_cohere(query, top_k=5):
    query_embedding = get_cohere_embedding(query)
    
    index = pc.Index("cohere-demo")
    
    results = index.query(
        vector=query_embedding,
        top_k=top_k,
        include_metadata=True
    )
    
    return results
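
Cohere also ships a dedicated reranking model, which pairs naturally with Pinecone as a second stage: over-fetch candidates from the index, then let the reranker reorder them. A hedged sketch; the model name and result fields follow Cohere's Python SDK for the v3 rerank models, so verify them against your installed version:

python
def search_and_rerank(query, fetch_k=25, top_n=5):
    # Stage 1: recall a generous candidate set from Pinecone
    candidates = search_with_cohere(query, top_k=fetch_k)
    docs = [match.metadata.get("content", "") for match in candidates.matches]

    # Stage 2: reorder candidates by relevance to the query
    reranked = co.rerank(
        model="rerank-english-v3.0",
        query=query,
        documents=docs,
        top_n=top_n
    )
    return [
        (candidates.matches[r.index].id, r.relevance_score)
        for r in reranked.results
    ]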

Integrating with HuggingFace #

python
import os

from sentence_transformers import SentenceTransformer
from pinecone import Pinecone

# all-MiniLM-L6-v2 produces 384-dimensional embeddings
model = SentenceTransformer('all-MiniLM-L6-v2')
pc = Pinecone(api_key=os.getenv("PINECONE_API_KEY"))

def get_hf_embedding(text):
    return model.encode(text).tolist()

def search_with_hf(query, top_k=5):
    query_embedding = get_hf_embedding(query)
    
    index = pc.Index("hf-demo")
    
    results = index.query(
        vector=query_embedding,
        top_k=top_k,
        include_metadata=True
    )
    
    return results
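
The main pitfall with local models is the vector dimension: `all-MiniLM-L6-v2` outputs 384-dimensional vectors, so the index must be created with `dimension=384` rather than the 1536 used for OpenAI embeddings. A sketch of creating a matching index and upserting a few encoded texts:

python
from pinecone import ServerlessSpec

if "hf-demo" not in pc.list_indexes().names():
    pc.create_index(
        name="hf-demo",
        dimension=384,  # must match the sentence-transformers model output
        metric="cosine",
        spec=ServerlessSpec(cloud="aws", region="us-east-1")
    )

texts = [
    "Pinecone is a managed vector database.",
    "Vector search retrieves by meaning."
]
# encode() accepts a list and returns one embedding per input text
embeddings = model.encode(texts)

index = pc.Index("hf-demo")
index.upsert(vectors=[
    (f"hf-doc-{i}", emb.tolist(), {"content": text})
    for i, (text, emb) in enumerate(zip(texts, embeddings))
])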

Complete RAG application example #

python
import os
from openai import OpenAI
from pinecone import Pinecone, ServerlessSpec

class RAGApplication:
    def __init__(self):
        self.openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        self.pc = Pinecone(api_key=os.getenv("PINECONE_API_KEY"))
        self.index_name = "rag-app"
        self._ensure_index()
    
    def _ensure_index(self):
        if self.index_name not in self.pc.list_indexes().names():
            self.pc.create_index(
                name=self.index_name,
                dimension=1536,
                metric="cosine",
                spec=ServerlessSpec(cloud="aws", region="us-east-1")
            )
        self.index = self.pc.Index(self.index_name)
    
    def _get_embedding(self, text):
        response = self.openai_client.embeddings.create(
            model="text-embedding-3-small",
            input=text
        )
        return response.data[0].embedding
    
    def add_document(self, doc_id, content, metadata=None):
        embedding = self._get_embedding(content)
        
        vector_metadata = {"content": content[:1000]}
        if metadata:
            vector_metadata.update(metadata)
        
        self.index.upsert(
            vectors=[(doc_id, embedding, vector_metadata)]
        )
    
    def search(self, query, top_k=5, filters=None):
        query_embedding = self._get_embedding(query)
        
        query_params = {
            "vector": query_embedding,
            "top_k": top_k,
            "include_metadata": True
        }
        
        if filters:
            query_params["filter"] = filters
        
        return self.index.query(**query_params)
    
    def ask(self, question, top_k=5):
        results = self.search(question, top_k=top_k)
        
        context = "\n\n".join([
            match.metadata.get("content", "")
            for match in results.matches
        ])
        
        prompt = f"""基于以下上下文回答问题:

上下文:
{context}

问题:{question}

回答:"""

        response = self.openai_client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7
        )
        
        return {
            "answer": response.choices[0].message.content,
            "sources": [
                {
                    "id": match.id,
                    "score": match.score,
                    "content": match.metadata.get("content", "")[:200]
                }
                for match in results.matches
            ]
        }

rag = RAGApplication()

rag.add_document(
    "doc-1",
    "Pinecone is a fully managed vector database designed for large-scale vector search.",
    {"category": "database"}
)

rag.add_document(
    "doc-2",
    "Vector search retrieves results by semantic similarity, allowing it to capture the meaning behind a query.",
    {"category": "technology"}
)

result = rag.ask("What is Pinecone?")
print("Answer:", result["answer"])

Next steps #

Now that you have integrations covered, continue to Best Practices to learn how to run these patterns in production!

Last updated: 2026-04-04