Nexevo.ai
← All examples
RAG / Retrieval

Full RAG pipeline

Embedding retrieval → rerank fine-ranking → chat answering. Answering over 10k docs costs ~$0.06 this way, vs ~$5+ for stuffing the raw text into the prompt.

python
from nexevo_ai import Nexevo

client = Nexevo()  # by default reads the NEXEVO_API_KEY environment variable

# ── 1. One-time offline indexing ──
all_docs = ["...文档1全文...", "...文档2全文...", "..."]  # 10k documents
embedding_response = client.embeddings.create(model="bge-m3", input=all_docs)
vectors = [item["embedding"] for item in embedding_response["data"]]
# (Vector-store read/write code omitted — Pinecone / Milvus / pgvector all work.)

# ── 2. At question time (runs once per request) ──
question = "如何重置员工 VPN?"

# 2a) Embed the question, then recall the top-50 candidates from the vector store.
question_embedding = client.embeddings.create(model="bge-m3", input=question)
top50 = vector_db.query(question_embedding["data"][0]["embedding"], k=50)

# 2b) Rerank the 50 candidates down to the 5 most relevant.
rerank_response = client.rerank.create(
    model="rerank-v3.5",
    query=question,
    documents=[doc.text for doc in top50],
    top_n=5,
)
top5 = [top50[result["index"]] for result in rerank_response["results"]]

# 2c) Answer with chat, grounded on the top-5 documents only.
context = "\n\n".join(doc.text for doc in top5)
ans = client.chat.completions.create(
    model="nexevo/balanced",
    messages=[
        {"role": "system",
         "content": f"基于以下材料答题(只能用这些材料,不要编):\n\n{context}"},
        {"role": "user", "content": question},
    ],
)
print(ans.choices[0].message.content)
Full RAG pipeline — Nexevo Cookbook | Nexevo.ai