Integration scenario
LangChain integration (langchain-nexevo)
Drop-in replacement for ChatOpenAI — get smart routing, ELO duels, and cascade cost optimization in one import. Plus a LangGraph checkpoint saver bridged to /v1/conversations.
Python
python
# pip install langchain-nexevo
# Optional: pip install "langchain-nexevo[langgraph]" for the checkpoint saver
from langchain_nexevo import ChatNexevo
from langchain_core.messages import HumanMessage, SystemMessage

# 1) Drop-in chat — replaces ChatOpenAI in any LangChain app
chat = ChatNexevo(
    model="nexevo/balanced",  # 4-layer smart routing (capability + bandit + ELO + cascade)
    api_key="sk-...",         # or set NEXEVO_API_KEY env var
    temperature=0.2,
)
resp = chat.invoke([
    SystemMessage("You are concise."),
    HumanMessage("Capital of France?"),
])
print(resp.content)  # → "Paris"
print(resp.response_metadata["trace_id"])  # → "abcd...32hex" — for /v1/feedback

# 2) Streaming + tool calling work out of the box (inherited from ChatOpenAI)
for chunk in chat.stream([HumanMessage("Stream please")]):
    print(chunk.content, end="", flush=True)

# 3) LangGraph + persistent checkpoints (instead of MemorySaver / Postgres)
from langgraph.graph import StateGraph
from langchain_nexevo import NexevoCheckpointSaver

saver = NexevoCheckpointSaver(api_key="sk-...")
# graph = StateGraph(...).compile(checkpointer=saver)
# State persists in /v1/conversations — multi-pod safe + admin auditable.
# thread_id ↔ conversation_id auto-mapped
# resume across process restarts

# 4) Embeddings (same OpenAI-compat surface)
from langchain_nexevo import NexevoEmbeddings

emb = NexevoEmbeddings(model="nexevo-embed-balanced")
vec = emb.embed_query("hello world")  # 1536-dim by default