SDKs
Python

Use the official OpenAI Python SDK with AssistantRouter.
Installation
pip install openai

Configuration
import os
from openai import OpenAI
client = OpenAI(
base_url="https://api.assistantrouter.com/v1",
api_key=os.environ["ASSISTANTROUTER_API_KEY"], # sk-xxx...
)

Basic Usage
Chat Completions
Use your assistant ID to make chat completion requests:
response = client.chat.completions.create(
extra_body={"assistant_id": "your-assistant-uuid"},
messages=[
{"role": "user", "content": "What is the capital of France?"},
],
)
print(response.choices[0].message.content)

When using an assistant, your dashboard-configured system prompt, model, and tools are automatically applied. Alternatively, provide model instead of assistant_id for direct mode without assistant features.
Streaming
stream = client.chat.completions.create(
extra_body={"assistant_id": "your-assistant-uuid"},
messages=[{"role": "user", "content": "Tell me a story."}],
stream=True,
)
for chunk in stream:
content = chunk.choices[0].delta.content or ""
print(content, end="", flush=True)

With Optional Overrides
You can override assistant settings per-request:
response = client.chat.completions.create(
extra_body={
"assistant_id": "your-assistant-uuid",
"user_id": "user_123", # Track by user
"conversation_id": "conv_abc", # Continue a conversation
},
messages=[{"role": "user", "content": "Hello!"}],
model="anthropic/claude-haiku-4.5", # Override the model
temperature=0.7, # Override temperature
max_tokens=1000, # Limit output length
)

Async Support
import asyncio
import os
from openai import AsyncOpenAI
client = AsyncOpenAI(
base_url="https://api.assistantrouter.com/v1",
api_key=os.environ["ASSISTANTROUTER_API_KEY"],
)
async def main():
response = await client.chat.completions.create(
extra_body={"assistant_id": "your-assistant-uuid"},
messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
asyncio.run(main())

Error Handling
import os
from openai import OpenAI, APIError, RateLimitError
client = OpenAI(
base_url="https://api.assistantrouter.com/v1",
api_key=os.environ["ASSISTANTROUTER_API_KEY"],
)
try:
response = client.chat.completions.create(
extra_body={"assistant_id": "your-assistant-uuid"},
messages=[{"role": "user", "content": "Hello!"}],
)
except RateLimitError:
print("Rate limited. Please slow down.")
except APIError as e:
error_type = getattr(e, "type", None)
if error_type == "insufficient_balance":
print("Please add credits to your wallet")
else:
print(f"API Error: {e.message}")

Function Calling
import json
response = client.chat.completions.create(
extra_body={"assistant_id": "your-assistant-uuid"},
messages=[{"role": "user", "content": "What's the weather in Paris?"}],
tools=[
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get current weather for a location",
"parameters": {
"type": "object",
"properties": {
"location": {"type": "string"},
},
"required": ["location"],
},
},
}
],
)
if response.choices[0].finish_reason == "tool_calls":
tool_call = response.choices[0].message.tool_calls[0]
args = json.loads(tool_call.function.arguments)
print(f"Function: {tool_call.function.name}")
print(f"Arguments: {args}")
# Execute your function and continue
weather = get_weather(args["location"])
final_response = client.chat.completions.create(
extra_body={"assistant_id": "your-assistant-uuid"},
messages=[
{"role": "user", "content": "What's the weather in Paris?"},
response.choices[0].message,
{
"role": "tool",
"tool_call_id": tool_call.id,
"content": json.dumps(weather),
},
],
)

Django Integration
# views.py
import json
import os
from django.http import JsonResponse
from openai import OpenAI
client = OpenAI(
base_url="https://api.assistantrouter.com/v1",
api_key=os.environ["ASSISTANTROUTER_API_KEY"],
)
def chat_view(request):
if request.method == "POST":
data = json.loads(request.body)
response = client.chat.completions.create(
extra_body={"assistant_id": os.environ["ASSISTANT_ID"]},
messages=data["messages"],
)
return JsonResponse({
"message": response.choices[0].message.content
})

FastAPI Integration
import os
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from openai import OpenAI
app = FastAPI()
client = OpenAI(
base_url="https://api.assistantrouter.com/v1",
api_key=os.environ["ASSISTANTROUTER_API_KEY"],
)
class ChatRequest(BaseModel):
messages: list
@app.post("/chat")
async def chat(request: ChatRequest):
response = client.chat.completions.create(
extra_body={"assistant_id": os.environ["ASSISTANT_ID"]},
messages=request.messages,
)
return {"message": response.choices[0].message.content}
@app.post("/chat/stream")
async def chat_stream(request: ChatRequest):
def generate():
stream = client.chat.completions.create(
extra_body={"assistant_id": os.environ["ASSISTANT_ID"]},
messages=request.messages,
stream=True,
)
for chunk in stream:
content = chunk.choices[0].delta.content or ""
yield content
return StreamingResponse(generate(), media_type="text/plain")

Other API Endpoints
Check Wallet Balance
import requests
import os
response = requests.get(
"https://api.assistantrouter.com/v1/wallet",
headers={"Authorization": f"Bearer {os.environ['ASSISTANTROUTER_API_KEY']}"}
)
data = response.json()["data"]
print(f"Balance: ${data['available_cents'] / 100:.2f}")

Get Usage Statistics
response = requests.get(
"https://api.assistantrouter.com/v1/usage",
headers={"Authorization": f"Bearer {os.environ['ASSISTANTROUTER_API_KEY']}"}
)
data = response.json()["data"]
print(f"Total cost: ${data['summary']['total_cost_cents'] / 100:.2f}")

List Available Models
response = requests.get(
"https://api.assistantrouter.com/v1/models",
headers={"Authorization": f"Bearer {os.environ['ASSISTANTROUTER_API_KEY']}"}
)
result = response.json()
print("Recommended models:", [m["name"] for m in result["recommended"]])