Integrate AITracer with just 3 lines of code. Time required: about 5 minutes
First, create an AITracer account and obtain your API key.
Your API key will look like at-xxxx... Next, install the SDK for your programming language.
Install with pip:
pip install aitracer
Requires Python 3.8 or higher.
Install with Composer:
composer require haro/aitracer
Requires PHP 8.0 or higher.
Install with npm or yarn:
# npm
npm install @haro/aitracer
# yarn
yarn add @haro/aitracer
Requires Node.js 18 or higher.
Simply wrap your existing LLM client with AITracer to automatically log all requests.
from aitracer import AITracer
from openai import OpenAI
tracer = AITracer(api_key="at-xxxx", project="my-project")
client = tracer.wrap_openai(OpenAI())
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Hello!"}]
)
from aitracer import AITracer
from anthropic import Anthropic
tracer = AITracer(api_key="at-xxxx", project="my-project")
client = tracer.wrap_anthropic(Anthropic())
response = client.messages.create(
model="claude-sonnet-4-20250514",
max_tokens=1024,
messages=[{"role": "user", "content": "Hello!"}]
)
from aitracer import AITracer
import google.generativeai as genai
tracer = AITracer(api_key="at-xxxx", project="my-project")
model = tracer.wrap_gemini(genai.GenerativeModel("gemini-pro"))
response = model.generate_content("Hello!")
from aitracer.integrations.langchain import AITracerCallbackHandler
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage
# Set up the LangChain callback handler
handler = AITracerCallbackHandler(api_key="at-xxxx", project="my-project")
llm = ChatOpenAI(model="gpt-4", callbacks=[handler])
response = llm.invoke([HumanMessage(content="Hello!")])
<?php
use AITracer\AITracer;
use OpenAI;
$tracer = new AITracer(['api_key' => 'at-xxxx', 'project' => 'my-project']);
$client = $tracer->wrapOpenAI(OpenAI::client('your-openai-key'));
$response = $client->chat()->create([
'model' => 'gpt-4',
'messages' => [
['role' => 'user', 'content' => 'Hello!']
]
]);
<?php
use AITracer\AITracer;
use Anthropic\Anthropic;
$tracer = new AITracer(['api_key' => 'at-xxxx', 'project' => 'my-project']);
$client = $tracer->wrapAnthropic(Anthropic::client('your-anthropic-key'));
$response = $client->messages()->create([
'model' => 'claude-sonnet-4-20250514',
'max_tokens' => 1024,
'messages' => [
['role' => 'user', 'content' => 'Hello!']
]
]);
<?php
use AITracer\AITracer;
use Gemini\Client as GeminiClient;
$tracer = new AITracer(['api_key' => 'at-xxxx', 'project' => 'my-project']);
$client = $tracer->wrapGemini(GeminiClient::factory()->withApiKey('your-gemini-key')->make());
$response = $client->geminiPro()->generateContent('Hello!');
import { AITracer } from '@haro/aitracer';
import OpenAI from 'openai';
const tracer = new AITracer({ apiKey: 'at-xxxx', projectId: 'my-project' });
const openai = new OpenAI();
const start = Date.now();
const response = await openai.chat.completions.create({
model: 'gpt-4',
messages: [{ role: 'user', content: 'Hello!' }]
});
// Log the request
await tracer.log({
model: 'gpt-4',
provider: 'openai',
inputData: { messages: [{ role: 'user', content: 'Hello!' }] },
outputData: response.choices[0].message,
inputTokens: response.usage?.prompt_tokens,
outputTokens: response.usage?.completion_tokens,
latencyMs: Date.now() - start
});
import { AITracer } from '@haro/aitracer';
import Anthropic from '@anthropic-ai/sdk';
const tracer = new AITracer({ apiKey: 'at-xxxx', projectId: 'my-project' });
const anthropic = new Anthropic();
const start = Date.now();
const response = await anthropic.messages.create({
model: 'claude-sonnet-4-20250514',
max_tokens: 1024,
messages: [{ role: 'user', content: 'Hello!' }]
});
// Log the request
await tracer.log({
model: 'claude-sonnet-4-20250514',
provider: 'anthropic',
inputData: { messages: [{ role: 'user', content: 'Hello!' }] },
outputData: response.content,
inputTokens: response.usage.input_tokens,
outputTokens: response.usage.output_tokens,
latencyMs: Date.now() - start
});
If you set the environment variables AITRACER_API_KEY and AITRACER_PROJECT, you can omit these parameters.
After running your code, check the AITracer dashboard to verify your logs.
| Provider | Method | Supported SDKs |
|---|---|---|
| OpenAI | tracer.wrap_openai(client) | Python, PHP |
| Anthropic | tracer.wrap_anthropic(client) | Python, PHP |
| Google Gemini | tracer.wrap_gemini(model) | Python, PHP |
| LangChain | AITracerCallbackHandler | Python |
| Manual Logging | tracer.log() | Python, PHP, TypeScript |
You can specify the following options when initializing:
| Option | Type | Description | Default |
|---|---|---|---|
| api_key | string | AITracer API key | Environment variable AITRACER_API_KEY |
| project | string | Project name (detailed guide) | None |
| enabled | bool | Enable/disable logging | true |
| base_url | string | API endpoint | https://api.aitracer.co |
| Option | Type | Description | Default |
|---|---|---|---|
| sync | bool | Synchronous mode (recommended for Lambda/Cloud Functions) | false |
| batch_size | int | Batch size for async sending | 10 |
| flush_interval | float | Auto-flush interval in seconds (async mode) | 5.0 |
| flush_on_exit | bool | Auto-flush on process exit | true |
| Option | Type | Description | Default |
|---|---|---|---|
| pii_detection | bool | Enable/disable PII detection | false |
| pii_action | string | Action on PII detection (mask, redact, hash, none) | mask |
| pii_types | array | Types of PII to detect | ["email", "phone", "credit_card", "ssn"] |
from aitracer import AITracer
tracer = AITracer(
api_key="at-xxxx",
project="my-chatbot",
# PII settings
pii_detection=True,
pii_action="mask",
# Sending settings
batch_size=20,
flush_interval=10.0,
    # Sync mode (set to True for serverless environments)
sync=False
)
We recommend setting sync=True for serverless environments to prevent log loss.
Aggregate logs by user session to visualize conversation flows.
# Use with statement to manage sessions (auto start/end)
with tracer.session(
session_id="session-abc123",
user_id="user-456",
metadata={"channel": "web", "device": "mobile"}
) as session:
# All requests in session are automatically grouped
response1 = client.chat.completions.create(...)
response2 = client.chat.completions.create(...)
# Record user feedback
session.thumbs_up() # For the last response
# Session automatically ends when with block exits
Session Analytics Features:
The App Users feature lets you track usage at the end-user level of your AI application. Understand cost, usage, and behavior patterns per user to improve billing strategies and user experience.
When starting a session with user_id, all logs for that user are automatically aggregated.
# Specify user in session (recommended)
with tracer.session(
session_id="session-abc123",
user_id="user-456", # Your app's user ID
) as session:
# All requests in this session are linked to user-456
response = client.chat.completions.create(...)
Alternatively, you can specify it per-request via metadata.
# Specify user_id in metadata
response = client.chat.completions.create(
model="gpt-4",
messages=[...],
extra_body={
"aitracer_metadata": {
"user_id": "user-456"
}
}
)
Access the "App Users" menu in the dashboard to view user list and details.
| Use Case | Description |
|---|---|
| Usage-Based Billing | Track API costs per user to calculate billing amounts |
| Rate Limiting | Identify heavy users and design rate limit policies |
| Error Investigation | Investigate causes of errors for specific users |
| Experience Improvement | Identify users with high latency and improve their experience |
For more details, see Dashboard Guide - App User Analytics.
Group multiple API calls into a single trace. Useful for RAG pipelines and multi-step processing.
# Group multiple API calls into a single trace
with tracer.trace("user-query-123") as trace:
# First API call
response1 = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Summarize this article..."}]
)
# Second API call (belongs to same trace)
response2 = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": f"Translate: {response1.choices[0].message.content}"}]
)
# Add metadata
trace.set_metadata({
"user_id": "user-456",
"feature": "summarization",
"article_id": "article-789"
})
Attach arbitrary metadata to each request for later searching and filtering.
# Attach metadata per request
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Hello!"}],
extra_body={
"aitracer_metadata": {
"user_id": "user-456",
"session_id": "session-abc",
"feature": "chat",
"version": "2.0"
}
}
)
Metadata supports up to 10 keys, with each value limited to 1000 characters.