Python SDK
Full Python SDK reference with configuration and examples.
AI agents are hard to debug. Requests fly by, context builds up, and when something fails you’re left guessing. SideSeat captures every LLM call, tool call, and agent decision, then displays them in a web UI as they happen. It works with the SideSeat SDK (one line of setup) or with any standard OpenTelemetry exporter — no SDK required.
Start SideSeat
npx sideseat

Install the SDK

pip install sideseat
uv add sideseat
npm install @sideseat/sdk

Select your framework or provider, install it, and add one line to your code.
Open http://localhost:5388 — you’ll see a live timeline of each prompt, tool call, and model response.
pip install sideseat strands-agents
uv add sideseat strands-agents

from sideseat import SideSeat, Frameworks
from strands import Agent

SideSeat(framework=Frameworks.Strands)

agent = Agent()
print(agent("What is 2+2?"))

npm install @sideseat/sdk @strands-agents/sdk

import { init, Frameworks } from '@sideseat/sdk';
import { Agent } from '@strands-agents/sdk';

init({ framework: Frameworks.Strands });

const agent = new Agent({ model: 'global.anthropic.claude-haiku-4-5-20251001-v1:0' });
const result = await agent.invoke('What is 2+2?');
console.log(result.toString());

pip install sideseat langgraph langchain-openai
uv add sideseat langgraph langchain-openai

from sideseat import SideSeat, Frameworks
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
SideSeat(framework=Frameworks.LangGraph)

llm = ChatOpenAI(model="gpt-5-mini")
agent = create_react_agent(llm, tools=[])
result = agent.invoke({"messages": [("user", "What is 2+2?")]})
print(result["messages"][-1].content)

pip install "sideseat[openai-agents]" openai-agents
uv add "sideseat[openai-agents]" openai-agents

from sideseat import SideSeat, Frameworks
from agents import Agent, Runner

SideSeat(framework=Frameworks.OpenAIAgents)

agent = Agent(name="Assistant", instructions="You are helpful.")
result = Runner.run_sync(agent, "What is 2+2?")
print(result.final_output)

pip install sideseat crewai
uv add sideseat crewai

from sideseat import SideSeat, Frameworks
from crewai import Agent, Task, Crew
SideSeat(framework=Frameworks.CrewAI)

researcher = Agent(role="Researcher", goal="Find information", backstory="Expert researcher")
task = Task(description="What is 2+2?", expected_output="The answer", agent=researcher)
result = Crew(agents=[researcher], tasks=[task]).kickoff()
print(result)

pip install sideseat google-adk
uv add sideseat google-adk

import asyncio

from sideseat import SideSeat, Frameworks
from google.adk.agents import Agent
from google.adk.runners import Runner
from google.adk.sessions import InMemorySessionService
from google.genai import types

SideSeat(framework=Frameworks.GoogleADK)

agent = Agent(model="gemini-2.5-flash", name="assistant", instruction="You are helpful.")

async def main():
    session_service = InMemorySessionService()
    runner = Runner(agent=agent, app_name="app", session_service=session_service)
    session = await session_service.create_session(app_name="app", user_id="user")
    message = types.Content(role="user", parts=[types.Part(text="What is 2+2?")])
    async for event in runner.run_async(session_id=session.id, user_id="user", new_message=message):
        if event.content and event.content.parts:
            for part in event.content.parts:
                if hasattr(part, "text") and part.text:
                    print(part.text)

asyncio.run(main())

pip install sideseat agent-framework
uv add sideseat agent-framework

import asyncio

from sideseat import SideSeat, Frameworks
from agent_framework import Agent
from agent_framework.openai import OpenAIChatClient
SideSeat(framework=Frameworks.AgentFramework)

agent = Agent(client=OpenAIChatClient(model_id="gpt-5-nano-2025-08-07"), instructions="You are helpful.")
result = asyncio.run(agent.run("What is 2+2?"))
print(result.text)

npm install @sideseat/sdk ai @ai-sdk/amazon-bedrock

import { init, Frameworks } from '@sideseat/sdk';
import { generateText } from 'ai';
import { bedrock } from '@ai-sdk/amazon-bedrock';

init({ framework: Frameworks.VercelAI });

const { text } = await generateText({
  model: bedrock('us.anthropic.claude-sonnet-4-5-20250929-v1:0'),
  prompt: 'What is 2+2?',
  experimental_telemetry: { isEnabled: true },
});
console.log(text);

pip install sideseat boto3
uv add sideseat boto3

from sideseat import SideSeat, Frameworks
import boto3
SideSeat(framework=Frameworks.Bedrock)

client = boto3.client("bedrock-runtime", region_name="us-east-1")
response = client.converse(
    modelId="us.anthropic.claude-sonnet-4-5-20250929-v1:0",
    messages=[{"role": "user", "content": [{"text": "What is 2+2?"}]}],
)
print(response["output"]["message"]["content"][0]["text"])

pip install sideseat anthropic
uv add sideseat anthropic

from sideseat import SideSeat, Frameworks
import anthropic

SideSeat(framework=Frameworks.Anthropic)

client = anthropic.Anthropic()
message = client.messages.create(
    model="claude-sonnet-4-5-20250929",
    max_tokens=1024,
    messages=[{"role": "user", "content": "What is 2+2?"}],
)
print(message.content[0].text)

pip install sideseat openai
uv add sideseat openai

from sideseat import SideSeat, Frameworks
from openai import OpenAI
SideSeat(framework=Frameworks.OpenAI)

client = OpenAI()
response = client.chat.completions.create(
    model="gpt-5-mini",
    messages=[{"role": "user", "content": "What is 2+2?"}],
)
print(response.choices[0].message.content)

pip install sideseat google-genai
uv add sideseat google-genai

from sideseat import SideSeat, Frameworks
from google import genai

SideSeat(framework=Frameworks.GoogleGenAI)

client = genai.Client(api_key="YOUR_API_KEY")
response = client.models.generate_content(model="gemini-2.5-flash", contents="What is 2+2?")
print(response.text)

pip install sideseat google-cloud-aiplatform vertexai
uv add sideseat google-cloud-aiplatform vertexai

from sideseat import SideSeat, Frameworks
import vertexai
from vertexai.generative_models import GenerativeModel

SideSeat(framework=Frameworks.VertexAI)

vertexai.init(project="YOUR_PROJECT_ID", location="us-central1")
model = GenerativeModel("gemini-2.5-flash")
print(model.generate_content("What is 2+2?").text)

See all supported frameworks and providers.
SideSeat accepts standard OpenTelemetry traces from any framework.
Start SideSeat
npx sideseat

Set the endpoint

export OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:5388/otel/default

Select your framework, install it, and add the telemetry setup below.

Open http://localhost:5388 — traces appear in real time.
pip install 'strands-agents[otel]'
uv add 'strands-agents[otel]'

from strands.telemetry import StrandsTelemetry
from strands import Agent

telemetry = StrandsTelemetry()
telemetry.setup_otlp_exporter()
telemetry.setup_meter(enable_otlp_exporter=True)

agent = Agent()
response = agent("What is 2+2?")
print(response)

npm install @strands-agents/sdk @opentelemetry/sdk-trace-node @opentelemetry/sdk-trace-base @opentelemetry/exporter-trace-otlp-http

import { NodeTracerProvider } from '@opentelemetry/sdk-trace-node';
import { BatchSpanProcessor } from '@opentelemetry/sdk-trace-base';
import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http';
import { Agent } from '@strands-agents/sdk';

const provider = new NodeTracerProvider({
  spanProcessors: [new BatchSpanProcessor(new OTLPTraceExporter())],
});
provider.register();

const agent = new Agent({ model: 'global.anthropic.claude-haiku-4-5-20251001-v1:0' });
const result = await agent.invoke('What is 2+2?');
console.log(result.toString());

await provider.shutdown();

pip install langgraph langchain-openai openinference-instrumentation-langchain opentelemetry-exporter-otlp
uv add langgraph langchain-openai openinference-instrumentation-langchain opentelemetry-exporter-otlp

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from openinference.instrumentation.langchain import LangChainInstrumentor
provider = TracerProvider()
provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))
trace.set_tracer_provider(provider)
LangChainInstrumentor().instrument(tracer_provider=provider, skip_dep_check=True)

from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-5-mini")
agent = create_react_agent(llm, tools=[])
result = agent.invoke({"messages": [("user", "What is 2+2?")]})
print(result["messages"][-1].content)

pip install crewai openinference-instrumentation-crewai opentelemetry-exporter-otlp
uv add crewai openinference-instrumentation-crewai opentelemetry-exporter-otlp

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from openinference.instrumentation.crewai import CrewAIInstrumentor

provider = TracerProvider()
provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))
trace.set_tracer_provider(provider)
CrewAIInstrumentor().instrument(tracer_provider=provider, skip_dep_check=True)

from crewai import Agent, Task, Crew

researcher = Agent(role="Researcher", goal="Find information", backstory="Expert researcher")
task = Task(description="What is 2+2?", expected_output="The answer", agent=researcher)
result = Crew(agents=[researcher], tasks=[task]).kickoff()
print(result)

pip install google-adk opentelemetry-sdk opentelemetry-exporter-otlp
uv add google-adk opentelemetry-sdk opentelemetry-exporter-otlp

import asyncio

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
provider = TracerProvider()
provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))
trace.set_tracer_provider(provider)

from google.adk.agents import Agent
from google.adk.runners import Runner
from google.adk.sessions import InMemorySessionService
from google.genai import types

agent = Agent(model="gemini-2.5-flash", name="assistant", instruction="You are helpful.")

async def main():
    session_service = InMemorySessionService()
    runner = Runner(agent=agent, app_name="app", session_service=session_service)
    session = await session_service.create_session(app_name="app", user_id="user")
    message = types.Content(role="user", parts=[types.Part(text="What is 2+2?")])
    async for event in runner.run_async(session_id=session.id, user_id="user", new_message=message):
        if event.content and event.content.parts:
            for part in event.content.parts:
                if hasattr(part, "text") and part.text:
                    print(part.text)

asyncio.run(main())

pip install agent-framework opentelemetry-sdk opentelemetry-exporter-otlp
uv add agent-framework opentelemetry-sdk opentelemetry-exporter-otlp

import asyncio

from agent_framework.observability import OBSERVABILITY_SETTINGS
from agent_framework import Agent
from agent_framework.openai import OpenAIChatClient
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
OBSERVABILITY_SETTINGS.enable_instrumentation = True
OBSERVABILITY_SETTINGS.enable_sensitive_data = True

provider = TracerProvider()
provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter(
    endpoint="http://localhost:5388/otel/default/v1/traces")))
trace.set_tracer_provider(provider)

client = OpenAIChatClient(model_id="gpt-5-nano-2025-08-07")
agent = Agent(client=client, instructions="You are a helpful assistant.")
result = asyncio.run(agent.run("What is 2+2?"))
print(result.text)

pip install openai-agents "logfire>=4.29.0" opentelemetry-exporter-otlp
uv add openai-agents "logfire>=4.29.0" opentelemetry-exporter-otlp

import logfire

from opentelemetry import trace
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
logfire.configure(send_to_logfire=False, console=False)
logfire.instrument_openai_agents()

provider = trace.get_tracer_provider()
provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))

from agents import Agent, Runner

agent = Agent(name="Assistant", instructions="You are helpful.")
result = Runner.run_sync(agent, "What is 2+2?")
print(result.final_output)

npm install ai @ai-sdk/amazon-bedrock @opentelemetry/sdk-node @opentelemetry/exporter-trace-otlp-http

import { NodeSDK } from '@opentelemetry/sdk-node';
import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http';

const sdk = new NodeSDK({ traceExporter: new OTLPTraceExporter() });
sdk.start();

import { generateText } from 'ai';
import { bedrock } from '@ai-sdk/amazon-bedrock';

const { text } = await generateText({
  model: bedrock('us.anthropic.claude-sonnet-4-5-20250929-v1:0'),
  prompt: 'What is 2+2?',
  experimental_telemetry: { isEnabled: true },
});
console.log(text);

SideSeat includes a built-in MCP server that gives your coding agent direct access to your agent’s traces, conversations, and costs. Connect it and let your coding tool optimize prompts, debug failures, and reduce costs using real data.
# Kiro CLI
kiro-cli mcp add --name sideseat --url http://localhost:5388/api/v1/projects/default/mcp

# Claude Code
claude mcp add --transport http sideseat http://localhost:5388/api/v1/projects/default/mcp

# OpenAI Codex
codex mcp add --transport http sideseat http://localhost:5388/api/v1/projects/default/mcp

See the MCP Server guide for Kiro, Cursor, and other clients.
SideSeat runs locally by default. Your data stays on your machine.
| Benefit | What It Means |
|---|---|
| No signup | Run npx sideseat and start debugging immediately |
| No data egress | Traces stay on your machine — no cloud uploads |
| No latency | Real-time streaming without network roundtrips |
| No vendor lock-in | Standard OpenTelemetry traces work with any backend |
Python SDK
Full Python SDK reference with configuration and examples.
TypeScript SDK
Full TypeScript SDK reference for Node.js apps.
Integrations
Connect your framework — Strands, LangGraph, CrewAI, and more.
Concepts