Integrations
Integrate AgentLens with popular AI frameworks.
LangChain
Integrate with LangChain by wrapping tool functions and agent calls with AgentLens decorators:
import agentlens
from agentlens import track_agent, track_tool_call
from langchain.agents import initialize_agent, Tool
from langchain.llms import OpenAI
agentlens.init(endpoint="http://localhost:3000")
# Wrap your tools with AgentLens tracking
@track_tool_call(tool_name="search")
def search_tool(query: str) -> str:
    """Run a web search for *query* and return the raw result text."""
    results = do_search(query)
    return results
# Expose the tracked function to LangChain as a Tool; the decorator above
# means every invocation through the agent is reported to AgentLens.
tools = [
    Tool(name="search", func=search_tool, description="Search the web"),
]
# Track the agent execution
@track_agent(model="gpt-4")
def run_agent(query: str) -> str:
    """Run a zero-shot LangChain agent on *query* under an AgentLens session.

    Args:
        query: Natural-language task for the agent.

    Returns:
        The agent's final answer string.
    """
    agentlens.start_session(agent_name="langchain-agent")
    try:
        llm = OpenAI(model="gpt-4")
        agent = initialize_agent(tools, llm, agent="zero-shot-react-description")
        return agent.run(query)
    finally:
        # Always close the session, even if agent.run() raises — otherwise
        # a failed run leaves the AgentLens session dangling open.
        agentlens.end_session()
OpenAI API
Track OpenAI API calls by wrapping the completion call:
import agentlens
import openai
import time
agentlens.init(endpoint="http://localhost:3000")
def tracked_completion(messages, model="gpt-4", **kwargs):
    """OpenAI chat completion with AgentLens tracking.

    Args:
        messages: Chat messages in OpenAI format ({"role", "content"} dicts).
        model: Model name forwarded to the API.
        **kwargs: Extra arguments forwarded to openai.chat.completions.create.

    Returns:
        The raw OpenAI response object, unchanged.
    """
    start = time.perf_counter()
    response = openai.chat.completions.create(
        model=model,
        messages=messages,
        **kwargs,
    )
    elapsed = (time.perf_counter() - start) * 1000
    # usage is None for streaming responses (stream=True); report 0 tokens
    # rather than crashing on attribute access.
    usage = response.usage
    agentlens.track(
        event_type="llm_call",
        model=model,
        # Truncate to keep event payloads small. str() guards against
        # non-string content (e.g. multimodal content lists).
        input_data={"messages": [str(m["content"])[:200] for m in messages]},
        output_data={"response": response.choices[0].message.content[:500]},
        tokens_in=usage.prompt_tokens if usage else 0,
        tokens_out=usage.completion_tokens if usage else 0,
        duration_ms=elapsed,
    )
    return response
Anthropic Claude
import agentlens
import anthropic
import time
agentlens.init(endpoint="http://localhost:3000")
def tracked_claude(prompt, model="claude-3-5-sonnet-20241022", **kwargs):
    """Send *prompt* to Claude and record the call in AgentLens.

    Returns the raw Anthropic response object, unchanged.
    """
    client = anthropic.Anthropic()
    started = time.perf_counter()
    response = client.messages.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=1024,
        **kwargs,
    )
    duration_ms = (time.perf_counter() - started) * 1000
    usage = response.usage
    # Truncated copies of prompt/response keep the tracked payload small.
    agentlens.track(
        event_type="llm_call",
        model=model,
        input_data={"prompt": prompt[:200]},
        output_data={"response": response.content[0].text[:500]},
        tokens_in=usage.input_tokens,
        tokens_out=usage.output_tokens,
        duration_ms=duration_ms,
    )
    return response
Custom Frameworks
AgentLens works with any Python code. The general pattern is:
- Call `agentlens.init()` at startup
- Create a session with `agentlens.start_session()`
- Use the `@track_agent` / `@track_tool_call` decorators on your functions
- Or use `agentlens.track()` for manual instrumentation
- Call `agentlens.end_session()` when done
💡 Non-Python agents
If your agent isn't written in Python, you can still use AgentLens by sending events directly to the REST API. Any language that can make HTTP POST requests can integrate with AgentLens.
REST API Integration
For non-Python agents, send events directly:
# Create a session (events are posted as a JSON batch under "events")
curl -X POST http://localhost:3000/events \
-H "Content-Type: application/json" \
-d '{"events": [{"event_type": "session_start", "session_id": "my-session", "agent_name": "js-agent"}]}'
# Track events — reuse the same session_id to group them into one session
curl -X POST http://localhost:3000/events \
-H "Content-Type: application/json" \
-d '{"events": [{"event_type": "llm_call", "session_id": "my-session", "model": "gpt-4", "tokens_in": 100}]}'
# End the session with a final status
curl -X POST http://localhost:3000/events \
-H "Content-Type: application/json" \
-d '{"events": [{"event_type": "session_end", "session_id": "my-session", "status": "completed"}]}'