Commit f9e714f
feat(core): Instrument LangGraph Agent (#18114)
This PR adds official support for instrumenting LangGraph StateGraph operations in Node with Sentry tracing, following the OpenTelemetry semantic conventions for Generative AI.

### Currently supported:

- Node.js - both agent creation and invocation are instrumented in this PR
- ESM and CJS - both module systems are supported

The `langGraphIntegration()` accepts the following options:

```ts
// The integration respects your sendDefaultPii client option
interface LangGraphOptions {
  recordInputs?: boolean; // Whether to record input messages
  recordOutputs?: boolean; // Whether to record response text and tool calls
}
```

For example:

```js
Sentry.init({
  dsn: '__DSN__',
  sendDefaultPii: false, // Even with PII disabled globally...
  integrations: [
    Sentry.langGraphIntegration({
      recordInputs: true, // ...force recording input messages
      recordOutputs: true, // ...and force recording response text
    }),
  ],
});
```

### Operations traced:

- `gen_ai.create_agent` - spans created when `StateGraph.compile()` is called
- `gen_ai.invoke_agent` - spans created when `CompiledGraph.invoke()` is called
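To illustrate where those two spans come from, here is a minimal sketch pieced together from the test scenarios in this commit; the trivial inline agent node and the `demo_agent` name are made up for illustration and are not part of the integration's API:

```js
import { END, MessagesAnnotation, START, StateGraph } from '@langchain/langgraph';

// Compiling the graph is the "agent creation" step: with langGraphIntegration()
// enabled, this should emit a gen_ai.create_agent span named after the graph.
const graph = new StateGraph(MessagesAnnotation)
  .addNode('agent', () => ({ messages: [{ role: 'assistant', content: 'Hi!' }] }))
  .addEdge(START, 'agent')
  .addEdge('agent', END)
  .compile({ name: 'demo_agent' }); // -> gen_ai.create_agent

// Invoking the compiled graph is the "agent invocation" step: this should emit
// a gen_ai.invoke_agent span; message content is only attached when
// sendDefaultPii (or the recordInputs / recordOutputs overrides) allows it.
await graph.invoke({ messages: [{ role: 'user', content: 'Hello' }] }); // -> gen_ai.invoke_agent
```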
1 parent 8efeeeb commit f9e714f

File tree

21 files changed: +1107 -0 lines changed

dev-packages/node-integration-tests/package.json

Lines changed: 1 addition & 0 deletions
```diff
@@ -31,6 +31,7 @@
     "@hono/node-server": "^1.19.4",
     "@langchain/anthropic": "^0.3.10",
     "@langchain/core": "^0.3.28",
+    "@langchain/langgraph": "^0.2.32",
     "@nestjs/common": "^11",
     "@nestjs/core": "^11",
     "@nestjs/platform-express": "^11",
```
Lines changed: 10 additions & 0 deletions
```js
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
});
```
Lines changed: 10 additions & 0 deletions
```js
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
});
```
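Note: these two instrumentation files appear to differ only in `sendDefaultPii` (true in the first, false in the second); presumably the scenarios below run against both, to verify that the integration records message content by default only when PII is enabled, matching the `sendDefaultPii` behavior described in the commit message.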
Lines changed: 164 additions & 0 deletions
```js
import { tool } from '@langchain/core/tools';
import { END, MessagesAnnotation, START, StateGraph } from '@langchain/langgraph';
import { ToolNode } from '@langchain/langgraph/prebuilt';
import * as Sentry from '@sentry/node';
import { z } from 'zod';

async function run() {
  await Sentry.startSpan({ op: 'function', name: 'langgraph-tools-test' }, async () => {
    // Define tools
    const getWeatherTool = tool(
      async ({ city }) => {
        return JSON.stringify({ city, temperature: 72, condition: 'sunny' });
      },
      {
        name: 'get_weather',
        description: 'Get the current weather for a given city',
        schema: z.object({
          city: z.string().describe('The city to get weather for'),
        }),
      },
    );

    const getTimeTool = tool(
      async () => {
        return new Date().toISOString();
      },
      {
        name: 'get_time',
        description: 'Get the current time',
        schema: z.object({}),
      },
    );

    const tools = [getWeatherTool, getTimeTool];
    const toolNode = new ToolNode(tools);

    // Define mock LLM function that returns without tool calls
    const mockLlm = () => {
      return {
        messages: [
          {
            role: 'assistant',
            content: 'Response without calling tools',
            response_metadata: {
              model_name: 'gpt-4-0613',
              finish_reason: 'stop',
              tokenUsage: {
                promptTokens: 25,
                completionTokens: 15,
                totalTokens: 40,
              },
            },
            tool_calls: [],
          },
        ],
      };
    };

    // Routing function - check if there are tool calls
    const shouldContinue = state => {
      const messages = state.messages;
      const lastMessage = messages[messages.length - 1];

      // If the last message has tool_calls, route to tools, otherwise end
      if (lastMessage.tool_calls && lastMessage.tool_calls.length > 0) {
        return 'tools';
      }
      return END;
    };

    // Create graph with conditional edge to tools
    const graph = new StateGraph(MessagesAnnotation)
      .addNode('agent', mockLlm)
      .addNode('tools', toolNode)
      .addEdge(START, 'agent')
      .addConditionalEdges('agent', shouldContinue, {
        tools: 'tools',
        [END]: END,
      })
      .addEdge('tools', 'agent')
      .compile({ name: 'tool_agent' });

    // Simple invocation - won't call tools since mockLlm returns empty tool_calls
    await graph.invoke({
      messages: [{ role: 'user', content: 'What is the weather?' }],
    });

    // Define mock LLM function that returns with tool calls
    let callCount = 0;
    const mockLlmWithTools = () => {
      callCount++;

      // First call - return tool calls
      if (callCount === 1) {
        return {
          messages: [
            {
              role: 'assistant',
              content: '',
              response_metadata: {
                model_name: 'gpt-4-0613',
                finish_reason: 'tool_calls',
                tokenUsage: {
                  promptTokens: 30,
                  completionTokens: 20,
                  totalTokens: 50,
                },
              },
              tool_calls: [
                {
                  name: 'get_weather',
                  args: { city: 'San Francisco' },
                  id: 'call_123',
                  type: 'tool_call',
                },
              ],
            },
          ],
        };
      }

      // Second call - return final response after tool execution
      return {
        messages: [
          {
            role: 'assistant',
            content: 'Based on the weather data, it is sunny and 72 degrees in San Francisco.',
            response_metadata: {
              model_name: 'gpt-4-0613',
              finish_reason: 'stop',
              tokenUsage: {
                promptTokens: 50,
                completionTokens: 20,
                totalTokens: 70,
              },
            },
            tool_calls: [],
          },
        ],
      };
    };

    // Create graph with tool calls enabled
    const graphWithTools = new StateGraph(MessagesAnnotation)
      .addNode('agent', mockLlmWithTools)
      .addNode('tools', toolNode)
      .addEdge(START, 'agent')
      .addConditionalEdges('agent', shouldContinue, {
        tools: 'tools',
        [END]: END,
      })
      .addEdge('tools', 'agent')
      .compile({ name: 'tool_calling_agent' });

    // Invocation that actually calls tools
    await graphWithTools.invoke({
      messages: [{ role: 'user', content: 'What is the weather in San Francisco?' }],
    });
  });

  await Sentry.flush(2000);
}

run();
```
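Reading this scenario against the operations listed in the commit message: each of the two `compile()` calls (`tool_agent` and `tool_calling_agent`) should yield a `gen_ai.create_agent` span, and each `invoke()` a `gen_ai.invoke_agent` span; only the second invocation routes through the `tools` node and actually executes `get_weather` before the mock LLM's final answer.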
Lines changed: 52 additions & 0 deletions
```js
import { END, MessagesAnnotation, START, StateGraph } from '@langchain/langgraph';
import * as Sentry from '@sentry/node';

async function run() {
  await Sentry.startSpan({ op: 'function', name: 'langgraph-test' }, async () => {
    // Define a simple mock LLM function
    const mockLlm = () => {
      return {
        messages: [
          {
            role: 'assistant',
            content: 'Mock LLM response',
            response_metadata: {
              model_name: 'mock-model',
              finish_reason: 'stop',
              tokenUsage: {
                promptTokens: 20,
                completionTokens: 10,
                totalTokens: 30,
              },
            },
          },
        ],
      };
    };

    // Create and compile the graph
    const graph = new StateGraph(MessagesAnnotation)
      .addNode('agent', mockLlm)
      .addEdge(START, 'agent')
      .addEdge('agent', END)
      .compile({ name: 'weather_assistant' });

    // Test: basic invocation
    await graph.invoke({
      messages: [{ role: 'user', content: 'What is the weather today?' }],
    });

    // Test: invocation with multiple messages
    await graph.invoke({
      messages: [
        { role: 'user', content: 'Hello' },
        { role: 'assistant', content: 'Hi there!' },
        { role: 'user', content: 'Tell me about the weather' },
      ],
    });
  });

  await Sentry.flush(2000);
}

run();
```
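Here a single `gen_ai.create_agent` span (for the `weather_assistant` compile) should be followed by two `gen_ai.invoke_agent` spans, one per `invoke()` call - again an inference from the traced operations named in the commit message, not an assertion taken from the tests themselves.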
