2 changes: 1 addition & 1 deletion .size-limit.js
@@ -240,7 +240,7 @@ module.exports = [
import: createImport('init'),
ignore: [...builtinModules, ...nodePrefixedBuiltinModules],
gzip: true,
-    limit: '158 KB',
+    limit: '159 KB',
},
{
name: '@sentry/node - without tracing',
1 change: 1 addition & 0 deletions dev-packages/cloudflare-integration-tests/package.json
@@ -13,6 +13,7 @@
"test:watch": "yarn test --watch"
},
"dependencies": {
"@langchain/langgraph": "^1.0.1",
"@sentry/cloudflare": "10.23.0"
},
"devDependencies": {
@@ -0,0 +1,66 @@
import { END, MessagesAnnotation, START, StateGraph } from '@langchain/langgraph';
import * as Sentry from '@sentry/cloudflare';

interface Env {
SENTRY_DSN: string;
}

export default Sentry.withSentry(
(env: Env) => ({
dsn: env.SENTRY_DSN,
tracesSampleRate: 1.0,
sendDefaultPii: true,
}),
{
async fetch(_request, _env, _ctx) {
      // Define a simple mock LLM function
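      // The instrumentation reads response_metadata (model_name, finish_reason,
      // tokenUsage) from the last message to populate the gen_ai.response.* and
      // gen_ai.usage.* span attributes asserted in the accompanying test.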
const mockLlm = (): {
messages: {
role: string;
content: string;
response_metadata: {
model_name: string;
finish_reason: string;
tokenUsage: { promptTokens: number; completionTokens: number; totalTokens: number };
};
tool_calls: never[];
}[];
} => {
return {
messages: [
{
role: 'assistant',
content: 'Mock response from LangGraph agent',
response_metadata: {
model_name: 'mock-model',
finish_reason: 'stop',
tokenUsage: {
promptTokens: 20,
completionTokens: 10,
totalTokens: 30,
},
},
tool_calls: [],
},
],
};
};

// Create and instrument the graph
const graph = new StateGraph(MessagesAnnotation)
.addNode('agent', mockLlm)
.addEdge(START, 'agent')
.addEdge('agent', END);

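      // instrumentLangGraph patches the graph in place so that compile() and
      // invoke() emit gen_ai.create_agent and gen_ai.invoke_agent spans;
      // recordInputs/recordOutputs opt into capturing message payloads.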
Sentry.instrumentLangGraph(graph, { recordInputs: true, recordOutputs: true });

const compiled = graph.compile({ name: 'weather_assistant' });

await compiled.invoke({
messages: [{ role: 'user', content: 'What is the weather in SF?' }],
});

return new Response(JSON.stringify({ success: true }));
},
},
);
@@ -0,0 +1,59 @@
import { expect, it } from 'vitest';
import { createRunner } from '../../../runner';

// These tests are not exhaustive: the instrumentation itself is already
// covered by the Node integration tests. Here we only verify that it does
// not break in our Cloudflare SDK.

it('traces langgraph compile and invoke operations', async ({ signal }) => {
const runner = createRunner(__dirname)
.ignore('event')
.expect(envelope => {
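      // A parsed envelope is [headers, items] and each item is [itemHeaders, payload],
      // so the first item's payload here is the transaction event.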
const transactionEvent = envelope[1]?.[0]?.[1] as any;

expect(transactionEvent.transaction).toBe('GET /');

// Check create_agent span
const createAgentSpan = transactionEvent.spans.find((span: any) => span.op === 'gen_ai.create_agent');
expect(createAgentSpan).toMatchObject({
data: {
'gen_ai.operation.name': 'create_agent',
'sentry.op': 'gen_ai.create_agent',
'sentry.origin': 'auto.ai.langgraph',
'gen_ai.agent.name': 'weather_assistant',
},
description: 'create_agent weather_assistant',
op: 'gen_ai.create_agent',
origin: 'auto.ai.langgraph',
});

// Check invoke_agent span
const invokeAgentSpan = transactionEvent.spans.find((span: any) => span.op === 'gen_ai.invoke_agent');
expect(invokeAgentSpan).toMatchObject({
data: expect.objectContaining({
'gen_ai.operation.name': 'invoke_agent',
'sentry.op': 'gen_ai.invoke_agent',
'sentry.origin': 'auto.ai.langgraph',
'gen_ai.agent.name': 'weather_assistant',
'gen_ai.pipeline.name': 'weather_assistant',
'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in SF?"}]',
'gen_ai.response.model': 'mock-model',
'gen_ai.usage.input_tokens': 20,
'gen_ai.usage.output_tokens': 10,
'gen_ai.usage.total_tokens': 30,
}),
description: 'invoke_agent weather_assistant',
op: 'gen_ai.invoke_agent',
origin: 'auto.ai.langgraph',
});

      // If the span recorded available tools, verify they are captured
if (invokeAgentSpan.data['gen_ai.request.available_tools']) {
expect(invokeAgentSpan.data['gen_ai.request.available_tools']).toMatch(/get_weather/);
}
})
.start(signal);
await runner.makeRequest('get', '/');
await runner.completed();
});
@@ -0,0 +1,7 @@
{
"name": "worker-name",
"compatibility_date": "2025-06-17",
"main": "index.ts",
"compatibility_flags": ["nodejs_compat"]
}

1 change: 1 addition & 0 deletions dev-packages/node-integration-tests/package.json
@@ -31,6 +31,7 @@
"@hono/node-server": "^1.19.4",
"@langchain/anthropic": "^0.3.10",
"@langchain/core": "^0.3.28",
"@langchain/langgraph": "^0.2.32",
"@nestjs/common": "^11",
"@nestjs/core": "^11",
"@nestjs/platform-express": "^11",
@@ -0,0 +1,10 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
dsn: 'https://public@dsn.ingest.sentry.io/1337',
release: '1.0',
tracesSampleRate: 1.0,
sendDefaultPii: true,
transport: loggingTransport,
});
@@ -0,0 +1,10 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
dsn: 'https://public@dsn.ingest.sentry.io/1337',
release: '1.0',
tracesSampleRate: 1.0,
sendDefaultPii: false,
transport: loggingTransport,
});
@@ -0,0 +1,92 @@
import { tool } from '@langchain/core/tools';
import { END, MessagesAnnotation, START, StateGraph } from '@langchain/langgraph';
import { ToolNode } from '@langchain/langgraph/prebuilt';
import * as Sentry from '@sentry/node';
import { z } from 'zod';

async function run() {
await Sentry.startSpan({ op: 'function', name: 'langgraph-tools-test' }, async () => {
// Define tools
const getWeatherTool = tool(
async ({ city }) => {
return JSON.stringify({ city, temperature: 72, condition: 'sunny' });
},
{
name: 'get_weather',
description: 'Get the current weather for a given city',
schema: z.object({
city: z.string().describe('The city to get weather for'),
}),
},
);

const getTimeTool = tool(
async () => {
return new Date().toISOString();
},
{
name: 'get_time',
description: 'Get the current time',
schema: z.object({}),
},
);

const tools = [getWeatherTool, getTimeTool];
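    // ToolNode executes whatever tool_calls appear on the last message in state.
    // It is never reached here, because the mock LLM returns an empty tool_calls array.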
const toolNode = new ToolNode(tools);

// Define mock LLM function that returns without tool calls
const mockLlm = () => {
return {
messages: [
{
role: 'assistant',
content: 'Response without calling tools',
response_metadata: {
model_name: 'gpt-4-0613',
finish_reason: 'stop',
tokenUsage: {
promptTokens: 25,
completionTokens: 15,
totalTokens: 40,
},
},
tool_calls: [],
},
],
};
};

// Routing function - check if there are tool calls
const shouldContinue = state => {
const messages = state.messages;
const lastMessage = messages[messages.length - 1];

// If the last message has tool_calls, route to tools, otherwise end
if (lastMessage.tool_calls && lastMessage.tool_calls.length > 0) {
return 'tools';
}
return END;
};

// Create graph with conditional edge to tools
const graph = new StateGraph(MessagesAnnotation)
.addNode('agent', mockLlm)
.addNode('tools', toolNode)
.addEdge(START, 'agent')
.addConditionalEdges('agent', shouldContinue, {
tools: 'tools',
[END]: END,
})
.addEdge('tools', 'agent')
.compile({ name: 'tool_agent' });

// Simple invocation - won't call tools since mockLlm returns empty tool_calls
await graph.invoke({
messages: [{ role: 'user', content: 'What is the weather?' }],
});
});

await Sentry.flush(2000);
}

run();
@@ -0,0 +1,52 @@
import { END, MessagesAnnotation, START, StateGraph } from '@langchain/langgraph';
import * as Sentry from '@sentry/node';

async function run() {
await Sentry.startSpan({ op: 'function', name: 'langgraph-test' }, async () => {
// Define a simple mock LLM function
const mockLlm = () => {
return {
messages: [
{
role: 'assistant',
content: 'Mock LLM response',
response_metadata: {
model_name: 'mock-model',
finish_reason: 'stop',
tokenUsage: {
promptTokens: 20,
completionTokens: 10,
totalTokens: 30,
},
},
},
],
};
};

// Create and compile the graph
const graph = new StateGraph(MessagesAnnotation)
.addNode('agent', mockLlm)
.addEdge(START, 'agent')
.addEdge('agent', END)
.compile({ name: 'weather_assistant' });

// Test: basic invocation
await graph.invoke({
messages: [{ role: 'user', content: 'What is the weather today?' }],
});

// Test: invocation with multiple messages
await graph.invoke({
messages: [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there!' },
{ role: 'user', content: 'Tell me about the weather' },
],
});
});

await Sentry.flush(2000);
}

run();