Skip to content

Commit c3a91b2

Browse files
mikecann (Convex, Inc.)
authored and committed
Mikec/token limit mcp logs (#40515)
<img width="1017" height="207" alt="image" src="https://github.com/user-attachments/assets/6c0a3757-85ac-4724-a60d-947d03db76ba" /> https://www.youtube.com/watch?v=1hlDpwePKsg&lc=UgwkBqoiqbmQ6Dj6XSV4AaABAg This user noted that there is a 25k token limit with Claude code and MCP calls and that its quite frustrating to work with the MCP logs tool currently because of this. This PR seeks to rectify this by allowing the model to optionally specify a token limit which the code then attempts to estimate. The model can still supply an optional line limit if it wants to, both limits will be applied and sliced to the nearest entry boundry. Its currently set to 20k limit to allow for inaccuracies in token estimation. As mentioned in the `tokens.ts` file I borrowed the estimation logic from this repo: https://github.com/johannschopplich/tokenx I decided not to take a dependency on it and instead just copy it in as its simple and self contained. I dont know what the policy is on this brining in of third party code, but its MIT licensed so I assume its okay? I have tested this all by running it locally and it all works as expected. GitOrigin-RevId: a54bab51381988127382be5257a068ff78c7dcfc
1 parent 2fcde28 commit c3a91b2

File tree

1 file changed

+72
-13
lines changed
  • npm-packages/convex/src/cli/lib/mcp/tools

1 file changed

+72
-13
lines changed

npm-packages/convex/src/cli/lib/mcp/tools/logs.ts

Lines changed: 72 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -14,14 +14,23 @@ const inputSchema = z.object({
1414
.describe(
1515
"Optional cursor (in ms) to start reading from. Use 0 to read from the beginning.",
1616
),
17-
limit: z
17+
entriesLimit: z
1818
.number()
1919
.int()
2020
.positive()
2121
.max(1000)
2222
.optional()
2323
.describe(
24-
"Maximum number of log entries to return. If omitted, returns all available in this chunk.",
24+
"Maximum number of log entries to return (from the end). If omitted, returns all available in this chunk.",
25+
),
26+
tokensLimit: z
27+
.number()
28+
.int()
29+
.positive()
30+
.default(20000)
31+
.optional()
32+
.describe(
33+
"Approximate maximum number of tokens to return (applied to the JSON payload). Defaults to 20000.",
2534
),
2635
});
2736

@@ -30,6 +39,10 @@ const outputSchema = z.object({
3039
newCursor: z.number(),
3140
});
3241

42+
const logsResponseSchema = outputSchema;
43+
44+
type LogEntry = z.infer<typeof logsResponseSchema>["entries"][number];
45+
3346
const description = `
3447
Fetch a chunk of recent log entries from your Convex deployment.
3548
@@ -63,25 +76,71 @@ export const LogsTool: ConvexTool<typeof inputSchema, typeof outputSchema> = {
6376
const response = await fetch(`/api/stream_function_logs?cursor=${cursor}`, {
6477
method: "GET",
6578
});
79+
6680
if (!response.ok) {
6781
return await ctx.crash({
6882
exitCode: 1,
6983
errorType: "fatal",
7084
printedMessage: `HTTP error ${response.status}: ${await response.text()}`,
7185
});
7286
}
73-
const { entries, newCursor } = (await response.json()) as {
74-
entries: unknown[];
75-
newCursor: number;
76-
};
7787

78-
// Optionally limit the number of entries returned from the end.
79-
const limitedEntries =
80-
typeof args.limit === "number" && entries.length > args.limit
81-
? entries.slice(entries.length - args.limit)
82-
: entries;
88+
const { entries, newCursor } = await response
89+
.json()
90+
.then(logsResponseSchema.parse);
8391

84-
const parsed = outputSchema.parse({ entries: limitedEntries, newCursor });
85-
return parsed;
92+
return {
93+
entries: limitLogs({
94+
entries,
95+
tokensLimit: args.tokensLimit ?? 20000,
96+
entriesLimit: args.entriesLimit ?? entries.length,
97+
}),
98+
newCursor,
99+
};
86100
},
87101
};
102+
103+
export function limitLogs({
104+
entries,
105+
tokensLimit,
106+
entriesLimit,
107+
}: {
108+
entries: LogEntry[];
109+
tokensLimit: number;
110+
entriesLimit: number;
111+
}): LogEntry[] {
112+
// 1) Apply entries limit first so we cut off neatly at entry boundaries (latest entries kept)
113+
const limitedByEntries = entries.slice(entries.length - entriesLimit);
114+
115+
// 2) Apply token limit by iterating over log lines from newest to oldest and
116+
// only include lines while within token budget. We cut off at the nearest log line.
117+
const limitedByTokens = limitEntriesByTokenBudget({
118+
entries: limitedByEntries,
119+
tokensLimit,
120+
});
121+
122+
return limitedByTokens;
123+
}
124+
125+
function limitEntriesByTokenBudget({
126+
entries,
127+
tokensLimit,
128+
}: {
129+
entries: LogEntry[];
130+
tokensLimit: number;
131+
}): LogEntry[] {
132+
const result: LogEntry[] = [];
133+
let tokens = 0;
134+
for (const entry of entries) {
135+
const entryString = JSON.stringify(entry);
136+
const entryTokens = estimateTokenCount(entryString);
137+
tokens += entryTokens;
138+
if (tokens > tokensLimit) break;
139+
result.push(entry);
140+
}
141+
return result;
142+
}
143+
144+
function estimateTokenCount(entryString: string): number {
145+
return entryString.length * 0.33;
146+
}

0 commit comments

Comments
 (0)