-import { convertToModelMessages, UIMessage, stepCountIs, streamText, ToolSet } from 'ai';
-import { systemPrompt } from '@/lib/ai/searchPrompt';
-import { createModel } from '@/lib/ai/models';
-import { validateRateLimit } from '@/utils/rateLimit';
-import { trackEvent } from '@/lib/analytics';
+import {
+  convertToModelMessages,
+  stepCountIs,
+  streamText,
+  type ToolSet,
+  type UIMessage,
+} from "ai";
+import { createModel } from "@/lib/ai/models";
+import { systemPrompt } from "@/lib/ai/searchPrompt";
+import { trackEvent } from "@/lib/analytics";
+import { validateRateLimit } from "@/utils/rateLimit";

 export async function POST(req: Request) {
   const rateLimitError = validateRateLimit(req);
-  if (rateLimitError) return rateLimitError;
-
+  if (rateLimitError) {
+    return rateLimitError;
+  }
+
   let reqJson: { messages: UIMessage[]; clientId?: string };
   try {
     reqJson = await req.json();
   } catch {
-    return new Response('Invalid request', { status: 400 });
+    return new Response("Invalid request", { status: 400 });
+  }
+
+  if (!reqJson?.messages?.length) {
+    return new Response("Invalid request", { status: 400 });
   }

   try {
-    const { model, tools: webSearchTools } = createModel();
-    const clientId = reqJson.clientId
-
-    const tools = {
-      ...webSearchTools,
-    } as ToolSet;
-
-    const startTime = Date.now();
-    const provider = (process.env.AI_PROVIDER || 'openai') as string;
-    const modelId = process.env.AI_MODEL || 'default';
-
-    const textPart = reqJson.messages[reqJson.messages.length - 1].parts.find((part) => part.type === 'text');
-
-    const userQuery = textPart?.text || '';
-
-    const result = streamText({
-      model,
-      tools,
-      system: systemPrompt,
-      messages: convertToModelMessages(reqJson.messages, {
-        ignoreIncompleteToolCalls: true,
-      }),
-      stopWhen: stepCountIs(10),
-      onFinish: async ({ text, toolCalls, steps }) => {
-        const duration = Date.now() - startTime;
-
-        const allParts = steps.map((step, index) => {
-          const stepInfo: Record<string, unknown> = {
-            step: index + 1,
-            type: step.toolCalls?.length ? 'tool_call' : 'text',
-          };
-
-          if (step.text) {
-            stepInfo.text = step.text;
-          }
-
-          if (step.toolCalls?.length) {
-            stepInfo.tools = step.toolCalls.map(tc => ({
-              name: tc.toolName,
-              args: 'args' in tc ? tc.args : undefined,
-            }));
-          }
-
-          if (step.toolResults?.length) {
-            stepInfo.tool_results = step.toolResults.map(tr => {
-              const result = 'result' in tr ? tr.result : undefined;
-              return {
-                name: tr.toolName,
-                result: typeof result === 'string' ? result.substring(0, 200) : result,
-              };
-            });
-          }
-
-          return stepInfo;
-        });
-
-        trackEvent('a:docs:ai:search', {
-          query: userQuery,
-          response: text,
-          all_parts: JSON.stringify(allParts),
-          model: modelId,
-          provider,
-          duration_ms: duration,
-          tool_calls: toolCalls?.length || 0,
-        }, clientId);
-      },
-    });
-
-    return result.toUIMessageStreamResponse();
+    const { model, tools: webSearchTools } = createModel();
+    const clientId = reqJson.clientId;
+
+    const tools = {
+      ...webSearchTools,
+    } as ToolSet;
+
+    const startTime = Date.now();
+    const provider = (process.env.AI_PROVIDER || "openai") as string;
+    const modelId = process.env.AI_MODEL || "default";
+
+    const textPart = reqJson.messages
+      ?.at(-1)
+      ?.parts.find((part) => part.type === "text");
+
+    const userQuery = textPart?.text || "";
+
+    const result = streamText({
+      model,
+      tools,
+      system: systemPrompt,
+      messages: convertToModelMessages(reqJson.messages, {
+        ignoreIncompleteToolCalls: true,
+      }),
+      stopWhen: stepCountIs(10),
+      onFinish: async ({ text, toolCalls, steps }) => {
+        const duration = Date.now() - startTime;
+
+        const allParts = steps.map((step, index) => {
+          const stepInfo: Record<string, unknown> = {
+            step: index + 1,
+            type: step.toolCalls?.length ? "tool_call" : "text",
+          };
+
+          if (step.text) {
+            stepInfo.text = step.text;
+          }
+
+          if (step.toolCalls?.length) {
+            stepInfo.tools = step.toolCalls.map((tc) => ({
+              name: tc.toolName,
+              args: "args" in tc ? tc.args : undefined,
+            }));
+          }
+
+          if (step.toolResults?.length) {
+            stepInfo.tool_results = step.toolResults.map((tr) => {
+              const result = "result" in tr ? tr.result : undefined;
+              return {
+                name: tr.toolName,
+                result:
+                  typeof result === "string"
+                    ? result.substring(0, 200)
+                    : result,
+              };
+            });
+          }
+
+          return stepInfo;
+        });
+
+        trackEvent(
+          "a:docs:ai:search",
+          {
+            query: userQuery,
+            response: text,
+            all_parts: JSON.stringify(allParts),
+            model: modelId,
+            provider,
+            duration_ms: duration,
+            tool_calls: toolCalls?.length || 0,
+          },
+          clientId
+        );
+      },
+    });
+
+    return result.toUIMessageStreamResponse();
   } catch (error) {
-    console.error('Error in chat API:', error);
-    return new Response('Internal Server Error', { status: 500 });
+    console.error("Error in chat API:", error);
+    return new Response("Internal Server Error", { status: 500 });
   }
-}
+}
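
For context, a minimal sketch of how a client could call the rewritten handler. The `/api/chat` path, the `askDocsSearch` helper name, and the raw-stream handling below are assumptions for illustration, not part of this commit; the handler only requires a JSON body with `messages` (and optionally `clientId`) and returns a streamed UI message response.

```ts
// Hypothetical client-side sketch (not part of the commit): POST the UIMessage
// history plus an optional clientId and accumulate the streamed response body.
// The "/api/chat" path is an assumption; use whatever route this handler serves.
async function askDocsSearch(messages: unknown[], clientId?: string): Promise<string> {
  const res = await fetch("/api/chat", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ messages, clientId }),
  });
  if (!res.ok || !res.body) {
    throw new Error(`Chat request failed with status ${res.status}`);
  }

  // The handler streams its answer; read the body chunk by chunk and
  // return the raw protocol text for the caller to parse or display.
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let raw = "";
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    raw += decoder.decode(value, { stream: true });
  }
  return raw;
}
```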
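The route also only relies on `createModel()` returning a model instance plus an optional tool set to spread into `tools`. A sketch of that contract is below; the `@ai-sdk/openai` provider and the fallback model id are assumptions, and the real `@/lib/ai/models` module in the repo may look quite different.

```ts
// Hypothetical sketch of @/lib/ai/models (assumption, not the repo's actual code).
// The route only needs { model, tools } where tools can be spread into a ToolSet.
import { openai } from "@ai-sdk/openai";
import type { LanguageModel, ToolSet } from "ai";

export function createModel(): { model: LanguageModel; tools: ToolSet } {
  // "gpt-4o-mini" is a placeholder default; the route reads AI_MODEL separately
  // for analytics, so a real implementation should keep the two in sync.
  const modelId = process.env.AI_MODEL || "gpt-4o-mini";
  return {
    model: openai(modelId),
    // Provider web-search tools would be attached here; empty keeps the sketch minimal.
    tools: {},
  };
}
```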