@@ -197,8 +197,153 @@ async function runRobustAgentWorkflow(userMessage: any, codebolt: CodeboltAPI) {
     }
 }
 
+/**
+ * Example using the new addLLMResponse method in PromptBuilder
+ * This demonstrates the simplified workflow with conversation management
+ */
+async function runWorkflowWithPromptBuilder(userMessage: any, codebolt: CodeboltAPI) {
+    try {
+        // Step 1: Build the initial prompt
+        const promptBuilder = new PromptBuilder(userMessage, codebolt);
+        let currentPrompt = await promptBuilder
+            .addMCPTools()
+            .addAgentTools()
+            .addEnvironmentDetails()
+            .addSystemPrompt('agent.yaml', 'test', 'example.md')
+            .addTaskInstruction('task.yaml', 'main_task')
+            .buildInferenceParams();
+
+        const maxTurns = 20;
+        let turn = 0;
+
+        // Step 2: Main conversation loop using PromptBuilder for conversation management
+        while (!promptBuilder.isTaskCompleted() && turn < maxTurns) {
+            console.log(`\n--- Conversation Turn ${turn + 1} ---`);
+
+            // Get LLM response
+            const llmResponse = llm.inference(currentPrompt);
+
+            // Add LLM response to conversation history
+            await promptBuilder.addLLMResponse(llmResponse);
+
+            // Process the response using LLMOutputHandler
+            const outputHandler = new LLMOutputHandler(llmResponse, codebolt);
+            await outputHandler.sendMessageToUser();
+
+            // Check if task is completed (using both methods for reliability)
+            if (outputHandler.isCompleted() || promptBuilder.isTaskCompleted()) {
+                console.log("Task completed successfully!");
+                break;
+            }
+
+            // Execute tools and get results
+            const toolResults = await outputHandler.runTools();
+
+            // Add tool results to conversation
+            if (toolResults.length > 0) {
+                promptBuilder.addToolResults(toolResults);
+            } else {
+                // Add default continuation message when no tools executed
+                promptBuilder.addDefaultContinuationMessage();
+            }
+
+            // Check if conversation should be summarized
+            if (promptBuilder.shouldSummarizeConversation(30)) {
+                console.log("Conversation getting long, consider summarization...");
+                // Note: Actual summarization would require additional implementation
+            }
+
+            // Build next prompt for the loop
+            currentPrompt = await promptBuilder.buildInferenceParams();
+            turn++;
+        }
+
+        if (turn >= maxTurns) {
+            console.log("Maximum conversation turns reached. Stopping.");
+        }
+
+        console.log(`Workflow completed in ${turn} turns`);
+        console.log(`Final conversation length: ${promptBuilder.getConversationLength()} messages`);
+
+    } catch (error) {
+        console.error("Error in PromptBuilder workflow:", error);
+        throw error;
+    }
+}
+
+/**
+ * Example matching the user's agent code structure with the new addLLMResponse method
+ * This shows how to update the existing agent to use PromptBuilder for conversation management
+ */
+async function runUpdatedAgentWorkflow(userMessage: any, codebolt: CodeboltAPI) {
+    try {
+        // Step 1: Build the initial prompt
+        const promptBuilderObject = new PromptBuilder(userMessage, codebolt);
+        let userPrompt = await promptBuilderObject
+            .addMCPTools()
+            .addAgentTools()
+            .addEnvironmentDetails()
+            .addSystemPrompt('agent.yaml', 'test', 'example.md')
+            .addTaskInstruction('task.yaml', 'main_task')
+            .buildInferenceParams();
+
+        // Step 2: Get initial LLM response
+        let llmOutput = llm.inference(userPrompt);
+
+        // Add the initial LLM response to conversation history
+        await promptBuilderObject.addLLMResponse(llmOutput);
+
+        let llmOutputObject = new LLMOutputHandler(llmOutput, codebolt);
+
+        // Step 3: Main conversation loop with improved conversation management
+        while (!llmOutputObject.isCompleted() && !promptBuilderObject.isTaskCompleted()) {
+            // Send the assistant's message to the user
+            await llmOutputObject.sendMessageToUser();
+
+            // Execute any tool calls in the response
+            const toolCallResult = await llmOutputObject.runTools();
+
+            // Add tool results to conversation history
+            if (toolCallResult.length > 0) {
+                promptBuilderObject.addToolResults(toolCallResult);
+            } else {
+                // Add default continuation message when no tools executed
+                promptBuilderObject.addDefaultContinuationMessage();
+            }
+
+            // Check if conversation should be summarized
+            if (promptBuilderObject.shouldSummarizeConversation(30)) {
+                console.log("Conversation getting long, consider summarization...");
+            }
+
+            // Step 4: Build next prompt using PromptBuilder's conversation history
+            const nextUserPrompt = await promptBuilderObject.buildInferenceParams();
+
+            // Step 5: Get next LLM response
+            llmOutput = llm.inference(nextUserPrompt);
+
+            // Add the new LLM response to conversation history
+            await promptBuilderObject.addLLMResponse(llmOutput);
+
+            llmOutputObject = new LLMOutputHandler(llmOutput, codebolt);
+
+            // Update userPrompt for next iteration
+            userPrompt = nextUserPrompt;
+        }
+
+        console.log("Agent workflow completed successfully!");
+        console.log(`Final conversation length: ${promptBuilderObject.getConversationLength()} messages`);
+
+    } catch (error) {
+        console.error("Error in agent workflow:", error);
+        throw error;
+    }
+}
+
 export {
     runAgentWorkflow,
     runDetailedAgentWorkflow,
-    runRobustAgentWorkflow
+    runRobustAgentWorkflow,
+    runWorkflowWithPromptBuilder,
+    runUpdatedAgentWorkflow
 };