Skip to content

Commit 8930ec7

Browse files
llm output handler and followupquestion added
1 parent 8d83287 commit 8930ec7

File tree

9 files changed

+1209
-44
lines changed

9 files changed

+1209
-44
lines changed

examples/agent-workflow-example.ts

Lines changed: 204 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,204 @@
1+
import { PromptBuilder, LLMOutputHandler, FollowUpQuestionBuilder } from '../src/agentlib/promptbuilder';
2+
import llm from '../src/modules/llm';
3+
import type { CodeboltAPI } from '../src/types/libFunctionTypes';
4+
5+
/**
6+
* Example demonstrating the agent workflow using the new classes
7+
*/
8+
async function runAgentWorkflow(userMessage: any, codebolt: CodeboltAPI) {
9+
try {
10+
// Step 1: Build the initial prompt
11+
const promptBuilderObject = new PromptBuilder(userMessage, codebolt);
12+
const userPrompt = await promptBuilderObject
13+
.addMCPTools()
14+
.addAgentTools()
15+
.addEnvironmentDetails()
16+
.addSystemPrompt('agent.yaml', 'test', 'example.md')
17+
.addTaskInstruction('task.yaml', 'main_task')
18+
.buildInferenceParams(); // Use buildInferenceParams for LLM inference
19+
20+
// Step 2: Get initial LLM response
21+
let llmOutput = llm.inference(userPrompt);
22+
let llmOutputObject = new LLMOutputHandler(llmOutput, codebolt);
23+
24+
// Step 3: Main conversation loop
25+
while (!llmOutputObject.isCompleted()) {
26+
// Send the assistant's message to the user
27+
await llmOutputObject.sendMessageToUser();
28+
29+
// Execute any tool calls in the response
30+
const toolCallResult = await llmOutputObject.runTools();
31+
32+
// Step 4: Build follow-up prompt with tool results
33+
const followUpQuestionObject = new FollowUpQuestionBuilder(codebolt);
34+
const nextUserPrompt = await followUpQuestionObject
35+
.addPreviousConversation(userPrompt)
36+
.addToolResult(toolCallResult)
37+
.checkAndSummarizeConversationIfLong()
38+
.buildInferenceParams(); // Use buildInferenceParams for LLM inference
39+
40+
// Step 5: Get next LLM response
41+
llmOutput = llm.inference(nextUserPrompt);
42+
llmOutputObject = new LLMOutputHandler(llmOutput, codebolt);
43+
44+
// Update userPrompt for next iteration
45+
userPrompt.messages = nextUserPrompt.messages;
46+
}
47+
48+
console.log("Agent workflow completed successfully!");
49+
50+
} catch (error) {
51+
console.error("Error in agent workflow:", error);
52+
throw error;
53+
}
54+
}
55+
56+
/**
57+
* Alternative example with more detailed control
58+
*/
59+
async function runDetailedAgentWorkflow(userMessage: any, codebolt: CodeboltAPI) {
60+
try {
61+
// Step 1: Build the initial prompt with more control
62+
const promptBuilder = new PromptBuilder(userMessage, codebolt);
63+
64+
// Add components step by step
65+
promptBuilder
66+
.addMCPTools(['codebolt', 'filesystem', 'browser'])
67+
.addAgentTools()
68+
.addEnvironmentDetails()
69+
.addSystemPrompt('agent.yaml', 'test')
70+
.addTaskInstruction('task.yaml', 'main_task')
71+
.addCustomSection('Additional Context', 'This is a complex task requiring multiple steps');
72+
73+
const initialPrompt = await promptBuilder.buildInferenceParams();
74+
75+
// Step 2: Start the conversation
76+
let currentPrompt = initialPrompt;
77+
let conversationTurn = 0;
78+
const maxTurns = 20; // Prevent infinite loops
79+
80+
while (conversationTurn < maxTurns) {
81+
console.log(`\n--- Conversation Turn ${conversationTurn + 1} ---`);
82+
83+
// Get LLM response
84+
const llmResponse = llm.inference(currentPrompt);
85+
const outputHandler = new LLMOutputHandler(llmResponse, codebolt);
86+
87+
// Send message to user
88+
await outputHandler.sendMessageToUser();
89+
90+
// Check if completed
91+
if (outputHandler.isCompleted()) {
92+
console.log("Task completed successfully!");
93+
break;
94+
}
95+
96+
// Execute tools
97+
const toolResults = await outputHandler.runTools();
98+
99+
// Build next prompt
100+
const followUpBuilder = new FollowUpQuestionBuilder(codebolt);
101+
followUpBuilder
102+
.addPreviousConversation(currentPrompt)
103+
.addToolResult(toolResults)
104+
.setMaxConversationLength(30); // Summarize after 30 messages
105+
106+
// Check if we need to summarize
107+
if (followUpBuilder.shouldSummarize()) {
108+
console.log("Summarizing conversation due to length...");
109+
followUpBuilder.checkAndSummarizeConversationIfLong();
110+
}
111+
112+
currentPrompt = await followUpBuilder.buildInferenceParams();
113+
conversationTurn++;
114+
}
115+
116+
if (conversationTurn >= maxTurns) {
117+
console.log("Maximum conversation turns reached. Stopping.");
118+
}
119+
120+
} catch (error) {
121+
console.error("Error in detailed agent workflow:", error);
122+
throw error;
123+
}
124+
}
125+
126+
/**
127+
* Example with error handling and recovery
128+
*/
129+
async function runRobustAgentWorkflow(userMessage: any, codebolt: CodeboltAPI) {
130+
const maxRetries = 3;
131+
let retryCount = 0;
132+
133+
while (retryCount < maxRetries) {
134+
try {
135+
// Build initial prompt
136+
const promptBuilder = new PromptBuilder(userMessage, codebolt);
137+
const userPrompt = await promptBuilder
138+
.addAllAutomatic() // Add all tools and environment automatically
139+
.addSystemPrompt('agent.yaml', 'robust_agent')
140+
.buildInferenceParams();
141+
142+
let currentPrompt = userPrompt;
143+
let conversationActive = true;
144+
145+
while (conversationActive) {
146+
try {
147+
// Get LLM response with timeout handling
148+
const llmResponse = llm.inference(currentPrompt);
149+
const outputHandler = new LLMOutputHandler(llmResponse, codebolt);
150+
151+
// Process response
152+
await outputHandler.sendMessageToUser();
153+
154+
if (outputHandler.isCompleted()) {
155+
conversationActive = false;
156+
console.log("Workflow completed successfully!");
157+
return; // Success - exit all retry loops
158+
}
159+
160+
// Execute tools with error handling
161+
const toolResults = await outputHandler.runTools();
162+
163+
// Build next prompt
164+
const followUpBuilder = new FollowUpQuestionBuilder(codebolt);
165+
currentPrompt = await followUpBuilder
166+
.addPreviousConversation(currentPrompt)
167+
.addToolResult(toolResults)
168+
.checkAndSummarizeConversationIfLong()
169+
.buildInferenceParams();
170+
171+
} catch (stepError) {
172+
console.error("Error in conversation step:", stepError);
173+
174+
// Add error recovery message
175+
const followUpBuilder = new FollowUpQuestionBuilder(codebolt);
176+
currentPrompt = await followUpBuilder
177+
.addPreviousConversation(currentPrompt)
178+
.addUserMessage(`An error occurred: ${stepError}. Please try a different approach.`)
179+
.buildInferenceParams();
180+
}
181+
}
182+
183+
return; // Success
184+
185+
} catch (error) {
186+
retryCount++;
187+
console.error(`Workflow attempt ${retryCount} failed:`, error);
188+
189+
if (retryCount >= maxRetries) {
190+
console.error("Maximum retries reached. Workflow failed.");
191+
throw error;
192+
}
193+
194+
console.log(`Retrying... (${retryCount}/${maxRetries})`);
195+
await new Promise(resolve => setTimeout(resolve, 1000 * retryCount)); // Exponential backoff
196+
}
197+
}
198+
}
199+
200+
// Public entry points for the three example workflows above.
export {
    runAgentWorkflow,
    runDetailedAgentWorkflow,
    runRobustAgentWorkflow
};

src/agentlib/README.md

Lines changed: 127 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,127 @@
1+
# Agent Library
2+
3+
This library provides a set of classes for building and managing AI agent workflows with tool execution and conversation management.
4+
5+
## Classes
6+
7+
### PromptBuilder
8+
9+
The `PromptBuilder` class helps construct complex prompts with tools, environment details, and system instructions.
10+
11+
```typescript
12+
import { PromptBuilder } from './promptbuilder';
13+
14+
const promptBuilder = new PromptBuilder(userMessage, codebolt);
15+
const prompt = await promptBuilder
16+
.addMCPTools()
17+
.addAgentTools()
18+
.addEnvironmentDetails()
19+
.addSystemPrompt('agent.yaml', 'test', 'example.md')
20+
.addTaskInstruction('task.yaml', 'main_task')
21+
.buildInferenceParams();
22+
```
23+
24+
### LLMOutputHandler
25+
26+
The `LLMOutputHandler` class processes LLM responses, executes tool calls, and manages completion detection.
27+
28+
```typescript
29+
import { LLMOutputHandler } from './llmoutputhandler';
30+
31+
const llmOutput = llm.inference(prompt);
32+
const outputHandler = new LLMOutputHandler(llmOutput, codebolt);
33+
34+
// Send message to user
35+
await outputHandler.sendMessageToUser();
36+
37+
// Execute tools
38+
const toolResults = await outputHandler.runTools();
39+
40+
// Check if completed
41+
if (outputHandler.isCompleted()) {
42+
console.log("Task completed!");
43+
}
44+
```
45+
46+
### FollowUpQuestionBuilder
47+
48+
The `FollowUpQuestionBuilder` class manages conversation continuation and summarization.
49+
50+
```typescript
51+
import { FollowUpQuestionBuilder } from './followupquestionbuilder';
52+
53+
const followUpBuilder = new FollowUpQuestionBuilder(codebolt);
54+
const nextPrompt = await followUpBuilder
55+
.addPreviousConversation(previousPrompt)
56+
.addToolResult(toolResults)
57+
.checkAndSummarizeConversationIfLong()
58+
.buildInferenceParams();
59+
```
60+
61+
## Complete Workflow Example
62+
63+
```typescript
64+
import { PromptBuilder, LLMOutputHandler, FollowUpQuestionBuilder } from './promptbuilder';
65+
import llm from '../modules/llm';
66+
67+
async function runAgentWorkflow(userMessage: any, codebolt: CodeboltAPI) {
68+
// Step 1: Build initial prompt
69+
const promptBuilder = new PromptBuilder(userMessage, codebolt);
70+
let userPrompt = await promptBuilder
71+
.addMCPTools()
72+
.addAgentTools()
73+
.addEnvironmentDetails()
74+
.addSystemPrompt('agent.yaml', 'test', 'example.md')
75+
.addTaskInstruction('task.yaml', 'main_task')
76+
.buildInferenceParams();
77+
78+
// Step 2: Main conversation loop
79+
let llmOutput = llm.inference(userPrompt);
80+
let llmOutputObject = new LLMOutputHandler(llmOutput, codebolt);
81+
82+
while (!llmOutputObject.isCompleted()) {
83+
// Send message to user
84+
await llmOutputObject.sendMessageToUser();
85+
86+
// Execute tools
87+
const toolCallResult = await llmOutputObject.runTools();
88+
89+
// Build follow-up prompt
90+
const followUpBuilder = new FollowUpQuestionBuilder(codebolt);
91+
const nextUserPrompt = await followUpBuilder
92+
.addPreviousConversation(userPrompt)
93+
.addToolResult(toolCallResult)
94+
.checkAndSummarizeConversationIfLong()
95+
.buildInferenceParams();
96+
97+
// Get next response
98+
llmOutput = llm.inference(nextUserPrompt);
99+
llmOutputObject = new LLMOutputHandler(llmOutput, codebolt);
100+
101+
// Update for next iteration
102+
userPrompt = nextUserPrompt;
103+
}
104+
105+
console.log("Agent workflow completed successfully!");
106+
}
107+
```
108+
109+
## Key Features
110+
111+
- **Fluent Interface**: All builders use method chaining for easy configuration
112+
- **Automatic Tool Loading**: Automatically loads MCP tools and agent tools
113+
- **Environment Integration**: Includes file contents and project structure
114+
- **Conversation Management**: Handles conversation history and summarization
115+
- **Tool Execution**: Executes tools and manages results
116+
- **Completion Detection**: Automatically detects when tasks are complete
117+
- **Error Handling**: Robust error handling and recovery mechanisms
118+
119+
## Architecture
120+
121+
The library follows a modular design:
122+
123+
1. **PromptBuilder**: Constructs initial prompts with all necessary context
124+
2. **LLMOutputHandler**: Processes LLM responses and executes tools
125+
3. **FollowUpQuestionBuilder**: Manages conversation continuation and summarization
126+
127+
This separation allows for flexible usage patterns and easy testing of individual components.

0 commit comments

Comments
 (0)