Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion app/actions.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,8 @@ async function submit(formData?: FormData, skip?: boolean) {
)

const groupeId = nanoid()
const useSpecificAPI = process.env.USE_SPECIFIC_API_FOR_WRITER === 'true'
const { model, behavior } = getModel()
const useSpecificAPI = behavior === 'aggressive'
const maxMessages = useSpecificAPI ? 5 : 10
messages.splice(0, Math.max(messages.length - maxMessages, 0))

Expand Down
451 changes: 436 additions & 15 deletions bun.lock

Large diffs are not rendered by default.

7 changes: 7 additions & 0 deletions jest.config.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
module.exports = {
preset: 'ts-jest',
testEnvironment: 'node',
transform: {
'^.+\\.tsx?$': 'ts-jest',
},
};
3 changes: 2 additions & 1 deletion lib/agents/inquire.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,9 @@ export async function inquire(
);

let finalInquiry: PartialInquiry = {};
const { model } = getModel();
const result = await streamObject({
model: getModel() as LanguageModel,
model: model as LanguageModel,
system: `...`, // Your system prompt remains unchanged
messages,
schema: inquirySchema,
Expand Down
3 changes: 2 additions & 1 deletion lib/agents/query-suggestor.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,9 @@ export async function querySuggestor(
)

let finalRelatedQueries: PartialRelated = {}
const { model } = getModel()
const result = await streamObject({
model: getModel() as LanguageModel,
model: model as LanguageModel,
system: `As a professional web researcher, your task is to generate a set of three queries that explore the subject matter more deeply, building upon the initial query and the information uncovered in its search results.

For instance, if the original query was "Starship's third test flight key milestones", your output should follow this format:
Expand Down
3 changes: 2 additions & 1 deletion lib/agents/researcher.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -68,8 +68,9 @@ Analysis & Planning

const systemToUse = dynamicSystemPrompt && dynamicSystemPrompt.trim() !== '' ? dynamicSystemPrompt : default_system_prompt;

const { model } = getModel();
const result = await nonexperimental_streamText({
model: getModel() as LanguageModel,
model: model as LanguageModel,
maxTokens: 2500,
system: systemToUse, // Use the dynamic or default system prompt
messages,
Expand Down
3 changes: 2 additions & 1 deletion lib/agents/resolution-search.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -40,8 +40,9 @@ Analyze the user's prompt and the image to provide a holistic understanding of t
const filteredMessages = messages.filter(msg => msg.role !== 'system');

// Use generateObject to get the full object at once.
const { model } = getModel();
const { object } = await generateObject({
model: getModel(),
model: model,
system: systemPrompt,
messages: filteredMessages,
schema: resolutionSearchSchema,
Expand Down
36 changes: 18 additions & 18 deletions lib/agents/task-manager.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -15,25 +15,25 @@ export async function taskManager(messages: CoreMessage[]) {
}
}

const { model, behavior } = getModel()
const systemPrompt =
behavior === 'aggressive'
? `As a planet computer, you are an aggressive and proactive AI assistant. Your primary objective is to fully comprehend the user's query, conduct thorough web searches, and use Geospatial tools to gather all necessary information to provide a comprehensive response. You should actively seek out opportunities to use your tools to enhance the user's experience.

- "proceed": If the user's query is clear and you are confident you can provide a complete answer with your tools, choose this option.
- "inquire": If the user's query is ambiguous or lacks detail, present a form to the user to gather the required information.

Your default behavior should be to "proceed" unless the query is exceptionally vague.`
: `As a planet computer, you are a conservative and cautious AI assistant. Your primary objective is to fully comprehend the user's query and provide an accurate response. You should only use your tools when absolutely necessary to answer the user's question.

- "proceed": If the user's query can be answered directly with your existing knowledge, choose this option.
- "inquire": If you require additional information to answer the query, present a form to the user.

Your default behavior should be to "inquire" unless the query is exceptionally clear and specific.`

const result = await generateObject({
model: getModel() as LanguageModel,
system: `As a planet computer, your primary objective is to fully comprehend the user's query, conduct thorough web searches and use Geospatial tools to gather preview the necessary information, and provide an appropriate response.
To achieve this, you must first analyze the user's input and determine the optimal course of action. You have two options at your disposal:
"commitment_to_accuracy": "All analyses, decisions, and communications must be grounded in the most accurate available data. Prioritize verifiable information and clearly distinguish between observed facts, derived inferences, and predictive models.",
"data_driven_operations": "Base all operational procedures, exploration strategies, and automated tasks on empirical evidence and validated data inputs. Assumptions made due to incomplete data must be explicitly stated.",
"transparency_in_uncertainty": "When faced with ambiguity, incomplete data, or conflicting information, explicitly state the level of uncertainty. Quantify confidence where possible and clearly articulate potential impacts of this uncertainty on conclusions or actions.",
"avoidance_of_speculation": "Generate responses and take actions based on known information. Do not invent, fabricate, or present unsubstantiated claims as facts. If information is unavailable, state so clearly.",
"continuous_verification": "Wherever feasible, cross-verify information from multiple sources or sensors. Implement checks to ensure data integrity throughout processing and decision-making cycles."
1. "proceed": If the provided information is sufficient to address the query effectively, choose this option to proceed with the research and formulate a response.
2. "inquire": If you believe that additional information from the user would enhance your ability to provide a comprehensive response, select this option. You may present a form to the user, offering default selections or free-form input fields, to gather the required details.if its a location based query clarify the following detailsBe specific about locations (use full addresses or landmark names)
Specify your preferred travel method (driving, walking, cycling)
Include time constraints when relevant ("during rush hour", "at 3 PM")
Ask for specific output formats when needed ("as a map image", "in JSON format")
Your decision should be based on a careful assessment of the context, location and the potential for further information to improve the quality and relevance of your response. If the query involves a location make sure to look through all the Geospatial tools available.
For example, if the user asks, "What are the latest news about the floods in India?", you may choose to "proceed" as the query is clear and can be answered effectively with web research alone.
However, if the user asks, "What's the warmest temperature in my area?", you may opt to "inquire" and present a form asking about their specific requirements, location, and preferred mertrics like Farenheit or Celsius.
Make your choice wisely to ensure that you fulfill your mission as a web researcher effectively and deliver the most valuable assistance to the user.
`,
model: model as LanguageModel,
system: systemPrompt,
messages,
schema: nextActionSchema
})
Expand Down
3 changes: 2 additions & 1 deletion lib/agents/writer.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,9 @@ export async function writer(

const systemToUse = dynamicSystemPrompt && dynamicSystemPrompt.trim() !== '' ? dynamicSystemPrompt : default_system_prompt;

const { model } = getModel();
const result = await nonexperimental_streamText({
model: getModel() as LanguageModel,
model: model as LanguageModel,
maxTokens: 2500,
system: systemToUse, // Use the dynamic or default system prompt
messages
Expand Down
16 changes: 9 additions & 7 deletions lib/utils/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -22,22 +22,24 @@ export function getModel() {
const awsSecretAccessKey = process.env.AWS_SECRET_ACCESS_KEY
const awsRegion = process.env.AWS_REGION
const bedrockModelId = ''
let modelBehavior = process.env.MODEL_BEHAVIOR || 'conservative'
if (modelBehavior !== 'aggressive' && modelBehavior !== 'conservative') {
modelBehavior = 'conservative'
}

if (xaiApiKey) {
const xai = createXai({
apiKey: xaiApiKey,
baseURL: 'https://api.x.ai/v1',
})
// Optionally, add a check for credit status or skip xAI if credits are exhausted
try {
return xai('grok-4-fast-non-reasoning')
const model = xai('grok-4-fast-non-reasoning')
return { model, behavior: modelBehavior }
} catch (error) {
console.warn('xAI API unavailable, falling back to OpenAI:')
}
}

// AWS Bedrock

if (awsAccessKeyId && awsSecretAccessKey) {
const bedrock = createAmazonBedrock({
bedrockOptions: {
Expand All @@ -51,12 +53,12 @@ export function getModel() {
const model = bedrock(bedrockModelId, {
additionalModelRequestFields: { top_k: 350 },
})
return model
return { model, behavior: modelBehavior }
}

// Default fallback (OpenAI)
const openai = createOpenAI({
apiKey: process.env.OPENAI_API_KEY,
})
return openai('gpt-4o')
const model = openai('gpt-4o')
return { model, behavior: modelBehavior }
}
3 changes: 3 additions & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@
"@supabase/supabase-js": "^2.0.0",
"@tailwindcss/typography": "^0.5.16",
"@turf/turf": "^7.2.0",
"@types/jest": "^30.0.0",
"@types/mapbox__mapbox-gl-draw": "^1.4.8",
"@types/pg": "^8.15.4",
"@upstash/redis": "^1.35.0",
Expand All @@ -58,6 +59,7 @@
"exa-js": "^1.6.13",
"framer-motion": "^12.23.24",
"glassmorphic": "^0.0.3",
"jest": "^30.2.0",
"katex": "^0.16.22",
"lottie-react": "^2.4.1",
"lucide-react": "^0.507.0",
Expand All @@ -82,6 +84,7 @@
"sonner": "^1.7.4",
"tailwind-merge": "^2.6.0",
"tailwindcss-animate": "^1.0.7",
"ts-jest": "^29.4.5",
"use-mcp": "^0.0.9",
"uuid": "^9.0.0",
"zod": "^3.23.8"
Expand Down
37 changes: 37 additions & 0 deletions tests/model-behavior.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
import { getModel } from '../lib/utils';

describe('getModel', () => {
  // Snapshot the real environment so per-test mutations cannot leak out.
  const originalEnv = process.env;

  beforeEach(() => {
    // Work on a shallow copy of the environment. getModel() reads
    // process.env at call time, so mutating the copy is sufficient —
    // no module re-import (jest.resetModules) is needed with the static
    // import above, and calling it here would be a no-op anyway.
    process.env = { ...originalEnv };
  });

  afterAll(() => {
    // Restore the original environment object for any suites that follow.
    process.env = originalEnv;
  });

  it('should return the conservative behavior by default', () => {
    // Explicitly clear the variable so an ambient MODEL_BEHAVIOR in the
    // developer/CI environment cannot make the "default" case flaky.
    delete process.env.MODEL_BEHAVIOR;
    const { behavior } = getModel();
    expect(behavior).toBe('conservative');
  });

  it('should return the aggressive behavior when MODEL_BEHAVIOR is set to "aggressive"', () => {
    process.env.MODEL_BEHAVIOR = 'aggressive';
    const { behavior } = getModel();
    expect(behavior).toBe('aggressive');
  });

  it('should return the conservative behavior when MODEL_BEHAVIOR is set to "conservative"', () => {
    process.env.MODEL_BEHAVIOR = 'conservative';
    const { behavior } = getModel();
    expect(behavior).toBe('conservative');
  });

  it('should fall back to the conservative behavior for an invalid MODEL_BEHAVIOR value', () => {
    process.env.MODEL_BEHAVIOR = 'invalid';
    const { behavior } = getModel();
    expect(behavior).toBe('conservative');
  });
});