import { setupWorkspace, shouldRunIntegrationTests } from "./setup";
import {
  sendMessageWithModel,
  createEventCollector,
  assertStreamSuccess,
} from "./helpers";

// Skip all tests if TEST_INTEGRATION is not set
const describeIntegration = shouldRunIntegrationTests() ? describe : describe.skip;

// Ollama doesn't require API keys - it's a local service
// Tests require Ollama to be running with the gpt-oss:20b model installed
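// To run locally (a sketch assuming the standard Ollama CLI; the exact test
// command for this repo is an assumption):
//   ollama pull gpt-oss:20b        # download the model
//   ollama serve                   # start the local server (default port 11434)
//   TEST_INTEGRATION=1 npm test    # opt in to the integration suite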

describeIntegration("IpcMain Ollama integration tests", () => {
  // Enable retries in CI for potential network flakiness with Ollama
  if (process.env.CI && typeof jest !== "undefined" && jest.retryTimes) {
    jest.retryTimes(3, { logErrorsBeforeRetry: true });
  }

  // Load tokenizer modules once before all tests (takes ~14s)
  // This ensures accurate token counts for API calls without timing out individual tests
  beforeAll(async () => {
    const { loadTokenizerModules } = await import("../../src/utils/main/tokenizer");
    await loadTokenizerModules();
  }, 30000); // 30s timeout for tokenizer loading

  test.concurrent(
    "should successfully send message to Ollama and receive response",
    async () => {
      // Set up the test environment
      const { env, workspaceId, cleanup } = await setupWorkspace("ollama");
      try {
        // Send a simple message to verify basic connectivity
        const result = await sendMessageWithModel(
          env.mockIpcRenderer,
          workspaceId,
          "Say 'hello' and nothing else",
          "ollama",
          "gpt-oss:20b"
        );

        // Verify the IPC call succeeded
        expect(result.success).toBe(true);

        // Collect and verify stream events
        const collector = createEventCollector(env.sentEvents, workspaceId);
        const streamEnd = await collector.waitForEvent("stream-end", 30000);

        expect(streamEnd).toBeDefined();
        assertStreamSuccess(collector);

        // Verify we received deltas
        const deltas = collector.getDeltas();
        expect(deltas.length).toBeGreaterThan(0);

        // Verify the response contains expected content
        const text = deltas.join("");
        expect(text).toMatch(/hello/i);
      } finally {
        await cleanup();
      }
    },
    45000 // Ollama can be slower than cloud APIs, especially on the first run
  );

  test.concurrent(
    "should successfully call tools with Ollama",
    async () => {
      const { env, workspaceId, cleanup } = await setupWorkspace("ollama");
      try {
        // Ask for the current time, which should trigger the bash tool
        const result = await sendMessageWithModel(
          env.mockIpcRenderer,
          workspaceId,
          "What is the current date and time? Use the bash tool to find out.",
          "ollama",
          "gpt-oss:20b"
        );

        expect(result.success).toBe(true);

        // Wait for the stream to complete
        const collector = createEventCollector(env.sentEvents, workspaceId);
        await collector.waitForEvent("stream-end", 60000);

        assertStreamSuccess(collector);

        // Verify the bash tool was called via events
        const events = collector.getEvents();
        const toolCallStarts = events.filter((e: any) => e.type === "tool-call-start");
        expect(toolCallStarts.length).toBeGreaterThan(0);

        const bashCall = toolCallStarts.find((e: any) => e.toolName === "bash");
        expect(bashCall).toBeDefined();

        // Verify we got a text response with date/time info
        const deltas = collector.getDeltas();
        const responseText = deltas.join("");

        // Should mention time or date in the response
        expect(responseText).toMatch(/time|date|am|pm|2024|2025/i);
      } finally {
        await cleanup();
      }
    },
    90000 // Tool calling can take longer
  );

  test.concurrent(
    "should handle file operations with Ollama",
    async () => {
      const { env, workspaceId, cleanup } = await setupWorkspace("ollama");
      try {
        // Ask to read a file that should exist
        const result = await sendMessageWithModel(
          env.mockIpcRenderer,
          workspaceId,
          "Read the package.json file and tell me the project name.",
          "ollama",
          "gpt-oss:20b"
        );

        expect(result.success).toBe(true);

        // Wait for the stream to complete
        const collector = createEventCollector(env.sentEvents, workspaceId);
        await collector.waitForEvent("stream-end", 60000);

        assertStreamSuccess(collector);

        // Verify the file_read tool was called via events
        const events = collector.getEvents();
        const toolCallStarts = events.filter((e: any) => e.type === "tool-call-start");
        expect(toolCallStarts.length).toBeGreaterThan(0);

        const fileReadCall = toolCallStarts.find((e: any) => e.toolName === "file_read");
        expect(fileReadCall).toBeDefined();

        // Verify the response mentions the project name (cmux)
        const deltas = collector.getDeltas();
        const responseText = deltas.join("");

        expect(responseText).toMatch(/cmux/i);
      } finally {
        await cleanup();
      }
    },
    90000 // File operations with reasoning
  );

  test.concurrent(
    "should handle errors gracefully when Ollama is not running",
    async () => {
      const { env, workspaceId, cleanup } = await setupWorkspace("ollama");
      try {
        // Send with empty provider options; no baseUrl override is applied,
        // so this exercises the default local endpoint
        const result = await sendMessageWithModel(
          env.mockIpcRenderer,
          workspaceId,
          "This should fail",
          "ollama",
          "gpt-oss:20b",
          {
            providerOptions: {
              ollama: {},
            },
          }
        );

        // Either outcome is acceptable: if Ollama is not running we expect an
        // error; if it is running, the stream should complete normally
        if (!result.success) {
          expect(result.error).toBeDefined();
        } else {
          const collector = createEventCollector(env.sentEvents, workspaceId);
          await collector.waitForEvent("stream-end", 30000);
        }
      } finally {
        await cleanup();
      }
    },
    45000
  );
});