Commit 6421f45

Improve ChatMK model initialisation and loading process
1 parent d382c32 commit 6421f45
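
In short: the transformers.js setup moves out of the inline script in _layouts/post.html and into assets/js/chatmkSearch.js as a dynamic import, and the modal now loads the ~25 MB MiniLM embedding model (core search) before kicking off the larger chat model (optional enhancement). A minimal sketch of that staged sequence, condensed from the chatmkModal.js diff below with logging and error handling omitted:

// Sketch only - condensed from initializeChatMKModels() in the diff below.
async function initializeChatMKModels() {
  // 1. Embedding model first: small and required for semantic search.
  if (window.chatMKSearch && !window.chatMKSearch.model) {
    await window.chatMKSearch.preloadEmbeddingModel();
  }
  // 2. Chat model second: larger, treated as an optional enhancement.
  if (window.chatMKAI && !window.chatMKAI.isLoading && !window.chatMKAI.isLoaded) {
    window.chatMKAI.initialize();
  }
}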

File tree: 3 files changed, +189 −34 lines changed


_layouts/post.html

Lines changed: 0 additions & 10 deletions

@@ -274,16 +274,6 @@ <h5 class="block-title">Metadata</h5>
   <script src="/assets/js/hashArt.js"></script>

   <!-- ChatMK Scripts -->
-  <script type="module">
-    import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.17.2';
-
-    // Configure transformers.js to use Hugging Face CDN for model files
-    env.remoteURL = 'https://huggingface.co/';
-    env.allowRemoteModels = true;
-    env.localURL = null;
-
-    window.transformers = { pipeline, env };
-  </script>
   <script src="/assets/js/chatmkSearch.js"></script>
   <script type="module" src="/assets/js/chatmkAI.js"></script>
   <script src="/assets/js/chatmkModal.js"></script>

assets/js/chatmkModal.js

Lines changed: 34 additions & 13 deletions

@@ -57,10 +57,32 @@ function openChatMKModal() {
     window.chatMKSearch = new ChatMKSearch();
   }

-  // Initialize AI model when modal is first opened
-  if (window.chatMKAI && !window.chatMKAI.isLoading && !window.chatMKAI.isLoaded) {
-    console.log('ChatMK: Starting AI model initialization...');
-    window.chatMKAI.initialize();
+  // Load models in the right order: embedding first, then AI
+  initializeChatMKModels();
+}
+
+/**
+ * Initialize ChatMK models in the correct order
+ */
+async function initializeChatMKModels() {
+  console.log('ChatMK: Starting model initialization...');
+
+  try {
+    // 1. Load embedding model first (smaller, faster, core functionality)
+    if (window.chatMKSearch && !window.chatMKSearch.model) {
+      console.log('ChatMK: Loading embedding model first...');
+      // Pre-load the embedding model
+      await window.chatMKSearch.preloadEmbeddingModel();
+    }
+
+    // 2. Then start AI model loading (larger, enhancement)
+    if (window.chatMKAI && !window.chatMKAI.isLoading && !window.chatMKAI.isLoaded) {
+      console.log('ChatMK: Starting AI model initialization...');
+      window.chatMKAI.initialize();
+    }
+
+  } catch (error) {
+    console.error('ChatMK: Error during model initialization:', error);
   }
 }

@@ -175,7 +197,6 @@ async function sendChatMKMessage() {
   // Add user message
   addChatMKMessage('user', query);

-
   try {
     // Initialize ChatMK search if not already done
     if (!window.chatMKSearch) {

@@ -198,14 +219,16 @@ async function sendChatMKMessage() {
         searchResponse += `${index + 1}. **[${result.title}](${result.url})**\n`;
       });
       addChatMKMessage('assistant', searchResponse);
+    } else {
+      addChatMKMessage('assistant', "I couldn't find any relevant information about that topic. Try asking about data analytics, teaching, or other topics from Michal's knowledge base.");
     }

     // Try to get AI response if available
     if (window.chatMKAI && window.chatMKAI.isReady()) {
       try {
         console.log('ChatMK: Generating AI response...');

-        // Add loading indicator
+        // Add loading indicator for AI response
         addChatMKMessage('assistant', '<div class="typing-indicator"><span></span><span></span><span></span></div>');
         const loadingMessage = document.querySelector('.chatmk-messages .chatmk-message:last-child');


@@ -216,20 +239,18 @@ async function sendChatMKMessage() {
           loadingMessage.remove();
         }
         addChatMKMessage('assistant', aiResponse);
-        return;
+
       } catch (aiError) {
-        console.warn('ChatMK: AI response failed, falling back to search results:', aiError);
+        console.warn('ChatMK: AI response failed:', aiError);
         // Remove loading indicator if there was an error
         const loadingMessage = document.querySelector('.chatmk-messages .chatmk-message:last-child');
         if (loadingMessage && loadingMessage.innerHTML.includes('typing-indicator')) {
           loadingMessage.remove();
         }
       }
-    }
-
-    // Fallback: only show message if no search results were found at all
-    if (results.length === 0) {
-      addChatMKMessage('assistant', "I couldn't find any relevant information about that topic. Try asking about data analytics, teaching, or other topics from Michal's knowledge base.");
+    } else {
+      // AI model not ready yet - this is fine, user already has search results
+      console.log('ChatMK: AI model not ready yet, showing search results only');
     }

   } catch (error) {
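
The net effect on the reply flow in sendChatMKMessage(), condensed from the hunks above as a sketch (helper calls, the typing indicator, and error handling are elided; the AI call itself sits outside the hunks shown):

// Sketch only - condensed from the chatmkModal.js hunks above.
if (results.length > 0) {
  addChatMKMessage('assistant', searchResponse);   // numbered links to matching posts
} else {
  addChatMKMessage('assistant', "I couldn't find any relevant information about that topic. ...");
}

if (window.chatMKAI && window.chatMKAI.isReady()) {
  // The AI answer is generated and appended as a further assistant message;
  // if it fails, the search results above are left in place.
} else {
  console.log('ChatMK: AI model not ready yet, showing search results only');
}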

assets/js/chatmkSearch.js

Lines changed: 155 additions & 11 deletions

@@ -8,6 +8,48 @@ class ChatMKSearch {
     this.brainData = null;
     this.isLoaded = false;
     this.model = null;
+    this.transformersLoaded = false;
+  }
+
+  /**
+   * Dynamically load transformers.js library
+   */
+  async loadTransformersJS() {
+    if (this.transformersLoaded && window.transformers) {
+      return;
+    }
+
+    console.log('ChatMK: Loading transformers.js library...');
+
+    try {
+      // Dynamically import transformers.js
+      const { pipeline, env } = await import('https://cdn.jsdelivr.net/npm/@xenova/transformers@2.17.2');
+
+      // Configure transformers.js to use Hugging Face CDN for model files and prevent local path attempts
+      env.remoteURL = 'https://huggingface.co/';
+      env.allowRemoteModels = true;
+      env.localURL = null;
+      env.allowLocalModels = false;
+
+      // Force disable local model loading by configuring backends
+      env.backends = {
+        onnx: {
+          wasm: {
+            wasmPaths: 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.17.2/dist/',
+          }
+        }
+      };
+
+      // Make available globally
+      window.transformers = { pipeline, env };
+      this.transformersLoaded = true;
+
+      console.log('ChatMK: Transformers.js loaded and configured');
+
+    } catch (error) {
+      console.error('ChatMK: Failed to load transformers.js:', error);
+      throw error;
+    }
   }

   /**

@@ -27,32 +69,121 @@ class ChatMKSearch {
     }
   }

+  /**
+   * Show embedding model loading progress
+   */
+  showEmbeddingProgress(percent, mbLoaded, mbTotal) {
+    const messagesContainer = document.getElementById('chatmk-messages');
+    if (messagesContainer) {
+      // Check if there's already a progress message and update it
+      const existingProgress = messagesContainer.querySelector('.embedding-loading');
+
+      if (existingProgress) {
+        // Update existing progress message
+        const content = existingProgress.querySelector('.message-content');
+        if (content) {
+          content.innerHTML = `<p>Embedding model loading: ${percent}% (${mbLoaded}/${mbTotal}MB)</p>`;
+        }
+      } else {
+        // Create new progress message
+        const messageDiv = document.createElement('div');
+        messageDiv.className = 'chatmk-message system embedding-loading';
+        messageDiv.innerHTML = `
+          <div class="message-content">
+            <p>Embedding model loading: ${percent}% (${mbLoaded}/${mbTotal}MB)</p>
+          </div>
+        `;
+
+        messagesContainer.appendChild(messageDiv);
+        messagesContainer.scrollTop = messagesContainer.scrollHeight;
+      }
+    }
+  }
+
+  /**
+   * Simulate embedding model loading progress
+   */
+  async simulateEmbeddingProgress() {
+    const totalMB = 25; // Embedding model is ~25MB
+
+    // Simulate progress from 0% to 100%
+    for (let percent = 0; percent <= 100; percent += 20) {
+      const mbLoaded = Math.round((percent / 100) * totalMB);
+      this.showEmbeddingProgress(percent, mbLoaded, totalMB);
+
+      if (percent < 100) {
+        await new Promise(resolve => setTimeout(resolve, 200));
+      }
+    }
+  }
+
+  /**
+   * Pre-load the embedding model when modal opens
+   */
+  async preloadEmbeddingModel() {
+    if (this.model) {
+      console.log('ChatMK: Embedding model already loaded');
+      return;
+    }
+
+    console.log('ChatMK: Pre-loading embedding model...');
+
+    try {
+      // Start progress simulation
+      const progressPromise = this.simulateEmbeddingProgress();
+
+      // Load transformers.js library and model
+      await this.loadTransformersJS();
+
+      // Check if transformers.js is available
+      if (!window.transformers) {
+        console.warn('Transformers.js not available');
+        throw new Error('Transformers not available');
+      }
+
+      // Load the embedding model
+      this.model = await window.transformers.pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
+
+      // Wait for progress to complete
+      await progressPromise;

+      console.log('ChatMK: Embedding model pre-loaded successfully');
+
+      // Keep showing 100% when done (like AI model)
+      this.showEmbeddingProgress(100, 25, 25);
+
+    } catch (error) {
+      console.error('ChatMK: Failed to pre-load embedding model:', error);
+      this.hideModelLoadingMessage();
+      // Don't throw - allow fallback to keyword search
+    }
+  }
+
   /**
    * Generate embedding for a query text using transformers.js
    */
   async generateQueryEmbedding(text) {
     console.log('Generating embedding for query:', text);

     try {
+      // First, load transformers.js library dynamically if not already loaded
+      await this.loadTransformersJS();
+
       // Check if transformers.js is available
       if (!window.transformers) {
         console.warn('Transformers.js not available, falling back to keyword search');
         throw new Error('Transformers not available');
       }

-      // Initialize the model (same as used for content embeddings)
+      // If model not loaded, try to load it (fallback)
       if (!this.model) {
         console.log('Loading embedding model...');
-
-        // Configure transformers.js to use Hugging Face CDN
-        if (window.transformers.env) {
-          window.transformers.env.remoteURL = 'https://huggingface.co/';
-          window.transformers.env.allowRemoteModels = true;
-          window.transformers.env.localURL = null;
-        }
-
-        this.model = await window.transformers.pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
-        console.log('Model loaded successfully');
+        await this.preloadEmbeddingModel();
+      }
+
+      // If still no model, fall back to keyword search
+      if (!this.model) {
+        throw new Error('Embedding model not available');
       }

       // Generate embedding

@@ -69,6 +200,19 @@ class ChatMKSearch {
     }
   }

+  /**
+   * Hide loading message for model initialization
+   */
+  hideModelLoadingMessage() {
+    const messagesContainer = document.getElementById('chatmk-messages');
+    if (messagesContainer) {
+      const loadingMessage = messagesContainer.querySelector('.embedding-loading');
+      if (loadingMessage) {
+        loadingMessage.remove();
+      }
+    }
+  }
+
   /**
    * Calculate cosine similarity between two vectors
    */
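
The last hunk ends just before the existing cosineSimilarity helper, which this commit does not touch and which is not shown here. For reference, a minimal sketch of cosine similarity over two equal-length embedding vectors (a generic implementation, not necessarily the one in chatmkSearch.js):

// Generic sketch; the actual cosineSimilarity method in chatmkSearch.js is not part of this diff.
function cosineSimilarity(a, b) {
  let dot = 0, normA = 0, normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}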
