@@ -86,7 +86,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
         try {
 
             // Check model exists
-            let modelExists = await ollamaCheckModel(inferenceConfig.endpoint, inferenceConfig.modelName);
+            let modelExists = await ollamaCheckModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
             if (token.isCancellationRequested) {
                 info(`Canceled after AI completion.`);
                 return;
@@ -111,7 +111,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
 
                 // Perform download
                 this.statusbar.text = `$(sync~spin) Downloading`;
-                await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName);
+                await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
                 this.statusbar.text = `$(sync~spin) Llama Coder`;
             }
             if (token.isCancellationRequested) {
@@ -125,6 +125,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
                 prefix: prepared.prefix,
                 suffix: prepared.suffix,
                 endpoint: inferenceConfig.endpoint,
+                bearerToken: inferenceConfig.bearerToken,
                 model: inferenceConfig.modelName,
                 format: inferenceConfig.modelFormat,
                 maxLines: inferenceConfig.maxLines,
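
All three hunks thread the same new `inferenceConfig.bearerToken` value through to the Ollama client helpers. The diff does not show the receiving side, but a minimal sketch of what it would presumably look like follows, assuming the token is attached as a standard `Authorization: Bearer …` header and that Ollama's `/api/tags` endpoint backs the model check; the helper names here are hypothetical, not the extension's actual implementation:

```ts
// Build request headers, adding an Authorization header only when a
// bearer token is configured (the setting is presumably optional).
function buildHeaders(bearerToken?: string): Record<string, string> {
    const headers: Record<string, string> = { 'Content-Type': 'application/json' };
    if (bearerToken) {
        headers['Authorization'] = `Bearer ${bearerToken}`;
    }
    return headers;
}

// Hypothetical model check against Ollama's /api/tags endpoint: list the
// locally available models and test whether the requested one is present.
async function ollamaCheckModelSketch(endpoint: string, model: string, bearerToken?: string): Promise<boolean> {
    const res = await fetch(`${endpoint}/api/tags`, { headers: buildHeaders(bearerToken) });
    if (!res.ok) {
        throw new Error(`Ollama request failed: ${res.status}`);
    }
    const body = await res.json() as { models: { name: string }[] };
    return body.models.some((m) => m.name === model);
}
```

Passing the token explicitly at each call site, rather than reading it from global configuration inside the helpers, keeps the Ollama client functions pure and matches how `endpoint` and `modelName` are already handed through.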