@@ -4,23 +4,17 @@
 import sys
 import time
 import inquirer
+import psutil
+import wget
 from interpreter import interpreter
 
-
 def get_ram():
-    import psutil
-
     total_ram = psutil.virtual_memory().total / (
         1024 * 1024 * 1024
     )  # Convert bytes to GB
     return total_ram
 
 def download_model(models_dir, models, interpreter):
-    # For some reason, these imports need to be inside the function
-    import inquirer
-    import psutil
-    import wget
-
     # Get RAM and disk information
     total_ram = get_ram()
     free_disk_space = psutil.disk_usage("/").free / (
@@ -77,6 +71,12 @@ def download_model(models_dir, models, interpreter):
             "size": 1.96,
             "url": "https://huggingface.co/jartine/phi-2-llamafile/resolve/main/phi-2.Q5_K_M.llamafile?download=true",
         },
+        {
+            "name": "Phi-3-mini",
+            "file_name": "Phi-3-mini-4k-instruct.Q5_K_M.llamafile",
+            "size": 2.84,
+            "url": "https://huggingface.co/jartine/Phi-3-mini-4k-instruct-llamafile/resolve/main/Phi-3-mini-4k-instruct.Q5_K_M.llamafile?download=true",
+        },
         {
             "name": "LLaVA 1.5",
             "file_name": "llava-v1.5-7b-q4.llamafile",
@@ -89,6 +89,12 @@ def download_model(models_dir, models, interpreter):
             "size": 5.15,
             "url": "https://huggingface.co/jartine/Mistral-7B-Instruct-v0.2-llamafile/resolve/main/mistral-7b-instruct-v0.2.Q5_K_M.llamafile?download=true",
         },
+        {
+            "name": "Llama-3-8B-Instruct",
+            "file_name": "Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile",
+            "size": 5.76,
+            "url": "https://huggingface.co/jartine/Meta-Llama-3-8B-Instruct-llamafile/resolve/main/Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile?download=true",
+        },
         {
             "name": "WizardCoder-Python-13B",
             "file_name": "wizardcoder-python-13b.llamafile",
@@ -130,6 +136,9 @@ def download_model(models_dir, models, interpreter):
         )
     ]
     answers = inquirer.prompt(questions)
+
+    if answers == None:
+        exit()
 
     # Get the selected model
     selected_model = next(
@@ -195,6 +204,8 @@ def download_model(models_dir, models, interpreter):
 ]
 answers = inquirer.prompt(questions)
 
+if answers == None:
+    exit()
 
 selected_model = answers["model"]
 
@@ -260,9 +271,13 @@ def list_ollama_models():
         ),
     ]
     name_answer = inquirer.prompt(name_question)
-    selected_name = name_answer["name"] if name_answer else None
 
-    if selected_name is "llama3":
+    if name_answer == None:
+        exit()
+
+    selected_name = name_answer["name"]
+
+    if selected_name == "llama3":
         # If the user selects llama3, we need to check if it's installed, and if not, install it
         all_models = list_ollama_models()
         if "llama3" not in all_models:
@@ -310,7 +325,11 @@ def list_ollama_models():
         ),
     ]
     model_name_answer = inquirer.prompt(model_name_question)
-    jan_model_name = model_name_answer["jan_model_name"] if model_name_answer else None
+
+    if model_name_answer == None:
+        exit()
+
+    jan_model_name = model_name_answer["jan_model_name"]
     interpreter.llm.model = f"jan/{jan_model_name}"
     interpreter.display_message(f"\nUsing Jan model: `{jan_model_name}` \n")
     time.sleep(1)
@@ -354,6 +373,9 @@ def list_ollama_models():
         )
     ]
     answers = inquirer.prompt(questions)
+
+    if answers == None:
+        exit()
 
     if answers["model"] == " ↓ Download new model":
         model_path = download_model(models_dir, models, interpreter)
@@ -383,7 +405,7 @@ def list_ollama_models():
             print("Model process terminated.")
 
     # Set flags for Llamafile to work with interpreter
-    interpreter.llm.model = "local"
+    interpreter.llm.model = "openai/local"
     interpreter.llm.temperature = 0
     interpreter.llm.api_base = "http://localhost:8080/v1"
     interpreter.llm.supports_functions = False
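Context for the recurring `if answers == None: exit()` guards added above: python-inquirer's `prompt()` swallows Ctrl+C by default and returns `None` instead of a dict, so indexing the result (e.g. `answers["model"]`) would raise a `TypeError` when the user cancels. A minimal standalone sketch of the pattern, not part of the commit, with a hypothetical prompt and choices:

import inquirer

# Hypothetical question, for illustration only.
questions = [
    inquirer.List("model", message="Which model?", choices=["llama3", "phi3"]),
]
answers = inquirer.prompt(questions)  # returns None if the user presses Ctrl+C

if answers == None:  # same guard style as the commit; `is None` is the idiomatic form
    exit()

print(answers["model"])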