
Commit 51e7781

fix local3 notebook

1 parent c68469a · commit 51e7781

1 file changed: +66 -11 lines changed


examples/local3.ipynb

Lines changed: 66 additions & 11 deletions
@@ -4,7 +4,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "This example uses Codestral via Ollama"
+    "This notebook replicates running Open Interpreter locally and uses Llama3 via llamafile"
    ]
   },
   {
@@ -13,7 +13,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "!ollama run codestral"
+    "## Download LLama3\n",
+    "\n",
+    "# Download the Meta-Llama-3-8B-Instruct.llamafile\n",
+    "!curl -L -o Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile 'https://huggingface.co/Mozilla/Meta-Llama-3-8B-Instruct-llamafile/resolve/main/Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile?download=true'\n",
+    "\n",
+    "# Make the downloaded file executable\n",
+    "!chmod +x Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile"
    ]
   },
   {
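Note on the download hunk above: it relies on the !curl and !chmod shell magics. A pure-Python equivalent, for environments where curl is unavailable, could look roughly like the sketch below; the URL and file name are taken from the committed cell, while the rest (urllib download, explicit execute bits) is an illustrative assumption, not part of this commit.

import os
import stat
import urllib.request

# Same artifact as the committed "!curl" cell; the download logic itself is illustrative.
url = ("https://huggingface.co/Mozilla/Meta-Llama-3-8B-Instruct-llamafile/"
       "resolve/main/Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile?download=true")
dest = "Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile"

if not os.path.exists(dest):
    urllib.request.urlretrieve(url, dest)  # multi-GB download, may take a while

# Mirror "chmod +x": add the execute bits to the existing file mode.
os.chmod(dest, os.stat(dest).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)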
@@ -22,7 +28,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "!pip install open-interpreter"
+    "## Install OI\n",
+    "\n",
+    "!pip install open-interpreter --quiet\n",
+    "!pip install opencv-python --quiet"
    ]
   },
   {
@@ -31,19 +40,65 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "## Configure OI\n",
+    "\n",
+    "import cv2\n",
+    "import subprocess\n",
     "from interpreter import interpreter\n",
     "\n",
     "interpreter.offline = True\n",
-    "interpreter.llm.model = \"ollama/codestral\"\n",
-    "interpreter.llm.api_base = \"http://localhost:11434\"\n",
-    "interpreter.llm.context_window = 32000\n",
-    "interpreter.llm.max_tokens = 8000\n",
-    "interpreter.auto_run = True\n",
     "\n",
-    "def run(prompt):\n",
-    "    interpreter.chat(prompt)\n",
+    "interpreter.llm.model = \"openai/local\" # Tells OI to use an OpenAI-compatible server\n",
+    "interpreter.llm.api_key = \"dummy_key\"\n",
+    "interpreter.llm.api_base = \"http://localhost:8081/v1\"\n",
+    "interpreter.llm.context_window = 7000\n",
+    "interpreter.llm.max_tokens = 1000\n",
+    "interpreter.llm.supports_functions = False"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## Start server, run OI\n",
+    "\n",
+    "import subprocess\n",
+    "import threading\n",
+    "import os\n",
+    "\n",
+    "def read_output(process):\n",
+    "    while True:\n",
+    "        output = process.stdout.readline()\n",
+    "        if output == b'' and process.poll() is not None:\n",
+    "            break\n",
+    "        if output:\n",
+    "            print(output.decode().strip())\n",
+    "\n",
+    "# Check if the file exists and has execute permissions\n",
+    "file_path = os.path.abspath('Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile')\n",
+    "\n",
+    "# Why are the arguments not being used??\n",
+    "command = [file_path, \"--nobrowser\", \"-ngl\", \"9999\"]\n",
+    "print(command)\n",
+    "\n",
+    "# Setting up the Popen call with stderr redirected to stdout\n",
+    "process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n",
+    "\n",
+    "# Thread to handle the output asynchronously\n",
+    "thread = threading.Thread(target=read_output, args=(process,))\n",
+    "thread.start()\n",
+    "\n",
+    "# Here you can do other tasks concurrently\n",
+    "# For example:\n",
+    "interpreter.chat()\n",
+    "\n",
+    "# Wait for the thread to finish if the process completes\n",
+    "thread.join()\n",
     "\n",
-    "run(\"how many files are on my desktop?\")"
+    "# Ensure the process has completed\n",
+    "process.wait()"
    ]
   }
  ],
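Note on the "Why are the arguments not being used??" comment in the last cell: on POSIX systems, passing a list to subprocess.Popen together with shell=True makes only the first element the shell command; the remaining elements become arguments to /bin/sh itself, so --nobrowser and -ngl 9999 never reach the llamafile. The usual fix is to drop shell=True and pass the argument list directly, roughly as in this sketch (same file name and flags as the committed cell; everything else is illustrative, not part of this commit):

import os
import subprocess

# Same command as the committed cell, but without shell=True so the
# flags are passed to the llamafile instead of to /bin/sh.
file_path = os.path.abspath("Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile")
command = [file_path, "--nobrowser", "-ngl", "9999"]

process = subprocess.Popen(
    command,
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
)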

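A second note on the last cell: interpreter.llm.api_base points at http://localhost:8081/v1, so a server has to be accepting connections on that port before interpreter.chat() runs (the llamafile server typically defaults to port 8080, so it may also need an explicit --port 8081; the committed command passes no port flag). A small polling check along these lines would make the race with server startup less likely; the host and port mirror the configure cell, while the check itself and the timeout are assumptions rather than part of the commit:

import socket
import time

def wait_for_port(host="localhost", port=8081, timeout=120):
    # Poll until something accepts TCP connections on host:port or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=5):
                return True
        except OSError:
            time.sleep(2)
    return False

if not wait_for_port():
    raise RuntimeError("No server listening on localhost:8081; is the llamafile running?")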