|
4 | 4 | "cell_type": "markdown", |
5 | 5 | "metadata": {}, |
6 | 6 | "source": [ |
7 | | - "This example uses Codestral via Ollama" |
| 7 | + "This notebook demonstrates running Open Interpreter fully locally, using Llama 3 via llamafile"
8 | 8 | ] |
9 | 9 | }, |
10 | 10 | { |
|
13 | 13 | "metadata": {}, |
14 | 14 | "outputs": [], |
15 | 15 | "source": [ |
16 | | - "!ollama run codestral" |
| 16 | + "## Download Llama 3\n",
| 17 | + "\n", |
| 18 | + "# Download the Meta-Llama-3-8B-Instruct llamafile (Q5_K_M quantization) from Hugging Face\n",
| 19 | + "!curl -L -o Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile 'https://huggingface.co/Mozilla/Meta-Llama-3-8B-Instruct-llamafile/resolve/main/Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile?download=true'\n", |
| 20 | + "\n", |
| 21 | + "# Make the downloaded file executable\n", |
| 22 | + "!chmod +x Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile\n",
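| 23 | + "\n",
| 24 | + "# Optional sanity check (hedged addition): the download is several GB, so confirm the\n",
| 25 | + "# file arrived at its full size and is executable before moving on\n",
| 26 | + "!ls -lh Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile"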
17 | 23 | ] |
18 | 24 | }, |
19 | 25 | { |
|
22 | 28 | "metadata": {}, |
23 | 29 | "outputs": [], |
24 | 30 | "source": [ |
25 | | - "!pip install open-interpreter" |
| 31 | + "## Install OI\n", |
| 32 | + "\n", |
| 33 | + "!pip install open-interpreter --quiet\n", |
| 34 | + "!pip install opencv-python --quiet" |
26 | 35 | ] |
27 | 36 | }, |
28 | 37 | { |
|
31 | 40 | "metadata": {}, |
32 | 41 | "outputs": [], |
33 | 42 | "source": [ |
| 43 | + "## Configure OI\n", |
| 44 | + "\n", |
34 | 47 | "from interpreter import interpreter\n", |
35 | 48 | "\n", |
36 | 49 | "interpreter.offline = True\n", |
37 | | - "interpreter.llm.model = \"ollama/codestral\"\n", |
38 | | - "interpreter.llm.api_base = \"http://localhost:11434\"\n", |
39 | | - "interpreter.llm.context_window = 32000\n", |
40 | | - "interpreter.llm.max_tokens = 8000\n", |
41 | | - "interpreter.auto_run = True\n", |
42 | 50 | "\n", |
43 | | - "def run(prompt):\n", |
44 | | - " interpreter.chat(prompt)\n", |
| 51 | + "interpreter.llm.model = \"openai/local\" # Tells OI to use an OpenAI-compatible server\n", |
| 52 | + "interpreter.llm.api_key = \"dummy_key\"  # Required by the client, ignored by llamafile\n",
| 53 | + "interpreter.llm.api_base = \"http://localhost:8081/v1\"  # Must match the server port used below\n",
| 54 | + "interpreter.llm.context_window = 7000  # Llama 3 8B has an 8k context window; leave headroom\n",
| 55 | + "interpreter.llm.max_tokens = 1000\n",
| 56 | + "interpreter.llm.supports_functions = False  # llamafile's endpoint doesn't do OpenAI function calling"
| 57 | + ] |
| 58 | + }, |
| 59 | + { |
| 60 | + "cell_type": "code", |
| 61 | + "execution_count": null, |
| 62 | + "metadata": {}, |
| 63 | + "outputs": [], |
| 64 | + "source": [ |
| 65 | + "## Start server, run OI\n", |
| 66 | + "\n", |
| 67 | + "import subprocess\n", |
| 68 | + "import threading\n", |
| 69 | + "import os\n", |
| 70 | + "\n", |
| 71 | + "# Stream the server's combined stdout/stderr into the notebook output\n",
| 72 | + "def read_output(process):\n",
| 72 | + " while True:\n", |
| 73 | + " output = process.stdout.readline()\n", |
| 74 | + " if output == b'' and process.poll() is not None:\n", |
| 75 | + " break\n", |
| 76 | + " if output:\n", |
| 77 | + " print(output.decode().strip())\n", |
| 78 | + "\n", |
| 79 | + "# Check that the file exists and has execute permissions\n",
| 80 | + "file_path = os.path.abspath('Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile')\n",
| 81 | + "assert os.access(file_path, os.X_OK), \"llamafile missing or not executable\"\n",
| 81 | + "\n", |
| 82 | + "# With shell=True the command must be one string; passed as a list, the flags were\n",
| 83 | + "# going to the shell itself rather than to llamafile, so they were silently ignored\n",
| 84 | + "command = f\"{file_path} --nobrowser --port 8081 -ngl 9999\"  # port matches api_base above\n",
| 85 | + "print(command)\n",
| 86 | + "\n",
| 87 | + "# Start the llamafile server with stderr redirected to stdout\n",
| 88 | + "process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n",
| 88 | + "\n", |
| 89 | + "# Thread to handle the output asynchronously\n", |
| 90 | + "thread = threading.Thread(target=read_output, args=(process,))\n", |
| 91 | + "thread.start()\n", |
| 92 | + "\n", |
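| 93 | + "# Hedged addition: block until the server answers before chatting, since loading\n",
| 94 | + "# an 8B model can take a while (assumes llamafile exposes llama.cpp's /health route)\n",
| 95 | + "import time\n",
| 96 | + "import urllib.request\n",
| 97 | + "\n",
| 98 | + "while True:\n",
| 99 | + "    try:\n",
| 100 | + "        urllib.request.urlopen(\"http://localhost:8081/health\", timeout=2)\n",
| 101 | + "        break\n",
| 102 | + "    except OSError:\n",
| 103 | + "        time.sleep(2)\n",
| 104 | + "\n",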
| 105 | + "# With the server streaming its logs in the background, start the interactive chat\n",
| 106 | + "interpreter.chat()\n",
| 107 | + "\n",
| 108 | + "# Chat finished: stop the server so read_output's loop (and its thread) can exit\n",
| 109 | + "process.terminate()\n",
| 110 | + "thread.join()\n",
45 | 99 | "\n", |
46 | | - "run(\"how many files are on my desktop?\")" |
| 112 | + "# Reap the terminated server process\n",
| 113 | + "process.wait()"
47 | 102 | ] |
48 | 103 | } |
49 | 104 | ], |
|