{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4",
"authorship_tag": "ABX9TyP5A+4jBwHa+8xeTeQY7q1j"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"source": [
"**Assignment**: build a tool that takes a technical question, and responds with an explanation."
],
"metadata": {
"id": "jc57C2kha5-g"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "H8kU5EtiYEiU"
},
"outputs": [],
"source": [
"#install ollama\n",
"!curl -fsSL https://ollama.com/install.sh | sh"
]
},
{
"cell_type": "code",
"source": [
"# Start Ollama server\n",
"\n",
"import subprocess\n",
"\n",
"# Start the command in a background process\n",
"process = subprocess.Popen([\"ollama\", \"serve\"])\n",
"\n",
"# The kernel can continue execution while the process runs in the background\n",
"print(\"The 'ollama serve' process is running in the background.\")"
],
"metadata": {
"id": "KGsTG4x2cbye"
},
"execution_count": null,
"outputs": []
},
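{
"cell_type": "code",
"source": [
"# Minimal readiness check (a sketch): the server started above can take a moment\n",
"# before it accepts connections, so poll the same localhost endpoint before\n",
"# pulling a model or sending requests.\n",
"import time\n",
"import requests\n",
"\n",
"for _ in range(30):\n",
"    try:\n",
"        requests.get(\"http://localhost:11434\")\n",
"        print(\"Ollama server is ready.\")\n",
"        break\n",
"    except requests.exceptions.ConnectionError:\n",
"        time.sleep(1)\n",
"else:\n",
"    print(\"Ollama server did not become ready in time.\")"
],
"metadata": {},
"execution_count": null,
"outputs": []
},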
{
"cell_type": "code",
"source": [
"#test server\n",
"import requests\n",
"\n",
"requests.get(\"http://localhost:11434\").content"
],
"metadata": {
"id": "J_sgo2BUbJUc"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"#download llama model\n",
"!ollama pull llama3.2"
],
"metadata": {
"id": "myfRsLSAcYrG"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"def create_messages(user_query):\n",
" system_prompt = \"\"\"\n",
"You are a helpful technical assistant who knows programming languages.\n",
"You will answer the question about given code by explaining what that code does.\n",
" \"\"\"\n",
" user_prompt = \"Answer the below technical question:\\n\" + user_query\n",
" messages = [\n",
" {\"role\":\"system\", \"content\":system_prompt},\n",
" {\"role\":\"user\", \"content\":user_prompt},\n",
" ]\n",
" return messages"
],
"metadata": {
"id": "tEwjDW2scoiM"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"from openai import OpenAI\n",
"\n",
"OLLAMA_BASE_URL = \"http://localhost:11434/v1\"\n",
"\n",
"ollama_api = OpenAI(base_url=OLLAMA_BASE_URL, api_key='ollama')"
],
"metadata": {
"id": "AWTsU3RggDWl"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Llama model working good with code tasks(explanation)\n",
"\n",
"def call_model(question, enable_stream = False):\n",
" response = ollama_api.chat.completions.create(\n",
" model = \"llama3.2\",\n",
" messages = create_messages(question),\n",
" stream = enable_stream\n",
" )\n",
" return response"
],
"metadata": {
"id": "2uCECVbii1Ul"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Test - normal call\n",
"\n",
"result = call_model(\"\"\"\n",
"what below python code does, correct if it is wrong:\n",
"[x for x in range(5)].map(lambda x:x**2)\n",
"\"\"\")\n",
"display(result.choices[0].message.content)"
],
"metadata": {
"id": "Uck-yp44jP2P"
},
"execution_count": null,
"outputs": []
},
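{
"cell_type": "code",
"source": [
"# For reference when checking the model's answer: the snippet in the prompt above\n",
"# is invalid Python, since lists have no .map method (it raises AttributeError).\n",
"# Two equivalent working forms of \"square each of 0..4\":\n",
"squares_comprehension = [x**2 for x in range(5)]\n",
"squares_map = list(map(lambda x: x**2, range(5)))\n",
"print(squares_comprehension)  # [0, 1, 4, 9, 16]\n",
"print(squares_map)            # [0, 1, 4, 9, 16]"
],
"metadata": {},
"execution_count": null,
"outputs": []
},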
{
"cell_type": "code",
"source": [
"# Test with streaming - simulates typewriting\n",
"from IPython.display import display, update_display\n",
"\n",
"stream = call_model(\"explain in just 2 or 3 lines, what - a,b = b,a - does in python\", True)\n",
"\n",
"response=\"\"\n",
"display_handle = display(\"\", display_id=True)\n",
"for chunk in stream:\n",
" response += chunk.choices[0].delta.content or \"\"\n",
" update_display(response, display_id = display_handle.display_id)\n"
],
"metadata": {
"id": "-g7VHt78jTtl"
},
"execution_count": null,
"outputs": []
},
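{
"cell_type": "code",
"source": [
"# A minimal sketch of the assignment's tool: wrap the streaming pattern above in a\n",
"# single helper that takes a technical question and renders the explanation as it\n",
"# arrives. The `explain` name is just an illustrative wrapper around call_model.\n",
"from IPython.display import display, update_display\n",
"\n",
"def explain(question):\n",
"    stream = call_model(question, enable_stream=True)\n",
"    answer = \"\"\n",
"    handle = display(\"\", display_id=True)\n",
"    for chunk in stream:\n",
"        answer += chunk.choices[0].delta.content or \"\"\n",
"        update_display(answer, display_id=handle.display_id)\n",
"    return answer\n",
"\n",
"# Example usage (uncomment to run):\n",
"# explain(\"What does `yield` do in a Python function?\")"
],
"metadata": {},
"execution_count": null,
"outputs": []
},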
{
"cell_type": "code",
"source": [
"process.kill()"
],
"metadata": {
"id": "c-A0bFmKrA0O"
},
"execution_count": null,
"outputs": []
}
]
}