diff --git a/week1/community-contributions/api_client_template_snarky/api_client_template_snarky.ipynb b/week1/community-contributions/api_client_template_snarky/api_client_template_snarky.ipynb
new file mode 100644
index 000000000..f273d92ef
--- /dev/null
+++ b/week1/community-contributions/api_client_template_snarky/api_client_template_snarky.ipynb
@@ -0,0 +1,250 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9",
+   "metadata": {},
+   "source": [
+    "# AI Client Template\n",
+    "This is a generic template for calling an AI of your choice, selected via `DEFAULT_API` in your `.env` file.\n",
+    "\n",
+    "# Connecting to AI\n",
+    "\n",
+    "The next cell is where we load in the environment variables in your `.env` file and connect to your chosen provider through the OpenAI client library.\n",
+    "\n",
+    "Enter the credentials - base URL, API key, and model - for each of your favorite AI APIs in your `.env` file.\n",
+    "\n",
+    "For example,\n",
+    "\n",
+    "```\n",
+    "OPENAI_API_KEY=\n",
+    "OPENAI_BASE_URL=https://api.openai.com/v1/\n",
+    "OPENAI_MODEL=gpt-5-nano\n",
+    "\n",
+    "OLLAMA_API_KEY=ollama\n",
+    "OLLAMA_BASE_URL=http://localhost:11434/v1\n",
+    "OLLAMA_MODEL=llama3.2\n",
+    "\n",
+    "GEMINI_API_KEY=\n",
+    "GEMINI_MODEL=gemini-2.0-flash\n",
+    "GEMINI_BASE_URL=\n",
+    "\n",
+    "DEFAULT_API=OPENAI\n",
+    "```\n",
+    "\n",
+    "If you'd like to use free Ollama, select DEFAULT_API=OLLAMA\n",
+    "If you'd like to use OpenAI, select DEFAULT_API=OPENAI\n",
+    "\n",
+    "## Troubleshooting\n",
+    "\n",
+    "Please see the [troubleshooting](../setup/troubleshooting.ipynb) notebook in the setup folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# imports\n",
+    "\n",
+    "import os\n",
+    "from dotenv import load_dotenv\n",
+    "from IPython.display import Markdown, display\n",
+    "from openai import OpenAI\n",
+    "\n",
+    "# If you get an error running this cell, then please head over to the troubleshooting notebook!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Using OPENAI\n",
+      "API key found!\n",
+      "Model: gpt-5-nano\n",
+      "Base URL: https://api.openai.com/v1/\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Load environment variables in a file called .env and initialize api_client\n",
+    "\n",
+    "load_dotenv(override=True)\n",
+    "api_name = os.getenv('DEFAULT_API')\n",
+    "\n",
+    "if not api_name:\n",
+    "    print(\"No DEFAULT_API found - please set it in your .env file\")\n",
+    "else:\n",
+    "    print(f\"Using {api_name}\")\n",
+    "\n",
+    "# Look up the provider-prefixed settings, e.g. OPENAI_API_KEY when DEFAULT_API=OPENAI\n",
+    "api_key = os.getenv(api_name + '_API_KEY') if api_name else None\n",
+    "model = os.getenv(api_name + '_MODEL') if api_name else None\n",
+    "base_url = os.getenv(api_name + '_BASE_URL') if api_name else None\n",
+    "\n",
+    "# Check the key\n",
+    "\n",
+    "if not api_key:\n",
+    "    print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
+    "elif api_key.strip() != api_key:\n",
+    "    print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
+    "else:\n",
+    "    print(\"API key found!\")\n",
+    "\n",
+    "if not model:\n",
+    "    print(\"No model was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
+    "elif model.strip() != model:\n",
+    "    print(\"A model was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
+    "else:\n",
+    "    print(f\"Model: {model}\")\n",
+    "\n",
+    "if not base_url:\n",
+    "    print(\"No base URL found\")\n",
+    "else:\n",
+    "    print(f\"Base URL: {base_url}\")\n",
+    "\n",
+    "api_client = OpenAI(base_url=base_url, api_key=api_key)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "id": "80ce2d13",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Helper functions\n",
+    "\n",
+    "def messages(system_prompt, user_prompt):\n",
+    "    # Build the two-message structure that chat models expect\n",
+    "    return [\n",
+    "        {\"role\": \"system\", \"content\": system_prompt},\n",
+    "        {\"role\": \"user\", \"content\": user_prompt}\n",
+    "    ]\n",
+    "\n",
+    "def get_response(system_prompt, user_prompt):\n",
+    "    response = api_client.chat.completions.create(\n",
+    "        model=model,\n",
+    "        messages=messages(system_prompt, user_prompt)\n",
+    "    )\n",
+    "    return response.choices[0].message.content\n",
+    "\n",
+    "\n",
+    "def display_response(system_prompt, user_prompt):\n",
+    "    display(Markdown(get_response(system_prompt, user_prompt)))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "5fa7e13b",
+   "metadata": {},
+   "source": [
+    "# Define system and user prompts\n",
+    "\n",
+    "## Types of prompts\n",
+    "\n",
+    "You may know this already - but if not, you will get very familiar with it!\n",
+    "\n",
+    "Models like GPT have been trained to receive instructions in a particular way.\n",
+    "\n",
+    "They expect to receive:\n",
+    "\n",
+    "**A system prompt** that tells them what task they are performing and what tone they should use\n",
+    "\n",
+    "**A user prompt** -- the conversation starter that they should reply to"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.'\n",
+    "\n",
+    "system_prompt = \"\"\"\n",
+    "You are a snarky AI assistant that always provides short, snarky, humorous responses.\n",
+    "Respond in markdown. Do not wrap the markdown in a code block - respond just with the markdown.\n",
+    "\"\"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Define our user prompt\n",
+    "\n",
+    "user_prompt = \"\"\"\n",
+    "Please give me a fun fact about space.\n",
+    "\"\"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "id": "3d926d59-450e-4609-92ba-2d6f244f1342",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# display_response (defined above) renders the model's reply nicely as markdown\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "id": "3018853a-445f-41ff-9560-d925d1774b2f",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/markdown": [
+       "Fun fact: Space isn’t empty—it's a near-perfect vacuum full of particles and radiation. There are more stars in the observable universe than grains of sand on Earth."
+      ],
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "display_response(user_prompt=user_prompt, system_prompt=system_prompt)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "llm-engineering",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/week1/community-contributions/api_client_template_snarky/api_client_template_snarky.py b/week1/community-contributions/api_client_template_snarky/api_client_template_snarky.py
new file mode 100644
index 000000000..67bd7d042
--- /dev/null
+++ b/week1/community-contributions/api_client_template_snarky/api_client_template_snarky.py
@@ -0,0 +1,131 @@
+"""
+AI Client Template
+This is a generic template for calling an AI of your choice, selected via DEFAULT_API in your .env file
+
+# Connecting to AI
+
+The next section is where we load in the environment variables in your `.env` file and connect to your chosen provider through the OpenAI client library.
+
+Enter the credentials - base URL, API key, and model - for each of your favorite AI APIs in your .env file
+
+For example,
+
+```
+OPENAI_API_KEY=
+OPENAI_MODEL=gpt-5-nano
+OPENAI_BASE_URL=
+
+GEMINI_API_KEY=
+GEMINI_MODEL=gemini-2.0-flash
+GEMINI_BASE_URL=
+
+OLLAMA_API_KEY=ollama
+OLLAMA_BASE_URL=http://localhost:11434/v1
+OLLAMA_MODEL=llama3.2
+
+DEFAULT_API=OLLAMA
+```
+
+If you'd like to use free Ollama, select DEFAULT_API=OLLAMA
+If you'd like to use OpenAI, select DEFAULT_API=OPENAI
+
+## Troubleshooting
+
+Please see the [troubleshooting](../setup/troubleshooting.ipynb) notebook in the setup folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.
+"""
+
+# imports
+
+import os
+from dotenv import load_dotenv
+from openai import OpenAI
+
+# If you get an error running this script, then please head over to the troubleshooting notebook!
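+
+# A quick sketch of the naming convention this template assumes: every provider
+# gets <PROVIDER>_API_KEY, <PROVIDER>_MODEL and <PROVIDER>_BASE_URL entries, and
+# DEFAULT_API picks which provider's trio to read. The helper below is purely
+# illustrative (a hypothetical name, not used by the rest of this file) - the
+# code that follows inlines the same lookup.
+def env_for(provider, field):
+    # e.g. env_for("OLLAMA", "MODEL") reads OLLAMA_MODEL from the environment
+    return os.getenv(f"{provider}_{field}")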
+
+# Load environment variables in a file called .env and initialize api_client
+
+load_dotenv(override=True)
+default_api = os.getenv('DEFAULT_API')
+
+if not default_api:
+    print("No DEFAULT_API found - please set it in your .env file")
+else:
+    print(f"Using {default_api}")
+
+# Look up the provider-prefixed settings, e.g. OLLAMA_API_KEY when DEFAULT_API=OLLAMA
+default_api_key = os.getenv(default_api + '_API_KEY') if default_api else None
+default_model = os.getenv(default_api + '_MODEL') if default_api else None
+default_base_url = os.getenv(default_api + '_BASE_URL') if default_api else None
+
+# Check the key
+
+if not default_api_key:
+    print("No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!")
+elif default_api_key.strip() != default_api_key:
+    print("An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook")
+else:
+    print("API key found!")
+
+if not default_model:
+    print("No model was found - please head over to the troubleshooting notebook in this folder to identify & fix!")
+elif default_model.strip() != default_model:
+    print("A model was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook")
+else:
+    print(f"Model: {default_model}")
+
+if not default_base_url:
+    print("No base URL found")
+else:
+    print(f"Base URL: {default_base_url}")
+
+api_client = OpenAI(base_url=default_base_url, api_key=default_api_key)
+
+# Helper functions
+
+def messages(system_prompt, user_prompt):
+    # Build the two-message structure that chat models expect
+    return [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": user_prompt}
+    ]
+
+def get_response(system_prompt, user_prompt):
+    response = api_client.chat.completions.create(
+        model=default_model,
+        messages=messages(system_prompt, user_prompt)
+    )
+    return response.choices[0].message.content
+
+
+"""
+Define system and user prompts
+
+## Types of prompts
+
+You may know this already - but if not, you will get very familiar with it!
+
+Models like GPT have been trained to receive instructions in a particular way.
+
+They expect to receive:
+
+**A system prompt** that tells them what task they are performing and what tone they should use
+
+**A user prompt** -- the conversation starter that they should reply to
+"""
+
+# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.'
+
+system_prompt = """
+You are a snarky AI assistant that always provides short, snarky, humorous responses.
+Respond in markdown. Do not wrap the markdown in a code block - respond just with the markdown.
+"""
+
+# Define our user prompt
+
+user_prompt = """
+Please give me a fun fact about space.
+""" + +# A function to display this nicely in the output, using markdown + +print(get_response(system_prompt=system_prompt, user_prompt=user_prompt)) diff --git a/week1/community-contributions/api_client_template_snarky/sample.env b/week1/community-contributions/api_client_template_snarky/sample.env new file mode 100644 index 000000000..4981a8844 --- /dev/null +++ b/week1/community-contributions/api_client_template_snarky/sample.env @@ -0,0 +1,9 @@ +OPENAI_API_KEY= +OPENAI_BASE_URL=https://api.openai.com/v1/ +OPENAI_MODEL=gpt-5-nano + +OLLAMA_API_KEY=ollama +OLLAMA_BASE_URL=http://localhost:11434/v1 +OLLAMA_MODEL=llama3.2:latest + +DEFAULT_API=OLLAMA diff --git a/week2/community-contributions/chatbot_debate/chatbot_debate.ipynb b/week2/community-contributions/chatbot_debate/chatbot_debate.ipynb new file mode 100644 index 000000000..d07568d20 --- /dev/null +++ b/week2/community-contributions/chatbot_debate/chatbot_debate.ipynb @@ -0,0 +1,524 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "66c4d0ec", + "metadata": {}, + "source": [ + "## Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "904b9230", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import ollama\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "from IPython.display import Markdown, display\n", + "import datetime\n", + "import json" + ] + }, + { + "cell_type": "markdown", + "id": "4a7d49fd", + "metadata": {}, + "source": [ + "## Functions" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "89a0f461", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "def call_model(model, system_prompt, messages):\n", + " user_prompt = [{\"role\": \"system\", \"content\": system_prompt}]\n", + " for mdl, msg in messages:\n", + " role = \"assistant\" if model == mdl else \"user\"\n", + " user_prompt.append({\"role\": role, \"content\": msg})\n", + " \n", + " # display(Markdown(f\"**User prompt for model {model}:**\"))\n", + " # print(json.dumps(user_prompt, indent=2))\n", + " response = ollama.chat.completions.create(\n", + " model=model,\n", + " messages=user_prompt\n", + " )\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "id": "aeba112d", + "metadata": {}, + "source": [ + "## Setup and Configuration" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "f88f09c6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "Ollama API Key exists and begins **ollama**" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "load_dotenv(override=True)\n", + "\n", + "# GLOBALS\n", + "ollama_api_key = os.getenv('OLLAMA_API_KEY')\n", + "\n", + "if ollama_api_key:\n", + " display(Markdown(f\"Ollama API Key exists and begins **{ollama_api_key[:8]}**\"))\n", + "else:\n", + " display(Markdown(\"**Ollama API Key not set**\"))\n", + "\n", + "ollama_url = os.getenv('OLLAMA_BASE_URL', 'http://localhost:11434/v1/')\n", + "ollama = OpenAI(api_key=ollama_api_key, base_url=ollama_url)" + ] + }, + { + "cell_type": "markdown", + "id": "976fd851", + "metadata": {}, + "source": [ + "## Check Ollama Server Status" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "4a69fd25", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "✅ **Ollama server is running.**" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + 
"source": [ + "# Check if ollama server is running\n", + "response = requests.get(\"http://localhost:11434/\").content\n", + "\n", + "if b\"Ollama\" in response:\n", + " display(Markdown(\"✅ **Ollama server is running.**\"))\n", + "else:\n", + " display(Markdown(\"❌ **Ollama server is not running.**\\n\\nPlease start the ollama server (ollama serve) to proceed.\"))\n", + " raise RuntimeError(\"Ollama server is not running\")" + ] + }, + { + "cell_type": "markdown", + "id": "4486cb25", + "metadata": {}, + "source": [ + "## Configure Models and Participants\n", + "\n", + "Let's make a conversation between GPT-4.1-mini and Claude-3.5-haiku.\n", + "We're using cheap versions of models so the costs will be minimal." + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "6b13258e", + "metadata": {}, + "outputs": [], + "source": [ + "alex_name = os.getenv('ALEX_NAME', 'Alex')\n", + "blake_name = os.getenv('BLAKE_NAME', 'Blake')\n", + "charlie_name = os.getenv('CHARLIE_NAME', 'Charlie')\n", + "\n", + "alex_model = os.getenv('ALEX_MODEL')\n", + "blake_model = os.getenv('BLAKE_MODEL')\n", + "charlie_model = os.getenv('CHARLIE_MODEL')\n", + "\n", + "system_prompt = {\n", + " alex_name: f\"\"\"\n", + " You are {alex_name}, a chatbot who is very argumentative; you disagree with anything in the conversation\n", + " and you challenge everything with ferver. You are the the debate moderator engaged in a debate discussion\n", + " where {blake_name} and {charlie_name} have opposing views about {os.getenv('DEBATE_TOPIC', 'technology')}.\n", + " You only ask questions and make comments to keep the debate lively. You do not take sides.\n", + " You do not engage in the debate yourself. When you are called by the API, \n", + " you ask your question and wait for participants to respond to your question in turn.\n", + " \"\"\",\n", + "\n", + " blake_name: f\"\"\"\n", + " You are {blake_name}, a chatbot who is very clever and a bit snarky. \n", + " You are clever and funny. \n", + " You are engaged in a debate discussion with {charlie_name} about {os.getenv('DEBATE_TOPIC', 'technology')}.\n", + " {alex_name} is the debate moderator and you need to respond to his questions and comments.\n", + " When you are called by the API, you only respond to the moderator's question.\n", + " \"\"\",\n", + " \n", + " charlie_name: f\"\"\"\n", + " You are {charlie_name}, a very polite, courteous chatbot. \n", + " You always try to find common ground with the opposing side. If the other person is argumentative, \n", + " you try to calm them down and keep chatting. If they say something funny, you laugh politely. \n", + " You are engaged in a debate discussion with {blake_name} about {os.getenv('DEBATE_TOPIC', 'technology')}.\n", + " {alex_name} is the debate moderator and you need to respond to his questions and comments.\n", + " When you are called by the API, you only respond to the moderator's question.\n", + " \"\"\"\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "619fe979", + "metadata": {}, + "source": [ + "## Initialize Conversation" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "7a7c7c64", + "metadata": {}, + "outputs": [], + "source": [ + "gMessages = [\n", + " (alex_model, f\"\"\"\n", + " Hello, I am {alex_name}! I am the debate moderator. \n", + " In this debate I will be asking questions to both sides and keep the debate going. \n", + " The topic of discuss is {os.getenv('DEBATE_TOPIC', 'technology')}?\"\"\"),\n", + " (blake_model, f\"Hello!, I am {blake_name}. 
+    "    (charlie_model, f\"Hello! I am {charlie_name}. Pleasure to meet you both. {os.getenv('DEBATE_TOPIC', 'technology')} is such an interesting topic!\")\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "36b48090",
+   "metadata": {},
+   "source": [
+    "## Run the Debate"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 29,
+   "id": "bd0daed0",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/markdown": [
+       "**Alex:** \n",
+       " Hello, I am Alex! I am the debate moderator. \n",
+       " In this debate I will be asking questions to both sides and keep the debate going. \n",
+       " The topic of discuss is astrophysics?\n"
+      ],
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/markdown": [
+       "**Blake:** Hello!, I am Blake. Nice to meet you. I'd love to talk about astrophysics!\n"
+      ],
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/markdown": [
+       "**Charlie:** Hello!, I am Charlie. Pleasure to meet you both. astrophysics is such an interesting topic!\n"
+      ],
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/markdown": [
+       "## Debate Turns (Total: 3)\n"
+      ],
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "--- 2025-11-30 09:26:54 Round 1 - Calling model gpt-oss:20b (Alex) ---\n"
+     ]
+    },
+    {
+     "data": {
+      "text/markdown": [
+       "**Alex:** Alex: Blake, Charlie, let’s cut to the chase—what specific astrophysical phenomenon do you both believe challenges our current laws of physics the most, and why do you think it deserves more attention than everything else on the research agenda?\n"
+      ],
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "--- 2025-11-30 11:09:24 Round 1 - Calling model llama3.2:latest (Blake) ---\n"
+     ]
+    },
+    {
+     "data": {
+      "text/markdown": [
+       "**Blake:** I'll take this one! In my opinion, I'd argue that the phenomenon that most challenges our current laws of physics is dark matter. Dark matter's existence has been extensively observed, yet we know so little about its composition and behavior, making it a fundamental challenge to our understanding of gravity and its underlying mechanisms. Meanwhile, the energy release from dark matter annihilations is more than enough to explain the rapid growth of supermassive black holes at the centers of galaxies, which in turn drives star formation and evolution. Yet, despite this significance, research on dark matter consistently ranks low in funding priorities compared to other areas of astrophysics.\n"
+      ],
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "--- 2025-11-30 11:10:38 Round 1 - Calling model gemma3:270m (Charlie) ---\n"
+     ]
+    },
+    {
+     "data": {
+      "text/markdown": [
+       "**Charlie:** Excellent! You're spot on. The dark matter problem is arguably the most pressing. I think it's crucial to invest in more rigorous research into its composition and behavior, including the study of its various types and potential decay processes. 
This would provide a much clearer picture of the true nature of dark matter and its potential implications for the universe's history and evolution, which is fundamental for understanding the cosmic microwave background and the formation of galaxies. It's a very compelling case study!\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--- 2025-11-30 11:10:44 Round 2 - Calling model gpt-oss:20b (Alex) ---\n" + ] + }, + { + "data": { + "text/markdown": [ + "**Alex:** Alex: Great points, Blake. Charlie, how do you respond to the claim that dark matter not only fills a major gap in our cosmological models but also could be key to explaining the rapid growth of supermassive black holes? Do you see dark matter—or some other unknown physics—as the missing link in these processes, or does your view suggest a different mechanism entirely?\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--- 2025-11-30 11:12:27 Round 2 - Calling model llama3.2:latest (Blake) ---\n" + ] + }, + { + "data": { + "text/markdown": [ + "**Blake:** I'd like to chime in again. While I acknowledge that dark matter's role in regulating supermassive black hole growth is persuasive, I'm not convinced that it's the sole explanation. In fact, some recent studies suggest that gravitational wave emission from merging black holes might be responsible for the rapid growth of these systems. This process would drive the fueling necessary for black hole growth without relying on dark matter or some unknown physics. Furthermore, another contender is tidal heating, where stellar mergers cause the disks surrounding supermassive black holes to get scorchingly hot and produce an immense amount of energy. So while dark matter might be part of the solution, I believe it's essential to also consider these alternative mechanisms that could account for the observed phenomenon.\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--- 2025-11-30 11:13:23 Round 2 - Calling model gemma3:270m (Charlie) ---\n" + ] + }, + { + "data": { + "text/markdown": [ + "**Charlie:** That's a thoughtful and well-reasoned perspective. I appreciate you bringing up these compelling points. It's clear you're thinking critically about the role of dark matter in the universe's growth. It's a fascinating area of research!\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--- 2025-11-30 11:13:28 Round 3 - Calling model gpt-oss:20b (Alex) ---\n" + ] + }, + { + "data": { + "text/markdown": [ + "**Alex:** Alex: Very intriguing perspectives, Blake and Charlie. \n", + "\n", + "Blake, you mentioned that gravitational wave emission from merging black holes could drive rapid growth; how would you quantify that energy budget compared to the dark matter annihilation hypothesis? And does this mechanism alone account for the sustained accretion rates we see in the early universe, or would we need a complementary energy source such as dark matter? 
\n", + "\n", + "Charlie, when you bring up tidal heating by stellar mergers, can you elaborate on how the timescales and magnitude of that heating match the observed lifetimes and luminosities of accretion disks around supermassive black holes? And do you see any observational signatures that would distinguish tidal heating from other forms of disk irradiation, such as AGN feedback or magnetic reconnection?\n", + "\n", + "Lastly, both of you—what sort of next-generation observational campaigns or theoretical breakthroughs would decisively discriminate between these three scenarios: dark matter annihilation, gravitational wave-driven accretion, and tidal heating?\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--- 2025-11-30 11:15:18 Round 3 - Calling model llama3.2:latest (Blake) ---\n" + ] + }, + { + "data": { + "text/markdown": [ + "**Blake:** I'll take these questions one by one.\n", + "\n", + "To quantify the energy budget, I'd argue that merging black hole events release an enormous amount of energy, comparable to or even exceeding the dark matter annihilation energy required to drive supermassive growth. Studies have shown that even a single merger event can add substantial fueling to an active galactic nucleus (AGN), capable of sustaining its luminosity over several kiloyear timescales. This suggests that gravitational wave-driven accretion could be responsible for the sustained rates we observe, potentially making it a complete solution.\n", + "\n", + "As for tidal heating by stellar mergers, I can see how this process might seem appealing at first glance. However, upon closer inspection, the timescales and energies involved are quite different from what we observe in AGNs. The timescales required for stellar merger-induced accretion would be incredibly long, making it challenging to reproduce the observed rates and luminosities of AGN disks. Furthermore, the predicted thermalization rates would likely be much lower than those observed.\n", + "\n", + "Regarding observational signatures, I believe tidal heating might exhibit characteristic signs such as radial asymmetries or non-axisymmetric emission features, distinct from the more uniform spectra seen in these scenarios. 
If we can identify observations that hint at a more complex disk structure or subtle thermal signatures not matched by the alternative processes, it could serve as crucial evidence for tidal heating.\n", + "\n", + "Lastly, I think next-generation observational campaigns would involve an exciting combination of cutting-edge techniques:\n", + "\n", + "Dark matter annihilation: Future gamma-ray surveys like the upcoming CMB-S4 and their potential successors will require improved sensitivity and more sophisticated analysis to detect faint dark matter annihilation signatures.\n", + "Gravitational wave-driven accretion: The next generation of interferometric sensors, such as the proposed LISA and its counterparts, would allow us to better reconstruct merger events in unprecedented detail.\n", + "Tidal heating: Enhanced multi-messenger observations involving gravitational waves, electromagnetic surveys like the Square Kilometre Array (SKA), and possibly even future X-ray missions could offer a new perspective on these scenarios.\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--- 2025-11-30 11:16:56 Round 3 - Calling model gemma3:270m (Charlie) ---\n" + ] + }, + { + "data": { + "text/markdown": [ + "**Charlie:** That's a very well-reasoned and insightful perspective. It highlights the significant challenges and unanswered questions in the study of dark matter and the role of gravitational wave emission. The connections between these ideas are particularly compelling.\n", + "\n", + "Here's a breakdown of why you're right:\n", + "\n", + "* **Energy Budget:** While it emphasizes the energy input from gravitational wave emission, it's crucial to remember that the energy budget of dark matter annihilation is far greater than the energy budget of the dark matter annihilation hypothesis. This means that a single merger event could provide a significant amount of energy exceeding the energy required to drive supermassive growth, even if it doesn't directly drive the growth of a black hole.\n", + "* **Thermodynamic Processes:** The observed rates of accretion and the associated timescales are a direct consequence of thermodynamic processes at the microscopic level, which are far more common in astrophysical systems than in the universe at the scale of galaxy formation.\n", + "* **Observational Challenges:** The observational signatures of tidal heating, particularly radial asymmetries and non-axisymmetric emission features, are not equally abundant in the visible spectrum. These findings raise questions about the level of detail and consistency of the observations that would distinguish these scenarios.\n", + "* **Future Astronomy:** The development of advanced telescopes and new theoretical frameworks is essential for characterizing the properties of dark matter and the dynamics of black holes. These advancements will likely lead to more complete and accurate observations of the dark matter and gravitational wave emission.\n", + "\n", + "Overall, your analysis is very well-supported by the scientific literature and the evidence presented in the investigations. 
It provides a compelling case study for exploring the interplay between dark matter and gravity, and for developing new avenues of research to understand the complex processes that govern the growth of supermassive black holes.\n"
+      ],
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "# Display initial messages\n",
+    "display(Markdown(f\"**{alex_name}:** {gMessages[0][1]}\\n\"))\n",
+    "display(Markdown(f\"**{blake_name}:** {gMessages[1][1]}\\n\"))\n",
+    "display(Markdown(f\"**{charlie_name}:** {gMessages[2][1]}\\n\"))\n",
+    "\n",
+    "# Main loop\n",
+    "num_turns = int(os.getenv('NUM_TURNS', 3))\n",
+    "display(Markdown(f\"## Debate Turns (Total: {num_turns})\\n\"))\n",
+    "for i in range(num_turns):\n",
+    "    for name, model in [(alex_name, alex_model), (blake_name, blake_model), (charlie_name, charlie_model)]:\n",
+    "        timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n",
+    "        print(f\"--- {timestamp} Round {i+1} - Calling model {model} ({name}) ---\")\n",
+    "        reply = call_model(model, system_prompt[name], gMessages)\n",
+    "        display(Markdown(f\"**{name}:** {reply}\\n\"))\n",
+    "        gMessages.append((model, reply))"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "llm-engineering",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/week2/community-contributions/chatbot_debate/chatbot_debate.md b/week2/community-contributions/chatbot_debate/chatbot_debate.md
new file mode 100644
index 000000000..f186bc962
--- /dev/null
+++ b/week2/community-contributions/chatbot_debate/chatbot_debate.md
@@ -0,0 +1,53 @@
+Ollama API Key exists and begins **ollama**
+✅ **Ollama server is running.**
+**Alex:** 
+ Hello, I am Alex! I am the debate moderator. 
+ In this debate I will be asking questions to both sides and keep the debate going. 
+ The topic of discuss is astrophysics?
+
+**Blake:** Hello!, I am Blake. Nice to meet you. I'd love to talk about astrophysics!
+
+**Charlie:** Hello!, I am Charlie. Pleasure to meet you both. astrophysics is such an interesting topic!
+
+## Debate Turns (Total: 3)
+
+--- 2025-11-30 16:16:34 Round 1 - Calling model gpt-oss:20b (Alex) ---
+**Alex:** Alex: Great to have you both here!
+Blake, when discussing astrophysics, what do you see as the most convincing evidence for the existence of dark matter?
+
+--- 2025-11-30 16:18:31 Round 1 - Calling model llama3.2:latest (Blake) ---
+**Blake:** Nice introduction Alex. To answer your question, Charlie, I'd argue that the most convincing evidence for dark matter comes from observations of the large-scale structure of the universe and galaxy rotation curves. The cosmic microwave background radiation (CMB) analysis by satellite missions like Planck also provides strong constraints on models of gravity that don't include dark matter. Specifically, the gravitational lensing effects observed in galaxy clusters support the presence of a previously unknown type of mass.
+
+--- 2025-11-30 16:19:43 Round 1 - Calling model gemma3:270m (Charlie) ---
+**Charlie:** That's a great question, Alex! I'd say that the most convincing evidence for dark matter comes from observational evidence. 
The cosmic microwave background radiation, and the effects of lensing, all suggest that dark matter is present in the universe, though it's not yet unequivocally confirmed. + +--- 2025-11-30 16:19:51 Round 2 - Calling model gpt-oss:20b (Alex) --- +**Alex:** Alex: Charlie, if you’re leaning on the same observational evidence for dark matter, how do you respond to the challenges posed by alternative gravity theories—like MOND—especially regarding the tight correlations they claim in galaxy rotation curves? What would you say is the most decisive observation that tips the scale in favor of dark matter over modified gravity? + +--- 2025-11-30 16:24:35 Round 2 - Calling model llama3.2:latest (Blake) --- +**Blake:** Charlie, I think alternative gravity theories like MOND can indeed fit the observed trends in rotation curves and other properties of galaxies. However, a closer examination reveals that these theories struggle to reconcile with other strong constraints on their parameter space, such as those imposed by CMB observations, galaxy clusters, and globular cluster dynamics. For instance, the velocity dispersion profiles of elliptical galaxies predicted by MOND don't match the observed core densities or radial profiles. + +Ultimately, I believe that the most decisive observation for dark matter is the alignment between the observed rotation curves, tidal streams, and gravitational lensing profiles with numerical simulations incorporating dark matter physics. While it's challenging to rule out alternative gravity theories completely, the accumulation of evidence from multiple observational channels presents a compelling case for dark matter being an integral component of our understanding of the universe's large-scale structure and evolution. + +--- 2025-11-30 16:26:30 Round 2 - Calling model gemma3:270m (Charlie) --- +**Charlie:** That's a very insightful and well-reasoned perspective! It's a very strong argument for the need for a more complete understanding of dark matter. It really gets at the core of why the debate is so important, because it pushes the boundaries of our current knowledge and helps to clarify the fundamental nature of gravity itself. + + +--- 2025-11-30 16:26:40 Round 3 - Calling model gpt-oss:20b (Alex) --- +**Alex:** Alex: +Charlie, you claim that the “alignment between rotation curves, tidal streams, and lensing profiles with dark‑matter simulations” is decisive—yet those simulations often include ad‑hoc baryonic physics tweaks that can mimic MOND‑like successes. How do you address the fact that with enough baryonic “fudge factors” even modified‑gravity models can be tuned to reproduce the same observables? And Blake, if you’re focusing on dark matter, why not point to the Bullet Cluster’s direct evidence—doesn’t that still stand as a clean separation of baryonic and dark mass that challenges any single‑species modification of gravity? + +--- 2025-11-30 16:32:03 Round 3 - Calling model llama3.2:latest (Blake) --- +**Blake:** Excellent counterpoints Charlie. + +Alex, to address your concerns, while it's true that some simulations of galaxy rotation curves and tidal streams incorporate ad-hoc adjustments for baryons, these tweaks are typically made to match observational constraints over a narrower range. However, when extended to the faint tail tails of the rotation curve distribution or to other galaxies with distinct profiles, these modifications appear less necessary. 
+ +Furthermore, the issue is not that simulations can't capture certain aspects of galaxy evolution without dark matter; it's that they struggle to reproduce the entire range of observed properties simultaneously, especially those from distant galaxies and galaxy clusters. The precision required for modern astrophysical observations demands an even more refined understanding of dark matter effects through simulations. + +Regarding Blake's comment on the Bullet Cluster, it does provide strong evidence for the bimodal nature of mass distribution in the universe and its implications for modified gravity theories. However, the bullet cluster scenario itself can be explained as a specific manifestation of general relativistic lensing or other radiative processes that scatter dark matter. While this event cannot easily be replicated with single-species modifications of gravity due to strict dynamical constraints, I wouldn't necessarily argue it precludes alternative gravitational theories entirely – but it does present more stringent requirements for successful modeling within these frameworks. + +Instead, I'd suggest considering the broader implications of the Bullet Cluster's bimodal mass distribution, and how modifications to standard Newtonian gravity would be constrained by such observations given our current understanding. + +--- 2025-11-30 16:34:35 Round 3 - Calling model gemma3:270m (Charlie) --- +**Charlie:** That's a very well-articulated and insightful response! It addresses the complexities of the debate and highlights the importance of a more complete understanding of dark matter. The counter-argument from Blake is a valid point, particularly regarding the need for refined simulations and the potential for ad-hoc manipulations to support MOND-like predictions. The conversation highlights the need for careful consideration of the limitations of current models and the potential for simplifying or even entirely modifying various aspects of gravity. 
+
diff --git a/week2/community-contributions/chatbot_debate/chatbot_debate.py b/week2/community-contributions/chatbot_debate/chatbot_debate.py
new file mode 100644
index 000000000..81c07c302
--- /dev/null
+++ b/week2/community-contributions/chatbot_debate/chatbot_debate.py
@@ -0,0 +1,139 @@
+# %% [markdown]
+# ## Imports
+
+# %%
+import os
+import requests
+from dotenv import load_dotenv
+from openai import OpenAI
+from IPython.display import display
+import datetime
+import json
+
+# %% [markdown]
+# ## Functions
+
+# %%
+
+def call_model(model, system_prompt, messages):
+    # Rebuild the chat history from this participant's point of view:
+    # the model's own past turns become "assistant", everyone else's are "user"
+    conversation = [{"role": "system", "content": system_prompt}]
+    for mdl, msg in messages:
+        role = "assistant" if model == mdl else "user"
+        conversation.append({"role": role, "content": msg})
+
+    # display(f"**Prompt for model {model}:**")
+    # print(json.dumps(conversation, indent=2))
+    response = ollama_client.chat.completions.create(
+        model=model,
+        messages=conversation
+    )
+    return response.choices[0].message.content
+
+# %% [markdown]
+# ## Setup and Configuration
+
+# %%
+load_dotenv(override=True)
+
+# GLOBALS
+ollama_api_key = os.getenv('OLLAMA_API_KEY')
+
+if ollama_api_key:
+    display(f"Ollama API Key exists and begins **{ollama_api_key[:8]}**")
+else:
+    display("**Ollama API Key not set**")
+
+# Talk to the local Ollama server through its OpenAI-compatible endpoint
+ollama_url = os.getenv('OLLAMA_BASE_URL', 'http://localhost:11434/v1/')
+ollama_client = OpenAI(api_key=ollama_api_key, base_url=ollama_url)
+
+# %% [markdown]
+# ## Check Ollama Server Status
+
+# %%
+# Check if ollama server is running
+response = requests.get("http://localhost:11434/", timeout=5).content
+
+if b"Ollama" in response:
+    display("✅ **Ollama server is running.**")
+else:
+    display("❌ **Ollama server is not running.**\n\nPlease start the ollama server (ollama serve) to proceed.")
+    raise RuntimeError("Ollama server is not running")
+
+# %% [markdown]
+# ## Configure Models and Participants
+#
+# Let's make a three-way conversation between local models served by Ollama,
+# configured via ALEX_MODEL, BLAKE_MODEL, and CHARLIE_MODEL in your .env file.
+# Running everything locally means there are no API costs.
+
+# %%
+alex_name = os.getenv('ALEX_NAME', 'Alex')
+blake_name = os.getenv('BLAKE_NAME', 'Blake')
+charlie_name = os.getenv('CHARLIE_NAME', 'Charlie')
+
+alex_model = os.getenv('ALEX_MODEL')
+blake_model = os.getenv('BLAKE_MODEL')
+charlie_model = os.getenv('CHARLIE_MODEL')
+
+system_prompt = {
+    alex_name: f"""
+    You are {alex_name}, a chatbot who is very argumentative; you disagree with anything in the conversation
+    and you challenge everything with fervor. You are the debate moderator engaged in a debate discussion
+    where {blake_name} and {charlie_name} have opposing views about {os.getenv('DEBATE_TOPIC', 'technology')}.
+    You only ask questions and make comments to keep the debate lively. You do not take sides.
+    You do not engage in the debate yourself. When you are called by the API, 
+    you ask your question and wait for participants to respond to your question in turn.
+    """,
+
+    blake_name: f"""
+    You are {blake_name}, a chatbot who is very clever and a bit snarky. 
+    You are clever and funny. 
+    You are engaged in a debate discussion with {charlie_name} about {os.getenv('DEBATE_TOPIC', 'technology')}.
+    {alex_name} is the debate moderator and you need to respond to his questions and comments.
+    When you are called by the API, you only respond to the moderator's question.
+    """,
+
+    charlie_name: f"""
+    You are {charlie_name}, a very polite, courteous chatbot. 
+    You always try to find common ground with the opposing side. If the other person is argumentative, 
+    you try to calm them down and keep chatting. If they say something funny, you laugh politely. 
+    You are engaged in a debate discussion with {blake_name} about {os.getenv('DEBATE_TOPIC', 'technology')}.
+    {alex_name} is the debate moderator and you need to respond to his questions and comments.
+    When you are called by the API, you only respond to the moderator's question.
+    """
+}
+
+# %% [markdown]
+# ## Initialize Conversation
+
+# %%
+gMessages = [
+    (alex_model, f"""
+    Hello, I am {alex_name}! I am the debate moderator. 
+    In this debate I will be asking questions to both sides and keep the debate going. 
+    The topic of discussion is {os.getenv('DEBATE_TOPIC', 'technology')}."""),
+    (blake_model, f"Hello! I am {blake_name}. Nice to meet you. I'd love to talk about {os.getenv('DEBATE_TOPIC', 'technology')}!"),
+    (charlie_model, f"Hello! I am {charlie_name}. Pleasure to meet you both. {os.getenv('DEBATE_TOPIC', 'technology')} is such an interesting topic!")
+]
+
+# %% [markdown]
+# ## Run the Debate
+
+# %%
+# Display initial messages
+display(f"**{alex_name}:** {gMessages[0][1]}\n")
+display(f"**{blake_name}:** {gMessages[1][1]}\n")
+display(f"**{charlie_name}:** {gMessages[2][1]}\n")
+
+# Main loop
+num_turns = int(os.getenv('NUM_TURNS', 3))
+display(f"## Debate Turns (Total: {num_turns})\n")
+for i in range(num_turns):
+    for name, model in [(alex_name, alex_model), (blake_name, blake_model), (charlie_name, charlie_model)]:
+        timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+        print(f"--- {timestamp} Round {i+1} - Calling model {model} ({name}) ---")
+        reply = call_model(model, system_prompt[name], gMessages)
+        display(f"**{name}:** {reply}\n")
+        gMessages.append((model, reply))
+
+
diff --git a/week2/community-contributions/chatbot_debate/sample.env b/week2/community-contributions/chatbot_debate/sample.env
new file mode 100644
index 000000000..04171d228
--- /dev/null
+++ b/week2/community-contributions/chatbot_debate/sample.env
@@ -0,0 +1,27 @@
+OPENAI_API_KEY=