@@ -5,7 +5,7 @@ The Ollama Python library provides the easiest way to integrate Python 3.8+ proj
 ## Prerequisites

 - [Ollama](https://ollama.com/download) should be installed and running
-- Pull a model to use with the library: `ollama pull <model>` e.g. `ollama pull llama3.2`
+- Pull a model to use with the library: `ollama pull <model>` e.g. `ollama pull gemma3`
   - See [Ollama.com](https://ollama.com/search) for more information on the models available.

 ## Install
@@ -20,7 +20,7 @@ pip install ollama
 from ollama import chat
 from ollama import ChatResponse

-response: ChatResponse = chat(model='llama3.2', messages=[
+response: ChatResponse = chat(model='gemma3', messages=[
   {
     'role': 'user',
     'content': 'Why is the sky blue?',
@@ -41,7 +41,7 @@ Response streaming can be enabled by setting `stream=True`.
 from ollama import chat

 stream = chat(
-    model='llama3.2',
+    model='gemma3',
     messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
     stream=True,
 )
@@ -61,7 +61,7 @@ client = Client(
   host='http://localhost:11434',
   headers={'x-some-header': 'some-value'}
 )
-response = client.chat(model='llama3.2', messages=[
+response = client.chat(model='gemma3', messages=[
   {
     'role': 'user',
     'content': 'Why is the sky blue?',
@@ -79,7 +79,7 @@ from ollama import AsyncClient

 async def chat():
   message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  response = await AsyncClient().chat(model='llama3.2', messages=[message])
+  response = await AsyncClient().chat(model='gemma3', messages=[message])

 asyncio.run(chat())
 ```
@@ -92,7 +92,7 @@ from ollama import AsyncClient

 async def chat():
   message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  async for part in await AsyncClient().chat(model='llama3.2', messages=[message], stream=True):
+  async for part in await AsyncClient().chat(model='gemma3', messages=[message], stream=True):
     print(part['message']['content'], end='', flush=True)

 asyncio.run(chat())
@@ -105,13 +105,13 @@ The Ollama Python library's API is designed around the [Ollama REST API](https:/
 ### Chat

 ```python
-ollama.chat(model='llama3.2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
+ollama.chat(model='gemma3', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
 ```

 ### Generate

 ```python
-ollama.generate(model='llama3.2', prompt='Why is the sky blue?')
+ollama.generate(model='gemma3', prompt='Why is the sky blue?')
 ```

 ### List
@@ -123,49 +123,49 @@ ollama.list()
 ### Show

 ```python
-ollama.show('llama3.2')
+ollama.show('gemma3')
 ```

 ### Create

 ```python
-ollama.create(model='example', from_='llama3.2', system="You are Mario from Super Mario Bros.")
+ollama.create(model='example', from_='gemma3', system="You are Mario from Super Mario Bros.")
 ```

 ### Copy

 ```python
-ollama.copy('llama3.2', 'user/llama3.2')
+ollama.copy('gemma3', 'user/gemma3')
 ```

 ### Delete

 ```python
-ollama.delete('llama3.2')
+ollama.delete('gemma3')
 ```

 ### Pull

 ```python
-ollama.pull('llama3.2')
+ollama.pull('gemma3')
 ```

 ### Push

 ```python
-ollama.push('user/llama3.2')
+ollama.push('user/gemma3')
 ```

 ### Embed

 ```python
-ollama.embed(model='llama3.2', input='The sky is blue because of rayleigh scattering')
+ollama.embed(model='gemma3', input='The sky is blue because of rayleigh scattering')
 ```

 ### Embed (batch)

 ```python
-ollama.embed(model='llama3.2', input=['The sky is blue because of rayleigh scattering', 'Grass is green because of chlorophyll'])
+ollama.embed(model='gemma3', input=['The sky is blue because of rayleigh scattering', 'Grass is green because of chlorophyll'])
 ```

 ### Ps