Skip to content

Commit e08bf0e

Browse files
authored
update dspy.settings.configure and dspy.settings.context to dspy.configure and dspy.context (#9060)
1 parent 6ad51eb commit e08bf0e

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

58 files changed

+163
-163
lines changed

docs/docs/cheatsheet.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ print(f"Final Predicted Answer (after ReAct process): {result.answer}")
6767

6868
```python
6969
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
70-
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
70+
dspy.configure(rm=colbertv2_wiki17_abstracts)
7171

7272
#Define Retrieve Module
7373
retriever = dspy.Retrieve(k=3)
@@ -450,7 +450,7 @@ asyncio.run(dspy_program(question="What is DSPy"))
450450

451451
```python
452452
import dspy
453-
dspy.settings.configure(track_usage=True)
453+
dspy.configure(track_usage=True)
454454

455455
result = dspy.ChainOfThought(BasicQA)(question="What is 2+2?")
456456
print(f"Token usage: {result.get_lm_usage()}")

docs/docs/learn/programming/language_models.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -229,7 +229,7 @@ To enable the Responses API, just set `model_type="responses"` when creating the
229229
import dspy
230230

231231
# Configure DSPy to use the Responses API for your language model
232-
dspy.settings.configure(
232+
dspy.configure(
233233
lm=dspy.LM(
234234
"openai/gpt-5-mini",
235235
model_type="responses",

docs/docs/learn/programming/modules.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -260,7 +260,7 @@ print(hop(claim="Stephen Curry is the best 3 pointer shooter ever in the human h
260260
DSPy provides built-in tracking of language model usage across all module calls. To enable tracking:
261261

262262
```python
263-
dspy.settings.configure(track_usage=True)
263+
dspy.configure(track_usage=True)
264264
```
265265

266266
Once enabled, you can access usage statistics from any `dspy.Prediction` object:
@@ -275,7 +275,7 @@ The usage data is returned as a dictionary that maps each language model name to
275275
import dspy
276276

277277
# Configure DSPy with tracking enabled
278-
dspy.settings.configure(
278+
dspy.configure(
279279
lm=dspy.LM("openai/gpt-4o-mini", cache=False),
280280
track_usage=True
281281
)
@@ -326,7 +326,7 @@ When using DSPy's caching features (either in-memory or on-disk via litellm), ca
326326

327327
```python
328328
# Enable caching
329-
dspy.settings.configure(
329+
dspy.configure(
330330
lm=dspy.LM("openai/gpt-4o-mini", cache=True),
331331
track_usage=True
332332
)

docs/docs/tutorials/audio/index.ipynb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,7 @@
117117
"Now let's configure our LLM which can process input audio. \n",
118118
"\n",
119119
"```python\n",
120-
"dspy.settings.configure(lm=dspy.LM(model='gpt-4o-mini-audio-preview-2024-12-17'))\n",
120+
"dspy.configure(lm=dspy.LM(model='gpt-4o-mini-audio-preview-2024-12-17'))\n",
121121
"```\n",
122122
"\n",
123123
"Note: Using `dspy.Audio` in signatures allows passing in audio directly to the model. "
@@ -332,7 +332,7 @@
332332
" audio = generate_dspy_audio(raw_line, out.openai_instruction)\n",
333333
" return dspy.Prediction(audio=audio)\n",
334334
" \n",
335-
"dspy.settings.configure(lm=dspy.LM(model='gpt-4o-mini'))"
335+
"dspy.configure(lm=dspy.LM(model='gpt-4o-mini'))"
336336
]
337337
},
338338
{

docs/docs/tutorials/cache/index.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ import time
2525

2626
os.environ["OPENAI_API_KEY"] = "{your_openai_key}"
2727

28-
dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"), track_usage=True)
28+
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"), track_usage=True)
2929

3030
predict = dspy.Predict("question->answer")
3131

@@ -167,7 +167,7 @@ import time
167167

168168
os.environ["OPENAI_API_KEY"] = "{your_openai_key}"
169169

170-
dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
170+
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
171171

172172
predict = dspy.Predict("question->answer")
173173

@@ -193,7 +193,7 @@ from hashlib import sha256
193193

194194
os.environ["OPENAI_API_KEY"] = "{your_openai_key}"
195195

196-
dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
196+
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
197197

198198
class CustomCache(dspy.clients.Cache):
199199

docs/docs/tutorials/conversation_history/index.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ import os
1212

1313
os.environ["OPENAI_API_KEY"] = "{your_openai_api_key}"
1414

15-
dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
15+
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
1616

1717
class QA(dspy.Signature):
1818
question: str = dspy.InputField()
@@ -121,7 +121,7 @@ For example:
121121
```python
122122
import dspy
123123

124-
dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
124+
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
125125

126126

127127
class QA(dspy.Signature):

docs/docs/tutorials/deployment/index.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ Below, we'll assume you have the following simple DSPy program that you want to
77
```python
88
import dspy
99

10-
dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
10+
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
1111
dspy_program = dspy.ChainOfThought("question -> answer")
1212
```
1313

@@ -40,7 +40,7 @@ class Question(BaseModel):
4040

4141
# Configure your language model and 'asyncify' your DSPy program.
4242
lm = dspy.LM("openai/gpt-4o-mini")
43-
dspy.settings.configure(lm=lm, async_max_workers=4) # default is 8
43+
dspy.configure(lm=lm, async_max_workers=4) # default is 8
4444
dspy_program = dspy.ChainOfThought("question -> answer")
4545
dspy_program = dspy.asyncify(dspy_program)
4646

@@ -163,7 +163,7 @@ mlflow.set_tracking_uri("http://127.0.0.1:5000/")
163163
mlflow.set_experiment("deploy_dspy_program")
164164

165165
lm = dspy.LM("openai/gpt-4o-mini")
166-
dspy.settings.configure(lm=lm)
166+
dspy.configure(lm=lm)
167167

168168
class MyProgram(dspy.Module):
169169
def __init__(self):

docs/docs/tutorials/entity_extraction/index.ipynb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -172,7 +172,7 @@
172172
"Specifically, we'll:\n",
173173
"- Create a `PeopleExtraction` DSPy Signature to specify the input (`tokens`) and output (`extracted_people`) fields.\n",
174174
"- Define a `people_extractor` program that uses DSPy's built-in `dspy.ChainOfThought` module to implement the `PeopleExtraction` signature. The program extracts entities referring to people from a list of input tokens using language model (LM) prompting.\n",
175-
"- Use the `dspy.LM` class and `dspy.settings.configure()` method to configure the language model that DSPy will use when invoking the program."
175+
"- Use the `dspy.LM` class and `dspy.configure()` method to configure the language model that DSPy will use when invoking the program."
176176
]
177177
},
178178
{
@@ -208,7 +208,7 @@
208208
"outputs": [],
209209
"source": [
210210
"lm = dspy.LM(model=\"openai/gpt-4o-mini\")\n",
211-
"dspy.settings.configure(lm=lm)"
211+
"dspy.configure(lm=lm)"
212212
]
213213
},
214214
{

docs/docs/tutorials/image_generation_prompting/index.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@
8080
"from IPython.display import display\n",
8181
"\n",
8282
"lm = dspy.LM(model=\"gpt-4o-mini\", temperature=0.5)\n",
83-
"dspy.settings.configure(lm=lm)"
83+
"dspy.configure(lm=lm)"
8484
]
8585
},
8686
{

docs/docs/tutorials/program_of_thought/index.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@
6969
"source": [
7070
"llama31_70b = dspy.LM(\"openai/meta-llama/Meta-Llama-3-70b-Instruct\", api_base=\"API_BASE\", api_key=\"None\")\n",
7171
"\n",
72-
"dspy.settings.configure(lm=llama31_70b)"
72+
"dspy.configure(lm=llama31_70b)"
7373
]
7474
},
7575
{

0 commit comments

Comments (0)