Skip to content

Commit 48164ec

Browse files
authored
Add prompt_cache_retention to ModelSettings (#2095)
1 parent 95d71fa commit 48164ec

File tree

4 files changed

+9
-0
lines changed

4 files changed

+9
-0
lines changed

src/agents/model_settings.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -116,6 +116,12 @@ class ModelSettings:
     For Responses API: automatically enabled when not specified.
     For Chat Completions API: disabled when not specified."""

+    prompt_cache_retention: Literal["in_memory", "24h"] | None = None
+    """The retention policy for the prompt cache. Set to `24h` to enable extended
+    prompt caching, which keeps cached prefixes active for longer, up to a maximum
+    of 24 hours.
+    [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention)."""
+
     include_usage: bool | None = None
     """Whether to include usage chunk.
     Only available for Chat Completions API."""

src/agents/models/openai_chatcompletions.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -308,6 +308,7 @@ async def _fetch_response(
             reasoning_effort=self._non_null_or_omit(reasoning_effort),
             verbosity=self._non_null_or_omit(model_settings.verbosity),
             top_logprobs=self._non_null_or_omit(model_settings.top_logprobs),
+            prompt_cache_retention=self._non_null_or_omit(model_settings.prompt_cache_retention),
             extra_headers=self._merge_headers(model_settings),
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,

src/agents/models/openai_responses.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -326,6 +326,7 @@ async def _fetch_response(
             extra_body=model_settings.extra_body,
             text=response_format,
             store=self._non_null_or_omit(model_settings.store),
+            prompt_cache_retention=self._non_null_or_omit(model_settings.prompt_cache_retention),
             reasoning=self._non_null_or_omit(model_settings.reasoning),
             metadata=self._non_null_or_omit(model_settings.metadata),
             **extra_args,

tests/model_settings/test_serialization.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,7 @@ def test_all_fields_serialization() -> None:
         reasoning=Reasoning(),
         metadata={"foo": "bar"},
         store=False,
+        prompt_cache_retention="24h",
         include_usage=False,
         response_include=["reasoning.encrypted_content"],
         top_logprobs=1,

0 commit comments

Comments
 (0)