3 files changed: +5 −0 lines changed

@@ -40,6 +40,9 @@ class ModelSettings:
     max_tokens: int | None = None
     """The maximum number of output tokens to generate."""

+    metadata: dict[str, str] | None = None
+    """Metadata to include with the model response call."""
+
     store: bool | None = None
     """Whether to store the generated model response for later retrieval.
    Defaults to True if not provided."""
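For context, a minimal usage sketch of the new field, assuming the `openai-agents` package layout (`Agent`, `Runner`, and `ModelSettings` imports); the agent definition and tag values below are hypothetical:

```python
# Minimal sketch: attaching metadata to an agent's model calls.
# Assumes the openai-agents package layout; tag values are hypothetical.
from agents import Agent, ModelSettings, Runner

agent = Agent(
    name="Support bot",
    instructions="Answer billing questions concisely.",
    model_settings=ModelSettings(
        max_tokens=512,
        store=True,  # metadata is most useful when responses are stored
        metadata={"team": "billing", "env": "prod"},  # hypothetical tags
    ),
)

result = Runner.run_sync(agent, "Why was I charged twice?")
print(result.final_output)
```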
@@ -537,6 +537,7 @@ async def _fetch_response(
             stream_options={"include_usage": True} if stream else NOT_GIVEN,
             store=store,
             extra_headers=_HEADERS,
+            metadata=model_settings.metadata,
         )

         if isinstance(ret, ChatCompletion):
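At the client level, the argument forwarded here corresponds to the `metadata` parameter of the Chat Completions API. A simplified sketch of the equivalent raw call (the model name and tag values are placeholders, and the surrounding arguments are trimmed down):

```python
# Simplified sketch of the equivalent raw OpenAI client call.
# Model name and metadata values are placeholders.
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()

async def main() -> None:
    ret = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello"}],
        store=True,  # stored responses can be filtered by metadata later
        metadata={"run_id": "abc123"},  # placeholder tag
    )
    print(ret.choices[0].message.content)

asyncio.run(main())
```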
@@ -247,6 +247,7 @@ async def _fetch_response(
             extra_headers=_HEADERS,
             text=response_format,
             store=self._non_null_or_not_given(model_settings.store),
+            metadata=model_settings.metadata,
         )

     def _get_client(self) -> AsyncOpenAI:
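One detail worth noting in this hunk: `store` goes through `_non_null_or_not_given`, which maps `None` to the SDK's `NOT_GIVEN` sentinel so the parameter is omitted from the request, while `metadata` is passed through unwrapped. A sketch of what that helper presumably does (the real implementation lives elsewhere in this file and may differ):

```python
# Presumed shape of the helper used for `store` above; an assumption,
# not the file's actual implementation.
from openai import NOT_GIVEN

def _non_null_or_not_given(value):
    # Map None to NOT_GIVEN so the field is omitted from the request
    # body entirely rather than being serialized as an explicit null.
    return value if value is not None else NOT_GIVEN
```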