@@ -21,22 +21,19 @@ class TestResponses:
 
     @parametrize
     def test_method_create_overload_1(self, client: OpenAI) -> None:
-        response = client.responses.create(
-            input="string",
-            model="gpt-4o",
-        )
+        response = client.responses.create()
         assert_matches_type(Response, response, path=["response"])
 
     @parametrize
     def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
         response = client.responses.create(
-            input="string",
-            model="gpt-4o",
             background=True,
             include=["file_search_call.results"],
+            input="string",
             instructions="instructions",
             max_output_tokens=0,
             metadata={"foo": "string"},
+            model="gpt-4o",
             parallel_tool_calls=True,
             previous_response_id="previous_response_id",
             prompt={
@@ -72,10 +69,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
 
     @parametrize
     def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
-        http_response = client.responses.with_raw_response.create(
-            input="string",
-            model="gpt-4o",
-        )
+        http_response = client.responses.with_raw_response.create()
 
         assert http_response.is_closed is True
         assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -84,10 +78,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
 
     @parametrize
     def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
-        with client.responses.with_streaming_response.create(
-            input="string",
-            model="gpt-4o",
-        ) as http_response:
+        with client.responses.with_streaming_response.create() as http_response:
             assert not http_response.is_closed
             assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
 
@@ -99,23 +90,21 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_overload_2(self, client: OpenAI) -> None:
         response_stream = client.responses.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         )
         response_stream.response.close()
 
     @parametrize
     def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
         response_stream = client.responses.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
             background=True,
             include=["file_search_call.results"],
+            input="string",
             instructions="instructions",
             max_output_tokens=0,
             metadata={"foo": "string"},
+            model="gpt-4o",
             parallel_tool_calls=True,
             previous_response_id="previous_response_id",
             prompt={
@@ -151,8 +140,6 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
         response = client.responses.with_raw_response.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         )
 
@@ -163,8 +150,6 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
         with client.responses.with_streaming_response.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         ) as response:
             assert not response.is_closed
@@ -358,22 +343,19 @@ class TestAsyncResponses:
 
     @parametrize
     async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.responses.create(
-            input="string",
-            model="gpt-4o",
-        )
+        response = await async_client.responses.create()
         assert_matches_type(Response, response, path=["response"])
 
     @parametrize
     async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.responses.create(
-            input="string",
-            model="gpt-4o",
             background=True,
             include=["file_search_call.results"],
+            input="string",
             instructions="instructions",
             max_output_tokens=0,
             metadata={"foo": "string"},
+            model="gpt-4o",
             parallel_tool_calls=True,
             previous_response_id="previous_response_id",
             prompt={
@@ -409,10 +391,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
 
     @parametrize
     async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
-        http_response = await async_client.responses.with_raw_response.create(
-            input="string",
-            model="gpt-4o",
-        )
+        http_response = await async_client.responses.with_raw_response.create()
 
         assert http_response.is_closed is True
         assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -421,10 +400,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -
 
     @parametrize
     async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.responses.with_streaming_response.create(
-            input="string",
-            model="gpt-4o",
-        ) as http_response:
+        async with async_client.responses.with_streaming_response.create() as http_response:
             assert not http_response.is_closed
             assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
 
@@ -436,23 +412,21 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe
     @parametrize
     async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         response_stream = await async_client.responses.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         )
         await response_stream.response.aclose()
 
     @parametrize
     async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
         response_stream = await async_client.responses.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
             background=True,
             include=["file_search_call.results"],
+            input="string",
             instructions="instructions",
             max_output_tokens=0,
             metadata={"foo": "string"},
+            model="gpt-4o",
             parallel_tool_calls=True,
             previous_response_id="previous_response_id",
             prompt={
@@ -488,8 +462,6 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
     @parametrize
     async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.responses.with_raw_response.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         )
 
@@ -500,8 +472,6 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -
     @parametrize
     async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         async with async_client.responses.with_streaming_response.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         ) as response:
             assert not response.is_closed