 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from unittest.mock import AsyncMock, MagicMock, Mock, patch
+from unittest.mock import MagicMock, Mock, patch
 from typing import List

 import openai
@@ -428,10 +428,11 @@ async def test_openai_llm_ainvoke_happy_path(mock_import: Mock) -> None:
     mock_response = MagicMock()
     mock_response.choices = [mock_choice]

-    # Async mock for the chat completion
-    mock_openai.AsyncOpenAI.return_value.chat.completions.create = AsyncMock(
-        return_value=mock_response
-    )
+    # Async function instead of AsyncMock
+    async def async_create(*args, **kwargs):  # type: ignore[no-untyped-def]
+        return mock_response
+
+    mock_openai.AsyncOpenAI.return_value.chat.completions.create = async_create

     model_name = "gpt-3.5-turbo"
     input_text = "may thy knife chip and shatter"
@@ -441,8 +442,8 @@ async def test_openai_llm_ainvoke_happy_path(mock_import: Mock) -> None:
     response = await llm.ainvoke(input_text)

     # Assert we got the expected content in LLMResponse
-    assert response.content == "Return text"
     assert isinstance(response, LLMResponse)
+    assert response.content == "Return text"


 # LLM Interface V2 Tests
0 commit comments