# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------

| 6 | +""" |
| 7 | +DESCRIPTION: |
| 8 | + This sample demonstrates how to run a responses operation with image input |
| 9 | + using the synchronous AIProject and OpenAI clients. The sample shows how to |
| 10 | + send both text and image content to a model for analysis. |
| 11 | +
|
| 12 | + See also https://platform.openai.com/docs/api-reference/responses/create?lang=python |
| 13 | +
|
| 14 | +USAGE: |
| 15 | + python sample_responses_image_input.py |
| 16 | +
|
| 17 | + Before running the sample: |
| 18 | +
|
| 19 | + pip install "azure-ai-projects>=2.0.0b1" openai azure-identity python-dotenv |
| 20 | +
|
| 21 | + Set these environment variables with your own values: |
| 22 | + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview |
| 23 | + page of your Azure AI Foundry portal. |
| 24 | + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in |
| 25 | + the "Models + endpoints" tab in your Azure AI Foundry project. |
| 26 | +""" |

import os
import base64
from dotenv import load_dotenv
from azure.identity import DefaultAzureCredential
from azure.ai.projects import AIProjectClient

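# Load environment variables (AZURE_AI_PROJECT_ENDPOINT, AZURE_AI_MODEL_DEPLOYMENT_NAME) from a .env file, if one is present.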
load_dotenv()

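# Create the project client. DefaultAzureCredential tries several authentication methods in turn
# (environment variables, managed identity, Azure CLI login, and so on).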
project_client = AIProjectClient(
    endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
    credential=DefaultAzureCredential(),
)


def image_to_base64(image_path: str) -> str:
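    """Read the image file at image_path and return its contents as a base64-encoded string."""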
    if not os.path.isfile(image_path):
        raise FileNotFoundError(f"File not found at: {image_path}")

    try:
        with open(image_path, "rb") as image_file:
            file_data = image_file.read()
            return base64.b64encode(file_data).decode("utf-8")
    except Exception as exc:
        raise OSError(f"Error reading file '{image_path}'") from exc


with project_client:

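    # Get an OpenAI client that is configured to call the models deployed in this Azure AI Foundry project.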
    openai_client = project_client.get_openai_client()

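    # Build an absolute path to the sample image, relative to this script's location.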
    image_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/image_input.png"))

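    # Call the Responses API with a single user message that combines text and the image, passed as a base64 data URL.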
    response = openai_client.responses.create(
        input=[
            {
                "type": "message",
                "role": "user",
                "content": [
                    {"type": "input_text", "text": "what's in this image?"},
                    {
                        "type": "input_image",
                        "detail": "auto",
                        "image_url": f"data:image/png;base64,{image_to_base64(image_file_path)}",
                    },
                ],
            }
        ],
        model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
    )
    print(f"Response output: {response.output_text}")