Skip to content

Commit d10d223

Browse files
authored
samples update (#43879)
* Samples update
* Resolved review comment
* Update
1 parent aa7162a commit d10d223

File tree

3 files changed

+77
-3
lines changed

3 files changed

+77
-3
lines changed

sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -31,10 +31,8 @@
3131
3) BING_PROJECT_CONNECTION_ID - The Bing project connection ID, as found in the "Connections" tab in your Azure AI Foundry project.
3232
"""
3333

34-
from asyncio import events
3534
import os
3635
from dotenv import load_dotenv
37-
from azure.ai.projects.models._models import ResponseCompletedEvent
3836
from azure.identity import DefaultAzureCredential
3937
from azure.ai.projects import AIProjectClient
4038
from azure.ai.projects.models import (
@@ -100,7 +98,6 @@
10098
elif event.type == "response.completed":
10199
print(f"\nFollow-up completed!")
102100
print(f"Full response: {event.response.output_text}")
103-
print(event)
104101

105102
print("\nCleaning up...")
106103
project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version)
51.2 KB
Loading
Lines changed: 77 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,77 @@
1+
# ------------------------------------
2+
# Copyright (c) Microsoft Corporation.
3+
# Licensed under the MIT License.
4+
# ------------------------------------
5+
6+
"""
7+
DESCRIPTION:
8+
This sample demonstrates how to run a responses operation with image input
9+
using the synchronous AIProject and OpenAI clients. The sample shows how to
10+
send both text and image content to a model for analysis.
11+
12+
See also https://platform.openai.com/docs/api-reference/responses/create?lang=python
13+
14+
USAGE:
15+
python sample_responses_image_input.py
16+
17+
Before running the sample:
18+
19+
pip install "azure-ai-projects>=2.0.0b1" openai azure-identity python-dotenv
20+
21+
Set these environment variables with your own values:
22+
1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
23+
page of your Azure AI Foundry portal.
24+
2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
25+
the "Models + endpoints" tab in your Azure AI Foundry project.
26+
"""
27+
28+
import os
29+
import base64
30+
from dotenv import load_dotenv
31+
from azure.identity import DefaultAzureCredential
32+
from azure.ai.projects import AIProjectClient
33+
34+
# Pull environment variables (e.g. from a local .env file) before they are read.
load_dotenv()

# Build the project client; DefaultAzureCredential resolves the ambient Azure
# identity (CLI login, managed identity, environment variables, ...).
project_client = AIProjectClient(
    credential=DefaultAzureCredential(),
    endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
)
40+
41+
42+
def image_to_base64(image_path: str) -> str:
    """Return the file at *image_path* encoded as a base64 (UTF-8) string.

    :param image_path: Path to the image file on disk.
    :raises FileNotFoundError: If *image_path* does not point to an existing file.
    :raises OSError: If the file exists but cannot be read.
    """
    # Guard clause: fail early with a clear error before attempting to open.
    if not os.path.isfile(image_path):
        raise FileNotFoundError(f"File not found at: {image_path}")

    try:
        # Read the raw bytes and encode in one pass; the context manager
        # guarantees the handle is closed even if the read fails.
        with open(image_path, "rb") as handle:
            return base64.b64encode(handle.read()).decode("utf-8")
    except Exception as exc:
        # Surface any read failure as an OSError, keeping the original cause.
        raise OSError(f"Error reading file '{image_path}'") from exc
52+
53+
54+
with project_client:

    # The project exposes an OpenAI-compatible client for the Responses API.
    openai_client = project_client.get_openai_client()

    # Sample image shipped alongside these samples, resolved relative to this file.
    image_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/image_input.png"))

    # One user message carrying two content parts: the text question and the
    # image itself, inlined as a base64 data URL.
    user_message = {
        "type": "message",
        "role": "user",
        "content": [
            {"type": "input_text", "text": "what's in this image?"},
            {
                "type": "input_image",
                "detail": "auto",
                "image_url": f"data:image/png;base64,{image_to_base64(image_file_path)}",
            },
        ],
    }

    # Send the multimodal request to the configured model deployment.
    response = openai_client.responses.create(
        model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
        input=[user_message],
    )
    print(f"Response output: {response.output_text}")

0 commit comments

Comments
 (0)