-
Notifications
You must be signed in to change notification settings - Fork 1.5k
Expand file tree
/
Copy pathProgram.cs
More file actions
33 lines (26 loc) · 1.39 KB
/
Program.cs
File metadata and controls
33 lines (26 loc) · 1.39 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
// Copyright (c) Microsoft. All rights reserved.
// This sample shows how to use Image Multi-Modality with an AI agent.
using Azure.AI.OpenAI;
using Azure.Identity;
using Microsoft.Extensions.AI;
using OpenAI.Chat;
using ChatMessage = Microsoft.Extensions.AI.ChatMessage;
// Read required configuration up front; fail fast with a clear error if the endpoint is missing.
var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set.");
// Consistency fix: use the unqualified `Environment` as on the line above (was `System.Environment`).
// NOTE(review): confirm "gpt-5.4-mini" is a real deployment/model name — looks like it may be a typo (e.g. "gpt-4o-mini").
var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-5.4-mini";
// WARNING: DefaultAzureCredential is convenient for development but requires careful consideration in production.
// In production, consider using a specific credential (e.g., ManagedIdentityCredential) to avoid
// latency issues, unintended credential probing, and potential security risks from fallback mechanisms.
var agent = new AzureOpenAIClient(new Uri(endpoint), new DefaultAzureCredential())
    .GetChatClient(deploymentName)
    .AsAIAgent(
        name: "VisionAgent",
        instructions: "You are a helpful agent that can analyze images");
// Build a multi-modal user message: a text prompt plus image bytes loaded from the local Assets folder.
ChatMessage message = new(ChatRole.User, [
    new TextContent("What do you see in this image?"),
    await DataContent.LoadFromAsync("Assets/walkway.jpg"),
]);
// Run the agent within a session and stream the response, printing each update as it arrives.
var session = await agent.CreateSessionAsync();
await foreach (var update in agent.RunStreamingAsync(message, session))
{
    Console.WriteLine(update);
}