@@ -17,7 +17,7 @@ Depending on the devices you have and the platform you are running on, you will
1717PM> Install-Package Microsoft.ML.OnnxRuntime.DirectML
1818```
1919
20- ### GPU support for both NVIDIA and AMD?
20+ ### GPU support for NVIDIA
2121```
2222PM> Install-Package Microsoft.ML.OnnxRuntime.Gpu
2323```
@@ -42,192 +42,58 @@ https://github.com/ffbinaries/ffbinaries-prebuilt/releases/download/v6.1/ffprobe
4242```
4343
4444
45- ## .NET Console Application Example
45+ # C# Stable Diffusion Examples
46+ Example Model: https://huggingface.co/runwayml/stable-diffusion-v1-5 (onnx branch)
4647
47- Required Nuget Packages for example
48- ``` nuget
49- Microsoft.Extensions.Hosting
50- Microsoft.Extensions.Logging
51- ```
5248
49+ ## Basic Stable Diffusion
50+ Run a simple Stable Diffusion process with a basic prompt
5351``` csharp
54- using Microsoft .Extensions .DependencyInjection ;
55- using Microsoft .Extensions .Hosting ;
56- using OnnxStack .StableDiffusion .Common ;
57- using OnnxStack .StableDiffusion .Config ;
52+ // Create Pipeline
53+ var pipeline = StableDiffusionPipeline.CreatePipeline("D:\\Repositories\\stable-diffusion-v1-5");
5854
59- internal class Program
60- {
61- static async Task Main (string [] _ )
62- {
63- var builder = Host .CreateApplicationBuilder ();
64- builder .Logging .ClearProviders ();
65- builder .Services .AddLogging ((loggingBuilder ) => loggingBuilder .SetMinimumLevel (LogLevel .Error ));
55+ // Set Prompt Options
56+ var promptOptions = new PromptOptions { Prompt = "Photo of a cute dog." };
6657
67- // Add OnnxStack Stable Diffusion
68- builder . Services . AddOnnxStackStableDiffusion ( );
58+ // Run Pipeline
59+ var result = await pipeline.RunAsync(promptOptions);
6960
70- // Add AppService
71- builder .Services .AddHostedService <AppService >();
61+ // Save image result
62+ var image = result.ToImage();
63+ await image.SaveAsPngAsync("D:\\Results\\Image.png");
7264
73- // Start
74- await builder .Build ().RunAsync ();
75- }
76- }
65+ // Unload Pipeline
66+ await pipeline.UnloadAsync();
67+ ```
68+
69+ ## Stable Diffusion Batch Example
70+ Run Stable Diffusion process and return a batch of results
71+ ``` csharp
72+ // Create Pipeline
73+ var pipeline = StableDiffusionPipeline.CreatePipeline("D:\\Repositories\\stable-diffusion-v1-5");
74+
75+ // Prompt
76+ var promptOptions = new PromptOptions { Prompt = "Photo of a cat" };
7777
78- internal class AppService : IHostedService
78+ // Batch Of 5 Images with unique seeds
79+ var batchOptions = new BatchOptions
7980{
80- private readonly string _outputDirectory ;
81- private readonly IStableDiffusionService _stableDiffusionService ;
82-
83- public AppService (IStableDiffusionService stableDiffusionService )
84- {
85- _stableDiffusionService = stableDiffusionService ;
86- _outputDirectory = Path .Combine (Directory .GetCurrentDirectory (), " Images" );
87- }
88-
89- public async Task StartAsync (CancellationToken cancellationToken )
90- {
91- Directory .CreateDirectory (_outputDirectory );
92-
93- while (true )
94- {
95- System .Console .WriteLine (" Please type a prompt and press ENTER" );
96- var prompt = System .Console .ReadLine ();
97-
98- System .Console .WriteLine (" Please type a negative prompt and press ENTER (optional)" );
99- var negativePrompt = System .Console .ReadLine ();
100-
101-
102- // Example only, full config depends on model
103- // appsettings.json is recommended for ease of use
104- var modelOptions = new ModelOptions
105- {
106- Name = " Stable Diffusion 1.5" ,
107- ExecutionProvider = ExecutionProvider .DirectML ,
108- ModelConfigurations = new List <OnnxModelSessionConfig >
109- {
110- new OnnxModelSessionConfig
111- {
112- Type = OnnxModelType .Unet ,
113- OnnxModelPath = " model path"
114- }
115- }
116- };
117-
118- var promptOptions = new PromptOptions
119- {
120- Prompt = prompt ,
121- NegativePrompt = negativePrompt ,
122- DiffuserType = DiffuserType .TextToImage ,
123-
124- // Input for ImageToImage
125- // InputImage = new InputImage(File.ReadAllBytesAsync("image to image filename"))
126- };
127-
128- var schedulerOptions = new SchedulerOptions
129- {
130- Seed = Random .Shared .Next (),
131- GuidanceScale = 7 . 5 f ,
132- InferenceSteps = 30 ,
133- Height = 512 ,
134- Width = 512 ,
135- SchedulerType = SchedulerType .LMS ,
136- };
137-
138-
139- // Generate Image Example
140- var outputFilename = Path .Combine (_outputDirectory , $" {schedulerOptions .Seed }_{schedulerOptions .SchedulerType }.png" );
141- var result = await _stableDiffusionService .GenerateAsImageAsync (modelOptions , promptOptions , schedulerOptions );
142- if (result is not null )
143- {
144- // Save image to disk
145- await result .SaveAsPngAsync (outputFilename );
146- }
147-
148-
149-
150-
151- // Generate Batch Example
152- var batchOptions = new BatchOptions
153- {
154- BatchType = BatchOptionType .Seed ,
155- ValueTo = 20
156- };
157-
158- await foreach (var batchResult in _stableDiffusionService .GenerateBatchAsImageAsync (modelOptions , promptOptions , schedulerOptions , batchOptions ))
159- {
160- // Save image to disk
161- await batchResult .SaveAsPngAsync (outputFilename );
162- }
163-
164-
165- }
166- }
167-
168- public Task StopAsync (CancellationToken cancellationToken )
169- {
170- return Task .CompletedTask ;
171- }
81+ ValueTo = 5,
82+ BatchType = BatchOptionType.Seed
83+ };
84+
85+ // Run Pipeline
86+ await foreach (var result in pipeline.RunBatchAsync(batchOptions, promptOptions))
87+ {
88+ // Save Image result
89+ var image = result.ImageResult.ToImage();
90+ await image.SaveAsPngAsync($"D:\\Results\\Image_{result.SchedulerOptions.Seed}.png");
17291}
92+
93+ // Unload Pipeline
94+ await pipeline.UnloadAsync();
95+
17396```
17497
17598
176- ## Configuration
177- The ` appsettings.json ` is the easiest option for configuring model sets. Below is an example of ` Stable Diffusion 1.5 ` .
178- The example adds the necessary paths to each model file required for Stable Diffusion, as well as any model-specific configurations.
179- Each model can be assigned to its own device, which is handy if you have only a small GPU. This way, you can offload only what you need. There are limitations depending on the version of the ` Microsoft.ML.OnnxRuntime ` package you are using, but in most cases, you can split the load between CPU and GPU.
18099
181- ``` json
182- {
183- "Logging" : {
184- "LogLevel" : {
185- "Default" : " Information" ,
186- "Microsoft.AspNetCore" : " Warning"
187- }
188- },
189-
190- "OnnxStackConfig" : {
191- "Name" : " StableDiffusion 1.5" ,
192- "IsEnabled" : true ,
193- "PadTokenId" : 49407 ,
194- "BlankTokenId" : 49407 ,
195- "TokenizerLimit" : 77 ,
196- "EmbeddingsLength" : 768 ,
197- "ScaleFactor" : 0.18215 ,
198- "PipelineType" : " StableDiffusion" ,
199- "Diffusers" : [
200- " TextToImage" ,
201- " ImageToImage" ,
202- " ImageInpaintLegacy"
203- ],
204- "DeviceId" : 0 ,
205- "InterOpNumThreads" : 0 ,
206- "IntraOpNumThreads" : 0 ,
207- "ExecutionMode" : " ORT_SEQUENTIAL" ,
208- "ExecutionProvider" : " DirectML" ,
209- "ModelConfigurations" : [
210- {
211- "Type" : " Tokenizer" ,
212- "OnnxModelPath" : " D:\\ Repositories\\ stable-diffusion-v1-5\\ cliptokenizer.onnx"
213- },
214- {
215- "Type" : " Unet" ,
216- "OnnxModelPath" : " D:\\ Repositories\\ stable-diffusion-v1-5\\ unet\\ model.onnx"
217- },
218- {
219- "Type" : " TextEncoder" ,
220- "OnnxModelPath" : " D:\\ Repositories\\ stable-diffusion-v1-5\\ text_encoder\\ model.onnx"
221- },
222- {
223- "Type" : " VaeEncoder" ,
224- "OnnxModelPath" : " D:\\ Repositories\\ stable-diffusion-v1-5\\ vae_encoder\\ model.onnx"
225- },
226- {
227- "Type" : " VaeDecoder" ,
228- "OnnxModelPath" : " D:\\ Repositories\\ stable-diffusion-v1-5\\ vae_decoder\\ model.onnx"
229- }
230- ]
231- }
232- }
233- ```
0 commit comments