This repository was archived by the owner on Nov 27, 2024. It is now read-only.

Commit 513ab05

Update README

1 parent a889bd1 commit 513ab05

File tree

2 files changed: +116 -14 lines changed


OnnxStack.Core/README.md

Lines changed: 88 additions & 9 deletions
@@ -1,9 +1,18 @@
 # OnnxStack.Core - Onnx Services for .NET Applications

-OnnxStack.Core is a library that provides higher-level services for use in .NET applications. It offers extensive support for features such as dependency injection, .NET configuration implementations, ASP.NET Core integration, and IHostedService support.
+OnnxStack.Core is a library that provides higher-level ONNX services for use in .NET applications. It offers extensive support for features such as dependency injection, .NET configuration implementations, ASP.NET Core integration, and IHostedService support.
+
+You can configure a model set for runtime, offloading individual models to different devices to make better use of resources or to run on lower-end hardware. The first use case is StableDiffusion; however, support will be expanded, and other model sets, such as object detection and classification, will be added.

 ## Getting Started

+OnnxStack.Core can be found via the NuGet package manager; download and install it.
+```
+PM> Install-Package OnnxStack.Core
+```
+
 ### .NET Core Registration

 You can easily integrate `OnnxStack.Core` into your application services layer. This registration process sets up the necessary services and loads the `appsettings.json` configuration.
@@ -13,6 +22,69 @@ Example: Registering OnnxStack
 builder.Services.AddOnnxStack();
 ```

+## Configuration example
+The `appsettings.json` is the easiest option for configuring model sets. Below is an example of `Stable Diffusion 1.5`. The example adds the necessary paths to each model file required for Stable Diffusion, as well as any model-specific configurations. Each model can be assigned to its own device, which is handy if you have only a small GPU. This way, you can offload only what you need. There are limitations depending on the version of the `Microsoft.ML.OnnxRuntime` package you are using, but in most cases, you can split the load between CPU and GPU.
+
+```json
+{
+  "Logging": {
+    "LogLevel": {
+      "Default": "Information",
+      "Microsoft.AspNetCore": "Warning"
+    }
+  },
+  "AllowedHosts": "*",
+
+  "OnnxStackConfig": {
+    "Name": "StableDiffusion 1.5",
+    "PadTokenId": 49407,
+    "BlankTokenId": 49407,
+    "InputTokenLimit": 512,
+    "TokenizerLimit": 77,
+    "EmbeddingsLength": 768,
+    "ScaleFactor": 0.18215,
+    "ModelConfigurations": [
+      {
+        "Type": "Unet",
+        "DeviceId": 0,
+        "ExecutionProvider": "DirectML",
+        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\unet\\model.onnx"
+      },
+      {
+        "Type": "Tokenizer",
+        "DeviceId": 0,
+        "ExecutionProvider": "Cpu",
+        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\cliptokenizer.onnx"
+      },
+      {
+        "Type": "TextEncoder",
+        "DeviceId": 0,
+        "ExecutionProvider": "Cpu",
+        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\text_encoder\\model.onnx"
+      },
+      {
+        "Type": "VaeEncoder",
+        "DeviceId": 0,
+        "ExecutionProvider": "Cpu",
+        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\vae_encoder\\model.onnx"
+      },
+      {
+        "Type": "VaeDecoder",
+        "DeviceId": 0,
+        "ExecutionProvider": "Cpu",
+        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\vae_decoder\\model.onnx"
+      },
+      {
+        "Type": "SafetyChecker",
+        "IsDisabled": true,
+        "DeviceId": 0,
+        "ExecutionProvider": "Cpu",
+        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\safety_checker\\model.onnx"
+      }
+    ]
+  }
+}
+```
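A note on the configuration above: `AddOnnxStack()` loads this `OnnxStackConfig` section from `appsettings.json` for you. Purely to illustrate how the JSON maps onto the `OnnxStackConfig` class used later in this README, here is a minimal manual-binding sketch; the `OnnxStack.Core.Config` namespace and the use of the configuration binder (Microsoft.Extensions.Configuration.Json and .Binder packages) are assumptions, not API confirmed by this commit.

```csharp
// Sketch only: manually binding the "OnnxStackConfig" section shown above to the
// OnnxStackConfig class used later in this README. AddOnnxStack() normally does
// this for you; the namespace and binder usage here are assumptions.
using System;
using Microsoft.Extensions.Configuration;
using OnnxStack.Core.Config; // assumed namespace for OnnxStackConfig

var configuration = new ConfigurationBuilder()
    .SetBasePath(AppContext.BaseDirectory)
    .AddJsonFile("appsettings.json")
    .Build();

var onnxStackConfig = configuration
    .GetSection("OnnxStackConfig")
    .Get<OnnxStackConfig>();

Console.WriteLine($"{onnxStackConfig.Name}: {onnxStackConfig.ModelConfigurations.Count} model sessions configured");
```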
@@ -21,12 +93,19 @@ builder.Services.AddOnnxStack();
 // Create Configuration
 var onnxStackConfig = new OnnxStackConfig
 {
-    IsSafetyModelEnabled = false,
-    ExecutionProviderTarget = ExecutionProvider.DirectML,
-    OnnxUnetPath = "stable-diffusion-v1-5\\unet\\model.onnx",
-    OnnxVaeDecoderPath = "stable-diffusion-v1-5\\vae_decoder\\model.onnx",
-    OnnxTextEncoderPath = "stable-diffusion-v1-5\\text_encoder\\model.onnx",
-    OnnxSafetyModelPath = "stable-diffusion-v1-5\\safety_checker\\model.onnx"
+    Name = "OnnxStack",
+    TokenizerLimit = 77,
+    ModelConfigurations = new List<OnnxModelSessionConfig>
+    {
+        new OnnxModelSessionConfig
+        {
+            DeviceId = 0,
+            ExecutionProvider = ExecutionProvider.DirectML,
+            Type = OnnxModelType.Tokenizer,
+            OnnxModelPath = "clip_tokenizer.onnx",
+        }
+    }
 };

 // Create Service
@@ -38,14 +117,14 @@ var text = "Text To Tokenize";
 var inputTensor = new DenseTensor<string>(new string[] { text }, new int[] { 1 });
 var inputString = new List<NamedOnnxValue>
 {
-     NamedOnnxValue.CreateFromTensor("string_input", inputTensor)
+    NamedOnnxValue.CreateFromTensor("string_input", inputTensor)
 };

 // Create an InferenceSession from the Onnx clip tokenizer.
 // Run session and send the input data in to get inference output.
 using (var tokens = onnxModelService.RunInference(OnnxModelType.Tokenizer, inputString))
 {
-    var resultTensor = tokens.FirstElementAs<Tensor<long>>();
+    var resultTensor = tokens.ToArray();
 }

 ```
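Stitched together, the `OnnxStack.Core` fragments in this diff amount to roughly the following console sketch. The namespaces and the `OnnxModelService` constructor taking an `OnnxStackConfig` are assumptions based on the snippets above, not confirmed API.

```csharp
// Sketch only: the configuration + tokenizer inference fragments above, combined.
// Namespaces and the OnnxModelService constructor shape are assumptions.
using System;
using System.Collections.Generic;
using System.Linq;
using Microsoft.ML.OnnxRuntime;
using Microsoft.ML.OnnxRuntime.Tensors;
using OnnxStack.Core.Config;    // assumed namespace for OnnxStackConfig
using OnnxStack.Core.Services;  // assumed namespace for OnnxModelService

// Create Configuration (as in the diff above)
var onnxStackConfig = new OnnxStackConfig
{
    Name = "OnnxStack",
    TokenizerLimit = 77,
    ModelConfigurations = new List<OnnxModelSessionConfig>
    {
        new OnnxModelSessionConfig
        {
            DeviceId = 0,
            ExecutionProvider = ExecutionProvider.DirectML,
            Type = OnnxModelType.Tokenizer,
            OnnxModelPath = "clip_tokenizer.onnx",
        }
    }
};

// Create Service (constructor shape assumed)
var onnxModelService = new OnnxModelService(onnxStackConfig);

// Tokenize a prompt with the Onnx clip tokenizer, as in the inference snippet above.
var text = "Text To Tokenize";
var inputTensor = new DenseTensor<string>(new string[] { text }, new int[] { 1 });
var inputString = new List<NamedOnnxValue>
{
    NamedOnnxValue.CreateFromTensor("string_input", inputTensor)
};

using (var tokens = onnxModelService.RunInference(OnnxModelType.Tokenizer, inputString))
{
    var resultTensor = tokens.ToArray();
    Console.WriteLine($"Inference returned {resultTensor.Length} output value(s).");
}
```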

OnnxStack.StableDiffusion/README.md

Lines changed: 28 additions & 5 deletions
@@ -4,6 +4,26 @@
 
 ## Getting Started
 
+OnnxStack.StableDiffusion can be found via the NuGet package manager; download and install it.
+```
+PM> Install-Package OnnxStack.StableDiffusion
+```
+
+### Microsoft.ML.OnnxRuntime
+Depending on the devices you have and the platform you are running on, you will want to install the Microsoft.ML.OnnxRuntime package that best suits your needs.
+
+### CPU/GPU via Microsoft DirectML
+```
+PM> Install-Package Microsoft.ML.OnnxRuntime.DirectML
+```
+
+### GPU support for both NVIDIA and AMD?
+```
+PM> Install-Package Microsoft.ML.OnnxRuntime.Gpu
+```
+
 ### .NET Core Registration
 
 You can easily integrate `OnnxStack.StableDiffusion` into your application services layer. This registration process sets up the necessary services and loads the `appsettings.json` configuration.
@@ -18,11 +38,10 @@ builder.Services.AddOnnxStackStableDiffusion();
 
 ## .NET Console Application Example
 
-Required Nuget Packages
+Required NuGet packages for this example
 ```nuget
 Microsoft.Extensions.Hosting
 Microsoft.Extensions.Logging
-Microsoft.ML.OnnxRuntime.DirectML
 ```
 
 ```csharp
@@ -81,7 +100,10 @@ internal class AppService : IHostedService
     Prompt = prompt,
     NegativePrompt = negativePrompt,
     SchedulerType = SchedulerType.LMSScheduler,
-    InputImage = inputImageFile
+    InputImage = new InputImage
+    {
+        ImagePath = inputImageFile
+    }
 };
 
 var schedulerOptions = new SchedulerOptions
@@ -96,9 +118,10 @@ internal class AppService : IHostedService
 
 System.Console.WriteLine("Generating Image...");
 var outputFilename = Path.Combine(_outputDirectory, $"{schedulerOptions.Seed}_{promptOptions.SchedulerType}.png");
-var result = await _stableDiffusionService.TextToImageFile(promptOptions, schedulerOptions, outputFilename);
+var result = await _stableDiffusionService.GenerateAsImageAsync(promptOptions, schedulerOptions);
 if (result is not null)
 {
+    await result.SaveAsPngAsync(outputFilename);
     System.Console.WriteLine($"Image Created, FilePath: {outputFilename}");
 }
 }
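Pieced together from the hunks above, the updated portion of the console example reads roughly as follows. Everything here comes from this diff except the arguments passed to `GenerateAsImageAsync`, which are assumed to be the prompt and scheduler options defined earlier in the example.

```csharp
// Sketch only: the updated generation flow implied by the hunks above.
// _stableDiffusionService and _outputDirectory are fields of the hosted service
// shown earlier in the example; members not present in this diff are omitted.
var promptOptions = new PromptOptions
{
    Prompt = prompt,
    NegativePrompt = negativePrompt,
    SchedulerType = SchedulerType.LMSScheduler,
    InputImage = new InputImage
    {
        ImagePath = inputImageFile
    }
};

var schedulerOptions = new SchedulerOptions
{
    // Seed and other scheduler settings go here (not shown in this diff).
};

System.Console.WriteLine("Generating Image...");
var outputFilename = Path.Combine(_outputDirectory, $"{schedulerOptions.Seed}_{promptOptions.SchedulerType}.png");

// The commit replaces TextToImageFile(...) with GenerateAsImageAsync(...) and
// saves the returned image explicitly (argument names assumed).
var result = await _stableDiffusionService.GenerateAsImageAsync(promptOptions, schedulerOptions);
if (result is not null)
{
    await result.SaveAsPngAsync(outputFilename);
    System.Console.WriteLine($"Image Created, FilePath: {outputFilename}");
}
```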
