This repository was archived by the owner on Nov 27, 2024. It is now read-only.

Commit 99ee5fd ("Update README")
1 parent 513ab05

File tree: 2 files changed (+95, -38 lines)

OnnxStack.Core/README.md
Lines changed: 28 additions & 38 deletions
@@ -23,7 +23,7 @@ builder.Services.AddOnnxStack();
 ```
 
 ## Configuration example
-The `appsettings.json` is the easiest option for configuring model sets. Below is an example of `Stable Diffusion 1.5`. The example adds the necessary paths to each model file required for Stable Diffusion, as well as any model-specific configurations. Each model can be assigned to its own device, which is handy if you have only a small GPU. This way, you can offload only what you need. There are limitations depending on the version of the `Microsoft.ML.OnnxRuntime` package you are using, but in most cases, you can split the load between CPU and GPU.
+The `appsettings.json` is the easiest option for configuring model sets. Below is an example of a `clip tokenizer` model set.
 
 ```json
 {
@@ -36,50 +36,14 @@ The `appsettings.json` is the easiest option for configuring model sets. Below i
   "AllowedHosts": "*",
 
   "OnnxStackConfig": {
-    "Name": "StableDiffusion 1.5",
-    "PadTokenId": 49407,
-    "BlankTokenId": 49407,
-    "InputTokenLimit": 512,
+    "Name": "Clip Tokenizer",
     "TokenizerLimit": 77,
-    "EmbeddingsLength": 768,
-    "ScaleFactor": 0.18215,
     "ModelConfigurations": [
-      {
-        "Type": "Unet",
-        "DeviceId": 0,
-        "ExecutionProvider": "DirectML",
-        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\unet\\model.onnx"
-      },
       {
         "Type": "Tokenizer",
         "DeviceId": 0,
         "ExecutionProvider": "Cpu",
         "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\cliptokenizer.onnx"
-      },
-      {
-        "Type": "TextEncoder",
-        "DeviceId": 0,
-        "ExecutionProvider": "Cpu",
-        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\text_encoder\\model.onnx"
-      },
-      {
-        "Type": "VaeEncoder",
-        "DeviceId": 0,
-        "ExecutionProvider": "Cpu",
-        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\vae_encoder\\model.onnx"
-      },
-      {
-        "Type": "VaeDecoder",
-        "DeviceId": 0,
-        "ExecutionProvider": "Cpu",
-        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\vae_decoder\\model.onnx"
-      },
-      {
-        "Type": "SafetyChecker",
-        "IsDisabled": true,
-        "DeviceId": 0,
-        "ExecutionProvider": "Cpu",
-        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\safety_checker\\model.onnx"
       }
     ]
   }
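As an aside, the tokenizer entry declared above can also be exercised directly with `Microsoft.ML.OnnxRuntime`, independent of the OnnxStack service layer. Below is a minimal sketch, assuming the model's string input is named `string_input` (as the service-based example in the next hunk suggests) and that the first output holds the token ids; a CLIP tokenizer exported with onnxruntime-extensions may additionally require its custom operators to be registered on the session options.

```csharp
using System;
using System.Collections.Generic;
using System.Linq;
using Microsoft.ML.OnnxRuntime;
using Microsoft.ML.OnnxRuntime.Tensors;

// Path taken from the model set above; adjust to your local copy.
var modelPath = "D:\\Repositories\\stable-diffusion-v1-5\\cliptokenizer.onnx";

// A batch of one prompt, shaped [1], fed as a string tensor.
var inputTensor = new DenseTensor<string>(new[] { "Text To Tokenize" }, new[] { 1 });
var inputs = new List<NamedOnnxValue>
{
    NamedOnnxValue.CreateFromTensor("string_input", inputTensor)
};

using var session = new InferenceSession(modelPath);
using var results = session.Run(inputs);

// Assumption: the first output tensor holds the int64 token ids.
var tokenIds = results.First().AsEnumerable<long>().ToArray();
Console.WriteLine(string.Join(", ", tokenIds));
```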
@@ -90,6 +54,32 @@ The `appsettings.json` is the easiest option for configuring model sets. Below i
 
 ### Basic C# Example
 ```csharp
+
+// From DI
+IOnnxModelService _onnxModelService;
+
+
+// Tokenizer model example
+var text = "Text To Tokenize";
+var inputTensor = new DenseTensor<string>(new string[] { text }, new int[] { 1 });
+var inputString = new List<NamedOnnxValue>
+{
+    NamedOnnxValue.CreateFromTensor("string_input", inputTensor)
+};
+
+// Run inference on the clip tokenizer model,
+// passing the input data in to get the tokenized output.
+using (var tokens = _onnxModelService.RunInference(OnnxModelType.Tokenizer, inputString))
+{
+    var resultTensor = tokens.ToArray();
+}
+
+```
+
+
+
+### Basic C# Example (No DI)
+```csharp
 // Create Configuration
 var onnxStackConfig = new OnnxStackConfig
 {
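The `(No DI)` example above is cut off at the hunk boundary. For illustration only, here is a sketch of how that configuration object might continue, assuming the JSON keys above map one-to-one to properties on `OnnxStackConfig`, and that a model entry type and an execution-provider enum exist under the names used here (neither is shown in this commit):

```csharp
// Hypothetical mirror of the "Clip Tokenizer" appsettings.json block above;
// OnnxModelConfig and ExecutionProvider are assumed names, not from this commit.
var onnxStackConfig = new OnnxStackConfig
{
    Name = "Clip Tokenizer",
    TokenizerLimit = 77,
    ModelConfigurations = new List<OnnxModelConfig>
    {
        new OnnxModelConfig
        {
            Type = OnnxModelType.Tokenizer, // matches the RunInference call above
            DeviceId = 0,
            ExecutionProvider = ExecutionProvider.Cpu,
            OnnxModelPath = "D:\\Repositories\\stable-diffusion-v1-5\\cliptokenizer.onnx"
        }
    }
};
```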

OnnxStack.StableDiffusion/README.md
Lines changed: 67 additions & 0 deletions
@@ -132,4 +132,71 @@ internal class AppService : IHostedService
     return Task.CompletedTask;
   }
 }
+```
+
+
+## Configuration
+The `appsettings.json` is the easiest option for configuring model sets. Below is an example of `Stable Diffusion 1.5`.
+The example adds the necessary paths to each model file required for Stable Diffusion, as well as any model-specific configurations.
+Each model can be assigned to its own device, which is handy if you have only a small GPU. This way, you can offload only what you need. There are limitations depending on the version of the `Microsoft.ML.OnnxRuntime` package you are using, but in most cases, you can split the load between CPU and GPU.
+
+```json
+{
+  "Logging": {
+    "LogLevel": {
+      "Default": "Information",
+      "Microsoft.AspNetCore": "Warning"
+    }
+  },
+  "AllowedHosts": "*",
+
+  "OnnxStackConfig": {
+    "Name": "StableDiffusion 1.5",
+    "PadTokenId": 49407,
+    "BlankTokenId": 49407,
+    "InputTokenLimit": 512,
+    "TokenizerLimit": 77,
+    "EmbeddingsLength": 768,
+    "ScaleFactor": 0.18215,
+    "ModelConfigurations": [
+      {
+        "Type": "Unet",
+        "DeviceId": 0,
+        "ExecutionProvider": "DirectML",
+        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\unet\\model.onnx"
+      },
+      {
+        "Type": "Tokenizer",
+        "DeviceId": 0,
+        "ExecutionProvider": "Cpu",
+        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\cliptokenizer.onnx"
+      },
+      {
+        "Type": "TextEncoder",
+        "DeviceId": 0,
+        "ExecutionProvider": "Cpu",
+        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\text_encoder\\model.onnx"
+      },
+      {
+        "Type": "VaeEncoder",
+        "DeviceId": 0,
+        "ExecutionProvider": "Cpu",
+        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\vae_encoder\\model.onnx"
+      },
+      {
+        "Type": "VaeDecoder",
+        "DeviceId": 0,
+        "ExecutionProvider": "Cpu",
+        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\vae_decoder\\model.onnx"
+      },
+      {
+        "Type": "SafetyChecker",
+        "IsDisabled": true,
+        "DeviceId": 0,
+        "ExecutionProvider": "Cpu",
+        "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\safety_checker\\model.onnx"
+      }
+    ]
+  }
+}
 ```
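For completeness, this configuration is consumed through dependency injection. A minimal hosting sketch, assuming the `AddOnnxStack()` registration shown at the top of the Core README diff and the `IOnnxModelService` interface used in its C# example (the namespace and exact hosting pattern are assumptions, not shown in this commit):

```csharp
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using OnnxStack.Core; // assumed namespace for AddOnnxStack / IOnnxModelService

// Build a generic host; appsettings.json (including the "OnnxStackConfig"
// section above) is loaded by default, so the model set is available.
var builder = Host.CreateApplicationBuilder(args);
builder.Services.AddOnnxStack();

using var host = builder.Build();

// Resolve the model service that the README examples run inference through.
var modelService = host.Services.GetRequiredService<IOnnxModelService>();
```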
