Skip to content
This repository was archived by the owner on Nov 27, 2024. It is now read-only.

Commit 7616411

Browse files
committed
Remove MathNet.Numerics & NumSharp dependencies
1 parent e17e737 commit 7616411

File tree

10 files changed

+125
-137
lines changed

10 files changed

+125
-137
lines changed

OnnxStack.StableDiffusion/Helpers/ArrayHelpers.cs

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,5 +14,19 @@ public static float[] Linspace(float start, float end, int partitions, bool roun
1414
: result.Select(x => MathF.Round(x)).ToArray();
1515
}
1616

17+
18+
/// <summary>
/// Creates a float array containing the integer sequence [start, end) — NumPy arange-style.
/// </summary>
/// <param name="start">The inclusive start of the sequence.</param>
/// <param name="end">The exclusive end of the sequence.</param>
/// <returns>Floats from start (inclusive) up to end (exclusive).</returns>
public static float[] Range(int start, int end)
{
    // BUG FIX: Enumerable.Range takes (start, count), not (start, end).
    // The original passed `end` as the count, which is only correct when start == 0
    // (the current call sites happen to use 0, masking the bug).
    return Enumerable.Range(start, end - start)
        .Select(x => (float)x)
        .ToArray();
}
24+
25+
/// <summary>
/// Applies the natural logarithm element-wise to an array.
/// </summary>
/// <param name="array">The input values.</param>
/// <returns>A new array where each element is MathF.Log of the corresponding input.</returns>
public static float[] Log(float[] array)
{
    var result = new float[array.Length];
    for (int i = 0; i < array.Length; i++)
    {
        result[i] = MathF.Log(array[i]);
    }
    return result;
}
1731
}
1832
}
Lines changed: 71 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,71 @@
1+
using System;

namespace OnnxStack.StableDiffusion.Helpers
{
    internal class MathHelpers
    {
        // Hard cap on recursive subdivision so a pathological (e.g. discontinuous
        // or noisy) integrand cannot trigger a StackOverflowException.
        private const int MaxRecursionDepth = 50;

        /// <summary>
        /// Approximation of the definite integral of an analytic smooth function on a
        /// closed interval, using the adaptive Simpson's rule.
        /// </summary>
        /// <param name="function">The analytic smooth function to integrate.</param>
        /// <param name="start">The lower bound of the interval.</param>
        /// <param name="end">The upper bound of the interval.</param>
        /// <param name="epsilon">The expected absolute accuracy. (DOC FIX: previously
        /// described as "relative accuracy", but the value is applied as an absolute
        /// error tolerance in the subdivision test.)</param>
        /// <returns>The approximate value of the integral, as a float.</returns>
        public static float IntegrateOnClosedInterval(Func<double, double> function, double start, double end, double epsilon = 1e-4)
        {
            return (float)AdaptiveSimpson(function, start, end, epsilon);
        }


        /// <summary>
        /// Initializes the adaptive Simpson's rule: computes the first whole-interval
        /// Simpson estimate and hands off to the recursive refinement step.
        /// </summary>
        /// <param name="f">The integrand.</param>
        /// <param name="a">The lower bound.</param>
        /// <param name="b">The upper bound.</param>
        /// <param name="epsilon">The absolute error tolerance.</param>
        /// <returns>The approximate integral over [a, b].</returns>
        private static double AdaptiveSimpson(Func<double, double> f, double a, double b, double epsilon)
        {
            double c = (a + b) / 2.0;
            double h = b - a;
            double fa = f(a);
            double fb = f(b);
            double fc = f(c);
            // Simpson's rule over the whole interval: (h / 6) * (f(a) + 4 f(c) + f(b)).
            double s = (h / 6) * (fa + 4 * fc + fb);
            return AdaptiveSimpsonAux(f, a, b, epsilon, s, fa, fb, fc, MaxRecursionDepth);
        }


        /// <summary>
        /// Recursively applies Simpson's rule to each half interval, subdividing further
        /// only where the estimated error exceeds the tolerance.
        /// </summary>
        /// <param name="f">The integrand.</param>
        /// <param name="a">The lower bound of the current interval.</param>
        /// <param name="b">The upper bound of the current interval.</param>
        /// <param name="epsilon">The absolute error tolerance for this interval.</param>
        /// <param name="s">The whole-interval Simpson estimate for [a, b].</param>
        /// <param name="fa">Cached f(a).</param>
        /// <param name="fb">Cached f(b).</param>
        /// <param name="fc">Cached f((a + b) / 2).</param>
        /// <param name="depth">Remaining recursion depth; when exhausted the current
        /// extrapolated estimate is returned instead of subdividing further.</param>
        /// <returns>The refined approximate integral over [a, b].</returns>
        private static double AdaptiveSimpsonAux(Func<double, double> f, double a, double b, double epsilon, double s, double fa, double fb, double fc, int depth)
        {
            double c = (a + b) / 2.0;
            double h = b - a;
            double d = (a + c) / 2.0;
            double e = (c + b) / 2.0;
            double fd = f(d);
            double fe = f(e);
            // Each half interval has width h/2, so its Simpson factor is (h/2)/6 == h/12.
            double s1 = (h / 12) * (fa + 4 * fd + fc);
            double s2 = (h / 12) * (fc + 4 * fe + fb);
            double s_ = s1 + s2;
            // Richardson-style error estimate: the true error of s_ is about |s_ - s| / 15.
            // ROBUSTNESS FIX: also terminate when the depth budget is exhausted, so the
            // recursion is bounded even if the error never falls below the tolerance.
            if (depth <= 0 || Math.Abs(s_ - s) <= 15 * epsilon)
            {
                // Extrapolated value (one order more accurate than s_ alone).
                return s_ + (s_ - s) / 15.0;
            }
            return AdaptiveSimpsonAux(f, a, c, epsilon / 2, s1, fa, fc, fd, depth - 1)
                 + AdaptiveSimpsonAux(f, c, b, epsilon / 2, s2, fc, fb, fe, depth - 1);
        }
    }
}

OnnxStack.StableDiffusion/OnnxStack.StableDiffusion.csproj

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
<Project Sdk="Microsoft.NET.Sdk">
22

33
<PropertyGroup>
4-
<Version>0.31.0</Version>
4+
<Version>0.31.16</Version>
55
<TargetFramework>net7.0</TargetFramework>
66
<ImplicitUsings>disable</ImplicitUsings>
77
<Nullable>disable</Nullable>
@@ -50,12 +50,6 @@
5050
<ProjectReference Include="..\OnnxStack.Core\OnnxStack.Core.csproj" Condition=" '$(Configuration)' == 'Debug' OR '$(Configuration)'=='Debug-Nvidia'" />
5151
</ItemGroup>
5252

53-
54-
<ItemGroup>
55-
<PackageReference Include="MathNet.Numerics" Version="5.0.0" />
56-
<PackageReference Include="NumSharp" Version="0.30.0" />
57-
</ItemGroup>
58-
5953
<ItemGroup>
6054
<None Update="README.md">
6155
<Pack>True</Pack>

OnnxStack.StableDiffusion/Schedulers/StableDiffusion/DDIMScheduler.cs

Lines changed: 18 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -114,27 +114,26 @@ public override SchedulerStepResult Step(DenseTensor<float> modelOutput, int tim
114114
DenseTensor<float> predOriginalSample = null;
115115
if (Options.PredictionType == PredictionType.Epsilon)
116116
{
117-
var sampleBeta = sample.SubtractTensors(modelOutput.MultiplyTensorByFloat((float)Math.Sqrt(betaProdT)));
118-
predOriginalSample = sampleBeta.DivideTensorByFloat((float)Math.Sqrt(alphaProdT));
117+
var sampleBeta = sample.SubtractTensors(modelOutput.MultiplyTensorByFloat(MathF.Sqrt(betaProdT)));
118+
predOriginalSample = sampleBeta.DivideTensorByFloat(MathF.Sqrt(alphaProdT));
119119
predEpsilon = modelOutput;
120120
}
121121
else if (Options.PredictionType == PredictionType.Sample)
122122
{
123123
predOriginalSample = modelOutput;
124124
predEpsilon = sample.SubtractTensors(predOriginalSample
125-
.MultiplyTensorByFloat((float)Math.Sqrt(alphaProdT)))
126-
.DivideTensorByFloat((float)Math.Sqrt(betaProdT));
125+
.MultiplyTensorByFloat(MathF.Sqrt(alphaProdT)))
126+
.DivideTensorByFloat(MathF.Sqrt(betaProdT));
127127
}
128128
else if (Options.PredictionType == PredictionType.VariablePrediction)
129129
{
130-
var alphaSqrt = (float)Math.Sqrt(alphaProdT);
131-
var betaSqrt = (float)Math.Sqrt(betaProdT);
132-
predOriginalSample = sample
133-
.MultiplyTensorByFloat(alphaSqrt)
134-
.SubtractTensors(modelOutput.MultiplyTensorByFloat(betaSqrt));
135-
predEpsilon = modelOutput
136-
.MultiplyTensorByFloat(alphaSqrt)
137-
.AddTensors(sample.MultiplyTensorByFloat(betaSqrt));
130+
var tmp = sample.MultiplyTensorByFloat(MathF.Pow(alphaProdT, 0.5f));
131+
var tmp2 = modelOutput.MultiplyTensorByFloat(MathF.Pow(betaProdT, 0.5f));
132+
predOriginalSample = tmp.Subtract(tmp2);
133+
134+
var tmp3 = modelOutput.MultiplyTensorByFloat(MathF.Pow(alphaProdT, 0.5f));
135+
var tmp4 = sample.MultiplyTensorByFloat(MathF.Pow(betaProdT, 0.5f));
136+
predEpsilon = tmp3.Add(tmp4);
138137
}
139138

140139

@@ -154,24 +153,23 @@ public override SchedulerStepResult Step(DenseTensor<float> modelOutput, int tim
154153
//# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
155154
var eta = 0f;
156155
var variance = GetVariance(currentTimestep, previousTimestep);
157-
var stdDevT = eta * (float)Math.Sqrt(variance);
156+
var stdDevT = eta * MathF.Pow(variance, 0.5f);
158157

159158
var useClippedModelOutput = false;
160159
if (useClippedModelOutput)
161160
{
162161
//# the pred_epsilon is always re-derived from the clipped x_0 in Glide
163162
predEpsilon = sample
164-
.SubtractTensors(predOriginalSample.MultiplyTensorByFloat((float)Math.Sqrt(alphaProdT)))
165-
.DivideTensorByFloat((float)Math.Sqrt(betaProdT));
163+
.SubtractTensors(predOriginalSample.MultiplyTensorByFloat(MathF.Pow(alphaProdT, 0.5f)))
164+
.DivideTensorByFloat(MathF.Pow(betaProdT, 0.5f));
166165
}
167166

168167

169168
//# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
170-
var predSampleDirection = predEpsilon.MultiplyTensorByFloat((float)Math.Sqrt(1f - alphaProdTPrev - Math.Pow(stdDevT, 2f)));
171-
169+
var predSampleDirection = predEpsilon.MultiplyTensorByFloat(MathF.Pow(1.0f - alphaProdTPrev - MathF.Pow(stdDevT, 2f), 0.5f));
172170

173171
//# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
174-
var prevSample = predSampleDirection.AddTensors(predOriginalSample.MultiplyTensorByFloat((float)Math.Sqrt(alphaProdTPrev)));
172+
var prevSample = predSampleDirection.AddTensors(predOriginalSample.MultiplyTensorByFloat(MathF.Pow(alphaProdTPrev, 0.5f)));
175173

176174
if (eta > 0)
177175
prevSample = prevSample.AddTensors(CreateRandomSample(modelOutput.Dimensions).MultiplyTensorByFloat(stdDevT));
@@ -192,8 +190,8 @@ public override DenseTensor<float> AddNoise(DenseTensor<float> originalSamples,
192190
// Ref: https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_ddpm.py#L456
193191
int timestep = timesteps[0];
194192
float alphaProd = _alphasCumProd[timestep];
195-
float sqrtAlpha = (float)Math.Sqrt(alphaProd);
196-
float sqrtOneMinusAlpha = (float)Math.Sqrt(1.0f - alphaProd);
193+
float sqrtAlpha = MathF.Sqrt(alphaProd);
194+
float sqrtOneMinusAlpha = MathF.Sqrt(1.0f - alphaProd);
197195

198196
return noise
199197
.MultiplyTensorByFloat(sqrtOneMinusAlpha)

OnnxStack.StableDiffusion/Schedulers/StableDiffusion/DDPMScheduler.cs

Lines changed: 1 addition & 83 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
11
using Microsoft.ML.OnnxRuntime.Tensors;
2-
using NumSharp;
32
using OnnxStack.Core;
43
using OnnxStack.StableDiffusion.Config;
54
using OnnxStack.StableDiffusion.Enums;
@@ -115,7 +114,7 @@ public override SchedulerStepResult Step(DenseTensor<float> modelOutput, int tim
115114
if (Options.Thresholding)
116115
{
117116
// TODO: https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_ddpm.py#L322
118-
predOriginalSample = ThresholdSample(predOriginalSample);
117+
// predOriginalSample = ThresholdSample(predOriginalSample);
119118
}
120119
else if (Options.ClipSample)
121120
{
@@ -255,87 +254,6 @@ private float GetVariance(int timestep, float predictedVariance = 0f)
255254
}
256255

257256

258-
/// <summary>
259-
/// Thresholds the sample.
260-
/// </summary>
261-
/// <param name="input">The input.</param>
262-
/// <param name="dynamicThresholdingRatio">The dynamic thresholding ratio.</param>
263-
/// <param name="sampleMaxValue">The sample maximum value.</param>
264-
/// <returns></returns>
265-
private DenseTensor<float> ThresholdSample(DenseTensor<float> input, float dynamicThresholdingRatio = 0.995f, float sampleMaxValue = 1f)
266-
{
267-
var sample = new NDArray(input.ToArray(), new Shape(input.Dimensions.ToArray()));
268-
var batch_size = sample.shape[0];
269-
var channels = sample.shape[1];
270-
var height = sample.shape[2];
271-
var width = sample.shape[3];
272-
273-
// Flatten sample for doing quantile calculation along each image
274-
var flatSample = sample.reshape(batch_size, channels * height * width);
275-
276-
// Calculate the absolute values of the sample
277-
var absSample = np.abs(flatSample);
278-
279-
// Calculate the quantile for each row
280-
var quantiles = new List<float>();
281-
for (int i = 0; i < batch_size; i++)
282-
{
283-
var row = absSample[$"{i},:"].MakeGeneric<float>();
284-
var percentileValue = CalculatePercentile(row, dynamicThresholdingRatio);
285-
percentileValue = Math.Clamp(percentileValue, 1f, sampleMaxValue);
286-
quantiles.Add(percentileValue);
287-
}
288-
289-
// Create an NDArray from quantiles
290-
var quantileArray = np.array(quantiles.ToArray());
291-
292-
// Calculate the thresholded sample
293-
var sExpanded = np.expand_dims(quantileArray, 1); // Expand to match the sample shape
294-
var negSExpanded = np.negative(sExpanded); // Get the negation of sExpanded
295-
var thresholdedSample = sample - negSExpanded; // Element-wise subtraction
296-
thresholdedSample = np.maximum(thresholdedSample, negSExpanded); // Ensure values are not less than -sExpanded
297-
thresholdedSample = np.minimum(thresholdedSample, sExpanded); // Ensure values are not greater than sExpanded
298-
thresholdedSample = thresholdedSample / sExpanded;
299-
300-
// Reshape to the original shape
301-
thresholdedSample = thresholdedSample.reshape(batch_size, channels, height, width);
302-
303-
return new DenseTensor<float>(thresholdedSample.ToArray<float>(), thresholdedSample.shape);
304-
}
305-
306-
307-
/// <summary>
308-
/// Calculates the percentile.
309-
/// </summary>
310-
/// <param name="data">The data.</param>
311-
/// <param name="percentile">The percentile.</param>
312-
/// <returns></returns>
313-
private float CalculatePercentile(NDArray data, float percentile)
314-
{
315-
// Sort the data indices in ascending order
316-
var sortedIndices = np.argsort<float>(data);
317-
318-
// Calculate the index corresponding to the percentile
319-
var index = (int)Math.Ceiling(percentile / 100f * (data.Shape[0] - 1));
320-
321-
// Retrieve the value at the calculated index
322-
var percentileValue = data[sortedIndices[index]];
323-
324-
return percentileValue.GetSingle();
325-
}
326-
327-
328-
/// <summary>
329-
/// Determines whether the VarianceType is learned.
330-
/// </summary>
331-
/// <returns>
332-
/// <c>true</c> if the VarianceType is learned; otherwise, <c>false</c>.
333-
/// </returns>
334-
private bool IsVarianceTypeLearned()
335-
{
336-
return Options.VarianceType == VarianceType.Learned || Options.VarianceType == VarianceType.LearnedRange;
337-
}
338-
339257
protected override void Dispose(bool disposing)
340258
{
341259
_alphasCumProd = null;

OnnxStack.StableDiffusion/Schedulers/StableDiffusion/EulerAncestralScheduler.cs

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,6 @@
11
using Microsoft.ML.OnnxRuntime.Tensors;
2-
using NumSharp;
32
using OnnxStack.Core;
43
using OnnxStack.StableDiffusion.Config;
5-
using OnnxStack.StableDiffusion.Enums;
64
using OnnxStack.StableDiffusion.Helpers;
75
using System;
86
using System.Collections.Generic;
@@ -55,14 +53,14 @@ protected override int[] SetTimesteps()
5553
{
5654
var sigmas = _sigmas.ToArray();
5755
var timesteps = GetTimesteps();
58-
var log_sigmas = np.log(sigmas).ToArray<float>();
59-
var range = np.arange(0, (float)_sigmas.Length).ToArray<float>();
56+
var logSigmas = ArrayHelpers.Log(sigmas);
57+
var range = ArrayHelpers.Range(0, _sigmas.Length);
6058
sigmas = Interpolate(timesteps, range, _sigmas);
6159

6260
if (Options.UseKarrasSigmas)
6361
{
6462
sigmas = ConvertToKarras(sigmas);
65-
timesteps = SigmaToTimestep(sigmas, log_sigmas);
63+
timesteps = SigmaToTimestep(sigmas, logSigmas);
6664
}
6765

6866
_sigmas = sigmas

OnnxStack.StableDiffusion/Schedulers/StableDiffusion/EulerScheduler.cs

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
11
using Microsoft.ML.OnnxRuntime.Tensors;
2-
using NumSharp;
32
using OnnxStack.Core;
43
using OnnxStack.StableDiffusion.Config;
54
using OnnxStack.StableDiffusion.Enums;
@@ -55,19 +54,20 @@ protected override int[] SetTimesteps()
5554
{
5655
var sigmas = _sigmas.ToArray();
5756
var timesteps = GetTimesteps();
58-
var log_sigmas = np.log(sigmas).ToArray<float>();
59-
var range = np.arange(0, (float)_sigmas.Length).ToArray<float>();
57+
var logSigmas = ArrayHelpers.Log(sigmas);
58+
var range = ArrayHelpers.Range(0, sigmas.Length);
6059

6160
// TODO: Implement "interpolation_type"
62-
var interpolation_type = "linear";
63-
sigmas = interpolation_type == "log_linear"
64-
? np.exp(np.linspace(np.log(sigmas.Last()), np.log(sigmas.First()), timesteps.Length + 1)).ToArray<float>()
65-
: Interpolate(timesteps, range, _sigmas);
61+
//var interpolation_type = "linear";
62+
//sigmas = interpolation_type == "log_linear"
63+
// ? np.exp(np.linspace(np.log(sigmas.Last()), np.log(sigmas.First()), timesteps.Length + 1)).ToArray<float>()
64+
// : Interpolate(timesteps, range, _sigmas);
6665

66+
sigmas = Interpolate(timesteps, range, _sigmas);
6767
if (Options.UseKarrasSigmas)
6868
{
6969
sigmas = ConvertToKarras(sigmas);
70-
timesteps = SigmaToTimestep(sigmas, log_sigmas);
70+
timesteps = SigmaToTimestep(sigmas, logSigmas);
7171
}
7272

7373
_sigmas = sigmas

OnnxStack.StableDiffusion/Schedulers/StableDiffusion/KDPM2Scheduler.cs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
using Microsoft.ML.OnnxRuntime.Tensors;
2-
using NumSharp;
32
using OnnxStack.Core;
43
using OnnxStack.StableDiffusion.Config;
54
using OnnxStack.StableDiffusion.Enums;
5+
using OnnxStack.StableDiffusion.Helpers;
66
using System;
77
using System.Collections.Generic;
88
using System.Linq;
@@ -63,8 +63,8 @@ protected override int[] SetTimesteps()
6363
// Create timesteps based on the specified strategy
6464
var sigmas = _sigmas.ToArray();
6565
var timesteps = GetTimesteps();
66-
var logSigmas = np.log(sigmas).ToArray<float>();
67-
var range = np.arange(0, (float)_sigmas.Length).ToArray<float>();
66+
var logSigmas = ArrayHelpers.Log(sigmas);
67+
var range = ArrayHelpers.Range(0, _sigmas.Length);
6868
sigmas = Interpolate(timesteps, range, _sigmas);
6969

7070
if (Options.UseKarrasSigmas)

0 commit comments

Comments
 (0)