# It compares fixed-decimal types against the builtin Int and Float types of various sizes.
# The output is written to a .csv file in the same directory as this file.

using FixedPointDecimals
using Random
using BenchmarkTools, Statistics

# # TODO: remove this file once BenchmarkTools has a built-in solution for diffing two
# # @benchmarkable runs
# include("subtract-benchmarks.jl")

# Define a parent BenchmarkGroup to contain our suite.
# PkgBenchmark / BenchmarkTools convention: a top-level `SUITE` constant is what
# the benchmark runner discovers and executes.
const SUITE = BenchmarkGroup()

# Number of decimal digits carried by the FixedDecimal types under test.
# Declared `const` so reads from functions below are type-stable (a non-const
# global is typed `Any` and defeats specialization).
const decimal_precision = 2
# Render a type name as a fixed-width label so report columns line up.
# NOTE(review): the leading padding inside these string literals appears to have
# been collapsed by copy/paste — confirm the intended column widths against the
# repository version before relying on alignment.
type(T::Type{<:Union{Int32, Int64}}) = " $T"
type(T::Type{Int128}) = " $T"
type(::Type{FixedPointDecimals.FixedDecimal{T,f}}) where {T,f} = "FD{$T,$f}"
type(::Type{FixedPointDecimals.FixedDecimal{T,f}}) where {T<:Union{Int32,Int64},f} = "FD{ $T,$f}"
# Human-readable operation name used as a BenchmarkGroup key (hence String, not
# Symbol). The `identity1` wrapper reports as plain "identity".
opname(f) = string(Symbol(f))
opname(f::typeof(identity1)) = "identity"
|
48 | 51 | # --------- Define benchmark functions ------------- |
49 | 52 | # Some care is taken here to prevent the compiler from optimizing away the operations: |
|
71 | 74 | end |
72 | 75 | end |
73 | 76 |
|
# Lay out the (still empty) benchmark suite: one group per operation, each
# containing one subgroup per numeric type, tagged "diff" because the entry
# stored there later is the difference of two @benchmarkable specs.
for op in allops
    opgroup = BenchmarkGroup()
    SUITE[opname(op)] = opgroup
    for T in alltypes
        opgroup[type(T)] = BenchmarkGroup(["diff"])
    end
end
119 | 84 |
|
120 | | -results = perform_benchmark() |
121 | | - |
| 85 | +for op in allops |
| 86 | + println() |
| 87 | + println("$op") |
| 88 | + for T in alltypes |
| 89 | + print("$T ") |
| 90 | + |
| 91 | + N = 1 # _000 #_000 |
| 92 | + initial_value = zero(T) |
| 93 | + a = one(T) |
| 94 | + |
| 95 | + # For some reason this is necessary to eliminate mysterious "1 allocation" |
| 96 | + fbase = @eval (out::Ref{$T})->baseline($T, $a, $N, out) |
| 97 | + fbench = @eval (out::Ref{$T})->benchmark($T, $op, $a, $N, out) |
| 98 | + |
| 99 | + # Run the benchmark |
| 100 | + outbase = Ref(initial_value) |
| 101 | + bbase = @benchmarkable $fbase($outbase) evals=1 setup=($outbase[]=$initial_value) |
| 102 | + outbench = Ref(initial_value) |
| 103 | + bbench = @benchmarkable $fbench($outbench) evals=1 setup=($outbench[]=$initial_value) |
| 104 | + bdiff = bbench - bbase |
| 105 | + SUITE[opname(op)][type(T)]["diff"] = bdiff |
| 106 | + end |
122 | 107 | end |
0 commit comments