@@ -53,6 +53,7 @@ func BenchmarkSerializeNode(b *testing.B) {
 type benchSetCase struct {
 	kcount   int
 	bitwidth int
+	// flushInterval int
 }
 
 var benchSetCaseTable []benchSetCase
@@ -76,6 +77,9 @@ func init() {
 		7,
 		8,
 	}
+	// flushIntervals := []int{
+	// 	1,
+	// }
 	// bucketsize-aka-arraywidth? maybe someday.
 	for _, c := range kCounts {
 		for _, bw := range bitwidths {
@@ -150,9 +154,9 @@ func BenchmarkFill(b *testing.B) {
 //
 // The number of *additional* blocks per entry is reported.
 // This number is usually less than one, because the bulk flush means changes might be amortized.
-func BenchmarkSetBulk(b *testing.B) {
-	doBenchmarkSetSuite(b, false)
-}
+// func BenchmarkSetBulk(b *testing.B) {
+// 	doBenchmarkSetSuite(b, false)
+// }
 
 // BenchmarkSetIndividual is the same as BenchmarkSetBulk, but flushes more.
 // Flush happens per insert.
@@ -166,8 +170,9 @@ func BenchmarkSetIndividual(b *testing.B) {
 }
 
 func doBenchmarkSetSuite(b *testing.B, flushPer bool) {
-	for _, t := range benchSetCaseTable {
+	for j, t := range benchSetCaseTable {
 		b.Run(fmt.Sprintf("n=%dk/bitwidth=%d", t.kcount, t.bitwidth), func(b *testing.B) {
+			fmt.Printf("Case: %d, b.N=%d\n", j, b.N)
 			for i := 0; i < b.N; i++ {
 				r := rander{rand.New(rand.NewSource(int64(i)))}
 				blockstore := newMockBlocks()
0 commit comments