@@ -28,16 +28,9 @@ static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context *ctx)
 
 static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, void **prealloc) {
 #ifndef USE_ECMULT_STATIC_PRECOMPUTATION
-#ifdef USE_COMB
     secp256k1_ge prec[COMB_POINTS_TOTAL + COMB_OFFSET];
     secp256k1_gej u, sum;
     int block, index, spacing, stride, tooth;
-#else
-    secp256k1_ge prec[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G];
-    secp256k1_gej gj;
-    secp256k1_gej nums_gej;
-    int i, j;
-#endif
     size_t const prealloc_size = SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
     void* const base = *prealloc;
 #endif
@@ -46,7 +39,6 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx
         return;
     }
 #ifndef USE_ECMULT_STATIC_PRECOMPUTATION
-#ifdef USE_COMB
     ctx->prec = (secp256k1_ge_storage (*)[COMB_BLOCKS][COMB_POINTS])manual_alloc(prealloc, prealloc_size, base, prealloc_size);
 
     /* get the generator */
@@ -95,72 +87,12 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx
     ctx->offset = prec[COMB_POINTS_TOTAL];
 #endif
 
-#else
-    ctx->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])manual_alloc(prealloc, prealloc_size, base, prealloc_size);
-
-    /* get the generator */
-    secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
-
-    /* Construct a group element with no known corresponding scalar (nothing up my sleeve). */
-    {
-        static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
-        secp256k1_fe nums_x;
-        secp256k1_ge nums_ge;
-        int r;
-        r = secp256k1_fe_set_b32(&nums_x, nums_b32);
-        (void)r;
-        VERIFY_CHECK(r);
-        r = secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0);
-        (void)r;
-        VERIFY_CHECK(r);
-        secp256k1_gej_set_ge(&nums_gej, &nums_ge);
-        /* Add G to make the bits in x uniformly distributed. */
-        secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, &secp256k1_ge_const_g, NULL);
-    }
-
-    /* compute prec. */
-    {
-        secp256k1_gej precj[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G]; /* Jacobian versions of prec. */
-        secp256k1_gej gbase;
-        secp256k1_gej numsbase;
-        gbase = gj; /* PREC_G^j * G */
-        numsbase = nums_gej; /* 2^j * nums. */
-        for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
-            /* Set precj[j*PREC_G .. j*PREC_G+(PREC_G-1)] to (numsbase, numsbase + gbase, ..., numsbase + (PREC_G-1)*gbase). */
-            precj[j*ECMULT_GEN_PREC_G] = numsbase;
-            for (i = 1; i < ECMULT_GEN_PREC_G; i++) {
-                secp256k1_gej_add_var(&precj[j*ECMULT_GEN_PREC_G + i], &precj[j*ECMULT_GEN_PREC_G + i - 1], &gbase, NULL);
-            }
-            /* Multiply gbase by PREC_G. */
-            for (i = 0; i < ECMULT_GEN_PREC_B; i++) {
-                secp256k1_gej_double_var(&gbase, &gbase, NULL);
-            }
-            /* Multiply numbase by 2. */
-            secp256k1_gej_double_var(&numsbase, &numsbase, NULL);
-            if (j == ECMULT_GEN_PREC_N - 2) {
-                /* In the last iteration, numsbase is (1 - 2^j) * nums instead. */
-                secp256k1_gej_neg(&numsbase, &numsbase);
-                secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
-            }
-        }
-        secp256k1_ge_set_all_gej_var(prec, precj, ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G);
-    }
-    for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
-        for (i = 0; i < ECMULT_GEN_PREC_G; i++) {
-            secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*ECMULT_GEN_PREC_G + i]);
-        }
-    }
-#endif
 #else
     (void)prealloc;
-#if USE_COMB
     ctx->prec = (secp256k1_ge_storage (*)[COMB_BLOCKS][COMB_POINTS])secp256k1_ecmult_gen_ctx_prec;
 #if COMB_OFFSET
     secp256k1_ge_from_storage(&ctx->offset, &secp256k1_ecmult_gen_ctx_offset);
 #endif
-#else
-    ctx->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])secp256k1_ecmult_static_context;
-#endif
 #endif
     secp256k1_ecmult_gen_blind(ctx, NULL);
 }
@@ -171,28 +103,20 @@ static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_cont
 
 static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context *src) {
 #ifndef USE_ECMULT_STATIC_PRECOMPUTATION
-#ifdef USE_COMB
     if (src->prec != NULL) {
         /* We cast to void* first to suppress a -Wcast-align warning. */
         dst->prec = (secp256k1_ge_storage (*)[COMB_BLOCKS][COMB_POINTS])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src));
     }
 #if COMB_OFFSET
     dst->offset = src->offset;
 #endif
-#else
-    if (src->prec != NULL) {
-        dst->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src));
-    }
-#endif
 #endif
     (void)dst, (void)src;
 }
 
 static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context *ctx) {
-#ifdef USE_COMB
 #if COMB_OFFSET
     secp256k1_ge_clear(&ctx->offset);
-#endif
 #endif
     secp256k1_scalar_clear(&ctx->blind);
     secp256k1_gej_clear(&ctx->initial);
@@ -205,8 +129,6 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25
     secp256k1_scalar gnb;
     uint32_t bits;
 
-#ifdef USE_COMB
-
     uint32_t abs, bit_pos, block, comb_off, index, sign;
 #if !COMB_GROUPED
     uint32_t bit, tooth;
@@ -245,6 +167,16 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25
             VERIFY_CHECK(abs < COMB_POINTS);
 
             for (index = 0; index < COMB_POINTS; ++index) {
+                /** This uses a conditional move to avoid any secret data in array indexes.
+                 *   _Any_ use of secret indexes has been demonstrated to result in timing
+                 *   sidechannels, even when the cache-line access patterns are uniform.
+                 *  See also:
+                 *   "A word of warning", CHES 2013 Rump Session, by Daniel J. Bernstein and Peter Schwabe
+                 *    (https://cryptojedi.org/peter/data/chesrump-20130822.pdf) and
+                 *   "Cache Attacks and Countermeasures: the Case of AES", RSA 2006,
+                 *    by Dag Arne Osvik, Adi Shamir, and Eran Tromer
+                 *    (http://www.tau.ac.il/~tromer/papers/cache.pdf)
+                 */
                 secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[block][index], index == abs);
 
 
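The comment introduced in this hunk explains why the table is scanned in full with secp256k1_ge_storage_cmov rather than indexed directly by the secret value `abs`. Below is a minimal, self-contained sketch of the same constant-time selection pattern on plain integers; the helper names are illustrative only and not part of the library's API.

```c
#include <stdint.h>
#include <stdio.h>

/* Constant-time conditional move: copy *src into *dst iff flag is 1.
 * The mask is all-ones or all-zeroes, so no branch depends on flag. */
static void ct_cmov_u32(uint32_t *dst, const uint32_t *src, int flag) {
    uint32_t mask = (uint32_t)(-(int32_t)flag); /* 0x00000000 or 0xFFFFFFFF */
    *dst = (*dst & ~mask) | (*src & mask);
}

/* Constant-time table lookup: every entry is touched regardless of which
 * index is wanted, mirroring the loop over COMB_POINTS above. */
static uint32_t ct_table_lookup(const uint32_t *table, size_t len, uint32_t secret_index) {
    uint32_t result = 0;
    size_t i;
    for (i = 0; i < len; i++) {
        ct_cmov_u32(&result, &table[i], i == secret_index);
    }
    return result;
}

int main(void) {
    uint32_t table[8] = {10, 11, 12, 13, 14, 15, 16, 17};
    printf("%u\n", (unsigned)ct_table_lookup(table, 8, 5)); /* prints 15 */
    return 0;
}
```

Every iteration reads every entry, so the memory access pattern is independent of the secret index; only the mask decides which value survives.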
@@ -267,32 +199,6 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25
         abs = 0;
         sign = 0;
 
-#else
-    int i, j;
-    memset(&adds, 0, sizeof(adds));
-    *r = ctx->initial;
-    /* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
-    secp256k1_scalar_add(&gnb, gn, &ctx->blind);
-    add.infinity = 0;
-    for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
-        bits = secp256k1_scalar_get_bits(&gnb, j * ECMULT_GEN_PREC_B, ECMULT_GEN_PREC_B);
-        for (i = 0; i < ECMULT_GEN_PREC_G; i++) {
-            /** This uses a conditional move to avoid any secret data in array indexes.
-             *   _Any_ use of secret indexes has been demonstrated to result in timing
-             *   sidechannels, even when the cache-line access patterns are uniform.
-             *  See also:
-             *   "A word of warning", CHES 2013 Rump Session, by Daniel J. Bernstein and Peter Schwabe
-             *    (https://cryptojedi.org/peter/data/chesrump-20130822.pdf) and
-             *   "Cache Attacks and Countermeasures: the Case of AES", RSA 2006,
-             *    by Dag Arne Osvik, Adi Shamir, and Eran Tromer
-             *    (https://www.tau.ac.il/~tromer/papers/cache.pdf)
-             */
-            secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits);
-        }
-        secp256k1_ge_from_storage(&add, &adds);
-        secp256k1_gej_add_ge(r, r, &add);
-    }
-#endif
     bits = 0;
     secp256k1_ge_clear(&add);
     memset(&adds, 0, sizeof(adds));
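For reference, the branch deleted in this hunk consumed the blinded scalar in ECMULT_GEN_PREC_N fixed windows of ECMULT_GEN_PREC_B bits, selecting one precomputed multiple per window. Here is a toy sketch of that window decomposition on a 32-bit integer; the names and window sizes are illustrative, not the library's constants.

```c
#include <stdint.h>
#include <stdio.h>

/* Split a 32-bit "scalar" into fixed windows of WINDOW_BITS bits and
 * recombine them, mirroring how the deleted loop consumed gnb window
 * by window via secp256k1_scalar_get_bits. */
#define WINDOW_BITS 4u
#define NUM_WINDOWS (32u / WINDOW_BITS)

int main(void) {
    uint32_t scalar = 0xDEADBEEFu;
    uint32_t recombined = 0;
    uint32_t j;
    for (j = 0; j < NUM_WINDOWS; j++) {
        uint32_t window = (scalar >> (j * WINDOW_BITS)) & ((1u << WINDOW_BITS) - 1u);
        /* The real code would add the table entry prec[j][window] here. */
        recombined |= window << (j * WINDOW_BITS);
    }
    printf("%s\n", recombined == scalar ? "ok" : "mismatch"); /* prints ok */
    return 0;
}
```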
@@ -301,9 +207,7 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25
 
 /* Setup blinding values for secp256k1_ecmult_gen. */
 static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const unsigned char *seed32) {
-#ifdef USE_COMB
     int spacing;
-#endif
     secp256k1_scalar b;
     secp256k1_gej gb;
     secp256k1_fe s;
@@ -316,13 +220,11 @@ static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const
         secp256k1_gej_set_ge(&ctx->initial, &secp256k1_ge_const_g);
         secp256k1_gej_neg(&ctx->initial, &ctx->initial);
         secp256k1_scalar_set_int(&ctx->blind, 1);
-#ifdef USE_COMB
         for (spacing = 1; spacing < COMB_SPACING; ++spacing) {
             secp256k1_scalar_add(&ctx->blind, &ctx->blind, &ctx->blind);
         }
 #if COMB_OFFSET
         secp256k1_gej_add_ge(&ctx->initial, &ctx->initial, &ctx->offset);
-#endif
 #endif
     }
     /* The prior blinding value (if not reset) is chained forward by including it in the hash. */
@@ -355,13 +257,11 @@ static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const
     secp256k1_scalar_negate(&b, &b);
     ctx->blind = b;
     ctx->initial = gb;
-#ifdef USE_COMB
     for (spacing = 1; spacing < COMB_SPACING; ++spacing) {
         secp256k1_scalar_add(&ctx->blind, &ctx->blind, &ctx->blind);
     }
 #if COMB_OFFSET
     secp256k1_gej_add_ge(&ctx->initial, &ctx->initial, &ctx->offset);
-#endif
 #endif
     secp256k1_scalar_clear(&b);
     secp256k1_gej_clear(&gb);
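The blinding set up by secp256k1_ecmult_gen_blind follows the identity (n - b)G + bG = nG noted in the comment removed above: after the negation shown in this hunk, ctx->blind holds -b and ctx->initial holds bG, so the table walk never sees the raw scalar. The following toy illustration of that cancellation uses integers modulo a small prime in place of the group; the names and the tiny modulus are illustrative only, not the library's API.

```c
#include <stdint.h>
#include <stdio.h>

/* Toy group: "points" are residues mod ORDER and "k*G" is k*G_TOY mod ORDER.
 * This only illustrates the algebra (n - b)*G + b*G == n*G used for blinding;
 * the real code operates on secp256k1 group elements. */
#define ORDER 101u
#define G_TOY 5u

static uint32_t toy_mul_g(uint32_t k) { return (k * G_TOY) % ORDER; }

int main(void) {
    uint32_t n = 42;                           /* secret scalar */
    uint32_t b = 17;                           /* random blinding value */
    uint32_t blind = (ORDER - b) % ORDER;      /* plays the role of ctx->blind = -b   */
    uint32_t initial = toy_mul_g(b);           /* plays the role of ctx->initial = bG */

    /* At multiplication time: start from initial and add (n + blind)*G. */
    uint32_t result = (initial + toy_mul_g((n + blind) % ORDER)) % ORDER;

    printf("blinded: %u, direct: %u\n", (unsigned)result, (unsigned)toy_mul_g(n)); /* both 8 */
    return 0;
}
```

In the comb code the stored values are adjusted further, as the kept hunks show: the blind is doubled COMB_SPACING - 1 times and the COMB_OFFSET point is folded into ctx->initial, but the underlying cancellation idea is the same.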