@@ -925,6 +925,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
             eq(self.bucket(index).as_ref())
         });
 
+        // Avoid `Option::map` because it bloats LLVM IR.
         match result {
             Some(index) => Some(unsafe { self.bucket(index) }),
             None => None,
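The `// Avoid \`Option::map\`` comment added above refers to a code-size pattern used throughout hashbrown: `Option::map` takes its closure as a generic parameter, so every call site instantiates an extra closure type in LLVM IR that the optimizer then has to inline away, while an explicit `match` lowers directly to the intended branch. A minimal standalone sketch of the two forms (the function and names here are hypothetical, not part of the commit):

```rust
/// Hypothetical example: doubles the value if present.
fn double(n: Option<u32>) -> Option<u32> {
    // `n.map(|v| v * 2)` behaves identically, but it monomorphizes
    // `Option::map` with a fresh closure type, emitting extra LLVM IR.
    // The explicit `match` produces the same result with less IR for
    // the optimizer to chew through.
    match n {
        Some(v) => Some(v * 2),
        None => None,
    }
}
```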
@@ -1255,30 +1256,29 @@ impl<A: Allocator + Clone> RawTableInner<A> {
         }
     }
 
-    /// Searches for an element in the table.
+    /// Searches for an element in the table. This uses dynamic dispatch to reduce the amount of
+    /// code generated, but it is eliminated by LLVM optimizations.
     #[inline]
     fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
-        unsafe {
-            let h2_hash = h2(hash);
-            let mut probe_seq = self.probe_seq(hash);
-
-            loop {
-                let group = Group::load(self.ctrl(probe_seq.pos));
+        let h2_hash = h2(hash);
+        let mut probe_seq = self.probe_seq(hash);
 
-                for bit in group.match_byte(h2_hash) {
-                    let index = (probe_seq.pos + bit) & self.bucket_mask;
+        loop {
+            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
 
-                    if likely(eq(index)) {
-                        return Some(index);
-                    }
-                }
+            for bit in group.match_byte(h2_hash) {
+                let index = (probe_seq.pos + bit) & self.bucket_mask;
 
-                if likely(group.match_empty().any_bit_set()) {
-                    return None;
+                if likely(eq(index)) {
+                    return Some(index);
                 }
+            }
 
-                probe_seq.move_next(self.bucket_mask);
+            if likely(group.match_empty().any_bit_set()) {
+                return None;
             }
+
+            probe_seq.move_next(self.bucket_mask);
         }
     }
 
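The rewritten doc comment describes the outlining pattern that makes `find_inner` non-generic: public entry points such as `find` stay generic over the equality closure, but they forward through `&mut dyn FnMut(usize) -> bool`, so the probe loop above is compiled once rather than once per closure type, and LLVM's inlining removes the dynamic call in optimized builds. A minimal sketch of the shape with hypothetical names (a linear scan stands in for the table probe):

```rust
/// Non-generic inner loop: compiled exactly once, regardless of how
/// many distinct closure types the callers use.
fn search_inner(len: usize, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
    (0..len).find(|&index| eq(index))
}

/// Thin generic wrapper: one small instantiation per closure type.
/// After inlining, LLVM sees the concrete closure behind the `dyn`
/// reference and devirtualizes the call.
#[inline]
fn search(items: &[u64], mut eq: impl FnMut(u64) -> bool) -> Option<usize> {
    search_inner(items.len(), &mut |index| eq(items[index]))
}

fn main() {
    let items = [3, 7, 42, 9];
    assert_eq!(search(&items, |v| v == 42), Some(2));
    assert_eq!(search(&items, |v| v > 100), None);
}
```

This mirrors how `find` in the first hunk wraps its equality closure in a `&mut |index| ...` adapter before calling `find_inner`.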