@@ -5,6 +5,7 @@ use core::iter::FusedIterator;
 use core::marker::PhantomData;
 use core::mem;
 use core::mem::ManuallyDrop;
+use core::ops::Range;
 use core::ptr::NonNull;
 use scopeguard::guard;
 
@@ -23,6 +24,17 @@ fn unlikely(b: bool) -> bool {
     b
 }
 
+#[cfg(feature = "nightly")]
+#[inline]
+unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
+    to.offset_from(from) as usize
+}
+#[cfg(not(feature = "nightly"))]
+#[inline]
+unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
+    (to as usize - from as usize) / mem::size_of::<T>()
+}
+
 // Use the SSE2 implementation if possible: it allows us to scan 16 buckets at
 // once instead of 8.
 #[cfg(all(
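The two `offset_from` variants above are intended to compute the same thing: the nightly version uses the `pointer::offset_from` intrinsic, while the stable fallback divides the byte distance by the element size. A standalone sanity check of the fallback (illustrative demo, not part of the patch):

```rust
use core::mem;

// Stable fallback: element distance between two pointers into the
// same array, derived from their byte distance.
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    (to as usize - from as usize) / mem::size_of::<T>()
}

fn main() {
    let xs = [10u64, 20, 30, 40];
    let from = xs.as_ptr();
    let to = unsafe { from.add(3) }; // three elements past the base
    assert_eq!(unsafe { offset_from(to, from) }, 3);
}
```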
@@ -299,16 +311,8 @@ impl<T> RawTable<T> {
 
     /// Returns the index of a bucket from a `Bucket`.
     #[inline]
-    #[cfg(feature = "nightly")]
     unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
-        bucket.ptr.as_ptr().offset_from(self.data.as_ptr()) as usize
-    }
-
-    /// Returns the index of a bucket from a `Bucket`.
-    #[inline]
-    #[cfg(not(feature = "nightly"))]
-    unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
-        (bucket.ptr.as_ptr() as usize - self.data.as_ptr() as usize) / mem::size_of::<T>()
+        offset_from(bucket.ptr.as_ptr(), self.data.as_ptr())
     }
 
     /// Returns a pointer to a control byte.
@@ -704,14 +708,8 @@ impl<T> RawTable<T> {
     /// struct, we have to make the `iter` method unsafe.
     #[inline]
     pub unsafe fn iter(&self) -> RawIter<T> {
-        let current_group = Group::load_aligned(self.ctrl.as_ptr())
-            .match_empty_or_deleted()
-            .invert();
         RawIter {
-            data: self.data.as_ptr(),
-            ctrl: self.ctrl.as_ptr(),
-            current_group,
-            end: self.ctrl(self.bucket_mask),
+            iter: RawIterRange::new(self.ctrl.as_ptr(), self.data.as_ptr(), 0..self.buckets()),
             items: self.items,
         }
     }
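`iter` now delegates the scanning to a `RawIterRange` covering the whole table (`0..self.buckets()`) and keeps only the item count for itself. The shape of that split, as a hypothetical standalone sketch (the `Counted` type is illustrative, not part of the patch): an inner iterator whose length is only bounded, wrapped by an outer one that maintains an exact count.

```rust
// Illustrative stand-in for RawIter wrapping RawIterRange: the wrapper
// adds an exact item count to an iterator that only knows an upper bound.
struct Counted<I> {
    iter: I,
    items: usize,
}

impl<I: Iterator> Iterator for Counted<I> {
    type Item = I::Item;

    fn next(&mut self) -> Option<I::Item> {
        let item = self.iter.next();
        if item.is_some() {
            self.items -= 1; // decrement on every yielded item
        }
        item
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.items, Some(self.items)) // exact, unlike the inner hint
    }
}

fn main() {
    // The inner iterator only knows an upper bound on its length...
    let inner = (0..8).filter(|n| n % 2 == 0);
    assert_eq!(inner.size_hint(), (0, Some(8)));
    // ...while the wrapper reports the exact remaining count.
    let wrapped = Counted { iter: inner, items: 4 };
    assert_eq!(wrapped.size_hint(), (4, Some(4)));
    assert_eq!(wrapped.count(), 4);
}
```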
@@ -819,33 +817,75 @@ impl<T> IntoIterator for RawTable<T> {
     }
 }
 
-/// Iterator which returns a raw pointer to every full bucket in the table.
-pub struct RawIter<T> {
+/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
+/// not track an item count.
+pub struct RawIterRange<T> {
     // Using *const here for covariance
     data: *const T,
     ctrl: *const u8,
     current_group: BitMask,
     end: *const u8,
-    items: usize,
 }
 
-unsafe impl<T> Send for RawIter<T> where T: Send {}
-unsafe impl<T> Sync for RawIter<T> where T: Sync {}
+impl<T> RawIterRange<T> {
+    /// Returns a `RawIterRange` covering a subset of a table.
+    ///
+    /// The start offset must be aligned to the group width.
+    #[inline]
+    unsafe fn new(
+        ctrl: *const u8,
+        data: *const T,
+        range: Range<usize>,
+    ) -> RawIterRange<T> {
+        debug_assert_eq!(range.start % Group::WIDTH, 0);
+        // `end` must be computed from the base pointer, before `ctrl` is
+        // advanced to the start of the range.
+        let end = ctrl.add(range.end);
+        let ctrl = ctrl.add(range.start);
+        let data = data.add(range.start);
+        let current_group = Group::load_aligned(ctrl).match_empty_or_deleted().invert();
+        RawIterRange {
+            data,
+            ctrl,
+            current_group,
+            end,
+        }
+    }
 
-impl<T> Clone for RawIter<T> {
+    /// Splits a `RawIterRange` into two halves.
+    ///
+    /// Returns `None` for the second half if the range is no larger than
+    /// the group width.
+    #[inline]
+    #[cfg(feature = "rayon")]
+    pub unsafe fn split(mut self) -> (RawIterRange<T>, Option<RawIterRange<T>>) {
+        let len = offset_from(self.end, self.ctrl);
+        debug_assert!(len.is_power_of_two());
+        if len <= Group::WIDTH {
+            (self, None)
+        } else {
+            debug_assert_eq!(len % (Group::WIDTH * 2), 0);
+            let mid = len / 2;
+            let tail = RawIterRange::new(self.ctrl, self.data, mid..len);
+            self.end = self.ctrl.add(mid);
+            (self, Some(tail))
+        }
+    }
+}
+
+unsafe impl<T> Send for RawIterRange<T> where T: Send {}
+unsafe impl<T> Sync for RawIterRange<T> where T: Sync {}
+
+impl<T> Clone for RawIterRange<T> {
     #[inline]
     fn clone(&self) -> Self {
-        RawIter {
+        RawIterRange {
             data: self.data,
             ctrl: self.ctrl,
             current_group: self.current_group,
             end: self.end,
-            items: self.items,
         }
     }
 }
 
-impl<T> Iterator for RawIter<T> {
+impl<T> Iterator for RawIterRange<T> {
     type Item = Bucket<T>;
 
     #[inline]
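`split` exists for the `rayon` feature: halving a power-of-two range repeatedly produces group-aligned sub-ranges that parallel workers can consume independently. The same splitting scheme on plain index ranges (hypothetical sketch; the group width of 16 matches the SSE2 case but is an assumption here):

```rust
use std::ops::Range;

const GROUP_WIDTH: usize = 16; // assumed group width (SSE2-sized)

/// Halve a power-of-two range; once it is down to a single group,
/// report that it can no longer be split.
fn split(range: Range<usize>) -> (Range<usize>, Option<Range<usize>>) {
    let len = range.end - range.start;
    debug_assert!(len.is_power_of_two());
    if len <= GROUP_WIDTH {
        (range, None)
    } else {
        // Both halves stay group-aligned because len is a power of two
        // strictly greater than the (power-of-two) group width.
        let mid = range.start + len / 2;
        (range.start..mid, Some(mid..range.end))
    }
}

fn main() {
    assert_eq!(split(0..64), (0..32, Some(32..64)));
    assert_eq!(split(32..64), (32..48, Some(48..64)));
    assert_eq!(split(0..16), (0..16, None)); // a single group: no split
}
```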
@@ -854,16 +894,11 @@ impl<T> Iterator for RawIter<T> {
         loop {
             if let Some(index) = self.current_group.lowest_set_bit() {
                 self.current_group = self.current_group.remove_lowest_bit();
-                self.items -= 1;
                 return Some(Bucket::from_ptr(self.data.add(index)));
             }
 
             self.ctrl = self.ctrl.add(Group::WIDTH);
             if self.ctrl >= self.end {
-                // We don't check against items == 0 here to allow the
-                // compiler to optimize away the item count entirely if the
-                // iterator length is never queried.
-                debug_assert_eq!(self.items, 0);
                 return None;
             }
 
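The loop itself is untouched apart from dropping the count bookkeeping: a group of control bytes is condensed into a bitmask with one bit per full bucket, and `next` pops the lowest set bit until the group is exhausted, then loads the next group. The bit trick on a plain `u16`, as a stand-in for `BitMask` (illustrative only):

```rust
fn main() {
    // One bit per full bucket in a 16-wide group: buckets 1, 2 and 5.
    let mut current_group: u16 = 0b0010_0110;
    let mut full_buckets = Vec::new();
    while current_group != 0 {
        let index = current_group.trailing_zeros() as usize; // lowest_set_bit
        current_group &= current_group - 1; // remove_lowest_bit
        full_buckets.push(index);
    }
    assert_eq!(full_buckets, [1, 2, 5]);
}
```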
@@ -875,6 +910,51 @@ impl<T> Iterator for RawIter<T> {
         }
     }
 
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // We don't have an item count, so just guess based on the range size.
+        (0, Some(unsafe { offset_from(self.end, self.ctrl) }))
+    }
+}
+
+impl<T> FusedIterator for RawIterRange<T> {}
+
+/// Iterator which returns a raw pointer to every full bucket in the table.
+pub struct RawIter<T> {
+    pub iter: RawIterRange<T>,
+    items: usize,
+}
+
+impl<T> Clone for RawIter<T> {
+    #[inline]
+    fn clone(&self) -> Self {
+        RawIter {
+            iter: self.iter.clone(),
+            items: self.items,
+        }
+    }
+}
+
+impl<T> Iterator for RawIter<T> {
+    type Item = Bucket<T>;
+
+    #[inline]
+    fn next(&mut self) -> Option<Bucket<T>> {
+        match self.iter.next() {
+            Some(b) => {
+                self.items -= 1;
+                Some(b)
+            }
+            None => {
+                // We don't check against items == 0 here to allow the
+                // compiler to optimize away the item count entirely if the
+                // iterator length is never queried.
+                debug_assert_eq!(self.items, 0);
+                None
+            }
+        }
+    }
+
     #[inline]
     fn size_hint(&self) -> (usize, Option<usize>) {
         (self.items, Some(self.items))
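The two `size_hint` implementations now split the responsibility: `RawIterRange` can only bound its length by the number of buckets left to scan, so it reports `(0, Some(remaining))`, while `RawIter` restores the exact `(items, Some(items))` hint from its count. The range-style hint has the same shape as the standard library's `filter`, which likewise cannot know how many candidates survive (illustrative comparison):

```rust
fn main() {
    // Four "buckets", two of them full.
    let buckets = [Some(1), None, Some(3), None];
    let full = buckets.iter().filter(|slot| slot.is_some());
    // Lower bound 0: every remaining bucket might be empty.
    // Upper bound 4: at most one item per remaining bucket.
    assert_eq!(full.size_hint(), (0, Some(4)));
}
```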