@@ -796,7 +796,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
     {
         let index = self.bucket_index(&bucket);
         let old_ctrl = *self.table.ctrl(index);
-        debug_assert!(is_full(old_ctrl));
+        debug_assert!(self.is_bucket_full(index));
         let old_growth_left = self.table.growth_left;
         let item = self.remove(bucket);
         if let Some(new_item) = f(item) {
@@ -928,6 +928,16 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
         self.table.bucket_mask + 1
     }
 
+    /// Checks whether the bucket at `index` is full.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure `index` is less than the number of buckets.
+    #[inline]
+    pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
+        self.table.is_bucket_full(index)
+    }
+
     /// Returns an iterator over every element in the table. It is up to
     /// the caller to ensure that the `RawTable` outlives the `RawIter`.
     /// Because we cannot make the `next` method unsafe on the `RawIter`
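The new public method gives `RawTable` users a debug-checked way to test a raw bucket index before dereferencing it. A minimal usage sketch, assuming an existing `table: RawTable<T>`; the loop shape mirrors the table's own internal iteration and is illustrative, not part of this commit:

    // Walk every bucket and touch only the full ones. Keeping `i` in
    // 0..table.buckets() satisfies the method's safety requirement.
    unsafe {
        for i in 0..table.buckets() {
            if table.is_bucket_full(i) {
                let item = table.bucket(i).as_ref();
                // ... read `item` here ...
            }
        }
    }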
@@ -1148,7 +1158,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
                     // table. This second scan is guaranteed to find an empty
                     // slot (due to the load factor) before hitting the trailing
                     // control bytes (containing EMPTY).
-                    if unlikely(is_full(*self.ctrl(result))) {
+                    if unlikely(self.is_bucket_full(result)) {
                         debug_assert!(self.bucket_mask < Group::WIDTH);
                         debug_assert_ne!(probe_seq.pos, 0);
                         return Group::load_aligned(self.ctrl(0))
@@ -1329,6 +1339,17 @@ impl<A: Allocator + Clone> RawTableInner<A> {
         self.bucket_mask + 1
     }
 
+    /// Checks whether the bucket at `index` is full.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure `index` is less than the number of buckets.
+    #[inline]
+    unsafe fn is_bucket_full(&self, index: usize) -> bool {
+        debug_assert!(index < self.buckets());
+        is_full(*self.ctrl(index))
+    }
+
     #[inline]
     fn num_ctrl_bytes(&self) -> usize {
         self.bucket_mask + 1 + Group::WIDTH
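For reference, the wrapped `is_full` predicate is a plain control-byte test: full buckets store a seven-bit hash fragment with the top bit clear, while EMPTY (0b1111_1111) and DELETED (0b1000_0000) both have it set. A sketch matching hashbrown's control-byte convention:

    // A control byte marks a full bucket iff its top (sign) bit is clear.
    #[inline]
    fn is_full(ctrl: u8) -> bool {
        ctrl & 0x80 == 0
    }

The new wrapper costs nothing in release builds; its value is the `debug_assert!` that catches out-of-range indices before the raw `ctrl` pointer arithmetic.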
@@ -1427,7 +1448,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
 
         // Copy all elements to the new table.
         for i in 0..self.buckets() {
-            if !is_full(*self.ctrl(i)) {
+            if !self.is_bucket_full(i) {
                 continue;
             }
 
@@ -1573,7 +1594,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
 
     #[inline]
     unsafe fn erase(&mut self, index: usize) {
-        debug_assert!(is_full(*self.ctrl(index)));
+        debug_assert!(self.is_bucket_full(index));
         let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
         let empty_before = Group::load(self.ctrl(index_before)).match_empty();
         let empty_after = Group::load(self.ctrl(index)).match_empty();
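The two `match_empty` loads feed the tombstone decision just past this hunk: if the erased slot lies inside a run of at least `Group::WIDTH` non-empty control bytes, an earlier probe may have skipped over it, so it must become DELETED rather than EMPTY. A hedged sketch of that decision, using the names already in scope (the commit does not touch these lines, and the exact body may differ):

    // Only reset to EMPTY when every probe window covering `index` is
    // guaranteed to have seen an EMPTY byte; otherwise leave a DELETED
    // tombstone so lookups keep probing past this group.
    let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
        DELETED
    } else {
        self.growth_left += 1;
        EMPTY
    };
    self.set_ctrl(index, ctrl);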
@@ -1723,7 +1744,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
             let mut guard = guard((0, &mut *self), |(index, self_)| {
                 if mem::needs_drop::<T>() && !self_.is_empty() {
                     for i in 0..=*index {
-                        if is_full(*self_.table.ctrl(i)) {
+                        if self_.is_bucket_full(i) {
                             self_.bucket(i).drop();
                         }
                     }