@@ -796,7 +796,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
     {
         let index = self.bucket_index(&bucket);
         let old_ctrl = *self.table.ctrl(index);
-        debug_assert!(self.is_full(index));
+        debug_assert!(self.is_bucket_full(index));
         let old_growth_left = self.table.growth_left;
         let item = self.remove(bucket);
         if let Some(new_item) = f(item) {
@@ -929,9 +929,13 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
     }
 
     /// Checks whether the bucket at `index` is full.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure `index` is less than the number of buckets.
     #[inline]
-    pub fn is_full(&self, index: usize) -> bool {
-        self.table.is_full(index)
+    pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
+        self.table.is_bucket_full(index)
     }
 
     /// Returns an iterator over every element in the table. It is up to
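With this hunk the rename reaches the public raw API, and `is_bucket_full` becomes `unsafe` with its bounds requirement spelled out in the new `# Safety` section. A minimal caller-side sketch, not part of this diff, assuming hashbrown is built with the `raw` feature so `RawTable` and its `buckets` method are visible:

```rust
// Hypothetical usage sketch (not from this PR): discharging the new
// safety contract of `RawTable::is_bucket_full`.
use hashbrown::raw::RawTable;

fn count_full_buckets<T>(table: &RawTable<T>) -> usize {
    let mut full = 0;
    for index in 0..table.buckets() {
        // SAFETY: `index` comes from `0..table.buckets()`, so it is
        // always less than the number of buckets, which is exactly what
        // the `# Safety` section requires.
        if unsafe { table.is_bucket_full(index) } {
            full += 1;
        }
    }
    full
}
```

This mirrors why the call sites in the hunks below stay sound: every index they pass is either produced by a loop bounded by `buckets()` or masked by `bucket_mask`, so it is always in range.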
@@ -1154,7 +1158,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
                     // table. This second scan is guaranteed to find an empty
                     // slot (due to the load factor) before hitting the trailing
                     // control bytes (containing EMPTY).
-                    if unlikely(self.is_full(result)) {
+                    if unlikely(self.is_bucket_full(result)) {
                         debug_assert!(self.bucket_mask < Group::WIDTH);
                         debug_assert_ne!(probe_seq.pos, 0);
                         return Group::load_aligned(self.ctrl(0))
@@ -1336,10 +1340,14 @@ impl<A: Allocator + Clone> RawTableInner<A> {
     }
 
     /// Checks whether the bucket at `index` is full.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure `index` is less than the number of buckets.
     #[inline]
-    fn is_full(&self, index: usize) -> bool {
+    unsafe fn is_bucket_full(&self, index: usize) -> bool {
         debug_assert!(index < self.buckets());
-        is_full(unsafe { *self.ctrl(index) })
+        is_full(*self.ctrl(index))
     }
 
     #[inline]
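The internal helper changes shape the same way: since the function is now an `unsafe fn`, the `unsafe` block around the control-byte read disappears, the `index < buckets` obligation moves to the caller, and the `debug_assert!` remains as a debug-build backstop. A self-contained sketch of that pattern, using a hypothetical `Table` type (the high-bit test matches the SwissTable control-byte convention hashbrown uses, where full bytes have the high bit clear):

```rust
// Standalone illustration (hypothetical type, not hashbrown's) of the
// refactor above: an internal `unsafe { ... }` block becomes an
// `unsafe fn` whose precondition is documented and checked in debug
// builds.
struct Table {
    ctrl: Vec<u8>, // one control byte per bucket
}

impl Table {
    fn buckets(&self) -> usize {
        self.ctrl.len()
    }

    /// # Safety
    ///
    /// The caller must ensure `index` is less than `self.buckets()`.
    unsafe fn is_bucket_full(&self, index: usize) -> bool {
        debug_assert!(index < self.buckets());
        // Full control bytes have the high bit clear; EMPTY and DELETED
        // have it set. The unchecked read is sound because the caller
        // upholds the bound above.
        *self.ctrl.get_unchecked(index) & 0x80 == 0
    }
}
```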
@@ -1440,7 +1448,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
 
         // Copy all elements to the new table.
         for i in 0..self.buckets() {
-            if !self.is_full(i) {
+            if !self.is_bucket_full(i) {
                 continue;
             }
 
@@ -1585,7 +1593,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
 
     #[inline]
     unsafe fn erase(&mut self, index: usize) {
-        debug_assert!(self.is_full(index));
+        debug_assert!(self.is_bucket_full(index));
         let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
         let empty_before = Group::load(self.ctrl(index_before)).match_empty();
         let empty_after = Group::load(self.ctrl(index)).match_empty();
@@ -1735,7 +1743,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
         let mut guard = guard((0, &mut *self), |(index, self_)| {
             if mem::needs_drop::<T>() && !self_.is_empty() {
                 for i in 0..=*index {
-                    if self_.is_full(i) {
+                    if self_.is_bucket_full(i) {
                         self_.bucket(i).drop();
                     }
                 }