 //! Unstable sorting is compatible with libcore because it doesn't allocate memory, unlike our
 //! stable sorting implementation.
 
-// ignore-tidy-undocumented-unsafe
-
 use crate::cmp;
 use crate::mem::{self, MaybeUninit};
 use crate::ptr;
@@ -291,6 +289,9 @@ where
             } else if start_r < end_r {
                 block_l = rem;
             } else {
+                // There were the same number of elements to switch on both blocks during the last
+                // iteration, so there are no remaining elements on either block. Cover the remaining
+                // items with roughly equally-sized blocks.
                 block_l = rem / 2;
                 block_r = rem - block_l;
             }
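
An aside on the remainder-splitting step this hunk documents: splitting `rem` as `rem / 2` and `rem - rem / 2` guarantees the two block sizes always sum to `rem`, even when `rem` is odd. A minimal standalone sketch (the function name and `main` are illustrative, not part of the patch):

fn split_blocks(rem: usize) -> (usize, usize) {
    // Mirrors `block_l = rem / 2; block_r = rem - block_l;` above: the two
    // blocks always cover exactly `rem` elements between them.
    let block_l = rem / 2;
    let block_r = rem - block_l;
    (block_l, block_r)
}

fn main() {
    assert_eq!(split_blocks(7), (3, 4)); // odd remainder: sizes differ by one
    assert_eq!(split_blocks(8), (4, 4)); // even remainder: equal halves
}
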
@@ -437,6 +438,17 @@ where
         // Move its remaining out-of-order elements to the far right.
         debug_assert_eq!(width(l, r), block_l);
         while start_l < end_l {
+            // remaining-elements-safety
+            // SAFETY: while the loop condition holds there are still elements in `offsets_l`, so it
+            // is safe to point `end_l` to the previous element.
+            //
+            // The `ptr::swap` is safe if both its arguments are valid for reads and writes:
+            //  - Per the debug assert above, the distance between `l` and `r` is `block_l`
+            //    elements, so there can be at most `block_l` remaining offsets between `start_l`
+            //    and `end_l`. This means `r` will be moved at most `block_l` steps back, which
+            //    makes the `r.offset` calls valid (at that point `l == r`).
+            //  - `offsets_l` contains valid offsets into `v` collected during the partitioning of
+            //    the last block, so the `l.offset` calls are valid.
             unsafe {
                 end_l = end_l.offset(-1);
                 ptr::swap(l.offset(*end_l as isize), r.offset(-1));
@@ -449,6 +461,7 @@ where
         // Move its remaining out-of-order elements to the far left.
         debug_assert_eq!(width(l, r), block_r);
         while start_r < end_r {
+            // SAFETY: See the reasoning in [remaining-elements-safety].
             unsafe {
                 end_r = end_r.offset(-1);
                 ptr::swap(l, r.offset(-(*end_r as isize) - 1));
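
The two cleanup loops above are mirror images: each pops leftover offsets collected during partitioning and swaps the flagged out-of-order elements toward the far end of the range. A safe-Rust sketch of the left-block case using indices instead of raw pointers; `flush_right` and its types are illustrative assumptions, not code from the file:

fn flush_right(v: &mut [i32], offsets: &mut Vec<usize>) {
    // `offsets` plays the role of `offsets_l`: indices of out-of-order
    // elements. Popping from the end mimics `end_l` walking backwards.
    let mut r = v.len();
    while let Some(off) = offsets.pop() {
        r -= 1; // like `r = r.offset(-1)`
        v.swap(off, r); // like `ptr::swap(l.offset(*end_l as isize), r.offset(-1))`
    }
}

fn main() {
    let mut v = [9, 1, 8, 2, 7];
    let mut offsets = vec![0, 2]; // pretend indices 0 and 2 were flagged
    flush_right(&mut v, &mut offsets);
    assert_eq!(v, [2, 1, 7, 9, 8]);
}
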
@@ -481,6 +494,8 @@ where
 
         // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
         // operation panics, the pivot will be automatically written back into the slice.
+
+        // SAFETY: `pivot` is a reference to the first element of `v`, so `ptr::read` is safe.
         let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
         let _pivot_guard = CopyOnDrop { src: &mut *tmp, dest: pivot };
         let pivot = &*tmp;
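
For context on the guard created above: the pattern is a temporary `ptr::read` into a `ManuallyDrop`, plus a drop guard that copies the value back even if a comparison panics. A hedged re-creation of that pattern; this `CopyOnDrop` is rebuilt from what the hunk shows and may not match the file's actual definition:

use std::{mem::ManuallyDrop, ptr};

struct CopyOnDrop<T> {
    src: *const T,
    dest: *mut T,
}

impl<T> Drop for CopyOnDrop<T> {
    fn drop(&mut self) {
        // Runs on normal exit and during unwinding, so the pivot value is
        // always written back into the slice.
        // SAFETY (in this sketch): both pointers stay valid for the guard's
        // whole lifetime and refer to properly aligned, distinct locations.
        unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1) };
    }
}

fn main() {
    let mut v = [3, 1, 2];
    let pivot = &mut v[0];
    // SAFETY: `pivot` is a valid reference, so reading one value is fine.
    let tmp = ManuallyDrop::new(unsafe { ptr::read(pivot) });
    let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot };
    // ... comparisons would borrow `&*tmp` here; on drop, `3` is restored.
}
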
@@ -646,6 +661,12 @@ where
 
     if len >= 8 {
         // Swaps indices so that `v[a] <= v[b]`.
+        // SAFETY: `len >= 8` so there are at least two elements in the neighborhoods of
+        // `a`, `b` and `c`. This means the three calls to `sort_adjacent` result in
+        // corresponding calls to `sort3` with valid 3-item neighborhoods around each
+        // pointer, which in turn means the calls to `sort2` are done with valid
+        // references. Thus the `v.get_unchecked` calls are safe, as is the `ptr::swap`
+        // call.
         let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
             if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
                 ptr::swap(a, b);
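
The hunk above ends mid-closure; the surrounding helper picks a pivot by ordering three sampled indices so the median lands in the middle. A safe-Rust sketch of that median-of-three idea under assumed names (`median_of_three`, `sort3`, and the sample positions are reconstructed from context, not copied from the patch):

fn median_of_three(v: &[i32]) -> usize {
    // Sample three positions spread across the slice.
    let (mut a, mut b, mut c) = (v.len() / 4, v.len() / 2, 3 * v.len() / 4);

    // Swaps indices so that `v[a] <= v[b]`, as in the closure above.
    let sort2 = |a: &mut usize, b: &mut usize| {
        if v[*b] < v[*a] {
            std::mem::swap(a, b);
        }
    };

    // Orders the three sampled indices, leaving the median in `b`.
    let sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| {
        sort2(a, b);
        sort2(b, c);
        sort2(a, b);
    };

    sort3(&mut a, &mut b, &mut c);
    b
}

fn main() {
    let v = [5, 9, 1, 7, 3, 8, 2, 6];
    let m = median_of_three(&v);
    assert_eq!(v[m], 2); // median of the sampled values {1, 3, 2}
}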