@@ -104,51 +104,9 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mid: *mut T, mut right: usize) {
     }
 }
 
-unsafe fn ptr_swap_u8(a: *mut u8, b: *mut u8, n: usize) {
-    for i in 0..n {
-        ptr::swap(a.offset(i as isize), b.offset(i as isize));
-    }
-}
-unsafe fn ptr_swap_u16(a: *mut u16, b: *mut u16, n: usize) {
-    for i in 0..n {
-        ptr::swap(a.offset(i as isize), b.offset(i as isize));
-    }
-}
-unsafe fn ptr_swap_u32(a: *mut u32, b: *mut u32, n: usize) {
-    for i in 0..n {
-        ptr::swap(a.offset(i as isize), b.offset(i as isize));
-    }
-}
-unsafe fn ptr_swap_u64(a: *mut u64, b: *mut u64, n: usize) {
-    for i in 0..n {
-        ptr::swap(a.offset(i as isize), b.offset(i as isize));
-    }
-}
-
 unsafe fn ptr_swap_n<T>(a: *mut T, b: *mut T, n: usize) {
-    // Doing this as a generic is 16% & 40% slower in two of the `String`
-    // benchmarks, as (based on the block names) LLVM doesn't vectorize it.
-    // Since this is just operating on raw memory, dispatch to a version
-    // with appropriate alignment. Helps with code size as well, by
-    // avoiding monomorphizing different unrolled loops for `i32`,
-    // `u32`, `f32`, `[u32; 1]`, etc.
-    let size_of_t = mem::size_of::<T>();
-    let align_of_t = mem::align_of::<T>();
-
-    let a64 = mem::align_of::<u64>();
-    if a64 == 8 && align_of_t % a64 == 0 {
-        return ptr_swap_u64(a as *mut u64, b as *mut u64, n * (size_of_t / 8));
-    }
-
-    let a32 = mem::align_of::<u32>();
-    if a32 == 4 && align_of_t % a32 == 0 {
-        return ptr_swap_u32(a as *mut u32, b as *mut u32, n * (size_of_t / 4));
-    }
-
-    let a16 = mem::align_of::<u16>();
-    if a16 == 2 && align_of_t % a16 == 0 {
-        return ptr_swap_u16(a as *mut u16, b as *mut u16, n * (size_of_t / 2));
+    for i in 0..n {
+        // These are nonoverlapping, so use mem::swap instead of ptr::swap
+        mem::swap(&mut *a.offset(i as isize), &mut *b.offset(i as isize));
     }
-
-    ptr_swap_u8(a as *mut u8, b as *mut u8, n * size_of_t);
 }
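
For reference, below is a minimal, self-contained sketch of the pattern the new ptr_swap_n relies on: because the two regions are guaranteed not to overlap, it is sound to form two `&mut` references and let mem::swap do the copying element by element. The function name swap_nonoverlapping_sketch and the main harness are illustrative only and not part of this commit; the commit's code lives in libcore and uses the same loop body.

use std::mem;

// Hypothetical stand-in for the generic swap loop above. Assumed safety
// contract, mirroring the commit: `a` and `b` each point to `n` valid,
// aligned elements, and the two regions do not overlap.
unsafe fn swap_nonoverlapping_sketch<T>(a: *mut T, b: *mut T, n: usize) {
    for i in 0..n {
        // Nonoverlapping regions, so holding two `&mut` borrows at once is fine.
        mem::swap(&mut *a.offset(i as isize), &mut *b.offset(i as isize));
    }
}

fn main() {
    let mut xs = [1u32, 2, 3, 4, 5, 6];
    // split_at_mut hands out two provably disjoint halves.
    let (left, right) = xs.split_at_mut(3);
    unsafe {
        swap_nonoverlapping_sketch(left.as_mut_ptr(), right.as_mut_ptr(), 3);
    }
    assert_eq!(xs, [4, 5, 6, 1, 2, 3]);
}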