1- use core:: intrinsics ;
1+ use core:: arch ;
22use core:: mem;
3+ use core:: sync:: atomic:: { AtomicU32 , Ordering } ;
34
45// Kernel-provided user-mode helper functions:
56// https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
/// Invokes the kernel's user-mode compare-and-exchange helper.
///
/// The Linux kernel maps this helper at the fixed address `0xffff0fc0` in every
/// user process (see the kernel_user_helpers documentation linked above), so we
/// reinterpret that address as a C function pointer and call through it.
///
/// Returns `true` when the exchange succeeded (the helper reports success as 0).
unsafe fn __kuser_cmpxchg(oldval: u32, newval: u32, ptr: *mut u32) -> bool {
    let cmpxchg: extern "C" fn(u32, u32, *mut u32) -> u32 =
        mem::transmute(0xffff0fc0usize as *const ());
    cmpxchg(oldval, newval, ptr) == 0
}
11+
1012unsafe fn __kuser_memory_barrier ( ) {
1113 let f: extern "C" fn ( ) = mem:: transmute ( 0xffff0fa0usize as * const ( ) ) ;
1214 f ( ) ;
@@ -54,13 +56,46 @@ fn insert_aligned(aligned: u32, val: u32, shift: u32, mask: u32) -> u32 {
5456 ( aligned & !( mask << shift) ) | ( ( val & mask) << shift)
5557}
5658
/// Atomically loads the value at `ptr`. The size of `T` determines how many of the
/// bytes pointed to by `ptr` are known to be in bounds: when `T` is narrower than 4
/// bytes, inline ASM is used to bypass the Rust requirement that all pointer reads
/// must be in bounds.
///
/// # Safety
///
/// `size_of::<T>()` must be 1, 2 or 4.
/// `ptr` must be aligned and point to memory within a page that allows read access.
/// If `T` has a size of 4, then `ptr` must be valid for a relaxed atomic read.
unsafe fn atomic_load_aligned<T>(ptr: *mut u32) -> u32 {
    match mem::size_of::<T>() {
        // SAFETY: `T` is 4 bytes wide, so the caller guarantees that a relaxed
        // atomic load of the whole word at `ptr` is sound.
        4 => unsafe { AtomicU32::from_ptr(ptr).load(Ordering::Relaxed) },
        _ => {
            // SAFETY:
            // For sub-word types (`u8`/`i8`/`u16`/`i16`) some of the 4 bytes at `ptr`
            // may lie outside the allocation, so a plain Rust read could be undefined
            // behaviour; a raw `ldr` avoids that. The `ldr` instruction does not touch
            // the stack or flags, and does not write to memory, so `nostack`,
            // `preserves_flags` and `readonly` are all sound. The caller guarantees
            // that `ptr` is aligned, as required by `ldr`.
            unsafe {
                let loaded: u32;
                arch::asm!(
                    "ldr {res}, [{ptr}]",
                    ptr = in(reg) ptr,
                    res = lateout(reg) loaded,
                    options(nostack, preserves_flags, readonly)
                );
                loaded
            }
        }
    }
}
91+
5792// Generic atomic read-modify-write operation
5893unsafe fn atomic_rmw < T , F : Fn ( u32 ) -> u32 , G : Fn ( u32 , u32 ) -> u32 > ( ptr : * mut T , f : F , g : G ) -> u32 {
5994 let aligned_ptr = align_ptr ( ptr) ;
6095 let ( shift, mask) = get_shift_mask ( ptr) ;
6196
6297 loop {
63- let curval_aligned = intrinsics :: atomic_load_unordered ( aligned_ptr) ;
98+ let curval_aligned = atomic_load_aligned :: < T > ( aligned_ptr) ;
6499 let curval = extract_aligned ( curval_aligned, shift, mask) ;
65100 let newval = f ( curval) ;
66101 let newval_aligned = insert_aligned ( curval_aligned, newval, shift, mask) ;
@@ -76,7 +111,7 @@ unsafe fn atomic_cmpxchg<T>(ptr: *mut T, oldval: u32, newval: u32) -> u32 {
76111 let ( shift, mask) = get_shift_mask ( ptr) ;
77112
78113 loop {
79- let curval_aligned = intrinsics :: atomic_load_unordered ( aligned_ptr) ;
114+ let curval_aligned = atomic_load_aligned :: < T > ( aligned_ptr) ;
80115 let curval = extract_aligned ( curval_aligned, shift, mask) ;
81116 if curval != oldval {
82117 return curval;
0 commit comments