@@ -4,11 +4,11 @@ use core::mem;
 // Kernel-provided user-mode helper functions:
 // https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
 unsafe fn __kuser_cmpxchg(oldval: u32, newval: u32, ptr: *mut u32) -> bool {
-    let f: extern "C" fn(u32, u32, *mut u32) -> u32 = mem::transmute(0xffff0fc0u32);
+    let f: extern "C" fn(u32, u32, *mut u32) -> u32 = mem::transmute(0xffff0fc0u32);
     f(oldval, newval, ptr) == 0
 }
 unsafe fn __kuser_memory_barrier() {
-    let f: extern "C" fn() = mem::transmute(0xffff0fa0u32);
+    let f: extern "C" fn() = mem::transmute(0xffff0fa0u32);
     f();
 }
 
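These two kuser helpers, reached through the fixed addresses 0xffff0fc0 and 0xffff0fa0 documented in the kernel text linked above, are the only synchronization primitives this file has to work with; every __sync_* intrinsic below is a compare-and-swap retry loop built on __kuser_cmpxchg. A minimal sketch of that loop for a word-aligned u32, under a hypothetical name (the crate's real atomic_rmw helper, defined outside this hunk, is more involved because it also has to handle the sub-word u8/u16 cases):

    // Sketch only: word-sized fetch-and-add built directly on __kuser_cmpxchg.
    unsafe fn fetch_and_add_sketch(ptr: *mut u32, val: u32) -> u32 {
        loop {
            // Read the current value and compute the desired replacement...
            let cur = ptr.read_volatile();
            let new = cur.wrapping_add(val);
            // ...then publish it only if no other thread raced us; retry otherwise.
            if __kuser_cmpxchg(cur, new, ptr) {
                return cur; // fetch_and_* returns the previous value
            }
        }
    }
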
@@ -94,24 +94,28 @@ macro_rules! atomic_rmw {
         pub unsafe extern "C" fn $name(ptr: *mut $ty, val: $ty) -> $ty {
             atomic_rmw(ptr, |x| $op(x as $ty, val) as u32) as $ty
         }
-    }
+    };
 }
 macro_rules! atomic_cmpxchg {
     ($name:ident, $ty:ty) => {
         #[cfg_attr(not(feature = "mangled-names"), no_mangle)]
         pub unsafe extern "C" fn $name(ptr: *mut $ty, oldval: $ty, newval: $ty) -> $ty {
             atomic_cmpxchg(ptr, oldval as u32, newval as u32) as $ty
         }
-    }
+    };
 }
 
 atomic_rmw!(__sync_fetch_and_add_1, u8, |a: u8, b: u8| a.wrapping_add(b));
-atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a.wrapping_add(b));
-atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a.wrapping_add(b));
+atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a
+    .wrapping_add(b));
+atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a
+    .wrapping_add(b));
 
 atomic_rmw!(__sync_fetch_and_sub_1, u8, |a: u8, b: u8| a.wrapping_sub(b));
-atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a.wrapping_sub(b));
-atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a.wrapping_sub(b));
+atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a
+    .wrapping_sub(b));
+atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a
+    .wrapping_sub(b));
 
 atomic_rmw!(__sync_fetch_and_and_1, u8, |a: u8, b: u8| a & b);
 atomic_rmw!(__sync_fetch_and_and_2, u16, |a: u16, b: u16| a & b);
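Functionally this hunk is a no-op: rustfmt adds the trailing `;` after each macro arm and re-wraps the longer one-line invocations, but the generated intrinsics are unchanged. For reference, an invocation such as atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a.wrapping_add(b)) expands to roughly the following (a sketch of the expansion, not literal compiler output):

    #[cfg_attr(not(feature = "mangled-names"), no_mangle)]
    pub unsafe extern "C" fn __sync_fetch_and_add_4(ptr: *mut u32, val: u32) -> u32 {
        // $op is the closure passed to the macro; the `as u32` casts are no-ops
        // here but matter for the u8/u16 instantiations, which widen and narrow.
        atomic_rmw(ptr, |x| (|a: u32, b: u32| a.wrapping_add(b))(x as u32, val) as u32) as u32
    }
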
@@ -129,21 +133,69 @@ atomic_rmw!(__sync_fetch_and_nand_1, u8, |a: u8, b: u8| !(a & b));
 atomic_rmw!(__sync_fetch_and_nand_2, u16, |a: u16, b: u16| !(a & b));
 atomic_rmw!(__sync_fetch_and_nand_4, u32, |a: u32, b: u32| !(a & b));
 
-atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b { a } else { b });
-atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b { a } else { b });
-atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b { a } else { b });
-
-atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b { a } else { b });
-atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b { a } else { b });
-atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b { a } else { b });
-
-atomic_rmw!(__sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b { a } else { b });
-
-atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b { a } else { b });
-atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b { a } else { b });
+atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b {
+    a
+} else {
+    b
+});
+
+atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b {
+    a
+} else {
+    b
+});
+
+atomic_rmw!(__sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b {
+    a
+} else {
+    b
+});
+
+atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b {
+    a
+} else {
+    b
+});
+atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b {
+    a
+} else {
+    b
+});
 
 atomic_rmw!(__sync_lock_test_and_set_1, u8, |_: u8, b: u8| b);
 atomic_rmw!(__sync_lock_test_and_set_2, u16, |_: u16, b: u16| b);
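As with the other hunks, only the layout of the max/min/umax/umin closures changes; each generated intrinsic still stores the larger (or smaller) of the stored value and the argument and returns the previous value, following the usual __sync_fetch_and_* convention. A hypothetical caller, purely for illustration (these symbols are normally emitted by the compiler for the GCC atomic builtins rather than called by hand):

    let mut x: i32 = 3;
    // Returns the previous value (3) and leaves max(3, 7) = 7 in x.
    let old = unsafe { __sync_fetch_and_max_4(&mut x, 7) };
    assert_eq!((old, x), (3, 7));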