+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Based on arch/arm/include/asm/atomic.h
  *
@@ -211,6 +212,73 @@ static inline uint32_t atomic_cmpxchg_relaxed32(uint32_t *ptr, uint32_t exp, uin
 	return old;
 }
 
+static inline int atomic_try_cmpxchg_acquire32(uint32_t *ptr, uint32_t *exp, uint32_t val) {
+	uint32_t tmp = *exp;
+	uint32_t old = atomic_cmpxchg_acquire32(ptr, tmp, val);
+	if (old != tmp) {
+		*exp = old;
+		return 0;
+	} else {
+		return 1;
+	}
+}
+
+static inline int atomic_try_cmpxchg_relaxed32(uint32_t *ptr, uint32_t *exp, uint32_t val) {
+	uint32_t tmp = *exp;
+	uint32_t old = atomic_cmpxchg_relaxed32(ptr, tmp, val);
+	if (old != tmp) {
+		*exp = old;
+		return 0;
+	} else {
+		return 1;
+	}
+}
+
+/**
+ * atomic_fetch_or_acquire32() - atomic bitwise OR with acquire ordering
+ * @i: uint32_t value to OR in
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline uint32_t
+atomic_fetch_or_acquire32(uint32_t i, atomic_t *v)
+{
+#if defined(__x86_64__)
+	/* cmpxchg loop: retry until the OR of @i is published atomically. */
+	uint32_t old_val = v->counter;
+	while (!atomic_try_cmpxchg_acquire32((uint32_t *)&v->counter, &old_val, old_val | i))
+		;
+	return old_val;
+#elif defined(__aarch64__)
+	uint32_t old_val;
+#if defined(USE_LSE)
+	/* LSE: ldseta = atomic set-bits (OR) returning the old value, with acquire. */
+	asm volatile(
+	"	ldseta	%w[_val], %w[_old], %[_loc]\n"
+	: [_old] "=&r" (old_val), [_loc] "+Q" (*(uint32_t *)(&v->counter))
+	: [_val] "r" (i)
+	: "memory");
+#else
+	/* LL/SC: exclusive load-acquire / store-release loop. */
+	uint32_t tmp, new_val;
+	asm volatile(
+	"	prfm	pstl1strm, %[_loc]\n"
+	"1:	ldaxr	%w[_old], %[_loc]\n"
+	"	orr	%w[_new_val], %w[_old], %w[_val]\n"
+	"	stlxr	%w[_tmp], %w[_new_val], %[_loc]\n"
+	"	cbnz	%w[_tmp], 1b\n"
+	: [_old] "=&r" (old_val), [_new_val] "=&r" (new_val), [_tmp] "=&r" (tmp),
+	  [_loc] "+Q" (*(uint32_t *)(&v->counter))
+	: [_val] "r" (i)
+	: "memory");
+#endif
+	return old_val;
+#else
+#error "Unable to define atomic_fetch_or_acquire"
+#endif
+}
+
+
 static inline uint16_t xchg_release16(uint16_t *ptr, uint16_t val) {
 #if defined(__x86_64__)
 	asm volatile("xchgw %w0, %1\n"
@@ -354,10 +422,24 @@ do { \
 		VAL = smp_load_acquire(__PTR);			\
 		if (cond_expr)					\
 			break;					\
-		__cmpwait_relaxed(__PTR, VAL);			\
+		__cmpwait_relaxed(__PTR, (unsigned long)(VAL));	\
 	}							\
 	VAL;							\
 })
+
+#define smp_cond_load_relaxed(ptr, cond_expr)			\
+({								\
+	typeof(ptr) __PTR = (ptr);				\
+	typeof(*ptr) VAL;					\
+	for (;;) {						\
+		VAL = READ_ONCE(*__PTR);			\
+		if (cond_expr)					\
+			break;					\
+		__cmpwait_relaxed(__PTR, (unsigned long)(VAL));	\
+	}							\
+	VAL;							\
+})
+
 #else
 #define __smp_store_release(p, v)				\
 do {								\
@@ -384,6 +466,19 @@ do { \
 	barrier();						\
 	VAL;							\
 })
+
+#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
+	typeof(ptr) __PTR = (ptr);				\
+	typeof(*ptr) VAL;					\
+	for (;;) {						\
+		VAL = READ_ONCE(*__PTR);			\
+		if (cond_expr)					\
+			break;					\
+		cpu_relax();					\
+	}							\
+	VAL;							\
+})
+
 #endif
 
 #define arch_mcs_spin_lock_contended(l)				\
@@ -398,3 +493,19 @@ do { \
 #define ATOMIC_INIT(i)		{ (i) }
 #define atomic_read(v)		READ_ONCE((v)->counter)
 #define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))
+
+#define likely(x)	__builtin_expect(!!(x), 1)
+#define unlikely(x)	__builtin_expect(!!(x), 0)
+
+#ifndef bool
+#define bool	int
+#define true	1
+#define false	0
+#endif
+
+#ifndef NULL
+#define NULL	((void *)0)
+#endif
+
+#define atomic_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic_cond_read_relaxed(v, c)	smp_cond_load_relaxed(&(v)->counter, (c))
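
A minimal usage sketch, not part of the patch: the new fetch-OR and cond-read helpers compose in the usual "set a flag bit with acquire ordering, spin until it clears" pattern. PENDING_BIT, try_set_pending() and wait_for_pending_clear() below are illustrative names only, and the example assumes atomic_t carries the 32-bit counter field used above.

#define PENDING_BIT	(1u << 8)	/* illustrative bit position */

/* Returns 1 if this caller set the bit, 0 if it was already set. */
static inline int try_set_pending(atomic_t *lock)
{
	uint32_t old = atomic_fetch_or_acquire32(PENDING_BIT, lock);
	return !(old & PENDING_BIT);
}

/* Spin, with acquire ordering on the final load, until the bit drops. */
static inline void wait_for_pending_clear(atomic_t *lock)
{
	/* VAL names the freshly loaded counter inside the cond-read macro. */
	atomic_cond_read_acquire(lock, !(VAL & PENDING_BIT));
}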