Commit 4a3340b

pavelvkozlov authored and abrodkin committed
ARCv3: add new version of 32-bit arch_atomic_* with atld.<op> instrs
Add a new implementation of the 32-bit atomic functions using atld.<op> instructions, used when ARC_HAS_ATLD is enabled. Also make it possible to enable ARC_HAS_ATLD with or without ARC_HAS_LLSC. This allows the ATLD variant to be used when LLSC is not desirable (because of the llock/scond livelock issue), or the two to be combined, with ATLD serving the atomic functions and LLSC serving cmpxchg.

Signed-off-by: Pavel Kozlov <pavel.kozlov@synopsys.com>
1 parent 02509ae commit 4a3340b
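
For context: atld.<op> is an ARCv3 fetch-and-operate instruction. Judging by how the diff below uses it, it atomically loads the old value of the memory operand into the source register and writes old <op> reg back to memory. A rough C model of its effect, for illustration only (atld_model and OP are placeholders, not kernel code):

	/* Rough model of "atld.op  reg, [mem]": the hardware performs this
	 * read-modify-write as one atomic operation; OP is add/and/or/xor */
	static int atld_model(int *mem, int reg)
	{
		int old = *mem;		/* previous value, returned in reg */
		*mem = old OP reg;	/* memory updated in the same insn */
		return old;
	}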

File tree

6 files changed: +143 -37 lines

arch/arc/Kconfig

Lines changed: 2 additions & 2 deletions

@@ -430,10 +430,10 @@ config ARC_HAS_LLSC
 	depends on !ARC_CANT_LLSC
 
 config ARC_HAS_ATLD
-	bool "Insn: ATLD (efficient fetch-and-operate atomic ops)"
+	bool "Insn: ATLD (efficient atomic ops)"
 	default n
 	depends on ISA_ARCV3
-	depends on ARC_HAS_LLSC
+	depends on !ARC_CANT_LLSC
 
 config ARC_HAS_SWAPE
 	bool "Insn: SWAPE (endian-swap)"

arch/arc/include/asm/atomic-atld.h

Lines changed: 125 additions & 0 deletions

@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_ARC_ATOMIC_ATLD_H
+#define _ASM_ARC_ATOMIC_ATLD_H
+
+static inline void arch_atomic_set(atomic_t *v, int i)
+{
+	ATOMIC_OPS_FLAGS_VAR_DEF
+
+	atomic_ops_lock(flags);
+	WRITE_ONCE(v->counter, i);
+	atomic_ops_unlock(flags);
+}
+
+#ifndef CONFIG_ARC_HAS_LLSC
+#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))
+#endif
+
+#define ATOMIC_OP(op, asm_op)						\
+static inline void arch_atomic_##op(int i, atomic_t *v)			\
+{									\
+	int val = i;							\
+	ATOMIC_OPS_FLAGS_VAR_DEF					\
+									\
+	atomic_ops_lock(flags);						\
+	__asm__ __volatile__(						\
+	"	atld."#asm_op"	%[val], %[ctr]	\n"			\
+	: [val] "+r"(val),						\
+	  [ctr] "+ATOMC" (v->counter)					\
+	:								\
+	: "memory");							\
+	atomic_ops_unlock(flags);					\
+}
+
+#define ATOMIC_OP_RETURN(op, asm_op)					\
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
+{									\
+	int val = i;							\
+	ATOMIC_OPS_FLAGS_VAR_DEF					\
+									\
+	atomic_ops_lock(flags);						\
+	__asm__ __volatile__(						\
+	"	atld."#asm_op"	%[val], %[ctr]	\n"			\
+	"	"#asm_op"	%[val], %[val], %[i]	\n"		\
+	: [val] "+&r"(val),						\
+	  [ctr] "+ATOMC" (v->counter)					\
+	: [i] "ir" (i)							\
+	: "memory");							\
+	atomic_ops_unlock(flags);					\
+									\
+	return val;							\
+}
+
+#define ATOMIC_FETCH_OP(op, asm_op)					\
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
+{									\
+	int orig = i;							\
+	ATOMIC_OPS_FLAGS_VAR_DEF					\
+									\
+	atomic_ops_lock(flags);						\
+	__asm__ __volatile__(						\
+	"	atld."#asm_op"	%[orig], %[ctr]	\n"			\
+	: [orig] "+r"(orig),						\
+	  [ctr] "+ATOMC" (v->counter)					\
+	:								\
+	: "memory");							\
+	atomic_ops_unlock(flags);					\
+									\
+	return orig;							\
+}
+
+#define ATOMIC_OPS(op, asm_op)						\
+	ATOMIC_OP(op, asm_op)						\
+	ATOMIC_OP_RETURN(op, asm_op)
+
+ATOMIC_OPS(add, add)
+
+// Special form for sub - ATOMIC_OPS(sub, sub)
+static inline void arch_atomic_sub(int i, atomic_t *v)
+{
+	arch_atomic_add(-i, v);
+}
+
+static inline int arch_atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+	return arch_atomic_add_return_relaxed(-i, v);
+}
+
+#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, asm_op)						\
+	ATOMIC_OP(op, asm_op)
+
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, xor)
+
+// Fetches
+ATOMIC_FETCH_OP(add, add)
+ATOMIC_FETCH_OP(and, and)
+ATOMIC_FETCH_OP(xor, xor)
+ATOMIC_FETCH_OP(or, or)
+
+// Special form for sub - ATOMIC_FETCH_OP(sub, sub)
+static inline int arch_atomic_fetch_sub_relaxed(int i, atomic_t *v)
+{
+	return arch_atomic_fetch_add_relaxed(-i, v);
+}
+
+#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#endif
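
To see what these macros produce: when CONFIG_ARC_HAS_LLSC is also enabled, ATOMIC_OPS_FLAGS_VAR_DEF and the atomic_ops_lock()/atomic_ops_unlock() wrappers expand to nothing (see the smp.h hunk below), so ATOMIC_FETCH_OP(add, add) reduces to roughly the following (an illustrative expansion, not part of the diff):

	static inline int arch_atomic_fetch_add_relaxed(int i, atomic_t *v)
	{
		int orig = i;

		/* atld.add: orig receives the previous value of v->counter
		 * and v->counter becomes old + i, in a single atomic insn */
		__asm__ __volatile__(
		"	atld.add	%[orig], %[ctr]	\n"
		: [orig] "+r"(orig),
		  [ctr] "+ATOMC" (v->counter)
		:
		: "memory");

		return orig;
	}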

arch/arc/include/asm/atomic-llsc.h

Lines changed: 0 additions & 33 deletions

@@ -67,23 +67,6 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 	return orig;							\
 }
 
-#ifdef CONFIG_ARC_HAS_ATLD
-#define ATOMIC_FETCH_ATLD_OP(op, asm_op)				\
-static inline int arch_atomic_fetch_atld_##op##_relaxed(int i, atomic_t *v) \
-{									\
-	unsigned int orig = i;						\
-									\
-	__asm__ __volatile__(						\
-	"	atld."#asm_op"	%[orig], %[ctr]	\n"			\
-	: [orig] "+r"(orig),						\
-	  [ctr] "+ATOMC" (v->counter)					\
-	:								\
-	: "memory");							\
-									\
-	return orig;							\
-}
-#endif
-
 #define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed
 #define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed

@@ -103,21 +86,6 @@ ATOMIC_OPS(andnot, bic)
 ATOMIC_OPS(or, or)
 ATOMIC_OPS(xor, xor)
 
-#ifdef CONFIG_ARC_HAS_ATLD
-
-#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_atld_add_relaxed
-#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_atld_and_relaxed
-#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_atld_or_relaxed
-#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_atld_xor_relaxed
-
-ATOMIC_FETCH_ATLD_OP(add, add)
-ATOMIC_FETCH_ATLD_OP(and, and)
-ATOMIC_FETCH_ATLD_OP(xor, xor)
-ATOMIC_FETCH_ATLD_OP(or, or)
-
-ATOMIC_FETCH_OP(sub, sub)
-ATOMIC_FETCH_OP(andnot, bic)
-#else
 
 #define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
 #define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed

@@ -131,7 +99,6 @@ ATOMIC_OPS(xor, xor)
 
 ATOMIC_FETCH_OP(sub, sub)
 ATOMIC_FETCH_OP(andnot, bic)
-#endif
 
 #define arch_atomic_andnot		arch_atomic_andnot

arch/arc/include/asm/atomic.h

Lines changed: 3 additions & 1 deletion

@@ -16,7 +16,9 @@
 
 #define arch_atomic_read(v)	READ_ONCE((v)->counter)
 
-#ifdef CONFIG_ARC_HAS_LLSC
+#ifdef CONFIG_ARC_HAS_ATLD
+#include <asm/atomic-atld.h>
+#elif defined(CONFIG_ARC_HAS_LLSC)
 #include <asm/atomic-llsc.h>
 #else
 #include <asm/atomic-spinlock.h>
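
The selection order now gives ATLD top priority: ARC_HAS_ATLD picks atomic-atld.h, else ARC_HAS_LLSC picks atomic-llsc.h, else the spinlock fallback. Since cmpxchg comes from a separate header (asm/cmpxchg.h, not touched by this diff), a build with both options enabled ends up with ATLD-based atomics and LLSC-based cmpxchg, the combination named in the commit message.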

arch/arc/include/asm/smp.h

Lines changed: 11 additions & 0 deletions

@@ -100,6 +100,10 @@ static inline const char *arc_platform_smp_cpuinfo(void)
 #ifndef CONFIG_ARC_HAS_LLSC
 
 #include <linux/irqflags.h>
+
+#define ATOMIC_OPS_FLAGS_VAR_DEF		\
+	unsigned long flags;
+
 #ifdef CONFIG_SMP
 
 #include <asm/spinlock.h>

@@ -123,6 +127,13 @@ extern arch_spinlock_t smp_atomic_ops_lock;
 
 #endif /* !CONFIG_SMP */
 
+#else /* CONFIG_ARC_HAS_LLSC */
+
+#define ATOMIC_OPS_FLAGS_VAR_DEF
+
+#define atomic_ops_lock(flags)
+#define atomic_ops_unlock(flags)
+
 #endif /* !CONFIG_ARC_HAS_LLSC */
 
 #endif
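
The net effect is that one atomic-atld.h body serves both configurations. Without LLSC, atomic_ops_lock()/atomic_ops_unlock() keep their existing definitions from this header (IRQ save/restore on UP, additionally taking smp_atomic_ops_lock on SMP), so an atld op expands roughly as follows (illustrative sketch for the UP, !LLSC case):

	unsigned long flags;		/* ATOMIC_OPS_FLAGS_VAR_DEF */

	local_irq_save(flags);		/* atomic_ops_lock(flags) on UP */
	/* ... atld.<op> %[val], %[ctr] ... */
	local_irq_restore(flags);	/* atomic_ops_unlock(flags) on UP */

With CONFIG_ARC_HAS_LLSC all three macros expand to nothing and the bare atld instruction is emitted.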

arch/arc/kernel/setup.c

Lines changed: 2 additions & 1 deletion

@@ -335,8 +335,9 @@ static int arcv3_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
 		scnprintf(mpy_nm, 32, "mpy[opt %d] ", mpy_opt);
 	}
 
-	n += scnprintf(buf + n, len - n, "ISA Extn\t: %s%s%s%s%s%s%s%s%s\n",
+	n += scnprintf(buf + n, len - n, "ISA Extn\t: %s%s%s%s%s%s%s%s%s%s%s\n",
 		       IS_AVAIL2(isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+		       IS_AVAIL2(isa.atomic, "atomic2 ", CONFIG_ARC_HAS_ATLD),
 		       IS_AVAIL2(isa.ldst128, "ll128 ", CONFIG_ARC_HAS_LL128),
 		       IS_AVAIL2(isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
 		       mpy_nm,
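
Note the format string gains two %s for one new entry because each IS_AVAIL2() expands to two string arguments: the feature name and a marker for whether the kernel config uses it. On hardware with ATLD and CONFIG_ARC_HAS_ATLD=y the line would begin something like (hypothetical output):

	ISA Extn	: atomic atomic2 ll128 unalign ...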
