Skip to content

Commit c7cbe2e

Browse files
committed
[libcpu-riscv]: [common64 virt64]: Add the specific implementation of the spinlock.
The specific implementation of the spinlock is added in risc-v/virt64/interrupt.c. Due to the need for atomic operations, a new file atomic_riscv.c (copied from the common directory) is added under risc-v/common64. Signed-off-by: Mengchen Teng <teng_mengchen@163.com>
1 parent 7948f50 commit c7cbe2e

File tree

2 files changed

+198
-3
lines changed

2 files changed

+198
-3
lines changed
Lines changed: 159 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,159 @@
1+
/*
2+
* Copyright (c) 2006-2023, RT-Thread Development Team
3+
*
4+
* SPDX-License-Identifier: Apache-2.0
5+
*
6+
* Change Logs:
7+
* Date Author Notes
8+
* 2023-03-14 WangShun first version
9+
*/
10+
11+
#include <rtthread.h>
12+
13+
/**
 * Atomically swap the value at ptr with val.
 *
 * @param ptr address of the atomic variable
 * @param val value to store into *ptr
 * @return the value *ptr held before the swap (AMO rd is the old value)
 */
rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t prev = 0;

#if __riscv_xlen == 32
    __asm__ volatile ("amoswap.w %0, %1, (%2)" : "=r"(prev) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    __asm__ volatile ("amoswap.d %0, %1, (%2)" : "=r"(prev) : "r"(val), "r"(ptr) : "memory");
#endif
    /* NOTE(review): AMO issued without .aq/.rl suffixes -- relaxed
     * ordering; confirm callers add fences where ordering matters. */
    return prev;
}
23+
24+
/**
 * Atomically add val to *ptr.
 *
 * @param ptr address of the atomic variable
 * @param val value to add
 * @return the value *ptr held before the addition
 */
rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t prev = 0;

#if __riscv_xlen == 32
    __asm__ volatile ("amoadd.w %0, %1, (%2)" : "=r"(prev) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    __asm__ volatile ("amoadd.d %0, %1, (%2)" : "=r"(prev) : "r"(val), "r"(ptr) : "memory");
#endif
    return prev;
}
34+
35+
/**
 * Atomically subtract val from *ptr.
 *
 * There is no dedicated AMO subtract instruction, so this adds the
 * negated operand instead.
 *
 * @param ptr address of the atomic variable
 * @param val value to subtract
 * @return the value *ptr held before the subtraction
 */
rt_atomic_t rt_hw_atomic_sub(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t prev = 0;
    rt_atomic_t addend = -val;

#if __riscv_xlen == 32
    __asm__ volatile ("amoadd.w %0, %1, (%2)" : "=r"(prev) : "r"(addend), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    __asm__ volatile ("amoadd.d %0, %1, (%2)" : "=r"(prev) : "r"(addend), "r"(ptr) : "memory");
#endif
    return prev;
}
46+
47+
/**
 * Atomically XOR val into *ptr.
 *
 * @param ptr address of the atomic variable
 * @param val value to XOR
 * @return the value *ptr held before the operation
 */
rt_atomic_t rt_hw_atomic_xor(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t prev = 0;

#if __riscv_xlen == 32
    __asm__ volatile ("amoxor.w %0, %1, (%2)" : "=r"(prev) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    __asm__ volatile ("amoxor.d %0, %1, (%2)" : "=r"(prev) : "r"(val), "r"(ptr) : "memory");
#endif
    return prev;
}
57+
58+
/**
 * Atomically AND val into *ptr.
 *
 * @param ptr address of the atomic variable
 * @param val value to AND
 * @return the value *ptr held before the operation
 */
rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t prev = 0;

#if __riscv_xlen == 32
    __asm__ volatile ("amoand.w %0, %1, (%2)" : "=r"(prev) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    __asm__ volatile ("amoand.d %0, %1, (%2)" : "=r"(prev) : "r"(val), "r"(ptr) : "memory");
#endif
    return prev;
}
68+
69+
/**
 * Atomically OR val into *ptr.
 *
 * @param ptr address of the atomic variable
 * @param val value to OR
 * @return the value *ptr held before the operation
 */
rt_atomic_t rt_hw_atomic_or(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t prev = 0;

#if __riscv_xlen == 32
    __asm__ volatile ("amoor.w %0, %1, (%2)" : "=r"(prev) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    __asm__ volatile ("amoor.d %0, %1, (%2)" : "=r"(prev) : "r"(val), "r"(ptr) : "memory");
#endif
    return prev;
}
79+
80+
/**
 * Atomically read *ptr.
 *
 * Implemented as an AMO XOR with x0 (zero register): XOR with zero
 * leaves *ptr unchanged while rd receives its current value.
 *
 * @param ptr address of the atomic variable
 * @return the current value of *ptr
 */
rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr)
{
    rt_atomic_t value = 0;

#if __riscv_xlen == 32
    __asm__ volatile ("amoxor.w %0, x0, (%1)" : "=r"(value) : "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    __asm__ volatile ("amoxor.d %0, x0, (%1)" : "=r"(value) : "r"(ptr) : "memory");
#endif
    return value;
}
90+
91+
/**
 * Atomically store val into *ptr.
 *
 * Uses AMO swap; the instruction always writes the old value to a
 * destination register, so a throwaway local receives it.
 *
 * @param ptr address of the atomic variable
 * @param val value to store
 */
void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t discard = 0; /* previous value, intentionally unused */

#if __riscv_xlen == 32
    __asm__ volatile ("amoswap.w %0, %1, (%2)" : "=r"(discard) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    __asm__ volatile ("amoswap.d %0, %1, (%2)" : "=r"(discard) : "r"(val), "r"(ptr) : "memory");
#endif
}
100+
101+
/**
 * Atomically set the flag at *ptr (OR with 1) and report its old value.
 *
 * @param ptr address of the atomic flag
 * @return the flag value before the set (0 if it was clear)
 */
rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
{
    rt_atomic_t prev = 0;
    rt_atomic_t one = 1;

#if __riscv_xlen == 32
    __asm__ volatile ("amoor.w %0, %1, (%2)" : "=r"(prev) : "r"(one), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    __asm__ volatile ("amoor.d %0, %1, (%2)" : "=r"(prev) : "r"(one), "r"(ptr) : "memory");
#endif
    return prev;
}
112+
113+
/**
 * Atomically clear the flag at *ptr.
 *
 * AMO AND with x0 (zero register) writes 0 to *ptr; the old value is
 * discarded.
 *
 * @param ptr address of the atomic flag
 */
void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr)
{
    rt_atomic_t discard = 0; /* previous value, intentionally unused */

#if __riscv_xlen == 32
    __asm__ volatile ("amoand.w %0, x0, (%1)" : "=r"(discard) : "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    __asm__ volatile ("amoand.d %0, x0, (%1)" : "=r"(discard) : "r"(ptr) : "memory");
#endif
}
122+
123+
/**
 * Strong compare-and-swap.
 *
 * If *ptr equals *old, atomically store desired into *ptr and return 1.
 * Otherwise copy the value observed in *ptr back into *old and return 0.
 *
 * Fixes over the previous revision:
 *  - The sc status code now goes into a dedicated scratch register.
 *    Previously sc wrote its status into the register holding the
 *    expected value (%[tmp]), so after a failed sc the retry iteration
 *    compared the reloaded value against the sc status code instead of
 *    against *old -- a "strong" CAS that could fail spuriously.
 *  - Outputs written before all inputs are consumed are marked
 *    earlyclobber ('&'); without it the compiler may allocate
 *    %[desired] or %[old] to the same register as an output, corrupting
 *    the sequence.
 *  - ptr is passed as a plain input operand; the asm never modifies it.
 *
 * Ordering: fence iorw,ow before the loop plus lr.aq / sc.rl give the
 * usual acquire/release semantics around the update.
 */
rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_atomic_t *old, rt_atomic_t desired)
{
    rt_atomic_t expect = *old;
    rt_atomic_t result = 0;
    rt_atomic_t status = 0;
#if __riscv_xlen == 32
    asm volatile(
        "   fence iorw, ow\n"
        "1: lr.w.aq %[result], (%[ptr])\n"
        "   bne     %[result], %[expect], 2f\n"
        "   sc.w.rl %[status], %[desired], (%[ptr])\n"
        "   bnez    %[status], 1b\n"
        "   li      %[result], 1\n"
        "   j       3f\n"
        "2: sw      %[result], (%[old])\n"
        "   li      %[result], 0\n"
        "3:\n"
        : [result]"=&r" (result), [status]"=&r" (status)
        : [ptr]"r" (ptr), [expect]"r" (expect), [desired]"r" (desired), [old]"r" (old)
        : "memory");
#elif __riscv_xlen == 64
    asm volatile(
        "   fence iorw, ow\n"
        "1: lr.d.aq %[result], (%[ptr])\n"
        "   bne     %[result], %[expect], 2f\n"
        "   sc.d.rl %[status], %[desired], (%[ptr])\n"
        "   bnez    %[status], 1b\n"
        "   li      %[result], 1\n"
        "   j       3f\n"
        "2: sd      %[result], (%[old])\n"
        "   li      %[result], 0\n"
        "3:\n"
        : [result]"=&r" (result), [status]"=&r" (status)
        : [ptr]"r" (ptr), [expect]"r" (expect), [desired]"r" (desired), [old]"r" (old)
        : "memory");
#endif
    return result;
}

libcpu/risc-v/virt64/interrupt.c

Lines changed: 39 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -104,17 +104,53 @@ rt_bool_t rt_hw_interrupt_is_disabled(void)
104104

105105
/*
 * Initialize a spinlock.
 *
 * NOTE(review): the implementation is still disabled upstream, so this
 * is currently a no-op.  The intended code (kept for reference) clears
 * the combined ticket word:
 *
 *     _lock->slock = 0;
 */
void rt_hw_spin_lock_init(rt_hw_spinlock_t *_lock)
{
}
109110

110111
/*
 * Acquire a spinlock.
 *
 * NOTE(review): the implementation is still commented out upstream, so
 * this function is currently a NO-OP and provides no mutual exclusion.
 * The intended design (kept for reference) is a ticket lock packed into
 * slock -- low 16 bits: owner, high 16 bits: next:
 *
 *     prev   = rt_hw_atomic_add((volatile rt_atomic_t *)&lock->slock,
 *                               (rt_atomic_t)(1UL << 16));   // take a ticket
 *     ticket = (prev >> 16) & 0xffffUL;
 *     while ((rt_hw_atomic_load((volatile rt_atomic_t *)&lock->slock)
 *             & 0xffffUL) != ticket)
 *         ;                                  // spin until owner == ticket
 *     __asm__ volatile("fence rw, rw" ::: "memory");   // acquire barrier
 *
 * Before enabling: confirm sizeof(lock->slock) matches rt_atomic_t
 * (on RV64 a 64-bit AMO on a 32-bit field would access out of bounds),
 * and that incrementing owner cannot carry into the next field.
 */
void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
{
}
114141

115142
/*
 * Release a spinlock.
 *
 * NOTE(review): currently a NO-OP to match rt_hw_spin_lock above; the
 * intended code (disabled upstream, kept for reference) publishes the
 * critical section and hands the lock to the next ticket holder:
 *
 *     __asm__ volatile("fence rw, rw" ::: "memory");   // release barrier
 *     rt_hw_atomic_add((volatile rt_atomic_t *)&lock->slock,
 *                      (rt_atomic_t)1);                // owner++
 *     // TODO: IPI to wake harts waiting for the lock
 */
void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
{
}
119155

120156
void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask)

0 commit comments

Comments
 (0)