
Commit d892bf8

[libcpu-riscv] [support SMP]: Add dynamic startup based on core configuration.

Fix issues with non-standard formatting.

Signed-off-by: Mengchen Teng <teng_mengchen@163.com>

1 parent 7a770ce commit d892bf8

File tree

5 files changed, +145 -145 lines changed

bsp/qemu-virt64-riscv/driver/board.c

Lines changed: 1 addition & 1 deletion

@@ -88,7 +88,7 @@ void rt_hw_board_init(void)
 #endif /* RT_USING_CONSOLE */
 
     rt_hw_tick_init();
-
+
 #ifdef RT_USING_SMP
     /* ipi init */
     rt_hw_ipi_init();

libcpu/risc-v/common64/atomic_riscv.c

Lines changed: 46 additions & 46 deletions

@@ -14,9 +14,9 @@ rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
     rt_atomic_t result = 0;
 #if __riscv_xlen == 32
-    asm volatile ("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
+    asm volatile("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
-    asm volatile ("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
+    asm volatile("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #endif
     return result;
 }
@@ -25,9 +25,9 @@ rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
     rt_atomic_t result = 0;
 #if __riscv_xlen == 32
-    asm volatile ("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
+    asm volatile("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
-    asm volatile ("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
+    asm volatile("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #endif
     return result;
 }
@@ -37,9 +37,9 @@ rt_atomic_t rt_hw_atomic_sub(volatile rt_atomic_t *ptr, rt_atomic_t val)
     rt_atomic_t result = 0;
     val = -val;
 #if __riscv_xlen == 32
-    asm volatile ("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
+    asm volatile("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
-    asm volatile ("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
+    asm volatile("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #endif
     return result;
 }
@@ -48,9 +48,9 @@ rt_atomic_t rt_hw_atomic_xor(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
     rt_atomic_t result = 0;
 #if __riscv_xlen == 32
-    asm volatile ("amoxor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
+    asm volatile("amoxor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
-    asm volatile ("amoxor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
+    asm volatile("amoxor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #endif
     return result;
 }
@@ -59,9 +59,9 @@ rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
     rt_atomic_t result = 0;
 #if __riscv_xlen == 32
-    asm volatile ("amoand.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
+    asm volatile("amoand.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
-    asm volatile ("amoand.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
+    asm volatile("amoand.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #endif
     return result;
 }
@@ -70,9 +70,9 @@ rt_atomic_t rt_hw_atomic_or(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
     rt_atomic_t result = 0;
 #if __riscv_xlen == 32
-    asm volatile ("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
+    asm volatile("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
-    asm volatile ("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
+    asm volatile("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #endif
     return result;
 }
@@ -81,9 +81,9 @@ rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr)
 {
     rt_atomic_t result = 0;
 #if __riscv_xlen == 32
-    asm volatile ("amoxor.w %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
+    asm volatile("amoxor.w %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
-    asm volatile ("amoxor.d %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
+    asm volatile("amoxor.d %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
 #endif
     return result;
 }
@@ -92,9 +92,9 @@ void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
 {
     rt_atomic_t result = 0;
 #if __riscv_xlen == 32
-    asm volatile ("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
+    asm volatile("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
-    asm volatile ("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
+    asm volatile("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
 #endif
 }
 
@@ -103,9 +103,9 @@ rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
     rt_atomic_t result = 0;
     rt_atomic_t temp = 1;
 #if __riscv_xlen == 32
-    asm volatile ("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
+    asm volatile("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
-    asm volatile ("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
+    asm volatile("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
 #endif
     return result;
 }
@@ -114,9 +114,9 @@ void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr)
 {
     rt_atomic_t result = 0;
 #if __riscv_xlen == 32
-    asm volatile ("amoand.w %0, x0, (%1)" : "=r"(result) :"r"(ptr) : "memory");
+    asm volatile("amoand.w %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
 #elif __riscv_xlen == 64
-    asm volatile ("amoand.d %0, x0, (%1)" : "=r"(result) :"r"(ptr) : "memory");
+    asm volatile("amoand.d %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
 #endif
 }
 
@@ -126,34 +126,34 @@ rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_a
     rt_atomic_t result = 0;
 #if __riscv_xlen == 32
     asm volatile(
-        " fence iorw, ow\n"
-        "1: lr.w.aq %[result], (%[ptr])\n"
-        " bne %[result], %[tmp], 2f\n"
-        " sc.w.rl %[tmp], %[desired], (%[ptr])\n"
-        " bnez %[tmp], 1b\n"
-        " li %[result], 1\n"
-        " j 3f\n"
-        " 2:sw %[result], (%[old])\n"
-        " li %[result], 0\n"
-        " 3:\n"
-        : [result]"+r" (result), [tmp]"+r" (tmp), [ptr]"+r" (ptr)
-        : [desired]"r" (desired), [old]"r"(old)
-        : "memory");
+        " fence iorw, ow\n"
+        "1: lr.w.aq %[result], (%[ptr])\n"
+        " bne %[result], %[tmp], 2f\n"
+        " sc.w.rl %[tmp], %[desired], (%[ptr])\n"
+        " bnez %[tmp], 1b\n"
+        " li %[result], 1\n"
+        " j 3f\n"
+        " 2:sw %[result], (%[old])\n"
+        " li %[result], 0\n"
+        " 3:\n"
+        : [result] "+r"(result), [tmp] "+r"(tmp), [ptr] "+r"(ptr)
+        : [desired] "r"(desired), [old] "r"(old)
+        : "memory");
 #elif __riscv_xlen == 64
     asm volatile(
-        " fence iorw, ow\n"
-        "1: lr.d.aq %[result], (%[ptr])\n"
-        " bne %[result], %[tmp], 2f\n"
-        " sc.d.rl %[tmp], %[desired], (%[ptr])\n"
-        " bnez %[tmp], 1b\n"
-        " li %[result], 1\n"
-        " j 3f\n"
-        " 2:sd %[result], (%[old])\n"
-        " li %[result], 0\n"
-        " 3:\n"
-        : [result]"+r" (result), [tmp]"+r" (tmp), [ptr]"+r" (ptr)
-        : [desired]"r" (desired), [old]"r"(old)
-        : "memory");
+        " fence iorw, ow\n"
+        "1: lr.d.aq %[result], (%[ptr])\n"
+        " bne %[result], %[tmp], 2f\n"
+        " sc.d.rl %[tmp], %[desired], (%[ptr])\n"
+        " bnez %[tmp], 1b\n"
+        " li %[result], 1\n"
+        " j 3f\n"
+        " 2:sd %[result], (%[old])\n"
+        " li %[result], 0\n"
+        " 3:\n"
+        : [result] "+r"(result), [tmp] "+r"(tmp), [ptr] "+r"(ptr)
+        : [desired] "r"(desired), [old] "r"(old)
+        : "memory");
 #endif
     return result;
 }
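
For context on the compare-and-swap reformatted above: the primitive returns 1 when the store-conditional succeeds and 0 when the observed value differs from the expectation, in which case the observed value is written back through old (the sw/sd to %[old] on the failure path). A typical caller therefore retries in a loop. The sketch below is illustrative only and not part of this commit; it assumes the signature rt_hw_atomic_compare_exchange_strong(ptr, old, desired) implied by the [desired] and [old] operands, and that the prototypes are visible via rtthread.h.

#include <rtthread.h>

/* Hedged sketch (not from this commit): a CAS retry loop built on the
 * primitive above. rt_hw_atomic_add already does an increment in a single
 * AMO instruction; this loop only demonstrates the CAS failure semantics. */
static volatile rt_atomic_t counter = 0;

void counter_increment(void)
{
    rt_atomic_t old = rt_hw_atomic_load(&counter);

    /* On failure the primitive stores the value it actually observed
     * into 'old', so each retry starts from a fresh expectation. */
    while (!rt_hw_atomic_compare_exchange_strong(&counter, &old, old + 1))
    {
        /* 'old' was refreshed by the failed attempt; try again. */
    }
}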

libcpu/risc-v/common64/cpuport.c

Lines changed: 11 additions & 11 deletions

@@ -19,15 +19,15 @@
 #include <encoding.h>
 
 #ifdef ARCH_RISCV_FPU
-#define K_SSTATUS_DEFAULT_BASE (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM | SSTATUS_FS)
+#define K_SSTATUS_DEFAULT_BASE (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM | SSTATUS_FS)
 #else
-#define K_SSTATUS_DEFAULT_BASE (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM)
+#define K_SSTATUS_DEFAULT_BASE (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM)
 #endif
 
 #ifdef ARCH_RISCV_VECTOR
-#define K_SSTATUS_DEFAULT (K_SSTATUS_DEFAULT_BASE | SSTATUS_VS)
+#define K_SSTATUS_DEFAULT (K_SSTATUS_DEFAULT_BASE | SSTATUS_VS)
 #else
-#define K_SSTATUS_DEFAULT K_SSTATUS_DEFAULT_BASE
+#define K_SSTATUS_DEFAULT K_SSTATUS_DEFAULT_BASE
 #endif
 #ifdef RT_USING_SMART
 #include <lwp_arch.h>
@@ -51,8 +51,7 @@ volatile rt_ubase_t rt_thread_switch_interrupt_flag = 0;
 
 void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus)
 {
-    rt_hw_switch_frame_t frame = (rt_hw_switch_frame_t)
-        ((rt_ubase_t)sp - sizeof(struct rt_hw_switch_frame));
+    rt_hw_switch_frame_t frame = (rt_hw_switch_frame_t)((rt_ubase_t)sp - sizeof(struct rt_hw_switch_frame));
 
     rt_memset(frame, 0, sizeof(struct rt_hw_switch_frame));
 
@@ -69,7 +68,7 @@ int rt_hw_cpu_id(void)
 #else
     /* Currently, the hartid is stored in the satp register. */
     uint32_t hart_id;
-    asm volatile ("csrr %0, satp" : "=r"(hart_id));
+    asm volatile("csrr %0, satp" : "=r"(hart_id));
     return hart_id;
 #endif /* RT_USING_SMP */
 }
@@ -126,7 +125,7 @@ void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t
 }
 #else
 void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread)
-    {
+{
     /* Perform architecture-specific context switch. This call will
      * restore the target thread context and should not return when a
      * switch is performed. The caller (scheduler) invoked this function
@@ -171,7 +170,8 @@ void rt_hw_secondary_cpu_up(void)
 
     for (hart = 0; hart < RT_CPUS_NR; hart++)
     {
-        if (hart == boot_hartid) continue;
+        if (hart == boot_hartid)
+            continue;
 
         ret = sbi_hsm_hart_start((unsigned long)hart,
                                  (unsigned long)entry_pa,
@@ -188,7 +188,7 @@ void secondary_cpu_entry(void)
     /* The PLIC peripheral interrupts are currently handled by the boot_hart. */
     /* Enable the Supervisor-Timer bit in SIE */
     rt_hw_tick_init();
-
+
 #ifdef RT_USING_SMP
     /* ipi init */
     rt_hw_ipi_init();
@@ -198,4 +198,4 @@ void secondary_cpu_entry(void)
     /* invoke system scheduler start for secondary CPU */
     rt_system_scheduler_start();
 }
-#endif /* RT_USING_SMP */
+#endif /* RT_USING_SMP */
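
The loop reformatted above is the "dynamic startup based on core configuration" named in the commit title: the boot hart walks hart IDs 0 through RT_CPUS_NR - 1 and asks the SBI HSM extension to start every hart except itself. Below is a standalone sketch of that pattern, modeled on rt_hw_secondary_cpu_up(); the error handling and the sbi.h include are illustrative assumptions, not taken from this commit. Per the SBI HSM hart_start call, the arguments are the hart ID, the physical start address, and an opaque value delivered to the started hart in a1.

#include <rtthread.h>
#include <sbi.h>   /* assumed: header providing sbi_hsm_hart_start(), as used by cpuport.c */

/* Hedged sketch of RT_CPUS_NR-driven secondary-hart bring-up. 'entry_pa'
 * must be the physical address of the secondary entry point. */
void start_secondary_harts(unsigned long entry_pa, int boot_hartid)
{
    for (int hart = 0; hart < RT_CPUS_NR; hart++)
    {
        if (hart == boot_hartid)
            continue; /* the boot hart is already running */

        /* Ask the SBI firmware to start 'hart' at 'entry_pa'; 0 means success. */
        int ret = sbi_hsm_hart_start((unsigned long)hart, entry_pa, 0UL);
        if (ret != 0)
        {
            rt_kprintf("failed to start hart %d (sbi error %d)\n", hart, ret);
        }
    }
}

A hart that is absent or already started simply leaves an error code to report, so the loop degrades gracefully when fewer physical harts exist than RT_CPUS_NR configures.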
