#include "interrupt.h"

struct rt_irq_desc irq_desc[MAX_HANDLERS];
+#ifdef RT_USING_SMP
+struct rt_irq_desc ipi_desc[RT_MAX_IPI];
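+/* Per-hart bitmask of pending IPI vectors: bit n set means vector n is pending */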
+uint8_t ipi_vectors[RT_CPUS_NR] = {0};
+#endif

static rt_isr_handler_t rt_hw_interrupt_handle(rt_uint32_t vector, void *param)
{
@@ -94,6 +98,16 @@ void rt_hw_interrupt_init()
}

#ifdef RT_USING_SMP
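+/* PLIC helpers: configure and read back the priority of an external interrupt source */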
+void rt_hw_interrupt_set_priority(int vector, unsigned int priority)
+{
+    plic_set_priority(vector, priority);
+}
+
+unsigned int rt_hw_interrupt_get_priority(int vector)
+{
+    return (*(uint32_t *)PLIC_PRIORITY(vector));
+}
+
rt_bool_t rt_hw_interrupt_is_disabled(void)
{
    /* Determine the interrupt enable state */
@@ -104,57 +118,109 @@ rt_bool_t rt_hw_interrupt_is_disabled(void)

void rt_hw_spin_lock_init(rt_hw_spinlock_t *_lock)
{
-    // union rt_hw_spinlock_t *lock = (void *)_lock;
-    // _lock->slock = 0;
+    /* reset both ticket fields (owner and next) with a single store */
+    _lock->slock = 0;
}

void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
{
-    // /* Use ticket lock implemented on top of the 32/64-bit atomic AMO ops.
-    //  * The combined word layout (slock) maps two uint16_t fields:
-    //  *   low 16 bits:  owner
-    //  *   high 16 bits: next (ticket allocator)
-    //  * We atomically increment the "next" field by (1 << 16) and use the
-    //  * returned old value to compute our ticket. Then wait until owner == ticket.
-    //  */
-    // rt_atomic_t prev;
-    // rt_atomic_t ticket;
-    // rt_atomic_t owner;
-
-    // /* Allocate a ticket by adding (1 << 16) to slock, prev holds previous value */
-    // prev = rt_hw_atomic_add((volatile rt_atomic_t *)&lock->slock, (rt_atomic_t)(1UL << 16));
-    // ticket = (prev >> 16) & 0xffffUL;
-
-    // /* Wait until owner equals our ticket */
-    // for (;;)
-    // {
-    //     owner = rt_hw_atomic_load((volatile rt_atomic_t *)&lock->slock) & 0xffffUL;
-    //     if (owner == ticket)
-    //         break;
-    //     /* TODO: low-power wait for interrupt while spinning */
-    //     // __asm__ volatile("wfi" ::: "memory");
-    // }
-
-    // /* Ensure all following memory accesses are ordered after acquiring the lock */
-    // __asm__ volatile("fence rw, rw" ::: "memory");
+    /* Use ticket lock implemented on top of the 32/64-bit atomic AMO ops.
+     * The combined word layout (slock) maps two uint16_t fields:
+     *   low 16 bits:  owner
+     *   high 16 bits: next (ticket allocator)
+     * We atomically increment the "next" field by (1 << 16) and use the
+     * returned old value to compute our ticket. Then wait until owner == ticket.
+     */
+    rt_atomic_t prev;
+    rt_atomic_t ticket;
+    rt_atomic_t owner;
+
+    /* Allocate a ticket by adding (1 << 16) to slock, prev holds previous value */
+    prev = rt_hw_atomic_add((volatile rt_atomic_t *)&lock->slock, (rt_atomic_t)(1UL << 16));
+    ticket = (prev >> 16) & 0xffffUL;
+
+    /* Wait until owner equals our ticket */
+    for (;;)
+    {
+        owner = rt_hw_atomic_load((volatile rt_atomic_t *)&lock->slock) & 0xffffUL;
+        if (owner == ticket)
+            break;
+        /* TODO: low-power wait for interrupt while spinning */
+        // __asm__ volatile("wfi" ::: "memory");
+    }
+
+    /* Ensure all following memory accesses are ordered after acquiring the lock */
+    __asm__ volatile("fence rw, rw" ::: "memory");
}

void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
{
-    // /* Ensure memory operations before unlock are visible before owner increment */
-    // __asm__ volatile("fence rw, rw" ::: "memory");
+    /* Ensure memory operations before unlock are visible before owner increment */
+    __asm__ volatile("fence rw, rw" ::: "memory");

-    // /* Increment owner (low 16 bits) to hand over lock to next ticket */
-    // rt_hw_atomic_add((volatile rt_atomic_t *)&lock->slock, (rt_atomic_t)1);
+    /* Increment owner (low 16 bits) to hand over lock to next ticket */
+    rt_hw_atomic_add((volatile rt_atomic_t *)&lock->slock, (rt_atomic_t)1);

-    // // TODO: IPI interrupt to wake up other harts waiting for the lock
+    // TODO: IPI interrupt to wake up other harts waiting for the lock

-    // /* Make the increment visible to other harts */
-    // __asm__ volatile("fence rw, rw" ::: "memory");
+    /* Make the increment visible to other harts */
+    __asm__ volatile("fence rw, rw" ::: "memory");
}

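+/* Flag ipi_vector as pending for every hart selected by cpu_mask, then
+ * raise a supervisor software interrupt on those harts through SBI. */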
void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask)
{
-
+    /* ipi_vectors is a per-hart bitmask, so encode the vector as a bit and
+     * flag every hart addressed by cpu_mask before raising the SBI IPI. */
+    unsigned long hart_mask = cpu_mask; /* widen: sbi_send_ipi reads an unsigned long */
+    unsigned int mask = cpu_mask;
+    while (mask)
+    {
+        int cpuid = __builtin_ctz(mask); /* index of the lowest set bit */
+        ipi_vectors[cpuid] |= (uint8_t)(1U << ipi_vector);
+        mask &= mask - 1; /* clear that bit */
+    }
+    sbi_send_ipi(&hart_mask);
+}
+
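+/* Per-hart IPI setup: clear this hart's pending flags, reset the descriptor
+ * table and enable the supervisor software interrupt. */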
+void rt_hw_ipi_init(void)
+{
+    int idx = 0, cpuid = rt_cpu_get_id();
+    ipi_vectors[cpuid] = 0;
+    /* init the IPI descriptor table */
+    for (idx = 0; idx < RT_MAX_IPI; idx++)
+    {
+        ipi_desc[idx].handler = RT_NULL;
+        ipi_desc[idx].param = RT_NULL;
+#ifdef RT_USING_INTERRUPT_INFO
+        rt_snprintf(ipi_desc[idx].name, RT_NAME_MAX - 1, "default");
+        ipi_desc[idx].counter = 0;
+#endif
+    }
+    /* enable supervisor software interrupts (SSIP/SSIE share the same bit position) */
+    set_csr(sie, SIP_SSIP);
+}
193+
+void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler)
+{
+    if (ipi_vector < RT_MAX_IPI && ipi_isr_handler != RT_NULL)
+    {
+        ipi_desc[ipi_vector].handler = (rt_isr_handler_t)ipi_isr_handler;
+        ipi_desc[ipi_vector].param = RT_NULL;
+    }
+}
+
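+/* Software interrupt entry: dispatch every IPI vector pending on the
+ * current hart, then acknowledge the interrupt. */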
+void rt_hw_ipi_handler(void)
+{
+    rt_uint32_t ipi_vector;
+
+    ipi_vector = ipi_vectors[rt_cpu_get_id()];
+    while (ipi_vector)
+    {
+        int bitpos = __builtin_ctz(ipi_vector);
+        ipi_vector &= ~(1 << bitpos);
+        if (bitpos < RT_MAX_IPI && ipi_desc[bitpos].handler != RT_NULL)
+        {
+            /* call the IPI service routine */
+            ipi_desc[bitpos].handler(bitpos, ipi_desc[bitpos].param);
+        }
+    }
+    ipi_vectors[rt_cpu_get_id()] = 0;
+
+    /* clear the supervisor software interrupt pending bit */
+    clear_csr(sip, SIP_SSIP);
}
#endif /* RT_USING_SMP */
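
For reference, a minimal usage sketch of the IPI API added above. The vector name RT_SCHEDULE_IPI is taken from RT-Thread's SMP build; the handler name, demo function and the hart-1 mask are hypothetical:

    /* Hypothetical demo: install a handler for one IPI vector, then signal hart 1. */
    static void test_ipi_isr(int vector, void *param)
    {
        rt_kprintf("ipi %d on cpu %d\n", vector, rt_cpu_get_id());
    }

    void board_ipi_demo(void)
    {
        rt_hw_ipi_init();                            /* per-hart setup, run on each CPU */
        rt_hw_ipi_handler_install(RT_SCHEDULE_IPI, test_ipi_isr);
        rt_hw_ipi_send(RT_SCHEDULE_IPI, 1u << 1);    /* bit 1 selects hart 1 */
    }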