@@ -1,8 +1,10 @@
 #include <assert.h>
 #include <errno.h>
+#include <stdatomic.h>
 #include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
 
-#include "atomics.h"
 #include "lfq.h"
 
 #define MAX_FREE 150
@@ -20,14 +22,14 @@ static bool in_hp(struct lfq_ctx *ctx, struct lfq_node *node)
 static void insert_pool(struct lfq_ctx *ctx, struct lfq_node *node)
 {
     atomic_store(&node->free_next, NULL);
-    struct lfq_node *old_tail = XCHG(&ctx->fpt, node); /* seq_cst */
+    struct lfq_node *old_tail = atomic_exchange(&ctx->fpt, node); /* seq_cst */
     atomic_store(&old_tail->free_next, node);
 }
 
 static void free_pool(struct lfq_ctx *ctx, bool freeall)
 {
     bool old = 0;
-    if (!CAS(&ctx->is_freeing, &old, 1))
+    if (!atomic_compare_exchange_strong(&ctx->is_freeing, &old, 1))
         return;
 
     for (int i = 0; i < MAX_FREE || freeall; i++) {
@@ -39,20 +41,20 @@ static void free_pool(struct lfq_ctx *ctx, bool freeall)
         free(p);
     }
     atomic_store(&ctx->is_freeing, false);
-    smp_mb();
+    atomic_thread_fence(memory_order_seq_cst);
 }
 
 static void safe_free(struct lfq_ctx *ctx, struct lfq_node *node)
 {
     if (atomic_load(&node->can_free) && !in_hp(ctx, node)) {
         /* free is not thread-safe */
         bool old = 0;
-        if (CAS(&ctx->is_freeing, &old, 1)) {
+        if (atomic_compare_exchange_strong(&ctx->is_freeing, &old, 1)) {
             /* poison the pointer to detect use-after-free */
             node->next = (void *) -1;
             free(node); /* we got the lock; actually free */
             atomic_store(&ctx->is_freeing, false);
-            smp_mb();
+            atomic_thread_fence(memory_order_seq_cst);
         } else /* we did not get the lock; only add to a freelist */
             insert_pool(ctx, node);
     } else
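
Aside: free_pool() and safe_free() both use this compare-and-swap as a trylock. A minimal, self-contained sketch of the idiom follows; the flag and helper names are illustrative, not part of lfq:

#include <stdatomic.h>
#include <stdbool.h>

/* Exactly one caller sees the flag transition false -> true; every
 * other caller fails fast instead of blocking. Note that "expected"
 * must be a local: atomic_compare_exchange_strong overwrites it with
 * the observed value on failure.
 */
static bool try_acquire(atomic_bool *flag)
{
    bool expected = false;
    return atomic_compare_exchange_strong(flag, &expected, true);
}
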
@@ -65,7 +67,7 @@ static int alloc_tid(struct lfq_ctx *ctx)
     for (int i = 0; i < ctx->MAX_HP_SIZE; i++) {
         if (ctx->tid_map[i] == 0) {
             int old = 0;
-            if (CAS(&ctx->tid_map[i], &old, 1))
+            if (atomic_compare_exchange_strong(&ctx->tid_map[i], &old, 1))
                 return i;
         }
     }
@@ -141,7 +143,7 @@ int lfq_enqueue(struct lfq_ctx *ctx, void *data)
         return -errno;
 
     insert_node->data = data;
-    struct lfq_node *old_tail = XCHG(&ctx->tail, insert_node);
+    struct lfq_node *old_tail = atomic_exchange(&ctx->tail, insert_node);
     /* We have claimed our spot in the insertion order by modifying tail.
      * We are the only inserting thread with a pointer to the old tail.
      *
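
Aside: the exchange on tail is the linchpin of lfq_enqueue(). A self-contained sketch of the same two-step append, using illustrative types rather than lfq's own:

#include <stdatomic.h>
#include <stddef.h>

struct node {
    struct node *_Atomic next;
    void *data;
};

/* atomic_exchange serializes concurrent producers: each one atomically
 * claims the previous tail, then links the old tail to its own node.
 * Between the two stores the list is momentarily unlinked; consumers
 * tolerate this by treating a NULL ->next as "not yet published".
 */
static void append(struct node *_Atomic *tail, struct node *n)
{
    atomic_store(&n->next, NULL);
    struct node *old_tail = atomic_exchange(tail, n); /* claim order */
    atomic_store(&old_tail->next, n);                 /* publish link */
}
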
@@ -162,13 +164,13 @@ void *lfq_dequeue_tid(struct lfq_ctx *ctx, int tid)
     /* HP[tid] is necessary for deallocation. */
     do {
     retry:
-        /* continue jumps to the bottom of the loop, and would attempt a CAS
-         * with uninitialized new_head.
+        /* continue jumps to the bottom of the loop, and would attempt an
+         * atomic_compare_exchange_strong with uninitialized new_head.
          */
         old_head = atomic_load(&ctx->head);
 
         atomic_store(&ctx->HP[tid], old_head);
-        mb();
+        atomic_thread_fence(memory_order_seq_cst);
 
         /* another thread freed it before seeing our HP[tid] store */
         if (old_head != atomic_load(&ctx->head))
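
Aside: the lines above are the classic hazard-pointer publish-then-validate handshake. Distilled into a standalone helper, with illustrative names that are not part of lfq:

#include <stdatomic.h>

static void *acquire_hp(void *_Atomic *shared, void *_Atomic *hp_slot)
{
    void *p;
    do {
        p = atomic_load(shared);        /* candidate pointer */
        atomic_store(hp_slot, p);       /* publish our hazard pointer */
        /* order the HP store before the re-read below */
        atomic_thread_fence(memory_order_seq_cst);
    } while (p != atomic_load(shared)); /* pointer moved? a reclaimer may
                                         * have missed our HP; retry */
    return p;
}
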
@@ -179,7 +181,7 @@ void *lfq_dequeue_tid(struct lfq_ctx *ctx, int tid)
             atomic_store(&ctx->HP[tid], 0);
             return NULL; /* never remove the last node */
         }
-    } while (!CAS(&ctx->head, &old_head, new_head));
+    } while (!atomic_compare_exchange_strong(&ctx->head, &old_head, new_head));
 
     /* We have atomically advanced head, and we are the thread that won the race
      * to claim a node. We return the data from the *new* head. The list starts
@@ -191,7 +193,7 @@ void *lfq_dequeue_tid(struct lfq_ctx *ctx, int tid)
     atomic_store(&new_head->can_free, true);
 
     /* we need to avoid freeing until other readers are definitely not going to
-     * load its ->next in the CAS loop
+     * load its ->next in the atomic_compare_exchange_strong loop
      */
     safe_free(ctx, (struct lfq_node *) old_head);
 
@@ -208,4 +210,4 @@ void *lfq_dequeue(struct lfq_ctx *ctx)
     void *ret = lfq_dequeue_tid(ctx, tid);
     free_tid(ctx, tid);
     return ret;
-}
+}
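
With the custom atomics.h gone, callers need nothing beyond what lfq.h pulls in. A minimal single-threaded smoke test of the public API touched above; the lfq_init()/lfq_release() names, the initializer's second argument, and the success-is-zero convention are assumptions to verify against lfq.h:

#include <assert.h>
#include <stdio.h>
#include "lfq.h"

int main(void)
{
    struct lfq_ctx ctx;
    if (lfq_init(&ctx, 4) != 0)     /* 4: assumed max dequeuing threads */
        return 1;

    int value = 42;
    assert(lfq_enqueue(&ctx, &value) == 0); /* returns -errno on failure */

    int *out = lfq_dequeue(&ctx);   /* allocates and frees a tid per call */
    printf("dequeued: %d\n", out ? *out : -1);

    assert(lfq_dequeue(&ctx) == NULL); /* empty queue yields NULL */
    lfq_release(&ctx);
    return 0;
}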