2626
2727using utest::v1::Case;
2828
29-
3029namespace {
3130
3231/* Lock-free operations will be much faster - keep runtime down */
33- #if MBED_ATOMIC_INT_LOCK_FREE
34- #define ADD_ITERATIONS (SystemCoreClock / 1000 )
35- #else
36- #define ADD_ITERATIONS (SystemCoreClock / 8000 )
37- #endif
32+ #define ADD_UNLOCKED_ITERATIONS (SystemCoreClock / 1000 )
33+ #define ADD_LOCKED_ITERATIONS (SystemCoreClock / 8000 )
3834
39- template <typename T >
40- void add_incrementer (T *ptr )
35+ template <typename A >
36+ static inline long add_iterations (A &a )
4137{
42- for (long i = ADD_ITERATIONS; i > 0 ; i--) {
43- core_util_atomic_fetch_add (ptr, T (1 ));
44- }
38+ return a.is_lock_free () ? ADD_UNLOCKED_ITERATIONS : ADD_LOCKED_ITERATIONS;
4539}
4640
/* Incrementer using the atomic pre-increment operator. */
template<typename A>
struct add_incrementer {
    static void op(A *ptr)
    {
        for (long i = add_iterations(*ptr); i > 0; i--) {
            ++(*ptr);
        }
    }
};
5450
55- template <typename T>
56- void sub_incrementer (T *ptr)
57- {
58- for (long i = ADD_ITERATIONS; i > 0 ; i--) {
59- core_util_atomic_fetch_sub (ptr, T (-1 ));
51+ template <typename A>
52+ struct add_release_incrementer {
53+ static void op (A *ptr)
54+ {
55+ for (long i = add_iterations (*ptr); i > 0 ; i--) {
56+ ptr->fetch_add (1 , mbed::memory_order_release);
57+ }
6058 }
61- }
59+ };
6260
/* Incrementer using fetch_sub: subtracting -1 adds 1, which
 * exercises the subtraction path while keeping the same net effect
 * as the other incrementers. */
template<typename A>
struct sub_incrementer {
    static void op(A *ptr)
    {
        for (long i = add_iterations(*ptr); i > 0; i--) {
            ptr->fetch_sub(-1);
        }
    }
};
7270
/* Incrementer that also exercises the bitwise compound-assignment
 * overloads: "&= -1" and "|= 0" are deliberate no-ops on the value,
 * so the net effect per iteration is still +1. */
template<typename A>
struct bitops_incrementer {
    static void op(A *ptr)
    {
        for (long i = add_iterations(*ptr); i > 0; i--) {
            (*ptr) += 1;
            (*ptr) &= -1;
            (*ptr) |= 0;
        }
    }
};
8282
/* Incrementer built from a load + compare_exchange_weak retry loop.
 * compare_exchange_weak updates 'val' on failure, so the empty do/while
 * body is sufficient. */
template<typename A>
struct weak_incrementer {
    static void op(A *ptr)
    {
        for (long i = add_iterations(*ptr); i > 0; i--) {
            typename A::value_type val = ptr->load();
            do {
            } while (!ptr->compare_exchange_weak(val, val + 1));
        }
    }
};
/* Incrementer built from a load + compare_exchange_strong retry loop.
 * Same shape as weak_incrementer, but the strong variant cannot fail
 * spuriously. */
template<typename A>
struct strong_incrementer {
    static void op(A *ptr)
    {
        for (long i = add_iterations(*ptr); i > 0; i--) {
            typename A::value_type val = ptr->load();
            do {
            } while (!ptr->compare_exchange_strong(val, val + 1));
        }
    }
};
106+
92107
93108
94109/*
@@ -100,32 +115,34 @@ void strong_incrementer(T *ptr)
100115 * Using core_util_atomic_ templates, and exercising
101116 * load and store briefly.
102117 */
103- template <typename T, void (*Fn)(T *) >
118+ template <typename T, template < typename A> class Fn >
104119void test_atomic_add ()
105120{
106121 struct {
107122 volatile T nonatomic1;
108- T atomic1;
109- T atomic2;
123+ Atomic<T> atomic1;
124+ volatile Atomic<T> atomic2; // use volatile just to exercise the templates' volatile methods
110125 volatile T nonatomic2;
111- } data;
126+ } data = { 0 , { 0 }, { 1 }, 0 }; // test initialisation
127+
128+ TEST_ASSERT_EQUAL (sizeof (T), sizeof data.nonatomic1 );
129+ TEST_ASSERT_EQUAL (sizeof (T), sizeof data.atomic1 );
130+ TEST_ASSERT_EQUAL (4 * sizeof (T), sizeof data);
112131
113- data.nonatomic1 = 0 ;
114- core_util_atomic_store (&data.atomic1 , T (0 ));
115- core_util_atomic_store (&data.atomic2 , T (0 ));
116- data.nonatomic2 = 0 ;
132+ // test store
133+ data.atomic2 = 0 ;
117134
118135 Thread t1 (osPriorityNormal, THREAD_STACK);
119136 Thread t2 (osPriorityNormal, THREAD_STACK);
120137 Thread t3 (osPriorityNormal, THREAD_STACK);
121138 Thread t4 (osPriorityNormal, THREAD_STACK);
122139
123- TEST_ASSERT_EQUAL (osOK, t1.start (callback (Fn, &data.atomic1 )));
124- TEST_ASSERT_EQUAL (osOK, t2.start (callback (Fn, &data.atomic1 )));
125- TEST_ASSERT_EQUAL (osOK, t3.start (callback (Fn, &data.atomic2 )));
126- TEST_ASSERT_EQUAL (osOK, t4.start (callback (Fn, &data.atomic2 )));
140+ TEST_ASSERT_EQUAL (osOK, t1.start (callback (Fn< decltype (data. atomic1 )>::op , &data.atomic1 )));
141+ TEST_ASSERT_EQUAL (osOK, t2.start (callback (Fn< decltype (data. atomic1 )>::op , &data.atomic1 )));
142+ TEST_ASSERT_EQUAL (osOK, t3.start (callback (Fn< decltype (data. atomic2 )>::op , &data.atomic2 )));
143+ TEST_ASSERT_EQUAL (osOK, t4.start (callback (Fn< decltype (data. atomic2 )>::op , &data.atomic2 )));
127144
128- for (long i = ADD_ITERATIONS ; i > 0 ; i--) {
145+ for (long i = ADD_UNLOCKED_ITERATIONS ; i > 0 ; i--) {
129146 data.nonatomic1 ++;
130147 data.nonatomic2 ++;
131148 }
@@ -135,10 +152,83 @@ void test_atomic_add()
135152 t3.join ();
136153 t4.join ();
137154
138- TEST_ASSERT_EQUAL (T (ADD_ITERATIONS), data.nonatomic1 );
139- TEST_ASSERT_EQUAL (T (2 * ADD_ITERATIONS), core_util_atomic_load (&data.atomic1 ));
140- TEST_ASSERT_EQUAL (T (2 * ADD_ITERATIONS), core_util_atomic_load (&data.atomic2 ));
141- TEST_ASSERT_EQUAL (T (ADD_ITERATIONS), data.nonatomic2 );
155+ TEST_ASSERT_EQUAL (T (ADD_UNLOCKED_ITERATIONS), data.nonatomic1 );
156+ TEST_ASSERT_EQUAL (T (2 * add_iterations (data.atomic1 )), data.atomic1 );
157+ TEST_ASSERT_EQUAL (T (2 * add_iterations (data.atomic2 )), data.atomic2 );
158+ TEST_ASSERT_EQUAL (T (ADD_UNLOCKED_ITERATIONS), data.nonatomic2 );
159+ }
160+
// This should fit into a uint32_t container, and there
// will be 1 byte of padding to ignore.
struct small {
    uint8_t a;
    uint8_t b;
    uint8_t c;
};
168+
// An 11-byte weird structure. Should work with critical sections.
struct large {
    uint8_t a;
    uint8_t b;
    uint8_t c;
    uint8_t dummy[8];
};
176+
/* CAS-loop incrementer for member 'a' of an atomic struct.
 * Loads the whole value, bumps one field, and retries with
 * compare_exchange_weak until no other thread intervened. */
template<typename A>
void struct_incrementer_a(A *data)
{
    for (long i = add_iterations(*data); i > 0; i--) {
        typename A::value_type curval = *data, newval;
        do {
            newval = curval;
            newval.a++;
        } while (!data->compare_exchange_weak(curval, newval));
    }
}
188+
/* CAS-loop incrementer for member 'b' of an atomic struct - same
 * retry pattern as struct_incrementer_a, different field. */
template<typename A>
void struct_incrementer_b(A *data)
{
    for (long i = add_iterations(*data); i > 0; i--) {
        typename A::value_type curval = *data, newval;
        do {
            newval = curval;
            newval.b++;
        } while (!data->compare_exchange_weak(curval, newval));
    }
}
200+
201+ template <typename T, size_t N>
202+ void test_atomic_struct ()
203+ {
204+ TEST_ASSERT_EQUAL (N, sizeof (Atomic<T>));
205+
206+ // Small structures don't have value constructor implemented;
207+ Atomic<T> data;
208+ atomic_init (&data, T{0 , 0 , 0 });
209+
210+ Thread t1 (osPriorityNormal, THREAD_STACK);
211+ Thread t2 (osPriorityNormal, THREAD_STACK);
212+
213+ TEST_ASSERT_EQUAL (osOK, t1.start (callback (struct_incrementer_a<Atomic<T> >, &data)));
214+ TEST_ASSERT_EQUAL (osOK, t2.start (callback (struct_incrementer_b<Atomic<T> >, &data)));
215+
216+ for (long i = add_iterations (data); i > 0 ; i--) {
217+ T curval = data, newval;
218+ do {
219+ newval = curval;
220+ newval.c ++;
221+ } while (!data.compare_exchange_weak (curval, newval));
222+ }
223+
224+ t1.join ();
225+ t2.join ();
226+
227+ T final_val = data;
228+
229+ TEST_ASSERT_EQUAL (uint8_t (add_iterations (data)), final_val.a );
230+ TEST_ASSERT_EQUAL (uint8_t (add_iterations (data)), final_val.b );
231+ TEST_ASSERT_EQUAL (uint8_t (add_iterations (data)), final_val.c );
142232}
143233
144234} // namespace
@@ -174,7 +264,9 @@ Case cases[] = {
174264 Case (" Test atomic compare exchange strong 8-bit" , test_atomic_add<uint8_t , strong_incrementer>),
175265 Case (" Test atomic compare exchange strong 16-bit" , test_atomic_add<uint16_t , strong_incrementer>),
176266 Case (" Test atomic compare exchange strong 32-bit" , test_atomic_add<uint32_t , strong_incrementer>),
177- Case (" Test atomic compare exchange strong 64-bit" , test_atomic_add<uint64_t , strong_incrementer>)
267+ Case (" Test atomic compare exchange strong 64-bit" , test_atomic_add<uint64_t , strong_incrementer>),
268+ Case (" Test small atomic custom structure" , test_atomic_struct<small, 4 >),
269+ Case (" Test large atomic custom structure" , test_atomic_struct<large, 11 >)
178270};
179271
180272utest::v1::Specification specification (test_setup, cases);
0 commit comments