 /// Sealed traits and implementations for `spsc`
 pub mod spsc {
     #[cfg(has_atomics)]
-    use crate::spsc::{MultiCore, SingleCore};
-    #[cfg(has_atomics)]
-    use core::sync::atomic::{self, AtomicU16, AtomicU8, AtomicUsize, Ordering};
-
-    pub unsafe trait XCore {
-        fn is_multi_core() -> bool;
-    }
-
-    #[cfg(has_atomics)]
-    unsafe impl XCore for SingleCore {
-        fn is_multi_core() -> bool {
-            false
-        }
-    }
-
-    #[cfg(has_atomics)]
-    unsafe impl XCore for MultiCore {
-        fn is_multi_core() -> bool {
-            true
-        }
-    }
+    use core::sync::atomic::{AtomicU16, AtomicU8, AtomicUsize, Ordering};
 
     pub unsafe trait Uxx: Into<usize> + Send {
         #[doc(hidden)]
@@ -32,19 +12,15 @@ pub mod spsc {
 
         #[cfg(has_atomics)]
         #[doc(hidden)]
-        unsafe fn load_acquire<C>(x: *const Self) -> Self
-        where
-            C: XCore;
+        unsafe fn load_acquire(x: *const Self) -> Self;
 
         #[cfg(has_atomics)]
         #[doc(hidden)]
         fn load_relaxed(x: *const Self) -> Self;
 
         #[cfg(has_atomics)]
         #[doc(hidden)]
-        unsafe fn store_release<C>(x: *const Self, val: Self)
-        where
-            C: XCore;
+        unsafe fn store_release(x: *const Self, val: Self);
     }
 
     unsafe impl Uxx for u8 {
@@ -62,17 +38,11 @@ pub mod spsc {
         }
 
         #[cfg(has_atomics)]
-        unsafe fn load_acquire<C>(x: *const Self) -> Self
-        where
-            C: XCore,
-        {
-            if C::is_multi_core() {
-                (*(x as *const AtomicU8)).load(Ordering::Acquire)
-            } else {
-                let y = (*(x as *const AtomicU8)).load(Ordering::Relaxed); // read
-                atomic::compiler_fence(Ordering::Acquire); // ▼
-                y
-            }
+        unsafe fn load_acquire(x: *const Self) -> Self {
+            (*(x as *const AtomicU8)).load(Ordering::Acquire)
+            // let y = (*(x as *const AtomicU8)).load(Ordering::Relaxed); // read
+            // atomic::compiler_fence(Ordering::Acquire); // ▼
+            // y
         }
 
         #[cfg(has_atomics)]
@@ -81,16 +51,10 @@ pub mod spsc {
         }
 
         #[cfg(has_atomics)]
-        unsafe fn store_release<C>(x: *const Self, val: Self)
-        where
-            C: XCore,
-        {
-            if C::is_multi_core() {
-                (*(x as *const AtomicU8)).store(val, Ordering::Release)
-            } else {
-                atomic::compiler_fence(Ordering::Release); // ▲
-                (*(x as *const AtomicU8)).store(val, Ordering::Relaxed) // write
-            }
+        unsafe fn store_release(x: *const Self, val: Self) {
+            (*(x as *const AtomicU8)).store(val, Ordering::Release)
+            // atomic::compiler_fence(Ordering::Release); // ▲
+            // (*(x as *const AtomicU8)).store(val, Ordering::Relaxed) // write
         }
     }
 
@@ -109,17 +73,11 @@ pub mod spsc {
         }
 
         #[cfg(has_atomics)]
-        unsafe fn load_acquire<C>(x: *const Self) -> Self
-        where
-            C: XCore,
-        {
-            if C::is_multi_core() {
-                (*(x as *const AtomicU16)).load(Ordering::Acquire)
-            } else {
-                let y = (*(x as *const AtomicU16)).load(Ordering::Relaxed); // read
-                atomic::compiler_fence(Ordering::Acquire); // ▼
-                y
-            }
+        unsafe fn load_acquire(x: *const Self) -> Self {
+            (*(x as *const AtomicU16)).load(Ordering::Acquire)
+            // let y = (*(x as *const AtomicU16)).load(Ordering::Relaxed); // read
+            // atomic::compiler_fence(Ordering::Acquire); // ▼
+            // y
         }
 
         #[cfg(has_atomics)]
@@ -128,16 +86,10 @@ pub mod spsc {
         }
 
         #[cfg(has_atomics)]
-        unsafe fn store_release<C>(x: *const Self, val: Self)
-        where
-            C: XCore,
-        {
-            if C::is_multi_core() {
-                (*(x as *const AtomicU16)).store(val, Ordering::Release)
-            } else {
-                atomic::compiler_fence(Ordering::Release); // ▲
-                (*(x as *const AtomicU16)).store(val, Ordering::Relaxed) // write
-            }
+        unsafe fn store_release(x: *const Self, val: Self) {
+            (*(x as *const AtomicU16)).store(val, Ordering::Release)
+            // atomic::compiler_fence(Ordering::Release); // ▲
+            // (*(x as *const AtomicU16)).store(val, Ordering::Relaxed) // write
         }
     }
 
@@ -151,17 +103,11 @@ pub mod spsc {
         }
 
         #[cfg(has_atomics)]
-        unsafe fn load_acquire<C>(x: *const Self) -> Self
-        where
-            C: XCore,
-        {
-            if C::is_multi_core() {
-                (*(x as *const AtomicUsize)).load(Ordering::Acquire)
-            } else {
-                let y = (*(x as *const AtomicUsize)).load(Ordering::Relaxed); // read
-                atomic::compiler_fence(Ordering::Acquire); // ▼
-                y
-            }
+        unsafe fn load_acquire(x: *const Self) -> Self {
+            (*(x as *const AtomicUsize)).load(Ordering::Acquire)
+            // let y = (*(x as *const AtomicUsize)).load(Ordering::Relaxed); // read
+            // atomic::compiler_fence(Ordering::Acquire); // ▼
+            // y
         }
 
         #[cfg(has_atomics)]
@@ -170,16 +116,10 @@ pub mod spsc {
         }
 
        #[cfg(has_atomics)]
-        unsafe fn store_release<C>(x: *const Self, val: Self)
-        where
-            C: XCore,
-        {
-            if C::is_multi_core() {
-                (*(x as *const AtomicUsize)).store(val, Ordering::Release)
-            } else {
-                atomic::compiler_fence(Ordering::Release); // ▲
-                (*(x as *const AtomicUsize)).store(val, Ordering::Relaxed); // write
-            }
+        unsafe fn store_release(x: *const Self, val: Self) {
+            (*(x as *const AtomicUsize)).store(val, Ordering::Release)
+            // atomic::compiler_fence(Ordering::Release); // ▲
+            // (*(x as *const AtomicUsize)).store(val, Ordering::Relaxed); // write
+        }
     }
 }
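Note on the ordering pattern: after this change, `load_acquire` and `store_release` always use real `Acquire`/`Release` atomics rather than a relaxed access plus `compiler_fence` on single-core targets. The sketch below is purely illustrative and not part of the heapless crate; it uses a hypothetical `Slot` type and std threads (heapless itself is no_std) to show why the Release store of an index paired with an Acquire load makes the payload write visible to the other side.

    // Minimal, self-contained sketch of the Release/Acquire handoff that the
    // simplified `store_release`/`load_acquire` helpers encode. `Slot` is a
    // hypothetical type invented for this example, not something from heapless.
    use std::cell::UnsafeCell;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;
    use std::thread;

    struct Slot {
        ready: AtomicUsize,     // 0 = empty, 1 = full; plays the role of a queue index
        value: UnsafeCell<u32>, // payload written by the producer, read by the consumer
    }

    // Safety: access to `value` is ordered by the Release store / Acquire load on `ready`.
    unsafe impl Sync for Slot {}

    fn main() {
        let slot = Arc::new(Slot {
            ready: AtomicUsize::new(0),
            value: UnsafeCell::new(0),
        });

        let producer = {
            let slot = Arc::clone(&slot);
            thread::spawn(move || {
                unsafe { *slot.value.get() = 42 };       // write the payload
                slot.ready.store(1, Ordering::Release);  // "store_release": publish it
            })
        };

        // "load_acquire": once the flag is observed, the payload write is visible too.
        while slot.ready.load(Ordering::Acquire) == 0 {
            std::hint::spin_loop();
        }
        assert_eq!(unsafe { *slot.value.get() }, 42);
        producer.join().unwrap();
    }

With the removed single-core path, the same visibility guarantee came from a Relaxed access bracketed by `compiler_fence(Acquire)`/`compiler_fence(Release)`; using the atomic orderings directly expresses the same read/write pairing without the `XCore` type parameter.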