@@ -66,8 +66,7 @@ class alignas(Size) atomic_impl {
   }
 };

-// FIXME: get this to build reliably
-#if 0 && defined(_WIN64)
+#if defined(_WIN64)
 #include <intrin.h>

 /// MSVC's std::atomic uses an inline spin lock for 16-byte atomics,
@@ -76,11 +75,7 @@ class alignas(Size) atomic_impl {
 /// AMD processors that lack cmpxchg16b, so we just use the intrinsic.
 template <class Value>
 class alignas(2 * sizeof(void*)) atomic_impl<Value, 2 * sizeof(void*)> {
-  // MSVC is not strict about aliasing, so we can get away with this.
-  union {
-    volatile Value atomicValue;
-    volatile __int64 atomicArray[2];
-  };
+  volatile Value atomicValue;
 public:
   constexpr atomic_impl(Value initialValue) : atomicValue(initialValue) {}

@@ -98,10 +93,14 @@ class alignas(2 * sizeof(void*)) atomic_impl<Value, 2 * sizeof(void*)> {
     __int64 resultArray[2] = {};
 #if SWIFT_HAS_MSVC_ARM_ATOMICS
     if (order != std::memory_order_acquire) {
-      (void) _InterlockedCompareExchange128_nf(atomicArray, 0, 0, resultArray);
+      (void) _InterlockedCompareExchange128_nf(
+          reinterpret_cast<volatile __int64*>(&atomicValue),
+          0, 0, resultArray);
     } else {
 #endif
-      (void) _InterlockedCompareExchange128(atomicArray, 0, 0, resultArray);
+      (void) _InterlockedCompareExchange128(
+          reinterpret_cast<volatile __int64*>(&atomicValue),
+          0, 0, resultArray);
 #if SWIFT_HAS_MSVC_ARM_ATOMICS
     }
 #endif
@@ -116,31 +115,33 @@ class alignas(2 * sizeof(void*)) atomic_impl<Value, 2 * sizeof(void*)> {
            failureOrder == std::memory_order_consume);
     assert(successOrder == std::memory_order_relaxed ||
            successOrder == std::memory_order_release);
-    __int64 newValueArray[2];
-    memcpy(newValueArray, &newValue, sizeof(Value));
 #if SWIFT_HAS_MSVC_ARM_ATOMICS
     if (successOrder == std::memory_order_relaxed &&
         failureOrder != std::memory_order_acquire) {
-      return _InterlockedCompareExchange128_nf(atomicArray,
-                                               newValueArray[0],
-                                               newValueArray[1],
-                                               reinterpret_cast<__int64*>(&oldValue));
+      return _InterlockedCompareExchange128_nf(
+          reinterpret_cast<volatile __int64*>(&atomicValue),
+          reinterpret_cast<const __int64*>(&newValue)[1],
+          reinterpret_cast<const __int64*>(&newValue)[0],
+          reinterpret_cast<__int64*>(&oldValue));
     } else if (successOrder == std::memory_order_relaxed) {
-      return _InterlockedCompareExchange128_acq(atomicArray,
-                                                newValueArray[0],
-                                                newValueArray[1],
-                                                reinterpret_cast<__int64*>(&oldValue));
+      return _InterlockedCompareExchange128_acq(
+          reinterpret_cast<volatile __int64*>(&atomicValue),
+          reinterpret_cast<const __int64*>(&newValue)[1],
+          reinterpret_cast<const __int64*>(&newValue)[0],
+          reinterpret_cast<__int64*>(&oldValue));
     } else if (failureOrder != std::memory_order_acquire) {
-      return _InterlockedCompareExchange128_rel(atomicArray,
-                                                newValueArray[0],
-                                                newValueArray[1],
-                                                reinterpret_cast<__int64*>(&oldValue));
+      return _InterlockedCompareExchange128_rel(
+          reinterpret_cast<volatile __int64*>(&atomicValue),
+          reinterpret_cast<const __int64*>(&newValue)[1],
+          reinterpret_cast<const __int64*>(&newValue)[0],
+          reinterpret_cast<__int64*>(&oldValue));
     } else {
 #endif
-      return _InterlockedCompareExchange128(atomicArray,
-                                            newValueArray[0],
-                                            newValueArray[1],
-                                            reinterpret_cast<__int64*>(&oldValue));
+      return _InterlockedCompareExchange128(
+          reinterpret_cast<volatile __int64*>(&atomicValue),
+          reinterpret_cast<const __int64*>(&newValue)[1],
+          reinterpret_cast<const __int64*>(&newValue)[0],
+          reinterpret_cast<__int64*>(&oldValue));
 #if SWIFT_HAS_MSVC_ARM_ATOMICS
     }
 #endif
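
Note on the intrinsic this change relies on (not part of the diff): _InterlockedCompareExchange128 takes the destination as a volatile __int64*, the new value split into its high and low 64-bit halves (high half first), and a pointer to the comparand, which is overwritten with the destination's previous contents; it returns nonzero when the exchange happened. A 16-byte "load" is therefore a compare-exchange against a zero comparand, and on a little-endian target the second qword of the new value (index [1]) is the high half, which is why the patched code passes [1] before [0]. The following is a minimal, self-contained sketch of that convention for MSVC on x64/ARM64; Pair, load16, and cas16 are illustrative names and assumptions, not code from this commit.

```cpp
// Standalone sketch of the _InterlockedCompareExchange128 calling convention
// the patch depends on. Pair, load16, and cas16 are hypothetical helpers.
// Requires MSVC targeting x64 or ARM64.
#include <intrin.h>
#include <cassert>
#include <cstring>

struct alignas(16) Pair {
  __int64 lo;  // first qword in memory: low half on little-endian targets
  __int64 hi;  // second qword in memory: high half
};

// 16-byte atomic load: compare-exchange against a zero comparand. Whether or
// not the exchange "succeeds", resultArray receives the current contents.
static Pair load16(volatile Pair &p) {
  __int64 resultArray[2] = {};
  (void)_InterlockedCompareExchange128(
      reinterpret_cast<volatile __int64 *>(&p), 0, 0, resultArray);
  Pair result;
  std::memcpy(&result, resultArray, sizeof(result));
  return result;
}

// 16-byte compare-and-swap: the intrinsic takes (destination, high, low,
// comparand), so the second qword of the desired value is passed first.
// On return, `expected` holds whatever was previously stored in `p`.
static bool cas16(volatile Pair &p, Pair &expected, Pair desired) {
  return _InterlockedCompareExchange128(
             reinterpret_cast<volatile __int64 *>(&p),
             reinterpret_cast<const __int64 *>(&desired)[1],  // ExchangeHigh
             reinterpret_cast<const __int64 *>(&desired)[0],  // ExchangeLow
             reinterpret_cast<__int64 *>(&expected)) != 0;
}

int main() {
  volatile Pair value{1, 2};
  Pair expected = load16(value);

  // The comparand matches, so the exchange happens and `value` becomes {3, 4}.
  bool exchanged = cas16(value, expected, Pair{3, 4});
  Pair after = load16(value);
  assert(exchanged && after.lo == 3 && after.hi == 4);
  (void)exchanged;
  (void)after;
  return 0;
}
```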