@@ -2881,6 +2881,48 @@ namespace xsimd
             self.store_aligned(data.data());
             return set(batch<T, A>(), A(), data[idx]...);
         }
+
+        // Swizzle a 2 x uint64_t batch: each of the four possible index
+        // patterns maps to a dedicated NEON idiom.
+        template <class A, uint64_t V0, uint64_t V1>
+        XSIMD_INLINE batch<uint64_t, A> swizzle(batch<uint64_t, A> const& self,
+                                                batch_constant<uint64_t, A, V0, V1>,
+                                                requires_arch<neon>) noexcept
+        {
+            XSIMD_IF_CONSTEXPR (V0 == 0 && V1 == 0)
+            {
+                // broadcast the low lane: {v0, v0}
+                auto lo = vget_low_u64(self);
+                return vcombine_u64(lo, lo);
+            }
+            XSIMD_IF_CONSTEXPR (V0 == 1 && V1 == 1)
+            {
+                // broadcast the high lane: {v1, v1}
+                auto hi = vget_high_u64(self);
+                return vcombine_u64(hi, hi);
+            }
+            XSIMD_IF_CONSTEXPR (V0 == 0 && V1 == 1)
+            {
+                // identity permutation
+                return self;
+            }
+            else
+            {
+                // remaining pattern {1, 0}: swap the two lanes
+                return vextq_u64(self, self, 1);
+            }
+        }
+
+        // Signed variant: reinterpret as uint64_t, reuse the unsigned
+        // implementation with the equivalent unsigned mask, and cast back.
+        template <class A, uint64_t V0, uint64_t V1>
+        XSIMD_INLINE batch<int64_t, A> swizzle(batch<int64_t, A> const& self,
+                                               batch_constant<int64_t, A, V0, V1>,
+                                               requires_arch<neon>) noexcept
+        {
+            return vreinterpretq_s64_u64(swizzle(batch<uint64_t, A>(vreinterpretq_u64_s64(self)),
+                                                 batch_constant<uint64_t, A, V0, V1> {}, A {}));
+        }
     }
 
 }
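
For reference, a minimal usage sketch of the new fast path (illustrative only, not part of the commit; it assumes a build targeting ARM where `xsimd::neon` is available):

```cpp
#include <xsimd/xsimd.hpp>
#include <cstdint>
#include <cstdio>

int main()
{
    using arch = xsimd::neon; // assumption: compiled for an ARM target with NEON
    xsimd::batch<uint64_t, arch> v(1u, 2u);

    // compile-time mask {1, 0} selects {v[1], v[0]} and hits the vextq_u64 branch
    auto swapped = xsimd::swizzle(v, xsimd::batch_constant<uint64_t, arch, 1, 0> {});

    alignas(16) uint64_t out[2];
    swapped.store_aligned(out);
    std::printf("%llu %llu\n", (unsigned long long)out[0],
                (unsigned long long)out[1]); // prints: 2 1
}
```

Dispatching on the mask values with `XSIMD_IF_CONSTEXPR` lets each of the four 2-lane permutations compile down to a single NEON instruction instead of falling back to a more generic permutation routine.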