Skip to content

Commit 9cb09c4

Browse files
Fix rotate_left / rotate_right behavior and documentation
Their meanings were swapped; this should fix #1062
1 parent a9d021a commit 9cb09c4

File tree

9 files changed

+46
-46
lines changed

9 files changed

+46
-46
lines changed

include/xsimd/arch/generic/xsimd_generic_memory.hpp

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -284,9 +284,9 @@ namespace xsimd
284284
return detail::load_unaligned<A>(mem, cvt, generic {}, detail::conversion_type<A, T_in, T_out> {});
285285
}
286286

287-
// rotate_left
287+
// rotate_right
288288
template <size_t N, class A, class T>
289-
XSIMD_INLINE batch<T, A> rotate_left(batch<T, A> const& self, requires_arch<generic>) noexcept
289+
XSIMD_INLINE batch<T, A> rotate_right(batch<T, A> const& self, requires_arch<generic>) noexcept
290290
{
291291
struct rotate_generator
292292
{
@@ -300,14 +300,14 @@ namespace xsimd
300300
}
301301

302302
template <size_t N, class A, class T>
303-
XSIMD_INLINE batch<std::complex<T>, A> rotate_left(batch<std::complex<T>, A> const& self, requires_arch<generic>) noexcept
303+
XSIMD_INLINE batch<std::complex<T>, A> rotate_right(batch<std::complex<T>, A> const& self, requires_arch<generic>) noexcept
304304
{
305-
return { rotate_left<N>(self.real()), rotate_left<N>(self.imag()) };
305+
return { rotate_right<N>(self.real()), rotate_right<N>(self.imag()) };
306306
}
307307

308-
// rotate_right
308+
// rotate_left
309309
template <size_t N, class A, class T>
310-
XSIMD_INLINE batch<T, A> rotate_right(batch<T, A> const& self, requires_arch<generic>) noexcept
310+
XSIMD_INLINE batch<T, A> rotate_left(batch<T, A> const& self, requires_arch<generic>) noexcept
311311
{
312312
struct rotate_generator
313313
{
@@ -321,9 +321,9 @@ namespace xsimd
321321
}
322322

323323
template <size_t N, class A, class T>
324-
XSIMD_INLINE batch<std::complex<T>, A> rotate_right(batch<std::complex<T>, A> const& self, requires_arch<generic>) noexcept
324+
XSIMD_INLINE batch<std::complex<T>, A> rotate_left(batch<std::complex<T>, A> const& self, requires_arch<generic>) noexcept
325325
{
326-
return { rotate_right<N>(self.real()), rotate_right<N>(self.imag()) };
326+
return { rotate_left<N>(self.real()), rotate_left<N>(self.imag()) };
327327
}
328328

329329
// Scatter with runtime indexes.

include/xsimd/arch/xsimd_avx2.hpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -655,16 +655,16 @@ namespace xsimd
655655
}
656656
}
657657

658-
// rotate_right
658+
// rotate_left
659659
template <size_t N, class A>
660-
XSIMD_INLINE batch<uint16_t, A> rotate_right(batch<uint16_t, A> const& self, requires_arch<avx2>) noexcept
660+
XSIMD_INLINE batch<uint16_t, A> rotate_left(batch<uint16_t, A> const& self, requires_arch<avx2>) noexcept
661661
{
662662
return _mm256_alignr_epi8(self, self, N);
663663
}
664664
template <size_t N, class A>
665-
XSIMD_INLINE batch<int16_t, A> rotate_right(batch<int16_t, A> const& self, requires_arch<avx2>) noexcept
665+
XSIMD_INLINE batch<int16_t, A> rotate_left(batch<int16_t, A> const& self, requires_arch<avx2>) noexcept
666666
{
667-
return bitwise_cast<int16_t>(rotate_right<N, A>(bitwise_cast<uint16_t>(self), avx2 {}));
667+
return bitwise_cast<int16_t>(rotate_left<N, A>(bitwise_cast<uint16_t>(self), avx2 {}));
668668
}
669669

670670
// sadd

include/xsimd/arch/xsimd_avx512bw.hpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -358,16 +358,16 @@ namespace xsimd
358358
return detail::compare_int_avx512bw<A, T, _MM_CMPINT_NE>(self, other);
359359
}
360360

361-
// rotate_right
361+
// rotate_left
362362
template <size_t N, class A>
363-
XSIMD_INLINE batch<uint16_t, A> rotate_right(batch<uint16_t, A> const& self, requires_arch<avx512bw>) noexcept
363+
XSIMD_INLINE batch<uint16_t, A> rotate_left(batch<uint16_t, A> const& self, requires_arch<avx512bw>) noexcept
364364
{
365365
return _mm512_alignr_epi8(self, self, N);
366366
}
367367
template <size_t N, class A>
368-
XSIMD_INLINE batch<int16_t, A> rotate_right(batch<int16_t, A> const& self, requires_arch<avx512bw>) noexcept
368+
XSIMD_INLINE batch<int16_t, A> rotate_left(batch<int16_t, A> const& self, requires_arch<avx512bw>) noexcept
369369
{
370-
return bitwise_cast<int16_t>(rotate_right<N, A>(bitwise_cast<uint16_t>(self), avx2 {}));
370+
return bitwise_cast<int16_t>(rotate_left<N, A>(bitwise_cast<uint16_t>(self), avx2 {}));
371371
}
372372

373373
// sadd

include/xsimd/arch/xsimd_neon.hpp

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -2743,38 +2743,38 @@ namespace xsimd
27432743
}
27442744

27452745
/****************
2746-
* rotate_right *
2746+
* rotate_left *
27472747
****************/
27482748
namespace wrap
27492749
{
27502750
template <size_t N>
2751-
XSIMD_INLINE uint8x16_t rotate_right_u8(uint8x16_t a, uint8x16_t b) noexcept { return vextq_u8(a, b, N); }
2751+
XSIMD_INLINE uint8x16_t rotate_left_u8(uint8x16_t a, uint8x16_t b) noexcept { return vextq_u8(a, b, N); }
27522752
template <size_t N>
2753-
XSIMD_INLINE int8x16_t rotate_right_s8(int8x16_t a, int8x16_t b) noexcept { return vextq_s8(a, b, N); }
2753+
XSIMD_INLINE int8x16_t rotate_left_s8(int8x16_t a, int8x16_t b) noexcept { return vextq_s8(a, b, N); }
27542754
template <size_t N>
2755-
XSIMD_INLINE uint16x8_t rotate_right_u16(uint16x8_t a, uint16x8_t b) noexcept { return vextq_u16(a, b, N); }
2755+
XSIMD_INLINE uint16x8_t rotate_left_u16(uint16x8_t a, uint16x8_t b) noexcept { return vextq_u16(a, b, N); }
27562756
template <size_t N>
2757-
XSIMD_INLINE int16x8_t rotate_right_s16(int16x8_t a, int16x8_t b) noexcept { return vextq_s16(a, b, N); }
2757+
XSIMD_INLINE int16x8_t rotate_left_s16(int16x8_t a, int16x8_t b) noexcept { return vextq_s16(a, b, N); }
27582758
template <size_t N>
2759-
XSIMD_INLINE uint32x4_t rotate_right_u32(uint32x4_t a, uint32x4_t b) noexcept { return vextq_u32(a, b, N); }
2759+
XSIMD_INLINE uint32x4_t rotate_left_u32(uint32x4_t a, uint32x4_t b) noexcept { return vextq_u32(a, b, N); }
27602760
template <size_t N>
2761-
XSIMD_INLINE int32x4_t rotate_right_s32(int32x4_t a, int32x4_t b) noexcept { return vextq_s32(a, b, N); }
2761+
XSIMD_INLINE int32x4_t rotate_left_s32(int32x4_t a, int32x4_t b) noexcept { return vextq_s32(a, b, N); }
27622762
template <size_t N>
2763-
XSIMD_INLINE uint64x2_t rotate_right_u64(uint64x2_t a, uint64x2_t b) noexcept { return vextq_u64(a, b, N); }
2763+
XSIMD_INLINE uint64x2_t rotate_left_u64(uint64x2_t a, uint64x2_t b) noexcept { return vextq_u64(a, b, N); }
27642764
template <size_t N>
2765-
XSIMD_INLINE int64x2_t rotate_right_s64(int64x2_t a, int64x2_t b) noexcept { return vextq_s64(a, b, N); }
2765+
XSIMD_INLINE int64x2_t rotate_left_s64(int64x2_t a, int64x2_t b) noexcept { return vextq_s64(a, b, N); }
27662766
template <size_t N>
2767-
XSIMD_INLINE float32x4_t rotate_right_f32(float32x4_t a, float32x4_t b) noexcept { return vextq_f32(a, b, N); }
2767+
XSIMD_INLINE float32x4_t rotate_left_f32(float32x4_t a, float32x4_t b) noexcept { return vextq_f32(a, b, N); }
27682768
}
27692769

27702770
template <size_t N, class A, class T, detail::enable_neon_type_t<T> = 0>
2771-
XSIMD_INLINE batch<T, A> rotate_right(batch<T, A> const& a, requires_arch<neon>) noexcept
2771+
XSIMD_INLINE batch<T, A> rotate_left(batch<T, A> const& a, requires_arch<neon>) noexcept
27722772
{
27732773
using register_type = typename batch<T, A>::register_type;
27742774
const detail::neon_dispatcher::binary dispatcher = {
2775-
std::make_tuple(wrap::rotate_right_u8<N>, wrap::rotate_right_s8<N>, wrap::rotate_right_u16<N>, wrap::rotate_right_s16<N>,
2776-
wrap::rotate_right_u32<N>, wrap::rotate_right_s32<N>, wrap::rotate_right_u64<N>, wrap::rotate_right_s64<N>,
2777-
wrap::rotate_right_f32<N>)
2775+
std::make_tuple(wrap::rotate_left_u8<N>, wrap::rotate_left_s8<N>, wrap::rotate_left_u16<N>, wrap::rotate_left_s16<N>,
2776+
wrap::rotate_left_u32<N>, wrap::rotate_left_s32<N>, wrap::rotate_left_u64<N>, wrap::rotate_left_s64<N>,
2777+
wrap::rotate_left_f32<N>)
27782778
};
27792779
return dispatcher.apply(register_type(a), register_type(a));
27802780
}

include/xsimd/arch/xsimd_neon64.hpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1265,10 +1265,10 @@ namespace xsimd
12651265
}
12661266

12671267
/****************
1268-
* rotate_right *
1268+
* rotate_left *
12691269
****************/
12701270
template <size_t N, class A>
1271-
XSIMD_INLINE batch<double, A> rotate_right(batch<double, A> const& a, requires_arch<neon64>) noexcept
1271+
XSIMD_INLINE batch<double, A> rotate_left(batch<double, A> const& a, requires_arch<neon64>) noexcept
12721272
{
12731273
return vextq_f64(a, a, N);
12741274
}

include/xsimd/arch/xsimd_ssse3.hpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -105,16 +105,16 @@ namespace xsimd
105105
}
106106
}
107107

108-
// rotate_right
108+
// rotate_left
109109
template <size_t N, class A>
110-
XSIMD_INLINE batch<uint16_t, A> rotate_right(batch<uint16_t, A> const& self, requires_arch<ssse3>) noexcept
110+
XSIMD_INLINE batch<uint16_t, A> rotate_left(batch<uint16_t, A> const& self, requires_arch<ssse3>) noexcept
111111
{
112112
return _mm_alignr_epi8(self, self, N);
113113
}
114114
template <size_t N, class A>
115-
XSIMD_INLINE batch<int16_t, A> rotate_right(batch<int16_t, A> const& self, requires_arch<ssse3>) noexcept
115+
XSIMD_INLINE batch<int16_t, A> rotate_left(batch<int16_t, A> const& self, requires_arch<ssse3>) noexcept
116116
{
117-
return bitwise_cast<int16_t>(rotate_right<N, A>(bitwise_cast<uint16_t>(self), ssse3 {}));
117+
return bitwise_cast<int16_t>(rotate_left<N, A>(bitwise_cast<uint16_t>(self), ssse3 {}));
118118
}
119119

120120
// swizzle (dynamic mask)

include/xsimd/arch/xsimd_sve.hpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -713,9 +713,9 @@ namespace xsimd
713713
* Permutation *
714714
***************/
715715

716-
// rotate_right
716+
// rotate_left
717717
template <size_t N, class A, class T, detail::sve_enable_all_t<T> = 0>
718-
XSIMD_INLINE batch<T, A> rotate_right(batch<T, A> const& a, requires_arch<sve>) noexcept
718+
XSIMD_INLINE batch<T, A> rotate_left(batch<T, A> const& a, requires_arch<sve>) noexcept
719719
{
720720
return svext(a, a, N);
721721
}

include/xsimd/types/xsimd_api.hpp

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1893,11 +1893,11 @@ namespace xsimd
18931893
/**
18941894
* @ingroup batch_data_transfer
18951895
*
1896-
* Slide the whole batch to the left by \c n bytes, and reintroduce the
1896+
* Slide the whole batch to the left by \c n elements, and reintroduce the
18971897
* slided out elements from the right. This is different from
1898-
* \c rol that rotates each batch element to the left.
1898+
* \c rotl that rotates each batch element to the left.
18991899
*
1900-
* @tparam N Amount of bytes to rotated to the left.
1900+
* @tparam N Amount of elements to rotate to the left.
19011901
* @param x batch of integer values.
19021902
* @return rotated batch.
19031903
*/
@@ -1911,11 +1911,11 @@ namespace xsimd
19111911
/**
19121912
* @ingroup batch_data_transfer
19131913
*
1914-
* Slide the whole batch to the right by \c n bytes, and reintroduce the
1914+
* Slide the whole batch to the right by \c n elements, and reintroduce the
19151915
* slided out elements from the left. This is different from
1916-
* \c rol that rotates each batch element to the left.
1916+
* \c rotr that rotates each batch element to the right.
19171917
*
1918-
* @tparam N Amount of bytes to rotate to the right.
1918+
* @tparam N Amount of elements to rotate to the right.
19191919
* @param x batch of integer values.
19201920
* @return rotated batch.
19211921
*/

test/test_batch_manip.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -40,8 +40,8 @@ namespace xsimd
4040
exped_reverse[i] = lhs_in[N - 1 - i];
4141
exped_fill[i] = lhs_in[N - 1];
4242
exped_dup[i] = lhs_in[2 * (i / 2)];
43-
exped_ror[i] = lhs_in[(i + 1) % N];
44-
exped_rol[i] = lhs_in[(i - 1) % N];
43+
exped_ror[i] = lhs_in[(i - 1) % N];
44+
exped_rol[i] = lhs_in[(i + 1) % N];
4545
}
4646
vects.push_back(std::move(exped_reverse));
4747
vects.push_back(std::move(exped_fill));

0 commit comments

Comments
 (0)