diff --git a/include/xsimd/arch/generic/xsimd_generic_memory.hpp b/include/xsimd/arch/generic/xsimd_generic_memory.hpp
index 10c1ffe66..fbe1bbc13 100644
--- a/include/xsimd/arch/generic/xsimd_generic_memory.hpp
+++ b/include/xsimd/arch/generic/xsimd_generic_memory.hpp
@@ -284,9 +284,9 @@ namespace xsimd
return detail::load_unaligned(mem, cvt, generic {}, detail::conversion_type {});
}
- // rotate_left
+ // rotate_right
template <size_t N, class A, class T>
- XSIMD_INLINE batch<T, A> rotate_left(batch<T, A> const& self, requires_arch<generic>) noexcept
+ XSIMD_INLINE batch<T, A> rotate_right(batch<T, A> const& self, requires_arch<generic>) noexcept
{
struct rotate_generator
{
@@ -300,14 +300,14 @@ namespace xsimd
}
template <size_t N, class A, class T>
- XSIMD_INLINE batch<std::complex<T>, A> rotate_left(batch<std::complex<T>, A> const& self, requires_arch<generic>) noexcept
+ XSIMD_INLINE batch<std::complex<T>, A> rotate_right(batch<std::complex<T>, A> const& self, requires_arch<generic>) noexcept
{
- return { rotate_left<N>(self.real()), rotate_left<N>(self.imag()) };
+ return { rotate_right<N>(self.real()), rotate_right<N>(self.imag()) };
}
- // rotate_right
+ // rotate_left
template <size_t N, class A, class T>
- XSIMD_INLINE batch<T, A> rotate_right(batch<T, A> const& self, requires_arch<generic>) noexcept
+ XSIMD_INLINE batch<T, A> rotate_left(batch<T, A> const& self, requires_arch<generic>) noexcept
{
struct rotate_generator
{
@@ -321,9 +321,9 @@ namespace xsimd
}
template <size_t N, class A, class T>
- XSIMD_INLINE batch<std::complex<T>, A> rotate_right(batch<std::complex<T>, A> const& self, requires_arch<generic>) noexcept
+ XSIMD_INLINE batch<std::complex<T>, A> rotate_left(batch<std::complex<T>, A> const& self, requires_arch<generic>) noexcept
{
- return { rotate_right<N>(self.real()), rotate_right<N>(self.imag()) };
+ return { rotate_left<N>(self.real()), rotate_left<N>(self.imag()) };
}
// Scatter with runtime indexes.
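For reference, this is the element-index mapping that the renamed kernels are documented (and tested below) to produce, written as a scalar sketch. The names rotate_left_ref and rotate_right_ref are made up here, and std::array stands in for batch<T, A>; this is not part of the patch.

#include <array>
#include <cstddef>

// rotate_left<S>: slide the batch left by S elements; elements that fall off
// the left edge re-enter on the right, so out[i] = in[(i + S) % N].
template <std::size_t S, class T, std::size_t N>
std::array<T, N> rotate_left_ref(std::array<T, N> const& in)
{
    std::array<T, N> out {};
    for (std::size_t i = 0; i < N; ++i)
        out[i] = in[(i + S) % N];
    return out;
}

// rotate_right<S>: the mirror image, out[i] = in[(i + N - S) % N].
template <std::size_t S, class T, std::size_t N>
std::array<T, N> rotate_right_ref(std::array<T, N> const& in)
{
    std::array<T, N> out {};
    for (std::size_t i = 0; i < N; ++i)
        out[i] = in[(i + N - S) % N];
    return out;
}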
diff --git a/include/xsimd/arch/xsimd_avx2.hpp b/include/xsimd/arch/xsimd_avx2.hpp
index a7b421d8e..506299a0d 100644
--- a/include/xsimd/arch/xsimd_avx2.hpp
+++ b/include/xsimd/arch/xsimd_avx2.hpp
@@ -655,16 +655,16 @@ namespace xsimd
}
}
- // rotate_right
+ // rotate_left
template <size_t N, class A>
- XSIMD_INLINE batch rotate_right(batch const& self, requires_arch) noexcept
+ XSIMD_INLINE batch rotate_left(batch const& self, requires_arch) noexcept
{
return _mm256_alignr_epi8(self, self, N);
}
template <size_t N, class A>
- XSIMD_INLINE batch rotate_right(batch const& self, requires_arch) noexcept
+ XSIMD_INLINE batch rotate_left(batch const& self, requires_arch) noexcept
{
- return bitwise_cast(rotate_right(bitwise_cast(self), avx2 {}));
+ return bitwise_cast(rotate_left(bitwise_cast(self), avx2 {}));
}
// sadd
diff --git a/include/xsimd/arch/xsimd_avx512bw.hpp b/include/xsimd/arch/xsimd_avx512bw.hpp
index 8b381986c..724ced087 100644
--- a/include/xsimd/arch/xsimd_avx512bw.hpp
+++ b/include/xsimd/arch/xsimd_avx512bw.hpp
@@ -358,16 +358,16 @@ namespace xsimd
return detail::compare_int_avx512bw(self, other);
}
- // rotate_right
+ // rotate_left
template <size_t N, class A>
- XSIMD_INLINE batch rotate_right(batch const& self, requires_arch) noexcept
+ XSIMD_INLINE batch rotate_left(batch const& self, requires_arch) noexcept
{
return _mm512_alignr_epi8(self, self, N);
}
template <size_t N, class A>
- XSIMD_INLINE batch rotate_right(batch const& self, requires_arch) noexcept
+ XSIMD_INLINE batch rotate_left(batch const& self, requires_arch) noexcept
{
- return bitwise_cast(rotate_right(bitwise_cast(self), avx2 {}));
+ return bitwise_cast(rotate_left(bitwise_cast(self), avx2 {}));
}
// sadd
diff --git a/include/xsimd/arch/xsimd_neon.hpp b/include/xsimd/arch/xsimd_neon.hpp
index 87f5d34b5..2d0a24452 100644
--- a/include/xsimd/arch/xsimd_neon.hpp
+++ b/include/xsimd/arch/xsimd_neon.hpp
@@ -2743,38 +2743,38 @@ namespace xsimd
}
/****************
- * rotate_right *
+ * rotate_left *
****************/
namespace wrap
{
template <size_t N>
- XSIMD_INLINE uint8x16_t rotate_right_u8(uint8x16_t a, uint8x16_t b) noexcept { return vextq_u8(a, b, N); }
+ XSIMD_INLINE uint8x16_t rotate_left_u8(uint8x16_t a, uint8x16_t b) noexcept { return vextq_u8(a, b, N); }
template <size_t N>
- XSIMD_INLINE int8x16_t rotate_right_s8(int8x16_t a, int8x16_t b) noexcept { return vextq_s8(a, b, N); }
+ XSIMD_INLINE int8x16_t rotate_left_s8(int8x16_t a, int8x16_t b) noexcept { return vextq_s8(a, b, N); }
template <size_t N>
- XSIMD_INLINE uint16x8_t rotate_right_u16(uint16x8_t a, uint16x8_t b) noexcept { return vextq_u16(a, b, N); }
+ XSIMD_INLINE uint16x8_t rotate_left_u16(uint16x8_t a, uint16x8_t b) noexcept { return vextq_u16(a, b, N); }
template <size_t N>
- XSIMD_INLINE int16x8_t rotate_right_s16(int16x8_t a, int16x8_t b) noexcept { return vextq_s16(a, b, N); }
+ XSIMD_INLINE int16x8_t rotate_left_s16(int16x8_t a, int16x8_t b) noexcept { return vextq_s16(a, b, N); }
template <size_t N>
- XSIMD_INLINE uint32x4_t rotate_right_u32(uint32x4_t a, uint32x4_t b) noexcept { return vextq_u32(a, b, N); }
+ XSIMD_INLINE uint32x4_t rotate_left_u32(uint32x4_t a, uint32x4_t b) noexcept { return vextq_u32(a, b, N); }
template <size_t N>
- XSIMD_INLINE int32x4_t rotate_right_s32(int32x4_t a, int32x4_t b) noexcept { return vextq_s32(a, b, N); }
+ XSIMD_INLINE int32x4_t rotate_left_s32(int32x4_t a, int32x4_t b) noexcept { return vextq_s32(a, b, N); }
template <size_t N>
- XSIMD_INLINE uint64x2_t rotate_right_u64(uint64x2_t a, uint64x2_t b) noexcept { return vextq_u64(a, b, N); }
+ XSIMD_INLINE uint64x2_t rotate_left_u64(uint64x2_t a, uint64x2_t b) noexcept { return vextq_u64(a, b, N); }
template <size_t N>
- XSIMD_INLINE int64x2_t rotate_right_s64(int64x2_t a, int64x2_t b) noexcept { return vextq_s64(a, b, N); }
+ XSIMD_INLINE int64x2_t rotate_left_s64(int64x2_t a, int64x2_t b) noexcept { return vextq_s64(a, b, N); }
template <size_t N>
- XSIMD_INLINE float32x4_t rotate_right_f32(float32x4_t a, float32x4_t b) noexcept { return vextq_f32(a, b, N); }
+ XSIMD_INLINE float32x4_t rotate_left_f32(float32x4_t a, float32x4_t b) noexcept { return vextq_f32(a, b, N); }
}
template = 0>
- XSIMD_INLINE batch<T, A> rotate_right(batch<T, A> const& a, requires_arch<neon>) noexcept
+ XSIMD_INLINE batch<T, A> rotate_left(batch<T, A> const& a, requires_arch<neon>) noexcept
{
using register_type = typename batch<T, A>::register_type;
const detail::neon_dispatcher::binary dispatcher = {
- std::make_tuple(wrap::rotate_right_u8<N>, wrap::rotate_right_s8<N>, wrap::rotate_right_u16<N>, wrap::rotate_right_s16<N>,
- wrap::rotate_right_u32<N>, wrap::rotate_right_s32<N>, wrap::rotate_right_u64<N>, wrap::rotate_right_s64<N>,
- wrap::rotate_right_f32<N>)
+ std::make_tuple(wrap::rotate_left_u8<N>, wrap::rotate_left_s8<N>, wrap::rotate_left_u16<N>, wrap::rotate_left_s16<N>,
+ wrap::rotate_left_u32<N>, wrap::rotate_left_s32<N>, wrap::rotate_left_u64<N>, wrap::rotate_left_s64<N>,
+ wrap::rotate_left_f32<N>)
};
return dispatcher.apply(register_type(a), register_type(a));
}
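The NEON rename hinges on what vext actually computes: vextq_xx(a, b, n) returns lanes n through L-1 of a followed by lanes 0 through n-1 of b, so with a == b lane i of the result is a[(i + n) % L], which is the rotate_left mapping sketched earlier. A scalar model of that behaviour (a sketch for checking the direction, not library code):

#include <array>
#include <cstddef>
#include <cstdint>

// Models vextq_u8(a, b, n) for 16 one-byte lanes: lanes n..15 of a, then
// lanes 0..n-1 of b. With a == b this rotates a left by n lanes.
std::array<std::uint8_t, 16> vext_model(std::array<std::uint8_t, 16> const& a,
                                        std::array<std::uint8_t, 16> const& b,
                                        std::size_t n)
{
    std::array<std::uint8_t, 16> out {};
    for (std::size_t i = 0; i < 16; ++i)
        out[i] = (i + n < 16) ? a[i + n] : b[i + n - 16];
    return out;
}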
diff --git a/include/xsimd/arch/xsimd_neon64.hpp b/include/xsimd/arch/xsimd_neon64.hpp
index be7c534cf..933b1e680 100644
--- a/include/xsimd/arch/xsimd_neon64.hpp
+++ b/include/xsimd/arch/xsimd_neon64.hpp
@@ -1265,10 +1265,10 @@ namespace xsimd
}
/****************
- * rotate_right *
+ * rotate_left *
****************/
template <size_t N, class A>
- XSIMD_INLINE batch<double, A> rotate_right(batch<double, A> const& a, requires_arch<neon64>) noexcept
+ XSIMD_INLINE batch<double, A> rotate_left(batch<double, A> const& a, requires_arch<neon64>) noexcept
{
return vextq_f64(a, a, N);
}
diff --git a/include/xsimd/arch/xsimd_ssse3.hpp b/include/xsimd/arch/xsimd_ssse3.hpp
index 9424d4ada..decaa5e22 100644
--- a/include/xsimd/arch/xsimd_ssse3.hpp
+++ b/include/xsimd/arch/xsimd_ssse3.hpp
@@ -105,16 +105,16 @@ namespace xsimd
}
}
- // rotate_right
+ // rotate_left
template <size_t N, class A>
- XSIMD_INLINE batch rotate_right(batch const& self, requires_arch) noexcept
+ XSIMD_INLINE batch rotate_left(batch const& self, requires_arch) noexcept
{
return _mm_alignr_epi8(self, self, N);
}
template <size_t N, class A>
- XSIMD_INLINE batch rotate_right(batch const& self, requires_arch) noexcept
+ XSIMD_INLINE batch rotate_left(batch const& self, requires_arch) noexcept
{
- return bitwise_cast(rotate_right(bitwise_cast(self), ssse3 {}));
+ return bitwise_cast(rotate_left(bitwise_cast(self), ssse3 {}));
}
// swizzle (dynamic mask)
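The SSSE3 kernel relies on the same concatenate-and-shift idea: _mm_alignr_epi8(a, b, n) shifts the 32-byte concatenation a:b (a in the upper half) right by n bytes and keeps the low 16 bytes, so with a == b == self, byte i of the result is self[(i + n) % 16]. That is again the rotate_left mapping, which is what motivates the rename. A scalar model of the 128-bit case (sketch only; the 256-bit and 512-bit alignr variants used above apply the same shuffle within each 128-bit lane):

#include <array>
#include <cstddef>
#include <cstdint>

// Models _mm_alignr_epi8(a, b, n) for 0 <= n <= 16: bytes n..15 of b,
// followed by bytes 0..n-1 of a. With a == b this is a left rotation by n bytes.
std::array<std::uint8_t, 16> alignr_model(std::array<std::uint8_t, 16> const& a,
                                          std::array<std::uint8_t, 16> const& b,
                                          std::size_t n)
{
    std::array<std::uint8_t, 16> out {};
    for (std::size_t i = 0; i < 16; ++i)
        out[i] = (i + n < 16) ? b[i + n] : a[i + n - 16];
    return out;
}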
diff --git a/include/xsimd/arch/xsimd_sve.hpp b/include/xsimd/arch/xsimd_sve.hpp
index 1586b8e0b..9958692a8 100644
--- a/include/xsimd/arch/xsimd_sve.hpp
+++ b/include/xsimd/arch/xsimd_sve.hpp
@@ -713,9 +713,9 @@ namespace xsimd
* Permutation *
***************/
- // rotate_right
+ // rotate_left
template = 0>
- XSIMD_INLINE batch<T, A> rotate_right(batch<T, A> const& a, requires_arch<sve>) noexcept
+ XSIMD_INLINE batch<T, A> rotate_left(batch<T, A> const& a, requires_arch<sve>) noexcept
{
return svext(a, a, N);
}
diff --git a/include/xsimd/types/xsimd_api.hpp b/include/xsimd/types/xsimd_api.hpp
index 5cd87c205..c77628e95 100644
--- a/include/xsimd/types/xsimd_api.hpp
+++ b/include/xsimd/types/xsimd_api.hpp
@@ -1893,11 +1893,11 @@ namespace xsimd
/**
* @ingroup batch_data_transfer
*
- * Slide the whole batch to the left by \c n bytes, and reintroduce the
+ * Slide the whole batch to the left by \c n elements, and reintroduce the
* slided out elements from the right. This is different from
* \c rol that rotates each batch element to the left.
*
- * @tparam N Amount of bytes to rotated to the left.
+ * @tparam N Amount of elements to rotate to the left.
* @param x batch of integer values.
* @return rotated batch.
*/
@@ -1911,11 +1911,11 @@ namespace xsimd
/**
* @ingroup batch_data_transfer
*
- * Slide the whole batch to the right by \c n bytes, and reintroduce the
+ * Slide the whole batch to the right by \c n elements, and reintroduce the
* slided out elements from the left. This is different from
* \c rol that rotates each batch element to the left.
*
- * @tparam N Amount of bytes to rotate to the right.
+ * @tparam N Amount of elements to rotate to the right.
* @param x batch of integer values.
* @return rotated batch.
*/
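A small usage sketch of the documented entry points, assuming the post-fix semantics; the batch width depends on the target architecture, so the lane comments describe the general pattern rather than a fixed size:

#include <xsimd/xsimd.hpp>
#include <cstdint>
#include <numeric>
#include <vector>

int main()
{
    using batch_t = xsimd::batch<std::int32_t>;
    std::vector<std::int32_t> in(batch_t::size);
    std::iota(in.begin(), in.end(), 0); // 0, 1, 2, ...
    batch_t x = batch_t::load_unaligned(in.data());

    // Slide left by one element; element 0 re-enters on the right:
    // lanes become 1, 2, ..., size - 1, 0.
    batch_t l = xsimd::rotate_left<1>(x);

    // Mirror image: lanes become size - 1, 0, 1, ..., size - 2.
    batch_t r = xsimd::rotate_right<1>(x);

    std::vector<std::int32_t> out_left(batch_t::size), out_right(batch_t::size);
    l.store_unaligned(out_left.data());
    r.store_unaligned(out_right.data());
    return 0;
}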
diff --git a/test/test_batch_manip.cpp b/test/test_batch_manip.cpp
index ad25284b8..10fc16d13 100644
--- a/test/test_batch_manip.cpp
+++ b/test/test_batch_manip.cpp
@@ -40,8 +40,8 @@ namespace xsimd
exped_reverse[i] = lhs_in[N - 1 - i];
exped_fill[i] = lhs_in[N - 1];
exped_dup[i] = lhs_in[2 * (i / 2)];
- exped_ror[i] = lhs_in[(i + 1) % N];
- exped_rol[i] = lhs_in[(i - 1) % N];
+ exped_ror[i] = lhs_in[(i - 1) % N];
+ exped_rol[i] = lhs_in[(i + 1) % N];
}
vects.push_back(std::move(exped_reverse));
vects.push_back(std::move(exped_fill));