
Fix rotate_left / rotate_right behavior and documentation
Their meaning was swapped; this should fix #1062
serge-sans-paille committed Nov 24, 2024
1 parent a9d021a commit 8bffdb4
Showing 9 changed files with 46 additions and 46 deletions.
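
Before the per-file diff, a short illustration of the corrected semantics (not part of the commit; a minimal sketch against the public rotate_left / rotate_right API documented below, with the batch width left to the default architecture): rotate_left<N> slides the whole batch left by N elements and wraps the slid-out values back in from the right, and rotate_right<N> does the reverse.

```cpp
#include <xsimd/xsimd.hpp>

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main()
{
    using batch = xsimd::batch<std::int32_t>;
    constexpr std::size_t size = batch::size;

    std::int32_t in[size];
    for (std::size_t i = 0; i < size; ++i)
        in[i] = static_cast<std::int32_t>(i); // 0, 1, 2, ...

    batch x = batch::load_unaligned(in);
    batch l = xsimd::rotate_left<1>(x);  // lane i now holds in[(i + 1) % size]
    batch r = xsimd::rotate_right<1>(x); // lane i now holds in[(i - 1 + size) % size]

    std::int32_t out_l[size], out_r[size];
    l.store_unaligned(out_l);
    r.store_unaligned(out_r);

    for (std::size_t i = 0; i < size; ++i)
        std::printf("lane %zu: rotate_left<1> -> %d, rotate_right<1> -> %d\n",
                    i, static_cast<int>(out_l[i]), static_cast<int>(out_r[i]));
    return 0;
}
```

For a four-lane batch this would print 1 2 3 0 for rotate_left<1> and 3 0 1 2 for rotate_right<1>, matching the corrected test expectations at the bottom of the diff.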
16 changes: 8 additions & 8 deletions include/xsimd/arch/generic/xsimd_generic_memory.hpp
@@ -284,9 +284,9 @@ namespace xsimd
return detail::load_unaligned<A>(mem, cvt, generic {}, detail::conversion_type<A, T_in, T_out> {});
}

-// rotate_left
+// rotate_right
template <size_t N, class A, class T>
-XSIMD_INLINE batch<T, A> rotate_left(batch<T, A> const& self, requires_arch<generic>) noexcept
+XSIMD_INLINE batch<T, A> rotate_right(batch<T, A> const& self, requires_arch<generic>) noexcept
{
struct rotate_generator
{
@@ -300,14 +300,14 @@
}

template <size_t N, class A, class T>
-XSIMD_INLINE batch<std::complex<T>, A> rotate_left(batch<std::complex<T>, A> const& self, requires_arch<generic>) noexcept
+XSIMD_INLINE batch<std::complex<T>, A> rotate_right(batch<std::complex<T>, A> const& self, requires_arch<generic>) noexcept
{
-return { rotate_left<N>(self.real()), rotate_left<N>(self.imag()) };
+return { rotate_right<N>(self.real()), rotate_right<N>(self.imag()) };
}

-// rotate_right
+// rotate_left
template <size_t N, class A, class T>
-XSIMD_INLINE batch<T, A> rotate_right(batch<T, A> const& self, requires_arch<generic>) noexcept
+XSIMD_INLINE batch<T, A> rotate_left(batch<T, A> const& self, requires_arch<generic>) noexcept
{
struct rotate_generator
{
@@ -321,9 +321,9 @@
}

template <size_t N, class A, class T>
-XSIMD_INLINE batch<std::complex<T>, A> rotate_right(batch<std::complex<T>, A> const& self, requires_arch<generic>) noexcept
+XSIMD_INLINE batch<std::complex<T>, A> rotate_left(batch<std::complex<T>, A> const& self, requires_arch<generic>) noexcept
{
-return { rotate_right<N>(self.real()), rotate_right<N>(self.imag()) };
+return { rotate_left<N>(self.real()), rotate_left<N>(self.imag()) };
}

// Scatter with runtime indexes.
8 changes: 4 additions & 4 deletions include/xsimd/arch/xsimd_avx2.hpp
@@ -655,16 +655,16 @@ namespace xsimd
}
}

-// rotate_right
+// rotate_left
template <size_t N, class A>
-XSIMD_INLINE batch<uint16_t, A> rotate_right(batch<uint16_t, A> const& self, requires_arch<avx2>) noexcept
+XSIMD_INLINE batch<uint16_t, A> rotate_left(batch<uint16_t, A> const& self, requires_arch<avx2>) noexcept
{
return _mm256_alignr_epi8(self, self, N);
}
template <size_t N, class A>
-XSIMD_INLINE batch<int16_t, A> rotate_right(batch<int16_t, A> const& self, requires_arch<avx2>) noexcept
+XSIMD_INLINE batch<int16_t, A> rotate_left(batch<int16_t, A> const& self, requires_arch<avx2>) noexcept
{
-return bitwise_cast<int16_t>(rotate_right<N, A>(bitwise_cast<uint16_t>(self), avx2 {}));
+return bitwise_cast<int16_t>(rotate_left<N, A>(bitwise_cast<uint16_t>(self), avx2 {}));
}

// sadd
8 changes: 4 additions & 4 deletions include/xsimd/arch/xsimd_avx512bw.hpp
@@ -358,16 +358,16 @@ namespace xsimd
return detail::compare_int_avx512bw<A, T, _MM_CMPINT_NE>(self, other);
}

-// rotate_right
+// rotate_left
template <size_t N, class A>
-XSIMD_INLINE batch<uint16_t, A> rotate_right(batch<uint16_t, A> const& self, requires_arch<avx512bw>) noexcept
+XSIMD_INLINE batch<uint16_t, A> rotate_left(batch<uint16_t, A> const& self, requires_arch<avx512bw>) noexcept
{
return _mm512_alignr_epi8(self, self, N);
}
template <size_t N, class A>
-XSIMD_INLINE batch<int16_t, A> rotate_right(batch<int16_t, A> const& self, requires_arch<avx512bw>) noexcept
+XSIMD_INLINE batch<int16_t, A> rotate_left(batch<int16_t, A> const& self, requires_arch<avx512bw>) noexcept
{
-return bitwise_cast<int16_t>(rotate_right<N, A>(bitwise_cast<uint16_t>(self), avx2 {}));
+return bitwise_cast<int16_t>(rotate_left<N, A>(bitwise_cast<uint16_t>(self), avx2 {}));
}

// sadd
28 changes: 14 additions & 14 deletions include/xsimd/arch/xsimd_neon.hpp
@@ -2743,38 +2743,38 @@ namespace xsimd
}

/****************
-* rotate_right *
+* rotate_left *
****************/
namespace wrap
{
template <size_t N>
-XSIMD_INLINE uint8x16_t rotate_right_u8(uint8x16_t a, uint8x16_t b) noexcept { return vextq_u8(a, b, N); }
+XSIMD_INLINE uint8x16_t rotate_left_u8(uint8x16_t a, uint8x16_t b) noexcept { return vextq_u8(a, b, N); }
template <size_t N>
-XSIMD_INLINE int8x16_t rotate_right_s8(int8x16_t a, int8x16_t b) noexcept { return vextq_s8(a, b, N); }
+XSIMD_INLINE int8x16_t rotate_left_s8(int8x16_t a, int8x16_t b) noexcept { return vextq_s8(a, b, N); }
template <size_t N>
-XSIMD_INLINE uint16x8_t rotate_right_u16(uint16x8_t a, uint16x8_t b) noexcept { return vextq_u16(a, b, N); }
+XSIMD_INLINE uint16x8_t rotate_left_u16(uint16x8_t a, uint16x8_t b) noexcept { return vextq_u16(a, b, N); }
template <size_t N>
-XSIMD_INLINE int16x8_t rotate_right_s16(int16x8_t a, int16x8_t b) noexcept { return vextq_s16(a, b, N); }
+XSIMD_INLINE int16x8_t rotate_left_s16(int16x8_t a, int16x8_t b) noexcept { return vextq_s16(a, b, N); }
template <size_t N>
-XSIMD_INLINE uint32x4_t rotate_right_u32(uint32x4_t a, uint32x4_t b) noexcept { return vextq_u32(a, b, N); }
+XSIMD_INLINE uint32x4_t rotate_left_u32(uint32x4_t a, uint32x4_t b) noexcept { return vextq_u32(a, b, N); }
template <size_t N>
-XSIMD_INLINE int32x4_t rotate_right_s32(int32x4_t a, int32x4_t b) noexcept { return vextq_s32(a, b, N); }
+XSIMD_INLINE int32x4_t rotate_left_s32(int32x4_t a, int32x4_t b) noexcept { return vextq_s32(a, b, N); }
template <size_t N>
-XSIMD_INLINE uint64x2_t rotate_right_u64(uint64x2_t a, uint64x2_t b) noexcept { return vextq_u64(a, b, N); }
+XSIMD_INLINE uint64x2_t rotate_left_u64(uint64x2_t a, uint64x2_t b) noexcept { return vextq_u64(a, b, N); }
template <size_t N>
-XSIMD_INLINE int64x2_t rotate_right_s64(int64x2_t a, int64x2_t b) noexcept { return vextq_s64(a, b, N); }
+XSIMD_INLINE int64x2_t rotate_left_s64(int64x2_t a, int64x2_t b) noexcept { return vextq_s64(a, b, N); }
template <size_t N>
-XSIMD_INLINE float32x4_t rotate_right_f32(float32x4_t a, float32x4_t b) noexcept { return vextq_f32(a, b, N); }
+XSIMD_INLINE float32x4_t rotate_left_f32(float32x4_t a, float32x4_t b) noexcept { return vextq_f32(a, b, N); }
}

template <size_t N, class A, class T, detail::enable_neon_type_t<T> = 0>
-XSIMD_INLINE batch<T, A> rotate_right(batch<T, A> const& a, requires_arch<neon>) noexcept
+XSIMD_INLINE batch<T, A> rotate_left(batch<T, A> const& a, requires_arch<neon>) noexcept
{
using register_type = typename batch<T, A>::register_type;
const detail::neon_dispatcher::binary dispatcher = {
-std::make_tuple(wrap::rotate_right_u8<N>, wrap::rotate_right_s8<N>, wrap::rotate_right_u16<N>, wrap::rotate_right_s16<N>,
-wrap::rotate_right_u32<N>, wrap::rotate_right_s32<N>, wrap::rotate_right_u64<N>, wrap::rotate_right_s64<N>,
-wrap::rotate_right_f32<N>)
+std::make_tuple(wrap::rotate_left_u8<N>, wrap::rotate_left_s8<N>, wrap::rotate_left_u16<N>, wrap::rotate_left_s16<N>,
+wrap::rotate_left_u32<N>, wrap::rotate_left_s32<N>, wrap::rotate_left_u64<N>, wrap::rotate_left_s64<N>,
+wrap::rotate_left_f32<N>)
};
return dispatcher.apply(register_type(a), register_type(a));
}
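
For context on the NEON kernels above (not part of the commit): vextq_* concatenates its two operands and extracts a window of lanes starting at lane N, so passing the same register twice produces a lane rotation. A scalar model, assuming the usual "top lanes of the first operand, bottom lanes of the second" behaviour of VEXT:

```cpp
#include <array>
#include <cstddef>

// Scalar model of vextq_*(a, b, N): lanes N..Size-1 of a followed by lanes
// 0..N-1 of b. With a == b, out[i] == a[(i + N) % Size], i.e. "slide left by N
// and wrap from the right" -- the rotate_left semantics these kernels now carry.
template <std::size_t N, class T, std::size_t Size>
std::array<T, Size> vext_model(std::array<T, Size> const& a, std::array<T, Size> const& b)
{
    static_assert(N <= Size, "the extract index cannot exceed the lane count");
    std::array<T, Size> out {};
    for (std::size_t i = 0; i < Size; ++i)
        out[i] = (i + N < Size) ? a[i + N] : b[i + N - Size];
    return out;
}
```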
4 changes: 2 additions & 2 deletions include/xsimd/arch/xsimd_neon64.hpp
@@ -1265,10 +1265,10 @@ namespace xsimd
}

/****************
-* rotate_right *
+* rotate_left *
****************/
template <size_t N, class A>
-XSIMD_INLINE batch<double, A> rotate_right(batch<double, A> const& a, requires_arch<neon64>) noexcept
+XSIMD_INLINE batch<double, A> rotate_left(batch<double, A> const& a, requires_arch<neon64>) noexcept
{
return vextq_f64(a, a, N);
}
8 changes: 4 additions & 4 deletions include/xsimd/arch/xsimd_ssse3.hpp
@@ -105,16 +105,16 @@ namespace xsimd
}
}

-// rotate_right
+// rotate_left
template <size_t N, class A>
-XSIMD_INLINE batch<uint16_t, A> rotate_right(batch<uint16_t, A> const& self, requires_arch<ssse3>) noexcept
+XSIMD_INLINE batch<uint16_t, A> rotate_left(batch<uint16_t, A> const& self, requires_arch<ssse3>) noexcept
{
return _mm_alignr_epi8(self, self, N);
}
template <size_t N, class A>
-XSIMD_INLINE batch<int16_t, A> rotate_right(batch<int16_t, A> const& self, requires_arch<ssse3>) noexcept
+XSIMD_INLINE batch<int16_t, A> rotate_left(batch<int16_t, A> const& self, requires_arch<ssse3>) noexcept
{
-return bitwise_cast<int16_t>(rotate_right<N, A>(bitwise_cast<uint16_t>(self), ssse3 {}));
+return bitwise_cast<int16_t>(rotate_left<N, A>(bitwise_cast<uint16_t>(self), ssse3 {}));
}

// swizzle (dynamic mask)
4 changes: 2 additions & 2 deletions include/xsimd/arch/xsimd_sve.hpp
@@ -713,9 +713,9 @@ namespace xsimd
* Permutation *
***************/

-// rotate_right
+// rotate_left
template <size_t N, class A, class T, detail::sve_enable_all_t<T> = 0>
-XSIMD_INLINE batch<T, A> rotate_right(batch<T, A> const& a, requires_arch<sve>) noexcept
+XSIMD_INLINE batch<T, A> rotate_left(batch<T, A> const& a, requires_arch<sve>) noexcept
{
return svext(a, a, N);
}
12 changes: 6 additions & 6 deletions include/xsimd/types/xsimd_api.hpp
@@ -1893,11 +1893,11 @@ namespace xsimd
/**
* @ingroup batch_data_transfer
*
-* Slide the whole batch to the left by \c n bytes, and reintroduce the
+* Slide the whole batch to the left by \c n elements, and reintroduce the
* slided out elements from the right. This is different from
-* \c rol that rotates each batch element to the left.
+* \c rotl that rotates each batch element to the left.
*
-* @tparam N Amount of bytes to rotated to the left.
+* @tparam N Amount of elements to rotate to the left.
* @param x batch of integer values.
* @return rotated batch.
*/
@@ -1911,11 +1911,11 @@
/**
* @ingroup batch_data_transfer
*
-* Slide the whole batch to the right by \c n bytes, and reintroduce the
+* Slide the whole batch to the right by \c n elements, and reintroduce the
* slided out elements from the left. This is different from
-* \c rol that rotates each batch element to the left.
+* \c rotr that rotates each batch element to the right.
*
-* @tparam N Amount of bytes to rotate to the right.
+* @tparam N Amount of elements to rotate to the right.
* @param x batch of integer values.
* @return rotated batch.
*/
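
The documentation above separates whole-batch rotation from the per-element bit rotation performed by rotl and rotr. As a reference only (not part of the commit; the lane width is fixed at 32 bits purely for illustration), the two operations behave like:

```cpp
#include <array>
#include <cstddef>
#include <cstdint>

// rotate_left<N>: moves whole lanes across the batch, wrapping around the end.
template <std::size_t N, std::size_t Size>
std::array<std::uint32_t, Size> rotate_left_model(std::array<std::uint32_t, Size> const& x)
{
    std::array<std::uint32_t, Size> out {};
    for (std::size_t i = 0; i < Size; ++i)
        out[i] = x[(i + N) % Size]; // lane i takes the value N lanes to its right
    return out;
}

// rotl: rotates the bits inside each lane; lane positions are untouched.
template <std::size_t Size>
std::array<std::uint32_t, Size> rotl_model(std::array<std::uint32_t, Size> const& x, unsigned k)
{
    std::array<std::uint32_t, Size> out {};
    for (std::size_t i = 0; i < Size; ++i)
        out[i] = (x[i] << k) | (x[i] >> (32u - k)); // assumes 0 < k < 32
    return out;
}
```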
4 changes: 2 additions & 2 deletions test/test_batch_manip.cpp
@@ -40,8 +40,8 @@ namespace xsimd
exped_reverse[i] = lhs_in[N - 1 - i];
exped_fill[i] = lhs_in[N - 1];
exped_dup[i] = lhs_in[2 * (i / 2)];
-exped_ror[i] = lhs_in[(i + 1) % N];
-exped_rol[i] = lhs_in[(i - 1) % N];
+exped_ror[i] = lhs_in[(i - 1) % N];
+exped_rol[i] = lhs_in[(i + 1) % N];
}
vects.push_back(std::move(exped_reverse));
vects.push_back(std::move(exped_fill));
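
With the swap fixed, rotating left by N and then right by N has to reproduce the original batch. A small round-trip check along these lines (not part of the commit, just a sketch using the public API touched here; N is assumed to be smaller than the batch width) could complement the updated expectations:

```cpp
#include <xsimd/xsimd.hpp>

#include <cassert>
#include <cstddef>

template <std::size_t N, class T>
void check_rotate_round_trip(xsimd::batch<T> const& x)
{
    auto const round_trip = xsimd::rotate_right<N>(xsimd::rotate_left<N>(x));
    assert(xsimd::all(round_trip == x)); // rotate_right<N> undoes rotate_left<N>
}
```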
