Mirror of https://github.com/JayDDee/cpuminer-opt.git (synced 2025-09-17 23:44:27 +00:00)
v3.15.7
@@ -27,13 +27,15 @@
// All of the utilities here assume all data is in registers except
// in rare cases where arguments are pointers.
//
// Some constants are generated using a memory overlay on the stack.
//
// Intrinsics automatically promote from REX to VEX when AVX is available
// but ASM needs to be done manually.
//
///////////////////////////////////////////////////////////////////////////


// Efficient and convenient moving bwtween GP & low bits of XMM.
// Efficient and convenient moving between GP & low bits of XMM.
// Use VEX when available to give access to xmm8-15 and zero extend for
// larger vectors.

@@ -81,6 +83,23 @@ static inline uint32_t mm128_mov128_32( const __m128i a )
return n;
}

// Equivalent of set1, broadcast integer to all elements.
#define m128_const_i128( i ) mm128_mov64_128( i )
#define m128_const1_64( i ) _mm_shuffle_epi32( mm128_mov64_128( i ), 0x44 )
#define m128_const1_32( i ) _mm_shuffle_epi32( mm128_mov32_128( i ), 0x00 )

#if defined(__SSE4_1__)

// Assign 64 bit integers to respective elements: {hi, lo}
#define m128_const_64( hi, lo ) \
_mm_insert_epi64( mm128_mov64_128( lo ), hi, 1 )

#else // No insert in SSE2

#define m128_const_64 _mm_set_epi64x

#endif

// Pseudo constants

#define m128_zero _mm_setzero_si128()
@@ -107,27 +126,53 @@ static inline __m128i mm128_neg1_fn()
}
#define m128_neg1 mm128_neg1_fn()


// const functions work best when arguments are immediate constants or
// are known to be in registers. If data needs to be loaded from memory or cache
// use set.
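
// Illustrative sketch only (not part of the diff): the distinction the comment
// above draws between "const" and "set". Assumes <immintrin.h> and the macros
// defined earlier in this file are in scope; the constant value is arbitrary.
//
// static inline __m128i broadcast_round_constant( void )
// {
//    // Immediate constant: the register-based const1 form is preferred.
//    return m128_const1_64( 0x243F6A8885A308D3 );
// }
//
// static inline __m128i broadcast_from_memory( const uint64_t *p )
// {
//    // Data comes from memory or cache: the compiler's set1 intrinsic fits better.
//    return _mm_set1_epi64x( p[0] );
// }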

// Equivalent of set1, broadcast 64 bit integer to all elements.
#define m128_const1_64( i ) _mm_shuffle_epi32( mm128_mov64_128( i ), 0x44 )
#define m128_const1_32( i ) _mm_shuffle_epi32( mm128_mov32_128( i ), 0x00 )

#if defined(__SSE4_1__)

// Assign 64 bit integers to respective elements: {hi, lo}
#define m128_const_64( hi, lo ) \
_mm_insert_epi64( mm128_mov64_128( lo ), hi, 1 )
/////////////////////////////
//
// _mm_insert_ps( __m128i v1, __m128i v2, imm8 c )
//
// Fast and powerful but very limited in its application.
// It requires SSE4.1 but only works with 128 bit vectors with 32 bit
// elements. There is no equivalent instruction for 256 bit or 512 bit vectors.
// There's no integer version. There's no 64 bit, 16 bit or byte element
// sizing. It's unique.
//
// It can:
// - zero 32 bit elements of a 128 bit vector.
// - extract any 32 bit element from one 128 bit vector and insert the
// data to any 32 bit element of another 128 bit vector, or the same vector.
// - do both simultaneously.
//
// It can be used as a more efficient replacement for _mm_insert_epi32
// or _mm_extract_epi32.
//
// Control byte definition:
// c[3:0] zero mask
// c[5:4] destination element selector
// c[7:6] source element selector

#else // No insert in SSE2
// Convert type and abbreviate name: e"x"tract "i"nsert "m"ask
#define mm128_xim_32( v1, v2, c ) \
_mm_castps_si128( _mm_insert_ps( _mm_castsi128_ps( v1 ), \
_mm_castsi128_ps( v2 ), c ) )

#define m128_const_64 _mm_set_epi64x
// Some examples of simple operations:

#endif
// Insert 32 bit integer into v at element c and return modified v.
static inline __m128i mm128_insert_32( const __m128i v, const uint32_t i,
const int c )
{ return mm128_xim_32( v, mm128_mov32_128( i ), c<<4 ); }

// Extract 32 bit element c from v and return as integer.
static inline uint32_t mm128_extract_32( const __m128i v, const int c )
{ return mm128_mov128_32( mm128_xim_32( v, v, c<<6 ) ); }

// Clear (zero) 32 bit elements based on bits set in 4 bit mask.
static inline __m128i mm128_mask_32( const __m128i v, const int m )
{ return mm128_xim_32( v, v, m ); }

#endif // SSE4_1
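
// Hypothetical illustration (not from the diff): composing the insert_ps
// control byte described above using the mm128_xim_32 wrapper. This copies
// element 3 of s into element 1 of d and zeroes element 0 of the result.
//
// static inline __m128i example_xim_32( const __m128i d, const __m128i s )
// { return mm128_xim_32( d, s, (3<<6) | (1<<4) | 0x1 ); }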

//
// Basic operations without equivalent SIMD intrinsic
@@ -140,11 +185,6 @@ static inline __m128i mm128_neg1_fn()
#define mm128_negate_32( v ) _mm_sub_epi32( m128_zero, v )
#define mm128_negate_16( v ) _mm_sub_epi16( m128_zero, v )

// Clear (zero) 32 bit elements based on bits set in 4 bit mask.
// Fast, avoids using vector mask, but only available for 128 bit vectors.
#define mm128_mask_32( a, mask ) \
_mm_castps_si128( _mm_insert_ps( _mm_castsi128_ps( a ), \
_mm_castsi128_ps( a ), mask ) )

// Add 4 values, fewer dependencies than sequential addition.
#define mm128_add4_64( a, b, c, d ) \
@@ -162,27 +202,6 @@ static inline __m128i mm128_neg1_fn()
#define mm128_xor4( a, b, c, d ) \
_mm_xor_si128( _mm_xor_si128( a, b ), _mm_xor_si128( c, d ) )

// Horizontal vector testing

#if defined(__SSE4_1__)

#define mm128_allbits0( a ) _mm_testz_si128( a, a )
#define mm128_allbits1( a ) _mm_testc_si128( a, m128_neg1 )
// probably broken, avx2 is
//#define mm128_allbitsne( a ) _mm_testnzc_si128( a, m128_neg1 )
#define mm128_anybits0( a ) mm128_allbits1( a )
#define mm128_anybits1( a ) mm128_allbits0( a )

#else // SSE2

// Bit-wise test of entire vector, useful to test results of cmp.
#define mm128_anybits0( a ) (uint128_t)(a)
#define mm128_anybits1( a ) (((uint128_t)(a))+1)

#define mm128_allbits0( a ) ( !mm128_anybits1(a) )
#define mm128_allbits1( a ) ( !mm128_anybits0(a) )

#endif // SSE4.1 else SSE2
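
// Hedged usage sketch (names are hypothetical, not from the header): the
// typical pattern for these horizontal tests, checking whether any 32 bit
// lane of v compared equal to target.
//
// static inline int any_lane_equal( const __m128i v, const __m128i target )
// {
//    const __m128i cmp = _mm_cmpeq_epi32( v, target );
//    return !mm128_allbits0( cmp );   // non-zero if at least one lane matched
// }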

//
// Vector pointer cast
@@ -204,11 +223,6 @@ static inline __m128i mm128_neg1_fn()
#define casto_m128i(p,o) (((__m128i*)(p))+(o))


// Memory functions
// Mostly for convenience, avoids calculating bytes.
// Assumes data is aligned and integral.
// n = number of __m128i, bytes/16

// Memory functions
// Mostly for convenience, avoids calculating bytes.
// Assumes data is aligned and integral.
@@ -256,14 +270,14 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
#define mm128_ror_32 _mm_ror_epi32
#define mm128_rol_32 _mm_rol_epi32

#else
#else // SSE2

#define mm128_ror_64 mm128_ror_var_64
#define mm128_rol_64 mm128_rol_var_64
#define mm128_ror_32 mm128_ror_var_32
#define mm128_rol_32 mm128_rol_var_32

#endif // AVX512 else
#endif // AVX512 else SSE2

#define mm128_ror_16( v, c ) \
_mm_or_si128( _mm_srli_epi16( v, c ), _mm_slli_epi16( v, 16-(c) ) )
@@ -280,58 +294,19 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
//#define mm128_swap_64( v ) _mm_alignr_epi8( v, v, 8 )
//#define mm128_ror_1x32( v ) _mm_alignr_epi8( v, v, 4 )
//#define mm128_rol_1x32( v ) _mm_alignr_epi8( v, v, 12 )
#define mm128_ror_1x16( v ) _mm_alignr_epi8( v, v, 2 )
#define mm128_rol_1x16( v ) _mm_alignr_epi8( v, v, 14 )
#define mm128_ror_1x8( v ) _mm_alignr_epi8( v, v, 1 )
#define mm128_rol_1x8( v ) _mm_alignr_epi8( v, v, 15 )

// Rotate by c bytes
#define mm128_ror_x8( v, c ) _mm_alignr_epi8( v, c )
#define mm128_rol_x8( v, c ) _mm_alignr_epi8( v, 16-(c) )


// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm128_invert_32( v ) _mm_shuffle_epi32( v, 0x1b )
// Swap 32 bit elements in 64 bit lanes
#define mm128_swap64_32( v ) _mm_shuffle_epi32( v, 0xb1 )

#if defined(__SSSE3__)

#define mm128_invert_16( v ) \
_mm_shuffle_epi8( v, mm128_const_64( 0x0100030205040706, \
0x09080b0a0d0c0f0e )
#define mm128_invert_8( v ) \
_mm_shuffle_epi8( v, mm128_const_64( 0x0001020304050607, \
0x08090a0b0c0d0e0f )

#endif // SSSE3


//
// Rotate elements within lanes.

#define mm128_swap64_32( v ) _mm_shuffle_epi32( v, 0xb1 )

#define mm128_rol64_8( v, c ) \
_mm_or_si128( _mm_slli_epi64( v, ( ( (c)<<3 ) ), \
_mm_srli_epi64( v, ( ( 64 - ( (c)<<3 ) ) ) )

#define mm128_ror64_8( v, c ) \
_mm_or_si128( _mm_srli_epi64( v, ( ( (c)<<3 ) ), \
_mm_slli_epi64( v, ( ( 64 - ( (c)<<3 ) ) ) )

#define mm128_rol32_8( v, c ) \
_mm_or_si128( _mm_slli_epi32( v, ( ( (c)<<3 ) ), \
_mm_srli_epi32( v, ( ( 32 - ( (c)<<3 ) ) ) )

#define mm128_ror32_8( v, c ) \
_mm_or_si128( _mm_srli_epi32( v, ( ( (c)<<3 ) ), \
_mm_slli_epi32( v, ( ( 32 - ( (c)<<3 ) ) ) )

// Rotate right by c bytes
static inline __m128i mm128_ror_x8( const __m128i v, const int c )
{ return _mm_alignr_epi8( v, v, c ); }

//
// Endian byte swap.

#if defined(__SSSE3__)

#define mm128_bswap_64( v ) \
_mm_shuffle_epi8( v, m128_const_64( 0x08090a0b0c0d0e0f, \
0x0001020304050607 ) )
@@ -374,7 +349,6 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )

#else // SSE2

// Use inline function instead of macro due to multiple statements.
static inline __m128i mm128_bswap_64( __m128i v )
{
v = _mm_or_si128( _mm_slli_epi16( v, 8 ), _mm_srli_epi16( v, 8 ) );

@@ -15,33 +15,35 @@
// is available.

// Move integer to low element of vector, other elements are set to zero.
#define mm256_mov64_256( i ) _mm256_castsi128_si256( mm128_mov64_128( i ) )
#define mm256_mov32_256( i ) _mm256_castsi128_si256( mm128_mov32_128( i ) )

#define mm256_mov64_256( n ) _mm256_castsi128_si256( mm128_mov64_128( n ) )
#define mm256_mov32_256( n ) _mm256_castsi128_si256( mm128_mov32_128( n ) )

#define mm256_mov256_64( a ) mm128_mov128_64( _mm256_castsi256_si128( a ) )
#define mm256_mov256_32( a ) mm128_mov128_32( _mm256_castsi256_si128( a ) )
// Move low element of vector to integer.
#define mm256_mov256_64( v ) mm128_mov128_64( _mm256_castsi256_si128( v ) )
#define mm256_mov256_32( v ) mm128_mov128_32( _mm256_castsi256_si128( v ) )

// concatenate two 128 bit vectors into one 256 bit vector: { hi, lo }
#define mm256_concat_128( hi, lo ) \
_mm256_inserti128_si256( _mm256_castsi128_si256( lo ), hi, 1 )


// Equavalent of set, move 64 bit integer constants to respective 64 bit
// Equivalent of set, move 64 bit integer constants to respective 64 bit
// elements.
static inline __m256i m256_const_64( const uint64_t i3, const uint64_t i2,
const uint64_t i1, const uint64_t i0 )
{
__m128i hi, lo;
lo = mm128_mov64_128( i0 );
hi = mm128_mov64_128( i2 );
lo = _mm_insert_epi64( lo, i1, 1 );
hi = _mm_insert_epi64( hi, i3, 1 );
return mm256_concat_128( hi, lo );
union { __m256i m256i;
uint64_t u64[4]; } v;
v.u64[0] = i0; v.u64[1] = i1; v.u64[2] = i2; v.u64[3] = i3;
return v.m256i;
}

// Equivalent of set1, broadcast integer constant to all elements.
#define m256_const1_128( v ) _mm256_broadcastsi128_si256( v )
// Equivalent of set1.
// 128 bit vector argument
#define m256_const1_128( v ) \
_mm256_permute4x64_epi64( _mm256_castsi128_si256( v ), 0x44 )
// 64 bit integer argument
#define m256_const1_i128( i ) m256_const1_128( mm128_mov64_128( i ) )
#define m256_const1_64( i ) _mm256_broadcastq_epi64( mm128_mov64_128( i ) )
#define m256_const1_32( i ) _mm256_broadcastd_epi32( mm128_mov32_128( i ) )
#define m256_const1_16( i ) _mm256_broadcastw_epi16( mm128_mov32_128( i ) )
@@ -50,119 +52,29 @@ static inline __m256i m256_const_64( const uint64_t i3, const uint64_t i2,
#define m256_const2_64( i1, i0 ) \
m256_const1_128( m128_const_64( i1, i0 ) )

#define m126_const2_32( i1, i0 ) \
m256_const1_64( ( (uint64_t)(i1) << 32 ) | ( (uint64_t)(i0) & 0xffffffff ) )


//
// All SIMD constant macros are actually functions containing executable
// code and therefore can't be used as compile time initializers.
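
// Illustrative sketch only (assumed usage, not part of the diff): because the
// "constants" below expand to intrinsic calls, they are not constant
// expressions and must be assigned at run time.
//
// static __m256i k = m256_one_64;   // wrong: not a compile time constant
// static __m256i k;                 // declare...
// static void init_constants( void )
// {  k = m256_one_64;  }            // ...and assign at run time instead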

#define m256_zero _mm256_setzero_si256()
#define m256_one_256 mm256_mov64_256( 1 )
#define m256_one_128 \
_mm256_permute4x64_epi64( _mm256_castsi128_si256( \
mm128_mov64_128( 1 ) ), 0x44 )
#define m256_one_64 _mm256_broadcastq_epi64( mm128_mov64_128( 1 ) )
#define m256_one_32 _mm256_broadcastd_epi32( mm128_mov64_128( 1 ) )
#define m256_one_16 _mm256_broadcastw_epi16( mm128_mov64_128( 1 ) )
#define m256_one_8 _mm256_broadcastb_epi8 ( mm128_mov64_128( 1 ) )
#define m256_zero _mm256_setzero_si256()
#define m256_one_256 mm256_mov64_256( 1 )
#define m256_one_128 m256_const1_i128( 1 )
#define m256_one_64 _mm256_broadcastq_epi64( mm128_mov64_128( 1 ) )
#define m256_one_32 _mm256_broadcastd_epi32( mm128_mov64_128( 1 ) )
#define m256_one_16 _mm256_broadcastw_epi16( mm128_mov64_128( 1 ) )
#define m256_one_8 _mm256_broadcastb_epi8 ( mm128_mov64_128( 1 ) )

static inline __m256i mm256_neg1_fn()
{
__m256i a;
asm( "vpcmpeqq %0, %0, %0\n\t" : "=x"(a) );
return a;
__m256i v;
asm( "vpcmpeqq %0, %0, %0\n\t" : "=x"(v) );
return v;
}
#define m256_neg1 mm256_neg1_fn()


//
// Vector size conversion.
//
// Allows operations on either or both halves of a 256 bit vector serially.
// Handy for parallel AES.
// Caveats when writing:
// _mm256_castsi256_si128 is free and without side effects.
// _mm256_castsi128_si256 is also free but leaves the high half
// undefined. That's ok if the hi half will be subsequently assigned.
// If assigning both, do lo first. If assigning only 1, use
// _mm256_inserti128_si256.
//
#define mm128_extr_lo128_256( a ) _mm256_castsi256_si128( a )
#define mm128_extr_hi128_256( a ) _mm256_extracti128_si256( a, 1 )
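
// A minimal sketch (the function name is illustrative, not from the header)
// following the caveat above: assign the low half first with the free cast,
// then insert the high half, which is what mm256_concat_128 does.
//
// static inline __m256i join_128x2( const __m128i hi, const __m128i lo )
// {
//    __m256i v = _mm256_castsi128_si256( lo );      // low half first (free)
//    return _mm256_inserti128_si256( v, hi, 1 );    // then insert the high half
// }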

// Extract integers from 256 bit vector, inefficient, avoid if possible.
#define mm256_extr_4x64( a3, a2, a1, a0, src ) \
do { \
__m128i hi = _mm256_extracti128_si256( src, 1 ); \
a0 = mm128_mov128_64( _mm256_castsi256_si128( src) ); \
a1 = _mm_extract_epi64( _mm256_castsi256_si128( src ), 1 ); \
a2 = mm128_mov128_64( hi ); \
a3 = _mm_extract_epi64( hi, 1 ); \
} while(0)

#define mm256_extr_8x32( a7, a6, a5, a4, a3, a2, a1, a0, src ) \
do { \
uint64_t t = _mm_extract_epi64( _mm256_castsi256_si128( src ), 1 ); \
__m128i hi = _mm256_extracti128_si256( src, 1 ); \
a0 = mm256_mov256_32( src ); \
a1 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 1 ); \
a2 = (uint32_t)( t ); \
a3 = (uint32_t)( t<<32 ); \
t = _mm_extract_epi64( hi, 1 ); \
a4 = mm128_mov128_32( hi ); \
a5 = _mm_extract_epi32( hi, 1 ); \
a6 = (uint32_t)( t ); \
a7 = (uint32_t)( t<<32 ); \
} while(0)


// Bytewise test of all 256 bits
#define mm256_all0_8( a ) \
( _mm256_movemask_epi8( a ) == 0 )

#define mm256_all1_8( a ) \
( _mm256_movemask_epi8( a ) == -1 )


#define mm256_anybits0( a ) \
( _mm256_movemask_epi8( a ) & 0xffffffff )

#define mm256_anybits1( a ) \
( ( _mm256_movemask_epi8( a ) & 0xffffffff ) != 0xffffffff )


// Bitwise test of all 256 bits
#define mm256_allbits0( a ) _mm256_testc_si256( a, m256_neg1 )
#define mm256_allbits1( a ) _mm256_testc_si256( m256_zero, a )
//#define mm256_anybits0( a ) !mm256_allbits1( a )
//#define mm256_anybits1( a ) !mm256_allbits0( a )


// Parallel AES, for when x is expected to be in a 256 bit register.
// Use same 128 bit key.

#if defined(__VAES__)

#define mm256_aesenc_2x128( x, k ) \
_mm256_aesenc_epi128( x, k )

#else

#define mm256_aesenc_2x128( x, k ) \
mm256_concat_128( _mm_aesenc_si128( mm128_extr_hi128_256( x ), k ), \
_mm_aesenc_si128( mm128_extr_lo128_256( x ), k ) )

#endif

#define mm256_paesenc_2x128( y, x, k ) do \
{ \
__m128i *X = (__m128i*)x; \
__m128i *Y = (__m128i*)y; \
Y[0] = _mm_aesenc_si128( X[0], k ); \
Y[1] = _mm_aesenc_si128( X[1], k ); \
} while(0);
// Consistent naming for similar operations.
#define mm128_extr_lo128_256( v ) _mm256_castsi256_si128( v )
#define mm128_extr_hi128_256( v ) _mm256_extracti128_si256( v, 1 )

//
// Pointer casting
@@ -201,13 +113,13 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
//
// Basic operations without SIMD equivalent

// Bitwise not ( ~x )
#define mm256_not( x ) _mm256_xor_si256( (x), m256_neg1 ) \
// Bitwise not ( ~v )
#define mm256_not( v ) _mm256_xor_si256( v, m256_neg1 ) \

// Unary negation of each element ( -a )
#define mm256_negate_64( a ) _mm256_sub_epi64( m256_zero, a )
#define mm256_negate_32( a ) _mm256_sub_epi32( m256_zero, a )
#define mm256_negate_16( a ) _mm256_sub_epi16( m256_zero, a )
// Unary negation of each element ( -v )
#define mm256_negate_64( v ) _mm256_sub_epi64( m256_zero, v )
#define mm256_negate_32( v ) _mm256_sub_epi32( m256_zero, v )
#define mm256_negate_16( v ) _mm256_sub_epi16( m256_zero, v )


// Add 4 values, fewer dependencies than sequential addition.
@@ -265,17 +177,14 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
#define mm256_ror_32 _mm256_ror_epi32
#define mm256_rol_32 _mm256_rol_epi32

#else


// No AVX512, use fallback.
#else // AVX2

#define mm256_ror_64 mm256_ror_var_64
#define mm256_rol_64 mm256_rol_var_64
#define mm256_ror_32 mm256_ror_var_32
#define mm256_rol_32 mm256_rol_var_32

#endif // AVX512 else
#endif // AVX512 else AVX2

#define mm256_ror_16( v, c ) \
_mm256_or_si256( _mm256_srli_epi16( v, c ), \
@@ -285,46 +194,6 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
_mm256_or_si256( _mm256_slli_epi16( v, c ), \
_mm256_srli_epi16( v, 16-(c) ) )

// Rotate bits in each element of v by the amount in corresponding element of
// index vector c
#define mm256_rorv_64( v, c ) \
_mm256_or_si256( \
_mm256_srlv_epi64( v, c ), \
_mm256_sllv_epi64( v, _mm256_sub_epi64( \
_mm256_set1_epi64x( 64 ), c ) ) )

#define mm256_rolv_64( v, c ) \
_mm256_or_si256( \
_mm256_sllv_epi64( v, c ), \
_mm256_srlv_epi64( v, _mm256_sub_epi64( \
_mm256_set1_epi64x( 64 ), c ) ) )

#define mm256_rorv_32( v, c ) \
_mm256_or_si256( \
_mm256_srlv_epi32( v, c ), \
_mm256_sllv_epi32( v, _mm256_sub_epi32( \
_mm256_set1_epi32( 32 ), c ) ) )

#define mm256_rolv_32( v, c ) \
_mm256_or_si256( \
_mm256_sllv_epi32( v, c ), \
_mm256_srlv_epi32( v, _mm256_sub_epi32( \
_mm256_set1_epi32( 32 ), c ) ) )

// AVX512 can do 16 bit elements.
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

#define mm256_rorv_16( v, c ) \
_mm256_or_si256( \
_mm256_srlv_epi16( v, _mm256_set1_epi16( c ) ), \
_mm256_sllv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )

#define mm256_rolv_16( v, c ) \
_mm256_or_si256( \
_mm256_sllv_epi16( v, _mm256_set1_epi16( c ) ), \
_mm256_srlv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )

#endif // AVX512

//
// Rotate elements across all lanes.
@@ -336,13 +205,26 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

#define mm256_swap_128( v ) _mm256_alignr_epi64( v, v, 2 )
#define mm256_ror_1x64( v ) _mm256_alignr_epi64( v, v, 1 )
#define mm256_rol_1x64( v ) _mm256_alignr_epi64( v, v, 3 )
#define mm256_ror_1x32( v ) _mm256_alignr_epi32( v, v, 1 )
#define mm256_rol_1x32( v ) _mm256_alignr_epi32( v, v, 7 )
#define mm256_ror_3x32( v ) _mm256_alignr_epi32( v, v, 3 )
#define mm256_rol_3x32( v ) _mm256_alignr_epi32( v, v, 5 )
static inline __m256i mm256_swap_128( const __m256i v )
{ return _mm256_alignr_epi64( v, v, 2 ); }

static inline __m256i mm256_ror_1x64( const __m256i v )
{ return _mm256_alignr_epi64( v, v, 1 ); }

static inline __m256i mm256_rol_1x64( const __m256i v )
{ return _mm256_alignr_epi64( v, v, 3 ); }

static inline __m256i mm256_ror_1x32( const __m256i v )
{ return _mm256_alignr_epi32( v, v, 1 ); }

static inline __m256i mm256_rol_1x32( const __m256i v )
{ return _mm256_alignr_epi32( v, v, 7 ); }

static inline __m256i mm256_ror_3x32( const __m256i v )
{ return _mm256_alignr_epi32( v, v, 3 ); }

static inline __m256i mm256_rol_3x32( const __m256i v )
{ return _mm256_alignr_epi32( v, v, 5 ); }

#else // AVX2

@@ -377,131 +259,18 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )

#endif // AVX512 else AVX2


// AVX512 can do 16 & 8 bit elements.
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

// Rotate 256 bit vector by one 16 bit element.
#define mm256_ror_1x16( v ) \
_mm256_permutexvar_epi16( m256_const_64( \
0x0000000f000e000d, 0x000c000b000a0009, \
0x0008000700060005, 0x0004000300020001 ), v )

#define mm256_rol_1x16( v ) \
_mm256_permutexvar_epi16( m256_const_64( \
0x000e000d000c000b, 0x000a000900080007, \
0x0006000500040003, 0x000200010000000f ), v )

#if defined (__AVX512VBMI__)

// Rotate 256 bit vector by one byte.
#define mm256_ror_1x8( v ) _mm256_permutexvar_epi8( m256_const_64( \
0x001f1e1d1c1b1a19, 0x1817161514131211, \
0x100f0e0d0c0b0a09, 0x0807060504030201 ), v )

#define mm256_rol_1x8( v ) _mm256_permutexvar_epi16( m256_const_64( \
0x1e1d1c1b1a191817, 0x161514131211100f, \
0x0e0d0c0b0a090807, 0x060504030201001f ), v )

#endif // VBMI

#endif // AVX512


// Invert vector: {3,2,1,0} -> {0,1,2,3}

#define mm256_invert_64 ( v ) _mm256_permute4x64_epi64( v, 0x1b )

#define mm256_invert_32 ( v ) _mm256_permutevar8x32_epi32( v, \
m256_const_64( 0x0000000000000001, 0x0000000200000003 \
0x0000000400000005, 0x0000000600000007 )

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

// Invert vector: {7,6,5,4,3,2,1,0} -> {0,1,2,3,4,5,6,7}
#define mm256_invert_16 ( v ) \
_mm256_permutexvar_epi16( m256_const_64( \
0x0000000100020003, 0x0004000500060007, \
0x00080009000a000b, 0x000c000d000e000f ), v )

#if defined(__AVX512VBMI__)

#define mm256_invert_8( v ) \
_mm256_permutexvar_epi8( m256_const_64( \
0x0001020304050607, 0x08090a0b0c0d0e0f, \
0x1011121314151617, 0x18191a1b1c1d1e1f ), v )
#endif // VBMI
#endif // AVX512


//
// Rotate elements within each 128 bit lane of 256 bit vector.

#define mm256_swap128_64( v ) _mm256_shuffle_epi32( v, 0x4e )
#define mm256_swap128_64( v ) _mm256_shuffle_epi32( v, 0x4e )
#define mm256_ror128_32( v ) _mm256_shuffle_epi32( v, 0x39 )
#define mm256_rol128_32( v ) _mm256_shuffle_epi32( v, 0x93 )

#define mm256_ror128_32( v ) _mm256_shuffle_epi32( v, 0x39 )

#define mm256_rol128_32( v ) _mm256_shuffle_epi32( v, 0x93 )

#define mm256_ror128_x8( v, c ) _mm256_alignr_epi8( v, v, c )

/*
// Rotate each 128 bit lane by c elements.
#define mm256_ror128_8( v, c ) \
_mm256_or_si256( _mm256_bsrli_epi128( v, c ), \
_mm256_bslli_epi128( v, 16-(c) ) )
#define mm256_rol128_8( v, c ) \
_mm256_or_si256( _mm256_bslli_epi128( v, c ), \
_mm256_bsrli_epi128( v, 16-(c) ) )
*/

// Rotate elements in each 64 bit lane

#define mm256_swap64_32( v ) _mm256_shuffle_epi32( v, 0xb1 )

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

#define mm256_rol64_8( v, c ) _mm256_rol_epi64( v, ((c)<<3) )
#define mm256_ror64_8( v, c ) _mm256_ror_epi64( v, ((c)<<3) )

#else

#define mm256_rol64_8( v, c ) \
_mm256_or_si256( _mm256_slli_epi64( v, ( ( (c)<<3 ) ), \
_mm256_srli_epi64( v, ( ( 64 - ( (c)<<3 ) ) ) )

#define mm256_ror64_8( v, c ) \
_mm256_or_si256( _mm256_srli_epi64( v, ( ( (c)<<3 ) ), \
_mm256_slli_epi64( v, ( ( 64 - ( (c)<<3 ) ) ) )

#endif


// Rotate elements in each 32 bit lane

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

#define mm256_swap32_16( v ) _mm256_rol_epi32( v, 16 )

#define mm256_rol32_8( v ) _mm256_rol_epi32( v, 8 )
#define mm256_ror32_8( v ) _mm256_ror_epi32( v, 8 )

#else

#define mm256_swap32_16( v ) \
_mm256_or_si256( _mm256_slli_epi32( v, 16 ), \
_mm256_srli_epi32( v, 16 ) )

#define mm256_rol32_8( v ) \
_mm256_or_si256( _mm256_slli_epi32( v, 8 ), \
_mm256_srli_epi32( v, 8 ) )

#define mm256_ror32_8( v, c ) \
_mm256_or_si256( _mm256_srli_epi32( v, 8 ), \
_mm256_slli_epi32( v, 8 ) )

#endif
static inline __m256i mm256_ror128_x8( const __m256i v, const int c )
{ return _mm256_alignr_epi8( v, v, c ); }

// Swap 32 bit elements in each 64 bit lane.
#define mm256_swap64_32( v ) _mm256_shuffle_epi32( v, 0xb1 )

//
// Swap bytes in vector elements, endian bswap.

@@ -26,9 +26,6 @@
// _mm512_permutex_epi64 only shuffles within 256 bit lanes. Permute
// usually shuffles across all lanes.
//
// Some instructions like cmp and blend use a mask register now instead of
// a mask vector.
//
// permutexvar has args reversed, index is first arg. Previously all
// permutes and shuffles have the index last.
//
@@ -85,52 +82,43 @@
#define mm512_mov256_64( a ) mm128_mov128_64( _mm256_castsi512_si128( a ) )
#define mm512_mov256_32( a ) mm128_mov128_32( _mm256_castsi512_si128( a ) )


// Insert and extract integers is a multistage operation.
// Insert integer into __m128i, then insert __m128i to __m256i, finally
// insert __256i into __m512i. Reverse the order for extract.
// Do not use __m512_insert_epi64 or _mm256_insert_epi64 to perform multiple
// inserts.
// Avoid small integers for multiple inserts.
// Shortcuts:
// Use castsi to reference the low bits of a vector or sub-vector. (free)
// Use mov to insert integer into low bits of vector or sub-vector. (cheap)
// Use _mm_insert only to reference the high bits of __m128i. (expensive)
// Sequence instructions to minimize data dependencies.
// Use const or const1 only when integer is either immediate or known to be in
// a GP register. Use set/set1 when data needs to be loaded from memory or
// cache.
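
// A sketch of the shortcut sequence described above (illustrative only, the
// function name is not from the header): extract 64 bit element 3 of a 512
// bit vector by narrowing with free casts, then using _mm_insert/_mm_extract
// only for the high half of a __m128i.
//
// static inline uint64_t extract512_64_elt3( const __m512i v )
// {
//    const __m128i lane1 = _mm256_extracti128_si256(
//                             _mm512_castsi512_si256( v ), 1 );  // elements 3:2
//    return _mm_extract_epi64( lane1, 1 );                       // element 3
// }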
// A simple 128 bit permute, using function instead of macro avoids
// problems if the v arg passed as an expression.
static inline __m512i mm512_perm_128( const __m512i v, const int c )
{ return _mm512_shuffle_i64x2( v, v, c ); }

// Concatenate two 256 bit vectors into one 512 bit vector {hi, lo}
#define mm512_concat_256( hi, lo ) \
_mm512_inserti64x4( _mm512_castsi256_si512( lo ), hi, 1 )

// Equivalent of set, assign 64 bit integers to respective 64 bit elements.
// Use stack memory overlay
static inline __m512i m512_const_64( const uint64_t i7, const uint64_t i6,
const uint64_t i5, const uint64_t i4,
const uint64_t i3, const uint64_t i2,
const uint64_t i1, const uint64_t i0 )
{
__m256i hi, lo;
__m128i hi1, lo1;
lo = mm256_mov64_256( i0 );
lo1 = mm128_mov64_128( i2 );
hi = mm256_mov64_256( i4 );
hi1 = mm128_mov64_128( i6 );
lo = _mm256_castsi128_si256(
_mm_insert_epi64( _mm256_castsi256_si128( lo ), i1, 1 ) );
lo1 = _mm_insert_epi64( lo1, i3, 1 );
hi = _mm256_castsi128_si256(
_mm_insert_epi64( _mm256_castsi256_si128( hi ), i5, 1 ) );
hi1 = _mm_insert_epi64( hi1, i7, 1 );
lo = _mm256_inserti128_si256( lo, lo1, 1 );
hi = _mm256_inserti128_si256( hi, hi1, 1 );
return mm512_concat_256( hi, lo );
union { __m512i m512i;
uint64_t u64[8]; } v;
v.u64[0] = i0; v.u64[1] = i1;
v.u64[2] = i2; v.u64[3] = i3;
v.u64[4] = i4; v.u64[5] = i5;
v.u64[6] = i6; v.u64[7] = i7;
return v.m512i;
}

// Equivalent of set1, broadcast 64 bit constant to all 64 bit elements.
#define m512_const1_256( v ) _mm512_broadcast_i64x4( v )
#define m512_const1_128( v ) _mm512_broadcast_i64x2( v )
// Equivalent of set1, broadcast lo element all elements.
static inline __m512i m512_const1_256( const __m256i v )
{ return _mm512_inserti64x4( _mm512_castsi256_si512( v ), v, 1 ); }

#define m512_const1_128( v ) \
mm512_perm_128( _mm512_castsi128_si512( v ), 0 )
// Integer input argument up to 64 bits
#define m512_const1_i128( i ) \
mm512_perm_128( _mm512_castsi128_si512( mm128_mov64_128( i ) ), 0 )

//#define m512_const1_256( v ) _mm512_broadcast_i64x4( v )
//#define m512_const1_128( v ) _mm512_broadcast_i64x2( v )
#define m512_const1_64( i ) _mm512_broadcastq_epi64( mm128_mov64_128( i ) )
#define m512_const1_32( i ) _mm512_broadcastd_epi32( mm128_mov32_128( i ) )
#define m512_const1_16( i ) _mm512_broadcastw_epi16( mm128_mov32_128( i ) )
@@ -142,23 +130,17 @@ static inline __m512i m512_const_64( const uint64_t i7, const uint64_t i6,
#define m512_const2_64( i1, i0 ) \
m512_const1_128( m128_const_64( i1, i0 ) )

#define m512_const2_32( i1, i0 ) \
m512_const1_64( ( (uint64_t)(i1) << 32 ) | ( (uint64_t)(i0) & 0xffffffff ) )

// { m128_1, m128_1, m128_0, m128_0 }
#define m512_const_2x128( v1, v0 ) \
m512_mask_blend_epi64( 0x0f, m512_const1_128( v1 ), m512_const1_128( v0 ) )

static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2,
const uint64_t i1, const uint64_t i0 )
{
__m256i lo = mm256_mov64_256( i0 );
__m128i hi = mm128_mov64_128( i2 );
lo = _mm256_castsi128_si256(
_mm_insert_epi64( _mm256_castsi256_si128(
lo ), i1, 1 ) );
hi = _mm_insert_epi64( hi, i3, 1 );
return _mm512_broadcast_i64x4( _mm256_inserti128_si256( lo, hi, 1 ) );
union { __m512i m512i;
uint64_t u64[8]; } v;
v.u64[0] = v.u64[4] = i0;
v.u64[1] = v.u64[5] = i1;
v.u64[2] = v.u64[6] = i2;
v.u64[3] = v.u64[7] = i3;
return v.m512i;
}

//
@@ -170,14 +152,15 @@ static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2,

#define m512_zero _mm512_setzero_si512()
#define m512_one_512 mm512_mov64_512( 1 )
#define m512_one_256 _mm512_broadcast_i64x4 ( mm256_mov64_256( 1 ) )
#define m512_one_128 _mm512_broadcast_i64x2 ( mm128_mov64_128( 1 ) )
#define m512_one_64 _mm512_broadcastq_epi64( mm128_mov64_128( 1 ) )
#define m512_one_32 _mm512_broadcastd_epi32( mm128_mov64_128( 1 ) )
#define m512_one_16 _mm512_broadcastw_epi16( mm128_mov64_128( 1 ) )
#define m512_one_8 _mm512_broadcastb_epi8 ( mm128_mov64_128( 1 ) )
#define m512_one_256 _mm512_inserti64x4( m512_one_512, m256_one_256, 1 )
#define m512_one_128 m512_const1_i128( 1 )
#define m512_one_64 m512_const1_64( 1 )
#define m512_one_32 m512_const1_32( 1 )
#define m512_one_16 m512_const1_16( 1 )
#define m512_one_8 m512_const1_8( 1 )

#define m512_neg1 m512_const1_64( 0xffffffffffffffff )
//#define m512_neg1 m512_const1_64( 0xffffffffffffffff )
#define m512_neg1 _mm512_movm_epi64( 0xff )

//
// Basic operations without SIMD equivalent
@@ -242,15 +225,6 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
_mm512_xor_si512( _mm512_xor_si512( a, b ), _mm512_xor_si512( c, d ) )



// Horizontal vector testing
// Returns bit __mmask8
#define mm512_allbits0( a ) _mm512_cmpeq_epi64_mask( a, m512_zero )
#define mm512_allbits1( a ) _mm512_cmpeq_epi64_mask( a, m512_neg1 )
#define mm512_anybits0( a ) _mm512_cmpneq_epi64_mask( a, m512_neg1 )
#define mm512_anybits1( a ) _mm512_cmpneq_epi64_mask( a, m512_zero )


//
// Bit rotations.

@@ -262,37 +236,47 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
// _mm512_rolv_epi64, _mm512_rorv_epi64, _mm512_rolv_epi32, _mm512_rorv_epi32
//

// For convenience and consistency with AVX2
#define mm512_ror_64 _mm512_ror_epi64
#define mm512_rol_64 _mm512_rol_epi64
#define mm512_ror_32 _mm512_ror_epi32
#define mm512_rol_32 _mm512_rol_epi32

#define mm512_ror_var_64( v, c ) \
_mm512_or_si512( _mm512_srli_epi64( v, c ), \
_mm512_slli_epi64( v, 64-(c) ) )
static inline __m512i mm512_ror_var_64( const __m512i v, const int c )
{
return _mm512_or_si512( _mm512_srli_epi64( v, c ),
_mm512_slli_epi64( v, 64-c ) );
}

#define mm512_rol_var_64( v, c ) \
_mm512_or_si512( _mm512_slli_epi64( v, c ), \
_mm512_srli_epi64( v, 64-(c) ) )
static inline __m512i mm512_rol_var_64( const __m512i v, const int c )
{
return _mm512_or_si512( _mm512_slli_epi64( v, c ),
_mm512_srli_epi64( v, 64-c ) );
}

#define mm512_ror_var_32( v, c ) \
_mm512_or_si512( _mm512_srli_epi32( v, c ), \
_mm512_slli_epi32( v, 32-(c) ) )
static inline __m512i mm512_ror_var_32( const __m512i v, const int c )
{
return _mm512_or_si512( _mm512_srli_epi32( v, c ),
_mm512_slli_epi32( v, 32-c ) );
}

#define mm512_rol_var_32( v, c ) \
_mm512_or_si512( _mm512_slli_epi32( v, c ), \
_mm512_srli_epi32( v, 32-(c) ) )


// Here is a fixed bit rotate for 16 bit elements:
#define mm512_ror_16( v, c ) \
_mm512_or_si512( _mm512_srli_epi16( v, c ), \
_mm512_slli_epi16( v, 16-(c) )
#define mm512_rol_16( v, c ) \
_mm512_or_si512( _mm512_slli_epi16( v, c ), \
_mm512_srli_epi16( v, 16-(c) )
static inline __m512i mm512_rol_var_32( const __m512i v, const int c )
{
return _mm512_or_si512( _mm512_slli_epi32( v, c ),
_mm512_srli_epi32( v, 32-c ) );
}

static inline __m512i mm512_ror_16( __m512i const v, const int c )
{
return _mm512_or_si512( _mm512_srli_epi16( v, c ),
_mm512_slli_epi16( v, 16-c ) );
}

static inline __m512i mm512_rol_16( const __m512i v, const int c )
{
return _mm512_or_si512( _mm512_slli_epi16( v, c ),
_mm512_srli_epi16( v, 16-c ) );
}

// Rotations using a vector control index are very slow due to overhead
// to generate the index vector. Repeated rotations using the same index
@@ -363,25 +347,32 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
//
// Rotate elements in 512 bit vector.

static inline __m512i mm512_swap_256( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 4 ); }

#define mm512_swap_256( v ) _mm512_alignr_epi64( v, v, 4 )
static inline __m512i mm512_ror_1x128( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 2 ); }

// 1x64 notation used to distinguish from bit rotation.
#define mm512_ror_1x128( v ) _mm512_alignr_epi64( v, v, 2 )
#define mm512_rol_1x128( v ) _mm512_alignr_epi64( v, v, 6 )
static inline __m512i mm512_rol_1x128( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 6 ); }

#define mm512_ror_1x64( v ) _mm512_alignr_epi64( v, v, 1 )
#define mm512_rol_1x64( v ) _mm512_alignr_epi64( v, v, 7 )
static inline __m512i mm512_ror_1x64( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 1 ); }

#define mm512_ror_1x32( v ) _mm512_alignr_epi32( v, v, 1 )
#define mm512_rol_1x32( v ) _mm512_alignr_epi32( v, v, 15 )
static inline __m512i mm512_rol_1x64( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 7 ); }

// Generic for odd rotations
#define mm512_ror_x64( v, n ) _mm512_alignr_epi64( v, v, n )
#define mm512_rol_x64( v, n ) _mm512_alignr_epi64( v, v, 8-(n) )
static inline __m512i mm512_ror_1x32( const __m512i v )
{ return _mm512_alignr_epi32( v, v, 1 ); }

#define mm512_ror_x32( v, n ) _mm512_alignr_epi32( v, v, n )
#define mm512_rol_x32( v, n ) _mm512_alignr_epi32( v, v, 16-(n) )
static inline __m512i mm512_rol_1x32( const __m512i v )
{ return _mm512_alignr_epi32( v, v, 15 ); }

static inline __m512i mm512_ror_x64( const __m512i v, const int n )
{ return _mm512_alignr_epi64( v, v, n ); }

static inline __m512i mm512_ror_x32( const __m512i v, const int n )
{ return _mm512_alignr_epi32( v, v, n ); }

#define mm512_ror_1x16( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
@@ -411,38 +402,6 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
0x1E1D1C1B1A191817, 0x161514131211100F, \
0x0E0D0C0B0A090807, 0x060504030201003F ) )


// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm512_invert_256( v ) \
_mm512_permutexvar_epi64( v, m512_const_64( 3,2,1,0,7,6,5,4 ) )

#define mm512_invert_128( v ) \
_mm512_permutexvar_epi64( v, m512_const_64( 1,0,3,2,5,4,7,6 ) )

#define mm512_invert_64( v ) \
_mm512_permutexvar_epi64( v, m512_const_64( 0,1,2,3,4,5,6,7 ) )

#define mm512_invert_32( v ) \
_mm512_permutexvar_epi32( m512_const_64( \
0x0000000000000001,0x0000000200000003, \
0x0000000400000005,0x0000000600000007, \
0x0000000800000009,0x0000000a0000000b, \
0x0000000c0000000d,0x0000000e0000000f ), v )

#define mm512_invert_16( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
0x0000000100020003, 0x0004000500060007, \
0x00080009000A000B, 0x000C000D000E000F, \
0x0010001100120013, 0x0014001500160017, \
0x00180019001A001B, 0x001C001D001E001F ), v )

#define mm512_invert_8( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x0001020304050607, 0x08090A0B0C0D0E0F, \
0x1011121314151617, 0x18191A1B1C1D1E1F, \
0x2021222324252627, 0x28292A2B2C2D2E2F, \
0x3031323334353637, 0x38393A3B3C3D3E3F ) )

//
// Rotate elements within 256 bit lanes of 512 bit vector.

@@ -450,11 +409,10 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
#define mm512_swap256_128( v ) _mm512_permutex_epi64( v, 0x4e )

// Rotate 256 bit lanes by one 64 bit element
#define mm512_ror256_64( v ) _mm512_permutex_epi64( v, 0x39 )
#define mm512_rol256_64( v ) _mm512_permutex_epi64( v, 0x93 )
#define mm512_ror256_64( v ) _mm512_permutex_epi64( v, 0x39 )
#define mm512_rol256_64( v ) _mm512_permutex_epi64( v, 0x93 )

// Rotate 256 bit lanes by one 32 bit element

#define mm512_ror256_32( v ) \
_mm512_permutexvar_epi32( m512_const_64( \
0x000000080000000f, 0x0000000e0000000d, \
@@ -488,68 +446,41 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
0x203f3e3d3c3b3a39, 0x3837363534333231, \
0x302f2e2d2c2b2a29, 0x2827262524232221, \
0x001f1e1d1c1b1a19, 0x1817161514131211, \
0x100f0e0d0c0b0a09, 0x0807060504030201 ), v )
0x100f0e0d0c0b0a09, 0x0807060504030201 ) )

#define mm512_rol256_8( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3e3d3c3b3a393837, 0x363534333231302f, \
0x2e2d2c2b2a292827, 0x262524232221203f, \
0x1e1d1c1b1a191817, 0x161514131211100f, \
0x0e0d0c0b0a090807, 0x060504030201001f ), v )
0x0e0d0c0b0a090807, 0x060504030201001f ) )

//
// Rotate elements within 128 bit lanes of 512 bit vector.

// Swap hi & lo 64 bits in each 128 bit lane
#define mm512_swap128_64( v ) _mm512_shuffle_epi32( v, 0x4e )
// Swap 64 bits in each 128 bit lane
#define mm512_swap128_64( v ) _mm512_shuffle_epi32( v, 0x4e )

// Rotate 128 bit lanes by one 32 bit element
#define mm512_ror128_32( v ) _mm512_shuffle_epi32( v, 0x39 )
#define mm512_rol128_32( v ) _mm512_shuffle_epi32( v, 0x93 )
#define mm512_ror128_32( v ) _mm512_shuffle_epi32( v, 0x39 )
#define mm512_rol128_32( v ) _mm512_shuffle_epi32( v, 0x93 )

#define mm512_ror128_x8( v, c ) _mm512_alignr_epi8( v, v, c )
// Rotate right 128 bit lanes by c bytes
static inline __m512i mm512_ror128_x8( const __m512i v, const int c )
{ return _mm512_alignr_epi8( v, v, c ); }

/*
// Rotate 128 bit lanes by c bytes, faster than building that monstrous
// constant above.
#define mm512_ror128_8( v, c ) \
_mm512_or_si512( _mm512_bsrli_epi128( v, c ), \
_mm512_bslli_epi128( v, 16-(c) ) )
#define mm512_rol128_8( v, c ) \
_mm512_or_si512( _mm512_bslli_epi128( v, c ), \
_mm512_bsrli_epi128( v, 16-(c) ) )
*/

//
// Rotate elements within 64 bit lanes.

#define mm512_rol64_x8( v, c ) _mm512_rol_epi64( v, ((c)<<3) )
#define mm512_ror64_x8( v, c ) _mm512_ror_epi64( v, ((c)<<3) )

// Swap 32 bit elements in each 64 bit lane
#define mm512_swap64_32( v ) _mm512_shuffle_epi32( v, 0xb1 )

// Rotate each 64 bit lane by one 16 bit element.
#define mm512_ror64_16( v ) _mm512_ror_epi64( v, 16 )
#define mm512_rol64_16( v ) _mm512_rol_epi64( v, 16 )
#define mm512_ror64_8( v ) _mm512_ror_epi64( v, 8 )
#define mm512_rol64_8( v ) _mm512_rol_epi64( v, 8 )

//
// Rotate elements within 32 bit lanes.

#define mm512_rol32_x8( v, c ) _mm512_rol_epi32( v, ((c)<<2) )
#define mm512_ror32_x8( v, c ) _mm512_ror_epi32( v, ((c)<<2) )
// Swap 32 bits in each 64 bit lane.
#define mm512_swap64_32( v ) _mm512_shuffle_epi32( v, 0xb1 )


//
// Rotate elements from 2 512 bit vectors in place, source arguments
// are overwritten.

#define mm512_swap1024_512(v1, v2) \
v1 = _mm512_xor_si512(v1, v2); \
v2 = _mm512_xor_si512(v1, v2); \
v1 = _mm512_xor_si512(v1, v2);
#define mm512_swap1024_512( v1, v2 ) \
v1 = _mm512_xor_si512( v1, v2 ); \
v2 = _mm512_xor_si512( v1, v2 ); \
v1 = _mm512_xor_si512( v1, v2 );

#define mm512_ror1024_256( v1, v2 ) \
do { \

@@ -1,18 +1,18 @@
#if !defined(SIMD_64_H__)
#define SIMD_64_H__ 1

#if defined(__MMX__)
#if defined(__MMX__) && defined(__SSE__)

////////////////////////////////////////////////////////////////
//
// 64 bit MMX vectors.
//
// There are rumours MMX will be removed. Although casting with int64
// works there is likely some overhead to move the data to an MMX register
// and back.

// This code is not used anywhere and likely never will. Its intent was
// to support 2 way parallel hashing using SSE2 for 64 bit, and MMX for 32
// bit hash functions, but was never implemented.

// Pseudo constants

/*
#define m64_zero _mm_setzero_si64()
#define m64_one_64 _mm_set_pi32( 0UL, 1UL )
@@ -30,79 +30,67 @@

#define casti_m64(p,i) (((__m64*)(p))[(i)])

// cast all arguments as they're likely to be uint64_t

// Bitwise not: ~(a)
//#define mm64_not( a ) _mm_xor_si64( (__m64)a, m64_neg1 )
#define mm64_not( a ) ( (__m64)( ~( (uint64_t)(a) ) )

// Unary negate elements
#define mm64_negate_32( v ) _mm_sub_pi32( m64_zero, (__m64)v )
#define mm64_negate_16( v ) _mm_sub_pi16( m64_zero, (__m64)v )
#define mm64_negate_8( v ) _mm_sub_pi8( m64_zero, (__m64)v )
#define mm64_negate_32( v ) _mm_sub_pi32( m64_zero, v )
#define mm64_negate_16( v ) _mm_sub_pi16( m64_zero, v )
#define mm64_negate_8( v ) _mm_sub_pi8( m64_zero, v )

// Rotate bits in packed elements of 64 bit vector
#define mm64_rol_64( a, n ) \
_mm_or_si64( _mm_slli_si64( (__m64)(a), n ), \
_mm_srli_si64( (__m64)(a), 64-(n) ) )
_mm_or_si64( _mm_slli_si64( a, n ), \
_mm_srli_si64( a, 64-(n) ) )

#define mm64_ror_64( a, n ) \
_mm_or_si64( _mm_srli_si64( (__m64)(a), n ), \
_mm_slli_si64( (__m64)(a), 64-(n) ) )
_mm_or_si64( _mm_srli_si64( a, n ), \
_mm_slli_si64( a, 64-(n) ) )

#define mm64_rol_32( a, n ) \
_mm_or_si64( _mm_slli_pi32( (__m64)(a), n ), \
_mm_srli_pi32( (__m64)(a), 32-(n) ) )
_mm_or_si64( _mm_slli_pi32( a, n ), \
_mm_srli_pi32( a, 32-(n) ) )

#define mm64_ror_32( a, n ) \
_mm_or_si64( _mm_srli_pi32( (__m64)(a), n ), \
_mm_slli_pi32( (__m64)(a), 32-(n) ) )
_mm_or_si64( _mm_srli_pi32( a, n ), \
_mm_slli_pi32( a, 32-(n) ) )

#define mm64_rol_16( a, n ) \
_mm_or_si64( _mm_slli_pi16( (__m64)(a), n ), \
_mm_srli_pi16( (__m64)(a), 16-(n) ) )
_mm_or_si64( _mm_slli_pi16( a, n ), \
_mm_srli_pi16( a, 16-(n) ) )

#define mm64_ror_16( a, n ) \
_mm_or_si64( _mm_srli_pi16( (__m64)(a), n ), \
_mm_slli_pi16( (__m64)(a), 16-(n) ) )
_mm_or_si64( _mm_srli_pi16( a, n ), \
_mm_slli_pi16( a, 16-(n) ) )

// Rotate packed elements across lanes. Useful for byte swap and byte
// rotation.

// _mm_shuffle_pi8 requires SSSE3 while _mm_shuffle_pi16 requires SSE
// even though these are MMX instructions.

// Swap hi & lo 32 bits.
#define mm64_swap32( a ) _mm_shuffle_pi16( (__m64)(a), 0x4e )
#define mm64_swap_32( a ) _mm_shuffle_pi16( a, 0x4e )

#define mm64_ror1x16_64( a ) _mm_shuffle_pi16( (__m64)(a), 0x39 )
#define mm64_rol1x16_64( a ) _mm_shuffle_pi16( (__m64)(a), 0x93 )
#define mm64_ror64_1x16( a ) _mm_shuffle_pi16( a, 0x39 )
#define mm64_rol64_1x16( a ) _mm_shuffle_pi16( a, 0x93 )

// Swap hi & lo 16 bits of each 32 bit element
#define mm64_swap16_32( a ) _mm_shuffle_pi16( (__m64)(a), 0xb1 )
#define mm64_swap32_16( a ) _mm_shuffle_pi16( a, 0xb1 )

#if defined(__SSSE3__)

// Endian byte swap packed elements
// A vectorized version of the u64 bswap, use when data already in MMX reg.
#define mm64_bswap_64( v ) \
_mm_shuffle_pi8( (__m64)v, (__m64)0x0001020304050607 )

#define mm64_bswap_32( v ) \
_mm_shuffle_pi8( (__m64)v, (__m64)0x0405060700010203 )
_mm_shuffle_pi8( v, (__m64)0x0405060700010203 )

#define mm64_bswap_16( v ) \
_mm_shuffle_pi8( (__m64)v, (__m64)0x0607040502030001 );
_mm_shuffle_pi8( v, (__m64)0x0607040502030001 );

// Rotate right by c bytes
static inline __m64 mm64_ror_x8( __m64 v, const int c )
{ return _mm_alignr_pi8( v, v, c ); }

#else

#define mm64_bswap_64( v ) \
(__m64)__builtin_bswap64( (uint64_t)v )

// These exist only for compatibility with CPUs without SSSE3. MMX doesn't
// have extract 32 instruction so pointers are needed to access elements.
// It's more efficient for the caller to use scalar variables and call
// bswap_32 directly.
#define mm64_bswap_32( v ) \
_mm_set_pi32( __builtin_bswap32( ((uint32_t*)&v)[1] ), \
__builtin_bswap32( ((uint32_t*)&v)[0] ) )
@@ -115,17 +103,6 @@

#endif

// 64 bit mem functions use integral sizes instead of bytes, data must
// be aligned to 64 bits.
static inline void memcpy_m64( __m64 *dst, const __m64 *src, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = src[i]; }

static inline void memset_zero_m64( __m64 *src, int n )
{ for ( int i = 0; i < n; i++ ) src[i] = (__m64)0ULL; }

static inline void memset_m64( __m64 *dst, const __m64 a, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }

#endif // MMX

#endif // SIMD_64_H__

@@ -1,69 +1,16 @@
#if !defined(SIMD_INT_H__)
#define SIMD_INT_H__ 1

///////////////////////////////////
//
// Integers up to 128 bits.
//
// These utilities enhance support for integers up to 128 bits.
// All standard operations are supported on 128 bit integers except
// numeric constant representation and IO. 128 bit integers must be built
// and displayed as 2 64 bit halves, just like the old times.
//
// Some utilities are also provided for smaller integers, most notably
// bit rotation.



// MMX has no extract instruction for 32 bit elements so this:
// Lo is trivial, high is a simple shift.
// Input may be uint64_t or __m64, returns uint32_t.
#define u64_extr_lo32(a) ( (uint32_t)( (uint64_t)(a) ) )
#define u64_extr_hi32(a) ( (uint32_t)( ((uint64_t)(a)) >> 32) )

#define u64_extr_32( a, n ) ( (uint32_t)( (a) >> ( ( 2-(n)) <<5 ) ) )
#define u64_extr_16( a, n ) ( (uint16_t)( (a) >> ( ( 4-(n)) <<4 ) ) )
#define u64_extr_8( a, n ) ( (uint8_t) ( (a) >> ( ( 8-(n)) <<3 ) ) )

// Rotate bits in various sized integers.
#define u64_ror_64( x, c ) \
(uint64_t)( ( (uint64_t)(x) >> (c) ) | ( (uint64_t)(x) << (64-(c)) ) )
#define u64_rol_64( x, c ) \
(uint64_t)( ( (uint64_t)(x) << (c) ) | ( (uint64_t)(x) >> (64-(c)) ) )
#define u32_ror_32( x, c ) \
(uint32_t)( ( (uint32_t)(x) >> (c) ) | ( (uint32_t)(x) << (32-(c)) ) )
#define u32_rol_32( x, c ) \
(uint32_t)( ( (uint32_t)(x) << (c) ) | ( (uint32_t)(x) >> (32-(c)) ) )
#define u16_ror_16( x, c ) \
(uint16_t)( ( (uint16_t)(x) >> (c) ) | ( (uint16_t)(x) << (16-(c)) ) )
#define u16_rol_16( x, c ) \
(uint16_t)( ( (uint16_t)(x) << (c) ) | ( (uint16_t)(x) >> (16-(c)) ) )
#define u8_ror_8( x, c ) \
(uint8_t) ( ( (uint8_t) (x) >> (c) ) | ( (uint8_t) (x) << ( 8-(c)) ) )
#define u8_rol_8( x, c ) \
(uint8_t) ( ( (uint8_t) (x) << (c) ) | ( (uint8_t) (x) >> ( 8-(c)) ) )

// Endian byte swap
#define bswap_64( a ) __builtin_bswap64( a )
#define bswap_32( a ) __builtin_bswap32( a )

// 64 bit mem functions use integral sizes instead of bytes, data must
// be aligned to 64 bits. Mostly for scaled indexing convenience.
static inline void memcpy_64( uint64_t *dst, const uint64_t *src, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = src[i]; }

static inline void memset_zero_64( uint64_t *src, int n )
{ for ( int i = 0; i < n; i++ ) src[i] = 0ull; }

static inline void memset_64( uint64_t *dst, const uint64_t a, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }


///////////////////////////////////////
//
// 128 bit integers
//
// 128 bit integers are inefficient and not a shortcut for __m128i.
// 128 bit integers are inefficient and not a shortcut for __m128i.
// Native type __int128 supported starting with GCC-4.8.
//
// __int128 uses two 64 bit GPRs to hold the data. The main benefits are
@@ -94,31 +41,12 @@ static inline void memset_64( uint64_t *dst, const uint64_t a, int n )
typedef __int128 int128_t;
typedef unsigned __int128 uint128_t;



// Maybe useful for making constants.
#define mk_uint128( hi, lo ) \
( ( (uint128_t)(hi) << 64 ) | ( (uint128_t)(lo) ) )


// Extracting the low bits is a trivial cast.
// These specialized functions are optimized while providing a
// consistent interface.
#define u128_hi64( x ) ( (uint64_t)( (uint128_t)(x) >> 64 ) )
#define u128_lo64( x ) ( (uint64_t)(x) )

// Generic extract, don't use for extracting low bits, cast instead.
#define u128_extr_64( a, n ) ( (uint64_t)( (a) >> ( ( 2-(n)) <<6 ) ) )
#define u128_extr_32( a, n ) ( (uint32_t)( (a) >> ( ( 4-(n)) <<5 ) ) )
#define u128_extr_16( a, n ) ( (uint16_t)( (a) >> ( ( 8-(n)) <<4 ) ) )
#define u128_extr_8( a, n ) ( (uint8_t) ( (a) >> ( (16-(n)) <<3 ) ) )

// Not much need for this but it fills a gap.
#define u128_ror_128( x, c ) \
( ( (uint128_t)(x) >> (c) ) | ( (uint128_t)(x) << (128-(c)) ) )
#define u128_rol_128( x, c ) \
( ( (uint128_t)(x) << (c) ) | ( (uint128_t)(x) >> (128-(c)) ) )
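
// Small illustrative example (not from the header): multiply two 64 bit
// values and split the 128 bit product with the helpers above.
//
// static inline void mul64_wide( uint64_t a, uint64_t b,
//                                uint64_t *hi, uint64_t *lo )
// {
//    const uint128_t p = (uint128_t)a * b;
//    *hi = u128_hi64( p );
//    *lo = u128_lo64( p );
// }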

#endif // GCC_INT128

#endif // SIMD_INT_H__
