This commit is contained in:
Jay D Dee
2019-07-30 10:16:43 -04:00
parent a51f59086b
commit 9d49e0be7a
66 changed files with 1949 additions and 1470 deletions

View File

@@ -677,41 +677,40 @@ static inline void mm512_bswap32_intrlv80_16x32( void *d, void *src )
{
__m512i s0 = mm512_bswap_32( casti_m512i( src, 0 ) );
__m128i s1 = mm128_bswap_32( casti_m128i( src, 4 ) );
const __m512i zero = m512_zero;
const __m512i one = m512_one_32;
const __m512i two = _mm512_add_epi32( one, one );
const __m512i three = _mm512_add_epi32( two, one );
const __m512i four = _mm512_add_epi32( two, two );
const __m512i eight = _mm512_add_epi32( four, four );
const __m512i eleven = _mm512_add_epi32( eight, three );
const __m512i one = m512_one_32;
const __m512i two = _mm512_add_epi32( one, one );
const __m512i three = _mm512_add_epi32( two, one );
__m512i x = _mm512_add_epi32( three, three );
casti_m512i( d, 0 ) = _mm512_permutexvar_epi32( s0, zero );
casti_m512i( d, 1 ) = _mm512_permutexvar_epi32( s0, one );
casti_m512i( d, 2 ) = _mm512_permutexvar_epi32( s0, two );
casti_m512i( d, 3 ) = _mm512_permutexvar_epi32( s0, three );
casti_m512i( d, 4 ) = _mm512_permutexvar_epi32( s0, four );
casti_m512i( d, 0 ) = _mm512_permutexvar_epi32( s0, m512_zero );
casti_m512i( d, 1 ) = _mm512_permutexvar_epi32( s0, one );
casti_m512i( d, 2 ) = _mm512_permutexvar_epi32( s0, two );
casti_m512i( d, 3 ) = _mm512_permutexvar_epi32( s0, three );
casti_m512i( d, 4 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( two, two ) );
casti_m512i( d, 5 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( four, one ) );
casti_m512i( d, 6 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( four, two ) );
_mm512_add_epi32( three, two ) );
casti_m512i( d, 6 ) = _mm512_permutexvar_epi32( s0, x );
casti_m512i( d, 7 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( four, three ) );
casti_m512i( d, 8 ) = _mm512_permutexvar_epi32( s0, eight );
casti_m512i( d, 9 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( eight, one ) );
_mm512_add_epi32( x, one ) );
casti_m512i( d, 8 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, two ) );
x = _mm512_add_epi32( x, three );
casti_m512i( d, 9 ) = _mm512_permutexvar_epi32( s0, x );
casti_m512i( d,10 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( eight, two ) );
casti_m512i( d,11 ) = _mm512_permutexvar_epi32( s0, eleven );
casti_m512i( d,12 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( eleven, one ) );
casti_m512i( d,13 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( eleven, two ) );
casti_m512i( d,14 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( eleven, three ) );
_mm512_add_epi32( x, one ) );
casti_m512i( d,11 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, two ) );
x = _mm512_add_epi32( x, three );
casti_m512i( d,12 ) = _mm512_permutexvar_epi32( s0, x );
casti_m512i( d,13 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, one ) );
casti_m512i( d,14 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, two ) );
casti_m512i( d,15 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( eleven, four ) );
_mm512_add_epi32( x, three ) );
casti_m512i( d,16 ) = _mm512_permutexvar_epi32(
_mm512_castsi128_si512( s1 ), zero );
_mm512_castsi128_si512( s1 ), m512_zero );
casti_m512i( d,17 ) = _mm512_permutexvar_epi32(
_mm512_castsi128_si512( s1 ), one );
casti_m512i( d,18 ) = _mm512_permutexvar_epi32(
@@ -769,14 +768,14 @@ static inline void dintrlv_2x64( void *dst0, void *dst1,
// 4x64 (AVX2)
static inline void intrlv_4x64( void *dst, const void *src0,
const void *src1, const void *src2, const void *src3, int bit_len )
static inline void intrlv_4x64( void *dst, void *src0,
void *src1, void *src2, void *src3, int bit_len )
{
uint64_t *d = (uint64_t*)dst;
const uint64_t *s0 = (const uint64_t*)src0;
const uint64_t *s1 = (const uint64_t*)src1;
const uint64_t *s2 = (const uint64_t*)src2;
const uint64_t *s3 = (const uint64_t*)src3;
uint64_t *s0 = (uint64_t*)src0;
uint64_t *s1 = (uint64_t*)src1;
uint64_t *s2 = (uint64_t*)src2;
uint64_t *s3 = (uint64_t*)src3;
d[ 0] = s0[ 0]; d[ 1] = s1[ 0]; d[ 2] = s2[ 0]; d[ 3] = s3[ 0];
d[ 4] = s0[ 1]; d[ 5] = s1[ 1]; d[ 6] = s2[ 1]; d[ 7] = s3[ 1];
d[ 8] = s0[ 2]; d[ 9] = s1[ 2]; d[ 10] = s2[ 2]; d[ 11] = s3[ 2];
@@ -870,10 +869,12 @@ static inline void extr_lane_4x64( void *d, const void *s,
((uint64_t*)d)[ 1] = ((uint64_t*)s)[ lane+ 4 ];
((uint64_t*)d)[ 2] = ((uint64_t*)s)[ lane+ 8 ];
((uint64_t*)d)[ 3] = ((uint64_t*)s)[ lane+12 ];
if ( bit_len <= 256 ) return;
((uint64_t*)d)[ 4] = ((uint64_t*)s)[ lane+16 ];
((uint64_t*)d)[ 5] = ((uint64_t*)s)[ lane+20 ];
((uint64_t*)d)[ 6] = ((uint64_t*)s)[ lane+24 ];
((uint64_t*)d)[ 7] = ((uint64_t*)s)[ lane+28 ];
/*
if ( bit_len <= 256 ) return;
((uint64_t*)d)[ 8] = ((uint64_t*)s)[ lane+32 ];
((uint64_t*)d)[ 9] = ((uint64_t*)s)[ lane+36 ];
@@ -883,6 +884,7 @@ static inline void extr_lane_4x64( void *d, const void *s,
((uint64_t*)d)[13] = ((uint64_t*)s)[ lane+52 ];
((uint64_t*)d)[14] = ((uint64_t*)s)[ lane+56 ];
((uint64_t*)d)[15] = ((uint64_t*)s)[ lane+60 ];
*/
}
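// Editor's sketch: copying one lane's 256 bit result out of 4x64 interleaved
// state, assuming the ( dst, src, lane, bit_len ) parameter order above;
// buffer names are hypothetical.
static inline void get_lane2_hash_example( void *hash, const void *vhash )
{
   extr_lane_4x64( hash, vhash, 2, 256 );   // lane 2, first 256 bits
}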
#if defined(__AVX2__)
@@ -984,10 +986,12 @@ static inline void extr_lane_8x64( void *d, const void *s,
((uint64_t*)d)[ 1] = ((uint64_t*)s)[ lane+ 8 ];
((uint64_t*)d)[ 2] = ((uint64_t*)s)[ lane+ 16 ];
((uint64_t*)d)[ 3] = ((uint64_t*)s)[ lane+ 24 ];
if ( bit_len <= 256 ) return;
((uint64_t*)d)[ 4] = ((uint64_t*)s)[ lane+ 32 ];
((uint64_t*)d)[ 5] = ((uint64_t*)s)[ lane+ 40 ];
((uint64_t*)d)[ 6] = ((uint64_t*)s)[ lane+ 48 ];
((uint64_t*)d)[ 7] = ((uint64_t*)s)[ lane+ 56 ];
/*
if ( bit_len <= 256 ) return;
((uint64_t*)d)[ 8] = ((uint64_t*)s)[ lane+ 64 ];
((uint64_t*)d)[ 9] = ((uint64_t*)s)[ lane+ 72 ];
@@ -997,6 +1001,7 @@ static inline void extr_lane_8x64( void *d, const void *s,
((uint64_t*)d)[13] = ((uint64_t*)s)[ lane+104 ];
((uint64_t*)d)[14] = ((uint64_t*)s)[ lane+112 ];
((uint64_t*)d)[15] = ((uint64_t*)s)[ lane+120 ];
*/
}
#if defined(__AVX512F__) && defined(__AVX512VL__)
@@ -1006,13 +1011,13 @@ static inline void mm512_bswap32_intrlv80_8x64( void *dst, void *src )
__m512i *d = (__m512i*)dst;
__m512i s0 = mm512_bswap_32( casti_m512i(src, 0 ) );
__m128i s1 = mm128_bswap_32( casti_m128i(src, 4 ) );
const __m512i zero = m512_zero;
// const __m512i zero = m512_zero;
const __m512i one = m512_one_64;
const __m512i two = _mm512_add_epi64( one, one );
const __m512i three = _mm512_add_epi64( two, one );
const __m512i four = _mm512_add_epi64( two, two );
d[0] = _mm512_permutexvar_epi64( s0, zero );
d[0] = _mm512_permutexvar_epi64( s0, m512_zero );
d[1] = _mm512_permutexvar_epi64( s0, one );
d[2] = _mm512_permutexvar_epi64( s0, two );
d[3] = _mm512_permutexvar_epi64( s0, three );
@@ -1021,7 +1026,7 @@ static inline void mm512_bswap32_intrlv80_8x64( void *dst, void *src )
d[6] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, two ) );
d[7] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, three ) );
d[8] = _mm512_permutexvar_epi64(
_mm512_castsi128_si512( s1 ), zero );
_mm512_castsi128_si512( s1 ), m512_zero );
d[9] = _mm512_permutexvar_epi64(
_mm512_castsi128_si512( s1 ), one );
}
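// Editor's sketch: a typical call site, byte swapping an 80 byte block header
// into 8x64 interleaved form; buffer names are hypothetical.
static inline void prep_vdata_example( void *vdata, void *pdata )
{
   mm512_bswap32_intrlv80_8x64( vdata, pdata );   // 80 bytes in, 8 lanes out
}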

View File

@@ -10,29 +10,23 @@
// SSE2 is generally required for full 128 bit support. Some functions
// are also optimized with SSSE3 or SSE4.1.
//
// Do not call _mm_extract directly, it isn't supported in SSE2.
// Use mm128_extr instead, it will select the appropriate implementation.
// Do not call the _mm_extract intrinsic directly; it isn't supported in SSE2.
// Use the mm128_extr macro instead; it will select the appropriate implementation.
//
// 128 bit operations are enhanced with uint128 which adds 128 bit integer
// support for arithmetic and other operations. Casting to uint128_t is not
// efficient but is sometimes the only way for certain operations.
// free but is sometimes the only way for certain operations.
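// Editor's sketch: a 128 bit add by casting through uint128_t, assuming
// uint128_t is the codebase's unsigned __int128 typedef; the cast forces a
// round trip that is not free.
static inline __m128i add128_example( __m128i a, __m128i b )
{
   uint128_t sum = *( (uint128_t*)&a ) + *( (uint128_t*)&b );
   return *( (__m128i*)&sum );
}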
//
// Constants are an issue with simd. Simply put, immediate constants don't
// exist. All simd constants either reside in memory or a register.
// The distinction is made below with c128 being memory resident defined
// at compile time and m128 being register defined at run time.
// exist. All simd constants either reside in memory or a register and
// must be loaded or generated at run time.
//
// All run time constants must be generated from their component elements,
// incurring significant overhead. The more elements, the more overhead,
// both in instructions and in GP register usage. Whenever possible use
// 64 bit constant elements regardless of the actual element size.
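// Editor's sketch: building a byte constant from one 64 bit element instead
// of sixteen 8 bit elements; the helper is hypothetical.
static inline __m128i ones8_example()
{
// return _mm_set1_epi8( 1 );                       // 16 narrow elements
   return _mm_set1_epi64x( 0x0101010101010101ULL ); // same value, 1 element per lane
}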
//
// Due to the cost of generating constants they should not be regenerated
// in the same function. Instead, define a local const.
// Due to the cost of generating constants it is often more efficient to
// define a local const for repeated references to the same constant.
//
// Some constant values can be generated using shortcuts. Zero for example
// is as simple as XORing any register with itself, and is implemented
// in the setzero intrinsic. These shortcuts must be implemented is asm
// in the setzero intrinsic. These shortcuts must be implemented using ASM
// due to doing things the compiler would complain about. Another single
// instruction constant is -1, defined below. Others may be added as the need
// arises. Even single instruction constants are less efficient than local
@@ -43,87 +37,59 @@
// into account. Those that generate a simd constant should not be used
// repeatedly. It may be better for the application to reimplement the
// utility to better suit its usage.
//
// More tips:
//
// Conversions from integer to vector should be avoided whenever possible.
// Extract, insert, set, and set1 instructions should be avoided.
// In addition to the issues with constants, set is also very inefficient with
// variables.
// Converting integer data to perform a couple of vector operations
// then converting back to integer should be avoided. Converting data in
// registers should also be avoided. Conversion should be limited to buffers
// in memory where the data is loaded directly to vector registers, bypassing
// the integer to vector conversion.
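// Editor's sketch: load vector data straight from memory instead of building
// it from integers; casti_m128i is assumed to be the codebase's cast-index
// macro, the buffer and values are hypothetical.
static inline void load_vs_set_example( void *buf, uint64_t a, uint64_t b )
{
   __m128i v_mem = casti_m128i( buf, 0 );    // direct load, no GP registers involved
   __m128i v_set = _mm_set_epi64x( b, a );   // integer to vector, extra moves/inserts
   casti_m128i( buf, 1 ) = _mm_add_epi64( v_mem, v_set );
}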
//
// Pseudo constants.
//
// These can't be used for compile time initialization.
// These should be used for all simple vectors.
// Repeated usage of any simd pseudo-constant should use a locally defined
// const rather than recomputing it for every reference.
#define m128_zero _mm_setzero_si128()
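// Editor's sketch: hoist a reused pseudo-constant into a local const rather
// than re-expanding the macro on every iteration; the function is hypothetical.
static inline void hoisted_const_example( __m128i *d, int n )
{
   const __m128i zero = m128_zero;              // generated once
   for ( int i = 0; i < n; i++ ) d[i] = zero;   // reused from a register
}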
// As suggested by Intel...
// Arg passing for simd registers is assumed to be first output arg,
// then input args, then locals. This is probably wrong; gcc likely picks
// whichever register is currently holding the variable, or whichever
// register is available to hold it. Nevertheless, all args are specified
// by their arg number and local variables use registers starting at
// last arg + 1, by type.
// Output args don't need to be listed as clobbered.
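// Editor's sketch of the operand numbering and clobber convention described
// above; the function, values and scratch register are illustrative only.
static inline __m128i asm_args_example( uint64_t n )
{
   register __m128i a;
   asm( "pxor %%xmm7, %%xmm7\n\t"   // hard coded local scratch register
        "movq %1, %0\n\t"           // %1 = input n, %0 = output a
        "por %%xmm7, %0\n\t"
        : "=x" (a)                  // output args, not listed as clobbered
        : "r" (n)                   // input args follow the outputs
        : "xmm7" );                 // locals must be declared clobbered
   return a;
}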
static inline __m128i m128_one_128_fn()
{
register __m128i a;
asm( "movq $1, %0\n\t"
: "=x"(a) );
return a;
}
#define m128_one_128 m128_one_128_fn()
static inline __m128i m128_one_64_fn()
{
__m128i a;
asm( "pxor %0, %0\n\t"
"pcmpeqd %%xmm1, %%xmm1\n\t"
"psubq %%xmm1, %0\n\t"
register uint64_t one = 1;
register __m128i a;
asm( "movq %1, %0\n\t"
: "=x"(a)
:
: "xmm1" );
return a;
: "r"(one) );
return _mm_shuffle_epi32( a, 0x04 );
}
#define m128_one_64 m128_one_64_fn()
static inline __m128i m128_one_32_fn()
{
__m128i a;
asm( "pxor %0, %0\n\t"
"pcmpeqd %%xmm1, %%xmm1\n\t"
"psubd %%xmm1, %0\n\t"
register uint32_t one = 1;
register __m128i a;
asm( "movd %1, %0\n\t"
: "=x"(a)
:
: "xmm1" );
return a;
: "r"(one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_32 m128_one_32_fn()
static inline __m128i m128_one_16_fn()
{
__m128i a;
asm( "pxor %0, %0\n\t"
"pcmpeqd %%xmm1, %%xmm1\n\t"
"psubw %%xmm1, %0\n\t"
register uint32_t one = 0x00010001;
register __m128i a;
asm( "movd %1, %0\n\t"
: "=x"(a)
:
: "xmm1" );
return a;
: "r"(one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_16 m128_one_16_fn()
static inline __m128i m128_one_8_fn()
{
__m128i a;
asm( "pxor %0, %0\n\t"
"pcmpeqd %%xmm1, %%xmm1\n\t"
"psubb %%xmm1, %0\n\t"
register uint32_t one = 0x01010101;
register __m128i a;
asm( "movd %1, %0\n\t"
: "=x"(a)
:
: "xmm1" );
return a;
: "r"(one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_8 m128_one_8_fn()
@@ -136,35 +102,73 @@ static inline __m128i m128_neg1_fn()
}
#define m128_neg1 m128_neg1_fn()
// move uint64_t to low bits of __m128i, zeros the rest
static inline __m128i mm128_mov64_128( uint64_t n )
{
register __m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (n) );
return a;
}
static inline __m128i mm128_mov32_128( uint32_t n )
{
register __m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (n) );
return a;
}
static inline uint64_t mm128_mov128_64( __m128i a )
{
register uint64_t n;
asm( "movq %1, %0\n\t"
: "=x" (n)
: "r" (a) );
return n;
}
static inline uint32_t mm128_mov128_32( __m128i a )
{
register uint32_t n;
asm( "movd %1, %0\n\t"
: "=x" (n)
: "r" (a) );
return n;
}
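// Editor's sketch: GP <-> vector moves using the helpers above instead of
// set/extract; the value is hypothetical.
static inline uint64_t mov_roundtrip_example()
{
   __m128i v = mm128_mov64_128( 0x0123456789abcdefULL );  // low 64 bits set, upper zeroed
   return mm128_mov128_64( v );                           // back to a GP register
}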
#if defined(__SSE41__)
static inline __m128i m128_one_128_fn()
{
register uint64_t one = 1;
register __m128i a;
asm( "movq %1, %0\n\t"      // movq from a GP register zeroes the upper 64 bits
: "=x"(a)
: "r"(one) );
return a;
}
#define m128_one_128 m128_one_128_fn()
// alternative to _mm_set_epi64x, doesn't use mem,
// cost = 2 pinsert, estimate 4 clocks.
static inline __m128i m128_const_64( uint64_t hi, uint64_t lo )
static inline __m128i m128_const_64( const uint64_t hi, const uint64_t lo )
{
__m128i a;
asm( "pinsrq $0, %2, %0\n\t"
register __m128i a;
asm( "movq %2, %0\n\t"
"pinsrq $1, %1, %0\n\t"
: "=x"(a)
: "r"(hi), "r"(lo) );
return a;
}
}
static inline __m128i m128_const1_64( const uint64_t n )
{
register __m128i a;
asm( "movq %1, %0\n\t"
"pinsrq $1, %1, %0\n\t"
: "=x"(a)
: "r"(n) );
return a;
}
#else
#define m128_one_128 _mm_set_epi64x( 0ULL, 1ULL )
// #define m128_one_128 _mm_set_epi64x( 0ULL, 1ULL )
#define m128_const_64 _mm_set_epi64x
#define m128_const1_64 _mm_set1_epi64x
#endif
@@ -309,13 +313,13 @@ do { \
// Assumes data is aligned and integral.
// n = number of __m128i, bytes/16
static inline void memset_zero_128( __m128i *dst, int n )
static inline void memset_zero_128( __m128i *dst, const int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = m128_zero; }
static inline void memset_128( __m128i *dst, const __m128i a, int n )
static inline void memset_128( __m128i *dst, const __m128i a, const int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }
static inline void memcpy_128( __m128i *dst, const __m128i *src, int n )
static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
{ for ( int i = 0; i < n; i ++ ) dst[i] = src[i]; }
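// Editor's sketch: n counts __m128i elements, i.e. bytes/16; the 80 byte
// buffer is hypothetical.
static inline void clear80_example( void *buf )
{
   memset_zero_128( (__m128i*)buf, 80/16 );   // n = 5
}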
@@ -383,13 +387,16 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n )
//
// Rotate elements within lanes.
// Equivalent to mm128_ror_64( v, 32 )
#define mm128_swap32_64( v ) _mm_shuffle_epi32( v, 0xb1 )
// Equivalent to mm128_ror_64( v, 16 )
#define mm128_ror16_64( v ) _mm_shuffle_epi8( v, \
m128_const_64( 0x09080f0e0d0c0b0a, 0x0100070605040302 ) )
#define mm128_rol16_64( v ) _mm_shuffle_epi8( v, \
m128_const_64( 0x0d0c0b0a09080f0e, 0x0504030201000706 ) )
// Equivalent to mm128_ror_32( v, 16 )
#define mm128_swap16_32( v ) _mm_shuffle_epi8( v, \
m128_const_64( 0x0d0c0f0e09080b0a, 0x0504070601000302 ) )
@@ -459,7 +466,7 @@ static inline __m128i mm128_bswap_16( __m128i v )
return _mm_or_si128( _mm_slli_epi16( v, 8 ), _mm_srli_epi16( v, 8 ) );
}
static inline void mm128_block_bswap_64( __m128i *d, __m128i *s )
static inline void mm128_block_bswap_64( __m128i *d, const __m128i *s )
{
d[0] = mm128_bswap_64( s[0] );
d[1] = mm128_bswap_64( s[1] );
@@ -471,7 +478,7 @@ static inline void mm128_block_bswap_64( __m128i *d, __m128i *s )
d[7] = mm128_bswap_64( s[7] );
}
static inline void mm128_block_bswap_32( __m128i *d, __m128i *s )
static inline void mm128_block_bswap_32( __m128i *d, const __m128i *s )
{
d[0] = mm128_bswap_32( s[0] );
d[1] = mm128_bswap_32( s[1] );

View File

@@ -15,91 +15,88 @@
// is available.
//
// Pseudo constants.
// These can't be used for compile time initialization but are preferable
// for simple constant vectors at run time. For repeated use define a local
// constant to avoid multiple calls to the same macro.
// All SIMD constant macros are actually functions containing executable
// code and therefore can't be used as compile time initializers.
#define m256_zero _mm256_setzero_si256()
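// Editor's sketch: the constant macros expand to run time code, so they
// cannot initialize file scope or static data.
// static const __m256i BAD = m256_zero;       // error: initializer is not constant
static inline __m256i make_zero_example() { return m256_zero; }   // fine at run time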
#define m256_one_256 \
_mm256_insertf128_si256( _mm256_castsi128_si256( m128_one_128 ), \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_zero, 1 )
#define m256_one_128 \
_mm256_insertf128_si256( _mm256_castsi128_si256( m128_one_128 ), \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_one_128, 1 )
// The set instructions load memory resident constants; this macro avoids the memory access.
// cost 4 pinsert + 1 vinsert, estimate 7 clocks.
// Avoid using it; calling mm128_const_64 twice is still faster.
// cost 4 pinsert + 1 vinsert, estimate 8 clocks latency.
#define m256_const_64( i3, i2, i1, i0 ) \
_mm256_insertf128_si256( _mm256_castsi128_si256( m128_const_64( i1, i0 ) ), \
m128_const_64( i3, i2 ), 1 )
#define m256_const1_64( i ) m256_const_64( i, i, i, i )
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_const_64( i1, i0 ) ), \
m128_const_64( i3, i2 ), 1 )
static inline __m256i m256_const1_64( uint64_t i )
{
register __m128i a;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r"(i) );
return _mm256_broadcastq_epi64( a );
}
#if defined(__AVX2__)
// These look like a lot of overhead but the compiler optimizes nicely
// and puts the asm inline in the calling function. Usage is like any
// variable expression.
// Don't call the function directly, use the macro to make it appear like
// a constant identifier instead of a function.
// __m256i foo = m256_one_64;
static inline __m256i m256_one_64_fn()
{
__m256i a;
asm( "vpxor %0, %0, %0\n\t"
"vpcmpeqd %%ymm1, %%ymm1, %%ymm1\n\t"
"vpsubq %%ymm1, %0, %0\n\t"
: "=x"(a)
:
: "ymm1" );
return a;
register uint64_t one = 1;
register __m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_64 m256_one_64_fn()
static inline __m256i m256_one_32_fn()
{
__m256i a;
asm( "vpxor %0, %0, %0\n\t"
"vpcmpeqd %%ymm1, %%ymm1, %%ymm1\n\t"
"vpsubd %%ymm1, %0, %0\n\t"
: "=x"(a)
:
: "ymm1" );
return a;
register uint64_t one = 0x0000000100000001;
register __m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_32 m256_one_32_fn()
static inline __m256i m256_one_16_fn()
{
__m256i a;
asm( "vpxor %0, %0, %0\n\t"
"vpcmpeqd %%ymm1, %%ymm1, %%ymm1\n\t"
"vpsubw %%ymm1, %0, %0\n\t"
: "=x"(a)
:
: "ymm1" );
return a;
register uint64_t one = 0x0001000100010001;
register __m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_16 m256_one_16_fn()
static inline __m256i m256_one_8_fn()
{
__m256i a;
asm( "vpxor %0, %0, %0\n\t"
"vpcmpeqd %%ymm1, %%ymm1, %%ymm1\n\t"
"vpsubb %%ymm1, %0, %0\n\t"
: "=x"(a)
:
: "ymm1" );
return a;
register uint64_t one = 0x0101010101010101;
register __m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_8 m256_one_8_fn()
static inline __m256i m256_neg1_fn()
{
__m256i a;
register __m256i a;
asm( "vpcmpeqq %0, %0, %0\n\t"
: "=x"(a) );
return a;
@@ -114,16 +111,16 @@ static inline __m256i m256_neg1_fn()
#define m256_one_8 _mm256_set1_epi64x( 0x0101010101010101ULL )
// AVX doesn't have inserti128 but insertf128 will do.
// Ideally this can be done with 2 instructions and no temporary variables.
static inline __m256i m256_neg1_fn()
{
__m128i a = m128_neg1;
return _mm256_insertf128_si256( _mm256_castsi128_si256( a ), a, 1 );
}
#define m256_neg1 m256_neg1_fn()
//#define m256_neg1 _mm256_set1_epi64x( 0xFFFFFFFFFFFFFFFFULL )
#endif // AVX2 else AVX
//
// Vector size conversion.
//
@@ -139,11 +136,11 @@ static inline __m256i m256_neg1_fn()
#define mm128_extr_lo128_256( a ) _mm256_castsi256_si128( a )
#define mm128_extr_hi128_256( a ) _mm256_extracti128_si256( a, 1 )
// Extract 4 u64 from 256 bit vector.
// Extract integers from a 256 bit vector; inefficient, avoid if possible.
#define mm256_extr_4x64( a0, a1, a2, a3, src ) \
do { \
__m128i hi = _mm256_extracti128_si256( src, 1 ); \
a0 = _mm_extract_epi64( _mm256_castsi256_si128( src ), 0 ); \
a0 = mm256_mov256_64( src ); \
a1 = _mm_extract_epi64( _mm256_castsi256_si128( src ), 1 ); \
a2 = _mm_extract_epi64( hi, 0 ); \
a3 = _mm_extract_epi64( hi, 1 ); \
@@ -152,28 +149,43 @@ do { \
#define mm256_extr_8x32( a0, a1, a2, a3, a4, a5, a6, a7, src ) \
do { \
__m128i hi = _mm256_extracti128_si256( src, 1 ); \
a0 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 0 ); \
a0 = mm256_mov256_32( src ); \
a1 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 1 ); \
a2 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 2 ); \
a3 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 3 ); \
a4 = _mm_extract_epi32( hi, 0 ); \
a4 = mm128_mov128_32( hi ); \
a5 = _mm_extract_epi32( hi, 1 ); \
a6 = _mm_extract_epi32( hi, 2 ); \
a7 = _mm_extract_epi32( hi, 3 ); \
} while(0)
// input __m128i, returns __m256i
// To build a 256 bit vector from 2 128 bit vectors lo must be done first.
// lo alone leaves hi undefined, hi alone leaves lo unchanged.
// Both cost one clock while preserving the other half.
// Insert b into the specified half of a, leaving the other half of a unchanged.
#define mm256_ins_lo128_256( a, b ) _mm256_inserti128_si256( a, b, 0 )
#define mm256_ins_hi128_256( a, b ) _mm256_inserti128_si256( a, b, 1 )
// concatenate two 128 bit vectors into one 256 bit vector: { hi, lo }
#define mm256_concat_128( hi, lo ) \
mm256_ins_hi128_256( _mm256_castsi128_si256( lo ), hi )
_mm256_inserti128_si256( _mm256_castsi128_si256( lo ), hi, 1 )
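// Editor's sketch: building a 256 bit vector from two 128 bit halves, doing
// the lo half first as noted above; the argument names are hypothetical.
static inline __m256i build_from_halves_example( __m128i hi, __m128i lo )
{
   __m256i v = _mm256_castsi128_si256( lo );   // lo lane set, hi lane undefined
   return mm256_ins_hi128_256( v, hi );        // fill the hi lane
}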
// Move integer to lower bits of vector, upper bits set to zero.
static inline __m256i mm256_mov64_256( uint64_t n )
{
register __m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (n) );
return _mm256_castsi128_si256( a );
}
static inline __m256i mm256_mov32_256( uint32_t n )
{
register __m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (n) );
return _mm256_castsi128_si256( a );
}
// Move lo bits of vector to integer, hi bits are truncated.
#define mm256_mov256_64( a ) mm128_mov128_64( _mm256_castsi256_si128( a ) )
#define mm256_mov256_32( a ) mm128_mov128_32( _mm256_castsi256_si128( a ) )
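// Editor's sketch: unpacking a vector to scalars with the extract macro
// defined earlier; best avoided in hot code, names are hypothetical.
static inline void extr4x64_example( uint64_t *out, __m256i v )
{
   uint64_t a0, a1, a2, a3;
   mm256_extr_4x64( a0, a1, a2, a3, v );
   out[0] = a0; out[1] = a1; out[2] = a2; out[3] = a3;
}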
// Horizontal vector testing
#if defined(__AVX2__)
@@ -276,13 +288,13 @@ do { \
// Memory functions
// n = number of 256 bit (32 byte) vectors
static inline void memset_zero_256( __m256i *dst, int n )
static inline void memset_zero_256( __m256i *dst, const int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = m256_zero; }
static inline void memset_256( __m256i *dst, const __m256i a, int n )
static inline void memset_256( __m256i *dst, const __m256i a, const int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }
static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
{ for ( int i = 0; i < n; i ++ ) dst[i] = src[i]; }
///////////////////////////////
@@ -397,7 +409,7 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
#define mm256_rol_1x64( v ) _mm256_permute4x64_epi64( v, 0x93 )
// A little faster with avx512
// Rotate 256 bit vector by one 32 bit element. Use 64 bit set, it's faster.
// Rotate 256 bit vector by one 32 bit element.
#define mm256_ror_1x32( v ) \
_mm256_permutevar8x32_epi32( v, \
m256_const_64( 0x0000000000000007, 0x0000000600000005, \
@@ -455,24 +467,28 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
#define mm256_rol1x32_128( v ) _mm256_shuffle_epi32( v, 0x93 )
// Rotate each 128 bit lane by one 16 bit element.
#define mm256_rol1x16_128( v ) \
_mm256_shuffle_epi8( v, _mm256_set_epi16( 6,5,4,3,2,1,0,7, \
6,5,4,3,2,1,0,7 ) )
#define mm256_ror1x16_128( v ) \
_mm256_shuffle_epi8( v, _mm256_set_epi16( 0,7,6,5,4,3,2,1, \
0,7,6,5,4,3,2,1 ) )
_mm256_shuffle_epi8( v, m256_const_64( 0x01000f0e0d0c0b0a, \
0x0908070605040302, \
0x01000f0e0d0c0b0a, \
0x0908070605040302 ) )
#define mm256_rol1x16_128( v ) \
_mm256_shuffle_epi8( v, m256_const_64( 0x0d0c0b0a09080706, \
0x0504030201000f0e, \
0x0d0c0b0a09080706, \
0x0504030201000f0e ) )
// Rotate each 128 bit lane by one byte
#define mm256_rol1x8_128( v ) \
_mm256_shuffle_epi8( v, _mm256_set_epi8(14,13,12,11,10, 9, 8, 7, \
6, 5, 4, 3, 2, 1, 0,15, \
14,13,12,11,10, 9, 8, 7, \
6, 5, 4, 3, 2, 1, 0,15 ) )
#define mm256_ror1x8_128( v ) \
_mm256_shuffle_epi8( v, _mm256_set_epi8( 0,15,14,13,12,11,10, 9, \
8, 7, 6, 5, 4, 3, 2, 1, \
0,15,14,13,12,11,10, 9, \
8, 7, 6, 5, 4, 3, 2, 1 ) )
_mm256_shuffle_epi8( v, m256_const_64( 0x000f0e0d0c0b0a09, \
0x0807060504030201, \
0x000f0e0d0c0b0a09, \
0x0807060504030201 ) )
#define mm256_rol1x8_128( v ) \
_mm256_shuffle_epi8( v, m256_const_64( 0x0e0d0c0b0a090807, \
0x060504030201000f, \
0x0e0d0c0b0a090807, \
0x060504030201000f ) )
// Rotate each 128 bit lane by c bytes.
#define mm256_bror_128( v, c ) \
@@ -485,34 +501,65 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
// Swap 32 bit elements in each 64 bit lane
#define mm256_swap32_64( v ) _mm256_shuffle_epi32( v, 0xb1 )
#define mm256_ror16_64( v ) \
_mm256_shuffle_epi8( v, _mm256_set_epi16( 4,7,6,5,0,3,2,1, \
4,7,6,5,0,3,2,1 ) )
#define mm256_rol16_64( v ) \
_mm256_shuffle_epi8( v, _mm256_set_epi16( 6,5,4,7,2,1,0,3, \
6,5,4,7,2,1,0,3 ) )
#define mm256_ror1x16_64( v ) \
_mm256_shuffle_epi8( v, m256_const_64( 0x09080f0e0d0c0b0a, \
0x0100070605040302, \
0x09080f0e0d0c0b0a, \
0x0100070605040302 ) )
#define mm256_rol1x16_64( v ) \
_mm256_shuffle_epi8( v, m256_const_64( 0x0d0c0b0a09080f0e, \
0x0504030201000706, \
0x0d0c0b0a09080f0e, \
0x0504030201000706 ))
#define mm256_ror1x8_64( v ) \
_mm256_shuffle_epi8( v, m256_const_64( 0x080f0e0d0c0b0a09, \
0x0007060504030201, \
0x080f0e0d0c0b0a09, \
0x0007060504030201 ))
#define mm256_rol1x8_64( v ) \
_mm256_shuffle_epi8( v, m256_const_64( 0x0e0d0c0b0a09080f, \
0x0605040302010007, \
0x0e0d0c0b0a09080f, \
0x0605040302010007 ) )
#define mm256_ror3x8_64( v ) \
_mm256_shuffle_epi8( v, m256_const_64( 0x0a09080f0e0d0c0b, \
0x0201000706050403, \
0x0a09080f0e0d0c0b, \
0x0201000706050403 ) )
#define mm256_rol3x8_64( v ) \
_mm256_shuffle_epi8( v, m256_const_64( 0x0c0b0a09080f0e0d, \
0x0403020100070605, \
0x0c0b0a09080f0e0d, \
0x0403020100070605 ) )
// Swap 16 bit elements in each 32 bit lane
#define mm256_swap16_32( v ) \
_mm256_shuffle_epi8( v, _mm256_set_epi16( 6,7,4,5,2,3,0,1, \
6,7,4,5,2,3,0,1 ) )
_mm256_shuffle_epi8( v, m256_const_64( 0x0d0c0f0e09080b0a, \
0x0504070601000302, \
0x0d0c0f0e09080b0a, \
0x0504070601000302 ) )
//
// Swap bytes in vector elements, endian bswap.
#define mm256_bswap_64( v ) \
_mm256_shuffle_epi8( v, m256_const_64( 0x08090a0b0c0d0e0f, \
0x0001020304050607, 0x08090a0b0c0d0e0f, 0x0001020304050607 ) )
0x0001020304050607, \
0x08090a0b0c0d0e0f, \
0x0001020304050607 ) )
#define mm256_bswap_32( v ) \
_mm256_shuffle_epi8( v, m256_const_64( 0x0c0d0e0f08090a0b, \
0x0405060700010203, 0x0c0d0e0f08090a0b, 0x0405060700010203 ) )
0x0405060700010203, \
0x0c0d0e0f08090a0b, \
0x0405060700010203 ) )
#define mm256_bswap_16( v ) \
_mm256_shuffle_epi8( v, _mm256_set_epi8( 14,15, 12,13, 10,11, 8, 9, \
6, 7, 4, 5, 2, 3, 0, 1, \
14,15, 12,13, 10,11, 8, 9, \
6, 7, 4, 5, 2, 3, 0, 1 ) )
_mm256_shuffle_epi8( v, m256_const_64( 0x0e0f0c0d0a0b0809, \
0x0607040502030001, \
0x0e0f0c0d0a0b0809, \
0x0607040502030001 ) )
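// Editor's sketch: byte swapping big endian 32 bit words lane by lane;
// casti_m256i is assumed to be the codebase's cast-index macro and the
// buffers are hypothetical.
static inline void bswap32_block_example( void *d, const void *s )
{
   casti_m256i( d, 0 ) = mm256_bswap_32( casti_m256i( s, 0 ) );
}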
// 8 byte qword * 8 qwords * 4 lanes = 256 bytes
#define mm256_block_bswap_64( d, s ) do \