Mirror of https://github.com/JayDDee/cpuminer-opt.git

Commit: v3.9.8

@@ -483,7 +483,9 @@ static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src )
const __m256i three = _mm256_add_epi32( two, one );
const __m256i four = _mm256_add_epi32( two, two );

casti_m256i( d, 0 ) = _mm256_permutevar8x32_epi32( s0, m256_zero );
casti_m256i( d, 0 ) = _mm256_broadcastd_epi32(
_mm256_castsi256_si128( s0 ) );
// casti_m256i( d, 0 ) = _mm256_permutevar8x32_epi32( s0, m256_zero );
casti_m256i( d, 1 ) = _mm256_permutevar8x32_epi32( s0, one );
casti_m256i( d, 2 ) = _mm256_permutevar8x32_epi32( s0, two );
casti_m256i( d, 3 ) = _mm256_permutevar8x32_epi32( s0, three );

@@ -494,7 +496,9 @@ static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src )
_mm256_add_epi32( four, two ) );
casti_m256i( d, 7 ) = _mm256_permutevar8x32_epi32( s0,
_mm256_add_epi32( four, three ) );
casti_m256i( d, 8 ) = _mm256_permutevar8x32_epi32( s1, m256_zero );
casti_m256i( d, 8 ) = _mm256_broadcastd_epi32(
_mm256_castsi256_si128( s1 ) );
// casti_m256i( d, 8 ) = _mm256_permutevar8x32_epi32( s1, m256_zero );
casti_m256i( d, 9 ) = _mm256_permutevar8x32_epi32( s1, one );
casti_m256i( d,10 ) = _mm256_permutevar8x32_epi32( s1, two );
casti_m256i( d,11 ) = _mm256_permutevar8x32_epi32( s1, three );

@@ -505,8 +509,9 @@ static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src )
_mm256_add_epi32( four, two ) );
casti_m256i( d,15 ) = _mm256_permutevar8x32_epi32( s1,
_mm256_add_epi32( four, three ) );
casti_m256i( d,16 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s2 ), m256_zero );
casti_m256i( d,16 ) = _mm256_broadcastd_epi32( s2 );
// casti_m256i( d,16 ) = _mm256_permutevar8x32_epi32(
// _mm256_castsi128_si256( s2 ), m256_zero );
casti_m256i( d,17 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s2 ), one );
casti_m256i( d,18 ) = _mm256_permutevar8x32_epi32(
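
The hunks above replace a permute with an all-zero index by a direct broadcast of the low element. For reference, a standalone check (not part of the commit; assumes AVX2 and GCC or Clang) that the two forms produce the same vector, with no index constant needed for the broadcast:

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

// Standalone check (not part of the commit; assumes AVX2): broadcasting
// the low 32 bit element with vpbroadcastd gives the same vector as a
// permute with an all-zero index vector.
int main( void )
{
   __m256i v = _mm256_set_epi32( 8, 7, 6, 5, 4, 3, 2, 1 );
   __m256i a = _mm256_permutevar8x32_epi32( v, _mm256_setzero_si256() );
   __m256i b = _mm256_broadcastd_epi32( _mm256_castsi256_si128( v ) );
   uint32_t ra[8], rb[8];
   _mm256_storeu_si256( (__m256i*)ra, a );
   _mm256_storeu_si256( (__m256i*)rb, b );
   int same = 1;
   for ( int i = 0; i < 8; i++ ) same &= ( ra[i] == rb[i] );
   printf( "%d\n", same );   // prints 1
   return 0;
}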

@@ -682,7 +687,9 @@ static inline void mm512_bswap32_intrlv80_16x32( void *d, void *src )
const __m512i three = _mm512_add_epi32( two, one );
__m512i x = _mm512_add_epi32( three, three );

casti_m512i( d, 0 ) = _mm512_permutexvar_epi32( s0, m512_zero );
casti_m512i( d, 0 ) = _mm512_broadcastd_epi32(
_mm512_castsi512_si128( s0 ) );
// casti_m512i( d, 0 ) = _mm512_permutexvar_epi32( s0, m512_zero );
casti_m512i( d, 1 ) = _mm512_permutexvar_epi32( s0, one );
casti_m512i( d, 2 ) = _mm512_permutexvar_epi32( s0, two );
casti_m512i( d, 3 ) = _mm512_permutexvar_epi32( s0, three );

@@ -709,8 +716,9 @@ static inline void mm512_bswap32_intrlv80_16x32( void *d, void *src )
_mm512_add_epi32( x, two ) );
casti_m512i( d,15 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, three ) );
casti_m512i( d,16 ) = _mm512_permutexvar_epi32(
_mm512_castsi128_si512( s1 ), m512_zero );
casti_m512i( d,16 ) = _mm512_broadcastd_epi32( s1 );
// casti_m512i( d,16 ) = _mm512_permutexvar_epi32(
// _mm512_castsi128_si512( s1 ), m512_zero );
casti_m512i( d,17 ) = _mm512_permutexvar_epi32(
_mm512_castsi128_si512( s1 ), one );
casti_m512i( d,18 ) = _mm512_permutexvar_epi32(

@@ -987,15 +995,17 @@ static inline void extr_lane_8x64( void *d, const void *s,
static inline void mm512_bswap32_intrlv80_8x64( void *dst, void *src )
{
__m512i *d = (__m512i*)dst;
__m512i s0 = mm512_bswap_32( casti_m512i(src, 0 ) );
__m128i s1 = mm128_bswap_32( casti_m128i(src, 4 ) );
__m512i s0 = mm512_bswap_32( casti_m512i( src, 0 ) );
__m128i s1 = mm128_bswap_32( casti_m128i( src, 4 ) );
// const __m512i zero = m512_zero;
const __m512i one = m512_one_64;
const __m512i two = _mm512_add_epi64( one, one );
const __m512i three = _mm512_add_epi64( two, one );
const __m512i four = _mm512_add_epi64( two, two );

d[0] = _mm512_permutexvar_epi64( s0, m512_zero );
d[0] = _mm512_broadcastq_epi64(
_mm512_castsi512_si128( s0 ) );
// d[0] = _mm512_permutexvar_epi64( s0, m512_zero );
d[1] = _mm512_permutexvar_epi64( s0, one );
d[2] = _mm512_permutexvar_epi64( s0, two );
d[3] = _mm512_permutexvar_epi64( s0, three );

@@ -1003,8 +1013,9 @@ static inline void mm512_bswap32_intrlv80_8x64( void *dst, void *src )
d[5] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, one ) );
d[6] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, two ) );
d[7] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, three ) );
d[8] = _mm512_permutexvar_epi64(
_mm512_castsi128_si512( s1 ), m512_zero );
d[8] = _mm512_broadcastq_epi64( s1 );
// d[8] = _mm512_permutexvar_epi64(
// _mm512_castsi128_si512( s1 ), m512_zero );
d[9] = _mm512_permutexvar_epi64(
_mm512_castsi128_si512( s1 ), one );
}
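
mm512_bswap32_intrlv80_8x64 byte-swaps an 80 byte block and broadcasts each of its ten 64 bit words across the eight lanes of one output vector. A plain C reference of that layout, inferred from the code above (illustration only, not part of the commit; uses the GCC/Clang __builtin_bswap32 and a hypothetical name):

#include <stdint.h>
#include <string.h>

// Illustration only: scalar reference for the 8x64 interleave of an
// 80 byte block. Each 32 bit word is byte swapped, then every 64 bit
// word is broadcast across the eight 64 bit lanes of one output vector.
static void bswap32_intrlv80_8x64_ref( void *dst, const void *src )
{
   uint64_t w[10];
   uint64_t *d = (uint64_t*)dst;
   memcpy( w, src, 80 );                       // 10 x 64 bit source words
   for ( int i = 0; i < 10; i++ )
   {
      uint32_t lo = __builtin_bswap32( (uint32_t) w[i] );
      uint32_t hi = __builtin_bswap32( (uint32_t)( w[i] >> 32 ) );
      uint64_t v  = ( (uint64_t)hi << 32 ) | lo;
      for ( int lane = 0; lane < 8; lane++ )   // broadcast to 8 lanes
         d[ i*8 + lane ] = v;
   }
}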

@@ -42,9 +42,11 @@

static inline __m128i m128_one_128_fn()
{
register uint64_t one = 1;
register __m128i a;
asm( "movq $1, %0\n\t"
: "=x"(a) );
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return a;
}
#define m128_one_128 m128_one_128_fn()
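
The old asm block tried to load the constant with an immediate operand, but movq has no immediate-to-XMM form; the replacement goes through a general purpose register. The same pattern standalone (assumes x86-64 GCC/Clang extended asm):

#include <immintrin.h>
#include <stdint.h>

// Standalone version of the corrected pattern: the constant is first
// materialized in a general purpose register ("r") and then moved into
// the vector register ("=x"), which movq can do.
static inline __m128i one_128_sketch( void )
{
   uint64_t one = 1;
   __m128i  a;
   asm( "movq %1, %0\n\t" : "=x" (a) : "r" (one) );
   return a;      // 128 bit value 1: low 64 bits = 1, high 64 bits = 0
}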

@@ -54,9 +56,9 @@ static inline __m128i m128_one_64_fn()
register uint64_t one = 1;
register __m128i a;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r"(one) );
return _mm_shuffle_epi32( a, 0x04 );
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x44 );
}
#define m128_one_64 m128_one_64_fn()
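
The functional fix in this hunk is the shuffle control, 0x04 changed to 0x44. A standalone sketch (not part of the commit; assumes SSE2) of the difference:

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

// Each 2 bit field of the immediate, starting at the low bits, selects
// the source element for destination lanes 0..3. 0x44 selects {0,1,0,1},
// broadcasting the low 64 bit value to both halves; 0x04 selects
// {0,1,0,0} and leaves a wrong word in lane 3.
int main( void )
{
   __m128i v = _mm_set_epi32( 0, 0, 0x11111111, 1 );   // value in low 64 bits
   uint64_t good[2], bad[2];
   _mm_storeu_si128( (__m128i*)good, _mm_shuffle_epi32( v, 0x44 ) );
   _mm_storeu_si128( (__m128i*)bad,  _mm_shuffle_epi32( v, 0x04 ) );
   printf( "0x44: %016llx %016llx\n", (unsigned long long)good[0],
                                      (unsigned long long)good[1] );
   printf( "0x04: %016llx %016llx\n", (unsigned long long)bad[0],
                                      (unsigned long long)bad[1] );
   return 0;
}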

@@ -65,8 +67,8 @@ static inline __m128i m128_one_32_fn()
register uint32_t one = 1;
register __m128i a;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r"(one) );
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_32 m128_one_32_fn()

@@ -76,8 +78,8 @@ static inline __m128i m128_one_16_fn()
register uint32_t one = 0x00010001;
register __m128i a;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r"(one) );
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_16 m128_one_16_fn()

@@ -87,8 +89,8 @@ static inline __m128i m128_one_8_fn()
register uint32_t one = 0x01010101;
register __m128i a;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r"(one) );
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_8 m128_one_8_fn()

@@ -97,7 +99,7 @@ static inline __m128i m128_neg1_fn()
{
__m128i a;
asm( "pcmpeqd %0, %0\n\t"
: "=x"(a) );
: "=x" (a) );
return a;
}
#define m128_neg1 m128_neg1_fn()

@@ -108,7 +110,7 @@ static inline __m128i mm128_mov64_128( uint64_t n )
register __m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (n) );
: "r" (n) );
return a;
}
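
The m128_neg1_fn hunk appears to adjust only constraint spacing; the pcmpeqd trick itself can also be written with intrinsics. A sketch (not part of the commit) that compilers typically reduce to a single pcmpeqd:

#include <immintrin.h>

// Intrinsic form of the same trick: comparing a register with itself
// sets every bit, giving -1 in every lane without a memory load.
static inline __m128i m128_neg1_intrin( void )
{
   __m128i a = _mm_setzero_si128();
   return _mm_cmpeq_epi32( a, a );     // all ones
}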

@@ -117,7 +119,7 @@ static inline __m128i mm128_mov32_128( uint32_t n )
register __m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (n) );
: "r" (n) );
return a;
}

@@ -126,7 +128,7 @@ static inline uint64_t mm128_mov128_64( __m128i a )
register uint64_t n;
asm( "movq %1, %0\n\t"
: "=x" (n)
: "r" (a) );
: "r" (a) );
return n;
}

@@ -135,10 +137,28 @@ static inline uint32_t mm128_mov128_32( __m128i a )
register uint32_t n;
asm( "movd %1, %0\n\t"
: "=x" (n)
: "r" (a) );
: "r" (a) );
return n;
}

static inline __m128i m128_const1_64( const uint64_t n )
{
register __m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (n) );
return _mm_shuffle_epi32( a, 0x44 );
}

static inline __m128i m128_const1_32( const uint32_t n )
{
register __m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (n) );
return _mm_shuffle_epi32( a, 0x00 );
}

#if defined(__SSE41__)

// alternative to _mm_set_epi64x, doesn't use mem,

@@ -148,11 +168,12 @@ static inline __m128i m128_const_64( const uint64_t hi, const uint64_t lo )
register __m128i a;
asm( "movq %2, %0\n\t"
"pinsrq $1, %1, %0\n\t"
: "=x"(a)
: "r"(hi), "r"(lo) );
: "=x" (a)
: "r" (hi), "r" (lo) );
return a;
}

/*
static inline __m128i m128_const1_64( const uint64_t n )
{
register __m128i a;

@@ -162,13 +183,13 @@ static inline __m128i m128_const1_64( const uint64_t n )
: "r"(n) );
return a;
}

*/
#else

// #define m128_one_128 _mm_set_epi64x( 0ULL, 1ULL )

#define m128_const_64 _mm_set_epi64x
#define m128_const1_64 _mm_set1_epi64x
// #define m128_const1_64 _mm_set1_epi64x

#endif
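
m128_const_64 builds a 128 bit constant from two 64 bit registers with movq plus pinsrq when SSE4.1 is available, falling back to _mm_set_epi64x otherwise. The same pattern shown standalone (assumes x86-64 and SSE4.1, compile with -msse4.1):

#include <immintrin.h>
#include <stdint.h>

// movq loads the low 64 bits from a GPR and zero extends, pinsrq inserts
// the high 64 bits, so no memory constant is needed.
static inline __m128i const_64_sketch( uint64_t hi, uint64_t lo )
{
   __m128i a;
   asm( "movq %2, %0\n\t"
        "pinsrq $1, %1, %0\n\t"
        : "=x" (a)
        : "r" (hi), "r" (lo) );
   return a;                           // lanes: { lo, hi }
}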

@@ -263,46 +284,6 @@ do { \
#endif


// Gather and scatter data.
// Surprise, they don't use vector instructions. Several reasons why.
// Since scalar data elements are being manipulated scalar instructions
// are most appropriate and can bypass vector registers. They are faster
// and more efficient on a per instruction basis due to the higher clock
// speed and greater availability of execution resources. It's good for
// interleaving data buffers for parallel processing.
// May suffer overhead if data is already in a vector register. This can
// usually be easily avoided by the coder. Sometimes _mm_set is simply better.
// These macros are likely to be used when transposing matrices rather than
// conversions of a single vector.

// Gather data elements into contiguous memory for vector use.
// Source args are appropriately sized value integers, destination arg is a
// type agnostic pointer.
// Vector alignment is not required, though likely. Appropriate integer
// alignment satisfies these macros.

// rewrite using insert
#define mm128_gather_64( d, s0, s1 ) \
((uint64_t*)d)[0] = (uint64_t)s0; \
((uint64_t*)d)[1] = (uint64_t)s1;

#define mm128_gather_32( d, s0, s1, s2, s3 ) \
((uint32_t*)d)[0] = (uint32_t)s0; \
((uint32_t*)d)[1] = (uint32_t)s1; \
((uint32_t*)d)[2] = (uint32_t)s2; \
((uint32_t*)d)[3] = (uint32_t)s3;

// Scatter data from contiguous memory.
#define mm128_scatter_64( d0, d1, s ) \
*( (uint64_t*)d0) = ((uint64_t*)s)[0]; \
*( (uint64_t*)d1) = ((uint64_t*)s)[1];

#define mm128_scatter_32( d0, d1, d2, d3, s ) \
*( (uint32_t*)d0) = ((uint32_t*)s)[0]; \
*( (uint32_t*)d1) = ((uint32_t*)s)[1]; \
*( (uint32_t*)d2) = ((uint32_t*)s)[2]; \
*( (uint32_t*)d3) = ((uint32_t*)s)[3];

// Memory functions
// Mostly for convenience, avoids calculating bytes.
// Assumes data is aligned and integral.
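
The removed gather/scatter macros used plain scalar stores, as the comment block above explains. An illustration of how mm128_gather_32 was meant to be used (reconstructed for illustration, not part of the commit):

#include <immintrin.h>
#include <stdint.h>

// The removed macro, reproduced here so the example is self contained:
// four scalar stores into contiguous memory, then one vector load.
#define mm128_gather_32( d, s0, s1, s2, s3 ) \
   ((uint32_t*)d)[0] = (uint32_t)s0; \
   ((uint32_t*)d)[1] = (uint32_t)s1; \
   ((uint32_t*)d)[2] = (uint32_t)s2; \
   ((uint32_t*)d)[3] = (uint32_t)s3;

static inline __m128i gather_example( uint32_t a, uint32_t b,
                                      uint32_t c, uint32_t d )
{
   uint32_t buf[4];
   mm128_gather_32( buf, a, b, c, d );         // scalar stores
   return _mm_loadu_si128( (__m128i*)buf );    // then one vector load
}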

@@ -39,11 +39,20 @@ static inline __m256i m256_const1_64( uint64_t i )
{
register __m128i a;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r"(i) );
: "=x" (a)
: "r" (i) );
return _mm256_broadcastq_epi64( a );
}

static inline __m256i m256_const1_32( uint32_t i )
{
register __m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (i) );
return _mm256_broadcastd_epi32( a );
}

#if defined(__AVX2__)

// Don't call the function directly, use the macro to make appear like

@@ -142,7 +151,7 @@ do { \
__m128i hi = _mm256_extracti128_si256( src, 1 ); \
a0 = mm256_mov256_64( src ); \
a1 = _mm_extract_epi64( _mm256_castsi256_si128( src ), 1 ); \
a2 = _mm_extract_epi64( hi, 0 ); \
a2 = mm128_mov128_64( hi ); \
a3 = _mm_extract_epi64( hi, 1 ); \
} while(0)
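
In the extract macro, element 0 of the high 128 bit half is now read with a plain move instead of pextrq. A standalone check (not part of the commit; _mm_extract_epi64 needs SSE4.1) that the two reads agree:

#include <immintrin.h>

// Element 0 can be read with a plain movq, here via _mm_cvtsi128_si64,
// giving the same value as pextrq with index 0.
static inline int low_word_reads_agree( __m128i v )
{
   return _mm_cvtsi128_si64( v ) == _mm_extract_epi64( v, 0 );   // always 1
}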

@@ -246,44 +255,6 @@ static inline __m256i mm256_mov32_256( uint32_t n )
#define casto_m256i(p,o) (((__m256i*)(p))+(o))


// Gather scatter

#define mm256_gather_64( d, s0, s1, s2, s3 ) \
((uint64_t*)(d))[0] = (uint64_t)(s0); \
((uint64_t*)(d))[1] = (uint64_t)(s1); \
((uint64_t*)(d))[2] = (uint64_t)(s2); \
((uint64_t*)(d))[3] = (uint64_t)(s3);

#define mm256_gather_32( d, s0, s1, s2, s3, s4, s5, s6, s7 ) \
((uint32_t*)(d))[0] = (uint32_t)(s0); \
((uint32_t*)(d))[1] = (uint32_t)(s1); \
((uint32_t*)(d))[2] = (uint32_t)(s2); \
((uint32_t*)(d))[3] = (uint32_t)(s3); \
((uint32_t*)(d))[4] = (uint32_t)(s4); \
((uint32_t*)(d))[5] = (uint32_t)(s5); \
((uint32_t*)(d))[6] = (uint32_t)(s6); \
((uint32_t*)(d))[7] = (uint32_t)(s7);


// Scatter data from contiguous memory.
// All arguments are pointers
#define mm256_scatter_64( d0, d1, d2, d3, s ) \
*((uint64_t*)(d0)) = ((uint64_t*)(s))[0]; \
*((uint64_t*)(d1)) = ((uint64_t*)(s))[1]; \
*((uint64_t*)(d2)) = ((uint64_t*)(s))[2]; \
*((uint64_t*)(d3)) = ((uint64_t*)(s))[3];

#define mm256_scatter_32( d0, d1, d2, d3, d4, d5, d6, d7, s ) \
*((uint32_t*)(d0)) = ((uint32_t*)(s))[0]; \
*((uint32_t*)(d1)) = ((uint32_t*)(s))[1]; \
*((uint32_t*)(d2)) = ((uint32_t*)(s))[2]; \
*((uint32_t*)(d3)) = ((uint32_t*)(s))[3]; \
*((uint32_t*)(d4)) = ((uint32_t*)(s))[4]; \
*((uint32_t*)(d5)) = ((uint32_t*)(s))[5]; \
*((uint32_t*)(d6)) = ((uint32_t*)(s))[6]; \
*((uint32_t*)(d7)) = ((uint32_t*)(s))[7];


//
// Memory functions
// n = number of 256 bit (32 byte) vectors

@@ -47,7 +47,6 @@
// _mm512_setzero_si512 uses xor instruction. If needed frequently
// in a function it is better to define a register variable (const?)
// initialized to zero.
// It isn't clear to me yet how set or set1 actually work.

#define m512_zero _mm512_setzero_si512()
#define m512_one_512 _mm512_set_epi64( 0ULL, 0ULL, 0ULL, 0ULL, \

@@ -60,10 +59,18 @@
//#define m512_one_8 _mm512_set1_epi8( 1U )
//#define m512_neg1 _mm512_set1_epi64( 0xFFFFFFFFFFFFFFFFULL )

#define mi512_const_64( i7, i6, i5, i4, i3, i2, i1, i0 ) \
_mm512_inserti64x4( _mm512_castsi512_si256( m256_const_64( i3.i2,i1,i0 ) ), \
#define m512_const_64( i7, i6, i5, i4, i3, i2, i1, i0 ) \
_mm512_inserti64x4( _mm512_castsi256_si512( m256_const_64( i3,i2,i1,i0 ) ), \
m256_const_64( i7,i6,i5,i4 ), 1 )
#define m512_const1_64( i ) m256_const_64( i, i, i, i, i, i, i, i )

static inline __m512i m512_const1_64( uint64_t i )
{
register __m128i a;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r"(i) );
return _mm512_broadcastq_epi64( a );
}

static inline __m512i m512_one_64_fn()
{
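
The corrected m512_const_64 widens the low 256 bit half to 512 bits (castsi256_si512, not castsi512_si256) and then inserts the high half. A standalone sketch of that composition with plain intrinsics (not part of the commit; assumes AVX-512F, const512_64_sketch is a hypothetical stand-in for the macro):

#include <immintrin.h>
#include <stdint.h>

// The low four 64 bit words become a 256 bit vector that is widened to
// 512 bits, then the high four words are inserted as the upper half.
static inline __m512i const512_64_sketch( uint64_t i7, uint64_t i6,
                                          uint64_t i5, uint64_t i4,
                                          uint64_t i3, uint64_t i2,
                                          uint64_t i1, uint64_t i0 )
{
   __m256i lo = _mm256_set_epi64x( i3, i2, i1, i0 );
   __m256i hi = _mm256_set_epi64x( i7, i6, i5, i4 );
   return _mm512_inserti64x4( _mm512_castsi256_si512( lo ), hi, 1 );
}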

@@ -135,15 +142,12 @@ static inline __m512i m512_neg1_fn()
#define mm512_negate_32( x ) _mm512_sub_epi32( m512_zero, x )
#define mm512_negate_16( x ) _mm512_sub_epi16( m512_zero, x )



#define mm256_extr_lo256_512( a ) _mm512_castsi512_si256( a )
#define mm256_extr_hi256_512( a ) _mm512_extracti64x4_epi64( a, 1 )

#define mm128_extr_lo128_512( a ) _mm512_castsi512_si256( a )


//
// Pointer casting

@@ -163,73 +167,9 @@ static inline __m512i m512_neg1_fn()
// returns p+o as pointer to vector
#define casto_m512i(p,o) (((__m512i*)(p))+(o))

// Gather scatter

#define mm512_gather_64( d, s0, s1, s2, s3, s4, s5, s6, s7 ) \
((uint64_t*)(d))[0] = (uint64_t)(s0); \
((uint64_t*)(d))[1] = (uint64_t)(s1); \
((uint64_t*)(d))[2] = (uint64_t)(s2); \
((uint64_t*)(d))[3] = (uint64_t)(s3); \
((uint64_t*)(d))[4] = (uint64_t)(s4); \
((uint64_t*)(d))[5] = (uint64_t)(s5); \
((uint64_t*)(d))[6] = (uint64_t)(s6); \
((uint64_t*)(d))[7] = (uint64_t)(s7);


#define mm512_gather_32( d, s00, s01, s02, s03, s04, s05, s06, s07, \
s08, s09, s10, s11, s12, s13, s14, s15 ) \
((uint32_t*)(d))[ 0] = (uint32_t)(s00); \
((uint32_t*)(d))[ 1] = (uint32_t)(s01); \
((uint32_t*)(d))[ 2] = (uint32_t)(s02); \
((uint32_t*)(d))[ 3] = (uint32_t)(s03); \
((uint32_t*)(d))[ 4] = (uint32_t)(s04); \
((uint32_t*)(d))[ 5] = (uint32_t)(s05); \
((uint32_t*)(d))[ 6] = (uint32_t)(s06); \
((uint32_t*)(d))[ 7] = (uint32_t)(s07); \
((uint32_t*)(d))[ 8] = (uint32_t)(s08); \
((uint32_t*)(d))[ 9] = (uint32_t)(s09); \
((uint32_t*)(d))[10] = (uint32_t)(s10); \
((uint32_t*)(d))[11] = (uint32_t)(s11); \
((uint32_t*)(d))[12] = (uint32_t)(s12); \
((uint32_t*)(d))[13] = (uint32_t)(s13); \
((uint32_t*)(d))[13] = (uint32_t)(s14); \
((uint32_t*)(d))[15] = (uint32_t)(s15);

// Scatter data from contiguous memory.
// All arguments are pointers
#define mm512_scatter_64( d0, d1, d2, d3, d4, d5, d6, d7, s ) \
*((uint64_t*)(d0)) = ((uint64_t*)(s))[0]; \
*((uint64_t*)(d1)) = ((uint64_t*)(s))[1]; \
*((uint64_t*)(d2)) = ((uint64_t*)(s))[2]; \
*((uint64_t*)(d3)) = ((uint64_t*)(s))[3]; \
*((uint64_t*)(d4)) = ((uint64_t*)(s))[4]; \
*((uint64_t*)(d5)) = ((uint64_t*)(s))[5]; \
*((uint64_t*)(d6)) = ((uint64_t*)(s))[6]; \
*((uint64_t*)(d7)) = ((uint64_t*)(s))[7];


#define mm512_scatter_32( d00, d01, d02, d03, d04, d05, d06, d07, \
d08, d09, d10, d11, d12, d13, d14, d15, s ) \
*((uint32_t*)(d00)) = ((uint32_t*)(s))[ 0]; \
*((uint32_t*)(d01)) = ((uint32_t*)(s))[ 1]; \
*((uint32_t*)(d02)) = ((uint32_t*)(s))[ 2]; \
*((uint32_t*)(d03)) = ((uint32_t*)(s))[ 3]; \
*((uint32_t*)(d04)) = ((uint32_t*)(s))[ 4]; \
*((uint32_t*)(d05)) = ((uint32_t*)(s))[ 5]; \
*((uint32_t*)(d06)) = ((uint32_t*)(s))[ 6]; \
*((uint32_t*)(d07)) = ((uint32_t*)(s))[ 7]; \
*((uint32_t*)(d00)) = ((uint32_t*)(s))[ 8]; \
*((uint32_t*)(d01)) = ((uint32_t*)(s))[ 9]; \
*((uint32_t*)(d02)) = ((uint32_t*)(s))[10]; \
*((uint32_t*)(d03)) = ((uint32_t*)(s))[11]; \
*((uint32_t*)(d04)) = ((uint32_t*)(s))[12]; \
*((uint32_t*)(d05)) = ((uint32_t*)(s))[13]; \
*((uint32_t*)(d06)) = ((uint32_t*)(s))[14]; \
*((uint32_t*)(d07)) = ((uint32_t*)(s))[15];

// Add 4 values, fewer dependencies than sequential addition.


#define mm512_add4_64( a, b, c, d ) \
_mm512_add_epi64( _mm512_add_epi64( a, b ), _mm512_add_epi64( c, d ) )
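
mm512_add4_64 and its variants sum in pairs so the two inner additions can issue independently. The same idea for scalars (illustration only, not part of the commit):

#include <stdint.h>

// Pairwise addition shortens the dependency chain: ((a+b)+(c+d)) needs
// two dependent steps, while a+b+c+d needs three.
static inline uint64_t add4_64_ref( uint64_t a, uint64_t b,
                                    uint64_t c, uint64_t d )
{
   return ( a + b ) + ( c + d );
}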

@@ -246,7 +186,6 @@ static inline __m512i m512_neg1_fn()
_mm512_xor_si512( _mm512_xor_si256( a, b ), _mm512_xor_si256( c, d ) )



//
// Bit rotations.

@@ -321,23 +260,26 @@ static inline __m512i m512_neg1_fn()
_mm512_permutex_epi64( v, m512_const_64( 0,1,2,3,4,5,6,7 ) )

#define mm512_invert_32( v ) \
_mm512_permutexvar_epi32( v, _mm512_set_epi32( \
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15 ) )
_mm512_permutexvar_epi32( v, m512_const_64( \
0x0000000000000001,0x0000000200000003, \
0x0000000400000005,0x0000000600000007, \
0x0000000800000009,0x0000000a0000000b, \
0x0000000c0000000d,0x0000000e0000000f ) )


#define mm512_invert_16( v ) \
_mm512_permutexvar_epi16( v, _mm512_set_epi32( \
0x00000001, 0x00020003, 0x00040005, 0x00060007, \
0x00080009, 0x000A000B, 0x000C000D, 0x000E000F, \
0x00100011, 0x00120013, 0x00140015, 0x00160017, \
0x00180019, 0x001A001B, 0x001C001D, 0x001E001F ) )
_mm512_permutexvar_epi16( v, m512_const_64( \
0x0000000100020003, 0x0004000500060007, \
0x00080009000A000B, 0x000C000D000E000F, \
0x0010001100120013, 0x0014001500160017, \
0x00180019001A001B, 0x001C001D001E001F ) )

#define mm512_invert_8( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F, \
0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F, \
0x20212223, 0x24252627, 0x28292A2B, 0x2C2D2E2F, \
0x30313233, 0x34353637, 0x38393A3B, 0x3C3D3E3F ) )
_mm512_permutexvar_epi8( v, m512_const_64( \
0x0001020304050607, 0x08090A0B0C0D0E0F, \
0x1011121314151617, 0x18191A1B1C1D1E1F, \
0x2021222324252627, 0x28292A2B2C2D2E2F, \
0x3031323334353637, 0x38393A3B3C3D3E3F ) )

//
// Rotate elements within 256 bit lanes of 512 bit vector.
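
The rewritten index constants pack pairs of 32 bit lane indices into 64 bit words; the lower numbered 32 bit element of each pair occupies the low half of the 64 bit value. A standalone check (not part of the commit; assumes AVX-512F) that the packed form used for mm512_invert_32 equals the original _mm512_set_epi32 constant:

#include <immintrin.h>
#include <stdio.h>

// Compare the original 32 bit element list with the packed 64 bit words.
int main( void )
{
   __m512i a = _mm512_set_epi32( 0, 1, 2, 3, 4, 5, 6, 7,
                                 8, 9,10,11,12,13,14,15 );
   __m512i b = _mm512_set_epi64( 0x0000000000000001, 0x0000000200000003,
                                 0x0000000400000005, 0x0000000600000007,
                                 0x0000000800000009, 0x0000000a0000000b,
                                 0x0000000c0000000d, 0x0000000e0000000f );
   printf( "%d\n", _mm512_cmpneq_epi32_mask( a, b ) == 0 );   // prints 1
   return 0;
}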

@@ -351,38 +293,46 @@ static inline __m512i m512_neg1_fn()

// Rotate 256 bit lanes by one 32 bit element
#define mm512_ror1x32_256( v ) \
_mm512_permutexvar_epi32( v, _mm512_set_epi32( \
8,15,14,13,12,11,10, 9, 0, 7, 6, 5, 4, 3, 2, 1 ) )
_mm512_permutexvar_epi32( v, m512_const_64( \
0x000000080000000f, 0x0000000e0000000d, \
0x0000000c0000000b, 0x0000000a00000009, \
0x0000000000000007, 0x0000000600000005, \
0x0000000400000003, 0x0000000200000001 ) )

#define mm512_rol1x32_256( v ) \
_mm512_permutexvar_epi32( v, _mm512_set_epi32( \
14,13,12,11,10, 9, 8,15, 6, 5, 4, 3, 2, 1, 0, 7 ) )
_mm512_permutexvar_epi32( v, m512_const_64( \
0x0000000e0000000d, 0x0000000c0000000b, \
0x0000000a00000009, 0x000000080000000f, \
0x0000000600000005, 0x0000000400000003, \
0x0000000200000001, 0x0000000000000007 ) )

#define mm512_ror1x16_256( v ) \
_mm512_permutexvar_epi16( v, _mm512_set_epi32( \
0x0010001F, 0x001E001D, 0x001C001B, 0x001A0019, \
0x00180017, 0x00160015, 0x00140013, 0x00120011, \
0x0000000F, 0x000E000D, 0x000C000B, 0x000A0009, \
0x00080007, 0x00060005, 0x00040003, 0x00020001 ) )
_mm512_permutexvar_epi16( v, m512_const_64( \
0x0010001F001E001D, 0x001C001B001A0019, \
0x0018001700160015, 0x0014001300120011, \
0x0000000F000E000D, 0x000C000B000A0009, \
0x0008000700060005, 0x0004000300020001 ) )

#define mm512_rol1x16_256( v ) \
_mm512_permutexvar_epi16( v, _mm512_set_epi32( \
0x001E001D, 0x001C001B, 0x001A0019, 0x00180017, \
0x00160015, 0x00140013, 0x00120011, 0x0000000F, \
0x000E000D, 0x000C000B, 0x000A0009, 0x00080007, \
0x00060005, 0x00040003, 0x00020001, 0x0000001F ) )
_mm512_permutexvar_epi16( v, m512_const_64( \
0x001E001D001C001B, 0x001A001900180017, \
0x0016001500140013, 0x001200110000000F, \
0x000E000D000C000B, 0x000A000900080007, \
0x0006000500040003, 0x000200010000001F ) )

#define mm512_ror1x8_256( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x203F3E3D, 0x3C3B3A39, 0x38373635, 0x34333231, \
0x302F2E2D, 0x2C2B2A29, 0x28272625, 0x24232221, \
0x001F1E1D, 0x1C1B1A19, 0x18171615, 0x14131211, \
0x100F0E0D, 0x0C0B0A09, 0x08070605, 0x04030201 ) )
_mm512_permutexvar_epi8( v, m512_const_64( \
0x203F3E3D3C3B3A39, 0x3837363534333231, \
0x302F2E2D2C2B2A29, 0x2827262524232221, \
0x001F1E1D1C1B1A19, 0x1817161514131211, \
0x100F0E0D0C0B0A09, 0x0807060504030201 ) )

#define mm512_rol1x8_256( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x3E3D3C3B, 0x3A393837, 0x36353433, 0x3231302F, \
0x2E2D2C2B, 0x2A292827, 0x26252423, 0x2221203F, \
0x1E1D1C1B, 0x1A191817, 0x16151413, 0x1211100F, \
0x0E0D0C0B, 0x0A090807, 0x06050403, 0x0201001F ) )
_mm512_permutexvar_epi8( v, m512_const_64( \
0x3E3D3C3B3A393837, 0x363534333231302F, \
0x2E2D2C2B2A292827, 0x262524232221203F, \
0x1E1D1C1B1A191817, 0x161514131211100F, \
0x0E0D0C0B0A090807, 0x060504030201001F ) )

//
// Rotate elements within 128 bit lanes of 512 bit vector.

@@ -441,80 +391,80 @@ static inline __m512i m512_neg1_fn()

// Rotate each 64 bit lane by one 16 bit element.
#define mm512_ror1x16_64( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x39383F3E, 0x3D3C3B3A, 0x31303736, 0x35343332, \
0x29282F2E, 0x2D2C2B2A, 0x21202726, 0x25242322, \
0x19181F1E, 0x1D1C1B1A, 0x11101716, 0x15141312, \
0x09080F0E, 0x0D0C0B0A, 0x01000706, 0x05040302 ) )
_mm512_permutexvar_epi8( v, m512_const_64( \
0x39383F3E3D3C3B3A, 0x3130373635343332, \
0x29282F2E2D2C2B2A, 0x2120272625242322, \
0x19181F1E1D1C1B1A, 0x1110171615141312, \
0x09080F0E0D0C0B0A, 0x0100070605040302 ) )

#define mm512_rol1x16_64( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x3D3C3B3A, 0x39383F3E, 0x35343332, 0x31303736 \
0x2D2C2B2A, 0x29282F2E, 0x25242322, 0x21202726 \
0x1D1C1B1A, 0x19181F1E, 0x15141312, 0x11101716 \
0x0D0C0B0A, 0x09080F0E, 0x05040302, 0x01000706 ) )
_mm512_permutexvar_epi8( v, m512_const_64( \
0x3D3C3B3A39383F3E, 0x3534333231303736 \
0x2D2C2B2A29282F2E, 0x2524232221202726 \
0x1D1C1B1A19181F1E, 0x1514131211101716 \
0x0D0C0B0A09080F0E, 0x0504030201000706 ) )

// Rotate each 64 bit lane by one byte.
#define mm512_ror1x8_64( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x383F3E3D, 0x3C3B3A39, 0x30373635, 0x34333231, \
0x282F2E2D, 0x2C2B2A29, 0x20272625, 0x24232221, \
0x181F1E1D, 0x1C1B1A19, 0x10171615, 0x14131211, \
0x080F0E0D, 0x0C0B0A09, 0x00070605, 0x0403020 )
_mm512_permutexvar_epi8( v, m512_const_64( \
0x383F3E3D3C3B3A39, 0x3037363534333231, \
0x282F2E2D2C2B2A29, 0x2027262524232221, \
0x181F1E1D1C1B1A19, 0x1017161514131211, \
0x080F0E0D0C0B0A09, 0x0007060504030201 ) )
#define mm512_rol1x8_64( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x3E3D3C3B, 0x3A39383F, 0x36353433, 0x32313037, \
0x2E2D2C2B, 0x2A29282F, 0x26252423, 0x22212027, \
0x1E1D1C1B, 0x1A19181F, 0x16151413, 0x12111017, \
0x0E0D0C0B, 0x0A09080F, 0x06050403, 0x02010007 )
_mm512_permutexvar_epi8( v, m512_const_64( \
0x3E3D3C3B3A39383F, 0x3635343332313037, \
0x2E2D2C2B2A29282F, 0x2625242322212027, \
0x1E1D1C1B1A19181F, 0x1615141312111017, \
0x0E0D0C0B0A09080F, 0x0605040302010007 ) )

//
// Rotate elements within 32 bit lanes.

#define mm512_swap16_32( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x001D001C, 0x001F001E, 0x00190018, 0x001B001A, \
0x00150014, 0x00170016, 0x00110010, 0x00130012, \
0x000D000C, 0x000F000E, 0x00190008, 0x000B000A, \
0x00050004, 0x00070006, 0x00110000, 0x00030002 )
_mm512_permutexvar_epi8( v, m512_const_64( \
0x001D001C001F001E, 0x00190018001B001A, \
0x0015001400170016, 0x0011001000130012, \
0x000D000C000F000E, 0x00190008000B000A, \
0x0005000400070006, 0x0011000000030002 ) )

#define mm512_ror1x8_32( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x3C3F3E3D, 0x383B3A39, 0x34373635, 0x30333231, \
0x2C2F2E2D, 0x282B2A29, 0x24272625, 0x20232221, \
0x1C1F1E1D, 0x181B1A19, 0x14171615, 0x10131211, \
0x0C0F0E0D, 0x080B0A09, 0x04070605, 0x00030201 ) )
_mm512_permutexvar_epi8( v, m512_const_64( \
0x3C3F3E3D383B3A39, 0x3437363530333231, \
0x2C2F2E2D282B2A29, 0x2427262520232221, \
0x1C1F1E1D181B1A19, 0x1417161510131211, \
0x0C0F0E0D080B0A09, 0x0407060500030201 ) )

#define mm512_rol1x8_32( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x3E3D3C3F, 0x3A39383B, 0x36353437, 0x32313033, \
0x2E2D2C2F, 0x2A29282B, 0x26252427, 0x22212023, \
0x1E1D1C1F, 0x1A19181B, 0x16151417, 0x12111013, \
0x0E0D0C0F, 0x0A09080B, 0x06050407, 0x02010003 ) )
_mm512_permutexvar_epi8( v, m512_const_64( \
0x3E3D3C3F3A39383B, 0x3635343732313033, \
0x2E2D2C2F2A29282B, 0x2625242722212023, \
0x1E1D1C1F1A19181B, 0x1615141712111013, \
0x0E0D0C0F0A09080B, 0x0605040702010003 ) )

//
// Swap bytes in vector elements, vectorized bswap.

#define mm512_bswap_64( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x38393A3B, 0x3C3D3E3F, 0x20313233, 0x34353637, \
0x28292A2B, 0x2C2D2E2F, 0x20212223, 0x34353637, \
0x18191A1B, 0x1C1D1E1F, 0x10111213, 0x14151617, \
0x08090A0B, 0x0C0D0E0F, 0x00010203, 0x04050607 ) )
_mm512_permutexvar_epi8( v, m512_const_64( \
0x38393A3B3C3D3E3F, 0x2031323334353637, \
0x28292A2B2C2D2E2F, 0x2021222334353637, \
0x18191A1B1C1D1E1F, 0x1011121314151617, \
0x08090A0B0C0D0E0F, 0x0001020304050607 ) )

#define mm512_bswap_32( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x3C3D3E3F, 0x38393A3B, 0x34353637, 0x30313233, \
0x3C3D3E3F, 0x38393A3B, 0x34353637, 0x30313233, \
0x3C3D3E3F, 0x38393A3B, 0x34353637, 0x30313233, \
0x3C3D3E3F, 0x38393A3B, 0x34353637, 0x30313233 ) )
_mm512_permutexvar_epi8( v, m512_const_64( \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233 ) )

#define mm512_bswap_16( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x3E3F3C3D, 0x3A3B3839, 0x36373435, 0x32333031, \
0x2E2F2C2D, 0x2A2B2829, 0x26272425, 0x22232021, \
0x1E1F1C1D, 0x1A1B1819, 0x16171415, 0x12131011, \
0x0E0F0C0D, 0x0A0B0809, 0x06070405, 0x02030001 ) )
_mm512_permutexvar_epi8( v, m512_const_64( \
0x3E3F3C3D3A3B3839, 0x3637343532333031, \
0x2E2F2C2D2A2B2829, 0x2627242522232021, \
0x1E1F1C1D1A1B1819, 0x1617141512131011, \
0x0E0F0C0D0A0B0809, 0x0607040502030001 ) )

//
// Rotate elements from 2 512 bit vectors in place, source arguments