Jay D Dee
2019-10-10 19:58:34 -04:00
parent 789c8b70bc
commit 72330eb5a7
30 changed files with 4534 additions and 858 deletions

View File

@@ -1,45 +1,11 @@
#if !defined(INTERLEAVE_H__)
#define INTERLEAVE_H__ 1
// philosophical discussion
//
// transitions:
//
// int32 <-> int64
// uint64_t = (uint64_t)int32_lo | ( (uint64_t)int32_hi << 32 )
// Efficient transition and post processing, 32 bit granularity is lost.
// Not practical.
//
// int32 <-> m64
// More complex, 32 bit granularity maintained, limited number of mmx regs.
// int32 <-> int64 <-> m64 might be more efficient.
//
// int32 <-> m128
// Expensive, current implementation.
//
// int32 <-> m256
// Very expensive multi stage, current implementation.
//
// int64/m64 <-> m128
// Efficient, agnostic to native element size. Common.
//
// m128 <-> m256
// Expensive for a single instruction, unavoidable. Common.
//
// Multi stage options
//
// int32 <-> int64 -> m128
// More efficient than insert32, granularity maintained. Common.
//
// int64 <-> m128 -> m256
// Unavoidable, reasonably efficient. Common.
//
// int32 <-> int64 -> m128 -> m256
// Seems inevitable, most efficient despite number of stages. Common.
//
// It seems the best approach is to avoid transitions and use the native type
// of the data: 64 & 32 bit use integer, 128 bit use m128i.
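//
// Illustrative sketch (not part of this header): the int32 -> int64 ->
// m128 -> m256 multi stage transition described above, assuming AVX2 is
// available for the final insert.
//
//   static inline __m256i transition_2x32_to_256( uint32_t lo, uint32_t hi )
//   {
//      uint64_t w = (uint64_t)lo | ( (uint64_t)hi << 32 );   // int32 -> int64
//      __m128i  x = _mm_cvtsi64_si128( (long long)w );       // int64 -> m128
//      return _mm256_inserti128_si256( _mm256_castsi128_si256( x ),
//                                      x, 1 );               // m128 -> m256
//   }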
//////////////////////////////////////////////////////////////////////////
//
// Utilities to interleave and deinterleave multiple data for parallel
// processing using SIMD. Utilities are grouped by data size.
//
////////////////////////////////
//
@@ -262,8 +228,6 @@ static inline void dintrlv_4x32_512( void *dst0, void *dst1, void *dst2,
d0[15] = s[ 60]; d1[15] = s[ 61]; d2[15] = s[ 62]; d3[15] = s[ 63];
}
#undef DLEAVE_4x32
static inline void extr_lane_4x32( void *d, const void *s,
const int lane, const int bit_len )
{
@@ -308,6 +272,7 @@ static inline void mm128_intrlv_4x32x( void *dst, void *src0, void *src1,
}
}
// Double buffered source to reduce latency
static inline void mm128_bswap32_intrlv80_4x32( void *d, void *src )
{
__m128i sx = mm128_bswap_32( casti_m128i( src,0 ) );
@@ -469,15 +434,11 @@ static inline void extr_lane_8x32( void *d, const void *s,
#if defined(__AVX2__)
// There are alignment problems with the source buffer on Windows,
// so the 256 bit bswap can't be used.
static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src )
{
__m256i s0 = mm256_bswap_32( casti_m256i( src,0 ) );
__m256i s1 = mm256_bswap_32( casti_m256i( src,1 ) );
__m128i s2 = mm128_bswap_32( casti_m128i( src,4 ) );
// const __m256i zero = m256_zero;
const __m256i one = m256_one_32;
const __m256i two = _mm256_add_epi32( one, one );
const __m256i three = _mm256_add_epi32( two, one );
@@ -485,7 +446,6 @@ static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src )
casti_m256i( d, 0 ) = _mm256_broadcastd_epi32(
_mm256_castsi256_si128( s0 ) );
// casti_m256i( d, 0 ) = _mm256_permutevar8x32_epi32( s0, m256_zero );
casti_m256i( d, 1 ) = _mm256_permutevar8x32_epi32( s0, one );
casti_m256i( d, 2 ) = _mm256_permutevar8x32_epi32( s0, two );
casti_m256i( d, 3 ) = _mm256_permutevar8x32_epi32( s0, three );
@@ -498,7 +458,6 @@ static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src )
_mm256_add_epi32( four, three ) );
casti_m256i( d, 8 ) = _mm256_broadcastd_epi32(
_mm256_castsi256_si128( s1 ) );
// casti_m256i( d, 8 ) = _mm256_permutevar8x32_epi32( s1, m256_zero );
casti_m256i( d, 9 ) = _mm256_permutevar8x32_epi32( s1, one );
casti_m256i( d,10 ) = _mm256_permutevar8x32_epi32( s1, two );
casti_m256i( d,11 ) = _mm256_permutevar8x32_epi32( s1, three );
@@ -510,8 +469,6 @@ static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src )
casti_m256i( d,15 ) = _mm256_permutevar8x32_epi32( s1,
_mm256_add_epi32( four, three ) );
casti_m256i( d,16 ) = _mm256_broadcastd_epi32( s2 );
// casti_m256i( d,16 ) = _mm256_permutevar8x32_epi32(
// _mm256_castsi128_si256( s2 ), m256_zero );
casti_m256i( d,17 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s2 ), one );
casti_m256i( d,18 ) = _mm256_permutevar8x32_epi32(
@@ -655,7 +612,7 @@ static inline void dintrlv_16x32_512( void *d00, void *d01, void *d02,
#undef DLEAVE_16x32
static inline void extr_lane_16x32( void *d, const void *s,
const int lane, const int bit_len )
const int lane, const int bit_len )
{
((uint32_t*)d)[ 0] = ((uint32_t*)s)[ lane ];
((uint32_t*)d)[ 1] = ((uint32_t*)s)[ lane+16 ];
@@ -689,42 +646,39 @@ static inline void mm512_bswap32_intrlv80_16x32( void *d, void *src )
casti_m512i( d, 0 ) = _mm512_broadcastd_epi32(
_mm512_castsi512_si128( s0 ) );
// casti_m512i( d, 0 ) = _mm512_permutexvar_epi32( s0, m512_zero );
casti_m512i( d, 1 ) = _mm512_permutexvar_epi32( s0, one );
casti_m512i( d, 2 ) = _mm512_permutexvar_epi32( s0, two );
casti_m512i( d, 3 ) = _mm512_permutexvar_epi32( s0, three );
casti_m512i( d, 4 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( two, two ) );
casti_m512i( d, 5 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( three, two ) );
casti_m512i( d, 6 ) = _mm512_permutexvar_epi32( s0, x );
casti_m512i( d, 7 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, one ) );
casti_m512i( d, 8 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, two ) );
casti_m512i( d, 1 ) = _mm512_permutexvar_epi32( one, s0 );
casti_m512i( d, 2 ) = _mm512_permutexvar_epi32( two, s0 );
casti_m512i( d, 3 ) = _mm512_permutexvar_epi32( three, s0 );
casti_m512i( d, 4 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( two, two ), s0 );
casti_m512i( d, 5 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( three, two ), s0 );
casti_m512i( d, 6 ) = _mm512_permutexvar_epi32( x, s0 );
casti_m512i( d, 7 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, one ), s0 );
casti_m512i( d, 8 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, two ), s0 );
x = _mm512_add_epi32( x, three );
casti_m512i( d, 9 ) = _mm512_permutexvar_epi32( s0, x );
casti_m512i( d,10 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, one ) );
casti_m512i( d,11 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, two ) );
casti_m512i( d, 9 ) = _mm512_permutexvar_epi32( x, s0 );
casti_m512i( d,10 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, one ), s0 );
casti_m512i( d,11 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, two ), s0 );
x = _mm512_add_epi32( x, three );
casti_m512i( d,12 ) = _mm512_permutexvar_epi32( s0, x );
casti_m512i( d,13 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, one ) );
casti_m512i( d,14 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, two ) );
casti_m512i( d,15 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, three ) );
casti_m512i( d,12 ) = _mm512_permutexvar_epi32( x, s0 );
casti_m512i( d,13 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, one ), s0 );
casti_m512i( d,14 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, two ), s0 );
casti_m512i( d,15 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, three ), s0 );
casti_m512i( d,16 ) = _mm512_broadcastd_epi32( s1 );
// casti_m512i( d,16 ) = _mm512_permutexvar_epi32(
// _mm512_castsi128_si512( s1 ), m512_zero );
casti_m512i( d,17 ) = _mm512_permutexvar_epi32(
_mm512_castsi128_si512( s1 ), one );
casti_m512i( d,18 ) = _mm512_permutexvar_epi32(
_mm512_castsi128_si512( s1 ), two );
casti_m512i( d,19 ) = _mm512_permutexvar_epi32(
_mm512_castsi128_si512( s1 ), three );
casti_m512i( d,17 ) = _mm512_permutexvar_epi32( one,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d,18 ) = _mm512_permutexvar_epi32( two,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d,19 ) = _mm512_permutexvar_epi32( three,
_mm512_castsi128_si512( s1 ) );
}
#endif // AVX512
@@ -997,27 +951,21 @@ static inline void mm512_bswap32_intrlv80_8x64( void *dst, void *src )
__m512i *d = (__m512i*)dst;
__m512i s0 = mm512_bswap_32( casti_m512i( src, 0 ) );
__m128i s1 = mm128_bswap_32( casti_m128i( src, 4 ) );
// const __m512i zero = m512_zero;
const __m512i one = m512_one_64;
const __m512i two = _mm512_add_epi64( one, one );
const __m512i three = _mm512_add_epi64( two, one );
const __m512i four = _mm512_add_epi64( two, two );
d[0] = _mm512_broadcastq_epi64(
_mm512_castsi512_si128( s0 ) );
// d[0] = _mm512_permutexvar_epi64( s0, m512_zero );
d[1] = _mm512_permutexvar_epi64( s0, one );
d[2] = _mm512_permutexvar_epi64( s0, two );
d[3] = _mm512_permutexvar_epi64( s0, three );
d[4] = _mm512_permutexvar_epi64( s0, four );
d[5] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, one ) );
d[6] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, two ) );
d[7] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, three ) );
d[8] = _mm512_broadcastq_epi64( s1 );
// d[8] = _mm512_permutexvar_epi64(
// _mm512_castsi128_si512( s1 ), m512_zero );
d[9] = _mm512_permutexvar_epi64(
_mm512_castsi128_si512( s1 ), one );
d[0] = _mm512_broadcastq_epi64( _mm512_castsi512_si128( s0 ) );
d[1] = _mm512_permutexvar_epi64( one, s0 );
d[2] = _mm512_permutexvar_epi64( two, s0 );
d[3] = _mm512_permutexvar_epi64( three, s0 );
d[4] = _mm512_permutexvar_epi64( four, s0 );
d[5] = _mm512_permutexvar_epi64( _mm512_add_epi64( four, one ), s0 );
d[6] = _mm512_permutexvar_epi64( _mm512_add_epi64( four, two ), s0 );
d[7] = _mm512_permutexvar_epi64( _mm512_add_epi64( four, three ), s0 );
d[8] = _mm512_broadcastq_epi64( s1 );
d[9] = _mm512_permutexvar_epi64( one, _mm512_castsi128_si512( s1 ) );
}
#endif // AVX512
@@ -1164,6 +1112,44 @@ static inline void dintrlv_4x128_512( void *dst0, void *dst1, void *dst2,
}
// 2x256 (AVX512)
#if defined (__AVX__)
static inline void intrlv_2x256( void *dst, const void *src0,
const void *src1, int bit_len )
{
__m256i *d = (__m256i*)dst;
const __m256i *s0 = (const __m256i*)src0;
const __m256i *s1 = (const __m256i*)src1;
d[ 0] = s0[0]; d[ 1] = s1[0];
if ( bit_len <= 256 ) return;
d[ 2] = s0[1]; d[ 3] = s1[1];
if ( bit_len <= 512 ) return;
d[ 4] = s0[2];
if ( bit_len <= 640 ) return;
d[ 5] = s1[2];
d[ 6] = s0[3]; d[ 7] = s1[3];
}
// No 80 byte dintrlv
static inline void dintrlv_2x256( void *dst0, void *dst1,
const void *src, int bit_len )
{
__m256i *d0 = (__m256i*)dst0;
__m256i *d1 = (__m256i*)dst1;
const __m256i *s = (const __m256i*)src;
d0[0] = s[ 0]; d1[0] = s[ 1];
if ( bit_len <= 256 ) return;
d0[1] = s[ 2]; d1[1] = s[ 3];
if ( bit_len <= 512 ) return;
d0[2] = s[ 4]; d1[2] = s[ 5];
d0[3] = s[ 6]; d1[3] = s[ 7];
}
#endif // AVX
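// Illustrative usage sketch (assumed caller code, not part of this file):
// interleave two independent 512 bit inputs for 2 way parallel hashing,
// then de-interleave the results.
//
//   uint64_t in0[8], in1[8], out0[8], out1[8];
//   uint64_t vdata[16] __attribute__ ((aligned (32)));
//   intrlv_2x256( vdata, in0, in1, 512 );
//   // ... 2 way vectorized hash over vdata ...
//   dintrlv_2x256( out0, out1, vdata, 512 );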
///////////////////////////
//
// Re-interleaving

View File

@@ -19,18 +19,19 @@
//
// Constants are an issue with simd. Simply put, immediate constants don't
// exist. All simd constants either reside in memory or a register and
// must be loaded or generated at run time.
// must be loaded from memory or generated using instructions at run time.
//
// Due to the cost of generating constants it is often more efficient to
// define a local const for repeated references to the same constant.
//
// Some constant values can be generated using shortcuts. Zero for example
// is as simple as XORing any register with itself, and is implemented
// in the setzero intrinsic. These shortcuts must be implemented using ASM
// by the setzero intrinsic. These shortcuts must be implemented using ASM
// due to doing things the compiler would complain about. Another single
// instruction constant is -1, defined below. Others may be added as the need
// arises. Even single instruction constants are less efficient than local
// register variables so the advice above stands.
// register variables so the advice above stands. These pseudo-constants
// do not perform any memory accesses.
//
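// Illustrative sketch (not part of this header), assuming the m128_one_32
// macro defined below: cache a pseudo-constant in a local variable when it
// is referenced repeatedly instead of regenerating it on every use.
//
//   static inline __m128i add1_3x32_example( __m128i a, __m128i b, __m128i c )
//   {
//      const __m128i one = m128_one_32;        // generated once
//      a = _mm_add_epi32( a, one );
//      b = _mm_add_epi32( b, one );
//      c = _mm_add_epi32( c, one );
//      return _mm_xor_si128( a, _mm_xor_si128( b, c ) );
//   }
//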
// One common use for simd constants is as a control index for some simd
// instructions like blend and shuffle. The utilities below do not take this
@@ -40,74 +41,74 @@
#define m128_zero _mm_setzero_si128()
static inline __m128i m128_one_128_fn()
static inline __m128i mm128_one_128_fn()
{
register uint64_t one = 1;
register __m128i a;
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return a;
}
#define m128_one_128 m128_one_128_fn()
#define m128_one_128 mm128_one_128_fn()
static inline __m128i m128_one_64_fn()
static inline __m128i mm128_one_64_fn()
{
register uint64_t one = 1;
register __m128i a;
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x44 );
}
#define m128_one_64 m128_one_64_fn()
#define m128_one_64 mm128_one_64_fn()
static inline __m128i m128_one_32_fn()
static inline __m128i mm128_one_32_fn()
{
register uint32_t one = 1;
register __m128i a;
__m128i a;
const uint32_t one = 1;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_32 m128_one_32_fn()
#define m128_one_32 mm128_one_32_fn()
static inline __m128i m128_one_16_fn()
static inline __m128i mm128_one_16_fn()
{
register uint32_t one = 0x00010001;
register __m128i a;
__m128i a;
const uint32_t one = 0x00010001;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_16 m128_one_16_fn()
#define m128_one_16 mm128_one_16_fn()
static inline __m128i m128_one_8_fn()
static inline __m128i mm128_one_8_fn()
{
register uint32_t one = 0x01010101;
register __m128i a;
__m128i a;
const uint32_t one = 0x01010101;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_8 m128_one_8_fn()
#define m128_one_8 mm128_one_8_fn()
static inline __m128i m128_neg1_fn()
static inline __m128i mm128_neg1_fn()
{
__m128i a;
asm( "pcmpeqd %0, %0\n\t"
: "=x" (a) );
return a;
}
#define m128_neg1 m128_neg1_fn()
#define m128_neg1 mm128_neg1_fn()
// move uint64_t to low bits of __m128i, zeros the rest
static inline __m128i mm128_mov64_128( uint64_t n )
{
register __m128i a;
__m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (n) );
@@ -116,7 +117,7 @@ static inline __m128i mm128_mov64_128( uint64_t n )
static inline __m128i mm128_mov32_128( uint32_t n )
{
register __m128i a;
__m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (n) );
@@ -125,7 +126,7 @@ static inline __m128i mm128_mov32_128( uint32_t n )
static inline uint64_t mm128_mov128_64( __m128i a )
{
register uint64_t n;
uint64_t n;
asm( "movq %1, %0\n\t"
: "=x" (n)
: "r" (a) );
@@ -134,7 +135,7 @@ static inline uint64_t mm128_mov128_64( __m128i a )
static inline uint32_t mm128_mov128_32( __m128i a )
{
register uint32_t n;
uint32_t n;
asm( "movd %1, %0\n\t"
: "=x" (n)
: "r" (a) );
@@ -143,7 +144,7 @@ static inline uint32_t mm128_mov128_32( __m128i a )
static inline __m128i m128_const1_64( const uint64_t n )
{
register __m128i a;
__m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (n) );
@@ -152,7 +153,7 @@ static inline __m128i m128_const1_64( const uint64_t n )
static inline __m128i m128_const1_32( const uint32_t n )
{
register __m128i a;
__m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (n) );
@@ -165,7 +166,7 @@ static inline __m128i m128_const1_32( const uint32_t n )
static inline __m128i m128_const_64( const uint64_t hi, const uint64_t lo )
{
register __m128i a;
__m128i a;
asm( "movq %2, %0\n\t"
"pinsrq $1, %1, %0\n\t"
: "=x" (a)
@@ -173,23 +174,9 @@ static inline __m128i m128_const_64( const uint64_t hi, const uint64_t lo )
return a;
}
/*
static inline __m128i m128_const1_64( const uint64_t n )
{
register __m128i a;
asm( "movq %1, %0\n\t"
"pinsrq $1, %1, %0\n\t"
: "=x"(a)
: "r"(n) );
return a;
}
*/
#else
// #define m128_one_128 _mm_set_epi64x( 0ULL, 1ULL )
#define m128_const_64 _mm_set_epi64x
// #define m128_const1_64 _mm_set1_epi64x
#endif
@@ -310,8 +297,19 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
// AVX512 has implemented bit rotation for 128 bit vectors with
// 64 and 32 bit elements.
//
// Rotate each element of v by c bits
// The compiler doesn't like it when a variable is used for the last arg
// of _mm_rol_epi32; it must be an "8 bit immediate".
// sm3-hash-4way.c fails to compile.
/*
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm128_ror_64( v, c ) _mm_ror_epi64( v, c )
#define mm128_rol_64( v, c ) _mm_rol_epi64( v, c )
#define mm128_ror_32( v, c ) _mm_ror_epi32( v, c )
#define mm128_rol_32( v, c ) _mm_rol_epi32( v, c )
#else
*/
#define mm128_ror_64( v, c ) \
_mm_or_si128( _mm_srli_epi64( v, c ), _mm_slli_epi64( v, 64-(c) ) )
@@ -325,6 +323,8 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
#define mm128_rol_32( v, c ) \
_mm_or_si128( _mm_slli_epi32( v, c ), _mm_srli_epi32( v, 32-(c) ) )
//#endif // AVX512 else
#define mm128_ror_16( v, c ) \
_mm_or_si128( _mm_srli_epi16( v, c ), _mm_slli_epi16( v, 16-(c) ) )
@@ -365,6 +365,22 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
#define mm128_brol( v, c ) \
_mm_or_si128( _mm_slli_si128( v, c ), _mm_srli_si128( v, 16-(c) ) )
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm128_invert_32( v ) _mm_shuffle_epi32( v, 0x1b )
#if defined(__SSSE3__)
#define mm128_invert_16( v ) \
_mm_shuffle_epi8( v, m128_const_64( 0x0100030205040706, \
                                    0x09080b0a0d0c0f0e ) )
#define mm128_invert_8( v ) \
_mm_shuffle_epi8( v, m128_const_64( 0x0001020304050607, \
                                    0x08090a0b0c0d0e0f ) )
#endif // SSSE3
//
// Rotate elements within lanes.

View File

@@ -14,30 +14,32 @@
// is limited because 256 bit vectors are less likely to be used when 512
// is available.
//
// All SIMD constant macros are actually functions containing executable
// code and therefore can't be used as compile time initializers.
#define m256_zero _mm256_setzero_si256()
#define m256_one_256 \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_zero, 1 )
#define m256_one_128 \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_one_128, 1 )
// The set intrinsics load memory resident constants; building the value in
// registers avoids memory. Cost is 4 pinsert + 1 vinsert, estimated 8 clocks latency.
#if defined(__AVX2__)
#define m256_const_128( hi, lo ) \
_mm256_inserti128_si256( _mm256_castsi128_si256( lo ), hi, 1 )
#define m256_const_64( i3, i2, i1, i0 ) \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_const_64( i1, i0 ) ), \
m128_const_64( i3, i2 ), 1 )
m256_const_128( m128_const_64( i3, i2 ), m128_const_64( i1, i0 ) )
/*
#define m256_const_64( i3, i2, i1, i0 ) \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_const_64( i1, i0 ) ), \
m128_const_64( i3, i2 ), 1 )
*/
#else // AVX
#define m256_const_64( i3, i2, i1, i0 ) _mm256_set_epi64x( i3, i2, i1, i0 )
#endif
static inline __m256i m256_const1_64( uint64_t i )
{
register __m128i a;
__m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (i) );
@@ -46,86 +48,140 @@ static inline __m256i m256_const1_64( uint64_t i )
static inline __m256i m256_const1_32( uint32_t i )
{
register __m128i a;
__m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (i) );
return _mm256_broadcastd_epi32( a );
}
static inline __m256i m256_const1_16( uint16_t i )
{
__m128i a;
asm( "movw %1, %0\n\t"
: "=x" (a)
: "r" (i) );
return _mm256_broadcastw_epi16( a );
}
static inline __m256i m256_const1_8( uint8_t i )
{
__m128i a;
asm( "movb %1, %0\n\t"
: "=x" (a)
: "r" (i) );
return _mm256_broadcastb_epi8( a );
}
//
// All SIMD constant macros are actually functions containing executable
// code and therefore can't be used as compile time initializers.
#define m256_zero _mm256_setzero_si256()
#if defined(__AVX2__)
// Don't call the function directly, use the macro to make it appear like
// a constant identifier instead of a function.
// __m256i foo = m256_one_64;
static inline __m256i m256_one_64_fn()
static inline __m256i mm256_one_256_fn()
{
register uint64_t one = 1;
register __m128i a;
__m256i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return a;
}
#define m256_one_256 mm256_one_256_fn()
static inline __m256i mm256_one_128_fn()
{
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastsi128_si256( a );
}
#define m256_one_128 mm256_one_128_fn()
static inline __m256i mm256_one_64_fn()
{
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_64 m256_one_64_fn()
#define m256_one_64 mm256_one_64_fn()
static inline __m256i m256_one_32_fn()
static inline __m256i mm256_one_32_fn()
{
register uint64_t one = 0x0000000100000001;
register __m128i a;
__m128i a;
const uint64_t one = 0x0000000100000001;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_32 m256_one_32_fn()
#define m256_one_32 mm256_one_32_fn()
static inline __m256i m256_one_16_fn()
static inline __m256i mm256_one_16_fn()
{
register uint64_t one = 0x0001000100010001;
register __m128i a;
__m128i a;
const uint64_t one = 0x0001000100010001;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_16 m256_one_16_fn()
#define m256_one_16 mm256_one_16_fn()
static inline __m256i m256_one_8_fn()
static inline __m256i mm256_one_8_fn()
{
register uint64_t one = 0x0101010101010101;
register __m128i a;
__m128i a;
const uint64_t one = 0x0101010101010101;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_8 m256_one_8_fn()
#define m256_one_8 mm256_one_8_fn()
static inline __m256i m256_neg1_fn()
static inline __m256i mm256_neg1_fn()
{
register __m256i a;
__m256i a;
asm( "vpcmpeqq %0, %0, %0\n\t"
: "=x"(a) );
return a;
}
#define m256_neg1 m256_neg1_fn()
#define m256_neg1 mm256_neg1_fn()
#else // AVX
#define m256_one_256 m256_const_64( m128_zero, m128_one ) \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_zero, 1 )
#define m256_one_128 \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_one_128, 1 )
#define m256_one_64 _mm256_set1_epi64x( 1ULL )
#define m256_one_32 _mm256_set1_epi64x( 0x0000000100000001ULL )
#define m256_one_16 _mm256_set1_epi64x( 0x0001000100010001ULL )
#define m256_one_8 _mm256_set1_epi64x( 0x0101010101010101ULL )
// AVX doesn't have inserti128 but insertf128 will do.
static inline __m256i m256_neg1_fn()
static inline __m256i mm256_neg1_fn()
{
__m128i a = m128_neg1;
return _mm256_insertf128_si256( _mm256_castsi128_si256( a ), a, 1 );
}
#define m256_neg1 m256_neg1_fn()
#define m256_neg1 mm256_neg1_fn()
#endif // AVX2 else AVX
@@ -175,7 +231,7 @@ do { \
// Move integer to lower bits of vector, upper bits set to zero.
static inline __m256i mm256_mov64_256( uint64_t n )
{
register __m128i a;
__m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (n) );
@@ -184,14 +240,14 @@ static inline __m256i mm256_mov64_256( uint64_t n )
static inline __m256i mm256_mov32_256( uint32_t n )
{
register __m128i a;
__m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (n) );
return _mm256_castsi128_si256( a );
}
// Move lo bits of vector to integer, hi bits are truncated.
// Return lo bits of vector as integer.
#define mm256_mov256_64( a ) mm128_mov128_64( _mm256_castsi256_si128( a ) )
#define mm256_mov256_32( a ) mm128_mov128_32( _mm256_castsi256_si128( a ) )
@@ -310,10 +366,20 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
// The only bit shift for more than 64 bits is with __int128.
//
// AVX512 has bit rotate for 256 bit vectors with 64 or 32 bit elements
// but is of little value.
//
// Rotate each element of v by c bits
// The compiler doesn't like it when a variable is used for the last arg
// of _mm_rol_epi32; it must be an "8 bit immediate".
/*
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm256_ror_64( v, c ) _mm256_ror_epi64( v, c )
#define mm256_rol_64( v, c ) _mm256_rol_epi64( v, c )
#define mm256_ror_32( v, c ) _mm256_ror_epi32( v, c )
#define mm256_rol_32( v, c ) _mm256_rol_epi32( v, c )
#else
*/
#define mm256_ror_64( v, c ) \
_mm256_or_si256( _mm256_srli_epi64( v, c ), \
_mm256_slli_epi64( v, 64-(c) ) )
@@ -330,6 +396,9 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
_mm256_or_si256( _mm256_slli_epi32( v, c ), \
_mm256_srli_epi32( v, 32-(c) ) )
// #endif // AVX512 else
#define mm256_ror_16( v, c ) \
_mm256_or_si256( _mm256_srli_epi16( v, c ), \
_mm256_slli_epi16( v, 16-(c) ) )
@@ -365,6 +434,19 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
_mm256_set1_epi32( 32 ), c ) ) )
// AVX512 can do 16 bit elements.
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm256_rorv_16( v, c ) \
_mm256_or_si256( \
_mm256_srlv_epi16( v, _mm256_set1_epi16( c ) ), \
_mm256_sllv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )
#define mm256_rolv_16( v, c ) \
_mm256_or_si256( \
_mm256_sllv_epi16( v, _mm256_set1_epi16( c ) ), \
_mm256_srlv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )
#endif // AVX512
//
// Rotate elements accross all lanes.
@@ -403,7 +485,7 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
0x0000000000000007, 0x0000000600000005 )
// AVX512 can do 16 & 8 bit elements.
#if defined(__AVX512VL__)
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// Rotate 256 bit vector by one 16 bit element.
#define mm256_ror_1x16( v ) \
@@ -416,17 +498,50 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
0x000e000d000c000b, 0x000a000900080007, \
0x0006000500040003, 0x000200010000000f ), v )
// Rotate 256 bit vector by one byte.
#define mm256_ror_1x8( v ) m256_const_64( \
0x001f1e1d1c1b1a19, 0x1817161514131211, \
0x100f0e0d0c0b0a09, 0x0807060504030201 )
#if defined (__AVX512VBMI__)
#define mm256_rol_1x8( v ) m256_const_64( \
// Rotate 256 bit vector by one byte.
#define mm256_ror_1x8( v ) _mm256_permutexvar_epi8( m256_const_64( \
0x001f1e1d1c1b1a19, 0x1817161514131211, \
0x100f0e0d0c0b0a09, 0x0807060504030201 ), v )
#define mm256_rol_1x8( v ) _mm256_permutexvar_epi8( m256_const_64( \
0x1e1d1c1b1a191817, 0x161514131211100f, \
0x0e0d0c0b0a090807, 0x060504030201001f )
0x0e0d0c0b0a090807, 0x060504030201001f ), v )
#endif // VBMI
#endif // AVX512
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm256_invert_64( v ) _mm256_permute4x64_epi64( v, 0x1b )
#define mm256_invert_32( v ) _mm256_permutevar8x32_epi32( v, \
                  m256_const_64( 0x0000000000000001, 0x0000000200000003, \
                                 0x0000000400000005, 0x0000000600000007 ) )
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// Invert vector: {7,6,5,4,3,2,1,0} -> {0,1,2,3,4,5,6,7}
#define mm256_invert_16( v ) \
_mm256_permutexvar_epi16( m256_const_64( 0x0000000100020003, \
0x0004000500060007, \
0x00080009000a000b, \
0x000c000d000e000f ), v )
#if defined(__AVX512VBMI__)
#define mm256_invert_8( v ) \
_mm256_permutexvar_epi8( m256_const_64( 0x0001020304050607, \
0x08090a0b0c0d0e0f, \
0x1011121314151617, \
0x18191a1b1c1d1e1f ), v )
#endif // VBMI
#endif // AVX512
//
// Rotate elements within lanes of 256 bit vector.

View File

@@ -1,35 +1,32 @@
#if !defined(SIMD_512_H__)
#define SIMD_512_H__ 1
#if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
//
// Some extensions in AVX512 supporting operations on
// smaller elements in 256 bit vectors.
// AVX-512
//
// The baseline for these utilities is AVX512F, AVX512DQ, AVX512BW
// and AVX512VL, first available in quantity in Skylake-X.
// Some utilities may require additional features available in subsequent
// architectures and are noted.
// Variable rotate, each element rotates by corresponding index.
#define mm256_rorv_16( v, c ) \
_mm256_or_si256( \
_mm256_srlv_epi16( v, _mm256_set1_epi16( c ) ), \
_mm256_sllv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )
#define mm256_rolv_16( v, c ) \
_mm256_or_si256( \
_mm256_sllv_epi16( v, _mm256_set1_epi16( c ) ), \
_mm256_srlv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// Invert vector: {7,6,5,4,3,2,1,0} -> {0,1,2,3,4,5,6,7}
#define mm256_invert_16 ( v ) \
_mm256_permutex_epi16( v, _mm256_set_epi16( 0, 1, 2, 3, 4, 5, 6, 7, \
8, 9,10,11,12,13,14,15 ) )
#define mm256_invert_8( v ) \
_mm256_permutex_epi8( v, _mm256_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, \
8, 9,10,11,12,13,14,15, \
16,17,18,19,20,21,22,23, \
24,25,26,27,28,29,30,31 ) )
// AVX512 intrinsics have a few peculiarities with permutes and shuffles
// that are inconsistent with previous AVX2 implementations.
//
// _mm512_permutex_epi64 only shuffles within 256 bit lanes. Permute
// usually shuffles across all lanes.
//
// permutexvar has args reversed, index is first arg. Previously all
// permutes and shuffles have the source vector first.
//
// _mm512_permutexvar_epi8 requires AVX512-VBMI, larger elements don't.
// It also performs the same op as _mm512_shuffle_epi8.
//
// _mm512_shuffle_epi8 shuffles across the entire 512 bits. Shuffle usually
// doesn't cross 128 bit lane boundaries.
//////////////////////////////////////////////////////////////
//
@@ -40,6 +37,74 @@
//
// Experimental, not fully tested.
//
// Pseudo constants.
//
// Vector constants are not really constants and can't be used as compile time
// initializers. They contain executable instructions to generate values at
// run time. They are very slow. If the same constant will be used repeatedly
// in a function it's better to define it once in a local register variable
// and use the variable for references.
// The simpler the constant, the more efficient its generation. Zero is
// the fastest, then all elements set the same, then different 64 bit
// elements, with different smaller elements the slowest. Caching a
// constant that is used multiple times is always faster.
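//
// Illustrative sketch (not part of this header), simplest and cheapest
// first; hoist any of these into a local variable when referenced more
// than once:
//
//   const __m512i z  = m512_zero;                          // fastest
//   const __m512i k1 = m512_one_64;                        // all elements the same
//   const __m512i kn = m512_const_64( 7,6,5,4,3,2,1,0 );   // distinct 64 bit elements, slowest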
#define m512_const_256( hi, lo ) \
_mm512_inserti64x4( _mm512_castsi256_si512( lo ), hi, 1 )
#define m512_const_128( i3, i2, i1, i0 ) \
_mm512_inserti64x4( _mm512_castsi256_si512( m256_const_128( i1, i0 ) ), \
m256_const_128( i3,i2 ), 1 )
#define m512_const_64( i7, i6, i5, i4, i3, i2, i1, i0 ) \
m512_const_256( m256_const_64( i7,i6,i5,i4 ), \
m256_const_64( i3,i2,i1,i0 ) )
static inline __m512i m512_const1_256( __m256i v )
{
return _mm512_broadcast_i64x4( v );
}
static inline __m512i m512_const1_128( __m128i v )
{
return _mm512_broadcast_i64x2( v );
}
static inline __m512i m512_const1_64( uint64_t i )
{
__m128i a;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r"(i) );
return _mm512_broadcastq_epi64( a );
}
static inline __m512i m512_const1_32( uint32_t i )
{
__m128i a;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r"(i) );
return _mm512_broadcastd_epi32( a );
}
static inline __m512i m512_const1_16( uint16_t i )
{
__m128i a;
asm( "movw %1, %0\n\t"
: "=x"(a)
: "r"(i) );
return _mm512_broadcastw_epi16( a );
}
static inline __m512i m512_const1_8( uint8_t i )
{
__m128i a;
asm( "movb %1, %0\n\t"
: "=x"(a)
: "r"(i) );
return _mm512_broadcastb_epi8( a );
}
//
// Pseudo constants.
@@ -49,89 +114,104 @@
// initialized to zero.
#define m512_zero _mm512_setzero_si512()
/*
#define m512_one_512 _mm512_set_epi64( 0ULL, 0ULL, 0ULL, 0ULL, \
0ULL, 0ULL, 0ULL, 1ULL )
#define m512_one_256 _mm512_set4_epi64( 0ULL, 0ULL, 0ULL, 1ULL )
#define m512_one_128 _mm512_set4_epi64( 0ULL, 1ULL, 0ULL, 1ULL )
//#define m512_one_64 _mm512_set1_epi64( 1ULL )
//#define m512_one_32 _mm512_set1_epi32( 1UL )
//#define m512_one_16 _mm512_set1_epi16( 1U )
//#define m512_one_8 _mm512_set1_epi8( 1U )
//#define m512_neg1 _mm512_set1_epi64( 0xFFFFFFFFFFFFFFFFULL )
#define m512_one_64 _mm512_set1_epi64( 1ULL )
#define m512_one_32 _mm512_set1_epi32( 1UL )
#define m512_one_16 _mm512_set1_epi16( 1U )
#define m512_one_8 _mm512_set1_epi8( 1U )
#define m512_neg1 _mm512_set1_epi64( 0xFFFFFFFFFFFFFFFFULL )
*/
#define m512_const_64( i7, i6, i5, i4, i3, i2, i1, i0 ) \
_mm512_inserti64x4( _mm512_castsi256_si512( m256_const_64( i3,i2,i1,i0 ) ), \
m256_const_64( i7,i6,i5,i4 ), 1 )
static inline __m512i m512_const1_64( uint64_t i )
static inline __m512i mm512_one_512_fn()
{
register __m128i a;
__m512i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return a;
}
#define m512_one_512 mm512_one_512_fn()
static inline __m512i mm512_one_256_fn()
{
__m256i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r"(i) );
: "r" (one) );
return _mm512_broadcast_i64x4( a );
}
#define m512_one_256 mm512_one_256_fn()
static inline __m512i mm512_one_128_fn()
{
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcast_i64x2( a );
}
#define m512_one_128 mm512_one_128_fn()
static inline __m512i mm512_one_64_fn()
{
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcastq_epi64( a );
}
#define m512_one_64 mm512_one_64_fn()
static inline __m512i m512_one_64_fn()
static inline __m512i mm512_one_32_fn()
{
__m512i a;
asm( "vpxorq %0, %0, %0\n\t"
"vpcmpeqd %%zmm1, %%zmm1, %%zmm1\n\t"
"vpsubq %%zmm1, %0, %0\n\t"
:"=x"(a)
:
: "zmm1" );
return a;
__m128i a;
const uint64_t one = 0x0000000100000001;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcastq_epi64( a );
}
#define m512_one_64 m512_one_64_fn()
#define m512_one_32 mm512_one_32_fn()
static inline __m512i m512_one_32_fn()
static inline __m512i mm512_one_16_fn()
{
__m512i a;
asm( "vpxord %0, %0, %0\n\t"
"vpcmpeqd %%zmm1, %%zmm1, %%zmm1\n\t"
"vpsubd %%zmm1, %0, %0\n\t"
:"=x"(a)
:
: "zmm1" );
return a;
__m128i a;
const uint64_t one = 0x0001000100010001;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcastq_epi64( a );
}
#define m512_one_32 m512_one_32_fn()
#define m512_one_16 mm512_one_16_fn()
static inline __m512i m512_one_16_fn()
static inline __m512i mm512_one_8_fn()
{
__m512i a;
asm( "vpxord %0, %0, %0\n\t"
"vpcmpeqd %%zmm1, %%zmm1, %%zmm1\n\t"
"vpsubw %%zmm1, %0, %0\n\t"
:"=x"(a)
:
: "zmm1" );
return a;
__m128i a;
const uint64_t one = 0x0101010101010101;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcastq_epi64( a );
}
#define m512_one_16 m512_one_16_fn()
#define m512_one_8 mm512_one_8_fn()
static inline __m512i m512_one_8_fn()
{
__m512i a;
asm( "vpxord %0, %0, %0\n\t"
"vpcmpeqd %%zmm1, %%zmm1, %%zmm1\n\t"
"vpsubb %%zmm1, %0, %0\n\t"
:"=x"(a)
:
: "zmm1" );
return a;
}
#define m512_one_8 m512_one_8_fn()
static inline __m512i m512_neg1_fn()
static inline __m512i mm512_neg1_fn()
{
__m512i a;
asm( "vpcmpeqq %0, %0, %0\n\t"
:"=x"(a) );
return a;
}
#define m512_neg1 m512_neg1_fn()
#define m512_neg1 mm512_neg1_fn()
//
@@ -142,6 +222,7 @@ static inline __m512i m512_neg1_fn()
#define mm512_negate_32( x ) _mm512_sub_epi32( m512_zero, x )
#define mm512_negate_16( x ) _mm512_sub_epi16( m512_zero, x )
// More efficient to use cast to extract low lanes, it's free.
#define mm256_extr_lo256_512( a ) _mm512_castsi512_si256( a )
#define mm256_extr_hi256_512( a ) _mm512_extracti64x4_epi64( a, 1 )
@@ -168,7 +249,7 @@ static inline __m512i m512_neg1_fn()
#define casto_m512i(p,o) (((__m512i*)(p))+(o))
// Add 4 values, fewer dependencies than sequential addition.
// Sum 4 values, fewer dependencies than sequential addition.
#define mm512_add4_64( a, b, c, d ) \
_mm512_add_epi64( _mm512_add_epi64( a, b ), _mm512_add_epi64( c, d ) )
@@ -186,16 +267,32 @@ static inline __m512i m512_neg1_fn()
_mm512_xor_si512( _mm512_xor_si512( a, b ), _mm512_xor_si512( c, d ) )
// Vector size conversion
#define mm256_extr_lo256_512( a ) _mm512_castsi512_si256( a )
#define mm256_extr_hi256_512( a ) _mm512_extracti64x4_epi64( a, 1 )
#define mm512_concat_256( hi, lo ) \
_mm512_inserti64x4( _mm512_castsi256_si512( lo ), hi, 1 )
// Horizontal vector testing
#define mm512_allbits0( a ) _mm512_cmpeq_epi64_mask( a, m512_zero )
#define mm512_allbits1( a ) _mm512_cmpeq_epi64_mask( a, m512_neg1 )
#define mm512_anybits0( a ) _mm512_cmpneq_epi64_mask( a, m512_neg1 )
#define mm512_anybits1( a ) _mm512_cmpneq_epi64_mask( a, m512_zero )
//
// Bit rotations.
// AVX512F has built-in bit fixed and variable rotation for 64 & 32 bit
// elements. There is no bit rotation or shift for larger elements.
// AVX512F has built-in fixed and variable bit rotation for 64 & 32 bit
// elements and can be called directly.
//
// _mm512_rol_epi64, _mm512_ror_epi64, _mm512_rol_epi32, _mm512_ror_epi32
// _mm512_rolv_epi64, _mm512_rorv_epi64, _mm512_rolv_epi32, _mm512_rorv_epi32
//
// Here is a bit rotate for 16 bit elements:
// Here is a fixed bit rotate for 16 bit elements:
#define mm512_ror_16( v, c ) \
_mm512_or_si512( _mm512_srli_epi16( v, c ), \
_mm512_slli_epi16( v, 16-(c) ) )
@@ -203,6 +300,36 @@ static inline __m512i m512_neg1_fn()
_mm512_or_si512( _mm512_slli_epi16( v, c ), \
_mm512_srli_epi16( v, 16-(c) ) )
// Rotations using a vector control index are very slow due to the overhead
// of generating the index vector. Repeated rotations using the same index
// are better handled by the calling function, where the index only needs
// to be generated once and can then be reused very efficiently.
// Permutes and shuffles using an immediate index are significantly faster.
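//
// Illustrative sketch (not part of this header): generate a vector rotate
// index once in the caller and reuse it, rather than regenerating it on
// every iteration.
//
//   const __m512i ror1x16_idx = m512_const_64(
//                          0x0000001F001E001D, 0x001C001B001A0019,
//                          0x0018001700160015, 0x0014001300120011,
//                          0x0010000F000E000D, 0x000C000B000A0009,
//                          0x0008000700060005, 0x0004000300020001 );
//   for ( int i = 0; i < 8; i++ )
//      x[i] = _mm512_permutexvar_epi16( ror1x16_idx, x[i] );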
//
// Swap bytes in vector elements, vectorized endian conversion.
#define mm512_bswap_64( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x38393A3B3C3D3E3F, 0x3031323334353637, \
0x28292A2B2C2D2E2F, 0x2021222324252627, \
0x18191A1B1C1D1E1F, 0x1011121314151617, \
0x08090A0B0C0D0E0F, 0x0001020304050607 ) )
#define mm512_bswap_32( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233 ) )
#define mm512_bswap_16( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3F3C3D3A3B3839, 0x3637343532333031, \
0x2E2F2C2D2A2B2829, 0x2627242522232021, \
0x1E1F1C1D1A1B1819, 0x1617141512131011, \
0x0E0F0C0D0A0B0809, 0x0607040502030001 ) )
//
// Rotate elements in 512 bit vector.
@@ -222,60 +349,57 @@ static inline __m512i m512_neg1_fn()
#define mm512_ror_x32( v, n ) _mm512_alignr_epi32( v, v, n )
#define mm512_ror_1x16( v ) \
_mm512_permutexvar_epi16( v, m512_const_64( \
_mm512_permutexvar_epi16( m512_const_64( \
0x0000001F001E001D, 0x001C001B001A0019, \
0X0018001700160015, 0X0014001300120011, \
0X0010000F000E000D, 0X000C000B000A0009, \
0X0008000700060005, 0X0004000300020001 ) )
0X0008000700060005, 0X0004000300020001 ), v )
#define mm512_rol_1x16( v ) \
_mm512_permutexvar_epi16( v, m512_const_64( \
_mm512_permutexvar_epi16( m512_const_64( \
0x001E001D001C001B, 0x001A001900180017, \
0X0016001500140013, 0X001200110010000F, \
0X000E000D000C000B, 0X000A000900080007, \
0X0006000500040003, 0X000200010000001F ) )
0X0006000500040003, 0X000200010000001F ), v )
#define mm512_ror_1x8( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x003F3E3D3C3B3A39, 0x3837363534333231, \
0x302F2E2D2C2B2A29, 0x2827262524232221, \
0x201F1E1D1C1B1A19, 0x1817161514131211, \
0x100F0E0D0C0B0A09, 0x0807060504030201 ) )
#define mm512_rol_1x8( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3B3A393837, 0x363534333231302F, \
0x2E2D2C2B2A292827, 0x262524232221201F, \
0x1E1D1C1B1A191817, 0x161514131211100F, \
0x0E0D0C0B0A090807, 0x060504030201003F ) )
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm512_invert_128( v ) _mm512_permute4f128_epi32( a, 0x1b )
#define mm512_invert_128( v ) _mm512_shuffle_i64x2( v, v, 0x1b )
#define mm512_invert_64( v ) \
_mm512_permutex_epi64( v, m512_const_64( 0,1,2,3,4,5,6,7 ) )
_mm512_permutexvar_epi64( m512_const_64( 0,1,2,3,4,5,6,7 ), v )
#define mm512_invert_32( v ) \
_mm512_permutexvar_epi32( v, m512_const_64( \
_mm512_permutexvar_epi32( m512_const_64( \
0x0000000000000001,0x0000000200000003, \
0x0000000400000005,0x0000000600000007, \
0x0000000800000009,0x0000000a0000000b, \
0x0000000c0000000d,0x0000000e0000000f ) )
0x0000000c0000000d,0x0000000e0000000f ), v )
#define mm512_invert_16( v ) \
_mm512_permutexvar_epi16( v, m512_const_64( \
_mm512_permutexvar_epi16( m512_const_64( \
0x0000000100020003, 0x0004000500060007, \
0x00080009000A000B, 0x000C000D000E000F, \
0x0010001100120013, 0x0014001500160017, \
0x00180019001A001B, 0x001C001D001E001F ) )
0x00180019001A001B, 0x001C001D001E001F ), v )
#define mm512_invert_8( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x0001020304050607, 0x08090A0B0C0D0E0F, \
0x1011121314151617, 0x18191A1B1C1D1E1F, \
0x2021222324252627, 0x28292A2B2C2D2E2F, \
@@ -293,46 +417,46 @@ static inline __m512i m512_neg1_fn()
// Rotate 256 bit lanes by one 32 bit element
#define mm512_ror1x32_256( v ) \
_mm512_permutexvar_epi32( v, m512_const_64( \
_mm512_permutexvar_epi32( m512_const_64( \
0x000000080000000f, 0x0000000e0000000d, \
0x0000000c0000000b, 0x0000000a00000009, \
0x0000000000000007, 0x0000000600000005, \
0x0000000400000003, 0x0000000200000001 ) )
0x0000000400000003, 0x0000000200000001 ), v )
#define mm512_rol1x32_256( v ) \
_mm512_permutexvar_epi32( v, m512_const_64( \
_mm512_permutexvar_epi32( m512_const_64( \
0x0000000e0000000d, 0x0000000c0000000b, \
0x0000000a00000009, 0x000000080000000f, \
0x0000000600000005, 0x0000000400000003, \
0x0000000200000001, 0x0000000000000007 ) )
0x0000000200000001, 0x0000000000000007 ), v )
#define mm512_ror1x16_256( v ) \
_mm512_permutexvar_epi16( v, m512_const_64( \
_mm512_permutexvar_epi16( m512_const_64( \
0x0010001F001E001D, 0x001C001B001A0019, \
0x0018001700160015, 0x0014001300120011, \
0x0000000F000E000D, 0x000C000B000A0009, \
0x0008000700060005, 0x0004000300020001 ) )
0x0008000700060005, 0x0004000300020001 ), v )
#define mm512_rol1x16_256( v ) \
_mm512_permutexvar_epi16( v, m512_const_64( \
_mm512_permutexvar_epi16( m512_const_64( \
0x001E001D001C001B, 0x001A001900180017, \
0x0016001500140013, 0x001200110000000F, \
0x000E000D000C000B, 0x000A000900080007, \
0x0006000500040003, 0x000200010000001F ) )
0x0006000500040003, 0x000200010000001F ), v )
#define mm512_ror1x8_256( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x203F3E3D3C3B3A39, 0x3837363534333231, \
0x302F2E2D2C2B2A29, 0x2827262524232221, \
0x001F1E1D1C1B1A19, 0x1817161514131211, \
0x100F0E0D0C0B0A09, 0x0807060504030201 ) )
#define mm512_rol1x8_256( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3B3A393837, 0x363534333231302F, \
0x2E2D2C2B2A292827, 0x262524232221203F, \
0x1E1D1C1B1A191817, 0x161514131211100F, \
0x0E0D0C0B0A090807, 0x060504030201001F ) )
0x0E0D0C0B0A090807, 0x060504030201001F ))
//
// Rotate elements within 128 bit lanes of 512 bit vector.
@@ -345,28 +469,28 @@ static inline __m512i m512_neg1_fn()
#define mm512_rol1x32_128( v ) _mm512_shuffle_epi32( v, 0x93 )
#define mm512_ror1x16_128( v ) \
_mm512_permutexvar_epi16( v, m512_const_64( \
_mm512_permutexvar_epi16( m512_const_64( \
0x0018001F001E001D, 0x001C001B001A0019, \
0x0010001700160015, 0x0014001300120011, \
0x0008000F000E000D, 0x000C000B000A0009, \
0x0000000700060005, 0x0004000300020001 ) )
0x0000000700060005, 0x0004000300020001 ), v )
#define mm512_rol1x16_128( v ) \
_mm512_permutexvar_epi16( v, m512_const_64( \
_mm512_permutexvar_epi16( m512_const_64( \
0x001E001D001C001B, 0x001A00190018001F, \
0x0016001500140013, 0x0012001100100017, \
0x000E000D000C000B, 0x000A00090008000F, \
0x0006000500040003, 0x0002000100000007 ) )
0x0006000500040003, 0x0002000100000007 ), v )
#define mm512_ror1x8_128( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x303F3E3D3C3B3A39, 0x3837363534333231, \
0x202F2E2D2C2B2A29, 0x2827262524232221, \
0x101F1E1D1C1B1A19, 0x1817161514131211, \
0x000F0E0D0C0B0A09, 0x0807060504030201 ) )
#define mm512_rol1x8_128( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3B3A393837, 0x363534333231303F, \
0x2E2D2C2B2A292827, 0x262524232221202F, \
0x1E1D1C1B1A191817, 0x161514131211101F, \
@@ -387,32 +511,30 @@ static inline __m512i m512_neg1_fn()
// Swap 32 bit elements in each 64 bit lane
#define mm512_swap32_64( v ) _mm512_shuffle_epi32( v, 0xb1 )
// _mm512_set_epi8 doesn't seem to work
// Rotate each 64 bit lane by one 16 bit element.
#define mm512_ror1x16_64( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
0x39383F3E3D3C3B3A, 0x3130373635343332, \
0x29282F2E2D2C2B2A, 0x2120272625242322, \
0x19181F1E1D1C1B1A, 0x1110171615141312, \
0x09080F0E0D0C0B0A, 0x0100070605040302 ) )
_mm512_permutexvar_epi16( m512_const_64( \
0x001c001f001e001d, 0x0018001b001a0019, \
0x0014001700160015, 0x0010001300120011, \
0x000c000f000e000d, 0x0008000b000a0009, \
0x0004000700060005, 0x0000000300020001 ), v )
#define mm512_rol1x16_64( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
0x3D3C3B3A39383F3E, 0x3534333231303736 \
0x2D2C2B2A29282F2E, 0x2524232221202726 \
0x1D1C1B1A19181F1E, 0x1514131211101716 \
0x0D0C0B0A09080F0E, 0x0504030201000706 ) )
_mm512_permutexvar_epi16( m512_const_64( \
0x001e001d001c001f, 0x001a00190018001b, \
0x0016001500140017, 0x0012001100100013, \
0x000e000d000c000f, 0x000a00090008000b, \
0x0006000500040007, 0x0002000100000003 ), v )
// Rotate each 64 bit lane by one byte.
#define mm512_ror1x8_64( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x383F3E3D3C3B3A39, 0x3037363534333231, \
0x282F2E2D2C2B2A29, 0x2027262524232221, \
0x181F1E1D1C1B1A19, 0x1017161514131211, \
0x080F0E0D0C0B0A09, 0x0007060504030201 ) )
#define mm512_rol1x8_64( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3B3A39383F, 0x3635343332313037, \
0x2E2D2C2B2A29282F, 0x2625242322212027, \
0x1E1D1C1B1A19181F, 0x1615141312111017, \
@@ -422,55 +544,31 @@ static inline __m512i m512_neg1_fn()
// Rotate elements within 32 bit lanes.
#define mm512_swap16_32( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
0x001D001C001F001E, 0x00190018001B001A, \
0x0015001400170016, 0x0011001000130012, \
0x000D000C000F000E, 0x00190008000B000A, \
0x0005000400070006, 0x0011000000030002 ) )
_mm512_permutexvar_epi16( m512_const_64( \
0x001e001f001c001d, 0x001a001b00180019, \
0x0016001700140015, 0x0012001300100011, \
0x000e000f000c000d, 0x000a000b00080009, \
0x0006000700040005, 0x0002000300000001 ), v )
#define mm512_ror1x8_32( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3C3F3E3D383B3A39, 0x3437363530333231, \
0x2C2F2E2D282B2A29, 0x2427262520232221, \
0x1C1F1E1D181B1A19, 0x1417161510131211, \
0x0C0F0E0D080B0A09, 0x0407060500030201 ) )
0x0C0F0E0D080B0A09, 0x0407060500030201 ))
#define mm512_rol1x8_32( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3F3A39383B, 0x3635343732313033, \
0x2E2D2C2F2A29282B, 0x2625242722212023, \
0x1E1D1C1F1A19181B, 0x1615141712111013, \
0x0E0D0C0F0A09080B, 0x0605040702010003 ) )
//
// Swap bytes in vector elements, vectorized bswap.
#define mm512_bswap_64( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
0x38393A3B3C3D3E3F, 0x2031323334353637, \
0x28292A2B2C2D2E2F, 0x2021222334353637, \
0x18191A1B1C1D1E1F, 0x1011121314151617, \
0x08090A0B0C0D0E0F, 0x0001020304050607 ) )
#define mm512_bswap_32( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233 ) )
#define mm512_bswap_16( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
0x3E3F3C3D3A3B3839, 0x3637343532333031, \
0x2E2F2C2D2A2B2829, 0x2627242522232021, \
0x1E1F1C1D1A1B1819, 0x1617141512131011, \
0x0E0F0C0D0A0B0809, 0x0607040502030001 ) )
//
// Rotate elements from 2 512 bit vectors in place, source arguments
// are overwritten.
// These can all be done with 2 permutex2var instructions but they are
// slower than either xor or alignr.
// slower than either xor or alignr and require AVX512VBMI.
#define mm512_swap512_1024(v1, v2) \
v1 = _mm512_xor_si512(v1, v2); \

View File

@@ -1,5 +1,5 @@
#if !defined(SIMD_SCALAR_H__)
#define SIMD_SCALAR_H__ 1
#if !defined(SIMD_INT_H__)
#define SIMD_INT_H__ 1
///////////////////////////////////
//
@@ -13,6 +13,8 @@
// Some utilities are also provided for smaller integers, most notably
// bit rotation.
// MMX has no extract instruction for 32 bit elements so it's done with
// integer ops: lo is a trivial cast, hi is a simple shift.
// Input may be uint64_t or __m64, returns uint32_t.
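// Illustrative sketch (hypothetical names, for illustration only):
//
//   #define u64_extr_lo32_example( a ) ( (uint32_t)( (uint64_t)(a) ) )
//   #define u64_extr_hi32_example( a ) ( (uint32_t)( (uint64_t)(a) >> 32 ) )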
@@ -56,18 +58,45 @@ static inline void memset_zero_64( uint64_t *src, int n )
static inline void memset_64( uint64_t *dst, const uint64_t a, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }
#if defined (GCC_INT128)
///////////////////////////////////////
//
// 128 bit integers
//
// 128 bit integers are inefficient and not a shortcut for __m128i.
// Native type __int128 supported starting with GCC-4.8.
//
// __int128 uses two 64 bit GPRs to hold the data. The main benefits are
// for 128 bit arithmetic. Vectors are preferred when 128 bit arith
// is not required. int128 also works better with other integer sizes.
// Vectors benefit from wider registers.
//
// For safety use typecasting on all numeric arguments.
//
// Use typecasting for conversion to/from 128 bit vector:
// __m128i v128 = (__m128i)my_int128;
// __m256i v256 = _mm256_set_m128i( (__m128i)my_int128, (__m128i)my_int128 );
// my_int128 = (uint128_t)_mm256_extracti128_si256( v256, 1 );
// No real need or use.
//#define u128_neg1 ((uint128_t)(-1))
// Compiler check for __int128 support
// Configure also has a test for int128.
#if ( __GNUC__ > 4 ) || ( ( __GNUC__ == 4 ) && ( __GNUC_MINOR__ >= 8 ) )
#define GCC_INT128 1
#endif
// useful for making constants.
#if !defined(GCC_INT128)
#warning "__int128 not supported, requires GCC-4.8 or newer."
#endif
#if defined(GCC_INT128)
// Familiar looking type names
typedef __int128 int128_t;
typedef unsigned __int128 uint128_t;
// Maybe useful for making constants.
#define mk_uint128( hi, lo ) \
( ( (uint128_t)(hi) << 64 ) | ( (uint128_t)(lo) ) )
@@ -92,6 +121,6 @@ static inline void memset_64( uint64_t *dst, const uint64_t a, int n )
#endif // GCC_INT128
#endif // SIMD_SCALAR_H__
#endif // SIMD_INT_H__

View File

@@ -1,389 +0,0 @@
//////////////////////////////////////
//
// Type abstraction overlays designed for use in highly optimized
// straight line code operating on array structures. It uses direct
// struct member access instead of indexing to access array elements.
// Ex: array.u32_3 instead of array[3].
//
// Vector types are used to represent arrays. 64 and 128 bit vectors have
// corresponding 64 and 128 bit integer types.
//
// Data accesses are not tied to memory as arrays are. These structures
// can operate comfortably as register variables.
//
// Although the abstraction makes for transparent usage there is overhead.
// Extra move instructions are required when an operation requires a
// different register type. Additionally 128 bit operations, uint128_t
// and AES, can't be done in parallel with a 256 bit or larger vector.
// They require additional move instructions on top of the lack of
// improvement from parallelism.
//
// Move instruction overhead is required when moving among gpr, mmx
// and xmm registers. The number of extra moves is usually the number
// of elements in the vector. If both are the same size only one move
// is required. The number is doubled if the data is moved back.
//
// xmm and ymm registers are special, they are aliased. xmm registers
// overlay the lower 128 bits of the ymm registers. Accessing the data
// in the lower half of a ymm register by an xmm argument is free.
// The upper 128 bits need to be extracted and inserted like with other
// different sized data types.
//
// Integer types can be converted to differently sized integers without
// penalty.
//
// Conversions with penalty should be avoided as much as possible by grouping
// operations requiring the same register set.
//
// There are two algorithms for extracting and inserting data.
//
// There is the straightforward iterative method where each element is
// extracted or inserted in turn. The compiler evidently takes a different
// approach, based on the assembly code generated by a set intrinsic.
// To extract 64 bit or smaller elements from a 256 bit vector it
// first extracts the upper 128 bits into a second xmm register. This
// eliminates a dependency between the upper and lower elements, allowing
// the CPU more opportunity at multiple operations per clock.
// This adds one additional instruction to the process. With AVX512
// another stage is added by first splitting the 512 bit vector into
// 2 256 bit vectors.
//
// xmm/ymm aliasing makes accessing low half trivial and without cost.
// Accessing the upper half requires a move from the upper half of
// the source register to the lower half of the destination.
// It's a bigger issue with GPRs as there is no aliasing.
//
// Theoretically memory resident data could bypass the move and load
// the data directly into the desired register type. However this
// ignores the overhead to ensure coherency between register and memory
// which is significantly more.
//
// Overlay avoids pointer dereferences and favours register move over
// memory load, notwithstanding compiler optimization.
//
// The syntax is ugly but can be abstracted with macros.
// Universal 64 bit overlay
// Avoids arrays and pointers, suitable as register variable.
// Conversions are transparent but not free, cost is one MOV instruction.
// Facilitates manipulating 32 bit data in 64 bit pairs.
// Allows full use of 64 bit registers for 32 bit data, effectively doubling
// the size of the register set.
// Potentially up to 50% reduction in instructions depending on rate of
// conversion.
///////////////////////////////////////////////////////
//
// 128 bit integer
//
// Native type __int128 supported starting with GCC-4.8.
//
// __int128 uses two 64 bit GPRs to hold the data. The main benefits are
// for 128 bit arithmetic. Vectors are preferred when 128 bit arith
// is not required. int128 also works better with other integer sizes.
// Vectors benefit from wider registers.
//
// For safety use typecasting on all numeric arguments.
//
// Use typecasting for conversion to/from 128 bit vector:
// __m128i v128 = (__m128i)my_int128;
// __m256i v256 = _mm256_set_m128i( (__m128i)my_int128, (__m128i)my_int128 );
// my_int128 = (uint128_t)_mm256_extracti128_si256( v256, 1 );
// Compiler check for __int128 support
// Configure also has a test for int128.
#if ( __GNUC__ > 4 ) || ( ( __GNUC__ == 4 ) && ( __GNUC_MINOR__ >= 8 ) )
#define GCC_INT128 1
#endif
#if !defined(GCC_INT128)
#warning "__int128 not supported, requires GCC-4.8 or newer."
#endif
#if defined(GCC_INT128)
// Familiar looking type names
typedef __int128 int128_t;
typedef unsigned __int128 uint128_t;
#endif
/////////////////////////////////////
//
// MMX 64 bit vector
//
// Emulates uint32_t[2]
struct _regarray_u32x2
{
uint32_t _0; uint32_t _1;
};
typedef struct _regarray_u32x2 regarray_u32x2;
// Emulates uint16_t[4]
struct _regarray_u16x4
{
uint16_t _0; uint16_t _1; uint16_t _2; uint16_t _3;
};
typedef struct _regarray_u16x4 regarray_u16x4;
// Emulates uint8_t[8]
struct _regarray_u8x8
{
uint8_t _0; uint8_t _1; uint8_t _2; uint8_t _3;
uint8_t _4; uint8_t _5; uint8_t _6; uint8_t _7;
};
typedef struct _regarray_u8x8 regarray_u8x8;
// universal 64 bit overlay
union _regarray_64
{
regarray_u32x2 u32_; // uint32_t[2]
regarray_u16x4 u16_; // uint16_t[4]
regarray_u8x8 u8_; // uint8_t[8]
uint64_t u64;
__m64 v64;
};
typedef union _regarray_64 regarray_64;
/////
//
// SSE2
// Universal 128 bit overlay
//
// Avoids arrays and pointers, suitable as register variable.
// Designed for speed in straight line code with no loops.
//
// Conversions are transparent but not free, cost is one MOV instruction
// in each direction, except for lower half of ymm to/from xmm which are
// free.
//
// Facilitates two dimensional vectoring.
//
// 128 bit integer and AES can't be done in parallel. AES suffers extraction
// and insertion of the upper 128 bits. uint128_t suffers 4 times the cost
// with 2 64 bit extractions and 2 insertions for each 128 bit lane with
// single stage ymm <--> gpr for a total of 8 moves.
//
// Two stage conversion is possible which helps CPU instruction scheduling
// by removing a register dependency between the upper and lower 128 at the
// cost of two extra instructions (128 bit extract and insert). The compiler
// seems to prefer the 2 staged approach when using the set intrinsic.
// Use macros to simplify array access emulation.
// emulated array type: uint64_t a[4];
// array indexing: a[0], a[1]
// overlay emulation: a.u64_0, a.u64_1
// without macro: a.u64_._0, a.u64_._1
struct _regarray_u64x2
{
uint64_t _0; uint64_t _1;
};
typedef struct _regarray_u64x2 regarray_u64x2;
struct _regarray_v64x2
{
__m64 _0; __m64 _1;
};
typedef struct _regarray_v64x2 regarray_v64x2;
struct _regarray_u32x4
{
uint32_t _0; uint32_t _1; uint32_t _2; uint32_t _3;
};
typedef struct _regarray_u32x4 regarray_u32x4;
struct _regarray_u16x8
{
uint16_t _0; uint16_t _1; uint16_t _2; uint16_t _3;
uint16_t _4; uint16_t _5; uint16_t _6; uint16_t _7;
};
typedef struct _regarray_u16x8 regarray_u16x8;
struct _regarray_u8x16
{
uint8_t _0; uint8_t _1; uint8_t _2; uint8_t _3;
uint8_t _4; uint8_t _5; uint8_t _6; uint8_t _7;
uint8_t _8; uint8_t _9; uint8_t _a; uint8_t _b;
uint8_t _c; uint8_t _d; uint8_t _e; uint8_t _f;
};
typedef struct _regarray_u8x16 regarray_u8x16;
union _register_array_m128v
{
#if defined(GCC_INT128)
uint128_t u128;
#endif
__m128i v128;
regarray_u64x2 u64_; // uint64_t[2]
regarray_v64x2 v64_; // __m64[2]
regarray_u32x4 u32_; // uint32_t[4]
regarray_u16x8 u16_; // uint16_t[8]
regarray_u8x16 u8_; // uint8_t[16]
};
typedef union _register_array_m128v register_array_m128v;
///////////////////
//
// AVX2
//
struct _regarray_v128x2
{
__m128i _0; __m128i _1;
};
typedef struct _regarray_v128x2 regarray_v128x2;
struct _regarray_u128x2
{
uint128_t _0; uint128_t _1;
};
typedef struct _regarray_u128x2 regarray_u128x2;
struct _regarray_u64x4
{
uint64_t _0; uint64_t _1; uint64_t _2; uint64_t _3;
};
typedef struct _regarray_u64x4 regarray_u64x4;
struct _regarray_v64x4
{
__m64 _0; __m64 _1; __m64 _2; __m64 _3;
};
typedef struct _regarray_v64x4 regarray_v64x4;
struct _regarray_u32x8
{
uint32_t _0; uint32_t _1; uint32_t _2; uint32_t _3;
uint32_t _4; uint32_t _5; uint32_t _6; uint32_t _7;
};
typedef struct _regarray_u32x8 regarray_u32x8;
struct _regarray_u16x16
{
uint16_t _0; uint16_t _1; uint16_t _2; uint16_t _3;
uint16_t _4; uint16_t _5; uint16_t _6; uint16_t _7;
uint16_t _8; uint16_t _9; uint16_t _a; uint16_t _b;
uint16_t _c; uint16_t _d; uint16_t _e; uint16_t _f;
};
typedef struct _regarray_u16x16 regarray_u16x16;
struct _regarray_u8x32
{
uint8_t _00; uint8_t _01; uint8_t _02; uint8_t _03;
uint8_t _04; uint8_t _05; uint8_t _06; uint8_t _07;
uint8_t _08; uint8_t _09; uint8_t _0a; uint8_t _0b;
uint8_t _0c; uint8_t _0d; uint8_t _0e; uint8_t _0f;
uint8_t _10; uint8_t _11; uint8_t _12; uint8_t _13;
uint8_t _14; uint8_t _15; uint8_t _16; uint8_t _17;
uint8_t _18; uint8_t _19; uint8_t _1a; uint8_t _1b;
uint8_t _1c; uint8_t _1d; uint8_t _1e; uint8_t _1f;
};
typedef struct _regarray_u8x32 regarray_u8x32;
union _regarray_v256
{
__m256i v256;
#if defined(GCC_INT128)
regarray_u128x2 u128_; // uint128_t[2]
#endif
regarray_v128x2 v128_; // __m128i[2]
regarray_v64x4 v64_;
regarray_u64x4 u64_;
regarray_u32x8 u32_;
regarray_u16x16 u16_;
regarray_u8x32 u8_;
};
typedef union _regarray_v256 regarray_v256;
////////////
//
// Abstraction macros to allow easy readability.
// Users may define their own list to suit their preferences
// such as, upper case hex, leading zeros, multidimensional,
// alphabetic, day of week, etc..
#define v128_0 v128_._0
#define v128_1 v128_._1
#define u128_0 u128_._0
#define u128_1 u128_._1
#define v64_0 v64_._0
#define v64_1 v64_._1
#define v64_2 v64_._2
#define v64_3 v64_._3
#define u64_0 u64_._0
#define u64_1 u64_._1
#define u64_2 u64_._2
#define u64_3 u64_._3
#define u32_0 u32_._0
#define u32_1 u32_._1
#define u32_2 u32_._2
#define u32_3 u32_._3
#define u32_4 u32_._4
#define u32_5 u32_._5
#define u32_6 u32_._6
#define u32_7 u32_._7
#define u16_0 u16_._0
#define u16_1 u16_._1
#define u16_2 u16_._2
#define u16_3 u16_._3
#define u16_4 u16_._4
#define u16_5 u16_._5
#define u16_6 u16_._6
#define u16_7 u16_._7
#define u16_8 u16_._8
#define u16_9 u16_._9
#define u16_a u16_._a
#define u16_b u16_._b
#define u16_c u16_._c
#define u16_d u16_._d
#define u16_e u16_._e
#define u16_f u16_._f
#define u8_00 u8_._00
#define u8_01 u8_._01
#define u8_02 u8_._02
#define u8_03 u8_._03
#define u8_04 u8_._04
#define u8_05 u8_._05
#define u8_06 u8_._06
#define u8_07 u8_._07
#define u8_08 u8_._08
#define u8_09 u8_._09
#define u8_0a u8_._0a
#define u8_0b u8_._0b
#define u8_0c u8_._0c
#define u8_0d u8_._0d
#define u8_0e u8_._0e
#define u8_0f u8_._0f
#define u8_10 u8_._10
#define u8_11 u8_._11
#define u8_12 u8_._12
#define u8_13 u8_._13
#define u8_14 u8_._14
#define u8_15 u8_._15
#define u8_16 u8_._16
#define u8_17 u8_._17
#define u8_18 u8_._18
#define u8_19 u8_._19
#define u8_1a u8_._1a
#define u8_1b u8_._1b
#define u8_1c u8_._1c
#define u8_1d u8_._1d
#define u8_1e u8_._1e
#define u8_1f u8_._1f