This commit is contained in:
Jay D Dee
2019-07-12 10:42:38 -04:00
parent 9abc19a30a
commit e625ed5420
31 changed files with 1269 additions and 1188 deletions

View File

@@ -1,6 +1,50 @@
#if !defined(INTRLV_AVX_H__)
#define INTRLV_AVX_H__ 1
// philosophical discussion
//
// transitions:
//
// int32 <-> int64
// uint64_t = (uint64_t)int32_lo | ( (uint64_t)int32_hi << 32 )
// Efficient transition and post processing, 32 bit granularity is lost.
//
// int32 <-> m64
// More complex, 32 bit granularity maintained, limited number of mmx regs.
// int32 <-> int64 <-> m64 might be more efficient.
//
// int32 <-> m128
// Expensive, current implementation.
//
// int32 <-> m256
// Very expensive multi stage, current implementation.
//
// int64/m64 <-> m128
// Efficient, agnostic to native element size. Common.
//
// m128 <-> m256
// Expensive for a single instruction, unavoidable. Common.
//
// Multi stage options
//
// int32 <-> int64 -> m128
// More efficient than insert32, granularity maintained. Common.
//
// int64 <-> m128 -> m256
// Unavoidable, reasonably efficient. Common.
//
// int32 <-> int64 -> m128 -> m256
// Seems inevitable, most efficient despite number of stages. Common.
//
// Implementation plan.
//
// 1. Complete m128 <-> m256
// 2. Implement int64 <-> m128
// 3. Combine int64 <-> m128 <-> m256
// 4. Implement int32 <-> int64 <-> m128
// 5. Combine int32 <-> int64 <-> m128 <-> m256
//
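For reference, a scalar sketch of the int32 <-> int64 transition described above. The helper names are hypothetical and only illustrate the packing formula; they are not part of this header.

#include <stdint.h>

// Pack two 32 bit lane elements into one 64 bit element.
static inline uint64_t pack_2x32( uint32_t lo, uint32_t hi )
{
   return (uint64_t)lo | ( (uint64_t)hi << 32 );
}

// Unpacking recovers 32 bit granularity only by shifting and truncating.
static inline void unpack_2x32( uint64_t x, uint32_t *lo, uint32_t *hi )
{
   *lo = (uint32_t)x;
   *hi = (uint32_t)( x >> 32 );
}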
#if defined(__AVX__)
// Convenient short cuts for local use only

View File

@@ -1,5 +1,5 @@
#if !defined(SIMD_SSE2_H__)
#define SIMD_SSE2_H__ 1
#if !defined(SIMD_128_H__)
#define SIMD_128_H__ 1
#if defined(__SSE2__)
@@ -15,69 +15,148 @@
//
// 128 bit operations are enhanced with uint128 which adds 128 bit integer
// support for arithmetic and other operations. Casting to uint128_t is not
// free, it requires a move from mmx to gpr but is often the only way or
// the more efficient way for certain operations.
// Compile time constant initializers are type agnostic and can have
// a pointer handle of almost any type. All arguments must be scalar constants
// up to 64 bits. These initializers should only be used at compile time
// to initialize vector arrays. All data reside in memory.
// efficient but is sometimes the only way for certain operations.
//
// Constants are an issue with simd. Simply put, immediate constants don't
// exist. All simd constants either reside in memory or a register.
// The distinction is made below with c128 being memory resident defined
// at compile time and m128 being register defined at run time.
//
// All run time constants must be generated using their component elements,
// incurring significant overhead. The more elements, the more overhead,
// both in instructions and in GP register usage. Whenever possible use
// 64 bit constant elements regardless of the actual element size.
//
// Due to the cost of generating constants they should not be regenerated
// in the same function. Instead, define a local const.
//
// Some constant values can be generated using shortcuts. Zero for example
// is as simple as XORing any register with itself, and is implemented
// in the setzero intrinsic. These shortcuts must be implemented in asm
// due to doing things the compiler would complain about. Another single
// instruction constant is -1, defined below. Others may be added as the need
// arises. Even single instruction constants are less efficient than local
// register variables so the advice above stands.
//
// One common use for simd constants is as a control index for some simd
// instructions like blend and shuffle. The utilities below do not take this
// into account. Those that generate a simd constant should not be used
// repeatedly. It may be better for the application to reimplement the
// utility to better suit its usage.
//
// These are of limited use, it is often simpler to use uint64_t arrays
// and cast as required.
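A minimal sketch of the uint64_t-array approach mentioned above; casti_m128i follows the casting convention used elsewhere in these headers, and the array name and values are illustrative only.

#include <emmintrin.h>
#include <stdint.h>

// Compile time constant data as a plain, 16 byte aligned uint64_t array...
static const uint64_t example_iv[4] __attribute__ ((aligned (16))) =
{ 0x0001020304050607ULL, 0x08090a0b0c0d0e0fULL,
  0x1011121314151617ULL, 0x18191a1b1c1d1e1fULL };

// ...cast to a vector only at the point of use.
#define casti_m128i(p,i) (((__m128i*)(p))[(i)])

static inline __m128i load_iv_example( int i )   // i = 0 or 1
{
   return casti_m128i( example_iv, i );
}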
#define mm128_const_64( x1, x0 ) {{ x1, x0 }}
#define mm128_const1_64( x ) {{ x, x }}
#define mm128_const_32( x3, x2, x1, x0 ) {{ x3, x2, x1, x0 }}
#define mm128_const1_32( x ) {{ x,x,x,x }}
#define mm128_const_16( x7, x6, x5, x4, x3, x2, x1, x0 ) \
{{ x7, x6, x5, x4, x3, x2, x1, x0 }}
#define mm128_const1_16( x ) {{ x,x,x,x, x,x,x,x }}
#define mm128_const_8( x15, x14, x13, x12, x11, x10, x09, x08, \
x07, x06, x05, x04, x03, x02, x01, x00 ) \
{{ x15, x14, x13, x12, x11, x10, x09, x08, \
x07, x06, x05, x04, x03, x02, x01, x00 }}
#define mm128_const1_8( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }}
// Compile time constants, use only for compile time initializing.
#define c128_zero mm128_const1_64( 0ULL )
#define c128_one_128 mm128_const_64( 0ULL, 1ULL )
#define c128_one_64 mm128_const1_64( 1ULL )
#define c128_one_32 mm128_const1_32( 1UL )
#define c128_one_16 mm128_const1_16( 1U )
#define c128_one_8 mm128_const1_8( 1U )
#define c128_neg1 mm128_const1_64( 0xFFFFFFFFFFFFFFFFULL )
#define c128_neg1_64 mm128_const1_64( 0xFFFFFFFFFFFFFFFFULL )
#define c128_neg1_32 mm128_const1_32( 0xFFFFFFFFUL )
#define c128_neg1_16 mm128_const1_16( 0xFFFFU )
#define c128_neg1_8 mm128_const1_8( 0xFFU )
//
// Pseudo constants.
//
// These can't be used for compile time initialization.
// These should be used for all simple vectors.
//
// _mm_setzero_si128 uses the pxor instruction; it's unclear what _mm_set_epi does.
// Setzero is clearly faster than reading a memory resident constant. Assume
// the set intrinsics are also faster.
// If a pseudo constant is used often in a function it may be preferable
// to define a register variable to represent that constant.
// register __m128i zero = _mm_setzero_si128();
// This reduces subsequent references to at most a register move.
// Repeated usage of any simd pseudo-constant should use a locally defined
// const rather than recomputing it for every reference.
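A short usage sketch of the advice above: generate a frequently used pseudo-constant once per function and reuse it, rather than re-expanding the macro inside the loop. The function and names are illustrative only.

#include <emmintrin.h>

static inline void add_one_example( __m128i *v, int n )
{
   const __m128i one = _mm_set1_epi64x( 1 );   // m128_one_64, generated once
   for ( int i = 0; i < n; i++ )
      v[i] = _mm_add_epi64( v[i], one );       // reused from a register
}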
#define m128_zero _mm_setzero_si128()
#define m128_one_128 _mm_set_epi64x( 0ULL, 1ULL )
#define m128_one_64 _mm_set1_epi64x( 1ULL )
#define m128_one_32 _mm_set1_epi32( 1UL )
#define m128_one_16 _mm_set1_epi16( 1U )
#define m128_one_8 _mm_set1_epi8( 1U )
// As suggested by Intel...
// Arg passing for simd registers is assumed to be first output arg,
// then input args, then locals. This is probably wrong, gcc likely picks
// whichever register is currently holding the variable, or whichever
// register is available to hold it. Nevertheless, all args are specified
// by their arg number and local variables use registers starting at
// last arg + 1, by type.
// Output args don't need to be listed as clobbered.
#define m128_neg1 _mm_set1_epi64x( 0xFFFFFFFFFFFFFFFFULL )
static inline __m128i m128_one_64_fn()
{
__m128i a;
asm( "pxor %0, %0\n\t"
"pcmpeqd %%xmm1, %%xmm1\n\t"
"psubq %%xmm1, %0\n\t"
:"=x"(a)
:
: "xmm1" );
return a;
}
#define m128_one_64 m128_one_64_fn()
static inline __m128i m128_one_32_fn()
{
__m128i a;
asm( "pxor %0, %0\n\t"
"pcmpeqd %%xmm1, %%xmm1\n\t"
"psubd %%xmm1, %0\n\t"
:"=x"(a)
:
: "xmm1" );
return a;
}
#define m128_one_32 m128_one_32_fn()
static inline __m128i m128_one_16_fn()
{
__m128i a;
asm( "pxor %0, %0\n\t"
"pcmpeqd %%xmm1, %%xmm1\n\t"
"psubw %%xmm1, %0\n\t"
:"=x"(a)
:
: "xmm1" );
return a;
}
#define m128_one_16 m128_one_16_fn()
static inline __m128i m128_one_8_fn()
{
__m128i a;
asm( "pxor %0, %0\n\t"
"pcmpeqd %%xmm1, %%xmm1\n\t"
"psubb %%xmm1, %0\n\t"
:"=x"(a)
:
: "xmm1" );
return a;
}
#define m128_one_8 m128_one_8_fn()
static inline __m128i m128_neg1_fn()
{
__m128i a;
asm( "pcmpeqd %0, %0\n\t"
:"=x"(a) );
return a;
}
#define m128_neg1 m128_neg1_fn()
#if defined(__SSE4_1__)
static inline __m128i m128_one_128_fn()
{
__m128i a;
asm( "pinsrq $0, %1, %0\n\t"
"pinsrq $1, %2, %0\n\t"
:"=x"(a)
:"r"(1ULL), "r"(0ULL) );
return a;
}
#define m128_one_128 m128_one_128_fn()
// alternative to _mm_set_epi64x, doesn't use mem,
// cost = 2 pinsrq, estimate 4 clocks.
static inline __m128i m128_const_64( uint64_t hi, uint64_t lo )
{
__m128i a;
asm( "pinsrq $0, %2, %0\n\t"
"pinsrq $1, %1, %0\n\t"
:"=x"(a)
:"r"(hi),"r"(lo) );
return a;
}
#else
#define m128_one_128 _mm_set_epi64x( 0ULL, 1ULL )
#define m128_const_64 _mm_set_epi64x
#endif
//
// Basic operations without equivalent SIMD intrinsic
@@ -90,9 +169,21 @@
#define mm128_negate_32( v ) _mm_sub_epi32( m128_zero, v )
#define mm128_negate_16( v ) _mm_sub_epi16( m128_zero, v )
// Use uint128_t for most arithmetic, bit shift, comparison operations
// spanning all 128 bits. Some extractions are also more efficient
// casting __m128i as uint128_t and using standard operators.
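A sketch of that usage, assuming GCC/Clang __int128 and the same-size vector/scalar casts this file already relies on (see mm128_anybits0 below); the function names are illustrative.

#include <emmintrin.h>

typedef unsigned __int128 uint128_t;   // normally supplied by the common headers

// Full width zero test using a scalar 128 bit compare.
static inline int m128_is_zero_example( __m128i v )
{
   return ( (uint128_t)v == 0 );
}

// Full width left shift using the standard operator.
static inline __m128i m128_shl_example( __m128i v, int c )
{
   return (__m128i)( (uint128_t)v << c );
}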
// Add 4 values, fewer dependencies than sequential addition.
#define mm128_add4_64( a, b, c, d ) \
_mm_add_epi64( _mm_add_epi64( a, b ), _mm_add_epi64( c, d ) )
#define mm128_add4_32( a, b, c, d ) \
_mm_add_epi32( _mm_add_epi32( a, b ), _mm_add_epi32( c, d ) )
#define mm128_add4_16( a, b, c, d ) \
_mm_add_epi16( _mm_add_epi16( a, b ), _mm_add_epi16( c, d ) )
#define mm128_add4_8( a, b, c, d ) \
_mm_add_epi8( _mm_add_epi8( a, b ), _mm_add_epi8( c, d ) )
#define mm128_xor4( a, b, c, d ) \
_mm_xor_si128( _mm_xor_si128( a, b ), _mm_xor_si128( c, d ) )
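A brief illustration of why the 4-way forms above help: the tree arrangement lets the two inner additions execute in parallel, whereas a serial chain is one long dependency. Names are illustrative only.

#include <emmintrin.h>

static inline __m128i sum4_example( __m128i s0, __m128i s1, __m128i s2, __m128i s3 )
{
   // Tree form, as in mm128_add4_64: the two inner adds are independent.
   return _mm_add_epi64( _mm_add_epi64( s0, s1 ), _mm_add_epi64( s2, s3 ) );
   // Serial form for comparison, a single three-deep dependency chain:
   // _mm_add_epi64( _mm_add_epi64( _mm_add_epi64( s0, s1 ), s2 ), s3 );
}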
// This isn't cheap, not suitable for bulk usage.
#define mm128_extr_4x32( a0, a1, a2, a3, src ) \
@@ -105,6 +196,16 @@ do { \
// Horizontal vector testing
#if defined(__SSE4_1__)
#define mm128_allbits0( a ) _mm_testz_si128( a, a )
#define mm128_allbits1( a ) _mm_testc_si128( a, m128_neg1 )
#define mm128_allbitsne( a ) _mm_testnzc_si128( a, m128_neg1 )
#define mm128_anybits0 mm128_allbitsne
#define mm128_anybits1 mm128_allbitsne
#else // SSE2
// Bit-wise test of entire vector, useful to test results of cmp.
#define mm128_anybits0( a ) (uint128_t)(a)
#define mm128_anybits1( a ) (((uint128_t)(a))+1)
@@ -112,6 +213,8 @@ do { \
#define mm128_allbits0( a ) ( !mm128_anybits1(a) )
#define mm128_allbits1( a ) ( !mm128_anybits0(a) )
#endif // SSE41 else SSE2
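A usage sketch for the horizontal tests above, assuming the SSE4.1 definitions: testing two vectors for full equality by checking that their XOR is all zero.

#include <smmintrin.h>

// Returns nonzero when a and b are equal in every bit position.
static inline int m128_equal_example( __m128i a, __m128i b )
{
   __m128i diff = _mm_xor_si128( a, b );   // zero iff a == b
   return _mm_testz_si128( diff, diff );   // i.e. mm128_allbits0( diff )
}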
//
// Vector pointer cast
@@ -139,6 +242,7 @@ do { \
#else
// Doesn't work with register variables.
#define mm128_extr_64(a,n) (((uint64_t*)&a)[n])
#define mm128_extr_32(a,n) (((uint32_t*)&a)[n])
@@ -209,7 +313,7 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n )
// Bit rotations
// AVX512 has implemented bit rotation for 128 bit vectors with
// 64 and 32 bit elements. Not really useful.
// 64 and 32 bit elements.
//
// Rotate each element of v by c bits
@@ -233,13 +337,16 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n )
_mm_or_si128( _mm_slli_epi16( v, c ), _mm_srli_epi16( v, 16-(c) ) )
//
// Rotate elements across all lanes
// Rotate vector elements across all lanes
#define mm128_swap_64( v ) _mm_shuffle_epi32( v, 0x4e )
#define mm128_ror_1x32( v ) _mm_shuffle_epi32( v, 0x39 )
#define mm128_rol_1x32( v ) _mm_shuffle_epi32( v, 0x93 )
#if defined (__SSE3__)
// no SSE2 implementation, no current users
#define mm128_ror_1x16( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 1, 0,15,14,13,12,11,10, \
9, 8, 7, 6, 5, 4, 3, 2 ) )
@@ -252,6 +359,7 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n )
#define mm128_rol_1x8( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 14,13,12,11,10, 9, 8, 7, \
6, 5, 4, 3, 2, 1, 0,15 ) )
#endif // SSE3
// Rotate 16 byte (128 bit) vector by c bytes.
// Less efficient using shift but more versatile. Use only for odd number
@@ -262,17 +370,6 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n )
#define mm128_brol( v, c ) \
_mm_or_si128( _mm_slli_si128( v, c ), _mm_srli_si128( v, 16-(c) ) )
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm128_invert_32( v ) _mm_shuffle_epi32( v, 0x1b )
#define mm128_invert_16( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 1, 0, 3, 2, 5, 4, 7, 6, \
9, 8, 11,10, 13,12, 15,14 ) )
#define mm128_invert_8( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, \
8, 9,10,11,12,13,14,15 ) )
//
// Rotate elements within lanes.
@@ -283,7 +380,6 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n )
#define mm128_rol16_64( v ) _mm_shuffle_epi8( v, \
_mm_set_epi8( 13,12,11,10, 9, 8,15,14, 5, 4, 3, 2, 1, 0, 7, 6 ) )
#define mm128_swap16_32( v ) _mm_shuffle_epi8( v, \
_mm_set_epi8( 13,12,15,14, 9,8,11,10, 5,4,7,6, 1,0,3,2 ) )
@@ -293,17 +389,45 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n )
#if defined(__SSSE3__)
#define mm128_bswap_64( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 8, 9,10,11,12,13,14,15, \
0, 1, 2, 3, 4, 5, 6, 7 ) )
_mm_shuffle_epi8( v, m128_const_64( 0x08090a0b0c0d0e0f, \
0x0001020304050607 ) )
#define mm128_bswap_32( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 12,13,14,15, 8, 9,10,11, \
4, 5, 6, 7, 0, 1, 2, 3 ) )
_mm_shuffle_epi8( v, m128_const_64( 0x0c0d0e0f08090a0b, \
0x0405060700010203 ) )
#define mm128_bswap_16( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 14,15, 12,13, 10,11, 8, 9, \
6, 7, 4, 5, 2, 3, 0, 1 ) )
// 8 byte qword * 8 qwords * 2 lanes = 128 bytes
#define mm128_block_bswap_64( d, s ) do \
{ \
__m128i ctl = m128_const_64( 0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
casti_m128i( d, 0 ) = _mm_shuffle_epi8( casti_m128i( s, 0 ), ctl ); \
casti_m128i( d, 1 ) = _mm_shuffle_epi8( casti_m128i( s, 1 ), ctl ); \
casti_m128i( d, 2 ) = _mm_shuffle_epi8( casti_m128i( s, 2 ), ctl ); \
casti_m128i( d, 3 ) = _mm_shuffle_epi8( casti_m128i( s, 3 ), ctl ); \
casti_m128i( d, 4 ) = _mm_shuffle_epi8( casti_m128i( s, 4 ), ctl ); \
casti_m128i( d, 5 ) = _mm_shuffle_epi8( casti_m128i( s, 5 ), ctl ); \
casti_m128i( d, 6 ) = _mm_shuffle_epi8( casti_m128i( s, 6 ), ctl ); \
casti_m128i( d, 7 ) = _mm_shuffle_epi8( casti_m128i( s, 7 ), ctl ); \
} while(0)
// 4 byte dword * 8 dwords * 4 lanes = 128 bytes
#define mm128_block_bswap_32( d, s ) do \
{ \
__m128i ctl = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 ); \
casti_m128i( d, 0 ) = _mm_shuffle_epi8( casti_m128i( s, 0 ), ctl ); \
casti_m128i( d, 1 ) = _mm_shuffle_epi8( casti_m128i( s, 1 ), ctl ); \
casti_m128i( d, 2 ) = _mm_shuffle_epi8( casti_m128i( s, 2 ), ctl ); \
casti_m128i( d, 3 ) = _mm_shuffle_epi8( casti_m128i( s, 3 ), ctl ); \
casti_m128i( d, 4 ) = _mm_shuffle_epi8( casti_m128i( s, 4 ), ctl ); \
casti_m128i( d, 5 ) = _mm_shuffle_epi8( casti_m128i( s, 5 ), ctl ); \
casti_m128i( d, 6 ) = _mm_shuffle_epi8( casti_m128i( s, 6 ), ctl ); \
casti_m128i( d, 7 ) = _mm_shuffle_epi8( casti_m128i( s, 7 ), ctl ); \
} while(0)
#else // SSE2
// Use inline function instead of macro due to multiple statements.
@@ -326,16 +450,41 @@ static inline __m128i mm128_bswap_16( __m128i v )
return _mm_or_si128( _mm_slli_epi16( v, 8 ), _mm_srli_epi16( v, 8 ) );
}
static inline void mm128_block_bswap_64( __m128i *d, __m128i *s )
{
d[0] = mm128_bswap_64( s[0] );
d[1] = mm128_bswap_64( s[1] );
d[2] = mm128_bswap_64( s[2] );
d[3] = mm128_bswap_64( s[3] );
d[4] = mm128_bswap_64( s[4] );
d[5] = mm128_bswap_64( s[5] );
d[6] = mm128_bswap_64( s[6] );
d[7] = mm128_bswap_64( s[7] );
}
static inline void mm128_block_bswap_32( __m128i *d, __m128i *s )
{
d[0] = mm128_bswap_32( s[0] );
d[1] = mm128_bswap_32( s[1] );
d[2] = mm128_bswap_32( s[2] );
d[3] = mm128_bswap_32( s[3] );
d[4] = mm128_bswap_32( s[4] );
d[5] = mm128_bswap_32( s[5] );
d[6] = mm128_bswap_32( s[6] );
d[7] = mm128_bswap_32( s[7] );
}
#endif // SSSE3 else SSE2
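A usage sketch for the block byte-swap helpers above: converting a 128 byte buffer of big-endian 32 bit words, which is the scalar equivalent of one mm128_block_bswap_32 call on eight vectors. Buffer names are illustrative.

#include <stdint.h>

static inline void block_bswap32_example( uint32_t *d, const uint32_t *s )
{
   // 8 __m128i vectors * 4 dwords = 32 dwords = 128 bytes, done here one word
   // at a time; mm128_block_bswap_32( (__m128i*)d, (__m128i*)s ) performs the
   // same transform four dwords per shuffle.
   for ( int i = 0; i < 32; i++ )
      d[i] = __builtin_bswap32( s[i] );
}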
//
// Rotate in place concatenated 128 bit vectors as one 256 bit vector.
// Swap 128 bit vectors.
#define mm128_swap128_256(v1, v2) \
v1 = _mm_xor_si128(v1, v2); \
v2 = _mm_xor_si128(v1, v2); \
v1 = _mm_xor_si128(v1, v2);
#define mm128_swap128_256( v1, v2 ) \
v1 = _mm_xor_si128( v1, v2 ); \
v2 = _mm_xor_si128( v1, v2 ); \
v1 = _mm_xor_si128( v1, v2 );
// Concatenate v1 & v2 and rotate as one 256 bit vector.
#if defined(__SSE4_1__)
@@ -457,4 +606,4 @@ do { \
#endif // SSE4.1 else SSE2
#endif // __SSE2__
#endif // SIMD_SSE2_H__
#endif // SIMD_128_H__

View File

@@ -1,44 +1,134 @@
#if !defined(SIMD_AVX2_H__)
#define SIMD_AVX2_H__ 1
#if !defined(SIMD_256_H__)
#define SIMD_256_H__ 1
#if defined(__AVX2__)
#if defined(__AVX__)
/////////////////////////////////////////////////////////////////////
//
// AVX2 256 bit vectors
//
// AVX2 is required for integer support of 256 bit vectors.
// Basic support for 256 bit vectors is available with AVX but integer
// support requires AVX2.
// Some 256 bit vector utilities require AVX512 or have more efficient
// AVX512 implementations. They will be selected automatically but their use
// is limited because 256 bit vectors are less likely to be used when 512
// is available.
// Vector type overlays used by compile time vector constants.
// Constants of these types reside in memory.
//
// Basic operations without SIMD equivalent
// Pseudo constants.
// These can't be used for compile time initialization but are preferable
// for simple constant vectors at run time. For repeated use define a local
// constant to avoid multiple calls to the same macro.
// Bitwise not ( ~x )
#define mm256_not( x ) _mm256_xor_si256( (x), m256_neg1 )
#define m256_zero _mm256_setzero_si256()
// Unary negation of each element ( -a )
#define mm256_negate_64( a ) _mm256_sub_epi64( m256_zero, a )
#define mm256_negate_32( a ) _mm256_sub_epi32( m256_zero, a )
#define mm256_negate_16( a ) _mm256_sub_epi16( m256_zero, a )
#define m256_one_256 \
_mm256_insertf128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_zero, 1 )
/***************************
*
* extracti128 (AVX2) vs extractf128 (AVX)???
#define m256_one_128 \
_mm256_insertf128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_one_128, 1 )
// The set intrinsics load memory resident constants; this builds the vector
// in registers instead. Cost 4 pinsrq + 1 vinsert, estimate 7 clocks.
#define m256_const_64( i3, i2, i1, i0 ) \
_mm256_insertf128_si256( _mm256_castsi128_si256( m128_const_64( i1, i0 ) ), \
m128_const_64( i3, i2 ), 1 )
#define m256_const1_64( i ) m256_const_64( i, i, i, i )
#if defined(__AVX2__)
// These look like a lot of overhead but the compiler optimizes nicely
// and puts the asm inline in the calling function. Usage is like any
// variable expression.
// __m256i foo = m256_one_64;
static inline __m256i m256_one_64_fn()
{
__m256i a;
asm( "vpxor %0, %0, %0\n\t"
"vpcmpeqd %%ymm1, %%ymm1, %%ymm1\n\t"
"vpsubq %%ymm1, %0, %0\n\t"
:"=x"(a)
:
: "ymm1" );
return a;
}
#define m256_one_64 m256_one_64_fn()
static inline __m256i m256_one_32_fn()
{
__m256i a;
asm( "vpxor %0, %0, %0\n\t"
"vpcmpeqd %%ymm1, %%ymm1, %%ymm1\n\t"
"vpsubd %%ymm1, %0, %0\n\t"
:"=x"(a)
:
: "ymm1" );
return a;
}
#define m256_one_32 m256_one_32_fn()
static inline __m256i m256_one_16_fn()
{
__m256i a;
asm( "vpxor %0, %0, %0\n\t"
"vpcmpeqd %%ymm1, %%ymm1, %%ymm1\n\t"
"vpsubw %%ymm1, %0, %0\n\t"
:"=x"(a)
:
: "ymm1" );
return a;
}
#define m256_one_16 m256_one_16_fn()
static inline __m256i m256_one_8_fn()
{
__m256i a;
asm( "vpxor %0, %0, %0\n\t"
"vpcmpeqd %%ymm1, %%ymm1, %%ymm1\n\t"
"vpsubb %%ymm1, %0, %0\n\t"
:"=x"(a)
:
: "ymm1" );
return a;
}
#define m256_one_8 m256_one_8_fn()
static inline __m256i m256_neg1_fn()
{
__m256i a;
asm( "vpcmpeqq %0, %0, %0\n\t"
:"=x"(a) );
return a;
}
#define m256_neg1 m256_neg1_fn()
#else // AVX
#define m256_one_64 _mm256_set1_epi64x( 1ULL )
#define m256_one_32 _mm256_set1_epi64x( 0x0000000100000001ULL )
#define m256_one_16 _mm256_set1_epi64x( 0x0001000100010001ULL )
#define m256_one_8 _mm256_set1_epi64x( 0x0101010101010101ULL )
// AVX doesn't have inserti128 but insertf128 will do.
// Ideally this can be done with 2 instructions and no temporary variables.
static inline __m256i m256_neg1_fn()
{
__m128i a = m128_neg1;
return _mm256_insertf128_si256( _mm256_castsi128_si256( a ), a, 1 );
}
#define m256_neg1 m256_neg1_fn()
//#define m256_neg1 _mm256_set1_epi64x( 0xFFFFFFFFFFFFFFFFULL )
#endif // AVX2 else AVX
//
// Vector size conversion.
//
// Allows operations on either or both halves of a 256 bit vector serially.
// Handy for parallel AES.
// Caveats:
// Caveats when writing:
// _mm256_castsi256_si128 is free and without side effects.
// _mm256_castsi128_si256 is also free but leaves the high half
// undefined. That's ok if the hi half will be subsequently assigned.
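A sketch of the "cast then insert" pattern these caveats describe, using plain AVX intrinsics: the cast fills the low half for free, then the high half is inserted explicitly so nothing is left undefined.

#include <immintrin.h>

// Concatenate two 128 bit vectors into { hi, lo }.
static inline __m256i concat_128_example( __m128i hi, __m128i lo )
{
   return _mm256_insertf128_si256( _mm256_castsi128_si256( lo ), hi, 1 );
}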
@@ -78,14 +168,22 @@ do { \
// Insert b into specified half of a leaving other half of a unchanged.
#define mm256_ins_lo128_256( a, b ) _mm256_inserti128_si256( a, b, 0 )
#define mm256_ins_hi128_256( a, b ) _mm256_inserti128_si256( a, b, 1 )
*/
/*
// concatenate two 128 bit vectors into one 256 bit vector: { hi, lo }
#define mm256_concat_128( hi, lo ) \
mm256_ins_hi128_256( _mm256_castsi128_si256( lo ), hi )
// Horizontal vector testing
#if defined(__AVX2__)
#define mm256_allbits0( a ) _mm256_testz_si256( a, a )
#define mm256_allbits1( a ) _mm256_testc_si256( a, m256_neg1 )
#define mm256_allbitsne( a ) _mm256_testnzc_si256( a, m256_neg1 )
#define mm256_anybits0 mm256_allbitsne
#define mm256_anybits1 mm256_allbitsne
#else // AVX
// Bit-wise test of entire vector, useful to test results of cmp.
#define mm256_anybits0( a ) \
@@ -99,35 +197,20 @@ do { \
#define mm256_allbits0_256( a ) ( !mm256_anybits1(a) )
#define mm256_allbits1_256( a ) ( !mm256_anybits0(a) )
#endif // AVX2 else AVX
// Parallel AES, for when x is expected to be in a 256 bit register.
#define mm256_aesenc_2x128( x ) \
mm256_concat_128( \
_mm_aesenc_si128( mm128_extr_hi128_256( x ), m128_zero ), \
_mm_aesenc_si128( mm128_extr_lo128_256( x ), m128_zero ) )
// Use same 128 bit key.
#define mm256_aesenc_2x128( x, k ) \
mm256_concat_128( _mm_aesenc_si128( mm128_extr_hi128_256( x ), k ), \
_mm_aesenc_si128( mm128_extr_lo128_256( x ), k ) )
#define mm256_aesenckey_2x128( x, k ) \
mm256_concat_128( \
_mm_aesenc_si128( mm128_extr_hi128_256( x ), \
mm128_extr_hi128_256( k ) ), \
_mm_aesenc_si128( mm128_extr_lo128_256( x ), \
mm128_extr_lo128_256( k ) ) )
#define mm256_paesenc_2x128( y, x ) do \
#define mm256_paesenc_2x128( y, x, k ) do \
{ \
__m256i *X = (__m256i*)x; \
__m256i *Y = (__m256i*)y; \
y[0] = _mm_aesenc_si128( x[0], m128_zero ); \
y[1] = _mm_aesenc_si128( x[1], m128_zero ); \
} while(0);
// With pointers.
#define mm256_paesenckey_2x128( y, x, k ) do \
{ \
__m256i *X = (__m256i*)x; \
__m256i *Y = (__m256i*)y; \
__m256i *K = (__m256i*)ky; \
y[0] = _mm_aesenc_si128( x[0], K[0] ); \
y[1] = _mm_aesenc_si128( x[1], K[1] ); \
__m128i *X = (__m128i*)x; \
__m128i *Y = (__m128i*)y; \
Y[0] = _mm_aesenc_si128( X[0], k ); \
Y[1] = _mm_aesenc_si128( X[1], k ); \
} while(0);
//
@@ -201,7 +284,41 @@ static inline void memset_256( __m256i *dst, const __m256i a, int n )
static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
{ for ( int i = 0; i < n; i ++ ) dst[i] = src[i]; }
*************************************/
///////////////////////////////
//
// AVX2 needed from now on.
//
#if defined(__AVX2__)
//
// Basic operations without SIMD equivalent
// Bitwise not ( ~x )
#define mm256_not( x ) _mm256_xor_si256( (x), m256_neg1 )
// Unary negation of each element ( -a )
#define mm256_negate_64( a ) _mm256_sub_epi64( m256_zero, a )
#define mm256_negate_32( a ) _mm256_sub_epi32( m256_zero, a )
#define mm256_negate_16( a ) _mm256_sub_epi16( m256_zero, a )
// Add 4 values, fewer dependencies than sequential addition.
#define mm256_add4_64( a, b, c, d ) \
_mm256_add_epi64( _mm256_add_epi64( a, b ), _mm256_add_epi64( c, d ) )
#define mm256_add4_32( a, b, c, d ) \
_mm256_add_epi32( _mm256_add_epi32( a, b ), _mm256_add_epi32( c, d ) )
#define mm256_add4_16( a, b, c, d ) \
_mm256_add_epi16( _mm256_add_epi16( a, b ), _mm256_add_epi16( c, d ) )
#define mm256_add4_8( a, b, c, d ) \
_mm256_add_epi8( _mm256_add_epi8( a, b ), _mm256_add_epi8( c, d ) )
#define mm256_xor4( a, b, c, d ) \
_mm256_xor_si256( _mm256_xor_si256( a, b ), _mm256_xor_si256( c, d ) )
//
// Bit rotations.
@@ -241,24 +358,27 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
// index vector c
#define mm256_rorv_64( v, c ) \
_mm256_or_si256( \
_mm256_srlv_epi64( v, _mm256_set1_epi64x( c ) ), \
_mm256_sllv_epi64( v, _mm256_set1_epi64x( 64-(c) ) ) )
_mm256_srlv_epi64( v, c ), \
_mm256_sllv_epi64( v, _mm256_sub_epi64( \
_mm256_set1_epi64x( 64 ), c ) ) )
#define mm256_rolv_64( v, c ) \
_mm256_or_si256( \
_mm256_sllv_epi64( v, _mm256_set1_epi64x( c ) ), \
_mm256_srlv_epi64( v, _mm256_set1_epi64x( 64-(c) ) ) )
_mm256_sllv_epi64( v, c ), \
_mm256_srlv_epi64( v, _mm256_sub_epi64( \
_mm256_set1_epi64x( 64 ), c ) ) )
#define mm256_rorv_32( v, c ) \
_mm256_or_si256( \
_mm256_srlv_epi32( v, _mm256_set1_epi32( c ) ), \
_mm256_sllv_epi32( v, _mm256_set1_epi32( 32-(c) ) ) )
_mm256_srlv_epi32( v, c ), \
_mm256_sllv_epi32( v, _mm256_sub_epi32( \
_mm256_set1_epi32( 32 ), c ) ) )
#define mm256_rolv_32( v, c ) \
_mm256_or_si256( \
_mm256_sllv_epi32( v, _mm256_set1_epi32( c ) ), \
_mm256_srlv_epi32( v, _mm256_set1_epi32( 32-(c) ) ) )
_mm256_sllv_epi32( v, c ), \
_mm256_srlv_epi32( v, _mm256_sub_epi32( \
_mm256_set1_epi32( 32 ), c ) ) )
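A usage sketch for the variable-count rotates above: with the new form the count c is itself a vector, so every 64 bit lane can rotate by a different amount. The counts chosen here are arbitrary examples.

#include <immintrin.h>

static inline __m256i rorv64_example( __m256i v )
{
   const __m256i c = _mm256_set_epi64x( 8, 16, 24, 32 );   // per-lane counts
   // Expansion of mm256_rorv_64( v, c ):
   return _mm256_or_si256(
            _mm256_srlv_epi64( v, c ),
            _mm256_sllv_epi64( v,
               _mm256_sub_epi64( _mm256_set1_epi64x( 64 ), c ) ) );
}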
// AVX512 can do 16 bit elements.
@@ -275,17 +395,28 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
#define mm256_ror_1x64( v ) _mm256_permute4x64_epi64( v, 0x39 )
#define mm256_rol_1x64( v ) _mm256_permute4x64_epi64( v, 0x93 )
// Rotate 256 bit vector by one 32 bit element.
// A little faster with avx512
// Rotate 256 bit vector by one 32 bit element. Use 64 bit set, it's faster.
#define mm256_ror_1x32( v ) \
_mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 0,7,6,5, 4,3,2,1 ) )
_mm256_permutevar8x32_epi32( v, \
m256_const_64( 0x0000000000000007, 0x0000000600000005, \
0x0000000400000003, 0x0000000200000001 ) )
#define mm256_rol_1x32( v ) \
_mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 6,5,4,3, 2,1,0,7 ) )
_mm256_permutevar8x32_epi32( v, \
m256_const_64( 0x0000000600000005, 0x0000000400000003, \
0x0000000200000001, 0x0000000000000007 ) )
// Rotate 256 bit vector by three 32 bit elements (96 bits).
#define mm256_ror_3x32( v ) \
_mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 2,1,0,7, 6,5,4,3 ) )
_mm256_permutevar8x32_epi32( v, \
m256_const_64( 0x0000000200000001, 0x0000000000000007, \
0x0000000600000005, 0x0000000400000003 ) )
#define mm256_rol_3x32( v ) \
_mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 4,3,2,1, 0,7,6,5 ) )
_mm256_permutevar8x32_epi32( v, \
m256_const_64( 0x0000000400000003, 0x0000000200000001, \
0x0000000000000007, 0x0000000600000005 ) )
// AVX512 can do 16 & 8 bit elements.
#if defined(__AVX512VL__)
@@ -293,7 +424,7 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
// Rotate 256 bit vector by one 16 bit element.
#define mm256_ror_1x16( v ) \
_mm256_permutexvar_epi16( _mm256_set_epi16( \
0,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ), v )
0,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ), v )
#define mm256_rol_1x16( v ) \
_mm256_permutexvar_epi16( _mm256_set_epi16( \
@@ -303,7 +434,7 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
#define mm256_ror_1x8( v ) \
_mm256_permutexvar_epi8( _mm256_set_epi8( \
0,31,30,29,28,27,26,25, 24,23,22,21,20,19,18,17, \
16,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ), v )
16,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ), v )
#define mm256_rol_1x8( v ) \
_mm256_permutexvar_epi8( _mm256_set_epi8( \
@@ -312,14 +443,6 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
#endif // AVX512
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm256_invert_64( v ) _mm256_permute4x64_epi64( v, 0x1b )
#define mm256_invert_32( v ) \
_mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 0,1,2,3,4,5,6,7 ) )
// AVX512 can do 16 & 8 bit elements.
//
// Rotate elements within lanes of 256 bit vector.
@@ -332,15 +455,23 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
// Rotate each 128 bit lane by one 16 bit element.
#define mm256_rol1x16_128( v ) \
_mm256_shuffle_epi8( 13,12,11,10, 9,8,7,6, 5,4,3,2, 1,0,15,14 )
_mm256_shuffle_epi8( v, _mm256_set_epi16( 6,5,4,3,2,1,0,7, \
6,5,4,3,2,1,0,7 ) )
#define mm256_ror1x16_128( v ) \
_mm256_shuffle_epi8( 1,0,15,14, 13,12,11,10, 9,8,7,6, 5,4,3,2 )
_mm256_shuffle_epi8( v, _mm256_set_epi16( 0,7,6,5,4,3,2,1, \
0,7,6,5,4,3,2,1 ) )
// Rotate each 128 bit lane by one byte
#define mm256_rol1x8_128( v ) \
_mm256_shuffle_epi8( 14, 13,12,11, 10,9,8,7, 6,5,4,3, 2,1,0,15 )
_mm256_shuffle_epi8( v, _mm256_set_epi8(14,13,12,11,10, 9, 8, 7, \
6, 5, 4, 3, 2, 1, 0,15, \
14,13,12,11,10, 9, 8, 7, \
6, 5, 4, 3, 2, 1, 0,15 ) )
#define mm256_ror1x8_128( v ) \
_mm256_shuffle_epi8( 0,15,14,13, 12,11,10,9, 8,7,6,5, 4,3,2,1 )
_mm256_shuffle_epi8( v, _mm256_set_epi8( 0,15,14,13,12,11,10, 9, \
8, 7, 6, 5, 4, 3, 2, 1, \
0,15,14,13,12,11,10, 9, \
8, 7, 6, 5, 4, 3, 2, 1 ) )
// Rotate each 128 bit lane by c bytes.
#define mm256_bror_128( v, c ) \
@@ -354,28 +485,27 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
#define mm256_swap32_64( v ) _mm256_shuffle_epi32( v, 0xb1 )
#define mm256_ror16_64( v ) \
_mm256_shuffle_epi8( 9, 8,15,14,13,12,11,10, 1, 0, 7, 6, 5, 4, 3, 2 );
_mm256_shuffle_epi8( v, _mm256_set_epi16( 4,7,6,5,0,3,2,1, \
4,7,6,5,0,3,2,1 ) )
#define mm256_rol16_64( v ) \
_mm256_shuffle_epi8( 13,12,11,10, 9, 8,15,14, 5, 4, 3, 2, 1, 0, 7, 6 );
_mm256_shuffle_epi8( v, _mm256_set_epi16( 6,5,4,7,2,1,0,3, \
6,5,4,7,2,1,0,3 ) )
// Swap 16 bit elements in each 32 bit lane
#define mm256_swap16_32( v ) _mm256_shuffle_epi8( v, \
_mm_set_epi8( 13,12,15,14, 9,8,11,10, 5,4,7,6, 1,0,3,2 )
#define mm256_swap16_32( v ) \
_mm256_shuffle_epi8( v, _mm256_set_epi16( 6,7,4,5,2,3,0,1, \
6,7,4,5,2,3,0,1 ) )
//
// Swap bytes in vector elements, endian bswap.
#define mm256_bswap_64( v ) \
_mm256_shuffle_epi8( v, _mm256_set_epi8( 8, 9,10,11,12,13,14,15, \
0, 1, 2, 3, 4, 5, 6, 7, \
8, 9,10,11,12,13,14,15, \
0, 1, 2, 3, 4, 5, 6, 7 ) )
_mm256_shuffle_epi8( v, m256_const_64( 0x08090a0b0c0d0e0f, \
0x0001020304050607, 0x08090a0b0c0d0e0f, 0x0001020304050607 ) )
#define mm256_bswap_32( v ) \
_mm256_shuffle_epi8( v, _mm256_set_epi8( 12,13,14,15, 8, 9,10,11, \
4, 5, 6, 7, 0, 1, 2, 3, \
12,13,14,15, 8, 9,10,11, \
4, 5, 6, 7, 0, 1, 2, 3 ) )
_mm256_shuffle_epi8( v, m256_const_64( 0x0c0d0e0f08090a0b, \
0x0405060700010203, 0x0c0d0e0f08090a0b, 0x0405060700010203 ) )
#define mm256_bswap_16( v ) \
_mm256_shuffle_epi8( v, _mm256_set_epi8( 14,15, 12,13, 10,11, 8, 9, \
@@ -383,6 +513,36 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
14,15, 12,13, 10,11, 8, 9, \
6, 7, 4, 5, 2, 3, 0, 1 ) )
// 8 byte qword * 8 qwords * 4 lanes = 256 bytes
#define mm256_block_bswap_64( d, s ) do \
{ \
__m256i ctl = m256_const_64( 0x08090a0b0c0d0e0f, 0x0001020304050607, \
0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
casti_m256i( d, 0 ) = _mm256_shuffle_epi8( casti_m256i( s, 0 ), ctl ); \
casti_m256i( d, 1 ) = _mm256_shuffle_epi8( casti_m256i( s, 1 ), ctl ); \
casti_m256i( d, 2 ) = _mm256_shuffle_epi8( casti_m256i( s, 2 ), ctl ); \
casti_m256i( d, 3 ) = _mm256_shuffle_epi8( casti_m256i( s, 3 ), ctl ); \
casti_m256i( d, 4 ) = _mm256_shuffle_epi8( casti_m256i( s, 4 ), ctl ); \
casti_m256i( d, 5 ) = _mm256_shuffle_epi8( casti_m256i( s, 5 ), ctl ); \
casti_m256i( d, 6 ) = _mm256_shuffle_epi8( casti_m256i( s, 6 ), ctl ); \
casti_m256i( d, 7 ) = _mm256_shuffle_epi8( casti_m256i( s, 7 ), ctl ); \
} while(0)
// 4 byte dword * 8 dwords * 8 lanes = 256 bytes
#define mm256_block_bswap_32( d, s ) do \
{ \
__m256i ctl = m256_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203, \
0x0c0d0e0f08090a0b, 0x0405060700010203 ); \
casti_m256i( d, 0 ) = _mm256_shuffle_epi8( casti_m256i( s, 0 ), ctl ); \
casti_m256i( d, 1 ) = _mm256_shuffle_epi8( casti_m256i( s, 1 ), ctl ); \
casti_m256i( d, 2 ) = _mm256_shuffle_epi8( casti_m256i( s, 2 ), ctl ); \
casti_m256i( d, 3 ) = _mm256_shuffle_epi8( casti_m256i( s, 3 ), ctl ); \
casti_m256i( d, 4 ) = _mm256_shuffle_epi8( casti_m256i( s, 4 ), ctl ); \
casti_m256i( d, 5 ) = _mm256_shuffle_epi8( casti_m256i( s, 5 ), ctl ); \
casti_m256i( d, 6 ) = _mm256_shuffle_epi8( casti_m256i( s, 6 ), ctl ); \
casti_m256i( d, 7 ) = _mm256_shuffle_epi8( casti_m256i( s, 7 ), ctl ); \
} while(0)
//
// Rotate two concatenated 256 bit vectors as one 512 bit vector by specified
// number of elements. Rotate is done in place, source arguments are
@@ -466,5 +626,6 @@ do { \
} while(0)
#endif // __AVX2__
#endif // SIMD_AVX2_H__
#endif // __AVX__
#endif // SIMD_256_H__

View File

@@ -1,5 +1,5 @@
#if !defined(SIMD_AVX512_H__)
#define SIMD_AVX512_H__ 1
#if !defined(SIMD_512_H__)
#define SIMD_512_H__ 1
#if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
@@ -246,28 +246,22 @@
//
// Rotate elements in 512 bit vector.
#define mm512_swap_256( v ) \
_mm512_permutexvar_epi64( v, _mm512_set_epi64( 3,2,1,0, 7,6,5,4 ) )
#define mm512_swap_256( v ) _mm512_alignr_epi64( v, v, 4 )
#define mm512_ror_1x128( v ) \
_mm512_permutexvar_epi64( v, _mm512_set_epi64( 1,0, 7,6, 5,4, 3,2 ) )
#define mm512_ror_1x128( v ) _mm512_alignr_epi64( v, v, 2 )
#define mm512_rol_1x128( v ) _mm512_alignr_epi64( v, v, 6 )
#define mm512_rol_1x128( v ) \
_mm512_permutexvar_epi64( v, _mm512_set_epi64( 5,4, 3,2, 1,0, 7,6 ) )
#define mm512_ror_1x64( v ) _mm512_alignr_epi64( v, v, 1 )
#define mm512_rol_1x64( v ) _mm512_alignr_epi64( v, v, 7 )
#define mm512_ror_1x64( v ) \
_mm512_permutexvar_epi64( v, _mm512_set_epi64( 0,7,6,5,4,3,2,1 ) )
#define mm512_ror_1x32( v ) _mm512_alignr_epi32( v, v, 1 )
#define mm512_rol_1x32( v ) _mm512_alignr_epi32( v, v, 15 )
#define mm512_rol_1x64( v ) \
_mm512_permutexvar_epi64( v, _mm512_set_epi64( 6,5,4,3,2,1,0,7 ) )
// Generic for odd rotations
#define mm512_ror_x64( v, n ) _mm512_alignr_epi64( v, v, n )
#define mm512_ror_1x32( v ) \
_mm512_permutexvar_epi32( v, _mm512_set_epi32( \
0,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ) )
#define mm512_ror_x32( v, n ) _mm512_alignr_epi32( v, v, n )
#define mm512_rol_1x32( v ) \
_mm512_permutexvar_epi32( v, _mm512_set_epi32( \
14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 15 ) )
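A short illustration of the generic alignr based rotates above; rotating right by three 64 bit elements, for example, is a single valignq.

#include <immintrin.h>

// Equivalent to mm512_ror_x64( v, 3 ): element i receives element (i+3) mod 8.
static inline __m512i ror_3x64_example( __m512i v )
{
   return _mm512_alignr_epi64( v, v, 3 );
}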
// Although documented to exist in AVX512F the _mm512_set_epi8 &
// _mm512_set_epi16 intrinsics fail to compile. Seems useful to have
@@ -282,7 +276,7 @@
0X00080007, 0X00060005, 0X00040003, 0X00020001 ) )
#define mm512_rol_1x16( v ) \
_mm512_permutexvar_epi16( v, _mm512_set_epi16( \
_mm512_permutexvar_epi16( v, _mm512_set_epi32( \
0x001E001D, 0x001C001B, 0x001A0019, 0x00180017, \
0X00160015, 0X00140013, 0X00120011, 0x0010000F, \
0X000E000D, 0X000C000B, 0X000A0009, 0X00080007, \
@@ -290,14 +284,14 @@
#define mm512_ror_1x8( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi8( \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x003F3E3D, 0x3C3B3A39, 0x38373635, 0x34333231, \
0x302F2E2D, 0x2C2B2A29, 0x28272625, 0x24232221, \
0x201F1E1D, 0x1C1B1A19, 0x18171615, 0x14131211, \
0x100F0E0D, 0x0C0B0A09, 0x08070605, 0x04030201 ) )
#define mm512_rol_1x8( v ) \
_mm512_permutexvar_epi8( v, _mm512_set_epi8( \
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
0x3E3D3C3B, 0x3A393837, 0x36353433, 0x3231302F, \
0x2E2D2C2B, 0x2A292827, 0x26252423, 0x2221201F, \
0x1E1D1C1B, 0x1A191817, 0x16151413, 0x1211100F, \
@@ -601,4 +595,4 @@ do { \
} while(0)
#endif // AVX512
#endif // SIMD_AVX512_H__
#endif // SIMD_512_H__

View File

@@ -1,5 +1,5 @@
#if !defined(SIMD_MMX_H__)
#define SIMD_MMX_H__ 1
#if !defined(SIMD_64_H__)
#define SIMD_64_H__ 1
#if defined(__MMX__)
@@ -13,21 +13,20 @@
// Pseudo constants
/*
#define m64_zero _mm_setzero_si64()
#define m64_one_64 _mm_set_pi32( 0UL, 1UL )
#define m64_one_32 _mm_set1_pi32( 1UL )
#define m64_one_16 _mm_set1_pi16( 1U )
#define m64_one_8 _mm_set1_pi8( 1U );
#define m64_neg1 _mm_set1_pi32( 0xFFFFFFFFUL )
/* cast also works, which is better?
*/
#define m64_zero ( (__m64)0ULL )
#define m64_one_64 ( (__m64)1ULL )
#define m64_one_32 ( (__m64)0x0000000100000001ULL )
#define m64_one_16 ( (__m64)0x0001000100010001ULL )
#define m64_one_8 ( (__m64)0x0101010101010101ULL )
#define m64_neg1 ( (__m64)0xFFFFFFFFFFFFFFFFULL )
*/
#define casti_m64(p,i) (((__m64*)(p))[(i)])
@@ -42,6 +41,14 @@
#define mm64_negate_8( v ) _mm_sub_pi8( m64_zero, (__m64)v )
// Rotate bits in packed elements of 64 bit vector
#define mm64_rol_64( a, n ) \
_mm_or_si64( _mm_slli_si64( (__m64)(a), n ), \
_mm_srli_si64( (__m64)(a), 64-(n) ) )
#define mm64_ror_64( a, n ) \
_mm_or_si64( _mm_srli_si64( (__m64)(a), n ), \
_mm_slli_si64( (__m64)(a), 64-(n) ) )
#define mm64_rol_32( a, n ) \
_mm_or_si64( _mm_slli_pi32( (__m64)(a), n ), \
_mm_srli_pi32( (__m64)(a), 32-(n) ) )
@@ -78,22 +85,20 @@
// Endian byte swap packed elements
// A vectorized version of the u64 bswap, use when data already in MMX reg.
#define mm64_bswap_64( v ) \
_mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 0,1,2,3,4,5,6,7 ) )
_mm_shuffle_pi8( (__m64)v, (__m64)0x0001020304050607 )
#define mm64_bswap_32( v ) \
_mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 4,5,6,7, 0,1,2,3 ) )
_mm_shuffle_pi8( (__m64)v, (__m64)0x0405060700010203 )
/*
#define mm64_bswap_16( v ) \
_mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 6,7, 4,5, 2,3, 0,1 ) );
*/
_mm_shuffle_pi8( (__m64)v, (__m64)0x0607040502030001 );
#else
#define mm64_bswap_64( v ) \
(__m64)__builtin_bswap64( (uint64_t)v )
// This exists only for compatibility with CPUs without SSSE3. MMX doesn't
// These exist only for compatibility with CPUs without SSSE3. MMX doesn't
// have extract 32 instruction so pointers are needed to access elements.
// It's more efficient for the caller to use scalar variables and call
// bswap_32 directly.
@@ -101,20 +106,11 @@
_mm_set_pi32( __builtin_bswap32( ((uint32_t*)&v)[1] ), \
__builtin_bswap32( ((uint32_t*)&v)[0] ) )
#endif
// Invert vector: {3,2,1,0} -> {0,1,2,3}
// Invert_64 is the same as bswap64
// Invert_32 is the same as swap32
#define mm64_invert_16( v ) _mm_shuffle_pi16( (__m64)v, 0x1b )
#if defined(__SSSE3__)
// An SSE2 or MMX version of this would be monstrous, shifting, masking and
// oring each byte individually.
#define mm64_invert_8( v ) \
_mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 0,1,2,3,4,5,6,7 ) );
#define mm64_bswap_16( v ) \
_mm_set_pi16( __builtin_bswap16( ((uint16_t*)&v)[3] ), \
__builtin_bswap16( ((uint16_t*)&v)[2] ), \
__builtin_bswap16( ((uint16_t*)&v)[1] ), \
__builtin_bswap16( ((uint16_t*)&v)[0] ) )
#endif
@@ -131,5 +127,5 @@ static inline void memset_m64( __m64 *dst, const __m64 a, int n )
#endif // MMX
#endif // SIMD_MMX_H__
#endif // SIMD_64_H__

View File

@@ -1,243 +0,0 @@
#if !defined(SIMD_AVX_H__)
#define SIMD_AVX_H__ 1
#if defined(__AVX__)
/////////////////////////////////////////////////////////////////////
//
// AVX 256 bit vectors
//
// Basic support for 256 bit vectors. Most of the good stuff needs AVX2.
// Compile time vector constants and initializers.
//
// The following macro constants and functions should only be used
// for compile time initialization of constant and variable vector
// arrays. These constants use memory, use _mm256_set at run time to
// avoid using memory.
#define mm256_const_64( x3, x2, x1, x0 ) {{ x3, x2, x1, x0 }}
#define mm256_const1_64( x ) {{ x,x,x,x }}
#define mm256_const_32( x7, x6, x5, x4, x3, x2, x1, x0 ) \
{{ x7, x6, x5, x4, x3, x2, x1, x0 }}
#define mm256_const1_32( x ) {{ x,x,x,x, x,x,x,x }}
#define mm256_const_16( x15, x14, x13, x12, x11, x10, x09, x08, \
x07, x06, x05, x04, x03, x02, x01, x00 ) \
{{ x15, x14, x13, x12, x11, x10, x09, x08, \
x07, x06, x05, x04, x03, x02, x01, x00 }}
#define mm256_const1_16( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }}
#define mm256_const_8( x31, x30, x29, x28, x27, x26, x25, x24, \
x23, x22, x21, x20, x19, x18, x17, x16, \
x15, x14, x13, x12, x11, x10, x09, x08, \
x07, x06, x05, x04, x03, x02, x01, x00 ) \
{{ x31, x30, x29, x28, x27, x26, x25, x24, \
x23, x22, x21, x20, x19, x18, x17, x16, \
x15, x14, x13, x12, x11, x10, x09, x08, \
x07, x06, x05, x04, x03, x02, x01, x00 }}
#define mm256_const1_8( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x, \
x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }}
// Predefined compile time constant vectors.
// Use Pseudo constants at run time for all simple constant vectors.
#define c256_zero mm256_const1_64( 0ULL )
#define c256_one_256 mm256_const_64( 0ULL, 0ULL, 0ULL, 1ULL )
#define c256_one_128 mm256_const_64( 0ULL, 1ULL, 0ULL, 1ULL )
#define c256_one_64 mm256_const1_64( 1ULL )
#define c256_one_32 mm256_const1_32( 1UL )
#define c256_one_16 mm256_const1_16( 1U )
#define c256_one_8 mm256_const1_8( 1U )
#define c256_neg1 mm256_const1_64( 0xFFFFFFFFFFFFFFFFULL )
#define c256_neg1_64 mm256_const1_64( 0xFFFFFFFFFFFFFFFFULL )
#define c256_neg1_32 mm256_const1_32( 0xFFFFFFFFUL )
#define c256_neg1_16 mm256_const1_16( 0xFFFFU )
#define c256_neg1_8 mm256_const1_8( 0xFFU )
//
// Pseudo constants.
// These can't be used for compile time initialization but are preferable
// for simple constant vectors at run time.
#define m256_zero _mm256_setzero_si256()
#define m256_one_256 _mm256_set_epi64x( 0ULL, 0ULL, 0ULL, 1ULL )
#define m256_one_128 _mm256_set_epi64x( 0ULL, 1ULL, 0ULL, 1ULL )
#define m256_one_64 _mm256_set1_epi64x( 1ULL )
#define m256_one_32 _mm256_set1_epi32( 1UL )
#define m256_one_16 _mm256_set1_epi16( 1U )
#define m256_one_8 _mm256_set1_epi8( 1U )
#define m256_neg1 _mm256_set1_epi64x( 0xFFFFFFFFFFFFFFFFULL )
//
// Vector size conversion.
//
// Allows operations on either or both halves of a 256 bit vector serially.
// Handy for parallel AES.
// Caveats:
// _mm256_castsi256_si128 is free and without side effects.
// _mm256_castsi128_si256 is also free but leaves the high half
// undefined. That's ok if the hi half will be subsequently assigned.
// If assigning both, do lo first, If assigning only 1, use
// _mm256_inserti128_si256.
//
// What to do about extractf128 (AVX) and extracti128 (AVX2)?
#define mm128_extr_lo128_256( a ) _mm256_castsi256_si128( a )
#define mm128_extr_hi128_256( a ) _mm256_extractf128_si256( a, 1 )
// Extract 4 u64 from 256 bit vector.
#define mm256_extr_4x64( a0, a1, a2, a3, src ) \
do { \
__m128i hi = _mm256_extractf128_si256( src, 1 ); \
a0 = _mm_extract_epi64( _mm256_castsi256_si128( src ), 0 ); \
a1 = _mm_extract_epi64( _mm256_castsi256_si128( src ), 1 ); \
a2 = _mm_extract_epi64( hi, 0 ); \
a3 = _mm_extract_epi64( hi, 1 ); \
} while(0)
#define mm256_extr_8x32( a0, a1, a2, a3, a4, a5, a6, a7, src ) \
do { \
__m128i hi = _mm256_extractf128_si256( src, 1 ); \
a0 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 0 ); \
a1 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 1 ); \
a2 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 2 ); \
a3 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 3 ); \
a4 = _mm_extract_epi32( hi, 0 ); \
a5 = _mm_extract_epi32( hi, 1 ); \
a6 = _mm_extract_epi32( hi, 2 ); \
a7 = _mm_extract_epi32( hi, 3 ); \
} while(0)
// input __m128i, returns __m256i
// To build a 256 bit vector from 2 128 bit vectors lo must be done first.
// lo alone leaves hi undefined, hi alone leaves lo unchanged.
// Both cost one clock while preserving the other half.
// Insert b into specified half of a leaving other half of a unchanged.
#define mm256_ins_lo128_256( a, b ) _mm256_insertf128_si256( a, b, 0 )
#define mm256_ins_hi128_256( a, b ) _mm256_insertf128_si256( a, b, 1 )
// concatenate two 128 bit vectors into one 256 bit vector: { hi, lo }
#define mm256_concat_128( hi, lo ) \
mm256_ins_hi128_256( _mm256_castsi128_si256( lo ), hi )
// Horizontal vector testing
// Needs int128 support
// Bit-wise test of entire vector, useful to test results of cmp.
#define mm256_anybits0( a ) \
( (uint128_t)mm128_extr_hi128_256( a ) \
| (uint128_t)mm128_extr_lo128_256( a ) )
#define mm256_anybits1( a ) \
( ( (uint128_t)mm128_extr_hi128_256( a ) + 1 ) \
| ( (uint128_t)mm128_extr_lo128_256( a ) + 1 ) )
#define mm256_allbits0_256( a ) ( !mm256_anybits1(a) )
#define mm256_allbits1_256( a ) ( !mm256_anybits0(a) )
// Parallel AES, for when x is expected to be in a 256 bit register.
#define mm256_aesenc_2x128( x ) \
mm256_concat_128( \
_mm_aesenc_si128( mm128_extr_hi128_256( x ), m128_zero ), \
_mm_aesenc_si128( mm128_extr_lo128_256( x ), m128_zero ) )
#define mm256_aesenckey_2x128( x, k ) \
mm256_concat_128( \
_mm_aesenc_si128( mm128_extr_hi128_256( x ), \
mm128_extr_hi128_256( k ) ), \
_mm_aesenc_si128( mm128_extr_lo128_256( x ), \
mm128_extr_lo128_256( k ) ) )
#define mm256_paesenc_2x128( y, x ) do \
{ \
__m256i *X = (__m256i*)x; \
__m256i *Y = (__m256i*)y; \
y[0] = _mm_aesenc_si128( x[0], m128_zero ); \
y[1] = _mm_aesenc_si128( x[1], m128_zero ); \
} while(0);
// With pointers.
#define mm256_paesenckey_2x128( y, x, k ) do \
{ \
__m256i *X = (__m256i*)x; \
__m256i *Y = (__m256i*)y; \
__m256i *K = (__m256i*)ky; \
y[0] = _mm_aesenc_si128( x[0], K[0] ); \
y[1] = _mm_aesenc_si128( x[1], K[1] ); \
} while(0);
//
// Pointer casting
// p = any aligned pointer
// returns p as pointer to vector type, not very useful
#define castp_m256i(p) ((__m256i*)(p))
// p = any aligned pointer
// returns *p, watch your pointer arithmetic
#define cast_m256i(p) (*((__m256i*)(p)))
// p = any aligned pointer, i = scaled array index
// returns value p[i]
#define casti_m256i(p,i) (((__m256i*)(p))[(i)])
// p = any aligned pointer, o = scaled offset
// returns pointer p+o
#define casto_m256i(p,o) (((__m256i*)(p))+(o))
// Gather scatter
#define mm256_gather_64( d, s0, s1, s2, s3 ) \
((uint64_t*)(d))[0] = (uint64_t)(s0); \
((uint64_t*)(d))[1] = (uint64_t)(s1); \
((uint64_t*)(d))[2] = (uint64_t)(s2); \
((uint64_t*)(d))[3] = (uint64_t)(s3);
#define mm256_gather_32( d, s0, s1, s2, s3, s4, s5, s6, s7 ) \
((uint32_t*)(d))[0] = (uint32_t)(s0); \
((uint32_t*)(d))[1] = (uint32_t)(s1); \
((uint32_t*)(d))[2] = (uint32_t)(s2); \
((uint32_t*)(d))[3] = (uint32_t)(s3); \
((uint32_t*)(d))[4] = (uint32_t)(s4); \
((uint32_t*)(d))[5] = (uint32_t)(s5); \
((uint32_t*)(d))[6] = (uint32_t)(s6); \
((uint32_t*)(d))[7] = (uint32_t)(s7);
// Scatter data from contiguous memory.
// All arguments are pointers
#define mm256_scatter_64( d0, d1, d2, d3, s ) \
*((uint64_t*)(d0)) = ((uint64_t*)(s))[0]; \
*((uint64_t*)(d1)) = ((uint64_t*)(s))[1]; \
*((uint64_t*)(d2)) = ((uint64_t*)(s))[2]; \
*((uint64_t*)(d3)) = ((uint64_t*)(s))[3];
#define mm256_scatter_32( d0, d1, d2, d3, d4, d5, d6, d7, s ) \
*((uint32_t*)(d0)) = ((uint32_t*)(s))[0]; \
*((uint32_t*)(d1)) = ((uint32_t*)(s))[1]; \
*((uint32_t*)(d2)) = ((uint32_t*)(s))[2]; \
*((uint32_t*)(d3)) = ((uint32_t*)(s))[3]; \
*((uint32_t*)(d4)) = ((uint32_t*)(s))[4]; \
*((uint32_t*)(d5)) = ((uint32_t*)(s))[5]; \
*((uint32_t*)(d6)) = ((uint32_t*)(s))[6]; \
*((uint32_t*)(d7)) = ((uint32_t*)(s))[7];
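A scalar restatement of the gather/scatter macros above, mainly to show the intended data movement: one 64 bit word from each of four independent lanes lands contiguously in the destination. Names are illustrative.

#include <stdint.h>

static inline void gather_4x64_example( void *d, uint64_t s0, uint64_t s1,
                                        uint64_t s2, uint64_t s3 )
{
   // Same effect as mm256_gather_64( d, s0, s1, s2, s3 ).
   ((uint64_t*)d)[0] = s0;
   ((uint64_t*)d)[1] = s1;
   ((uint64_t*)d)[2] = s2;
   ((uint64_t*)d)[3] = s3;
}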
//
// Memory functions
// n = number of 256 bit (32 byte) vectors
static inline void memset_zero_256( __m256i *dst, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = m256_zero; }
static inline void memset_256( __m256i *dst, const __m256i a, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }
static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
{ for ( int i = 0; i < n; i ++ ) dst[i] = src[i]; }
#endif // __AVX__
#endif // SIMD_AVX_H__

View File

@@ -62,10 +62,16 @@ static inline void memset_64( uint64_t *dst, const uint64_t a, int n )
//
// 128 bit integers
//
// 128 bit integers are inefficient and not a shortcut for __m128i.
// No real need or use.
//#define u128_neg1 ((uint128_t)(-1))
// Useful for making constants.
#define mk_uint128( hi, lo ) \
( ( (uint128_t)(hi) << 64 ) | ( (uint128_t)(lo) ) )
// Extracting the low bits is a trivial cast.
// These specialized functions are optimized while providing a
// consistent interface.
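A brief sketch of the 128 bit helpers this section describes, assuming GCC/Clang __int128; the function names are illustrative.

#include <stdint.h>

typedef unsigned __int128 uint128_t;

// Same form as mk_uint128 above.
static inline uint128_t u128_make_example( uint64_t hi, uint64_t lo )
{ return ( (uint128_t)hi << 64 ) | lo; }

// Extracting the low bits really is just a cast; the high bits need a shift.
static inline uint64_t u128_lo64_example( uint128_t x ) { return (uint64_t)x; }
static inline uint64_t u128_hi64_example( uint128_t x ) { return (uint64_t)( x >> 64 ); }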