This commit is contained in:
Jay D Dee
2019-12-09 15:59:02 -05:00
parent 73430b13b1
commit a17ff6f189
48 changed files with 3561 additions and 1367 deletions

View File

@@ -1528,6 +1528,58 @@ static inline void intrlv_8x64( void *dst, const void *src0,
d[63] = _mm_unpackhi_epi64( s6[7], s7[7] );
}
static inline void intrlv_8x64_512( void *dst, const void *src0,
const void *src1, const void *src2, const void *src3,
const void *src4, const void *src5, const void *src6,
const void *src7 )
{
__m128i *d = (__m128i*)dst;
const __m128i *s0 = (const __m128i*)src0;
const __m128i *s1 = (const __m128i*)src1;
const __m128i *s2 = (const __m128i*)src2;
const __m128i *s3 = (const __m128i*)src3;
const __m128i *s4 = (const __m128i*)src4;
const __m128i *s5 = (const __m128i*)src5;
const __m128i *s6 = (const __m128i*)src6;
const __m128i *s7 = (const __m128i*)src7;
d[ 0] = _mm_unpacklo_epi64( s0[0], s1[0] );
d[ 1] = _mm_unpacklo_epi64( s2[0], s3[0] );
d[ 2] = _mm_unpacklo_epi64( s4[0], s5[0] );
d[ 3] = _mm_unpacklo_epi64( s6[0], s7[0] );
d[ 4] = _mm_unpackhi_epi64( s0[0], s1[0] );
d[ 5] = _mm_unpackhi_epi64( s2[0], s3[0] );
d[ 6] = _mm_unpackhi_epi64( s4[0], s5[0] );
d[ 7] = _mm_unpackhi_epi64( s6[0], s7[0] );
d[ 8] = _mm_unpacklo_epi64( s0[1], s1[1] );
d[ 9] = _mm_unpacklo_epi64( s2[1], s3[1] );
d[10] = _mm_unpacklo_epi64( s4[1], s5[1] );
d[11] = _mm_unpacklo_epi64( s6[1], s7[1] );
d[12] = _mm_unpackhi_epi64( s0[1], s1[1] );
d[13] = _mm_unpackhi_epi64( s2[1], s3[1] );
d[14] = _mm_unpackhi_epi64( s4[1], s5[1] );
d[15] = _mm_unpackhi_epi64( s6[1], s7[1] );
d[16] = _mm_unpacklo_epi64( s0[2], s1[2] );
d[17] = _mm_unpacklo_epi64( s2[2], s3[2] );
d[18] = _mm_unpacklo_epi64( s4[2], s5[2] );
d[19] = _mm_unpacklo_epi64( s6[2], s7[2] );
d[20] = _mm_unpackhi_epi64( s0[2], s1[2] );
d[21] = _mm_unpackhi_epi64( s2[2], s3[2] );
d[22] = _mm_unpackhi_epi64( s4[2], s5[2] );
d[23] = _mm_unpackhi_epi64( s6[2], s7[2] );
d[24] = _mm_unpacklo_epi64( s0[3], s1[3] );
d[25] = _mm_unpacklo_epi64( s2[3], s3[3] );
d[26] = _mm_unpacklo_epi64( s4[3], s5[3] );
d[27] = _mm_unpacklo_epi64( s6[3], s7[3] );
d[28] = _mm_unpackhi_epi64( s0[3], s1[3] );
d[29] = _mm_unpackhi_epi64( s2[3], s3[3] );
d[30] = _mm_unpackhi_epi64( s4[3], s5[3] );
d[31] = _mm_unpackhi_epi64( s6[3], s7[3] );
}
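// Editorial sketch, not part of this commit: typical use of the new
// intrlv_8x64_512 above. Eight independent 64 byte states are interleaved
// into one buffer so an 8-way AVX512 hash routine can process all lanes in
// parallel. Names and buffer handling here are illustrative only.
static inline void example_intrlv_8x64_usage( void )
{
   uint64_t lane[8][8] __attribute__ ((aligned (64)));  // 8 states, 64 bytes each
   uint64_t vhash[64]  __attribute__ ((aligned (64)));  // 8-way interleaved, 512 bytes
   // ... fill lane[0..7] with per-lane data ...
   intrlv_8x64_512( vhash, lane[0], lane[1], lane[2], lane[3],
                           lane[4], lane[5], lane[6], lane[7] );
   // vhash is now lane-major per 64 bit word:
   // { l0[0], l1[0], ..., l7[0], l0[1], l1[1], ... }
}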
/*
#define ILEAVE_8x64( i ) do \
{ \
@@ -1656,6 +1708,57 @@ static inline void dintrlv_8x64( void *dst0, void *dst1, void *dst2,
d7[7] = _mm_unpackhi_epi64( s[59], s[63] );
}
static inline void dintrlv_8x64_512( void *dst0, void *dst1, void *dst2,
void *dst3, void *dst4, void *dst5, void *dst6, void *dst7,
const void *src )
{
__m128i *d0 = (__m128i*)dst0;
__m128i *d1 = (__m128i*)dst1;
__m128i *d2 = (__m128i*)dst2;
__m128i *d3 = (__m128i*)dst3;
__m128i *d4 = (__m128i*)dst4;
__m128i *d5 = (__m128i*)dst5;
__m128i *d6 = (__m128i*)dst6;
__m128i *d7 = (__m128i*)dst7;
const __m128i* s = (const __m128i*)src;
d0[0] = _mm_unpacklo_epi64( s[ 0], s[ 4] );
d1[0] = _mm_unpackhi_epi64( s[ 0], s[ 4] );
d2[0] = _mm_unpacklo_epi64( s[ 1], s[ 5] );
d3[0] = _mm_unpackhi_epi64( s[ 1], s[ 5] );
d4[0] = _mm_unpacklo_epi64( s[ 2], s[ 6] );
d5[0] = _mm_unpackhi_epi64( s[ 2], s[ 6] );
d6[0] = _mm_unpacklo_epi64( s[ 3], s[ 7] );
d7[0] = _mm_unpackhi_epi64( s[ 3], s[ 7] );
d0[1] = _mm_unpacklo_epi64( s[ 8], s[12] );
d1[1] = _mm_unpackhi_epi64( s[ 8], s[12] );
d2[1] = _mm_unpacklo_epi64( s[ 9], s[13] );
d3[1] = _mm_unpackhi_epi64( s[ 9], s[13] );
d4[1] = _mm_unpacklo_epi64( s[10], s[14] );
d5[1] = _mm_unpackhi_epi64( s[10], s[14] );
d6[1] = _mm_unpacklo_epi64( s[11], s[15] );
d7[1] = _mm_unpackhi_epi64( s[11], s[15] );
d0[2] = _mm_unpacklo_epi64( s[16], s[20] );
d1[2] = _mm_unpackhi_epi64( s[16], s[20] );
d2[2] = _mm_unpacklo_epi64( s[17], s[21] );
d3[2] = _mm_unpackhi_epi64( s[17], s[21] );
d4[2] = _mm_unpacklo_epi64( s[18], s[22] );
d5[2] = _mm_unpackhi_epi64( s[18], s[22] );
d6[2] = _mm_unpacklo_epi64( s[19], s[23] );
d7[2] = _mm_unpackhi_epi64( s[19], s[23] );
d0[3] = _mm_unpacklo_epi64( s[24], s[28] );
d1[3] = _mm_unpackhi_epi64( s[24], s[28] );
d2[3] = _mm_unpacklo_epi64( s[25], s[29] );
d3[3] = _mm_unpackhi_epi64( s[25], s[29] );
d4[3] = _mm_unpacklo_epi64( s[26], s[30] );
d5[3] = _mm_unpackhi_epi64( s[26], s[30] );
d6[3] = _mm_unpacklo_epi64( s[27], s[31] );
d7[3] = _mm_unpackhi_epi64( s[27], s[31] );
}
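// Editorial sketch, not part of this commit: dintrlv_8x64_512 above is the
// inverse of intrlv_8x64_512. After an 8-way hash has produced 8 interleaved
// 512 bit results, each lane's digest is recovered into its own buffer.
// Names here are illustrative only.
static inline void example_dintrlv_8x64_usage( void )
{
   uint64_t vhash[64]  __attribute__ ((aligned (64)));  // 8-way interleaved hash output
   uint64_t lane[8][8] __attribute__ ((aligned (64)));  // per-lane 64 byte digests
   // ... vhash filled by an 8-way hash function ...
   dintrlv_8x64_512( lane[0], lane[1], lane[2], lane[3],
                     lane[4], lane[5], lane[6], lane[7], vhash );
}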
/*
#define DLEAVE_8x64( i ) do \
{ \
@@ -1910,6 +2013,32 @@ static inline void dintrlv_4x128_512( void *dst0, void *dst1, void *dst2,
}
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
static inline void mm512_bswap32_intrlv80_4x128( void *d, void *src )
{
__m128i bswap_shuf = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 );
__m128i s0 = casti_m128i( src,0 );
__m128i s1 = casti_m128i( src,1 );
__m128i s2 = casti_m128i( src,2 );
__m128i s3 = casti_m128i( src,3 );
__m128i s4 = casti_m128i( src,4 );
s0 = _mm_shuffle_epi8( s0, bswap_shuf );
s1 = _mm_shuffle_epi8( s1, bswap_shuf );
s2 = _mm_shuffle_epi8( s2, bswap_shuf );
s3 = _mm_shuffle_epi8( s3, bswap_shuf );
s4 = _mm_shuffle_epi8( s4, bswap_shuf );
casti_m512i( d, 0 ) = _mm512_broadcast_i64x2( s0 );
casti_m512i( d, 1 ) = _mm512_broadcast_i64x2( s1 );
casti_m512i( d, 2 ) = _mm512_broadcast_i64x2( s2 );
casti_m512i( d, 3 ) = _mm512_broadcast_i64x2( s3 );
casti_m512i( d, 4 ) = _mm512_broadcast_i64x2( s4 );
}
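// Editorial sketch, not part of this commit: mm512_bswap32_intrlv80_4x128
// above byte-swaps the twenty 32 bit words of an 80 byte block header and
// broadcasts each 16 byte chunk to all four 128 bit lanes, producing
// 5 x 512 bits of 4x128 interleaved data for a 4-lane hash. Buffer names
// are illustrative only.
static inline void example_bswap32_intrlv80_4x128( void )
{
   uint32_t edata[20] __attribute__ ((aligned (16)));   // 80 byte header (big endian)
   __m512i  vdata[5]  __attribute__ ((aligned (64)));   // 4x128 interleaved output
   // ... fill edata with the raw block header ...
   mm512_bswap32_intrlv80_4x128( vdata, edata );
}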
#endif
// 2x256 (AVX512)
#if defined (__AVX__)
@@ -1946,6 +2075,9 @@ static inline void dintrlv_2x256( void *dst0, void *dst1,
d0[3] = s[6]; d1[3] = s[7];
}
#endif // AVX
///////////////////////////

View File

@@ -243,7 +243,6 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
_mm_or_si128( _mm_slli_epi32( v, c ), _mm_srli_epi32( v, 32-(c) ) )
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm128_ror_64 _mm_ror_epi64

View File

@@ -454,6 +454,13 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
// Swap 32 bit elements in each 64 bit lane
#define mm256_swap32_64( v ) _mm256_shuffle_epi32( v, 0xb1 )
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm256_rol1x16_64( v ) _mm256_rol_epi64( v, 16 )
#define mm256_ror1x16_64( v ) _mm256_ror_epi64( v, 16 )
#else
#define mm256_ror1x16_64( v ) \
_mm256_shuffle_epi8( v, \
m256_const_64( 0x19181f1e1d1c1b1a, 0x1110171615141312, \
@@ -463,6 +470,7 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
_mm256_shuffle_epi8( v, \
m256_const_64( 0x1d1c1b1a19181f1e, 0x1514131211101716, \
0x0d0c0b0a09080f0e, 0x0504030201000706 ) )
#endif
#define mm256_ror1x8_64( v ) \
_mm256_shuffle_epi8( v, \
@@ -486,10 +494,18 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
// Swap 16 bit elements in each 32 bit lane
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm256_swap16_32( v ) _mm256_rol_epi32( v, 16 )
#else
#define mm256_swap16_32( v ) \
_mm256_shuffle_epi8( v, \
m256_const_64( 0x1b1a19181f1e1d1c, 0x1312111017161514, \
0x0b0a09080f0e0d0c, 0x0302010007060504 ) )
#endif
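// Editorial sketch, not part of this commit: per the comment above,
// mm256_swap16_32 swaps the two 16 bit halves of every 32 bit lane; with
// AVX512VL this is a single 16 bit rotate of each lane, so a lane holding
// 0xAAAABBBB becomes 0xBBBBAAAA.
static inline __m256i example_swap16_32( const __m256i v )
{
   return mm256_swap16_32( v );
}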
//
// Swap bytes in vector elements, endian bswap.

View File

@@ -13,20 +13,31 @@
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// AVX512 intrinsics have a few peculiarities with permutes and shuffles
// that are inconsistent with previous AVX2 implementations.
// AVX512 intrinsics have a few changes from previous conventions.
//
// Some instructions like cmp and blend now use the mask registers instead
// of a vector mask.
//
// The new rotate instructions require the count to be an 8 bit
// immediate value only. The documentation is the same as for shift and
// allows a variable count, so this looks like a compiler issue, but it
// still happens with GCC 9.
//
// _mm512_permutex_epi64 only shuffles within 256 bit lanes. Permute
// usually shuffles across all lanes.
//
// Some instructions like cmp and blend now use a mask register instead
// of a mask vector.
//
// permutexvar has args reversed, index is first arg. Previously all
// permutes and shuffles have the source vector first.
// permutes and shuffles have the index last.
//
// _mm512_permutexvar_epi8 requires AVX512-VBMI, larger elements don't.
// It also performs the same op as _mm512_shuffle_epi8.
//
// _mm512_shuffle_epi8 shuffles across the entire 512 bits. Shuffle usually
// doesn't cross 128 bit lane boundaries.
// shuffle_epi8 shuffles across the entire 512 bits. Shuffle usually
// doesn't cross 128 bit lane boundaries but is consistent with AVX2
// where shuffle_epi8 spans the entire vector.
//////////////////////////////////////////////////////////////
//
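// Editorial sketch, not part of this commit, illustrating the permutexvar
// note above: the index vector is now the first argument and the data
// vector the second. This example reverses the order of the 64 bit elements.
static inline __m512i example_reverse_epi64( const __m512i v )
{
   const __m512i idx = _mm512_set_epi64( 0, 1, 2, 3, 4, 5, 6, 7 );
   return _mm512_permutexvar_epi64( idx, v );   // index first, data second
}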
@@ -35,7 +46,6 @@
// Other AVX512 extensions that may be required for some functions.
// __AVX512VBMI__ __AVX512VAES__
//
// Experimental, not fully tested.
// Move integer to/from element 0 of vector.
@@ -88,10 +98,19 @@ static inline __m512i m512_const_64( const uint64_t i7, const uint64_t i6,
return mm512_concat_256( hi, lo );
}
// Equivalent of set4, broadcast 256 bits in groups of four 64 bit constants
// to all 256 bit lanes: {i3,i2,i1,i0,i3,i2,i1,i0,i3,i2,i1,i0,i3,i2,i1,i0}.
// Equivalent of set1, broadcast a constant to all elements of its size.
#define m512_const1_256( i ) _mm512_broadcast_i64x4( i )
#define m512_const1_128( i ) _mm512_broadcast_i64x2( i )
#define m512_const1_64( i ) _mm512_broadcastq_epi64( mm128_mov64_128( i ) )
#define m512_const1_32( i ) _mm512_broadcastd_epi32( mm128_mov32_128( i ) )
#define m512_const1_16( i ) _mm512_broadcastw_epi16( mm128_mov32_128( i ) )
#define m512_const1_8( i ) _mm512_broadcastb_epi8 ( mm128_mov32_128( i ) )
#define m512_const2_64( i1, i0 ) \
m512_const1_128( m128_const_64( i1, i0 ) )
static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2,
const uint64_t i1, const uint64_t i0 )
const uint64_t i1, const uint64_t i0 )
{
__m256i lo = mm256_mov64_256( i0 );
__m128i hi = mm128_mov64_128( i2 );
@@ -99,25 +118,9 @@ static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2,
_mm_insert_epi64( _mm256_castsi256_si128(
lo ), i1, 1 ) );
hi = _mm_insert_epi64( hi, i3, 1 );
return _mm512_permutex_epi64( _mm512_castsi256_si512(
_mm256_inserti128_si256( lo, hi, 1 ) ), 0xe4 );
return _mm512_broadcast_i64x4( _mm256_inserti128_si256( lo, hi, 1 ) );
}
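// Editorial sketch, not part of this commit: how the constant helpers are
// typically combined. The values are arbitrary and for illustration only.
static inline __m512i example_m512_constants( void )
{
   __m512i a = m512_const1_64( 0x0123456789abcdef );           // 64 bits x 8
   __m512i b = m512_const2_64( 0x0001020304050607,
                               0x08090a0b0c0d0e0f );            // 128 bits x 4
   __m512i c = m512_const4_64( 3, 2, 1, 0 );                    // 256 bits x 2
   return _mm512_xor_si512( _mm512_xor_si512( a, b ), c );
}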
// Broadcast 128 bits in pairs of 64 bit constants {i1, i0} to all
// 128 bit lanes.
#define m512_const2_64( i1, i0 ) \
_mm512_permutex_epi64( _mm512_castsi128_si512( \
m128_const_64( i1, i0 ) ), 0x44 )
// Equivalent of set1, broadcast 64 bit constant to all 64 bit elements.
#define m512_const1_256( i ) _mm512_broadcast_i64x4( i )
#define m512_const1_128( i ) _mm512_broadcast_i64x2( i )
#define m512_const1_64( i ) _mm512_broadcastq_epi64( mm128_mov64_128( i ) )
#define m512_const1_32( i ) _mm512_broadcastd_epi32( mm128_mov32_128( i ) )
#define m512_const1_16( i ) _mm512_broadcastw_epi16( mm128_mov32_128( i ) )
#define m512_const1_8( i ) _mm512_broadcastb_epi8 ( mm128_mov32_128( i ) )
//
// Pseudo constants.
@@ -136,17 +139,6 @@ static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2,
#define m512_neg1 m512_const1_64( 0xffffffffffffffff )
/*
// EVEX vcmpeqq returns a bit mask instead of a vector
static inline __m512i mm512_neg1_fn()
{
__m512i a;
asm( "vpcmpeqq %0, %0, %0\n\t" : "=x"(a) );
return a;
}
#define m512_neg1 mm512_neg1_fn()
*/
//
// Basic operations without SIMD equivalent
@@ -209,7 +201,7 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
// Horizontal vector testing
// Returns bit mask
// Returns a bit mask of type __mmask8
#define mm512_allbits0( a ) _mm512_cmpeq_epi64_mask( a, m512_zero )
#define mm512_allbits1( a ) _mm512_cmpeq_epi64_mask( a, m512_neg1 )
#define mm512_anybits0( a ) _mm512_cmpneq_epi64_mask( a, m512_neg1 )
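// Editorial sketch, not part of this commit: these tests return a __mmask8
// with one bit per 64 bit element rather than a vector, so the result is
// checked with an ordinary integer comparison.
static inline int example_is_all_zero( const __m512i v )
{
   return mm512_allbits0( v ) == 0xff;   // true when all 8 elements are zero
}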
@@ -514,6 +506,12 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
#define mm512_swap32_64( v ) _mm512_shuffle_epi32( v, 0xb1 )
// Rotate each 64 bit lane by one 16 bit element.
#define mm512_ror1x16_64( v ) _mm512_ror_epi64( v, 16 )
#define mm512_rol1x16_64( v ) _mm512_rol_epi64( v, 16 )
#define mm512_ror1x8_64( v ) _mm512_ror_epi64( v, 8 )
#define mm512_rol1x8_64( v ) _mm512_rol_epi64( v, 8 )
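// Editorial sketch, not part of this commit: with AVX512 these element
// rotates reduce to a single vprorq/vprolq instead of the permutexvar
// sequences kept below for reference.
static inline __m512i example_ror1x16_64( const __m512i v )
{
   return mm512_ror1x16_64( v );   // rotate each 64 bit lane right by 16 bits
}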
/*
#define mm512_ror1x16_64( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
0x001c001f001e001d, 0x0018001b001a0019, \
@@ -541,10 +539,16 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
0x2E2D2C2B2A29282F, 0x2625242322212027, \
0x1E1D1C1B1A19181F, 0x1615141312111017, \
0x0E0D0C0B0A09080F, 0x0605040302010007 ) )
*/
//
// Rotate elements within 32 bit lanes.
#define mm512_swap16_32( v ) _mm512_ror_epi32( v, 16 )
#define mm512_ror1x8_32( v ) _mm512_ror_epi32( v, 8 )
#define mm512_rol1x8_32( v ) _mm512_rol_epi32( v, 8 )
/*
#define mm512_swap16_32( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
0x001e001f001c001d, 0x001a001b00180019, \
@@ -565,6 +569,9 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
0x2E2D2C2F2A29282B, 0x2625242722212023, \
0x1E1D1C1F1A19181B, 0x1615141712111013, \
0x0E0D0C0F0A09080B, 0x0605040702010003 ) )
*/
//
// Rotate elements from 2 512 bit vectors in place, source arguments