This commit is contained in:
Jay D Dee
2019-12-03 12:26:11 -05:00
parent 91ec6f1771
commit 40039386a0
58 changed files with 3372 additions and 1920 deletions

View File

@@ -575,12 +575,26 @@ static inline void mm128_bswap32_intrlv80_4x32( void *d, const void *src )
__m128i s3 = casti_m128i( src,3 );
__m128i s4 = casti_m128i( src,4 );
#if defined(__SSSE3__)
__m128i bswap_shuf = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 );
s0 = _mm_shuffle_epi8( s0, bswap_shuf );
s1 = _mm_shuffle_epi8( s1, bswap_shuf );
s2 = _mm_shuffle_epi8( s2, bswap_shuf );
s3 = _mm_shuffle_epi8( s3, bswap_shuf );
s4 = _mm_shuffle_epi8( s4, bswap_shuf );
#else
s0 = mm128_bswap_32( s0 );
s1 = mm128_bswap_32( s1 );
s2 = mm128_bswap_32( s2 );
s3 = mm128_bswap_32( s3 );
s4 = mm128_bswap_32( s4 );
#endif
casti_m128i( d, 0 ) = _mm_shuffle_epi32( s0, 0x00 );
casti_m128i( d, 1 ) = _mm_shuffle_epi32( s0, 0x55 );
casti_m128i( d, 2 ) = _mm_shuffle_epi32( s0, 0xaa );
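// Reference only, not part of this commit: both paths above byte-swap each
// 32 bit word of the 80 byte input before it is broadcast to the lanes.
// A minimal scalar model of that per-dword swap (helper name is illustrative):
#include <stdint.h>
static inline uint32_t bswap32_ref( uint32_t x )
{
   return ( x >> 24 ) | ( ( x >> 8 ) & 0x0000ff00 )
        | ( ( x << 8 ) & 0x00ff0000 ) | ( x << 24 );
}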
@@ -742,17 +756,18 @@ static inline void extr_lane_8x32( void *d, const void *s,
static inline void mm256_bswap32_intrlv80_8x32( void *d, const void *src )
{
__m128i s0 = casti_m128i( src,0 );
__m128i s1 = casti_m128i( src,1 );
__m128i s2 = casti_m128i( src,2 );
__m128i s3 = casti_m128i( src,3 );
__m128i s4 = casti_m128i( src,4 );
__m128i bswap_shuf = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 );
__m128i s0 = casti_m128i( src,0 );
__m128i s1 = casti_m128i( src,1 );
__m128i s2 = casti_m128i( src,2 );
__m128i s3 = casti_m128i( src,3 );
__m128i s4 = casti_m128i( src,4 );
s0 = mm128_bswap_32( s0 );
s1 = mm128_bswap_32( s1 );
s2 = mm128_bswap_32( s2 );
s3 = mm128_bswap_32( s3 );
s4 = mm128_bswap_32( s4 );
s0 = _mm_shuffle_epi8( s0, bswap_shuf );
s1 = _mm_shuffle_epi8( s1, bswap_shuf );
s2 = _mm_shuffle_epi8( s2, bswap_shuf );
s3 = _mm_shuffle_epi8( s3, bswap_shuf );
s4 = _mm_shuffle_epi8( s4, bswap_shuf );
casti_m128i( d, 0 ) =
casti_m128i( d, 1 ) = _mm_shuffle_epi32( s0 , 0x00 );
@@ -960,17 +975,18 @@ static inline void extr_lane_16x32( void *d, const void *s,
static inline void mm512_bswap32_intrlv80_16x32( void *d, const void *src )
{
__m128i s0 = casti_m128i( src,0 );
__m128i s1 = casti_m128i( src,1 );
__m128i s2 = casti_m128i( src,2 );
__m128i s3 = casti_m128i( src,3 );
__m128i s4 = casti_m128i( src,4 );
__m128i bswap_shuf = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 );
__m128i s0 = casti_m128i( src,0 );
__m128i s1 = casti_m128i( src,1 );
__m128i s2 = casti_m128i( src,2 );
__m128i s3 = casti_m128i( src,3 );
__m128i s4 = casti_m128i( src,4 );
s0 = mm128_bswap_32( s0 );
s1 = mm128_bswap_32( s1 );
s2 = mm128_bswap_32( s2 );
s3 = mm128_bswap_32( s3 );
s4 = mm128_bswap_32( s4 );
s0 = _mm_shuffle_epi8( s0, bswap_shuf );
s1 = _mm_shuffle_epi8( s1, bswap_shuf );
s2 = _mm_shuffle_epi8( s2, bswap_shuf );
s3 = _mm_shuffle_epi8( s3, bswap_shuf );
s4 = _mm_shuffle_epi8( s4, bswap_shuf );
casti_m128i( d, 0 ) =
casti_m128i( d, 1 ) =
@@ -1374,17 +1390,18 @@ static inline void extr_lane_4x64( void *d, const void *s,
static inline void mm256_bswap32_intrlv80_4x64( void *d, const void *src )
{
__m128i s0 = casti_m128i( src, 0 );
__m128i s1 = casti_m128i( src, 1 );
__m128i s2 = casti_m128i( src, 2 );
__m128i s3 = casti_m128i( src, 3 );
__m128i s4 = casti_m128i( src, 4 );
__m128i bswap_shuf = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 );
__m128i s0 = casti_m128i( src,0 );
__m128i s1 = casti_m128i( src,1 );
__m128i s2 = casti_m128i( src,2 );
__m128i s3 = casti_m128i( src,3 );
__m128i s4 = casti_m128i( src,4 );
s0 = mm128_bswap_32( s0 );
s1 = mm128_bswap_32( s1 );
s2 = mm128_bswap_32( s2 );
s3 = mm128_bswap_32( s3 );
s4 = mm128_bswap_32( s4 );
s0 = _mm_shuffle_epi8( s0, bswap_shuf );
s1 = _mm_shuffle_epi8( s1, bswap_shuf );
s2 = _mm_shuffle_epi8( s2, bswap_shuf );
s3 = _mm_shuffle_epi8( s3, bswap_shuf );
s4 = _mm_shuffle_epi8( s4, bswap_shuf );
casti_m128i( d, 0 ) =
casti_m128i( d, 1 ) = _mm_shuffle_epi32( s0, 0x44 );
@@ -1556,7 +1573,7 @@ static inline void dintrlv_8x64( void *dst0, void *dst1, void *dst2,
__m128i *d3 = (__m128i*)dst3;
__m128i *d4 = (__m128i*)dst4;
__m128i *d5 = (__m128i*)dst5;
__m128i *d6 = (__m128i*)dst5;
__m128i *d6 = (__m128i*)dst6;
__m128i *d7 = (__m128i*)dst7;
const __m128i* s = (const __m128i*)src;
@@ -1690,17 +1707,18 @@ static inline void extr_lane_8x64( void *d, const void *s,
static inline void mm512_bswap32_intrlv80_8x64( void *d, const void *src )
{
__m128i s0 = casti_m128i( src, 0 );
__m128i s1 = casti_m128i( src, 1 );
__m128i s2 = casti_m128i( src, 2 );
__m128i s3 = casti_m128i( src, 3 );
__m128i s4 = casti_m128i( src, 4 );
__m128i bswap_shuf = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 );
__m128i s0 = casti_m128i( src,0 );
__m128i s1 = casti_m128i( src,1 );
__m128i s2 = casti_m128i( src,2 );
__m128i s3 = casti_m128i( src,3 );
__m128i s4 = casti_m128i( src,4 );
s0 = mm128_bswap_32( s0 );
s1 = mm128_bswap_32( s1 );
s2 = mm128_bswap_32( s2 );
s3 = mm128_bswap_32( s3 );
s4 = mm128_bswap_32( s4 );
s0 = _mm_shuffle_epi8( s0, bswap_shuf );
s1 = _mm_shuffle_epi8( s1, bswap_shuf );
s2 = _mm_shuffle_epi8( s2, bswap_shuf );
s3 = _mm_shuffle_epi8( s3, bswap_shuf );
s4 = _mm_shuffle_epi8( s4, bswap_shuf );
casti_m128i( d, 0 ) =
casti_m128i( d, 1 ) =
@@ -1746,7 +1764,6 @@ static inline void mm512_bswap32_intrlv80_8x64( void *d, const void *src )
casti_m128i( d, 37 ) =
casti_m128i( d, 38 ) =
casti_m128i( d, 39 ) = _mm_shuffle_epi32( s4, 0xee );
}
#endif // AVX512
@@ -1967,6 +1984,68 @@ static inline void rintrlv_4x64_4x32( void *dst, const void *src,
#undef RLEAVE_4x64_4x32
#define RLEAVE_8x64_8x32( i ) do \
{ \
uint32_t *d = (uint32_t*)dst + (i); \
const uint32_t *s = (const uint32_t*)src + (i); \
d[ 0] = s[ 0]; d[ 1] = s[ 2]; d[ 2] = s[ 4]; d[ 3] = s[ 6]; \
d[ 4] = s[ 8]; d[ 5] = s[10]; d[ 6] = s[12]; d[ 7] = s[14]; \
d[ 8] = s[ 1]; d[ 9] = s[ 3]; d[10] = s[ 5]; d[11] = s[ 7]; \
d[12] = s[ 9]; d[13] = s[11]; d[14] = s[13]; d[15] = s[15]; \
} while(0)
// 8x64 -> 8x32
static inline void rintrlv_8x64_8x32( void *dst, const void *src,
const int bit_len )
{
RLEAVE_8x64_8x32( 0 ); RLEAVE_8x64_8x32( 16 );
RLEAVE_8x64_8x32( 32 ); RLEAVE_8x64_8x32( 48 );
RLEAVE_8x64_8x32( 64 ); RLEAVE_8x64_8x32( 80 );
RLEAVE_8x64_8x32( 96 ); RLEAVE_8x64_8x32( 112 );
RLEAVE_8x64_8x32( 128 ); RLEAVE_8x64_8x32( 144 );
RLEAVE_8x64_8x32( 160 ); RLEAVE_8x64_8x32( 176 );
RLEAVE_8x64_8x32( 192 ); RLEAVE_8x64_8x32( 208 );
RLEAVE_8x64_8x32( 224 ); RLEAVE_8x64_8x32( 240 );
if ( bit_len <= 256 ) return;
RLEAVE_8x64_8x32( 256 ); RLEAVE_8x64_8x32( 272 );
RLEAVE_8x64_8x32( 288 ); RLEAVE_8x64_8x32( 304 );
RLEAVE_8x64_8x32( 320 ); RLEAVE_8x64_8x32( 336 );
RLEAVE_8x64_8x32( 352 ); RLEAVE_8x64_8x32( 368 );
RLEAVE_8x64_8x32( 384 ); RLEAVE_8x64_8x32( 400 );
RLEAVE_8x64_8x32( 416 ); RLEAVE_8x64_8x32( 432 );
RLEAVE_8x64_8x32( 448 ); RLEAVE_8x64_8x32( 464 );
RLEAVE_8x64_8x32( 480 ); RLEAVE_8x64_8x32( 496 );
if ( bit_len <= 512 ) return;
RLEAVE_8x64_8x32( 512 ); RLEAVE_8x64_8x32( 528 );
RLEAVE_8x64_8x32( 544 ); RLEAVE_8x64_8x32( 560 );
RLEAVE_8x64_8x32( 576 ); RLEAVE_8x64_8x32( 592 );
RLEAVE_8x64_8x32( 608 ); RLEAVE_8x64_8x32( 624 );
RLEAVE_8x64_8x32( 640 ); RLEAVE_8x64_8x32( 656 );
RLEAVE_8x64_8x32( 672 ); RLEAVE_8x64_8x32( 688 );
RLEAVE_8x64_8x32( 704 ); RLEAVE_8x64_8x32( 720 );
RLEAVE_8x64_8x32( 736 ); RLEAVE_8x64_8x32( 752 );
RLEAVE_8x64_8x32( 768 ); RLEAVE_8x64_8x32( 784 );
RLEAVE_8x64_8x32( 800 ); RLEAVE_8x64_8x32( 816 );
RLEAVE_8x64_8x32( 832 ); RLEAVE_8x64_8x32( 848 );
RLEAVE_8x64_8x32( 864 ); RLEAVE_8x64_8x32( 880 );
RLEAVE_8x64_8x32( 896 ); RLEAVE_8x64_8x32( 912 );
RLEAVE_8x64_8x32( 928 ); RLEAVE_8x64_8x32( 944 );
RLEAVE_8x64_8x32( 960 ); RLEAVE_8x64_8x32( 976 );
RLEAVE_8x64_8x32( 992 ); RLEAVE_8x64_8x32(1008 );
}
#undef RLEAVE_8x64_8x32
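// Reference only, not part of this commit: a hedged scalar sketch of one
// 16 dword group of the 8x64 -> 8x32 re-interleave above. Each 64 bit word
// of a lane splits into its low dword (even row) and high dword (odd row).
// The helper name is illustrative.
#include <stdint.h>
static inline void rleave_8x64_8x32_group_ref( uint32_t *d, const uint32_t *s )
{
   for ( int l = 0; l < 8; l++ )
   {
      d[ l     ] = s[ 2*l     ];   // low  32 bits of lane l's qword
      d[ l + 8 ] = s[ 2*l + 1 ];   // high 32 bits of lane l's qword
   }
}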
// 4x32 -> 4x64
@@ -2067,7 +2146,7 @@ static inline void rintrlv_2x128_4x64( void *dst, const void *src0,
d[13] = _mm_unpacklo_epi64( s1[ 6], s1[ 7] );
d[14] = _mm_unpackhi_epi64( s0[ 6], s0[ 7] );
d[15] = _mm_unpackhi_epi64( s1[ 6], s1[ 7] );
if ( bit_len <= 256 ) return;
if ( bit_len <= 512 ) return;
d[16] = _mm_unpacklo_epi64( s0[ 8], s0[ 9] );
d[17] = _mm_unpacklo_epi64( s1[ 8], s1[ 9] );
d[18] = _mm_unpackhi_epi64( s0[ 8], s0[ 9] );
@@ -2189,15 +2268,15 @@ static inline void rintrlv_4x64_2x128( void *dst0, void *dst1,
#if defined(__SSE4_1__)
// No SSE2 implementation.
#define mm128_intrlv_blend_64( hi, lo ) _mm_blend_epi16( hi, lo, 0x0f )
#define mm128_intrlv_blend_32( hi, lo ) _mm_blend_epi16( hi, lo, 0x33 )
//#define mm128_intrlv_blend_64( hi, lo ) _mm_blend_epi16( hi, lo, 0x0f )
//#define mm128_intrlv_blend_32( hi, lo ) _mm_blend_epi16( hi, lo, 0x33 )
#endif // SSE4_1
#if defined(__AVX2__)
#define mm256_intrlv_blend_128( hi, lo ) _mm256_blend_epi32( hi, lo, 0x0f )
#define mm256_intrlv_blend_64( hi, lo ) _mm256_blend_epi32( hi, lo, 0x33 )
//#define mm256_intrlv_blend_128( hi, lo ) _mm256_blend_epi32( hi, lo, 0x0f )
//#define mm256_intrlv_blend_64( hi, lo ) _mm256_blend_epi32( hi, lo, 0x33 )
#define mm256_intrlv_blend_32( hi, lo ) _mm256_blend_epi32( hi, lo, 0x55 )
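// Reference only, not part of this commit: with immediate 0x55 the blend
// above takes the even 32 bit elements from lo and the odd ones from hi,
// i.e. dst = { lo0, hi1, lo2, hi3, lo4, hi5, lo6, hi7 }. A hedged sketch
// (illustrative name, assumes <immintrin.h> and AVX2):
static inline __m256i intrlv_blend_32_ref( __m256i hi, __m256i lo )
{
   return _mm256_blend_epi32( hi, lo, 0x55 );
}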
// Select lanes of 32 byte hash from 2 sources according to control mask.
@@ -2216,4 +2295,18 @@ do { \
#endif // AVX2
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
/*
#define mm512_intrlv_blend_128( hi, lo ) \
_mm512_mask_blend_epi32( 0x0f0f, hi, lo )
#define mm512_intrlv_blend_64( hi, lo ) \
_mm512_mask_blend_epi32( 0x3333, hi, lo )
*/
#define mm512_intrlv_blend_32( hi, lo ) \
_mm512_mask_blend_epi32( 0x5555, hi, lo )
#endif // AVX512
#endif // INTERLEAVE_H__

View File

@@ -242,7 +242,7 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
_mm_or_si128( _mm_slli_epi32( v, c ), _mm_srli_epi32( v, 32-(c) ) )
/*
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm128_ror_64 _mm_ror_epi64
@@ -251,14 +251,14 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
#define mm128_rol_32 _mm_rol_epi32
#else
*/
#define mm128_ror_64 mm128_ror_var_64
#define mm128_rol_64 mm128_rol_var_64
#define mm128_ror_32 mm128_ror_var_32
#define mm128_rol_32 mm128_rol_var_32
//#endif // AVX512 else
#endif // AVX512 else
#define mm128_ror_16( v, c ) \
_mm_or_si128( _mm_srli_epi16( v, c ), _mm_slli_epi16( v, 16-(c) ) )
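// Reference only, not part of this commit: the fallback rotates above use
// the usual shift-and-or form. A scalar model with the full-width shift
// guarded (illustrative name, assumes <stdint.h>):
static inline uint64_t ror64_ref( uint64_t x, unsigned c )
{
   c &= 63;                                      // avoid shifting by 64
   return c ? ( x >> c ) | ( x << ( 64 - c ) ) : x;
}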

View File

@@ -233,7 +233,7 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
_mm256_or_si256( _mm256_slli_epi32( v, c ), \
_mm256_srli_epi32( v, 32-(c) ) )
/*
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// AVX512, control must be 8 bit immediate.
@@ -244,7 +244,7 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
#define mm256_rol_32 _mm256_rol_epi32
#else
*/
// No AVX512, use fallback.
@@ -253,7 +253,7 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
#define mm256_ror_32 mm256_ror_var_32
#define mm256_rol_32 mm256_rol_var_32
// #endif // AVX512 else
#endif // AVX512 else
#define mm256_ror_16( v, c ) \
_mm256_or_si256( _mm256_srli_epi16( v, c ), \
@@ -311,7 +311,7 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
// AVX512 has finer granularity full vector permutes.
// AVX512 has full vector alignr which might be faster, especially for 32 bit elements.
/*
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm256_swap_128( v ) _mm256_alignr_epi64( v, v, 2 )
@@ -323,7 +323,6 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
#define mm256_rol_3x32( v ) _mm256_alignr_epi32( v, v, 5 )
#else // AVX2
*/
// Swap 128 bit elements in 256 bit vector.
#define mm256_swap_128( v ) _mm256_permute4x64_epi64( v, 0x4e )
@@ -354,7 +353,7 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
m256_const_64( 0x0000000400000003, 0x0000000200000001, \
0x0000000000000007, 0x0000000600000005 )
//#endif // AVX512 else AVX2
#endif // AVX512 else AVX2
// AVX512 can do 16 & 8 bit elements.
@@ -423,21 +422,25 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
#define mm256_ror1x32_128( v ) _mm256_shuffle_epi32( v, 0x39 )
#define mm256_rol1x32_128( v ) _mm256_shuffle_epi32( v, 0x93 )
// Rotate each 128 bit lane by one 16 bit element.
#define mm256_ror1x16_128( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( 0x01000f0e0d0c0b0a, \
0x0908070605040302 ) )
#define mm256_rol1x16_128( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( 0x0d0c0b0a09080706, \
0x0504030201000f0e ) )
_mm256_shuffle_epi8( v, \
m256_const_64( 0x11101f1e1d1c1b1a, 0x1918171615141312, \
0x01000f0e0d0c0b0a, 0x0908070605040302 ) )
#define mm256_rol1x16_128( v ) \
_mm256_shuffle_epi8( v, \
m256_const_64( 0x1d1c1b1a19181716, 0x1514131211101f1e, \
0x0d0c0b0a09080706, 0x0504030201000f0e ) )
// Rotate each 128 bit lane by one byte
#define mm256_ror1x8_128( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( 0x000f0e0d0c0b0a09, \
0x0807060504030201 ) )
_mm256_shuffle_epi8( v, \
m256_const_64( 0x101f1e1d1c1b1a19, 0x1817161514131211, \
0x000f0e0d0c0b0a09, 0x0807060504030201 ) )
#define mm256_rol1x8_128( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( 0x0d0c0b0a09080f0e, \
0x0504030201000706 ) )
_mm256_shuffle_epi8( v, \
m256_const_64( 0x1d1c1b1a19181f1e, 0x1514131211101716, \
0x0d0c0b0a09080f0e, 0x0504030201000706 ) )
// Rotate each 128 bit lane by c bytes.
#define mm256_bror_128( v, c ) \
@@ -451,50 +454,65 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
#define mm256_swap32_64( v ) _mm256_shuffle_epi32( v, 0xb1 )
#define mm256_ror1x16_64( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( 0x09080f0e0d0c0b0a, \
0x0100070605040302 ) )
_mm256_shuffle_epi8( v, \
m256_const_64( 0x19181f1e1d1c1b1a, 0x1110171615141312, \
0x09080f0e0d0c0b0a, 0x0100070605040302 ) )
#define mm256_rol1x16_64( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( 0x0d0c0b0a09080f0e, \
0x0504030201000706 ) )
_mm256_shuffle_epi8( v, \
m256_const_64( 0x1d1c1b1a19181f1e, 0x1514131211101716, \
0x0d0c0b0a09080f0e, 0x0504030201000706 ) )
#define mm256_ror1x8_64( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( 0x080f0e0d0c0b0a09, \
0x0007060504030201 ) )
_mm256_shuffle_epi8( v, \
m256_const_64( 0x181f1e1d1c1b1a19, 0x1017161514131211, \
0x080f0e0d0c0b0a09, 0x0007060504030201 ) )
#define mm256_rol1x8_64( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( 0x0e0d0c0b0a09080f, \
0x0605040302010007 ) )
_mm256_shuffle_epi8( v, \
m256_const_64( 0x1e1d1c1b1a19181f, 0x1615141312111017, \
0x0e0d0c0b0a09080f, 0x0605040302010007 ) )
#define mm256_ror3x8_64( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( 0x0a09080f0e0d0c0b, \
0x0201000706050403 ) )
_mm256_shuffle_epi8( v, \
m256_const_64( 0x1a19181f1e1d1c1b, 0x1211101716151413, \
0x0a09080f0e0d0c0b, 0x0201000706050403 ) )
#define mm256_rol3x8_64( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( 0x0c0b0a09080f0e0d, \
0x0403020100070605 ) )
_mm256_shuffle_epi8( v, \
m256_const_64( 0x1c1b1a19181f1e1d, 0x1413121110171615, \
0x0c0b0a09080f0e0d, 0x0403020100070605 ) )
// Swap 16 bit elements in each 32 bit lane
#define mm256_swap16_32( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( 0x0b0a09080f0e0d0c, \
0x0302010007060504 ) )
_mm256_shuffle_epi8( v, \
m256_const_64( 0x1b1a19181f1e1d1c, 0x1312111017161514, \
0x0b0a09080f0e0d0c, 0x0302010007060504 ) )
//
// Swap bytes in vector elements, endian bswap.
#define mm256_bswap_64( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( 0x08090a0b0c0d0e0f, \
0x0001020304050607 ) )
_mm256_shuffle_epi8( v, \
m256_const_64( 0x18191a1b1c1d1e1f, 0x1011121314151617, \
0x08090a0b0c0d0e0f, 0x0001020304050607 ) )
#define mm256_bswap_32( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( 0x0c0d0e0f08090a0b, \
0x0405060700010203 ) )
_mm256_shuffle_epi8( v, \
m256_const_64( 0x1c1d1e1f18191a1b, 0x1415161710111213, \
0x0c0d0e0f08090a0b, 0x0405060700010203 ) )
#define mm256_bswap_16( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( 0x0e0f0c0d0a0b0809, \
0x0607040502030001 ) )
_mm256_shuffle_epi8( v, \
m256_const_64( 0x1e1f1c1d1a1b1819, 0x1617141512131011, \
0x0e0f0c0d0a0b0809, 0x0607040502030001 ) )
// Source and destination are pointers, may point to same memory.
// 8 byte qword * 8 qwords * 4 lanes = 256 bytes
#define mm256_block_bswap_64( d, s ) do \
{ \
__m256i ctl = m256_const2_64( 0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
__m256i ctl = m256_const_64( 0x18191a1b1c1d1e1f, 0x1011121314151617, \
0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
casti_m256i( d, 0 ) = _mm256_shuffle_epi8( casti_m256i( s, 0 ), ctl ); \
casti_m256i( d, 1 ) = _mm256_shuffle_epi8( casti_m256i( s, 1 ), ctl ); \
casti_m256i( d, 2 ) = _mm256_shuffle_epi8( casti_m256i( s, 2 ), ctl ); \
@@ -508,7 +526,8 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
// 4 byte dword * 8 dwords * 8 lanes = 256 bytes
#define mm256_block_bswap_32( d, s ) do \
{ \
__m256i ctl = m256_const2_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 ); \
__m256i ctl = m256_const_64( 0x1c1d1e1f18191a1b, 0x1415161710111213, \
0x0c0d0e0f08090a0b, 0x0405060700010203 ); \
casti_m256i( d, 0 ) = _mm256_shuffle_epi8( casti_m256i( s, 0 ), ctl ); \
casti_m256i( d, 1 ) = _mm256_shuffle_epi8( casti_m256i( s, 1 ), ctl ); \
casti_m256i( d, 2 ) = _mm256_shuffle_epi8( casti_m256i( s, 2 ), ctl ); \

View File

@@ -90,7 +90,7 @@ static inline __m512i m512_const_64( const uint64_t i7, const uint64_t i6,
// Equivalent of set4, broadcast 256 bits in groups of four 64 bit constants
// to all 256 bit lanes: {i3,i2,i1,i0,i3,i2,i1,i0,i3,i2,i1,i0,i3,i2,i1,i0}.
static inline __m512i mm512_const4_64( const uint64_t i3, const uint64_t i2,
static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2,
const uint64_t i1, const uint64_t i0 )
{
__m256i lo = mm256_mov64_256( i0 );
@@ -105,7 +105,7 @@ static inline __m512i mm512_const4_64( const uint64_t i3, const uint64_t i2,
// Broadcast 128 bits in pairs of 64 bit constants {i1, i0} to all
// 128 bit lanes.
#define mm512_const2_64( i1, i0 ) \
#define m512_const2_64( i1, i0 ) \
_mm512_permutex_epi64( _mm512_castsi128_si512( \
m128_const_64( i1, i0 ) ), 0x44 )
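// Reference only, not part of this commit: a hedged sketch of the broadcast
// this macro performs, repeating the 128 bit pair {i1,i0} across all four
// 128 bit lanes (illustrative name, assumes <immintrin.h> and AVX512F):
static inline __m512i const2_64_ref( const uint64_t i1, const uint64_t i0 )
{
   return _mm512_set_epi64( i1, i0, i1, i0, i1, i0, i1, i0 );
}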
@@ -132,7 +132,7 @@ static inline __m512i mm512_const4_64( const uint64_t i3, const uint64_t i2,
#define m512_one_16 _mm512_broadcastw_epi16( mm128_mov64_128( 1 ) )
#define m512_one_8 _mm512_broadcastb_epi8 ( mm128_mov64_128( 1 ) )
#define m512_neg1 mm512_const1_64( 0xffffffffffffffff )
#define m512_neg1 m512_const1_64( 0xffffffffffffffff )
/*
// EVEX vcmpeqq returns a bit mask instead of a vector
@@ -173,6 +173,19 @@ static inline __m512i mm512_neg1_fn()
// returns p+o as pointer to vector
#define casto_m512i(p,o) (((__m512i*)(p))+(o))
//
// Memory functions
// n = number of 512 bit (64 byte) vectors
static inline void memset_zero_512( __m512i *dst, const int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = m512_zero; }
static inline void memset_512( __m512i *dst, const __m512i a, const int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }
static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
{ for ( int i = 0; i < n; i ++ ) dst[i] = src[i]; }
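// Reference only, not part of this commit: a hedged usage sketch of the
// helpers above; n counts 64 byte vectors, not bytes, and the sizes and
// names here are illustrative.
static inline void init_state_example( __m512i *state, const __m512i *iv )
{
   memcpy_512( state, iv, 2 );        // copy 2 * 64 = 128 bytes
   memset_zero_512( state + 2, 2 );   // zero the next 128 bytes
}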
// Sum 4 values, fewer dependencies than sequential addition.
@@ -189,7 +202,7 @@ static inline __m512i mm512_neg1_fn()
_mm512_add_epi8( _mm512_add_epi8( a, b ), _mm512_add_epi8( c, d ) )
#define mm512_xor4( a, b, c, d ) \
_mm512_xor_si512( _mm512_xor_si256( a, b ), _mm512_xor_si256( c, d ) )
_mm512_xor_si512( _mm512_xor_si512( a, b ), _mm512_xor_si512( c, d ) )
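// Reference only, not part of this commit: the pairwise grouping (corrected
// above to use _mm512_xor_si512 throughout) shortens the dependency chain
// versus a left-to-right fold. A scalar sketch of the same idea
// (illustrative name, assumes <stdint.h>):
static inline uint64_t xor4_ref( uint64_t a, uint64_t b, uint64_t c, uint64_t d )
{  // (a^b) and (c^d) can issue in parallel before the final combine.
   return ( a ^ b ) ^ ( c ^ d );
}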
@@ -212,6 +225,11 @@ static inline __m512i mm512_neg1_fn()
// _mm512_rolv_epi64, _mm512_rorv_epi64, _mm512_rolv_epi32, _mm512_rorv_epi32
//
#define mm512_ror_64 _mm512_ror_epi64
#define mm512_rol_64 _mm512_rol_epi64
#define mm512_ror_32 _mm512_ror_epi32
#define mm512_rol_32 _mm512_rol_epi32
#define mm512_ror_var_64( v, c ) \
_mm512_or_si512( _mm512_srli_epi64( v, c ), \
_mm512_slli_epi64( v, 64-(c) ) )
@@ -249,22 +267,34 @@ static inline __m512i mm512_neg1_fn()
// Swap bytes in vector elements, vectorized endian conversion.
#define mm512_bswap_64( v ) \
_mm512_shuffle_epi8( v, m512_const2_64( \
0x08090a0b0c0d0e0f, 0x0001020304050607 ) )
_mm512_shuffle_epi8( v, \
m512_const_64( 0x38393a3b3c3d3e3f, 0x3031323334353637, \
0x28292a2b2c2d2e2f, 0x2021222324252627, \
0x18191a1b1c1d1e1f, 0x1011121314151617, \
0x08090a0b0c0d0e0f, 0x0001020304050607 ))
#define mm512_bswap_32( v ) \
_mm512_shuffle_epi8( v, m512_const2_64( \
0x0c0d0e0f08090a0b, 0x0405060700010203 ) )
_mm512_shuffle_epi8( v, \
m512_const_64( 0x3c3d3e3f38393a3b, 0x3435363730313233, \
0x2c2d2e2f28292a2b, 0x2425262720212223, \
0x0c0d0e0f08090a0b, 0x0405060700010203, \
0x1c1d1e1f18191a1b, 0x1415161710111213 ) )
#define mm512_bswap_16( v ) \
_mm512_shuffle_epi8( v, m512_const2_64( \
0x0e0f0c0d0a0b0809, 0x0607040502030001 ) )
_mm512_shuffle_epi8( v, \
m512_const_64( 0x3e3f3c3d3a3b3839, 0x3637343532333031, \
0x2e2f2c2d2a2b2829, 0x2627242522232021, \
0x1e1f1c1d1a1b1819, 0x1617141512131011, \
0x0e0f0c0d0a0b0809, 0x0607040502030001 ) )
// Source and destination are pointers, may point to same memory.
// 8 lanes of 64 bytes each
#define mm512_block_bswap_64( d, s ) do \
{ \
__m512i ctl = m512_const2_64( 0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
__m512i ctl = m512_const_64( 0x38393a3b3c3d3e3f, 0x3031323334353637, \
0x28292a2b2c2d2e2f, 0x2021222324252627, \
0x18191a1b1c1d1e1f, 0x1011121314151617, \
0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
casti_m512i( d, 0 ) = _mm512_shuffle_epi8( casti_m512i( s, 0 ), ctl ); \
casti_m512i( d, 1 ) = _mm512_shuffle_epi8( casti_m512i( s, 1 ), ctl ); \
casti_m512i( d, 2 ) = _mm512_shuffle_epi8( casti_m512i( s, 2 ), ctl ); \
@@ -278,7 +308,10 @@ static inline __m512i mm512_neg1_fn()
// 16 lanes of 32 bytes each
#define mm512_block_bswap_32( d, s ) do \
{ \
__m512i ctl = m512_const2_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 ); \
__m512i ctl = m512_const_64( 0x3c3d3e3f38393a3b, 0x3435363730313233, \
0x2c2d2e2f28292a2b, 0x2425262720212223, \
0x0c0d0e0f08090a0b, 0x0405060700010203, \
0x1c1d1e1f18191a1b, 0x1415161710111213 ); \
casti_m512i( d, 0 ) = _mm512_shuffle_epi8( casti_m512i( s, 0 ), ctl ); \
casti_m512i( d, 1 ) = _mm512_shuffle_epi8( casti_m512i( s, 1 ), ctl ); \
casti_m512i( d, 2 ) = _mm512_shuffle_epi8( casti_m512i( s, 2 ), ctl ); \
@@ -381,6 +414,8 @@ static inline __m512i mm512_neg1_fn()
#define mm512_ror1x64_256( v ) _mm512_permutex_epi64( v, 0x39 )
#define mm512_rol1x64_256( v ) _mm512_permutex_epi64( v, 0x93 )
/* Need to fix
// Rotate 256 bit lanes by one 32 bit element
#define mm512_ror1x32_256( v ) \
_mm512_permutexvar_epi32( m512_const4_64( \
@@ -411,7 +446,7 @@ static inline __m512i mm512_neg1_fn()
_mm512_shuffle_epi8( v, m512_const4_64( \
0x1e1d1c1b1a191817, 0x161514131211100f, \
0x0e0d0c0b0a090807, 0x060504030201001f ), v )
*/
//
// Rotate elements within 128 bit lanes of 512 bit vector.
@@ -422,6 +457,7 @@ static inline __m512i mm512_neg1_fn()
#define mm512_ror1x32_128( v ) _mm512_shuffle_epi32( v, 0x39 )
#define mm512_rol1x32_128( v ) _mm512_shuffle_epi32( v, 0x93 )
/*
#define mm512_ror1x16_128( v ) \
_mm512_permutexvar_epi16( m512_const2_64( \
0x0000000700060005, 0x0004000300020001 ), v )
@@ -437,6 +473,7 @@ static inline __m512i mm512_neg1_fn()
#define mm512_rol1x8_128( v ) \
_mm512_shuffle_epi8( v, m512_const2_64( \
0x0e0d0c0b0a090807, 0x060504030201000f ) )
*/
// Rotate 128 bit lanes by c bytes.
#define mm512_bror_128( v, c ) \