Jay D Dee
2023-06-14 11:07:40 -04:00
parent de564ccbde
commit 57a6b7b58b
31 changed files with 3724 additions and 3345 deletions

View File

@@ -410,7 +410,8 @@ static inline void extr_lane_4x32( void *d, const void *s,
static inline void mm128_bswap32_80( void *d, void *s )
{
__m128i bswap_shuf = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 );
const __m128i bswap_shuf = _mm_set_epi64x( 0x0c0d0e0f08090a0b,
0x0405060700010203 );
casti_m128i( d, 0 ) = _mm_shuffle_epi8( casti_m128i( s, 0 ), bswap_shuf );
casti_m128i( d, 1 ) = _mm_shuffle_epi8( casti_m128i( s, 1 ), bswap_shuf );
casti_m128i( d, 2 ) = _mm_shuffle_epi8( casti_m128i( s, 2 ), bswap_shuf );
@@ -456,7 +457,8 @@ static inline void mm128_bswap32_intrlv80_4x32( void *d, const void *src )
#if defined(__SSSE3__)
__m128i bswap_shuf = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 );
const __m128i bswap_shuf = _mm_set_epi64x( 0x0c0d0e0f08090a0b,
0x0405060700010203 );
s0 = _mm_shuffle_epi8( s0, bswap_shuf );
s1 = _mm_shuffle_epi8( s1, bswap_shuf );
@@ -731,7 +733,12 @@ static inline void extr_lane_8x32( void *d, const void *s,
static inline void mm256_bswap32_intrlv80_8x32( void *d, const void *src )
{
__m128i bswap_shuf = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 );
const __m128i bswap_shuf = _mm_set_epi64x( 0x0c0d0e0f08090a0b,
0x0405060700010203 );
const __m256i c1 = _mm256_set1_epi32( 1 );
const __m256i c2 = _mm256_add_epi32( c1, c1 );
const __m256i c3 = _mm256_add_epi32( c2, c1 );
__m128i s0 = casti_m128i( src,0 );
__m128i s1 = casti_m128i( src,1 );
__m128i s2 = casti_m128i( src,2 );
@@ -744,52 +751,46 @@ static inline void mm256_bswap32_intrlv80_8x32( void *d, const void *src )
s3 = _mm_shuffle_epi8( s3, bswap_shuf );
s4 = _mm_shuffle_epi8( s4, bswap_shuf );
casti_m128i( d, 0 ) =
casti_m128i( d, 1 ) = _mm_shuffle_epi32( s0 , 0x00 );
casti_m128i( d, 2 ) =
casti_m128i( d, 3 ) = _mm_shuffle_epi32( s0 , 0x55 );
casti_m128i( d, 4 ) =
casti_m128i( d, 5 ) = _mm_shuffle_epi32( s0 , 0xaa );
casti_m128i( d, 6 ) =
casti_m128i( d, 7 ) = _mm_shuffle_epi32( s0 , 0xff );
casti_m256i( d, 0 ) = _mm256_broadcastd_epi32( s0 );
casti_m256i( d, 1 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s0 ), c1 );
casti_m256i( d, 2 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s0 ), c2 );
casti_m256i( d, 3 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s0 ), c3 );
casti_m128i( d, 8 ) =
casti_m128i( d, 9 ) = _mm_shuffle_epi32( s1 , 0x00 );
casti_m128i( d,10 ) =
casti_m128i( d,11 ) = _mm_shuffle_epi32( s1 , 0x55 );
casti_m128i( d,12 ) =
casti_m128i( d,13 ) = _mm_shuffle_epi32( s1 , 0xaa );
casti_m128i( d,14 ) =
casti_m128i( d,15 ) = _mm_shuffle_epi32( s1 , 0xff );
casti_m256i( d, 4 ) = _mm256_broadcastd_epi32( s1 );
casti_m256i( d, 5 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s1 ), c1 );
casti_m256i( d, 6 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s1 ), c2 );
casti_m256i( d, 7 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s1 ), c3 );
casti_m128i( d,16 ) =
casti_m128i( d,17 ) = _mm_shuffle_epi32( s2 , 0x00 );
casti_m128i( d,18 ) =
casti_m128i( d,19 ) = _mm_shuffle_epi32( s2 , 0x55 );
casti_m128i( d,20 ) =
casti_m128i( d,21 ) = _mm_shuffle_epi32( s2 , 0xaa );
casti_m128i( d,22 ) =
casti_m128i( d,23 ) = _mm_shuffle_epi32( s2 , 0xff );
casti_m256i( d, 8 ) = _mm256_broadcastd_epi32( s2 );
casti_m256i( d, 9 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s2 ), c1 );
casti_m256i( d,10 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s2 ), c2 );
casti_m256i( d,11 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s2 ), c3 );
casti_m128i( d,24 ) =
casti_m128i( d,25 ) = _mm_shuffle_epi32( s3 , 0x00 );
casti_m128i( d,26 ) =
casti_m128i( d,27 ) = _mm_shuffle_epi32( s3 , 0x55 );
casti_m128i( d,28 ) =
casti_m128i( d,29 ) = _mm_shuffle_epi32( s3 , 0xaa );
casti_m128i( d,30 ) =
casti_m128i( d,31 ) = _mm_shuffle_epi32( s3 , 0xff );
casti_m128i( d,32 ) =
casti_m128i( d,33 ) = _mm_shuffle_epi32( s4 , 0x00 );
casti_m128i( d,34 ) =
casti_m128i( d,35 ) = _mm_shuffle_epi32( s4 , 0x55 );
casti_m128i( d,36 ) =
casti_m128i( d,37 ) = _mm_shuffle_epi32( s4 , 0xaa );
casti_m128i( d,38 ) =
casti_m128i( d,39 ) = _mm_shuffle_epi32( s4 , 0xff );
}
casti_m256i( d,12 ) = _mm256_broadcastd_epi32( s3 );
casti_m256i( d,13 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s3 ), c1 );
casti_m256i( d,14 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s3 ), c2 );
casti_m256i( d,15 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s3 ), c3 );
casti_m256i( d,16 ) = _mm256_broadcastd_epi32( s4 );
casti_m256i( d,17 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s4 ), c1 );
casti_m256i( d,18 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s4 ), c2 );
casti_m256i( d,19 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s4 ), c3 );
}
#endif // AVX2
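// Illustration only, not from this commit; helper name is hypothetical.
// The hunk above replaces paired 128 bit stores of _mm_shuffle_epi32 results
// with one 256 bit permute per output row. A minimal sketch of that pattern,
// assuming AVX2:

#include <immintrin.h>

// Broadcast 32 bit element n (0-3) of x across a whole __m256i row.
static inline __m256i bcast32_256( const __m128i x, const int n )
{
   // Only the low 4 elements of the zero-extended source are referenced.
   return _mm256_permutevar8x32_epi32( _mm256_castsi128_si256( x ),
                                       _mm256_set1_epi32( n ) );
}

// bcast32_256( s0, 1 ) produces the same row as the old pair of
// _mm_shuffle_epi32( s0, 0x55 ) stores; for element 0 the cheaper
// _mm256_broadcastd_epi32 is used above.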
@@ -1174,9 +1175,72 @@ static inline void extr_lane_16x32( void *d, const void *s,
#if defined(__AVX512F__) && defined(__AVX512VL__)
#if defined(__AVX512VBMI__)
// Combine byte swap & broadcast in one permute
static inline void mm512_bswap32_intrlv80_16x32( void *d, const void *src )
{
__m128i bswap_shuf = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 );
const __m512i c0 = _mm512_set1_epi32( 0x00010203 );
const __m512i c1 = _mm512_set1_epi32( 0x04050607 );
const __m512i c2 = _mm512_set1_epi32( 0x08090a0b );
const __m512i c3 = _mm512_set1_epi32( 0x0c0d0e0f );
const __m128i s0 = casti_m128i( src,0 );
const __m128i s1 = casti_m128i( src,1 );
const __m128i s2 = casti_m128i( src,2 );
const __m128i s3 = casti_m128i( src,3 );
const __m128i s4 = casti_m128i( src,4 );
casti_m512i( d, 0 ) = _mm512_permutexvar_epi8( c0,
_mm512_castsi128_si512( s0 ) );
casti_m512i( d, 1 ) = _mm512_permutexvar_epi8( c1,
_mm512_castsi128_si512( s0 ) );
casti_m512i( d, 2 ) = _mm512_permutexvar_epi8( c2,
_mm512_castsi128_si512( s0 ) );
casti_m512i( d, 3 ) = _mm512_permutexvar_epi8( c3,
_mm512_castsi128_si512( s0 ) );
casti_m512i( d, 4 ) = _mm512_permutexvar_epi8( c0,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d, 5 ) = _mm512_permutexvar_epi8( c1,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d, 6 ) = _mm512_permutexvar_epi8( c2,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d, 7 ) = _mm512_permutexvar_epi8( c3,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d, 8 ) = _mm512_permutexvar_epi8( c0,
_mm512_castsi128_si512( s2 ) );
casti_m512i( d, 9 ) = _mm512_permutexvar_epi8( c1,
_mm512_castsi128_si512( s2 ) );
casti_m512i( d,10 ) = _mm512_permutexvar_epi8( c2,
_mm512_castsi128_si512( s2 ) );
casti_m512i( d,11 ) = _mm512_permutexvar_epi8( c3,
_mm512_castsi128_si512( s2 ) );
casti_m512i( d,12 ) = _mm512_permutexvar_epi8( c0,
_mm512_castsi128_si512( s3 ) );
casti_m512i( d,13 ) = _mm512_permutexvar_epi8( c1,
_mm512_castsi128_si512( s3 ) );
casti_m512i( d,14 ) = _mm512_permutexvar_epi8( c2,
_mm512_castsi128_si512( s3 ) );
casti_m512i( d,15 ) = _mm512_permutexvar_epi8( c3,
_mm512_castsi128_si512( s3 ) );
casti_m512i( d,16 ) = _mm512_permutexvar_epi8( c0,
_mm512_castsi128_si512( s4 ) );
casti_m512i( d,17 ) = _mm512_permutexvar_epi8( c1,
_mm512_castsi128_si512( s4 ) );
casti_m512i( d,18 ) = _mm512_permutexvar_epi8( c2,
_mm512_castsi128_si512( s4 ) );
casti_m512i( d,19 ) = _mm512_permutexvar_epi8( c3,
_mm512_castsi128_si512( s4 ) );
}
#else
static inline void mm512_bswap32_intrlv80_16x32( void *d, const void *src )
{
const __m128i bswap_shuf = _mm_set_epi64x( 0x0c0d0e0f08090a0b,
0x0405060700010203 );
const __m512i c1 = _mm512_set1_epi32( 1 );
const __m512i c2 = _mm512_add_epi32( c1, c1 );
const __m512i c3 = _mm512_add_epi32( c2, c1 );
__m128i s0 = casti_m128i( src,0 );
__m128i s1 = casti_m128i( src,1 );
__m128i s2 = casti_m128i( src,2 );
@@ -1189,33 +1253,48 @@ static inline void mm512_bswap32_intrlv80_16x32( void *d, const void *src )
s3 = _mm_shuffle_epi8( s3, bswap_shuf );
s4 = _mm_shuffle_epi8( s4, bswap_shuf );
casti_m512i( d, 0 ) = mm512_bcast_m128( _mm_shuffle_epi32( s0 , 0x00 ) );
casti_m512i( d, 1 ) = mm512_bcast_m128( _mm_shuffle_epi32( s0 , 0x55 ) );
casti_m512i( d, 2 ) = mm512_bcast_m128( _mm_shuffle_epi32( s0 , 0xaa ) );
casti_m512i( d, 3 ) = mm512_bcast_m128( _mm_shuffle_epi32( s0 , 0xff ) );
casti_m512i( d, 0 ) = _mm512_broadcastd_epi32( s0 );
casti_m512i( d, 1 ) = _mm512_permutexvar_epi32( c1,
_mm512_castsi128_si512( s0 ) );
casti_m512i( d, 2 ) = _mm512_permutexvar_epi32( c2,
_mm512_castsi128_si512( s0 ) );
casti_m512i( d, 3 ) = _mm512_permutexvar_epi32( c3,
_mm512_castsi128_si512( s0 ) );
casti_m512i( d, 4 ) = mm512_bcast_m128( _mm_shuffle_epi32( s1 , 0x00 ) );
casti_m512i( d, 5 ) = mm512_bcast_m128( _mm_shuffle_epi32( s1 , 0x55 ) );
casti_m512i( d, 6 ) = mm512_bcast_m128( _mm_shuffle_epi32( s1 , 0xaa ) );
casti_m512i( d, 7 ) = mm512_bcast_m128( _mm_shuffle_epi32( s1 , 0xff ) );
casti_m512i( d, 4 ) = _mm512_broadcastd_epi32( s1 );
casti_m512i( d, 5 ) = _mm512_permutexvar_epi32( c1,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d, 6 ) = _mm512_permutexvar_epi32( c2,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d, 7 ) = _mm512_permutexvar_epi32( c3,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d, 8 ) = mm512_bcast_m128( _mm_shuffle_epi32( s2 , 0x00 ) );
casti_m512i( d, 9 ) = mm512_bcast_m128( _mm_shuffle_epi32( s2 , 0x55 ) );
casti_m512i( d,10 ) = mm512_bcast_m128( _mm_shuffle_epi32( s2 , 0xaa ) );
casti_m512i( d,11 ) = mm512_bcast_m128( _mm_shuffle_epi32( s2 , 0xff ) );
casti_m512i( d, 8 ) = _mm512_broadcastd_epi32( s2 );
casti_m512i( d, 9 ) = _mm512_permutexvar_epi32( c1,
_mm512_castsi128_si512( s2 ) );
casti_m512i( d,10 ) = _mm512_permutexvar_epi32( c2,
_mm512_castsi128_si512( s2 ) );
casti_m512i( d,11 ) = _mm512_permutexvar_epi32( c3,
_mm512_castsi128_si512( s2 ) );
casti_m512i( d,12 ) = mm512_bcast_m128( _mm_shuffle_epi32( s3 , 0x00 ) );
casti_m512i( d,13 ) = mm512_bcast_m128( _mm_shuffle_epi32( s3 , 0x55 ) );
casti_m512i( d,14 ) = mm512_bcast_m128( _mm_shuffle_epi32( s3 , 0xaa ) );
casti_m512i( d,15 ) = mm512_bcast_m128( _mm_shuffle_epi32( s3 , 0xff ) );
casti_m512i( d,12 ) = _mm512_broadcastd_epi32( s3 );
casti_m512i( d,13 ) = _mm512_permutexvar_epi32( c1,
_mm512_castsi128_si512( s3 ) );
casti_m512i( d,14 ) = _mm512_permutexvar_epi32( c2,
_mm512_castsi128_si512( s3 ) );
casti_m512i( d,15 ) = _mm512_permutexvar_epi32( c3,
_mm512_castsi128_si512( s3 ) );
casti_m512i( d,16 ) = mm512_bcast_m128( _mm_shuffle_epi32( s4 , 0x00 ) );
casti_m512i( d,17 ) = mm512_bcast_m128( _mm_shuffle_epi32( s4 , 0x55 ) );
casti_m512i( d,18 ) = mm512_bcast_m128( _mm_shuffle_epi32( s4 , 0xaa ) );
casti_m512i( d,19 ) = mm512_bcast_m128( _mm_shuffle_epi32( s4 , 0xff ) );
casti_m512i( d,16 ) = _mm512_broadcastd_epi32( s4 );
casti_m512i( d,17 ) = _mm512_permutexvar_epi32( c1,
_mm512_castsi128_si512( s4 ) );
casti_m512i( d,18 ) = _mm512_permutexvar_epi32( c2,
_mm512_castsi128_si512( s4 ) );
casti_m512i( d,19 ) = _mm512_permutexvar_epi32( c3,
_mm512_castsi128_si512( s4 ) );
}
#endif // VBMI else
#endif // AVX512
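// Illustration only, not from this commit; helper name is hypothetical and
// AVX512-VBMI is assumed. The VBMI path above ("Combine byte swap & broadcast
// in one permute") works because an index vector repeating the byte pattern
// { 4n, 4n+1, 4n+2, 4n+3 } reads the bytes of 32 bit word n in reverse order
// (the byte swap) into every destination element (the broadcast):

#include <immintrin.h>

static inline __m512i bswap_bcast32_512( const __m128i x, const int n )
{
   const __m512i idx = _mm512_set1_epi32( ( (4*n)   << 24 )
                                        | ( (4*n+1) << 16 )
                                        | ( (4*n+2) <<  8 )
                                        |   (4*n+3) );
   return _mm512_permutexvar_epi8( idx, _mm512_castsi128_si512( x ) );
}

// For n = 0 the index constant is 0x00010203, matching c0 above;
// n = 1 gives 0x04050607, and so on.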
///////////////////////////
@@ -1393,82 +1472,56 @@ static inline void extr_lane_4x64( void *dst, const void *src, const int lane,
return; // bit_len == 512
}
#if defined(__SSSE3__)
#if defined(__AVX2__)
static inline void mm256_intrlv80_4x64( void *d, const void *src )
{
__m128i s0 = casti_m128i( src,0 );
__m128i s1 = casti_m128i( src,1 );
__m128i s2 = casti_m128i( src,2 );
__m128i s3 = casti_m128i( src,3 );
__m256i s0 = casti_m256i( src,0 );
__m256i s1 = casti_m256i( src,1 );
__m128i s4 = casti_m128i( src,4 );
casti_m128i( d, 0 ) =
casti_m128i( d, 1 ) = _mm_shuffle_epi32( s0, 0x44 );
casti_m128i( d, 2 ) =
casti_m128i( d, 3 ) = _mm_shuffle_epi32( s0, 0xee );
casti_m256i( d, 0 ) = _mm256_permute4x64_epi64( s0, 0x00 );
casti_m256i( d, 1 ) = _mm256_permute4x64_epi64( s0, 0x55 );
casti_m256i( d, 2 ) = _mm256_permute4x64_epi64( s0, 0xaa );
casti_m256i( d, 3 ) = _mm256_permute4x64_epi64( s0, 0xff );
casti_m128i( d, 4 ) =
casti_m128i( d, 5 ) = _mm_shuffle_epi32( s1, 0x44 );
casti_m128i( d, 6 ) =
casti_m128i( d, 7 ) = _mm_shuffle_epi32( s1, 0xee );
casti_m256i( d, 4 ) = _mm256_permute4x64_epi64( s1, 0x00 );
casti_m256i( d, 5 ) = _mm256_permute4x64_epi64( s1, 0x55 );
casti_m256i( d, 6 ) = _mm256_permute4x64_epi64( s1, 0xaa );
casti_m256i( d, 7 ) = _mm256_permute4x64_epi64( s1, 0xff );
casti_m128i( d, 8 ) =
casti_m128i( d, 9 ) = _mm_shuffle_epi32( s2, 0x44 );
casti_m128i( d, 10 ) =
casti_m128i( d, 11 ) = _mm_shuffle_epi32( s2, 0xee );
casti_m128i( d, 12 ) =
casti_m128i( d, 13 ) = _mm_shuffle_epi32( s3, 0x44 );
casti_m128i( d, 14 ) =
casti_m128i( d, 15 ) = _mm_shuffle_epi32( s3, 0xee );
casti_m128i( d, 16 ) =
casti_m128i( d, 17 ) = _mm_shuffle_epi32( s4, 0x44 );
casti_m128i( d, 18 ) =
casti_m128i( d, 19 ) = _mm_shuffle_epi32( s4, 0xee );
casti_m256i( d, 8 ) = _mm256_permute4x64_epi64(
_mm256_castsi128_si256( s4 ), 0x00 );
casti_m256i( d, 9 ) = _mm256_permute4x64_epi64(
_mm256_castsi128_si256( s4 ), 0x55 );
}
static inline void mm256_bswap32_intrlv80_4x64( void *d, const void *src )
{
__m128i bswap_shuf = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 );
__m128i s0 = casti_m128i( src,0 );
__m128i s1 = casti_m128i( src,1 );
__m128i s2 = casti_m128i( src,2 );
__m128i s3 = casti_m128i( src,3 );
const __m256i bswap_shuf = mm256_bcast_m128(
_mm_set_epi64x( 0x0c0d0e0f08090a0b, 0x0405060700010203 ) );
__m256i s0 = casti_m256i( src,0 );
__m256i s1 = casti_m256i( src,1 );
__m128i s4 = casti_m128i( src,4 );
s0 = _mm_shuffle_epi8( s0, bswap_shuf );
s1 = _mm_shuffle_epi8( s1, bswap_shuf );
s2 = _mm_shuffle_epi8( s2, bswap_shuf );
s3 = _mm_shuffle_epi8( s3, bswap_shuf );
s4 = _mm_shuffle_epi8( s4, bswap_shuf );
s0 = _mm256_shuffle_epi8( s0, bswap_shuf );
s1 = _mm256_shuffle_epi8( s1, bswap_shuf );
s4 = _mm_shuffle_epi8( s4, _mm256_castsi256_si128( bswap_shuf ) );
casti_m128i( d, 0 ) =
casti_m128i( d, 1 ) = _mm_shuffle_epi32( s0, 0x44 );
casti_m128i( d, 2 ) =
casti_m128i( d, 3 ) = _mm_shuffle_epi32( s0, 0xee );
casti_m128i( d, 4 ) =
casti_m128i( d, 5 ) = _mm_shuffle_epi32( s1, 0x44 );
casti_m128i( d, 6 ) =
casti_m128i( d, 7 ) = _mm_shuffle_epi32( s1, 0xee );
casti_m128i( d, 8 ) =
casti_m128i( d, 9 ) = _mm_shuffle_epi32( s2, 0x44 );
casti_m128i( d, 10 ) =
casti_m128i( d, 11 ) = _mm_shuffle_epi32( s2, 0xee );
casti_m128i( d, 12 ) =
casti_m128i( d, 13 ) = _mm_shuffle_epi32( s3, 0x44 );
casti_m128i( d, 14 ) =
casti_m128i( d, 15 ) = _mm_shuffle_epi32( s3, 0xee );
casti_m128i( d, 16 ) =
casti_m128i( d, 17 ) = _mm_shuffle_epi32( s4, 0x44 );
casti_m128i( d, 18 ) =
casti_m128i( d, 19 ) = _mm_shuffle_epi32( s4, 0xee );
casti_m256i( d, 0 ) = _mm256_permute4x64_epi64( s0, 0x00 );
casti_m256i( d, 1 ) = _mm256_permute4x64_epi64( s0, 0x55 );
casti_m256i( d, 2 ) = _mm256_permute4x64_epi64( s0, 0xaa );
casti_m256i( d, 3 ) = _mm256_permute4x64_epi64( s0, 0xff );
casti_m256i( d, 4 ) = _mm256_permute4x64_epi64( s1, 0x00 );
casti_m256i( d, 5 ) = _mm256_permute4x64_epi64( s1, 0x55 );
casti_m256i( d, 6 ) = _mm256_permute4x64_epi64( s1, 0xaa );
casti_m256i( d, 7 ) = _mm256_permute4x64_epi64( s1, 0xff );
casti_m256i( d, 8 ) = _mm256_permute4x64_epi64(
_mm256_castsi128_si256( s4 ), 0x00 );
casti_m256i( d, 9 ) = _mm256_permute4x64_epi64(
_mm256_castsi128_si256( s4 ), 0x55 );
}
#endif // AVX2
@@ -1796,25 +1849,65 @@ static inline void extr_lane_8x64( void *dst, const void *src, const int lane,
// broadcast to all lanes
static inline void mm512_intrlv80_8x64( void *dst, const void *src )
{
__m512i *d = (__m512i*)dst;
const __m128i *s = (const __m128i*)src;
__m512i *d = (__m512i*)dst;
const uint64_t *s = (const uint64_t*)src;
d[ 0] = mm512_bcast_m128( _mm_shuffle_epi32( s[0], 0x44 ) );
d[ 1] = mm512_bcast_m128( _mm_shuffle_epi32( s[0], 0xee ) );
d[ 2] = mm512_bcast_m128( _mm_shuffle_epi32( s[1], 0x44 ) );
d[ 3] = mm512_bcast_m128( _mm_shuffle_epi32( s[1], 0xee ) );
d[ 4] = mm512_bcast_m128( _mm_shuffle_epi32( s[2], 0x44 ) );
d[ 5] = mm512_bcast_m128( _mm_shuffle_epi32( s[2], 0xee ) );
d[ 6] = mm512_bcast_m128( _mm_shuffle_epi32( s[3], 0x44 ) );
d[ 7] = mm512_bcast_m128( _mm_shuffle_epi32( s[3], 0xee ) );
d[ 8] = mm512_bcast_m128( _mm_shuffle_epi32( s[4], 0x44 ) );
d[ 9] = mm512_bcast_m128( _mm_shuffle_epi32( s[4], 0xee ) );
d[0] = _mm512_set1_epi64( s[0] );
d[1] = _mm512_set1_epi64( s[1] );
d[2] = _mm512_set1_epi64( s[2] );
d[3] = _mm512_set1_epi64( s[3] );
d[4] = _mm512_set1_epi64( s[4] );
d[5] = _mm512_set1_epi64( s[5] );
d[6] = _mm512_set1_epi64( s[6] );
d[7] = _mm512_set1_epi64( s[7] );
d[8] = _mm512_set1_epi64( s[8] );
d[9] = _mm512_set1_epi64( s[9] );
}
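// Illustration only, not from this commit; helper name is hypothetical.
// The rewrite above reads the 80 byte header as uint64_t and lets
// _mm512_set1_epi64 do the work, relying on vpbroadcastq accepting an
// integer/memory source, so no 128 bit shuffle is needed:

#include <immintrin.h>
#include <stdint.h>

static inline __m512i bcast_qword_512( const void *src, const int i )
{
   // one scalar load feeding vpbroadcastq
   return _mm512_set1_epi64( (long long)( (const uint64_t*)src )[i] );
}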
// byte swap and broadcast to al lanes
// byte swap and broadcast to all lanes
#if defined(__AVX512VBMI__)
// Combine byte swap & broadcast in one permute
static inline void mm512_bswap32_intrlv80_8x64( void *d, const void *src )
{
__m128i bswap_shuf = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 );
const __m512i c0 = _mm512_set1_epi64( 0x0405060700010203 );
const __m512i c1 = _mm512_set1_epi64( 0x0c0d0e0f08090a0b );
const __m128i s0 = casti_m128i( src,0 );
const __m128i s1 = casti_m128i( src,1 );
const __m128i s2 = casti_m128i( src,2 );
const __m128i s3 = casti_m128i( src,3 );
const __m128i s4 = casti_m128i( src,4 );
casti_m512i( d,0 ) = _mm512_permutexvar_epi8( c0,
_mm512_castsi128_si512( s0 ) );
casti_m512i( d,1 ) = _mm512_permutexvar_epi8( c1,
_mm512_castsi128_si512( s0 ) );
casti_m512i( d,2 ) = _mm512_permutexvar_epi8( c0,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d,3 ) = _mm512_permutexvar_epi8( c1,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d,4 ) = _mm512_permutexvar_epi8( c0,
_mm512_castsi128_si512( s2 ) );
casti_m512i( d,5 ) = _mm512_permutexvar_epi8( c1,
_mm512_castsi128_si512( s2 ) );
casti_m512i( d,6 ) = _mm512_permutexvar_epi8( c0,
_mm512_castsi128_si512( s3 ) );
casti_m512i( d,7 ) = _mm512_permutexvar_epi8( c1,
_mm512_castsi128_si512( s3 ) );
casti_m512i( d,8 ) = _mm512_permutexvar_epi8( c0,
_mm512_castsi128_si512( s4 ) );
casti_m512i( d,9 ) = _mm512_permutexvar_epi8( c1,
_mm512_castsi128_si512( s4 ) );
}
#else
static inline void mm512_bswap32_intrlv80_8x64( void *d, const void *src )
{
const __m128i bswap_shuf = _mm_set_epi64x( 0x0c0d0e0f08090a0b,
0x0405060700010203 );
const __m512i c1 = _mm512_set1_epi64( 1 );
__m128i s0 = casti_m128i( src,0 );
__m128i s1 = casti_m128i( src,1 );
__m128i s2 = casti_m128i( src,2 );
@@ -1827,18 +1920,24 @@ static inline void mm512_bswap32_intrlv80_8x64( void *d, const void *src )
s3 = _mm_shuffle_epi8( s3, bswap_shuf );
s4 = _mm_shuffle_epi8( s4, bswap_shuf );
casti_m512i( d, 0 ) = mm512_bcast_m128( _mm_shuffle_epi32( s0, 0x44 ) );
casti_m512i( d, 1 ) = mm512_bcast_m128( _mm_shuffle_epi32( s0, 0xee ) );
casti_m512i( d, 2 ) = mm512_bcast_m128( _mm_shuffle_epi32( s1, 0x44 ) );
casti_m512i( d, 3 ) = mm512_bcast_m128( _mm_shuffle_epi32( s1, 0xee ) );
casti_m512i( d, 4 ) = mm512_bcast_m128( _mm_shuffle_epi32( s2, 0x44 ) );
casti_m512i( d, 5 ) = mm512_bcast_m128( _mm_shuffle_epi32( s2, 0xee ) );
casti_m512i( d, 6 ) = mm512_bcast_m128( _mm_shuffle_epi32( s3, 0x44 ) );
casti_m512i( d, 7 ) = mm512_bcast_m128( _mm_shuffle_epi32( s3, 0xee ) );
casti_m512i( d, 8 ) = mm512_bcast_m128( _mm_shuffle_epi32( s4, 0x44 ) );
casti_m512i( d, 9 ) = mm512_bcast_m128( _mm_shuffle_epi32( s4, 0xee ) );
casti_m512i( d,0 ) = _mm512_broadcastq_epi64( s0 );
casti_m512i( d,1 ) = _mm512_permutexvar_epi64( c1,
_mm512_castsi128_si512( s0 ) );
casti_m512i( d,2 ) = _mm512_broadcastq_epi64( s1 );
casti_m512i( d,3 ) = _mm512_permutexvar_epi64( c1,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d,4 ) = _mm512_broadcastq_epi64( s2 );
casti_m512i( d,5 ) = _mm512_permutexvar_epi64( c1,
_mm512_castsi128_si512( s2 ) );
casti_m512i( d,6 ) = _mm512_broadcastq_epi64( s3 );
casti_m512i( d,7 ) = _mm512_permutexvar_epi64( c1,
_mm512_castsi128_si512( s3 ) );
casti_m512i( d,8 ) = _mm512_broadcastq_epi64( s4 );
casti_m512i( d,9 ) = _mm512_permutexvar_epi64( c1,
_mm512_castsi128_si512( s4 ) );
}
#endif // VBMI else
#endif // AVX512
//////////////////////////
@@ -1995,7 +2094,8 @@ static inline void dintrlv_4x128_512( void *dst0, void *dst1, void *dst2,
static inline void mm512_bswap32_intrlv80_4x128( void *d, void *src )
{
__m128i bswap_shuf = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 );
const __m128i bswap_shuf = _mm_set_epi64x( 0x0c0d0e0f08090a0b,
0x0405060700010203 );
__m128i s0 = casti_m128i( src,0 );
__m128i s1 = casti_m128i( src,1 );
__m128i s2 = casti_m128i( src,2 );

View File

@@ -42,10 +42,10 @@ typedef union
uint32_t u32[4];
} __attribute__ ((aligned (16))) m128_ovly;
// Efficient and convenient moving between GP & low bits of XMM.
// Use VEX when available to give access to xmm8-15 and zero extend for
// larger vectors.
// Deprecated. EVEX adds support for integer argument in broadcast instruction
// eliminating the need for an explicit move in most cases. Use the set1
// intrinsic with integers and let the compiler figure it out.
static inline __m128i mm128_mov64_128( const uint64_t n )
{
__m128i a;
@@ -68,65 +68,27 @@ static inline __m128i mm128_mov32_128( const uint32_t n )
return a;
}
// Inconsistent naming, prefix should reflect return value:
// u64_mov128_64
static inline uint64_t u64_mov128_64( const __m128i a )
{
uint64_t n;
#if defined(__AVX__)
asm( "vmovq %1, %0\n\t" : "=r"(n) : "x"(a) );
#else
asm( "movq %1, %0\n\t" : "=r"(n) : "x"(a) );
#endif
return n;
}
static inline uint32_t u32_mov128_32( const __m128i a )
{
uint32_t n;
#if defined(__AVX__)
asm( "vmovd %1, %0\n\t" : "=r"(n) : "x"(a) );
#else
asm( "movd %1, %0\n\t" : "=r"(n) : "x"(a) );
#endif
return n;
}
// Emulate broadcast & insert instructions not available in SSE2
#define mm128_bcast_i64( i ) _mm_shuffle_epi32( mm128_mov64_128( i ), 0x44 )
#define mm128_bcast_i32( i ) _mm_shuffle_epi32( mm128_mov32_128( i ), 0x00 )
// FYI only, not used anywhere
//#define mm128_bcast_m64( v ) _mm_shuffle_epi32( v, 0x44 )
//#define mm128_bcast_m32( v ) _mm_shuffle_epi32( v, 0x00 )
#define m128_const_i128( i ) mm128_mov64_128( i )
// deprecated
#define m128_const1_64 mm128_bcast_i64
#define m128_const1_32 mm128_bcast_i32
#if defined(__SSE4_1__)
// Assign 64 bit integers to respective elements: {hi, lo}
#define m128_const_64( hi, lo ) \
_mm_insert_epi64( mm128_mov64_128( lo ), hi, 1 )
#else
// Deprecated, use set1 directly
#define m128_const1_64 _mm_set1_epi64x
#define m128_const1_32 _mm_set1_epi32
// Deprecated, use set directly
#define m128_const_64 _mm_set_epi64x
#endif
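// Illustration only, not from this commit; helper name is hypothetical and
// x86-64 is assumed. On plain SSE2 the bcast macros above emulate a 64 bit
// set1 with a GP-to-XMM move followed by a shuffle that duplicates the low
// qword. A standalone sketch using the standard intrinsic instead of the
// header's asm-based move:

#include <emmintrin.h>
#include <stdint.h>

static inline __m128i sse2_bcast_64( const uint64_t n )
{
   const __m128i lo = _mm_cvtsi64_si128( (long long)n ); // n in element 0
   return _mm_shuffle_epi32( lo, 0x44 );                 // copy dwords {1,0} to {3,2}
}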
// Pseudo constants
#define m128_zero _mm_setzero_si128()
#define m128_one_128 mm128_mov64_128( 1 )
#define m128_one_64 mm128_bcast_i64( 1 )
#define m128_one_32 mm128_bcast_i32( 1 )
#define m128_one_16 mm128_bcast_i32( 0x00010001 )
#define m128_one_8 mm128_bcast_i32( 0x01010101 )
//#define m128_one_64 _mm_set1_epi64x( 1 )
#define m128_one_32 _mm_set1_epi32( 1 )
// ASM is used so the return variable needn't be initialized, avoiding a compiler warning.
// The macro hides the function call parentheses so it reads like an identifier.
static inline __m128i mm128_neg1_fn()
{
__m128i a;
@@ -184,15 +146,11 @@ static inline __m128i mm128_insert_32( const __m128i v, const uint32_t i,
const int c )
{ return mm128_xim_32( v, mm128_mov32_128( i ), c<<4 ); }
// Extract 32 bit element c from v and return as integer.
static inline uint32_t mm128_extract_32( const __m128i v, const int c )
{ return u32_mov128_32( mm128_xim_32( v, v, c<<6 ) ); }
// Zero 32 bit elements when bit in mask is set.
// Zero 32 bit elements when corresponding bit in 4 bit mask is set.
static inline __m128i mm128_mask_32( const __m128i v, const int m )
{ return mm128_xim_32( v, v, m ); }
// Move element i2 of v2 to element i1 of v1 and return updated v1.
// Copy element i2 of v2 to element i1 of dest and copy remaining elements from v1.
#define mm128_mov32_32( v1, i1, v2, i2 ) \
mm128_xim_32( v1, v2, ( (i1)<<4 ) | ( (i2)<<6 ) )
@@ -213,13 +171,6 @@ static inline __m128i mm128_not( const __m128i v )
#endif
/*
// Unary negation of elements (-v)
#define mm128_negate_64( v ) _mm_sub_epi64( m128_zero, v )
#define mm128_negate_32( v ) _mm_sub_epi32( m128_zero, v )
#define mm128_negate_16( v ) _mm_sub_epi16( m128_zero, v )
*/
// Add 4 values, fewer dependencies than sequential addition.
#define mm128_add4_64( a, b, c, d ) \
_mm_add_epi64( _mm_add_epi64( a, b ), _mm_add_epi64( c, d ) )
@@ -384,16 +335,6 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
#endif // AVX512 else SSE2
#define mm128_ror_16( v, c ) \
_mm_or_si128( _mm_srli_epi16( v, c ), _mm_slli_epi16( v, 16-(c) ) )
#define mm128_rol_16( v, c ) \
_mm_or_si128( _mm_slli_epi16( v, c ), _mm_srli_epi16( v, 16-(c) ) )
// Deprecated.
#define mm128_rol_var_32( v, c ) \
_mm_or_si128( _mm_slli_epi32( v, c ), _mm_srli_epi32( v, 32-(c) ) )
// Cross lane shuffles
//
// Limited 2 input shuffle, combines shuffle with blend. The destination low
@@ -415,6 +356,7 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
#define mm128_shuflr_32( v ) _mm_shuffle_epi32( v, 0x39 )
#define mm128_shufll_32( v ) _mm_shuffle_epi32( v, 0x93 )
/* Not used
#if defined(__SSSE3__)
// Rotate right by c bytes, no SSE2 equivalent.
@@ -422,6 +364,7 @@ static inline __m128i mm128_shuflr_x8( const __m128i v, const int c )
{ return _mm_alignr_epi8( v, v, c ); }
#endif
*/
// Rotate 64 bit lanes
@@ -471,25 +414,25 @@ static inline __m128i mm128_shuflr_x8( const __m128i v, const int c )
#if defined(__SSSE3__)
#define mm128_bswap_128( v ) \
_mm_shuffle_epi8( v, m128_const_64( 0x0001020304050607, \
0x08090a0b0c0d0e0f ) )
_mm_shuffle_epi8( v, _mm_set_epi64x( 0x0001020304050607, \
0x08090a0b0c0d0e0f ) )
#define mm128_bswap_64( v ) \
_mm_shuffle_epi8( v, m128_const_64( 0x08090a0b0c0d0e0f, \
0x0001020304050607 ) )
_mm_shuffle_epi8( v, _mm_set_epi64x( 0x08090a0b0c0d0e0f, \
0x0001020304050607 ) )
#define mm128_bswap_32( v ) \
_mm_shuffle_epi8( v, m128_const_64( 0x0c0d0e0f08090a0b, \
0x0405060700010203 ) )
_mm_shuffle_epi8( v, _mm_set_epi64x( 0x0c0d0e0f08090a0b, \
0x0405060700010203 ) )
#define mm128_bswap_16( v ) \
_mm_shuffle_epi8( v, m128_const_64( 0x0e0f0c0d0a0b0809, \
0x0607040502030001 )
_mm_shuffle_epi8( v, _mm_set_epi64x( 0x0e0f0c0d0a0b0809, \
0x0607040502030001 )
// 8 byte qword * 8 qwords * 2 lanes = 128 bytes
#define mm128_block_bswap_64( d, s ) do \
{ \
__m128i ctl = m128_const_64( 0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
__m128i ctl = _mm_set_epi64x( 0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
casti_m128i( d, 0 ) = _mm_shuffle_epi8( casti_m128i( s, 0 ), ctl ); \
casti_m128i( d, 1 ) = _mm_shuffle_epi8( casti_m128i( s, 1 ), ctl ); \
casti_m128i( d, 2 ) = _mm_shuffle_epi8( casti_m128i( s, 2 ), ctl ); \
@@ -503,7 +446,7 @@ static inline __m128i mm128_shuflr_x8( const __m128i v, const int c )
// 4 byte dword * 8 dwords * 4 lanes = 128 bytes
#define mm128_block_bswap_32( d, s ) do \
{ \
__m128i ctl = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 ); \
__m128i ctl = _mm_set_epi64x( 0x0c0d0e0f08090a0b, 0x0405060700010203 ); \
casti_m128i( d, 0 ) = _mm_shuffle_epi8( casti_m128i( s, 0 ), ctl ); \
casti_m128i( d, 1 ) = _mm_shuffle_epi8( casti_m128i( s, 1 ), ctl ); \
casti_m128i( d, 2 ) = _mm_shuffle_epi8( casti_m128i( s, 2 ), ctl ); \
@@ -564,14 +507,6 @@ static inline void mm128_block_bswap_32( __m128i *d, const __m128i *s )
#endif // SSSE3 else SSE2
// Swap 128 bit vectors.
// This should be avoided, it's more efficient to switch references.
#define mm128_swap256_128( v1, v2 ) \
v1 = _mm_xor_si128( v1, v2 ); \
v2 = _mm_xor_si128( v1, v2 ); \
v1 = _mm_xor_si128( v1, v2 );
// alignr instruction for 32 & 64 bit elements is only available with AVX512
// but emulated here. Behaviour is consistent with Intel alignr intrinsics.
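// The emulation referred to above falls outside this hunk. A plausible
// minimal sketch, assuming SSSE3 and hypothetical names, scales the element
// count to bytes for _mm_alignr_epi8, which concatenates hi:lo and keeps the
// low 128 bits of the right shift:

#include <tmmintrin.h>

#define sse_alignr_32( hi, lo, c )  _mm_alignr_epi8( hi, lo, (c)*4 )
#define sse_alignr_64( hi, lo, c )  _mm_alignr_epi8( hi, lo, (c)*8 )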

View File

@@ -59,57 +59,41 @@ typedef union
#if defined(__AVX2__)
// Move integer to low element of vector, other elements are set to zero.
#define mm256_mov64_256( i ) _mm256_castsi128_si256( mm128_mov64_128( i ) )
#define mm256_mov32_256( i ) _mm256_castsi128_si256( mm128_mov32_128( i ) )
// Move low element of vector to integer.
#define u64_mov256_64( v ) u64_mov128_64( _mm256_castsi256_si128( v ) )
#define u32_mov256_32( v ) u32_mov128_32( _mm256_castsi256_si128( v ) )
// concatenate two 128 bit vectors into one 256 bit vector: { hi, lo }
#define mm256_concat_128( hi, lo ) \
_mm256_inserti128_si256( _mm256_castsi128_si256( lo ), hi, 1 )
// Broadcast, ie set1, from 128 bit vector input.
#define mm256_bcast_m128( v ) \
_mm256_permute4x64_epi64( _mm256_castsi128_si256( v ), 0x44 )
#define mm256_bcast_i128( i ) mm256_bcast_m128( mm128_mov64_128( i ) )
#define mm256_bcast_i64( i ) _mm256_broadcastq_epi64( mm128_mov64_128( i ) )
#define mm256_bcast_i32( i ) _mm256_broadcastd_epi32( mm128_mov32_128( i ) )
#define mm256_bcast_i16( i ) _mm256_broadcastw_epi16( mm128_mov32_128( i ) )
#define mm256_bcast_i8( i ) _mm256_broadcastb_epi8 ( mm128_mov32_128( i ) )
_mm256_permute4x64_epi64( _mm256_castsi128_si256( v ), 0x44 )
// Equivalent of set, move 64 bit integer constants to respective 64 bit
// elements.
static inline __m256i m256_const_64( const uint64_t i3, const uint64_t i2,
const uint64_t i1, const uint64_t i0 )
{
union { __m256i m256i; uint64_t u64[4]; } v;
v.u64[0] = i0; v.u64[1] = i1; v.u64[2] = i2; v.u64[3] = i3;
return v.m256i;
}
// Set either the low or high 64 bit elements in 128 bit lanes, other elements
// are set to zero.
#if defined(__AVX512VL__)
#define mm256_bcast128lo_64( i64 ) _mm256_maskz_set1_epi64( 0x55, i64 )
#define mm256_bcast128hi_64( i64 ) _mm256_maskz_set1_epi64( 0xaa, i64 )
#else
#define mm256_bcast128lo_64( i64 ) mm256_bcast_m128( mm128_mov64_128( i64 ) )
#define mm256_bcast128hi_64( i64 ) _mm256_permute4x64_epi64( \
_mm256_castsi128_si256( mm128_mov64_128( i64 ) ), 0x11 )
#endif
#define mm256_set2_64( i1, i0 ) mm256_bcast_m128( _mm_set_epi64x( i1, i0 ) )
// Deprecated
#define m256_const1_128 mm256_bcast_m128
#define m256_const1_i128 mm256_bcast_i128
#define m256_const1_64 mm256_bcast_i64
#define m256_const1_32 mm256_bcast_i32
#define m256_const2_64( i1, i0 ) \
m256_const1_128( m128_const_64( i1, i0 ) )
#define m256_const1_64 _mm256_set1_epi64x
#define m256_const1_32 _mm256_set1_epi32
//
// All SIMD constant macros are actually functions containing executable
// code and therefore can't be used as compile time initializers.
#define m256_zero _mm256_setzero_si256()
#define m256_one_256 mm256_mov64_256( 1 )
#define m256_one_128 mm256_bcast_i128( 1 )
#define m256_one_64 mm256_bcast_i64( 1 )
#define m256_one_32 mm256_bcast_i32( 1 )
#define m256_one_16 mm256_bcast_i16( 1 )
#define m256_one_8 mm256_bcast_i8 ( 1 )
#define m256_zero _mm256_setzero_si256()
//#define m256_one_256 mm256_mov64_256( 1 )
#define m256_one_128 mm256_bcast_m128( m128_one_128 )
#define m256_one_64 _mm256_set1_epi64x( 1 )
#define m256_one_32 _mm256_set1_epi32( 1 )
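// Illustration only, not from this commit (function name is hypothetical,
// this header is assumed to be included): the pseudo constants expand to
// intrinsic calls, so they can only appear where executable code is allowed.

// Not valid at file scope, the macro is executable code:
//    static const __m256i k_one = m256_one_32;
// Fine inside a function, where it compiles to a broadcast or constant load:
static inline __m256i add_one_256( const __m256i v )
{
   const __m256i k_one = m256_one_32;
   return _mm256_add_epi32( v, k_one );
}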
static inline __m256i mm256_neg1_fn()
{
@@ -119,10 +103,6 @@ static inline __m256i mm256_neg1_fn()
}
#define m256_neg1 mm256_neg1_fn()
// Consistent naming for similar operations.
#define mm128_extr_lo128_256( v ) _mm256_castsi256_si128( v )
#define mm128_extr_hi128_256( v ) _mm256_extracti128_si256( v, 1 )
//
// Memory functions
// n = number of 256 bit (32 byte) vectors
@@ -151,14 +131,6 @@ static inline __m256i mm256_not( const __m256i v )
#endif
/*
// Unary negation of each element ( -v )
#define mm256_negate_64( v ) _mm256_sub_epi64( m256_zero, v )
#define mm256_negate_32( v ) _mm256_sub_epi32( m256_zero, v )
#define mm256_negate_16( v ) _mm256_sub_epi16( m256_zero, v )
*/
// Add 4 values, fewer dependencies than sequential addition.
#define mm256_add4_64( a, b, c, d ) \
@@ -167,12 +139,6 @@ static inline __m256i mm256_not( const __m256i v )
#define mm256_add4_32( a, b, c, d ) \
_mm256_add_epi32( _mm256_add_epi32( a, b ), _mm256_add_epi32( c, d ) )
#define mm256_add4_16( a, b, c, d ) \
_mm256_add_epi16( _mm256_add_epi16( a, b ), _mm256_add_epi16( c, d ) )
#define mm256_add4_8( a, b, c, d ) \
_mm256_add_epi8( _mm256_add_epi8( a, b ), _mm256_add_epi8( c, d ) )
#if defined(__AVX512VL__)
// AVX512 has ternary logic that supports any 3 input boolean expression.
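// Illustration only, not from this commit; helper name is hypothetical and
// AVX512VL is assumed. With ternary logic the usual two-instruction XOR
// chains collapse into one vpternlogq whose immediate encodes the whole
// boolean expression:

#include <immintrin.h>

// x ^ y ^ z in a single instruction, truth table immediate 0x96.
static inline __m256i xor3_256( const __m256i x, const __m256i y,
                                const __m256i z )
{ return _mm256_ternarylogic_epi64( x, y, z, 0x96 ); }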
@@ -344,19 +310,6 @@ static inline __m256i mm256_not( const __m256i v )
#endif // AVX512 else AVX2
#define mm256_ror_16( v, c ) \
_mm256_or_si256( _mm256_srli_epi16( v, c ), \
_mm256_slli_epi16( v, 16-(c) ) )
#define mm256_rol_16( v, c ) \
_mm256_or_si256( _mm256_slli_epi16( v, c ), \
_mm256_srli_epi16( v, 16-(c) ) )
// Deprecated.
#define mm256_rol_var_32( v, c ) \
_mm256_or_si256( _mm256_slli_epi32( v, c ), \
_mm256_srli_epi32( v, 32-(c) ) )
//
// Cross lane shuffles
//
@@ -386,12 +339,12 @@ static inline __m256i mm256_shufll_32( const __m256i v )
#define mm256_shuflr_32( v ) \
_mm256_permutevar8x32_epi32( v, \
m256_const_64( 0x0000000000000007, 0x0000000600000005, \
_mm256_set_epi64x( 0x0000000000000007, 0x0000000600000005, \
0x0000000400000003, 0x0000000200000001 ) )
#define mm256_shufll_32( v ) \
_mm256_permutevar8x32_epi32( v, \
m256_const_64( 0x0000000600000005, 0x0000000400000003, \
_mm256_set_epi64x( 0x0000000600000005, 0x0000000400000003, \
0x0000000200000001, 0x0000000000000007 ) )
#endif
@@ -409,15 +362,17 @@ static inline __m256i mm256_shufll_32( const __m256i v )
_mm256_castps_si256( _mm256_shuffle_ps( _mm256_castsi256_ps( v1 ), \
_mm256_castsi256_ps( v2 ), c ) );
#define mm256_swap128_64( v ) _mm256_shuffle_epi32( v, 0x4e )
#define mm256_shuflr128_64 mm256_swap128_64
#define mm256_shufll128_64 mm256_swap128_64
#define mm256_swap128_64( v ) _mm256_shuffle_epi32( v, 0x4e )
#define mm256_shuflr128_64 mm256_swap128_64
#define mm256_shufll128_64 mm256_swap128_64
#define mm256_shuflr128_32( v ) _mm256_shuffle_epi32( v, 0x39 )
#define mm256_shufll128_32( v ) _mm256_shuffle_epi32( v, 0x93 )
/* Not used
static inline __m256i mm256_shuflr128_x8( const __m256i v, const int c )
{ return _mm256_alignr_epi8( v, v, c ); }
*/
// 64 bit lanes
@@ -429,16 +384,16 @@ static inline __m256i mm256_shuflr128_x8( const __m256i v, const int c )
#define mm256_shuflr64_24( v ) _mm256_ror_epi64( v, 24 )
#else
#define mm256_shuflr64_24( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( \
0x0a09080f0e0d0c0b, 0x0201000706050403 ) )
_mm256_shuffle_epi8( v, mm256_bcast_m128( _mm_set_epi64x( \
0x0a09080f0e0d0c0b, 0x0201000706050403 ) ) )
#endif
#if defined(__AVX512VL__)
#define mm256_shuflr64_16( v ) _mm256_ror_epi64( v, 16 )
#else
#define mm256_shuflr64_16( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( \
0x09080f0e0d0c0b0a, 0x0100070605040302 ) )
_mm256_shuffle_epi8( v, mm256_bcast_m128( _mm_set_epi64x( \
0x09080f0e0d0c0b0a, 0x0100070605040302 ) ) )
#endif
// 32 bit lanes
@@ -447,8 +402,8 @@ static inline __m256i mm256_shuflr128_x8( const __m256i v, const int c )
#define mm256_swap32_16( v ) _mm256_ror_epi32( v, 16 )
#else
#define mm256_swap32_16( v ) \
_mm256_shuffle_epi8( v, m256_const2_64( \
0x0d0c0f0e09080b0a, 0x0504070601000302 ) )
_mm256_shuffle_epi8( v, mm256_bcast_m128( _mm_set_epi64x( \
0x0d0c0f0e09080b0a, 0x0504070601000302 ) ) )
#endif
#define mm256_shuflr32_16 mm256_swap32_16
#define mm256_shufll32_16 mm256_swap32_16
@@ -464,22 +419,23 @@ static inline __m256i mm256_shuflr128_x8( const __m256i v, const int c )
// Reverse byte order in elements, endian bswap.
#define mm256_bswap_64( v ) \
_mm256_shuffle_epi8( v, \
m256_const2_64( 0x08090a0b0c0d0e0f, 0x0001020304050607 ) )
_mm256_shuffle_epi8( v, mm256_bcast_m128( _mm_set_epi64x( \
0x08090a0b0c0d0e0f, 0x0001020304050607 ) ) )
#define mm256_bswap_32( v ) \
_mm256_shuffle_epi8( v, \
m256_const2_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 ) )
_mm256_shuffle_epi8( v, mm256_bcast_m128( _mm_set_epi64x( \
0x0c0d0e0f08090a0b, 0x0405060700010203 ) ) )
#define mm256_bswap_16( v ) \
_mm256_shuffle_epi8( v, \
m256_const2_64( 0x0e0f0c0d0a0b0809, 0x0607040502030001, ) )
_mm256_shuffle_epi8( v, mm256_bcast_m128( _mm_set_epi64x( \
0x0e0f0c0d0a0b0809, 0x0607040502030001 ) ) )
// Source and destination are pointers, may point to same memory.
// 8 byte qword * 8 qwords * 4 lanes = 256 bytes
#define mm256_block_bswap_64( d, s ) do \
{ \
__m256i ctl = m256_const2_64( 0x08090a0b0c0d0e0f, 0x0001020304050607 ) ; \
__m256i ctl = mm256_bcast_m128( _mm_set_epi64x( 0x08090a0b0c0d0e0f, \
0x0001020304050607 ) ); \
casti_m256i( d, 0 ) = _mm256_shuffle_epi8( casti_m256i( s, 0 ), ctl ); \
casti_m256i( d, 1 ) = _mm256_shuffle_epi8( casti_m256i( s, 1 ), ctl ); \
casti_m256i( d, 2 ) = _mm256_shuffle_epi8( casti_m256i( s, 2 ), ctl ); \
@@ -493,7 +449,8 @@ static inline __m256i mm256_shuflr128_x8( const __m256i v, const int c )
// 4 byte dword * 8 dwords * 8 lanes = 256 bytes
#define mm256_block_bswap_32( d, s ) do \
{ \
__m256i ctl = m256_const2_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 ); \
__m256i ctl = mm256_bcast_m128( _mm_set_epi64x( 0x0c0d0e0f08090a0b, \
0x0405060700010203 ) ); \
casti_m256i( d, 0 ) = _mm256_shuffle_epi8( casti_m256i( s, 0 ), ctl ); \
casti_m256i( d, 1 ) = _mm256_shuffle_epi8( casti_m256i( s, 1 ), ctl ); \
casti_m256i( d, 2 ) = _mm256_shuffle_epi8( casti_m256i( s, 2 ), ctl ); \
@@ -504,13 +461,6 @@ static inline __m256i mm256_shuflr128_x8( const __m256i v, const int c )
casti_m256i( d, 7 ) = _mm256_shuffle_epi8( casti_m256i( s, 7 ), ctl ); \
} while(0)
// swap 256 bit vectors in place.
// This should be avoided, it's more efficient to switch references.
#define mm256_swap512_256( v1, v2 ) \
v1 = _mm256_xor_si256( v1, v2 ); \
v2 = _mm256_xor_si256( v1, v2 ); \
v1 = _mm256_xor_si256( v1, v2 );
#endif // __AVX2__
#endif // SIMD_256_H__

View File

@@ -32,25 +32,26 @@
// "_mm512_permutex_epi64" only shuffles within 256 bit lanes. All other
// AVX512 permutes can cross all lanes.
//
// "_mm512_shuffle_epi8" shuffles accross the entire 512 bits. Shuffle
// instructions generally don't cross 128 bit lane boundaries and the AVX2
// version of this specific instruction does not.
//
// New alignr instructions for epi64 and epi32 operate across the entire
// vector but are slower than epi8, which continues to be restricted to 128 bit
// lanes.
//
// "vpbroadcastq/d/w/b" instructions now support integer register source
// argument in addition to XMM register or mem location. set1 intrinsic uses
// integer arg, broadcast intrinsic requires xmm. Mask versions of 256 and
// 128 bit broadcast also inherit this addition.
//
// "_mm512_permutexvar_epi8" and "_mm512_permutex2var_epi8" require
// AVX512-VBMI. The same instructions with larger elements don't have this
// requirement. "_mm512_permutexvar_epi8" also performs the same operation
// as "_mm512_shuffle_epi8" which only requires AVX512-BW.
// requirement.
//
// Two coding conventions are used to prevent macro argument side effects:
// - if a macro arg is used in an expression it must be protected by
// parentheses to ensure an expression argument is evaluated first.
// parentheses to ensure the expression argument is evaluated first.
// - if an argument is to be referenced multiple times a C inline function
// should be used instead of a macro to prevent an expression argument
// from being evaluated multiple times.
// from being evaluated multiple times (wasteful) or producing side
// effects (very bad).
//
// There are 2 areas where overhead is a major concern: constants and
// permutations.
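// Illustration only, not from this commit (names are hypothetical), of the
// second convention above: a macro that names its argument twice evaluates
// it twice, an inline function evaluates it once.

#include <emmintrin.h>

// Unsafe: bad_swap64_32( *p++ ) would advance p twice.
#define bad_swap64_32( v ) _mm_or_si128( _mm_srli_epi64( v, 32 ), \
                                         _mm_slli_epi64( v, 32 ) )

// Safe: the argument expression is evaluated once at the call site.
static inline __m128i safe_swap64_32( const __m128i v )
{ return _mm_or_si128( _mm_srli_epi64( v, 32 ), _mm_slli_epi64( v, 32 ) ); }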
@@ -87,7 +88,7 @@
// __AVX512VBMI__ __AVX512VAES__
//
// Used instead if casting.
// Used instead of casting.
typedef union
{
__m512i m512;
@@ -96,119 +97,40 @@ typedef union
uint64_t u64[8];
} __attribute__ ((aligned (64))) m512_ovly;
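// Illustration only, not from this commit; helper name is hypothetical and
// the header's own includes are assumed. The overlay gives scalar access to
// vector elements without pointer casts:

static inline uint64_t m512_extract_u64( const __m512i v, const int i )
{
   m512_ovly o;        // union defined above
   o.m512 = v;
   return o.u64[ i ];  // read element i as an integer
}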
// Move integer to/from element 0 of vector.
#define mm512_mov64_512( n ) _mm512_castsi128_si512( mm128_mov64_128( n ) )
#define mm512_mov32_512( n ) _mm512_castsi128_si512( mm128_mov32_128( n ) )
#define u64_mov512_64( a ) u64_mov128_64( _mm512_castsi512_si128( a ) )
#define u32_mov512_32( a ) u32_mov128_32( _mm512_castsi512_si128( a ) )
// A simple 128 bit permute, using a function instead of a macro avoids
// problems if the v arg is passed as an expression.
static inline __m512i mm512_perm_128( const __m512i v, const int c )
{ return _mm512_shuffle_i64x2( v, v, c ); }
// Concatenate two 256 bit vectors into one 512 bit vector {hi, lo}
#define mm512_concat_256( hi, lo ) \
_mm512_inserti64x4( _mm512_castsi256_si512( lo ), hi, 1 )
// Work in progress.
// modified naming scheme to align more with opcode mnemonic:
// m512_const1 becomes mm512_bcast_m[n] or mm512_bcast_i[n], short for
// broadcast, i indicates integer arg, m is vector. Set1 intrinsics should
// generally be used for integer data.
// mm512_const should only be used with immediate integer arguments, use
// _mm512_set intrinsic instead.
// mm512_set, mm512_set[n] macros may be defined when no intrinsic exists
// for either the arg size or arg count.
#define mm512_set_128( v3, v2, v1, v0 ) \
mm512_concat_256( mm256_concat_128( v3, v2 ), \
mm256_concat_128( v1, v0 ) )
// Equivalent of set, assign 64 bit integers to respective 64 bit elements.
// Use stack memory overlay
static inline __m512i m512_const_64( const uint64_t i7, const uint64_t i6,
const uint64_t i5, const uint64_t i4,
const uint64_t i3, const uint64_t i2,
const uint64_t i1, const uint64_t i0 )
{
union { __m512i m512i;
uint64_t u64[8]; } v;
v.u64[0] = i0; v.u64[1] = i1;
v.u64[2] = i2; v.u64[3] = i3;
v.u64[4] = i4; v.u64[5] = i5;
v.u64[6] = i6; v.u64[7] = i7;
return v.m512i;
}
// Broadcast with vector argument is generally more efficient except for
// integer immediate constants or when data was most recently referenced as
// integer and is still available in an integer register.
/* not used
// Equivalent of set1, broadcast lo element to all elements.
static inline __m512i m512_const1_256( const __m256i v )
{ return _mm512_inserti64x4( _mm512_castsi256_si512( v ), v, 1 ); }
*/
// Broadcast 128 bit vector to all lanes of 512 bit vector.
#define mm512_bcast_m128( v ) mm512_perm_128( _mm512_castsi128_si512( v ), 0 )
// Low 64 bits only, high 64 bits are zeroed.
#define mm512_bcast_i128( i ) mm512_bcast_m128( mm128_mov64_128( i ) )
#define mm512_bcast_i64( i ) _mm512_broadcastq_epi64( mm128_mov64_128( i ) )
#define mm512_bcast_i32( i ) _mm512_broadcastd_epi32( mm128_mov32_128( i ) )
#define mm512_bcast_i16( i ) _mm512_broadcastw_epi16( mm128_mov32_128( i ) )
#define mm512_bcast_i8( i ) _mm512_broadcastb_epi8( mm128_mov32_128( i ) )
// const1 is deprecated, use bcast instead
#define m512_const1_128 mm512_bcast_m128
#define m512_const1_i128 mm512_bcast_i128
#define m512_const1_64 mm512_bcast_i64
#define m512_const1_32 mm512_bcast_i32
// Set either the low or high 64 bit elements in 128 bit lanes, other elements
// are set to zero.
#define mm512_bcast128lo_64( i64 ) _mm512_maskz_set1_epi64( 0x55, i64 )
#define mm512_bcast128hi_64( i64 ) _mm512_maskz_set1_epi64( 0xaa, i64 )
#define m512_const2_128( v1, v0 ) \
_mm512_inserti64x2( _mm512_castsi128_si512( v0 ), v1, 1 )
#define mm512_set2_64( i1, i0 ) \
mm512_bcast_m128( _mm_set_epi64x( i1, i0 ) )
#define m512_const2_64( i1, i0 ) \
mm512_bcast_m128( m128_const_64( i1, i0 ) )
// Deprecated, use set
#define m512_const1_64 _mm512_set1_epi64
#define m512_const1_32 _mm512_set1_epi32
static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2,
const uint64_t i1, const uint64_t i0 )
{
union { __m512i m512i;
uint64_t u64[8]; } v;
v.u64[0] = v.u64[4] = i0;
v.u64[1] = v.u64[5] = i1;
v.u64[2] = v.u64[6] = i2;
v.u64[3] = v.u64[7] = i3;
return v.m512i;
}
//
// Pseudo constants.
// _mm512_setzero_si512 uses xor instruction. If needed frequently
// in a function it is better to define a register variable (const?)
// initialized to zero.
#define m512_zero _mm512_setzero_si512()
#define m512_one_512 mm512_mov64_512( 1 )
#define m512_one_256 _mm512_inserti64x4( m512_one_512, m256_one_256, 1 )
#define m512_one_128 mm512_bcast_i128( (__uint128_t)1 )
#define m512_one_64 mm512_bcast_i64( (uint64_t)1 )
#define m512_one_32 mm512_bcast_i32( (uint32_t)1 )
#define m512_one_16 mm512_bcast_i16( (uint16_t)1 )
#define m512_one_8 mm512_bcast_i8( (uint8_t)1 )
// Deprecated
#define m512_one_64 _mm512_set1_epi64( 1 )
#define m512_one_32 _mm512_set1_epi32( 1 )
// use asm to avoid compiler warning for uninitialized local
static inline __m512i mm512_neg1_fn()
{
__m512i a;
asm( "vpternlogq $0xff, %0, %0, %0\n\t" : "=x"(a) );
return a;
__m512i v;
asm( "vpternlogq $0xff, %0, %0, %0\n\t" : "=x"(v) );
return v;
}
#define m512_neg1 mm512_neg1_fn() // 1 clock
#define m512_neg1 mm512_neg1_fn()
//
// Basic operations without SIMD equivalent
@@ -217,13 +139,6 @@ static inline __m512i mm512_neg1_fn()
static inline __m512i mm512_not( const __m512i x )
{ return _mm512_ternarylogic_epi64( x, x, x, 1 ); }
/*
// Unary negation: -x
#define mm512_negate_64( x ) _mm512_sub_epi64( m512_zero, x )
#define mm512_negate_32( x ) _mm512_sub_epi32( m512_zero, x )
#define mm512_negate_16( x ) _mm512_sub_epi16( m512_zero, x )
*/
//
// Pointer casting
@@ -265,12 +180,6 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
#define mm512_add4_32( a, b, c, d ) \
_mm512_add_epi32( _mm512_add_epi32( a, b ), _mm512_add_epi32( c, d ) )
#define mm512_add4_16( a, b, c, d ) \
_mm512_add_epi16( _mm512_add_epi16( a, b ), _mm512_add_epi16( c, d ) )
#define mm512_add4_8( a, b, c, d ) \
_mm512_add_epi8( _mm512_add_epi8( a, b ), _mm512_add_epi8( c, d ) )
//
// Ternary logic uses 8 bit truth table to define any 3 input logical
// expression using any number or combinations of AND, OR, XOR, NOT.
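// A brief worked example, not from this commit, of how such an immediate is
// derived (standard method): evaluate the desired expression on the
// canonical truth table columns A = 0xF0, B = 0xCC, C = 0xAA (the three
// source operands in that order); the resulting byte is the immediate.

#include <stdio.h>

int main( void )
{
   const unsigned A = 0xF0, B = 0xCC, C = 0xAA;
   printf( "xor3   a^b^c    -> 0x%02X\n", ( A ^ B ^ C ) & 0xFF );      // 0x96
   printf( "xorand a^(b&c)  -> 0x%02X\n", ( A ^ ( B & C ) ) & 0xFF );  // 0x78
   printf( "orand  (a&b)|c  -> 0x%02X\n", ( ( A & B ) | C ) & 0xFF );  // 0xEA
   return 0;
}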
@@ -333,34 +242,23 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
// Reverse byte order of packed elements, vectorized endian conversion.
#define mm512_bswap_64( v ) \
_mm512_shuffle_epi8( v, \
m512_const_64( 0x38393a3b3c3d3e3f, 0x3031323334353637, \
0x28292a2b2c2d2e2f, 0x2021222324252627, \
0x18191a1b1c1d1e1f, 0x1011121314151617, \
0x08090a0b0c0d0e0f, 0x0001020304050607 ) )
_mm512_shuffle_epi8( v, mm512_bcast_m128( _mm_set_epi64x( \
0x08090a0b0c0d0e0f, 0x0001020304050607 ) ) )
#define mm512_bswap_32( v ) \
_mm512_shuffle_epi8( v, \
m512_const_64( 0x3c3d3e3f38393a3b, 0x3435363730313233, \
0x2c2d2e2f28292a2b, 0x2425262720212223, \
0x1c1d1e1f18191a1b, 0x1415161710111213, \
0x0c0d0e0f08090a0b, 0x0405060700010203 ) )
_mm512_shuffle_epi8( v, mm512_bcast_m128( _mm_set_epi64x( \
0x0c0d0e0f08090a0b, 0x0405060700010203 ) ) )
#define mm512_bswap_16( v ) \
_mm512_shuffle_epi8( v, \
m512_const_64( 0x3e3f3c3d3a3b3839, 0x3637343532333031, \
0x2e2f2c2d2a2b2829, 0x2627242522232021, \
0x1e1f1c1d1a1b1819, 0x1617141512131011, \
0x0e0f0c0d0a0b0809, 0x0607040502030001 ) )
_mm512_shuffle_epi8( v, mm512_bcast_m128( _mm_set_epi64x( \
0x0e0f0c0d0a0b0809, 0x0607040502030001 ) ) )
// Source and destination are pointers, may point to same memory.
// 8 lanes of 64 bytes each
#define mm512_block_bswap_64( d, s ) do \
{ \
const __m512i ctl = m512_const_64( 0x38393a3b3c3d3e3f, 0x3031323334353637, \
0x28292a2b2c2d2e2f, 0x2021222324252627, \
0x18191a1b1c1d1e1f, 0x1011121314151617, \
0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
const __m512i ctl = mm512_bcast_m128( _mm_set_epi64x( \
0x08090a0b0c0d0e0f, 0x0001020304050607 ) ); \
casti_m512i( d, 0 ) = _mm512_shuffle_epi8( casti_m512i( s, 0 ), ctl ); \
casti_m512i( d, 1 ) = _mm512_shuffle_epi8( casti_m512i( s, 1 ), ctl ); \
casti_m512i( d, 2 ) = _mm512_shuffle_epi8( casti_m512i( s, 2 ), ctl ); \
@@ -374,10 +272,8 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
// 16 lanes of 32 bytes each
#define mm512_block_bswap_32( d, s ) do \
{ \
const __m512i ctl = m512_const_64( 0x3c3d3e3f38393a3b, 0x3435363730313233, \
0x2c2d2e2f28292a2b, 0x2425262720212223, \
0x1c1d1e1f18191a1b, 0x1415161710111213, \
0x0c0d0e0f08090a0b, 0x0405060700010203 ); \
const __m512i ctl = mm512_bcast_m128( _mm_set_epi64x( \
0x0c0d0e0f08090a0b, 0x0405060700010203 ) ); \
casti_m512i( d, 0 ) = _mm512_shuffle_epi8( casti_m512i( s, 0 ), ctl ); \
casti_m512i( d, 1 ) = _mm512_shuffle_epi8( casti_m512i( s, 1 ), ctl ); \
casti_m512i( d, 2 ) = _mm512_shuffle_epi8( casti_m512i( s, 2 ), ctl ); \
@@ -395,8 +291,8 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
// Rotate elements across entire vector.
static inline __m512i mm512_swap_256( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 4 ); }
#define mm512_shuflr_256( v ) mm512_swap_256
#define mm512_shufll_256( v ) mm512_swap_256
#define mm512_shuflr_256 mm512_swap_256
#define mm512_shufll_256 mm512_swap_256
static inline __m512i mm512_shuflr_128( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 2 ); }
@@ -404,6 +300,7 @@ static inline __m512i mm512_shuflr_128( const __m512i v )
static inline __m512i mm512_shufll_128( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 6 ); }
/* Not used
static inline __m512i mm512_shuflr_64( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 1 ); }
@@ -415,7 +312,9 @@ static inline __m512i mm512_shuflr_32( const __m512i v )
static inline __m512i mm512_shufll_32( const __m512i v )
{ return _mm512_alignr_epi32( v, v, 15 ); }
*/
/* Not used
// Generic
static inline __m512i mm512_shuflr_x64( const __m512i v, const int n )
{ return _mm512_alignr_epi64( v, v, n ); }
@@ -424,34 +323,20 @@ static inline __m512i mm512_shuflr_x32( const __m512i v, const int n )
{ return _mm512_alignr_epi32( v, v, n ); }
#define mm512_shuflr_16( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
_mm512_permutexvar_epi16( _mm512_set_epi64( \
0x0000001F001E001D, 0x001C001B001A0019, \
0x0018001700160015, 0x0014001300120011, \
0x0010000F000E000D, 0x000C000B000A0009, \
0x0008000700060005, 0x0004000300020001 ), v )
#define mm512_shufll_16( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
_mm512_permutexvar_epi16( _mm512_set_epi64( \
0x001E001D001C001B, 0x001A001900180017, \
0x0016001500140013, 0x001200110010000F, \
0x000E000D000C000B, 0x000A000900080007, \
0x0006000500040003, 0x000200010000001F ), v )
*/
#define mm512_shuflr_8( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x003F3E3D3C3B3A39, 0x3837363534333231, \
0x302F2E2D2C2B2A29, 0x2827262524232221, \
0x201F1E1D1C1B1A19. 0x1817161514131211, \
0x100F0E0D0C0B0A09, 0x0807060504030201 ) )
#define mm512_shufll_8( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3B3A393837, 0x363534333231302F. \
0x2E2D2C2B2A292827, 0x262524232221201F, \
0x1E1D1C1B1A191817, 0x161514131211100F, \
0x0E0D0C0B0A090807, 0x060504030201003F ) )
// 256 bit lanes used only by lyra2, move these there
// Rotate elements within 256 bit lanes of 512 bit vector.
// Swap hi & lo 128 bits in each 256 bit lane
@@ -466,47 +351,48 @@ static inline __m512i mm512_shuflr_x32( const __m512i v, const int n )
/* Not used
// Rotate 256 bit lanes by one 32 bit element
#define mm512_shuflr256_32( v ) \
_mm512_permutexvar_epi32( m512_const_64( \
_mm512_permutexvar_epi32( _mm512_set_epi64( \
0x000000080000000f, 0x0000000e0000000d, \
0x0000000c0000000b, 0x0000000a00000009, \
0x0000000000000007, 0x0000000600000005, \
0x0000000400000003, 0x0000000200000001 ), v )
#define mm512_shufll256_32( v ) \
_mm512_permutexvar_epi32( m512_const_64( \
_mm512_permutexvar_epi32( _mm512_set_epi64( \
0x0000000e0000000d, 0x0000000c0000000b, \
0x0000000a00000009, 0x000000080000000f, \
0x0000000600000005, 0x0000000400000003, \
0x0000000200000001, 0x0000000000000007 ), v )
#define mm512_shuflr256_16( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
_mm512_permutexvar_epi16( _mm512_set_epi64( \
0x00100001001e001d, 0x001c001b001a0019, \
0x0018001700160015, 0x0014001300120011, \
0x0000000f000e000d, 0x000c000b000a0009, \
0x0008000700060005, 0x0004000300020001 ), v )
#define mm512_shufll256_16( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
_mm512_permutexvar_epi16( _mm512_set_epi64( \
0x001e001d001c001b, 0x001a001900180017, \
0x0016001500140013, 0x001200110010001f, \
0x000e000d000c000b, 0x000a000900080007, \
0x0006000500040003, 0x000200010000000f ), v )
#define mm512_shuflr256_8( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( _mm512_set_epi64( \
0x203f3e3d3c3b3a39, 0x3837363534333231, \
0x302f2e2d2c2b2a29, 0x2827262524232221, \
0x001f1e1d1c1b1a19, 0x1817161514131211, \
0x100f0e0d0c0b0a09, 0x0807060504030201 ) )
0x100f0e0d0c0b0a09, 0x0807060504030201 ), v )
#define mm512_shufll256_8( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( _mm512_set_epi64( \
0x3e3d3c3b3a393837, 0x363534333231302f, \
0x2e2d2c2b2a292827, 0x262524232221203f, \
0x1e1d1c1b1a191817, 0x161514131211100f, \
0x0e0d0c0b0a090807, 0x060504030201001f ) )
0x0e0d0c0b0a090807, 0x060504030201001f ), v )
*/
//
// Shuffle/rotate elements within 128 bit lanes of 512 bit vector.
@@ -518,11 +404,13 @@ static inline __m512i mm512_shuflr_x32( const __m512i v, const int n )
#define mm512_shuflr128_32( v ) _mm512_shuffle_epi32( v, 0x39 )
#define mm512_shufll128_32( v ) _mm512_shuffle_epi32( v, 0x93 )
/* Not used
// Rotate 128 bit lanes right by c bytes, versatile and just as fast
static inline __m512i mm512_shuflr128_8( const __m512i v, const int c )
static inline __m512i mm512_shuflr128_x8( const __m512i v, const int c )
{ return _mm512_alignr_epi8( v, v, c ); }
*/
// Limited 2 input, 1 output shuffle, combines shuffle with blend.
// Limited 2 input shuffle, combines shuffle with blend.
// Like most shuffles it's limited to 128 bit lanes and like some shuffles
// destination elements must come from a specific source arg.
#define mm512_shuffle2_64( v1, v2, c ) \
@@ -534,6 +422,7 @@ static inline __m512i mm512_shuflr128_8( const __m512i v, const int c )
_mm512_castsi512_ps( v2 ), c ) );
// 64 bit lanes
// Not really necessary with AVX512, included for consistency with AVX2/SSE.
#define mm512_swap64_32( v ) _mm512_shuffle_epi32( v, 0xb1 )
#define mm512_shuflr64_32 mm512_swap64_32
@@ -548,6 +437,7 @@ static inline __m512i mm512_shuflr128_8( const __m512i v, const int c )
#define mm512_shuflr64_8( v ) _mm512_ror_epi64( v, 8 )
#define mm512_shufll64_8( v ) _mm512_rol_epi64( v, 8 )
/* Not used
// 32 bit lanes
#define mm512_swap32_16( v ) _mm512_ror_epi32( v, 16 )
@@ -556,6 +446,7 @@ static inline __m512i mm512_shuflr128_8( const __m512i v, const int c )
#define mm512_shuflr32_8( v ) _mm512_ror_epi32( v, 8 )
#define mm512_shufll32_8( v ) _mm512_rol_epi32( v, 8 )
*/
#endif // AVX512
#endif // SIMD_512_H__