This commit is contained in:
Jay D Dee
2025-06-20 20:31:41 -04:00
parent dd99580a4c
commit 66191db93c
86 changed files with 2701 additions and 4322 deletions


@@ -61,8 +61,10 @@ typedef union
#if defined(__AVX2__)
// Broadcast, i.e. set1, from 128 bit vector input.
#define mm256_bcast_m128( v ) \
#define mm256_bcast128( v ) \
_mm256_permute4x64_epi64( _mm256_castsi128_si256( v ), 0x44 )
// deprecated
#define mm256_bcast_m128 mm256_bcast128
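// Illustrative usage sketch, not part of this commit: mm256_bcast128 replicates
// a 128 bit vector into both 128 bit lanes of a 256 bit vector (AVX2 assumed).
static inline __m256i example_bcast128( void )
{
   const __m128i x = _mm_set_epi64x( 0x1111111111111111, 0x2222222222222222 );
   return mm256_bcast128( x );   // both 128 bit lanes now hold x
}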
// Set either the low or high 64 bit elements in 128 bit lanes, other elements
// are set to zero.
@@ -73,23 +75,23 @@ typedef union
#else
#define mm256_bcast128lo_64( i64 ) mm256_bcast_m128( v128_mov64( i64 ) )
#define mm256_bcast128lo_64( i64 ) mm256_bcast128( v128_mov64( i64 ) )
#define mm256_bcast128hi_64( i64 ) _mm256_permute4x64_epi64( \
_mm256_castsi128_si256( v128_mov64( i64 ) ), 0x11 )
#endif
#define mm256_set2_64( i1, i0 ) mm256_bcast_m128( _mm_set_epi64x( i1, i0 ) )
#define mm256_set2_64( i1, i0 ) mm256_bcast128( _mm_set_epi64x( i1, i0 ) )
#define mm256_set4_32( i3, i2, i1, i0 ) \
mm256_bcast_m128( _mm_set_epi32( i3, i2, i1, i0 ) )
mm256_bcast128( _mm_set_epi32( i3, i2, i1, i0 ) )
// All SIMD constant macros are actually functions containing executable
// code and therefore can't be used as compile time initializers.
#define m256_zero _mm256_setzero_si256()
#define m256_one_128 mm256_bcast_m128( v128_one )
#define m256_one_128 mm256_bcast128( v128_one )
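// Illustrative sketch, not part of this commit: because the constant macros
// expand to executable intrinsics they are only valid inside a function body;
// a file scope initializer such as "static __m256i k = m256_one_128;" won't compile.
static inline __m256i example_runtime_constant( void )
{
   return _mm256_add_epi64( m256_zero, m256_one_128 );  // fine at run time
}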
static inline __m256i mm256_neg1_fn()
{
@@ -231,21 +233,8 @@ static inline __m256i mm256_not( const __m256i v )
#define mm256_swap64_32 mm256_qrev32 // grandfathered
#define mm256_qrev16(v) mm256_shuffle16( v, 0x1b )
#define mm256_qrev8(v) \
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
v128_64( 0x08090a0b0c0d0e0f, 0x0001020304050607 ) ) )
#define mm256_lrev16(v) mm256_shuffle16( v, 0xb1 )
#define mm256_lrev8(v) \
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
v128_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 ) ) )
#define mm256_wrev8(v) \
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
v128_64( 0x0e0f0c0d0a0b0809, 0x0607040502030001 ) ) )
//
// Bit rotations.
@@ -268,50 +257,33 @@ static inline __m256i mm256_not( const __m256i v )
#if defined(VL256)
#define mm256_ror_64 _mm256_ror_epi64
#define mm256_rol_64 _mm256_rol_epi64
#define mm256_ror_32 _mm256_ror_epi32
#define mm256_rol_32 _mm256_rol_epi32
#define mm256_ror_64 _mm256_ror_epi64
#define mm256_rol_64 _mm256_rol_epi64
#define mm256_ror_32 _mm256_ror_epi32
#define mm256_rol_32 _mm256_rol_epi32
// Redundant but naming may be a better fit in some applications.
#define mm126_shuflr64_8( v) _mm256_ror_epi64( v, 8 )
#define mm156_shufll64_8( v) _mm256_rol_epi64( v, 8 )
#define mm256_shuflr64_16(v) _mm256_ror_epi64( v, 16 )
#define mm256_shufll64_16(v) _mm256_rol_epi64( v, 16 )
#define mm256_shuflr64_24(v) _mm256_ror_epi64( v, 24 )
#define mm256_shufll64_24(v) _mm256_rol_epi64( v, 24 )
#define mm256_shuflr32_8( v) _mm256_ror_epi32( v, 8 )
#define mm256_shufll32_8( v) _mm256_rol_epi32( v, 8 )
#define mm256_shuflr32_16(v) _mm256_ror_epi32( v, 16 )
#define mm256_shufll32_16(v) _mm256_rol_epi32( v, 16 )
#define mm256_shuflr64_8( v) _mm256_ror_epi64( v, 8 )
#define mm256_shufll64_8( v) _mm256_rol_epi64( v, 8 )
#define mm256_shuflr64_16(v) _mm256_ror_epi64( v, 16 )
#define mm256_shufll64_16(v) _mm256_rol_epi64( v, 16 )
#define mm256_shuflr64_24(v) _mm256_ror_epi64( v, 24 )
#define mm256_shufll64_24(v) _mm256_rol_epi64( v, 24 )
#define mm256_shuflr32_8( v) _mm256_ror_epi32( v, 8 )
#define mm256_shufll32_8( v) _mm256_rol_epi32( v, 8 )
#define mm256_shuflr32_16(v) _mm256_ror_epi32( v, 16 )
#define mm256_shufll32_16(v) _mm256_rol_epi32( v, 16 )
#else
// ROR & ROL will always select the fastest implementation, but these names
// may be a better fit in some applications.
#define mm256_shuflr64_8( v ) \
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
_mm_set_epi64x( 0x080f0e0d0c0b0a09, 0x0007060504030201 ) ) )
#define mm256_shufll64_8( v ) \
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
_mm_set_epi64x( 0x0e0d0c0b0a09080f, 0x0605040302010007 ) ) )
#define mm256_shuflr64_24( v ) \
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
_mm_set_epi64x( 0x0a09080f0e0d0c0b, 0x0201000706050403 ) ) )
#define mm256_shufll64_24( v ) \
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
_mm_set_epi64x( 0x0c0b0a09080f0e0d, 0x0403020100070605 ) ) )
#define mm256_shuflr32_8( v ) \
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
_mm_set_epi64x( 0x0c0f0e0d080b0a09, 0x0407060500030201 ) ) )
#define mm256_shufll32_8( v ) \
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
_mm_set_epi64x( 0x0e0d0c0f0a09080b, 0x0605040702010003 ) ) )
#define mm256_shuflr64_8( v ) _mm256_shuffle_epi8( v, V256_SHUFLR64_8 )
#define mm256_shufll64_8( v ) _mm256_shuffle_epi8( v, V256_SHUFLL64_8 )
#define mm256_shuflr64_24(v ) _mm256_shuffle_epi8( v, V256_SHUFLR64_24 )
#define mm256_shufll64_24(v ) _mm256_shuffle_epi8( v, V256_SHUFLL64_24 )
#define mm256_shuflr32_8( v ) _mm256_shuffle_epi8( v, V256_SHUFLR32_8 )
#define mm256_shufll32_8( v ) _mm256_shuffle_epi8( v, V256_SHUFLL32_8 )
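// Illustrative sketch, not part of this commit: an 8 bit rotate of each 64 bit
// element can be done with a byte shuffle. V256_SHUFLR64_8 presumably broadcasts
// the index vector shown in the removed lines above, so the expansion is
// equivalent to:
static inline __m256i example_shuflr64_8( const __m256i v )
{
   const __m256i idx = mm256_bcast128( _mm_set_epi64x( 0x080f0e0d0c0b0a09,
                                                       0x0007060504030201 ) );
   return _mm256_shuffle_epi8( v, idx );   // rotate each qword right by 8 bits
}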
#define mm256_ror_64( v, c ) \
( (c) == 8 ) ? mm256_shuflr64_8( v ) \
@@ -347,96 +319,6 @@ static inline __m256i mm256_not( const __m256i v )
#endif
//
// x2 rotates elements in 2 individual vectors as a double buffered
// optimization for AVX2; it adds nothing for AVX512 but is here for
// transparency.
#if defined(VL256)
/*
#define mm256_ror_64 _mm256_ror_epi64
#define mm256_rol_64 _mm256_rol_epi64
#define mm256_ror_32 _mm256_ror_epi32
#define mm256_rol_32 _mm256_rol_epi32
*/
#define mm256_rorx2_64( v1, v0, c ) \
_mm256_ror_epi64( v0, c ); \
_mm256_ror_epi64( v1, c )
#define mm256_rolx2_64( v1, v0, c ) \
_mm256_rol_epi64( v0, c ); \
_mm256_rol_epi64( v1, c )
#define mm256_rorx2_32( v1, v0, c ) \
_mm256_ror_epi32( v0, c ); \
_mm256_ror_epi32( v1, c )
#define mm256_rolx2_32( v1, v0, c ) \
_mm256_rol_epi32( v0, c ); \
_mm256_rol_epi32( v1, c )
#else // AVX2
/*
// Use shuflr64 / shuflr32 below for optimized bit rotations by multiples of 8.
#define mm256_ror_64( v, c ) \
_mm256_or_si256( _mm256_srli_epi64( v, c ), \
_mm256_slli_epi64( v, 64-(c) ) )
#define mm256_rol_64( v, c ) \
_mm256_or_si256( _mm256_slli_epi64( v, c ), \
_mm256_srli_epi64( v, 64-(c) ) )
#define mm256_ror_32( v, c ) \
_mm256_or_si256( _mm256_srli_epi32( v, c ), \
_mm256_slli_epi32( v, 32-(c) ) )
#define mm256_rol_32( v, c ) \
_mm256_or_si256( _mm256_slli_epi32( v, c ), \
_mm256_srli_epi32( v, 32-(c) ) )
*/
#define mm256_rorx2_64( v1, v0, c ) \
{ \
__m256i t0 = _mm256_srli_epi64( v0, c ); \
__m256i t1 = _mm256_srli_epi64( v1, c ); \
v0 = _mm256_slli_epi64( v0, 64-(c) ); \
v1 = _mm256_slli_epi64( v1, 64-(c) ); \
v0 = _mm256_or_si256( v0, t0 ); \
v1 = _mm256_or_si256( v1, t1 ); \
}
#define mm256_rolx2_64( v1, v0, c ) \
{ \
__m256i t0 = _mm256_slli_epi64( v0, c ); \
__m256i t1 = _mm256_slli_epi64( v1, c ); \
v0 = _mm256_srli_epi64( v0, 64-(c) ); \
v1 = _mm256_srli_epi64( v1, 64-(c) ); \
v0 = _mm256_or_si256( v0, t0 ); \
v1 = _mm256_or_si256( v1, t1 ); \
}
#define mm256_rorx2_32( v1, v0, c ) \
{ \
__m256i t0 = _mm256_srli_epi32( v0, c ); \
__m256i t1 = _mm256_srli_epi32( v1, c ); \
v0 = _mm256_slli_epi32( v0, 32-(c) ); \
v1 = _mm256_slli_epi32( v1, 32-(c) ); \
v0 = _mm256_or_si256( v0, t0 ); \
v1 = _mm256_or_si256( v1, t1 ); \
}
#define mm256_rolx2_32( v1, v0, c ) \
{ \
__m256i t0 = _mm256_slli_epi32( v0, c ); \
__m256i t1 = _mm256_slli_epi32( v1, c ); \
v0 = _mm256_srli_epi32( v0, 32-(c) ); \
v1 = _mm256_srli_epi32( v1, 32-(c) ); \
v0 = _mm256_or_si256( v0, t0 ); \
v1 = _mm256_or_si256( v1, t1 ); \
}
#endif // AVX512 else AVX2
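// Illustrative usage sketch, not part of this commit: the x2 forms rotate two
// vectors at once so their shift/or dependency chains can interleave; on the
// AVX2 path this is equivalent to rotating each vector independently.
static inline void example_rorx2_64( __m256i *va, __m256i *vb )
{
   __m256i a = *va, b = *vb;
   mm256_rorx2_64( b, a, 17 );   // rotate both a and b right by 17 bits
   *va = a;
   *vb = b;
}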
#if defined(__AVX2__)
// 128 bit version of unpack
@@ -453,20 +335,14 @@ static inline __m256i mm256_not( const __m256i v )
//
// Cross lane shuffles
//
// Rotate elements across all lanes.
#define mm256_shuffle_16( v, c ) \
_mm256_or_si256( _mm256_shufflehi_epi16( v, c ), \
_mm256_shufflelo_epi16( v, c ) )
// Swap 128 bit elements in 256 bit vector.
#define mm256_swap_128( v ) _mm256_permute4x64_epi64( v, 0x4e )
#define mm256_rev_128( v ) _mm256_permute4x64_epi64( v, 0x4e )
#define mm256_swap_128 mm256_rev_128 // grandfathered
// Rotate 256 bit vector by one 64 bit element
#define mm256_shuflr_64( v ) _mm256_permute4x64_epi64( v, 0x39 )
#define mm256_shufll_64( v ) _mm256_permute4x64_epi64( v, 0x93 )
// Reverse 64 bit elements
/* not used
// Reverse elements
#define mm256_rev_64( v ) _mm256_permute4x64_epi64( v, 0x1b )
#define mm256_rev_32( v ) \
@@ -474,7 +350,12 @@ static inline __m256i mm256_not( const __m256i v )
0x0000000400000005, 0x0000000600000007 )
#define mm256_rev_16( v ) \
_mm256_permute4x64_epi64( mm256_shuffle_16( v, 0x1b ), 0x4e )
_mm256_permute4x64_epi64( mm256_shuffle16( v, 0x1b ), 0x4e )
*/
// Rotate 256 bit vector by one 64 bit element
#define mm256_shuflr_64( v ) _mm256_permute4x64_epi64( v, 0x39 )
#define mm256_shufll_64( v ) _mm256_permute4x64_epi64( v, 0x93 )
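// Illustrative sketch, not part of this commit: mm256_shuflr_64 rotates the four
// 64 bit elements right by one position, so element i receives element i+1 mod 4.
static inline __m256i example_shuflr_64( void )
{
   const __m256i v = _mm256_set_epi64x( 3, 2, 1, 0 );
   return mm256_shuflr_64( v );   // elements (0,1,2,3) become (1,2,3,0)
}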
/* Not used
// Rotate 256 bit vector by one 32 bit element.
@@ -486,7 +367,7 @@ static inline __m256i mm256_shufll_32( const __m256i v )
#else
#define mm256_shuflr_32( v ) \
_mm256_permutevar8x32_epi32( v, \
_mm256_set_spi64x( 0x0000000000000007, 0x0000000600000005, \
_mm256_set_epi64x( 0x0000000000000007, 0x0000000600000005, \
0x0000000400000003, 0x0000000200000001 ) )
#define mm256_shufll_32( v ) \
_mm256_permutevar8x32_epi32( v, \
@@ -507,113 +388,64 @@ static inline __m256i mm256_shufll_32( const __m256i v )
_mm256_castps_si256( _mm256_shuffle_ps( _mm256_castsi256_ps( v1 ), \
_mm256_castsi256_ps( v2 ), c ) );
#define mm256_swap128_64(v) _mm256_shuffle_epi32( v, 0x4e )
#define mm256_rev128_64(v) _mm256_shuffle_epi32( v, 0x4e )
#define mm256_swap128_64 mm256_rev128_64 // grandfathered
/* not used
#define mm256_rev128_32(v) _mm256_shuffle_epi32( v, 0x1b )
#define mm256_rev128_16(v) mm256_shuffle_16( v, 0x1b )
#define mm256_rev128_16(v) mm256_shuffle16( v, 0x1b )
*/
#define mm256_shuflr128_32(v) _mm256_shuffle_epi32( v, 0x39 )
#define mm256_shufll128_32(v) _mm256_shuffle_epi32( v, 0x93 )
#define mm256_shuflr128_16(v) mm256_shuffle_16( v, 0x39 )
#define mm256_shufll128_16(v) mm256_shuffle_16( v, 0x93 )
/* not used
#define mm256_shuflr128_16(v) mm256_shuffle16( v, 0x39 )
#define mm256_shufll128_16(v) mm256_shuffle16( v, 0x93 )
/* Not used
static inline __m256i mm256_shuflr128_x8( const __m256i v, const int c )
{ return _mm256_alignr_epi8( v, v, c ); }
*/
// Reverse byte order in elements, endian bswap.
#define mm256_bswap_64( v ) \
_mm256_shuffle_epi8( v, mm256_bcast_m128( _mm_set_epi64x( \
0x08090a0b0c0d0e0f, 0x0001020304050607 ) ) )
#define mm256_bswap_64( v ) _mm256_shuffle_epi8( v, V256_BSWAP64 )
#define mm256_bswap_32( v ) \
_mm256_shuffle_epi8( v, mm256_bcast_m128( _mm_set_epi64x( \
0x0c0d0e0f08090a0b, 0x0405060700010203 ) ) )
#define mm256_bswap_32( v ) _mm256_shuffle_epi8( v, V256_BSWAP32 )
/* not used
#define mm256_bswap_16( v ) \
_mm256_shuffle_epi8( v, mm256_bcast_m128( _mm_set_epi64x( \
_mm256_shuffle_epi8( v, mm256_bcast128( _mm_set_epi64x( \
0x0e0f0c0d0a0b0809, 0x0607040502030001 ) ) )
//
*/
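// Illustrative sketch, not part of this commit: mm256_bswap_32 reverses the byte
// order within each 32 bit element, the usual big endian conversion for hashing.
static inline __m256i example_bswap_32( void )
{
   const __m256i v = _mm256_set1_epi32( 0x11223344 );
   return mm256_bswap_32( v );   // every 32 bit element becomes 0x44332211
}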
// Source and destination are pointers, may point to same memory.
// 8 byte qword * 8 qwords * 4 lanes = 256 bytes
#define mm256_block_bswap_64( d, s ) \
{ \
__m256i ctl = mm256_bcast_m128( _mm_set_epi64x( 0x08090a0b0c0d0e0f, \
0x0001020304050607 ) ); \
casti_m256i( d, 0 ) = _mm256_shuffle_epi8( casti_m256i( s, 0 ), ctl ); \
casti_m256i( d, 1 ) = _mm256_shuffle_epi8( casti_m256i( s, 1 ), ctl ); \
casti_m256i( d, 2 ) = _mm256_shuffle_epi8( casti_m256i( s, 2 ), ctl ); \
casti_m256i( d, 3 ) = _mm256_shuffle_epi8( casti_m256i( s, 3 ), ctl ); \
casti_m256i( d, 4 ) = _mm256_shuffle_epi8( casti_m256i( s, 4 ), ctl ); \
casti_m256i( d, 5 ) = _mm256_shuffle_epi8( casti_m256i( s, 5 ), ctl ); \
casti_m256i( d, 6 ) = _mm256_shuffle_epi8( casti_m256i( s, 6 ), ctl ); \
casti_m256i( d, 7 ) = _mm256_shuffle_epi8( casti_m256i( s, 7 ), ctl ); \
}
#define mm256_block_bswap64_512 mm256_block_bswap_64
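// Illustrative usage sketch, not part of this commit: byte-swap eight 256 bit
// vectors (256 bytes) in place; source and destination may alias, and the
// pointer is assumed to be a 32 byte aligned __m256i array.
static inline void example_block_bswap_64( __m256i block[8] )
{
   mm256_block_bswap_64( block, block );
}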
#define mm256_block_bswap64_1024( d, s ) \
{ \
__m256i ctl = mm256_bcast_m128( _mm_set_epi64x( 0x08090a0b0c0d0e0f, \
0x0001020304050607 ) ); \
casti_m256i( d, 0 ) = _mm256_shuffle_epi8( casti_m256i( s, 0 ), ctl ); \
casti_m256i( d, 1 ) = _mm256_shuffle_epi8( casti_m256i( s, 1 ), ctl ); \
casti_m256i( d, 2 ) = _mm256_shuffle_epi8( casti_m256i( s, 2 ), ctl ); \
casti_m256i( d, 3 ) = _mm256_shuffle_epi8( casti_m256i( s, 3 ), ctl ); \
casti_m256i( d, 4 ) = _mm256_shuffle_epi8( casti_m256i( s, 4 ), ctl ); \
casti_m256i( d, 5 ) = _mm256_shuffle_epi8( casti_m256i( s, 5 ), ctl ); \
casti_m256i( d, 6 ) = _mm256_shuffle_epi8( casti_m256i( s, 6 ), ctl ); \
casti_m256i( d, 7 ) = _mm256_shuffle_epi8( casti_m256i( s, 7 ), ctl ); \
casti_m256i( d, 8 ) = _mm256_shuffle_epi8( casti_m256i( s, 8 ), ctl ); \
casti_m256i( d, 9 ) = _mm256_shuffle_epi8( casti_m256i( s, 9 ), ctl ); \
casti_m256i( d,10 ) = _mm256_shuffle_epi8( casti_m256i( s,10 ), ctl ); \
casti_m256i( d,11 ) = _mm256_shuffle_epi8( casti_m256i( s,11 ), ctl ); \
casti_m256i( d,12 ) = _mm256_shuffle_epi8( casti_m256i( s,12 ), ctl ); \
casti_m256i( d,13 ) = _mm256_shuffle_epi8( casti_m256i( s,13 ), ctl ); \
casti_m256i( d,14 ) = _mm256_shuffle_epi8( casti_m256i( s,14 ), ctl ); \
casti_m256i( d,15 ) = _mm256_shuffle_epi8( casti_m256i( s,15 ), ctl ); \
casti_m256i( d,0 ) = mm256_bswap_64( casti_m256i( s,0 ) ); \
casti_m256i( d,1 ) = mm256_bswap_64( casti_m256i( s,1 ) ); \
casti_m256i( d,2 ) = mm256_bswap_64( casti_m256i( s,2 ) ); \
casti_m256i( d,3 ) = mm256_bswap_64( casti_m256i( s,3 ) ); \
casti_m256i( d,4 ) = mm256_bswap_64( casti_m256i( s,4 ) ); \
casti_m256i( d,5 ) = mm256_bswap_64( casti_m256i( s,5 ) ); \
casti_m256i( d,6 ) = mm256_bswap_64( casti_m256i( s,6 ) ); \
casti_m256i( d,7 ) = mm256_bswap_64( casti_m256i( s,7 ) ); \
}
// 4 byte dword * 8 dwords * 8 lanes = 256 bytes
#define mm256_block_bswap_32( d, s ) \
{ \
__m256i ctl = mm256_bcast_m128( _mm_set_epi64x( 0x0c0d0e0f08090a0b, \
0x0405060700010203 ) ); \
casti_m256i( d, 0 ) = _mm256_shuffle_epi8( casti_m256i( s, 0 ), ctl ); \
casti_m256i( d, 1 ) = _mm256_shuffle_epi8( casti_m256i( s, 1 ), ctl ); \
casti_m256i( d, 2 ) = _mm256_shuffle_epi8( casti_m256i( s, 2 ), ctl ); \
casti_m256i( d, 3 ) = _mm256_shuffle_epi8( casti_m256i( s, 3 ), ctl ); \
casti_m256i( d, 4 ) = _mm256_shuffle_epi8( casti_m256i( s, 4 ), ctl ); \
casti_m256i( d, 5 ) = _mm256_shuffle_epi8( casti_m256i( s, 5 ), ctl ); \
casti_m256i( d, 6 ) = _mm256_shuffle_epi8( casti_m256i( s, 6 ), ctl ); \
casti_m256i( d, 7 ) = _mm256_shuffle_epi8( casti_m256i( s, 7 ), ctl ); \
casti_m256i( d, 0 ) = mm256_bswap_32( casti_m256i( s, 0 ) ); \
casti_m256i( d, 1 ) = mm256_bswap_32( casti_m256i( s, 1 ) ); \
casti_m256i( d, 2 ) = mm256_bswap_32( casti_m256i( s, 2 ) ); \
casti_m256i( d, 3 ) = mm256_bswap_32( casti_m256i( s, 3 ) ); \
casti_m256i( d, 4 ) = mm256_bswap_32( casti_m256i( s, 4 ) ); \
casti_m256i( d, 5 ) = mm256_bswap_32( casti_m256i( s, 5 ) ); \
casti_m256i( d, 6 ) = mm256_bswap_32( casti_m256i( s, 6 ) ); \
casti_m256i( d, 7 ) = mm256_bswap_32( casti_m256i( s, 7 ) ); \
}
#define mm256_block_bswap32_256 mm256_block_bswap_32
#define mm256_block_bswap32_512( d, s ) \
{ \
__m256i ctl = mm256_bcast_m128( _mm_set_epi64x( 0x0c0d0e0f08090a0b, \
0x0405060700010203 ) ); \
casti_m256i( d, 0 ) = _mm256_shuffle_epi8( casti_m256i( s, 0 ), ctl ); \
casti_m256i( d, 1 ) = _mm256_shuffle_epi8( casti_m256i( s, 1 ), ctl ); \
casti_m256i( d, 2 ) = _mm256_shuffle_epi8( casti_m256i( s, 2 ), ctl ); \
casti_m256i( d, 3 ) = _mm256_shuffle_epi8( casti_m256i( s, 3 ), ctl ); \
casti_m256i( d, 4 ) = _mm256_shuffle_epi8( casti_m256i( s, 4 ), ctl ); \
casti_m256i( d, 5 ) = _mm256_shuffle_epi8( casti_m256i( s, 5 ), ctl ); \
casti_m256i( d, 6 ) = _mm256_shuffle_epi8( casti_m256i( s, 6 ), ctl ); \
casti_m256i( d, 7 ) = _mm256_shuffle_epi8( casti_m256i( s, 7 ), ctl ); \
casti_m256i( d, 8 ) = _mm256_shuffle_epi8( casti_m256i( s, 8 ), ctl ); \
casti_m256i( d, 9 ) = _mm256_shuffle_epi8( casti_m256i( s, 9 ), ctl ); \
casti_m256i( d,10 ) = _mm256_shuffle_epi8( casti_m256i( s,10 ), ctl ); \
casti_m256i( d,11 ) = _mm256_shuffle_epi8( casti_m256i( s,11 ), ctl ); \
casti_m256i( d,12 ) = _mm256_shuffle_epi8( casti_m256i( s,12 ), ctl ); \
casti_m256i( d,13 ) = _mm256_shuffle_epi8( casti_m256i( s,13 ), ctl ); \
casti_m256i( d,14 ) = _mm256_shuffle_epi8( casti_m256i( s,14 ), ctl ); \
casti_m256i( d,15 ) = _mm256_shuffle_epi8( casti_m256i( s,15 ), ctl ); \
}
#if defined(VL256)
#define mm256_alignr64 _mm256_alignr_epi64