v3.22.3
https://github.com/JayDDee/cpuminer-opt.git
@@ -32,25 +32,26 @@
// "_mm512_permutex_epi64" only shuffles within 256 bit lanes. All other
// AVX512 permutes can cross all lanes.
//
// "_mm512_shuffle_epi8" shuffles across the entire 512 bits. Shuffle
// instructions generally don't cross 128 bit lane boundaries and the AVX2
// version of this specific instruction does not.
//
// New alignr instructions for epi64 and epi32 operate across the entire
// vector but are slower than epi8, which continues to be restricted to 128 bit
// lanes.
//
// "vpbroadcastq/d/w/b" instructions now support an integer register source
// argument in addition to an XMM register or mem location. The set1 intrinsic
// uses the integer arg, the broadcast intrinsic requires xmm. Mask versions of
// the 256 and 128 bit broadcasts also inherit this addition.
//
// "_mm512_permutexvar_epi8" and "_mm512_permutex2var_epi8" require
// AVX512-VBMI. The same instructions with larger elements don't have this
// requirement.
//
// Two coding conventions are used to prevent macro argument side effects:
// - if a macro arg is used in an expression it must be protected by
//   parentheses to ensure the expression argument is evaluated first.
// - if an argument is to be referenced multiple times a C inline function
//   should be used instead of a macro to prevent an expression argument
//   from being evaluated multiple times (wasteful) or producing side
//   effects (very bad).
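
// Illustrative sketch, not part of this commit: the two conventions above
// applied to a hypothetical scalar square operation.

#define SQR_UNSAFE( x ) ( x * x )      // SQR_UNSAFE( a + 1 ) expands to a + 1 * a + 1
#define SQR_MACRO( x )  ( (x) * (x) )  // parentheses fix precedence, but
                                       // SQR_MACRO( i++ ) still increments i twice
static inline int sqr( const int x )   // expression arg evaluated exactly once
{ return x * x; }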
//
// There are 2 areas where overhead is a major concern: constants and
// permutations.

@@ -87,7 +88,7 @@
// __AVX512VBMI__ __AVX512VAES__
//

// Used instead of casting.
typedef union
{
   __m512i m512;
@@ -96,119 +97,40 @@ typedef union
   uint64_t u64[8];
} __attribute__ ((aligned (64))) m512_ovly;
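
// Hedged usage sketch, not from the diff: the overlay lets scalar code read
// or patch individual lanes without intrinsics.

static inline uint64_t m512_ovly_lane2_64( const __m512i v )
{
   m512_ovly o;
   o.m512 = v;        // write as a vector
   return o.u64[2];   // read back as 64 bit lanes
}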

// Move integer to/from element 0 of vector.

#define mm512_mov64_512( n ) _mm512_castsi128_si512( mm128_mov64_128( n ) )
#define mm512_mov32_512( n ) _mm512_castsi128_si512( mm128_mov32_128( n ) )

#define u64_mov512_64( a ) u64_mov128_64( _mm256_castsi512_si128( a ) )
#define u32_mov512_32( a ) u32_mov128_32( _mm256_castsi512_si128( a ) )

// A simple 128 bit permute, using a function instead of a macro avoids
// problems if the v arg is passed as an expression.
static inline __m512i mm512_perm_128( const __m512i v, const int c )
{ return _mm512_shuffle_i64x2( v, v, c ); }

// Concatenate two 256 bit vectors into one 512 bit vector {hi, lo}
#define mm512_concat_256( hi, lo ) \
   _mm512_inserti64x4( _mm512_castsi256_si512( lo ), hi, 1 )

// Work in progress.
// Modified naming scheme to align more with the opcode mnemonic:
// m512_const1 becomes mm512_bcast_m[n] or mm512_bcast_i[n], short for
// broadcast; i indicates an integer arg, m a vector. Set1 intrinsics should
// generally be used for integer data.
// m512_const should no longer be used with immediate integer arguments; use
// the _mm512_set intrinsic instead.
// mm512_set, mm512_set[n] macros may be defined when no intrinsic exists
// for either the arg size or arg count.

#define mm512_set_128( v3, v2, v1, v0 ) \
   mm512_concat_256( mm256_concat_128( v3, v2 ), \
                     mm256_concat_128( v1, v0 ) )

// Equivalent of set, assign 64 bit integers to respective 64 bit elements.
// Uses a stack memory overlay.
static inline __m512i m512_const_64( const uint64_t i7, const uint64_t i6,
                                     const uint64_t i5, const uint64_t i4,
                                     const uint64_t i3, const uint64_t i2,
                                     const uint64_t i1, const uint64_t i0 )
{
   union { __m512i m512i;
           uint64_t u64[8]; } v;
   v.u64[0] = i0;  v.u64[1] = i1;
   v.u64[2] = i2;  v.u64[3] = i3;
   v.u64[4] = i4;  v.u64[5] = i5;
   v.u64[6] = i6;  v.u64[7] = i7;
   return v.m512i;
}

// Broadcast with a vector argument is generally more efficient, except for
// integer immediate constants or when the data was most recently referenced
// as integer and is still available in an integer register.

/* not used
// Equivalent of set1, broadcast lo element to all elements.
static inline __m512i m512_const1_256( const __m256i v )
{ return _mm512_inserti64x4( _mm512_castsi256_si512( v ), v, 1 ); }
*/

// Broadcast 128 bit vector to all lanes of 512 bit vector.
#define mm512_bcast_m128( v ) mm512_perm_128( _mm512_castsi128_si512( v ), 0 )
// Low 64 bits only, high 64 bits are zeroed.
#define mm512_bcast_i128( i ) mm512_bcast_m128( mm128_mov64_128( i ) )
#define mm512_bcast_i64( i )  _mm512_broadcastq_epi64( mm128_mov64_128( i ) )
#define mm512_bcast_i32( i )  _mm512_broadcastd_epi32( mm128_mov32_128( i ) )
#define mm512_bcast_i16( i )  _mm512_broadcastw_epi16( mm128_mov32_128( i ) )
#define mm512_bcast_i8( i )   _mm512_broadcastb_epi8( mm128_mov32_128( i ) )
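
// Sketch for illustration only: with an integer source, _mm512_set1_epi64 and
// the explicit mov+broadcast above should produce the same vector; set1 lets
// the compiler pick vpbroadcastq from an integer register directly.

static inline __m512i bcast_i64_demo( const uint64_t n )
{
   const __m512i a = mm512_bcast_i64( n );    // explicit xmm mov, then broadcast
   const __m512i b = _mm512_set1_epi64( n );  // compiler chooses the source form
   return _mm512_xor_si512( a, b );           // all zero if the two forms agree
}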

// const1 is deprecated, use bcast instead
#define m512_const1_128   mm512_bcast_m128
#define m512_const1_i128  mm512_bcast_i128
#define m512_const1_64    mm512_bcast_i64
#define m512_const1_32    mm512_bcast_i32

// Set either the low or high 64 bit elements in 128 bit lanes, other elements
// are set to zero.
#define mm512_bcast128lo_64( i64 ) _mm512_maskz_set1_epi64( 0x55, i64 )
#define mm512_bcast128hi_64( i64 ) _mm512_maskz_set1_epi64( 0xaa, i64 )
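
// Worked example (editorial): mask bit i enables 64 bit element i, so
// 0x55 = 01010101b selects elements 0,2,4,6 (the low half of each 128 bit
// lane) and 0xaa selects elements 1,3,5,7 (the high half). E.g.:
//   mm512_bcast128lo_64( 7 ) -> { 0,7, 0,7, 0,7, 0,7 }   (elements 7..0)
//   mm512_bcast128hi_64( 7 ) -> { 7,0, 7,0, 7,0, 7,0 }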

#define m512_const2_128( v1, v0 ) \
   _mm512_inserti64x2( _mm512_castsi128_si512( v0 ), v1, 1 )

#define mm512_set2_64( i1, i0 ) \
   mm512_bcast_m128( _mm_set_epi64x( i1, i0 ) )

#define m512_const2_64( i1, i0 ) \
   mm512_bcast_m128( m128_const_64( i1, i0 ) )

static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2,
                                      const uint64_t i1, const uint64_t i0 )
{
   union { __m512i m512i;
           uint64_t u64[8]; } v;
   v.u64[0] = v.u64[4] = i0;
   v.u64[1] = v.u64[5] = i1;
   v.u64[2] = v.u64[6] = i2;
   v.u64[3] = v.u64[7] = i3;
   return v.m512i;
}

//
// Pseudo constants.

// _mm512_setzero_si512 uses the xor instruction. If needed frequently
// in a function it is better to define a register variable (const?)
// initialized to zero.

#define m512_zero    _mm512_setzero_si512()
#define m512_one_512 mm512_mov64_512( 1 )
#define m512_one_256 _mm512_inserti64x4( m512_one_512, m256_one_256, 1 )
#define m512_one_128 mm512_bcast_i128( (__uint128_t)1 )
#define m512_one_64  mm512_bcast_i64( (uint64_t)1 )
#define m512_one_32  mm512_bcast_i32( (uint32_t)1 )
#define m512_one_16  mm512_bcast_i16( (uint16_t)1 )
#define m512_one_8   mm512_bcast_i8( (uint8_t)1 )

// Use asm to avoid compiler warning for uninitialized local.
static inline __m512i mm512_neg1_fn()
{
   __m512i v;
   asm( "vpternlogq $0xff, %0, %0, %0\n\t" : "=x"(v) );
   return v;
}
#define m512_neg1 mm512_neg1_fn()

//
// Basic operations without SIMD equivalent

@@ -217,13 +139,6 @@ static inline __m512i mm512_neg1_fn()
static inline __m512i mm512_not( const __m512i x )
{ return _mm512_ternarylogic_epi64( x, x, x, 1 ); }

/*
// Unary negation: -x
#define mm512_negate_64( x ) _mm512_sub_epi64( m512_zero, x )
#define mm512_negate_32( x ) _mm512_sub_epi32( m512_zero, x )
#define mm512_negate_16( x ) _mm512_sub_epi16( m512_zero, x )
*/

//
// Pointer casting

@@ -265,12 +180,6 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
#define mm512_add4_32( a, b, c, d ) \
   _mm512_add_epi32( _mm512_add_epi32( a, b ), _mm512_add_epi32( c, d ) )

#define mm512_add4_16( a, b, c, d ) \
   _mm512_add_epi16( _mm512_add_epi16( a, b ), _mm512_add_epi16( c, d ) )

#define mm512_add4_8( a, b, c, d ) \
   _mm512_add_epi8( _mm512_add_epi8( a, b ), _mm512_add_epi8( c, d ) )

//
// Ternary logic uses an 8 bit truth table to define any 3 input logical
// expression using any number or combination of AND, OR, XOR, NOT.
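
// Editorial sketch of how the truth table immediate is built: bit n of the
// 8 bit constant is the result for inputs a,b,c where n = a<<2 | b<<1 | c.
// For a ^ b ^ c the odd parity rows 001,010,100,111 are set, giving 0x96.

static inline __m512i xor3_demo( const __m512i a, const __m512i b,
                                 const __m512i c )
{ return _mm512_ternarylogic_epi64( a, b, c, 0x96 ); }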

@@ -333,34 +242,23 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
// Reverse byte order of packed elements, vectorized endian conversion.

#define mm512_bswap_64( v ) \
   _mm512_shuffle_epi8( v, mm512_bcast_m128( _mm_set_epi64x( \
                            0x08090a0b0c0d0e0f, 0x0001020304050607 ) ) )

#define mm512_bswap_32( v ) \
   _mm512_shuffle_epi8( v, mm512_bcast_m128( _mm_set_epi64x( \
                            0x0c0d0e0f08090a0b, 0x0405060700010203 ) ) )

#define mm512_bswap_16( v ) \
   _mm512_shuffle_epi8( v, mm512_bcast_m128( _mm_set_epi64x( \
                            0x0e0f0c0d0a0b0809, 0x0607040502030001 ) ) )
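
// Usage sketch (editorial): vectorized endian conversion of eight big-endian
// 64 bit words loaded from a byte stream, e.g. a SHA-512 message block.

static inline __m512i mm512_load_be64_demo( const void *p )
{ return mm512_bswap_64( _mm512_loadu_si512( p ) ); }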

// Source and destination are pointers and may point to the same memory.
// 8 lanes of 64 bytes each.
#define mm512_block_bswap_64( d, s ) do \
{ \
   const __m512i ctl = mm512_bcast_m128( _mm_set_epi64x( \
                            0x08090a0b0c0d0e0f, 0x0001020304050607 ) ); \
   casti_m512i( d, 0 ) = _mm512_shuffle_epi8( casti_m512i( s, 0 ), ctl ); \
   casti_m512i( d, 1 ) = _mm512_shuffle_epi8( casti_m512i( s, 1 ), ctl ); \
   casti_m512i( d, 2 ) = _mm512_shuffle_epi8( casti_m512i( s, 2 ), ctl ); \
@@ -374,10 +272,8 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
// 16 lanes of 32 bytes each.
#define mm512_block_bswap_32( d, s ) do \
{ \
   const __m512i ctl = mm512_bcast_m128( _mm_set_epi64x( \
                            0x0c0d0e0f08090a0b, 0x0405060700010203 ) ); \
   casti_m512i( d, 0 ) = _mm512_shuffle_epi8( casti_m512i( s, 0 ), ctl ); \
   casti_m512i( d, 1 ) = _mm512_shuffle_epi8( casti_m512i( s, 1 ), ctl ); \
   casti_m512i( d, 2 ) = _mm512_shuffle_epi8( casti_m512i( s, 2 ), ctl ); \
@@ -395,8 +291,8 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
// Rotate elements across entire vector.
static inline __m512i mm512_swap_256( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 4 ); }
#define mm512_shuflr_256 mm512_swap_256
#define mm512_shufll_256 mm512_swap_256

static inline __m512i mm512_shuflr_128( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 2 ); }

@@ -404,6 +300,7 @@ static inline __m512i mm512_shuflr_128( const __m512i v )
static inline __m512i mm512_shufll_128( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 6 ); }
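
// Editorial note: with both source args the same vector, alignr becomes a
// rotate; the count is how many elements the result is rotated right by.
// E.g. for v = { 7,6,5,4,3,2,1,0 } (elements listed high to low):
//   mm512_shuflr_128( v ) -> { 1,0,7,6,5,4,3,2 }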

/* Not used
static inline __m512i mm512_shuflr_64( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 1 ); }

@@ -415,7 +312,9 @@ static inline __m512i mm512_shuflr_32( const __m512i v )

static inline __m512i mm512_shufll_32( const __m512i v )
{ return _mm512_alignr_epi32( v, v, 15 ); }
*/

/* Not used
// Generic
static inline __m512i mm512_shuflr_x64( const __m512i v, const int n )
{ return _mm512_alignr_epi64( v, v, n ); }

@@ -424,34 +323,20 @@ static inline __m512i mm512_shuflr_x32( const __m512i v, const int n )
{ return _mm512_alignr_epi32( v, v, n ); }

#define mm512_shuflr_16( v ) \
   _mm512_permutexvar_epi16( _mm512_set_epi64( \
                   0x0000001F001E001D, 0x001C001B001A0019, \
                   0x0018001700160015, 0x0014001300120011, \
                   0x0010000F000E000D, 0x000C000B000A0009, \
                   0x0008000700060005, 0x0004000300020001 ), v )

#define mm512_shufll_16( v ) \
   _mm512_permutexvar_epi16( _mm512_set_epi64( \
                   0x001E001D001C001B, 0x001A001900180017, \
                   0x0016001500140013, 0x001200110010000F, \
                   0x000E000D000C000B, 0x000A000900080007, \
                   0x0006000500040003, 0x000200010000001F ), v )
*/

#define mm512_shuflr_8( v ) \
   _mm512_shuffle_epi8( v, m512_const_64( \
                   0x003F3E3D3C3B3A39, 0x3837363534333231, \
                   0x302F2E2D2C2B2A29, 0x2827262524232221, \
                   0x201F1E1D1C1B1A19, 0x1817161514131211, \
                   0x100F0E0D0C0B0A09, 0x0807060504030201 ) )

#define mm512_shufll_8( v ) \
   _mm512_shuffle_epi8( v, m512_const_64( \
                   0x3E3D3C3B3A393837, 0x363534333231302F, \
                   0x2E2D2C2B2A292827, 0x262524232221201F, \
                   0x1E1D1C1B1A191817, 0x161514131211100F, \
                   0x0E0D0C0B0A090807, 0x060504030201003F ) )

// 256 bit lanes used only by lyra2, move these there.
// Rotate elements within 256 bit lanes of 512 bit vector.

// Swap hi & lo 128 bits in each 256 bit lane.

@@ -466,47 +351,48 @@ static inline __m512i mm512_shuflr_x32( const __m512i v, const int n )
/* Not used
// Rotate 256 bit lanes by one 32 bit element
#define mm512_shuflr256_32( v ) \
   _mm512_permutexvar_epi32( _mm512_set_epi64( \
                   0x000000080000000f, 0x0000000e0000000d, \
                   0x0000000c0000000b, 0x0000000a00000009, \
                   0x0000000000000007, 0x0000000600000005, \
                   0x0000000400000003, 0x0000000200000001 ), v )

#define mm512_shufll256_32( v ) \
   _mm512_permutexvar_epi32( _mm512_set_epi64( \
                   0x0000000e0000000d, 0x0000000c0000000b, \
                   0x0000000a00000009, 0x000000080000000f, \
                   0x0000000600000005, 0x0000000400000003, \
                   0x0000000200000001, 0x0000000000000007 ), v )

#define mm512_shuflr256_16( v ) \
   _mm512_permutexvar_epi16( _mm512_set_epi64( \
                   0x00100001001e001d, 0x001c001b001a0019, \
                   0x0018001700160015, 0x0014001300120011, \
                   0x0000000f000e000d, 0x000c000b000a0009, \
                   0x0008000700060005, 0x0004000300020001 ), v )

#define mm512_shufll256_16( v ) \
   _mm512_permutexvar_epi16( _mm512_set_epi64( \
                   0x001e001d001c001b, 0x001a001900180017, \
                   0x0016001500140013, 0x001200110010001f, \
                   0x000e000d000c000b, 0x000a000900080007, \
                   0x0006000500040003, 0x000200010000000f ), v )

#define mm512_shuflr256_8( v ) \
   _mm512_shuffle_epi8( v, _mm512_set_epi64( \
                   0x203f3e3d3c3b3a39, 0x3837363534333231, \
                   0x302f2e2d2c2b2a29, 0x2827262524232221, \
                   0x001f1e1d1c1b1a19, 0x1817161514131211, \
                   0x100f0e0d0c0b0a09, 0x0807060504030201 ) )

#define mm512_shufll256_8( v ) \
   _mm512_shuffle_epi8( v, _mm512_set_epi64( \
                   0x3e3d3c3b3a393837, 0x363534333231302f, \
                   0x2e2d2c2b2a292827, 0x262524232221203f, \
                   0x1e1d1c1b1a191817, 0x161514131211100f, \
                   0x0e0d0c0b0a090807, 0x060504030201001f ) )
*/

//
// Shuffle/rotate elements within 128 bit lanes of 512 bit vector.

@@ -518,11 +404,13 @@ static inline __m512i mm512_shuflr_x32( const __m512i v, const int n )
#define mm512_shuflr128_32( v ) _mm512_shuffle_epi32( v, 0x39 )
#define mm512_shufll128_32( v ) _mm512_shuffle_epi32( v, 0x93 )

/* Not used
// Rotate 128 bit lanes right by c bytes, versatile and just as fast
static inline __m512i mm512_shuflr128_x8( const __m512i v, const int c )
{ return _mm512_alignr_epi8( v, v, c ); }
*/

// Limited 2 input shuffle, combines shuffle with blend.
// Like most shuffles it's limited to 128 bit lanes and, like some shuffles,
// destination elements must come from a specific source arg.
#define mm512_shuffle2_64( v1, v2, c ) \
@@ -534,6 +422,7 @@ static inline __m512i mm512_shuflr128_8( const __m512i v, const int c )
                          _mm512_castsi512_ps( v2 ), c ) );
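
// Editorial usage sketch, assuming the body elided by the hunk cut wraps
// _mm512_shuffle_pd as the _ps tail above suggests: even numbered 64 bit
// destination elements must come from v1 and odd numbered ones from v2, one
// control bit per element within each 128 bit lane. E.g. with c = 0xff:
//   mm512_shuffle2_64( v1, v2, 0xff ) -> { v2[7],v1[7], v2[5],v1[5], ... }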

// 64 bit lanes
// Not really necessary with AVX512, included for consistency with AVX2/SSE.

#define mm512_swap64_32( v )   _mm512_shuffle_epi32( v, 0xb1 )
#define mm512_shuflr64_32      mm512_swap64_32

@@ -548,6 +437,7 @@ static inline __m512i mm512_shuflr128_8( const __m512i v, const int c )
#define mm512_shuflr64_8( v )  _mm512_ror_epi64( v, 8 )
#define mm512_shufll64_8( v )  _mm512_rol_epi64( v, 8 )

/* Not used
// 32 bit lanes

#define mm512_swap32_16( v )   _mm512_ror_epi32( v, 16 )

@@ -556,6 +446,7 @@ static inline __m512i mm512_shuflr128_8( const __m512i v, const int c )

#define mm512_shuflr32_8( v )  _mm512_ror_epi32( v, 8 )
#define mm512_shufll32_8( v )  _mm512_rol_epi32( v, 8 )
*/

#endif // AVX512
#endif // SIMD_512_H__