mirror of https://github.com/JayDDee/cpuminer-opt.git, synced 2025-09-17 23:44:27 +00:00
v3.20.3
@@ -302,6 +302,44 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
#endif

/*
//
// Extended bit shift for concatenated packed elements from 2 vectors.
// Shift right returns the low half, shift left returns the high half.

#if defined(__AVX512VBMI2__) && defined(__AVX512VL__)

#define mm128_shl2_64( v1, v2, c ) _mm_shldi_epi64( v1, v2, c )
#define mm128_shr2_64( v1, v2, c ) _mm_shrdi_epi64( v1, v2, c )

#define mm128_shl2_32( v1, v2, c ) _mm_shldi_epi32( v1, v2, c )
#define mm128_shr2_32( v1, v2, c ) _mm_shrdi_epi32( v1, v2, c )

#define mm128_shl2_16( v1, v2, c ) _mm_shldi_epi16( v1, v2, c )
#define mm128_shr2_16( v1, v2, c ) _mm_shrdi_epi16( v1, v2, c )

#else

#define mm128_shl2_64( v1, v2, c ) \
   _mm_or_si128( _mm_slli_epi64( v1, c ), _mm_srli_epi64( v2, 64 - (c) ) )

#define mm128_shr2_64( v1, v2, c ) \
   _mm_or_si128( _mm_srli_epi64( v2, c ), _mm_slli_epi64( v1, 64 - (c) ) )

#define mm128_shl2_32( v1, v2, c ) \
   _mm_or_si128( _mm_slli_epi32( v1, c ), _mm_srli_epi32( v2, 32 - (c) ) )

#define mm128_shr2_32( v1, v2, c ) \
   _mm_or_si128( _mm_srli_epi32( v2, c ), _mm_slli_epi32( v1, 32 - (c) ) )

#define mm128_shl2_16( v1, v2, c ) \
   _mm_or_si128( _mm_slli_epi16( v1, c ), _mm_srli_epi16( v2, 16 - (c) ) )

#define mm128_shr2_16( v1, v2, c ) \
   _mm_or_si128( _mm_srli_epi16( v2, c ), _mm_slli_epi16( v1, 16 - (c) ) )

#endif
*/
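
A scalar model makes the commented-out double-shift block above easier to check: for each 64-bit lane, mm128_shl2_64( v1, v2, c ) behaves as if the 128-bit concatenation v1:v2 were shifted left by c bits, keeping the high 64 bits. The sketch below tests one lane against that model using only the SSE2 fallback form shown above; the test values, the main() harness and the assumption 0 < c < 64 are illustrative and not part of this commit.

// Illustrative only: per-lane model of mm128_shl2_64, using the SSE2
// fallback form shown above (assumes 0 < c < 64).
#include <stdint.h>
#include <stdio.h>
#include <emmintrin.h>

#define mm128_shl2_64( v1, v2, c ) \
   _mm_or_si128( _mm_slli_epi64( v1, c ), _mm_srli_epi64( v2, 64 - (c) ) )

int main()
{
   __m128i v1 = _mm_set1_epi64x( 0x0123456789abcdefULL );  // high half of each lane pair
   __m128i v2 = _mm_set1_epi64x( 0xfedcba9876543210ULL );  // low half of each lane pair

   uint64_t r[2];
   _mm_storeu_si128( (__m128i*)r, mm128_shl2_64( v1, v2, 8 ) );

   // Scalar equivalent for one lane: the top 64 bits of ( v1:v2 ) << 8.
   uint64_t expect = ( 0x0123456789abcdefULL << 8 ) | ( 0xfedcba9876543210ULL >> 56 );
   printf( "%016llx %s\n", (unsigned long long)r[0],
           r[0] == expect ? "ok" : "mismatch" );
   return 0;
}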

//
// Bit rotations

@@ -402,13 +440,13 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
//
// Limited 2 input shuffle, combines shuffle with blend. The destination low
// half is always taken from src a, and the high half from src b.
#define mm128_shuffle2_64( a, b, c ) \
   _mm_castpd_si128( _mm_shuffle_pd( _mm_castsi128_pd( a ), \
                                     _mm_castsi128_pd( b ), c ) );
#define mm128_shuffle2_64( v1, v2, c ) \
   _mm_castpd_si128( _mm_shuffle_pd( _mm_castsi128_pd( v1 ), \
                                     _mm_castsi128_pd( v2 ), c ) );

#define mm128_shuffle2_32( a, b, c ) \
   _mm_castps_si128( _mm_shuffle_ps( _mm_castsi128_ps( a ), \
                                     _mm_castsi128_ps( b ), c ) );
#define mm128_shuffle2_32( v1, v2, c ) \
   _mm_castps_si128( _mm_shuffle_ps( _mm_castsi128_ps( v1 ), \
                                     _mm_castsi128_ps( v2 ), c ) );
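
The renamed mm128_shuffle2_64 is a thin wrapper around _mm_shuffle_pd, so the control constant c selects one 64-bit element from each source: bit 0 picks the low or high element of the first argument for the result's low half, and bit 1 does the same for the second argument and the high half. A small usage sketch follows; the values and the main() harness are illustrative, and the macro is reproduced without its trailing semicolon so it can be used inside an expression.

// Illustrative usage of mm128_shuffle2_64 (not part of this commit).
#include <stdint.h>
#include <stdio.h>
#include <emmintrin.h>

#define mm128_shuffle2_64( v1, v2, c ) \
   _mm_castpd_si128( _mm_shuffle_pd( _mm_castsi128_pd( v1 ), \
                                     _mm_castsi128_pd( v2 ), c ) )

int main()
{
   __m128i a = _mm_set_epi64x( 11, 10 );   // a = { hi = 11, lo = 10 }
   __m128i b = _mm_set_epi64x( 21, 20 );   // b = { hi = 21, lo = 20 }

   uint64_t r[2];
   // c = 2 (binary 10): low half = a.lo, high half = b.hi  ->  { 10, 21 }
   _mm_storeu_si128( (__m128i*)r, mm128_shuffle2_64( a, b, 2 ) );
   printf( "lo=%llu hi=%llu\n", (unsigned long long)r[0],
                                (unsigned long long)r[1] );
   return 0;
}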

//
// Rotate vector elements across all lanes

@@ -574,30 +612,68 @@ static inline void mm128_block_bswap_32( __m128i *d, const __m128i *s )


// Two input shuffle-rotate.
// Concatenate v1 & v2 and bit rotate as one 256 bit vector.

#if defined(__SSSE3__)

// Concatenate v1 & v2 and byte rotate as a 256 bit vector.
// Function macros with two inputs and one output, inputs are preserved.
// Returns the high 128 bits, i.e. updated v1.
// These functions are preferred but only available with SSSE3. Use the
// procedure macros below for SSE2 compatibility.

#if defined(__SSSE3__)

#define mm128_shufl2r_64( v1, v2 ) _mm_alignr_epi8( v2, v1, 8 )
#define mm128_shufl2l_64( v1, v2 ) _mm_alignr_epi8( v1, v2, 8 )

/*
#define mm128_shufl2r_32( v1, v2 ) _mm_alignr_epi8( v2, v1, 4 )
#define mm128_shufl2l_32( v1, v2 ) _mm_alignr_epi8( v1, v2, 4 )

#define mm128_shufl2r_16( v1, v2 ) _mm_alignr_epi8( v2, v1, 2 )
#define mm128_shufl2l_16( v1, v2 ) _mm_alignr_epi8( v1, v2, 2 )

#define mm128_shufl2r_8( v1, v2 )  _mm_alignr_epi8( v2, v1, 8 )
#define mm128_shufl2l_8( v1, v2 )  _mm_alignr_epi8( v1, v2, 8 )
#define mm128_shufl2r_8( v1, v2 )  _mm_alignr_epi8( v2, v1, 1 )
#define mm128_shufl2l_8( v1, v2 )  _mm_alignr_epi8( v1, v2, 1 )
*/

#else

#define mm128_shufl2r_64( v1, v2 ) \
   _mm_or_si128( _mm_srli_si128( v1, 8 ), \
                 _mm_slli_si128( v2, 8 ) )

#define mm128_shufl2l_64( v1, v2 ) \
   _mm_or_si128( _mm_slli_si128( v1, 8 ), \
                 _mm_srli_si128( v2, 8 ) )

/*
#define mm128_shufl2r_32( v1, v2 ) \
   _mm_or_si128( _mm_srli_si128( v1, 4 ), \
                 _mm_slli_si128( v2, 12 ) )

#define mm128_shufl2l_32( v1, v2 ) \
   _mm_or_si128( _mm_slli_si128( v1, 4 ), \
                 _mm_srli_si128( v2, 12 ) )

#define mm128_shufl2r_16( v1, v2 ) \
   _mm_or_si128( _mm_srli_si128( v1, 2 ), \
                 _mm_slli_si128( v2, 14 ) )

#define mm128_shufl2l_16( v1, v2 ) \
   _mm_or_si128( _mm_slli_si128( v1, 2 ), \
                 _mm_srli_si128( v2, 14 ) )

#define mm128_shufl2r_8( v1, v2 ) \
   _mm_or_si128( _mm_srli_si128( v1, 1 ), \
                 _mm_slli_si128( v2, 15 ) )

#define mm128_shufl2l_8( v1, v2 ) \
   _mm_or_si128( _mm_slli_si128( v1, 1 ), \
                 _mm_srli_si128( v2, 15 ) )
*/
#endif
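
Whichever branch is compiled, mm128_shufl2r_64( v1, v2 ) returns the high 128 bits of the concatenation v1:v2 rotated right by 64 bits, i.e. { low = old v1 high element, high = old v2 low element }. The check below uses the SSE2 fallback form so it builds without SSSE3; the values and the main() harness are illustrative and not part of this commit.

// Illustrative check of mm128_shufl2r_64 (SSE2 fallback form from above).
#include <stdint.h>
#include <stdio.h>
#include <emmintrin.h>

#define mm128_shufl2r_64( v1, v2 ) \
   _mm_or_si128( _mm_srli_si128( v1, 8 ), _mm_slli_si128( v2, 8 ) )

int main()
{
   __m128i v1 = _mm_set_epi64x( 3, 2 );   // v1 = { hi = 3, lo = 2 }, high 128 bits
   __m128i v2 = _mm_set_epi64x( 1, 0 );   // v2 = { hi = 1, lo = 0 }, low 128 bits

   uint64_t r[2];
   _mm_storeu_si128( (__m128i*)r, mm128_shufl2r_64( v1, v2 ) );

   // Expected: low = old v1.hi (3), high = old v2.lo (0).
   printf( "lo=%llu hi=%llu\n", (unsigned long long)r[0],
                                (unsigned long long)r[1] );
   return 0;
}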

// Procedure macros with 2 inputs and 2 outputs; the input args are overwritten.
// Deprecated for SSSE3 and above; the SSSE3 versions exist only for
// compatibility with existing code.
// vrol & vror are deprecated and do not exist for larger vectors.
// Their only use is by lyra2 blake2b when AVX2 is not available, and that
// use is grandfathered.

#if defined(__SSSE3__)

#define mm128_vror256_64( v1, v2 ) \
do { \

@@ -613,6 +689,7 @@ do { \
   v1 = t; \
} while(0)

/*
#define mm128_vror256_32( v1, v2 ) \
do { \
   __m128i t = _mm_alignr_epi8( v1, v2, 4 ); \

@@ -654,6 +731,7 @@ do { \
   v2 = _mm_alignr_epi8( v2, v1, 15 ); \
   v1 = t; \
} while(0)
*/

#else   // SSE2

@@ -674,7 +752,7 @@ do { \
                 _mm_srli_si128( v1, 8 ) ); \
   v1 = t; \
} while(0)

/*
#define mm128_vror256_32( v1, v2 ) \
do { \
   __m128i t = _mm_or_si128( _mm_srli_si128( v1, 4 ), \

@@ -728,7 +806,7 @@ do { \
                 _mm_srli_si128( v1, 15 ) ); \
   v1 = t; \
} while(0)

*/
#endif   // SSSE3 else SSE2

#endif   // __SSE2__
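
For the deprecated procedure form, the documented effect of mm128_vror256_64( v1, v2 ) is to rotate the concatenated 256-bit value v1:v2 right by 64 bits and write both halves back. Its full body is split across the hunks above, so the sequence below is only an assumed-equivalent sketch built from the function macro shown earlier; the name vror256_64_sketch and the example values are not part of this commit.

// Assumed-equivalent sketch of the deprecated mm128_vror256_64, expressed
// with the mm128_shufl2r_64 function macro shown earlier; illustrative only.
#include <emmintrin.h>

#define mm128_shufl2r_64( v1, v2 ) \
   _mm_or_si128( _mm_srli_si128( v1, 8 ), _mm_slli_si128( v2, 8 ) )

// Rotate the 256-bit value v1:v2 ( v1 = high 128 bits ) right by 64 bits,
// overwriting both inputs, as the procedure macro is documented to do.
#define vror256_64_sketch( v1, v2 ) \
do { \
   __m128i t = mm128_shufl2r_64( v1, v2 );   /* new high half */ \
   v2 = mm128_shufl2r_64( v2, v1 );          /* new low half  */ \
   v1 = t; \
} while(0)

int main()
{
   __m128i hi = _mm_set_epi64x( 3, 2 );   // high half of the 256-bit value
   __m128i lo = _mm_set_epi64x( 1, 0 );   // low half
   vror256_64_sketch( hi, lo );           // now hi = { hi = 0, lo = 3 }, lo = { hi = 2, lo = 1 }
   return 0;
}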