mirror of https://github.com/JayDDee/cpuminer-opt.git

v3.15.7
@@ -1,18 +1,18 @@
#if !defined(SIMD_64_H__)
#define SIMD_64_H__ 1

#if defined(__MMX__)
#if defined(__MMX__) && defined(__SSE__)

////////////////////////////////////////////////////////////////
//
// 64 bit MMX vectors.
//
// There are rumours MMX will be removed. Although casting with int64
// works, there is likely some overhead to move the data to an MMX register
// and back.

// This code is not used anywhere and likely never will be. Its intent was
// to support 2 way parallel hashing using SSE2 for 64 bit, and MMX for 32
// bit hash functions, but was never implemented.

// Pseudo constants

/*
#define m64_zero _mm_setzero_si64()
#define m64_one_64 _mm_set_pi32( 0UL, 1UL )
@@ -30,79 +30,67 @@

#define casti_m64(p,i) (((__m64*)(p))[(i)])

// Cast all arguments as they're likely to be uint64_t.

// Bitwise not: ~(a)
//#define mm64_not( a ) _mm_xor_si64( (__m64)a, m64_neg1 )
#define mm64_not( a ) ( (__m64)( ~( (uint64_t)(a) ) ) )
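// A minimal usage sketch of the convention above, not part of the patch:
// a plain uint64_t can be passed straight in and the __m64 result cast back,
// assuming <mmintrin.h> and <stdint.h> are visible. demo_not_u64 is a
// hypothetical name.
static inline uint64_t demo_not_u64( uint64_t x )
{
   // mm64_not casts its argument to uint64_t internally, so no explicit
   // conversion is needed on the way in.
   return (uint64_t)mm64_not( x );
}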

// Unary negate elements
#define mm64_negate_32( v ) _mm_sub_pi32( m64_zero, (__m64)v )
#define mm64_negate_16( v ) _mm_sub_pi16( m64_zero, (__m64)v )
#define mm64_negate_8( v )  _mm_sub_pi8( m64_zero, (__m64)v )
#define mm64_negate_32( v ) _mm_sub_pi32( m64_zero, v )
#define mm64_negate_16( v ) _mm_sub_pi16( m64_zero, v )
#define mm64_negate_8( v )  _mm_sub_pi8( m64_zero, v )

// Rotate bits in packed elements of 64 bit vector
#define mm64_rol_64( a, n ) \
   _mm_or_si64( _mm_slli_si64( (__m64)(a), n ), \
                _mm_srli_si64( (__m64)(a), 64-(n) ) )
   _mm_or_si64( _mm_slli_si64( a, n ), \
                _mm_srli_si64( a, 64-(n) ) )

#define mm64_ror_64( a, n ) \
   _mm_or_si64( _mm_srli_si64( (__m64)(a), n ), \
                _mm_slli_si64( (__m64)(a), 64-(n) ) )
   _mm_or_si64( _mm_srli_si64( a, n ), \
                _mm_slli_si64( a, 64-(n) ) )

#define mm64_rol_32( a, n ) \
   _mm_or_si64( _mm_slli_pi32( (__m64)(a), n ), \
                _mm_srli_pi32( (__m64)(a), 32-(n) ) )
   _mm_or_si64( _mm_slli_pi32( a, n ), \
                _mm_srli_pi32( a, 32-(n) ) )

#define mm64_ror_32( a, n ) \
   _mm_or_si64( _mm_srli_pi32( (__m64)(a), n ), \
                _mm_slli_pi32( (__m64)(a), 32-(n) ) )
   _mm_or_si64( _mm_srli_pi32( a, n ), \
                _mm_slli_pi32( a, 32-(n) ) )

#define mm64_rol_16( a, n ) \
   _mm_or_si64( _mm_slli_pi16( (__m64)(a), n ), \
                _mm_srli_pi16( (__m64)(a), 16-(n) ) )
   _mm_or_si64( _mm_slli_pi16( a, n ), \
                _mm_srli_pi16( a, 16-(n) ) )

#define mm64_ror_16( a, n ) \
   _mm_or_si64( _mm_srli_pi16( (__m64)(a), n ), \
                _mm_slli_pi16( (__m64)(a), 16-(n) ) )
   _mm_or_si64( _mm_srli_pi16( a, n ), \
                _mm_slli_pi16( a, 16-(n) ) )
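// A minimal usage sketch, not part of the patch: rotate each 32 bit lane of
// a 64 bit value left by 7 using the macro above. Variable and function
// names are hypothetical; the scalar <-> __m64 cast idiom mirrors the one
// already used in this file.
static inline uint64_t demo_rol32_lanes( uint64_t x )
{
   __m64 v = (__m64)x;            // move the scalar into an MMX vector
   v = mm64_rol_32( v, 7 );       // rotate both 32 bit lanes left by 7
   return (uint64_t)v;            // and move it back
}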

// Rotate packed elements across lanes. Useful for byte swap and byte
// rotation.

// _mm_shuffle_pi8 requires SSSE3 while _mm_shuffle_pi16 requires SSE,
// even though these are MMX instructions.

// Swap hi & lo 32 bits.
#define mm64_swap32( a )  _mm_shuffle_pi16( (__m64)(a), 0x4e )
#define mm64_swap_32( a ) _mm_shuffle_pi16( a, 0x4e )

#define mm64_ror1x16_64( a ) _mm_shuffle_pi16( (__m64)(a), 0x39 )
#define mm64_rol1x16_64( a ) _mm_shuffle_pi16( (__m64)(a), 0x93 )
#define mm64_ror64_1x16( a ) _mm_shuffle_pi16( a, 0x39 )
#define mm64_rol64_1x16( a ) _mm_shuffle_pi16( a, 0x93 )

// Swap hi & lo 16 bits of each 32 bit element
#define mm64_swap16_32( a ) _mm_shuffle_pi16( (__m64)(a), 0xb1 )
#define mm64_swap32_16( a ) _mm_shuffle_pi16( a, 0xb1 )
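// A worked sketch of the shuffle controls above, not part of the patch. Each
// 2 bit field of the immediate selects one 16 bit lane, lowest field first:
// 0x4e = 01 00 11 10 picks lanes 2,3,0,1 (swap the 32 bit halves), 0xb1 =
// 10 11 00 01 picks lanes 1,0,3,2 (swap 16 bit halves within each 32 bit
// element), 0x39 rotates right and 0x93 rotates left by one lane. The self
// test below is hypothetical and only assumes SSE for _mm_shuffle_pi16.
static inline int demo_shuffle_selftest( void )
{
   const uint64_t x = 0x0123456789abcdefULL;
   uint64_t a = (uint64_t)mm64_swap_32( (__m64)x );     // 0x89abcdef01234567
   uint64_t b = (uint64_t)mm64_swap32_16( (__m64)x );   // 0x45670123cdef89ab
   uint64_t c = (uint64_t)mm64_ror64_1x16( (__m64)x );  // 0xcdef0123456789ab
   return ( a == 0x89abcdef01234567ULL ) &&
          ( b == 0x45670123cdef89abULL ) &&
          ( c == 0xcdef0123456789abULL );
}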

#if defined(__SSSE3__)

// Endian byte swap packed elements
// A vectorized version of the u64 bswap, use when the data is already in an MMX reg.
#define mm64_bswap_64( v ) \
   _mm_shuffle_pi8( (__m64)v, (__m64)0x0001020304050607 )

#define mm64_bswap_32( v ) \
   _mm_shuffle_pi8( (__m64)v, (__m64)0x0405060700010203 )
   _mm_shuffle_pi8( v, (__m64)0x0405060700010203 )

#define mm64_bswap_16( v ) \
   _mm_shuffle_pi8( (__m64)v, (__m64)0x0607040502030001 );
   _mm_shuffle_pi8( v, (__m64)0x0607040502030001 );
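// A minimal usage sketch, not part of the patch: byte swap two 32 bit words
// that are already packed in one MMX register, as the comment above
// suggests. The function name is hypothetical; assumes SSSE3, <tmmintrin.h>,
// and a 64 bit aligned source.
static inline __m64 demo_load_be32_pair( const uint32_t be[2] )
{
   __m64 v = *(const __m64*)be;   // load both words with one 64 bit read
   return mm64_bswap_32( v );     // big endian -> host order in each lane
}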

// Rotate right by c bytes
static inline __m64 mm64_ror_x8( __m64 v, const int c )
{ return _mm_alignr_pi8( v, v, c ); }
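// Not part of the patch: a cautionary note. _mm_alignr_pi8 encodes the byte
// count as an immediate, so mm64_ror_x8 is only usable where c folds to a
// compile time constant after inlining, e.g. mm64_ror_x8( v, 3 ) to rotate
// right by 3 bytes; a runtime count will not compile.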

#else

#define mm64_bswap_64( v ) \
   (__m64)__builtin_bswap64( (uint64_t)v )

// These exist only for compatibility with CPUs without SSSE3. MMX doesn't
// have an extract 32 instruction so pointers are needed to access elements.
// It's more efficient for the caller to use scalar variables and call
// bswap_32 directly.
#define mm64_bswap_32( v ) \
   _mm_set_pi32( __builtin_bswap32( ((uint32_t*)&v)[1] ), \
                 __builtin_bswap32( ((uint32_t*)&v)[0] ) )
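// A minimal sketch of the scalar route recommended above, not part of the
// patch: keep the words in scalar registers and swap them directly instead
// of bouncing through an MMX register. The function name is hypothetical;
// only __builtin_bswap32 (GCC/Clang) is assumed.
static inline void demo_bswap32_pair_scalar( uint32_t *out, const uint32_t *in )
{
   out[0] = __builtin_bswap32( in[0] );
   out[1] = __builtin_bswap32( in[1] );
}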
@@ -115,17 +103,6 @@

#endif

// 64 bit mem functions take sizes as a count of 64 bit elements, not bytes;
// data must be aligned to 64 bits.
static inline void memcpy_m64( __m64 *dst, const __m64 *src, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = src[i]; }

static inline void memset_zero_m64( __m64 *src, int n )
{ for ( int i = 0; i < n; i++ ) src[i] = (__m64)0ULL; }

static inline void memset_m64( __m64 *dst, const __m64 a, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }
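// A minimal usage sketch, not part of the patch: n counts __m64 elements,
// not bytes, so copying a 64 byte buffer passes n = 8. The names and the
// buffer size are hypothetical; 64 bit alignment is assumed.
static inline void demo_copy_64_bytes( uint64_t dst[8], const uint64_t src[8] )
{
   memcpy_m64( (__m64*)dst, (const __m64*)src, 8 );   // 8 elements = 64 bytes
}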

#endif // MMX

#endif // SIMD_64_H__