Mirror of https://github.com/JayDDee/cpuminer-opt.git
Synced 2025-09-17 23:44:27 +00:00

Compare commits
1 Commit
Commit: 46dca7a493
@@ -73,6 +73,13 @@ If not what makes it happen or not happen?

Change Log
----------

v23.6

ARM: Sha256dt, Sha256t and Sha256d 4-way are now working and fully optimized for NEON; SHA is also enabled but untested.
x86: Sha256dt, Sha256t, Sha256d: faster SSE2 4-way.
ARM: Scrypt and Scryptn2 are fully optimized for NEON; SHA is also enabled but untested.
Linux: added a log message when the miner is started as root, to discourage doing so.

v23.5

New version numbering drops the leading 3: the major version is now the calendar year, and the minor version identifies planned releases during the year.
@@ -136,10 +136,10 @@ static void fill_block( __m256i *state, const block *ref_block,

#else   // SSE2

static void fill_block( v128_t *state, const block *ref_block,
static void fill_block( v128u64_t *state, const block *ref_block,
                        block *next_block, int with_xor )
{
   v128_t block_XY[ARGON2_OWORDS_IN_BLOCK];
   v128u64_t block_XY[ARGON2_OWORDS_IN_BLOCK];
   unsigned int i;

   if ( with_xor )
@@ -23,56 +23,46 @@

#if !defined(__AVX512F__)

#if !defined(__AVX2__)

static BLAKE2_INLINE v128_t fBlaMka(v128_t x, v128_t y) {
    const v128_t z = v128_mulw32(x, y);
    return v128_add64(v128_add64(x, y), v128_add64(z, z));
static BLAKE2_INLINE v128_t fBlaMka(v128_t x, v128_t y)
{
   const v128u64_t z = v128_mulw32( x, y );
   return (v128u32_t)v128_add64( v128_add64( (v128u64_t)x, (v128u64_t)y ),
                                 v128_add64( z, z ) );
}
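For reference, the primitive being retyped above is Argon2's BlaMka multiply-add: per 64-bit lane it computes x + y + 2 * (lo32(x) * lo32(y)). A minimal scalar sketch of one lane (illustrative only, not part of the diff; the helper name is hypothetical):

#include <stdint.h>

// Scalar equivalent of one 64-bit lane of fBlaMka: widen the low 32 bits of
// each operand, multiply, then add x + y + 2*z.
static inline uint64_t blamka_scalar( uint64_t x, uint64_t y )
{
   const uint64_t z = (uint64_t)(uint32_t)x * (uint32_t)y;
   return x + y + 2 * z;
}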
#define G1( A0, B0, C0, D0, A1, B1, C1, D1 ) \
do { \
{ \
   A0 = fBlaMka( A0, B0 ); \
   A1 = fBlaMka( A1, B1 ); \
\
   D0 = v128_xor( D0, A0 ); \
   D1 = v128_xor( D1, A1 ); \
\
   D0 = v128_ror64( D0, 32 ); \
   D1 = v128_ror64( D1, 32 ); \
\
   C0 = fBlaMka( C0, D0 ); \
   C1 = fBlaMka( C1, D1 ); \
\
   B0 = v128_xor( B0, C0 ); \
   B1 = v128_xor( B1, C1 ); \
\
   B0 = v128_ror64( B0, 24 ); \
   B1 = v128_ror64( B1, 24 ); \
} while ((void)0, 0)
}

#define G2( A0, B0, C0, D0, A1, B1, C1, D1 ) \
do { \
{ \
   A0 = fBlaMka( A0, B0 ); \
   A1 = fBlaMka( A1, B1 ); \
\
   D0 = v128_xor( D0, A0 ); \
   D1 = v128_xor( D1, A1 ); \
\
   D0 = v128_ror64( D0, 16 ); \
   D1 = v128_ror64( D1, 16 ); \
\
   C0 = fBlaMka( C0, D0 ); \
   C1 = fBlaMka( C1, D1 ); \
\
   B0 = v128_xor( B0, C0 ); \
   B1 = v128_xor( B1, C1 ); \
\
   B0 = v128_ror64( B0, 63 ); \
   B1 = v128_ror64( B1, 63 ); \
} while ((void)0, 0)
}
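Taken together, G1 (rotations 32 and 24) followed by G2 (rotations 16 and 63) form the BLAKE2b-style mixing step used by Argon2's permutation. A plain-C sketch of that step on four 64-bit words, reusing the hypothetical blamka_scalar helper from the sketch above:

// Scalar view of what the two vector macros do per word quadruple.
static inline uint64_t ror64_scalar( uint64_t x, unsigned n )
{
   return ( x >> n ) | ( x << ( 64 - n ) );
}

static inline void g_scalar( uint64_t *a, uint64_t *b, uint64_t *c, uint64_t *d )
{
   *a = blamka_scalar( *a, *b );   *d = ror64_scalar( *d ^ *a, 32 );   // G1
   *c = blamka_scalar( *c, *d );   *b = ror64_scalar( *b ^ *c, 24 );
   *a = blamka_scalar( *a, *b );   *d = ror64_scalar( *d ^ *a, 16 );   // G2
   *c = blamka_scalar( *c, *d );   *b = ror64_scalar( *b ^ *c, 63 );
}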

#if defined(__SSSE3__) || defined(__ARM_NEON)
@@ -2303,9 +2303,8 @@ static inline void salsa_simd128_shuffle_2buf( uint32_t *xa, uint32_t *xb )
   XB[2] = _mm_blend_epi16( t0, t2, 0x0f );
   XB[3] = _mm_blend_epi16( t1, t3, 0xc3 );

#elif defined(__SSE2__) || defined(__ARM_NEON)
#else   // SSE2 or NEON

/*
   const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
   const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
   const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
@@ -2326,9 +2325,10 @@ static inline void salsa_simd128_shuffle_2buf( uint32_t *xa, uint32_t *xb )
   XB[1] = v128_blendv( t1, t3, mask_3c );
   XB[2] = v128_blendv( t2, t0, mask_f0 );
   XB[3] = v128_blendv( t3, t1, mask_3c );
*/

#endif

/*
   v128_t YA0, YA1, YA2, YA3, YB0, YB1, YB2, YB3;

   YA0 = v128_set32( xa[15], xa[10], xa[ 5], xa[ 0] );
@@ -2348,8 +2348,7 @@ static inline void salsa_simd128_shuffle_2buf( uint32_t *xa, uint32_t *xb )
   XB[2] = YB2;
   XA[3] = YA3;
   XB[3] = YB3;

#endif
*/
}

static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
@@ -2377,9 +2376,8 @@ static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
   XB[2] = _mm_blend_epi16( t1, t3, 0xcc );
   XB[3] = _mm_blend_epi16( t1, t3, 0x33 );

#elif defined(__SSE2__) || defined(__ARM_NEON)
#else   // SSE2 or NEON

/*
   const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
   const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
   const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
@@ -2389,19 +2387,21 @@ static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
   v128_t t2 = v128_blendv( XA[1], XA[3], mask_3c );
   v128_t t3 = v128_blendv( XA[3], XA[1], mask_3c );
   XA[0] = v128_blendv( t0, t2, mask_cc );
   XA[1] = v128_blendv( t1, t3, mask_cc );
   XA[2] = v128_blendv( t2, t0, mask_cc );
   XA[1] = v128_blendv( t2, t0, mask_cc );
   XA[2] = v128_blendv( t1, t3, mask_cc );
   XA[3] = v128_blendv( t3, t1, mask_cc );
   t0 = v128_blendv( XB[0], XB[2], mask_f0 );
   t1 = v128_blendv( XB[1], XB[3], mask_3c );
   t2 = v128_blendv( XB[2], XB[0], mask_f0 );
   t1 = v128_blendv( XB[2], XB[0], mask_f0 );
   t2 = v128_blendv( XB[1], XB[3], mask_3c );
   t3 = v128_blendv( XB[3], XB[1], mask_3c );
   XB[0] = v128_blendv( t0, t2, mask_cc );
   XB[1] = v128_blendv( t1, t3, mask_cc );
   XB[2] = v128_blendv( t2, t0, mask_cc );
   XB[1] = v128_blendv( t2, t0, mask_cc );
   XB[2] = v128_blendv( t1, t3, mask_cc );
   XB[3] = v128_blendv( t3, t1, mask_cc );
*/

#endif

/*
   v128_ovly ya[4], za[4], yb[4], zb[4];

   ya[0].m128 = XA[0];
@@ -2457,9 +2457,7 @@ static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
   XB[2] = zb[2].m128;
   XA[3] = za[3].m128;
   XB[3] = zb[3].m128;

#endif
*/
}

static void salsa8_simd128_2buf( uint32_t * const ba, uint32_t * const bb,
@@ -2638,9 +2636,8 @@ static inline void salsa_simd128_shuffle_3buf( uint32_t *xa, uint32_t *xb,
   XC[2] = _mm_blend_epi16( t0, t2, 0x0f );
   XC[3] = _mm_blend_epi16( t1, t3, 0xc3 );

#elif defined(__SSE2__) || defined(__ARM_NEON)
#else   // SSE2 or NEON

/*
   const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
   const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
   const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
@@ -2650,28 +2647,29 @@ static inline void salsa_simd128_shuffle_3buf( uint32_t *xa, uint32_t *xb,
   v128_t t2 = v128_blendv( XA[2], XA[3], mask_cc );
   v128_t t3 = v128_blendv( XA[3], XA[2], mask_cc );
   XA[0] = v128_blendv( t0, t2, mask_f0 );
   XA[1] = v128_blendv( t1, t3, mask_3c );
   XA[2] = v128_blendv( t2, t0, mask_f0 );
   XA[1] = v128_blendv( t2, t0, mask_f0 );
   XA[2] = v128_blendv( t1, t3, mask_3c );
   XA[3] = v128_blendv( t3, t1, mask_3c );
   t0 = v128_blendv( XB[0], XB[1], mask_cc );
   t1 = v128_blendv( XB[1], XB[0], mask_cc );
   t2 = v128_blendv( XB[2], XB[3], mask_cc );
   t3 = v128_blendv( XB[3], XB[2], mask_cc );
   XB[0] = v128_blendv( t0, t2, mask_f0 );
   XB[1] = v128_blendv( t1, t3, mask_3c );
   XB[2] = v128_blendv( t2, t0, mask_f0 );
   XB[1] = v128_blendv( t2, t0, mask_f0 );
   XB[2] = v128_blendv( t1, t3, mask_3c );
   XB[3] = v128_blendv( t3, t1, mask_3c );
   t0 = v128_blendv( XC[0], XC[1], mask_cc );
   t1 = v128_blendv( XC[1], XC[0], mask_cc );
   t2 = v128_blendv( XC[2], XC[3], mask_cc );
   t3 = v128_blendv( XC[3], XC[2], mask_cc );
   XC[0] = v128_blendv( t0, t2, mask_f0 );
   XC[1] = v128_blendv( t1, t3, mask_3c );
   XC[2] = v128_blendv( t2, t0, mask_f0 );
   XC[1] = v128_blendv( t2, t0, mask_f0 );
   XC[2] = v128_blendv( t1, t3, mask_3c );
   XC[3] = v128_blendv( t3, t1, mask_3c );
*/

#endif

/*
   v128_t YA0, YA1, YA2, YA3, YB0, YB1, YB2, YB3, YC0, YC1, YC2, YC3;

   YA0 = v128_set32( xa[15], xa[10], xa[ 5], xa[ 0] );
@@ -2699,9 +2697,7 @@ static inline void salsa_simd128_shuffle_3buf( uint32_t *xa, uint32_t *xb,
   XA[3] = YA3;
   XB[3] = YB3;
   XC[3] = YC3;

#endif
*/
}

static inline void salsa_simd128_unshuffle_3buf( uint32_t* xa, uint32_t* xb,
@@ -2738,9 +2734,8 @@ static inline void salsa_simd128_unshuffle_3buf( uint32_t* xa, uint32_t* xb,
   XC[2] = _mm_blend_epi16( t1, t3, 0xcc );
   XC[3] = _mm_blend_epi16( t1, t3, 0x33 );

#elif defined(__SSE2__) || defined(__ARM_NEON)
#else   // SSE2 or NEON

/*
   const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
   const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
   const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
@@ -2750,27 +2745,29 @@ static inline void salsa_simd128_unshuffle_3buf( uint32_t* xa, uint32_t* xb,
   v128_t t2 = v128_blendv( XA[1], XA[3], mask_3c );
   v128_t t3 = v128_blendv( XA[3], XA[1], mask_3c );
   XA[0] = v128_blendv( t0, t2, mask_cc );
   XA[1] = v128_blendv( t1, t3, mask_cc );
   XA[2] = v128_blendv( t2, t0, mask_cc );
   XA[1] = v128_blendv( t2, t0, mask_cc );
   XA[2] = v128_blendv( t1, t3, mask_cc );
   XA[3] = v128_blendv( t3, t1, mask_cc );
   t0 = v128_blendv( XB[0], XB[2], mask_f0 );
   t1 = v128_blendv( XB[1], XB[3], mask_3c );
   t2 = v128_blendv( XB[2], XB[0], mask_f0 );
   t1 = v128_blendv( XB[2], XB[0], mask_f0 );
   t2 = v128_blendv( XB[1], XB[3], mask_3c );
   t3 = v128_blendv( XB[3], XB[1], mask_3c );
   XB[0] = v128_blendv( t0, t2, mask_cc );
   XB[1] = v128_blendv( t1, t3, mask_cc );
   XB[2] = v128_blendv( t2, t0, mask_cc );
   XB[1] = v128_blendv( t2, t0, mask_cc );
   XB[2] = v128_blendv( t1, t3, mask_cc );
   XB[3] = v128_blendv( t3, t1, mask_cc );
   t0 = v128_blendv( XC[0], XC[2], mask_f0 );
   t1 = v128_blendv( XC[1], XC[3], mask_3c );
   t2 = v128_blendv( XC[2], XC[0], mask_f0 );
   t1 = v128_blendv( XC[2], XC[0], mask_f0 );
   t2 = v128_blendv( XC[1], XC[3], mask_3c );
   t3 = v128_blendv( XC[3], XC[1], mask_3c );
   XC[0] = v128_blendv( t0, t2, mask_cc );
   XC[1] = v128_blendv( t1, t3, mask_cc );
   XC[2] = v128_blendv( t2, t0, mask_cc );
   XC[1] = v128_blendv( t2, t0, mask_cc );
   XC[2] = v128_blendv( t1, t3, mask_cc );
   XC[3] = v128_blendv( t3, t1, mask_cc );
*/

#endif

/*
   v128_ovly ya[4], za[4], yb[4], zb[4], yc[4], zc[4];

   ya[0].m128 = XA[0];
@@ -2850,9 +2847,7 @@ static inline void salsa_simd128_unshuffle_3buf( uint32_t* xa, uint32_t* xb,
   XA[3] = za[3].m128;
   XB[3] = zb[3].m128;
   XC[3] = zc[3].m128;

#endif
*/
}

// Triple buffered, 3x memory usage
@@ -56,10 +56,10 @@ static const uint32_t sha256_initial_state[8] =

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define SCRYPT_THROUGHPUT 16
#elif defined(__SHA__) || defined(__ARM_FEATURE_SHA2)
#define SCRYPT_THROUGHPUT 2
#elif defined(__AVX2__)
#define SCRYPT_THROUGHPUT 8
#elif defined(__SHA__)   // NEON?
#define SCRYPT_THROUGHPUT 2
#else
#define SCRYPT_THROUGHPUT 4
#endif
@@ -155,7 +155,7 @@ static inline void PBKDF2_SHA256_128_32(uint32_t *tstate, uint32_t *ostate,
      output[i] = bswap_32( ostate[i] );
}

#if defined(__SHA__)
#if defined(__SHA__) || defined(__ARM_FEATURE_SHA2)

static inline void HMAC_SHA256_80_init_SHA_2BUF( const uint32_t *key0,
            const uint32_t *key1, uint32_t *tstate0, uint32_t *tstate1,
@@ -266,6 +266,9 @@ static inline void PBKDF2_SHA256_128_32_SHA_2BUF( uint32_t *tstate0,

#endif // SHA

static const uint32_t keypad_4way[4 * 12] = {
   0x80000000, 0x80000000, 0x80000000, 0x80000000,
   0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -1221,10 +1224,10 @@ static int scrypt_N_1_1_256_16way( const uint32_t *input, uint32_t *output,

#endif // AVX512

#if ( SCRYPT_THROUGHPUT == 2 ) && defined(__SHA__)
#if ( SCRYPT_THROUGHPUT == 2 ) && ( defined(__SHA__) || defined(__ARM_FEATURE_SHA2) )

static int scrypt_N_1_1_256_sha_2buf( const uint32_t *input, uint32_t *output,
            uint32_t *midstate, int N, int thrid )
static int scrypt_N_1_1_256_sha_2buf( const uint32_t *input,
            uint32_t *output, uint32_t *midstate, int N, int thrid )
{
   uint32_t _ALIGN(128) tstate[ 2*8 ];
   uint32_t _ALIGN(128) ostate[ 2*8 ];
@@ -1241,13 +1244,13 @@ static int scrypt_N_1_1_256_sha_2buf( const uint32_t *input, uint32_t *output,
   scrypt_core_simd128_2buf( W, scratchbuf, N );
   if ( work_restart[thrid].restart ) return 0;

   PBKDF2_SHA256_128_32_SHA_2BUF( tstate, tstate+8, ostate, ostate+8, W, W+32,
                                  output, output+8 );
   PBKDF2_SHA256_128_32_SHA_2BUF( tstate, tstate+8, ostate,
                                  ostate+8, W, W+32, output, output+8 );

   return 1;
}

#endif
#endif // THROUGHPUT = 2 && SHA
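For orientation, the routines touched in this file implement the textbook scrypt pipeline: a one-iteration PBKDF2-HMAC-SHA256 expands the 80-byte header into a 128-byte buffer, the memory-hard core mixes that buffer against the scratchpad, and a second one-iteration PBKDF2 compresses it to the 32-byte result. A single-lane sketch using this file's own helpers (illustrative only; the real paths below run 2, 4, 8 or 16 lanes and use the _2BUF / _4way variants):

// Sketch of one scrypt-N,1,1 hash of an 80-byte header. Assumes the helpers
// declared elsewhere in this file; not a drop-in replacement for any path.
static int scrypt_one_lane_sketch( const uint32_t *input, uint32_t *output,
                                   uint32_t *scratchbuf, int N )
{
   uint32_t tstate[8], ostate[8], W[32];

   HMAC_SHA256_80_init( input, tstate, ostate );       // key = 80-byte header
   PBKDF2_SHA256_80_128( tstate, ostate, input, W );   // 128-byte buffer B
   // ... memory-hard core over W and scratchbuf (the simd128 variants) ...
   PBKDF2_SHA256_128_32( tstate, ostate, W, output );  // final 32-byte hash
   return 1;
}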
#if ( SCRYPT_THROUGHPUT == 4 )

@@ -1267,13 +1270,10 @@ static int scrypt_N_1_1_256_4way_sha( const uint32_t *input, uint32_t *output,

   HMAC_SHA256_80_init( input, tstate, ostate );
   PBKDF2_SHA256_80_128( tstate, ostate, input, W );

   HMAC_SHA256_80_init( input +20, tstate+ 8, ostate+ 8 );
   PBKDF2_SHA256_80_128( tstate+ 8, ostate+ 8, input +20, W+32 );

   HMAC_SHA256_80_init( input +40, tstate+16, ostate+16 );
   PBKDF2_SHA256_80_128( tstate+16, ostate+16, input +40, W+64 );

   HMAC_SHA256_80_init( input +60, tstate+24, ostate+24 );
   PBKDF2_SHA256_80_128( tstate+24, ostate+24, input +60, W+96 );

@@ -1303,11 +1303,8 @@ static int scrypt_N_1_1_256_4way_sha( const uint32_t *input, uint32_t *output,
   if ( work_restart[thrid].restart ) return 0;

   PBKDF2_SHA256_128_32( tstate, ostate, W, output );

   PBKDF2_SHA256_128_32( tstate+ 8, ostate+ 8, W+32, output+ 8 );

   PBKDF2_SHA256_128_32( tstate+16, ostate+16, W+64, output+16 );

   PBKDF2_SHA256_128_32( tstate+24, ostate+24, W+96, output+24 );

   return 1;

@@ -1418,14 +1415,14 @@ extern int scanhash_scrypt( struct work *work, uint32_t max_nonce,

      rc = scrypt_N_1_1_256_8way( data, hash, midstate, opt_param_n,
                                  thr_id );
#elif ( SCRYPT_THROUGHPUT == 4 )
#if defined(__SHA__)
#if defined(__SHA__) || defined(__ARM_FEATURE_SHA2)
      rc = scrypt_N_1_1_256_4way_sha( data, hash, midstate, opt_param_n,
                                      thr_id );
#else
      rc = scrypt_N_1_1_256_4way( data, hash, midstate, opt_param_n,
                                  thr_id );
#endif
#elif ( SCRYPT_THROUGHPUT == 2 ) && defined(__SHA__)
#elif ( SCRYPT_THROUGHPUT == 2 ) && ( defined(__SHA__) || defined(__ARM_FEATURE_SHA2) )
      rc = scrypt_N_1_1_256_sha_2buf( data, hash, midstate, opt_param_n,
                                      thr_id );
#else
@@ -1472,10 +1469,10 @@ bool scrypt_miner_thread_init( int thr_id )

bool register_scrypt_algo( algo_gate_t* gate )
{
#if defined(__SHA__)
   gate->optimizations = SSE2_OPT | SHA_OPT;
#if defined(__SHA__) || defined(__ARM_FEATURE_SHA2)
   gate->optimizations = SSE2_OPT | SHA_OPT | NEON_OPT;
#else
   gate->optimizations = SSE2_OPT | SSE42_OPT | AVX_OPT | AVX2_OPT | AVX512_OPT;
   gate->optimizations = SSE2_OPT | SSE42_OPT | AVX_OPT | AVX2_OPT | AVX512_OPT | NEON_OPT;
#endif
   gate->miner_thread_init = (void*)&scrypt_miner_thread_init;
   gate->scanhash = (void*)&scanhash_scrypt;
@@ -1492,15 +1489,15 @@ bool register_scrypt_algo( algo_gate_t* gate )
      scratchbuf_size = opt_param_n * 3 * 128;   // 3 buf
   else
      scratchbuf_size = opt_param_n * 4 * 128;   // 4 way
#elif defined(__SHA__) || defined(__ARM_FEATURE_SHA2)
// scrypt_throughput = 2;
   scratchbuf_size = opt_param_n * 2 * 128;   // 2 buf
#elif defined(__AVX2__)
// scrypt_throughput = 8;
   if ( opt_param_n > 0x4000 )
      scratchbuf_size = opt_param_n * 3 * 128;   // 3 buf
   else
      scratchbuf_size = opt_param_n * 2 * 128;   // 2 way
#elif defined(__SHA__)
// scrypt_throughput = 4;
   scratchbuf_size = opt_param_n * 2 * 128;   // 2 buf
#else
// scrypt_throughput = 4;
   if ( opt_param_n > 0x4000 )
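As a worked example of the sizing rules above (the concrete N is an assumption for illustration): with scrypt's common N = 1024, a 2-buffer build reserves 1024 * 2 * 128 = 256 KiB of scratchpad per miner thread, a 3-buffer build 384 KiB, and the 4-way layout 512 KiB. A tiny self-contained check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

// Worked example of the scratchpad sizing used above; opt_param_n is the
// scrypt N parameter (1024 here is an assumption, not read from the miner).
int main( void )
{
   const uint64_t opt_param_n = 1024;
   printf( "2 buf: %llu bytes\n", (unsigned long long)( opt_param_n * 2 * 128 ) );   // 262144
   printf( "3 buf: %llu bytes\n", (unsigned long long)( opt_param_n * 3 * 128 ) );   // 393216
   printf( "4 way: %llu bytes\n", (unsigned long long)( opt_param_n * 4 * 128 ) );   // 524288
   return 0;
}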
@@ -54,29 +54,29 @@ static const uint32_t K256[64] =
   v128_xor( v128_xor( \
   v128_ror32(x, 17), v128_ror32(x, 19) ), v128_sr32(x, 10) )

#define SHA2s_MEXP( a, b, c, d ) \
#define SHA256_4X32_MEXP( a, b, c, d ) \
   v128_add4_32( SSG2_1( a ), b, SSG2_0( c ), d );

#define SHA256x4_MSG_EXPANSION( W ) \
   W[ 0] = SHA2s_MEXP( W[14], W[ 9], W[ 1], W[ 0] ); \
   W[ 1] = SHA2s_MEXP( W[15], W[10], W[ 2], W[ 1] ); \
   W[ 2] = SHA2s_MEXP( W[ 0], W[11], W[ 3], W[ 2] ); \
   W[ 3] = SHA2s_MEXP( W[ 1], W[12], W[ 4], W[ 3] ); \
   W[ 4] = SHA2s_MEXP( W[ 2], W[13], W[ 5], W[ 4] ); \
   W[ 5] = SHA2s_MEXP( W[ 3], W[14], W[ 6], W[ 5] ); \
   W[ 6] = SHA2s_MEXP( W[ 4], W[15], W[ 7], W[ 6] ); \
   W[ 7] = SHA2s_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] ); \
   W[ 8] = SHA2s_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] ); \
   W[ 9] = SHA2s_MEXP( W[ 7], W[ 2], W[10], W[ 9] ); \
   W[10] = SHA2s_MEXP( W[ 8], W[ 3], W[11], W[10] ); \
   W[11] = SHA2s_MEXP( W[ 9], W[ 4], W[12], W[11] ); \
   W[12] = SHA2s_MEXP( W[10], W[ 5], W[13], W[12] ); \
   W[13] = SHA2s_MEXP( W[11], W[ 6], W[14], W[13] ); \
   W[14] = SHA2s_MEXP( W[12], W[ 7], W[15], W[14] ); \
   W[15] = SHA2s_MEXP( W[13], W[ 8], W[ 0], W[15] );
#define SHA256_4X32_MSG_EXPANSION( W ) \
   W[ 0] = SHA256_4X32_MEXP( W[14], W[ 9], W[ 1], W[ 0] ); \
   W[ 1] = SHA256_4X32_MEXP( W[15], W[10], W[ 2], W[ 1] ); \
   W[ 2] = SHA256_4X32_MEXP( W[ 0], W[11], W[ 3], W[ 2] ); \
   W[ 3] = SHA256_4X32_MEXP( W[ 1], W[12], W[ 4], W[ 3] ); \
   W[ 4] = SHA256_4X32_MEXP( W[ 2], W[13], W[ 5], W[ 4] ); \
   W[ 5] = SHA256_4X32_MEXP( W[ 3], W[14], W[ 6], W[ 5] ); \
   W[ 6] = SHA256_4X32_MEXP( W[ 4], W[15], W[ 7], W[ 6] ); \
   W[ 7] = SHA256_4X32_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] ); \
   W[ 8] = SHA256_4X32_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] ); \
   W[ 9] = SHA256_4X32_MEXP( W[ 7], W[ 2], W[10], W[ 9] ); \
   W[10] = SHA256_4X32_MEXP( W[ 8], W[ 3], W[11], W[10] ); \
   W[11] = SHA256_4X32_MEXP( W[ 9], W[ 4], W[12], W[11] ); \
   W[12] = SHA256_4X32_MEXP( W[10], W[ 5], W[13], W[12] ); \
   W[13] = SHA256_4X32_MEXP( W[11], W[ 6], W[14], W[13] ); \
   W[14] = SHA256_4X32_MEXP( W[12], W[ 7], W[15], W[14] ); \
   W[15] = SHA256_4X32_MEXP( W[13], W[ 8], W[ 0], W[15] );
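The renamed SHA256_4X32_MEXP macro is the standard SHA-256 message-schedule step, computed on four independent lanes at once. For message words W[t] with t >= 16:

   W[t] = SSG2_1( W[t-2] ) + W[t-7] + SSG2_0( W[t-15] ) + W[t-16]   (mod 2^32)

so a call such as W[ 0] = SHA256_4X32_MEXP( W[14], W[ 9], W[ 1], W[ 0] ) computes the new W[16] in place of the no-longer-needed W[0], and the 16-entry W array is reused as a sliding window across the 64 rounds.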
#define SHA2s_4WAY_STEP(A, B, C, D, E, F, G, H, i, j) \
do { \
#define SHA256_4X32_ROUND(A, B, C, D, E, F, G, H, i, j) \
{ \
   v128_t T1, T2; \
   v128_t K = v128_32( K256[( (j)+(i) )] ); \
   T1 = v128_add32( H, v128_add4_32( BSG2_1(E), CHs(E, F, G), \
@@ -85,31 +85,41 @@ do { \
   Y_xor_Z = X_xor_Y; \
   D = v128_add32( D, T1 ); \
   H = v128_add32( T1, T2 ); \
} while (0)
}

#define SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, j ) \
#define SHA256_4X32_ROUND_NOMSG( A, B, C, D, E, F, G, H, i, j ) \
{ \
   v128_t T1 = v128_add4_32( H, BSG2_1(E), CHs(E, F, G), \
                             v128_32( K256[(i)+(j)] ) ); \
   v128_t T2 = v128_add32( BSG2_0(A), MAJs(A, B, C) ); \
   Y_xor_Z = X_xor_Y; \
   D = v128_add32( D, T1 ); \
   H = v128_add32( T1, T2 ); \
}
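The new SHA256_4X32_ROUND_NOMSG variant appears to be a round whose message word is omitted, i.e. T1 = H + BSG2_1(E) + CHs(E,F,G) + K[i+j] with no "+ W[i]" term. It is used in sha256_4x32_final_rounds below for rounds 5-14 of the second block, whose message words are all-zero padding in the scanhash setups later in this diff, saving one vector add per skipped word.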

#define SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, j ) \
{ \
   v128_t X_xor_Y, Y_xor_Z = v128_xor( B, C ); \
   SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, j ); \
   SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, j ); \
   SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, j ); \
   SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 3, j ); \
   SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 4, j ); \
   SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 5, j ); \
   SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 6, j ); \
   SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 7, j ); \
   SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 8, j ); \
   SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 9, j ); \
   SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 10, j ); \
   SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 11, j ); \
   SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 12, j ); \
   SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 13, j ); \
   SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 14, j ); \
   SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 15, j ); \
   SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 0, j ); \
   SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 1, j ); \
   SHA256_4X32_ROUND( G, H, A, B, C, D, E, F, 2, j ); \
   SHA256_4X32_ROUND( F, G, H, A, B, C, D, E, 3, j ); \
   SHA256_4X32_ROUND( E, F, G, H, A, B, C, D, 4, j ); \
   SHA256_4X32_ROUND( D, E, F, G, H, A, B, C, 5, j ); \
   SHA256_4X32_ROUND( C, D, E, F, G, H, A, B, 6, j ); \
   SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 7, j ); \
   SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 8, j ); \
   SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 9, j ); \
   SHA256_4X32_ROUND( G, H, A, B, C, D, E, F, 10, j ); \
   SHA256_4X32_ROUND( F, G, H, A, B, C, D, E, 11, j ); \
   SHA256_4X32_ROUND( E, F, G, H, A, B, C, D, 12, j ); \
   SHA256_4X32_ROUND( D, E, F, G, H, A, B, C, 13, j ); \
   SHA256_4X32_ROUND( C, D, E, F, G, H, A, B, 14, j ); \
   SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 15, j ); \
}

// LE data, no need to byte swap
static inline void SHA256_4WAY_TRANSFORM( v128_t *out, v128_t *W,
static inline void SHA256_4X32_TRANSFORM( v128_t *out, v128_t *W,
                                          const v128_t *in )
{
   v128_t A, B, C, D, E, F, G, H;
@@ -123,13 +133,13 @@ static inline void SHA256_4WAY_TRANSFORM( v128_t *out, v128_t *W,
   G = in[6];
   H = in[7];

   SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 0 );
   SHA256x4_MSG_EXPANSION( W );
   SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
   SHA256x4_MSG_EXPANSION( W );
   SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
   SHA256x4_MSG_EXPANSION( W );
   SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 48 );
   SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 0 );
   SHA256_4X32_MSG_EXPANSION( W );
   SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
   SHA256_4X32_MSG_EXPANSION( W );
   SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
   SHA256_4X32_MSG_EXPANSION( W );
   SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 48 );

   out[0] = v128_add32( in[0], A );
   out[1] = v128_add32( in[1], B );
@@ -142,47 +152,37 @@ static inline void SHA256_4WAY_TRANSFORM( v128_t *out, v128_t *W,
}

// LE data, no need to byte swap
void sha256_4way_transform_le( v128_t *state_out, const v128_t *data,
void sha256_4x32_transform_le( v128_t *state_out, const v128_t *data,
                               const v128_t *state_in )
{
   v128_t W[16];
   v128_memcpy( W, data, 16 );
   SHA256_4WAY_TRANSFORM( state_out, W, state_in );
   SHA256_4X32_TRANSFORM( state_out, W, state_in );
}

// BE data, need to byte swap input data
void sha256_4way_transform_be( v128_t *state_out, const v128_t *data,
void sha256_4x32_transform_be( v128_t *state_out, const v128_t *data,
                               const v128_t *state_in )
{
   v128_t W[16];
   v128_block_bswap32( W, data );
   v128_block_bswap32( W+8, data+8 );
   SHA256_4WAY_TRANSFORM( state_out, W, state_in );
   SHA256_4X32_TRANSFORM( state_out, W, state_in );
}

// prehash_3rounds & final_rounds are not working
void sha256_4way_prehash_3rounds( v128_t *state_mid, v128_t *X,
void sha256_4x32_prehash_3rounds( v128_t *state_mid, v128_t *X,
                                  const v128_t *W, const v128_t *state_in )
{
   v128_t A, B, C, D, E, F, G, H;
   v128_t A, B, C, D, E, F, G, H, T1;

   // precalculate constant part msg expansion for second iteration.
   X[ 0] = SHA2s_MEXP( W[14], W[ 9], W[ 1], W[ 0] );
   X[ 1] = SHA2s_MEXP( W[15], W[10], W[ 2], W[ 1] );
   X[ 2] = v128_add32( v128_add32( SSG2_1( X[ 0] ), W[11] ), W[ 2] );
   X[ 3] = v128_add32( v128_add32( SSG2_1( X[ 1] ), W[12] ), SSG2_0( W[ 4] ) );
   X[ 4] = v128_add32( v128_add32( W[13], SSG2_0( W[ 5] ) ), W[ 4] );
   X[ 5] = v128_add32( v128_add32( W[14], SSG2_0( W[ 6] ) ), W[ 5] );
   X[ 6] = v128_add32( v128_add32( W[15], SSG2_0( W[ 7] ) ), W[ 6] );
   X[ 7] = v128_add32( v128_add32( X[ 0], SSG2_0( W[ 8] ) ), W[ 7] );
   X[ 8] = v128_add32( v128_add32( X[ 1], SSG2_0( W[ 9] ) ), W[ 8] );
   X[ 9] = v128_add32( SSG2_0( W[10] ), W[ 9] );
   X[10] = v128_add32( SSG2_0( W[11] ), W[10] );
   X[11] = v128_add32( SSG2_0( W[12] ), W[11] );
   X[12] = v128_add32( SSG2_0( W[13] ), W[12] );
   X[13] = v128_add32( SSG2_0( W[14] ), W[13] );
   X[14] = v128_add32( SSG2_0( W[15] ), W[14] );
   X[15] = v128_add32( SSG2_0( X[ 0] ), W[15] );
   X[ 0] = v128_add32( SSG2_0( W[ 1] ), W[ 0] );
   X[ 1] = v128_add32( v128_add32( SSG2_1( W[15] ), SSG2_0( W[ 2] ) ), W[ 1] );
   X[ 2] = v128_add32( SSG2_1( X[ 0] ), W[ 2] );
   X[ 3] = v128_add32( SSG2_1( X[ 1] ), SSG2_0( W[ 4] ) );
   X[ 4] = SSG2_0( W[15] );
   X[ 5] = v128_add32( SSG2_0( X[ 0] ), W[15] );
   // W[0] for round 32
   X[ 6] = v128_add32( SSG2_0( X[ 1] ), X[ 0] );

   A = v128_load( state_in );
   B = v128_load( state_in + 1 );
@@ -195,9 +195,14 @@ void sha256_4way_prehash_3rounds( v128_t *state_mid, v128_t *X,

   v128_t X_xor_Y, Y_xor_Z = v128_xor( B, C );

   SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
   SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
   SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
   SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 0, 0 );
   SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 1, 0 );
   SHA256_4X32_ROUND( G, H, A, B, C, D, E, F, 2, 0 );

   // round 3 part 1, avoid nonces W[3]
   T1 = v128_add4_32( E, BSG2_1(B), CHs(B, C, D), v128_32( K256[3] ) );
   A = v128_add32( A, T1 );
   E = v128_add32( T1, v128_add32( BSG2_0(F), MAJs(F, G, H) ) );

   v128_store( state_mid    , A );
   v128_store( state_mid + 1, B );
@@ -209,7 +214,7 @@ void sha256_4way_prehash_3rounds( v128_t *state_mid, v128_t *X,
   v128_store( state_mid + 7, H );
}

void sha256_4way_final_rounds( v128_t *state_out, const v128_t *data,
void sha256_4x32_final_rounds( v128_t *state_out, const v128_t *data,
         const v128_t *state_in, const v128_t *state_mid, const v128_t *X )
{
   v128_t A, B, C, D, E, F, G, H;
@@ -226,45 +231,64 @@ void sha256_4way_final_rounds( v128_t *state_out, const v128_t *data,
   G = v128_load( state_mid + 6 );
   H = v128_load( state_mid + 7 );

   v128_t X_xor_Y, Y_xor_Z = v128_xor( G, H );
   v128_t X_xor_Y, Y_xor_Z = v128_xor( F, G );

   SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 3, 0 );
   SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 4, 0 );
   SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 5, 0 );
   SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 6, 0 );
   SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 7, 0 );
   SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 8, 0 );
   SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 9, 0 );
   SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 10, 0 );
   SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 11, 0 );
   SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 12, 0 );
   SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 13, 0 );
   SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 14, 0 );
   SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 15, 0 );
   // round 3 part 2, add nonces
   A = v128_add32( A, W[3] );
   E = v128_add32( E, W[3] );

   SHA256_4X32_ROUND( E, F, G, H, A, B, C, D, 4, 0 );
   SHA256_4X32_ROUND_NOMSG( D, E, F, G, H, A, B, C, 5, 0 );
   SHA256_4X32_ROUND_NOMSG( C, D, E, F, G, H, A, B, 6, 0 );
   SHA256_4X32_ROUND_NOMSG( B, C, D, E, F, G, H, A, 7, 0 );
   SHA256_4X32_ROUND_NOMSG( A, B, C, D, E, F, G, H, 8, 0 );
   SHA256_4X32_ROUND_NOMSG( H, A, B, C, D, E, F, G, 9, 0 );
   SHA256_4X32_ROUND_NOMSG( G, H, A, B, C, D, E, F, 10, 0 );
   SHA256_4X32_ROUND_NOMSG( F, G, H, A, B, C, D, E, 11, 0 );
   SHA256_4X32_ROUND_NOMSG( E, F, G, H, A, B, C, D, 12, 0 );
   SHA256_4X32_ROUND_NOMSG( D, E, F, G, H, A, B, C, 13, 0 );
   SHA256_4X32_ROUND_NOMSG( C, D, E, F, G, H, A, B, 14, 0 );
   SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 15, 0 );

   // update precalculated msg expansion with new nonce: W[3].
   W[ 0] = X[ 0];
   W[ 1] = X[ 1];
   W[ 2] = v128_add32( X[ 2], SSG2_0( W[ 3] ) );
   W[ 3] = v128_add32( X[ 3], W[ 3] );
   W[ 4] = v128_add32( X[ 4], SSG2_1( W[ 2] ) );
   W[ 5] = v128_add32( X[ 5], SSG2_1( W[ 3] ) );
   W[ 6] = v128_add32( X[ 6], SSG2_1( W[ 4] ) );
   W[ 7] = v128_add32( X[ 7], SSG2_1( W[ 5] ) );
   W[ 8] = v128_add32( X[ 8], SSG2_1( W[ 6] ) );
   W[ 9] = v128_add32( X[ 9], v128_add32( SSG2_1( W[ 7] ), W[ 2] ) );
   W[10] = v128_add32( X[10], v128_add32( SSG2_1( W[ 8] ), W[ 3] ) );
   W[11] = v128_add32( X[11], v128_add32( SSG2_1( W[ 9] ), W[ 4] ) );
   W[12] = v128_add32( X[12], v128_add32( SSG2_1( W[10] ), W[ 5] ) );
   W[13] = v128_add32( X[13], v128_add32( SSG2_1( W[11] ), W[ 6] ) );
   W[14] = v128_add32( X[14], v128_add32( SSG2_1( W[12] ), W[ 7] ) );
   W[15] = v128_add32( X[15], v128_add32( SSG2_1( W[13] ), W[ 8] ) );
   W[ 4] = v128_add32( W[ 4], SSG2_1( W[ 2] ) );
   W[ 5] = SSG2_1( W[ 3] );
   W[ 6] = v128_add32( W[15], SSG2_1( W[ 4] ) );
   W[ 7] = v128_add32( X[ 0], SSG2_1( W[ 5] ) );
   W[ 8] = v128_add32( X[ 1], SSG2_1( W[ 6] ) );
   W[ 9] = v128_add32( SSG2_1( W[ 7] ), W[ 2] );
   W[10] = v128_add32( SSG2_1( W[ 8] ), W[ 3] );
   W[11] = v128_add32( SSG2_1( W[ 9] ), W[ 4] );
   W[12] = v128_add32( SSG2_1( W[10] ), W[ 5] );
   W[13] = v128_add32( SSG2_1( W[11] ), W[ 6] );
   W[14] = v128_add32( X[ 4], v128_add32( SSG2_1( W[12] ), W[ 7] ) );
   W[15] = v128_add32( X[ 5], v128_add32( SSG2_1( W[13] ), W[ 8] ) );

   SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
   SHA256x4_MSG_EXPANSION( W );
   SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
   SHA256x4_MSG_EXPANSION( W );
   SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 48 );
   SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 16 );

   W[ 0] = v128_add32( X[ 6], v128_add32( SSG2_1( W[14] ), W[ 9] ) );
   W[ 1] = SHA256_4X32_MEXP( W[15], W[10], W[ 2], W[ 1] );
   W[ 2] = SHA256_4X32_MEXP( W[ 0], W[11], W[ 3], W[ 2] );
   W[ 3] = SHA256_4X32_MEXP( W[ 1], W[12], W[ 4], W[ 3] );
   W[ 4] = SHA256_4X32_MEXP( W[ 2], W[13], W[ 5], W[ 4] );
   W[ 5] = SHA256_4X32_MEXP( W[ 3], W[14], W[ 6], W[ 5] );
   W[ 6] = SHA256_4X32_MEXP( W[ 4], W[15], W[ 7], W[ 6] );
   W[ 7] = SHA256_4X32_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] );
   W[ 8] = SHA256_4X32_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] );
   W[ 9] = SHA256_4X32_MEXP( W[ 7], W[ 2], W[10], W[ 9] );
   W[10] = SHA256_4X32_MEXP( W[ 8], W[ 3], W[11], W[10] );
   W[11] = SHA256_4X32_MEXP( W[ 9], W[ 4], W[12], W[11] );
   W[12] = SHA256_4X32_MEXP( W[10], W[ 5], W[13], W[12] );
   W[13] = SHA256_4X32_MEXP( W[11], W[ 6], W[14], W[13] );
   W[14] = SHA256_4X32_MEXP( W[12], W[ 7], W[15], W[14] );
   W[15] = SHA256_4X32_MEXP( W[13], W[ 8], W[ 0], W[15] );

   SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
   SHA256_4X32_MSG_EXPANSION( W );
   SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 48 );

   A = v128_add32( A, v128_load( state_in ) );
   B = v128_add32( B, v128_load( state_in + 1 ) );
@@ -285,10 +309,11 @@ void sha256_4way_final_rounds( v128_t *state_out, const v128_t *data,
   v128_store( state_out + 7, H );
}
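The prehash/final split above pays off because, in the scanhash loops later in this diff, only word 3 of the second block (the nonce) changes between iterations; rounds 0-2, the nonce-independent half of round 3, and most of the message expansion can therefore be computed once per work unit. The calling pattern, lifted from the sha256d/sha256dt scanhash routines below (a sketch, not a complete function):

// One-time setup per work unit:
sha256_4x32_transform_le( mhash1, vdata, iv );                        // midstate of first 64 bytes
sha256_4x32_prehash_3rounds( mhash2, mexp_pre, vdata + 16, mhash1 );  // rounds 0-2 + partial expansion

// Per group of 4 nonces:
do
{
   sha256_4x32_final_rounds( block, vdata + 16, mhash1, mhash2, mexp_pre );
   sha256_4x32_transform_le( hash32, block, iv );                     // second SHA-256 pass
   // ... test the four lanes of hash32 against the target ...
   vdata[16+3] = v128_add32( vdata[16+3], four );                     // advance the 4 nonce lanes
   n += 4;
} while ( ( n < last_nonce ) && !work_restart[thr_id].restart );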
# if 0

// Working correctly but still slower
int sha256_4way_transform_le_short( v128_t *state_out, const v128_t *data,
int sha256_4x32_transform_le_short( v128_t *state_out, const v128_t *data,
                            const v128_t *state_in, const uint32_t *target )
{
   v128_t A, B, C, D, E, F, G, H, T0, T1, T2;
@@ -308,38 +333,38 @@ int sha256_4way_transform_le_short( v128_t *state_out, const v128_t *data,
   const v128_t IV7 = H;
   const v128_t IV6 = G;

   SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 0 );
   SHA256x4_MSG_EXPANSION( W );
   SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
   SHA256x4_MSG_EXPANSION( W );
   SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
   SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 0 );
   SHA256_4X32_MSG_EXPANSION( W );
   SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
   SHA256_4X32_MSG_EXPANSION( W );
   SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 32 );

   W[ 0] = SHA2s_MEXP( W[14], W[ 9], W[ 1], W[ 0] );
   W[ 1] = SHA2s_MEXP( W[15], W[10], W[ 2], W[ 1] );
   W[ 2] = SHA2s_MEXP( W[ 0], W[11], W[ 3], W[ 2] );
   W[ 3] = SHA2s_MEXP( W[ 1], W[12], W[ 4], W[ 3] );
   W[ 4] = SHA2s_MEXP( W[ 2], W[13], W[ 5], W[ 4] );
   W[ 5] = SHA2s_MEXP( W[ 3], W[14], W[ 6], W[ 5] );
   W[ 6] = SHA2s_MEXP( W[ 4], W[15], W[ 7], W[ 6] );
   W[ 7] = SHA2s_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] );
   W[ 8] = SHA2s_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] );
   W[ 9] = SHA2s_MEXP( W[ 7], W[ 2], W[10], W[ 9] );
   W[10] = SHA2s_MEXP( W[ 8], W[ 3], W[11], W[10] );
   W[11] = SHA2s_MEXP( W[ 9], W[ 4], W[12], W[11] );
   W[12] = SHA2s_MEXP( W[10], W[ 5], W[13], W[12] );
   W[ 0] = SHA256_4X32_MEXP( W[14], W[ 9], W[ 1], W[ 0] );
   W[ 1] = SHA256_4X32_MEXP( W[15], W[10], W[ 2], W[ 1] );
   W[ 2] = SHA256_4X32_MEXP( W[ 0], W[11], W[ 3], W[ 2] );
   W[ 3] = SHA256_4X32_MEXP( W[ 1], W[12], W[ 4], W[ 3] );
   W[ 4] = SHA256_4X32_MEXP( W[ 2], W[13], W[ 5], W[ 4] );
   W[ 5] = SHA256_4X32_MEXP( W[ 3], W[14], W[ 6], W[ 5] );
   W[ 6] = SHA256_4X32_MEXP( W[ 4], W[15], W[ 7], W[ 6] );
   W[ 7] = SHA256_4X32_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] );
   W[ 8] = SHA256_4X32_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] );
   W[ 9] = SHA256_4X32_MEXP( W[ 7], W[ 2], W[10], W[ 9] );
   W[10] = SHA256_4X32_MEXP( W[ 8], W[ 3], W[11], W[10] );
   W[11] = SHA256_4X32_MEXP( W[ 9], W[ 4], W[12], W[11] );
   W[12] = SHA256_4X32_MEXP( W[10], W[ 5], W[13], W[12] );

   v128_t X_xor_Y, Y_xor_Z = v128_xor( B, C );

   SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, 48 );
   SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, 48 );
   SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, 48 );
   SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 3, 48 );
   SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 4, 48 );
   SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 5, 48 );
   SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 6, 48 );
   SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 7, 48 );
   SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 8, 48 );
   SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 9, 48 );
   SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 0, 48 );
   SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 1, 48 );
   SHA256_4X32_ROUND( G, H, A, B, C, D, E, F, 2, 48 );
   SHA256_4X32_ROUND( F, G, H, A, B, C, D, E, 3, 48 );
   SHA256_4X32_ROUND( E, F, G, H, A, B, C, D, 4, 48 );
   SHA256_4X32_ROUND( D, E, F, G, H, A, B, C, 5, 48 );
   SHA256_4X32_ROUND( C, D, E, F, G, H, A, B, 6, 48 );
   SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 7, 48 );
   SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 8, 48 );
   SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 9, 48 );

   T0 = v128_add32( v128_32( K256[58] ),
                    v128_add4_32( BSG2_1( C ), CHs( C, D, E ), W[10], F ) );
@@ -368,7 +393,7 @@ int sha256_4way_transform_le_short( v128_t *state_out, const v128_t *data,
   F = v128_add32( T0, v128_add32( BSG2_0( G ), MAJs( G, H, A ) ) );

   // round 61 part 1
   W[13] = SHA2s_MEXP( W[11], W[ 6], W[14], W[13] );
   W[13] = SHA256_4X32_MEXP( W[11], W[ 6], W[14], W[13] );
   T0 = v128_add32( v128_32( K256[61] ),
                    v128_add4_32( BSG2_1( H ), CHs( H, A, B ), W[13], C ) );
   G = v128_add32( G, T0 );
@@ -401,11 +426,11 @@ int sha256_4way_transform_le_short( v128_t *state_out, const v128_t *data,
   C = v128_add32( T0, v128_add32( BSG2_0( D ), MAJs( D, E, F ) ) );

   // rounds 62 & 63
   W[14] = SHA2s_MEXP( W[12], W[ 7], W[15], W[14] );
   W[15] = SHA2s_MEXP( W[13], W[ 8], W[ 0], W[15] );
   W[14] = SHA256_4X32_MEXP( W[12], W[ 7], W[15], W[14] );
   W[15] = SHA256_4X32_MEXP( W[13], W[ 8], W[ 0], W[15] );

   SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 14, 48 );
   SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 15, 48 );
   SHA256_4X32_ROUND( C, D, E, F, G, H, A, B, 14, 48 );
   SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 15, 48 );

   state_out[0] = v128_add32( state_in[0], A );
   state_out[1] = v128_add32( state_in[1], B );
@@ -420,7 +445,7 @@ return 1;

#endif

void sha256_4way_init( sha256_4way_context *sc )
void sha256_4x32_init( sha256_4x32_context *sc )
{
   sc->count_high = sc->count_low = 0;
   sc->val[0] = v128_32( sha256_iv[0] );
@@ -433,7 +458,7 @@ void sha256_4way_init( sha256_4way_context *sc )
   sc->val[7] = v128_32( sha256_iv[7] );
}

void sha256_4way_update( sha256_4way_context *sc, const void *data, size_t len )
void sha256_4x32_update( sha256_4x32_context *sc, const void *data, size_t len )
{
   v128_t *vdata = (v128_t*)data;
   size_t ptr;
@@ -454,7 +479,7 @@ void sha256_4way_update( sha256_4way_context *sc, const void *data, size_t len )
      len -= clen;
      if ( ptr == buf_size )
      {
         sha256_4way_transform_be( sc->val, sc->buf, sc->val );
         sha256_4x32_transform_be( sc->val, sc->buf, sc->val );
         ptr = 0;
      }
      clow = sc->count_low;
@@ -465,7 +490,7 @@ void sha256_4way_update( sha256_4way_context *sc, const void *data, size_t len )
   }
}

void sha256_4way_close( sha256_4way_context *sc, void *dst )
void sha256_4x32_close( sha256_4x32_context *sc, void *dst )
{
   unsigned ptr;
   uint32_t low, high;
@@ -479,7 +504,7 @@ void sha256_4way_close( sha256_4way_context *sc, void *dst )
   if ( ptr > pad )
   {
      v128_memset_zero( sc->buf + (ptr>>2), (buf_size - ptr) >> 2 );
      sha256_4way_transform_be( sc->val, sc->buf, sc->val );
      sha256_4x32_transform_be( sc->val, sc->buf, sc->val );
      v128_memset_zero( sc->buf, pad >> 2 );
   }
   else
@@ -491,17 +516,17 @@ void sha256_4way_close( sha256_4way_context *sc, void *dst )

   sc->buf[ pad >> 2 ] = v128_32( bswap_32( high ) );
   sc->buf[( pad+4 ) >> 2 ] = v128_32( bswap_32( low ) );
   sha256_4way_transform_be( sc->val, sc->buf, sc->val );
   sha256_4x32_transform_be( sc->val, sc->buf, sc->val );

   v128_block_bswap32( dst, sc->val );
}

void sha256_4way_full( void *dst, const void *data, size_t len )
void sha256_4x32_full( void *dst, const void *data, size_t len )
{
   sha256_4way_context ctx;
   sha256_4way_init( &ctx );
   sha256_4way_update( &ctx, data, len );
   sha256_4way_close( &ctx, dst );
   sha256_4x32_context ctx;
   sha256_4x32_init( &ctx );
   sha256_4x32_update( &ctx, data, len );
   sha256_4x32_close( &ctx, dst );
}

#if defined(__AVX2__)
@@ -97,6 +97,14 @@ void sha256_neon_x2sha_final_rounds( uint32_t *state_out_X,
#define sha256_prehash_3rounds    sha256_neon_sha_prehash_3rounds
#define sha256_2x_final_rounds    sha256_neon_x2sha_final_rounds

// generic API
#define sha256_transform_le       sha256_neon_sha_transform_le
#define sha256_transform_be       sha256_neon_sha_transform_be
#define sha256_2x_transform_le    sha256_neon_x2sha_transform_le
#define sha256_2x_transform_be    sha256_neon_x2sha_transform_be
#define sha256_prehash_3rounds    sha256_neon_sha_prehash_3rounds
#define sha256_2x_final_rounds    sha256_neon_x2sha_final_rounds

#else
// without HW acceleration...
#include "sph_sha2.h"
@@ -360,15 +360,17 @@ int scanhash_sha256d_8way( struct work *work, const uint32_t max_nonce,

#if defined(SHA256D_4WAY)

int scanhash_sha256d_4way( struct work *work, const uint32_t max_nonce,
int scanhash_sha256d_4x32( struct work *work, const uint32_t max_nonce,
                           uint64_t *hashes_done, struct thr_info *mythr )
{
   v128_t vdata[32]    __attribute__ ((aligned (64)));
   v128_t block[16]    __attribute__ ((aligned (32)));
   v128_t hash32[8]    __attribute__ ((aligned (32)));
   v128_t istate[8]    __attribute__ ((aligned (32)));
   v128_t mstate[8]    __attribute__ ((aligned (32)));
   uint32_t lane_hash[8] __attribute__ ((aligned (32)));
   v128_t iv[8]        __attribute__ ((aligned (32)));
   v128_t mhash1[8]    __attribute__ ((aligned (32)));
   v128_t mhash2[8]    __attribute__ ((aligned (32)));
   v128_t mexp_pre[8]  __attribute__ ((aligned (32)));
   uint32_t lhash[8]   __attribute__ ((aligned (32)));
   uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
   uint32_t *pdata = work->data;
   const uint32_t *ptarget = work->target;
@@ -376,17 +378,16 @@ int scanhash_sha256d_4way( struct work *work, const uint32_t max_nonce,
   const uint32_t first_nonce = pdata[19];
   const uint32_t last_nonce = max_nonce - 4;
   uint32_t n = first_nonce;
   v128_t *noncev = vdata + 19;
   const int thr_id = mythr->id;
   const bool bench = opt_benchmark;
   const v128_t last_byte = v128_32( 0x80000000 );
   const v128_t four = v128_32( 4 );

   memset( block, 0, 16*4*4 );

   for ( int i = 0; i < 19; i++ )
      vdata[i] = v128_32( pdata[i] );

   *noncev = v128_set32( n+ 3, n+ 2, n+1, n );

   vdata[16+3] = v128_set32( n+3, n+2, n+1, n );
   vdata[16+4] = last_byte;
   v128_memset_zero( vdata+16 + 5, 10 );
   vdata[16+15] = v128_32( 80*8 );
@@ -396,36 +397,39 @@ int scanhash_sha256d_4way( struct work *work, const uint32_t max_nonce,
   block[15] = v128_32( 32*8 );

   // initialize state
   istate[0] = v128_32( sha256_iv[0] );
   istate[1] = v128_32( sha256_iv[1] );
   istate[2] = v128_32( sha256_iv[2] );
   istate[3] = v128_32( sha256_iv[3] );
   istate[4] = v128_32( sha256_iv[4] );
   istate[5] = v128_32( sha256_iv[5] );
   istate[6] = v128_32( sha256_iv[6] );
   istate[7] = v128_32( sha256_iv[7] );
   iv[0] = v128_32( sha256_iv[0] );
   iv[1] = v128_32( sha256_iv[1] );
   iv[2] = v128_32( sha256_iv[2] );
   iv[3] = v128_32( sha256_iv[3] );
   iv[4] = v128_32( sha256_iv[4] );
   iv[5] = v128_32( sha256_iv[5] );
   iv[6] = v128_32( sha256_iv[6] );
   iv[7] = v128_32( sha256_iv[7] );

   // hash first 64 bytes of data
   sha256_4way_transform_le( mstate, vdata, istate );
   sha256_4x32_transform_le( mhash1, vdata, iv );
   sha256_4x32_prehash_3rounds( mhash2, mexp_pre, vdata + 16, mhash1 );

   do
   {
      sha256_4way_transform_le( block, vdata+16, mstate );
      sha256_4way_transform_le( hash32, block, istate );

      v128_block_bswap32( hash32, hash32 );
      sha256_4x32_final_rounds( block, vdata+16, mhash1, mhash2, mexp_pre );
//      sha256_4x32_transform_le( block, vdata+16, mhash1 );
      sha256_4x32_transform_le( hash32, block, iv );

      for ( int lane = 0; lane < 4; lane++ )
      if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
      {
         extr_lane_4x32( lane_hash, hash32, lane, 256 );
         if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
      if ( unlikely( bswap_32( hash32_d7[ lane ] ) <= targ32_d7 ) )
      {
         extr_lane_4x32( lhash, hash32, lane, 256 );
         casti_v128( lhash, 0 ) = v128_bswap32( casti_v128( lhash, 0 ) );
         casti_v128( lhash, 1 ) = v128_bswap32( casti_v128( lhash, 1 ) );
         if ( likely( valid_hash( lhash, ptarget ) && !bench ) )
         {
            pdata[19] = n + lane;
            submit_solution( work, lane_hash, mythr );
            submit_solution( work, lhash, mythr );
         }
      }
      *noncev = v128_add32( *noncev, four );
      }
      vdata[16+3] = v128_add32( vdata[16+3], four );
      n += 4;
   } while ( (n < last_nonce) && !work_restart[thr_id].restart );
   pdata[19] = n;
@@ -7,15 +7,15 @@

#include "sph_sha2.h"

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
  #define SHA256DT_16X64 1
  #define SHA256DT_16X32 1
#elif defined(__x86_64__) && defined(__SHA__)
  #define SHA256DT_X86_SHA256 1
#elif defined(__ARM_NEON) && defined(__ARM_FEATURE_SHA2)
  #define SHA256DT_NEON_SHA256 1
#elif defined(__AVX2__)
  #define SHA256DT_8X64 1
  #define SHA256DT_8X32 1
#elif defined (__SSE2__) || defined(__ARM_NEON)
  #define SHA256DT_4X64 1
  #define SHA256DT_4X32 1
#endif
// else ref, should never happen

@@ -183,9 +183,9 @@ int scanhash_sha256dt_neon_x2sha( struct work *work, uint32_t max_nonce,
   return 0;
}

#elif defined(SHA256DT_16X64)
#elif defined(SHA256DT_16X32)

int scanhash_sha256dt_16x64( struct work *work, const uint32_t max_nonce,
int scanhash_sha256dt_16x32( struct work *work, const uint32_t max_nonce,
                             uint64_t *hashes_done, struct thr_info *mythr )
{
   __m512i block[16] __attribute__ ((aligned (128)));
@@ -275,9 +275,9 @@ int scanhash_sha256dt_16x64( struct work *work, const uint32_t max_nonce,
   return 0;
}

#elif defined(SHA256DT_8X64)
#elif defined(SHA256DT_8X32)

int scanhash_sha256dt_8x64( struct work *work, const uint32_t max_nonce,
int scanhash_sha256dt_8x32( struct work *work, const uint32_t max_nonce,
                            uint64_t *hashes_done, struct thr_info *mythr )
{
   __m256i vdata[32] __attribute__ ((aligned (64)));
@@ -355,16 +355,18 @@ int scanhash_sha256dt_8x64( struct work *work, const uint32_t max_nonce,
   return 0;
}

#elif defined(SHA256DT_4X64)
#elif defined(SHA256DT_4X32)

int scanhash_sha256dt_4x64( struct work *work, const uint32_t max_nonce,
int scanhash_sha256dt_4x32( struct work *work, const uint32_t max_nonce,
                            uint64_t *hashes_done, struct thr_info *mythr )
{
   v128_t vdata[32]    __attribute__ ((aligned (64)));
   v128_t block[16]    __attribute__ ((aligned (32)));
   v128_t hash32[8]    __attribute__ ((aligned (32)));
   v128_t iv[8]        __attribute__ ((aligned (32)));
   v128_t mhash[8]     __attribute__ ((aligned (32)));
   v128_t mhash1[8]    __attribute__ ((aligned (32)));
   v128_t mhash2[8]    __attribute__ ((aligned (32)));
   v128_t mexp_pre[8]  __attribute__ ((aligned (32)));
   uint32_t lhash[8]   __attribute__ ((aligned (32)));
   uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
   uint32_t *pdata = work->data;
@@ -373,26 +375,24 @@ int scanhash_sha256dt_4x64( struct work *work, const uint32_t max_nonce,
   const uint32_t first_nonce = pdata[19];
   const uint32_t last_nonce = max_nonce - 4;
   uint32_t n = first_nonce;
   v128_t *noncev = vdata + 19;
   const int thr_id = mythr->id;
   const bool bench = opt_benchmark;
   const v128_t last_byte = v128_32( 0x80000000 );
   const v128_t four = v128_32( 4 );

   memset( block, 0, 16*4*4 );

   for ( int i = 0; i < 19; i++ )
      vdata[i] = v128_32( pdata[i] );

   *noncev = v128_set32( n+ 3, n+ 2, n+1, n );

   vdata[16+3] = v128_set32( n+3, n+2, n+1, n );
   vdata[16+4] = last_byte;
   v128_memset_zero( vdata+16 + 5, 10 );
   v128_memset_zero( vdata+16 + 5, 9 );
   vdata[16+15] = v128_32( 0x480 );

   block[ 8] = last_byte;
   v128_memset_zero( block + 9, 6 );
   v128_memset_zero( block + 9, 5 );
   block[15] = v128_32( 0x300 );

   // initialize state
   iv[0] = v128_32( sha256dt_iv[0] );
   iv[1] = v128_32( sha256dt_iv[1] );
   iv[2] = v128_32( sha256dt_iv[2] );
@@ -402,62 +402,15 @@ int scanhash_sha256dt_4x64( struct work *work, const uint32_t max_nonce,
   iv[6] = v128_32( sha256dt_iv[6] );
   iv[7] = v128_32( sha256dt_iv[7] );

   // hash first 64 bytes of data
   sha256_4x32_transform_le( mhash, vdata, iv );

/*
   uint32_t m1 [8] __attribute__ ((aligned (32)));
   uint32_t h1 [8] __attribute__ ((aligned (32)));
   uint32_t b1 [16] __attribute__ ((aligned (32)));
   uint32_t e16 [16] __attribute__ ((aligned (32)));
   uint32_t *m4 = (uint32_t*)&midstate;
   uint32_t *h4 = (uint32_t*)hash32;

   sha256_transform_le( m1, pdata, sha256dt_iv );

   memcpy( e16, pdata + 16, 12 );
   e16[3] = n;
   e16[4] = 0x80000000;
   memset( &e16[5], 0, 40 );
   e16[15] = 0x480; // funky bit count

   b1[8] = 0x80000000;
   memset( &b1[9], 0, 24 );
   b1[9] = b1[10] = b1[11] = b1[12] = b1[13] = b1[14] = 0;
   b1[15] = 0x300; // bit count
*/
   sha256_4x32_transform_le( mhash1, vdata, iv );
   sha256_4x32_prehash_3rounds( mhash2, mexp_pre, vdata + 16, mhash1 );

   do
   {
      sha256_4x32_transform_le( block, vdata+16, mhash );

//sha256_transform_le( b1, e16, m1 );

      sha256_4x32_final_rounds( block, vdata+16, mhash1, mhash2, mexp_pre );
//      sha256_4x32_transform_le( block, vdata+16, mhash1 );
      sha256_4x32_transform_le( hash32, block, iv );

/*
sha256_transform_le( h1, b1, sha256dt_iv );

printf("final hash1: %08x %08x %08x %08x %08x %08x %08x %08x\n",
        h1[0],h1[1],h1[2],h1[3],h1[4],h1[5],h1[6],h1[7]);
printf("final hash4: %08x %08x %08x %08x %08x %08x %08x %08x\n",
        h4[0],h4[4],h4[8],h4[12],h4[16],h4[20],h4[24],h4[28]);

casti_v128( h1,0 ) = v128_bswap32( casti_v128( h1,0 ) );
casti_v128( h1,1 ) = v128_bswap32( casti_v128( h1,1 ) );
*/

//      v128_block_bswap32( hash32, hash32 );

/*
printf("bswap hash1: %08x %08x %08x %08x %08x %08x %08x %08x\n",
        h1[0],h1[1],h1[2],h1[3],h1[4],h1[5],h1[6],h1[7]);
printf("bswap hash4: %08x %08x %08x %08x %08x %08x %08x %08x\n",
        h4[0],h4[4],h4[8],h4[12],h4[16],h4[20],h4[24],h4[28]);

exit(0);
*/

      for ( int lane = 0; lane < 4; lane++ )
      {
         if ( unlikely( bswap_32( hash32_d7[ lane ] ) <= targ32_d7 ) )
@@ -472,7 +425,7 @@ exit(0);
         }
      }
      }
      *noncev = v128_add32( *noncev, four );
      vdata[16+3] = v128_add32( vdata[16+3], four );
      n += 4;
   } while ( (n < last_nonce) && !work_restart[thr_id].restart );
   pdata[19] = n;
@@ -485,9 +438,9 @@ exit(0);
int scanhash_sha256dt_ref( struct work *work, uint32_t max_nonce,
                           uint64_t *hashes_done, struct thr_info *mythr )
{
   uint32_t block1a[16] __attribute__ ((aligned (32)));
   uint32_t block2a[16] __attribute__ ((aligned (32)));
   uint32_t hasha[8]    __attribute__ ((aligned (32)));
   uint32_t block1[16]  __attribute__ ((aligned (32)));
   uint32_t block2[16]  __attribute__ ((aligned (32)));
   uint32_t hash32[8]   __attribute__ ((aligned (32)));
   uint32_t mstate[8]   __attribute__ ((aligned (32)));
   uint32_t *pdata = work->data;
   uint32_t *ptarget = work->target;
@@ -497,37 +450,40 @@ int scanhash_sha256dt_ref( struct work *work, uint32_t max_nonce,
   const int thr_id = mythr->id;
   const bool bench = opt_benchmark;

   memset( block1, 0, 64 );
   memset( block2, 0, 64 );

   // hash first 64 byte block of data
   sha256_transform_le( mstate, pdata, sha256dt_iv );

   // fill & pad second bock without nonce
   memcpy( block1a, pdata + 16, 12 );
   block1a[ 3] = 0;
   block1a[ 4] = 0x80000000;
   memset( block1a + 5, 0, 40 );
   block1a[15] = 0x480; // funky bit count
   memcpy( block1, pdata + 16, 12 );
   block1[ 3] = n;
   block1[ 4] = 0x80000000;
   memset( block1 + 5, 0, 40 );
   block1[15] = 0x480; // funky bit count

   // Pad third block
   block2a[ 8] = 0x80000000;
   memset( block2a + 9, 0, 24 );
   block2a[15] = 0x300; // bit count
   block2[ 8] = 0x80000000;
   memset( block2 + 9, 0, 24 );
   block2[15] = 0x300; // bit count
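Note the non-standard padding that distinguishes sha256dt from a plain double SHA-256: standard padding for an 80-byte message would encode 80*8 = 640 = 0x280 bits, and for a 32-byte message 32*8 = 256 = 0x100 bits (exactly what the sha256d code earlier in this diff uses), whereas here the length words are set to 0x480 and 0x300, which is why the comments call it a "funky bit count". The algorithm also starts from its own sha256dt_iv rather than the standard SHA-256 initial state.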
   do
   {
      // Insert nonce for second block
      block1a[3] = n;
      sha256_transform_le( block2a, block1a, mstate );
      block1[3] = n;
      sha256_transform_le( block2, block1, mstate );

      sha256_transform_le( hasha, block2a, sha256dt_iv );
      sha256_transform_le( hash32, block2, sha256dt_iv );

      if ( unlikely( bswap_32( hasha[7] ) <= ptarget[7] ) )
      if ( unlikely( bswap_32( hash32[7] ) <= ptarget[7] ) )
      {
         casti_v128( hasha, 0 ) = v128_bswap32( casti_v128( hasha, 0 ) );
         casti_v128( hasha, 1 ) = v128_bswap32( casti_v128( hasha, 1 ) );
         if ( likely( valid_hash( hasha, ptarget ) && !bench ) )
         casti_v128( hash32, 0 ) = v128_bswap32( casti_v128( hash32, 0 ) );
         casti_v128( hash32, 1 ) = v128_bswap32( casti_v128( hash32, 1 ) );
         if ( likely( valid_hash( hash32, ptarget ) && !bench ) )
         {
            pdata[19] = n;
            submit_solution( work, hasha, mythr );
            submit_solution( work, hash32, mythr );
         }
      }
      n += 1;
@@ -543,18 +499,18 @@ int scanhash_sha256dt_ref( struct work *work, uint32_t max_nonce,
bool register_sha256dt_algo( algo_gate_t* gate )
{
   gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT | NEON_OPT;
#if defined(SHA256DT_16X64)
   gate->scanhash = (void*)&scanhash_sha256dt_16x64;
#if defined(SHA256DT_16X32)
   gate->scanhash = (void*)&scanhash_sha256dt_16x32;
#elif defined(SHA256DT_X86_SHA256)
   gate->optimizations = SHA_OPT;
   gate->scanhash = (void*)&scanhash_sha256dt_x86_x2sha;
#elif defined(SHA256DT_NEON_SHA256)
   gate->optimizations = SHA_OPT;
   gate->scanhash = (void*)&scanhash_sha256dt_neon_x2sha;
#elif defined(SHA256DT_8X64)
   gate->scanhash = (void*)&scanhash_sha256dt_8x64;
#elif defined(SHA256DT_4X64)
   gate->scanhash = (void*)&scanhash_sha256dt_4x64;
#elif defined(SHA256DT_8X32)
   gate->scanhash = (void*)&scanhash_sha256dt_8x32;
#elif defined(SHA256DT_4X32)
   gate->scanhash = (void*)&scanhash_sha256dt_4x32;
#else
   gate->scanhash = (void*)&scanhash_sha256dt_ref;
#endif
@@ -375,11 +375,11 @@ int scanhash_sha256t_4way( struct work *work, const uint32_t max_nonce,

   v128_t vdata[32]    __attribute__ ((aligned (64)));
   v128_t block[16]    __attribute__ ((aligned (32)));
   v128_t hash32[8]    __attribute__ ((aligned (32)));
   v128_t istate[8]    __attribute__ ((aligned (32)));
   v128_t mstate[8]    __attribute__ ((aligned (32)));
//   v128_t mstate2[8]   __attribute__ ((aligned (32)));
//   v128_t mexp_pre[8]  __attribute__ ((aligned (32)));
   uint32_t lane_hash[8] __attribute__ ((aligned (32)));
   v128_t iv[8]        __attribute__ ((aligned (32)));
   v128_t mhash1[8]    __attribute__ ((aligned (32)));
   v128_t mhash2[8]    __attribute__ ((aligned (32)));
   v128_t mexp_pre[8]  __attribute__ ((aligned (32)));
   uint32_t lhash[8]   __attribute__ ((aligned (32)));
   uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
   uint32_t *pdata = work->data;
   const uint32_t *ptarget = work->target;
@@ -387,61 +387,58 @@ int scanhash_sha256t_4way( struct work *work, const uint32_t max_nonce,
   const uint32_t first_nonce = pdata[19];
   const uint32_t last_nonce = max_nonce - 4;
   uint32_t n = first_nonce;
   v128_t *noncev = vdata + 19;
   const int thr_id = mythr->id;
   const bool bench = opt_benchmark;
   const v128_t last_byte = v128_32( 0x80000000 );
   const v128_t four = v128_32( 4 );

   memset( block, 0, 16*4*4 );

   for ( int i = 0; i < 19; i++ )
      vdata[i] = v128_32( pdata[i] );

   *noncev = v128_set32( n+ 3, n+ 2, n+1, n );

   vdata[16+3] = v128_set32( n+3, n+2, n+1, n );
   vdata[16+4] = last_byte;
   v128_memset_zero( vdata+16 + 5, 10 );
   vdata[16+15] = v128_32( 80*8 ); // bit count
   vdata[16+15] = v128_32( 80*8 );

   block[ 8] = last_byte;
   v128_memset_zero( block + 9, 6 );
   block[15] = v128_32( 32*8 ); // bit count
   block[15] = v128_32( 32*8 );

   // initialize state
   istate[0] = v128_32( sha256_iv[0] );
   istate[1] = v128_32( sha256_iv[1] );
   istate[2] = v128_32( sha256_iv[2] );
   istate[3] = v128_32( sha256_iv[3] );
   istate[4] = v128_32( sha256_iv[4] );
   istate[5] = v128_32( sha256_iv[5] );
   istate[6] = v128_32( sha256_iv[6] );
   istate[7] = v128_32( sha256_iv[7] );
   iv[0] = v128_32( sha256_iv[0] );
   iv[1] = v128_32( sha256_iv[1] );
   iv[2] = v128_32( sha256_iv[2] );
   iv[3] = v128_32( sha256_iv[3] );
   iv[4] = v128_32( sha256_iv[4] );
   iv[5] = v128_32( sha256_iv[5] );
   iv[6] = v128_32( sha256_iv[6] );
   iv[7] = v128_32( sha256_iv[7] );

   // hash first 64 bytes of data
   sha256_4way_transform_le( mstate, vdata, istate );

//   sha256_4way_prehash_3rounds( mstate2, mexp_pre, vdata + 16, mstate1 );
   sha256_4x32_transform_le( mhash1, vdata, iv );
   sha256_4x32_prehash_3rounds( mhash2, mexp_pre, vdata + 16, mhash1 );

   do
   {
//      sha256_4way_final_rounds( block, vdata+16, mstate1, mstate2,
//                                mexp_pre );
      sha256_4x32_final_rounds( block, vdata+16, mhash1, mhash2, mexp_pre );
      sha256_4way_transform_le( block, block, iv );
      sha256_4way_transform_le( hash32, block, iv );

      sha256_4way_transform_le( block, vdata+16, mstate );
      sha256_4way_transform_le( block, block, istate );
      sha256_4way_transform_le( hash32, block, istate );

      v128_block_bswap32( hash32, hash32 );
      for ( int lane = 0; lane < 4; lane++ )
      if ( unlikely( hash32_d7[ lane ] <= targ32_d7 )
|
||||
{
|
||||
extr_lane_4x32( lane_hash, hash32, lane, 256 );
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
|
||||
if ( unlikely( bswap_32( hash32_d7[ lane ] ) <= targ32_d7 ) )
|
||||
{
|
||||
extr_lane_4x32( lhash, hash32, lane, 256 );
|
||||
casti_v128( lhash, 0 ) = v128_bswap32( casti_v128( lhash, 0 ) );
|
||||
casti_v128( lhash, 1 ) = v128_bswap32( casti_v128( lhash, 1 ) );
|
||||
if ( likely( valid_hash( lhash, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = n + lane;
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
submit_solution( work, lhash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = v128_add32( *noncev, four );
|
||||
}
|
||||
vdata[16+3] = v128_add32( vdata[16+3], four );
|
||||
n += 4;
|
||||
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
|
||||
pdata[19] = n;
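For orientation, the 4-way loop above is a vectorized triple SHA-256 (sha256t) of the 80-byte header: one pass over the header, then two passes over successive 32-byte digests. A minimal scalar sketch of that dataflow, assuming a hypothetical one-shot helper sha256_full( out, data, len ) rather than the project's real API:

#include <stddef.h>
#include <stdint.h>

/* Assumed helper, not a function from this codebase. */
extern void sha256_full( uint8_t *out, const void *data, size_t len );

/* Scalar sketch of sha256t: three chained SHA-256 computations. */
static void sha256t_ref( uint8_t out[32], const uint8_t header[80] )
{
   uint8_t h1[32], h2[32];
   sha256_full( h1, header, 80 );   /* pass 1: 80-byte header           */
   sha256_full( h2, h1, 32 );       /* pass 2: 32-byte digest of pass 1 */
   sha256_full( out, h2, 32 );      /* pass 3: final 32-byte hash       */
}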

@@ -53,7 +53,6 @@
#include <stdlib.h>
#include <string.h>
#include "algo/sha/hmac-sha256-hash.h"
#include "algo/sha/hmac-sha256-hash-4way.h"
#include "yespower.h"
#include "yespower-platform.c"

configure (vendored)
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.71 for cpuminer-opt 23.5.
# Generated by GNU Autoconf 2.71 for cpuminer-opt 23.6.
#
#
# Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation,

@@ -608,8 +608,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='cpuminer-opt'
PACKAGE_TARNAME='cpuminer-opt'
PACKAGE_VERSION='23.5'
PACKAGE_STRING='cpuminer-opt 23.5'
PACKAGE_VERSION='23.6'
PACKAGE_STRING='cpuminer-opt 23.6'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''

@@ -1360,7 +1360,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures cpuminer-opt 23.5 to adapt to many kinds of systems.
\`configure' configures cpuminer-opt 23.6 to adapt to many kinds of systems.

Usage: $0 [OPTION]... [VAR=VALUE]...

@@ -1432,7 +1432,7 @@ fi

if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of cpuminer-opt 23.5:";;
short | recursive ) echo "Configuration of cpuminer-opt 23.6:";;
esac
cat <<\_ACEOF

@@ -1538,7 +1538,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
cpuminer-opt configure 23.5
cpuminer-opt configure 23.6
generated by GNU Autoconf 2.71

Copyright (C) 2021 Free Software Foundation, Inc.

@@ -1985,7 +1985,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.

It was created by cpuminer-opt $as_me 23.5, which was
It was created by cpuminer-opt $as_me 23.6, which was
generated by GNU Autoconf 2.71. Invocation command line was

$ $0$ac_configure_args_raw

@@ -3593,7 +3593,7 @@ fi

# Define the identity of the package.
PACKAGE='cpuminer-opt'
VERSION='23.5'
VERSION='23.6'


printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h

@@ -7508,7 +7508,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by cpuminer-opt $as_me 23.5, which was
This file was extended by cpuminer-opt $as_me 23.6, which was
generated by GNU Autoconf 2.71. Invocation command line was

CONFIG_FILES = $CONFIG_FILES

@@ -7576,7 +7576,7 @@ ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config='$ac_cs_config_escaped'
ac_cs_version="\\
cpuminer-opt config.status 23.5
cpuminer-opt config.status 23.6
configured by $0, generated by GNU Autoconf 2.71,
with options \\"\$ac_cs_config\\"

@@ -1,4 +1,4 @@
AC_INIT([cpuminer-opt], [23.5])
AC_INIT([cpuminer-opt], [23.6])

AC_PREREQ([2.59c])
AC_CANONICAL_SYSTEM

configure~ (file diff suppressed because it is too large)

cpu-miner.c
@@ -2321,6 +2321,12 @@ static void *miner_thread( void *userdata )
gettimeofday( (struct timeval *) &tv_start, NULL );

// Scan for nonce
// nonce_found = scanhash_sha256dt_ref( &work, max_nonce, &hashes_done,
// mythr );
// nonce_found = scanhash_sha256dt_4x32( &work, max_nonce, &hashes_done,
// mythr );

nonce_found = algo_gate.scanhash( &work, max_nonce, &hashes_done,
mythr );

@@ -3677,58 +3683,44 @@ static int thread_create(struct thr_info *thr, void* func)

void get_defconfig_path(char *out, size_t bufsize, char *argv0);

#include "simd-utils.h"
#include "algo/sha/sha512-hash.h"

int main(int argc, char *argv[])
{
struct thr_info *thr;
long flags;
int i, err;

/*
#include "simd-utils.h"
uint64_t h1[8] __attribute__((aligned(32)));;
uint64_t h2[8*2] __attribute__((aligned(32)));
uint64_t hx[8*2] __attribute__((aligned(32)));

printf("bswap32: %08x, bswap64: %016lx\n", bswap_32( 0x03020100 ), bswap_64( 0x0706050403020100 ) );
printf("ror32: %08x, ror64: %016lx\n", ror32( 0x03020100, 8 ), ror64( 0x0706050403020100, 8 ) );
exit(0);
uint64_t inp[20*2] __attribute__((aligned(32))) = {0};

uint64x2_t a64 = v128_set64( 0x5555555555555555, 0xcccccccccccccccc ) ;
uint64x2_t c64 = v128_set64( 0xffffffffffffffff, 0x0000000000000000 ) ;
uint64x2_t mask = v128_set64( 0x0f0f0f0ff0f0f0f0, 0xf0f0f0f00f0f0f0f ) ;
sha512_2x64_context ctx2;
sph_sha512_context ctx1;

uint32x4_t a32 = v128_set32( 0x0f0e0d0c, 0x0b0a0908, 0x07060504, 0x03020100 );
uint16x8_t a16 = v128_set16( 0x0f0e, 0x00d0c, 0x0b0a, 0x0908, 0x0706, 0x0504, 0x0302, 0x0100 );
uint8x16_t a8 = v128_set8( 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00 );
sha512_2x64_ctx( &ctx2, h2, inp, 80 );
sha512_2x64_init( &ctx2 );
sha512_2x64_update( &ctx2, inp, 80 );
sha512_2x64_close( &ctx2, h2 );

a64 = v128_bswap64( a32 );
a32 = v128_bswap32( a32 );
a16 = v128_bswap16( a16 );

uint64_t *b64 = (uint64_t*)&a64;
uint32_t *b32 = (uint32_t*)&a32;
uint16_t *b16 = (uint16_t*)&a16;

//a32 = v128_ror32( a32, 4 );
sph_sha512_init( &ctx1 );
sph_sha512( &ctx1, inp, 80 );
sph_sha512_close( &ctx1, h1 );

printf("64: %016lx, %016lx\n", b64[1], b64[0] );
printf("h1: %016lx %016lx %016lx %016lx %016lx %016lx %016lx %016lx\n", h1[0], h1[1], h1[2], h1[3], h1[4], h1[5], h1[6], h1[7]);

printf("32: %08x %08x %08x %08x\n", b32[3], b32[2], b32[1], b32[0] );

printf("16: %04x %04x %04x %04x %04x %04x %04x %04x\n", b16[7], b16[6], b16[5], b16[4], b16[3], b16[2], b16[1], b16[0] );

//a32 = v128_ror32( a32, 28 );
//printf("32: %08x %08x %08x %08x\n", b32[3], b32[2], b32[1], b32[0] );
//a32 = v128_rol32( a32, 4 );
//printf("32: %08x %08x %08x %08x\n", b32[3], b32[2], b32[1], b32[0] );
//a32 = v128_rol32( a32, 28 );
//printf("32: %08x %08x %08x %08x\n", b32[3], b32[2], b32[1], b32[0] );
printf("h2: %016lx %016lx %016lx %016lx %016lx %016lx %016lx %016lx\n\n", h2[0], h2[2], h2[4], h2[ 6], h2[ 8], h2[10], h2[12], h2[14]);

exit(0);
*/


pthread_mutex_init(&applog_lock, NULL);

show_credits();

@@ -3864,6 +3856,9 @@ exit(0);
return 1;
}

if ( is_root() )
applog( LOG_NOTICE, "Running cpuminer as Superuser is discouraged.");

#ifndef WIN32
if (opt_background)
{
miner.h
@@ -20,7 +20,7 @@
#define USER_AGENT_OS
#endif

#define USER_AGENT PACKAGE_NAME "-" PACKAGE_VERSION "-" USER_AGENT_ARCH "-" USER_AGENT_OS
#define USER_AGENT PACKAGE_NAME "-" PACKAGE_VERSION "-" USER_AGENT_ARCH USER_AGENT_OS

//#define MAX_CPUS 128

@@ -46,7 +46,7 @@
#include <stdbool.h>
#include <inttypes.h>
#include <sys/time.h>

#include <unistd.h>
#include <pthread.h>
#include <jansson.h>
#include <curl/curl.h>

@@ -76,6 +76,15 @@
#endif

static inline bool is_root()
{
#if defined(WIN32)
return false;
#else
return !getuid();
#endif
}
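is_root() relies on the POSIX convention that the superuser's real user id is 0; getuid() comes from <unistd.h>, which miner.h includes above, and on Windows the check is compiled out so the helper always reports false. A tiny standalone sketch of the same idea (my own example, not repository code):

#include <stdio.h>
#include <unistd.h>

/* Prints whether the process runs with the real uid of the superuser.
   Run it normally and again under sudo to see both answers.           */
int main( void )
{
   printf( "running as %s\n", getuid() == 0 ? "root" : "a normal user" );
   return 0;
}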

/*
#ifndef min
#define min(a,b) (a>b ? (b) :(a))

@@ -1509,20 +1509,20 @@ static inline void v128_bswap32_intrlv80_2x64( void *d, const void *src )

#elif defined(__ARM_NEON)

casti_v128u64( d,0 ) = vdupq_laneq_u64( s0, 0 );
casti_v128u64( d,1 ) = vdupq_laneq_u64( s0, 1 );
casti_v128u64( d,0 ) = vdupq_laneq_u64( (uint64x2_t)s0, 0 );
casti_v128u64( d,1 ) = vdupq_laneq_u64( (uint64x2_t)s0, 1 );

casti_v128u64( d,2 ) = vdupq_laneq_u64( s1, 0 );
casti_v128u64( d,3 ) = vdupq_laneq_u64( s1, 1 );
casti_v128u64( d,2 ) = vdupq_laneq_u64( (uint64x2_t)s1, 0 );
casti_v128u64( d,3 ) = vdupq_laneq_u64( (uint64x2_t)s1, 1 );

casti_v128u64( d,4 ) = vdupq_laneq_u64( s2, 0 );
casti_v128u64( d,5 ) = vdupq_laneq_u64( s2, 1 );
casti_v128u64( d,4 ) = vdupq_laneq_u64( (uint64x2_t)s2, 0 );
casti_v128u64( d,5 ) = vdupq_laneq_u64( (uint64x2_t)s2, 1 );

casti_v128u64( d,6 ) = vdupq_laneq_u64( s3, 0 );
casti_v128u64( d,7 ) = vdupq_laneq_u64( s3, 1 );
casti_v128u64( d,6 ) = vdupq_laneq_u64( (uint64x2_t)s3, 0 );
casti_v128u64( d,7 ) = vdupq_laneq_u64( (uint64x2_t)s3, 1 );

casti_v128u64( d,8 ) = vdupq_laneq_u64( s4, 0 );
casti_v128u64( d,9 ) = vdupq_laneq_u64( s4, 1 );
casti_v128u64( d,8 ) = vdupq_laneq_u64( (uint64x2_t)s4, 0 );
casti_v128u64( d,9 ) = vdupq_laneq_u64( (uint64x2_t)s4, 1 );

#endif
}
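The (uint64x2_t) casts added above are most likely there to satisfy NEON's strict intrinsic typing: vdupq_laneq_u64() takes a uint64x2_t, so a source vector held in a 32-bit lane type has to be reinterpreted first. A small sketch of the same idea, with the source type assumed to be uint32x4_t:

#include <arm_neon.h>

/* The C-style vector cast used in the diff and vreinterpretq_u64_u32()
   are equivalent on GCC/Clang; this duplicates the low 64-bit lane.     */
static inline uint64x2_t dup_low64( uint32x4_t s )
{
   return vdupq_laneq_u64( vreinterpretq_u64_u32( s ), 0 );
}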

@@ -100,13 +100,15 @@ static inline uint64x2_t v128_mulw32( uint32x4_t v1, uint32x4_t v0 )
#define v128_sra32 vshrq_n_s32
#define v128_sra16 vshrq_n_s16

// logic
// unary logic
#define v128_not vmvnq_u32

// binary
#define v128_or vorrq_u32
#define v128_and vandq_u32
#define v128_not vmvnq_u32
#define v128_xor veorq_u32
#define v128_andnot( v1, v0 ) vandq_u32( vmvnq_u32(v1), v0 )
#define v128_xnor( a, b ) v128_not( v128_xor( a, b ) )
#define v128_andnot vandq_u32
#define v128_xnor( v1, v0 ) v128_not( v128_xor( v1, v0 ) )
#define v128_ornot vornq_u32

// ternary logic, veorq_u32 not defined

@@ -136,7 +138,7 @@ static inline uint64x2_t v128_mulw32( uint32x4_t v1, uint32x4_t v0 )
#define v128_unpacklo8( v1, v0 ) vzip1q_u8( v0, v1 )
#define v128_unpackhi8( v1, v0 ) vzip2q_u8( v0, v1 )

// Shorter agnostic names for unpack using NEON-like syntax
// Shorter architecture agnostic names for unpack using NEON-like mnemonics
#define v128_ziplo64 vzip1q_u64
#define v128_ziphi64 vzip2q_u64
#define v128_ziplo32 vzip1q_u32

@@ -279,28 +281,44 @@ static inline void v128_memcpy( void *dst, const void *src, const int n )
//TODO, maybe, Optimize 64 bit rotations
// Fall back for odd bit rotations
static inline uint64x2_t v128_ror64( uint64x2_t v, int c )
{ return vsriq_n_u64( vshlq_n_u64( v, 64-c ), v, c ); }
{
return vsriq_n_u64( vshlq_n_u64( (uint64x2_t)v, 64-c ), (uint64x2_t)v, c );
}

static inline uint64x2_t v128_rol64( uint64x2_t v, int c )
{ return vsriq_n_u64( vshlq_n_u64( v, c ), v, 64-c ); }
{
return vsliq_n_u64( vshrq_n_u64( (uint64x2_t)v, 64-c ), (uint64x2_t)v, c );
}

//static inline uint64x2_t v128_rol64( uint64x2_t v, int c )
//{ return vsriq_n_u64( vshlq_n_u64( v, c ), v, 64-c ); }

static inline uint32x4_t v128_ror32( uint32x4_t v, int c )
{ return vsriq_n_u32( vshlq_n_u32( v, 32-c ), v, c ); }

static inline uint32x4_t v128_rol32( uint32x4_t v, int c )
{ return vsriq_n_u32( vshlq_n_u32( v, c ), v, 32-c ); }
{ return vsliq_n_u32( vshrq_n_u32( v, 32-c ), v, c ); }

//static inline uint32x4_t v128_rol32( uint32x4_t v, int c )
//{ return vsriq_n_u32( vshlq_n_u32( v, c ), v, 32-c ); }

static inline uint16x8_t v128_ror16( uint16x8_t v, int c )
{ return vsriq_n_u16( vshlq_n_u16( v, 16-c ), v, c ); }

static inline uint16x8_t v128_rol16( uint16x8_t v, int c )
{ return vsriq_n_u16( vshlq_n_u16( v, c ), v, 16-c ); }
{ return vsliq_n_u16( vshrq_n_u16( v, 16-c ), v, c ); }

//static inline uint16x8_t v128_rol16( uint16x8_t v, int c )
//{ return vsriq_n_u16( vshlq_n_u16( v, c ), v, 16-c ); }

static inline uint8x16_t v128_ror8( uint8x16_t v, int c )
{ return vsriq_n_u8( vshlq_n_u8( v, 8-c ), v, c ); }

static inline uint8x16_t v128_rol8( uint16x8_t v, int c )
{ return vsriq_n_u8( vshlq_n_u8( v, c ), v, 8-c ); }
static inline uint8x16_t v128_rol8( uint8x16_t v, int c )
{ return vsliq_n_u8( vshrq_n_u8( v, 8-c ), v, c ); }

//static inline uint8x16_t v128_rol8( uint16x8_t v, int c )
//{ return vsriq_n_u8( vshlq_n_u8( v, c ), v, 8-c ); }
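The ror/rol helpers above lean on NEON's shift-and-insert instructions: vsri keeps the top c bits of its first operand and fills the rest with the second operand shifted right by c, while vsli is the mirror image. The scalar reference below (my own sketch, assuming 0 < c < 32) shows the equivalence being exploited for the 32-bit case.

#include <stdint.h>

/* Scalar reference for the 32-bit lane rotations above (0 < c < 32). */
static inline uint32_t ref_ror32( uint32_t x, int c )
{  /* vsriq_n_u32( vshlq_n_u32( v, 32-c ), v, c ) does this per lane  */
   return ( x >> c ) | ( x << ( 32 - c ) );
}

static inline uint32_t ref_rol32( uint32_t x, int c )
{  /* vsliq_n_u32( vshrq_n_u32( v, 32-c ), v, c ) does this per lane  */
   return ( x << c ) | ( x >> ( 32 - c ) );
}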

/*
// Optimized for half element rotations (swap)

@@ -358,7 +376,7 @@ static inline uint8x16_t v128_rol8( uint16x8_t v, int c )
}

// vector rotation , size?
static inline uint32x4_t v128_swap64( uint32x4_t v )
static inline uint64x2_t v128_swap64( uint64x2_t v )
{ return vextq_u64( v, v, 1 ); }

static inline uint32x4_t v128_shuflr32( uint32x4_t v )

@@ -413,10 +431,10 @@ static inline uint32x4_t v128_shufll32( uint32x4_t v )
#define v128_bitrev8( v ) vrbitq_u8

// reverse byte order
#define v128_bswap16 vrev16q_u8
#define v128_bswap32 vrev32q_u8
#define v128_bswap64 vrev64q_u8
#define v128_bswap128(v) v128_swap64( v128_bswap64(v) )
#define v128_bswap16(v) (uint16x8_t)vrev16q_u8( (uint8x16_t)(v) )
#define v128_bswap32(v) (uint32x4_t)vrev32q_u8( (uint8x16_t)(v) )
#define v128_bswap64(v) (uint64x2_t)vrev64q_u8( (uint8x16_t)(v) )
#define v128_bswap128(v) (uint32x4_t)v128_swap64( v128_bswap64(v) )
#define v128_bswap256(p) v128_bswap128( (p)[0], (p)[1] )

// Useful for x86_64 but does nothing for ARM