cpuminer-opt — commit v23.6
Mirror of https://github.com/JayDDee/cpuminer-opt.git
@@ -136,10 +136,10 @@ static void fill_block( __m256i *state, const block *ref_block,

#else // SSE2

static void fill_block( v128_t *state, const block *ref_block,
static void fill_block( v128u64_t *state, const block *ref_block,
block *next_block, int with_xor )
{
v128_t block_XY[ARGON2_OWORDS_IN_BLOCK];
v128u64_t block_XY[ARGON2_OWORDS_IN_BLOCK];
unsigned int i;

if ( with_xor )
@@ -23,56 +23,46 @@

#if !defined(__AVX512F__)

#if !defined(__AVX2__)

static BLAKE2_INLINE v128_t fBlaMka(v128_t x, v128_t y) {
const v128_t z = v128_mulw32(x, y);
return v128_add64(v128_add64(x, y), v128_add64(z, z));
static BLAKE2_INLINE v128_t fBlaMka(v128_t x, v128_t y)
{
const v128u64_t z = v128_mulw32( x, y );
return (v128u32_t)v128_add64( v128_add64( (v128u64_t)x, (v128u64_t)y ),
v128_add64( z, z ) );
}

#define G1(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = fBlaMka(A0, B0); \
A1 = fBlaMka(A1, B1); \
\
D0 = v128_xor(D0, A0); \
D1 = v128_xor(D1, A1); \
\
D0 = v128_ror64(D0, 32); \
D1 = v128_ror64(D1, 32); \
\
C0 = fBlaMka(C0, D0); \
C1 = fBlaMka(C1, D1); \
\
B0 = v128_xor(B0, C0); \
B1 = v128_xor(B1, C1); \
\
B0 = v128_ror64(B0, 24); \
B1 = v128_ror64(B1, 24); \
} while ((void)0, 0)
#define G1( A0, B0, C0, D0, A1, B1, C1, D1 ) \
{ \
A0 = fBlaMka( A0, B0 ); \
A1 = fBlaMka( A1, B1 ); \
D0 = v128_xor( D0, A0 ); \
D1 = v128_xor( D1, A1 ); \
D0 = v128_ror64( D0, 32 ); \
D1 = v128_ror64( D1, 32 ); \
C0 = fBlaMka( C0, D0 ); \
C1 = fBlaMka( C1, D1 ); \
B0 = v128_xor( B0, C0 ); \
B1 = v128_xor( B1, C1 ); \
B0 = v128_ror64( B0, 24 ); \
B1 = v128_ror64( B1, 24 ); \
}

#define G2(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = fBlaMka(A0, B0); \
A1 = fBlaMka(A1, B1); \
\
D0 = v128_xor(D0, A0); \
D1 = v128_xor(D1, A1); \
\
D0 = v128_ror64(D0, 16); \
D1 = v128_ror64(D1, 16); \
\
C0 = fBlaMka(C0, D0); \
C1 = fBlaMka(C1, D1); \
\
B0 = v128_xor(B0, C0); \
B1 = v128_xor(B1, C1); \
\
B0 = v128_ror64(B0, 63); \
B1 = v128_ror64(B1, 63); \
} while ((void)0, 0)
#define G2( A0, B0, C0, D0, A1, B1, C1, D1 ) \
{ \
A0 = fBlaMka( A0, B0 ); \
A1 = fBlaMka( A1, B1 ); \
D0 = v128_xor( D0, A0 ); \
D1 = v128_xor( D1, A1 ); \
D0 = v128_ror64( D0, 16 ); \
D1 = v128_ror64( D1, 16 ); \
C0 = fBlaMka( C0, D0 ); \
C1 = fBlaMka( C1, D1 ); \
B0 = v128_xor( B0, C0 ); \
B1 = v128_xor( B1, C1 ); \
B0 = v128_ror64( B0, 63 ); \
B1 = v128_ror64( B1, 63 ); \
}

#if defined(__SSSE3__) || defined(__ARM_NEON)
@@ -2303,9 +2303,8 @@ static inline void salsa_simd128_shuffle_2buf( uint32_t *xa, uint32_t *xb )
XB[2] = _mm_blend_epi16( t0, t2, 0x0f );
XB[3] = _mm_blend_epi16( t1, t3, 0xc3 );

#elif defined(__SSE2__) || defined(__ARM_NEON)
#else // SSE2 or NEON

/*
const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
@@ -2326,9 +2325,10 @@ static inline void salsa_simd128_shuffle_2buf( uint32_t *xa, uint32_t *xb )
XB[1] = v128_blendv( t1, t3, mask_3c );
XB[2] = v128_blendv( t2, t0, mask_f0 );
XB[3] = v128_blendv( t3, t1, mask_3c );
*/

#endif

/*
v128_t YA0, YA1, YA2, YA3, YB0, YB1, YB2, YB3;

YA0 = v128_set32( xa[15], xa[10], xa[ 5], xa[ 0] );
@@ -2348,8 +2348,7 @@ static inline void salsa_simd128_shuffle_2buf( uint32_t *xa, uint32_t *xb )
XB[2] = YB2;
XA[3] = YA3;
XB[3] = YB3;

#endif
*/
}

static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
@@ -2357,8 +2356,8 @@ static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )

v128_t *XA = (v128_t*)xa;
v128_t *XB = (v128_t*)xb;

#if defined(__SSE4_1__)

#if defined(__SSE4_1__)

v128_t t0 = _mm_blend_epi16( XA[0], XA[2], 0xf0 );
v128_t t1 = _mm_blend_epi16( XA[0], XA[2], 0x0f );
@@ -2377,9 +2376,8 @@ static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
XB[2] = _mm_blend_epi16( t1, t3, 0xcc );
XB[3] = _mm_blend_epi16( t1, t3, 0x33 );

#elif defined(__SSE2__) || defined(__ARM_NEON)
#else // SSE2 or NEON

/*
const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
@@ -2389,19 +2387,21 @@ static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
v128_t t2 = v128_blendv( XA[1], XA[3], mask_3c );
v128_t t3 = v128_blendv( XA[3], XA[1], mask_3c );
XA[0] = v128_blendv( t0, t2, mask_cc );
XA[1] = v128_blendv( t1, t3, mask_cc );
XA[2] = v128_blendv( t2, t0, mask_cc );
XA[1] = v128_blendv( t2, t0, mask_cc );
XA[2] = v128_blendv( t1, t3, mask_cc );
XA[3] = v128_blendv( t3, t1, mask_cc );
t0 = v128_blendv( XB[0], XB[2], mask_f0 );
t1 = v128_blendv( XB[1], XB[3], mask_3c );
t2 = v128_blendv( XB[2], XB[0], mask_f0 );
t1 = v128_blendv( XB[2], XB[0], mask_f0 );
t2 = v128_blendv( XB[1], XB[3], mask_3c );
t3 = v128_blendv( XB[3], XB[1], mask_3c );
XB[0] = v128_blendv( t0, t2, mask_cc );
XB[1] = v128_blendv( t1, t3, mask_cc );
XB[2] = v128_blendv( t2, t0, mask_cc );
XB[1] = v128_blendv( t2, t0, mask_cc );
XB[2] = v128_blendv( t1, t3, mask_cc );
XB[3] = v128_blendv( t3, t1, mask_cc );
*/

#endif

/*
v128_ovly ya[4], za[4], yb[4], zb[4];

ya[0].m128 = XA[0];
@@ -2457,9 +2457,7 @@ static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
XB[2] = zb[2].m128;
XA[3] = za[3].m128;
XB[3] = zb[3].m128;

#endif
*/
}

static void salsa8_simd128_2buf( uint32_t * const ba, uint32_t * const bb,
@@ -2611,7 +2609,7 @@ static inline void salsa_simd128_shuffle_3buf( uint32_t *xa, uint32_t *xb,
v128_t *XB = (v128_t*)xb;
v128_t *XC = (v128_t*)xc;

#if defined(__SSE4_1__)
#if defined(__SSE4_1__)

v128_t t0 = _mm_blend_epi16( XA[0], XA[1], 0xcc );
v128_t t1 = _mm_blend_epi16( XA[0], XA[1], 0x33 );
@@ -2638,9 +2636,8 @@ static inline void salsa_simd128_shuffle_3buf( uint32_t *xa, uint32_t *xb,
XC[2] = _mm_blend_epi16( t0, t2, 0x0f );
XC[3] = _mm_blend_epi16( t1, t3, 0xc3 );

#elif defined(__SSE2__) || defined(__ARM_NEON)
#else // SSE2 or NEON

/*
const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
@@ -2650,28 +2647,29 @@ static inline void salsa_simd128_shuffle_3buf( uint32_t *xa, uint32_t *xb,
v128_t t2 = v128_blendv( XA[2], XA[3], mask_cc );
v128_t t3 = v128_blendv( XA[3], XA[2], mask_cc );
XA[0] = v128_blendv( t0, t2, mask_f0 );
XA[1] = v128_blendv( t1, t3, mask_3c );
XA[2] = v128_blendv( t2, t0, mask_f0 );
XA[1] = v128_blendv( t2, t0, mask_f0 );
XA[2] = v128_blendv( t1, t3, mask_3c );
XA[3] = v128_blendv( t3, t1, mask_3c );
t0 = v128_blendv( XB[0], XB[1], mask_cc );
t1 = v128_blendv( XB[1], XB[0], mask_cc );
t2 = v128_blendv( XB[2], XB[3], mask_cc );
t3 = v128_blendv( XB[3], XB[2], mask_cc );
XB[0] = v128_blendv( t0, t2, mask_f0 );
XB[1] = v128_blendv( t1, t3, mask_3c );
XB[2] = v128_blendv( t2, t0, mask_f0 );
XB[1] = v128_blendv( t2, t0, mask_f0 );
XB[2] = v128_blendv( t1, t3, mask_3c );
XB[3] = v128_blendv( t3, t1, mask_3c );
t0 = v128_blendv( XC[0], XC[1], mask_cc );
t1 = v128_blendv( XC[1], XC[0], mask_cc );
t2 = v128_blendv( XC[2], XC[3], mask_cc );
t3 = v128_blendv( XC[3], XC[2], mask_cc );
XC[0] = v128_blendv( t0, t2, mask_f0 );
XC[1] = v128_blendv( t1, t3, mask_3c );
XC[2] = v128_blendv( t2, t0, mask_f0 );
XC[1] = v128_blendv( t2, t0, mask_f0 );
XC[2] = v128_blendv( t1, t3, mask_3c );
XC[3] = v128_blendv( t3, t1, mask_3c );
*/

#endif

/*
v128_t YA0, YA1, YA2, YA3, YB0, YB1, YB2, YB3, YC0, YC1, YC2, YC3;

YA0 = v128_set32( xa[15], xa[10], xa[ 5], xa[ 0] );
@@ -2699,9 +2697,7 @@ static inline void salsa_simd128_shuffle_3buf( uint32_t *xa, uint32_t *xb,
XA[3] = YA3;
XB[3] = YB3;
XC[3] = YC3;

#endif
*/
}

static inline void salsa_simd128_unshuffle_3buf( uint32_t* xa, uint32_t* xb,
@@ -2738,9 +2734,8 @@ static inline void salsa_simd128_unshuffle_3buf( uint32_t* xa, uint32_t* xb,
XC[2] = _mm_blend_epi16( t1, t3, 0xcc );
XC[3] = _mm_blend_epi16( t1, t3, 0x33 );

#elif defined(__SSE2__) || defined(__ARM_NEON)
#else // SSE2 or NEON

/*
const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
@@ -2750,27 +2745,29 @@ static inline void salsa_simd128_unshuffle_3buf( uint32_t* xa, uint32_t* xb,
v128_t t2 = v128_blendv( XA[1], XA[3], mask_3c );
v128_t t3 = v128_blendv( XA[3], XA[1], mask_3c );
XA[0] = v128_blendv( t0, t2, mask_cc );
XA[1] = v128_blendv( t1, t3, mask_cc );
XA[2] = v128_blendv( t2, t0, mask_cc );
XA[1] = v128_blendv( t2, t0, mask_cc );
XA[2] = v128_blendv( t1, t3, mask_cc );
XA[3] = v128_blendv( t3, t1, mask_cc );
t0 = v128_blendv( XB[0], XB[2], mask_f0 );
t1 = v128_blendv( XB[1], XB[3], mask_3c );
t2 = v128_blendv( XB[2], XB[0], mask_f0 );
t1 = v128_blendv( XB[2], XB[0], mask_f0 );
t2 = v128_blendv( XB[1], XB[3], mask_3c );
t3 = v128_blendv( XB[3], XB[1], mask_3c );
XB[0] = v128_blendv( t0, t2, mask_cc );
XB[1] = v128_blendv( t1, t3, mask_cc );
XB[2] = v128_blendv( t2, t0, mask_cc );
XB[1] = v128_blendv( t2, t0, mask_cc );
XB[2] = v128_blendv( t1, t3, mask_cc );
XB[3] = v128_blendv( t3, t1, mask_cc );
t0 = v128_blendv( XC[0], XC[2], mask_f0 );
t1 = v128_blendv( XC[1], XC[3], mask_3c );
t2 = v128_blendv( XC[2], XC[0], mask_f0 );
t1 = v128_blendv( XC[2], XC[0], mask_f0 );
t2 = v128_blendv( XC[1], XC[3], mask_3c );
t3 = v128_blendv( XC[3], XC[1], mask_3c );
XC[0] = v128_blendv( t0, t2, mask_cc );
XC[1] = v128_blendv( t1, t3, mask_cc );
XC[2] = v128_blendv( t2, t0, mask_cc );
XC[1] = v128_blendv( t2, t0, mask_cc );
XC[2] = v128_blendv( t1, t3, mask_cc );
XC[3] = v128_blendv( t3, t1, mask_cc );
*/

#endif

/*
v128_ovly ya[4], za[4], yb[4], zb[4], yc[4], zc[4];

ya[0].m128 = XA[0];
@@ -2850,9 +2847,7 @@ static inline void salsa_simd128_unshuffle_3buf( uint32_t* xa, uint32_t* xb,
XA[3] = za[3].m128;
XB[3] = zb[3].m128;
XC[3] = zc[3].m128;

#endif
*/
}

// Triple buffered, 3x memory usage
@@ -56,10 +56,10 @@ static const uint32_t sha256_initial_state[8] =

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define SCRYPT_THROUGHPUT 16
#elif defined(__SHA__) || defined(__ARM_FEATURE_SHA2)
#define SCRYPT_THROUGHPUT 2
#elif defined(__AVX2__)
#define SCRYPT_THROUGHPUT 8
#elif defined(__SHA__) // NEON?
#define SCRYPT_THROUGHPUT 2
#else
#define SCRYPT_THROUGHPUT 4
#endif
@@ -155,7 +155,7 @@ static inline void PBKDF2_SHA256_128_32(uint32_t *tstate, uint32_t *ostate,
output[i] = bswap_32( ostate[i] );
}

#if defined(__SHA__)
#if defined(__SHA__) || defined(__ARM_FEATURE_SHA2)

static inline void HMAC_SHA256_80_init_SHA_2BUF( const uint32_t *key0,
const uint32_t *key1, uint32_t *tstate0, uint32_t *tstate1,
@@ -266,6 +266,9 @@ static inline void PBKDF2_SHA256_128_32_SHA_2BUF( uint32_t *tstate0,

#endif // SHA


static const uint32_t keypad_4way[4 * 12] = {
0x80000000, 0x80000000, 0x80000000, 0x80000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -1221,10 +1224,10 @@ static int scrypt_N_1_1_256_16way( const uint32_t *input, uint32_t *output,

#endif // AVX512

#if ( SCRYPT_THROUGHPUT == 2 ) && defined(__SHA__)
#if ( SCRYPT_THROUGHPUT == 2 ) && ( defined(__SHA__) || defined(__ARM_FEATURE_SHA2) )

static int scrypt_N_1_1_256_sha_2buf( const uint32_t *input, uint32_t *output,
uint32_t *midstate, int N, int thrid )
static int scrypt_N_1_1_256_sha_2buf( const uint32_t *input,
uint32_t *output, uint32_t *midstate, int N, int thrid )
{
uint32_t _ALIGN(128) tstate[ 2*8 ];
uint32_t _ALIGN(128) ostate[ 2*8 ];
@@ -1241,13 +1244,13 @@ static int scrypt_N_1_1_256_sha_2buf( const uint32_t *input, uint32_t *output,
scrypt_core_simd128_2buf( W, scratchbuf, N );
if ( work_restart[thrid].restart ) return 0;

PBKDF2_SHA256_128_32_SHA_2BUF( tstate, tstate+8, ostate, ostate+8, W, W+32,
output, output+8 );
PBKDF2_SHA256_128_32_SHA_2BUF( tstate, tstate+8, ostate,
ostate+8, W, W+32, output, output+8 );

return 1;
}

#endif
#endif // THROUGHPUT = 2 && SHA

#if ( SCRYPT_THROUGHPUT == 4 )

@@ -1267,13 +1270,10 @@ static int scrypt_N_1_1_256_4way_sha( const uint32_t *input, uint32_t *output,

HMAC_SHA256_80_init( input, tstate, ostate );
PBKDF2_SHA256_80_128( tstate, ostate, input, W );

HMAC_SHA256_80_init( input +20, tstate+ 8, ostate+ 8 );
PBKDF2_SHA256_80_128( tstate+ 8, ostate+ 8, input +20, W+32 );

HMAC_SHA256_80_init( input +40, tstate+16, ostate+16 );
PBKDF2_SHA256_80_128( tstate+16, ostate+16, input +40, W+64 );

HMAC_SHA256_80_init( input +60, tstate+24, ostate+24 );
PBKDF2_SHA256_80_128( tstate+24, ostate+24, input +60, W+96 );

@@ -1303,11 +1303,8 @@ static int scrypt_N_1_1_256_4way_sha( const uint32_t *input, uint32_t *output,
if ( work_restart[thrid].restart ) return 0;

PBKDF2_SHA256_128_32( tstate, ostate, W, output );

PBKDF2_SHA256_128_32( tstate+ 8, ostate+ 8, W+32, output+ 8 );

PBKDF2_SHA256_128_32( tstate+16, ostate+16, W+64, output+16 );

PBKDF2_SHA256_128_32( tstate+24, ostate+24, W+96, output+24 );

return 1;
@@ -1418,14 +1415,14 @@ extern int scanhash_scrypt( struct work *work, uint32_t max_nonce,
rc = scrypt_N_1_1_256_8way( data, hash, midstate, opt_param_n,
thr_id );
#elif ( SCRYPT_THROUGHPUT == 4 )
#if defined(__SHA__)
#if defined(__SHA__) || defined(__ARM_FEATURE_SHA2)
rc = scrypt_N_1_1_256_4way_sha( data, hash, midstate, opt_param_n,
thr_id );
#else
rc = scrypt_N_1_1_256_4way( data, hash, midstate, opt_param_n,
thr_id );
#endif
#elif ( SCRYPT_THROUGHPUT == 2 ) && defined(__SHA__)
#elif ( SCRYPT_THROUGHPUT == 2 ) && ( defined(__SHA__) || defined(__ARM_FEATURE_SHA2) )
rc = scrypt_N_1_1_256_sha_2buf( data, hash, midstate, opt_param_n,
thr_id );
#else
@@ -1472,10 +1469,10 @@ bool scrypt_miner_thread_init( int thr_id )

bool register_scrypt_algo( algo_gate_t* gate )
{
#if defined(__SHA__)
gate->optimizations = SSE2_OPT | SHA_OPT;
#if defined(__SHA__) || defined(__ARM_FEATURE_SHA2)
gate->optimizations = SSE2_OPT | SHA_OPT | NEON_OPT;
#else
gate->optimizations = SSE2_OPT | SSE42_OPT | AVX_OPT | AVX2_OPT | AVX512_OPT;
gate->optimizations = SSE2_OPT | SSE42_OPT | AVX_OPT | AVX2_OPT | AVX512_OPT | NEON_OPT;
#endif
gate->miner_thread_init =(void*)&scrypt_miner_thread_init;
gate->scanhash = (void*)&scanhash_scrypt;
@@ -1492,15 +1489,15 @@ bool register_scrypt_algo( algo_gate_t* gate )
scratchbuf_size = opt_param_n * 3 * 128; // 3 buf
else
scratchbuf_size = opt_param_n * 4 * 128; // 4 way
#elif defined(__SHA__) || defined(__ARM_FEATURE_SHA2)
// scrypt_throughput = 2;
scratchbuf_size = opt_param_n * 2 * 128; // 2 buf
#elif defined(__AVX2__)
// scrypt_throughput = 8;
if ( opt_param_n > 0x4000 )
scratchbuf_size = opt_param_n * 3 * 128; // 3 buf
else
scratchbuf_size = opt_param_n * 2 * 128; // 2 way
#elif defined(__SHA__)
// scrypt_throughput = 4;
scratchbuf_size = opt_param_n * 2 * 128; // 2 buf
#else
// scrypt_throughput = 4;
if ( opt_param_n > 0x4000 )
@@ -54,29 +54,29 @@ static const uint32_t K256[64] =
v128_xor( v128_xor( \
v128_ror32(x, 17), v128_ror32(x, 19) ), v128_sr32(x, 10) )

#define SHA2s_MEXP( a, b, c, d ) \
#define SHA256_4X32_MEXP( a, b, c, d ) \
v128_add4_32( SSG2_1( a ), b, SSG2_0( c ), d );

#define SHA256x4_MSG_EXPANSION( W ) \
W[ 0] = SHA2s_MEXP( W[14], W[ 9], W[ 1], W[ 0] ); \
W[ 1] = SHA2s_MEXP( W[15], W[10], W[ 2], W[ 1] ); \
W[ 2] = SHA2s_MEXP( W[ 0], W[11], W[ 3], W[ 2] ); \
W[ 3] = SHA2s_MEXP( W[ 1], W[12], W[ 4], W[ 3] ); \
W[ 4] = SHA2s_MEXP( W[ 2], W[13], W[ 5], W[ 4] ); \
W[ 5] = SHA2s_MEXP( W[ 3], W[14], W[ 6], W[ 5] ); \
W[ 6] = SHA2s_MEXP( W[ 4], W[15], W[ 7], W[ 6] ); \
W[ 7] = SHA2s_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] ); \
W[ 8] = SHA2s_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] ); \
W[ 9] = SHA2s_MEXP( W[ 7], W[ 2], W[10], W[ 9] ); \
W[10] = SHA2s_MEXP( W[ 8], W[ 3], W[11], W[10] ); \
W[11] = SHA2s_MEXP( W[ 9], W[ 4], W[12], W[11] ); \
W[12] = SHA2s_MEXP( W[10], W[ 5], W[13], W[12] ); \
W[13] = SHA2s_MEXP( W[11], W[ 6], W[14], W[13] ); \
W[14] = SHA2s_MEXP( W[12], W[ 7], W[15], W[14] ); \
W[15] = SHA2s_MEXP( W[13], W[ 8], W[ 0], W[15] );
#define SHA256_4X32_MSG_EXPANSION( W ) \
W[ 0] = SHA256_4X32_MEXP( W[14], W[ 9], W[ 1], W[ 0] ); \
W[ 1] = SHA256_4X32_MEXP( W[15], W[10], W[ 2], W[ 1] ); \
W[ 2] = SHA256_4X32_MEXP( W[ 0], W[11], W[ 3], W[ 2] ); \
W[ 3] = SHA256_4X32_MEXP( W[ 1], W[12], W[ 4], W[ 3] ); \
W[ 4] = SHA256_4X32_MEXP( W[ 2], W[13], W[ 5], W[ 4] ); \
W[ 5] = SHA256_4X32_MEXP( W[ 3], W[14], W[ 6], W[ 5] ); \
W[ 6] = SHA256_4X32_MEXP( W[ 4], W[15], W[ 7], W[ 6] ); \
W[ 7] = SHA256_4X32_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] ); \
W[ 8] = SHA256_4X32_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] ); \
W[ 9] = SHA256_4X32_MEXP( W[ 7], W[ 2], W[10], W[ 9] ); \
W[10] = SHA256_4X32_MEXP( W[ 8], W[ 3], W[11], W[10] ); \
W[11] = SHA256_4X32_MEXP( W[ 9], W[ 4], W[12], W[11] ); \
W[12] = SHA256_4X32_MEXP( W[10], W[ 5], W[13], W[12] ); \
W[13] = SHA256_4X32_MEXP( W[11], W[ 6], W[14], W[13] ); \
W[14] = SHA256_4X32_MEXP( W[12], W[ 7], W[15], W[14] ); \
W[15] = SHA256_4X32_MEXP( W[13], W[ 8], W[ 0], W[15] );
#define SHA2s_4WAY_STEP(A, B, C, D, E, F, G, H, i, j) \
do { \
#define SHA256_4X32_ROUND(A, B, C, D, E, F, G, H, i, j) \
{ \
v128_t T1, T2; \
v128_t K = v128_32( K256[( (j)+(i) )] ); \
T1 = v128_add32( H, v128_add4_32( BSG2_1(E), CHs(E, F, G), \
@@ -85,31 +85,41 @@ do { \
Y_xor_Z = X_xor_Y; \
D = v128_add32( D, T1 ); \
H = v128_add32( T1, T2 ); \
} while (0)
}

#define SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, j ) \
#define SHA256_4X32_ROUND_NOMSG( A, B, C, D, E, F, G, H, i, j ) \
{ \
v128_t T1 = v128_add4_32( H, BSG2_1(E), CHs(E, F, G), \
v128_32( K256[(i)+(j)] ) ); \
v128_t T2 = v128_add32( BSG2_0(A), MAJs(A, B, C) ); \
Y_xor_Z = X_xor_Y; \
D = v128_add32( D, T1 ); \
H = v128_add32( T1, T2 ); \
}

#define SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, j ) \
{ \
v128_t X_xor_Y, Y_xor_Z = v128_xor( B, C ); \
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, j ); \
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, j ); \
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, j ); \
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 3, j ); \
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 4, j ); \
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 5, j ); \
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 6, j ); \
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 7, j ); \
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 8, j ); \
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 9, j ); \
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 10, j ); \
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 11, j ); \
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 12, j ); \
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 13, j ); \
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 14, j ); \
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 15, j ); \
SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 0, j ); \
SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 1, j ); \
SHA256_4X32_ROUND( G, H, A, B, C, D, E, F, 2, j ); \
SHA256_4X32_ROUND( F, G, H, A, B, C, D, E, 3, j ); \
SHA256_4X32_ROUND( E, F, G, H, A, B, C, D, 4, j ); \
SHA256_4X32_ROUND( D, E, F, G, H, A, B, C, 5, j ); \
SHA256_4X32_ROUND( C, D, E, F, G, H, A, B, 6, j ); \
SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 7, j ); \
SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 8, j ); \
SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 9, j ); \
SHA256_4X32_ROUND( G, H, A, B, C, D, E, F, 10, j ); \
SHA256_4X32_ROUND( F, G, H, A, B, C, D, E, 11, j ); \
SHA256_4X32_ROUND( E, F, G, H, A, B, C, D, 12, j ); \
SHA256_4X32_ROUND( D, E, F, G, H, A, B, C, 13, j ); \
SHA256_4X32_ROUND( C, D, E, F, G, H, A, B, 14, j ); \
SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 15, j ); \
}
// LE data, no need to byte swap
static inline void SHA256_4WAY_TRANSFORM( v128_t *out, v128_t *W,
static inline void SHA256_4X32_TRANSFORM( v128_t *out, v128_t *W,
const v128_t *in )
{
v128_t A, B, C, D, E, F, G, H;
@@ -123,13 +133,13 @@ static inline void SHA256_4WAY_TRANSFORM( v128_t *out, v128_t *W,
G = in[6];
H = in[7];

SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 0 );
SHA256x4_MSG_EXPANSION( W );
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
SHA256x4_MSG_EXPANSION( W );
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
SHA256x4_MSG_EXPANSION( W );
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 48 );
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 0 );
SHA256_4X32_MSG_EXPANSION( W );
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
SHA256_4X32_MSG_EXPANSION( W );
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
SHA256_4X32_MSG_EXPANSION( W );
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 48 );

out[0] = v128_add32( in[0], A );
out[1] = v128_add32( in[1], B );
@@ -142,47 +152,37 @@ static inline void SHA256_4WAY_TRANSFORM( v128_t *out, v128_t *W,
}

// LE data, no need to byte swap
void sha256_4way_transform_le( v128_t *state_out, const v128_t *data,
void sha256_4x32_transform_le( v128_t *state_out, const v128_t *data,
const v128_t *state_in )
{
v128_t W[16];
v128_memcpy( W, data, 16 );
SHA256_4WAY_TRANSFORM( state_out, W, state_in );
SHA256_4X32_TRANSFORM( state_out, W, state_in );
}

// BE data, need to byte swap input data
void sha256_4way_transform_be( v128_t *state_out, const v128_t *data,
void sha256_4x32_transform_be( v128_t *state_out, const v128_t *data,
const v128_t *state_in )
{
v128_t W[16];
v128_block_bswap32( W, data );
v128_block_bswap32( W+8, data+8 );
SHA256_4WAY_TRANSFORM( state_out, W, state_in );
SHA256_4X32_TRANSFORM( state_out, W, state_in );
}
// prehash_3rounds & final_rounds are not working
void sha256_4way_prehash_3rounds( v128_t *state_mid, v128_t *X,
const v128_t *W, const v128_t *state_in )
void sha256_4x32_prehash_3rounds( v128_t *state_mid, v128_t *X,
const v128_t *W, const v128_t *state_in )
{
v128_t A, B, C, D, E, F, G, H;
v128_t A, B, C, D, E, F, G, H, T1;

// precalculate constant part msg expansion for second iteration.
X[ 0] = SHA2s_MEXP( W[14], W[ 9], W[ 1], W[ 0] );
X[ 1] = SHA2s_MEXP( W[15], W[10], W[ 2], W[ 1] );
X[ 2] = v128_add32( v128_add32( SSG2_1( X[ 0] ), W[11] ), W[ 2] );
X[ 3] = v128_add32( v128_add32( SSG2_1( X[ 1] ), W[12] ), SSG2_0( W[ 4] ) );
X[ 4] = v128_add32( v128_add32( W[13], SSG2_0( W[ 5] ) ), W[ 4] );
X[ 5] = v128_add32( v128_add32( W[14], SSG2_0( W[ 6] ) ), W[ 5] );
X[ 6] = v128_add32( v128_add32( W[15], SSG2_0( W[ 7] ) ), W[ 6] );
X[ 7] = v128_add32( v128_add32( X[ 0], SSG2_0( W[ 8] ) ), W[ 7] );
X[ 8] = v128_add32( v128_add32( X[ 1], SSG2_0( W[ 9] ) ), W[ 8] );
X[ 9] = v128_add32( SSG2_0( W[10] ), W[ 9] );
X[10] = v128_add32( SSG2_0( W[11] ), W[10] );
X[11] = v128_add32( SSG2_0( W[12] ), W[11] );
X[12] = v128_add32( SSG2_0( W[13] ), W[12] );
X[13] = v128_add32( SSG2_0( W[14] ), W[13] );
X[14] = v128_add32( SSG2_0( W[15] ), W[14] );
X[15] = v128_add32( SSG2_0( X[ 0] ), W[15] );
X[ 0] = v128_add32( SSG2_0( W[ 1] ), W[ 0] );
X[ 1] = v128_add32( v128_add32( SSG2_1( W[15] ), SSG2_0( W[ 2] ) ), W[ 1] );
X[ 2] = v128_add32( SSG2_1( X[ 0] ), W[ 2] );
X[ 3] = v128_add32( SSG2_1( X[ 1] ), SSG2_0( W[ 4] ) );
X[ 4] = SSG2_0( W[15] );
X[ 5] = v128_add32( SSG2_0( X[ 0] ), W[15] );
// W[0] for round 32
X[ 6] = v128_add32( SSG2_0( X[ 1] ), X[ 0] );

A = v128_load( state_in );
B = v128_load( state_in + 1 );
@@ -194,11 +194,16 @@ void sha256_4way_prehash_3rounds( v128_t *state_mid, v128_t *X,
H = v128_load( state_in + 7 );

v128_t X_xor_Y, Y_xor_Z = v128_xor( B, C );

SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );

SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 0, 0 );
SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 1, 0 );
SHA256_4X32_ROUND( G, H, A, B, C, D, E, F, 2, 0 );

// round 3 part 1, avoid nonces W[3]
T1 = v128_add4_32( E, BSG2_1(B), CHs(B, C, D), v128_32( K256[3] ) );
A = v128_add32( A, T1 );
E = v128_add32( T1, v128_add32( BSG2_0(F), MAJs(F, G, H) ) );

v128_store( state_mid , A );
v128_store( state_mid + 1, B );
v128_store( state_mid + 2, C );
@@ -209,7 +214,7 @@ void sha256_4way_prehash_3rounds( v128_t *state_mid, v128_t *X,
v128_store( state_mid + 7, H );
}
void sha256_4way_final_rounds( v128_t *state_out, const v128_t *data,
void sha256_4x32_final_rounds( v128_t *state_out, const v128_t *data,
const v128_t *state_in, const v128_t *state_mid, const v128_t *X )
{
v128_t A, B, C, D, E, F, G, H;
@@ -226,45 +231,64 @@ void sha256_4way_final_rounds( v128_t *state_out, const v128_t *data,
G = v128_load( state_mid + 6 );
H = v128_load( state_mid + 7 );

v128_t X_xor_Y, Y_xor_Z = v128_xor( G, H );
v128_t X_xor_Y, Y_xor_Z = v128_xor( F, G );

SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 3, 0 );
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 4, 0 );
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 5, 0 );
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 6, 0 );
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 7, 0 );
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 8, 0 );
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 9, 0 );
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 10, 0 );
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 11, 0 );
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 12, 0 );
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 13, 0 );
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 14, 0 );
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 15, 0 );
// round 3 part 2, add nonces
A = v128_add32( A, W[3] );
E = v128_add32( E, W[3] );

SHA256_4X32_ROUND( E, F, G, H, A, B, C, D, 4, 0 );
SHA256_4X32_ROUND_NOMSG( D, E, F, G, H, A, B, C, 5, 0 );
SHA256_4X32_ROUND_NOMSG( C, D, E, F, G, H, A, B, 6, 0 );
SHA256_4X32_ROUND_NOMSG( B, C, D, E, F, G, H, A, 7, 0 );
SHA256_4X32_ROUND_NOMSG( A, B, C, D, E, F, G, H, 8, 0 );
SHA256_4X32_ROUND_NOMSG( H, A, B, C, D, E, F, G, 9, 0 );
SHA256_4X32_ROUND_NOMSG( G, H, A, B, C, D, E, F, 10, 0 );
SHA256_4X32_ROUND_NOMSG( F, G, H, A, B, C, D, E, 11, 0 );
SHA256_4X32_ROUND_NOMSG( E, F, G, H, A, B, C, D, 12, 0 );
SHA256_4X32_ROUND_NOMSG( D, E, F, G, H, A, B, C, 13, 0 );
SHA256_4X32_ROUND_NOMSG( C, D, E, F, G, H, A, B, 14, 0 );
SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 15, 0 );

// update precalculated msg expansion with new nonce: W[3].
W[ 0] = X[ 0];
W[ 1] = X[ 1];
W[ 2] = v128_add32( X[ 2], SSG2_0( W[ 3] ) );
W[ 3] = v128_add32( X[ 3], W[ 3] );
W[ 4] = v128_add32( X[ 4], SSG2_1( W[ 2] ) );
W[ 5] = v128_add32( X[ 5], SSG2_1( W[ 3] ) );
W[ 6] = v128_add32( X[ 6], SSG2_1( W[ 4] ) );
W[ 7] = v128_add32( X[ 7], SSG2_1( W[ 5] ) );
W[ 8] = v128_add32( X[ 8], SSG2_1( W[ 6] ) );
W[ 9] = v128_add32( X[ 9], v128_add32( SSG2_1( W[ 7] ), W[ 2] ) );
W[10] = v128_add32( X[10], v128_add32( SSG2_1( W[ 8] ), W[ 3] ) );
W[11] = v128_add32( X[11], v128_add32( SSG2_1( W[ 9] ), W[ 4] ) );
W[12] = v128_add32( X[12], v128_add32( SSG2_1( W[10] ), W[ 5] ) );
W[13] = v128_add32( X[13], v128_add32( SSG2_1( W[11] ), W[ 6] ) );
W[14] = v128_add32( X[14], v128_add32( SSG2_1( W[12] ), W[ 7] ) );
W[15] = v128_add32( X[15], v128_add32( SSG2_1( W[13] ), W[ 8] ) );
W[ 4] = v128_add32( W[ 4], SSG2_1( W[ 2] ) );
W[ 5] = SSG2_1( W[ 3] );
W[ 6] = v128_add32( W[15], SSG2_1( W[ 4] ) );
W[ 7] = v128_add32( X[ 0], SSG2_1( W[ 5] ) );
W[ 8] = v128_add32( X[ 1], SSG2_1( W[ 6] ) );
W[ 9] = v128_add32( SSG2_1( W[ 7] ), W[ 2] );
W[10] = v128_add32( SSG2_1( W[ 8] ), W[ 3] );
W[11] = v128_add32( SSG2_1( W[ 9] ), W[ 4] );
W[12] = v128_add32( SSG2_1( W[10] ), W[ 5] );
W[13] = v128_add32( SSG2_1( W[11] ), W[ 6] );
W[14] = v128_add32( X[ 4], v128_add32( SSG2_1( W[12] ), W[ 7] ) );
W[15] = v128_add32( X[ 5], v128_add32( SSG2_1( W[13] ), W[ 8] ) );

SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
SHA256x4_MSG_EXPANSION( W );
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
SHA256x4_MSG_EXPANSION( W );
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 48 );
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 16 );

W[ 0] = v128_add32( X[ 6], v128_add32( SSG2_1( W[14] ), W[ 9] ) );
W[ 1] = SHA256_4X32_MEXP( W[15], W[10], W[ 2], W[ 1] );
W[ 2] = SHA256_4X32_MEXP( W[ 0], W[11], W[ 3], W[ 2] );
W[ 3] = SHA256_4X32_MEXP( W[ 1], W[12], W[ 4], W[ 3] );
W[ 4] = SHA256_4X32_MEXP( W[ 2], W[13], W[ 5], W[ 4] );
W[ 5] = SHA256_4X32_MEXP( W[ 3], W[14], W[ 6], W[ 5] );
W[ 6] = SHA256_4X32_MEXP( W[ 4], W[15], W[ 7], W[ 6] );
W[ 7] = SHA256_4X32_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] );
W[ 8] = SHA256_4X32_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] );
W[ 9] = SHA256_4X32_MEXP( W[ 7], W[ 2], W[10], W[ 9] );
W[10] = SHA256_4X32_MEXP( W[ 8], W[ 3], W[11], W[10] );
W[11] = SHA256_4X32_MEXP( W[ 9], W[ 4], W[12], W[11] );
W[12] = SHA256_4X32_MEXP( W[10], W[ 5], W[13], W[12] );
W[13] = SHA256_4X32_MEXP( W[11], W[ 6], W[14], W[13] );
W[14] = SHA256_4X32_MEXP( W[12], W[ 7], W[15], W[14] );
W[15] = SHA256_4X32_MEXP( W[13], W[ 8], W[ 0], W[15] );

SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
SHA256_4X32_MSG_EXPANSION( W );
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 48 );

A = v128_add32( A, v128_load( state_in ) );
B = v128_add32( B, v128_load( state_in + 1 ) );
@@ -285,10 +309,11 @@ void sha256_4way_final_rounds( v128_t *state_out, const v128_t *data,
v128_store( state_out + 7, H );
}
# if 0

// Working correctly but still slower
int sha256_4way_transform_le_short( v128_t *state_out, const v128_t *data,
int sha256_4x32_transform_le_short( v128_t *state_out, const v128_t *data,
const v128_t *state_in, const uint32_t *target )
{
v128_t A, B, C, D, E, F, G, H, T0, T1, T2;
@@ -308,38 +333,38 @@ int sha256_4way_transform_le_short( v128_t *state_out, const v128_t *data,
const v128_t IV7 = H;
const v128_t IV6 = G;

SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 0 );
SHA256x4_MSG_EXPANSION( W );
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
SHA256x4_MSG_EXPANSION( W );
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 0 );
SHA256_4X32_MSG_EXPANSION( W );
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
SHA256_4X32_MSG_EXPANSION( W );
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 32 );

W[ 0] = SHA2s_MEXP( W[14], W[ 9], W[ 1], W[ 0] );
W[ 1] = SHA2s_MEXP( W[15], W[10], W[ 2], W[ 1] );
W[ 2] = SHA2s_MEXP( W[ 0], W[11], W[ 3], W[ 2] );
W[ 3] = SHA2s_MEXP( W[ 1], W[12], W[ 4], W[ 3] );
W[ 4] = SHA2s_MEXP( W[ 2], W[13], W[ 5], W[ 4] );
W[ 5] = SHA2s_MEXP( W[ 3], W[14], W[ 6], W[ 5] );
W[ 6] = SHA2s_MEXP( W[ 4], W[15], W[ 7], W[ 6] );
W[ 7] = SHA2s_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] );
W[ 8] = SHA2s_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] );
W[ 9] = SHA2s_MEXP( W[ 7], W[ 2], W[10], W[ 9] );
W[10] = SHA2s_MEXP( W[ 8], W[ 3], W[11], W[10] );
W[11] = SHA2s_MEXP( W[ 9], W[ 4], W[12], W[11] );
W[12] = SHA2s_MEXP( W[10], W[ 5], W[13], W[12] );
W[ 0] = SHA256_4X32_MEXP( W[14], W[ 9], W[ 1], W[ 0] );
W[ 1] = SHA256_4X32_MEXP( W[15], W[10], W[ 2], W[ 1] );
W[ 2] = SHA256_4X32_MEXP( W[ 0], W[11], W[ 3], W[ 2] );
W[ 3] = SHA256_4X32_MEXP( W[ 1], W[12], W[ 4], W[ 3] );
W[ 4] = SHA256_4X32_MEXP( W[ 2], W[13], W[ 5], W[ 4] );
W[ 5] = SHA256_4X32_MEXP( W[ 3], W[14], W[ 6], W[ 5] );
W[ 6] = SHA256_4X32_MEXP( W[ 4], W[15], W[ 7], W[ 6] );
W[ 7] = SHA256_4X32_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] );
W[ 8] = SHA256_4X32_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] );
W[ 9] = SHA256_4X32_MEXP( W[ 7], W[ 2], W[10], W[ 9] );
W[10] = SHA256_4X32_MEXP( W[ 8], W[ 3], W[11], W[10] );
W[11] = SHA256_4X32_MEXP( W[ 9], W[ 4], W[12], W[11] );
W[12] = SHA256_4X32_MEXP( W[10], W[ 5], W[13], W[12] );

v128_t X_xor_Y, Y_xor_Z = v128_xor( B, C );

SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, 48 );
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, 48 );
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, 48 );
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 3, 48 );
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 4, 48 );
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 5, 48 );
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 6, 48 );
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 7, 48 );
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 8, 48 );
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 9, 48 );
SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 0, 48 );
SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 1, 48 );
SHA256_4X32_ROUND( G, H, A, B, C, D, E, F, 2, 48 );
SHA256_4X32_ROUND( F, G, H, A, B, C, D, E, 3, 48 );
SHA256_4X32_ROUND( E, F, G, H, A, B, C, D, 4, 48 );
SHA256_4X32_ROUND( D, E, F, G, H, A, B, C, 5, 48 );
SHA256_4X32_ROUND( C, D, E, F, G, H, A, B, 6, 48 );
SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 7, 48 );
SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 8, 48 );
SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 9, 48 );

T0 = v128_add32( v128_32( K256[58] ),
v128_add4_32( BSG2_1( C ), CHs( C, D, E ), W[10], F ) );
@@ -368,7 +393,7 @@ int sha256_4way_transform_le_short( v128_t *state_out, const v128_t *data,
F = v128_add32( T0, v128_add32( BSG2_0( G ), MAJs( G, H, A ) ) );

// round 61 part 1
W[13] = SHA2s_MEXP( W[11], W[ 6], W[14], W[13] );
W[13] = SHA256_4X32_MEXP( W[11], W[ 6], W[14], W[13] );
T0 = v128_add32( v128_32( K256[61] ),
v128_add4_32( BSG2_1( H ), CHs( H, A, B ), W[13], C ) );
G = v128_add32( G, T0 );
@@ -401,11 +426,11 @@ int sha256_4way_transform_le_short( v128_t *state_out, const v128_t *data,
C = v128_add32( T0, v128_add32( BSG2_0( D ), MAJs( D, E, F ) ) );

// rounds 62 & 63
W[14] = SHA2s_MEXP( W[12], W[ 7], W[15], W[14] );
W[15] = SHA2s_MEXP( W[13], W[ 8], W[ 0], W[15] );
W[14] = SHA256_4X32_MEXP( W[12], W[ 7], W[15], W[14] );
W[15] = SHA256_4X32_MEXP( W[13], W[ 8], W[ 0], W[15] );

SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 14, 48 );
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 15, 48 );
SHA256_4X32_ROUND( C, D, E, F, G, H, A, B, 14, 48 );
SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 15, 48 );

state_out[0] = v128_add32( state_in[0], A );
state_out[1] = v128_add32( state_in[1], B );
@@ -420,7 +445,7 @@ return 1;

#endif
void sha256_4way_init( sha256_4way_context *sc )
void sha256_4x32_init( sha256_4x32_context *sc )
{
sc->count_high = sc->count_low = 0;
sc->val[0] = v128_32( sha256_iv[0] );
@@ -433,7 +458,7 @@ void sha256_4way_init( sha256_4way_context *sc )
sc->val[7] = v128_32( sha256_iv[7] );
}

void sha256_4way_update( sha256_4way_context *sc, const void *data, size_t len )
void sha256_4x32_update( sha256_4x32_context *sc, const void *data, size_t len )
{
v128_t *vdata = (v128_t*)data;
size_t ptr;
@@ -454,7 +479,7 @@ void sha256_4way_update( sha256_4way_context *sc, const void *data, size_t len )
len -= clen;
if ( ptr == buf_size )
{
sha256_4way_transform_be( sc->val, sc->buf, sc->val );
sha256_4x32_transform_be( sc->val, sc->buf, sc->val );
ptr = 0;
}
clow = sc->count_low;
@@ -465,7 +490,7 @@ void sha256_4way_update( sha256_4way_context *sc, const void *data, size_t len )
}
}

void sha256_4way_close( sha256_4way_context *sc, void *dst )
void sha256_4x32_close( sha256_4x32_context *sc, void *dst )
{
unsigned ptr;
uint32_t low, high;
@@ -479,7 +504,7 @@ void sha256_4way_close( sha256_4way_context *sc, void *dst )
if ( ptr > pad )
{
v128_memset_zero( sc->buf + (ptr>>2), (buf_size - ptr) >> 2 );
sha256_4way_transform_be( sc->val, sc->buf, sc->val );
sha256_4x32_transform_be( sc->val, sc->buf, sc->val );
v128_memset_zero( sc->buf, pad >> 2 );
}
else
@@ -491,17 +516,17 @@ void sha256_4way_close( sha256_4way_context *sc, void *dst )

sc->buf[ pad >> 2 ] = v128_32( bswap_32( high ) );
sc->buf[( pad+4 ) >> 2 ] = v128_32( bswap_32( low ) );
sha256_4way_transform_be( sc->val, sc->buf, sc->val );
sha256_4x32_transform_be( sc->val, sc->buf, sc->val );

v128_block_bswap32( dst, sc->val );
}

void sha256_4way_full( void *dst, const void *data, size_t len )
void sha256_4x32_full( void *dst, const void *data, size_t len )
{
sha256_4way_context ctx;
sha256_4way_init( &ctx );
sha256_4way_update( &ctx, data, len );
sha256_4way_close( &ctx, dst );
sha256_4x32_context ctx;
sha256_4x32_init( &ctx );
sha256_4x32_update( &ctx, data, len );
sha256_4x32_close( &ctx, dst );
}

#if defined(__AVX2__)
@@ -97,6 +97,14 @@ void sha256_neon_x2sha_final_rounds( uint32_t *state_out_X,
#define sha256_prehash_3rounds sha256_neon_sha_prehash_3rounds
#define sha256_2x_final_rounds sha256_neon_x2sha_final_rounds

// generic API
#define sha256_transform_le sha256_neon_sha_transform_le
#define sha256_transform_be sha256_neon_sha_transform_be
#define sha256_2x_transform_le sha256_neon_x2sha_transform_le
#define sha256_2x_transform_be sha256_neon_x2sha_transform_be
#define sha256_prehash_3rounds sha256_neon_sha_prehash_3rounds
#define sha256_2x_final_rounds sha256_neon_x2sha_final_rounds

#else
// without HW acceleration...
#include "sph_sha2.h"
@@ -360,15 +360,17 @@ int scanhash_sha256d_8way( struct work *work, const uint32_t max_nonce,

#if defined(SHA256D_4WAY)

int scanhash_sha256d_4way( struct work *work, const uint32_t max_nonce,
int scanhash_sha256d_4x32( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
v128_t vdata[32] __attribute__ ((aligned (64)));
v128_t block[16] __attribute__ ((aligned (32)));
v128_t hash32[8] __attribute__ ((aligned (32)));
v128_t istate[8] __attribute__ ((aligned (32)));
v128_t mstate[8] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
v128_t vdata[32] __attribute__ ((aligned (64)));
v128_t block[16] __attribute__ ((aligned (32)));
v128_t hash32[8] __attribute__ ((aligned (32)));
v128_t iv[8] __attribute__ ((aligned (32)));
v128_t mhash1[8] __attribute__ ((aligned (32)));
v128_t mhash2[8] __attribute__ ((aligned (32)));
v128_t mexp_pre[8] __attribute__ ((aligned (32)));
uint32_t lhash[8] __attribute__ ((aligned (32)));
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
@@ -376,17 +378,16 @@ int scanhash_sha256d_4way( struct work *work, const uint32_t max_nonce,
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
uint32_t n = first_nonce;
v128_t *noncev = vdata + 19;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
const v128_t last_byte = v128_32( 0x80000000 );
const v128_t four = v128_32( 4 );

memset( block, 0, 16*4*4 );

for ( int i = 0; i < 19; i++ )
vdata[i] = v128_32( pdata[i] );

*noncev = v128_set32( n+ 3, n+ 2, n+1, n );

vdata[i] = v128_32( pdata[i] );
vdata[16+3] = v128_set32( n+3, n+2, n+1, n );
vdata[16+4] = last_byte;
v128_memset_zero( vdata+16 + 5, 10 );
vdata[16+15] = v128_32( 80*8 );
@@ -396,36 +397,39 @@ int scanhash_sha256d_4way( struct work *work, const uint32_t max_nonce,
block[15] = v128_32( 32*8 );

// initialize state
istate[0] = v128_32( sha256_iv[0] );
istate[1] = v128_32( sha256_iv[1] );
istate[2] = v128_32( sha256_iv[2] );
istate[3] = v128_32( sha256_iv[3] );
istate[4] = v128_32( sha256_iv[4] );
istate[5] = v128_32( sha256_iv[5] );
istate[6] = v128_32( sha256_iv[6] );
istate[7] = v128_32( sha256_iv[7] );
iv[0] = v128_32( sha256_iv[0] );
iv[1] = v128_32( sha256_iv[1] );
iv[2] = v128_32( sha256_iv[2] );
iv[3] = v128_32( sha256_iv[3] );
iv[4] = v128_32( sha256_iv[4] );
iv[5] = v128_32( sha256_iv[5] );
iv[6] = v128_32( sha256_iv[6] );
iv[7] = v128_32( sha256_iv[7] );

// hash first 64 bytes of data
sha256_4way_transform_le( mstate, vdata, istate );
sha256_4x32_transform_le( mhash1, vdata, iv );
sha256_4x32_prehash_3rounds( mhash2, mexp_pre, vdata + 16, mhash1 );

do
{
sha256_4way_transform_le( block, vdata+16, mstate );
sha256_4way_transform_le( hash32, block, istate );

v128_block_bswap32( hash32, hash32 );
sha256_4x32_final_rounds( block, vdata+16, mhash1, mhash2, mexp_pre );
// sha256_4x32_transform_le( block, vdata+16, mhash1 );
sha256_4x32_transform_le( hash32, block, iv );

for ( int lane = 0; lane < 4; lane++ )
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
{
extr_lane_4x32( lane_hash, hash32, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
if ( unlikely( bswap_32( hash32_d7[ lane ] ) <= targ32_d7 ) )
{
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
extr_lane_4x32( lhash, hash32, lane, 256 );
casti_v128( lhash, 0 ) = v128_bswap32( casti_v128( lhash, 0 ) );
casti_v128( lhash, 1 ) = v128_bswap32( casti_v128( lhash, 1 ) );
if ( likely( valid_hash( lhash, ptarget ) && !bench ) )
{
pdata[19] = n + lane;
submit_solution( work, lhash, mythr );
}
}
}
*noncev = v128_add32( *noncev, four );
vdata[16+3] = v128_add32( vdata[16+3], four );
n += 4;
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
pdata[19] = n;
@@ -7,15 +7,15 @@
#include "sph_sha2.h"

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define SHA256DT_16X64 1
#define SHA256DT_16X32 1
#elif defined(__x86_64__) && defined(__SHA__)
#define SHA256DT_X86_SHA256 1
#elif defined(__ARM_NEON) && defined(__ARM_FEATURE_SHA2)
#define SHA256DT_NEON_SHA256 1
#elif defined(__AVX2__)
#define SHA256DT_8X64 1
#define SHA256DT_8X32 1
#elif defined (__SSE2__) || defined(__ARM_NEON)
#define SHA256DT_4X64 1
#define SHA256DT_4X32 1
#endif
// else ref, should never happen

@@ -183,9 +183,9 @@ int scanhash_sha256dt_neon_x2sha( struct work *work, uint32_t max_nonce,
return 0;
}

#elif defined(SHA256DT_16X64)
#elif defined(SHA256DT_16X32)

int scanhash_sha256dt_16x64( struct work *work, const uint32_t max_nonce,
int scanhash_sha256dt_16x32( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
__m512i block[16] __attribute__ ((aligned (128)));
@@ -275,9 +275,9 @@ int scanhash_sha256dt_16x64( struct work *work, const uint32_t max_nonce,
return 0;
}

#elif defined(SHA256DT_8X64)
#elif defined(SHA256DT_8X32)

int scanhash_sha256dt_8x64( struct work *work, const uint32_t max_nonce,
int scanhash_sha256dt_8x32( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
__m256i vdata[32] __attribute__ ((aligned (64)));
@@ -355,16 +355,18 @@ int scanhash_sha256dt_8x64( struct work *work, const uint32_t max_nonce,
return 0;
}

#elif defined(SHA256DT_4X64)
#elif defined(SHA256DT_4X32)

int scanhash_sha256dt_4x64( struct work *work, const uint32_t max_nonce,
int scanhash_sha256dt_4x32( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
v128_t vdata[32] __attribute__ ((aligned (64)));
v128_t block[16] __attribute__ ((aligned (32)));
v128_t hash32[8] __attribute__ ((aligned (32)));
v128_t iv[8] __attribute__ ((aligned (32)));
v128_t mhash[8] __attribute__ ((aligned (32)));
v128_t vdata[32] __attribute__ ((aligned (64)));
v128_t block[16] __attribute__ ((aligned (32)));
v128_t hash32[8] __attribute__ ((aligned (32)));
v128_t iv[8] __attribute__ ((aligned (32)));
v128_t mhash1[8] __attribute__ ((aligned (32)));
v128_t mhash2[8] __attribute__ ((aligned (32)));
v128_t mexp_pre[8] __attribute__ ((aligned (32)));
uint32_t lhash[8] __attribute__ ((aligned (32)));
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
uint32_t *pdata = work->data;
@@ -373,26 +375,24 @@ int scanhash_sha256dt_4x64( struct work *work, const uint32_t max_nonce,
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
uint32_t n = first_nonce;
v128_t *noncev = vdata + 19;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
const v128_t last_byte = v128_32( 0x80000000 );
const v128_t four = v128_32( 4 );

memset( block, 0, 16*4*4 );

for ( int i = 0; i < 19; i++ )
vdata[i] = v128_32( pdata[i] );

*noncev = v128_set32( n+ 3, n+ 2, n+1, n );

vdata[i] = v128_32( pdata[i] );
vdata[16+3] = v128_set32( n+3, n+2, n+1, n );
vdata[16+4] = last_byte;
v128_memset_zero( vdata+16 + 5, 10 );
v128_memset_zero( vdata+16 + 5, 9 );
vdata[16+15] = v128_32( 0x480 );

block[ 8] = last_byte;
v128_memset_zero( block + 9, 6 );
v128_memset_zero( block + 9, 5 );
block[15] = v128_32( 0x300 );

// initialize state
iv[0] = v128_32( sha256dt_iv[0] );
iv[1] = v128_32( sha256dt_iv[1] );
iv[2] = v128_32( sha256dt_iv[2] );
@@ -402,62 +402,15 @@ int scanhash_sha256dt_4x64( struct work *work, const uint32_t max_nonce,
iv[6] = v128_32( sha256dt_iv[6] );
iv[7] = v128_32( sha256dt_iv[7] );

// hash first 64 bytes of data
sha256_4x32_transform_le( mhash, vdata, iv );

/*
uint32_t m1 [8] __attribute__ ((aligned (32)));
uint32_t h1 [8] __attribute__ ((aligned (32)));
uint32_t b1 [16] __attribute__ ((aligned (32)));
uint32_t e16 [16] __attribute__ ((aligned (32)));
uint32_t *m4 = (uint32_t*)&midstate;
uint32_t *h4 = (uint32_t*)hash32;

sha256_transform_le( m1, pdata, sha256dt_iv );

memcpy( e16, pdata + 16, 12 );
e16[3] = n;
e16[4] = 0x80000000;
memset( &e16[5], 0, 40 );
e16[15] = 0x480; // funky bit count

b1[8] = 0x80000000;
memset( &b1[9], 0, 24 );
b1[9] = b1[10] = b1[11] = b1[12] = b1[13] = b1[14] = 0;
b1[15] = 0x300; // bit count
*/
sha256_4x32_transform_le( mhash1, vdata, iv );
sha256_4x32_prehash_3rounds( mhash2, mexp_pre, vdata + 16, mhash1 );

do
{
sha256_4x32_transform_le( block, vdata+16, mhash );

//sha256_transform_le( b1, e16, m1 );

sha256_4x32_final_rounds( block, vdata+16, mhash1, mhash2, mexp_pre );
// sha256_4x32_transform_le( block, vdata+16, mhash1 );
sha256_4x32_transform_le( hash32, block, iv );

/*
sha256_transform_le( h1, b1, sha256dt_iv );

printf("final hash1: %08x %08x %08x %08x %08x %08x %08x %08x\n",
h1[0],h1[1],h1[2],h1[3],h1[4],h1[5],h1[6],h1[7]);
printf("final hash4: %08x %08x %08x %08x %08x %08x %08x %08x\n",
h4[0],h4[4],h4[8],h4[12],h4[16],h4[20],h4[24],h4[28]);

casti_v128( h1,0 ) = v128_bswap32( casti_v128( h1,0 ) );
casti_v128( h1,1 ) = v128_bswap32( casti_v128( h1,1 ) );
*/

// v128_block_bswap32( hash32, hash32 );

/*
printf("bswap hash1: %08x %08x %08x %08x %08x %08x %08x %08x\n",
h1[0],h1[1],h1[2],h1[3],h1[4],h1[5],h1[6],h1[7]);
printf("bswap hash4: %08x %08x %08x %08x %08x %08x %08x %08x\n",
h4[0],h4[4],h4[8],h4[12],h4[16],h4[20],h4[24],h4[28]);

exit(0);
*/

for ( int lane = 0; lane < 4; lane++ )
{
if ( unlikely( bswap_32( hash32_d7[ lane ] ) <= targ32_d7 ) )
@@ -472,7 +425,7 @@ exit(0);
}
}
}
*noncev = v128_add32( *noncev, four );
vdata[16+3] = v128_add32( vdata[16+3], four );
n += 4;
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
pdata[19] = n;
@@ -485,10 +438,10 @@ exit(0);
int scanhash_sha256dt_ref( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t block1a[16] __attribute__ ((aligned (32)));
uint32_t block2a[16] __attribute__ ((aligned (32)));
uint32_t hasha[8] __attribute__ ((aligned (32)));
uint32_t mstate[8] __attribute__ ((aligned (32)));
uint32_t block1[16] __attribute__ ((aligned (32)));
uint32_t block2[16] __attribute__ ((aligned (32)));
uint32_t hash32[8] __attribute__ ((aligned (32)));
uint32_t mstate[8] __attribute__ ((aligned (32)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
@@ -497,37 +450,40 @@ int scanhash_sha256dt_ref( struct work *work, uint32_t max_nonce,
const int thr_id = mythr->id;
const bool bench = opt_benchmark;

memset( block1, 0, 64 );
memset( block2, 0, 64 );

// hash first 64 byte block of data
sha256_transform_le( mstate, pdata, sha256dt_iv );

// fill & pad second block without nonce
memcpy( block1a, pdata + 16, 12 );
block1a[ 3] = 0;
block1a[ 4] = 0x80000000;
memset( block1a + 5, 0, 40 );
block1a[15] = 0x480; // funky bit count
memcpy( block1, pdata + 16, 12 );
block1[ 3] = n;
block1[ 4] = 0x80000000;
memset( block1 + 5, 0, 40 );
block1[15] = 0x480; // funky bit count

// Pad third block
block2a[ 8] = 0x80000000;
memset( block2a + 9, 0, 24 );
block2a[15] = 0x300; // bit count
block2[ 8] = 0x80000000;
memset( block2 + 9, 0, 24 );
block2[15] = 0x300; // bit count

do
{
// Insert nonce for second block
block1a[3] = n;
sha256_transform_le( block2a, block1a, mstate );
block1[3] = n;
sha256_transform_le( block2, block1, mstate );

sha256_transform_le( hasha, block2a, sha256dt_iv );
sha256_transform_le( hash32, block2, sha256dt_iv );

if ( unlikely( bswap_32( hasha[7] ) <= ptarget[7] ) )
if ( unlikely( bswap_32( hash32[7] ) <= ptarget[7] ) )
{
casti_v128( hasha, 0 ) = v128_bswap32( casti_v128( hasha, 0 ) );
casti_v128( hasha, 1 ) = v128_bswap32( casti_v128( hasha, 1 ) );
if ( likely( valid_hash( hasha, ptarget ) && !bench ) )
casti_v128( hash32, 0 ) = v128_bswap32( casti_v128( hash32, 0 ) );
casti_v128( hash32, 1 ) = v128_bswap32( casti_v128( hash32, 1 ) );
if ( likely( valid_hash( hash32, ptarget ) && !bench ) )
{
pdata[19] = n;
submit_solution( work, hash32, mythr );
submit_solution( work, hasha, mythr );
}
}
n += 1;
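The two length fields above are what make sha256dt non-standard: 0x480 and 0x300 are not what conforming SHA-256 padding would write for 80- and 32-byte messages (that would be 0x280 and 0x100). A small self-contained check of the arithmetic, purely illustrative and not part of the commit; the names len1 and len2 are mine.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    // Standard SHA-256 padding encodes the message length in bits.
    // sha256dt instead encodes the length as if each message were
    // 64 bytes longer than the data actually hashed.
    const uint32_t len1 = 0x480;   // second block of the first hash
    const uint32_t len2 = 0x300;   // final block of the second hash

    assert( len1 == ( 80 + 64 ) * 8 );   // 1152 bits, vs 80*8 = 0x280
    assert( len2 == ( 32 + 64 ) * 8 );   //  768 bits, vs 32*8 = 0x100
    return 0;
}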
@@ -543,18 +499,18 @@ int scanhash_sha256dt_ref( struct work *work, uint32_t max_nonce,
bool register_sha256dt_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT | NEON_OPT;
#if defined(SHA256DT_16X64)
gate->scanhash = (void*)&scanhash_sha256dt_16x64;
#if defined(SHA256DT_16X32)
gate->scanhash = (void*)&scanhash_sha256dt_16x32;
#elif defined(SHA256DT_X86_SHA256)
gate->optimizations = SHA_OPT;
gate->scanhash = (void*)&scanhash_sha256dt_x86_x2sha;
#elif defined(SHA256DT_NEON_SHA256)
gate->optimizations = SHA_OPT;
gate->scanhash = (void*)&scanhash_sha256dt_neon_x2sha;
#elif defined(SHA256DT_8X64)
gate->scanhash = (void*)&scanhash_sha256dt_8x64;
#elif defined(SHA256DT_4X64)
gate->scanhash = (void*)&scanhash_sha256dt_4x64;
#elif defined(SHA256DT_8X32)
gate->scanhash = (void*)&scanhash_sha256dt_8x32;
#elif defined(SHA256DT_4X32)
gate->scanhash = (void*)&scanhash_sha256dt_4x32;
#else
gate->scanhash = (void*)&scanhash_sha256dt_ref;
#endif
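register_sha256dt_algo picks one scanhash at build time from whatever SIMD widths the compiler enables, falling back to the reference routine. A minimal sketch of the same pattern outside the algo_gate machinery, assuming nothing about cpuminer-opt internals; the my_scanhash_* names are hypothetical placeholders.

typedef int (*scanhash_fn)( void );

static int my_scanhash_avx2( void ) { return 8; }  // 8 lanes per pass
static int my_scanhash_sse2( void ) { return 4; }  // 4 lanes per pass
static int my_scanhash_ref( void )  { return 1; }  // one nonce at a time

// Resolved entirely by the preprocessor: the widest supported path wins,
// the scalar reference implementation is the fallback.
static scanhash_fn select_scanhash( void )
{
#if defined(__AVX2__)
    return my_scanhash_avx2;
#elif defined(__SSE2__)
    return my_scanhash_sse2;
#else
    return my_scanhash_ref;
#endif
}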
@@ -372,14 +372,14 @@ int scanhash_sha256t_8way( struct work *work, const uint32_t max_nonce,
int scanhash_sha256t_4way( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
v128_t vdata[32] __attribute__ ((aligned (64)));
v128_t block[16] __attribute__ ((aligned (32)));
v128_t hash32[8] __attribute__ ((aligned (32)));
v128_t istate[8] __attribute__ ((aligned (32)));
v128_t mstate[8] __attribute__ ((aligned (32)));
// v128_t mstate2[8] __attribute__ ((aligned (32)));
// v128_t mexp_pre[8] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
v128_t vdata[32] __attribute__ ((aligned (64)));
v128_t block[16] __attribute__ ((aligned (32)));
v128_t hash32[8] __attribute__ ((aligned (32)));
v128_t iv[8] __attribute__ ((aligned (32)));
v128_t mhash1[8] __attribute__ ((aligned (32)));
v128_t mhash2[8] __attribute__ ((aligned (32)));
v128_t mexp_pre[8] __attribute__ ((aligned (32)));
uint32_t lhash[8] __attribute__ ((aligned (32)));
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
@@ -387,62 +387,59 @@ int scanhash_sha256t_4way( struct work *work, const uint32_t max_nonce,
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
uint32_t n = first_nonce;
v128_t *noncev = vdata + 19;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
const v128_t last_byte = v128_32( 0x80000000 );
const v128_t four = v128_32( 4 );

memset( block, 0, 16*4*4 );

for ( int i = 0; i < 19; i++ )
vdata[i] = v128_32( pdata[i] );

*noncev = v128_set32( n+ 3, n+ 2, n+1, n );

vdata[i] = v128_32( pdata[i] );
vdata[16+3] = v128_set32( n+3, n+2, n+1, n );
vdata[16+4] = last_byte;
v128_memset_zero( vdata+16 + 5, 10 );
vdata[16+15] = v128_32( 80*8 ); // bit count
vdata[16+15] = v128_32( 80*8 );

block[ 8] = last_byte;
v128_memset_zero( block + 9, 6 );
block[15] = v128_32( 32*8 ); // bit count
block[15] = v128_32( 32*8 );

// initialize state
istate[0] = v128_32( sha256_iv[0] );
istate[1] = v128_32( sha256_iv[1] );
istate[2] = v128_32( sha256_iv[2] );
istate[3] = v128_32( sha256_iv[3] );
istate[4] = v128_32( sha256_iv[4] );
istate[5] = v128_32( sha256_iv[5] );
istate[6] = v128_32( sha256_iv[6] );
istate[7] = v128_32( sha256_iv[7] );
iv[0] = v128_32( sha256_iv[0] );
iv[1] = v128_32( sha256_iv[1] );
iv[2] = v128_32( sha256_iv[2] );
iv[3] = v128_32( sha256_iv[3] );
iv[4] = v128_32( sha256_iv[4] );
iv[5] = v128_32( sha256_iv[5] );
iv[6] = v128_32( sha256_iv[6] );
iv[7] = v128_32( sha256_iv[7] );

// hash first 64 bytes of data
sha256_4way_transform_le( mstate, vdata, istate );

// sha256_4way_prehash_3rounds( mstate2, mexp_pre, vdata + 16, mstate1 );
sha256_4x32_transform_le( mhash1, vdata, iv );
sha256_4x32_prehash_3rounds( mhash2, mexp_pre, vdata + 16, mhash1 );
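The prehash works because the nonce is the only part of the second block that changes between passes, and it sits in message word 3; SHA-256 round t (for t < 16) consumes message word W[t], so rounds 0..2 never see the nonce and can be computed once outside the loop. A compilable layout note written by the editor for illustration; the W_* names are not from the source.

#include <stdint.h>

// Message-word layout of the second 64-byte block as filled above
// (one 32-bit word per slot, four lanes per vector).
enum second_block_words
{
    W_DATA0 = 0, W_DATA1 = 1, W_DATA2 = 2,  // tail of the 80-byte header
    W_NONCE = 3,                            // per-lane nonce, bumped by 4 each pass
    W_PAD   = 4,                            // 0x80000000 terminator
    // words 5..14 are zero
    W_LEN   = 15                            // 80*8 = 640-bit length field
};

// Rounds 0..2 use only W[0]..W[2] plus the midstate, so
// sha256_4x32_prehash_3rounds can run once per work unit and
// sha256_4x32_final_rounds redoes only the nonce-dependent remainder.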
do
{
// sha256_4way_final_rounds( block, vdata+16, mstate1, mstate2,
// mexp_pre );
sha256_4way_transform_le( block, vdata+16, mstate );
sha256_4way_transform_le( block, block, istate );
sha256_4way_transform_le( hash32, block, istate );
sha256_4x32_final_rounds( block, vdata+16, mhash1, mhash2, mexp_pre );
sha256_4way_transform_le( block, block, iv );
sha256_4way_transform_le( hash32, block, iv );

v128_block_bswap32( hash32, hash32 );
for ( int lane = 0; lane < 4; lane++ )
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
for ( int lane = 0; lane < 4; lane++ )
{
if ( unlikely( bswap_32( hash32_d7[ lane ] ) <= targ32_d7 ) )
{
extr_lane_4x32( lane_hash, hash32, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
extr_lane_4x32( lhash, hash32, lane, 256 );
casti_v128( lhash, 0 ) = v128_bswap32( casti_v128( lhash, 0 ) );
casti_v128( lhash, 1 ) = v128_bswap32( casti_v128( lhash, 1 ) );
if ( likely( valid_hash( lhash, ptarget ) && !bench ) )
{
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
submit_solution( work, lhash, mythr );
}
}
*noncev = v128_add32( *noncev, four );
n += 4;
}
vdata[16+3] = v128_add32( vdata[16+3], four );
n += 4;
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
pdata[19] = n;
*hashes_done = n - first_nonce;
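A note on the nonce bookkeeping in the loop above: the lane vector starts at {n, n+1, n+2, n+3} and advances by four per pass, so when lane L passes the target, the scalar nonce to report is n + L. A tiny sketch of that arithmetic; lane_nonce is a hypothetical helper, not project code.

#include <stdint.h>

// Lane L of pass P hashes nonce first_nonce + 4*P + L, which matches
// pdata[19] = n + lane in the solution path above.
static inline uint32_t lane_nonce( uint32_t first_nonce, uint32_t pass,
                                   uint32_t lane )
{
    return first_nonce + 4 * pass + lane;
}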
@@ -873,20 +873,20 @@ void sha512_4x64_ctx( sha512_4x64_context *sc, void *dst, const void *data,
// SHA512 2 way 64 SSE2 or NEON

#define BSG5_0_2x64( x ) v128_xor3( v128_ror64( x, 28 ), \
v128_ror64( x, 34 ), \
v128_ror64( x, 39 ) )
v128_ror64( x, 34 ), \
v128_ror64( x, 39 ) )

#define BSG5_1_2x64( x ) v128_xor3( v128_ror64( x, 14 ), \
v128_ror64( x, 18 ), \
v128_ror64( x, 41 ) )
v128_ror64( x, 18 ), \
v128_ror64( x, 41 ) )

#define SSG5_0_2x64( x ) v128_xor3( v128_ror64( x, 1 ), \
v128_ror64( x, 8 ), \
v128_sr64( x, 7 ) )
v128_ror64( x, 8 ), \
v128_sr64( x, 7 ) )

#define SSG5_1_2x64( x ) v128_xor3( v128_ror64( x, 19 ), \
v128_ror64( x, 61 ), \
v128_sr64( x, 6 ) )
v128_ror64( x, 61 ), \
v128_sr64( x, 6 ) )

#define CH_2x64(X, Y, Z) \
v128_xor( v128_and( v128_xor( Y, Z ), X ), Z )
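For reference, these 2-way macros are lane-wise versions of the SHA-512 big-sigma, small-sigma and Ch functions from FIPS 180-4. A scalar sketch of the same functions for comparison (editor's illustration, not code from this commit; the lowercase names and rotr64 helper are mine).

#include <stdint.h>

static inline uint64_t rotr64( uint64_t x, unsigned n )
{ return ( x >> n ) | ( x << ( 64 - n ) ); }

static inline uint64_t bsg5_0( uint64_t x )   // Sigma0
{ return rotr64( x, 28 ) ^ rotr64( x, 34 ) ^ rotr64( x, 39 ); }

static inline uint64_t bsg5_1( uint64_t x )   // Sigma1
{ return rotr64( x, 14 ) ^ rotr64( x, 18 ) ^ rotr64( x, 41 ); }

static inline uint64_t ssg5_0( uint64_t x )   // sigma0
{ return rotr64( x, 1 ) ^ rotr64( x, 8 ) ^ ( x >> 7 ); }

static inline uint64_t ssg5_1( uint64_t x )   // sigma1
{ return rotr64( x, 19 ) ^ rotr64( x, 61 ) ^ ( x >> 6 ); }

static inline uint64_t ch( uint64_t x, uint64_t y, uint64_t z )
{ return ( ( y ^ z ) & x ) ^ z; }   // same algebraic form as CH_2x64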
@@ -53,7 +53,6 @@
#include <stdlib.h>
#include <string.h>
#include "algo/sha/hmac-sha256-hash.h"
#include "algo/sha/hmac-sha256-hash-4way.h"
#include "yespower.h"
#include "yespower-platform.c"