mirror of https://github.com/JayDDee/cpuminer-opt.git

v23.5
@@ -46,7 +46,7 @@
 #endif
 
 #ifdef __GNUC__
-#if defined(NOASM) || defined(__arm__)
+#if defined(NOASM) || defined(__arm__) || defined(__aarch64__)
 #define ASM 0
 #else
 #define ASM 1
@@ -919,14 +919,14 @@ void scrypt_core_simd128_4way( v128_t *X, v128_t *V, const uint32_t N )
    x16[1] = 32 * ( x16[1] & ( N-1) );
    x16[2] = 32 * ( x16[2] & ( N-1) );
    x16[3] = 32 * ( x16[3] & ( N-1) );
-   m128_ovly *v = (m128_ovly*)V;
+   v128_ovly *v = (v128_ovly*)V;
 
    for( int i = 0; i < 32; i++ )
    {
-      X[i] = v128_xor( X[i], v128_set_32( v[ x16[3] + i ].u32[3],
-                                          v[ x16[2] + i ].u32[2],
-                                          v[ x16[1] + i ].u32[1],
-                                          v[ x16[0] + i ].u32[0] ) );
+      X[i] = v128_xor( X[i], v128_set32( v[ x16[3] + i ].u32[3],
+                                         v[ x16[2] + i ].u32[2],
+                                         v[ x16[1] + i ].u32[1],
+                                         v[ x16[0] + i ].u32[0] ) );
    }
 
    salsa8_simd128_4way( &X[ 0], &X[16] );
@@ -1995,7 +1995,7 @@ void scrypt_core_simd128_2way_3buf( uint64_t *X, uint64_t *V,
 
 #endif // AVX2
 
-#if defined(__SSE2__) // required and assumed
+#if defined(__SSE2__) || defined(__ARM_NEON)
 
 // Simple 4 way parallel.
 // Tested OK
@@ -2060,18 +2060,18 @@ void scrypt_core_4way( v128_t *X, v128_t *V, const uint32_t N )
    }
    for ( int n = 0; n < N; n++ )
    {
-      m128_ovly *vptr[4];
-      m128_ovly *x16 = (m128_ovly*)(&X[16]);
+      v128_ovly *vptr[4];
+      v128_ovly *x16 = (v128_ovly*)(&X[16]);
 
       for ( int l = 0; l < 4; l++ )
       {
         uint32_t xl = (*x16).u32[l];
-        vptr[l] = (m128_ovly*)( &V[ 32 * ( xl & ( N-1 ) ) ] );
+        vptr[l] = (v128_ovly*)( &V[ 32 * ( xl & ( N-1 ) ) ] );
       }
 
       for ( int i = 0; i < 32; i++ )
       {
-         m128_ovly v;
+         v128_ovly v;
         for ( int l = 0; l < 4; l++ )
            v.u32[l] = ( *(vptr[l] +i ) ) .u32[l];
         X[i] = v128_xor( X[i], v.m128 );
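For orientation, a scalar sketch of what each of those four lanes computes: plain scrypt's second loop picks a block index j = X[16] mod N, XORs scratchpad block V[j] into the state, then remixes. The 4-way routine above simply runs four independent states in the 32-bit lanes of each vector, which is why every lane derives its own index. (salsa8_pair below is a stand-in for the pair of salsa8 calls; it is not a function in this file.)

#include <stdint.h>
#include <string.h>

extern void salsa8_pair( uint32_t *X );   // stand-in for the two salsa8 mixes

// One lane's view of scrypt_core: X is 32 words of state, V is N blocks of 32 words.
static void scrypt_core_scalar_sketch( uint32_t *X, uint32_t *V, uint32_t N )
{
   for ( uint32_t n = 0; n < N; n++ )   // fill the scratchpad sequentially
   {
      memcpy( &V[ n*32 ], X, 32 * sizeof(uint32_t) );
      salsa8_pair( X );
   }
   for ( uint32_t n = 0; n < N; n++ )   // then read it back in a data-dependent order
   {
      uint32_t j = X[16] & ( N - 1 );   // N is a power of two
      for ( int i = 0; i < 32; i++ )
         X[i] ^= V[ j*32 + i ];
      salsa8_pair( X );
   }
}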
@@ -2195,12 +2195,12 @@ static void salsa8_simd128( uint32_t *b, const uint32_t * const c)
 
 #else  // SSE2
 
-   m128_ovly y[4], z[4];
+   v128_ovly y[4], z[4];
 
-   X0 = v128_set_32( b[15], b[10], b[ 5], b[ 0] );
-   X1 = v128_set_32( b[ 3], b[14], b[ 9], b[ 4] );
-   X2 = v128_set_32( b[ 7], b[ 2], b[13], b[ 8] );
-   X3 = v128_set_32( b[11], b[ 6], b[ 1], b[12] );
+   X0 = v128_set32( b[15], b[10], b[ 5], b[ 0] );
+   X1 = v128_set32( b[ 3], b[14], b[ 9], b[ 4] );
+   X2 = v128_set32( b[ 7], b[ 2], b[13], b[ 8] );
+   X3 = v128_set32( b[11], b[ 6], b[ 1], b[12] );
 
    SALSA_8ROUNDS_FINAL_SIMD128;
 
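The index patterns in those set32 calls are not arbitrary: ( 0, 5, 10, 15 ), ( 4, 9, 14, 3 ), ( 8, 13, 2, 7 ) and ( 12, 1, 6, 11 ) gather the four diagonals of Salsa20's 4x4 state matrix, the usual SIMD layout that lets each quarter-round step work on whole 128-bit vectors, with word shuffles between rounds to realign the columns.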
@@ -2257,7 +2257,11 @@ void scrypt_core_simd128( uint32_t *X, uint32_t *V, const uint32_t N )
    for ( int n = 0; n < N; n++ )
    {
       for ( int i = 0; i < 8; i++ )
+#if defined(__SSE4_1__)
          _mm_stream_si128( (v128_t*)V + n*8 + i, casti_v128( X, i ) );
+#else
+         v128_store( (v128_t*)V + n*8 + i, casti_v128( X, i ) );
+#endif
 
       salsa8_simd128( &X[ 0], &X[16] );
       salsa8_simd128( &X[16], &X[ 0] );
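A note on the new guard above: the scratchpad block is written once here and not read again until the second loop, so a non-temporal store avoids polluting the cache on targets that take the first branch. A self-contained sketch of the same split using raw intrinsics (the v128_t/casti_v128 wrappers are the miner's own; this helper name is made up):

#include <emmintrin.h>   // _mm_stream_si128 / _mm_store_si128
#include <stdint.h>

// Illustrative helper: store one 16-byte chunk of state into scratchpad block n.
static inline void store_scratch_chunk( __m128i *V, const __m128i *X,
                                        uint32_t n, int i )
{
#if defined(__SSE4_1__)
   _mm_stream_si128( V + n*8 + i, X[i] );   // non-temporal: bypass the cache
#else
   _mm_store_si128( V + n*8 + i, X[i] );    // plain aligned store
#endif
}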
@@ -2299,18 +2303,42 @@ static inline void salsa_simd128_shuffle_2buf( uint32_t *xa, uint32_t *xb )
    XB[2] = _mm_blend_epi16( t0, t2, 0x0f );
    XB[3] = _mm_blend_epi16( t1, t3, 0xc3 );
 
-#else  // SSE2
+#elif defined(__SSE2__) || defined(__ARM_NEON)
 
+/*
+   const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
+   const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
+   const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
+
+   v128_t t0 = v128_blendv( XA[0], XA[1], mask_cc );
+   v128_t t1 = v128_blendv( XA[1], XA[0], mask_cc );
+   v128_t t2 = v128_blendv( XA[2], XA[3], mask_cc );
+   v128_t t3 = v128_blendv( XA[3], XA[2], mask_cc );
+   XA[0] = v128_blendv( t0, t2, mask_f0 );
+   XA[1] = v128_blendv( t1, t3, mask_3c );
+   XA[2] = v128_blendv( t2, t0, mask_f0 );
+   XA[3] = v128_blendv( t3, t1, mask_3c );
+   t0 = v128_blendv( XB[0], XB[1], mask_cc );
+   t1 = v128_blendv( XB[1], XB[0], mask_cc );
+   t2 = v128_blendv( XB[2], XB[3], mask_cc );
+   t3 = v128_blendv( XB[3], XB[2], mask_cc );
+   XB[0] = v128_blendv( t0, t2, mask_f0 );
+   XB[1] = v128_blendv( t1, t3, mask_3c );
+   XB[2] = v128_blendv( t2, t0, mask_f0 );
+   XB[3] = v128_blendv( t3, t1, mask_3c );
+*/
+
+
    v128_t YA0, YA1, YA2, YA3, YB0, YB1, YB2, YB3;
 
-   YA0 = v128_set_32( xa[15], xa[10], xa[ 5], xa[ 0] );
-   YB0 = v128_set_32( xb[15], xb[10], xb[ 5], xb[ 0] );
-   YA1 = v128_set_32( xa[ 3], xa[14], xa[ 9], xa[ 4] );
-   YB1 = v128_set_32( xb[ 3], xb[14], xb[ 9], xb[ 4] );
-   YA2 = v128_set_32( xa[ 7], xa[ 2], xa[13], xa[ 8] );
-   YB2 = v128_set_32( xb[ 7], xb[ 2], xb[13], xb[ 8] );
-   YA3 = v128_set_32( xa[11], xa[ 6], xa[ 1], xa[12] );
-   YB3 = v128_set_32( xb[11], xb[ 6], xb[ 1], xb[12] );
+   YA0 = v128_set32( xa[15], xa[10], xa[ 5], xa[ 0] );
+   YB0 = v128_set32( xb[15], xb[10], xb[ 5], xb[ 0] );
+   YA1 = v128_set32( xa[ 3], xa[14], xa[ 9], xa[ 4] );
+   YB1 = v128_set32( xb[ 3], xb[14], xb[ 9], xb[ 4] );
+   YA2 = v128_set32( xa[ 7], xa[ 2], xa[13], xa[ 8] );
+   YB2 = v128_set32( xb[ 7], xb[ 2], xb[13], xb[ 8] );
+   YA3 = v128_set32( xa[11], xa[ 6], xa[ 1], xa[12] );
+   YB3 = v128_set32( xb[11], xb[ 6], xb[ 1], xb[12] );
 
    XA[0] = YA0;
    XB[0] = YB0;
@@ -2349,9 +2377,32 @@ static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
    XB[2] = _mm_blend_epi16( t1, t3, 0xcc );
    XB[3] = _mm_blend_epi16( t1, t3, 0x33 );
 
-#else  // SSE2
+#elif defined(__SSE2__) || defined(__ARM_NEON)
 
-   m128_ovly ya[4], za[4], yb[4], zb[4];
+/*
+   const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
+   const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
+   const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
+
+   v128_t t0 = v128_blendv( XA[0], XA[2], mask_f0 );
+   v128_t t1 = v128_blendv( XA[2], XA[0], mask_f0 );
+   v128_t t2 = v128_blendv( XA[1], XA[3], mask_3c );
+   v128_t t3 = v128_blendv( XA[3], XA[1], mask_3c );
+   XA[0] = v128_blendv( t0, t2, mask_cc );
+   XA[1] = v128_blendv( t1, t3, mask_cc );
+   XA[2] = v128_blendv( t2, t0, mask_cc );
+   XA[3] = v128_blendv( t3, t1, mask_cc );
+   t0 = v128_blendv( XB[0], XB[2], mask_f0 );
+   t1 = v128_blendv( XB[1], XB[3], mask_3c );
+   t2 = v128_blendv( XB[2], XB[0], mask_f0 );
+   t3 = v128_blendv( XB[3], XB[1], mask_3c );
+   XB[0] = v128_blendv( t0, t2, mask_cc );
+   XB[1] = v128_blendv( t1, t3, mask_cc );
+   XB[2] = v128_blendv( t2, t0, mask_cc );
+   XB[3] = v128_blendv( t3, t1, mask_cc );
+*/
+
+   v128_ovly ya[4], za[4], yb[4], zb[4];
 
    ya[0].m128 = XA[0];
    yb[0].m128 = XB[0];
@@ -2406,7 +2457,8 @@ static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
    XB[2] = zb[2].m128;
    XA[3] = za[3].m128;
    XB[3] = zb[3].m128;
 
 
+
 #endif
 }
@@ -2586,22 +2638,54 @@ static inline void salsa_simd128_shuffle_3buf( uint32_t *xa, uint32_t *xb,
    XC[2] = _mm_blend_epi16( t0, t2, 0x0f );
    XC[3] = _mm_blend_epi16( t1, t3, 0xc3 );
 
-#else  // SSE2
+#elif defined(__SSE2__) || defined(__ARM_NEON)
 
+/*
+   const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
+   const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
+   const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
+
+   v128_t t0 = v128_blendv( XA[0], XA[1], mask_cc );
+   v128_t t1 = v128_blendv( XA[1], XA[0], mask_cc );
+   v128_t t2 = v128_blendv( XA[2], XA[3], mask_cc );
+   v128_t t3 = v128_blendv( XA[3], XA[2], mask_cc );
+   XA[0] = v128_blendv( t0, t2, mask_f0 );
+   XA[1] = v128_blendv( t1, t3, mask_3c );
+   XA[2] = v128_blendv( t2, t0, mask_f0 );
+   XA[3] = v128_blendv( t3, t1, mask_3c );
+   t0 = v128_blendv( XB[0], XB[1], mask_cc );
+   t1 = v128_blendv( XB[1], XB[0], mask_cc );
+   t2 = v128_blendv( XB[2], XB[3], mask_cc );
+   t3 = v128_blendv( XB[3], XB[2], mask_cc );
+   XB[0] = v128_blendv( t0, t2, mask_f0 );
+   XB[1] = v128_blendv( t1, t3, mask_3c );
+   XB[2] = v128_blendv( t2, t0, mask_f0 );
+   XB[3] = v128_blendv( t3, t1, mask_3c );
+   t0 = v128_blendv( XC[0], XC[1], mask_cc );
+   t1 = v128_blendv( XC[1], XC[0], mask_cc );
+   t2 = v128_blendv( XC[2], XC[3], mask_cc );
+   t3 = v128_blendv( XC[3], XC[2], mask_cc );
+   XC[0] = v128_blendv( t0, t2, mask_f0 );
+   XC[1] = v128_blendv( t1, t3, mask_3c );
+   XC[2] = v128_blendv( t2, t0, mask_f0 );
+   XC[3] = v128_blendv( t3, t1, mask_3c );
+*/
+
+
    v128_t YA0, YA1, YA2, YA3, YB0, YB1, YB2, YB3, YC0, YC1, YC2, YC3;
 
-   YA0 = v128_set_32( xa[15], xa[10], xa[ 5], xa[ 0] );
-   YB0 = v128_set_32( xb[15], xb[10], xb[ 5], xb[ 0] );
-   YC0 = v128_set_32( xc[15], xc[10], xc[ 5], xc[ 0] );
-   YA1 = v128_set_32( xa[ 3], xa[14], xa[ 9], xa[ 4] );
-   YB1 = v128_set_32( xb[ 3], xb[14], xb[ 9], xb[ 4] );
-   YC1 = v128_set_32( xc[ 3], xc[14], xc[ 9], xc[ 4] );
-   YA2 = v128_set_32( xa[ 7], xa[ 2], xa[13], xa[ 8] );
-   YB2 = v128_set_32( xb[ 7], xb[ 2], xb[13], xb[ 8] );
-   YC2 = v128_set_32( xc[ 7], xc[ 2], xc[13], xc[ 8] );
-   YA3 = v128_set_32( xa[11], xa[ 6], xa[ 1], xa[12] );
-   YB3 = v128_set_32( xb[11], xb[ 6], xb[ 1], xb[12] );
-   YC3 = v128_set_32( xc[11], xc[ 6], xc[ 1], xc[12] );
+   YA0 = v128_set32( xa[15], xa[10], xa[ 5], xa[ 0] );
+   YB0 = v128_set32( xb[15], xb[10], xb[ 5], xb[ 0] );
+   YC0 = v128_set32( xc[15], xc[10], xc[ 5], xc[ 0] );
+   YA1 = v128_set32( xa[ 3], xa[14], xa[ 9], xa[ 4] );
+   YB1 = v128_set32( xb[ 3], xb[14], xb[ 9], xb[ 4] );
+   YC1 = v128_set32( xc[ 3], xc[14], xc[ 9], xc[ 4] );
+   YA2 = v128_set32( xa[ 7], xa[ 2], xa[13], xa[ 8] );
+   YB2 = v128_set32( xb[ 7], xb[ 2], xb[13], xb[ 8] );
+   YC2 = v128_set32( xc[ 7], xc[ 2], xc[13], xc[ 8] );
+   YA3 = v128_set32( xa[11], xa[ 6], xa[ 1], xa[12] );
+   YB3 = v128_set32( xb[11], xb[ 6], xb[ 1], xb[12] );
+   YC3 = v128_set32( xc[11], xc[ 6], xc[ 1], xc[12] );
 
    XA[0] = YA0;
    XB[0] = YB0;
@@ -2616,6 +2700,7 @@ static inline void salsa_simd128_shuffle_3buf( uint32_t *xa, uint32_t *xb,
    XB[3] = YB3;
    XC[3] = YC3;
 
+
 #endif
 }
@@ -2653,9 +2738,40 @@ static inline void salsa_simd128_unshuffle_3buf( uint32_t* xa, uint32_t* xb,
    XC[2] = _mm_blend_epi16( t1, t3, 0xcc );
    XC[3] = _mm_blend_epi16( t1, t3, 0x33 );
 
-#else  // SSE2
+#elif defined(__SSE2__) || defined(__ARM_NEON)
 
-   m128_ovly ya[4], za[4], yb[4], zb[4], yc[4], zc[4];
+/*
+   const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
+   const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
+   const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
+
+   v128_t t0 = v128_blendv( XA[0], XA[2], mask_f0 );
+   v128_t t1 = v128_blendv( XA[2], XA[0], mask_f0 );
+   v128_t t2 = v128_blendv( XA[1], XA[3], mask_3c );
+   v128_t t3 = v128_blendv( XA[3], XA[1], mask_3c );
+   XA[0] = v128_blendv( t0, t2, mask_cc );
+   XA[1] = v128_blendv( t1, t3, mask_cc );
+   XA[2] = v128_blendv( t2, t0, mask_cc );
+   XA[3] = v128_blendv( t3, t1, mask_cc );
+   t0 = v128_blendv( XB[0], XB[2], mask_f0 );
+   t1 = v128_blendv( XB[1], XB[3], mask_3c );
+   t2 = v128_blendv( XB[2], XB[0], mask_f0 );
+   t3 = v128_blendv( XB[3], XB[1], mask_3c );
+   XB[0] = v128_blendv( t0, t2, mask_cc );
+   XB[1] = v128_blendv( t1, t3, mask_cc );
+   XB[2] = v128_blendv( t2, t0, mask_cc );
+   XB[3] = v128_blendv( t3, t1, mask_cc );
+   t0 = v128_blendv( XC[0], XC[2], mask_f0 );
+   t1 = v128_blendv( XC[1], XC[3], mask_3c );
+   t2 = v128_blendv( XC[2], XC[0], mask_f0 );
+   t3 = v128_blendv( XC[3], XC[1], mask_3c );
+   XC[0] = v128_blendv( t0, t2, mask_cc );
+   XC[1] = v128_blendv( t1, t3, mask_cc );
+   XC[2] = v128_blendv( t2, t0, mask_cc );
+   XC[3] = v128_blendv( t3, t1, mask_cc );
+*/
+
+   v128_ovly ya[4], za[4], yb[4], zb[4], yc[4], zc[4];
 
    ya[0].m128 = XA[0];
    yb[0].m128 = XB[0];
@@ -2735,6 +2851,7 @@ static inline void salsa_simd128_unshuffle_3buf( uint32_t* xa, uint32_t* xb,
    XB[3] = zb[3].m128;
    XC[3] = zc[3].m128;
 
+
 #endif
 }
@@ -32,7 +32,7 @@
 #include <string.h>
 #include <inttypes.h>
 #include "algo/sha/sha256-hash.h"
-#include <mm_malloc.h>
+//#include <mm_malloc.h>
 #include "malloc-huge.h"
 
 static const uint32_t keypad[12] = {
@@ -55,17 +55,13 @@ static const uint32_t sha256_initial_state[8] =
 };
 
 #if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
-
-#define SCRYPT_THROUGHPUT 16
-
+#define SCRYPT_THROUGHPUT 16
 #elif defined(__AVX2__)
-
-#define SCRYPT_THROUGHPUT 8
-
+#define SCRYPT_THROUGHPUT 8
+#elif defined(__SHA__) // NEON?
+#define SCRYPT_THROUGHPUT 2
 #else
-
-#define SCRYPT_THROUGHPUT 4
-
+#define SCRYPT_THROUGHPUT 4
 #endif
 
 // static int scrypt_throughput = 0;
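SCRYPT_THROUGHPUT is the number of nonces hashed per scanhash call, so it mainly sizes the per-call work arrays; a minimal sketch of how such a constant is used (names here are illustrative, the real buffers live in scanhash_scrypt further down):

#include <stdint.h>

#ifndef SCRYPT_THROUGHPUT
#define SCRYPT_THROUGHPUT 4   // whichever value the ISA test above selects
#endif

// Give each lane its own nonce: data holds SCRYPT_THROUGHPUT block headers of
// 20 uint32_t each, and word 19 of every header is that lane's nonce.
static void seed_lane_nonces( uint32_t *data, uint32_t first_nonce )
{
   for ( int i = 0; i < SCRYPT_THROUGHPUT; i++ )
      data[ i*20 + 19 ] = first_nonce + i;
}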
@@ -268,9 +264,7 @@ static inline void PBKDF2_SHA256_128_32_SHA_2BUF( uint32_t *tstate0,
    }
 }
 
-
-
-#endif
+#endif // SHA
 
 static const uint32_t keypad_4way[4 * 12] = {
    0x80000000, 0x80000000, 0x80000000, 0x80000000,
@@ -439,7 +433,6 @@ static inline void PBKDF2_SHA256_128_32_4way( uint32_t *tstate,
       output[i] = bswap_32( ostate[i] );
 }
 
-
 #ifdef HAVE_SHA256_8WAY
 
 /*
@@ -629,7 +622,6 @@ static inline void HMAC_SHA256_80_init_16way( const uint32_t *key,
                       (const __m512i*)tstate );
 }
 
-
 static inline void PBKDF2_SHA256_80_128_16way( const uint32_t *tstate,
    const uint32_t *ostate, const uint32_t *salt, uint32_t *output )
 {
@@ -803,18 +795,13 @@ static int scrypt_N_1_1_256_8way( const uint32_t *input, uint32_t *output,
       dintrlv_2x128( X+192, X+224, W+192, 1024 );
    }
 
 
-
-
    // SCRYPT CORE
 
-   // AVX2
-
-
+   // AVX2
    // disable de/interleave for testing.
    // scrypt_core_8way( (__m256i*)W , (__m256i*)V, N );
 
-
 /*
    // AVX2 working
    intrlv_2x128( W, X, X+ 32, 1024 );
@@ -956,7 +943,6 @@ static int scrypt_N_1_1_256_16way( const uint32_t *input, uint32_t *output,
                  X+256, X+288, X+320, X+352, X+384, X+416, X+448, X+480,
                  W, 1024 );
 
-
    if ( opt_param_n > 0x4000 )
    {
       scrypt_core_simd128_3buf( X, scratchbuf, N );
@@ -992,7 +978,6 @@ static int scrypt_N_1_1_256_16way( const uint32_t *input, uint32_t *output,
 
 // SCRYPT CORE
 
-
 // AVX512
 /*
 // AVX512 16 way working
@@ -1043,9 +1028,6 @@ static int scrypt_N_1_1_256_16way( const uint32_t *input, uint32_t *output,
    dintrlv_4x128( X+256+128, X+256+160, X+256+192, X+256+224, W+256+128, 1024 );
 */
 
-
-   // AVX2
-
 /*
    // AVX2
    // disable de/interleave for testing.
@@ -1239,7 +1221,8 @@ static int scrypt_N_1_1_256_16way( const uint32_t *input, uint32_t *output,
 
 #endif // AVX512
 
-#if 0
+#if ( SCRYPT_THROUGHPUT == 2 ) && defined(__SHA__)
 
 static int scrypt_N_1_1_256_sha_2buf( const uint32_t *input, uint32_t *output,
     uint32_t *midstate, int N, int thrid )
 {
@@ -1264,6 +1247,12 @@ static int scrypt_N_1_1_256_sha_2buf( const uint32_t *input, uint32_t *output,
    return 1;
 }
 
+#endif
+
+#if ( SCRYPT_THROUGHPUT == 4 )
+
+#if defined(__SHA__)
+
 static int scrypt_N_1_1_256_4way_sha( const uint32_t *input, uint32_t *output,
     uint32_t *midstate, int N, int thrid )
 {
@@ -1323,9 +1312,10 @@ static int scrypt_N_1_1_256_4way_sha( const uint32_t *input, uint32_t *output,
 
    return 1;
 }
-#endif
 
-#if ( SCRYPT_THROUGHPUT == 4 )
+#else
 // SSE2
 
 static int scrypt_N_1_1_256_4way( const uint32_t *input, uint32_t *output,
     uint32_t *midstate, int N, int thrid )
 {
@@ -1352,8 +1342,6 @@ static int scrypt_N_1_1_256_4way( const uint32_t *input, uint32_t *output,
    else
       scrypt_core_4way( (v128_t*)W, (v128_t*)scratchbuf, N );
 
-
-
 //   dintrlv_4x32( X, X+32, X+64, X+96, W, 1024 );
 
 ////// SCRYPT_CORE
@@ -1396,9 +1384,10 @@ static int scrypt_N_1_1_256_4way( const uint32_t *input, uint32_t *output,
 
    return 1;
 }
-#endif // SCRYPT_THROUGHPUT == 4
 
-//#endif // SHA
+#endif
+
+#endif // SCRYPT_THROUGHPUT == 4
 
 extern int scanhash_scrypt( struct work *work, uint32_t max_nonce,
    uint64_t *hashes_done, struct thr_info *mythr )
@@ -1422,43 +1411,26 @@ extern int scanhash_scrypt( struct work *work, uint32_t max_nonce,
    bool rc = true;
    for ( i = 0; i < SCRYPT_THROUGHPUT; i++ ) data[ i*20 + 19 ] = ++n;
 
-//#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
 #if ( SCRYPT_THROUGHPUT == 16 )
-// if ( SCRYPT_THROUGHPUT == 16 )
       rc = scrypt_N_1_1_256_16way( data, hash, midstate, opt_param_n,
                                    thr_id );
-// else
-//#endif
-//#if defined(__AVX2__)
 #elif ( SCRYPT_THROUGHPUT == 8 )
-// if ( SCRYPT_THROUGHPUT == 8 )
      rc = scrypt_N_1_1_256_8way( data, hash, midstate, opt_param_n,
                                  thr_id );
-// else
-//#endif
 #elif ( SCRYPT_THROUGHPUT == 4 )
-// if ( SCRYPT_THROUGHPUT == 4 ) // slower on Ryzen than 8way
-//#if defined(__SHA__)
-//   rc = scrypt_N_1_1_256_4way_sha( data, hash, midstate, opt_param_n,
-//                                   thr_id );
-//#else
+#if defined(__SHA__)
      rc = scrypt_N_1_1_256_4way_sha( data, hash, midstate, opt_param_n,
                                      thr_id );
+#else
      rc = scrypt_N_1_1_256_4way( data, hash, midstate, opt_param_n,
                                  thr_id );
-#else
-
-#error "Invalid SCRYPT_THROUGHPUT"
-
-#endif
-/*
-#if defined(__SHA__)
-   else
-   if ( SCRYPT_THROUGHPUT == 2 ) // slower on Ryzen than 4way_sha & 8way
+#endif
+#elif ( SCRYPT_THROUGHPUT == 2 ) && defined(__SHA__)
      rc = scrypt_N_1_1_256_sha_2buf( data, hash, midstate, opt_param_n,
                                      thr_id );
-#endif
-   else // should never get here
+#else
      rc = scrypt_N_1_1_256( data, hash, midstate, opt_param_n, thr_id );
-*/
+#endif
 
    // test the hash
    if ( rc )
@@ -1490,7 +1462,7 @@ bool scrypt_miner_thread_init( int thr_id )
          applog( LOG_NOTICE, "Thread %u is using huge pages", thr_id );
    }
    else
-      scratchbuf = _mm_malloc( scratchbuf_size, 128 );
+      scratchbuf = mm_malloc( scratchbuf_size, 128 );
 
    if ( scratchbuf ) return true;
 
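The switch to mm_malloc pairs with the <mm_malloc.h> include this commit comments out above: the x86-only allocator is swapped for the project's own wrapper. A rough sketch of what such a wrapper can look like, assuming a POSIX target (this is not the miner's actual definition):

#include <stdlib.h>

// Portable aligned allocation in the spirit of _mm_malloc(); release with free().
static void *mm_malloc_sketch( size_t size, size_t align )
{
   void *p = NULL;
   if ( posix_memalign( &p, align, size ) != 0 ) return NULL;
   return p;
}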
@@ -1500,18 +1472,18 @@ bool scrypt_miner_thread_init( int thr_id )
 
 bool register_scrypt_algo( algo_gate_t* gate )
 {
-//#if defined(__SHA__)
-//  gate->optimizations = SSE2_OPT | SHA_OPT;
-//#else
+#if defined(__SHA__)
+  gate->optimizations = SSE2_OPT | SHA_OPT;
+#else
   gate->optimizations = SSE2_OPT | SSE42_OPT | AVX_OPT | AVX2_OPT | AVX512_OPT;
-//#endif
+#endif
   gate->miner_thread_init =(void*)&scrypt_miner_thread_init;
   gate->scanhash = (void*)&scanhash_scrypt;
   opt_target_factor = 65536.0;
   opt_param_n = opt_param_n ? opt_param_n : 1024;
   applog( LOG_INFO,"Scrypt paramaters: N= %d, R= 1", opt_param_n );
 
-// scrypt_throughput can be defined at compile time and used to replace
+// scrypt_throughput defined at compile time and used to replace
 // MAX_WAYS to reduce memory usage.
 
 #if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
@@ -1520,19 +1492,15 @@ bool register_scrypt_algo( algo_gate_t* gate )
       scratchbuf_size = opt_param_n * 3 * 128; // 3 buf
    else
       scratchbuf_size = opt_param_n * 4 * 128; // 4 way
-
-/* SHA is slower than AVX2 on Ryzen
-#elif defined(__SHA__)
-   scrypt_throughput = 4;
-   scratchbuf_size = opt_param_n * 2 * 128; // 2 buf
-*/
-
 #elif defined(__AVX2__)
 //   scrypt_throughput = 8;
    if ( opt_param_n > 0x4000 )
       scratchbuf_size = opt_param_n * 3 * 128; // 3 buf
    else
       scratchbuf_size = opt_param_n * 2 * 128; // 2 way
+#elif defined(__SHA__)
+//   scrypt_throughput = 4;
+   scratchbuf_size = opt_param_n * 2 * 128; // 2 buf
 #else
 //   scrypt_throughput = 4;
    if ( opt_param_n > 0x4000 )
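For a sense of scale: with r = 1 one scrypt block is 128 bytes, so at the default N = 1024 these expressions work out to 1024 x 2 x 128 = 256 KiB per thread for the 2-buffer paths, 384 KiB for the 3-buffer paths and 512 KiB for 4-way, growing linearly with N.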
@@ -1549,7 +1517,7 @@ bool register_scrypt_algo( algo_gate_t* gate )
    format_number_si( &t_size, t_units );
    format_number_si( &d_size, d_units );
    applog( LOG_INFO,"Throughput %d/thr, Buffer %.0f %siB/thr, Total %.0f %siB\n",
-                 SCRYPT_THROUGHPUT, t_size, t_units, d_size, d_units );
+                 SCRYPT_THROUGHPUT, t_size, t_units, d_size, d_units );
 
    return true;
 };