Compare commits

...

6 Commits

Author      SHA1         Message      Date
Jay D Dee   6e8b8ed34f   v3.12.6.1    2020-03-07 14:11:06 -05:00
Jay D Dee   c0aadbcc99   v3.12.6      2020-03-05 18:43:20 -05:00
Jay D Dee   3da149418a   v3.12.5      2020-03-01 13:18:17 -05:00
Jay D Dee   720610cce5   v3.12.4.6    2020-02-28 18:20:32 -05:00
Jay D Dee   cedcf4d070   v3.12.4.5    2020-02-28 02:42:22 -05:00
Jay D Dee   81b50c3c71   v3.12.4.4    2020-02-25 14:07:32 -05:00
32 changed files with 853 additions and 677 deletions

View File

@@ -37,25 +37,25 @@ Requirements
------------

1. A x86_64 architecture CPU with a minimum of SSE2 support. This includes
-  Intel Core2 and newer and AMD equivalents. In order to take advantage of AES_NI
-  optimizations a CPU with AES_NI is required. This includes Intel Westmere
-  and newer and AMD equivalents. Further optimizations are available on some
-  algoritms for CPUs with AVX and AVX2, Sandybridge and Haswell respectively.
+  Intel Core2 and newer and AMD equivalents. Further optimizations are available
+  on some algorithms for CPUs with AES, AVX, AVX2, SHA, AVX512 and VAES.
   Older CPUs are supported by cpuminer-multi by TPruvot but at reduced
   performance.
-  ARM CPUs are not supported.
+  ARM and Aarch64 CPUs are not supported.

-2. 64 bit Linux OS. Ubuntu and Fedora based distributions, including Mint and
-  Centos, are known to work and have all dependencies in their repositories.
-  Others may work but may require more effort. Older versions such as Centos 6
-  don't work due to missing features.
+2. 64 bit Linux or Windows OS. Ubuntu and Fedora based distributions,
+  including Mint and Centos, are known to work and have all dependencies
+  in their repositories. Others may work but may require more effort. Older
+  versions such as Centos 6 don't work due to missing features.
   64 bit Windows OS is supported with mingw_w64 and msys or pre-built binaries.
   MacOS, OSx and Android are not supported.

-3. Stratum pool. Some algos may work wallet mining using getwork or GBT. YMMV.
+3. Stratum pool supporting stratum+tcp:// or stratum+ssl:// protocols or
+  RPC getwork using http:// or https://.
+  GBT is YMMV.
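For reference, pool URLs for the transports named in requirement 3 look like this on the command line (host names, ports, wallet address and RPC credentials below are placeholders, not real endpoints):

   ./cpuminer -a x16r -o stratum+tcp://pool.example.com:3636 -u WALLET_ADDRESS -p x
   ./cpuminer -a x16r -o stratum+ssl://pool.example.com:3637 -u WALLET_ADDRESS -p x
   ./cpuminer -a sha256d -o http://127.0.0.1:8332 -u rpcuser -p rpcpass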
Supported Algorithms
--------------------
@@ -152,6 +152,27 @@ Supported Algorithms
 yespower-b2b             generic yespower + blake2b
 zr5                      Ziftr

+Many variations of scrypt based algos can be mined by specifying their
+parameters:
+
+scryptn2:      --algo scrypt --param-n 1048576
+cpupower:      --algo yespower --param-key "CPUpower: The number of CPU working or available for proof-of-work mining"
+power2b:       --algo yespower-b2b --param-n 2048 --param-r 32 --param-key "Now I am become Death, the destroyer of worlds"
+sugarchain:    --algo yespower --param-n 2048 --param-r 32 --param-key "Satoshi Nakamoto 31/Oct/2008 Proof-of-work is essentially one-CPU-one-vote"
+yespoweriots:  --algo yespower --param-n 2048 --param-key "Iots is committed to the development of IOT"
+yespowerlitb:  --algo yespower --param-n 2048 --param-r 32 --param-key "LITBpower: The number of LITB working or available for proof-of-work mini"
+yespoweric:    --algo yespower --param-n 2048 --param-r 32 --param-key "IsotopeC"
+yespowerurx:   --algo yespower --param-n 2048 --param-r 32 --param-key "UraniumX"
+yespowerltncg: --algo yespower --param-n 2048 --param-r 32 --param-key "LTNCGYES"
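Each parameter set above is combined with the usual connection options on a single command line; a hypothetical scryptn2 invocation (pool URL and wallet are placeholders):

   ./cpuminer --algo scrypt --param-n 1048576 -o stratum+tcp://pool.example.com:4000 -u WALLET_ADDRESS -p x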
Errata
------

View File

@@ -65,6 +65,76 @@ If not what makes it happen or not happen?
Change Log
----------
v3.12.6.1
Issue #252: Fixed SSL mining (stratum+tcps://)
Issue #254 Fixed benchmark.
Issue #253: Implemented stale share reduction for yespower, x25x, x22i, x21s,
x16*, scryptn2, more to come.
v3.12.6
Issue #246: improved stale share detection for getwork.
Improved precision of target_to_diff conversion from 4 digits to 20+.
Display hash and target debug data for all rejected shares.
A graphical representation of CPU affinity is displayed when using --threads.
Added highest and lowest accepted share to summary log.
Other small changes to logs to improve consistency and clarity.
v3.12.5
Issues #246 & #251: fixed incorrect share diff for stratum and getwork,
fixed incorrect target diff for getwork. Stats should now be correct for
getwork as well as stratum.
Issue #252: Fixed stratum+tcps not using curl ssl.
Getwork: reduce stale blocks, faster response to new work.
Added ntime to new job/work logs.
README.md now lists the parameters for yespower variations that don't have
a specific algo name.
v3.12.4.6
Issue #246: fixed getwork repeated new block logs with same height. New work
for the same block is now reported as "New work" instead of "New block".
Also added a check that work is new before generating "New work" log.
Added target diff to getwork new block log.
Changed share ratio in share result log to simple fraction, no longer %.
Added debug log to display mininginfo, use -D.
v3.12.4.5
Issue #246: better stale share detection for getwork, and enhanced logging
of stale shares for stratum & getwork.
Issue #251: fixed incorrect share difficulty and share ratio in share
result log.
Changed submit log to include share diff and block height.
Small cosmetic changes to logs.
v3.12.4.4
Issue #246: Fixed net hashrate in getwork block log,
removed duplicate getwork block log,
other small tweaks to stats logs for getwork.
Issue #248: Fixed chronic stale shares with scrypt:1048576 (scryptn2).
v3.12.4.3

Fixed segfault in new block log for getwork.

View File

@@ -97,21 +97,23 @@ int null_scanhash()
   return 0;
}

-void null_hash()
+int null_hash()
{
   applog(LOG_WARNING,"SWERR: null_hash unsafe null function");
+  return 0;
};

+/*
void null_hash_suw()
{
   applog(LOG_WARNING,"SWERR: null_hash_suw unsafe null function");
};
+*/

void init_algo_gate( algo_gate_t* gate )
{
   gate->miner_thread_init = (void*)&return_true;
   gate->scanhash = (void*)&null_scanhash;
   gate->hash = (void*)&null_hash;
-  gate->hash_suw = (void*)&null_hash_suw;
+// gate->hash_suw = (void*)&null_hash_suw;
   gate->get_new_work = (void*)&std_get_new_work;
   gate->work_decode = (void*)&std_le_work_decode;
   gate->decode_extra_data = (void*)&do_nothing;
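The hunks above and below change the gate's hash callback from void to int: a hash function now returns 0 when it bails out early (typically because work_restart signalled new work) and nonzero when the hash written to output is complete, so scanhash only tests completed hashes against the target. A minimal sketch of that pattern, using hypothetical stage1/stage2 primitives rather than any real algorithm from this tree:

   /* Sketch only -- illustrates the int-returning hash convention used in
      these commits. stage1/stage2 are hypothetical placeholders, and the
      work_restart layout shown is an assumed minimal version. */
   #include <stdint.h>
   #include <string.h>

   struct work_restart { volatile unsigned int restart; };  /* assumed layout   */
   extern struct work_restart *work_restart;                /* one per thread   */
   extern void stage1( void *out, const void *in );         /* hypothetical     */
   extern void stage2( void *out, const void *in );         /* hypothetical     */

   static int example_hash( void *output, const void *input, int thrid )
   {
      uint32_t hash[16];

      stage1( hash, input );
      if ( work_restart[thrid].restart ) return 0;   /* new work arrived, abandon */

      stage2( hash, hash );
      if ( work_restart[thrid].restart ) return 0;

      memcpy( output, hash, 32 );
      return 1;                                      /* completed, output is valid */
   }

   /* A scanhash loop then skips the target test for abandoned hashes:
         if ( example_hash( hash32, edata, thr_id ) )
            if ( valid_hash( hash32, ptarget ) && !bench )
               submit_solution( work, hash32, mythr );                        */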

View File

@@ -113,9 +113,10 @@ typedef struct
   // mandatory functions, must be overwritten
   int ( *scanhash ) ( struct work*, uint32_t, uint64_t*, struct thr_info* );

+  // not used anywhere
   // optional unsafe, must be overwritten if algo uses function
-  void ( *hash ) ( void*, const void*, uint32_t ) ;
-  void ( *hash_suw ) ( void*, const void* );
+  int ( *hash ) ( void*, const void*, uint32_t ) ;
+  //void ( *hash_suw ) ( void*, const void* );

   //optional, safe to use default in most cases

@@ -213,8 +214,8 @@ void four_way_not_tested();

int null_scanhash();

// displays warning
-void null_hash ();
-void null_hash_suw();
+int null_hash ();
+//void null_hash_suw();

// optional safe targets, default listed first unless noted.

View File

@@ -311,7 +311,7 @@ bool register_m7m_algo( algo_gate_t *gate )
{
   gate->optimizations = SHA_OPT;
   init_m7m_ctx();
-  gate->scanhash = (void*)scanhash_m7m_hash;
+  gate->scanhash = (void*)&scanhash_m7m_hash;
   gate->build_stratum_request = (void*)&std_be_build_stratum_request;
   gate->work_decode = (void*)&std_be_work_decode;
   gate->submit_getwork_result = (void*)&std_be_submit_getwork_result;

View File

@@ -380,7 +380,7 @@ static inline void PBKDF2_SHA256_128_32_8way(uint32_t *tstate,
#endif /* HAVE_SHA256_8WAY */

-#if defined(USE_ASM) && defined(__x86_64__)
+//#if defined(USE_ASM) && defined(__x86_64__)

#define SCRYPT_MAX_WAYS 12
#define HAVE_SCRYPT_3WAY 1
@@ -394,113 +394,6 @@ void scrypt_core_3way(uint32_t *X, uint32_t *V, int N);
void scrypt_core_6way(uint32_t *X, uint32_t *V, int N);
#endif
#elif defined(USE_ASM) && defined(__i386__)
#define SCRYPT_MAX_WAYS 4
#define scrypt_best_throughput() 1
void scrypt_core(uint32_t *X, uint32_t *V, int N);
#elif defined(USE_ASM) && defined(__arm__) && defined(__APCS_32__)
void scrypt_core(uint32_t *X, uint32_t *V, int N);
#if defined(__ARM_NEON__)
#undef HAVE_SHA256_4WAY
#define SCRYPT_MAX_WAYS 3
#define HAVE_SCRYPT_3WAY 1
#define scrypt_best_throughput() 3
void scrypt_core_3way(uint32_t *X, uint32_t *V, int N);
#endif
#else
static inline void xor_salsa8(uint32_t B[16], const uint32_t Bx[16])
{
uint32_t x00,x01,x02,x03,x04,x05,x06,x07,x08,x09,x10,x11,x12,x13,x14,x15;
int i;
x00 = (B[ 0] ^= Bx[ 0]);
x01 = (B[ 1] ^= Bx[ 1]);
x02 = (B[ 2] ^= Bx[ 2]);
x03 = (B[ 3] ^= Bx[ 3]);
x04 = (B[ 4] ^= Bx[ 4]);
x05 = (B[ 5] ^= Bx[ 5]);
x06 = (B[ 6] ^= Bx[ 6]);
x07 = (B[ 7] ^= Bx[ 7]);
x08 = (B[ 8] ^= Bx[ 8]);
x09 = (B[ 9] ^= Bx[ 9]);
x10 = (B[10] ^= Bx[10]);
x11 = (B[11] ^= Bx[11]);
x12 = (B[12] ^= Bx[12]);
x13 = (B[13] ^= Bx[13]);
x14 = (B[14] ^= Bx[14]);
x15 = (B[15] ^= Bx[15]);
for (i = 0; i < 8; i += 2) {
#define R(a, b) (((a) << (b)) | ((a) >> (32 - (b))))
/* Operate on columns. */
x04 ^= R(x00+x12, 7); x09 ^= R(x05+x01, 7);
x14 ^= R(x10+x06, 7); x03 ^= R(x15+x11, 7);
x08 ^= R(x04+x00, 9); x13 ^= R(x09+x05, 9);
x02 ^= R(x14+x10, 9); x07 ^= R(x03+x15, 9);
x12 ^= R(x08+x04,13); x01 ^= R(x13+x09,13);
x06 ^= R(x02+x14,13); x11 ^= R(x07+x03,13);
x00 ^= R(x12+x08,18); x05 ^= R(x01+x13,18);
x10 ^= R(x06+x02,18); x15 ^= R(x11+x07,18);
/* Operate on rows. */
x01 ^= R(x00+x03, 7); x06 ^= R(x05+x04, 7);
x11 ^= R(x10+x09, 7); x12 ^= R(x15+x14, 7);
x02 ^= R(x01+x00, 9); x07 ^= R(x06+x05, 9);
x08 ^= R(x11+x10, 9); x13 ^= R(x12+x15, 9);
x03 ^= R(x02+x01,13); x04 ^= R(x07+x06,13);
x09 ^= R(x08+x11,13); x14 ^= R(x13+x12,13);
x00 ^= R(x03+x02,18); x05 ^= R(x04+x07,18);
x10 ^= R(x09+x08,18); x15 ^= R(x14+x13,18);
#undef R
}
B[ 0] += x00;
B[ 1] += x01;
B[ 2] += x02;
B[ 3] += x03;
B[ 4] += x04;
B[ 5] += x05;
B[ 6] += x06;
B[ 7] += x07;
B[ 8] += x08;
B[ 9] += x09;
B[10] += x10;
B[11] += x11;
B[12] += x12;
B[13] += x13;
B[14] += x14;
B[15] += x15;
}
static inline void scrypt_core(uint32_t *X, uint32_t *V, int N)
{
int i;
for (i = 0; i < N; i++) {
memcpy(&V[i * 32], X, 128);
xor_salsa8(&X[0], &X[16]);
xor_salsa8(&X[16], &X[0]);
}
for (i = 0; i < N; i++) {
uint32_t j = 32 * (X[16] & (N - 1));
for (uint8_t k = 0; k < 32; k++)
X[k] ^= V[j + k];
xor_salsa8(&X[0], &X[16]);
xor_salsa8(&X[16], &X[0]);
}
}
#endif
#ifndef SCRYPT_MAX_WAYS
#define SCRYPT_MAX_WAYS 1
#define scrypt_best_throughput() 1
@@ -511,8 +404,8 @@ unsigned char *scrypt_buffer_alloc(int N)
   return (uchar*) malloc((size_t)N * SCRYPT_MAX_WAYS * 128 + 63);
}

-static void scrypt_1024_1_1_256(const uint32_t *input, uint32_t *output,
-   uint32_t *midstate, unsigned char *scratchpad, int N)
+static bool scrypt_1024_1_1_256(const uint32_t *input, uint32_t *output,
+   uint32_t *midstate, unsigned char *scratchpad, int N, int thr_id )
{
   uint32_t tstate[8], ostate[8];
   uint32_t X[32];

@@ -527,11 +420,13 @@ static void scrypt_1024_1_1_256(const uint32_t *input, uint32_t *output,

   scrypt_core(X, V, N);
   PBKDF2_SHA256_128_32(tstate, ostate, X, output);
+  return true;
}
#ifdef HAVE_SHA256_4WAY
-static void scrypt_1024_1_1_256_4way(const uint32_t *input,
-   uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N)
+static int scrypt_1024_1_1_256_4way(const uint32_t *input,
+   uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N,
+   int thrid )
{
   uint32_t _ALIGN(128) tstate[4 * 8];
   uint32_t _ALIGN(128) ostate[4 * 8];

@@ -545,32 +440,47 @@ static void scrypt_1024_1_1_256_4way(const uint32_t *input,

   for (i = 0; i < 20; i++)
      for (k = 0; k < 4; k++)
         W[4 * i + k] = input[k * 20 + i];
   for (i = 0; i < 8; i++)
      for (k = 0; k < 4; k++)
         tstate[4 * i + k] = midstate[i];

   HMAC_SHA256_80_init_4way(W, tstate, ostate);
   PBKDF2_SHA256_80_128_4way(tstate, ostate, W, W);

+  if ( work_restart[thrid].restart ) return 0;

   for (i = 0; i < 32; i++)
      for (k = 0; k < 4; k++)
         X[k * 32 + i] = W[4 * i + k];

   scrypt_core(X + 0 * 32, V, N);
   scrypt_core(X + 1 * 32, V, N);
   scrypt_core(X + 2 * 32, V, N);
   scrypt_core(X + 3 * 32, V, N);

+  if ( work_restart[thrid].restart ) return 0;

   for (i = 0; i < 32; i++)
      for (k = 0; k < 4; k++)
         W[4 * i + k] = X[k * 32 + i];

   PBKDF2_SHA256_128_32_4way(tstate, ostate, W, W);

   for (i = 0; i < 8; i++)
      for (k = 0; k < 4; k++)
         output[k * 8 + i] = W[4 * i + k];

+  return 1;
}
#endif /* HAVE_SHA256_4WAY */
#ifdef HAVE_SCRYPT_3WAY
-static void scrypt_1024_1_1_256_3way(const uint32_t *input,
-   uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N)
+static int scrypt_1024_1_1_256_3way(const uint32_t *input,
+   uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N,
+   int thrid )
{
   uint32_t _ALIGN(64) tstate[3 * 8], ostate[3 * 8];
   uint32_t _ALIGN(64) X[3 * 32];

@@ -581,23 +491,34 @@ static void scrypt_1024_1_1_256_3way(const uint32_t *input,

   memcpy(tstate + 0, midstate, 32);
   memcpy(tstate + 8, midstate, 32);
   memcpy(tstate + 16, midstate, 32);

   HMAC_SHA256_80_init(input + 0, tstate + 0, ostate + 0);
   HMAC_SHA256_80_init(input + 20, tstate + 8, ostate + 8);
   HMAC_SHA256_80_init(input + 40, tstate + 16, ostate + 16);

+  if ( work_restart[thrid].restart ) return 0;

   PBKDF2_SHA256_80_128(tstate + 0, ostate + 0, input + 0, X + 0);
   PBKDF2_SHA256_80_128(tstate + 8, ostate + 8, input + 20, X + 32);
   PBKDF2_SHA256_80_128(tstate + 16, ostate + 16, input + 40, X + 64);

+  if ( work_restart[thrid].restart ) return 0;

   scrypt_core_3way(X, V, N);

+  if ( work_restart[thrid].restart ) return 0;

   PBKDF2_SHA256_128_32(tstate + 0, ostate + 0, X + 0, output + 0);
   PBKDF2_SHA256_128_32(tstate + 8, ostate + 8, X + 32, output + 8);
   PBKDF2_SHA256_128_32(tstate + 16, ostate + 16, X + 64, output + 16);

+  return 1;
}
#ifdef HAVE_SHA256_4WAY
-static void scrypt_1024_1_1_256_12way(const uint32_t *input,
-   uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N)
+static bool scrypt_1024_1_1_256_12way(const uint32_t *input,
+   uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N,
+   int thrid )
{
   uint32_t _ALIGN(128) tstate[12 * 8];
   uint32_t _ALIGN(128) ostate[12 * 8];

@@ -612,43 +533,60 @@ static void scrypt_1024_1_1_256_12way(const uint32_t *input,

      for (i = 0; i < 20; i++)
         for (k = 0; k < 4; k++)
            W[128 * j + 4 * i + k] = input[80 * j + k * 20 + i];
   for (j = 0; j < 3; j++)
      for (i = 0; i < 8; i++)
         for (k = 0; k < 4; k++)
            tstate[32 * j + 4 * i + k] = midstate[i];

   HMAC_SHA256_80_init_4way(W + 0, tstate + 0, ostate + 0);
   HMAC_SHA256_80_init_4way(W + 128, tstate + 32, ostate + 32);
   HMAC_SHA256_80_init_4way(W + 256, tstate + 64, ostate + 64);

+  if ( work_restart[thrid].restart ) return 0;

   PBKDF2_SHA256_80_128_4way(tstate + 0, ostate + 0, W + 0, W + 0);
   PBKDF2_SHA256_80_128_4way(tstate + 32, ostate + 32, W + 128, W + 128);
   PBKDF2_SHA256_80_128_4way(tstate + 64, ostate + 64, W + 256, W + 256);

+  if ( work_restart[thrid].restart ) return 0;

   for (j = 0; j < 3; j++)
      for (i = 0; i < 32; i++)
         for (k = 0; k < 4; k++)
            X[128 * j + k * 32 + i] = W[128 * j + 4 * i + k];

   scrypt_core_3way(X + 0 * 96, V, N);
   scrypt_core_3way(X + 1 * 96, V, N);
   scrypt_core_3way(X + 2 * 96, V, N);
   scrypt_core_3way(X + 3 * 96, V, N);

+  if ( work_restart[thrid].restart ) return 0;

   for (j = 0; j < 3; j++)
      for (i = 0; i < 32; i++)
         for (k = 0; k < 4; k++)
            W[128 * j + 4 * i + k] = X[128 * j + k * 32 + i];

   PBKDF2_SHA256_128_32_4way(tstate + 0, ostate + 0, W + 0, W + 0);
   PBKDF2_SHA256_128_32_4way(tstate + 32, ostate + 32, W + 128, W + 128);
   PBKDF2_SHA256_128_32_4way(tstate + 64, ostate + 64, W + 256, W + 256);

   for (j = 0; j < 3; j++)
      for (i = 0; i < 8; i++)
         for (k = 0; k < 4; k++)
            output[32 * j + k * 8 + i] = W[128 * j + 4 * i + k];

+  return 1;
}
#endif /* HAVE_SHA256_4WAY */
#endif /* HAVE_SCRYPT_3WAY */
#ifdef HAVE_SCRYPT_6WAY
-static void scrypt_1024_1_1_256_24way(const uint32_t *input,
-   uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N)
+static int scrypt_1024_1_1_256_24way( const uint32_t *input,
+   uint32_t *output, uint32_t *midstate,
+   unsigned char *scratchpad, int N, int thrid )
{
   uint32_t _ALIGN(128) tstate[24 * 8];
   uint32_t _ALIGN(128) ostate[24 * 8];

@@ -657,41 +595,60 @@ static void scrypt_1024_1_1_256_24way(const uint32_t *input,

   uint32_t *V;
   int i, j, k;

   V = (uint32_t *)( ( (uintptr_t)(scratchpad) + 63 ) & ~ (uintptr_t)(63) );

   for ( j = 0; j < 3; j++ )
      for ( i = 0; i < 20; i++ )
         for ( k = 0; k < 8; k++ )
            W[8 * 32 * j + 8 * i + k] = input[8 * 20 * j + k * 20 + i];

   for ( j = 0; j < 3; j++ )
      for ( i = 0; i < 8; i++ )
         for ( k = 0; k < 8; k++ )
            tstate[8 * 8 * j + 8 * i + k] = midstate[i];

   HMAC_SHA256_80_init_8way( W + 0, tstate + 0, ostate + 0 );
   HMAC_SHA256_80_init_8way( W + 256, tstate + 64, ostate + 64 );
   HMAC_SHA256_80_init_8way( W + 512, tstate + 128, ostate + 128 );

+  if ( work_restart[thrid].restart ) return 0;

   PBKDF2_SHA256_80_128_8way( tstate + 0, ostate + 0, W + 0, W + 0 );
   PBKDF2_SHA256_80_128_8way( tstate + 64, ostate + 64, W + 256, W + 256 );
   PBKDF2_SHA256_80_128_8way( tstate + 128, ostate + 128, W + 512, W + 512 );

+  if ( work_restart[thrid].restart ) return 0;

   for ( j = 0; j < 3; j++ )
      for ( i = 0; i < 32; i++ )
         for ( k = 0; k < 8; k++ )
            X[8 * 32 * j + k * 32 + i] = W[8 * 32 * j + 8 * i + k];

   scrypt_core_6way( X + 0 * 32, V, N );
   scrypt_core_6way( X + 6 * 32, V, N );

+  if ( work_restart[thrid].restart ) return 0;

   scrypt_core_6way( X + 12 * 32, V, N );
   scrypt_core_6way( X + 18 * 32, V, N );

+  if ( work_restart[thrid].restart ) return 0;

   for ( j = 0; j < 3; j++ )
      for ( i = 0; i < 32; i++ )
         for ( k = 0; k < 8; k++ )
            W[8 * 32 * j + 8 * i + k] = X[8 * 32 * j + k * 32 + i];

   PBKDF2_SHA256_128_32_8way( tstate + 0, ostate + 0, W + 0, W + 0 );
   PBKDF2_SHA256_128_32_8way( tstate + 64, ostate + 64, W + 256, W + 256 );
   PBKDF2_SHA256_128_32_8way( tstate + 128, ostate + 128, W + 512, W + 512 );

   for ( j = 0; j < 3; j++ )
      for ( i = 0; i < 8; i++ )
         for ( k = 0; k < 8; k++ )
            output[8 * 8 * j + k * 8 + i] = W[8 * 32 * j + 8 * i + k];

+  return 1;
}
#endif /* HAVE_SCRYPT_6WAY */
@@ -703,7 +660,6 @@ extern int scanhash_scrypt( struct work *work, uint32_t max_nonce,
   uint32_t data[SCRYPT_MAX_WAYS * 20], hash[SCRYPT_MAX_WAYS * 8];
   uint32_t midstate[8];
   uint32_t n = pdata[19] - 1;
-  const uint32_t Htarg = ptarget[7];
   int thr_id = mythr->id;  // thr_id arg is deprecated
   int throughput = scrypt_best_throughput();
   int i;

@@ -714,6 +670,8 @@ extern int scanhash_scrypt( struct work *work, uint32_t max_nonce,

   throughput *= 4;
#endif

+// applog(LOG_INFO,"Scrypt thoughput %d",throughput);

   for (i = 0; i < throughput; i++)
      memcpy(data + i * 20, pdata, 80);
@@ -721,46 +679,50 @@ extern int scanhash_scrypt( struct work *work, uint32_t max_nonce,
   sha256_transform(midstate, data, 0);

   do {
+     bool rc = true;
      for (i = 0; i < throughput; i++)
         data[i * 20 + 19] = ++n;
#if defined(HAVE_SHA256_4WAY)
      if (throughput == 4)
-        scrypt_1024_1_1_256_4way(data, hash, midstate,
-                                 scratchbuf, scratchbuf_size );
+        rc = scrypt_1024_1_1_256_4way(data, hash, midstate,
+                                 scratchbuf, scratchbuf_size, thr_id );
      else
#endif
#if defined(HAVE_SCRYPT_3WAY) && defined(HAVE_SHA256_4WAY)
      if (throughput == 12)
-        scrypt_1024_1_1_256_12way(data, hash, midstate,
-                                  scratchbuf, scratchbuf_size );
+        rc = scrypt_1024_1_1_256_12way(data, hash, midstate,
+                                  scratchbuf, scratchbuf_size, thr_id );
      else
#endif
#if defined(HAVE_SCRYPT_6WAY)
      if (throughput == 24)
-        scrypt_1024_1_1_256_24way(data, hash, midstate,
-                                  scratchbuf, scratchbuf_size );
+        rc = scrypt_1024_1_1_256_24way(data, hash, midstate,
+                                  scratchbuf, scratchbuf_size, thr_id );
      else
#endif
#if defined(HAVE_SCRYPT_3WAY)
      if (throughput == 3)
-        scrypt_1024_1_1_256_3way(data, hash, midstate,
-                                 scratchbuf, scratchbuf_size );
+        rc = scrypt_1024_1_1_256_3way(data, hash, midstate,
+                                 scratchbuf, scratchbuf_size, thr_id );
      else
#endif
-        scrypt_1024_1_1_256(data, hash, midstate, scratchbuf,
-                            scratchbuf_size );
+        rc = scrypt_1024_1_1_256(data, hash, midstate, scratchbuf,
+                            scratchbuf_size, thr_id );

-     for (i = 0; i < throughput; i++) {
-        if (unlikely(hash[i * 8 + 7] <= Htarg && fulltest(hash + i * 8, ptarget))) {
-           pdata[19] = data[i * 20 + 19];
-           submit_solution( work, hash, mythr );
-        }
-     }
-  } while ( likely( n < max_nonce && !(*restart) ) );
-  *hashes_done = n - pdata[19] + 1;
+     if ( rc )
+     for ( i = 0; i < throughput; i++ )
+     {
+        if ( unlikely( valid_hash( hash + i * 8, ptarget ) ) )
+        {
+           pdata[19] = data[i * 20 + 19];
+           submit_solution( work, hash + i * 8, mythr );
+        }
+     }
+  } while ( likely( ( n < ( max_nonce - throughput ) ) && !(*restart) ) );
+  *hashes_done = n - pdata[19];
   pdata[19] = n;
   return 0;
}

@@ -779,7 +741,6 @@ bool register_scrypt_algo( algo_gate_t* gate )

   gate->optimizations = SSE2_OPT | AVX2_OPT;
   gate->miner_thread_init =(void*)&scrypt_miner_thread_init;
   gate->scanhash = (void*)&scanhash_scrypt;
-// gate->hash = (void*)&scrypt_1024_1_1_256_24way;
   opt_target_factor = 65536.0;
   if ( !opt_param_n )

View File

@@ -77,7 +77,7 @@ typedef union _hex_context_overlay hex_context_overlay;
static __thread x16r_context_overlay hex_ctx;

-void hex_hash( void* output, const void* input )
+int hex_hash( void* output, const void* input, int thrid )
{
   uint32_t _ALIGN(128) hash[16];
   x16r_context_overlay ctx;

@@ -214,11 +214,15 @@ void hex_hash( void* output, const void* input )

         SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
         break;
      }

+     if ( work_restart[thrid].restart ) return 0;

      algo = (uint8_t)hash[0] % X16R_HASH_FUNC_COUNT;
      in = (void*) hash;
      size = 64;
   }
   memcpy(output, hash, 32);
+  return 1;
}

int scanhash_hex( struct work *work, uint32_t max_nonce,

@@ -286,8 +290,7 @@ int scanhash_hex( struct work *work, uint32_t max_nonce,

   do
   {
      edata[19] = nonce;
-     hex_hash( hash32, edata );
+     if ( hex_hash( hash32, edata, thr_id ) );
      if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
      {
         be32enc( &pdata[19], nonce );

View File

@@ -80,7 +80,7 @@ void x16r_8way_prehash( void *vdata, void *pdata )
// Called by wrapper hash function to optionally continue hashing and
// convert to final hash.

-void x16r_8way_hash_generic( void* output, const void* input )
+int x16r_8way_hash_generic( void* output, const void* input, int thrid )
{
   uint32_t vhash[20*8] __attribute__ ((aligned (128)));
   uint32_t hash0[20] __attribute__ ((aligned (64)));

@@ -424,6 +424,9 @@ void x16r_8way_hash_generic( void* output, const void* input )

                         hash7, vhash );
         break;
      }

+     if ( work_restart[thrid].restart ) return 0;

      size = 64;
   }
@@ -435,14 +438,17 @@ void x16r_8way_hash_generic( void* output, const void* input )
   memcpy( output+320, hash5, 64 );
   memcpy( output+384, hash6, 64 );
   memcpy( output+448, hash7, 64 );
+  return 1;
}

// x16-r,-s,-rt wrapper called directly by scanhash to repackage 512 bit
// hash to 256 bit final hash.

-void x16r_8way_hash( void* output, const void* input )
+int x16r_8way_hash( void* output, const void* input, int thrid )
{
   uint8_t hash[64*8] __attribute__ ((aligned (128)));

-  x16r_8way_hash_generic( hash, input );
+  if ( !x16r_8way_hash_generic( hash, input, thrid ) )
+     return 0;

   memcpy( output, hash, 32 );
   memcpy( output+32, hash+64, 32 );

@@ -452,7 +458,9 @@ void x16r_8way_hash( void* output, const void* input )

   memcpy( output+160, hash+320, 32 );
   memcpy( output+192, hash+384, 32 );
   memcpy( output+224, hash+448, 32 );
+  return 1;
}

// x16r only
int scanhash_x16r_8way( struct work *work, uint32_t max_nonce,
@@ -492,8 +500,7 @@ int scanhash_x16r_8way( struct work *work, uint32_t max_nonce,
                        n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16r_8way_hash( hash, vdata );
+     if( x16r_8way_hash( hash, vdata, thr_id ) );
      for ( int i = 0; i < 8; i++ )
      if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
      {

@@ -565,7 +572,7 @@ void x16r_4way_prehash( void *vdata, void *pdata )

   }
}

-void x16r_4way_hash_generic( void* output, const void* input )
+int x16r_4way_hash_generic( void* output, const void* input, int thrid )
{
   uint32_t vhash[20*4] __attribute__ ((aligned (128)));
   uint32_t hash0[20] __attribute__ ((aligned (64)));
@@ -794,23 +801,31 @@ void x16r_4way_hash_generic( void* output, const void* input )
         dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
         break;
      }

+     if ( work_restart[thrid].restart ) return 0;

      size = 64;
   }

   memcpy( output, hash0, 64 );
   memcpy( output+64, hash1, 64 );
   memcpy( output+128, hash2, 64 );
   memcpy( output+192, hash3, 64 );
+  return 1;
}

-void x16r_4way_hash( void* output, const void* input )
+int x16r_4way_hash( void* output, const void* input, int thrid )
{
   uint8_t hash[64*4] __attribute__ ((aligned (64)));

-  x16r_4way_hash_generic( hash, input );
+  if ( !x16r_4way_hash_generic( hash, input, thrid ) )
+     return 0;

   memcpy( output, hash, 32 );
   memcpy( output+32, hash+64, 32 );
   memcpy( output+64, hash+128, 32 );
   memcpy( output+96, hash+192, 32 );
+  return 1;
}

int scanhash_x16r_4way( struct work *work, uint32_t max_nonce,

@@ -849,7 +864,7 @@ int scanhash_x16r_4way( struct work *work, uint32_t max_nonce,

         _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16r_4way_hash( hash, vdata );
+     if ( x16r_4way_hash( hash, vdata, thr_id ) );
      for ( int i = 0; i < 4; i++ )
      if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
      {

View File

@@ -131,8 +131,8 @@ typedef union _x16r_8way_context_overlay x16r_8way_context_overlay;
extern __thread x16r_8way_context_overlay x16r_ctx;

void x16r_8way_prehash( void *, void * );
-void x16r_8way_hash_generic( void *, const void * );
-void x16r_8way_hash( void *, const void * );
+int x16r_8way_hash_generic( void *, const void *, int );
+int x16r_8way_hash( void *, const void *, int );
int scanhash_x16r_8way( struct work *, uint32_t ,
                        uint64_t *, struct thr_info * );
extern __thread x16r_8way_context_overlay x16r_ctx;

@@ -166,8 +166,8 @@ typedef union _x16r_4way_context_overlay x16r_4way_context_overlay;

extern __thread x16r_4way_context_overlay x16r_ctx;

void x16r_4way_prehash( void *, void * );
-void x16r_4way_hash_generic( void *, const void * );
-void x16r_4way_hash( void *, const void * );
+int x16r_4way_hash_generic( void *, const void *, int );
+int x16r_4way_hash( void *, const void *, int );
int scanhash_x16r_4way( struct work *, uint32_t,
                        uint64_t *, struct thr_info * );
extern __thread x16r_4way_context_overlay x16r_ctx;

@@ -205,26 +205,26 @@ typedef union _x16r_context_overlay x16r_context_overlay;

extern __thread x16r_context_overlay x16_ctx;

void x16r_prehash( void *, void * );
-void x16r_hash_generic( void *, const void * );
-void x16r_hash( void *, const void * );
+int x16r_hash_generic( void *, const void *, int );
+int x16r_hash( void *, const void *, int );
int scanhash_x16r( struct work *, uint32_t, uint64_t *, struct thr_info * );

// x16Rv2
#if defined(X16RV2_8WAY)

-void x16rv2_8way_hash( void *state, const void *input );
+int x16rv2_8way_hash( void *state, const void *input, int thrid );
int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,
                          uint64_t *hashes_done, struct thr_info *mythr );

#elif defined(X16RV2_4WAY)

-void x16rv2_4way_hash( void *state, const void *input );
+int x16rv2_4way_hash( void *state, const void *input, int thrid );
int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
                          uint64_t *hashes_done, struct thr_info *mythr );

#else

-void x16rv2_hash( void *state, const void *input );
+int x16rv2_hash( void *state, const void *input, int thr_id );
int scanhash_x16rv2( struct work *work, uint32_t max_nonce,
                     uint64_t *hashes_done, struct thr_info *mythr );

@@ -254,21 +254,21 @@ int scanhash_x16rt( struct work *work, uint32_t max_nonce,

// x21s
#if defined(X16R_8WAY)

-void x21s_8way_hash( void *state, const void *input );
+int x21s_8way_hash( void *state, const void *input, int thrid );
int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,
                        uint64_t *hashes_done, struct thr_info *mythr );
bool x21s_8way_thread_init();

#elif defined(X16R_4WAY)

-void x21s_4way_hash( void *state, const void *input );
+int x21s_4way_hash( void *state, const void *input, int thrid );
int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,
                        uint64_t *hashes_done, struct thr_info *mythr );
bool x21s_4way_thread_init();

#else

-void x21s_hash( void *state, const void *input );
+int x21s_hash( void *state, const void *input, int thr_id );
int scanhash_x21s( struct work *work, uint32_t max_nonce,
                   uint64_t *hashes_done, struct thr_info *mythr );
bool x21s_thread_init();

View File

@@ -48,7 +48,7 @@ void x16r_prehash( void *edata, void *pdata )
   }
}

-void x16r_hash_generic( void* output, const void* input )
+int x16r_hash_generic( void* output, const void* input, int thrid )
{
   uint32_t _ALIGN(128) hash[16];
   x16r_context_overlay ctx;

@@ -178,18 +178,24 @@ void x16r_hash_generic( void* output, const void* input )

         SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
         break;
      }

+     if ( work_restart[thrid].restart ) return 0;

      in = (void*) hash;
      size = 64;
   }

   memcpy( output, hash, 64 );
+  return true;
}

-void x16r_hash( void* output, const void* input )
+int x16r_hash( void* output, const void* input, int thrid )
{
   uint8_t hash[64] __attribute__ ((aligned (64)));

-  x16r_hash_generic( hash, input );
+  if ( !x16r_hash_generic( hash, input, thrid ) )
+     return 0;

   memcpy( output, hash, 32 );
+  return 1;
}

int scanhash_x16r( struct work *work, uint32_t max_nonce,

@@ -223,8 +229,7 @@ int scanhash_x16r( struct work *work, uint32_t max_nonce,

   do
   {
      edata[19] = nonce;
-     x16r_hash( hash32, edata );
+     if ( x16r_hash( hash32, edata, thr_id ) )
      if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
      {
         pdata[19] = bswap_32( nonce );

View File

@@ -41,8 +41,7 @@ int scanhash_x16rt_8way( struct work *work, uint32_t max_nonce,
                        n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16r_8way_hash( hash, vdata );
+     if ( x16r_8way_hash( hash, vdata, thr_id ) )
      for ( int i = 0; i < 8; i++ )
      if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
      {

@@ -95,7 +94,7 @@ int scanhash_x16rt_4way( struct work *work, uint32_t max_nonce,

         _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16r_4way_hash( hash, vdata );
+     if ( x16r_4way_hash( hash, vdata, thr_id ) )
      for ( int i = 0; i < 4; i++ )
      if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
      {

View File

@@ -36,8 +36,7 @@ int scanhash_x16rt( struct work *work, uint32_t max_nonce,
   do
   {
      edata[19] = nonce;
-     x16r_hash( hash32, edata );
+     if ( x16r_hash( hash32, edata, thr_id ) )
      if ( valid_hash( hash32, ptarget ) && !bench )
      {
         pdata[19] = bswap_32( nonce );

View File

@@ -65,7 +65,7 @@ union _x16rv2_8way_context_overlay
typedef union _x16rv2_8way_context_overlay x16rv2_8way_context_overlay;
static __thread x16rv2_8way_context_overlay x16rv2_ctx;

-void x16rv2_8way_hash( void* output, const void* input )
+int x16rv2_8way_hash( void* output, const void* input, int thrid )
{
   uint32_t vhash[24*8] __attribute__ ((aligned (128)));
   uint32_t hash0[24] __attribute__ ((aligned (64)));

@@ -563,6 +563,9 @@ void x16rv2_8way_hash( void* output, const void* input )

                         hash7, vhash );
         break;
      }

+     if ( work_restart[thrid].restart ) return 0;

      size = 64;
   }

@@ -574,6 +577,7 @@ void x16rv2_8way_hash( void* output, const void* input )

   memcpy( output+160, hash5, 32 );
   memcpy( output+192, hash6, 32 );
   memcpy( output+224, hash7, 32 );
+  return 1;
}

int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,

@@ -669,8 +673,7 @@ int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,

                        n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16rv2_8way_hash( hash, vdata );
+     if ( x16rv2_8way_hash( hash, vdata, thr_id ) )
      for ( int i = 0; i < 8; i++ )
      if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
      {
@@ -718,7 +721,7 @@ inline void padtiger512( uint32_t* hash )
   for ( int i = 6; i < 16; i++ ) hash[i] = 0;
}

-void x16rv2_4way_hash( void* output, const void* input )
+int x16rv2_4way_hash( void* output, const void* input, int thrid )
{
   uint32_t hash0[20] __attribute__ ((aligned (64)));
   uint32_t hash1[20] __attribute__ ((aligned (64)));

@@ -1023,12 +1026,16 @@ void x16rv2_4way_hash( void* output, const void* input )

         dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
         break;
      }

+     if ( work_restart[thrid].restart ) return 0;

      size = 64;
   }

   memcpy( output, hash0, 32 );
   memcpy( output+32, hash1, 32 );
   memcpy( output+64, hash2, 32 );
   memcpy( output+96, hash3, 32 );
+  return 1;
}

int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,

@@ -1119,7 +1126,7 @@ int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,

   do
   {
-     x16rv2_4way_hash( hash, vdata );
+     if ( x16rv2_4way_hash( hash, vdata, thr_id ) )
      for ( int i = 0; i < 4; i++ )
      if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
      {

View File

@@ -67,7 +67,7 @@ inline void padtiger512(uint32_t* hash) {
   for (int i = (24/4); i < (64/4); i++) hash[i] = 0;
}

-void x16rv2_hash( void* output, const void* input )
+int x16rv2_hash( void* output, const void* input, int thrid )
{
   uint32_t _ALIGN(128) hash[16];
   x16rv2_context_overlay ctx;

@@ -180,10 +180,14 @@ void x16rv2_hash( void* output, const void* input )

         SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
         break;
      }

+     if ( work_restart[thrid].restart ) return 0;

      in = (void*) hash;
      size = 64;
   }

   memcpy(output, hash, 32);
+  return 1;
}

int scanhash_x16rv2( struct work *work, uint32_t max_nonce,

@@ -221,8 +225,7 @@ int scanhash_x16rv2( struct work *work, uint32_t max_nonce,

   do
   {
      edata[19] = nonce;
-     x16rv2_hash( hash32, edata );
+     if ( x16rv2_hash( hash32, edata, thr_id ) )
      if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
      {
         pdata[19] = bswap_32( nonce );

View File

@@ -30,7 +30,7 @@ union _x21s_8way_context_overlay
typedef union _x21s_8way_context_overlay x21s_8way_context_overlay;

-void x21s_8way_hash( void* output, const void* input )
+int x21s_8way_hash( void* output, const void* input, int thrid )
{
   uint32_t vhash[16*8] __attribute__ ((aligned (128)));
   uint8_t shash[64*8] __attribute__ ((aligned (64)));

@@ -44,7 +44,8 @@ void x21s_8way_hash( void* output, const void* input )

   uint32_t *hash7 = (uint32_t*)( shash+448 );
   x21s_8way_context_overlay ctx;

-  x16r_8way_hash_generic( shash, input );
+  if ( !x16r_8way_hash_generic( shash, input, thrid ) )
+     return 0;

   intrlv_8x32_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
                    hash7 );

@@ -124,6 +125,8 @@ void x21s_8way_hash( void* output, const void* input )

   sha256_8way_init( &ctx.sha256 );
   sha256_8way_update( &ctx.sha256, vhash, 64 );
   sha256_8way_close( &ctx.sha256, output );

+  return 1;
}

int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,

@@ -166,8 +169,7 @@ int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,

                        n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x21s_8way_hash( hash, vdata );
+     if ( x21s_8way_hash( hash, vdata, thr_id ) )
      for ( int lane = 0; lane < 8; lane++ )
      if ( unlikely( hash7[lane] <= Htarg ) )
      {
@@ -215,7 +217,7 @@ union _x21s_4way_context_overlay
typedef union _x21s_4way_context_overlay x21s_4way_context_overlay;

-void x21s_4way_hash( void* output, const void* input )
+int x21s_4way_hash( void* output, const void* input, int thrid )
{
   uint32_t vhash[16*4] __attribute__ ((aligned (64)));
   uint8_t shash[64*4] __attribute__ ((aligned (64)));

@@ -225,7 +227,8 @@ void x21s_4way_hash( void* output, const void* input )

   uint32_t *hash2 = (uint32_t*)( shash+128 );
   uint32_t *hash3 = (uint32_t*)( shash+192 );

-  x16r_4way_hash_generic( shash, input );
+  if ( !x16r_4way_hash_generic( shash, input, thrid ) )
+     return 0;

   intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );

@@ -299,6 +302,8 @@ void x21s_4way_hash( void* output, const void* input )

   dintrlv_4x32( output, output+32, output+64,output+96, vhash, 256 );

#endif

+  return 1;
}

int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,

@@ -337,7 +342,7 @@ int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,

         _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x21s_4way_hash( hash, vdata );
+     if ( x21s_4way_hash( hash, vdata, thr_id ) )
      for ( int i = 0; i < 4; i++ )
      if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
      {

View File

@@ -27,12 +27,13 @@ union _x21s_context_overlay
};
typedef union _x21s_context_overlay x21s_context_overlay;

-void x21s_hash( void* output, const void* input )
+int x21s_hash( void* output, const void* input, int thrid )
{
   uint32_t _ALIGN(128) hash[16];
   x21s_context_overlay ctx;

-  x16r_hash_generic( hash, input );
+  if ( !x16r_hash_generic( hash, input, thrid ) )
+     return 0;

   sph_haval256_5_init( &ctx.haval );
   sph_haval256_5( &ctx.haval, (const void*) hash, 64) ;

@@ -54,6 +55,8 @@ void x21s_hash( void* output, const void* input )

   SHA256_Final( (unsigned char*)hash, &ctx.sha256 );

   memcpy( output, hash, 32 );
+  return 1;
}

int scanhash_x21s( struct work *work, uint32_t max_nonce,

@@ -87,8 +90,7 @@ int scanhash_x21s( struct work *work, uint32_t max_nonce,

   do
   {
      edata[19] = nonce;
-     x21s_hash( hash32, edata );
+     if ( x21s_hash( hash32, edata, thr_id ) )
      if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
      {
         pdata[19] = bswap_32( nonce );

View File

@@ -62,7 +62,7 @@ union _x22i_8way_ctx_overlay
};
typedef union _x22i_8way_ctx_overlay x22i_8way_ctx_overlay;

-void x22i_8way_hash( void *output, const void *input )
+int x22i_8way_hash( void *output, const void *input, int thrid )
{
   uint64_t vhash[8*8] __attribute__ ((aligned (128)));
   uint64_t vhashA[8*8] __attribute__ ((aligned (64)));

@@ -129,6 +129,8 @@ void x22i_8way_hash( void *output, const void *input )

   keccak512_8way_update( &ctx.keccak, vhash, 64 );
   keccak512_8way_close( &ctx.keccak, vhash );

+  if ( work_restart[thrid].restart ) return 0;

   rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
   luffa512_4way_full( &ctx.luffa, vhashA, vhashA, 64 );

@@ -214,6 +216,8 @@ void x22i_8way_hash( void *output, const void *input )

#endif

+  if ( work_restart[thrid].restart ) return 0;

   hamsi512_8way_init( &ctx.hamsi );
   hamsi512_8way_update( &ctx.hamsi, vhash, 64 );
   hamsi512_8way_close( &ctx.hamsi, vhash );

@@ -346,6 +350,8 @@ void x22i_8way_hash( void *output, const void *input )

   sph_tiger (&ctx.tiger, (const void*) hash7, 64);
   sph_tiger_close(&ctx.tiger, (void*) hashA7);

+  if ( work_restart[thrid].restart ) return 0;

   memset( hash0, 0, 64 );
   memset( hash1, 0, 64 );
   memset( hash2, 0, 64 );

@@ -399,6 +405,8 @@ void x22i_8way_hash( void *output, const void *input )

   sha256_8way_init( &ctx.sha256 );
   sha256_8way_update( &ctx.sha256, vhash, 64 );
   sha256_8way_close( &ctx.sha256, output );

+  return 1;
}

int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,

@@ -428,8 +436,7 @@ int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,

                        n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x22i_8way_hash( hash, vdata );
+     if ( x22i_8way_hash( hash, vdata, thr_id ) )
      for ( int lane = 0; lane < 8; lane++ )
      if ( unlikely( ( hashd7[ lane ] <= targ32 ) && !bench ) )
      {

@@ -437,7 +444,7 @@ int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,

         if ( likely( valid_hash( lane_hash, ptarget ) ) )
         {
            pdata[19] = bswap_32( n + lane );
-           submit_lane_solution( work, lane_hash, mythr, lane );
+           submit_solution( work, lane_hash, mythr );
         }
      }
      *noncev = _mm512_add_epi32( *noncev,
@@ -524,7 +531,7 @@ union _x22i_4way_ctx_overlay
};
typedef union _x22i_4way_ctx_overlay x22i_ctx_overlay;

-void x22i_4way_hash( void *output, const void *input )
+int x22i_4way_hash( void *output, const void *input, int thrid )
{
   uint64_t hash0[8*4] __attribute__ ((aligned (64)));
   uint64_t hash1[8*4] __attribute__ ((aligned (64)));

@@ -563,6 +570,8 @@ void x22i_4way_hash( void *output, const void *input )

   keccak512_4way_update( &ctx.keccak, vhash, 64 );
   keccak512_4way_close( &ctx.keccak, vhash );

+  if ( work_restart[thrid].restart ) return false;

   rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
   luffa512_2way_full( &ctx.luffa, vhashA, vhashA, 64 );

@@ -591,6 +600,8 @@ void x22i_4way_hash( void *output, const void *input )

   intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );

+  if ( work_restart[thrid].restart ) return false;

   hamsi512_4way_init( &ctx.hamsi );
   hamsi512_4way_update( &ctx.hamsi, vhash, 64 );
   hamsi512_4way_close( &ctx.hamsi, vhash );

@@ -636,6 +647,8 @@ void x22i_4way_hash( void *output, const void *input )

   sha512_4way_close( &ctx.sha512, vhash );
   dintrlv_4x64_512( &hash0[24], &hash1[24], &hash2[24], &hash3[24], vhash );

+  if ( work_restart[thrid].restart ) return false;

   ComputeSingleSWIFFTX((unsigned char*)hash0, (unsigned char*)hashA0);
   ComputeSingleSWIFFTX((unsigned char*)hash1, (unsigned char*)hashA1);
   ComputeSingleSWIFFTX((unsigned char*)hash2, (unsigned char*)hashA2);

@@ -668,6 +681,8 @@ void x22i_4way_hash( void *output, const void *input )

   sph_tiger (&ctx.tiger, (const void*) hash3, 64);
   sph_tiger_close(&ctx.tiger, (void*) hashA3);

+  if ( work_restart[thrid].restart ) return false;

   memset( hash0, 0, 64 );
   memset( hash1, 0, 64 );
   memset( hash2, 0, 64 );

@@ -700,8 +715,9 @@ void x22i_4way_hash( void *output, const void *input )

   sha256_4way_init( &ctx.sha256 );
   sha256_4way_update( &ctx.sha256, vhash, 64 );
   sha256_4way_close( &ctx.sha256, output );

+  return 1;
}

int scanhash_x22i_4way( struct work* work, uint32_t max_nonce,
                        uint64_t *hashes_done, struct thr_info *mythr )

@@ -729,8 +745,7 @@ int scanhash_x22i_4way( struct work* work, uint32_t max_nonce,

         _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x22i_4way_hash( hash, vdata );
+     if ( x22i_4way_hash( hash, vdata, thr_id ) )
      for ( int lane = 0; lane < 4; lane++ )
      if ( unlikely( hashd7[ lane ] <= targ32 && !bench ) )
      {

@@ -738,7 +753,7 @@ int scanhash_x22i_4way( struct work* work, uint32_t max_nonce,

         if ( valid_hash( lane_hash, ptarget ) )
         {
            pdata[19] = bswap_32( n + lane );
-           submit_lane_solution( work, lane_hash, mythr, lane );
+           submit_solution( work, lane_hash, mythr );
         }
      }
      *noncev = _mm256_add_epi32( *noncev,


@@ -16,19 +16,19 @@ bool register_x22i_algo( algo_gate_t* gate );
#if defined(X22I_8WAY) #if defined(X22I_8WAY)
void x22i_8way_hash( void *state, const void *input ); int x22i_8way_hash( void *state, const void *input, int thrid );
int scanhash_x22i_8way( struct work *work, uint32_t max_nonce, int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr ); uint64_t *hashes_done, struct thr_info *mythr );
#elif defined(X22I_4WAY) #elif defined(X22I_4WAY)
void x22i_4way_hash( void *state, const void *input ); int x22i_4way_hash( void *state, const void *input, int thrid );
int scanhash_x22i_4way( struct work *work, uint32_t max_nonce, int scanhash_x22i_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr ); uint64_t *hashes_done, struct thr_info *mythr );
#else #else
void x22i_hash( void *state, const void *input ); int x22i_hash( void *state, const void *input, int thrid );
int scanhash_x22i( struct work *work, uint32_t max_nonce, int scanhash_x22i( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr ); uint64_t *hashes_done, struct thr_info *mythr );
@@ -44,19 +44,19 @@ bool register_x25i_algo( algo_gate_t* gate );
#if defined(X25X_8WAY) #if defined(X25X_8WAY)
void x25x_8way_hash( void *state, const void *input ); int x25x_8way_hash( void *state, const void *input, int thrid );
int scanhash_x25x_8way( struct work *work, uint32_t max_nonce, int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr ); uint64_t *hashes_done, struct thr_info *mythr );
#elif defined(X25X_4WAY) #elif defined(X25X_4WAY)
void x25x_4way_hash( void *state, const void *input ); int x25x_4way_hash( void *state, const void *input, int thrid );
int scanhash_x25x_4way( struct work *work, uint32_t max_nonce, int scanhash_x25x_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr ); uint64_t *hashes_done, struct thr_info *mythr );
#else #else
void x25x_hash( void *state, const void *input ); int x25x_hash( void *state, const void *input, int thrid );
int scanhash_x25x( struct work *work, uint32_t max_nonce, int scanhash_x25x( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr ); uint64_t *hashes_done, struct thr_info *mythr );
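The prototype changes above, together with the x22i/x25x hunks around them, all implement one convention: each hash routine now takes the calling thread's id, polls work_restart[] between the heavier stages of the chain, and returns 0 if a restart was requested (1 if the hash ran to completion), so the scanhash loops only examine lanes when the hash actually finished. A minimal sketch of that shape, with stage_a/stage_b standing in for the real blake-through-sha256 stages (illustrative names, not from the source):

   int xnn_hash( void *output, const void *input, int thrid )
   {
      stage_a( output, input );                      // first part of the chain
      if ( work_restart[thrid].restart ) return 0;   // stale work, stop early
      stage_b( output, output );                     // rest of the chain
      return 1;                                      // completed normally
   }

   // scanhash side: only test the result if the hash completed.
   // if ( xnn_hash( hash, vdata, thr_id ) )
   //    if ( valid_hash( hash, ptarget ) && !bench )
   //       submit_solution( work, hash, mythr );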


@@ -59,7 +59,7 @@ union _x22i_context_overlay
}; };
typedef union _x22i_context_overlay x22i_context_overlay; typedef union _x22i_context_overlay x22i_context_overlay;
void x22i_hash( void *output, const void *input ) int x22i_hash( void *output, const void *input, int thrid )
{ {
unsigned char hash[64 * 4] __attribute__((aligned(64))) = {0}; unsigned char hash[64 * 4] __attribute__((aligned(64))) = {0};
unsigned char hash2[65] __attribute__((aligned(64))) = {0}; unsigned char hash2[65] __attribute__((aligned(64))) = {0};
@@ -95,6 +95,8 @@ void x22i_hash( void *output, const void *input )
sph_keccak512(&ctx.keccak, (const void*) hash, 64); sph_keccak512(&ctx.keccak, (const void*) hash, 64);
sph_keccak512_close(&ctx.keccak, hash); sph_keccak512_close(&ctx.keccak, hash);
if ( work_restart[thrid].restart ) return 0;
init_luffa( &ctx.luffa, 512 ); init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash, update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
(const BitSequence*)hash, 64 ); (const BitSequence*)hash, 64 );
@@ -121,6 +123,8 @@ void x22i_hash( void *output, const void *input )
sph_echo512_close( &ctx.echo, hash ); sph_echo512_close( &ctx.echo, hash );
#endif #endif
if ( work_restart[thrid].restart ) return 0;
sph_hamsi512_init(&ctx.hamsi); sph_hamsi512_init(&ctx.hamsi);
sph_hamsi512(&ctx.hamsi, (const void*) hash, 64); sph_hamsi512(&ctx.hamsi, (const void*) hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash); sph_hamsi512_close(&ctx.hamsi, hash);
@@ -143,6 +147,8 @@ void x22i_hash( void *output, const void *input )
ComputeSingleSWIFFTX((unsigned char*)hash, (unsigned char*)hash2); ComputeSingleSWIFFTX((unsigned char*)hash, (unsigned char*)hash2);
if ( work_restart[thrid].restart ) return 0;
memset(hash, 0, 64); memset(hash, 0, 64);
sph_haval256_5_init(&ctx.haval); sph_haval256_5_init(&ctx.haval);
sph_haval256_5(&ctx.haval,(const void*) hash2, 64); sph_haval256_5(&ctx.haval,(const void*) hash2, 64);
@@ -165,6 +171,8 @@ void x22i_hash( void *output, const void *input )
SHA256_Final( (unsigned char*) hash, &ctx.sha256 ); SHA256_Final( (unsigned char*) hash, &ctx.sha256 );
memcpy(output, hash, 32); memcpy(output, hash, 32);
return 1;
} }
int scanhash_x22i( struct work *work, uint32_t max_nonce, int scanhash_x22i( struct work *work, uint32_t max_nonce,
@@ -188,7 +196,7 @@ int scanhash_x22i( struct work *work, uint32_t max_nonce,
do do
{ {
edata[19] = n; edata[19] = n;
x22i_hash( hash64, edata ); if ( x22i_hash( hash64, edata, thr_id ) )
if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) ) if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
{ {
pdata[19] = bswap_32( n ); pdata[19] = bswap_32( n );


@@ -94,7 +94,7 @@ union _x25x_8way_ctx_overlay
}; };
typedef union _x25x_8way_ctx_overlay x25x_8way_ctx_overlay; typedef union _x25x_8way_ctx_overlay x25x_8way_ctx_overlay;
void x25x_8way_hash( void *output, const void *input ) int x25x_8way_hash( void *output, const void *input, int thrid )
{ {
uint64_t vhash[8*8] __attribute__ ((aligned (128))); uint64_t vhash[8*8] __attribute__ ((aligned (128)));
unsigned char hash0[25][64] __attribute__((aligned(64))) = {0}; unsigned char hash0[25][64] __attribute__((aligned(64))) = {0};
@@ -186,6 +186,8 @@ void x25x_8way_hash( void *output, const void *input )
dintrlv_8x64_512( hash0[5], hash1[5], hash2[5], hash3[5], dintrlv_8x64_512( hash0[5], hash1[5], hash2[5], hash3[5],
hash4[5], hash5[5], hash6[5], hash7[5], vhash ); hash4[5], hash5[5], hash6[5], hash7[5], vhash );
if ( work_restart[thrid].restart ) return 0;
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 ); rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
luffa_4way_init( &ctx.luffa, 512 ); luffa_4way_init( &ctx.luffa, 512 );
@@ -261,6 +263,7 @@ void x25x_8way_hash( void *output, const void *input )
intrlv_8x64_512( vhash, hash0[10], hash1[10], hash2[10], hash3[10], intrlv_8x64_512( vhash, hash0[10], hash1[10], hash2[10], hash3[10],
hash4[10], hash5[10], hash6[10], hash7[10] ); hash4[10], hash5[10], hash6[10], hash7[10] );
#else #else
init_echo( &ctx.echo, 512 ); init_echo( &ctx.echo, 512 );
@@ -292,6 +295,8 @@ void x25x_8way_hash( void *output, const void *input )
#endif #endif
if ( work_restart[thrid].restart ) return 0;
hamsi512_8way_init( &ctx.hamsi ); hamsi512_8way_init( &ctx.hamsi );
hamsi512_8way_update( &ctx.hamsi, vhash, 64 ); hamsi512_8way_update( &ctx.hamsi, vhash, 64 );
hamsi512_8way_close( &ctx.hamsi, vhash ); hamsi512_8way_close( &ctx.hamsi, vhash );
@@ -407,6 +412,8 @@ void x25x_8way_hash( void *output, const void *input )
sph_tiger (&ctx.tiger, (const void*) hash7[17], 64); sph_tiger (&ctx.tiger, (const void*) hash7[17], 64);
sph_tiger_close(&ctx.tiger, (void*) hash7[18]); sph_tiger_close(&ctx.tiger, (void*) hash7[18]);
if ( work_restart[thrid].restart ) return 0;
intrlv_2x256( vhash, hash0[18], hash1[18], 256 ); intrlv_2x256( vhash, hash0[18], hash1[18], 256 );
LYRA2X_2WAY( vhash, 32, vhash, 32, 1, 4, 4 ); LYRA2X_2WAY( vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash0[19], hash1[19], vhash, 256 ); dintrlv_2x256( hash0[19], hash1[19], vhash, 256 );
@@ -468,6 +475,8 @@ void x25x_8way_hash( void *output, const void *input )
laneHash(512, (const BitSequence*)hash6[22], 512, (BitSequence*)hash6[23]); laneHash(512, (const BitSequence*)hash6[22], 512, (BitSequence*)hash6[23]);
laneHash(512, (const BitSequence*)hash7[22], 512, (BitSequence*)hash7[23]); laneHash(512, (const BitSequence*)hash7[22], 512, (BitSequence*)hash7[23]);
if ( work_restart[thrid].restart ) return 0;
x25x_shuffle( hash0 ); x25x_shuffle( hash0 );
x25x_shuffle( hash1 ); x25x_shuffle( hash1 );
x25x_shuffle( hash2 ); x25x_shuffle( hash2 );
@@ -528,6 +537,8 @@ void x25x_8way_hash( void *output, const void *input )
blake2s_8way_init( &ctx.blake2s, 32 ); blake2s_8way_init( &ctx.blake2s, 32 );
blake2s_8way_full_blocks( &ctx.blake2s, output, vhashX, 64*24 ); blake2s_8way_full_blocks( &ctx.blake2s, output, vhashX, 64*24 );
return 1;
} }
int scanhash_x25x_8way( struct work *work, uint32_t max_nonce, int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
@@ -557,7 +568,7 @@ int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev ); n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do do
{ {
x25x_8way_hash( hash, vdata ); if ( x25x_8way_hash( hash, vdata, thr_id ) )
for ( int lane = 0; lane < 8; lane++ ) for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( ( hashd7[ lane ] <= targ32 ) && !bench ) ) if ( unlikely( ( hashd7[ lane ] <= targ32 ) && !bench ) )
@@ -566,7 +577,7 @@ int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
if ( likely( valid_hash( lane_hash, ptarget ) ) ) if ( likely( valid_hash( lane_hash, ptarget ) ) )
{ {
pdata[19] = bswap_32( n + lane ); pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane ); submit_solution( work, lane_hash, mythr );
} }
} }
*noncev = _mm512_add_epi32( *noncev, *noncev = _mm512_add_epi32( *noncev,
@@ -654,7 +665,7 @@ union _x25x_4way_ctx_overlay
}; };
typedef union _x25x_4way_ctx_overlay x25x_4way_ctx_overlay; typedef union _x25x_4way_ctx_overlay x25x_4way_ctx_overlay;
void x25x_4way_hash( void *output, const void *input ) int x25x_4way_hash( void *output, const void *input, int thrid )
{ {
uint64_t vhash[8*4] __attribute__ ((aligned (128))); uint64_t vhash[8*4] __attribute__ ((aligned (128)));
unsigned char hash0[25][64] __attribute__((aligned(64))) = {0}; unsigned char hash0[25][64] __attribute__((aligned(64))) = {0};
@@ -686,6 +697,8 @@ void x25x_4way_hash( void *output, const void *input )
jh512_4way_close( &ctx.jh, vhash ); jh512_4way_close( &ctx.jh, vhash );
dintrlv_4x64_512( hash0[4], hash1[4], hash2[4], hash3[4], vhash ); dintrlv_4x64_512( hash0[4], hash1[4], hash2[4], hash3[4], vhash );
if ( work_restart[thrid].restart ) return 0;
keccak512_4way_init( &ctx.keccak ); keccak512_4way_init( &ctx.keccak );
keccak512_4way_update( &ctx.keccak, vhash, 64 ); keccak512_4way_update( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash ); keccak512_4way_close( &ctx.keccak, vhash );
@@ -738,6 +751,8 @@ void x25x_4way_hash( void *output, const void *input )
intrlv_4x64_512( vhash, hash0[10], hash1[10], hash2[10], hash3[10] ); intrlv_4x64_512( vhash, hash0[10], hash1[10], hash2[10], hash3[10] );
if ( work_restart[thrid].restart ) return 0;
hamsi512_4way_init( &ctx.hamsi ); hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way_update( &ctx.hamsi, vhash, 64 ); hamsi512_4way_update( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash ); hamsi512_4way_close( &ctx.hamsi, vhash );
@@ -819,6 +834,8 @@ void x25x_4way_hash( void *output, const void *input )
LYRA2RE( (void*)hash3[19], 32, (const void*)hash3[18], 32, LYRA2RE( (void*)hash3[19], 32, (const void*)hash3[18], 32,
(const void*)hash3[18], 32, 1, 4, 4 ); (const void*)hash3[18], 32, 1, 4, 4 );
if ( work_restart[thrid].restart ) return 0;
sph_gost512_init(&ctx.gost); sph_gost512_init(&ctx.gost);
sph_gost512 (&ctx.gost, (const void*) hash0[19], 64); sph_gost512 (&ctx.gost, (const void*) hash0[19], 64);
sph_gost512_close(&ctx.gost, (void*) hash0[20]); sph_gost512_close(&ctx.gost, (void*) hash0[20]);
@@ -850,6 +867,8 @@ void x25x_4way_hash( void *output, const void *input )
laneHash(512, (const BitSequence*)hash2[22], 512, (BitSequence*)hash2[23]); laneHash(512, (const BitSequence*)hash2[22], 512, (BitSequence*)hash2[23]);
laneHash(512, (const BitSequence*)hash3[22], 512, (BitSequence*)hash3[23]); laneHash(512, (const BitSequence*)hash3[22], 512, (BitSequence*)hash3[23]);
if ( work_restart[thrid].restart ) return 0;
x25x_shuffle( hash0 ); x25x_shuffle( hash0 );
x25x_shuffle( hash1 ); x25x_shuffle( hash1 );
x25x_shuffle( hash2 ); x25x_shuffle( hash2 );
@@ -882,6 +901,8 @@ void x25x_4way_hash( void *output, const void *input )
blake2s_4way_init( &ctx.blake2s, 32 ); blake2s_4way_init( &ctx.blake2s, 32 );
blake2s_4way_full_blocks( &ctx.blake2s, output, vhashX, 64*24 ); blake2s_4way_full_blocks( &ctx.blake2s, output, vhashX, 64*24 );
return 1;
} }
int scanhash_x25x_4way( struct work* work, uint32_t max_nonce, int scanhash_x25x_4way( struct work* work, uint32_t max_nonce,
@@ -910,8 +931,7 @@ int scanhash_x25x_4way( struct work* work, uint32_t max_nonce,
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev ); _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do do
{ {
x25x_4way_hash( hash, vdata ); if ( x25x_4way_hash( hash, vdata, thr_id ) )
for ( int lane = 0; lane < 4; lane++ ) for ( int lane = 0; lane < 4; lane++ )
if ( unlikely( hashd7[ lane ] <= targ32 && !bench ) ) if ( unlikely( hashd7[ lane ] <= targ32 && !bench ) )
{ {
@@ -919,7 +939,7 @@ int scanhash_x25x_4way( struct work* work, uint32_t max_nonce,
if ( valid_hash( lane_hash, ptarget ) ) if ( valid_hash( lane_hash, ptarget ) )
{ {
pdata[19] = bswap_32( n + lane ); pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane ); submit_solution( work, lane_hash, mythr );
} }
} }
*noncev = _mm256_add_epi32( *noncev, *noncev = _mm256_add_epi32( *noncev,


@@ -64,7 +64,7 @@ union _x25x_context_overlay
}; };
typedef union _x25x_context_overlay x25x_context_overlay; typedef union _x25x_context_overlay x25x_context_overlay;
void x25x_hash( void *output, const void *input ) int x25x_hash( void *output, const void *input, int thrid )
{ {
unsigned char hash[25][64] __attribute__((aligned(64))) = {0}; unsigned char hash[25][64] __attribute__((aligned(64))) = {0};
x25x_context_overlay ctx; x25x_context_overlay ctx;
@@ -99,6 +99,8 @@ void x25x_hash( void *output, const void *input )
sph_keccak512(&ctx.keccak, (const void*) &hash[4], 64); sph_keccak512(&ctx.keccak, (const void*) &hash[4], 64);
sph_keccak512_close(&ctx.keccak, &hash[5]); sph_keccak512_close(&ctx.keccak, &hash[5]);
if ( work_restart[thrid].restart ) return 0;
init_luffa( &ctx.luffa, 512 ); init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)&hash[6], update_and_final_luffa( &ctx.luffa, (BitSequence*)&hash[6],
(const BitSequence*)&hash[5], 64 ); (const BitSequence*)&hash[5], 64 );
@@ -125,6 +127,8 @@ void x25x_hash( void *output, const void *input )
sph_echo512_close( &ctx.echo, &hash[10] ); sph_echo512_close( &ctx.echo, &hash[10] );
#endif #endif
if ( work_restart[thrid].restart ) return 0;
sph_hamsi512_init(&ctx.hamsi); sph_hamsi512_init(&ctx.hamsi);
sph_hamsi512(&ctx.hamsi, (const void*) &hash[10], 64); sph_hamsi512(&ctx.hamsi, (const void*) &hash[10], 64);
sph_hamsi512_close(&ctx.hamsi, &hash[11]); sph_hamsi512_close(&ctx.hamsi, &hash[11]);
@@ -151,6 +155,8 @@ void x25x_hash( void *output, const void *input )
sph_haval256_5(&ctx.haval,(const void*) &hash[16], 64); sph_haval256_5(&ctx.haval,(const void*) &hash[16], 64);
sph_haval256_5_close(&ctx.haval,&hash[17]); sph_haval256_5_close(&ctx.haval,&hash[17]);
if ( work_restart[thrid].restart ) return 0;
sph_tiger_init(&ctx.tiger); sph_tiger_init(&ctx.tiger);
sph_tiger (&ctx.tiger, (const void*) &hash[17], 64); sph_tiger (&ctx.tiger, (const void*) &hash[17], 64);
sph_tiger_close(&ctx.tiger, (void*) &hash[18]); sph_tiger_close(&ctx.tiger, (void*) &hash[18]);
@@ -199,6 +205,8 @@ void x25x_hash( void *output, const void *input )
blake2s_simple( (uint8_t*)&hash[24], (const void*)(&hash[0]), 64 * 24 ); blake2s_simple( (uint8_t*)&hash[24], (const void*)(&hash[0]), 64 * 24 );
memcpy(output, &hash[24], 32); memcpy(output, &hash[24], 32);
return 1;
} }
int scanhash_x25x( struct work *work, uint32_t max_nonce, int scanhash_x25x( struct work *work, uint32_t max_nonce,
@@ -222,7 +230,7 @@ int scanhash_x25x( struct work *work, uint32_t max_nonce,
do do
{ {
edata[19] = n; edata[19] = n;
x25x_hash( hash64, edata ); if ( x25x_hash( hash64, edata, thr_id ) )
if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) ) if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
{ {
pdata[19] = bswap_32( n ); pdata[19] = bswap_32( n );
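Throughout these scanhash loops submit_lane_solution() is replaced by plain submit_solution(); the per-lane variant survives later in cpu-miner.c only as a deprecated wrapper, because the submission log no longer reports thread and lane but the share difficulty, block and job instead. Per lane, the pattern in the hunks above reduces to:

   if ( valid_hash( lane_hash, ptarget ) )
   {
      pdata[19] = bswap_32( n + lane );
      submit_solution( work, lane_hash, mythr );   // lane id no longer passed
   }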


@@ -79,7 +79,7 @@ int main(int argc, const char * const *argv)
for (i = 0; i < sizeof(src); i++) for (i = 0; i < sizeof(src); i++)
src.u8[i] = i * 3; src.u8[i] = i * 3;
if (yespower_tls(src.u8, sizeof(src), &params, &dst)) { if (!yespower_tls(src.u8, sizeof(src), &params, &dst)) {
puts("FAILED"); puts("FAILED");
return 1; return 1;
} }


@@ -53,7 +53,7 @@ int scanhash_yespower_r8g( struct work *work, uint32_t max_nonce,
do { do {
yespower_tls( (unsigned char *)endiandata, params.perslen, yespower_tls( (unsigned char *)endiandata, params.perslen,
&params, (yespower_binary_t*)hash ); &params, (yespower_binary_t*)hash, thr_id );
if unlikely( valid_hash( hash, ptarget ) && !opt_benchmark ) if unlikely( valid_hash( hash, ptarget ) && !opt_benchmark )
{ {


@@ -194,11 +194,13 @@ static int free_region(yespower_region_t *region)
#define restrict #define restrict
#endif #endif
/*
#ifdef __GNUC__ #ifdef __GNUC__
#define unlikely(exp) __builtin_expect(exp, 0) #define unlikely(exp) __builtin_expect(exp, 0)
#else #else
#define unlikely(exp) (exp) #define unlikely(exp) (exp)
#endif #endif
*/
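This local unlikely() macro (and the identical one in the other yespower source below) is commented out rather than deleted, most plausibly because yespower.h now includes miner.h, which brings the miner's own likely/unlikely helpers into these translation units; a second unconditional #define would risk a redefinition warning or clash. If both ever had to coexist, the usual workaround would be a guarded form (hypothetical, not part of this change):

   #ifndef unlikely
   #ifdef __GNUC__
   #define unlikely(exp) __builtin_expect(exp, 0)
   #else
   #define unlikely(exp) (exp)
   #endif
   #endif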
#ifdef __SSE__ #ifdef __SSE__
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint)); #define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
@@ -1113,7 +1115,7 @@ static void smix(uint8_t *B, size_t r, uint32_t N,
int yespower_b2b(yespower_local_t *local, int yespower_b2b(yespower_local_t *local,
const uint8_t *src, size_t srclen, const uint8_t *src, size_t srclen,
const yespower_params_t *params, const yespower_params_t *params,
yespower_binary_t *dst) yespower_binary_t *dst, int thrid )
{ {
uint32_t N = params->N; uint32_t N = params->N;
uint32_t r = params->r; uint32_t r = params->r;
@@ -1168,17 +1170,25 @@ int yespower_b2b(yespower_local_t *local,
srclen = 0; srclen = 0;
} }
if ( work_restart[thrid].restart ) return false;
pbkdf2_blake2b_yp(init_hash, sizeof(init_hash), src, srclen, 1, B, 128); pbkdf2_blake2b_yp(init_hash, sizeof(init_hash), src, srclen, 1, B, 128);
if ( work_restart[thrid].restart ) return false;
memcpy(init_hash, B, sizeof(init_hash)); memcpy(init_hash, B, sizeof(init_hash));
smix_1_0(B, r, N, V, XY, &ctx); smix_1_0(B, r, N, V, XY, &ctx);
if ( work_restart[thrid].restart ) return false;
hmac_blake2b_yp_hash((uint8_t *)dst, B + B_size - 64, 64, init_hash, sizeof(init_hash)); hmac_blake2b_yp_hash((uint8_t *)dst, B + B_size - 64, 64, init_hash, sizeof(init_hash));
/* Success! */ /* Success! */
return 0; return 1;
fail: fail:
memset(dst, 0xff, sizeof(*dst)); memset(dst, 0xff, sizeof(*dst));
return -1; return 0;
} }
/** /**
@@ -1189,7 +1199,7 @@ fail:
* Return 0 on success; or -1 on error. * Return 0 on success; or -1 on error.
*/ */
int yespower_b2b_tls(const uint8_t *src, size_t srclen, int yespower_b2b_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst) const yespower_params_t *params, yespower_binary_t *dst, int thrid )
{ {
static __thread int initialized = 0; static __thread int initialized = 0;
static __thread yespower_local_t local; static __thread yespower_local_t local;
@@ -1199,7 +1209,7 @@ int yespower_b2b_tls(const uint8_t *src, size_t srclen,
initialized = 1; initialized = 1;
} }
return yespower_b2b(&local, src, srclen, params, dst); return yespower_b2b(&local, src, srclen, params, dst, thrid);
} }
/* /*
int yespower_init_local(yespower_local_t *local) int yespower_init_local(yespower_local_t *local)


@@ -34,9 +34,10 @@ static yespower_params_t yespower_params;
// YESPOWER // YESPOWER
void yespower_hash( const char *input, char *output, uint32_t len ) int yespower_hash( const char *input, char *output, uint32_t len, int thrid )
{ {
yespower_tls( input, len, &yespower_params, (yespower_binary_t*)output ); return yespower_tls( input, len, &yespower_params,
(yespower_binary_t*)output, thrid );
} }
int scanhash_yespower( struct work *work, uint32_t max_nonce, int scanhash_yespower( struct work *work, uint32_t max_nonce,
@@ -55,7 +56,7 @@ int scanhash_yespower( struct work *work, uint32_t max_nonce,
be32enc( &endiandata[k], pdata[k] ); be32enc( &endiandata[k], pdata[k] );
endiandata[19] = n; endiandata[19] = n;
do { do {
yespower_hash( (char*)endiandata, (char*)vhash, 80 ); if ( yespower_hash( (char*)endiandata, (char*)vhash, 80, thr_id ) )
if unlikely( valid_hash( vhash, ptarget ) && !opt_benchmark ) if unlikely( valid_hash( vhash, ptarget ) && !opt_benchmark )
{ {
be32enc( pdata+19, n ); be32enc( pdata+19, n );
@@ -70,9 +71,9 @@ int scanhash_yespower( struct work *work, uint32_t max_nonce,
// YESPOWER-B2B // YESPOWER-B2B
void yespower_b2b_hash( const char *input, char *output, uint32_t len ) int yespower_b2b_hash( const char *input, char *output, uint32_t len, int thrid )
{ {
yespower_b2b_tls( input, len, &yespower_params, (yespower_binary_t*)output ); return yespower_b2b_tls( input, len, &yespower_params, (yespower_binary_t*)output, thrid );
} }
int scanhash_yespower_b2b( struct work *work, uint32_t max_nonce, int scanhash_yespower_b2b( struct work *work, uint32_t max_nonce,
@@ -91,7 +92,7 @@ int scanhash_yespower_b2b( struct work *work, uint32_t max_nonce,
be32enc( &endiandata[k], pdata[k] ); be32enc( &endiandata[k], pdata[k] );
endiandata[19] = n; endiandata[19] = n;
do { do {
yespower_b2b_hash( (char*) endiandata, (char*) vhash, 80 ); if (yespower_b2b_hash( (char*) endiandata, (char*) vhash, 80, thr_id ) )
if unlikely( valid_hash( vhash, ptarget ) && !opt_benchmark ) if unlikely( valid_hash( vhash, ptarget ) && !opt_benchmark )
{ {
be32enc( pdata+19, n ); be32enc( pdata+19, n );


@@ -107,11 +107,13 @@
#define restrict #define restrict
#endif #endif
/*
#ifdef __GNUC__ #ifdef __GNUC__
#define unlikely(exp) __builtin_expect(exp, 0) #define unlikely(exp) __builtin_expect(exp, 0)
#else #else
#define unlikely(exp) (exp) #define unlikely(exp) (exp)
#endif #endif
*/
#ifdef __SSE__ #ifdef __SSE__
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint)); #define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
@@ -1023,7 +1025,7 @@ static void smix(uint8_t *B, size_t r, uint32_t N,
int yespower(yespower_local_t *local, int yespower(yespower_local_t *local,
const uint8_t *src, size_t srclen, const uint8_t *src, size_t srclen,
const yespower_params_t *params, const yespower_params_t *params,
yespower_binary_t *dst) yespower_binary_t *dst, int thrid )
{ {
yespower_version_t version = params->version; yespower_version_t version = params->version;
uint32_t N = params->N; uint32_t N = params->N;
@@ -1077,14 +1079,23 @@ int yespower(yespower_local_t *local,
if (version == YESPOWER_0_5) { if (version == YESPOWER_0_5) {
PBKDF2_SHA256(sha256, sizeof(sha256), src, srclen, 1, PBKDF2_SHA256(sha256, sizeof(sha256), src, srclen, 1,
B, B_size); B, B_size);
if ( work_restart[thrid].restart ) return false;
memcpy(sha256, B, sizeof(sha256)); memcpy(sha256, B, sizeof(sha256));
smix(B, r, N, V, XY, &ctx); smix(B, r, N, V, XY, &ctx);
if ( work_restart[thrid].restart ) return false;
PBKDF2_SHA256(sha256, sizeof(sha256), B, B_size, 1, PBKDF2_SHA256(sha256, sizeof(sha256), B, B_size, 1,
(uint8_t *)dst, sizeof(*dst)); (uint8_t *)dst, sizeof(*dst));
if (pers) { if (pers) {
HMAC_SHA256_Buf(dst, sizeof(*dst), pers, perslen, HMAC_SHA256_Buf(dst, sizeof(*dst), pers, perslen,
sha256); sha256);
if ( work_restart[thrid].restart ) return false;
SHA256_Buf(sha256, sizeof(sha256), (uint8_t *)dst); SHA256_Buf(sha256, sizeof(sha256), (uint8_t *)dst);
} }
} else { } else {
@@ -1106,7 +1117,7 @@ int yespower(yespower_local_t *local,
} }
/* Success! */ /* Success! */
return 0; return 1;
} }
/** /**
@@ -1117,7 +1128,7 @@ int yespower(yespower_local_t *local,
* Return 0 on success; or -1 on error. * Return 0 on success; or -1 on error.
*/ */
int yespower_tls(const uint8_t *src, size_t srclen, int yespower_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst) const yespower_params_t *params, yespower_binary_t *dst, int thrid )
{ {
static __thread int initialized = 0; static __thread int initialized = 0;
static __thread yespower_local_t local; static __thread yespower_local_t local;
@@ -1128,7 +1139,7 @@ int yespower_tls(const uint8_t *src, size_t srclen,
initialized = 1; initialized = 1;
} }
return yespower(&local, src, srclen, params, dst); return yespower( &local, src, srclen, params, dst, thrid );
} }
int yespower_init_local(yespower_local_t *local) int yespower_init_local(yespower_local_t *local)


@@ -32,6 +32,7 @@
#include <stdint.h> #include <stdint.h>
#include <stdlib.h> /* for size_t */ #include <stdlib.h> /* for size_t */
#include "miner.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
@@ -109,11 +110,11 @@ extern int yespower_free_local(yespower_local_t *local);
*/ */
extern int yespower(yespower_local_t *local, extern int yespower(yespower_local_t *local,
const uint8_t *src, size_t srclen, const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst); const yespower_params_t *params, yespower_binary_t *dst, int thrid);
extern int yespower_b2b(yespower_local_t *local, extern int yespower_b2b(yespower_local_t *local,
const uint8_t *src, size_t srclen, const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst); const yespower_params_t *params, yespower_binary_t *dst, int thrid );
/** /**
* yespower_tls(src, srclen, params, dst): * yespower_tls(src, srclen, params, dst):
@@ -125,10 +126,10 @@ extern int yespower_b2b(yespower_local_t *local,
* MT-safe as long as dst is local to the thread. * MT-safe as long as dst is local to the thread.
*/ */
extern int yespower_tls(const uint8_t *src, size_t srclen, extern int yespower_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst); const yespower_params_t *params, yespower_binary_t *dst, int thr_id);
extern int yespower_b2b_tls(const uint8_t *src, size_t srclen, extern int yespower_b2b_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst); const yespower_params_t *params, yespower_binary_t *dst, int thr_id);
#ifdef __cplusplus #ifdef __cplusplus
} }
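The net effect of the yespower changes in this range: yespower(), yespower_b2b() and their *_tls() wrappers now take the calling thread's id so they can poll work_restart[] between the PBKDF2 and smix phases, and the return convention is inverted from 0-on-success/-1-on-error to 1 on success and 0 on failure or restart. That inversion is why the self-test shown earlier now treats a zero return as FAILED, and why the miner's wrappers test the return value. A sketch of a caller under the new convention (variable names borrowed from the scanhash code above):

   // Returns 1 when the hash completed; 0 means failure or work restart.
   if ( yespower_tls( (uint8_t*)endiandata, 80, &yespower_params,
                      (yespower_binary_t*)vhash, thr_id ) )
   {
      if ( valid_hash( vhash, ptarget ) && !opt_benchmark )
      {
         be32enc( pdata+19, n );
         submit_solution( work, vhash, mythr );
      }
   }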

configure

@@ -1,6 +1,6 @@
#! /bin/sh #! /bin/sh
# Guess values for system-dependent variables and create Makefiles. # Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.12.4.3. # Generated by GNU Autoconf 2.69 for cpuminer-opt 3.12.6.1.
# #
# #
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -577,8 +577,8 @@ MAKEFLAGS=
# Identity of this package. # Identity of this package.
PACKAGE_NAME='cpuminer-opt' PACKAGE_NAME='cpuminer-opt'
PACKAGE_TARNAME='cpuminer-opt' PACKAGE_TARNAME='cpuminer-opt'
PACKAGE_VERSION='3.12.4.3' PACKAGE_VERSION='3.12.6.1'
PACKAGE_STRING='cpuminer-opt 3.12.4.3' PACKAGE_STRING='cpuminer-opt 3.12.6.1'
PACKAGE_BUGREPORT='' PACKAGE_BUGREPORT=''
PACKAGE_URL='' PACKAGE_URL=''
@@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing. # Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh. # This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF cat <<_ACEOF
\`configure' configures cpuminer-opt 3.12.4.3 to adapt to many kinds of systems. \`configure' configures cpuminer-opt 3.12.6.1 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]... Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1404,7 +1404,7 @@ fi
if test -n "$ac_init_help"; then if test -n "$ac_init_help"; then
case $ac_init_help in case $ac_init_help in
short | recursive ) echo "Configuration of cpuminer-opt 3.12.4.3:";; short | recursive ) echo "Configuration of cpuminer-opt 3.12.6.1:";;
esac esac
cat <<\_ACEOF cat <<\_ACEOF
@@ -1509,7 +1509,7 @@ fi
test -n "$ac_init_help" && exit $ac_status test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then if $ac_init_version; then
cat <<\_ACEOF cat <<\_ACEOF
cpuminer-opt configure 3.12.4.3 cpuminer-opt configure 3.12.6.1
generated by GNU Autoconf 2.69 generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc. Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake. running configure, to aid debugging if configure makes a mistake.
It was created by cpuminer-opt $as_me 3.12.4.3, which was It was created by cpuminer-opt $as_me 3.12.6.1, which was
generated by GNU Autoconf 2.69. Invocation command line was generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@ $ $0 $@
@@ -2993,7 +2993,7 @@ fi
# Define the identity of the package. # Define the identity of the package.
PACKAGE='cpuminer-opt' PACKAGE='cpuminer-opt'
VERSION='3.12.4.3' VERSION='3.12.6.1'
cat >>confdefs.h <<_ACEOF cat >>confdefs.h <<_ACEOF
@@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their # report actual input values of CONFIG_FILES etc. instead of their
# values after options handling. # values after options handling.
ac_log=" ac_log="
This file was extended by cpuminer-opt $as_me 3.12.4.3, which was This file was extended by cpuminer-opt $as_me 3.12.6.1, which was
generated by GNU Autoconf 2.69. Invocation command line was generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES CONFIG_FILES = $CONFIG_FILES
@@ -6756,7 +6756,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\ ac_cs_version="\\
cpuminer-opt config.status 3.12.4.3 cpuminer-opt config.status 3.12.6.1
configured by $0, generated by GNU Autoconf 2.69, configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\" with options \\"\$ac_cs_config\\"


@@ -1,4 +1,4 @@
AC_INIT([cpuminer-opt], [3.12.4.3]) AC_INIT([cpuminer-opt], [3.12.6.1])
AC_PREREQ([2.59c]) AC_PREREQ([2.59c])
AC_CANONICAL_SYSTEM AC_CANONICAL_SYSTEM


@@ -102,6 +102,7 @@ static int opt_fail_pause = 10;
static int opt_time_limit = 0; static int opt_time_limit = 0;
int opt_timeout = 300; int opt_timeout = 300;
static int opt_scantime = 5; static int opt_scantime = 5;
const int min_scantime = 1;
//static const bool opt_time = true; //static const bool opt_time = true;
enum algos opt_algo = ALGO_NULL; enum algos opt_algo = ALGO_NULL;
char* opt_param_key = NULL; char* opt_param_key = NULL;
@@ -160,7 +161,7 @@ uint32_t rejected_share_count = 0;
uint32_t stale_share_count = 0; uint32_t stale_share_count = 0;
uint32_t solved_block_count = 0; uint32_t solved_block_count = 0;
double *thr_hashrates; double *thr_hashrates;
double global_hashrate = 0; double global_hashrate = 0.;
double stratum_diff = 0.; double stratum_diff = 0.;
double net_diff = 0.; double net_diff = 0.;
double net_hashrate = 0.; double net_hashrate = 0.;
@@ -184,6 +185,24 @@ int default_api_listen = 4048;
pthread_mutex_t applog_lock; pthread_mutex_t applog_lock;
pthread_mutex_t stats_lock; pthread_mutex_t stats_lock;
static struct timeval session_start;
static struct timeval five_min_start;
static uint64_t session_first_block = 0;
static double latency_sum = 0.;
static uint64_t submit_sum = 0;
static uint64_t accept_sum = 0;
static uint64_t stale_sum = 0;
static uint64_t reject_sum = 0;
static double norm_diff_sum = 0.;
static uint32_t last_block_height = 0;
static double highest_share = 0; // all shares include discard and reject
static double lowest_share = 9e99; // lowest accepted
//static bool new_job = false;
static double last_targetdiff = 0.;
#if !(defined(__WINDOWS__) || defined(_WIN64) || defined(_WIN32))
static uint32_t hi_temp = 0;
#endif
static char const short_options[] = static char const short_options[] =
#ifdef HAVE_SYSLOG_H #ifdef HAVE_SYSLOG_H
@@ -200,6 +219,20 @@ char* lp_id;
static void workio_cmd_free(struct workio_cmd *wc); static void workio_cmd_free(struct workio_cmd *wc);
static void format_affinity_map( char *map_str, uint64_t map )
{
int n = num_cpus < 64 ? num_cpus : 64;
int i;
for ( i = 0; i < n; i++ )
{
if ( map & 1 ) map_str[i] = '!';
else map_str[i] = '.';
map >>= 1;
}
memset( &map_str[i], 0, 64 - i );
}
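format_affinity_map() is a new helper that renders the low 64 bits of an affinity mask into a fixed 64-byte string: '!' where the corresponding bit is set, '.' where it is clear, with the unused tail of the buffer zero-filled (which also terminates the string). Illustrative use, with made-up values:

   char map_str[64];
   // with num_cpus = 8 and a mask selecting cpus 0, 2, 4 and 6:
   format_affinity_map( map_str, 0x55 );
   // map_str now reads "!.!.!.!."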
#ifdef __linux /* Linux specific policy and affinity management */ #ifdef __linux /* Linux specific policy and affinity management */
#include <sched.h> #include <sched.h>
@@ -420,8 +453,7 @@ static bool work_decode( const json_t *val, struct work *work )
if ( !allow_mininginfo ) if ( !allow_mininginfo )
net_diff = algo_gate.calc_network_diff( work ); net_diff = algo_gate.calc_network_diff( work );
work->targetdiff = target_to_diff( work->target ); work->targetdiff = target_to_diff( work->target );
// for api stats, on longpoll pools stratum_diff = last_targetdiff = work->targetdiff;
stratum_diff = work->targetdiff;
work->sharediff = 0; work->sharediff = 0;
algo_gate.decode_extra_data( work, &net_blocks ); algo_gate.decode_extra_data( work, &net_blocks );
return true; return true;
@@ -471,6 +503,10 @@ static bool get_mininginfo( CURL *curl, struct work *work )
if ( key && json_is_integer( key ) ) if ( key && json_is_integer( key ) )
net_blocks = json_integer_value( key ); net_blocks = json_integer_value( key );
if ( opt_debug )
applog(LOG_INFO,"Mining info: diff %.5g, net_hashrate %f, height %d",
net_diff, net_hashrate, net_blocks );
if ( !work->height ) if ( !work->height )
{ {
// complete missing data from getwork // complete missing data from getwork
@@ -478,6 +514,8 @@ static bool get_mininginfo( CURL *curl, struct work *work )
if ( work->height > g_work.height ) if ( work->height > g_work.height )
{ {
restart_threads(); restart_threads();
/* redundant with new block log
if ( !opt_quiet ) if ( !opt_quiet )
{ {
char netinfo[64] = { 0 }; char netinfo[64] = { 0 };
@@ -492,6 +530,7 @@ static bool get_mininginfo( CURL *curl, struct work *work )
applog( LOG_BLUE, "%s block %d, %s", applog( LOG_BLUE, "%s block %d, %s",
algo_names[opt_algo], work->height, netinfo ); algo_names[opt_algo], work->height, netinfo );
} }
*/
} }
} // res } // res
} }
@@ -869,33 +908,9 @@ static inline void sprintf_et( char *str, int seconds )
sprintf( str, "%um%02us", min, sec ); sprintf( str, "%um%02us", min, sec );
} }
// Bitcoin formula for converting difficulty to an equivalent const double exp32 = 4294967296.; // 2**32
// number of hashes. const double exp48 = 4294967296. * 65536.; // 2**48
// const double exp64 = 4294967296. * 4294967296.; // 2**64
// https://en.bitcoin.it/wiki/Difficulty
//
// hash = diff * 2**32
//
// diff_to_hash = 2**32 = 0x100000000 = 4294967296;
const double diff_to_hash = 4294967296.;
static struct timeval session_start;
static struct timeval five_min_start;
static uint64_t session_first_block = 0;
static double latency_sum = 0.;
static uint64_t submit_sum = 0;
static uint64_t accept_sum = 0;
static uint64_t stale_sum = 0;
static uint64_t reject_sum = 0;
static double norm_diff_sum = 0.;
static uint32_t last_block_height = 0;
//static bool new_job = false;
static double last_targetdiff = 0.;
#if !(defined(__WINDOWS__) || defined(_WIN64) || defined(_WIN32))
static uint32_t hi_temp = 0;
#endif
//static uint32_t stratum_errors = 0;
struct share_stats_t struct share_stats_t
{ {
@@ -939,6 +954,7 @@ void report_summary_log( bool force )
uint64_t accepts = accept_sum; accept_sum = 0; uint64_t accepts = accept_sum; accept_sum = 0;
uint64_t rejects = reject_sum; reject_sum = 0; uint64_t rejects = reject_sum; reject_sum = 0;
uint64_t stales = stale_sum; stale_sum = 0; uint64_t stales = stale_sum; stale_sum = 0;
memcpy( &start_time, &five_min_start, sizeof start_time ); memcpy( &start_time, &five_min_start, sizeof start_time );
memcpy( &five_min_start, &now, sizeof now ); memcpy( &five_min_start, &now, sizeof now );
@@ -949,12 +965,10 @@ void report_summary_log( bool force )
double share_time = (double)et.tv_sec + (double)et.tv_usec / 1e6; double share_time = (double)et.tv_sec + (double)et.tv_usec / 1e6;
double ghrate = global_hashrate; double ghrate = global_hashrate;
double shrate = share_time == 0. ? 0. : exp32 * last_targetdiff
double shrate = share_time == 0. ? 0. : diff_to_hash * last_targetdiff
* (double)(accepts) / share_time; * (double)(accepts) / share_time;
double sess_hrate = uptime.tv_sec == 0. ? 0. : diff_to_hash * norm_diff_sum double sess_hrate = uptime.tv_sec == 0. ? 0. : exp32 * norm_diff_sum
/ (double)uptime.tv_sec; / (double)uptime.tv_sec;
double submit_rate = share_time == 0. ? 0. : (double)submits*60. / share_time; double submit_rate = share_time == 0. ? 0. : (double)submits*60. / share_time;
char shr_units[4] = {0}; char shr_units[4] = {0};
char ghr_units[4] = {0}; char ghr_units[4] = {0};
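diff_to_hash is replaced by the exp32/exp48/exp64 constants, but the arithmetic is unchanged: one share at difficulty D represents on average D * 2^32 hashes, which is where both the share-based hashrate above and the time-to-find estimates in the new-block log further down come from. Restated as self-contained helpers (names are mine, not the miner's):

   #include <stdint.h>

   static const double exp32 = 4294967296.;   /* 2**32 */

   /* Hashrate implied by 'accepts' shares of difficulty 'diff'
      found during 'seconds' of elapsed time. */
   double share_hashrate( uint64_t accepts, double diff, double seconds )
   {
      return seconds == 0. ? 0. : exp32 * diff * (double)accepts / seconds;
   }

   /* Expected time in seconds to find one share of difficulty 'diff'
      at 'hashrate' hashes per second. */
   double share_ttf( double diff, double hashrate )
   {
      return hashrate == 0. ? 0. : diff * exp32 / hashrate;
   }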
@@ -969,7 +983,8 @@ void report_summary_log( bool force )
sprintf_et( et_str, et.tv_sec ); sprintf_et( et_str, et.tv_sec );
sprintf_et( upt_str, uptime.tv_sec ); sprintf_et( upt_str, uptime.tv_sec );
applog( LOG_NOTICE, "Periodic Report %s %s", et_str, upt_str ); applog( LOG_BLUE, "%s: %s", algo_names[ opt_algo ], short_url );
applog2( LOG_NOTICE, "Periodic Report %s %s", et_str, upt_str );
applog2( LOG_INFO, "Share rate %.2f/min %.2f/min", applog2( LOG_INFO, "Share rate %.2f/min %.2f/min",
submit_rate, (double)submitted_share_count*60. / submit_rate, (double)submitted_share_count*60. /
( (double)uptime.tv_sec + (double)uptime.tv_usec / 1e6 ) ); ( (double)uptime.tv_sec + (double)uptime.tv_usec / 1e6 ) );
@@ -979,12 +994,12 @@ void report_summary_log( bool force )
if ( accepted_share_count < submitted_share_count ) if ( accepted_share_count < submitted_share_count )
{ {
double lost_ghrate = uptime.tv_sec == 0. ? 0. double lost_ghrate = uptime.tv_sec == 0 ? 0.
: diff_to_hash * last_targetdiff : exp32 * last_targetdiff
* (double)(submitted_share_count - accepted_share_count ) * (double)(submitted_share_count - accepted_share_count )
/ (double)uptime.tv_sec; / (double)uptime.tv_sec;
double lost_shrate = share_time == 0. ? 0. double lost_shrate = share_time == 0. ? 0.
: diff_to_hash * last_targetdiff * (double)(submits - accepts ) : exp32 * last_targetdiff * (double)(submits - accepts )
/ share_time; / share_time;
char lshr_units[4] = {0}; char lshr_units[4] = {0};
char lghr_units[4] = {0}; char lghr_units[4] = {0};
@@ -1005,13 +1020,16 @@ void report_summary_log( bool force )
applog2( LOG_INFO,"Rejected %6d %6d", applog2( LOG_INFO,"Rejected %6d %6d",
rejects, rejected_share_count ); rejects, rejected_share_count );
if ( solved_block_count ) if ( solved_block_count )
applog2( LOG_INFO,"Blocks solved %6d", applog2( LOG_INFO,"Blocks Solved %6d",
solved_block_count ); solved_block_count );
applog2( LOG_INFO, "Hi/Lo Share Diff %.5g / %.5g",
highest_share, lowest_share );
} }
bool lowdiff_debug = false; bool lowdiff_debug = false;
static int share_result( int result, struct work *null_work, static int share_result( int result, struct work *work,
const char *reason ) const char *reason )
{ {
double share_time = 0., share_ratio = 0.; double share_time = 0., share_ratio = 0.;
@@ -1056,12 +1074,15 @@ static int share_result( int result, struct work *null_work,
} }
share_ratio = my_stats.net_diff == 0. ? 0. : my_stats.share_diff / share_ratio = my_stats.net_diff == 0. ? 0. : my_stats.share_diff /
my_stats.net_diff * 100.; my_stats.net_diff;
// check result // check result
if ( likely( result ) ) if ( likely( result ) )
{ {
accepted_share_count++; accepted_share_count++;
if ( my_stats.share_diff < lowest_share )
lowest_share = my_stats.share_diff;
if ( my_stats.share_diff > highest_share )
highest_share = my_stats.share_diff;
sprintf( sres, "S%d", stale_share_count ); sprintf( sres, "S%d", stale_share_count );
sprintf( rres, "R%d", rejected_share_count ); sprintf( rres, "R%d", rejected_share_count );
if unlikely( ( my_stats.net_diff > 0. ) if unlikely( ( my_stats.net_diff > 0. )
@@ -1082,9 +1103,11 @@ static int share_result( int result, struct work *null_work,
{ {
sprintf( ares, "A%d", accepted_share_count ); sprintf( ares, "A%d", accepted_share_count );
sprintf( bres, "B%d", solved_block_count ); sprintf( bres, "B%d", solved_block_count );
if ( reason && strstr( reason, "Invalid job id" ) ) stale = work ? work->data[ algo_gate.ntime_index ]
!= g_work.data[ algo_gate.ntime_index ] : false;
if ( reason ) stale = stale || strstr( reason, "Invalid job id" );
if ( stale )
{ {
stale = true;
stale_share_count++; stale_share_count++;
sprintf( sres, "Stale %d", stale_share_count ); sprintf( sres, "Stale %d", stale_share_count );
sprintf( rres, "R%d", rejected_share_count ); sprintf( rres, "R%d", rejected_share_count );
@@ -1094,7 +1117,7 @@ static int share_result( int result, struct work *null_work,
rejected_share_count++; rejected_share_count++;
sprintf( sres, "S%d", stale_share_count ); sprintf( sres, "S%d", stale_share_count );
sprintf( rres, "Rejected %d" , rejected_share_count ); sprintf( rres, "Rejected %d" , rejected_share_count );
lowdiff_debug = true; // lowdiff_debug = true;
} }
} }
@@ -1120,95 +1143,59 @@ static int share_result( int result, struct work *null_work,
pthread_mutex_unlock( &stats_lock ); pthread_mutex_unlock( &stats_lock );
/*
if ( likely( result ) )
{
if ( unlikely( solved ) )
{
sprintf( bres, "BLOCK SOLVED %d", solved_block_count );
sprintf( ares, "A%d", accepted_share_count );
}
else
{
sprintf( bres, "B%d", solved_block_count );
sprintf( ares, "Accepted %d", accepted_share_count );
}
sprintf( sres, "S%d", stale_share_count );
sprintf( rres, "R%d", rejected_share_count );
}
else
{
sprintf( ares, "A%d", accepted_share_count );
sprintf( bres, "B%d", solved_block_count );
if ( stale )
{
sprintf( sres, "Stale %d", stale_share_count );
sprintf( rres, "R%d", rejected_share_count );
}
else
{
sprintf( sres, "S%d", stale_share_count );
sprintf( rres, "Rejected %d" , rejected_share_count );
}
}
*/
if ( use_colors ) if ( use_colors )
{ {
bcol = acol = scol = rcol = CL_WHT; bcol = acol = scol = rcol = CL_N;
if ( likely( result ) ) if ( likely( result ) )
{ {
acol = CL_GRN; acol = CL_WHT CL_GRN;
if ( unlikely( solved ) ) bcol = CL_MAG; if ( unlikely( solved ) ) bcol = CL_WHT CL_MAG;
} }
else if ( stale ) scol = CL_YL2; else if ( stale ) scol = CL_WHT CL_YL2;
else rcol = CL_RED; else rcol = CL_WHT CL_RED;
} }
else else
bcol = acol = scol = rcol = "\0"; bcol = acol = scol = rcol = "\0";
applog( LOG_NOTICE, "%d %s%s %s%s %s%s %s%s" CL_WHT ", %.3f sec (%dms)", applog( LOG_NOTICE, "%d %s%s %s%s %s%s %s%s" CL_N ", %.3f sec (%dms)",
my_stats.share_count, acol, ares, scol, sres, rcol, rres, bcol, my_stats.share_count, acol, ares, scol, sres, rcol, rres, bcol,
bres, share_time, latency ); bres, share_time, latency );
if ( !opt_quiet ) if ( !opt_quiet )
{ {
if ( have_stratum ) if ( have_stratum )
applog2( LOG_NOTICE, "Diff %.5g (%.3g%), %sBlock %d, %sJob %s" CL_WHT, applog2( LOG_INFO, "Diff %.5g (%.3g), %sBlock %d" CL_N ", %sJob %s",
my_stats.share_diff, share_ratio, bcol, stratum.block_height, my_stats.share_diff, share_ratio, bcol, stratum.block_height,
scol, my_stats.job_id ); scol, my_stats.job_id );
else else
applog2( LOG_NOTICE, "Diff %.5g (%.3g%), %sBlock %d" CL_WHT, {
my_stats.share_diff, share_ratio, bcol, stratum.block_height ); uint64_t height = work ? work->height : last_block_height;
applog2( LOG_INFO, "Diff %.5g (%.3g), %sBlock %d",
my_stats.share_diff, share_ratio, bcol, height );
}
} }
if ( unlikely( reason && !result ) ) if ( unlikely( opt_debug || !( opt_quiet || result || stale ) ) )
{
if ( !( opt_quiet || stale ) )
{ {
uint32_t str[8];
if ( reason )
applog( LOG_WARNING, "Reject reason: %s", reason ); applog( LOG_WARNING, "Reject reason: %s", reason );
uint32_t str1[8], str2[8];
char str3[65];
// display share hash and target for troubleshooting // display share hash and target for troubleshooting
diff_to_target( str1, my_stats.share_diff ); diff_to_target( str, my_stats.share_diff );
for ( int i = 0; i < 8; i++ ) applog2( LOG_INFO, "Hash: %08x%08x%08x%08x...",
be32enc( str2 + i, str1[7 - i] ); str[7], str[6], str[5], str[4] );
bin2hex( str3, (unsigned char*)str2, 12 ); uint32_t *targ;
applog2( LOG_INFO, "Share diff: %.5g, Hash: %s...", if ( work )
my_stats.share_diff, str3 ); targ = work->target;
else
diff_to_target( str1, my_stats.target_diff ); {
for ( int i = 0; i < 8; i++ ) diff_to_target( str, my_stats.target_diff );
be32enc( str2 + i, str1[7 - i] ); targ = &str[0];
bin2hex( str3, (unsigned char*)str2, 12 );
applog2( LOG_INFO, "Target diff: %.5g, Targ: %s...",
my_stats.target_diff, str3 );
} }
applog2( LOG_INFO, "Target: %08x%08x%08x%08x...",
if ( unlikely( opt_reset_on_stale && stale ) ) targ[7], targ[6], targ[5], targ[4] );
stratum_need_reset = true;
} }
return 1; return 1;
} }
@@ -1354,28 +1341,6 @@ char* std_malloc_txs_request( struct work *work )
static bool submit_upstream_work( CURL *curl, struct work *work ) static bool submit_upstream_work( CURL *curl, struct work *work )
{ {
/* pass if the previous hash is not the current previous hash */
/* Submit anyway, discarding here messes up the stats
if ( !submit_old && memcmp( &work->data[1], &g_work.data[1], 32 ) )
{
applog( LOG_WARNING, "Stale work detected, discarding" );
return true;
}
if ( !have_stratum && allow_mininginfo )
{
struct work mining_info;
get_mininginfo( curl, &mining_info );
if ( work->height < mining_info.height )
{
applog( LOG_WARNING, "Block %u was already solved, current block %d",
work->height, mining_info.height );
return true;
}
}
*/
if ( have_stratum ) if ( have_stratum )
{ {
char req[JSON_BUF_LEN]; char req[JSON_BUF_LEN];
@@ -1496,7 +1461,9 @@ start:
else else
rc = work_decode( json_object_get( val, "result" ), work ); rc = work_decode( json_object_get( val, "result" ), work );
if ( opt_protocol && rc ) if ( rc )
{
if ( opt_protocol )
{ {
timeval_subtract( &diff, &tv_end, &tv_start ); timeval_subtract( &diff, &tv_end, &tv_start );
applog( LOG_DEBUG, "got new work in %.2f ms", applog( LOG_DEBUG, "got new work in %.2f ms",
@@ -1507,8 +1474,12 @@ start:
// store work height in solo // store work height in solo
get_mininginfo(curl, work); get_mininginfo(curl, work);
applog( LOG_BLUE, "%s %s block %d, diff %.5g", algo_names[ opt_algo ], if ( work->height > last_block_height )
short_url, work->height, net_diff ); {
last_block_height = work->height;
applog( LOG_BLUE, "New Block %d, Net Diff %.5g, Target Diff %.5g, Ntime %08x",
work->height, net_diff, work->targetdiff,
bswap_32( work->data[ algo_gate.ntime_index ] ) );
if ( !opt_quiet && net_diff && net_hashrate ) if ( !opt_quiet && net_diff && net_hashrate )
{ {
@@ -1523,20 +1494,28 @@ start:
if ( miner_hr ) if ( miner_hr )
{ {
double net_hr = net_hashrate;
char net_hr_units[4] = {0}; char net_hr_units[4] = {0};
char miner_hr_units[4] = {0}; char miner_hr_units[4] = {0};
char net_ttf[32]; char net_ttf[32];
char miner_ttf[32]; char miner_ttf[32];
sprintf_et( net_ttf, net_diff * diff_to_hash / net_hashrate ); sprintf_et( net_ttf, ( work->targetdiff * exp32 ) / net_hr );
sprintf_et( miner_ttf, net_diff * diff_to_hash / miner_hr ); sprintf_et( miner_ttf, ( work->targetdiff * exp32 ) / miner_hr );
scale_hash_for_display ( &miner_hr, miner_hr_units ); scale_hash_for_display ( &miner_hr, miner_hr_units );
scale_hash_for_display ( &net_hashrate, net_hr_units ); scale_hash_for_display ( &net_hr, net_hr_units );
applog2(LOG_INFO, "Miner TTF @ %.2f %sh/s %s, net TTF @ %.2f %sh/s %s", applog2( LOG_INFO,
"Miner TTF @ %.2f %sh/s %s, Net TTF @ %.2f %sh/s %s",
miner_hr, miner_hr_units, miner_ttf, miner_hr, miner_hr_units, miner_ttf,
net_hashrate, net_hr_units, net_ttf ); net_hr, net_hr_units, net_ttf );
} }
} }
} // work->height > last_block_height
else if ( memcmp( &work->data[1], &g_work.data[1], 32 ) )
applog( LOG_BLUE, "New Work, Ntime %08lx",
bswap_32( work->data[ algo_gate.ntime_index ] ) );
} // rc
return rc; return rc;
} }
@@ -1558,36 +1537,34 @@ static void workio_cmd_free(struct workio_cmd *wc)
free(wc); free(wc);
} }
static bool workio_get_work(struct workio_cmd *wc, CURL *curl) static bool workio_get_work( struct workio_cmd *wc, CURL *curl )
{ {
struct work *ret_work; struct work *ret_work;
int failures = 0; int failures = 0;
ret_work = (struct work*) calloc(1, sizeof(*ret_work)); ret_work = (struct work*) calloc( 1, sizeof(*ret_work) );
if (!ret_work) if ( !ret_work )
return false; return false;
/* obtain new work from bitcoin via JSON-RPC */ /* obtain new work from bitcoin via JSON-RPC */
while (!get_upstream_work(curl, ret_work)) while ( !get_upstream_work( curl, ret_work ) )
{ {
if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) if ( unlikely( ( opt_retries >= 0 ) && ( ++failures > opt_retries ) ) )
{ {
applog(LOG_ERR, "json_rpc_call failed, terminating workio thread"); applog( LOG_ERR, "json_rpc_call failed, terminating workio thread" );
free(ret_work); free( ret_work );
return false; return false;
} }
/* pause, then restart work-request loop */ /* pause, then restart work-request loop */
applog(LOG_ERR, "json_rpc_call failed, retry after %d seconds", applog( LOG_ERR, "json_rpc_call failed, retry after %d seconds",
opt_fail_pause); opt_fail_pause );
sleep(opt_fail_pause); sleep( opt_fail_pause );
} }
report_summary_log( false );
/* send work to requesting thread */ /* send work to requesting thread */
if (!tq_push(wc->thr->q, ret_work)) if ( !tq_push(wc->thr->q, ret_work ) )
free(ret_work); free( ret_work );
return true; return true;
} }
@@ -1701,8 +1678,7 @@ static bool get_work(struct thr_info *thr, struct work *work)
return true; return true;
} }
bool submit_work( struct thr_info *thr, bool submit_work( struct thr_info *thr, const struct work *work_in )
const struct work *work_in )
{ {
struct workio_cmd *wc; struct workio_cmd *wc;
@@ -1726,34 +1702,23 @@ err_out:
return false; return false;
} }
/*
// __float128? // __float128?
// Convert little endian 256 bit (38 decimal digits) unsigned integer to // Convert little endian 256 bit (38 decimal digits) unsigned integer to
// double precision floating point with 15 decimal digits precision. // double precision floating point with 15 decimal digits precision.
// returns u * ( 2**256 )
static inline double u256_to_double( const uint64_t *u ) static inline double u256_to_double( const uint64_t *u )
{ {
const double exp64 = 4294967296.0 * 4294967296.0; // 2**64
return ( ( u[3] * exp64 + u[2] ) * exp64 + u[1] ) * exp64 + u[0]; return ( ( u[3] * exp64 + u[2] ) * exp64 + u[1] ) * exp64 + u[0];
} }
*/
void work_set_target_ratio( struct work* work, const void *hash ) static void update_submit_stats( struct work *work, const void *hash )
{ {
if ( likely( hash ) ) // work->sharediff = hash ? exp32 / ( (uint64_t*)hash )[3] : 0.;
{
double dhash = u256_to_double( (const uint64_t*)hash );
if ( likely( dhash > 0. ) )
work->sharediff = work->targetdiff *
u256_to_double( (const uint64_t*)( work->target ) ) / dhash;
}
else
work->sharediff = 0.;
// collect some share stats
// Frequent share submission combined with high latency can caused
// shares to be submitted faster than they are acked. If severe enough
// it can overflow the queue and overwrite stats for a share.
pthread_mutex_lock( &stats_lock ); pthread_mutex_lock( &stats_lock );
submitted_share_count++;
share_stats[ s_put_ptr ].share_count = submitted_share_count; share_stats[ s_put_ptr ].share_count = submitted_share_count;
gettimeofday( &share_stats[ s_put_ptr ].submit_time, NULL ); gettimeofday( &share_stats[ s_put_ptr ].submit_time, NULL );
share_stats[ s_put_ptr ].share_diff = work->sharediff; share_stats[ s_put_ptr ].share_diff = work->sharediff;
@@ -1770,22 +1735,25 @@ void work_set_target_ratio( struct work* work, const void *hash )
bool submit_solution( struct work *work, const void *hash, bool submit_solution( struct work *work, const void *hash,
struct thr_info *thr ) struct thr_info *thr )
{ {
work->sharediff = hash ? exp32 / ( (uint64_t*)hash )[3] : 0.;
if ( likely( submit_work( thr, work ) ) ) if ( likely( submit_work( thr, work ) ) )
{ {
submitted_share_count++; update_submit_stats( work, hash );
work_set_target_ratio( work, hash );
if ( !opt_quiet ) if ( !opt_quiet )
{ {
if ( have_stratum ) if ( have_stratum )
applog( LOG_NOTICE, "%d submitted by thread %d, job %s", applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Job %s",
submitted_share_count, thr->id, work->job_id ); submitted_share_count, work->sharediff, work->height,
work->job_id );
else else
applog( LOG_NOTICE, "%d submitted by thread %d", applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Ntime %08x",
submitted_share_count, thr->id ); submitted_share_count, work->sharediff, work->height,
work->data[ algo_gate.ntime_index ] );
} }
if ( lowdiff_debug ) if ( unlikely( lowdiff_debug ) )
{ {
uint32_t* h = (uint32_t*)hash; uint32_t* h = (uint32_t*)hash;
uint32_t* t = (uint32_t*)work->target; uint32_t* t = (uint32_t*)work->target;
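work_set_target_ratio() is folded into update_submit_stats(), and the share difficulty is now estimated directly as exp32 divided by the top 64-bit word of the hash instead of through the full 256-bit ratio. The shortcut works because a little-endian 256-bit hash h is roughly h[3] * 2^192 and a difficulty-1 share corresponds to a target near 2^224, so diff = 2^224 / h is approximately 2^32 / h[3]. As a standalone helper (the name is mine):

   #include <stdint.h>

   /* Approximate share difficulty from the most significant 64-bit word
      of a little-endian 256-bit hash: diff ~= 2^224 / h ~= 2^32 / h[3]. */
   double approx_share_diff( const uint64_t *hash )
   {
      const double exp32 = 4294967296.;   /* 2**32 */
      return ( hash && hash[3] ) ? exp32 / (double)hash[3] : 0.;
   }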
@@ -1797,27 +1765,30 @@ bool submit_solution( struct work *work, const void *hash,
return true; return true;
} }
else else
applog( LOG_WARNING, "%d failed to submit share thread %d.", applog( LOG_WARNING, "%d failed to submit share", submitted_share_count );
submitted_share_count, thr->id );
return false; return false;
} }
// deprecated, use submit_solution
bool submit_lane_solution( struct work *work, const void *hash, bool submit_lane_solution( struct work *work, const void *hash,
struct thr_info *thr, const int lane ) struct thr_info *thr, const int lane )
{ {
work->sharediff = hash ? exp32 / ( (uint64_t*)hash )[3] : 0.;
if ( likely( submit_work( thr, work ) ) ) if ( likely( submit_work( thr, work ) ) )
{ {
submitted_share_count++; update_submit_stats( work, hash );
work_set_target_ratio( work, hash );
if ( !opt_quiet ) if ( !opt_quiet )
{ {
if ( have_stratum ) if ( have_stratum )
applog( LOG_NOTICE, "%d submitted by thread %d, lane %d, job %s", applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Job %s",
submitted_share_count, thr->id, lane, work->job_id ); submitted_share_count, work->sharediff, work->height,
work->job_id );
else else
applog( LOG_NOTICE, "%d submitted by thread %d, lane %d", applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Ntime %08x",
submitted_share_count, thr->id, lane ); submitted_share_count, work->sharediff, work->height,
work->data[ algo_gate.ntime_index ] );
} }
if ( lowdiff_debug ) if ( lowdiff_debug )
@@ -1832,8 +1803,8 @@ bool submit_lane_solution( struct work *work, const void *hash,
return true; return true;
} }
else else
applog( LOG_WARNING, "%d failed to submit share, thread %d, lane %d.", applog( LOG_WARNING, "%d failed to submit share", submitted_share_count );
submitted_share_count, thr->id, lane );
return false; return false;
} }
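The sharediff assignment above, exp32 / ((uint64_t*)hash)[3], takes the most significant 64-bit word of the 256-bit little-endian hash and divides 2**32 by it. A small hedged sketch with an invented example value:

   // Sketch: approximate share difficulty from a 256-bit hash.
   // Word pair [3] (bytes 24..31) is the most significant 64 bits.
   #include <stdint.h>

   double share_diff_from_hash( const void *hash )
   {
      const uint64_t ms64  = ((const uint64_t*)hash)[3];
      const double   exp32 = 4294967296.;          // 2**32
      return ms64 ? exp32 / (double)ms64 : 0.;
   }

   // Example (made up): a hash whose top 64 bits equal 0x0000000100000000
   // (2**32) yields a share difficulty of about 1.0.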
@@ -1986,6 +1957,8 @@ static void *miner_thread( void *userdata )
if (!opt_benchmark && opt_priority == 0) if (!opt_benchmark && opt_priority == 0)
{ {
setpriority(PRIO_PROCESS, 0, 19); setpriority(PRIO_PROCESS, 0, 19);
if ( !thr_id && !opt_quiet )
applog(LOG_INFO, "Miner thread priority %d (nice 19)", opt_priority );
drop_policy(); drop_policy();
} }
else else
@@ -2002,8 +1975,8 @@ static void *miner_thread( void *userdata )
case 4: prio = -10; break; case 4: prio = -10; break;
case 5: prio = -15; case 5: prio = -15;
} }
if (opt_debug) if ( !( thr_id || opt_quiet ) )
applog(LOG_DEBUG, "Thread %d priority %d (nice %d)", thr_id, applog( LOG_INFO, "Miner thread priority %d (nice %d)",
opt_priority, prio ); opt_priority, prio );
#endif #endif
setpriority(PRIO_PROCESS, 0, prio); setpriority(PRIO_PROCESS, 0, prio);
@@ -2019,7 +1992,7 @@ static void *miner_thread( void *userdata )
{ {
affine_to_cpu_mask( thr_id, (uint128_t)1 << (thr_id % num_cpus) ); affine_to_cpu_mask( thr_id, (uint128_t)1 << (thr_id % num_cpus) );
if ( opt_debug ) if ( opt_debug )
applog( LOG_DEBUG, "Binding thread %d to cpu %d.", applog( LOG_INFO, "Binding thread %d to cpu %d.",
thr_id, thr_id % num_cpus, thr_id, thr_id % num_cpus,
u128_hi64( (uint128_t)1 << (thr_id % num_cpus) ), u128_hi64( (uint128_t)1 << (thr_id % num_cpus) ),
u128_lo64( (uint128_t)1 << (thr_id % num_cpus) ) ); u128_lo64( (uint128_t)1 << (thr_id % num_cpus) ) );
@@ -2040,14 +2013,14 @@ static void *miner_thread( void *userdata )
{ {
#if AFFINITY_USES_UINT128 #if AFFINITY_USES_UINT128
if ( num_cpus > 64 ) if ( num_cpus > 64 )
applog( LOG_DEBUG, "Binding thread %d to mask %016llx %016llx", applog( LOG_INFO, "Binding thread %d to mask %016llx %016llx",
thr_id, u128_hi64( opt_affinity ), thr_id, u128_hi64( opt_affinity ),
u128_lo64( opt_affinity ) ); u128_lo64( opt_affinity ) );
else else
applog( LOG_DEBUG, "Binding thread %d to mask %016llx", applog( LOG_INFO, "Binding thread %d to mask %016llx",
thr_id, opt_affinity ); thr_id, opt_affinity );
#else #else
applog( LOG_DEBUG, "Binding thread %d to mask %016llx", applog( LOG_INFO, "Binding thread %d to mask %016llx",
thr_id, opt_affinity ); thr_id, opt_affinity );
#endif #endif
} }
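The affinity logging above splits a 128-bit CPU mask into 64-bit halves for display. A minimal sketch of how the per-thread mask passed to affine_to_cpu_mask() is formed, assuming uint128_t maps to GCC's unsigned __int128 (the two helpers mirror the u128_hi64/u128_lo64 calls used above):

   typedef unsigned __int128 uint128_t;

   static inline uint64_t u128_hi64( uint128_t x ) { return (uint64_t)( x >> 64 ); }
   static inline uint64_t u128_lo64( uint128_t x ) { return (uint64_t)  x; }

   static inline uint128_t thread_cpu_mask( int thr_id, int num_cpus )
   {
      // One bit per logical CPU: thread i is bound to CPU (i % num_cpus).
      return (uint128_t)1 << ( thr_id % num_cpus );
   }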
@@ -2083,10 +2056,10 @@ static void *miner_thread( void *userdata )
} }
else else
{ {
int min_scantime = have_longpoll ? LP_SCANTIME : opt_scantime; int scantime = have_longpoll ? LP_SCANTIME : opt_scantime;
pthread_mutex_lock( &g_work_lock ); pthread_mutex_lock( &g_work_lock );
if ( time(NULL) - g_work_time >= min_scantime if ( time(NULL) - g_work_time >= scantime
|| *nonceptr >= end_nonce ) || *nonceptr >= end_nonce )
{ {
if ( unlikely( !get_work( mythr, &g_work ) ) ) if ( unlikely( !get_work( mythr, &g_work ) ) )
@@ -2097,6 +2070,7 @@ static void *miner_thread( void *userdata )
goto out; goto out;
} }
g_work_time = time(NULL); g_work_time = time(NULL);
restart_threads();
} }
algo_gate.get_new_work( &work, &g_work, thr_id, &end_nonce ); algo_gate.get_new_work( &work, &g_work, thr_id, &end_nonce );
@@ -2244,7 +2218,7 @@ static void *miner_thread( void *userdata )
} }
else else
sprintf( tempstr, "%d C", temp ); sprintf( tempstr, "%d C", temp );
applog( LOG_INFO,"CPU temp: curr %s (max %d), Freq: %.3f/%.3f GHz", applog( LOG_NOTICE,"CPU temp: curr %s (max %d), Freq: %.3f/%.3f GHz",
tempstr, hi_temp, (float)lo_freq / 1e6, (float)hi_freq/ 1e6 ); tempstr, hi_temp, (float)lo_freq / 1e6, (float)hi_freq/ 1e6 );
if ( temp > hi_temp ) hi_temp = temp; if ( temp > hi_temp ) hi_temp = temp;
} }
@@ -2562,8 +2536,10 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
algo_gate.build_extraheader( g_work, sctx ); algo_gate.build_extraheader( g_work, sctx );
net_diff = algo_gate.calc_network_diff( g_work ); net_diff = algo_gate.calc_network_diff( g_work );
algo_gate.set_work_data_endian( g_work ); algo_gate.set_work_data_endian( g_work );
work_set_target( g_work, sctx->job.diff g_work->height = sctx->block_height;
/ ( opt_target_factor * opt_diff_factor ) ); g_work->targetdiff = sctx->job.diff
/ ( opt_target_factor * opt_diff_factor );
diff_to_target( g_work->target, g_work->targetdiff );
pthread_mutex_unlock( &sctx->work_lock ); pthread_mutex_unlock( &sctx->work_lock );
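The new code above derives the share target straight from the stratum job difficulty, targetdiff = job.diff / (opt_target_factor * opt_diff_factor), and then expands it with diff_to_target(). A hedged worked example with invented numbers (the factor values are illustrations, not defaults from the source):

   double job_diff          = 16.0;   // sctx->job.diff from the pool
   double opt_target_factor = 1.0;    // algo-specific scaling
   double opt_diff_factor   = 1.0;    // user-supplied scaling
   double targetdiff = job_diff / ( opt_target_factor * opt_diff_factor );
   // targetdiff == 16: a hash qualifies as a share when its own difficulty
   // (2**32 divided by its top 64 bits) is at least 16, i.e. when the hash
   // value is at or below the target produced by diff_to_target().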
@@ -2587,13 +2563,13 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
pthread_mutex_unlock( &stats_lock ); pthread_mutex_unlock( &stats_lock );
if ( stratum_diff != sctx->job.diff ) if ( stratum_diff != sctx->job.diff )
applog( LOG_BLUE, "New stratum diff %g, block %d, job %s", applog( LOG_BLUE, "New Diff %g, Block %d, Job %s",
sctx->job.diff, sctx->block_height, g_work->job_id ); sctx->job.diff, sctx->block_height, g_work->job_id );
else if ( last_block_height != sctx->block_height ) else if ( last_block_height != sctx->block_height )
applog( LOG_BLUE, "New block %d, job %s", applog( LOG_BLUE, "New Block %d, Job %s",
sctx->block_height, g_work->job_id ); sctx->block_height, g_work->job_id );
else if ( g_work->job_id ) else if ( g_work->job_id )
applog( LOG_BLUE,"New job %s", g_work->job_id ); applog( LOG_BLUE,"New Job %s", g_work->job_id );
// Update data and calculate new estimates. // Update data and calculate new estimates.
if ( ( stratum_diff != sctx->job.diff ) if ( ( stratum_diff != sctx->job.diff )
@@ -2609,9 +2585,8 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
if ( !opt_quiet ) if ( !opt_quiet )
{ {
applog2( LOG_INFO, "%s: %s", algo_names[opt_algo], short_url );
applog2( LOG_INFO, "Diff: Net %.5g, Stratum %.5g, Target %.5g", applog2( LOG_INFO, "Diff: Net %.5g, Stratum %.5g, Target %.5g",
net_diff, stratum_diff, last_targetdiff ); net_diff, stratum_diff, g_work->targetdiff );
if ( likely( hr > 0. ) ) if ( likely( hr > 0. ) )
{ {
@@ -2619,10 +2594,10 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
char block_ttf[32]; char block_ttf[32];
char share_ttf[32]; char share_ttf[32];
sprintf_et( block_ttf, net_diff * diff_to_hash / hr ); sprintf_et( block_ttf, ( net_diff * exp32 ) / hr );
sprintf_et( share_ttf, last_targetdiff * diff_to_hash / hr ); sprintf_et( share_ttf, g_work->targetdiff * exp32 / hr );
scale_hash_for_display ( &hr, hr_units ); scale_hash_for_display ( &hr, hr_units );
applog2( LOG_INFO, "TTF @ %.2f %sh/s: block %s, share %s", applog2( LOG_INFO, "TTF @ %.2f %sh/s: Block %s, Share %s",
hr, hr_units, block_ttf, share_ttf ); hr, hr_units, block_ttf, share_ttf );
if ( !multipool && last_block_height > session_first_block ) if ( !multipool && last_block_height > session_first_block )
@@ -2635,14 +2610,14 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
: et.tv_sec / ( last_block_height - session_first_block ); : et.tv_sec / ( last_block_height - session_first_block );
if ( net_diff && net_ttf ) if ( net_diff && net_ttf )
{ {
double net_hr = net_diff * diff_to_hash / net_ttf; double net_hr = net_diff * exp32 / net_ttf;
char net_ttf_str[32]; // char net_ttf_str[32];
char net_hr_units[4] = {0}; char net_hr_units[4] = {0};
sprintf_et( net_ttf_str, net_ttf ); // sprintf_et( net_ttf_str, net_ttf );
scale_hash_for_display ( &net_hr, net_hr_units ); scale_hash_for_display ( &net_hr, net_hr_units );
applog2( LOG_INFO, "Net TTF @ %.2f %sh/s: %s", applog2( LOG_INFO, "Net hash rate (est) %.2f %sh/s",
net_hr, net_hr_units, net_ttf_str ); net_hr, net_hr_units );
} }
} }
} // hr > 0 } // hr > 0
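The time-to-find estimates above follow directly from the difficulty formula: expected hashes = diff * 2**32, so TTF = diff * 2**32 / hashrate. A short worked sketch, all inputs invented for illustration:

   double net_diff   = 1000.0;        // network difficulty
   double targetdiff = 16.0;          // share target difficulty
   double hr         = 1.0e6;         // miner hash rate, 1 Mh/s
   double exp32      = 4294967296.;   // 2**32

   double block_ttf = net_diff   * exp32 / hr;  // ~4.3e6 s (~50 days) per block
   double share_ttf = targetdiff * exp32 / hr;  // ~6.9e4 s (~19 hours) per share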
@@ -2653,12 +2628,12 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
static void *stratum_thread(void *userdata ) static void *stratum_thread(void *userdata )
{ {
struct thr_info *mythr = (struct thr_info *) userdata; struct thr_info *mythr = (struct thr_info *) userdata;
char *s; char *s = NULL;
stratum.url = (char*) tq_pop(mythr->q, NULL); stratum.url = (char*) tq_pop(mythr->q, NULL);
if (!stratum.url) if (!stratum.url)
goto out; goto out;
applog( LOG_INFO, "Stratum connect %s", short_url ); applog( LOG_BLUE, "Stratum connect %s", short_url );
while (1) while (1)
{ {
@@ -2718,30 +2693,26 @@ static void *stratum_thread(void *userdata )
restart_threads(); restart_threads();
} }
if ( stratum_socket_full( &stratum, opt_timeout ) ) if ( likely( stratum_socket_full( &stratum, opt_timeout ) ) )
{ {
s = stratum_recv_line(&stratum); if ( likely( s = stratum_recv_line( &stratum ) ) )
if ( !s )
applog(LOG_WARNING, "Stratum connection interrupted");
}
else
{ {
s = NULL; if ( likely( !stratum_handle_method( &stratum, s ) ) )
applog(LOG_ERR, "Stratum connection timeout");
}
if ( s )
{
if ( !stratum_handle_method( &stratum, s ) )
stratum_handle_response( s ); stratum_handle_response( s );
free( s ); free( s );
} }
else else
{ {
// stratum_errors++; applog(LOG_WARNING, "Stratum connection interrupted");
// check if this redundant
stratum_disconnect( &stratum ); stratum_disconnect( &stratum );
} }
}
else
{
applog(LOG_ERR, "Stratum connection timeout");
stratum_disconnect( &stratum );
}
} // loop } // loop
out: out:
return NULL; return NULL;
@@ -3378,7 +3349,7 @@ bool check_cpu_capability ()
" with VC++ 2013\n"); " with VC++ 2013\n");
#elif defined(__GNUC__) #elif defined(__GNUC__)
" with GCC"); " with GCC");
printf(" %d.%d.%d.\n", __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__); printf(" %d.%d.%d\n", __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__);
#else #else
printf(".\n"); printf(".\n");
#endif #endif
@@ -3526,7 +3497,6 @@ int main(int argc, char *argv[])
if (num_cpus < 1) if (num_cpus < 1)
num_cpus = 1; num_cpus = 1;
if (!opt_n_threads) if (!opt_n_threads)
opt_n_threads = num_cpus; opt_n_threads = num_cpus;
@@ -3600,11 +3570,12 @@ int main(int argc, char *argv[])
pthread_mutex_init( &stratum.sock_lock, NULL ); pthread_mutex_init( &stratum.sock_lock, NULL );
pthread_mutex_init( &stratum.work_lock, NULL ); pthread_mutex_init( &stratum.work_lock, NULL );
flags = !opt_benchmark flags = CURL_GLOBAL_ALL;
&& ( strncmp( rpc_url, "https:", 6 ) if ( !opt_benchmark )
|| strncasecmp(rpc_url, "stratum+tcps://", 15 ) ) if ( strncasecmp( rpc_url, "https:", 6 )
? ( CURL_GLOBAL_ALL & ~CURL_GLOBAL_SSL ) && strncasecmp( rpc_url, "stratum+tcps://", 15 ) )
: CURL_GLOBAL_ALL; flags &= ~CURL_GLOBAL_SSL;
if ( curl_global_init( flags ) ) if ( curl_global_init( flags ) )
{ {
applog(LOG_ERR, "CURL initialization failed"); applog(LOG_ERR, "CURL initialization failed");

@@ -3645,7 +3616,8 @@ int main(int argc, char *argv[])
if (opt_priority > 0) if (opt_priority > 0)
{ {
DWORD prio = NORMAL_PRIORITY_CLASS; DWORD prio = NORMAL_PRIORITY_CLASS;
switch (opt_priority) { switch (opt_priority)
{
case 1: case 1:
prio = BELOW_NORMAL_PRIORITY_CLASS; prio = BELOW_NORMAL_PRIORITY_CLASS;
break; break;
@@ -3662,10 +3634,6 @@ int main(int argc, char *argv[])
} }
#endif #endif
if ( num_cpus != opt_n_threads )
applog( LOG_INFO,"%u CPU cores available, %u miner threads selected.",
num_cpus, opt_n_threads );
// To be confirmed with more than 64 cpus // To be confirmed with more than 64 cpus
if ( opt_affinity != -1 ) if ( opt_affinity != -1 )
{ {
@@ -3697,8 +3665,12 @@ int main(int argc, char *argv[])
*/ */
} }
applog( LOG_INFO, "Extranonce subscribe: %s", if ( !opt_quiet && ( opt_n_threads < num_cpus ) )
opt_extranonce ? "YES" : "NO" ); {
char affinity_map[64];
format_affinity_map( affinity_map, opt_affinity );
applog( LOG_INFO, "CPU affinity [%s]", affinity_map );
}
#ifdef HAVE_SYSLOG_H #ifdef HAVE_SYSLOG_H
if (use_syslog) if (use_syslog)
@@ -3750,7 +3722,7 @@ int main(int argc, char *argv[])
/* start longpoll thread */ /* start longpoll thread */
err = thread_create(thr, longpoll_thread); err = thread_create(thr, longpoll_thread);
if (err) { if (err) {
applog(LOG_ERR, "long poll thread create failed"); applog(LOG_ERR, "Long poll thread create failed");
return 1; return 1;
} }
} }
@@ -3770,7 +3742,7 @@ int main(int argc, char *argv[])
err = thread_create(thr, stratum_thread); err = thread_create(thr, stratum_thread);
if (err) if (err)
{ {
applog(LOG_ERR, "stratum thread create failed"); applog(LOG_ERR, "Stratum thread create failed");
return 1; return 1;
} }
if (have_stratum) if (have_stratum)
@@ -3811,18 +3783,16 @@ int main(int argc, char *argv[])
return 1; return 1;
err = thread_create(thr, miner_thread); err = thread_create(thr, miner_thread);
if (err) { if (err) {
applog(LOG_ERR, "thread %d create failed", i); applog(LOG_ERR, "Miner thread %d create failed", i);
return 1; return 1;
} }
} }
applog(LOG_INFO, "%d miner threads started, " applog( LOG_INFO, "%d of %d miner threads started using '%s' algorithm",
"using '%s' algorithm.", opt_n_threads, num_cpus, algo_names[opt_algo] );
opt_n_threads,
algo_names[opt_algo]);
/* main loop - simply wait for workio thread to exit */ /* main loop - simply wait for workio thread to exit */
pthread_join(thr_info[work_thr_id].pth, NULL); pthread_join( thr_info[work_thr_id].pth, NULL );
applog(LOG_WARNING, "workio thread dead, exiting."); applog( LOG_WARNING, "workio thread dead, exiting." );
return 0; return 0;
} }
miner.h
@@ -312,6 +312,20 @@ int varint_encode( unsigned char *p, uint64_t n );
size_t address_to_script( unsigned char *out, size_t outsz, const char *addr ); size_t address_to_script( unsigned char *out, size_t outsz, const char *addr );
int timeval_subtract( struct timeval *result, struct timeval *x, int timeval_subtract( struct timeval *result, struct timeval *x,
struct timeval *y); struct timeval *y);
// Bitcoin formula for converting difficulty to an equivalent
// number of hashes.
//
// https://en.bitcoin.it/wiki/Difficulty
//
// hash = diff * 2**32
//
// diff_to_hash = 2**32 = 0x100000000 = 4294967296 = exp32;
const double exp32; // 2**32
const double exp48; // 2**48
const double exp64; // 2**64
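A one-line numeric check of the formula documented above, assuming the constants are initialised to the powers of two their names indicate:

   // hash = diff * 2**32, per https://en.bitcoin.it/wiki/Difficulty
   // e.g. difficulty 1 corresponds to an expected 4,294,967,296 hashes,
   // difficulty 1000 to roughly 4.29e12 hashes.
   double expected_hashes( double diff ) { return diff * 4294967296.; /* exp32 */ }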
bool fulltest( const uint32_t *hash, const uint32_t *target ); bool fulltest( const uint32_t *hash, const uint32_t *target );
bool valid_hash( const void*, const void* ); bool valid_hash( const void*, const void* );
@@ -332,12 +346,16 @@ struct thr_info {
//struct thr_info *thr_info; //struct thr_info *thr_info;
void test_hash_and_submit( struct work *work, const void *hash,
struct thr_info *thr );
bool submit_solution( struct work *work, const void *hash, bool submit_solution( struct work *work, const void *hash,
struct thr_info *thr ); struct thr_info *thr );
// deprecated
bool submit_lane_solution( struct work *work, const void *hash, bool submit_lane_solution( struct work *work, const void *hash,
struct thr_info *thr, const int lane ); struct thr_info *thr, const int lane );
bool submit_work( struct thr_info *thr, const struct work *work_in ); bool submit_work( struct thr_info *thr, const struct work *work_in );
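A hedged sketch of how an algorithm's scanhash loop might use these entry points, based only on the declarations and deprecation notes above: test_hash_and_submit() is assumed to both check the hash against work->target and submit it, while the older path tests with valid_hash() and calls submit_solution() explicitly. algo_hash(), nonce_index and the loop bounds are placeholders, not names from this source tree.

   uint32_t hash[8];
   for ( uint32_t n = first_nonce; n < last_nonce; n++ )
   {
      work->data[ nonce_index ] = n;
      algo_hash( hash, work->data );
      // Newer interface: assumed to test against work->target and submit.
      test_hash_and_submit( work, hash, mythr );
      // Deprecated path: explicit test, then submit_solution().
      // if ( valid_hash( hash, work->target ) )
      //    submit_solution( work, hash, mythr );
   }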
@@ -378,6 +396,7 @@ struct work {
size_t xnonce2_len; size_t xnonce2_len;
unsigned char *xnonce2; unsigned char *xnonce2;
bool sapling; bool sapling;
bool stale;
// x16rt // x16rt
uint32_t merkleroothash[8]; uint32_t merkleroothash[8];
@@ -758,7 +777,7 @@ extern const int pk_buffer_size_max;
extern int pk_buffer_size; extern int pk_buffer_size;
static char const usage[] = "\ static char const usage[] = "\
Usage: " PACKAGE_NAME " [OPTIONS]\n\ Usage: cpuminer [OPTIONS]\n\
Options:\n\ Options:\n\
-a, --algo=ALGO specify the algorithm to use\n\ -a, --algo=ALGO specify the algorithm to use\n\
allium Garlicoin (GRLC)\n\ allium Garlicoin (GRLC)\n\
@@ -853,8 +872,8 @@ Options:\n\
yespower-b2b generic yespower + blake2b\n\ yespower-b2b generic yespower + blake2b\n\
zr5 Ziftr\n\ zr5 Ziftr\n\
-N, --param-n N parameter for scrypt based algos\n\ -N, --param-n N parameter for scrypt based algos\n\
-R, --patam-r R parameter for scrypt based algos\n\ -R, --param-r R parameter for scrypt based algos\n\
-K, --param-key Key parameter for algos that use it\n\ -K, --param-key Key (pers) parameter for algos that use it\n\
-o, --url=URL URL of mining server\n\ -o, --url=URL URL of mining server\n\
-O, --userpass=U:P username:password pair for mining server\n\ -O, --userpass=U:P username:password pair for mining server\n\
-u, --user=USERNAME username for mining server\n\ -u, --user=USERNAME username for mining server\n\
@@ -893,7 +912,6 @@ Options:\n\
"\ "\
-B, --background run the miner in the background\n\ -B, --background run the miner in the background\n\
--benchmark run in offline benchmark mode\n\ --benchmark run in offline benchmark mode\n\
--cputest debug hashes from cpu algorithms\n\
--cpu-affinity set process affinity to cpu core(s), mask 0x3 for cores 0 and 1\n\ --cpu-affinity set process affinity to cpu core(s), mask 0x3 for cores 0 and 1\n\
--cpu-priority set process priority (default: 0 idle, 2 normal to 5 highest)\n\ --cpu-priority set process priority (default: 0 idle, 2 normal to 5 highest)\n\
-b, --api-bind IP/Port for the miner API (default: 127.0.0.1:4048)\n\ -b, --api-bind IP/Port for the miner API (default: 127.0.0.1:4048)\n\
util.c
@@ -983,6 +983,7 @@ int timeval_subtract(struct timeval *result, struct timeval *x,
return x->tv_sec < y->tv_sec; return x->tv_sec < y->tv_sec;
} }
// deprecated, use test_hash_and_submit
// Use this when deinterleaved // Use this when deinterleaved
// do 64 bit test 4 iterations // do 64 bit test 4 iterations
inline bool valid_hash( const void *hash, const void *target ) inline bool valid_hash( const void *hash, const void *target )
@@ -999,6 +1000,7 @@ inline bool valid_hash( const void *hash, const void *target )
return true; return true;
} }
// deprecated, use test_hash_and_submit
bool fulltest( const uint32_t *hash, const uint32_t *target ) bool fulltest( const uint32_t *hash, const uint32_t *target )
{ {
int i; int i;
@@ -1044,32 +1046,40 @@ void diff_to_target(uint32_t *target, double diff)
uint64_t m; uint64_t m;
int k; int k;
const double exp64 = (double)0xffffffffffffffff + 1.; for (k = 6; k > 0 && diff > 1.0; k--)
for ( k = 3; k > 0 && diff > 1.0; k-- ) diff /= exp32;
diff /= exp64;
// for (k = 6; k > 0 && diff > 1.0; k--)
// diff /= 4294967296.0; // diff /= 4294967296.0;
m = (uint64_t)( 0xffff0000 / diff );
if unlikely( m == 0 && k == 3 ) // m = (uint64_t)(4294901760.0 / diff);
memset( target, 0xff, 32 );
else m = (uint64_t)(exp32 / diff);
{
memset( target, 0, 32 ); if (m == 0 && k == 6)
((uint64_t*)target)[k] = m; memset(target, 0xff, 32);
// target[k] = (uint32_t)m; else {
// target[k + 1] = (uint32_t)(m >> 32); memset(target, 0, 32);
target[k] = (uint32_t)m;
target[k + 1] = (uint32_t)(m >> 32);
} }
} }
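The rewritten diff_to_target() above walks the difficulty down in 2**32 steps (k from 6 toward 1) and writes a 64-bit mantissa into the matching pair of 32-bit target words. A hedged usage sketch of the interface only; the exact word layout follows the function above:

   uint32_t target[8] = {0};

   diff_to_target( target, 1.0 );      // diff 1: m = 2**32 / 1, written into
                                       // the top target words (value ~2**224)
   diff_to_target( target, 65536.0 );  // diff 65536: target is 65536x smaller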
// Only used by stratum pools // deprecated
void work_set_target(struct work* work, double diff) void work_set_target(struct work* work, double diff)
{ {
diff_to_target( work->target, diff ); diff_to_target( work->target, diff );
work->targetdiff = diff; work->targetdiff = diff;
} }
// Only used by longpoll pools double target_to_diff( uint32_t* target )
{
uint64_t *targ = (uint64_t*)target;
// extract 64 bits from target[ 240:176 ]
uint64_t m = ( targ[3] << 16 ) | ( targ[2] >> 48 );
return m ? (exp48-1.) / (double)m : 0.;
}
/*
double target_to_diff(uint32_t* target) double target_to_diff(uint32_t* target)
{ {
uchar* tgt = (uchar*) target; uchar* tgt = (uchar*) target;
@@ -1083,11 +1093,13 @@ double target_to_diff(uint32_t* target)
(uint64_t)tgt[23] << 8 | (uint64_t)tgt[23] << 8 |
(uint64_t)tgt[22] << 0; (uint64_t)tgt[22] << 0;
if (!m) if (!m)
return 0.; return 0.;
else else
return (double)0x0000ffff00000000/m; return (double)0x0000ffff00000000/m;
} }
*/
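The new target_to_diff() reads a 64-bit window spanning target bits 176..239 (hence the "target[ 240:176 ]" comment) and divides 2**48 - 1 by it, the inverse of the diff-to-target mapping. A small hedged round-trip check using both helpers:

   uint32_t target[8] = {0};
   diff_to_target( target, 1.0 );        // top 32-bit word becomes 0x00000001
   double d = target_to_diff( target );  // window m = 2**48, so d ~= 1.0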
#ifdef WIN32 #ifdef WIN32
#define socket_blocks() (WSAGetLastError() == WSAEWOULDBLOCK) #define socket_blocks() (WSAGetLastError() == WSAEWOULDBLOCK)
@@ -1546,35 +1558,44 @@ bool stratum_authorize(struct stratum_ctx *sctx, const char *user, const char *p
ret = true; ret = true;
if (!opt_extranonce) if ( !opt_extranonce )
goto out; goto out;
// subscribe to extranonce (optional) // subscribe to extranonce (optional)
sprintf(s, "{\"id\": 3, \"method\": \"mining.extranonce.subscribe\", \"params\": []}"); sprintf(s, "{\"id\": 3, \"method\": \"mining.extranonce.subscribe\", \"params\": []}");
if (!stratum_send_line(sctx, s)) if ( !stratum_send_line( sctx, s ) )
goto out; goto out;
if (!socket_full(sctx->sock, 3)) { if ( !socket_full( sctx->sock, 3 ) )
applog(LOG_WARNING, "stratum extranonce subscribe timed out"); {
applog( LOG_WARNING, "Extranonce disabled, subscribe timed out" );
opt_extranonce = false;
goto out; goto out;
} }
if ( !opt_quiet )
applog( LOG_INFO, "Extranonce subscription enabled" );
sret = stratum_recv_line(sctx); sret = stratum_recv_line( sctx );
if (sret) { if ( sret )
json_t *extra = JSON_LOADS(sret, &err); {
if (!extra) { json_t *extra = JSON_LOADS( sret, &err );
if ( !extra )
{
applog(LOG_WARNING, "JSON decode failed(%d): %s", err.line, err.text); applog(LOG_WARNING, "JSON decode failed(%d): %s", err.line, err.text);
} else {
if (json_integer_value(json_object_get(extra, "id")) != 3) {
// we receive a standard method if extranonce is ignored
if (!stratum_handle_method(sctx, sret))
applog(LOG_WARNING, "Stratum answer id is not correct!");
} }
res_val = json_object_get(extra, "result"); else
{
if ( json_integer_value(json_object_get( extra, "id" ) ) != 3 )
{
// we receive a standard method if extranonce is ignored
if ( !stratum_handle_method( sctx, sret ) )
applog( LOG_WARNING, "Stratum answer id is not correct!" );
}
res_val = json_object_get( extra, "result" );
// if (opt_debug && (!res_val || json_is_false(res_val))) // if (opt_debug && (!res_val || json_is_false(res_val)))
// applog(LOG_DEBUG, "extranonce subscribe not supported"); // applog(LOG_DEBUG, "extranonce subscribe not supported");
json_decref(extra); json_decref( extra );
} }
free(sret); free(sret);
} }
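For reference, the optional extranonce handshake above is plain stratum JSON-RPC. The request string is taken verbatim from the code; the replies below are typical illustrative shapes, not output from a specific pool:

   // Request sent by the miner (id 3, as in the code above):
   //   {"id": 3, "method": "mining.extranonce.subscribe", "params": []}
   // Typical responses (illustrative):
   //   {"id": 3, "result": true,  "error": null}   // extranonce updates enabled
   //   {"id": 3, "result": false, "error": null}   // pool ignores the extension
   // If the pool never answers the request, the next line received is a normal
   // method such as mining.notify, which is why the code above falls back to
   // stratum_handle_method() when the answer id is not 3.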