mirror of
https://github.com/JayDDee/cpuminer-opt.git
synced 2025-09-17 23:44:27 +00:00
Compare commits
2 Commits

6e8b8ed34f
c0aadbcc99

README.md (22 changed lines)
@@ -37,25 +37,25 @@ Requirements
 ------------
 
 1. A x86_64 architecture CPU with a minimum of SSE2 support. This includes
-Intel Core2 and newer and AMD equivalents. In order to take advantage of AES_NI
-optimizations a CPU with AES_NI is required. This includes Intel Westmere
-and newer and AMD equivalents. Further optimizations are available on some
-algoritms for CPUs with AVX and AVX2, Sandybridge and Haswell respectively.
+Intel Core2 and newer and AMD equivalents. Further optimizations are available
+on some algoritms for CPUs with AES, AVX, AVX2, SHA, AVX512 and VAES.
 
 Older CPUs are supported by cpuminer-multi by TPruvot but at reduced
 performance.
 
-ARM CPUs are not supported.
+ARM and Aarch64 CPUs are not supported.
 
-2. 64 bit Linux OS. Ubuntu and Fedora based distributions, including Mint and
-Centos, are known to work and have all dependencies in their repositories.
-Others may work but may require more effort. Older versions such as Centos 6
-don't work due to missing features.
+2. 64 bit Linux or Windows OS. Ubuntu and Fedora based distributions,
+including Mint and Centos, are known to work and have all dependencies
+in their repositories. Others may work but may require more effort. Older
+versions such as Centos 6 don't work due to missing features.
 64 bit Windows OS is supported with mingw_w64 and msys or pre-built binaries.
 
 MacOS, OSx and Android are not supported.
 
-3. Stratum pool. Some algos may work wallet mining using getwork or GBT. YMMV.
+3. Stratum pool supporting stratum+tcp:// or stratum+ssl:// protocols or
+RPC getwork using http:// or https://.
+GBT is YMMV.
 
 Supported Algorithms
 --------------------
@@ -163,7 +163,7 @@ power2b: --algo yespower-b2b --param-n 2048 --param-r 32 --param-key "Now I am b
 
 sugarchain: --algo yespower --param-n 2048 -param-r 32 --param-key "Satoshi Nakamoto 31/Oct/2008 Proof-of-work is essentially one-CPU-one-vote"
 
-yespoweriots: --a yespower --param-n 2048 --param-key "Iots is committed to the development of IOT"
+yespoweriots: --algo yespower --param-n 2048 --param-key "Iots is committed to the development of IOT"
 
 yespowerlitb: --algo yespower --param-n 2048 --param-r 32 --param-key "LITBpower: The number of LITB working or available for proof-of-work mini"
 
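The parameter lines above are fragments of a full command line. For orientation, a complete invocation for the sugarchain entry would look like the line below; the pool URL, port and wallet address are placeholders for this example and are not taken from the README.

cpuminer --algo yespower --param-n 2048 --param-r 32 --param-key "Satoshi Nakamoto 31/Oct/2008 Proof-of-work is essentially one-CPU-one-vote" --url stratum+tcp://pool.example.com:3333 --user YOUR_WALLET_ADDRESS --pass x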
@@ -65,12 +65,37 @@ If not what makes it happen or not happen?
 Change Log
 ----------
 
+v3.12.6.1
+
+Issue #252: Fixed SSL mining (stratum+tcps://)
+
+Issue #254 Fixed benchmark.
+
+Issue #253: Implemented stale share reduction for yespower, x25x, x22i, x21s,
+x16*, scryptn2, more to come.
+
+v3.12.6
+
+Issue #246: improved stale share detection for getwork.
+
+Improved precision of target_to_diff conversion from 4 digits to 20+.
+
+Display hash and target debug data for all rejected shares.
+
+A graphical representation of CPU affinity is displayed when using --threads.
+
+Added highest and lowest accepted share to summary log.
+
+Other small changes to logs to improve consistency and clarity.
+
 v3.12.5
 
 Issues #246 & #251: fixed incorrect share diff for stratum and getwork,
 fixed incorrect target diff for getwork. Stats should now be correct for
 getwork as well as stratum.
 
+Issue #252: Fixed stratum+tcps not using curl ssl.
+
 Getwork: reduce stale blocks, faster response to new work.
 
 Added ntime to new job/work logs.
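The v3.12.6 entry about target_to_diff precision refers to turning the 256-bit share target into a difficulty value. The following sketch is illustrative only and is not taken from the repository; it assumes the target is given as eight 32-bit little-endian words and shows one way to do the conversion in long double arithmetic rather than plain double.

#include <math.h>
#include <stdint.h>

// Illustrative sketch, not cpuminer-opt source code.
static long double target_to_diff_sketch( const uint32_t *target )
{
   long double t = 0.0L;
   // Fold the eight words from most to least significant: t = t * 2^32 + word.
   for ( int i = 7; i >= 0; i-- )
      t = t * 4294967296.0L + (long double)target[i];
   if ( t <= 0.0L ) return 0.0L;
   // The difficulty-1 target 0x00000000FFFF0000...0000 equals 65535 * 2^208.
   const long double diff_one = ldexpl( 65535.0L, 208 );
   return diff_one / t;
}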
@@ -97,21 +97,23 @@ int null_scanhash()
    return 0;
 }
 
-void null_hash()
+int null_hash()
 {
    applog(LOG_WARNING,"SWERR: null_hash unsafe null function");
+   return 0;
 };
+/*
 void null_hash_suw()
 {
    applog(LOG_WARNING,"SWERR: null_hash_suw unsafe null function");
 };
+*/
 void init_algo_gate( algo_gate_t* gate )
 {
    gate->miner_thread_init = (void*)&return_true;
    gate->scanhash = (void*)&null_scanhash;
    gate->hash = (void*)&null_hash;
-   gate->hash_suw = (void*)&null_hash_suw;
+// gate->hash_suw = (void*)&null_hash_suw;
    gate->get_new_work = (void*)&std_get_new_work;
    gate->work_decode = (void*)&std_le_work_decode;
    gate->decode_extra_data = (void*)&do_nothing;
@@ -113,9 +113,10 @@ typedef struct
 // mandatory functions, must be overwritten
 int ( *scanhash ) ( struct work*, uint32_t, uint64_t*, struct thr_info* );
 
+// not used anywhere
 // optional unsafe, must be overwritten if algo uses function
-void ( *hash ) ( void*, const void*, uint32_t ) ;
-void ( *hash_suw ) ( void*, const void* );
+int ( *hash ) ( void*, const void*, uint32_t ) ;
+//void ( *hash_suw ) ( void*, const void* );
 
 //optional, safe to use default in most cases
 
@@ -213,8 +214,8 @@ void four_way_not_tested();
 int null_scanhash();
 
 // displays warning
-void null_hash ();
-void null_hash_suw();
+int null_hash ();
+//void null_hash_suw();
 
 // optional safe targets, default listed first unless noted.
 
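As a sketch of how the members above are consumed, the following registration overrides the unsafe defaults with algorithm-specific functions. It is illustrative only: "myalgo" and its two functions are hypothetical, while the member names and the int return convention follow the declarations in this hunk and the register_m7m_algo hunk below.

// Illustrative sketch, not repository code; "myalgo" is hypothetical.
bool register_myalgo_algo( algo_gate_t *gate )
{
   // mandatory: every algo supplies its own scanhash
   gate->scanhash = (void*)&scanhash_myalgo;
   // optional unsafe: only set if the algo uses gate->hash; it now returns
   // int, with 0 meaning the hash aborted early on a work restart
   gate->hash = (void*)&myalgo_hash;
   return true;
}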
@@ -311,7 +311,7 @@ bool register_m7m_algo( algo_gate_t *gate )
 {
    gate->optimizations = SHA_OPT;
    init_m7m_ctx();
-   gate->scanhash = (void*)scanhash_m7m_hash;
+   gate->scanhash = (void*)&scanhash_m7m_hash;
    gate->build_stratum_request = (void*)&std_be_build_stratum_request;
    gate->work_decode = (void*)&std_be_work_decode;
    gate->submit_getwork_result = (void*)&std_be_submit_getwork_result;
@@ -424,7 +424,7 @@ static bool scrypt_1024_1_1_256(const uint32_t *input, uint32_t *output,
 }
 
 #ifdef HAVE_SHA256_4WAY
-static bool scrypt_1024_1_1_256_4way(const uint32_t *input,
+static int scrypt_1024_1_1_256_4way(const uint32_t *input,
    uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N,
    int thrid )
 {
@@ -449,6 +449,8 @@ static bool scrypt_1024_1_1_256_4way(const uint32_t *input,
 
    PBKDF2_SHA256_80_128_4way(tstate, ostate, W, W);
 
+   if ( work_restart[thrid].restart ) return 0;
+
    for (i = 0; i < 32; i++)
       for (k = 0; k < 4; k++)
          X[k * 32 + i] = W[4 * i + k];
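The work_restart checks added in this and the following hunks are the mechanism behind the "stale share reduction" entry in the change log: a long multi-stage hash gives up between stages once the work being hashed has been replaced, and its int return value tells the caller whether a finished hash is available. Below is a condensed, illustrative sketch of the pattern; stage_one and stage_two are hypothetical stand-ins for the real hash stages.

// Illustrative sketch of the early-abort pattern, not repository code.
int chained_hash( void *output, const void *input, int thrid )
{
   uint8_t midstate[64];

   stage_one( midstate, input );                 // first expensive stage
   if ( work_restart[thrid].restart ) return 0;  // new work arrived: stop now
                                                 // rather than finish a stale hash
   stage_two( output, midstate );                // remaining stages
   return 1;                                     // hash completed normally
}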
@@ -458,6 +460,8 @@ static bool scrypt_1024_1_1_256_4way(const uint32_t *input,
    scrypt_core(X + 2 * 32, V, N);
    scrypt_core(X + 3 * 32, V, N);
 
+   if ( work_restart[thrid].restart ) return 0;
+
    for (i = 0; i < 32; i++)
       for (k = 0; k < 4; k++)
          W[4 * i + k] = X[k * 32 + i];
@@ -468,13 +472,13 @@ static bool scrypt_1024_1_1_256_4way(const uint32_t *input,
       for (k = 0; k < 4; k++)
          output[k * 8 + i] = W[4 * i + k];
 
-   return true;
+   return 1;
 }
 #endif /* HAVE_SHA256_4WAY */
 
 #ifdef HAVE_SCRYPT_3WAY
 
-static bool scrypt_1024_1_1_256_3way(const uint32_t *input,
+static int scrypt_1024_1_1_256_3way(const uint32_t *input,
    uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N,
    int thrid )
 {
@@ -492,23 +496,23 @@ static bool scrypt_1024_1_1_256_3way(const uint32_t *input,
    HMAC_SHA256_80_init(input + 20, tstate + 8, ostate + 8);
    HMAC_SHA256_80_init(input + 40, tstate + 16, ostate + 16);
 
-   if ( work_restart[thrid].restart ) return false;
+   if ( work_restart[thrid].restart ) return 0;
 
    PBKDF2_SHA256_80_128(tstate + 0, ostate + 0, input + 0, X + 0);
    PBKDF2_SHA256_80_128(tstate + 8, ostate + 8, input + 20, X + 32);
    PBKDF2_SHA256_80_128(tstate + 16, ostate + 16, input + 40, X + 64);
 
-   if ( work_restart[thrid].restart ) return false;
+   if ( work_restart[thrid].restart ) return 0;
 
    scrypt_core_3way(X, V, N);
 
-   if ( work_restart[thrid].restart ) return false;
+   if ( work_restart[thrid].restart ) return 0;
 
    PBKDF2_SHA256_128_32(tstate + 0, ostate + 0, X + 0, output + 0);
    PBKDF2_SHA256_128_32(tstate + 8, ostate + 8, X + 32, output + 8);
    PBKDF2_SHA256_128_32(tstate + 16, ostate + 16, X + 64, output + 16);
 
-   return true;
+   return 1;
 }
 
 #ifdef HAVE_SHA256_4WAY
@@ -539,13 +543,13 @@ static bool scrypt_1024_1_1_256_12way(const uint32_t *input,
    HMAC_SHA256_80_init_4way(W + 128, tstate + 32, ostate + 32);
    HMAC_SHA256_80_init_4way(W + 256, tstate + 64, ostate + 64);
 
-   if ( work_restart[thrid].restart ) return false;
+   if ( work_restart[thrid].restart ) return 0;
 
    PBKDF2_SHA256_80_128_4way(tstate + 0, ostate + 0, W + 0, W + 0);
    PBKDF2_SHA256_80_128_4way(tstate + 32, ostate + 32, W + 128, W + 128);
    PBKDF2_SHA256_80_128_4way(tstate + 64, ostate + 64, W + 256, W + 256);
 
-   if ( work_restart[thrid].restart ) return false;
+   if ( work_restart[thrid].restart ) return 0;
 
    for (j = 0; j < 3; j++)
       for (i = 0; i < 32; i++)
@@ -557,7 +561,7 @@ static bool scrypt_1024_1_1_256_12way(const uint32_t *input,
    scrypt_core_3way(X + 2 * 96, V, N);
    scrypt_core_3way(X + 3 * 96, V, N);
 
-   if ( work_restart[thrid].restart ) return false;
+   if ( work_restart[thrid].restart ) return 0;
 
    for (j = 0; j < 3; j++)
       for (i = 0; i < 32; i++)
@@ -573,14 +577,14 @@ static bool scrypt_1024_1_1_256_12way(const uint32_t *input,
          for (k = 0; k < 4; k++)
            output[32 * j + k * 8 + i] = W[128 * j + 4 * i + k];
 
-   return true;
+   return 1;
 }
 #endif /* HAVE_SHA256_4WAY */
 
 #endif /* HAVE_SCRYPT_3WAY */
 
 #ifdef HAVE_SCRYPT_6WAY
-static bool scrypt_1024_1_1_256_24way( const uint32_t *input,
+static int scrypt_1024_1_1_256_24way( const uint32_t *input,
                                        uint32_t *output, uint32_t *midstate,
                                        unsigned char *scratchpad, int N, int thrid )
 {
@@ -607,13 +611,13 @@ static bool scrypt_1024_1_1_256_24way( const uint32_t *input,
    HMAC_SHA256_80_init_8way( W + 256, tstate + 64, ostate + 64 );
    HMAC_SHA256_80_init_8way( W + 512, tstate + 128, ostate + 128 );
 
-   if ( work_restart[thrid].restart ) return false;
+   if ( work_restart[thrid].restart ) return 0;
 
    PBKDF2_SHA256_80_128_8way( tstate + 0, ostate + 0, W + 0, W + 0 );
    PBKDF2_SHA256_80_128_8way( tstate + 64, ostate + 64, W + 256, W + 256 );
    PBKDF2_SHA256_80_128_8way( tstate + 128, ostate + 128, W + 512, W + 512 );
 
-   if ( work_restart[thrid].restart ) return false;
+   if ( work_restart[thrid].restart ) return 0;
 
    for ( j = 0; j < 3; j++ )
      for ( i = 0; i < 32; i++ )
@@ -622,10 +626,13 @@ static bool scrypt_1024_1_1_256_24way( const uint32_t *input,
 
    scrypt_core_6way( X + 0 * 32, V, N );
    scrypt_core_6way( X + 6 * 32, V, N );
-   scrypt_core_6way( X + 12 * 32, V, N );
+
+   if ( work_restart[thrid].restart ) return 0;
+
+   scrypt_core_6way( X + 12 * 32, V, N );
    scrypt_core_6way( X + 18 * 32, V, N );
 
-   if ( work_restart[thrid].restart ) return false;
+   if ( work_restart[thrid].restart ) return 0;
 
    for ( j = 0; j < 3; j++ )
      for ( i = 0; i < 32; i++ )
@@ -641,7 +648,7 @@ static bool scrypt_1024_1_1_256_24way( const uint32_t *input,
        for ( k = 0; k < 8; k++ )
          output[8 * 8 * j + k * 8 + i] = W[8 * 32 * j + 8 * i + k];
 
-   return true;
+   return 1;
 }
 #endif /* HAVE_SCRYPT_6WAY */
 
@@ -711,6 +718,7 @@ extern int scanhash_scrypt( struct work *work, uint32_t max_nonce,
            pdata[19] = data[i * 20 + 19];
            submit_solution( work, hash + i * 8, mythr );
         }
 
      }
   } while ( likely( ( n < ( max_nonce - throughput ) ) && !(*restart) ) );
 
@@ -77,7 +77,7 @@ typedef union _hex_context_overlay hex_context_overlay;
 
 static __thread x16r_context_overlay hex_ctx;
 
-void hex_hash( void* output, const void* input )
+int hex_hash( void* output, const void* input, int thrid )
 {
    uint32_t _ALIGN(128) hash[16];
    x16r_context_overlay ctx;
@@ -214,11 +214,15 @@ void hex_hash( void* output, const void* input )
         SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
         break;
      }
 
+     if ( work_restart[thrid].restart ) return 0;
+
      algo = (uint8_t)hash[0] % X16R_HASH_FUNC_COUNT;
      in = (void*) hash;
      size = 64;
   }
   memcpy(output, hash, 32);
+  return 1;
 }
 
 int scanhash_hex( struct work *work, uint32_t max_nonce,
@@ -286,8 +290,7 @@ int scanhash_hex( struct work *work, uint32_t max_nonce,
   do
   {
      edata[19] = nonce;
-     hex_hash( hash32, edata );
-
+     if ( hex_hash( hash32, edata, thr_id ) );
      if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
      {
         be32enc( &pdata[19], nonce );
@@ -80,7 +80,7 @@ void x16r_8way_prehash( void *vdata, void *pdata )
 // Called by wrapper hash function to optionally continue hashing and
 // convert to final hash.
 
-void x16r_8way_hash_generic( void* output, const void* input )
+int x16r_8way_hash_generic( void* output, const void* input, int thrid )
 {
   uint32_t vhash[20*8] __attribute__ ((aligned (128)));
   uint32_t hash0[20] __attribute__ ((aligned (64)));
@@ -424,6 +424,9 @@ void x16r_8way_hash_generic( void* output, const void* input )
                        hash7, vhash );
        break;
     }
 
+    if ( work_restart[thrid].restart ) return 0;
+
     size = 64;
  }
 
@@ -435,14 +438,17 @@ void x16r_8way_hash_generic( void* output, const void* input )
  memcpy( output+320, hash5, 64 );
  memcpy( output+384, hash6, 64 );
  memcpy( output+448, hash7, 64 );
 
+ return 1;
 }
 
 // x16-r,-s,-rt wrapper called directly by scanhash to repackage 512 bit
 // hash to 256 bit final hash.
-void x16r_8way_hash( void* output, const void* input )
+int x16r_8way_hash( void* output, const void* input, int thrid )
 {
  uint8_t hash[64*8] __attribute__ ((aligned (128)));
- x16r_8way_hash_generic( hash, input );
+ if ( !x16r_8way_hash_generic( hash, input, thrid ) )
+    return 0;
 
  memcpy( output, hash, 32 );
  memcpy( output+32, hash+64, 32 );
@@ -452,7 +458,9 @@ void x16r_8way_hash( void* output, const void* input )
  memcpy( output+160, hash+320, 32 );
  memcpy( output+192, hash+384, 32 );
  memcpy( output+224, hash+448, 32 );
-}
 
+ return 1;
+}
 
 // x16r only
 int scanhash_x16r_8way( struct work *work, uint32_t max_nonce,
@@ -492,8 +500,7 @@ int scanhash_x16r_8way( struct work *work, uint32_t max_nonce,
                                      n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16r_8way_hash( hash, vdata );
-
+     if( x16r_8way_hash( hash, vdata, thr_id ) );
      for ( int i = 0; i < 8; i++ )
      if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
      {
@@ -565,7 +572,7 @@ void x16r_4way_prehash( void *vdata, void *pdata )
    }
 }
 
-void x16r_4way_hash_generic( void* output, const void* input )
+int x16r_4way_hash_generic( void* output, const void* input, int thrid )
 {
   uint32_t vhash[20*4] __attribute__ ((aligned (128)));
   uint32_t hash0[20] __attribute__ ((aligned (64)));
@@ -794,23 +801,31 @@ void x16r_4way_hash_generic( void* output, const void* input )
        dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
        break;
     }
 
+    if ( work_restart[thrid].restart ) return 0;
+
     size = 64;
  }
  memcpy( output, hash0, 64 );
  memcpy( output+64, hash1, 64 );
  memcpy( output+128, hash2, 64 );
  memcpy( output+192, hash3, 64 );
 
+ return 1;
 }
 
-void x16r_4way_hash( void* output, const void* input )
+int x16r_4way_hash( void* output, const void* input, int thrid )
 {
  uint8_t hash[64*4] __attribute__ ((aligned (64)));
- x16r_4way_hash_generic( hash, input );
+ if ( !x16r_4way_hash_generic( hash, input, thrid ) )
+    return 0;
 
  memcpy( output, hash, 32 );
 memcpy( output+32, hash+64, 32 );
 memcpy( output+64, hash+128, 32 );
 memcpy( output+96, hash+192, 32 );
 
+ return 1;
 }
 
 int scanhash_x16r_4way( struct work *work, uint32_t max_nonce,
@@ -849,7 +864,7 @@ int scanhash_x16r_4way( struct work *work, uint32_t max_nonce,
             _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16r_4way_hash( hash, vdata );
+     if ( x16r_4way_hash( hash, vdata, thr_id ) );
     for ( int i = 0; i < 4; i++ )
     if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
     {
@@ -131,8 +131,8 @@ typedef union _x16r_8way_context_overlay x16r_8way_context_overlay;
 extern __thread x16r_8way_context_overlay x16r_ctx;
 
 void x16r_8way_prehash( void *, void * );
-void x16r_8way_hash_generic( void *, const void * );
-void x16r_8way_hash( void *, const void * );
+int x16r_8way_hash_generic( void *, const void *, int );
+int x16r_8way_hash( void *, const void *, int );
 int scanhash_x16r_8way( struct work *, uint32_t ,
                         uint64_t *, struct thr_info * );
 extern __thread x16r_8way_context_overlay x16r_ctx;
@@ -166,8 +166,8 @@ typedef union _x16r_4way_context_overlay x16r_4way_context_overlay;
 extern __thread x16r_4way_context_overlay x16r_ctx;
 
 void x16r_4way_prehash( void *, void * );
-void x16r_4way_hash_generic( void *, const void * );
-void x16r_4way_hash( void *, const void * );
+int x16r_4way_hash_generic( void *, const void *, int );
+int x16r_4way_hash( void *, const void *, int );
 int scanhash_x16r_4way( struct work *, uint32_t,
                         uint64_t *, struct thr_info * );
 extern __thread x16r_4way_context_overlay x16r_ctx;
@@ -205,26 +205,26 @@ typedef union _x16r_context_overlay x16r_context_overlay;
 extern __thread x16r_context_overlay x16_ctx;
 
 void x16r_prehash( void *, void * );
-void x16r_hash_generic( void *, const void * );
-void x16r_hash( void *, const void * );
+int x16r_hash_generic( void *, const void *, int );
+int x16r_hash( void *, const void *, int );
 int scanhash_x16r( struct work *, uint32_t, uint64_t *, struct thr_info * );
 
 // x16Rv2
 #if defined(X16RV2_8WAY)
 
-void x16rv2_8way_hash( void *state, const void *input );
+int x16rv2_8way_hash( void *state, const void *input, int thrid );
 int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,
                           uint64_t *hashes_done, struct thr_info *mythr );
 
 #elif defined(X16RV2_4WAY)
 
-void x16rv2_4way_hash( void *state, const void *input );
+int x16rv2_4way_hash( void *state, const void *input, int thrid );
 int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
                           uint64_t *hashes_done, struct thr_info *mythr );
 
 #else
 
-void x16rv2_hash( void *state, const void *input );
+int x16rv2_hash( void *state, const void *input, int thr_id );
 int scanhash_x16rv2( struct work *work, uint32_t max_nonce,
                      uint64_t *hashes_done, struct thr_info *mythr );
 
@@ -254,21 +254,21 @@ int scanhash_x16rt( struct work *work, uint32_t max_nonce,
 // x21s
 #if defined(X16R_8WAY)
 
-void x21s_8way_hash( void *state, const void *input );
+int x21s_8way_hash( void *state, const void *input, int thrid );
 int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,
                         uint64_t *hashes_done, struct thr_info *mythr );
 bool x21s_8way_thread_init();
 
 #elif defined(X16R_4WAY)
 
-void x21s_4way_hash( void *state, const void *input );
+int x21s_4way_hash( void *state, const void *input, int thrid );
 int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,
                         uint64_t *hashes_done, struct thr_info *mythr );
 bool x21s_4way_thread_init();
 
 #else
 
-void x21s_hash( void *state, const void *input );
+int x21s_hash( void *state, const void *input, int thr_id );
 int scanhash_x21s( struct work *work, uint32_t max_nonce,
                    uint64_t *hashes_done, struct thr_info *mythr );
 bool x21s_thread_init();
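The hash_generic / hash split declared above lets derived algorithms such as x21s keep hashing from the 512-bit x16r intermediate instead of the truncated 256-bit result, and lets an early abort propagate outward. Below is a sketch of a wrapper built on that convention; it is illustrative only, myalgo_hash is hypothetical, and only the x16r_hash_generic call matches the prototypes above.

// Illustrative sketch, not repository code.
int myalgo_hash( void *output, const void *input, int thrid )
{
   uint8_t hash[64] __attribute__ ((aligned (64)));

   if ( !x16r_hash_generic( hash, input, thrid ) )
      return 0;                     // aborted on work restart, no final hash
   // ... additional rounds over the 64-byte intermediate would go here ...
   memcpy( output, hash, 32 );      // repackage to the 256-bit final hash
   return 1;
}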
@@ -48,7 +48,7 @@ void x16r_prehash( void *edata, void *pdata )
    }
 }
 
-void x16r_hash_generic( void* output, const void* input )
+int x16r_hash_generic( void* output, const void* input, int thrid )
 {
    uint32_t _ALIGN(128) hash[16];
    x16r_context_overlay ctx;
@@ -178,18 +178,24 @@ void x16r_hash_generic( void* output, const void* input )
         SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
         break;
      }
 
+     if ( work_restart[thrid].restart ) return 0;
+
      in = (void*) hash;
      size = 64;
   }
   memcpy( output, hash, 64 );
+  return true;
 }
 
-void x16r_hash( void* output, const void* input )
+int x16r_hash( void* output, const void* input, int thrid )
 {
   uint8_t hash[64] __attribute__ ((aligned (64)));
-  x16r_hash_generic( hash, input );
+  if ( !x16r_hash_generic( hash, input, thrid ) )
+     return 0;
 
   memcpy( output, hash, 32 );
+  return 1;
 }
 
 int scanhash_x16r( struct work *work, uint32_t max_nonce,
@@ -223,8 +229,7 @@ int scanhash_x16r( struct work *work, uint32_t max_nonce,
   do
   {
      edata[19] = nonce;
-     x16r_hash( hash32, edata );
-
+     if ( x16r_hash( hash32, edata, thr_id ) )
      if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
      {
        pdata[19] = bswap_32( nonce );
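Taken together with the hunk above, the calling convention for the converted scanhash loops is: test and submit the hash only when the hash function reports completion. In sketch form, with algo_hash standing in for any of the converted hash functions; this is an illustration of the pattern, not code from the repository.

   do
   {
      edata[19] = nonce;
      if ( algo_hash( hash32, edata, thr_id ) )   // 0 means aborted: skip the checks
      if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
      {
         pdata[19] = bswap_32( nonce );
         submit_solution( work, hash32, mythr );
      }
      nonce++;
   } while ( nonce < max_nonce && !work_restart[thr_id].restart );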
@@ -41,8 +41,7 @@ int scanhash_x16rt_8way( struct work *work, uint32_t max_nonce,
                                      n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16r_8way_hash( hash, vdata );
-
+     if ( x16r_8way_hash( hash, vdata, thr_id ) )
     for ( int i = 0; i < 8; i++ )
     if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
     {
@@ -95,7 +94,7 @@ int scanhash_x16rt_4way( struct work *work, uint32_t max_nonce,
            _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16r_4way_hash( hash, vdata );
+     if ( x16r_4way_hash( hash, vdata, thr_id ) )
     for ( int i = 0; i < 4; i++ )
    if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
    {
@@ -36,8 +36,7 @@ int scanhash_x16rt( struct work *work, uint32_t max_nonce,
   do
   {
      edata[19] = nonce;
-     x16r_hash( hash32, edata );
-
+     if ( x16r_hash( hash32, edata, thr_id ) )
     if ( valid_hash( hash32, ptarget ) && !bench )
     {
       pdata[19] = bswap_32( nonce );
@@ -65,7 +65,7 @@ union _x16rv2_8way_context_overlay
 typedef union _x16rv2_8way_context_overlay x16rv2_8way_context_overlay;
 static __thread x16rv2_8way_context_overlay x16rv2_ctx;
 
-void x16rv2_8way_hash( void* output, const void* input )
+int x16rv2_8way_hash( void* output, const void* input, int thrid )
 {
   uint32_t vhash[24*8] __attribute__ ((aligned (128)));
   uint32_t hash0[24] __attribute__ ((aligned (64)));
@@ -563,6 +563,9 @@ void x16rv2_8way_hash( void* output, const void* input )
                        hash7, vhash );
        break;
     }
 
+    if ( work_restart[thrid].restart ) return 0;
+
     size = 64;
  }
 
@@ -574,6 +577,7 @@ void x16rv2_8way_hash( void* output, const void* input )
  memcpy( output+160, hash5, 32 );
  memcpy( output+192, hash6, 32 );
  memcpy( output+224, hash7, 32 );
+ return 1;
 }
 
 int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,
@@ -669,8 +673,7 @@ int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,
                                      n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16rv2_8way_hash( hash, vdata );
-
+     if ( x16rv2_8way_hash( hash, vdata, thr_id ) )
    for ( int i = 0; i < 8; i++ )
    if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
    {
@@ -718,7 +721,7 @@ inline void padtiger512( uint32_t* hash )
   for ( int i = 6; i < 16; i++ ) hash[i] = 0;
 }
 
-void x16rv2_4way_hash( void* output, const void* input )
+int x16rv2_4way_hash( void* output, const void* input, int thrid )
 {
   uint32_t hash0[20] __attribute__ ((aligned (64)));
   uint32_t hash1[20] __attribute__ ((aligned (64)));
@@ -1023,12 +1026,16 @@ void x16rv2_4way_hash( void* output, const void* input )
        dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
        break;
     }
 
+    if ( work_restart[thrid].restart ) return 0;
+
     size = 64;
  }
  memcpy( output, hash0, 32 );
 memcpy( output+32, hash1, 32 );
 memcpy( output+64, hash2, 32 );
 memcpy( output+96, hash3, 32 );
+ return 1;
 }
 
 int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
@@ -1119,7 +1126,7 @@ int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
 
   do
   {
-     x16rv2_4way_hash( hash, vdata );
+     if ( x16rv2_4way_hash( hash, vdata, thr_id ) )
    for ( int i = 0; i < 4; i++ )
    if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
    {
@@ -67,7 +67,7 @@ inline void padtiger512(uint32_t* hash) {
   for (int i = (24/4); i < (64/4); i++) hash[i] = 0;
 }
 
-void x16rv2_hash( void* output, const void* input )
+int x16rv2_hash( void* output, const void* input, int thrid )
 {
   uint32_t _ALIGN(128) hash[16];
   x16rv2_context_overlay ctx;
@@ -180,10 +180,14 @@ void x16rv2_hash( void* output, const void* input )
        SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
        break;
     }
 
+    if ( work_restart[thrid].restart ) return 0;
+
     in = (void*) hash;
     size = 64;
  }
  memcpy(output, hash, 32);
+ return 1;
 }
 
 int scanhash_x16rv2( struct work *work, uint32_t max_nonce,
@@ -221,8 +225,7 @@ int scanhash_x16rv2( struct work *work, uint32_t max_nonce,
   do
   {
      edata[19] = nonce;
-     x16rv2_hash( hash32, edata );
-
+     if ( x16rv2_hash( hash32, edata, thr_id ) )
    if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
    {
      pdata[19] = bswap_32( nonce );
@@ -30,7 +30,7 @@ union _x21s_8way_context_overlay
 
 typedef union _x21s_8way_context_overlay x21s_8way_context_overlay;
 
-void x21s_8way_hash( void* output, const void* input )
+int x21s_8way_hash( void* output, const void* input, int thrid )
 {
   uint32_t vhash[16*8] __attribute__ ((aligned (128)));
   uint8_t shash[64*8] __attribute__ ((aligned (64)));
@@ -44,7 +44,8 @@ void x21s_8way_hash( void* output, const void* input )
   uint32_t *hash7 = (uint32_t*)( shash+448 );
   x21s_8way_context_overlay ctx;
 
-  x16r_8way_hash_generic( shash, input );
+  if ( !x16r_8way_hash_generic( shash, input, thrid ) )
+     return 0;
 
   intrlv_8x32_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
                    hash7 );
@@ -124,6 +125,8 @@ void x21s_8way_hash( void* output, const void* input )
   sha256_8way_init( &ctx.sha256 );
   sha256_8way_update( &ctx.sha256, vhash, 64 );
   sha256_8way_close( &ctx.sha256, output );
 
+  return 1;
 }
 
 int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,
@@ -166,8 +169,7 @@ int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,
                                      n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x21s_8way_hash( hash, vdata );
-
+     if ( x21s_8way_hash( hash, vdata, thr_id ) )
    for ( int lane = 0; lane < 8; lane++ )
    if ( unlikely( hash7[lane] <= Htarg ) )
    {
@@ -215,7 +217,7 @@ union _x21s_4way_context_overlay
 
 typedef union _x21s_4way_context_overlay x21s_4way_context_overlay;
 
-void x21s_4way_hash( void* output, const void* input )
+int x21s_4way_hash( void* output, const void* input, int thrid )
 {
   uint32_t vhash[16*4] __attribute__ ((aligned (64)));
   uint8_t shash[64*4] __attribute__ ((aligned (64)));
@@ -225,8 +227,9 @@ void x21s_4way_hash( void* output, const void* input )
   uint32_t *hash2 = (uint32_t*)( shash+128 );
   uint32_t *hash3 = (uint32_t*)( shash+192 );
 
-  x16r_4way_hash_generic( shash, input );
+  if ( !x16r_4way_hash_generic( shash, input, thrid ) )
+     return 0;
 
   intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
 
   haval256_5_4way_init( &ctx.haval );
@@ -299,6 +302,8 @@ void x21s_4way_hash( void* output, const void* input )
   dintrlv_4x32( output, output+32, output+64,output+96, vhash, 256 );
 
 #endif
 
+  return 1;
 }
 
 int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,
@@ -337,7 +342,7 @@ int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,
            _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x21s_4way_hash( hash, vdata );
+     if ( x21s_4way_hash( hash, vdata, thr_id ) )
    for ( int i = 0; i < 4; i++ )
    if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
    {
@@ -27,12 +27,13 @@ union _x21s_context_overlay
 };
 typedef union _x21s_context_overlay x21s_context_overlay;
 
-void x21s_hash( void* output, const void* input )
+int x21s_hash( void* output, const void* input, int thrid )
 {
   uint32_t _ALIGN(128) hash[16];
   x21s_context_overlay ctx;
 
-  x16r_hash_generic( hash, input );
+  if ( !x16r_hash_generic( hash, input, thrid ) )
+     return 0;
 
   sph_haval256_5_init( &ctx.haval );
   sph_haval256_5( &ctx.haval, (const void*) hash, 64) ;
@@ -54,6 +55,8 @@ void x21s_hash( void* output, const void* input )
   SHA256_Final( (unsigned char*)hash, &ctx.sha256 );
 
   memcpy( output, hash, 32 );
 
+  return 1;
 }
 
 int scanhash_x21s( struct work *work, uint32_t max_nonce,
@@ -87,8 +90,7 @@ int scanhash_x21s( struct work *work, uint32_t max_nonce,
   do
   {
      edata[19] = nonce;
-     x21s_hash( hash32, edata );
-
+     if ( x21s_hash( hash32, edata, thr_id ) )
    if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
    {
      pdata[19] = bswap_32( nonce );
@@ -62,7 +62,7 @@ union _x22i_8way_ctx_overlay
 };
 typedef union _x22i_8way_ctx_overlay x22i_8way_ctx_overlay;
 
-void x22i_8way_hash( void *output, const void *input )
+int x22i_8way_hash( void *output, const void *input, int thrid )
 {
   uint64_t vhash[8*8] __attribute__ ((aligned (128)));
   uint64_t vhashA[8*8] __attribute__ ((aligned (64)));
@@ -129,6 +129,8 @@ void x22i_8way_hash( void *output, const void *input )
   keccak512_8way_update( &ctx.keccak, vhash, 64 );
   keccak512_8way_close( &ctx.keccak, vhash );
 
+  if ( work_restart[thrid].restart ) return 0;
+
   rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
 
   luffa512_4way_full( &ctx.luffa, vhashA, vhashA, 64 );
@@ -214,6 +216,8 @@ void x22i_8way_hash( void *output, const void *input )
 
 #endif
 
+  if ( work_restart[thrid].restart ) return 0;
+
   hamsi512_8way_init( &ctx.hamsi );
   hamsi512_8way_update( &ctx.hamsi, vhash, 64 );
   hamsi512_8way_close( &ctx.hamsi, vhash );
@@ -346,6 +350,8 @@ void x22i_8way_hash( void *output, const void *input )
   sph_tiger (&ctx.tiger, (const void*) hash7, 64);
   sph_tiger_close(&ctx.tiger, (void*) hashA7);
 
+  if ( work_restart[thrid].restart ) return 0;
+
   memset( hash0, 0, 64 );
   memset( hash1, 0, 64 );
   memset( hash2, 0, 64 );
@@ -399,6 +405,8 @@ void x22i_8way_hash( void *output, const void *input )
   sha256_8way_init( &ctx.sha256 );
   sha256_8way_update( &ctx.sha256, vhash, 64 );
   sha256_8way_close( &ctx.sha256, output );
 
+  return 1;
 }
 
 int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
@@ -428,8 +436,7 @@ int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
                                      n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x22i_8way_hash( hash, vdata );
-
+     if ( x22i_8way_hash( hash, vdata, thr_id ) )
    for ( int lane = 0; lane < 8; lane++ )
    if ( unlikely( ( hashd7[ lane ] <= targ32 ) && !bench ) )
    {
@@ -437,7 +444,7 @@ int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
      if ( likely( valid_hash( lane_hash, ptarget ) ) )
      {
        pdata[19] = bswap_32( n + lane );
-       submit_lane_solution( work, lane_hash, mythr, lane );
+       submit_solution( work, lane_hash, mythr );
      }
    }
    *noncev = _mm512_add_epi32( *noncev,
@@ -524,7 +531,7 @@ union _x22i_4way_ctx_overlay
 };
 typedef union _x22i_4way_ctx_overlay x22i_ctx_overlay;
 
-void x22i_4way_hash( void *output, const void *input )
+int x22i_4way_hash( void *output, const void *input, int thrid )
 {
   uint64_t hash0[8*4] __attribute__ ((aligned (64)));
   uint64_t hash1[8*4] __attribute__ ((aligned (64)));
@@ -563,6 +570,8 @@ void x22i_4way_hash( void *output, const void *input )
   keccak512_4way_update( &ctx.keccak, vhash, 64 );
   keccak512_4way_close( &ctx.keccak, vhash );
 
+  if ( work_restart[thrid].restart ) return false;
+
   rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
 
   luffa512_2way_full( &ctx.luffa, vhashA, vhashA, 64 );
@@ -591,6 +600,8 @@ void x22i_4way_hash( void *output, const void *input )
 
   intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
 
+  if ( work_restart[thrid].restart ) return false;
+
   hamsi512_4way_init( &ctx.hamsi );
   hamsi512_4way_update( &ctx.hamsi, vhash, 64 );
   hamsi512_4way_close( &ctx.hamsi, vhash );
@@ -636,6 +647,8 @@ void x22i_4way_hash( void *output, const void *input )
   sha512_4way_close( &ctx.sha512, vhash );
   dintrlv_4x64_512( &hash0[24], &hash1[24], &hash2[24], &hash3[24], vhash );
 
+  if ( work_restart[thrid].restart ) return false;
+
   ComputeSingleSWIFFTX((unsigned char*)hash0, (unsigned char*)hashA0);
   ComputeSingleSWIFFTX((unsigned char*)hash1, (unsigned char*)hashA1);
   ComputeSingleSWIFFTX((unsigned char*)hash2, (unsigned char*)hashA2);
@@ -668,6 +681,8 @@ void x22i_4way_hash( void *output, const void *input )
   sph_tiger (&ctx.tiger, (const void*) hash3, 64);
   sph_tiger_close(&ctx.tiger, (void*) hashA3);
 
+  if ( work_restart[thrid].restart ) return false;
+
   memset( hash0, 0, 64 );
   memset( hash1, 0, 64 );
   memset( hash2, 0, 64 );
@@ -700,8 +715,9 @@ void x22i_4way_hash( void *output, const void *input )
   sha256_4way_init( &ctx.sha256 );
   sha256_4way_update( &ctx.sha256, vhash, 64 );
   sha256_4way_close( &ctx.sha256, output );
-}
 
+  return 1;
+}
 
 int scanhash_x22i_4way( struct work* work, uint32_t max_nonce,
                         uint64_t *hashes_done, struct thr_info *mythr )
@@ -729,8 +745,7 @@ int scanhash_x22i_4way( struct work* work, uint32_t max_nonce,
            _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x22i_4way_hash( hash, vdata );
-
+     if ( x22i_4way_hash( hash, vdata, thr_id ) )
    for ( int lane = 0; lane < 4; lane++ )
    if ( unlikely( hashd7[ lane ] <= targ32 && !bench ) )
    {
@@ -738,7 +753,7 @@ int scanhash_x22i_4way( struct work* work, uint32_t max_nonce,
      if ( valid_hash( lane_hash, ptarget ) )
      {
        pdata[19] = bswap_32( n + lane );
-       submit_lane_solution( work, lane_hash, mythr, lane );
+       submit_solution( work, lane_hash, mythr );
     }
   }
   *noncev = _mm256_add_epi32( *noncev,
@@ -16,19 +16,19 @@ bool register_x22i_algo( algo_gate_t* gate );
 
 #if defined(X22I_8WAY)
 
-void x22i_8way_hash( void *state, const void *input );
+int x22i_8way_hash( void *state, const void *input, int thrid );
 int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
                         uint64_t *hashes_done, struct thr_info *mythr );
 
 #elif defined(X22I_4WAY)
 
-void x22i_4way_hash( void *state, const void *input );
+int x22i_4way_hash( void *state, const void *input, int thrid );
 int scanhash_x22i_4way( struct work *work, uint32_t max_nonce,
                         uint64_t *hashes_done, struct thr_info *mythr );
 
 #else
 
-void x22i_hash( void *state, const void *input );
+int x22i_hash( void *state, const void *input, int thrid );
 int scanhash_x22i( struct work *work, uint32_t max_nonce,
                    uint64_t *hashes_done, struct thr_info *mythr );
 
@@ -44,19 +44,19 @@ bool register_x25i_algo( algo_gate_t* gate );
 
 #if defined(X25X_8WAY)
 
-void x25x_8way_hash( void *state, const void *input );
+int x25x_8way_hash( void *state, const void *input, int thrid );
 int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
                         uint64_t *hashes_done, struct thr_info *mythr );
 
 #elif defined(X25X_4WAY)
 
-void x25x_4way_hash( void *state, const void *input );
+int x25x_4way_hash( void *state, const void *input, int thrid );
 int scanhash_x25x_4way( struct work *work, uint32_t max_nonce,
                         uint64_t *hashes_done, struct thr_info *mythr );
 
 #else
 
-void x25x_hash( void *state, const void *input );
+int x25x_hash( void *state, const void *input, int thrif );
 int scanhash_x25x( struct work *work, uint32_t max_nonce,
                    uint64_t *hashes_done, struct thr_info *mythr );
 
@@ -59,7 +59,7 @@ union _x22i_context_overlay
 };
 typedef union _x22i_context_overlay x22i_context_overlay;

-void x22i_hash( void *output, const void *input )
+int x22i_hash( void *output, const void *input, int thrid )
 {
    unsigned char hash[64 * 4] __attribute__((aligned(64))) = {0};
    unsigned char hash2[65] __attribute__((aligned(64))) = {0};

@@ -95,6 +95,8 @@ void x22i_hash( void *output, const void *input )
    sph_keccak512(&ctx.keccak, (const void*) hash, 64);
    sph_keccak512_close(&ctx.keccak, hash);

+   if ( work_restart[thrid].restart ) return 0;
+
    init_luffa( &ctx.luffa, 512 );
    update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
                            (const BitSequence*)hash, 64 );

@@ -121,6 +123,8 @@ void x22i_hash( void *output, const void *input )
    sph_echo512_close( &ctx.echo, hash );
 #endif

+   if ( work_restart[thrid].restart ) return 0;
+
    sph_hamsi512_init(&ctx.hamsi);
    sph_hamsi512(&ctx.hamsi, (const void*) hash, 64);
    sph_hamsi512_close(&ctx.hamsi, hash);

@@ -143,6 +147,8 @@ void x22i_hash( void *output, const void *input )
    ComputeSingleSWIFFTX((unsigned char*)hash, (unsigned char*)hash2);

+   if ( work_restart[thrid].restart ) return 0;
+
    memset(hash, 0, 64);
    sph_haval256_5_init(&ctx.haval);
    sph_haval256_5(&ctx.haval,(const void*) hash2, 64);

@@ -165,6 +171,8 @@ void x22i_hash( void *output, const void *input )
    SHA256_Final( (unsigned char*) hash, &ctx.sha256 );

    memcpy(output, hash, 32);

+   return 1;
 }

 int scanhash_x22i( struct work *work, uint32_t max_nonce,

@@ -188,7 +196,7 @@ int scanhash_x22i( struct work *work, uint32_t max_nonce,
    do
    {
       edata[19] = n;
-      x22i_hash( hash64, edata );
+      if ( x22i_hash( hash64, edata, thr_id ) );
       if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
       {
          pdata[19] = bswap_32( n );
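The x22i_hash() hunks above drop a cheap flag test between the more expensive stages so a thread notices new work without finishing the whole multi-algorithm chain. The same idea reduced to two placeholder stages (not the real sph_* calls), assuming only the work_restart array that miner.h exposes:

#include <stdint.h>

extern void stage_a( uint8_t *out64, const uint8_t *in80 );   // placeholder stage
extern void stage_b( uint8_t *out32, const uint8_t *in64 );   // placeholder stage
extern struct work_restart { volatile unsigned long restart; } *work_restart;

// Returns 1 when the hash completed, 0 when the stratum thread asked for a
// restart part-way through; on 0 the caller must ignore the output buffer.
static int chained_hash( void *output, const void *input, int thrid )
{
   uint8_t h[64];

   stage_a( h, input );

   if ( work_restart[thrid].restart ) return 0;   // new work arrived, bail out

   stage_b( output, h );
   return 1;
}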
@@ -94,7 +94,7 @@ union _x25x_8way_ctx_overlay
 };
 typedef union _x25x_8way_ctx_overlay x25x_8way_ctx_overlay;

-void x25x_8way_hash( void *output, const void *input )
+int x25x_8way_hash( void *output, const void *input, int thrid )
 {
    uint64_t vhash[8*8] __attribute__ ((aligned (128)));
    unsigned char hash0[25][64] __attribute__((aligned(64))) = {0};

@@ -179,13 +179,15 @@ void x25x_8way_hash( void *output, const void *input )
    jh512_8way_close( &ctx.jh, vhash );
    dintrlv_8x64_512( hash0[4], hash1[4], hash2[4], hash3[4],
                      hash4[4], hash5[4], hash6[4], hash7[4], vhash );

    keccak512_8way_init( &ctx.keccak );
    keccak512_8way_update( &ctx.keccak, vhash, 64 );
    keccak512_8way_close( &ctx.keccak, vhash );
    dintrlv_8x64_512( hash0[5], hash1[5], hash2[5], hash3[5],
                      hash4[5], hash5[5], hash6[5], hash7[5], vhash );

+   if ( work_restart[thrid].restart ) return 0;
+
    rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );

    luffa_4way_init( &ctx.luffa, 512 );

@@ -261,6 +263,7 @@ void x25x_8way_hash( void *output, const void *input )
    intrlv_8x64_512( vhash, hash0[10], hash1[10], hash2[10], hash3[10],
                     hash4[10], hash5[10], hash6[10], hash7[10] );

+
 #else

    init_echo( &ctx.echo, 512 );

@@ -292,6 +295,8 @@ void x25x_8way_hash( void *output, const void *input )

 #endif

+   if ( work_restart[thrid].restart ) return 0;
+
    hamsi512_8way_init( &ctx.hamsi );
    hamsi512_8way_update( &ctx.hamsi, vhash, 64 );
    hamsi512_8way_close( &ctx.hamsi, vhash );

@@ -407,6 +412,8 @@ void x25x_8way_hash( void *output, const void *input )
    sph_tiger (&ctx.tiger, (const void*) hash7[17], 64);
    sph_tiger_close(&ctx.tiger, (void*) hash7[18]);

+   if ( work_restart[thrid].restart ) return 0;
+
    intrlv_2x256( vhash, hash0[18], hash1[18], 256 );
    LYRA2X_2WAY( vhash, 32, vhash, 32, 1, 4, 4 );
    dintrlv_2x256( hash0[19], hash1[19], vhash, 256 );

@@ -468,6 +475,8 @@ void x25x_8way_hash( void *output, const void *input )
    laneHash(512, (const BitSequence*)hash6[22], 512, (BitSequence*)hash6[23]);
    laneHash(512, (const BitSequence*)hash7[22], 512, (BitSequence*)hash7[23]);

+   if ( work_restart[thrid].restart ) return 0;
+
    x25x_shuffle( hash0 );
    x25x_shuffle( hash1 );
    x25x_shuffle( hash2 );

@@ -528,6 +537,8 @@ void x25x_8way_hash( void *output, const void *input )

    blake2s_8way_init( &ctx.blake2s, 32 );
    blake2s_8way_full_blocks( &ctx.blake2s, output, vhashX, 64*24 );

+   return 1;
 }

 int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,

@@ -557,7 +568,7 @@ int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
                       n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
    do
    {
-      x25x_8way_hash( hash, vdata );
+      if ( x25x_8way_hash( hash, vdata, thr_id ) );

       for ( int lane = 0; lane < 8; lane++ )
       if ( unlikely( ( hashd7[ lane ] <= targ32 ) && !bench ) )

@@ -566,7 +577,7 @@ int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
       if ( likely( valid_hash( lane_hash, ptarget ) ) )
       {
          pdata[19] = bswap_32( n + lane );
-         submit_lane_solution( work, lane_hash, mythr, lane );
+         submit_solution( work, lane_hash, mythr );
       }
       }
    *noncev = _mm512_add_epi32( *noncev,
@@ -654,7 +665,7 @@ union _x25x_4way_ctx_overlay
 };
 typedef union _x25x_4way_ctx_overlay x25x_4way_ctx_overlay;

-void x25x_4way_hash( void *output, const void *input )
+int x25x_4way_hash( void *output, const void *input, int thrid )
 {
    uint64_t vhash[8*4] __attribute__ ((aligned (128)));
    unsigned char hash0[25][64] __attribute__((aligned(64))) = {0};

@@ -686,6 +697,8 @@ void x25x_4way_hash( void *output, const void *input )
    jh512_4way_close( &ctx.jh, vhash );
    dintrlv_4x64_512( hash0[4], hash1[4], hash2[4], hash3[4], vhash );

+   if ( work_restart[thrid].restart ) return 0;
+
    keccak512_4way_init( &ctx.keccak );
    keccak512_4way_update( &ctx.keccak, vhash, 64 );
    keccak512_4way_close( &ctx.keccak, vhash );

@@ -738,6 +751,8 @@ void x25x_4way_hash( void *output, const void *input )

    intrlv_4x64_512( vhash, hash0[10], hash1[10], hash2[10], hash3[10] );

+   if ( work_restart[thrid].restart ) return 0;
+
    hamsi512_4way_init( &ctx.hamsi );
    hamsi512_4way_update( &ctx.hamsi, vhash, 64 );
    hamsi512_4way_close( &ctx.hamsi, vhash );

@@ -819,6 +834,8 @@ void x25x_4way_hash( void *output, const void *input )
    LYRA2RE( (void*)hash3[19], 32, (const void*)hash3[18], 32,
             (const void*)hash3[18], 32, 1, 4, 4 );

+   if ( work_restart[thrid].restart ) return 0;
+
    sph_gost512_init(&ctx.gost);
    sph_gost512 (&ctx.gost, (const void*) hash0[19], 64);
    sph_gost512_close(&ctx.gost, (void*) hash0[20]);

@@ -850,6 +867,8 @@ void x25x_4way_hash( void *output, const void *input )
    laneHash(512, (const BitSequence*)hash2[22], 512, (BitSequence*)hash2[23]);
    laneHash(512, (const BitSequence*)hash3[22], 512, (BitSequence*)hash3[23]);

+   if ( work_restart[thrid].restart ) return 0;
+
    x25x_shuffle( hash0 );
    x25x_shuffle( hash1 );
    x25x_shuffle( hash2 );

@@ -882,6 +901,8 @@ void x25x_4way_hash( void *output, const void *input )

    blake2s_4way_init( &ctx.blake2s, 32 );
    blake2s_4way_full_blocks( &ctx.blake2s, output, vhashX, 64*24 );

+   return 1;
 }

 int scanhash_x25x_4way( struct work* work, uint32_t max_nonce,

@@ -910,8 +931,7 @@ int scanhash_x25x_4way( struct work* work, uint32_t max_nonce,
    _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
    do
    {
-      x25x_4way_hash( hash, vdata );
+      if ( x25x_4way_hash( hash, vdata, thr_id ) )
       for ( int lane = 0; lane < 4; lane++ )
       if ( unlikely( hashd7[ lane ] <= targ32 && !bench ) )
       {

@@ -919,7 +939,7 @@ int scanhash_x25x_4way( struct work* work, uint32_t max_nonce,
       if ( valid_hash( lane_hash, ptarget ) )
       {
          pdata[19] = bswap_32( n + lane );
-         submit_lane_solution( work, lane_hash, mythr, lane );
+         submit_solution( work, lane_hash, mythr );
       }
       }
    *noncev = _mm256_add_epi32( *noncev,
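The intrlv_*/dintrlv_* calls that bracket nearly every stage above convert between a buffer-per-lane layout and the word-interleaved layout the 4-way/8-way SIMD kernels consume, so each vector lane carries one nonce's state. A sketch of what 4x64 interleaving of a 512-bit state amounts to (this mirrors the helpers' intent, not necessarily their exact implementation):

#include <stdint.h>

// Interleave four 8x64-bit states: word i of every source lands in four
// consecutive 64-bit slots, one per SIMD lane.
static void intrlv_4x64_512_sketch( uint64_t *dst, const uint64_t *s0,
      const uint64_t *s1, const uint64_t *s2, const uint64_t *s3 )
{
   for ( int i = 0; i < 8; i++ )
   {
      dst[ 4*i + 0 ] = s0[i];
      dst[ 4*i + 1 ] = s1[i];
      dst[ 4*i + 2 ] = s2[i];
      dst[ 4*i + 3 ] = s3[i];
   }
}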
@@ -64,7 +64,7 @@ union _x25x_context_overlay
 };
 typedef union _x25x_context_overlay x25x_context_overlay;

-void x25x_hash( void *output, const void *input )
+int x25x_hash( void *output, const void *input, int thrid )
 {
    unsigned char hash[25][64] __attribute__((aligned(64))) = {0};
    x25x_context_overlay ctx;

@@ -99,6 +99,8 @@ void x25x_hash( void *output, const void *input )
    sph_keccak512(&ctx.keccak, (const void*) &hash[4], 64);
    sph_keccak512_close(&ctx.keccak, &hash[5]);

+   if ( work_restart[thrid].restart ) return 0;
+
    init_luffa( &ctx.luffa, 512 );
    update_and_final_luffa( &ctx.luffa, (BitSequence*)&hash[6],
                            (const BitSequence*)&hash[5], 64 );

@@ -125,7 +127,9 @@ void x25x_hash( void *output, const void *input )
    sph_echo512_close( &ctx.echo, &hash[10] );
 #endif

+   if ( work_restart[thrid].restart ) return 0;
+
    sph_hamsi512_init(&ctx.hamsi);
    sph_hamsi512(&ctx.hamsi, (const void*) &hash[10], 64);
    sph_hamsi512_close(&ctx.hamsi, &hash[11]);

@@ -151,6 +155,8 @@ void x25x_hash( void *output, const void *input )
    sph_haval256_5(&ctx.haval,(const void*) &hash[16], 64);
    sph_haval256_5_close(&ctx.haval,&hash[17]);

+   if ( work_restart[thrid].restart ) return 0;
+
    sph_tiger_init(&ctx.tiger);
    sph_tiger (&ctx.tiger, (const void*) &hash[17], 64);
    sph_tiger_close(&ctx.tiger, (void*) &hash[18]);

@@ -199,6 +205,8 @@ void x25x_hash( void *output, const void *input )
    blake2s_simple( (uint8_t*)&hash[24], (const void*)(&hash[0]), 64 * 24 );

    memcpy(output, &hash[24], 32);

+   return 1;
 }

 int scanhash_x25x( struct work *work, uint32_t max_nonce,

@@ -222,7 +230,7 @@ int scanhash_x25x( struct work *work, uint32_t max_nonce,
    do
    {
       edata[19] = n;
-      x25x_hash( hash64, edata );
+      if ( x25x_hash( hash64, edata, thr_id ) );
       if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
       {
          pdata[19] = bswap_32( n );
@@ -79,7 +79,7 @@ int main(int argc, const char * const *argv)
    for (i = 0; i < sizeof(src); i++)
       src.u8[i] = i * 3;

-   if (yespower_tls(src.u8, sizeof(src), &params, &dst)) {
+   if (!yespower_tls(src.u8, sizeof(src), &params, &dst)) {
       puts("FAILED");
       return 1;
    }
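The self-test flip above follows from the changed return convention: upstream yespower reports 0 on success and -1 on error (its header comment still says so), while this fork's functions now report 1 for a completed hash and 0 for an error or a mid-hash restart. A hedged side-by-side reminder, where thr_id stands for the thread index added by this commit:

/* upstream yespower: 0 means success, -1 means error */
int rc_upstream = yespower_tls( src, srclen, &params, &dst );
/* success when rc_upstream == 0 */

/* this fork after the change: 1 means the hash completed, 0 means error/restart */
int rc_fork = yespower_tls( src, srclen, &params, &dst, thr_id );
/* success when rc_fork != 0 */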
@@ -53,7 +53,7 @@ int scanhash_yespower_r8g( struct work *work, uint32_t max_nonce,

    do {
       yespower_tls( (unsigned char *)endiandata, params.perslen,
-                    &params, (yespower_binary_t*)hash );
+                    &params, (yespower_binary_t*)hash, thr_id );

       if unlikely( valid_hash( hash, ptarget ) && !opt_benchmark )
       {
@@ -194,11 +194,13 @@ static int free_region(yespower_region_t *region)
 #define restrict
 #endif

+/*
 #ifdef __GNUC__
 #define unlikely(exp) __builtin_expect(exp, 0)
 #else
 #define unlikely(exp) (exp)
 #endif
+*/

 #ifdef __SSE__
 #define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));

@@ -1113,7 +1115,7 @@ static void smix(uint8_t *B, size_t r, uint32_t N,
 int yespower_b2b(yespower_local_t *local,
     const uint8_t *src, size_t srclen,
     const yespower_params_t *params,
-    yespower_binary_t *dst)
+    yespower_binary_t *dst, int thrid )
 {
    uint32_t N = params->N;
    uint32_t r = params->r;

@@ -1168,17 +1170,25 @@ int yespower_b2b(yespower_local_t *local,
       srclen = 0;
    }

+   if ( work_restart[thrid].restart ) return false;
+
    pbkdf2_blake2b_yp(init_hash, sizeof(init_hash), src, srclen, 1, B, 128);

+   if ( work_restart[thrid].restart ) return false;
+
    memcpy(init_hash, B, sizeof(init_hash));
    smix_1_0(B, r, N, V, XY, &ctx);

+   if ( work_restart[thrid].restart ) return false;
+
    hmac_blake2b_yp_hash((uint8_t *)dst, B + B_size - 64, 64, init_hash, sizeof(init_hash));

    /* Success! */
-   return 0;
+   return 1;

 fail:
    memset(dst, 0xff, sizeof(*dst));
-   return -1;
+   return 0;
 }

 /**

@@ -1189,7 +1199,7 @@ fail:
  * Return 0 on success; or -1 on error.
  */
 int yespower_b2b_tls(const uint8_t *src, size_t srclen,
-    const yespower_params_t *params, yespower_binary_t *dst)
+    const yespower_params_t *params, yespower_binary_t *dst, int thrid )
 {
    static __thread int initialized = 0;
    static __thread yespower_local_t local;

@@ -1199,7 +1209,7 @@ int yespower_b2b_tls(const uint8_t *src, size_t srclen,
       initialized = 1;
    }

-   return yespower_b2b(&local, src, srclen, params, dst);
+   return yespower_b2b(&local, src, srclen, params, dst, thrid);
 }
 /*
 int yespower_init_local(yespower_local_t *local)
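One side effect of the new convention in yespower_b2b() above: a mid-hash restart and the allocation-failure path (fail:, which still poisons dst with 0xff bytes) now both return 0, and only a completed hash returns 1. A caller that wants to tell the two zero cases apart has to consult work_restart itself; a small sketch, assuming the usual scanhash locals (endiandata, hash, params, thr_id):

int rc = yespower_b2b_tls( (uint8_t*)endiandata, 80, &params,
                           (yespower_binary_t*)hash, thr_id );
if ( !rc )
{
   if ( work_restart[thr_id].restart )
      ;   // benign: new work arrived, just fall through to the next pass
   else
      applog( LOG_ERR, "yespower_b2b failed (initialization error)" );
}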
@@ -34,9 +34,10 @@ static yespower_params_t yespower_params;

 // YESPOWER

-void yespower_hash( const char *input, char *output, uint32_t len )
+int yespower_hash( const char *input, char *output, uint32_t len, int thrid )
 {
-   yespower_tls( input, len, &yespower_params, (yespower_binary_t*)output );
+   return yespower_tls( input, len, &yespower_params,
+                        (yespower_binary_t*)output, thrid );
 }

 int scanhash_yespower( struct work *work, uint32_t max_nonce,

@@ -55,7 +56,7 @@ int scanhash_yespower( struct work *work, uint32_t max_nonce,
      be32enc( &endiandata[k], pdata[k] );
   endiandata[19] = n;
   do {
-      yespower_hash( (char*)endiandata, (char*)vhash, 80 );
+      if ( yespower_hash( (char*)endiandata, (char*)vhash, 80, thr_id ) )
      if unlikely( valid_hash( vhash, ptarget ) && !opt_benchmark )
      {
         be32enc( pdata+19, n );

@@ -70,9 +71,9 @@ int scanhash_yespower( struct work *work, uint32_t max_nonce,

 // YESPOWER-B2B

-void yespower_b2b_hash( const char *input, char *output, uint32_t len )
+int yespower_b2b_hash( const char *input, char *output, uint32_t len, int thrid )
 {
-   yespower_b2b_tls( input, len, &yespower_params, (yespower_binary_t*)output );
+   return yespower_b2b_tls( input, len, &yespower_params, (yespower_binary_t*)output, thrid );
 }

 int scanhash_yespower_b2b( struct work *work, uint32_t max_nonce,

@@ -91,7 +92,7 @@ int scanhash_yespower_b2b( struct work *work, uint32_t max_nonce,
      be32enc( &endiandata[k], pdata[k] );
   endiandata[19] = n;
   do {
-      yespower_b2b_hash( (char*) endiandata, (char*) vhash, 80 );
+      if (yespower_b2b_hash( (char*) endiandata, (char*) vhash, 80, thr_id ) )
      if unlikely( valid_hash( vhash, ptarget ) && !opt_benchmark )
      {
         be32enc( pdata+19, n );
@@ -107,11 +107,13 @@
 #define restrict
 #endif

+/*
 #ifdef __GNUC__
 #define unlikely(exp) __builtin_expect(exp, 0)
 #else
 #define unlikely(exp) (exp)
 #endif
+*/

 #ifdef __SSE__
 #define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));

@@ -1023,7 +1025,7 @@ static void smix(uint8_t *B, size_t r, uint32_t N,
 int yespower(yespower_local_t *local,
     const uint8_t *src, size_t srclen,
     const yespower_params_t *params,
-    yespower_binary_t *dst)
+    yespower_binary_t *dst, int thrid )
 {
    yespower_version_t version = params->version;
    uint32_t N = params->N;

@@ -1077,15 +1079,24 @@ int yespower(yespower_local_t *local,
    if (version == YESPOWER_0_5) {
       PBKDF2_SHA256(sha256, sizeof(sha256), src, srclen, 1,
                     B, B_size);
+
+      if ( work_restart[thrid].restart ) return false;
+
       memcpy(sha256, B, sizeof(sha256));
       smix(B, r, N, V, XY, &ctx);
+
+      if ( work_restart[thrid].restart ) return false;
+
       PBKDF2_SHA256(sha256, sizeof(sha256), B, B_size, 1,
                     (uint8_t *)dst, sizeof(*dst));

       if (pers) {
          HMAC_SHA256_Buf(dst, sizeof(*dst), pers, perslen,
                          sha256);
+
+         if ( work_restart[thrid].restart ) return false;
+
          SHA256_Buf(sha256, sizeof(sha256), (uint8_t *)dst);
       }
    } else {
       ctx.S2 = S + 2 * Swidth_to_Sbytes1(Swidth);

@@ -1106,7 +1117,7 @@ int yespower(yespower_local_t *local,
    }

    /* Success! */
-   return 0;
+   return 1;
 }

 /**

@@ -1117,7 +1128,7 @@ int yespower(yespower_local_t *local,
  * Return 0 on success; or -1 on error.
  */
 int yespower_tls(const uint8_t *src, size_t srclen,
-    const yespower_params_t *params, yespower_binary_t *dst)
+    const yespower_params_t *params, yespower_binary_t *dst, int thrid )
 {
    static __thread int initialized = 0;
    static __thread yespower_local_t local;

@@ -1128,7 +1139,7 @@ int yespower_tls(const uint8_t *src, size_t srclen,
       initialized = 1;
    }

-   return yespower(&local, src, srclen, params, dst);
+   return yespower( &local, src, srclen, params, dst, thrid );
 }

 int yespower_init_local(yespower_local_t *local)
@@ -32,6 +32,7 @@

 #include <stdint.h>
 #include <stdlib.h> /* for size_t */
+#include "miner.h"

 #ifdef __cplusplus
 extern "C" {

@@ -109,11 +110,11 @@ extern int yespower_free_local(yespower_local_t *local);
 */
 extern int yespower(yespower_local_t *local,
     const uint8_t *src, size_t srclen,
-    const yespower_params_t *params, yespower_binary_t *dst);
+    const yespower_params_t *params, yespower_binary_t *dst, int thrid);

 extern int yespower_b2b(yespower_local_t *local,
     const uint8_t *src, size_t srclen,
-    const yespower_params_t *params, yespower_binary_t *dst);
+    const yespower_params_t *params, yespower_binary_t *dst, int thrid );

 /**
  * yespower_tls(src, srclen, params, dst):

@@ -125,10 +126,10 @@ extern int yespower_b2b(yespower_local_t *local,
  * MT-safe as long as dst is local to the thread.
  */
 extern int yespower_tls(const uint8_t *src, size_t srclen,
-    const yespower_params_t *params, yespower_binary_t *dst);
+    const yespower_params_t *params, yespower_binary_t *dst, int thr_id);

 extern int yespower_b2b_tls(const uint8_t *src, size_t srclen,
-    const yespower_params_t *params, yespower_binary_t *dst);
+    const yespower_params_t *params, yespower_binary_t *dst, int thr_id);

 #ifdef __cplusplus
 }
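Adding #include "miner.h" to yespower.h is what makes the work_restart array visible inside yespower() and yespower_b2b(). Had the header been kept miner-agnostic, the lighter alternative (not what this commit does) would be to declare only what is needed; cpuminer-opt's miner.h is assumed to provide something equivalent to:

struct work_restart
{
    volatile unsigned long restart;               // set by the stratum thread
    char padding[128 - sizeof(unsigned long)];    // keep each flag on its own cache line
};

extern struct work_restart *work_restart;        // one entry per miner thread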
configure (vendored, 20 changes)

@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.12.5.
+# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.12.6.1.
 #
 #
 # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.

@@ -577,8 +577,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='cpuminer-opt'
 PACKAGE_TARNAME='cpuminer-opt'
-PACKAGE_VERSION='3.12.5'
-PACKAGE_STRING='cpuminer-opt 3.12.5'
+PACKAGE_VERSION='3.12.6.1'
+PACKAGE_STRING='cpuminer-opt 3.12.6.1'
 PACKAGE_BUGREPORT=''
 PACKAGE_URL=''

@@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then
  # Omit some internal or obsolete options to make the list less imposing.
  # This message is too long to be a string in the A/UX 3.1 sh.
  cat <<_ACEOF
-\`configure' configures cpuminer-opt 3.12.5 to adapt to many kinds of systems.
+\`configure' configures cpuminer-opt 3.12.6.1 to adapt to many kinds of systems.

 Usage: $0 [OPTION]... [VAR=VALUE]...

@@ -1404,7 +1404,7 @@ fi

 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of cpuminer-opt 3.12.5:";;
+     short | recursive ) echo "Configuration of cpuminer-opt 3.12.6.1:";;
   esac
   cat <<\_ACEOF

@@ -1509,7 +1509,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-cpuminer-opt configure 3.12.5
+cpuminer-opt configure 3.12.6.1
 generated by GNU Autoconf 2.69

 Copyright (C) 2012 Free Software Foundation, Inc.

@@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.

-It was created by cpuminer-opt $as_me 3.12.5, which was
+It was created by cpuminer-opt $as_me 3.12.6.1, which was
 generated by GNU Autoconf 2.69.  Invocation command line was

   $ $0 $@

@@ -2993,7 +2993,7 @@ fi

 # Define the identity of the package.
 PACKAGE='cpuminer-opt'
-VERSION='3.12.5'
+VERSION='3.12.6.1'

 cat >>confdefs.h <<_ACEOF

@@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by cpuminer-opt $as_me 3.12.5, which was
+This file was extended by cpuminer-opt $as_me 3.12.6.1, which was
 generated by GNU Autoconf 2.69.  Invocation command line was

   CONFIG_FILES    = $CONFIG_FILES

@@ -6756,7 +6756,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-cpuminer-opt config.status 3.12.5
+cpuminer-opt config.status 3.12.6.1
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"

configure.ac

@@ -1,4 +1,4 @@
-AC_INIT([cpuminer-opt], [3.12.5])
+AC_INIT([cpuminer-opt], [3.12.6.1])

 AC_PREREQ([2.59c])
 AC_CANONICAL_SYSTEM
cpu-miner.c (371 changes)

@@ -160,9 +160,8 @@ uint32_t accepted_share_count = 0;
 uint32_t rejected_share_count = 0;
 uint32_t stale_share_count = 0;
 uint32_t solved_block_count = 0;
-uint64_t new_work_count = 0;
 double *thr_hashrates;
-double global_hashrate = 0;
+double global_hashrate = 0.;
 double stratum_diff = 0.;
 double net_diff = 0.;
 double net_hashrate = 0.;

@@ -196,6 +195,8 @@ static uint64_t stale_sum = 0;
 static uint64_t reject_sum = 0;
 static double norm_diff_sum = 0.;
 static uint32_t last_block_height = 0;
+static double highest_share = 0;    // all shares include discard and reject
+static double lowest_share = 9e99;  // lowest accepted
 //static bool new_job = false;
 static double last_targetdiff = 0.;
 #if !(defined(__WINDOWS__) || defined(_WIN64) || defined(_WIN32))

@@ -218,6 +219,20 @@ char* lp_id;

 static void workio_cmd_free(struct workio_cmd *wc);

+static void format_affinity_map( char *map_str, uint64_t map )
+{
+   int n = num_cpus < 64 ? num_cpus : 64;
+   int i;
+
+   for ( i = 0; i < n; i++ )
+   {
+      if ( map & 1 ) map_str[i] = '!';
+      else           map_str[i] = '.';
+      map >>= 1;
+   }
+   memset( &map_str[i], 0, 64 - i );
+}
+
 #ifdef __linux /* Linux specific policy and affinity management */
 #include <sched.h>
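format_affinity_map(), added above, renders the low 64 bits of an affinity mask as one character per logical CPU: '!' for a CPU the mask selects, '.' for one it does not, with the rest of the 64-byte buffer zeroed. A small usage sketch, assuming num_cpus has already been detected as 8:

char map_str[64 + 1] = { 0 };

format_affinity_map( map_str, 0x5 );              // CPUs 0 and 2 selected
applog( LOG_INFO, "Affinity [%s]", map_str );     // prints: Affinity [!.!.....]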
@@ -489,7 +504,7 @@ static bool get_mininginfo( CURL *curl, struct work *work )
         net_blocks = json_integer_value( key );

      if ( opt_debug )
-        applog(LOG_INFO," Mining info: diff %.5g, net_hashrate %f, height %d",
+        applog(LOG_INFO,"Mining info: diff %.5g, net_hashrate %f, height %d",
               net_diff, net_hashrate, net_blocks );

      if ( !work->height )

@@ -894,6 +909,7 @@ static inline void sprintf_et( char *str, int seconds )
 }

 const double exp32 = 4294967296.;               // 2**32
+const double exp48 = 4294967296. * 65536.;      // 2**48
 const double exp64 = 4294967296. * 4294967296.; // 2**64

 struct share_stats_t
@@ -938,7 +954,6 @@ void report_summary_log( bool force )
   uint64_t accepts = accept_sum;  accept_sum = 0;
   uint64_t rejects = reject_sum;  reject_sum = 0;
   uint64_t stales  = stale_sum;   stale_sum = 0;
-// uint64_t new_work = new_work_count;  new_work_count = 0;

   memcpy( &start_time, &five_min_start, sizeof start_time );
   memcpy( &five_min_start, &now, sizeof now );

@@ -961,21 +976,6 @@ void report_summary_log( bool force )
   char et_str[24];
   char upt_str[24];

-/*
-   uint64_t work_time = new_work ? et.tv_sec / new_work : 15;
-   // Adjust scantime for frequent new work to prevent stales.
-   if ( work_time < 2 * opt_scantime )
-   {
-      if ( opt_scantime > min_scantime )
-      {
-         opt_scantime--;
-         if ( !opt_quiet )
-            applog( LOG_BLUE, "Getwork scan time reduced to %d seconds",
-                    opt_scantime );
-      }
-   }
-*/

   scale_hash_for_display( &shrate, shr_units );
   scale_hash_for_display( &ghrate, ghr_units );
   scale_hash_for_display( &sess_hrate, sess_hr_units );

@@ -994,7 +994,7 @@ void report_summary_log( bool force )

   if ( accepted_share_count < submitted_share_count )
   {
-      double lost_ghrate = uptime.tv_sec == 0. ? 0.
+      double lost_ghrate = uptime.tv_sec == 0 ? 0.
                          : exp32 * last_targetdiff
                            * (double)(submitted_share_count - accepted_share_count )
                            / (double)uptime.tv_sec;

@@ -1020,8 +1020,11 @@ void report_summary_log( bool force )
   applog2( LOG_INFO,"Rejected %6d %6d",
            rejects, rejected_share_count );
   if ( solved_block_count )
-      applog2( LOG_INFO,"Blocks solved %6d",
+      applog2( LOG_INFO,"Blocks Solved %6d",
               solved_block_count );
+   applog2( LOG_INFO, "Hi/Lo Share Diff %.5g / %.5g",
+            highest_share, lowest_share );
 }

 bool lowdiff_debug = false;
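The new Hi/Lo summary line reads two globals, highest_share and lowest_share, that are maintained as shares are accepted (see the share_result() hunk just below). lowest_share starts at the 9e99 sentinel so the first accepted share always replaces it; the bookkeeping is nothing more than:

if ( my_stats.share_diff < lowest_share  ) lowest_share  = my_stats.share_diff;
if ( my_stats.share_diff > highest_share ) highest_share = my_stats.share_diff;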
@@ -1076,6 +1079,10 @@ static int share_result( int result, struct work *work,
   if ( likely( result ) )
   {
      accepted_share_count++;
+      if ( my_stats.share_diff < lowest_share )
+         lowest_share = my_stats.share_diff;
+      if ( my_stats.share_diff > highest_share )
+         highest_share = my_stats.share_diff;
      sprintf( sres, "S%d", stale_share_count );
      sprintf( rres, "R%d", rejected_share_count );
      if unlikely( ( my_stats.net_diff > 0. )

@@ -1096,7 +1103,8 @@ static int share_result( int result, struct work *work,
   {
      sprintf( ares, "A%d", accepted_share_count );
      sprintf( bres, "B%d", solved_block_count );
-      if ( work ) stale = work->stale;
+      stale = work ? work->data[ algo_gate.ntime_index ]
+                     != g_work.data[ algo_gate.ntime_index ] : false;
      if ( reason ) stale = stale || strstr( reason, "Invalid job id" );
      if ( stale )
      {

@@ -1160,37 +1168,34 @@ static int share_result( int result, struct work *work,
               my_stats.share_diff, share_ratio, bcol, stratum.block_height,
               scol, my_stats.job_id );
      else
+      {
+         uint64_t height = work ? work->height : last_block_height;
         applog2( LOG_INFO, "Diff %.5g (%.3g), %sBlock %d",
-               my_stats.share_diff, share_ratio, bcol, stratum.block_height );
+               my_stats.share_diff, share_ratio, bcol, height );
+      }
   }

-   if ( unlikely( reason && !result ) )
+   if ( unlikely( opt_debug || !( opt_quiet || result || stale ) ) )
   {
-      if ( !( opt_quiet || stale ) )
-      {
+      uint32_t str[8];
+
+      if ( reason )
         applog( LOG_WARNING, "Reject reason: %s", reason );

-         uint32_t str1[8], str2[8];
-         char str3[65];
-
-         // display share hash and target for troubleshooting
-         diff_to_target( str1, my_stats.share_diff );
-         for ( int i = 0; i < 8; i++ )
-            be32enc( str2 + i, str1[7 - i] );
-         bin2hex( str3, (unsigned char*)str2, 12 );
-         applog2( LOG_INFO, "Share diff: %.5g, Hash: %s...",
-                             my_stats.share_diff, str3 );
-
-         diff_to_target( str1, my_stats.target_diff );
-         for ( int i = 0; i < 8; i++ )
-            be32enc( str2 + i, str1[7 - i] );
-         bin2hex( str3, (unsigned char*)str2, 12 );
-         applog2( LOG_INFO, "Target diff: %.5g, Targ: %s...",
-                             my_stats.target_diff, str3 );
-      }
-
-      if ( unlikely( opt_reset_on_stale && stale ) )
-         stratum_need_reset = true;
+      // display share hash and target for troubleshooting
+      diff_to_target( str, my_stats.share_diff );
+      applog2( LOG_INFO, "Hash: %08x%08x%08x%08x...",
+               str[7], str[6], str[5], str[4] );
+      uint32_t *targ;
+      if ( work )
+         targ = work->target;
+      else
+      {
+         diff_to_target( str, my_stats.target_diff );
+         targ = &str[0];
+      }
+      applog2( LOG_INFO, "Target: %08x%08x%08x%08x...",
+               targ[7], targ[6], targ[5], targ[4] );
   }
   return 1;
 }
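The rewritten troubleshooting block prints only the 128 most significant bits of the share hash and of the target, taking words 7..4 of the 8-word uint32 arrays that diff_to_target() produces (or work->target when the work item is available). The same formatting, isolated as a stand-alone sketch:

#include <stdint.h>
#include <stdio.h>

// w[] holds a 256-bit value as 8 x 32-bit words, with w[7] most significant.
static void print_top128( const char *label, const uint32_t w[8] )
{
   printf( "%s %08x%08x%08x%08x...\n", label, w[7], w[6], w[5], w[4] );
}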
@@ -1336,34 +1341,6 @@ char* std_malloc_txs_request( struct work *work )

 static bool submit_upstream_work( CURL *curl, struct work *work )
 {
-   work->stale = false;
-/*
-   // Submit anyway, discardring here messes up the stats
-   if ( !have_stratum && allow_mininginfo )
-   {
-      struct work mining_info;
-      get_mininginfo( curl, &mining_info );
-      if ( work->height < mining_info.height )
-      {
-         if ( !opt_quiet )
-            applog( LOG_WARNING, "Block %u already solved, current block %d",
-                    work->height, mining_info.height );
-         work->stale = true;
-//         g_work_time = 0;
-      }
-   }
-*/
-
-   /* pass if the previous hash is not the current previous hash */
-   if ( !submit_old && ( work->data[ algo_gate.ntime_index ]
-                      != g_work.data[ algo_gate.ntime_index] ) )
-   {
-      if ( !opt_debug )
-         applog( LOG_WARNING, "Stale work detected, submitting anyway" );
-      work->stale = true;
-      g_work_time = 0;
-   }
-
   if ( have_stratum )
   {
      char req[JSON_BUF_LEN];
@@ -1499,11 +1476,10 @@ start:

   if ( work->height > last_block_height )
   {
-      new_work_count++;
      last_block_height = work->height;
-      applog( LOG_BLUE, "New block %d, net diff %.5g, target diff %.5g, ntime %08x",
+      applog( LOG_BLUE, "New Block %d, Net Diff %.5g, Target Diff %.5g, Ntime %08x",
              work->height, net_diff, work->targetdiff,
              bswap_32( work->data[ algo_gate.ntime_index ] ) );

      if ( !opt_quiet && net_diff && net_hashrate )
      {

@@ -1529,35 +1505,15 @@ start:
         scale_hash_for_display ( &miner_hr, miner_hr_units );
         scale_hash_for_display ( &net_hr, net_hr_units );
         applog2( LOG_INFO,
-              "Miner TTF @ %.2f %sh/s %s, net TTF @ %.2f %sh/s %s",
+              "Miner TTF @ %.2f %sh/s %s, Net TTF @ %.2f %sh/s %s",
              miner_hr, miner_hr_units, miner_ttf,
              net_hr, net_hr_units, net_ttf );
      }
   }
   }  // work->height > last_block_height
   else if ( memcmp( &work->data[1], &g_work.data[1], 32 ) )
-   {
-      new_work_count++;
-      applog( LOG_BLUE, "New work, ntime %08lx",
-              bswap_32( work->data[ algo_gate.ntime_index ] ) );
-      if ( opt_debug )
-      {
-         uint32_t *old = g_work.data;
-         uint32_t *new = work->data;
-         printf("old: %08x %08x %08x %08x %08x %08x %08x %08x/n",
-                 old[0],old[1],old[2],old[3],old[4],old[5],old[6],old[7]);
-         printf("     %08x %08x %08x %08x %08x %08x %08x %08x/n",
-                 old[8],old[9],old[10],old[11],old[12],old[13],old[14],old[15]);
-         printf("     %08x %08x %08x %08x/n",
-                 old[16],old[17],old[18],old[19]);
-         printf("new: %08x %08x %08x %08x %08x %08x %08x %08x/n",
-                 new[0],new[1],new[2],new[3],new[4],new[5],new[6],new[7]);
-         printf("     %08x %08x %08x %08x %08x %08x %08x %08x/n",
-                 new[8],new[9],new[10],new[11],new[12],new[13],new[14],new[15]);
-         printf("     %08x %08x %08x %08x/n",
-                 new[16],new[17],new[18],new[19]);
-      }
-   }
+      applog( LOG_BLUE, "New Work, Ntime %08lx",
+              bswap_32( work->data[ algo_gate.ntime_index ] ) );
  }  // rc

  return rc;
@@ -1758,7 +1714,7 @@ static inline double u256_to_double( const uint64_t *u )

 static void update_submit_stats( struct work *work, const void *hash )
 {
-   work->sharediff = hash ? exp32 / ( (uint64_t*)hash )[3] : 0.;
+// work->sharediff = hash ? exp32 / ( (uint64_t*)hash )[3] : 0.;

   pthread_mutex_lock( &stats_lock );

@@ -1776,51 +1732,25 @@ static void update_submit_stats( struct work *work, const void *hash )
   pthread_mutex_unlock( &stats_lock );
 }

-/*
-//deprecated
-void work_set_target_ratio( struct work* work, const void *hash )
-{
-   submitted_share_count++;
-   work->sharediff = work->targetdiff * (double)( ((uint64_t*)hash)[3] )
-                                      / (double)( ((uint64_t*)work->target)[3] );
-//   work->sharediff = likely( hash ) ? target_to_diff( (uint32_t*)hash ) : 0.;
-
-   // collect some share stats
-   // Frequent share submission combined with high latency can caused
-   // shares to be submitted faster than they are acked. If severe enough
-   // it can overflow the queue and overwrite stats for a share.
-   pthread_mutex_lock( &stats_lock );
-
-   share_stats[ s_put_ptr ].share_count = submitted_share_count;
-   gettimeofday( &share_stats[ s_put_ptr ].submit_time, NULL );
-   share_stats[ s_put_ptr ].share_diff = work->sharediff;
-   share_stats[ s_put_ptr ].net_diff = net_diff;
-   share_stats[ s_put_ptr ].stratum_diff = stratum_diff;
-   share_stats[ s_put_ptr ].target_diff = work->targetdiff;
-   if ( have_stratum )
-      strncpy( share_stats[ s_put_ptr ].job_id, work->job_id, 30 );
-   s_put_ptr = stats_ptr_incr( s_put_ptr );
-
-   pthread_mutex_unlock( &stats_lock );
-}
-*/
-
 bool submit_solution( struct work *work, const void *hash,
                       struct thr_info *thr )
 {
+   work->sharediff = hash ? exp32 / ( (uint64_t*)hash )[3] : 0.;
+
   if ( likely( submit_work( thr, work ) ) )
   {
      update_submit_stats( work, hash );

      if ( !opt_quiet )
      {
         if ( have_stratum )
-            applog( LOG_NOTICE, "%d Submit diff %.5g, block %d, job %s",
+            applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Job %s",
                    submitted_share_count, work->sharediff, work->height,
                    work->job_id );
         else
-            applog( LOG_NOTICE, "%d Submit diff %.5g, block %d",
-                   submitted_share_count, work->sharediff, work->height );
+            applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Ntime %08x",
+                   submitted_share_count, work->sharediff, work->height,
+                   work->data[ algo_gate.ntime_index ] );
      }

      if ( unlikely( lowdiff_debug ) )

@@ -1843,19 +1773,22 @@ bool submit_solution( struct work *work, const void *hash,
 bool submit_lane_solution( struct work *work, const void *hash,
                            struct thr_info *thr, const int lane )
 {
+   work->sharediff = hash ? exp32 / ( (uint64_t*)hash )[3] : 0.;
+
   if ( likely( submit_work( thr, work ) ) )
   {
      update_submit_stats( work, hash );

      if ( !opt_quiet )
      {
         if ( have_stratum )
-            applog( LOG_NOTICE, "%d Submit diff %.5g, block %d, job %s",
+            applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Job %s",
                    submitted_share_count, work->sharediff, work->height,
                    work->job_id );
         else
-            applog( LOG_NOTICE, "%d Submit diff %.5g, block %d",
-                   submitted_share_count, work->sharediff, work->height );
+            applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Ntime %08x",
+                   submitted_share_count, work->sharediff, work->height,
+                   work->data[ algo_gate.ntime_index ] );
      }

      if ( lowdiff_debug )
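With the calculation moved into submit_solution() and submit_lane_solution(), the share difficulty is computed exactly once per submitted share, using the usual approximation diff = 2^32 / hi64, where hi64 is the most significant 64-bit word of the 256-bit hash and exp32 is the 2**32 constant defined earlier in this file. As a self-contained sketch:

#include <stdint.h>

static const double exp32 = 4294967296.;   // 2**32

// hash: 32 bytes, read as four little-endian 64-bit words, word 3 most
// significant. Smaller hashes (harder shares) yield larger difficulty.
static double share_diff_from_hash( const void *hash )
{
   uint64_t hi64 = ( (const uint64_t*)hash )[3];
   return hi64 ? exp32 / (double)hi64 : 0.;
}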
@@ -2024,6 +1957,8 @@ static void *miner_thread( void *userdata )
|
|||||||
if (!opt_benchmark && opt_priority == 0)
|
if (!opt_benchmark && opt_priority == 0)
|
||||||
{
|
{
|
||||||
setpriority(PRIO_PROCESS, 0, 19);
|
setpriority(PRIO_PROCESS, 0, 19);
|
||||||
|
if ( !thr_id && !opt_quiet )
|
||||||
|
applog(LOG_INFO, "Miner thread priority %d (nice 19)", opt_priority );
|
||||||
drop_policy();
|
drop_policy();
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
@@ -2040,9 +1975,9 @@ static void *miner_thread( void *userdata )
|
|||||||
case 4: prio = -10; break;
|
case 4: prio = -10; break;
|
||||||
case 5: prio = -15;
|
case 5: prio = -15;
|
||||||
}
|
}
|
||||||
if (opt_debug)
|
if ( !( thr_id || opt_quiet ) )
|
||||||
applog(LOG_DEBUG, "Thread %d priority %d (nice %d)", thr_id,
|
applog( LOG_INFO, "Miner thread priority %d (nice %d)",
|
||||||
opt_priority, prio );
|
opt_priority, prio );
|
||||||
#endif
|
#endif
|
||||||
setpriority(PRIO_PROCESS, 0, prio);
|
setpriority(PRIO_PROCESS, 0, prio);
|
||||||
if ( opt_priority == 0 )
|
if ( opt_priority == 0 )
|
||||||
@@ -2057,7 +1992,7 @@ static void *miner_thread( void *userdata )
       {
          affine_to_cpu_mask( thr_id, (uint128_t)1 << (thr_id % num_cpus) );
          if ( opt_debug )
-            applog( LOG_DEBUG, "Binding thread %d to cpu %d.",
+            applog( LOG_INFO, "Binding thread %d to cpu %d.",
                     thr_id, thr_id % num_cpus,
                     u128_hi64( (uint128_t)1 << (thr_id % num_cpus) ),
                     u128_lo64( (uint128_t)1 << (thr_id % num_cpus) ) );
@@ -2078,14 +2013,14 @@ static void *miner_thread( void *userdata )
       {
 #if AFFINITY_USES_UINT128
          if ( num_cpus > 64 )
-            applog( LOG_DEBUG, "Binding thread %d to mask %016llx %016llx",
+            applog( LOG_INFO, "Binding thread %d to mask %016llx %016llx",
                     thr_id, u128_hi64( opt_affinity ),
                     u128_lo64( opt_affinity ) );
          else
-            applog( LOG_DEBUG, "Binding thread %d to mask %016llx",
+            applog( LOG_INFO, "Binding thread %d to mask %016llx",
                     thr_id, opt_affinity );
 #else
-         applog( LOG_DEBUG, "Binding thread %d to mask %016llx",
+         applog( LOG_INFO, "Binding thread %d to mask %016llx",
                  thr_id, opt_affinity );
 #endif
       }
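
These log lines print the affinity mask as two 64-bit halves when more than 64 CPUs are present. A minimal sketch of how such a 128-bit mask can be built and split (the u128_* helpers below are hypothetical stand-ins for the ones used in the code):

    #include <stdint.h>

    typedef unsigned __int128 uint128_t;   // GCC/Clang extension

    // Hypothetical equivalents of the helpers used above: split a 128-bit
    // affinity mask into the two 64-bit halves that get printed.
    static inline uint64_t u128_hi64( uint128_t x ) { return (uint64_t)( x >> 64 ); }
    static inline uint64_t u128_lo64( uint128_t x ) { return (uint64_t)x; }

    // One bit per CPU: thread thr_id is pinned to CPU thr_id % num_cpus.
    static uint128_t thread_affinity_mask( int thr_id, int num_cpus )
    {
       return (uint128_t)1 << ( thr_id % num_cpus );
    }
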
@@ -2127,7 +2062,6 @@ static void *miner_thread( void *userdata )
          if ( time(NULL) - g_work_time >= scantime
               || *nonceptr >= end_nonce )
          {
-            report_summary_log( false );
             if ( unlikely( !get_work( mythr, &g_work ) ) )
             {
                applog( LOG_ERR, "work retrieval failed, exiting "
@@ -2629,16 +2563,13 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
    pthread_mutex_unlock( &stats_lock );

    if ( stratum_diff != sctx->job.diff )
-      applog( LOG_BLUE, "New diff %g, block %d, job %s, ntime %08x",
-              sctx->job.diff, sctx->block_height, g_work->job_id,
-              bswap_32( g_work->data[ algo_gate.ntime_index ] ) );
+      applog( LOG_BLUE, "New Diff %g, Block %d, Job %s",
+              sctx->job.diff, sctx->block_height, g_work->job_id );
    else if ( last_block_height != sctx->block_height )
-      applog( LOG_BLUE, "New block %d, job %si, ntime %08x",
-              sctx->block_height, g_work->job_id,
-              bswap_32( g_work->data[ algo_gate.ntime_index ] ) );
+      applog( LOG_BLUE, "New Block %d, Job %s",
+              sctx->block_height, g_work->job_id );
    else if ( g_work->job_id )
-      applog( LOG_BLUE,"New job %s, ntime %08x", g_work->job_id,
-              bswap_32( g_work->data[ algo_gate.ntime_index ] ) );
+      applog( LOG_BLUE,"New Job %s", g_work->job_id );

    // Update data and calculate new estimates.
    if ( ( stratum_diff != sctx->job.diff )
@@ -2666,7 +2597,7 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
          sprintf_et( block_ttf, ( net_diff * exp32 ) / hr );
          sprintf_et( share_ttf, g_work->targetdiff * exp32 / hr );
          scale_hash_for_display ( &hr, hr_units );
-         applog2( LOG_INFO, "TTF @ %.2f %sh/s: block %s, share %s",
+         applog2( LOG_INFO, "TTF @ %.2f %sh/s: Block %s, Share %s",
                   hr, hr_units, block_ttf, share_ttf );

          if ( !multipool && last_block_height > session_first_block )
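
The time-to-find figures above follow the usual rule that a difficulty-D target needs about D x 2^32 hashes on average, so TTF = D x 2^32 / hashrate. For example, at difficulty 1 and 1 Mh/s that is 4294967296 / 1000000, roughly 4295 seconds or a little over 71 minutes.
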
@@ -2680,13 +2611,13 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
             if ( net_diff && net_ttf )
             {
                double net_hr = net_diff * exp32 / net_ttf;
-               char net_ttf_str[32];
+//             char net_ttf_str[32];
                char net_hr_units[4] = {0};

-               sprintf_et( net_ttf_str, net_ttf );
+//             sprintf_et( net_ttf_str, net_ttf );
                scale_hash_for_display ( &net_hr, net_hr_units );
-               applog2( LOG_INFO, "Net TTF @ %.2f %sh/s: %s",
-                        net_hr, net_hr_units, net_ttf_str );
+               applog2( LOG_INFO, "Net hash rate (est) %.2f %sh/s",
+                        net_hr, net_hr_units );
             }
          }
       } // hr > 0
@@ -2697,12 +2628,12 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
 static void *stratum_thread(void *userdata )
 {
    struct thr_info *mythr = (struct thr_info *) userdata;
-   char *s;
+   char *s = NULL;

    stratum.url = (char*) tq_pop(mythr->q, NULL);
    if (!stratum.url)
       goto out;
-   applog( LOG_INFO, "Stratum connect %s", short_url );
+   applog( LOG_BLUE, "Stratum connect %s", short_url );

    while (1)
    {
@@ -2762,30 +2693,26 @@ static void *stratum_thread(void *userdata )
          restart_threads();
       }

-      if ( stratum_socket_full( &stratum, opt_timeout ) )
+      if ( likely( stratum_socket_full( &stratum, opt_timeout ) ) )
       {
-         s = stratum_recv_line(&stratum);
-         if ( !s )
+         if ( likely( s = stratum_recv_line( &stratum ) ) )
+         {
+            if ( likely( !stratum_handle_method( &stratum, s ) ) )
+               stratum_handle_response( s );
+            free( s );
+         }
+         else
+         {
             applog(LOG_WARNING, "Stratum connection interrupted");
+            stratum_disconnect( &stratum );
+         }
       }
       else
       {
-         s = NULL;
          applog(LOG_ERR, "Stratum connection timeout");
+         stratum_disconnect( &stratum );
       }

-      if ( s )
-      {
-         if ( !stratum_handle_method( &stratum, s ) )
-            stratum_handle_response( s );
-         free( s );
-      }
-      else
-      {
-         // stratum_errors++;
-         // check if this redundant
-         stratum_disconnect( &stratum );
-      }
    } // loop
 out:
    return NULL;
@@ -3422,7 +3349,7 @@ bool check_cpu_capability ()
         " with VC++ 2013\n");
 #elif defined(__GNUC__)
         " with GCC");
-        printf(" %d.%d.%d.\n", __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__);
+        printf(" %d.%d.%d\n", __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__);
 #else
      printf(".\n");
 #endif
@@ -3550,7 +3477,7 @@ int main(int argc, char *argv[])
       num_cpus += cpus;

       if (opt_debug)
          applog(LOG_DEBUG, "Found %d cpus on cpu group %d", cpus, i);
    }
 #else
    SYSTEM_INFO sysinfo;
@@ -3570,7 +3497,6 @@ int main(int argc, char *argv[])
    if (num_cpus < 1)
       num_cpus = 1;

-
    if (!opt_n_threads)
       opt_n_threads = num_cpus;

@@ -3644,12 +3570,13 @@ int main(int argc, char *argv[])
    pthread_mutex_init( &stratum.sock_lock, NULL );
    pthread_mutex_init( &stratum.work_lock, NULL );

-   flags = !opt_benchmark
-           && ( strncmp( rpc_url, "https:", 6 )
-                || strncasecmp(rpc_url, "stratum+tcps://", 15 ) )
-           ? ( CURL_GLOBAL_ALL & ~CURL_GLOBAL_SSL )
-           : CURL_GLOBAL_ALL;
-   if ( curl_global_init( flags ) )
+   flags = CURL_GLOBAL_ALL;
+   if ( !opt_benchmark )
+      if ( strncasecmp( rpc_url, "https:", 6 )
+           && strncasecmp( rpc_url, "stratum+tcps://", 15 ) )
+         flags &= ~CURL_GLOBAL_SSL;
+
+   if ( curl_global_init( flags ) )
    {
       applog(LOG_ERR, "CURL initialization failed");
       return 1;
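
The new form starts from CURL_GLOBAL_ALL and only strips SSL initialization when neither an https:// nor a stratum+tcps:// URL is in use; note the https test also changes from strncmp to the case-insensitive strncasecmp. A standalone sketch of the same selection, assuming a libcurl build (the helper name is made up for illustration):

    #include <curl/curl.h>
    #include <strings.h>   // strncasecmp

    // Sketch of the flag selection above: keep CURL_GLOBAL_ALL, but drop SSL
    // setup when the URL is neither https:// nor stratum+tcps://.
    // Illustrative helper, not part of the source tree.
    static long curl_flags_for_url( const char *url, int benchmark )
    {
       long flags = CURL_GLOBAL_ALL;
       if ( !benchmark
            && strncasecmp( url, "https:", 6 )
            && strncasecmp( url, "stratum+tcps://", 15 ) )
          flags &= ~CURL_GLOBAL_SSL;
       return flags;
    }
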
@@ -3689,27 +3616,24 @@ int main(int argc, char *argv[])
    if (opt_priority > 0)
    {
       DWORD prio = NORMAL_PRIORITY_CLASS;
-      switch (opt_priority) {
+      switch (opt_priority)
+      {
          case 1:
             prio = BELOW_NORMAL_PRIORITY_CLASS;
             break;
          case 3:
             prio = ABOVE_NORMAL_PRIORITY_CLASS;
             break;
          case 4:
             prio = HIGH_PRIORITY_CLASS;
             break;
          case 5:
             prio = REALTIME_PRIORITY_CLASS;
       }
       SetPriorityClass(GetCurrentProcess(), prio);
    }
 #endif

-   if ( num_cpus != opt_n_threads )
-      applog( LOG_INFO,"%u CPU cores available, %u miner threads selected.",
-              num_cpus, opt_n_threads );
-
    // To be confirmed with more than 64 cpus
    if ( opt_affinity != -1 )
    {
@@ -3741,6 +3665,13 @@ int main(int argc, char *argv[])
 */
    }

+   if ( !opt_quiet && ( opt_n_threads < num_cpus ) )
+   {
+      char affinity_map[64];
+      format_affinity_map( affinity_map, opt_affinity );
+      applog( LOG_INFO, "CPU affinity [%s]", affinity_map );
+   }
+
 #ifdef HAVE_SYSLOG_H
    if (use_syslog)
       openlog("cpuminer", LOG_PID, LOG_USER);
@@ -3791,7 +3722,7 @@ int main(int argc, char *argv[])
       /* start longpoll thread */
       err = thread_create(thr, longpoll_thread);
       if (err) {
-         applog(LOG_ERR, "long poll thread create failed");
+         applog(LOG_ERR, "Long poll thread create failed");
          return 1;
       }
    }
@@ -3811,7 +3742,7 @@ int main(int argc, char *argv[])
       err = thread_create(thr, stratum_thread);
       if (err)
       {
-         applog(LOG_ERR, "stratum thread create failed");
+         applog(LOG_ERR, "Stratum thread create failed");
          return 1;
       }
       if (have_stratum)
@@ -3852,18 +3783,16 @@ int main(int argc, char *argv[])
             return 1;
          err = thread_create(thr, miner_thread);
          if (err) {
-            applog(LOG_ERR, "thread %d create failed", i);
+            applog(LOG_ERR, "Miner thread %d create failed", i);
             return 1;
          }
       }

-   applog(LOG_INFO, "%d miner threads started, "
-          "using '%s' algorithm.",
-          opt_n_threads,
-          algo_names[opt_algo]);
+   applog( LOG_INFO, "%d of %d miner threads started using '%s' algorithm",
+           opt_n_threads, num_cpus, algo_names[opt_algo] );

    /* main loop - simply wait for workio thread to exit */
-   pthread_join(thr_info[work_thr_id].pth, NULL);
-   applog(LOG_WARNING, "workio thread dead, exiting.");
+   pthread_join( thr_info[work_thr_id].pth, NULL );
+   applog( LOG_WARNING, "workio thread dead, exiting." );
    return 0;
 }

miner.h (11 lines changed)

@@ -323,6 +323,7 @@ int timeval_subtract( struct timeval *result, struct timeval *x,
 // diff_to_hash = 2**32 = 0x100000000 = 4294967296 = exp32;

 const double exp32; // 2**32
+const double exp48; // 2**48
 const double exp64; // 2**64

 bool fulltest( const uint32_t *hash, const uint32_t *target );
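
For reading the hunks that follow, these are power-of-two scaling constants used in the difficulty/target conversions. Their numeric values (a reference note only; the actual definitions live in the .c files) are:

    exp32 = 2^32 = 4294967296
    exp48 = 2^48 = 281474976710656   (added in this commit)
    exp64 = 2^64 = 18446744073709551616
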
@@ -345,6 +346,9 @@ struct thr_info {

 //struct thr_info *thr_info;

+void test_hash_and_submit( struct work *work, const void *hash,
+                           struct thr_info *thr );
+
 bool submit_solution( struct work *work, const void *hash,
                       struct thr_info *thr );

@@ -773,7 +777,7 @@ extern const int pk_buffer_size_max;
 extern int pk_buffer_size;

 static char const usage[] = "\
-Usage: " PACKAGE_NAME " [OPTIONS]\n\
+Usage: cpuminer [OPTIONS]\n\
 Options:\n\
 -a, --algo=ALGO specify the algorithm to use\n\
 allium Garlicoin (GRLC)\n\
@@ -868,7 +872,7 @@ Options:\n\
 yespower-b2b generic yespower + blake2b\n\
 zr5 Ziftr\n\
 -N, --param-n N parameter for scrypt based algos\n\
--R, --patam-r R parameter for scrypt based algos\n\
+-R, --param-r R parameter for scrypt based algos\n\
 -K, --param-key Key (pers) parameter for algos that use it\n\
 -o, --url=URL URL of mining server\n\
 -O, --userpass=U:P username:password pair for mining server\n\
@@ -886,7 +890,7 @@ Options:\n\
 long polling is unavailable, in seconds (default: 5)\n\
 --randomize Randomize scan range start to reduce duplicates\n\
 --reset-on-stale Workaround reset stratum if too many stale shares\n\
 -f, --diff-factor Divide req. difficulty by this factor (std is 1.0)\n\
 -m, --diff-multiplier Multiply difficulty by this factor (std is 1.0)\n\
 --hash-meter Display thread hash rates\n\
 --coinbase-addr=ADDR payout address for solo mining\n\
@@ -908,7 +912,6 @@ Options:\n\
 "\
 -B, --background run the miner in the background\n\
 --benchmark run in offline benchmark mode\n\
---cputest debug hashes from cpu algorithms\n\
 --cpu-affinity set process affinity to cpu core(s), mask 0x3 for cores 0 and 1\n\
 --cpu-priority set process priority (default: 0 idle, 2 normal to 5 highest)\n\
 -b, --api-bind IP/Port for the miner API (default: 127.0.0.1:4048)\n\

util.c (11 lines changed)

@@ -1064,7 +1064,6 @@ void diff_to_target(uint32_t *target, double diff)
    }
 }

-
 // deprecated
 void work_set_target(struct work* work, double diff)
 {
@@ -1075,12 +1074,11 @@ void work_set_target(struct work* work, double diff)
 double target_to_diff( uint32_t* target )
 {
    uint64_t *targ = (uint64_t*)target;
-   return target ? 1. / ( ( (double)targ[3] / exp32 )
-                        + ( (double)targ[2] )
-                        + ( (double)targ[1] * exp32 )
-                        + ( (double)targ[0] * exp64 ) )
-                 : 0.;
+   // extract 64 bits from target[ 240:176 ]
+   uint64_t m = ( targ[3] << 16 ) | ( targ[2] >> 48 );
+   return m ? (exp48-1.) / (double)m : 0.;
 }
+
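
As a rough cross-check of the reworked formula (a standalone sketch, not part of the commit; it assumes the target is stored as four little-endian 64-bit words with word 3 most significant, and exp48 = 2^48): m is the 64 target bits just below the top 16, and difficulty is (2^48 - 1) / m, so the classic difficulty-1 target 0x00000000FFFF00...00 comes out at about 1.0.

    #include <stdint.h>
    #include <stdio.h>

    // Sketch of the reworked target_to_diff() above.
    static double target_to_diff_sketch( const uint64_t targ[4] )
    {
       const double exp48 = 281474976710656.;              // 2^48
       uint64_t m = ( targ[3] << 16 ) | ( targ[2] >> 48 ); // bits below the top 16
       return m ? ( exp48 - 1. ) / (double)m : 0.;
    }

    int main(void)
    {
       // Bitcoin-style difficulty-1 target: 0x00000000FFFF followed by zeros.
       const uint64_t diff1[4] = { 0, 0, 0, 0x00000000FFFF0000ULL };
       printf( "%f\n", target_to_diff_sketch( diff1 ) );   // prints roughly 1.0
       return 0;
    }
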
 /*
 double target_to_diff(uint32_t* target)
 {
@@ -1095,6 +1093,7 @@ double target_to_diff(uint32_t* target)
     (uint64_t)tgt[23] << 8 |
     (uint64_t)tgt[22] << 0;

+
    if (!m)
       return 0.;
    else