mirror of https://github.com/JayDDee/cpuminer-opt.git
synced 2025-09-17 23:44:27 +00:00

Compare commits

3 commits: 6e8b8ed34f, c0aadbcc99, 3da149418a
README.md (41)
@@ -37,25 +37,25 @@ Requirements
------------

1. A x86_64 architecture CPU with a minimum of SSE2 support. This includes
Intel Core2 and newer and AMD equivalents. In order to take advantage of AES_NI
optimizations a CPU with AES_NI is required. This includes Intel Westmere
and newer and AMD equivalents. Further optimizations are available on some
algorithms for CPUs with AVX and AVX2, Sandybridge and Haswell respectively.
Intel Core2 and newer and AMD equivalents. Further optimizations are available
on some algorithms for CPUs with AES, AVX, AVX2, SHA, AVX512 and VAES.

Older CPUs are supported by cpuminer-multi by TPruvot but at reduced
performance.

ARM CPUs are not supported.
ARM and Aarch64 CPUs are not supported.

2. 64 bit Linux OS. Ubuntu and Fedora based distributions, including Mint and
Centos, are known to work and have all dependencies in their repositories.
Others may work but may require more effort. Older versions such as Centos 6
don't work due to missing features.
2. 64 bit Linux or Windows OS. Ubuntu and Fedora based distributions,
including Mint and Centos, are known to work and have all dependencies
in their repositories. Others may work but may require more effort. Older
versions such as Centos 6 don't work due to missing features.
64 bit Windows OS is supported with mingw_w64 and msys or pre-built binaries.

MacOS, OSx and Android are not supported.

3. Stratum pool. Some algos may work wallet mining using getwork or GBT. YMMV.
3. Stratum pool supporting stratum+tcp:// or stratum+ssl:// protocols or
RPC getwork using http:// or https://.
GBT is YMMV.

Supported Algorithms
--------------------
@@ -152,6 +152,27 @@ Supported Algorithms
yespower-b2b generic yespower + blake2b
zr5 Ziftr

Many variations of scrypt based algos can be mined by specifying their
parameters:

scryptn2: --algo scrypt --param-n 1048576

cpupower: --algo yespower --param-key "CPUpower: The number of CPU working or available for proof-of-work mining"

power2b: --algo yespower-b2b --param-n 2048 --param-r 32 --param-key "Now I am become Death, the destroyer of worlds"

sugarchain: --algo yespower --param-n 2048 --param-r 32 --param-key "Satoshi Nakamoto 31/Oct/2008 Proof-of-work is essentially one-CPU-one-vote"

yespoweriots: --algo yespower --param-n 2048 --param-key "Iots is committed to the development of IOT"

yespowerlitb: --algo yespower --param-n 2048 --param-r 32 --param-key "LITBpower: The number of LITB working or available for proof-of-work mini"

yespoweric: --algo yespower --param-n 2048 --param-r 32 --param-key "IsotopeC"

yespowerurx: --algo yespower --param-n 2048 --param-r 32 --param-key "UraniumX"

yespowerltncg: --algo yespower --param-n 2048 --param-r 32 --param-key "LTNCGYES"

Errata
------
@@ -65,6 +65,44 @@ If not what makes it happen or not happen?
Change Log
----------

v3.12.6.1

Issue #252: Fixed SSL mining (stratum+tcps://).

Issue #254: Fixed benchmark.

Issue #253: Implemented stale share reduction for yespower, x25x, x22i, x21s,
x16*, scryptn2, more to come.

v3.12.6

Issue #246: improved stale share detection for getwork.

Improved precision of target_to_diff conversion from 4 digits to 20+.

Display hash and target debug data for all rejected shares.

A graphical representation of CPU affinity is displayed when using --threads.

Added highest and lowest accepted share to summary log.

Other small changes to logs to improve consistency and clarity.
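The v3.12.6 entry above says target_to_diff now keeps 20 or more significant digits instead of 4. Below is a minimal sketch of one way to reach that precision, offered as an illustration only: the name target_to_diff_ld is hypothetical and this is not cpuminer-opt's implementation. It folds all eight 32-bit words of the little-endian target into a long double (64-bit mantissa, roughly 19 to 20 decimal digits) and divides the conventional difficulty-1 target by it.

```c
#include <stdint.h>

// Illustrative only: keep the full 256-bit share target when converting
// to a difficulty value, instead of truncating to a few digits.
static long double target_to_diff_ld( const uint32_t target[8] )  // little-endian words
{
    long double t = 0.0L;
    for ( int i = 7; i >= 0; i-- )            // most significant word first
        t = t * 4294967296.0L + (long double) target[i];
    if ( t == 0.0L ) return 0.0L;
    // Difficulty-1 target: 0x00000000FFFF0000...0000 == 65535 * 2^208.
    return ( 65535.0L * 0x1p208L ) / t;
}
```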
v3.12.5

Issues #246 & #251: fixed incorrect share diff for stratum and getwork,
fixed incorrect target diff for getwork. Stats should now be correct for
getwork as well as stratum.

Issue #252: Fixed stratum+tcps not using curl ssl.

Getwork: reduce stale blocks, faster response to new work.

Added ntime to new job/work logs.

README.md now lists the parameters for yespower variations that don't have
a specific algo name.

v3.12.4.6

Issue #246: fixed getwork repeated new block logs with same height. New work
@@ -97,21 +97,23 @@ int null_scanhash()
return 0;
}

void null_hash()
int null_hash()
{
applog(LOG_WARNING,"SWERR: null_hash unsafe null function");
return 0;
};
/*
void null_hash_suw()
{
applog(LOG_WARNING,"SWERR: null_hash_suw unsafe null function");
};

*/
void init_algo_gate( algo_gate_t* gate )
{
gate->miner_thread_init = (void*)&return_true;
gate->scanhash = (void*)&null_scanhash;
gate->hash = (void*)&null_hash;
gate->hash_suw = (void*)&null_hash_suw;
// gate->hash_suw = (void*)&null_hash_suw;
gate->get_new_work = (void*)&std_get_new_work;
gate->work_decode = (void*)&std_le_work_decode;
gate->decode_extra_data = (void*)&do_nothing;
@@ -113,9 +113,10 @@ typedef struct
// mandatory functions, must be overwritten
int ( *scanhash ) ( struct work*, uint32_t, uint64_t*, struct thr_info* );

// not used anywhere
// optional unsafe, must be overwritten if algo uses function
void ( *hash ) ( void*, const void*, uint32_t ) ;
void ( *hash_suw ) ( void*, const void* );
int ( *hash ) ( void*, const void*, uint32_t ) ;
//void ( *hash_suw ) ( void*, const void* );

//optional, safe to use default in most cases

@@ -213,8 +214,8 @@ void four_way_not_tested();
int null_scanhash();

// displays warning
void null_hash ();
void null_hash_suw();
int null_hash ();
//void null_hash_suw();

// optional safe targets, default listed first unless noted.
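These hunks change the gate's hash member from void to int so an algorithm can abort part-way through when new work arrives and tell scanhash whether it produced a result. The sketch below is a simplified illustration of that contract, not repository code: example_hash, the stage1/stage2 stand-ins and the local work_restart stub are placeholders. The real algorithms check work_restart between their heavier stages, exactly as the hunks further down show.

```c
#include <stdint.h>
#include <string.h>

// Placeholder stand-ins for the miner context; illustration only.
struct restart_flag { volatile int restart; };
static struct restart_flag work_restart[64];

static void stage1( uint32_t *mid, const void *in ) { memcpy( mid, in, 64 ); }
static void stage2( void *out, const uint32_t *mid ) { memcpy( out, mid, 32 ); }

// The int-returning gate hash contract introduced by these commits:
// return 1 when a full hash was produced, 0 when aborted because new
// work arrived part way through.
static int example_hash( void *output, const void *input, int thr_id )
{
   uint32_t midstate[16];

   stage1( midstate, input );                     // first half of the chain

   if ( work_restart[thr_id].restart ) return 0;  // stale work: bail out early

   stage2( output, midstate );                    // remaining rounds
   return 1;
}
```

A scanhash loop then only tests the target when the call returns nonzero, which is the shape of the scanhash hunks below.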
@@ -311,7 +311,7 @@ bool register_m7m_algo( algo_gate_t *gate )
{
gate->optimizations = SHA_OPT;
init_m7m_ctx();
gate->scanhash = (void*)scanhash_m7m_hash;
gate->scanhash = (void*)&scanhash_m7m_hash;
gate->build_stratum_request = (void*)&std_be_build_stratum_request;
gate->work_decode = (void*)&std_be_work_decode;
gate->submit_getwork_result = (void*)&std_be_submit_getwork_result;
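For context, this is roughly how the pieces connect: init_algo_gate() installs the null and std defaults shown earlier, and each algorithm's register function overrides the pointers it needs. The fragment below is a hedged sketch using the hypothetical names register_example_algo, scanhash_example and example_hash; only the gate fields come from the hunks above, and the miner's algo-gate headers are assumed.

```c
// Hedged sketch of a per-algorithm registration function.
// The gate fields mirror the ones shown above; the example_* symbols
// are placeholders, not repository code.
bool register_example_algo( algo_gate_t *gate )
{
   gate->scanhash = (void*)&scanhash_example;  // mandatory: the nonce search loop
   gate->hash     = (void*)&example_hash;      // now an int-returning function
   // Everything else keeps the defaults installed by init_algo_gate().
   return true;
}
```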
@@ -424,7 +424,7 @@ static bool scrypt_1024_1_1_256(const uint32_t *input, uint32_t *output,
}

#ifdef HAVE_SHA256_4WAY
static bool scrypt_1024_1_1_256_4way(const uint32_t *input,
static int scrypt_1024_1_1_256_4way(const uint32_t *input,
uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N,
int thrid )
{
@@ -449,6 +449,8 @@ static bool scrypt_1024_1_1_256_4way(const uint32_t *input,

PBKDF2_SHA256_80_128_4way(tstate, ostate, W, W);

if ( work_restart[thrid].restart ) return 0;

for (i = 0; i < 32; i++)
for (k = 0; k < 4; k++)
X[k * 32 + i] = W[4 * i + k];
@@ -458,6 +460,8 @@ static bool scrypt_1024_1_1_256_4way(const uint32_t *input,
scrypt_core(X + 2 * 32, V, N);
scrypt_core(X + 3 * 32, V, N);

if ( work_restart[thrid].restart ) return 0;

for (i = 0; i < 32; i++)
for (k = 0; k < 4; k++)
W[4 * i + k] = X[k * 32 + i];
@@ -468,13 +472,13 @@ static bool scrypt_1024_1_1_256_4way(const uint32_t *input,
for (k = 0; k < 4; k++)
output[k * 8 + i] = W[4 * i + k];

return true;
return 1;
}
#endif /* HAVE_SHA256_4WAY */

#ifdef HAVE_SCRYPT_3WAY

static bool scrypt_1024_1_1_256_3way(const uint32_t *input,
static int scrypt_1024_1_1_256_3way(const uint32_t *input,
uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N,
int thrid )
{
@@ -492,23 +496,23 @@ static bool scrypt_1024_1_1_256_3way(const uint32_t *input,
HMAC_SHA256_80_init(input + 20, tstate + 8, ostate + 8);
HMAC_SHA256_80_init(input + 40, tstate + 16, ostate + 16);

if ( work_restart[thrid].restart ) return false;
if ( work_restart[thrid].restart ) return 0;

PBKDF2_SHA256_80_128(tstate + 0, ostate + 0, input + 0, X + 0);
PBKDF2_SHA256_80_128(tstate + 8, ostate + 8, input + 20, X + 32);
PBKDF2_SHA256_80_128(tstate + 16, ostate + 16, input + 40, X + 64);

if ( work_restart[thrid].restart ) return false;
if ( work_restart[thrid].restart ) return 0;

scrypt_core_3way(X, V, N);

if ( work_restart[thrid].restart ) return false;
if ( work_restart[thrid].restart ) return 0;

PBKDF2_SHA256_128_32(tstate + 0, ostate + 0, X + 0, output + 0);
PBKDF2_SHA256_128_32(tstate + 8, ostate + 8, X + 32, output + 8);
PBKDF2_SHA256_128_32(tstate + 16, ostate + 16, X + 64, output + 16);

return true;
return 1;
}

#ifdef HAVE_SHA256_4WAY
@@ -539,13 +543,13 @@ static bool scrypt_1024_1_1_256_12way(const uint32_t *input,
HMAC_SHA256_80_init_4way(W + 128, tstate + 32, ostate + 32);
HMAC_SHA256_80_init_4way(W + 256, tstate + 64, ostate + 64);

if ( work_restart[thrid].restart ) return false;
if ( work_restart[thrid].restart ) return 0;

PBKDF2_SHA256_80_128_4way(tstate + 0, ostate + 0, W + 0, W + 0);
PBKDF2_SHA256_80_128_4way(tstate + 32, ostate + 32, W + 128, W + 128);
PBKDF2_SHA256_80_128_4way(tstate + 64, ostate + 64, W + 256, W + 256);

if ( work_restart[thrid].restart ) return false;
if ( work_restart[thrid].restart ) return 0;

for (j = 0; j < 3; j++)
for (i = 0; i < 32; i++)
@@ -557,7 +561,7 @@ static bool scrypt_1024_1_1_256_12way(const uint32_t *input,
scrypt_core_3way(X + 2 * 96, V, N);
scrypt_core_3way(X + 3 * 96, V, N);

if ( work_restart[thrid].restart ) return false;
if ( work_restart[thrid].restart ) return 0;

for (j = 0; j < 3; j++)
for (i = 0; i < 32; i++)
@@ -573,14 +577,14 @@ static bool scrypt_1024_1_1_256_12way(const uint32_t *input,
for (k = 0; k < 4; k++)
output[32 * j + k * 8 + i] = W[128 * j + 4 * i + k];

return true;
return 1;
}
#endif /* HAVE_SHA256_4WAY */

#endif /* HAVE_SCRYPT_3WAY */

#ifdef HAVE_SCRYPT_6WAY
static bool scrypt_1024_1_1_256_24way( const uint32_t *input,
static int scrypt_1024_1_1_256_24way( const uint32_t *input,
uint32_t *output, uint32_t *midstate,
unsigned char *scratchpad, int N, int thrid )
{
@@ -607,13 +611,13 @@ static bool scrypt_1024_1_1_256_24way( const uint32_t *input,
HMAC_SHA256_80_init_8way( W + 256, tstate + 64, ostate + 64 );
HMAC_SHA256_80_init_8way( W + 512, tstate + 128, ostate + 128 );

if ( work_restart[thrid].restart ) return false;
if ( work_restart[thrid].restart ) return 0;

PBKDF2_SHA256_80_128_8way( tstate + 0, ostate + 0, W + 0, W + 0 );
PBKDF2_SHA256_80_128_8way( tstate + 64, ostate + 64, W + 256, W + 256 );
PBKDF2_SHA256_80_128_8way( tstate + 128, ostate + 128, W + 512, W + 512 );

if ( work_restart[thrid].restart ) return false;
if ( work_restart[thrid].restart ) return 0;

for ( j = 0; j < 3; j++ )
for ( i = 0; i < 32; i++ )
@@ -622,10 +626,13 @@ static bool scrypt_1024_1_1_256_24way( const uint32_t *input,

scrypt_core_6way( X + 0 * 32, V, N );
scrypt_core_6way( X + 6 * 32, V, N );
scrypt_core_6way( X + 12 * 32, V, N );

if ( work_restart[thrid].restart ) return 0;

scrypt_core_6way( X + 12 * 32, V, N );
scrypt_core_6way( X + 18 * 32, V, N );

if ( work_restart[thrid].restart ) return false;
if ( work_restart[thrid].restart ) return 0;

for ( j = 0; j < 3; j++ )
for ( i = 0; i < 32; i++ )
@@ -641,7 +648,7 @@ static bool scrypt_1024_1_1_256_24way( const uint32_t *input,
for ( k = 0; k < 8; k++ )
output[8 * 8 * j + k * 8 + i] = W[8 * 32 * j + 8 * i + k];

return true;
return 1;
}
#endif /* HAVE_SCRYPT_6WAY */
@@ -706,12 +713,13 @@ extern int scanhash_scrypt( struct work *work, uint32_t max_nonce,
if ( rc )
for ( i = 0; i < throughput; i++ )
{
if ( unlikely( valid_hash( hash + i * 8, ptarget ) ) )
if ( unlikely( valid_hash( hash + i * 8, ptarget ) ) )
{
pdata[19] = data[i * 20 + 19];
submit_lane_solution( work, hash, mythr, i );
submit_solution( work, hash + i * 8, mythr );
}
}

}
} while ( likely( ( n < ( max_nonce - throughput ) ) && !(*restart) ) );

*hashes_done = n - pdata[19];
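One practical consequence of the --param-n setting listed in the README: scrypt's scratchpad (the V array passed to scrypt_core above) takes 128 * r * N bytes per lane, so raising N from 1024 to scryptn2's 1048576 grows the working set from 128 KiB to 128 MiB per lane, assuming the default r = 1 and p = 1 encoded in the scrypt_1024_1_1_256 names. The snippet below only evaluates that formula; it is not repository code.

```c
#include <stdio.h>
#include <stdint.h>

// Scratchpad ("V") size for scrypt is 128 * r * N bytes per lane.
int main( void )
{
   const uint64_t r = 1;
   const uint64_t n_classic = 1024, n_scryptn2 = 1048576;

   printf( "scrypt   : %llu KiB per lane\n",
           (unsigned long long)( 128 * r * n_classic / 1024 ) );
   printf( "scryptn2 : %llu MiB per lane\n",
           (unsigned long long)( 128 * r * n_scryptn2 / ( 1024 * 1024 ) ) );
   return 0;
}
```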
@@ -77,7 +77,7 @@ typedef union _hex_context_overlay hex_context_overlay;

static __thread x16r_context_overlay hex_ctx;

void hex_hash( void* output, const void* input )
int hex_hash( void* output, const void* input, int thrid )
{
uint32_t _ALIGN(128) hash[16];
x16r_context_overlay ctx;
@@ -214,11 +214,15 @@ void hex_hash( void* output, const void* input )
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
break;
}

if ( work_restart[thrid].restart ) return 0;

algo = (uint8_t)hash[0] % X16R_HASH_FUNC_COUNT;
in = (void*) hash;
size = 64;
}
memcpy(output, hash, 32);
return 1;
}

int scanhash_hex( struct work *work, uint32_t max_nonce,
@@ -286,8 +290,7 @@ int scanhash_hex( struct work *work, uint32_t max_nonce,
do
{
edata[19] = nonce;
hex_hash( hash32, edata );

if ( hex_hash( hash32, edata, thr_id ) );
if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
{
be32enc( &pdata[19], nonce );
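hex, like the x16r family later in this diff, chains 16 hash primitives and picks the next one from the running hash; that is what the algo = (uint8_t)hash[0] % X16R_HASH_FUNC_COUNT line above does. A stripped-down sketch of that dispatch loop follows. The function table and the seeding of the first round are placeholders for illustration, not the project's exact selection rules.

```c
#include <stdint.h>
#include <string.h>
#include <stddef.h>

#define FUNC_COUNT 16

// Each entry stands in for one of the 16 chained primitives (blake, bmw, ...).
typedef void (*hash_fn)( void *out64, const void *in, size_t len );

// Hedged sketch of hex/x16r-style dispatch: the running state picks the
// next primitive, every round produces a 64-byte value, and the final
// result is truncated to 32 bytes.
static void chained_hash( void *output, const void *input, size_t inlen,
                          const hash_fn table[FUNC_COUNT] )
{
   uint8_t hash[64];
   const void *in = input;
   size_t size = inlen;

   for ( int round = 0; round < FUNC_COUNT; round++ )
   {
      unsigned algo = ( round == 0 )
                    ? ( (const uint8_t*)input )[0] % FUNC_COUNT  // seeded from the input
                    : hash[0] % FUNC_COUNT;                      // then from the running hash
      table[algo]( hash, in, size );
      in = hash;          // later rounds digest the previous 64-byte result
      size = 64;
   }
   memcpy( output, hash, 32 );
}
```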
@@ -80,7 +80,7 @@ void x16r_8way_prehash( void *vdata, void *pdata )
|
||||
// Called by wrapper hash function to optionally continue hashing and
|
||||
// convert to final hash.
|
||||
|
||||
void x16r_8way_hash_generic( void* output, const void* input )
|
||||
int x16r_8way_hash_generic( void* output, const void* input, int thrid )
|
||||
{
|
||||
uint32_t vhash[20*8] __attribute__ ((aligned (128)));
|
||||
uint32_t hash0[20] __attribute__ ((aligned (64)));
|
||||
@@ -424,6 +424,9 @@ void x16r_8way_hash_generic( void* output, const void* input )
|
||||
hash7, vhash );
|
||||
break;
|
||||
}
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
size = 64;
|
||||
}
|
||||
|
||||
@@ -435,14 +438,17 @@ void x16r_8way_hash_generic( void* output, const void* input )
|
||||
memcpy( output+320, hash5, 64 );
|
||||
memcpy( output+384, hash6, 64 );
|
||||
memcpy( output+448, hash7, 64 );
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
// x16-r,-s,-rt wrapper called directly by scanhash to repackage 512 bit
|
||||
// hash to 256 bit final hash.
|
||||
void x16r_8way_hash( void* output, const void* input )
|
||||
int x16r_8way_hash( void* output, const void* input, int thrid )
|
||||
{
|
||||
uint8_t hash[64*8] __attribute__ ((aligned (128)));
|
||||
x16r_8way_hash_generic( hash, input );
|
||||
if ( !x16r_8way_hash_generic( hash, input, thrid ) )
|
||||
return 0;
|
||||
|
||||
memcpy( output, hash, 32 );
|
||||
memcpy( output+32, hash+64, 32 );
|
||||
@@ -452,7 +458,9 @@ void x16r_8way_hash( void* output, const void* input )
|
||||
memcpy( output+160, hash+320, 32 );
|
||||
memcpy( output+192, hash+384, 32 );
|
||||
memcpy( output+224, hash+448, 32 );
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
// x16r only
|
||||
int scanhash_x16r_8way( struct work *work, uint32_t max_nonce,
|
||||
@@ -492,8 +500,7 @@ int scanhash_x16r_8way( struct work *work, uint32_t max_nonce,
|
||||
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x16r_8way_hash( hash, vdata );
|
||||
|
||||
if( x16r_8way_hash( hash, vdata, thr_id ) );
|
||||
for ( int i = 0; i < 8; i++ )
|
||||
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
|
||||
{
|
||||
@@ -565,7 +572,7 @@ void x16r_4way_prehash( void *vdata, void *pdata )
|
||||
}
|
||||
}
|
||||
|
||||
void x16r_4way_hash_generic( void* output, const void* input )
|
||||
int x16r_4way_hash_generic( void* output, const void* input, int thrid )
|
||||
{
|
||||
uint32_t vhash[20*4] __attribute__ ((aligned (128)));
|
||||
uint32_t hash0[20] __attribute__ ((aligned (64)));
|
||||
@@ -794,23 +801,31 @@ void x16r_4way_hash_generic( void* output, const void* input )
|
||||
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
|
||||
break;
|
||||
}
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
size = 64;
|
||||
}
|
||||
memcpy( output, hash0, 64 );
|
||||
memcpy( output+64, hash1, 64 );
|
||||
memcpy( output+128, hash2, 64 );
|
||||
memcpy( output+192, hash3, 64 );
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
void x16r_4way_hash( void* output, const void* input )
|
||||
int x16r_4way_hash( void* output, const void* input, int thrid )
|
||||
{
|
||||
uint8_t hash[64*4] __attribute__ ((aligned (64)));
|
||||
x16r_4way_hash_generic( hash, input );
|
||||
if ( !x16r_4way_hash_generic( hash, input, thrid ) )
|
||||
return 0;
|
||||
|
||||
memcpy( output, hash, 32 );
|
||||
memcpy( output+32, hash+64, 32 );
|
||||
memcpy( output+64, hash+128, 32 );
|
||||
memcpy( output+96, hash+192, 32 );
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x16r_4way( struct work *work, uint32_t max_nonce,
|
||||
@@ -849,7 +864,7 @@ int scanhash_x16r_4way( struct work *work, uint32_t max_nonce,
|
||||
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x16r_4way_hash( hash, vdata );
|
||||
if ( x16r_4way_hash( hash, vdata, thr_id ) );
|
||||
for ( int i = 0; i < 4; i++ )
|
||||
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
|
||||
{
|
||||
|
@@ -131,8 +131,8 @@ typedef union _x16r_8way_context_overlay x16r_8way_context_overlay;
|
||||
extern __thread x16r_8way_context_overlay x16r_ctx;
|
||||
|
||||
void x16r_8way_prehash( void *, void * );
|
||||
void x16r_8way_hash_generic( void *, const void * );
|
||||
void x16r_8way_hash( void *, const void * );
|
||||
int x16r_8way_hash_generic( void *, const void *, int );
|
||||
int x16r_8way_hash( void *, const void *, int );
|
||||
int scanhash_x16r_8way( struct work *, uint32_t ,
|
||||
uint64_t *, struct thr_info * );
|
||||
extern __thread x16r_8way_context_overlay x16r_ctx;
|
||||
@@ -166,8 +166,8 @@ typedef union _x16r_4way_context_overlay x16r_4way_context_overlay;
|
||||
extern __thread x16r_4way_context_overlay x16r_ctx;
|
||||
|
||||
void x16r_4way_prehash( void *, void * );
|
||||
void x16r_4way_hash_generic( void *, const void * );
|
||||
void x16r_4way_hash( void *, const void * );
|
||||
int x16r_4way_hash_generic( void *, const void *, int );
|
||||
int x16r_4way_hash( void *, const void *, int );
|
||||
int scanhash_x16r_4way( struct work *, uint32_t,
|
||||
uint64_t *, struct thr_info * );
|
||||
extern __thread x16r_4way_context_overlay x16r_ctx;
|
||||
@@ -205,26 +205,26 @@ typedef union _x16r_context_overlay x16r_context_overlay;
|
||||
extern __thread x16r_context_overlay x16_ctx;
|
||||
|
||||
void x16r_prehash( void *, void * );
|
||||
void x16r_hash_generic( void *, const void * );
|
||||
void x16r_hash( void *, const void * );
|
||||
int x16r_hash_generic( void *, const void *, int );
|
||||
int x16r_hash( void *, const void *, int );
|
||||
int scanhash_x16r( struct work *, uint32_t, uint64_t *, struct thr_info * );
|
||||
|
||||
// x16Rv2
|
||||
#if defined(X16RV2_8WAY)
|
||||
|
||||
void x16rv2_8way_hash( void *state, const void *input );
|
||||
int x16rv2_8way_hash( void *state, const void *input, int thrid );
|
||||
int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
|
||||
#elif defined(X16RV2_4WAY)
|
||||
|
||||
void x16rv2_4way_hash( void *state, const void *input );
|
||||
int x16rv2_4way_hash( void *state, const void *input, int thrid );
|
||||
int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
|
||||
#else
|
||||
|
||||
void x16rv2_hash( void *state, const void *input );
|
||||
int x16rv2_hash( void *state, const void *input, int thr_id );
|
||||
int scanhash_x16rv2( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
|
||||
@@ -254,21 +254,21 @@ int scanhash_x16rt( struct work *work, uint32_t max_nonce,
|
||||
// x21s
|
||||
#if defined(X16R_8WAY)
|
||||
|
||||
void x21s_8way_hash( void *state, const void *input );
|
||||
int x21s_8way_hash( void *state, const void *input, int thrid );
|
||||
int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
bool x21s_8way_thread_init();
|
||||
|
||||
#elif defined(X16R_4WAY)
|
||||
|
||||
void x21s_4way_hash( void *state, const void *input );
|
||||
int x21s_4way_hash( void *state, const void *input, int thrid );
|
||||
int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
bool x21s_4way_thread_init();
|
||||
|
||||
#else
|
||||
|
||||
void x21s_hash( void *state, const void *input );
|
||||
int x21s_hash( void *state, const void *input, int thr_id );
|
||||
int scanhash_x21s( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
bool x21s_thread_init();
|
||||
|
@@ -48,7 +48,7 @@ void x16r_prehash( void *edata, void *pdata )
|
||||
}
|
||||
}
|
||||
|
||||
void x16r_hash_generic( void* output, const void* input )
|
||||
int x16r_hash_generic( void* output, const void* input, int thrid )
|
||||
{
|
||||
uint32_t _ALIGN(128) hash[16];
|
||||
x16r_context_overlay ctx;
|
||||
@@ -178,18 +178,24 @@ void x16r_hash_generic( void* output, const void* input )
|
||||
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
|
||||
break;
|
||||
}
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
in = (void*) hash;
|
||||
size = 64;
|
||||
}
|
||||
memcpy( output, hash, 64 );
|
||||
return true;
|
||||
}
|
||||
|
||||
void x16r_hash( void* output, const void* input )
|
||||
int x16r_hash( void* output, const void* input, int thrid )
|
||||
{
|
||||
uint8_t hash[64] __attribute__ ((aligned (64)));
|
||||
x16r_hash_generic( hash, input );
|
||||
if ( !x16r_hash_generic( hash, input, thrid ) )
|
||||
return 0;
|
||||
|
||||
memcpy( output, hash, 32 );
|
||||
memcpy( output, hash, 32 );
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x16r( struct work *work, uint32_t max_nonce,
|
||||
@@ -223,8 +229,7 @@ int scanhash_x16r( struct work *work, uint32_t max_nonce,
|
||||
do
|
||||
{
|
||||
edata[19] = nonce;
|
||||
x16r_hash( hash32, edata );
|
||||
|
||||
if ( x16r_hash( hash32, edata, thr_id ) )
|
||||
if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = bswap_32( nonce );
|
||||
|
@@ -41,8 +41,7 @@ int scanhash_x16rt_8way( struct work *work, uint32_t max_nonce,
|
||||
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x16r_8way_hash( hash, vdata );
|
||||
|
||||
if ( x16r_8way_hash( hash, vdata, thr_id ) )
|
||||
for ( int i = 0; i < 8; i++ )
|
||||
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
|
||||
{
|
||||
@@ -95,7 +94,7 @@ int scanhash_x16rt_4way( struct work *work, uint32_t max_nonce,
|
||||
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x16r_4way_hash( hash, vdata );
|
||||
if ( x16r_4way_hash( hash, vdata, thr_id ) )
|
||||
for ( int i = 0; i < 4; i++ )
|
||||
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
|
||||
{
|
||||
|
@@ -36,8 +36,7 @@ int scanhash_x16rt( struct work *work, uint32_t max_nonce,
|
||||
do
|
||||
{
|
||||
edata[19] = nonce;
|
||||
x16r_hash( hash32, edata );
|
||||
|
||||
if ( x16r_hash( hash32, edata, thr_id ) )
|
||||
if ( valid_hash( hash32, ptarget ) && !bench )
|
||||
{
|
||||
pdata[19] = bswap_32( nonce );
|
||||
|
@@ -65,7 +65,7 @@ union _x16rv2_8way_context_overlay
|
||||
typedef union _x16rv2_8way_context_overlay x16rv2_8way_context_overlay;
|
||||
static __thread x16rv2_8way_context_overlay x16rv2_ctx;
|
||||
|
||||
void x16rv2_8way_hash( void* output, const void* input )
|
||||
int x16rv2_8way_hash( void* output, const void* input, int thrid )
|
||||
{
|
||||
uint32_t vhash[24*8] __attribute__ ((aligned (128)));
|
||||
uint32_t hash0[24] __attribute__ ((aligned (64)));
|
||||
@@ -563,6 +563,9 @@ void x16rv2_8way_hash( void* output, const void* input )
|
||||
hash7, vhash );
|
||||
break;
|
||||
}
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
size = 64;
|
||||
}
|
||||
|
||||
@@ -574,6 +577,7 @@ void x16rv2_8way_hash( void* output, const void* input )
|
||||
memcpy( output+160, hash5, 32 );
|
||||
memcpy( output+192, hash6, 32 );
|
||||
memcpy( output+224, hash7, 32 );
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,
|
||||
@@ -669,8 +673,7 @@ int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,
|
||||
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x16rv2_8way_hash( hash, vdata );
|
||||
|
||||
if ( x16rv2_8way_hash( hash, vdata, thr_id ) )
|
||||
for ( int i = 0; i < 8; i++ )
|
||||
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
|
||||
{
|
||||
@@ -718,7 +721,7 @@ inline void padtiger512( uint32_t* hash )
|
||||
for ( int i = 6; i < 16; i++ ) hash[i] = 0;
|
||||
}
|
||||
|
||||
void x16rv2_4way_hash( void* output, const void* input )
|
||||
int x16rv2_4way_hash( void* output, const void* input, int thrid )
|
||||
{
|
||||
uint32_t hash0[20] __attribute__ ((aligned (64)));
|
||||
uint32_t hash1[20] __attribute__ ((aligned (64)));
|
||||
@@ -1023,12 +1026,16 @@ void x16rv2_4way_hash( void* output, const void* input )
|
||||
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
|
||||
break;
|
||||
}
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
size = 64;
|
||||
}
|
||||
memcpy( output, hash0, 32 );
|
||||
memcpy( output+32, hash1, 32 );
|
||||
memcpy( output+64, hash2, 32 );
|
||||
memcpy( output+96, hash3, 32 );
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
|
||||
@@ -1119,7 +1126,7 @@ int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
|
||||
|
||||
do
|
||||
{
|
||||
x16rv2_4way_hash( hash, vdata );
|
||||
if ( x16rv2_4way_hash( hash, vdata, thr_id ) )
|
||||
for ( int i = 0; i < 4; i++ )
|
||||
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
|
||||
{
|
||||
|
@@ -67,7 +67,7 @@ inline void padtiger512(uint32_t* hash) {
|
||||
for (int i = (24/4); i < (64/4); i++) hash[i] = 0;
|
||||
}
|
||||
|
||||
void x16rv2_hash( void* output, const void* input )
|
||||
int x16rv2_hash( void* output, const void* input, int thrid )
|
||||
{
|
||||
uint32_t _ALIGN(128) hash[16];
|
||||
x16rv2_context_overlay ctx;
|
||||
@@ -180,10 +180,14 @@ void x16rv2_hash( void* output, const void* input )
|
||||
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
|
||||
break;
|
||||
}
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
in = (void*) hash;
|
||||
size = 64;
|
||||
}
|
||||
memcpy(output, hash, 32);
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x16rv2( struct work *work, uint32_t max_nonce,
|
||||
@@ -221,8 +225,7 @@ int scanhash_x16rv2( struct work *work, uint32_t max_nonce,
|
||||
do
|
||||
{
|
||||
edata[19] = nonce;
|
||||
x16rv2_hash( hash32, edata );
|
||||
|
||||
if ( x16rv2_hash( hash32, edata, thr_id ) )
|
||||
if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = bswap_32( nonce );
|
||||
|
@@ -30,7 +30,7 @@ union _x21s_8way_context_overlay
|
||||
|
||||
typedef union _x21s_8way_context_overlay x21s_8way_context_overlay;
|
||||
|
||||
void x21s_8way_hash( void* output, const void* input )
|
||||
int x21s_8way_hash( void* output, const void* input, int thrid )
|
||||
{
|
||||
uint32_t vhash[16*8] __attribute__ ((aligned (128)));
|
||||
uint8_t shash[64*8] __attribute__ ((aligned (64)));
|
||||
@@ -44,7 +44,8 @@ void x21s_8way_hash( void* output, const void* input )
|
||||
uint32_t *hash7 = (uint32_t*)( shash+448 );
|
||||
x21s_8way_context_overlay ctx;
|
||||
|
||||
x16r_8way_hash_generic( shash, input );
|
||||
if ( !x16r_8way_hash_generic( shash, input, thrid ) )
|
||||
return 0;
|
||||
|
||||
intrlv_8x32_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
|
||||
hash7 );
|
||||
@@ -124,6 +125,8 @@ void x21s_8way_hash( void* output, const void* input )
|
||||
sha256_8way_init( &ctx.sha256 );
|
||||
sha256_8way_update( &ctx.sha256, vhash, 64 );
|
||||
sha256_8way_close( &ctx.sha256, output );
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,
|
||||
@@ -166,8 +169,7 @@ int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,
|
||||
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x21s_8way_hash( hash, vdata );
|
||||
|
||||
if ( x21s_8way_hash( hash, vdata, thr_id ) )
|
||||
for ( int lane = 0; lane < 8; lane++ )
|
||||
if ( unlikely( hash7[lane] <= Htarg ) )
|
||||
{
|
||||
@@ -215,7 +217,7 @@ union _x21s_4way_context_overlay
|
||||
|
||||
typedef union _x21s_4way_context_overlay x21s_4way_context_overlay;
|
||||
|
||||
void x21s_4way_hash( void* output, const void* input )
|
||||
int x21s_4way_hash( void* output, const void* input, int thrid )
|
||||
{
|
||||
uint32_t vhash[16*4] __attribute__ ((aligned (64)));
|
||||
uint8_t shash[64*4] __attribute__ ((aligned (64)));
|
||||
@@ -225,8 +227,9 @@ void x21s_4way_hash( void* output, const void* input )
|
||||
uint32_t *hash2 = (uint32_t*)( shash+128 );
|
||||
uint32_t *hash3 = (uint32_t*)( shash+192 );
|
||||
|
||||
x16r_4way_hash_generic( shash, input );
|
||||
|
||||
if ( !x16r_4way_hash_generic( shash, input, thrid ) )
|
||||
return 0;
|
||||
|
||||
intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
|
||||
|
||||
haval256_5_4way_init( &ctx.haval );
|
||||
@@ -299,6 +302,8 @@ void x21s_4way_hash( void* output, const void* input )
|
||||
dintrlv_4x32( output, output+32, output+64,output+96, vhash, 256 );
|
||||
|
||||
#endif
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,
|
||||
@@ -337,7 +342,7 @@ int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,
|
||||
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x21s_4way_hash( hash, vdata );
|
||||
if ( x21s_4way_hash( hash, vdata, thr_id ) )
|
||||
for ( int i = 0; i < 4; i++ )
|
||||
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
|
||||
{
|
||||
|
@@ -27,12 +27,13 @@ union _x21s_context_overlay
|
||||
};
|
||||
typedef union _x21s_context_overlay x21s_context_overlay;
|
||||
|
||||
void x21s_hash( void* output, const void* input )
|
||||
int x21s_hash( void* output, const void* input, int thrid )
|
||||
{
|
||||
uint32_t _ALIGN(128) hash[16];
|
||||
x21s_context_overlay ctx;
|
||||
|
||||
x16r_hash_generic( hash, input );
|
||||
if ( !x16r_hash_generic( hash, input, thrid ) )
|
||||
return 0;
|
||||
|
||||
sph_haval256_5_init( &ctx.haval );
|
||||
sph_haval256_5( &ctx.haval, (const void*) hash, 64) ;
|
||||
@@ -54,6 +55,8 @@ void x21s_hash( void* output, const void* input )
|
||||
SHA256_Final( (unsigned char*)hash, &ctx.sha256 );
|
||||
|
||||
memcpy( output, hash, 32 );
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x21s( struct work *work, uint32_t max_nonce,
|
||||
@@ -87,8 +90,7 @@ int scanhash_x21s( struct work *work, uint32_t max_nonce,
|
||||
do
|
||||
{
|
||||
edata[19] = nonce;
|
||||
x21s_hash( hash32, edata );
|
||||
|
||||
if ( x21s_hash( hash32, edata, thr_id ) )
|
||||
if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = bswap_32( nonce );
|
||||
|
@@ -62,7 +62,7 @@ union _x22i_8way_ctx_overlay
|
||||
};
|
||||
typedef union _x22i_8way_ctx_overlay x22i_8way_ctx_overlay;
|
||||
|
||||
void x22i_8way_hash( void *output, const void *input )
|
||||
int x22i_8way_hash( void *output, const void *input, int thrid )
|
||||
{
|
||||
uint64_t vhash[8*8] __attribute__ ((aligned (128)));
|
||||
uint64_t vhashA[8*8] __attribute__ ((aligned (64)));
|
||||
@@ -129,6 +129,8 @@ void x22i_8way_hash( void *output, const void *input )
|
||||
keccak512_8way_update( &ctx.keccak, vhash, 64 );
|
||||
keccak512_8way_close( &ctx.keccak, vhash );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
|
||||
|
||||
luffa512_4way_full( &ctx.luffa, vhashA, vhashA, 64 );
|
||||
@@ -214,6 +216,8 @@ void x22i_8way_hash( void *output, const void *input )
|
||||
|
||||
#endif
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
hamsi512_8way_init( &ctx.hamsi );
|
||||
hamsi512_8way_update( &ctx.hamsi, vhash, 64 );
|
||||
hamsi512_8way_close( &ctx.hamsi, vhash );
|
||||
@@ -346,6 +350,8 @@ void x22i_8way_hash( void *output, const void *input )
|
||||
sph_tiger (&ctx.tiger, (const void*) hash7, 64);
|
||||
sph_tiger_close(&ctx.tiger, (void*) hashA7);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
memset( hash0, 0, 64 );
|
||||
memset( hash1, 0, 64 );
|
||||
memset( hash2, 0, 64 );
|
||||
@@ -399,6 +405,8 @@ void x22i_8way_hash( void *output, const void *input )
|
||||
sha256_8way_init( &ctx.sha256 );
|
||||
sha256_8way_update( &ctx.sha256, vhash, 64 );
|
||||
sha256_8way_close( &ctx.sha256, output );
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
|
||||
@@ -428,8 +436,7 @@ int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
|
||||
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x22i_8way_hash( hash, vdata );
|
||||
|
||||
if ( x22i_8way_hash( hash, vdata, thr_id ) )
|
||||
for ( int lane = 0; lane < 8; lane++ )
|
||||
if ( unlikely( ( hashd7[ lane ] <= targ32 ) && !bench ) )
|
||||
{
|
||||
@@ -437,7 +444,7 @@ int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n + lane );
|
||||
submit_lane_solution( work, lane_hash, mythr, lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm512_add_epi32( *noncev,
|
||||
@@ -524,7 +531,7 @@ union _x22i_4way_ctx_overlay
|
||||
};
|
||||
typedef union _x22i_4way_ctx_overlay x22i_ctx_overlay;
|
||||
|
||||
void x22i_4way_hash( void *output, const void *input )
|
||||
int x22i_4way_hash( void *output, const void *input, int thrid )
|
||||
{
|
||||
uint64_t hash0[8*4] __attribute__ ((aligned (64)));
|
||||
uint64_t hash1[8*4] __attribute__ ((aligned (64)));
|
||||
@@ -563,6 +570,8 @@ void x22i_4way_hash( void *output, const void *input )
|
||||
keccak512_4way_update( &ctx.keccak, vhash, 64 );
|
||||
keccak512_4way_close( &ctx.keccak, vhash );
|
||||
|
||||
if ( work_restart[thrid].restart ) return false;
|
||||
|
||||
rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
|
||||
|
||||
luffa512_2way_full( &ctx.luffa, vhashA, vhashA, 64 );
|
||||
@@ -591,6 +600,8 @@ void x22i_4way_hash( void *output, const void *input )
|
||||
|
||||
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
|
||||
|
||||
if ( work_restart[thrid].restart ) return false;
|
||||
|
||||
hamsi512_4way_init( &ctx.hamsi );
|
||||
hamsi512_4way_update( &ctx.hamsi, vhash, 64 );
|
||||
hamsi512_4way_close( &ctx.hamsi, vhash );
|
||||
@@ -636,6 +647,8 @@ void x22i_4way_hash( void *output, const void *input )
|
||||
sha512_4way_close( &ctx.sha512, vhash );
|
||||
dintrlv_4x64_512( &hash0[24], &hash1[24], &hash2[24], &hash3[24], vhash );
|
||||
|
||||
if ( work_restart[thrid].restart ) return false;
|
||||
|
||||
ComputeSingleSWIFFTX((unsigned char*)hash0, (unsigned char*)hashA0);
|
||||
ComputeSingleSWIFFTX((unsigned char*)hash1, (unsigned char*)hashA1);
|
||||
ComputeSingleSWIFFTX((unsigned char*)hash2, (unsigned char*)hashA2);
|
||||
@@ -668,6 +681,8 @@ void x22i_4way_hash( void *output, const void *input )
|
||||
sph_tiger (&ctx.tiger, (const void*) hash3, 64);
|
||||
sph_tiger_close(&ctx.tiger, (void*) hashA3);
|
||||
|
||||
if ( work_restart[thrid].restart ) return false;
|
||||
|
||||
memset( hash0, 0, 64 );
|
||||
memset( hash1, 0, 64 );
|
||||
memset( hash2, 0, 64 );
|
||||
@@ -700,8 +715,9 @@ void x22i_4way_hash( void *output, const void *input )
|
||||
sha256_4way_init( &ctx.sha256 );
|
||||
sha256_4way_update( &ctx.sha256, vhash, 64 );
|
||||
sha256_4way_close( &ctx.sha256, output );
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x22i_4way( struct work* work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
@@ -729,8 +745,7 @@ int scanhash_x22i_4way( struct work* work, uint32_t max_nonce,
|
||||
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x22i_4way_hash( hash, vdata );
|
||||
|
||||
if ( x22i_4way_hash( hash, vdata, thr_id ) )
|
||||
for ( int lane = 0; lane < 4; lane++ )
|
||||
if ( unlikely( hashd7[ lane ] <= targ32 && !bench ) )
|
||||
{
|
||||
@@ -738,7 +753,7 @@ int scanhash_x22i_4way( struct work* work, uint32_t max_nonce,
|
||||
if ( valid_hash( lane_hash, ptarget ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n + lane );
|
||||
submit_lane_solution( work, lane_hash, mythr, lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm256_add_epi32( *noncev,
|
||||
|
@@ -16,19 +16,19 @@ bool register_x22i_algo( algo_gate_t* gate );
|
||||
|
||||
#if defined(X22I_8WAY)
|
||||
|
||||
void x22i_8way_hash( void *state, const void *input );
|
||||
int x22i_8way_hash( void *state, const void *input, int thrid );
|
||||
int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
|
||||
#elif defined(X22I_4WAY)
|
||||
|
||||
void x22i_4way_hash( void *state, const void *input );
|
||||
int x22i_4way_hash( void *state, const void *input, int thrid );
|
||||
int scanhash_x22i_4way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
|
||||
#else
|
||||
|
||||
void x22i_hash( void *state, const void *input );
|
||||
int x22i_hash( void *state, const void *input, int thrid );
|
||||
int scanhash_x22i( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
|
||||
@@ -44,19 +44,19 @@ bool register_x25i_algo( algo_gate_t* gate );
|
||||
|
||||
#if defined(X25X_8WAY)
|
||||
|
||||
void x25x_8way_hash( void *state, const void *input );
|
||||
int x25x_8way_hash( void *state, const void *input, int thrid );
|
||||
int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
|
||||
#elif defined(X25X_4WAY)
|
||||
|
||||
void x25x_4way_hash( void *state, const void *input );
|
||||
int x25x_4way_hash( void *state, const void *input, int thrid );
|
||||
int scanhash_x25x_4way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
|
||||
#else
|
||||
|
||||
void x25x_hash( void *state, const void *input );
|
||||
int x25x_hash( void *state, const void *input, int thrif );
|
||||
int scanhash_x25x( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
|
||||
|
@@ -59,7 +59,7 @@ union _x22i_context_overlay
|
||||
};
|
||||
typedef union _x22i_context_overlay x22i_context_overlay;
|
||||
|
||||
void x22i_hash( void *output, const void *input )
|
||||
int x22i_hash( void *output, const void *input, int thrid )
|
||||
{
|
||||
unsigned char hash[64 * 4] __attribute__((aligned(64))) = {0};
|
||||
unsigned char hash2[65] __attribute__((aligned(64))) = {0};
|
||||
@@ -95,6 +95,8 @@ void x22i_hash( void *output, const void *input )
|
||||
sph_keccak512(&ctx.keccak, (const void*) hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
(const BitSequence*)hash, 64 );
|
||||
@@ -121,6 +123,8 @@ void x22i_hash( void *output, const void *input )
|
||||
sph_echo512_close( &ctx.echo, hash );
|
||||
#endif
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
sph_hamsi512_init(&ctx.hamsi);
|
||||
sph_hamsi512(&ctx.hamsi, (const void*) hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
@@ -143,6 +147,8 @@ void x22i_hash( void *output, const void *input )
|
||||
|
||||
ComputeSingleSWIFFTX((unsigned char*)hash, (unsigned char*)hash2);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
memset(hash, 0, 64);
|
||||
sph_haval256_5_init(&ctx.haval);
|
||||
sph_haval256_5(&ctx.haval,(const void*) hash2, 64);
|
||||
@@ -165,6 +171,8 @@ void x22i_hash( void *output, const void *input )
|
||||
SHA256_Final( (unsigned char*) hash, &ctx.sha256 );
|
||||
|
||||
memcpy(output, hash, 32);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x22i( struct work *work, uint32_t max_nonce,
|
||||
@@ -188,7 +196,7 @@ int scanhash_x22i( struct work *work, uint32_t max_nonce,
|
||||
do
|
||||
{
|
||||
edata[19] = n;
|
||||
x22i_hash( hash64, edata );
|
||||
if ( x22i_hash( hash64, edata, thr_id ) );
|
||||
if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n );
|
||||
|
@@ -94,7 +94,7 @@ union _x25x_8way_ctx_overlay
|
||||
};
|
||||
typedef union _x25x_8way_ctx_overlay x25x_8way_ctx_overlay;
|
||||
|
||||
void x25x_8way_hash( void *output, const void *input )
|
||||
int x25x_8way_hash( void *output, const void *input, int thrid )
|
||||
{
|
||||
uint64_t vhash[8*8] __attribute__ ((aligned (128)));
|
||||
unsigned char hash0[25][64] __attribute__((aligned(64))) = {0};
|
||||
@@ -179,13 +179,15 @@ void x25x_8way_hash( void *output, const void *input )
|
||||
jh512_8way_close( &ctx.jh, vhash );
|
||||
dintrlv_8x64_512( hash0[4], hash1[4], hash2[4], hash3[4],
|
||||
hash4[4], hash5[4], hash6[4], hash7[4], vhash );
|
||||
|
||||
|
||||
keccak512_8way_init( &ctx.keccak );
|
||||
keccak512_8way_update( &ctx.keccak, vhash, 64 );
|
||||
keccak512_8way_close( &ctx.keccak, vhash );
|
||||
dintrlv_8x64_512( hash0[5], hash1[5], hash2[5], hash3[5],
|
||||
hash4[5], hash5[5], hash6[5], hash7[5], vhash );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
|
||||
|
||||
luffa_4way_init( &ctx.luffa, 512 );
|
||||
@@ -261,6 +263,7 @@ void x25x_8way_hash( void *output, const void *input )
|
||||
intrlv_8x64_512( vhash, hash0[10], hash1[10], hash2[10], hash3[10],
|
||||
hash4[10], hash5[10], hash6[10], hash7[10] );
|
||||
|
||||
|
||||
#else
|
||||
|
||||
init_echo( &ctx.echo, 512 );
|
||||
@@ -292,6 +295,8 @@ void x25x_8way_hash( void *output, const void *input )
|
||||
|
||||
#endif
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
hamsi512_8way_init( &ctx.hamsi );
|
||||
hamsi512_8way_update( &ctx.hamsi, vhash, 64 );
|
||||
hamsi512_8way_close( &ctx.hamsi, vhash );
|
||||
@@ -407,6 +412,8 @@ void x25x_8way_hash( void *output, const void *input )
|
||||
sph_tiger (&ctx.tiger, (const void*) hash7[17], 64);
|
||||
sph_tiger_close(&ctx.tiger, (void*) hash7[18]);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
intrlv_2x256( vhash, hash0[18], hash1[18], 256 );
|
||||
LYRA2X_2WAY( vhash, 32, vhash, 32, 1, 4, 4 );
|
||||
dintrlv_2x256( hash0[19], hash1[19], vhash, 256 );
|
||||
@@ -468,6 +475,8 @@ void x25x_8way_hash( void *output, const void *input )
|
||||
laneHash(512, (const BitSequence*)hash6[22], 512, (BitSequence*)hash6[23]);
|
||||
laneHash(512, (const BitSequence*)hash7[22], 512, (BitSequence*)hash7[23]);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
x25x_shuffle( hash0 );
|
||||
x25x_shuffle( hash1 );
|
||||
x25x_shuffle( hash2 );
|
||||
@@ -528,6 +537,8 @@ void x25x_8way_hash( void *output, const void *input )
|
||||
|
||||
blake2s_8way_init( &ctx.blake2s, 32 );
|
||||
blake2s_8way_full_blocks( &ctx.blake2s, output, vhashX, 64*24 );
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
|
||||
@@ -557,7 +568,7 @@ int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
|
||||
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x25x_8way_hash( hash, vdata );
|
||||
if ( x25x_8way_hash( hash, vdata, thr_id ) );
|
||||
|
||||
for ( int lane = 0; lane < 8; lane++ )
|
||||
if ( unlikely( ( hashd7[ lane ] <= targ32 ) && !bench ) )
|
||||
@@ -566,7 +577,7 @@ int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n + lane );
|
||||
submit_lane_solution( work, lane_hash, mythr, lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm512_add_epi32( *noncev,
|
||||
@@ -654,7 +665,7 @@ union _x25x_4way_ctx_overlay
|
||||
};
|
||||
typedef union _x25x_4way_ctx_overlay x25x_4way_ctx_overlay;
|
||||
|
||||
void x25x_4way_hash( void *output, const void *input )
|
||||
int x25x_4way_hash( void *output, const void *input, int thrid )
|
||||
{
|
||||
uint64_t vhash[8*4] __attribute__ ((aligned (128)));
|
||||
unsigned char hash0[25][64] __attribute__((aligned(64))) = {0};
|
||||
@@ -686,6 +697,8 @@ void x25x_4way_hash( void *output, const void *input )
|
||||
jh512_4way_close( &ctx.jh, vhash );
|
||||
dintrlv_4x64_512( hash0[4], hash1[4], hash2[4], hash3[4], vhash );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
keccak512_4way_init( &ctx.keccak );
|
||||
keccak512_4way_update( &ctx.keccak, vhash, 64 );
|
||||
keccak512_4way_close( &ctx.keccak, vhash );
|
||||
@@ -738,6 +751,8 @@ void x25x_4way_hash( void *output, const void *input )
|
||||
|
||||
intrlv_4x64_512( vhash, hash0[10], hash1[10], hash2[10], hash3[10] );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
hamsi512_4way_init( &ctx.hamsi );
|
||||
hamsi512_4way_update( &ctx.hamsi, vhash, 64 );
|
||||
hamsi512_4way_close( &ctx.hamsi, vhash );
|
||||
@@ -819,6 +834,8 @@ void x25x_4way_hash( void *output, const void *input )
|
||||
LYRA2RE( (void*)hash3[19], 32, (const void*)hash3[18], 32,
|
||||
(const void*)hash3[18], 32, 1, 4, 4 );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
sph_gost512_init(&ctx.gost);
|
||||
sph_gost512 (&ctx.gost, (const void*) hash0[19], 64);
|
||||
sph_gost512_close(&ctx.gost, (void*) hash0[20]);
|
||||
@@ -850,6 +867,8 @@ void x25x_4way_hash( void *output, const void *input )
|
||||
laneHash(512, (const BitSequence*)hash2[22], 512, (BitSequence*)hash2[23]);
|
||||
laneHash(512, (const BitSequence*)hash3[22], 512, (BitSequence*)hash3[23]);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
x25x_shuffle( hash0 );
|
||||
x25x_shuffle( hash1 );
|
||||
x25x_shuffle( hash2 );
|
||||
@@ -882,6 +901,8 @@ void x25x_4way_hash( void *output, const void *input )
|
||||
|
||||
blake2s_4way_init( &ctx.blake2s, 32 );
|
||||
blake2s_4way_full_blocks( &ctx.blake2s, output, vhashX, 64*24 );
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x25x_4way( struct work* work, uint32_t max_nonce,
|
||||
@@ -910,8 +931,7 @@ int scanhash_x25x_4way( struct work* work, uint32_t max_nonce,
|
||||
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x25x_4way_hash( hash, vdata );
|
||||
|
||||
if ( x25x_4way_hash( hash, vdata, thr_id ) )
|
||||
for ( int lane = 0; lane < 4; lane++ )
|
||||
if ( unlikely( hashd7[ lane ] <= targ32 && !bench ) )
|
||||
{
|
||||
@@ -919,7 +939,7 @@ int scanhash_x25x_4way( struct work* work, uint32_t max_nonce,
|
||||
if ( valid_hash( lane_hash, ptarget ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n + lane );
|
||||
submit_lane_solution( work, lane_hash, mythr, lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm256_add_epi32( *noncev,
|
||||
|
@@ -64,7 +64,7 @@ union _x25x_context_overlay
|
||||
};
|
||||
typedef union _x25x_context_overlay x25x_context_overlay;
|
||||
|
||||
void x25x_hash( void *output, const void *input )
|
||||
int x25x_hash( void *output, const void *input, int thrid )
|
||||
{
|
||||
unsigned char hash[25][64] __attribute__((aligned(64))) = {0};
|
||||
x25x_context_overlay ctx;
|
||||
@@ -99,6 +99,8 @@ void x25x_hash( void *output, const void *input )
|
||||
sph_keccak512(&ctx.keccak, (const void*) &hash[4], 64);
|
||||
sph_keccak512_close(&ctx.keccak, &hash[5]);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)&hash[6],
|
||||
(const BitSequence*)&hash[5], 64 );
|
||||
@@ -125,7 +127,9 @@ void x25x_hash( void *output, const void *input )
|
||||
sph_echo512_close( &ctx.echo, &hash[10] );
|
||||
#endif
|
||||
|
||||
sph_hamsi512_init(&ctx.hamsi);
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
sph_hamsi512_init(&ctx.hamsi);
|
||||
sph_hamsi512(&ctx.hamsi, (const void*) &hash[10], 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, &hash[11]);
|
||||
|
||||
@@ -151,6 +155,8 @@ void x25x_hash( void *output, const void *input )
|
||||
sph_haval256_5(&ctx.haval,(const void*) &hash[16], 64);
|
||||
sph_haval256_5_close(&ctx.haval,&hash[17]);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
sph_tiger_init(&ctx.tiger);
|
||||
sph_tiger (&ctx.tiger, (const void*) &hash[17], 64);
|
||||
sph_tiger_close(&ctx.tiger, (void*) &hash[18]);
|
||||
@@ -199,6 +205,8 @@ void x25x_hash( void *output, const void *input )
|
||||
blake2s_simple( (uint8_t*)&hash[24], (const void*)(&hash[0]), 64 * 24 );
|
||||
|
||||
memcpy(output, &hash[24], 32);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x25x( struct work *work, uint32_t max_nonce,
|
||||
@@ -222,7 +230,7 @@ int scanhash_x25x( struct work *work, uint32_t max_nonce,
|
||||
do
|
||||
{
|
||||
edata[19] = n;
|
||||
x25x_hash( hash64, edata );
|
||||
if ( x25x_hash( hash64, edata, thr_id ) );
|
||||
if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n );
|
||||
|
@@ -79,7 +79,7 @@ int main(int argc, const char * const *argv)
|
||||
for (i = 0; i < sizeof(src); i++)
|
||||
src.u8[i] = i * 3;
|
||||
|
||||
if (yespower_tls(src.u8, sizeof(src), ¶ms, &dst)) {
|
||||
if (!yespower_tls(src.u8, sizeof(src), ¶ms, &dst)) {
|
||||
puts("FAILED");
|
||||
return 1;
|
||||
}
|
||||
|
@@ -53,7 +53,7 @@ int scanhash_yespower_r8g( struct work *work, uint32_t max_nonce,
|
||||
|
||||
do {
|
||||
yespower_tls( (unsigned char *)endiandata, params.perslen,
|
||||
¶ms, (yespower_binary_t*)hash );
|
||||
¶ms, (yespower_binary_t*)hash, thr_id );
|
||||
|
||||
if unlikely( valid_hash( hash, ptarget ) && !opt_benchmark )
|
||||
{
|
||||
|
@@ -194,11 +194,13 @@ static int free_region(yespower_region_t *region)
|
||||
#define restrict
|
||||
#endif
|
||||
|
||||
/*
|
||||
#ifdef __GNUC__
|
||||
#define unlikely(exp) __builtin_expect(exp, 0)
|
||||
#else
|
||||
#define unlikely(exp) (exp)
|
||||
#endif
|
||||
*/
|
||||
|
||||
#ifdef __SSE__
|
||||
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
|
||||
@@ -1113,7 +1115,7 @@ static void smix(uint8_t *B, size_t r, uint32_t N,
|
||||
int yespower_b2b(yespower_local_t *local,
|
||||
const uint8_t *src, size_t srclen,
|
||||
const yespower_params_t *params,
|
||||
yespower_binary_t *dst)
|
||||
yespower_binary_t *dst, int thrid )
|
||||
{
|
||||
uint32_t N = params->N;
|
||||
uint32_t r = params->r;
|
||||
@@ -1168,17 +1170,25 @@ int yespower_b2b(yespower_local_t *local,
|
||||
srclen = 0;
|
||||
}
|
||||
|
||||
if ( work_restart[thrid].restart ) return false;

pbkdf2_blake2b_yp(init_hash, sizeof(init_hash), src, srclen, 1, B, 128);

if ( work_restart[thrid].restart ) return false;

memcpy(init_hash, B, sizeof(init_hash));
smix_1_0(B, r, N, V, XY, &ctx);

if ( work_restart[thrid].restart ) return false;

hmac_blake2b_yp_hash((uint8_t *)dst, B + B_size - 64, 64, init_hash, sizeof(init_hash));

/* Success! */
return 0;
return 1;

fail:
memset(dst, 0xff, sizeof(*dst));
return -1;
return 0;
}

/**
@@ -1189,7 +1199,7 @@ fail:
* Return 0 on success; or -1 on error.
*/
int yespower_b2b_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst)
const yespower_params_t *params, yespower_binary_t *dst, int thrid )
{
static __thread int initialized = 0;
static __thread yespower_local_t local;
@@ -1199,7 +1209,7 @@ int yespower_b2b_tls(const uint8_t *src, size_t srclen,
initialized = 1;
}

return yespower_b2b(&local, src, srclen, params, dst);
return yespower_b2b(&local, src, srclen, params, dst, thrid);
}
/*
int yespower_init_local(yespower_local_t *local)

@@ -34,9 +34,10 @@ static yespower_params_t yespower_params;

// YESPOWER

void yespower_hash( const char *input, char *output, uint32_t len )
int yespower_hash( const char *input, char *output, uint32_t len, int thrid )
{
yespower_tls( input, len, &yespower_params, (yespower_binary_t*)output );
return yespower_tls( input, len, &yespower_params,
(yespower_binary_t*)output, thrid );
}

int scanhash_yespower( struct work *work, uint32_t max_nonce,
@@ -55,7 +56,7 @@ int scanhash_yespower( struct work *work, uint32_t max_nonce,
be32enc( &endiandata[k], pdata[k] );
endiandata[19] = n;
do {
yespower_hash( (char*)endiandata, (char*)vhash, 80 );
if ( yespower_hash( (char*)endiandata, (char*)vhash, 80, thr_id ) )
if unlikely( valid_hash( vhash, ptarget ) && !opt_benchmark )
{
be32enc( pdata+19, n );
@@ -70,9 +71,9 @@ int scanhash_yespower( struct work *work, uint32_t max_nonce,

// YESPOWER-B2B

void yespower_b2b_hash( const char *input, char *output, uint32_t len )
int yespower_b2b_hash( const char *input, char *output, uint32_t len, int thrid )
{
yespower_b2b_tls( input, len, &yespower_params, (yespower_binary_t*)output );
return yespower_b2b_tls( input, len, &yespower_params, (yespower_binary_t*)output, thrid );
}

int scanhash_yespower_b2b( struct work *work, uint32_t max_nonce,
@@ -91,7 +92,7 @@ int scanhash_yespower_b2b( struct work *work, uint32_t max_nonce,
be32enc( &endiandata[k], pdata[k] );
endiandata[19] = n;
do {
yespower_b2b_hash( (char*) endiandata, (char*) vhash, 80 );
if (yespower_b2b_hash( (char*) endiandata, (char*) vhash, 80, thr_id ) )
if unlikely( valid_hash( vhash, ptarget ) && !opt_benchmark )
{
be32enc( pdata+19, n );

@@ -107,11 +107,13 @@
#define restrict
#endif

/*
#ifdef __GNUC__
#define unlikely(exp) __builtin_expect(exp, 0)
#else
#define unlikely(exp) (exp)
#endif
*/

#ifdef __SSE__
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
@@ -1023,7 +1025,7 @@ static void smix(uint8_t *B, size_t r, uint32_t N,
int yespower(yespower_local_t *local,
const uint8_t *src, size_t srclen,
const yespower_params_t *params,
yespower_binary_t *dst)
yespower_binary_t *dst, int thrid )
{
yespower_version_t version = params->version;
uint32_t N = params->N;
@@ -1077,15 +1079,24 @@ int yespower(yespower_local_t *local,
if (version == YESPOWER_0_5) {
PBKDF2_SHA256(sha256, sizeof(sha256), src, srclen, 1,
B, B_size);
memcpy(sha256, B, sizeof(sha256));

if ( work_restart[thrid].restart ) return false;

memcpy(sha256, B, sizeof(sha256));
smix(B, r, N, V, XY, &ctx);
PBKDF2_SHA256(sha256, sizeof(sha256), B, B_size, 1,

if ( work_restart[thrid].restart ) return false;

PBKDF2_SHA256(sha256, sizeof(sha256), B, B_size, 1,
(uint8_t *)dst, sizeof(*dst));

if (pers) {
HMAC_SHA256_Buf(dst, sizeof(*dst), pers, perslen,
sha256);
SHA256_Buf(sha256, sizeof(sha256), (uint8_t *)dst);

if ( work_restart[thrid].restart ) return false;

SHA256_Buf(sha256, sizeof(sha256), (uint8_t *)dst);
}
} else {
ctx.S2 = S + 2 * Swidth_to_Sbytes1(Swidth);
@@ -1106,7 +1117,7 @@ int yespower(yespower_local_t *local,
}

/* Success! */
return 0;
return 1;
}

/**
@@ -1117,7 +1128,7 @@ int yespower(yespower_local_t *local,
* Return 0 on success; or -1 on error.
*/
int yespower_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst)
const yespower_params_t *params, yespower_binary_t *dst, int thrid )
{
static __thread int initialized = 0;
static __thread yespower_local_t local;
@@ -1128,7 +1139,7 @@ int yespower_tls(const uint8_t *src, size_t srclen,
initialized = 1;
}

return yespower(&local, src, srclen, params, dst);
return yespower( &local, src, srclen, params, dst, thrid );
}

int yespower_init_local(yespower_local_t *local)

@@ -32,6 +32,7 @@

#include <stdint.h>
#include <stdlib.h> /* for size_t */
#include "miner.h"

#ifdef __cplusplus
extern "C" {
@@ -109,11 +110,11 @@ extern int yespower_free_local(yespower_local_t *local);
*/
extern int yespower(yespower_local_t *local,
const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
const yespower_params_t *params, yespower_binary_t *dst, int thrid);

extern int yespower_b2b(yespower_local_t *local,
const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
const yespower_params_t *params, yespower_binary_t *dst, int thrid );

/**
* yespower_tls(src, srclen, params, dst):
@@ -125,10 +126,10 @@ extern int yespower_b2b(yespower_local_t *local,
* MT-safe as long as dst is local to the thread.
*/
extern int yespower_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
const yespower_params_t *params, yespower_binary_t *dst, int thr_id);

extern int yespower_b2b_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
const yespower_params_t *params, yespower_binary_t *dst, int thr_id);

#ifdef __cplusplus
}
configure (vendored)
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.12.4.6.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.12.6.1.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -577,8 +577,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='cpuminer-opt'
PACKAGE_TARNAME='cpuminer-opt'
PACKAGE_VERSION='3.12.4.6'
PACKAGE_STRING='cpuminer-opt 3.12.4.6'
PACKAGE_VERSION='3.12.6.1'
PACKAGE_STRING='cpuminer-opt 3.12.6.1'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''

@@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures cpuminer-opt 3.12.4.6 to adapt to many kinds of systems.
\`configure' configures cpuminer-opt 3.12.6.1 to adapt to many kinds of systems.

Usage: $0 [OPTION]... [VAR=VALUE]...

@@ -1404,7 +1404,7 @@ fi

if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of cpuminer-opt 3.12.4.6:";;
short | recursive ) echo "Configuration of cpuminer-opt 3.12.6.1:";;
esac
cat <<\_ACEOF

@@ -1509,7 +1509,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
cpuminer-opt configure 3.12.4.6
cpuminer-opt configure 3.12.6.1
generated by GNU Autoconf 2.69

Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.

It was created by cpuminer-opt $as_me 3.12.4.6, which was
It was created by cpuminer-opt $as_me 3.12.6.1, which was
generated by GNU Autoconf 2.69. Invocation command line was

$ $0 $@
@@ -2993,7 +2993,7 @@ fi

# Define the identity of the package.
PACKAGE='cpuminer-opt'
VERSION='3.12.4.6'
VERSION='3.12.6.1'

cat >>confdefs.h <<_ACEOF
@@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by cpuminer-opt $as_me 3.12.4.6, which was
This file was extended by cpuminer-opt $as_me 3.12.6.1, which was
generated by GNU Autoconf 2.69. Invocation command line was

CONFIG_FILES = $CONFIG_FILES
@@ -6756,7 +6756,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
cpuminer-opt config.status 3.12.4.6
cpuminer-opt config.status 3.12.6.1
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"

@@ -1,4 +1,4 @@
AC_INIT([cpuminer-opt], [3.12.4.6])
AC_INIT([cpuminer-opt], [3.12.6.1])

AC_PREREQ([2.59c])
AC_CANONICAL_SYSTEM
cpu-miner.c
@@ -102,6 +102,7 @@ static int opt_fail_pause = 10;
static int opt_time_limit = 0;
int opt_timeout = 300;
static int opt_scantime = 5;
const int min_scantime = 1;
//static const bool opt_time = true;
enum algos opt_algo = ALGO_NULL;
char* opt_param_key = NULL;
@@ -160,7 +161,7 @@ uint32_t rejected_share_count = 0;
uint32_t stale_share_count = 0;
uint32_t solved_block_count = 0;
double *thr_hashrates;
double global_hashrate = 0;
double global_hashrate = 0.;
double stratum_diff = 0.;
double net_diff = 0.;
double net_hashrate = 0.;
@@ -194,6 +195,8 @@ static uint64_t stale_sum = 0;
static uint64_t reject_sum = 0;
static double norm_diff_sum = 0.;
static uint32_t last_block_height = 0;
static double highest_share = 0; // all shares include discard and reject
static double lowest_share = 9e99; // lowest accepted
//static bool new_job = false;
static double last_targetdiff = 0.;
#if !(defined(__WINDOWS__) || defined(_WIN64) || defined(_WIN32))
@@ -216,6 +219,20 @@ char* lp_id;

static void workio_cmd_free(struct workio_cmd *wc);

static void format_affinity_map( char *map_str, uint64_t map )
{
int n = num_cpus < 64 ? num_cpus : 64;
int i;

for ( i = 0; i < n; i++ )
{
if ( map & 1 ) map_str[i] = '!';
else map_str[i] = '.';
map >>= 1;
}
memset( &map_str[i], 0, 64 - i );
}

#ifdef __linux /* Linux specific policy and affinity management */
#include <sched.h>

@@ -436,9 +453,6 @@ static bool work_decode( const json_t *val, struct work *work )
if ( !allow_mininginfo )
net_diff = algo_gate.calc_network_diff( work );
work->targetdiff = target_to_diff( work->target );
// for api stats, on longpoll pools
// This needs cleanup, stratum_diff doesn't apply to solo mining
// and targetdiff is redundant, same as net_diff.
stratum_diff = last_targetdiff = work->targetdiff;
work->sharediff = 0;
algo_gate.decode_extra_data( work, &net_blocks );
@@ -490,7 +504,7 @@ static bool get_mininginfo( CURL *curl, struct work *work )
net_blocks = json_integer_value( key );

if ( opt_debug )
applog(LOG_INFO," Mining info: diff %.5g, net_hashrate %f, height %d",
applog(LOG_INFO,"Mining info: diff %.5g, net_hashrate %f, height %d",
net_diff, net_hashrate, net_blocks );

if ( !work->height )
@@ -895,6 +909,7 @@ static inline void sprintf_et( char *str, int seconds )
}

const double exp32 = 4294967296.; // 2**32
const double exp48 = 4294967296. * 65536.; // 2**48
const double exp64 = 4294967296. * 4294967296.; // 2**64

struct share_stats_t
@@ -939,6 +954,7 @@ void report_summary_log( bool force )
uint64_t accepts = accept_sum; accept_sum = 0;
uint64_t rejects = reject_sum; reject_sum = 0;
uint64_t stales = stale_sum; stale_sum = 0;

memcpy( &start_time, &five_min_start, sizeof start_time );
memcpy( &five_min_start, &now, sizeof now );

@@ -949,12 +965,10 @@ void report_summary_log( bool force )

double share_time = (double)et.tv_sec + (double)et.tv_usec / 1e6;
double ghrate = global_hashrate;

double shrate = share_time == 0. ? 0. : exp32 * last_targetdiff
* (double)(accepts) / share_time;
double sess_hrate = uptime.tv_sec == 0. ? 0. : exp32 * norm_diff_sum
/ (double)uptime.tv_sec;

double submit_rate = share_time == 0. ? 0. : (double)submits*60. / share_time;
char shr_units[4] = {0};
char ghr_units[4] = {0};
@@ -980,7 +994,7 @@ void report_summary_log( bool force )

if ( accepted_share_count < submitted_share_count )
{
double lost_ghrate = uptime.tv_sec == 0. ? 0.
double lost_ghrate = uptime.tv_sec == 0 ? 0.
: exp32 * last_targetdiff
* (double)(submitted_share_count - accepted_share_count )
/ (double)uptime.tv_sec;
@@ -1006,8 +1020,11 @@ void report_summary_log( bool force )
applog2( LOG_INFO,"Rejected %6d %6d",
rejects, rejected_share_count );
if ( solved_block_count )
applog2( LOG_INFO,"Blocks solved %6d",
applog2( LOG_INFO,"Blocks Solved %6d",
solved_block_count );
applog2( LOG_INFO, "Hi/Lo Share Diff %.5g / %.5g",
highest_share, lowest_share );

}

bool lowdiff_debug = false;

@@ -1062,6 +1079,10 @@ static int share_result( int result, struct work *work,
if ( likely( result ) )
{
accepted_share_count++;
if ( my_stats.share_diff < lowest_share )
lowest_share = my_stats.share_diff;
if ( my_stats.share_diff > highest_share )
highest_share = my_stats.share_diff;
sprintf( sres, "S%d", stale_share_count );
sprintf( rres, "R%d", rejected_share_count );
if unlikely( ( my_stats.net_diff > 0. )
@@ -1082,7 +1103,8 @@ static int share_result( int result, struct work *work,
{
sprintf( ares, "A%d", accepted_share_count );
sprintf( bres, "B%d", solved_block_count );
if ( work ) stale = work->stale;
stale = work ? work->data[ algo_gate.ntime_index ]
!= g_work.data[ algo_gate.ntime_index ] : false;
if ( reason ) stale = stale || strstr( reason, "Invalid job id" );
if ( stale )
{
@@ -1095,7 +1117,7 @@ static int share_result( int result, struct work *work,
rejected_share_count++;
sprintf( sres, "S%d", stale_share_count );
sprintf( rres, "Rejected %d" , rejected_share_count );
lowdiff_debug = true;
// lowdiff_debug = true;
}
}

@@ -1146,37 +1168,34 @@ static int share_result( int result, struct work *work,
my_stats.share_diff, share_ratio, bcol, stratum.block_height,
scol, my_stats.job_id );
else
{
uint64_t height = work ? work->height : last_block_height;
applog2( LOG_INFO, "Diff %.5g (%.3g), %sBlock %d",
my_stats.share_diff, share_ratio, bcol, stratum.block_height );
my_stats.share_diff, share_ratio, bcol, height );
}
}

if ( unlikely( reason && !result ) )
if ( unlikely( opt_debug || !( opt_quiet || result || stale ) ) )
{
if ( !( opt_quiet || stale ) )
{
uint32_t str[8];

if ( reason )
applog( LOG_WARNING, "Reject reason: %s", reason );

uint32_t str1[8], str2[8];
char str3[65];

// display share hash and target for troubleshooting
diff_to_target( str1, my_stats.share_diff );
for ( int i = 0; i < 8; i++ )
be32enc( str2 + i, str1[7 - i] );
bin2hex( str3, (unsigned char*)str2, 12 );
applog2( LOG_INFO, "Share diff: %.5g, Hash: %s...",
my_stats.share_diff, str3 );

diff_to_target( str1, my_stats.target_diff );
for ( int i = 0; i < 8; i++ )
be32enc( str2 + i, str1[7 - i] );
bin2hex( str3, (unsigned char*)str2, 12 );
applog2( LOG_INFO, "Target diff: %.5g, Targ: %s...",
my_stats.target_diff, str3 );

// display share hash and target for troubleshooting
diff_to_target( str, my_stats.share_diff );
applog2( LOG_INFO, "Hash: %08x%08x%08x%08x...",
str[7], str[6], str[5], str[4] );
uint32_t *targ;
if ( work )
targ = work->target;
else
{
diff_to_target( str, my_stats.target_diff );
targ = &str[0];
}

if ( unlikely( opt_reset_on_stale && stale ) )
stratum_need_reset = true;
applog2( LOG_INFO, "Target: %08x%08x%08x%08x...",
targ[7], targ[6], targ[5], targ[4] );
}
return 1;
}
@@ -1322,30 +1341,6 @@ char* std_malloc_txs_request( struct work *work )

static bool submit_upstream_work( CURL *curl, struct work *work )
{
work->stale = false;
// Submit anyway, discarding here messes up the stats
if ( !have_stratum && allow_mininginfo )
{
struct work mining_info;
get_mininginfo( curl, &mining_info );
if ( work->height < mining_info.height )
{
if ( !opt_quiet )
applog( LOG_WARNING, "Block %u already solved, current block %d",
work->height, mining_info.height );
work->stale = true;
}
}

/* pass if the previous hash is not the current previous hash */
if ( !( work->stale || submit_old )
&& memcmp( &work->data[1], &g_work.data[1], 32 ) )
{
if ( !opt_quiet )
applog( LOG_WARNING, "Stale work detected, submitting anyway" );
work->stale = true;
}

if ( have_stratum )
{
char req[JSON_BUF_LEN];
@@ -1482,8 +1477,9 @@ start:
if ( work->height > last_block_height )
{
last_block_height = work->height;
applog( LOG_BLUE, "New block %d, net diff %.5g, target diff %.5g",
work->height, net_diff, work->targetdiff );
applog( LOG_BLUE, "New Block %d, Net Diff %.5g, Target Diff %.5g, Ntime %08x",
work->height, net_diff, work->targetdiff,
bswap_32( work->data[ algo_gate.ntime_index ] ) );

if ( !opt_quiet && net_diff && net_hashrate )
{
@@ -1509,33 +1505,15 @@ start:
scale_hash_for_display ( &miner_hr, miner_hr_units );
scale_hash_for_display ( &net_hr, net_hr_units );
applog2( LOG_INFO,
"Miner TTF @ %.2f %sh/s %s, net TTF @ %.2f %sh/s %s",
miner_hr, miner_hr_units, miner_ttf,
net_hr, net_hr_units, net_ttf );
"Miner TTF @ %.2f %sh/s %s, Net TTF @ %.2f %sh/s %s",
miner_hr, miner_hr_units, miner_ttf,
net_hr, net_hr_units, net_ttf );
}
}
} // work->height > last_block_height
else if ( memcmp( &work->data[1], &g_work.data[1], 32 ) )
{
applog( LOG_BLUE, "New work" );
if ( opt_debug )
{
uint32_t *old = g_work.data;
uint32_t *new = work->data;
printf("old: %08x %08x %08x %08x %08x %08x %08x %08x/n",
old[0],old[1],old[2],old[3],old[4],old[5],old[6],old[7]);
printf(" %08x %08x %08x %08x %08x %08x %08x %08x/n",
old[8],old[9],old[10],old[11],old[12],old[13],old[14],old[15]);
printf(" %08x %08x %08x %08x/n",
old[16],old[17],old[18],old[19]);
printf("new: %08x %08x %08x %08x %08x %08x %08x %08x/n",
new[0],new[1],new[2],new[3],new[4],new[5],new[6],new[7]);
printf(" %08x %08x %08x %08x %08x %08x %08x %08x/n",
new[8],new[9],new[10],new[11],new[12],new[13],new[14],new[15]);
printf(" %08x %08x %08x %08x/n",
new[16],new[17],new[18],new[19]);
}
}
applog( LOG_BLUE, "New Work, Ntime %08lx",
bswap_32( work->data[ algo_gate.ntime_index ] ) );
} // rc

return rc;
@@ -1588,8 +1566,6 @@ static bool workio_get_work( struct workio_cmd *wc, CURL *curl )
if ( !tq_push(wc->thr->q, ret_work ) )
free( ret_work );

report_summary_log( false );

return true;
}

@@ -1732,17 +1708,13 @@ err_out:
// double precision floating point with 15 decimal digits precision.
static inline double u256_to_double( const uint64_t *u )
{
const double exp64 = 4294967296.0 * 4294967296.0; // 2**64
return ( ( u[3] * exp64 + u[2] ) * exp64 + u[1] ) * exp64 + u[0];
}
*/

static void update_submit_stats( struct work *work, const void *hash )
{
// Workaround until problems with target_to_diff are resolved.
work->sharediff = work->targetdiff * (double)( ((uint64_t*)hash)[3] )
/ (double)( ((uint64_t*)work->target)[3] );
// work->sharediff = likely( hash ) ? target_to_diff( (uint32_t*)hash ) : 0.;
// work->sharediff = hash ? exp32 / ( (uint64_t*)hash )[3] : 0.;

pthread_mutex_lock( &stats_lock );

@@ -1760,49 +1732,25 @@ static void update_submit_stats( struct work *work, const void *hash )
pthread_mutex_unlock( &stats_lock );
}

//deprecated
void work_set_target_ratio( struct work* work, const void *hash )
{
submitted_share_count++;
work->sharediff = work->targetdiff * (double)( ((uint64_t*)hash)[3] )
/ (double)( ((uint64_t*)work->target)[3] );
// work->sharediff = likely( hash ) ? target_to_diff( (uint32_t*)hash ) : 0.;

// collect some share stats
// Frequent share submission combined with high latency can cause
// shares to be submitted faster than they are acked. If severe enough
// it can overflow the queue and overwrite stats for a share.
pthread_mutex_lock( &stats_lock );

share_stats[ s_put_ptr ].share_count = submitted_share_count;
gettimeofday( &share_stats[ s_put_ptr ].submit_time, NULL );
share_stats[ s_put_ptr ].share_diff = work->sharediff;
share_stats[ s_put_ptr ].net_diff = net_diff;
share_stats[ s_put_ptr ].stratum_diff = stratum_diff;
share_stats[ s_put_ptr ].target_diff = work->targetdiff;
if ( have_stratum )
strncpy( share_stats[ s_put_ptr ].job_id, work->job_id, 30 );
s_put_ptr = stats_ptr_incr( s_put_ptr );

pthread_mutex_unlock( &stats_lock );
}

bool submit_solution( struct work *work, const void *hash,
struct thr_info *thr )
{
if ( likely( submit_work( thr, work ) ) )
work->sharediff = hash ? exp32 / ( (uint64_t*)hash )[3] : 0.;

if ( likely( submit_work( thr, work ) ) )
{
update_submit_stats( work, hash );

if ( !opt_quiet )
{
if ( have_stratum )
applog( LOG_NOTICE, "%d Submit diff %.5g, block %d, job %s",
submitted_share_count, work->sharediff, work->height,
work->job_id );
else
applog( LOG_NOTICE, "%d Submit diff %.5g, block %d",
submitted_share_count, work->sharediff, work->height );
if ( have_stratum )
applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Job %s",
submitted_share_count, work->sharediff, work->height,
work->job_id );
else
applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Ntime %08x",
submitted_share_count, work->sharediff, work->height,
work->data[ algo_gate.ntime_index ] );
}

if ( unlikely( lowdiff_debug ) )
@@ -1825,19 +1773,22 @@ bool submit_solution( struct work *work, const void *hash,
bool submit_lane_solution( struct work *work, const void *hash,
struct thr_info *thr, const int lane )
{
if ( likely( submit_work( thr, work ) ) )
{
work->sharediff = hash ? exp32 / ( (uint64_t*)hash )[3] : 0.;

if ( likely( submit_work( thr, work ) ) )
{
update_submit_stats( work, hash );

if ( !opt_quiet )
{
if ( have_stratum )
applog( LOG_NOTICE, "%d Submit diff %.5g, block %d, job %s",
submitted_share_count, work->sharediff, work->height,
work->job_id );
else
applog( LOG_NOTICE, "%d Submit diff %.5g, block %d",
submitted_share_count, work->sharediff, work->height );
if ( have_stratum )
applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Job %s",
submitted_share_count, work->sharediff, work->height,
work->job_id );
else
applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Ntime %08x",
submitted_share_count, work->sharediff, work->height,
work->data[ algo_gate.ntime_index ] );
}

if ( lowdiff_debug )
@@ -1857,49 +1808,6 @@ bool submit_lane_solution( struct work *work, const void *hash,
return false;
}

// The new way, replaces fulltest and submit_solution
bool test_hash_and_submit( struct work *work, const void *hash,
struct thr_info *thr )
{
work->sharediff = work->targetdiff * (double)( ((uint64_t*)hash)[3] )
/ (double)( ((uint64_t*)work->target)[3] );

if ( work->sharediff >= work->targetdiff )
{
if ( likely( submit_work( thr, work ) ) )
{
update_submit_stats( work, hash );

if ( !opt_quiet )
{
if ( have_stratum )
applog( LOG_NOTICE, "%d Submit diff %.5g, block %d, job %s",
submitted_share_count, work->sharediff, work->height,
work->job_id );
else
applog( LOG_NOTICE, "%d Submit diff %.5g, block %d",
submitted_share_count, work->sharediff, work->height );
}

if ( lowdiff_debug )
{
uint32_t* h = (uint32_t*)hash;
uint32_t* t = (uint32_t*)work->target;
applog(LOG_INFO,"Hash[7:0]: %08x %08x %08x %08x %08x %08x %08x %08x",
h[7],h[6],h[5],h[4],h[3],h[2],h[1],h[0]);
applog(LOG_INFO,"Targ[7:0]: %08x %08x %08x %08x %08x %08x %08x %08x",
t[7],t[6],t[5],t[4],t[3],t[2],t[1],t[0]);
}
return true;
}
else
applog( LOG_WARNING, "%d failed to submit share, thread %d",
submitted_share_count, thr->id );
}
return false;
}

static bool wanna_mine(int thr_id)
{
bool state = true;
@@ -2049,6 +1957,8 @@ static void *miner_thread( void *userdata )
if (!opt_benchmark && opt_priority == 0)
{
setpriority(PRIO_PROCESS, 0, 19);
if ( !thr_id && !opt_quiet )
applog(LOG_INFO, "Miner thread priority %d (nice 19)", opt_priority );
drop_policy();
}
else
@@ -2065,9 +1975,9 @@ static void *miner_thread( void *userdata )
case 4: prio = -10; break;
case 5: prio = -15;
}
if (opt_debug)
applog(LOG_DEBUG, "Thread %d priority %d (nice %d)", thr_id,
opt_priority, prio );
if ( !( thr_id || opt_quiet ) )
applog( LOG_INFO, "Miner thread priority %d (nice %d)",
opt_priority, prio );
#endif
setpriority(PRIO_PROCESS, 0, prio);
if ( opt_priority == 0 )
@@ -2082,7 +1992,7 @@ static void *miner_thread( void *userdata )
{
affine_to_cpu_mask( thr_id, (uint128_t)1 << (thr_id % num_cpus) );
if ( opt_debug )
applog( LOG_DEBUG, "Binding thread %d to cpu %d.",
applog( LOG_INFO, "Binding thread %d to cpu %d.",
thr_id, thr_id % num_cpus,
u128_hi64( (uint128_t)1 << (thr_id % num_cpus) ),
u128_lo64( (uint128_t)1 << (thr_id % num_cpus) ) );
@@ -2103,14 +2013,14 @@ static void *miner_thread( void *userdata )
{
#if AFFINITY_USES_UINT128
if ( num_cpus > 64 )
applog( LOG_DEBUG, "Binding thread %d to mask %016llx %016llx",
applog( LOG_INFO, "Binding thread %d to mask %016llx %016llx",
thr_id, u128_hi64( opt_affinity ),
u128_lo64( opt_affinity ) );
else
applog( LOG_DEBUG, "Binding thread %d to mask %016llx",
applog( LOG_INFO, "Binding thread %d to mask %016llx",
thr_id, opt_affinity );
#else
applog( LOG_DEBUG, "Binding thread %d to mask %016llx",
applog( LOG_INFO, "Binding thread %d to mask %016llx",
thr_id, opt_affinity );
#endif
}
@@ -2146,13 +2056,13 @@ static void *miner_thread( void *userdata )
}
else
{
int min_scantime = have_longpoll ? LP_SCANTIME : opt_scantime;
int scantime = have_longpoll ? LP_SCANTIME : opt_scantime;
pthread_mutex_lock( &g_work_lock );

if ( time(NULL) - g_work_time >= min_scantime
if ( time(NULL) - g_work_time >= scantime
|| *nonceptr >= end_nonce )
{
if ( unlikely( !get_work( mythr, &g_work ) ) )
if ( unlikely( !get_work( mythr, &g_work ) ) )
{
applog( LOG_ERR, "work retrieval failed, exiting "
"mining thread %d", thr_id );
@@ -2160,7 +2070,8 @@ static void *miner_thread( void *userdata )
goto out;
}
g_work_time = time(NULL);
}
restart_threads();
}
algo_gate.get_new_work( &work, &g_work, thr_id, &end_nonce );

pthread_mutex_unlock( &g_work_lock );
@@ -2627,10 +2538,8 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
algo_gate.set_work_data_endian( g_work );
g_work->height = sctx->block_height;
g_work->targetdiff = sctx->job.diff
/ ( opt_target_factor * opt_diff_factor );
/ ( opt_target_factor * opt_diff_factor );
diff_to_target( g_work->target, g_work->targetdiff );
// work_set_target( g_work, sctx->job.diff
// / ( opt_target_factor * opt_diff_factor ) );

pthread_mutex_unlock( &sctx->work_lock );

@@ -2654,13 +2563,13 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
pthread_mutex_unlock( &stats_lock );

if ( stratum_diff != sctx->job.diff )
applog( LOG_BLUE, "New stratum diff %g, block %d, job %s",
applog( LOG_BLUE, "New Diff %g, Block %d, Job %s",
sctx->job.diff, sctx->block_height, g_work->job_id );
else if ( last_block_height != sctx->block_height )
applog( LOG_BLUE, "New block %d, job %s",
sctx->block_height, g_work->job_id );
applog( LOG_BLUE, "New Block %d, Job %s",
sctx->block_height, g_work->job_id );
else if ( g_work->job_id )
applog( LOG_BLUE,"New job %s", g_work->job_id );
applog( LOG_BLUE,"New Job %s", g_work->job_id );

// Update data and calculate new estimates.
if ( ( stratum_diff != sctx->job.diff )
@@ -2677,7 +2586,7 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
if ( !opt_quiet )
{
applog2( LOG_INFO, "Diff: Net %.5g, Stratum %.5g, Target %.5g",
net_diff, stratum_diff, last_targetdiff );
net_diff, stratum_diff, g_work->targetdiff );

if ( likely( hr > 0. ) )
{
@@ -2686,9 +2595,9 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
char share_ttf[32];

sprintf_et( block_ttf, ( net_diff * exp32 ) / hr );
sprintf_et( share_ttf, last_targetdiff * exp32 / hr );
sprintf_et( share_ttf, g_work->targetdiff * exp32 / hr );
scale_hash_for_display ( &hr, hr_units );
applog2( LOG_INFO, "TTF @ %.2f %sh/s: block %s, share %s",
applog2( LOG_INFO, "TTF @ %.2f %sh/s: Block %s, Share %s",
hr, hr_units, block_ttf, share_ttf );

if ( !multipool && last_block_height > session_first_block )
@@ -2702,13 +2611,13 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
if ( net_diff && net_ttf )
{
double net_hr = net_diff * exp32 / net_ttf;
char net_ttf_str[32];
// char net_ttf_str[32];
char net_hr_units[4] = {0};

sprintf_et( net_ttf_str, net_ttf );
// sprintf_et( net_ttf_str, net_ttf );
scale_hash_for_display ( &net_hr, net_hr_units );
applog2( LOG_INFO, "Net TTF @ %.2f %sh/s: %s",
net_hr, net_hr_units, net_ttf_str );
applog2( LOG_INFO, "Net hash rate (est) %.2f %sh/s",
net_hr, net_hr_units );
}
}
} // hr > 0
@@ -2719,12 +2628,12 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
static void *stratum_thread(void *userdata )
{
struct thr_info *mythr = (struct thr_info *) userdata;
char *s;
char *s = NULL;

stratum.url = (char*) tq_pop(mythr->q, NULL);
if (!stratum.url)
goto out;
applog( LOG_INFO, "Stratum connect %s", short_url );
applog( LOG_BLUE, "Stratum connect %s", short_url );

while (1)
{
@@ -2784,30 +2693,26 @@ static void *stratum_thread(void *userdata )
restart_threads();
}

if ( stratum_socket_full( &stratum, opt_timeout ) )
if ( likely( stratum_socket_full( &stratum, opt_timeout ) ) )
{
s = stratum_recv_line(&stratum);
if ( !s )
if ( likely( s = stratum_recv_line( &stratum ) ) )
{
if ( likely( !stratum_handle_method( &stratum, s ) ) )
stratum_handle_response( s );
free( s );
}
else
{
applog(LOG_WARNING, "Stratum connection interrupted");
stratum_disconnect( &stratum );
}
}
else
{
s = NULL;
applog(LOG_ERR, "Stratum connection timeout");
stratum_disconnect( &stratum );
}

if ( s )
{
if ( !stratum_handle_method( &stratum, s ) )
stratum_handle_response( s );
free( s );
}
else
{
// stratum_errors++;
// check if this redundant
stratum_disconnect( &stratum );
}
} // loop
out:
return NULL;
@@ -3444,7 +3349,7 @@ bool check_cpu_capability ()
" with VC++ 2013\n");
#elif defined(__GNUC__)
" with GCC");
printf(" %d.%d.%d.\n", __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__);
printf(" %d.%d.%d\n", __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__);
#else
printf(".\n");
#endif
@@ -3572,7 +3477,7 @@ int main(int argc, char *argv[])
num_cpus += cpus;

if (opt_debug)
applog(LOG_DEBUG, "Found %d cpus on cpu group %d", cpus, i);
applog(LOG_DEBUG, "Found %d cpus on cpu group %d", cpus, i);
}
#else
SYSTEM_INFO sysinfo;
@@ -3592,7 +3497,6 @@ int main(int argc, char *argv[])
if (num_cpus < 1)
num_cpus = 1;

if (!opt_n_threads)
opt_n_threads = num_cpus;

@@ -3666,12 +3570,13 @@ int main(int argc, char *argv[])
pthread_mutex_init( &stratum.sock_lock, NULL );
pthread_mutex_init( &stratum.work_lock, NULL );

flags = !opt_benchmark
&& ( strncmp( rpc_url, "https:", 6 )
|| strncasecmp(rpc_url, "stratum+tcps://", 15 ) )
? ( CURL_GLOBAL_ALL & ~CURL_GLOBAL_SSL )
: CURL_GLOBAL_ALL;
if ( curl_global_init( flags ) )
flags = CURL_GLOBAL_ALL;
if ( !opt_benchmark )
if ( strncasecmp( rpc_url, "https:", 6 )
&& strncasecmp( rpc_url, "stratum+tcps://", 15 ) )
flags &= ~CURL_GLOBAL_SSL;

if ( curl_global_init( flags ) )
{
applog(LOG_ERR, "CURL initialization failed");
return 1;
@@ -3711,27 +3616,24 @@ int main(int argc, char *argv[])
if (opt_priority > 0)
{
DWORD prio = NORMAL_PRIORITY_CLASS;
switch (opt_priority) {
case 1:
prio = BELOW_NORMAL_PRIORITY_CLASS;
switch (opt_priority)
{
case 1:
prio = BELOW_NORMAL_PRIORITY_CLASS;
break;
case 3:
prio = ABOVE_NORMAL_PRIORITY_CLASS;
case 3:
prio = ABOVE_NORMAL_PRIORITY_CLASS;
break;
case 4:
prio = HIGH_PRIORITY_CLASS;
case 4:
prio = HIGH_PRIORITY_CLASS;
break;
case 5:
prio = REALTIME_PRIORITY_CLASS;
case 5:
prio = REALTIME_PRIORITY_CLASS;
}
SetPriorityClass(GetCurrentProcess(), prio);
}
#endif

if ( num_cpus != opt_n_threads )
applog( LOG_INFO,"%u CPU cores available, %u miner threads selected.",
num_cpus, opt_n_threads );

// To be confirmed with more than 64 cpus
if ( opt_affinity != -1 )
{
@@ -3763,6 +3665,13 @@ int main(int argc, char *argv[])
*/
}

if ( !opt_quiet && ( opt_n_threads < num_cpus ) )
{
char affinity_map[64];
format_affinity_map( affinity_map, opt_affinity );
applog( LOG_INFO, "CPU affinity [%s]", affinity_map );
}

#ifdef HAVE_SYSLOG_H
if (use_syslog)
openlog("cpuminer", LOG_PID, LOG_USER);
@@ -3813,7 +3722,7 @@ int main(int argc, char *argv[])
/* start longpoll thread */
err = thread_create(thr, longpoll_thread);
if (err) {
applog(LOG_ERR, "long poll thread create failed");
applog(LOG_ERR, "Long poll thread create failed");
return 1;
}
}
@@ -3833,7 +3742,7 @@ int main(int argc, char *argv[])
err = thread_create(thr, stratum_thread);
if (err)
{
applog(LOG_ERR, "stratum thread create failed");
applog(LOG_ERR, "Stratum thread create failed");
return 1;
}
if (have_stratum)
@@ -3874,18 +3783,16 @@ int main(int argc, char *argv[])
return 1;
err = thread_create(thr, miner_thread);
if (err) {
applog(LOG_ERR, "thread %d create failed", i);
applog(LOG_ERR, "Miner thread %d create failed", i);
return 1;
}
}

applog(LOG_INFO, "%d miner threads started, "
"using '%s' algorithm.",
opt_n_threads,
algo_names[opt_algo]);
applog( LOG_INFO, "%d of %d miner threads started using '%s' algorithm",
opt_n_threads, num_cpus, algo_names[opt_algo] );

/* main loop - simply wait for workio thread to exit */
pthread_join(thr_info[work_thr_id].pth, NULL);
applog(LOG_WARNING, "workio thread dead, exiting.");
pthread_join( thr_info[work_thr_id].pth, NULL );
applog( LOG_WARNING, "workio thread dead, exiting." );
return 0;
}
miner.h
@@ -323,6 +323,7 @@ int timeval_subtract( struct timeval *result, struct timeval *x,
// diff_to_hash = 2**32 = 0x100000000 = 4294967296 = exp32;

const double exp32; // 2**32
const double exp48; // 2**48
const double exp64; // 2**64

bool fulltest( const uint32_t *hash, const uint32_t *target );
@@ -345,13 +346,16 @@ struct thr_info {

//struct thr_info *thr_info;

void test_hash_and_submit( struct work *work, const void *hash,
struct thr_info *thr );

bool submit_solution( struct work *work, const void *hash,
struct thr_info *thr );

// deprecated
bool submit_lane_solution( struct work *work, const void *hash,
struct thr_info *thr, const int lane );

bool test_hash_and_submit( struct work*, const void*, struct thr_info* );

bool submit_work( struct thr_info *thr, const struct work *work_in );

@@ -773,7 +777,7 @@ extern const int pk_buffer_size_max;
extern int pk_buffer_size;

static char const usage[] = "\
Usage: " PACKAGE_NAME " [OPTIONS]\n\
Usage: cpuminer [OPTIONS]\n\
Options:\n\
-a, --algo=ALGO specify the algorithm to use\n\
allium Garlicoin (GRLC)\n\
@@ -868,8 +872,8 @@ Options:\n\
yespower-b2b generic yespower + blake2b\n\
zr5 Ziftr\n\
-N, --param-n N parameter for scrypt based algos\n\
-R, --patam-r R parameter for scrypt based algos\n\
-K, --param-key Key parameter for algos that use it\n\
-R, --param-r R parameter for scrypt based algos\n\
-K, --param-key Key (pers) parameter for algos that use it\n\
-o, --url=URL URL of mining server\n\
-O, --userpass=U:P username:password pair for mining server\n\
-u, --user=USERNAME username for mining server\n\
@@ -886,7 +890,7 @@ Options:\n\
long polling is unavailable, in seconds (default: 5)\n\
--randomize Randomize scan range start to reduce duplicates\n\
--reset-on-stale Workaround reset stratum if too many stale shares\n\
-f, --diff-factor Divide req. difficulty by this factor (std is 1.0)\n\
-f, --diff-factor Divide req. difficulty by this factor (std is 1.0)\n\
-m, --diff-multiplier Multiply difficulty by this factor (std is 1.0)\n\
--hash-meter Display thread hash rates\n\
--coinbase-addr=ADDR payout address for solo mining\n\
@@ -908,7 +912,6 @@ Options:\n\
"\
-B, --background run the miner in the background\n\
--benchmark run in offline benchmark mode\n\
--cputest debug hashes from cpu algorithms\n\
--cpu-affinity set process affinity to cpu core(s), mask 0x3 for cores 0 and 1\n\
--cpu-priority set process priority (default: 0 idle, 2 normal to 5 highest)\n\
-b, --api-bind IP/Port for the miner API (default: 127.0.0.1:4048)\n\
util.c
@@ -1041,16 +1041,19 @@ bool fulltest( const uint32_t *hash, const uint32_t *target )
return rc;
}

/*
void diff_to_target(uint32_t *target, double diff)
{
uint64_t m;
int k;

for (k = 6; k > 0 && diff > 1.0; k--)
diff /= 4294967296.0;
diff /= exp32;

m = (uint64_t)(4294901760.0 / diff);
// diff /= 4294967296.0;

// m = (uint64_t)(4294901760.0 / diff);

m = (uint64_t)(exp32 / diff);

if (m == 0 && k == 6)
memset(target, 0xff, 32);
@@ -1060,28 +1063,6 @@ void diff_to_target(uint32_t *target, double diff)
target[k + 1] = (uint32_t)(m >> 32);
}
}
*/

void diff_to_target(uint32_t *target, double diff)
{
uint64_t *t = (uint64_t*)target;
uint64_t m;
int k;

for ( k = 3; k > 0 && diff > 1.0; k-- )
diff /= exp64;

m = (uint64_t)( 0xffff0000 / diff );

if unlikely( m == 0 && k == 3 )
memset( t, 0xff, 32 );
else
{
memset( t, 0, 32 );
t[k] = m;
}
}

// deprecated
void work_set_target(struct work* work, double diff)
@@ -1090,6 +1071,15 @@ void work_set_target(struct work* work, double diff)
work->targetdiff = diff;
}

double target_to_diff( uint32_t* target )
{
uint64_t *targ = (uint64_t*)target;
// extract 64 bits from target[ 240:176 ]
uint64_t m = ( targ[3] << 16 ) | ( targ[2] >> 48 );
return m ? (exp48-1.) / (double)m : 0.;
}

/*
double target_to_diff(uint32_t* target)
{
uchar* tgt = (uchar*) target;
@@ -1103,11 +1093,13 @@ double target_to_diff(uint32_t* target)
(uint64_t)tgt[23] << 8 |
(uint64_t)tgt[22] << 0;

if (!m)
return 0.;
else
return (double)0x0000ffff00000000/m;
}
*/

#ifdef WIN32
#define socket_blocks() (WSAGetLastError() == WSAEWOULDBLOCK)