Mirror of https://github.com/JayDDee/cpuminer-opt.git
Synced 2025-09-17 23:44:27 +00:00

Compare commits (6 commits):

fb9163185a
6e8b8ed34f
c0aadbcc99
3da149418a
720610cce5
cedcf4d070
@@ -163,6 +163,7 @@ cpuminer_SOURCES = \
   algo/sha/sha256-hash-4way.c \
   algo/sha/sha512-hash-4way.c \
   algo/sha/hmac-sha256-hash.c \
+  algo/sha/hmac-sha256-hash-4way.c \
   algo/sha/sha2.c \
   algo/sha/sha256t-gate.c \
   algo/sha/sha256t-4way.c \
README.md (41 changed lines)
@@ -37,25 +37,25 @@ Requirements
 ------------

 1. A x86_64 architecture CPU with a minimum of SSE2 support. This includes
-Intel Core2 and newer and AMD equivalents. In order to take advantage of AES_NI
-optimizations a CPU with AES_NI is required. This includes Intel Westmere
-and newer and AMD equivalents. Further optimizations are available on some
-algoritms for CPUs with AVX and AVX2, Sandybridge and Haswell respectively.
+Intel Core2 and newer and AMD equivalents. Further optimizations are available
+on some algoritms for CPUs with AES, AVX, AVX2, SHA, AVX512 and VAES.

 Older CPUs are supported by cpuminer-multi by TPruvot but at reduced
 performance.

-ARM CPUs are not supported.
+ARM and Aarch64 CPUs are not supported.

-2. 64 bit Linux OS. Ubuntu and Fedora based distributions, including Mint and
-Centos, are known to work and have all dependencies in their repositories.
-Others may work but may require more effort. Older versions such as Centos 6
-don't work due to missing features.
-64 bit Windows OS is supported with mingw_w64 and msys or pre-built binaries.
+2. 64 bit Linux or Windows OS. Ubuntu and Fedora based distributions,
+including Mint and Centos, are known to work and have all dependencies
+in their repositories. Others may work but may require more effort. Older
+versions such as Centos 6 don't work due to missing features.

 MacOS, OSx and Android are not supported.

-3. Stratum pool. Some algos may work wallet mining using getwork or GBT. YMMV.
+3. Stratum pool supporting stratum+tcp:// or stratum+ssl:// protocols or
+RPC getwork using http:// or https://.
+GBT is YMMV.

 Supported Algorithms
 --------------------

@@ -152,6 +152,27 @@ Supported Algorithms
 yespower-b2b             generic yespower + blake2b
 zr5                      Ziftr

+Many variations of scrypt based algos can be mine by specifying their
+parameters:
+
+scryptn2: --algo scrypt --param-n 1048576
+
+cpupower: --algo yespower --param-key "CPUpower: The number of CPU working or available for proof-of-work mining"
+
+power2b: --algo yespower-b2b --param-n 2048 --param-r 32 --param-key "Now I am become Death, the destroyer of worlds"
+
+sugarchain: --algo yespower --param-n 2048 -param-r 32 --param-key "Satoshi Nakamoto 31/Oct/2008 Proof-of-work is essentially one-CPU-one-vote"
+
+yespoweriots: --algo yespower --param-n 2048 --param-key "Iots is committed to the development of IOT"
+
+yespowerlitb: --algo yespower --param-n 2048 --param-r 32 --param-key "LITBpower: The number of LITB working or available for proof-of-work mini"
+
+yespoweric: --algo yespower --param-n 2048 --param-r 32 --param-key "IsotopeC"
+
+yespowerurx: --algo yespower --param-n 2048 --param-r 32 --param-key "UraniumX"
+
+yespowerltncg: --algo yespower --param-n 2048 --param-r 32 --param-key "LTNCGYES"
+
 Errata
 ------
@@ -65,8 +65,77 @@ If not what makes it happen or not happen?

Change Log
----------

v3.12.7

Issue #257: fixed a file descriptor leak which caused the CPU temperature
and frequency query to report zeros after mining for a couple of hours.

Issue #253: stale share reduction for yescrypt, sonoa.

v3.12.6.1

Issue #252: Fixed SSL mining (stratum+tcps://)

Issue #254 Fixed benchmark.

Issue #253: Implemented stale share reduction for yespower, x25x, x22i, x21s,
x16*, scryptn2, more to come.

v3.12.6

Issue #246: improved stale share detection for getwork.

Improved precision of target_to_diff conversion from 4 digits to 20+.

Display hash and target debug data for all rejected shares.

A graphical representation of CPU affinity is displayed when using --threads.

Added highest and lowest accepted share to summary log.

Other small changes to logs to improve consistency and clarity.

v3.12.5

Issues #246 & #251: fixed incorrect share diff for stratum and getwork,
fixed incorrect target diff for getwork. Stats should now be correct for
getwork as well as stratum.

Issue #252: Fixed stratum+tcps not using curl ssl.

Getwork: reduce stale blocks, faster response to new work.

Added ntime to new job/work logs.

README.md now lists the parameters for yespower variations that don't have
a specific algo name.

v3.12.4.6

Issue #246: fixed getwork repeated new block logs with same height. New work
for the same block is now reported as "New work" instead of "New block".
Also added a check that work is new before generating "New work" log.

Added target diff to getwork new block log.

Changed share ratio in share result log to simple fraction, no longer %.

Added debug log to display mininginfo, use -D.

v3.12.4.5

Issue #246: better stale share detection for getwork, and enhanced logging
of stale shares for stratum & getwork.

Issue #251: fixed incorrect share difficulty and share ratio in share
result log.

Changed submit log to include share diff and block height.

Small cosmetic changes to logs.

v3.12.4.4

Issue #246: Fixed net hashrate in getwork block log,
removed duplicate getwork block log,
other small tweaks to stats logs for getwork.
@@ -97,21 +97,23 @@ int null_scanhash()
   return 0;
}

-void null_hash()
+int null_hash()
{
   applog(LOG_WARNING,"SWERR: null_hash unsafe null function");
+  return 0;
};
+/*
void null_hash_suw()
{
  applog(LOG_WARNING,"SWERR: null_hash_suw unsafe null function");
};
+*/

void init_algo_gate( algo_gate_t* gate )
{
   gate->miner_thread_init  = (void*)&return_true;
   gate->scanhash           = (void*)&null_scanhash;
   gate->hash               = (void*)&null_hash;
-  gate->hash_suw           = (void*)&null_hash_suw;
+//  gate->hash_suw           = (void*)&null_hash_suw;
   gate->get_new_work       = (void*)&std_get_new_work;
   gate->work_decode        = (void*)&std_le_work_decode;
   gate->decode_extra_data  = (void*)&do_nothing;

@@ -230,11 +232,6 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
     case ALGO_X22I:         register_x22i_algo         ( gate ); break;
     case ALGO_X25X:         register_x25x_algo         ( gate ); break;
     case ALGO_XEVAN:        register_xevan_algo        ( gate ); break;
-/*   case ALGO_YESCRYPT:     register_yescrypt_05_algo    ( gate ); break;
-     case ALGO_YESCRYPTR8:   register_yescryptr8_05_algo  ( gate ); break;
-     case ALGO_YESCRYPTR16:  register_yescryptr16_05_algo ( gate ); break;
-     case ALGO_YESCRYPTR32:  register_yescryptr32_05_algo ( gate ); break;
-*/
     case ALGO_YESCRYPT:     register_yescrypt_algo     ( gate ); break;
     case ALGO_YESCRYPTR8:   register_yescryptr8_algo   ( gate ); break;
     case ALGO_YESCRYPTR8G:  register_yescryptr8g_algo  ( gate ); break;
@@ -113,9 +113,10 @@ typedef struct
// mandatory functions, must be overwritten
int ( *scanhash ) ( struct work*, uint32_t, uint64_t*, struct thr_info* );

// not used anywhere
// optional unsafe, must be overwritten if algo uses function
-void ( *hash ) ( void*, const void*, uint32_t ) ;
-void ( *hash_suw ) ( void*, const void* );
+int ( *hash ) ( void*, const void*, uint32_t ) ;
+//void ( *hash_suw ) ( void*, const void* );

//optional, safe to use default in most cases

@@ -213,8 +214,8 @@ void four_way_not_tested();
int null_scanhash();

// displays warning
-void null_hash ();
-void null_hash_suw();
+int null_hash ();
+//void null_hash_suw();

// optional safe targets, default listed first unless noted.
@@ -311,7 +311,7 @@ bool register_m7m_algo( algo_gate_t *gate )
{
   gate->optimizations = SHA_OPT;
   init_m7m_ctx();
-  gate->scanhash              = (void*)scanhash_m7m_hash;
+  gate->scanhash              = (void*)&scanhash_m7m_hash;
   gate->build_stratum_request = (void*)&std_be_build_stratum_request;
   gate->work_decode           = (void*)&std_be_work_decode;
   gate->submit_getwork_result = (void*)&std_be_submit_getwork_result;
@@ -424,7 +424,7 @@ static bool scrypt_1024_1_1_256(const uint32_t *input, uint32_t *output,
}

#ifdef HAVE_SHA256_4WAY
-static bool scrypt_1024_1_1_256_4way(const uint32_t *input,
+static int scrypt_1024_1_1_256_4way(const uint32_t *input,
   uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N,
   int thrid )
{

@@ -440,7 +440,8 @@ static bool scrypt_1024_1_1_256_4way(const uint32_t *input,
   for (i = 0; i < 20; i++)
      for (k = 0; k < 4; k++)
         W[4 * i + k] = input[k * 20 + i];
-  for (i = 0; i < 8; i++)
+
+  for (i = 0; i < 8; i++)
      for (k = 0; k < 4; k++)
         tstate[4 * i + k] = midstate[i];

@@ -448,6 +449,8 @@ static bool scrypt_1024_1_1_256_4way(const uint32_t *input,

   PBKDF2_SHA256_80_128_4way(tstate, ostate, W, W);

+  if ( work_restart[thrid].restart ) return 0;
+
   for (i = 0; i < 32; i++)
      for (k = 0; k < 4; k++)
         X[k * 32 + i] = W[4 * i + k];

@@ -457,6 +460,8 @@ static bool scrypt_1024_1_1_256_4way(const uint32_t *input,
   scrypt_core(X + 2 * 32, V, N);
   scrypt_core(X + 3 * 32, V, N);

+  if ( work_restart[thrid].restart ) return 0;
+
   for (i = 0; i < 32; i++)
      for (k = 0; k < 4; k++)
         W[4 * i + k] = X[k * 32 + i];

@@ -466,13 +471,14 @@ static bool scrypt_1024_1_1_256_4way(const uint32_t *input,
   for (i = 0; i < 8; i++)
      for (k = 0; k < 4; k++)
         output[k * 8 + i] = W[4 * i + k];
-  return true;
+
+  return 1;
}
#endif /* HAVE_SHA256_4WAY */

#ifdef HAVE_SCRYPT_3WAY

-static bool scrypt_1024_1_1_256_3way(const uint32_t *input,
+static int scrypt_1024_1_1_256_3way(const uint32_t *input,
   uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N,
   int thrid )
{

@@ -490,22 +496,23 @@ static bool scrypt_1024_1_1_256_3way(const uint32_t *input,
   HMAC_SHA256_80_init(input + 20, tstate + 8, ostate + 8);
   HMAC_SHA256_80_init(input + 40, tstate + 16, ostate + 16);

-  if ( work_restart[thrid].restart ) return false;
+  if ( work_restart[thrid].restart ) return 0;

   PBKDF2_SHA256_80_128(tstate + 0, ostate + 0, input + 0, X + 0);
   PBKDF2_SHA256_80_128(tstate + 8, ostate + 8, input + 20, X + 32);
   PBKDF2_SHA256_80_128(tstate + 16, ostate + 16, input + 40, X + 64);

-  if ( work_restart[thrid].restart ) return false;
+  if ( work_restart[thrid].restart ) return 0;

   scrypt_core_3way(X, V, N);

-  if ( work_restart[thrid].restart ) return false;
+  if ( work_restart[thrid].restart ) return 0;

   PBKDF2_SHA256_128_32(tstate + 0, ostate + 0, X + 0, output + 0);
   PBKDF2_SHA256_128_32(tstate + 8, ostate + 8, X + 32, output + 8);
   PBKDF2_SHA256_128_32(tstate + 16, ostate + 16, X + 64, output + 16);
-  return true;
+
+  return 1;
}

#ifdef HAVE_SHA256_4WAY

@@ -526,7 +533,8 @@ static bool scrypt_1024_1_1_256_12way(const uint32_t *input,
   for (i = 0; i < 20; i++)
      for (k = 0; k < 4; k++)
         W[128 * j + 4 * i + k] = input[80 * j + k * 20 + i];
-  for (j = 0; j < 3; j++)
+
+  for (j = 0; j < 3; j++)
      for (i = 0; i < 8; i++)
         for (k = 0; k < 4; k++)
            tstate[32 * j + 4 * i + k] = midstate[i];

@@ -535,13 +543,13 @@ static bool scrypt_1024_1_1_256_12way(const uint32_t *input,
   HMAC_SHA256_80_init_4way(W + 128, tstate + 32, ostate + 32);
   HMAC_SHA256_80_init_4way(W + 256, tstate + 64, ostate + 64);

-  if ( work_restart[thrid].restart ) return false;
+  if ( work_restart[thrid].restart ) return 0;

   PBKDF2_SHA256_80_128_4way(tstate + 0, ostate + 0, W + 0, W + 0);
   PBKDF2_SHA256_80_128_4way(tstate + 32, ostate + 32, W + 128, W + 128);
   PBKDF2_SHA256_80_128_4way(tstate + 64, ostate + 64, W + 256, W + 256);

-  if ( work_restart[thrid].restart ) return false;
+  if ( work_restart[thrid].restart ) return 0;

   for (j = 0; j < 3; j++)
      for (i = 0; i < 32; i++)

@@ -553,7 +561,7 @@ static bool scrypt_1024_1_1_256_12way(const uint32_t *input,
   scrypt_core_3way(X + 2 * 96, V, N);
   scrypt_core_3way(X + 3 * 96, V, N);

-  if ( work_restart[thrid].restart ) return false;
+  if ( work_restart[thrid].restart ) return 0;

   for (j = 0; j < 3; j++)
      for (i = 0; i < 32; i++)

@@ -568,16 +576,17 @@ static bool scrypt_1024_1_1_256_12way(const uint32_t *input,
      for (i = 0; i < 8; i++)
         for (k = 0; k < 4; k++)
            output[32 * j + k * 8 + i] = W[128 * j + 4 * i + k];
-  return true;
+
+  return 1;
}
#endif /* HAVE_SHA256_4WAY */

#endif /* HAVE_SCRYPT_3WAY */

#ifdef HAVE_SCRYPT_6WAY
-static bool scrypt_1024_1_1_256_24way(const uint32_t *input,
-  uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N,
-  int thrid )
+static int scrypt_1024_1_1_256_24way( const uint32_t *input,
+                                      uint32_t *output, uint32_t *midstate,
+                                      unsigned char *scratchpad, int N, int thrid )
{
   uint32_t _ALIGN(128) tstate[24 * 8];
   uint32_t _ALIGN(128) ostate[24 * 8];

@@ -586,55 +595,60 @@ static bool scrypt_1024_1_1_256_24way(const uint32_t *input,
   uint32_t *V;
   int i, j, k;

-  V = (uint32_t *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63));
+  V = (uint32_t *)( ( (uintptr_t)(scratchpad) + 63 ) & ~ (uintptr_t)(63) );

-  for (j = 0; j < 3; j++)
-     for (i = 0; i < 20; i++)
-        for (k = 0; k < 8; k++)
+  for ( j = 0; j < 3; j++ )
+     for ( i = 0; i < 20; i++ )
+        for ( k = 0; k < 8; k++ )
           W[8 * 32 * j + 8 * i + k] = input[8 * 20 * j + k * 20 + i];
-  for (j = 0; j < 3; j++)
-     for (i = 0; i < 8; i++)
-        for (k = 0; k < 8; k++)
+
+  for ( j = 0; j < 3; j++ )
+     for ( i = 0; i < 8; i++ )
+        for ( k = 0; k < 8; k++ )
           tstate[8 * 8 * j + 8 * i + k] = midstate[i];

-  HMAC_SHA256_80_init_8way(W + 0, tstate + 0, ostate + 0);
-  HMAC_SHA256_80_init_8way(W + 256, tstate + 64, ostate + 64);
-  HMAC_SHA256_80_init_8way(W + 512, tstate + 128, ostate + 128);
+  HMAC_SHA256_80_init_8way( W + 0, tstate + 0, ostate + 0 );
+  HMAC_SHA256_80_init_8way( W + 256, tstate + 64, ostate + 64 );
+  HMAC_SHA256_80_init_8way( W + 512, tstate + 128, ostate + 128 );

-  if ( work_restart[thrid].restart ) return false;
+  if ( work_restart[thrid].restart ) return 0;

-  PBKDF2_SHA256_80_128_8way(tstate + 0, ostate + 0, W + 0, W + 0);
-  PBKDF2_SHA256_80_128_8way(tstate + 64, ostate + 64, W + 256, W + 256);
-  PBKDF2_SHA256_80_128_8way(tstate + 128, ostate + 128, W + 512, W + 512);
+  PBKDF2_SHA256_80_128_8way( tstate + 0, ostate + 0, W + 0, W + 0 );
+  PBKDF2_SHA256_80_128_8way( tstate + 64, ostate + 64, W + 256, W + 256 );
+  PBKDF2_SHA256_80_128_8way( tstate + 128, ostate + 128, W + 512, W + 512 );

-  if ( work_restart[thrid].restart ) return false;
+  if ( work_restart[thrid].restart ) return 0;

-  for (j = 0; j < 3; j++)
-     for (i = 0; i < 32; i++)
-        for (k = 0; k < 8; k++)
+  for ( j = 0; j < 3; j++ )
+     for ( i = 0; i < 32; i++ )
+        for ( k = 0; k < 8; k++ )
           X[8 * 32 * j + k * 32 + i] = W[8 * 32 * j + 8 * i + k];

-  scrypt_core_6way(X + 0 * 32, V, N);
-  scrypt_core_6way(X + 6 * 32, V, N);
-  scrypt_core_6way(X + 12 * 32, V, N);
-  scrypt_core_6way(X + 18 * 32, V, N);
+  scrypt_core_6way( X + 0 * 32, V, N );
+  scrypt_core_6way( X + 6 * 32, V, N );

-  if ( work_restart[thrid].restart ) return false;
+  if ( work_restart[thrid].restart ) return 0;
+
+  scrypt_core_6way( X + 12 * 32, V, N );
+  scrypt_core_6way( X + 18 * 32, V, N );
+
+  if ( work_restart[thrid].restart ) return 0;

-  for (j = 0; j < 3; j++)
-     for (i = 0; i < 32; i++)
-        for (k = 0; k < 8; k++)
+  for ( j = 0; j < 3; j++ )
+     for ( i = 0; i < 32; i++ )
+        for ( k = 0; k < 8; k++ )
           W[8 * 32 * j + 8 * i + k] = X[8 * 32 * j + k * 32 + i];

-  PBKDF2_SHA256_128_32_8way(tstate + 0, ostate + 0, W + 0, W + 0);
-  PBKDF2_SHA256_128_32_8way(tstate + 64, ostate + 64, W + 256, W + 256);
-  PBKDF2_SHA256_128_32_8way(tstate + 128, ostate + 128, W + 512, W + 512);
+  PBKDF2_SHA256_128_32_8way( tstate + 0, ostate + 0, W + 0, W + 0 );
+  PBKDF2_SHA256_128_32_8way( tstate + 64, ostate + 64, W + 256, W + 256 );
+  PBKDF2_SHA256_128_32_8way( tstate + 128, ostate + 128, W + 512, W + 512 );

-  for (j = 0; j < 3; j++)
-     for (i = 0; i < 8; i++)
-        for (k = 0; k < 8; k++)
+  for ( j = 0; j < 3; j++ )
+     for ( i = 0; i < 8; i++ )
+        for ( k = 0; k < 8; k++ )
           output[8 * 8 * j + k * 8 + i] = W[8 * 32 * j + 8 * i + k];
-  return true;
+
+  return 1;
}
#endif /* HAVE_SCRYPT_6WAY */

@@ -699,12 +713,13 @@ extern int scanhash_scrypt( struct work *work, uint32_t max_nonce,
      if ( rc )
         for ( i = 0; i < throughput; i++ )
         {
            if ( unlikely( valid_hash( hash + i * 8, ptarget ) ) )
            {
               pdata[19] = data[i * 20 + 19];
-              submit_lane_solution( work, hash, mythr, i );
+              submit_solution( work, hash + i * 8, mythr );
            }
         }
      }
   } while ( likely( ( n < ( max_nonce - throughput ) ) && !(*restart) ) );

   *hashes_done = n - pdata[19];
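The recurring change in the hunks above is an early-exit pattern: each hash function now checks the per-thread restart flag between expensive stages and returns 0 so the caller can abandon work that went stale mid-hash, which is the stale share reduction noted in the change log. A minimal standalone sketch of that pattern, using toy stand-in types rather than the project's thr_info/work_restart structures:

/* Standalone illustration only; "work_restart" here is a local stand-in,
 * not the miner's real array. */
#include <stdbool.h>
#include <stdio.h>

struct restart_flag { volatile bool restart; };
static struct restart_flag work_restart[1];

static void expensive_stage( int stage ) { printf( "stage %d done\n", stage ); }

static int staged_hash( int thrid )
{
   expensive_stage( 1 );
   if ( work_restart[thrid].restart ) return 0;   // new work arrived, abandon
   expensive_stage( 2 );
   if ( work_restart[thrid].restart ) return 0;
   expensive_stage( 3 );
   return 1;                                      // completed, worth checking/submitting
}

int main(void)
{
   if ( staged_hash( 0 ) )  printf( "submit result\n" );
   work_restart[0].restart = true;
   if ( !staged_hash( 0 ) ) printf( "stale, discarded\n" );
   return 0;
}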
algo/sha/hmac-sha256-hash-4way.c (new file, 440 lines)

@@ -0,0 +1,440 @@
/*-
 * Copyright 2005,2007,2009 Colin Percival
 * Copywright 2020 JayDDee246@gmail.com
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <stdint.h>
#include <string.h>
#include "hmac-sha256-hash-4way.h"
#include "compat.h"

// HMAC 4-way SSE2

/**
 * HMAC_SHA256_Buf(K, Klen, in, len, digest):
 * Compute the HMAC-SHA256 of ${len} bytes from ${in} using the key ${K} of
 * length ${Klen}, and write the result to ${digest}.
 */
void
hmac_sha256_4way_full( void *digest, const void *K, size_t Klen,
                       const void *in, size_t len )
{
   hmac_sha256_4way_context ctx;
   hmac_sha256_4way_init( &ctx, K, Klen );
   hmac_sha256_4way_update( &ctx, in, len );
   hmac_sha256_4way_close( &ctx, digest );
}

/* Initialize an HMAC-SHA256 operation with the given key. */
void
hmac_sha256_4way_init( hmac_sha256_4way_context *ctx, const void *_K,
                       size_t Klen )
{
   unsigned char pad[64*4] __attribute__ ((aligned (64)));
   unsigned char khash[32*4] __attribute__ ((aligned (64)));
   const unsigned char * K = _K;
   size_t i;

   /* If Klen > 64, the key is really SHA256(K). */
   if ( Klen > 64 )
   {
      sha256_4way_init( &ctx->ictx );
      sha256_4way_update( &ctx->ictx, K, Klen );
      sha256_4way_close( &ctx->ictx, khash );
      K = khash;
      Klen = 32;
   }

   /* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
   sha256_4way_init( &ctx->ictx );
   memset( pad, 0x36, 64*4 );

   for ( i = 0; i < Klen; i++ )
      casti_m128i( pad, i ) = _mm_xor_si128( casti_m128i( pad, i ),
                                             casti_m128i( K, i ) );

   sha256_4way_update( &ctx->ictx, pad, 64 );

   /* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
   sha256_4way_init( &ctx->octx );
   memset( pad, 0x5c, 64*4 );
   for ( i = 0; i < Klen/4; i++ )
      casti_m128i( pad, i ) = _mm_xor_si128( casti_m128i( pad, i ),
                                             casti_m128i( K, i ) );
   sha256_4way_update( &ctx->octx, pad, 64 );
}

/* Add bytes to the HMAC-SHA256 operation. */
void
hmac_sha256_4way_update( hmac_sha256_4way_context *ctx, const void *in,
                         size_t len )
{
   /* Feed data to the inner SHA256 operation. */
   sha256_4way_update( &ctx->ictx, in, len );
}

/* Finish an HMAC-SHA256 operation. */
void
hmac_sha256_4way_close( hmac_sha256_4way_context *ctx, void *digest )
{
   unsigned char ihash[32*4] __attribute__ ((aligned (64)));

   /* Finish the inner SHA256 operation. */
   sha256_4way_close( &ctx->ictx, ihash );

   /* Feed the inner hash to the outer SHA256 operation. */
   sha256_4way_update( &ctx->octx, ihash, 32 );

   /* Finish the outer SHA256 operation. */
   sha256_4way_close( &ctx->octx, digest );
}

/**
 * PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, c, buf, dkLen):
 * Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
 * write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
 */
void
pbkdf2_sha256_4way( uint8_t *buf, size_t dkLen,
                    const uint8_t *passwd, size_t passwdlen,
                    const uint8_t *salt, size_t saltlen, uint64_t c )
{
   hmac_sha256_4way_context PShctx, hctx;
   uint8_t _ALIGN(128) T[32*4];
   uint8_t _ALIGN(128) U[32*4];
   __m128i ivec;
   size_t i, clen;
   uint64_t j;
   int k;

   /* Compute HMAC state after processing P and S. */
   hmac_sha256_4way_init( &PShctx, passwd, passwdlen );
   hmac_sha256_4way_update( &PShctx, salt, saltlen );

   /* Iterate through the blocks. */
   for ( i = 0; i * 32 < dkLen; i++ )
   {
      /* Generate INT(i + 1). */
      ivec = _mm_set1_epi32( bswap_32( i+1 ) );

      /* Compute U_1 = PRF(P, S || INT(i)). */
      memcpy( &hctx, &PShctx, sizeof(hmac_sha256_4way_context) );
      hmac_sha256_4way_update( &hctx, &ivec, 4 );
      hmac_sha256_4way_close( &hctx, U );

      /* T_i = U_1 ... */
      memcpy( T, U, 32*4 );

      for ( j = 2; j <= c; j++ )
      {
         /* Compute U_j. */
         hmac_sha256_4way_init( &hctx, passwd, passwdlen );
         hmac_sha256_4way_update( &hctx, U, 32 );
         hmac_sha256_4way_close( &hctx, U );

         /* ... xor U_j ... */
         for ( k = 0; k < 8; k++ )
            casti_m128i( T, k ) = _mm_xor_si128( casti_m128i( T, k ),
                                                 casti_m128i( U, k ) );
      }

      /* Copy as many bytes as necessary into buf. */
      clen = dkLen - i * 32;
      if ( clen > 32 )
         clen = 32;
      memcpy( &buf[ i*32*4 ], T, clen*4 );
   }
}

#if defined(__AVX2__)

// HMAC 8-way AVX2

void
hmac_sha256_8way_full( void *digest, const void *K, size_t Klen,
                       const void *in, size_t len )
{
   hmac_sha256_8way_context ctx;
   hmac_sha256_8way_init( &ctx, K, Klen );
   hmac_sha256_8way_update( &ctx, in, len );
   hmac_sha256_8way_close( &ctx, digest );
}

/* Initialize an HMAC-SHA256 operation with the given key. */
void
hmac_sha256_8way_init( hmac_sha256_8way_context *ctx, const void *_K,
                       size_t Klen )
{
   unsigned char pad[64*8] __attribute__ ((aligned (128)));
   unsigned char khash[32*8] __attribute__ ((aligned (128)));
   const unsigned char * K = _K;
   size_t i;

   /* If Klen > 64, the key is really SHA256(K). */
   if ( Klen > 64 )
   {
      sha256_8way_init( &ctx->ictx );
      sha256_8way_update( &ctx->ictx, K, Klen );
      sha256_8way_close( &ctx->ictx, khash );
      K = khash;
      Klen = 32;
   }

   /* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
   sha256_8way_init( &ctx->ictx );
   memset( pad, 0x36, 64*8);

   for ( i = 0; i < Klen/4; i++ )
      casti_m256i( pad, i ) = _mm256_xor_si256( casti_m256i( pad, i ),
                                                casti_m256i( K, i ) );

   sha256_8way_update( &ctx->ictx, pad, 64 );

   /* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
   sha256_8way_init( &ctx->octx );
   memset( pad, 0x5c, 64*8 );
   for ( i = 0; i < Klen/4; i++ )
      casti_m256i( pad, i ) = _mm256_xor_si256( casti_m256i( pad, i ),
                                                casti_m256i( K, i ) );
   sha256_8way_update( &ctx->octx, pad, 64 );
}

void
hmac_sha256_8way_update( hmac_sha256_8way_context *ctx, const void *in,
                         size_t len )
{
   /* Feed data to the inner SHA256 operation. */
   sha256_8way_update( &ctx->ictx, in, len );
}

/* Finish an HMAC-SHA256 operation. */
void
hmac_sha256_8way_close( hmac_sha256_8way_context *ctx, void *digest )
{
   unsigned char ihash[32*8] __attribute__ ((aligned (128)));

   /* Finish the inner SHA256 operation. */
   sha256_8way_close( &ctx->ictx, ihash );

   /* Feed the inner hash to the outer SHA256 operation. */
   sha256_8way_update( &ctx->octx, ihash, 32 );

   /* Finish the outer SHA256 operation. */
   sha256_8way_close( &ctx->octx, digest );
}

/**
 * PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, c, buf, dkLen):
 * Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
 * write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
 */
void
pbkdf2_sha256_8way( uint8_t *buf, size_t dkLen, const uint8_t *passwd,
                    size_t passwdlen, const uint8_t *salt, size_t saltlen,
                    uint64_t c )
{
   hmac_sha256_8way_context PShctx, hctx;
   uint8_t _ALIGN(128) T[32*8];
   uint8_t _ALIGN(128) U[32*8];
   size_t i, clen;
   uint64_t j;
   int k;

   /* Compute HMAC state after processing P and S. */
   hmac_sha256_8way_init( &PShctx, passwd, passwdlen );

   // saltlen can be odd number of bytes
   hmac_sha256_8way_update( &PShctx, salt, saltlen );

   /* Iterate through the blocks. */
   for ( i = 0; i * 32 < dkLen; i++ )
   {
      __m256i ivec = _mm256_set1_epi32( bswap_32( i+1 ) );

      /* Compute U_1 = PRF(P, S || INT(i)). */
      memcpy( &hctx, &PShctx, sizeof(hmac_sha256_8way_context) );
      hmac_sha256_8way_update( &hctx, &ivec, 4 );
      hmac_sha256_8way_close( &hctx, U );

      /* T_i = U_1 ... */
      memcpy( T, U, 32*8 );

      for ( j = 2; j <= c; j++ )
      {
         /* Compute U_j. */
         hmac_sha256_8way_init( &hctx, passwd, passwdlen );
         hmac_sha256_8way_update( &hctx, U, 32 );
         hmac_sha256_8way_close( &hctx, U );

         /* ... xor U_j ... */
         for ( k = 0; k < 8; k++ )
            casti_m256i( T, k ) = _mm256_xor_si256( casti_m256i( T, k ),
                                                    casti_m256i( U, k ) );
      }

      /* Copy as many bytes as necessary into buf. */
      clen = dkLen - i * 32;
      if ( clen > 32 )
         clen = 32;
      memcpy( &buf[ i*32*8 ], T, clen*8 );
   }
}

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

// HMAC 16-way AVX512

void
hmac_sha256_16way_full( void *digest, const void *K, size_t Klen,
                        const void *in, size_t len )
{
   hmac_sha256_16way_context ctx;
   hmac_sha256_16way_init( &ctx, K, Klen );
   hmac_sha256_16way_update( &ctx, in, len );
   hmac_sha256_16way_close( &ctx, digest );
}

void
hmac_sha256_16way_init( hmac_sha256_16way_context *ctx, const void *_K,
                        size_t Klen )
{
   unsigned char pad[64*16] __attribute__ ((aligned (128)));
   unsigned char khash[32*16] __attribute__ ((aligned (128)));
   const unsigned char * K = _K;
   size_t i;

   /* If Klen > 64, the key is really SHA256(K). */
   if ( Klen > 64 )
   {
      sha256_16way_init( &ctx->ictx );
      sha256_16way_update( &ctx->ictx, K, Klen );
      sha256_16way_close( &ctx->ictx, khash );
      K = khash;
      Klen = 32;
   }

   /* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
   sha256_16way_init( &ctx->ictx );
   memset( pad, 0x36, 64*16 );

   for ( i = 0; i < Klen; i++ )
      casti_m512i( pad, i ) = _mm512_xor_si512( casti_m512i( pad, i ),
                                                casti_m512i( K, i ) );
   sha256_16way_update( &ctx->ictx, pad, 64 );

   /* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
   sha256_16way_init( &ctx->octx );
   memset( pad, 0x5c, 64*16 );
   for ( i = 0; i < Klen/4; i++ )
      casti_m512i( pad, i ) = _mm512_xor_si512( casti_m512i( pad, i ),
                                                casti_m512i( K, i ) );
   sha256_16way_update( &ctx->octx, pad, 64 );
}

void
hmac_sha256_16way_update( hmac_sha256_16way_context *ctx, const void *in,
                          size_t len )
{
   /* Feed data to the inner SHA256 operation. */
   sha256_16way_update( &ctx->ictx, in, len );
}

/* Finish an HMAC-SHA256 operation. */
void
hmac_sha256_16way_close( hmac_sha256_16way_context *ctx, void *digest )
{
   unsigned char ihash[32*16] __attribute__ ((aligned (128)));

   /* Finish the inner SHA256 operation. */
   sha256_16way_close( &ctx->ictx, ihash );

   /* Feed the inner hash to the outer SHA256 operation. */
   sha256_16way_update( &ctx->octx, ihash, 32 );

   /* Finish the outer SHA256 operation. */
   sha256_16way_close( &ctx->octx, digest );
}

/**
 * PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, c, buf, dkLen):
 * Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
 * write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
 */
void
pbkdf2_sha256_16way( uint8_t *buf, size_t dkLen,
                     const uint8_t *passwd, size_t passwdlen,
                     const uint8_t *salt, size_t saltlen, uint64_t c )
{
   hmac_sha256_16way_context PShctx, hctx;
   uint8_t _ALIGN(128) T[32*16];
   uint8_t _ALIGN(128) U[32*16];
   __m512i ivec;
   size_t i, clen;
   uint64_t j;
   int k;

   /* Compute HMAC state after processing P and S. */
   hmac_sha256_16way_init( &PShctx, passwd, passwdlen );
   hmac_sha256_16way_update( &PShctx, salt, saltlen );

   /* Iterate through the blocks. */
   for ( i = 0; i * 32 < dkLen; i++ )
   {
      /* Generate INT(i + 1). */
      ivec = _mm512_set1_epi32( bswap_32( i+1 ) );

      /* Compute U_1 = PRF(P, S || INT(i)). */
      memcpy( &hctx, &PShctx, sizeof(hmac_sha256_16way_context) );
      hmac_sha256_16way_update( &hctx, &ivec, 4 );
      hmac_sha256_16way_close( &hctx, U );

      /* T_i = U_1 ... */
      memcpy( T, U, 32*16 );

      for ( j = 2; j <= c; j++ )
      {
         /* Compute U_j. */
         hmac_sha256_16way_init( &hctx, passwd, passwdlen );
         hmac_sha256_16way_update( &hctx, U, 32 );
         hmac_sha256_16way_close( &hctx, U );

         /* ... xor U_j ... */
         for ( k = 0; k < 8; k++ )
            casti_m512i( T, k ) = _mm512_xor_si512( casti_m512i( T, k ),
                                                    casti_m512i( U, k ) );
      }

      /* Copy as many bytes as necessary into buf. */
      clen = dkLen - i * 32;
      if ( clen > 32 )
         clen = 32;
      memcpy( &buf[ i*32*16 ], T, clen*16 );
   }
}

#endif // AVX512
#endif // AVX2
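A usage sketch for the new 4-way API (illustrative only, not part of the changeset). It assumes the 4-way functions consume four lane-interleaved streams at 32-bit word granularity with per-lane byte lengths, the same layout the scrypt.c call sites above build with W[4*i + k] = input[k*20 + i]; the helper name and buffer sizes below are hypothetical.

/* Sketch: derive one 32-byte key per lane for four password/salt pairs at
 * once with pbkdf2_sha256_4way(). Lengths are per lane and multiples of 4,
 * at most 64 bytes, so the fixed buffers below are large enough. */
#include <stdint.h>
#include <stddef.h>
#include "algo/sha/hmac-sha256-hash-4way.h"

#define LANES 4

/* Interleave four equal-length streams into 32-bit-word lanes. */
static void interleave_4x32( uint32_t *dst, const uint32_t *src[LANES],
                             size_t words_per_lane )
{
   for ( size_t i = 0; i < words_per_lane; i++ )
      for ( int k = 0; k < LANES; k++ )
         dst[ LANES*i + k ] = src[k][i];
}

void derive_keys_4way( uint8_t out[LANES][32],
                       const uint32_t *pw[LANES],   size_t pwlen,
                       const uint32_t *salt[LANES], size_t saltlen,
                       uint64_t rounds )
{
   uint32_t pw_v  [ (64/4) * LANES ];   // interleaved passwords
   uint32_t salt_v[ (64/4) * LANES ];   // interleaved salts
   uint32_t dk_v  [ (32/4) * LANES ];   // interleaved derived keys

   interleave_4x32( pw_v,   pw,   pwlen/4 );
   interleave_4x32( salt_v, salt, saltlen/4 );

   pbkdf2_sha256_4way( (uint8_t*)dk_v, 32,
                       (const uint8_t*)pw_v,   pwlen,
                       (const uint8_t*)salt_v, saltlen, rounds );

   /* De-interleave the 32-byte derived key of each lane. */
   for ( int i = 0; i < 32/4; i++ )
      for ( int k = 0; k < LANES; k++ )
         ((uint32_t*)out[k])[i] = dk_v[ LANES*i + k ];
}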
algo/sha/hmac-sha256-hash-4way.h (new file, 107 lines)

@@ -0,0 +1,107 @@
/*-
 * Copyright 2005,2007,2009 Colin Percival
 * Copyright 2020 JayDDee@gmailcom
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libmd/sha256_Y.h,v 1.2 2006/01/17 15:35:56 phk Exp $
 */

#ifndef HMAC_SHA256_4WAY_H__
#define HMAC_SHA256_4WAY_H__

// Tested only 8-way with null pers

#include <sys/types.h>
#include <stdint.h>
#include "simd-utils.h"
#include "sha-hash-4way.h"

typedef struct _hmac_sha256_4way_context
{
   sha256_4way_context ictx;
   sha256_4way_context octx;
} hmac_sha256_4way_context;

//void SHA256_Buf( const void *, size_t len, uint8_t digest[32] );
void hmac_sha256_4way_init( hmac_sha256_4way_context *, const void *, size_t );
void hmac_sha256_4way_update( hmac_sha256_4way_context *, const void *,
                              size_t );
void hmac_sha256_4way_close( hmac_sha256_4way_context *, void* );
void hmac_sha256_4way_full( void*, const void *, size_t Klen, const void *,
                            size_t len );

/**
 * PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, c, buf, dkLen):
 * Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
 * write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
 */
void pbkdf2_sha256_4way( uint8_t *, size_t, const uint8_t *, size_t,
                         const uint8_t *, size_t, uint64_t );

#if defined(__AVX2__)

typedef struct _hmac_sha256_8way_context
{
   sha256_8way_context ictx;
   sha256_8way_context octx;
} hmac_sha256_8way_context;

//void SHA256_Buf( const void *, size_t len, uint8_t digest[32] );
void hmac_sha256_8way_init( hmac_sha256_8way_context *, const void *, size_t );
void hmac_sha256_8way_update( hmac_sha256_8way_context *, const void *,
                              size_t );
void hmac_sha256_8way_close( hmac_sha256_8way_context *, void* );
void hmac_sha256_8way_full( void*, const void *, size_t Klen, const void *,
                            size_t len );

void pbkdf2_sha256_8way( uint8_t *, size_t, const uint8_t *, size_t,
                         const uint8_t *, size_t, uint64_t );

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

typedef struct _hmac_sha256_16way_context
{
   sha256_16way_context ictx;
   sha256_16way_context octx;
} hmac_sha256_16way_context;

//void SHA256_Buf( const void *, size_t len, uint8_t digest[32] );
void hmac_sha256_16way_init( hmac_sha256_16way_context *,
                             const void *, size_t );
void hmac_sha256_16way_update( hmac_sha256_16way_context *, const void *,
                               size_t );
void hmac_sha256_16way_close( hmac_sha256_16way_context *, void* );
void hmac_sha256_16way_full( void*, const void *, size_t Klen, const void *,
                             size_t len );

void pbkdf2_sha256_16way( uint8_t *, size_t, const uint8_t *, size_t,
                          const uint8_t *, size_t, uint64_t );

#endif // AVX512
#endif // AVX2

#endif // HMAC_SHA256_4WAY_H__
@@ -81,16 +81,17 @@ HMAC_SHA256_Init( HMAC_SHA256_CTX *ctx, const void *_K, size_t Klen )

   /* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
   SHA256_Init( &ctx->ictx );
-  memset( pad, 0x36, 64 );
-  for ( i = 0; i < Klen; i++ )
-     pad[i] ^= K[i];
+  for ( i = 0; i < Klen; i++ )   pad[i] = K[i] ^ 0x36;
+  memset( pad + Klen, 0x36, 64 - Klen );
   SHA256_Update( &ctx->ictx, pad, 64 );

   /* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
   SHA256_Init( &ctx->octx );
-  memset(pad, 0x5c, 64);
-  for ( i = 0; i < Klen; i++ )
-     pad[i] ^= K[i];
+  for ( i = 0; i < Klen; i++ )   pad[i] = K[i] ^ 0x5c;
+  memset( pad + Klen, 0x5c, 64 - Klen );
   SHA256_Update( &ctx->octx, pad, 64 );
}

@@ -161,7 +162,13 @@ PBKDF2_SHA256( const uint8_t *passwd, size_t passwdlen, const uint8_t *salt,
      HMAC_SHA256_Final( U, &hctx );

      /* ... xor U_j ... */
-     for ( k = 0; k < 32; k++ )
+//      _mm256_xor_si256( *(__m256i*)T, *(__m256i*)U );
+//      _mm_xor_si128( ((__m128i*)T)[0], ((__m128i*)U)[0] );
+//      _mm_xor_si128( ((__m128i*)T)[1], ((__m128i*)U)[1] );
+
+//      for ( k = 0; k < 4; k++ ) T[k] ^= U[k];
+
+     for ( k = 0; k < 32; k++ )
        T[k] ^= U[k];
   }
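The reworked pad setup in the first hunk is behavior preserving. A standalone check (illustrative only, not repo code) that the old fill-then-xor and new xor-then-fill constructions produce the same HMAC ipad bytes for any key length up to the 64-byte block:

#include <assert.h>
#include <stddef.h>
#include <string.h>

static void pad_old( unsigned char pad[64], const unsigned char *K, size_t Klen )
{
   memset( pad, 0x36, 64 );                        /* fill, then xor key prefix */
   for ( size_t i = 0; i < Klen; i++ ) pad[i] ^= K[i];
}

static void pad_new( unsigned char pad[64], const unsigned char *K, size_t Klen )
{
   for ( size_t i = 0; i < Klen; i++ ) pad[i] = K[i] ^ 0x36;   /* xor key prefix */
   memset( pad + Klen, 0x36, 64 - Klen );                      /* fill the rest  */
}

int main(void)
{
   unsigned char K[64], a[64], b[64];
   for ( int i = 0; i < 64; i++ ) K[i] = (unsigned char)(i * 7 + 3);
   for ( size_t Klen = 0; Klen <= 64; Klen++ )
   {
      pad_old( a, K, Klen );
      pad_new( b, K, Klen );
      assert( memcmp( a, b, 64 ) == 0 );   /* identical pads for every Klen */
   }
   return 0;
}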
@@ -58,6 +58,7 @@ void sha256_4way_init( sha256_4way_context *sc );
void sha256_4way_update( sha256_4way_context *sc, const void *data,
                         size_t len );
void sha256_4way_close( sha256_4way_context *sc, void *dst );
+void sha256_4way_full( void *dst, const void *data, size_t len );

#endif // SSE2

@@ -75,6 +76,7 @@ typedef struct {
void sha256_8way_init( sha256_8way_context *sc );
void sha256_8way_update( sha256_8way_context *sc, const void *data, size_t len );
void sha256_8way_close( sha256_8way_context *sc, void *dst );
+void sha256_8way_full( void *dst, const void *data, size_t len );

#endif // AVX2

@@ -92,6 +94,7 @@ typedef struct {
void sha256_16way_init( sha256_16way_context *sc );
void sha256_16way_update( sha256_16way_context *sc, const void *data, size_t len );
void sha256_16way_close( sha256_16way_context *sc, void *dst );
+void sha256_16way_full( void *dst, const void *data, size_t len );

#endif // AVX512

@@ -110,6 +113,7 @@ void sha512_4way_init( sha512_4way_context *sc);
void sha512_4way_update( sha512_4way_context *sc, const void *data,
                         size_t len );
void sha512_4way_close( sha512_4way_context *sc, void *dst );
+void sha512_4way_full( void *dst, const void *data, size_t len );

#endif // AVX2

@@ -128,6 +132,7 @@ void sha512_8way_init( sha512_8way_context *sc);
void sha512_8way_update( sha512_8way_context *sc, const void *data,
                         size_t len );
void sha512_8way_close( sha512_8way_context *sc, void *dst );
+void sha512_8way_full( void *dst, const void *data, size_t len );

#endif // AVX512
@@ -330,6 +330,14 @@ void sha256_4way_close( sha256_4way_context *sc, void *dst )
   mm128_block_bswap_32( dst, sc->val );
}

+void sha256_4way_full( void *dst, const void *data, size_t len )
+{
+   sha256_4way_context ctx;
+   sha256_4way_init( &ctx );
+   sha256_4way_update( &ctx, data, len );
+   sha256_4way_close( &ctx, dst );
+}
+
#if defined(__AVX2__)

// SHA-256 8 way

@@ -498,6 +506,10 @@ void sha256_8way_init( sha256_8way_context *sc )
*/
}

+// need to handle odd byte length for yespower.
+// Assume only last update is odd.
+
void sha256_8way_update( sha256_8way_context *sc, const void *data, size_t len )
{
   __m256i *vdata = (__m256i*)data;

@@ -564,6 +576,13 @@ void sha256_8way_close( sha256_8way_context *sc, void *dst )
   mm256_block_bswap_32( dst, sc->val );
}

+void sha256_8way_full( void *dst, const void *data, size_t len )
+{
+   sha256_8way_context ctx;
+   sha256_8way_init( &ctx );
+   sha256_8way_update( &ctx, data, len );
+   sha256_8way_close( &ctx, dst );
+}
+
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

@@ -791,6 +810,14 @@ void sha256_16way_close( sha256_16way_context *sc, void *dst )
   mm512_block_bswap_32( dst, sc->val );
}

+void sha256_16way_full( void *dst, const void *data, size_t len )
+{
+   sha256_16way_context ctx;
+   sha256_16way_init( &ctx );
+   sha256_16way_update( &ctx, data, len );
+   sha256_16way_close( &ctx, dst );
+}
+
#endif // AVX512
#endif // __AVX2__
#endif // __SSE2__
@@ -77,7 +77,7 @@ typedef union _hex_context_overlay hex_context_overlay;

static __thread x16r_context_overlay hex_ctx;

-void hex_hash( void* output, const void* input )
+int hex_hash( void* output, const void* input, int thrid )
{
   uint32_t _ALIGN(128) hash[16];
   x16r_context_overlay ctx;

@@ -214,11 +214,15 @@ void hex_hash( void* output, const void* input )
         SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
      break;
   }

+  if ( work_restart[thrid].restart ) return 0;
+
   algo = (uint8_t)hash[0] % X16R_HASH_FUNC_COUNT;
   in = (void*) hash;
   size = 64;
   }
   memcpy(output, hash, 32);
+  return 1;
}

int scanhash_hex( struct work *work, uint32_t max_nonce,

@@ -286,8 +290,7 @@ int scanhash_hex( struct work *work, uint32_t max_nonce,
   do
   {
      edata[19] = nonce;
-     hex_hash( hash32, edata );
+     if ( hex_hash( hash32, edata, thr_id ) );
      if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
      {
         be32enc( &pdata[19], nonce );
@@ -80,7 +80,7 @@ void x16r_8way_prehash( void *vdata, void *pdata )
// Called by wrapper hash function to optionally continue hashing and
// convert to final hash.

-void x16r_8way_hash_generic( void* output, const void* input )
+int x16r_8way_hash_generic( void* output, const void* input, int thrid )
{
   uint32_t vhash[20*8] __attribute__ ((aligned (128)));
   uint32_t hash0[20] __attribute__ ((aligned (64)));

@@ -424,6 +424,9 @@ void x16r_8way_hash_generic( void* output, const void* input )
                            hash7, vhash );
      break;
   }

+  if ( work_restart[thrid].restart ) return 0;
+
   size = 64;
   }

@@ -435,14 +438,17 @@ void x16r_8way_hash_generic( void* output, const void* input )
   memcpy( output+320, hash5, 64 );
   memcpy( output+384, hash6, 64 );
   memcpy( output+448, hash7, 64 );
+
+  return 1;
}

// x16-r,-s,-rt wrapper called directly by scanhash to repackage 512 bit
// hash to 256 bit final hash.
-void x16r_8way_hash( void* output, const void* input )
+int x16r_8way_hash( void* output, const void* input, int thrid )
{
   uint8_t hash[64*8] __attribute__ ((aligned (128)));
-  x16r_8way_hash_generic( hash, input );
+  if ( !x16r_8way_hash_generic( hash, input, thrid ) )
+     return 0;

   memcpy( output, hash, 32 );
   memcpy( output+32, hash+64, 32 );

@@ -452,7 +458,9 @@ void x16r_8way_hash( void* output, const void* input )
   memcpy( output+160, hash+320, 32 );
   memcpy( output+192, hash+384, 32 );
   memcpy( output+224, hash+448, 32 );
+
+  return 1;
}

// x16r only
int scanhash_x16r_8way( struct work *work, uint32_t max_nonce,

@@ -492,8 +500,7 @@ int scanhash_x16r_8way( struct work *work, uint32_t max_nonce,
                        n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16r_8way_hash( hash, vdata );
+     if( x16r_8way_hash( hash, vdata, thr_id ) );
      for ( int i = 0; i < 8; i++ )
      if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
      {

@@ -565,7 +572,7 @@ void x16r_4way_prehash( void *vdata, void *pdata )
   }
}

-void x16r_4way_hash_generic( void* output, const void* input )
+int x16r_4way_hash_generic( void* output, const void* input, int thrid )
{
   uint32_t vhash[20*4] __attribute__ ((aligned (128)));
   uint32_t hash0[20] __attribute__ ((aligned (64)));

@@ -794,23 +801,31 @@ void x16r_4way_hash_generic( void* output, const void* input )
         dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
      break;
   }

+  if ( work_restart[thrid].restart ) return 0;
+
   size = 64;
   }
   memcpy( output, hash0, 64 );
   memcpy( output+64, hash1, 64 );
   memcpy( output+128, hash2, 64 );
   memcpy( output+192, hash3, 64 );
+
+  return 1;
}

-void x16r_4way_hash( void* output, const void* input )
+int x16r_4way_hash( void* output, const void* input, int thrid )
{
   uint8_t hash[64*4] __attribute__ ((aligned (64)));
-  x16r_4way_hash_generic( hash, input );
+  if ( !x16r_4way_hash_generic( hash, input, thrid ) )
+     return 0;

   memcpy( output, hash, 32 );
   memcpy( output+32, hash+64, 32 );
   memcpy( output+64, hash+128, 32 );
   memcpy( output+96, hash+192, 32 );
+
+  return 1;
}

int scanhash_x16r_4way( struct work *work, uint32_t max_nonce,

@@ -849,7 +864,7 @@ int scanhash_x16r_4way( struct work *work, uint32_t max_nonce,
        _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16r_4way_hash( hash, vdata );
+     if ( x16r_4way_hash( hash, vdata, thr_id ) );
      for ( int i = 0; i < 4; i++ )
      if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
      {
@@ -131,8 +131,8 @@ typedef union _x16r_8way_context_overlay x16r_8way_context_overlay;
extern __thread x16r_8way_context_overlay x16r_ctx;

void x16r_8way_prehash( void *, void * );
-void x16r_8way_hash_generic( void *, const void * );
-void x16r_8way_hash( void *, const void * );
+int x16r_8way_hash_generic( void *, const void *, int );
+int x16r_8way_hash( void *, const void *, int );
int scanhash_x16r_8way( struct work *, uint32_t ,
                        uint64_t *, struct thr_info * );
extern __thread x16r_8way_context_overlay x16r_ctx;

@@ -166,8 +166,8 @@ typedef union _x16r_4way_context_overlay x16r_4way_context_overlay;
extern __thread x16r_4way_context_overlay x16r_ctx;

void x16r_4way_prehash( void *, void * );
-void x16r_4way_hash_generic( void *, const void * );
-void x16r_4way_hash( void *, const void * );
+int x16r_4way_hash_generic( void *, const void *, int );
+int x16r_4way_hash( void *, const void *, int );
int scanhash_x16r_4way( struct work *, uint32_t,
                        uint64_t *, struct thr_info * );
extern __thread x16r_4way_context_overlay x16r_ctx;

@@ -205,26 +205,26 @@ typedef union _x16r_context_overlay x16r_context_overlay;
extern __thread x16r_context_overlay x16_ctx;

void x16r_prehash( void *, void * );
-void x16r_hash_generic( void *, const void * );
-void x16r_hash( void *, const void * );
+int x16r_hash_generic( void *, const void *, int );
+int x16r_hash( void *, const void *, int );
int scanhash_x16r( struct work *, uint32_t, uint64_t *, struct thr_info * );

// x16Rv2
#if defined(X16RV2_8WAY)

-void x16rv2_8way_hash( void *state, const void *input );
+int x16rv2_8way_hash( void *state, const void *input, int thrid );
int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,
                          uint64_t *hashes_done, struct thr_info *mythr );

#elif defined(X16RV2_4WAY)

-void x16rv2_4way_hash( void *state, const void *input );
+int x16rv2_4way_hash( void *state, const void *input, int thrid );
int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
                          uint64_t *hashes_done, struct thr_info *mythr );

#else

-void x16rv2_hash( void *state, const void *input );
+int x16rv2_hash( void *state, const void *input, int thr_id );
int scanhash_x16rv2( struct work *work, uint32_t max_nonce,
                     uint64_t *hashes_done, struct thr_info *mythr );

@@ -254,21 +254,21 @@ int scanhash_x16rt( struct work *work, uint32_t max_nonce,
// x21s
#if defined(X16R_8WAY)

-void x21s_8way_hash( void *state, const void *input );
+int x21s_8way_hash( void *state, const void *input, int thrid );
int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,
                        uint64_t *hashes_done, struct thr_info *mythr );
bool x21s_8way_thread_init();

#elif defined(X16R_4WAY)

-void x21s_4way_hash( void *state, const void *input );
+int x21s_4way_hash( void *state, const void *input, int thrid );
int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,
                        uint64_t *hashes_done, struct thr_info *mythr );
bool x21s_4way_thread_init();

#else

-void x21s_hash( void *state, const void *input );
+int x21s_hash( void *state, const void *input, int thr_id );
int scanhash_x21s( struct work *work, uint32_t max_nonce,
                   uint64_t *hashes_done, struct thr_info *mythr );
bool x21s_thread_init();
@@ -48,7 +48,7 @@ void x16r_prehash( void *edata, void *pdata )
   }
}

-void x16r_hash_generic( void* output, const void* input )
+int x16r_hash_generic( void* output, const void* input, int thrid )
{
   uint32_t _ALIGN(128) hash[16];
   x16r_context_overlay ctx;

@@ -178,18 +178,24 @@ void x16r_hash_generic( void* output, const void* input )
         SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
      break;
   }

+  if ( work_restart[thrid].restart ) return 0;
+
   in = (void*) hash;
   size = 64;
   }
   memcpy( output, hash, 64 );
+  return true;
}

-void x16r_hash( void* output, const void* input )
+int x16r_hash( void* output, const void* input, int thrid )
{
   uint8_t hash[64] __attribute__ ((aligned (64)));
-  x16r_hash_generic( hash, input );
+  if ( !x16r_hash_generic( hash, input, thrid ) )
+     return 0;

   memcpy( output, hash, 32 );
+  return 1;
}

int scanhash_x16r( struct work *work, uint32_t max_nonce,

@@ -223,8 +229,7 @@ int scanhash_x16r( struct work *work, uint32_t max_nonce,
   do
   {
      edata[19] = nonce;
-     x16r_hash( hash32, edata );
+     if ( x16r_hash( hash32, edata, thr_id ) )
      if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
      {
         pdata[19] = bswap_32( nonce );
@@ -41,8 +41,7 @@ int scanhash_x16rt_8way( struct work *work, uint32_t max_nonce,
                        n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16r_8way_hash( hash, vdata );
+     if ( x16r_8way_hash( hash, vdata, thr_id ) )
      for ( int i = 0; i < 8; i++ )
      if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
      {

@@ -95,7 +94,7 @@ int scanhash_x16rt_4way( struct work *work, uint32_t max_nonce,
        _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16r_4way_hash( hash, vdata );
+     if ( x16r_4way_hash( hash, vdata, thr_id ) )
      for ( int i = 0; i < 4; i++ )
      if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
      {
@@ -36,8 +36,7 @@ int scanhash_x16rt( struct work *work, uint32_t max_nonce,
   do
   {
      edata[19] = nonce;
-     x16r_hash( hash32, edata );
+     if ( x16r_hash( hash32, edata, thr_id ) )
      if ( valid_hash( hash32, ptarget ) && !bench )
      {
         pdata[19] = bswap_32( nonce );
@@ -65,7 +65,7 @@ union _x16rv2_8way_context_overlay
typedef union _x16rv2_8way_context_overlay x16rv2_8way_context_overlay;
static __thread x16rv2_8way_context_overlay x16rv2_ctx;

-void x16rv2_8way_hash( void* output, const void* input )
+int x16rv2_8way_hash( void* output, const void* input, int thrid )
{
   uint32_t vhash[24*8] __attribute__ ((aligned (128)));
   uint32_t hash0[24] __attribute__ ((aligned (64)));

@@ -563,6 +563,9 @@ void x16rv2_8way_hash( void* output, const void* input )
                            hash7, vhash );
      break;
   }

+  if ( work_restart[thrid].restart ) return 0;
+
   size = 64;
   }

@@ -574,6 +577,7 @@ void x16rv2_8way_hash( void* output, const void* input )
   memcpy( output+160, hash5, 32 );
   memcpy( output+192, hash6, 32 );
   memcpy( output+224, hash7, 32 );
+  return 1;
}

int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,

@@ -669,8 +673,7 @@ int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,
                        n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
-     x16rv2_8way_hash( hash, vdata );
+     if ( x16rv2_8way_hash( hash, vdata, thr_id ) )
      for ( int i = 0; i < 8; i++ )
      if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
      {

@@ -718,7 +721,7 @@ inline void padtiger512( uint32_t* hash )
   for ( int i = 6; i < 16; i++ ) hash[i] = 0;
}

-void x16rv2_4way_hash( void* output, const void* input )
+int x16rv2_4way_hash( void* output, const void* input, int thrid )
{
   uint32_t hash0[20] __attribute__ ((aligned (64)));
   uint32_t hash1[20] __attribute__ ((aligned (64)));

@@ -1023,12 +1026,16 @@ void x16rv2_4way_hash( void* output, const void* input )
         dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
      break;
   }

+  if ( work_restart[thrid].restart ) return 0;
+
   size = 64;
   }
   memcpy( output, hash0, 32 );
   memcpy( output+32, hash1, 32 );
   memcpy( output+64, hash2, 32 );
   memcpy( output+96, hash3, 32 );
+  return 1;
}

int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,

@@ -1119,7 +1126,7 @@ int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,

   do
   {
-     x16rv2_4way_hash( hash, vdata );
+     if ( x16rv2_4way_hash( hash, vdata, thr_id ) )
      for ( int i = 0; i < 4; i++ )
      if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
      {
@@ -67,7 +67,7 @@ inline void padtiger512(uint32_t* hash) {
|
||||
for (int i = (24/4); i < (64/4); i++) hash[i] = 0;
|
||||
}
|
||||
|
||||
void x16rv2_hash( void* output, const void* input )
|
||||
int x16rv2_hash( void* output, const void* input, int thrid )
|
||||
{
|
||||
uint32_t _ALIGN(128) hash[16];
|
||||
x16rv2_context_overlay ctx;
|
||||
@@ -180,10 +180,14 @@ void x16rv2_hash( void* output, const void* input )
|
||||
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
|
||||
break;
|
||||
}
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
in = (void*) hash;
|
||||
size = 64;
|
||||
}
|
||||
memcpy(output, hash, 32);
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x16rv2( struct work *work, uint32_t max_nonce,
|
||||
@@ -221,8 +225,7 @@ int scanhash_x16rv2( struct work *work, uint32_t max_nonce,
|
||||
do
|
||||
{
|
||||
edata[19] = nonce;
|
||||
x16rv2_hash( hash32, edata );
|
||||
|
||||
if ( x16rv2_hash( hash32, edata, thr_id ) )
|
||||
if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = bswap_32( nonce );
|
||||
|
@@ -30,7 +30,7 @@ union _x21s_8way_context_overlay

typedef union _x21s_8way_context_overlay x21s_8way_context_overlay;

void x21s_8way_hash( void* output, const void* input )
int x21s_8way_hash( void* output, const void* input, int thrid )
{
uint32_t vhash[16*8] __attribute__ ((aligned (128)));
uint8_t shash[64*8] __attribute__ ((aligned (64)));
@@ -44,7 +44,8 @@ void x21s_8way_hash( void* output, const void* input )
uint32_t *hash7 = (uint32_t*)( shash+448 );
x21s_8way_context_overlay ctx;

x16r_8way_hash_generic( shash, input );
if ( !x16r_8way_hash_generic( shash, input, thrid ) )
return 0;

intrlv_8x32_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7 );
@@ -124,6 +125,8 @@ void x21s_8way_hash( void* output, const void* input )
sha256_8way_init( &ctx.sha256 );
sha256_8way_update( &ctx.sha256, vhash, 64 );
sha256_8way_close( &ctx.sha256, output );

return 1;
}

int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,
@@ -166,8 +169,7 @@ int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
x21s_8way_hash( hash, vdata );

if ( x21s_8way_hash( hash, vdata, thr_id ) )
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( hash7[lane] <= Htarg ) )
{
@@ -215,7 +217,7 @@ union _x21s_4way_context_overlay

typedef union _x21s_4way_context_overlay x21s_4way_context_overlay;

void x21s_4way_hash( void* output, const void* input )
int x21s_4way_hash( void* output, const void* input, int thrid )
{
uint32_t vhash[16*4] __attribute__ ((aligned (64)));
uint8_t shash[64*4] __attribute__ ((aligned (64)));
@@ -225,8 +227,9 @@ void x21s_4way_hash( void* output, const void* input )
uint32_t *hash2 = (uint32_t*)( shash+128 );
uint32_t *hash3 = (uint32_t*)( shash+192 );

x16r_4way_hash_generic( shash, input );

if ( !x16r_4way_hash_generic( shash, input, thrid ) )
return 0;

intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );

haval256_5_4way_init( &ctx.haval );
@@ -299,6 +302,8 @@ void x21s_4way_hash( void* output, const void* input )
dintrlv_4x32( output, output+32, output+64,output+96, vhash, 256 );

#endif

return 1;
}

int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,
@@ -337,7 +342,7 @@ int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
x21s_4way_hash( hash, vdata );
if ( x21s_4way_hash( hash, vdata, thr_id ) )
for ( int i = 0; i < 4; i++ )
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
{

@@ -27,12 +27,13 @@ union _x21s_context_overlay
};
typedef union _x21s_context_overlay x21s_context_overlay;

void x21s_hash( void* output, const void* input )
int x21s_hash( void* output, const void* input, int thrid )
{
uint32_t _ALIGN(128) hash[16];
x21s_context_overlay ctx;

x16r_hash_generic( hash, input );
if ( !x16r_hash_generic( hash, input, thrid ) )
return 0;

sph_haval256_5_init( &ctx.haval );
sph_haval256_5( &ctx.haval, (const void*) hash, 64) ;
@@ -54,6 +55,8 @@ void x21s_hash( void* output, const void* input )
SHA256_Final( (unsigned char*)hash, &ctx.sha256 );

memcpy( output, hash, 32 );

return 1;
}

int scanhash_x21s( struct work *work, uint32_t max_nonce,
@@ -87,8 +90,7 @@ int scanhash_x21s( struct work *work, uint32_t max_nonce,
do
{
edata[19] = nonce;
x21s_hash( hash32, edata );

if ( x21s_hash( hash32, edata, thr_id ) )
if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
{
pdata[19] = bswap_32( nonce );
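The 8-way scan loops above compare one word of each lane (hash7[lane] or hashd7[lane]) against the target before doing the full check. A self-contained sketch of that lane pre-filter; the lane-major layout and stub names here are illustrative, not the real cpuminer-opt interleave helpers:

```c
#include <stdint.h>
#include <stdio.h>

#define LANES 8

/* hash holds 8 words per lane, lane-major in this sketch */
static void scan_lanes_stub( const uint32_t hash[LANES * 8], uint32_t targ32 )
{
   for ( int lane = 0; lane < LANES; lane++ )
   {
      const uint32_t *lane_hash = &hash[ lane * 8 ];
      if ( lane_hash[7] <= targ32 )   /* cheap pre-filter on the high word */
      {
         /* the real code extracts the lane, runs valid_hash() against the
          * full 256-bit target, then calls submit_solution() */
         printf( "lane %d passed the quick test\n", lane );
      }
   }
}
```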
@@ -58,7 +58,7 @@ union _sonoa_8way_context_overlay
|
||||
|
||||
typedef union _sonoa_8way_context_overlay sonoa_8way_context_overlay;
|
||||
|
||||
void sonoa_8way_hash( void *state, const void *input )
|
||||
int sonoa_8way_hash( void *state, const void *input, int thrid )
|
||||
{
|
||||
uint64_t vhash[8*8] __attribute__ ((aligned (128)));
|
||||
uint64_t vhashA[8*8] __attribute__ ((aligned (64)));
|
||||
@@ -186,6 +186,7 @@ void sonoa_8way_hash( void *state, const void *input )
|
||||
|
||||
#endif
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
// 2
|
||||
|
||||
bmw512_8way_full( &ctx.bmw, vhash, vhash, 64 );
|
||||
@@ -301,6 +302,7 @@ void sonoa_8way_hash( void *state, const void *input )
|
||||
hamsi512_8way_update( &ctx.hamsi, vhash, 64 );
|
||||
hamsi512_8way_close( &ctx.hamsi, vhash );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
// 3
|
||||
|
||||
bmw512_8way_full( &ctx.bmw, vhash, vhash, 64 );
|
||||
@@ -430,6 +432,7 @@ void sonoa_8way_hash( void *state, const void *input )
|
||||
sph_fugue512_full( &ctx.fugue, hash6, hash6, 64 );
|
||||
sph_fugue512_full( &ctx.fugue, hash7, hash7, 64 );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
// 4
|
||||
|
||||
intrlv_8x64_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
|
||||
@@ -627,6 +630,7 @@ void sonoa_8way_hash( void *state, const void *input )
|
||||
|
||||
#endif
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
// 5
|
||||
|
||||
bmw512_8way_full( &ctx.bmw, vhash, vhash, 64 );
|
||||
@@ -779,6 +783,7 @@ void sonoa_8way_hash( void *state, const void *input )
|
||||
sph_whirlpool512_full( &ctx.whirlpool, hash6, hash6, 64 );
|
||||
sph_whirlpool512_full( &ctx.whirlpool, hash7, hash7, 64 );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
// 6
|
||||
|
||||
intrlv_8x64_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
|
||||
@@ -947,6 +952,7 @@ void sonoa_8way_hash( void *state, const void *input )
|
||||
sph_whirlpool512_full( &ctx.whirlpool, hash6, hash6, 64 );
|
||||
sph_whirlpool512_full( &ctx.whirlpool, hash7, hash7, 64 );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
// 7
|
||||
|
||||
intrlv_8x64_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
|
||||
@@ -1108,6 +1114,8 @@ void sonoa_8way_hash( void *state, const void *input )
|
||||
haval256_5_8way_init( &ctx.haval );
|
||||
haval256_5_8way_update( &ctx.haval, vhashA, 64 );
|
||||
haval256_5_8way_close( &ctx.haval, state );
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_sonoa_8way( struct work *work, uint32_t max_nonce,
|
||||
@@ -1133,8 +1141,7 @@ int scanhash_sonoa_8way( struct work *work, uint32_t max_nonce,
|
||||
|
||||
do
|
||||
{
|
||||
sonoa_8way_hash( hash, vdata );
|
||||
|
||||
if ( sonoa_8way_hash( hash, vdata, thr_id ) )
|
||||
for ( int lane = 0; lane < 8; lane++ )
|
||||
if unlikely( ( hashd7[ lane ] <= targ32 ) )
|
||||
{
|
||||
@@ -1142,7 +1149,7 @@ int scanhash_sonoa_8way( struct work *work, uint32_t max_nonce,
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) && !opt_benchmark ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n + lane );
|
||||
submit_lane_solution( work, lane_hash, mythr, lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm512_add_epi32( *noncev,
|
||||
@@ -1179,7 +1186,7 @@ union _sonoa_4way_context_overlay
|
||||
|
||||
typedef union _sonoa_4way_context_overlay sonoa_4way_context_overlay;
|
||||
|
||||
void sonoa_4way_hash( void *state, const void *input )
|
||||
int sonoa_4way_hash( void *state, const void *input, int thrid )
|
||||
{
|
||||
uint64_t hash0[8] __attribute__ ((aligned (64)));
|
||||
uint64_t hash1[8] __attribute__ ((aligned (64)));
|
||||
@@ -1243,6 +1250,7 @@ void sonoa_4way_hash( void *state, const void *input )
|
||||
echo_full( &ctx.echo, (BitSequence *)hash3, 512,
|
||||
(const BitSequence *)hash3, 64 );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
// 2
|
||||
|
||||
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
|
||||
@@ -1302,6 +1310,7 @@ void sonoa_4way_hash( void *state, const void *input )
|
||||
hamsi512_4way_update( &ctx.hamsi, vhash, 64 );
|
||||
hamsi512_4way_close( &ctx.hamsi, vhash );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
// 3
|
||||
|
||||
bmw512_4way_init( &ctx.bmw );
|
||||
@@ -1366,6 +1375,7 @@ void sonoa_4way_hash( void *state, const void *input )
|
||||
sph_fugue512_full( &ctx.fugue, hash2, hash2, 64 );
|
||||
sph_fugue512_full( &ctx.fugue, hash3, hash3, 64 );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
// 4
|
||||
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
|
||||
|
||||
@@ -1462,6 +1472,7 @@ void sonoa_4way_hash( void *state, const void *input )
|
||||
shavite512_2way_init( &ctx.shavite );
|
||||
shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
// 5
|
||||
rintrlv_2x128_4x64( vhash, vhashA, vhashB, 512 );
|
||||
|
||||
@@ -1546,6 +1557,7 @@ void sonoa_4way_hash( void *state, const void *input )
|
||||
sph_whirlpool512_full( &ctx.whirlpool, hash2, hash2, 64 );
|
||||
sph_whirlpool512_full( &ctx.whirlpool, hash3, hash3, 64 );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
// 6
|
||||
|
||||
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
|
||||
@@ -1638,6 +1650,7 @@ void sonoa_4way_hash( void *state, const void *input )
|
||||
sph_whirlpool512_full( &ctx.whirlpool, hash2, hash2, 64 );
|
||||
sph_whirlpool512_full( &ctx.whirlpool, hash3, hash3, 64 );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
// 7
|
||||
|
||||
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
|
||||
@@ -1728,6 +1741,8 @@ void sonoa_4way_hash( void *state, const void *input )
|
||||
haval256_5_4way_init( &ctx.haval );
|
||||
haval256_5_4way_update( &ctx.haval, vhashB, 64 );
|
||||
haval256_5_4way_close( &ctx.haval, state );
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_sonoa_4way( struct work *work, const uint32_t max_nonce,
|
||||
@@ -1752,8 +1767,7 @@ int scanhash_sonoa_4way( struct work *work, const uint32_t max_nonce,
|
||||
|
||||
do
|
||||
{
|
||||
sonoa_4way_hash( hash, vdata );
|
||||
|
||||
if ( sonoa_4way_hash( hash, vdata, thr_id ) )
|
||||
for ( int lane = 0; lane < 4; lane++ )
|
||||
if ( unlikely( hashd7[ lane ] <= targ32 ) )
|
||||
{
|
||||
@@ -1761,7 +1775,7 @@ int scanhash_sonoa_4way( struct work *work, const uint32_t max_nonce,
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) && !opt_benchmark ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n + lane );
|
||||
submit_lane_solution( work, lane_hash, mythr, lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm256_add_epi32( *noncev,
|
||||
|
@@ -4,14 +4,14 @@ bool register_sonoa_algo( algo_gate_t* gate )
{
#if defined (SONOA_8WAY)
gate->scanhash = (void*)&scanhash_sonoa_8way;
gate->hash = (void*)&sonoa_8way_hash;
// gate->hash = (void*)&sonoa_8way_hash;
#elif defined (SONOA_4WAY)
gate->scanhash = (void*)&scanhash_sonoa_4way;
gate->hash = (void*)&sonoa_4way_hash;
// gate->hash = (void*)&sonoa_4way_hash;
#else
init_sonoa_ctx();
gate->scanhash = (void*)&scanhash_sonoa;
gate->hash = (void*)&sonoa_hash;
// gate->hash = (void*)&sonoa_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT | VAES_OPT;
return true;
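The sonoa-gate.c hunk above now wires up only the scanhash entry point for each SIMD width and comments out the gate->hash assignment, since scanhash calls the hash routine directly. A stripped-down sketch of what such a register function does; gate_stub_t is an illustrative stand-in for algo_gate_t, not the real struct:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

typedef struct
{
   int (*scanhash)( void *work, uint32_t max_nonce );
   int (*hash)( void *out, const void *in, int thrid );
   int optimizations;
} gate_stub_t;

static int scanhash_stub( void *work, uint32_t max_nonce )
{
   (void)work; (void)max_nonce;
   return 0;
}

static bool register_stub_algo( gate_stub_t *gate )
{
   gate->scanhash = scanhash_stub;
   gate->hash     = NULL;   /* no longer registered; scanhash drives the hash itself */
   gate->optimizations = 0;
   return true;
}
```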
@@ -14,19 +14,19 @@ bool register_sonoa_algo( algo_gate_t* gate );

#if defined(SONOA_8WAY)

void sonoa_8way_hash( void *state, const void *input );
int sonoa_8way_hash( void *state, const void *input, int thrid );
int scanhash_sonoa_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );

#elif defined(SONOA_4WAY)

void sonoa_4way_hash( void *state, const void *input );
int sonoa_4way_hash( void *state, const void *input, int thrid );
int scanhash_sonoa_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );

#else

void sonoa_hash( void *state, const void *input );
int sonoa_hash( void *state, const void *input, int thrid );
int scanhash_sonoa( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void init_sonoa_ctx();
573 algo/x17/sonoa.c
@@ -83,27 +83,27 @@ void init_sonoa_ctx()
|
||||
sph_haval256_5_init(&sonoa_ctx.haval);
|
||||
};
|
||||
|
||||
void sonoa_hash( void *state, const void *input )
|
||||
int sonoa_hash( void *state, const void *input, int thrid )
|
||||
{
|
||||
uint8_t hash[128] __attribute__ ((aligned (64)));
|
||||
sonoa_ctx_holder ctx __attribute__ ((aligned (64)));
|
||||
memcpy( &ctx, &sonoa_ctx, sizeof(sonoa_ctx) );
|
||||
sonoa_ctx_holder ctx __attribute__ ((aligned (64)));
|
||||
memcpy( &ctx, &sonoa_ctx, sizeof(sonoa_ctx) );
|
||||
|
||||
sph_blake512(&ctx.blake, input, 80);
|
||||
sph_blake512(&ctx.blake, input, 80);
|
||||
sph_blake512_close(&ctx.blake, hash);
|
||||
|
||||
sph_bmw512(&ctx.bmw, hash, 64);
|
||||
sph_bmw512_close(&ctx.bmw, hash);
|
||||
|
||||
#if defined(__AES__)
|
||||
update_and_final_groestl( &ctx.groestl, (char*)hash,
|
||||
(const char*)hash, 512 );
|
||||
update_and_final_groestl( &ctx.groestl, (char*)hash,
|
||||
(const char*)hash, 512 );
|
||||
#else
|
||||
sph_groestl512(&ctx.groestl, hash, 64);
|
||||
sph_groestl512_close(&ctx.groestl, hash);
|
||||
sph_groestl512(&ctx.groestl, hash, 64);
|
||||
sph_groestl512_close(&ctx.groestl, hash);
|
||||
#endif
|
||||
|
||||
sph_skein512(&ctx.skein, hash, 64);
|
||||
sph_skein512(&ctx.skein, hash, 64);
|
||||
sph_skein512_close(&ctx.skein, hash);
|
||||
|
||||
sph_jh512(&ctx.jh, hash, 64);
|
||||
@@ -112,454 +112,461 @@ void sonoa_hash( void *state, const void *input )
|
||||
sph_keccak512(&ctx.keccak, hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
(const BitSequence*)hash, 64 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
(const BitSequence*)hash, 64 );
|
||||
|
||||
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
|
||||
(const byte*)hash, 64 );
|
||||
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
|
||||
(const byte*)hash, 64 );
|
||||
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
|
||||
update_final_sd( &ctx.simd, (BitSequence *)hash,
|
||||
update_final_sd( &ctx.simd, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
|
||||
#if defined(__AES__)
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
#else
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
#endif
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
//
|
||||
|
||||
sph_bmw512_init( &ctx.bmw);
|
||||
sph_bmw512(&ctx.bmw, hash, 64);
|
||||
sph_bmw512_close(&ctx.bmw, hash);
|
||||
sph_bmw512_init( &ctx.bmw);
|
||||
sph_bmw512(&ctx.bmw, hash, 64);
|
||||
sph_bmw512_close(&ctx.bmw, hash);
|
||||
|
||||
#if defined(__AES__)
|
||||
init_groestl( &ctx.groestl, 64 );
|
||||
update_and_final_groestl( &ctx.groestl, (char*)hash,
|
||||
(const char*)hash, 512 );
|
||||
init_groestl( &ctx.groestl, 64 );
|
||||
update_and_final_groestl( &ctx.groestl, (char*)hash,
|
||||
(const char*)hash, 512 );
|
||||
#else
|
||||
sph_groestl512_init(&ctx.groestl );
|
||||
sph_groestl512(&ctx.groestl, hash, 64);
|
||||
sph_groestl512_close(&ctx.groestl, hash);
|
||||
sph_groestl512_init(&ctx.groestl );
|
||||
sph_groestl512(&ctx.groestl, hash, 64);
|
||||
sph_groestl512_close(&ctx.groestl, hash);
|
||||
#endif
|
||||
|
||||
sph_skein512_init( &ctx.skein);
|
||||
sph_skein512(&ctx.skein, hash, 64);
|
||||
sph_skein512_close(&ctx.skein, hash);
|
||||
sph_skein512_init( &ctx.skein);
|
||||
sph_skein512(&ctx.skein, hash, 64);
|
||||
sph_skein512_close(&ctx.skein, hash);
|
||||
|
||||
sph_jh512_init( &ctx.jh);
|
||||
sph_jh512(&ctx.jh, hash, 64);
|
||||
sph_jh512_close(&ctx.jh, hash);
|
||||
sph_jh512_init( &ctx.jh);
|
||||
sph_jh512(&ctx.jh, hash, 64);
|
||||
sph_jh512_close(&ctx.jh, hash);
|
||||
|
||||
sph_keccak512_init( &ctx.keccak );
|
||||
sph_keccak512(&ctx.keccak, hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
sph_keccak512_init( &ctx.keccak );
|
||||
sph_keccak512(&ctx.keccak, hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
(const BitSequence*)hash, 64 );
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
(const BitSequence*)hash, 64 );
|
||||
|
||||
cubehashInit( &ctx.cubehash, 512, 16, 32 );
|
||||
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
|
||||
(const byte*)hash, 64 );
|
||||
cubehashInit( &ctx.cubehash, 512, 16, 32 );
|
||||
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
|
||||
(const byte*)hash, 64 );
|
||||
|
||||
sph_shavite512_init( &ctx.shavite );
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
sph_shavite512_init( &ctx.shavite );
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
|
||||
init_sd( &ctx.simd, 512 );
|
||||
update_final_sd( &ctx.simd, (BitSequence *)hash,
|
||||
init_sd( &ctx.simd, 512 );
|
||||
update_final_sd( &ctx.simd, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
|
||||
#if defined(__AES__)
|
||||
init_echo( &ctx.echo, 512 );
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
init_echo( &ctx.echo, 512 );
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
#else
|
||||
sph_echo512_init( &ctx.echo );
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
sph_echo512_init( &ctx.echo );
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
#endif
|
||||
|
||||
sph_hamsi512(&ctx.hamsi, hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
sph_hamsi512(&ctx.hamsi, hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
//
|
||||
|
||||
sph_bmw512_init( &ctx.bmw);
|
||||
sph_bmw512(&ctx.bmw, hash, 64);
|
||||
sph_bmw512_close(&ctx.bmw, hash);
|
||||
sph_bmw512_init( &ctx.bmw);
|
||||
sph_bmw512(&ctx.bmw, hash, 64);
|
||||
sph_bmw512_close(&ctx.bmw, hash);
|
||||
|
||||
#if defined(__AES__)
|
||||
init_groestl( &ctx.groestl, 64 );
|
||||
update_and_final_groestl( &ctx.groestl, (char*)hash,
|
||||
(const char*)hash, 512 );
|
||||
init_groestl( &ctx.groestl, 64 );
|
||||
update_and_final_groestl( &ctx.groestl, (char*)hash,
|
||||
(const char*)hash, 512 );
|
||||
#else
|
||||
sph_groestl512_init(&ctx.groestl );
|
||||
sph_groestl512(&ctx.groestl, hash, 64);
|
||||
sph_groestl512_close(&ctx.groestl, hash);
|
||||
sph_groestl512_init(&ctx.groestl );
|
||||
sph_groestl512(&ctx.groestl, hash, 64);
|
||||
sph_groestl512_close(&ctx.groestl, hash);
|
||||
#endif
|
||||
|
||||
sph_skein512_init( &ctx.skein);
|
||||
sph_skein512(&ctx.skein, hash, 64);
|
||||
sph_skein512_close(&ctx.skein, hash);
|
||||
sph_skein512_init( &ctx.skein);
|
||||
sph_skein512(&ctx.skein, hash, 64);
|
||||
sph_skein512_close(&ctx.skein, hash);
|
||||
|
||||
sph_jh512_init( &ctx.jh);
|
||||
sph_jh512(&ctx.jh, hash, 64);
|
||||
sph_jh512_close(&ctx.jh, hash);
|
||||
sph_jh512_init( &ctx.jh);
|
||||
sph_jh512(&ctx.jh, hash, 64);
|
||||
sph_jh512_close(&ctx.jh, hash);
|
||||
|
||||
sph_keccak512_init( &ctx.keccak );
|
||||
sph_keccak512(&ctx.keccak, hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
sph_keccak512_init( &ctx.keccak );
|
||||
sph_keccak512(&ctx.keccak, hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
(const BitSequence*)hash, 64 );
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
(const BitSequence*)hash, 64 );
|
||||
|
||||
cubehashInit( &ctx.cubehash, 512, 16, 32 );
|
||||
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
|
||||
(const byte*)hash, 64 );
|
||||
cubehashInit( &ctx.cubehash, 512, 16, 32 );
|
||||
cubehashUpdateDigest( &ctx.cubehash, (byte*)hash,
|
||||
(const byte*)hash, 64 );
|
||||
|
||||
sph_shavite512_init( &ctx.shavite );
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
sph_shavite512_init( &ctx.shavite );
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
|
||||
init_sd( &ctx.simd, 512 );
|
||||
update_final_sd( &ctx.simd, (BitSequence *)hash,
|
||||
init_sd( &ctx.simd, 512 );
|
||||
update_final_sd( &ctx.simd, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
|
||||
#if defined(__AES__)
|
||||
init_echo( &ctx.echo, 512 );
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
init_echo( &ctx.echo, 512 );
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
#else
|
||||
sph_echo512_init( &ctx.echo );
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
sph_echo512_init( &ctx.echo );
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
#endif
|
||||
|
||||
sph_hamsi512_init( &ctx.hamsi );
|
||||
sph_hamsi512(&ctx.hamsi, hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
sph_hamsi512_init( &ctx.hamsi );
|
||||
sph_hamsi512(&ctx.hamsi, hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
|
||||
sph_fugue512(&ctx.fugue, hash, 64);
|
||||
sph_fugue512_close(&ctx.fugue, hash);
|
||||
sph_fugue512(&ctx.fugue, hash, 64);
|
||||
sph_fugue512_close(&ctx.fugue, hash);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
//
|
||||
|
||||
sph_bmw512_init( &ctx.bmw);
|
||||
sph_bmw512(&ctx.bmw, hash, 64);
|
||||
sph_bmw512_close(&ctx.bmw, hash);
|
||||
sph_bmw512_init( &ctx.bmw);
|
||||
sph_bmw512(&ctx.bmw, hash, 64);
|
||||
sph_bmw512_close(&ctx.bmw, hash);
|
||||
|
||||
#if defined(__AES__)
|
||||
init_groestl( &ctx.groestl, 64 );
|
||||
update_and_final_groestl( &ctx.groestl, (char*)hash,
|
||||
init_groestl( &ctx.groestl, 64 );
|
||||
update_and_final_groestl( &ctx.groestl, (char*)hash,
|
||||
(const char*)hash, 512 );
|
||||
#else
|
||||
sph_groestl512_init(&ctx.groestl );
|
||||
sph_groestl512(&ctx.groestl, hash, 64);
|
||||
sph_groestl512_close(&ctx.groestl, hash);
|
||||
sph_groestl512_init(&ctx.groestl );
|
||||
sph_groestl512(&ctx.groestl, hash, 64);
|
||||
sph_groestl512_close(&ctx.groestl, hash);
|
||||
#endif
|
||||
|
||||
sph_skein512_init( &ctx.skein);
|
||||
sph_skein512(&ctx.skein, hash, 64);
|
||||
sph_skein512_close(&ctx.skein, hash);
|
||||
sph_skein512_init( &ctx.skein);
|
||||
sph_skein512(&ctx.skein, hash, 64);
|
||||
sph_skein512_close(&ctx.skein, hash);
|
||||
|
||||
sph_jh512_init( &ctx.jh);
|
||||
sph_jh512(&ctx.jh, hash, 64);
|
||||
sph_jh512_close(&ctx.jh, hash);
|
||||
sph_jh512_init( &ctx.jh);
|
||||
sph_jh512(&ctx.jh, hash, 64);
|
||||
sph_jh512_close(&ctx.jh, hash);
|
||||
|
||||
sph_keccak512_init( &ctx.keccak );
|
||||
sph_keccak512(&ctx.keccak, hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
sph_keccak512_init( &ctx.keccak );
|
||||
sph_keccak512(&ctx.keccak, hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
(const BitSequence*)hash, 64 );
|
||||
|
||||
cubehashInit( &ctx.cubehash, 512, 16, 32 );
|
||||
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
|
||||
cubehashInit( &ctx.cubehash, 512, 16, 32 );
|
||||
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
|
||||
(const byte*)hash, 64 );
|
||||
|
||||
sph_shavite512_init( &ctx.shavite );
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
sph_shavite512_init( &ctx.shavite );
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
|
||||
init_sd( &ctx.simd, 512 );
|
||||
update_final_sd( &ctx.simd, (BitSequence *)hash,
|
||||
init_sd( &ctx.simd, 512 );
|
||||
update_final_sd( &ctx.simd, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
|
||||
#if defined(__AES__)
|
||||
init_echo( &ctx.echo, 512 );
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
init_echo( &ctx.echo, 512 );
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
#else
|
||||
sph_echo512_init( &ctx.echo );
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
sph_echo512_init( &ctx.echo );
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
#endif
|
||||
|
||||
sph_hamsi512_init( &ctx.hamsi );
|
||||
sph_hamsi512(&ctx.hamsi, hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
sph_hamsi512_init( &ctx.hamsi );
|
||||
sph_hamsi512(&ctx.hamsi, hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
|
||||
sph_fugue512_init( &ctx.fugue );
|
||||
sph_fugue512(&ctx.fugue, hash, 64);
|
||||
sph_fugue512_close(&ctx.fugue, hash);
|
||||
sph_fugue512_init( &ctx.fugue );
|
||||
sph_fugue512(&ctx.fugue, hash, 64);
|
||||
sph_fugue512_close(&ctx.fugue, hash);
|
||||
|
||||
sph_shabal512(&ctx.shabal, hash, 64);
|
||||
sph_shabal512_close(&ctx.shabal, hash);
|
||||
sph_shabal512(&ctx.shabal, hash, 64);
|
||||
sph_shabal512_close(&ctx.shabal, hash);
|
||||
|
||||
sph_hamsi512_init( &ctx.hamsi );
|
||||
sph_hamsi512(&ctx.hamsi, hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
sph_hamsi512_init( &ctx.hamsi );
|
||||
sph_hamsi512(&ctx.hamsi, hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
|
||||
#if defined(__AES__)
|
||||
init_echo( &ctx.echo, 512 );
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
init_echo( &ctx.echo, 512 );
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
#else
|
||||
sph_echo512_init( &ctx.echo );
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
sph_echo512_init( &ctx.echo );
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
#endif
|
||||
|
||||
sph_shavite512_init( &ctx.shavite );
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
sph_shavite512_init( &ctx.shavite );
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
//
|
||||
|
||||
sph_bmw512_init( &ctx.bmw);
|
||||
sph_bmw512(&ctx.bmw, hash, 64);
|
||||
sph_bmw512_close(&ctx.bmw, hash);
|
||||
sph_bmw512_init( &ctx.bmw);
|
||||
sph_bmw512(&ctx.bmw, hash, 64);
|
||||
sph_bmw512_close(&ctx.bmw, hash);
|
||||
|
||||
sph_shabal512_init( &ctx.shabal );
|
||||
sph_shabal512_init( &ctx.shabal );
|
||||
sph_shabal512(&ctx.shabal, hash, 64);
|
||||
sph_shabal512_close(&ctx.shabal, hash);
|
||||
sph_shabal512_close(&ctx.shabal, hash);
|
||||
|
||||
#if defined(__AES__)
|
||||
init_groestl( &ctx.groestl, 64 );
|
||||
update_and_final_groestl( &ctx.groestl, (char*)hash,
|
||||
init_groestl( &ctx.groestl, 64 );
|
||||
update_and_final_groestl( &ctx.groestl, (char*)hash,
|
||||
(const char*)hash, 512 );
|
||||
#else
|
||||
sph_groestl512_init(&ctx.groestl );
|
||||
sph_groestl512(&ctx.groestl, hash, 64);
|
||||
sph_groestl512_close(&ctx.groestl, hash);
|
||||
sph_groestl512_init(&ctx.groestl );
|
||||
sph_groestl512(&ctx.groestl, hash, 64);
|
||||
sph_groestl512_close(&ctx.groestl, hash);
|
||||
#endif
|
||||
|
||||
sph_skein512_init( &ctx.skein);
|
||||
sph_skein512(&ctx.skein, hash, 64);
|
||||
sph_skein512_close(&ctx.skein, hash);
|
||||
sph_skein512_init( &ctx.skein);
|
||||
sph_skein512(&ctx.skein, hash, 64);
|
||||
sph_skein512_close(&ctx.skein, hash);
|
||||
|
||||
sph_jh512_init( &ctx.jh);
|
||||
sph_jh512(&ctx.jh, hash, 64);
|
||||
sph_jh512_close(&ctx.jh, hash);
|
||||
sph_jh512_init( &ctx.jh);
|
||||
sph_jh512(&ctx.jh, hash, 64);
|
||||
sph_jh512_close(&ctx.jh, hash);
|
||||
|
||||
sph_keccak512_init( &ctx.keccak );
|
||||
sph_keccak512(&ctx.keccak, hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
sph_keccak512_init( &ctx.keccak );
|
||||
sph_keccak512(&ctx.keccak, hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
(const BitSequence*)hash, 64 );
|
||||
|
||||
cubehashInit( &ctx.cubehash, 512, 16, 32 );
|
||||
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
|
||||
cubehashInit( &ctx.cubehash, 512, 16, 32 );
|
||||
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
|
||||
(const byte*)hash, 64 );
|
||||
|
||||
sph_shavite512_init( &ctx.shavite );
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
sph_shavite512_init( &ctx.shavite );
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
|
||||
init_sd( &ctx.simd, 512 );
|
||||
update_final_sd( &ctx.simd, (BitSequence *)hash,
|
||||
init_sd( &ctx.simd, 512 );
|
||||
update_final_sd( &ctx.simd, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
|
||||
#if defined(__AES__)
|
||||
init_echo( &ctx.echo, 512 );
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
init_echo( &ctx.echo, 512 );
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
#else
|
||||
sph_echo512_init( &ctx.echo );
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
sph_echo512_init( &ctx.echo );
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
#endif
|
||||
|
||||
sph_hamsi512_init( &ctx.hamsi );
|
||||
sph_hamsi512(&ctx.hamsi, hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
sph_hamsi512_init( &ctx.hamsi );
|
||||
sph_hamsi512(&ctx.hamsi, hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
|
||||
sph_fugue512_init( &ctx.fugue );
|
||||
sph_fugue512(&ctx.fugue, hash, 64);
|
||||
sph_fugue512_close(&ctx.fugue, hash);
|
||||
sph_fugue512_init( &ctx.fugue );
|
||||
sph_fugue512(&ctx.fugue, hash, 64);
|
||||
sph_fugue512_close(&ctx.fugue, hash);
|
||||
|
||||
sph_shabal512_init( &ctx.shabal );
|
||||
sph_shabal512(&ctx.shabal, hash, 64);
|
||||
sph_shabal512_close(&ctx.shabal, hash);
|
||||
sph_shabal512_init( &ctx.shabal );
|
||||
sph_shabal512(&ctx.shabal, hash, 64);
|
||||
sph_shabal512_close(&ctx.shabal, hash);
|
||||
|
||||
sph_whirlpool(&ctx.whirlpool, hash, 64);
|
||||
sph_whirlpool_close(&ctx.whirlpool, hash);
|
||||
sph_whirlpool(&ctx.whirlpool, hash, 64);
|
||||
sph_whirlpool_close(&ctx.whirlpool, hash);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
//
|
||||
sph_bmw512_init( &ctx.bmw);
|
||||
sph_bmw512(&ctx.bmw, hash, 64);
|
||||
sph_bmw512_close(&ctx.bmw, hash);
|
||||
sph_bmw512_init( &ctx.bmw);
|
||||
sph_bmw512(&ctx.bmw, hash, 64);
|
||||
sph_bmw512_close(&ctx.bmw, hash);
|
||||
|
||||
#if defined(__AES__)
|
||||
init_groestl( &ctx.groestl, 64 );
|
||||
update_and_final_groestl( &ctx.groestl, (char*)hash,
|
||||
init_groestl( &ctx.groestl, 64 );
|
||||
update_and_final_groestl( &ctx.groestl, (char*)hash,
|
||||
(const char*)hash, 512 );
|
||||
#else
|
||||
sph_groestl512_init(&ctx.groestl );
|
||||
sph_groestl512(&ctx.groestl, hash, 64);
|
||||
sph_groestl512_close(&ctx.groestl, hash);
|
||||
sph_groestl512_init(&ctx.groestl );
|
||||
sph_groestl512(&ctx.groestl, hash, 64);
|
||||
sph_groestl512_close(&ctx.groestl, hash);
|
||||
#endif
|
||||
|
||||
sph_skein512_init( &ctx.skein);
|
||||
sph_skein512(&ctx.skein, hash, 64);
|
||||
sph_skein512_close(&ctx.skein, hash);
|
||||
sph_skein512_init( &ctx.skein);
|
||||
sph_skein512(&ctx.skein, hash, 64);
|
||||
sph_skein512_close(&ctx.skein, hash);
|
||||
|
||||
sph_jh512_init( &ctx.jh);
|
||||
sph_jh512(&ctx.jh, hash, 64);
|
||||
sph_jh512_close(&ctx.jh, hash);
|
||||
sph_jh512_init( &ctx.jh);
|
||||
sph_jh512(&ctx.jh, hash, 64);
|
||||
sph_jh512_close(&ctx.jh, hash);
|
||||
|
||||
sph_keccak512_init( &ctx.keccak );
|
||||
sph_keccak512(&ctx.keccak, hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
sph_keccak512_init( &ctx.keccak );
|
||||
sph_keccak512(&ctx.keccak, hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
(const BitSequence*)hash, 64 );
|
||||
|
||||
cubehashInit( &ctx.cubehash, 512, 16, 32 );
|
||||
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
|
||||
cubehashInit( &ctx.cubehash, 512, 16, 32 );
|
||||
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
|
||||
(const byte*)hash, 64 );
|
||||
|
||||
sph_shavite512_init( &ctx.shavite );
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
sph_shavite512_init( &ctx.shavite );
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
|
||||
init_sd( &ctx.simd, 512 );
|
||||
update_final_sd( &ctx.simd, (BitSequence *)hash,
|
||||
init_sd( &ctx.simd, 512 );
|
||||
update_final_sd( &ctx.simd, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
|
||||
#if defined(__AES__)
|
||||
init_echo( &ctx.echo, 512 );
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
init_echo( &ctx.echo, 512 );
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
#else
|
||||
sph_echo512_init( &ctx.echo );
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
sph_echo512_init( &ctx.echo );
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
#endif
|
||||
|
||||
sph_hamsi512_init( &ctx.hamsi );
|
||||
sph_hamsi512(&ctx.hamsi, hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
sph_hamsi512_init( &ctx.hamsi );
|
||||
sph_hamsi512(&ctx.hamsi, hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
|
||||
sph_fugue512_init( &ctx.fugue );
|
||||
sph_fugue512(&ctx.fugue, hash, 64);
|
||||
sph_fugue512_close(&ctx.fugue, hash);
|
||||
sph_fugue512_init( &ctx.fugue );
|
||||
sph_fugue512(&ctx.fugue, hash, 64);
|
||||
sph_fugue512_close(&ctx.fugue, hash);
|
||||
|
||||
sph_shabal512_init( &ctx.shabal );
|
||||
sph_shabal512(&ctx.shabal, hash, 64);
|
||||
sph_shabal512_close(&ctx.shabal, hash);
|
||||
sph_shabal512_init( &ctx.shabal );
|
||||
sph_shabal512(&ctx.shabal, hash, 64);
|
||||
sph_shabal512_close(&ctx.shabal, hash);
|
||||
|
||||
sph_whirlpool_init( &ctx.whirlpool );
|
||||
sph_whirlpool(&ctx.whirlpool, hash, 64);
|
||||
sph_whirlpool_close(&ctx.whirlpool, hash);
|
||||
sph_whirlpool_init( &ctx.whirlpool );
|
||||
sph_whirlpool(&ctx.whirlpool, hash, 64);
|
||||
sph_whirlpool_close(&ctx.whirlpool, hash);
|
||||
|
||||
SHA512_Update( &ctx.sha512, hash, 64 );
|
||||
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
|
||||
SHA512_Update( &ctx.sha512, hash, 64 );
|
||||
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
|
||||
|
||||
sph_whirlpool_init( &ctx.whirlpool );
|
||||
sph_whirlpool(&ctx.whirlpool, hash, 64);
|
||||
sph_whirlpool_close(&ctx.whirlpool, hash);
|
||||
sph_whirlpool_init( &ctx.whirlpool );
|
||||
sph_whirlpool(&ctx.whirlpool, hash, 64);
|
||||
sph_whirlpool_close(&ctx.whirlpool, hash);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
//
|
||||
|
||||
sph_bmw512_init( &ctx.bmw);
|
||||
sph_bmw512(&ctx.bmw, hash, 64);
|
||||
sph_bmw512_close(&ctx.bmw, hash);
|
||||
sph_bmw512_init( &ctx.bmw);
|
||||
sph_bmw512(&ctx.bmw, hash, 64);
|
||||
sph_bmw512_close(&ctx.bmw, hash);
|
||||
|
||||
#if defined(__AES__)
|
||||
init_groestl( &ctx.groestl, 64 );
|
||||
update_and_final_groestl( &ctx.groestl, (char*)hash,
|
||||
init_groestl( &ctx.groestl, 64 );
|
||||
update_and_final_groestl( &ctx.groestl, (char*)hash,
|
||||
(const char*)hash, 512 );
|
||||
#else
|
||||
sph_groestl512_init(&ctx.groestl );
|
||||
sph_groestl512(&ctx.groestl, hash, 64);
|
||||
sph_groestl512_close(&ctx.groestl, hash);
|
||||
sph_groestl512_init(&ctx.groestl );
|
||||
sph_groestl512(&ctx.groestl, hash, 64);
|
||||
sph_groestl512_close(&ctx.groestl, hash);
|
||||
#endif
|
||||
|
||||
sph_skein512_init( &ctx.skein);
|
||||
sph_skein512(&ctx.skein, hash, 64);
|
||||
sph_skein512_close(&ctx.skein, hash);
|
||||
sph_skein512_init( &ctx.skein);
|
||||
sph_skein512(&ctx.skein, hash, 64);
|
||||
sph_skein512_close(&ctx.skein, hash);
|
||||
|
||||
sph_jh512_init( &ctx.jh);
|
||||
sph_jh512(&ctx.jh, hash, 64);
|
||||
sph_jh512_close(&ctx.jh, hash);
|
||||
sph_jh512_init( &ctx.jh);
|
||||
sph_jh512(&ctx.jh, hash, 64);
|
||||
sph_jh512_close(&ctx.jh, hash);
|
||||
|
||||
sph_keccak512_init( &ctx.keccak );
|
||||
sph_keccak512(&ctx.keccak, hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
sph_keccak512_init( &ctx.keccak );
|
||||
sph_keccak512(&ctx.keccak, hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
(const BitSequence*)hash, 64 );
|
||||
|
||||
cubehashInit( &ctx.cubehash, 512, 16, 32 );
|
||||
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
|
||||
cubehashInit( &ctx.cubehash, 512, 16, 32 );
|
||||
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
|
||||
(const byte*)hash, 64 );
|
||||
|
||||
sph_shavite512_init( &ctx.shavite );
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
sph_shavite512_init( &ctx.shavite );
|
||||
sph_shavite512(&ctx.shavite, hash, 64);
|
||||
sph_shavite512_close(&ctx.shavite, hash);
|
||||
|
||||
init_sd( &ctx.simd, 512 );
|
||||
update_final_sd( &ctx.simd, (BitSequence *)hash,
|
||||
init_sd( &ctx.simd, 512 );
|
||||
update_final_sd( &ctx.simd, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
|
||||
#if defined(__AES__)
|
||||
init_echo( &ctx.echo, 512 );
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
init_echo( &ctx.echo, 512 );
|
||||
update_final_echo ( &ctx.echo, (BitSequence *)hash,
|
||||
(const BitSequence *)hash, 512 );
|
||||
#else
|
||||
sph_echo512_init( &ctx.echo );
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
sph_echo512_init( &ctx.echo );
|
||||
sph_echo512(&ctx.echo, hash, 64);
|
||||
sph_echo512_close(&ctx.echo, hash);
|
||||
#endif
|
||||
|
||||
sph_hamsi512_init( &ctx.hamsi );
|
||||
sph_hamsi512(&ctx.hamsi, hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
sph_hamsi512_init( &ctx.hamsi );
|
||||
sph_hamsi512(&ctx.hamsi, hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
|
||||
sph_fugue512_init( &ctx.fugue );
|
||||
sph_fugue512(&ctx.fugue, hash, 64);
|
||||
sph_fugue512_close(&ctx.fugue, hash);
|
||||
sph_fugue512_init( &ctx.fugue );
|
||||
sph_fugue512(&ctx.fugue, hash, 64);
|
||||
sph_fugue512_close(&ctx.fugue, hash);
|
||||
|
||||
sph_shabal512_init( &ctx.shabal );
|
||||
sph_shabal512(&ctx.shabal, hash, 64);
|
||||
sph_shabal512_close(&ctx.shabal, hash);
|
||||
sph_shabal512_init( &ctx.shabal );
|
||||
sph_shabal512(&ctx.shabal, hash, 64);
|
||||
sph_shabal512_close(&ctx.shabal, hash);
|
||||
|
||||
sph_whirlpool_init( &ctx.whirlpool );
|
||||
sph_whirlpool(&ctx.whirlpool, hash, 64);
|
||||
sph_whirlpool_close(&ctx.whirlpool, hash);
|
||||
sph_whirlpool_init( &ctx.whirlpool );
|
||||
sph_whirlpool(&ctx.whirlpool, hash, 64);
|
||||
sph_whirlpool_close(&ctx.whirlpool, hash);
|
||||
|
||||
SHA512_Init( &ctx.sha512 );
|
||||
SHA512_Update( &ctx.sha512, hash, 64 );
|
||||
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
|
||||
SHA512_Init( &ctx.sha512 );
|
||||
SHA512_Update( &ctx.sha512, hash, 64 );
|
||||
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
|
||||
|
||||
sph_haval256_5(&ctx.haval,(const void*) hash, 64);
|
||||
sph_haval256_5_close(&ctx.haval, hash);
|
||||
sph_haval256_5(&ctx.haval,(const void*) hash, 64);
|
||||
sph_haval256_5_close(&ctx.haval, hash);
|
||||
|
||||
memcpy(state, hash, 32);
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_sonoa( struct work *work, uint32_t max_nonce,
|
||||
@@ -579,7 +586,7 @@ int scanhash_sonoa( struct work *work, uint32_t max_nonce,
|
||||
do
|
||||
{
|
||||
edata[19] = n;
|
||||
sonoa_hash( hash64, edata );
|
||||
if ( sonoa_hash( hash64, edata, thr_id ) )
|
||||
if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n );
|
||||
|
@@ -62,7 +62,7 @@ union _x22i_8way_ctx_overlay
|
||||
};
|
||||
typedef union _x22i_8way_ctx_overlay x22i_8way_ctx_overlay;
|
||||
|
||||
void x22i_8way_hash( void *output, const void *input )
|
||||
int x22i_8way_hash( void *output, const void *input, int thrid )
|
||||
{
|
||||
uint64_t vhash[8*8] __attribute__ ((aligned (128)));
|
||||
uint64_t vhashA[8*8] __attribute__ ((aligned (64)));
|
||||
@@ -129,6 +129,8 @@ void x22i_8way_hash( void *output, const void *input )
|
||||
keccak512_8way_update( &ctx.keccak, vhash, 64 );
|
||||
keccak512_8way_close( &ctx.keccak, vhash );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
|
||||
|
||||
luffa512_4way_full( &ctx.luffa, vhashA, vhashA, 64 );
|
||||
@@ -214,6 +216,8 @@ void x22i_8way_hash( void *output, const void *input )
|
||||
|
||||
#endif
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
hamsi512_8way_init( &ctx.hamsi );
|
||||
hamsi512_8way_update( &ctx.hamsi, vhash, 64 );
|
||||
hamsi512_8way_close( &ctx.hamsi, vhash );
|
||||
@@ -346,6 +350,8 @@ void x22i_8way_hash( void *output, const void *input )
|
||||
sph_tiger (&ctx.tiger, (const void*) hash7, 64);
|
||||
sph_tiger_close(&ctx.tiger, (void*) hashA7);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
memset( hash0, 0, 64 );
|
||||
memset( hash1, 0, 64 );
|
||||
memset( hash2, 0, 64 );
|
||||
@@ -399,6 +405,8 @@ void x22i_8way_hash( void *output, const void *input )
|
||||
sha256_8way_init( &ctx.sha256 );
|
||||
sha256_8way_update( &ctx.sha256, vhash, 64 );
|
||||
sha256_8way_close( &ctx.sha256, output );
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
|
||||
@@ -428,8 +436,7 @@ int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
|
||||
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x22i_8way_hash( hash, vdata );
|
||||
|
||||
if ( x22i_8way_hash( hash, vdata, thr_id ) )
|
||||
for ( int lane = 0; lane < 8; lane++ )
|
||||
if ( unlikely( ( hashd7[ lane ] <= targ32 ) && !bench ) )
|
||||
{
|
||||
@@ -437,7 +444,7 @@ int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n + lane );
|
||||
submit_lane_solution( work, lane_hash, mythr, lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm512_add_epi32( *noncev,
|
||||
@@ -524,7 +531,7 @@ union _x22i_4way_ctx_overlay
|
||||
};
|
||||
typedef union _x22i_4way_ctx_overlay x22i_ctx_overlay;
|
||||
|
||||
void x22i_4way_hash( void *output, const void *input )
|
||||
int x22i_4way_hash( void *output, const void *input, int thrid )
|
||||
{
|
||||
uint64_t hash0[8*4] __attribute__ ((aligned (64)));
|
||||
uint64_t hash1[8*4] __attribute__ ((aligned (64)));
|
||||
@@ -563,6 +570,8 @@ void x22i_4way_hash( void *output, const void *input )
|
||||
keccak512_4way_update( &ctx.keccak, vhash, 64 );
|
||||
keccak512_4way_close( &ctx.keccak, vhash );
|
||||
|
||||
if ( work_restart[thrid].restart ) return false;
|
||||
|
||||
rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
|
||||
|
||||
luffa512_2way_full( &ctx.luffa, vhashA, vhashA, 64 );
|
||||
@@ -591,6 +600,8 @@ void x22i_4way_hash( void *output, const void *input )
|
||||
|
||||
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
|
||||
|
||||
if ( work_restart[thrid].restart ) return false;
|
||||
|
||||
hamsi512_4way_init( &ctx.hamsi );
|
||||
hamsi512_4way_update( &ctx.hamsi, vhash, 64 );
|
||||
hamsi512_4way_close( &ctx.hamsi, vhash );
|
||||
@@ -636,6 +647,8 @@ void x22i_4way_hash( void *output, const void *input )
|
||||
sha512_4way_close( &ctx.sha512, vhash );
|
||||
dintrlv_4x64_512( &hash0[24], &hash1[24], &hash2[24], &hash3[24], vhash );
|
||||
|
||||
if ( work_restart[thrid].restart ) return false;
|
||||
|
||||
ComputeSingleSWIFFTX((unsigned char*)hash0, (unsigned char*)hashA0);
|
||||
ComputeSingleSWIFFTX((unsigned char*)hash1, (unsigned char*)hashA1);
|
||||
ComputeSingleSWIFFTX((unsigned char*)hash2, (unsigned char*)hashA2);
|
||||
@@ -668,6 +681,8 @@ void x22i_4way_hash( void *output, const void *input )
|
||||
sph_tiger (&ctx.tiger, (const void*) hash3, 64);
|
||||
sph_tiger_close(&ctx.tiger, (void*) hashA3);
|
||||
|
||||
if ( work_restart[thrid].restart ) return false;
|
||||
|
||||
memset( hash0, 0, 64 );
|
||||
memset( hash1, 0, 64 );
|
||||
memset( hash2, 0, 64 );
|
||||
@@ -700,8 +715,9 @@ void x22i_4way_hash( void *output, const void *input )
|
||||
sha256_4way_init( &ctx.sha256 );
|
||||
sha256_4way_update( &ctx.sha256, vhash, 64 );
|
||||
sha256_4way_close( &ctx.sha256, output );
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x22i_4way( struct work* work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
@@ -729,8 +745,7 @@ int scanhash_x22i_4way( struct work* work, uint32_t max_nonce,
|
||||
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x22i_4way_hash( hash, vdata );
|
||||
|
||||
if ( x22i_4way_hash( hash, vdata, thr_id ) )
|
||||
for ( int lane = 0; lane < 4; lane++ )
|
||||
if ( unlikely( hashd7[ lane ] <= targ32 && !bench ) )
|
||||
{
|
||||
@@ -738,7 +753,7 @@ int scanhash_x22i_4way( struct work* work, uint32_t max_nonce,
|
||||
if ( valid_hash( lane_hash, ptarget ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n + lane );
|
||||
submit_lane_solution( work, lane_hash, mythr, lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm256_add_epi32( *noncev,
|
||||
|
@@ -16,19 +16,19 @@ bool register_x22i_algo( algo_gate_t* gate );

#if defined(X22I_8WAY)

void x22i_8way_hash( void *state, const void *input );
int x22i_8way_hash( void *state, const void *input, int thrid );
int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );

#elif defined(X22I_4WAY)

void x22i_4way_hash( void *state, const void *input );
int x22i_4way_hash( void *state, const void *input, int thrid );
int scanhash_x22i_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );

#else

void x22i_hash( void *state, const void *input );
int x22i_hash( void *state, const void *input, int thrid );
int scanhash_x22i( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );

@@ -44,19 +44,19 @@ bool register_x25i_algo( algo_gate_t* gate );

#if defined(X25X_8WAY)

void x25x_8way_hash( void *state, const void *input );
int x25x_8way_hash( void *state, const void *input, int thrid );
int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );

#elif defined(X25X_4WAY)

void x25x_4way_hash( void *state, const void *input );
int x25x_4way_hash( void *state, const void *input, int thrid );
int scanhash_x25x_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );

#else

void x25x_hash( void *state, const void *input );
int x25x_hash( void *state, const void *input, int thrif );
int scanhash_x25x( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
@@ -59,7 +59,7 @@ union _x22i_context_overlay
|
||||
};
|
||||
typedef union _x22i_context_overlay x22i_context_overlay;
|
||||
|
||||
void x22i_hash( void *output, const void *input )
|
||||
int x22i_hash( void *output, const void *input, int thrid )
|
||||
{
|
||||
unsigned char hash[64 * 4] __attribute__((aligned(64))) = {0};
|
||||
unsigned char hash2[65] __attribute__((aligned(64))) = {0};
|
||||
@@ -95,6 +95,8 @@ void x22i_hash( void *output, const void *input )
|
||||
sph_keccak512(&ctx.keccak, (const void*) hash, 64);
|
||||
sph_keccak512_close(&ctx.keccak, hash);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
|
||||
(const BitSequence*)hash, 64 );
|
||||
@@ -121,6 +123,8 @@ void x22i_hash( void *output, const void *input )
|
||||
sph_echo512_close( &ctx.echo, hash );
|
||||
#endif
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
sph_hamsi512_init(&ctx.hamsi);
|
||||
sph_hamsi512(&ctx.hamsi, (const void*) hash, 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, hash);
|
||||
@@ -143,6 +147,8 @@ void x22i_hash( void *output, const void *input )
|
||||
|
||||
ComputeSingleSWIFFTX((unsigned char*)hash, (unsigned char*)hash2);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
memset(hash, 0, 64);
|
||||
sph_haval256_5_init(&ctx.haval);
|
||||
sph_haval256_5(&ctx.haval,(const void*) hash2, 64);
|
||||
@@ -165,6 +171,8 @@ void x22i_hash( void *output, const void *input )
|
||||
SHA256_Final( (unsigned char*) hash, &ctx.sha256 );
|
||||
|
||||
memcpy(output, hash, 32);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x22i( struct work *work, uint32_t max_nonce,
|
||||
@@ -188,7 +196,7 @@ int scanhash_x22i( struct work *work, uint32_t max_nonce,
|
||||
do
|
||||
{
|
||||
edata[19] = n;
|
||||
x22i_hash( hash64, edata );
|
||||
if ( x22i_hash( hash64, edata, thr_id ) );
|
||||
if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n );
|
||||
|
@@ -94,7 +94,7 @@ union _x25x_8way_ctx_overlay
|
||||
};
|
||||
typedef union _x25x_8way_ctx_overlay x25x_8way_ctx_overlay;
|
||||
|
||||
void x25x_8way_hash( void *output, const void *input )
|
||||
int x25x_8way_hash( void *output, const void *input, int thrid )
|
||||
{
|
||||
uint64_t vhash[8*8] __attribute__ ((aligned (128)));
|
||||
unsigned char hash0[25][64] __attribute__((aligned(64))) = {0};
|
||||
@@ -179,13 +179,15 @@ void x25x_8way_hash( void *output, const void *input )
|
||||
jh512_8way_close( &ctx.jh, vhash );
|
||||
dintrlv_8x64_512( hash0[4], hash1[4], hash2[4], hash3[4],
|
||||
hash4[4], hash5[4], hash6[4], hash7[4], vhash );
|
||||
|
||||
|
||||
keccak512_8way_init( &ctx.keccak );
|
||||
keccak512_8way_update( &ctx.keccak, vhash, 64 );
|
||||
keccak512_8way_close( &ctx.keccak, vhash );
|
||||
dintrlv_8x64_512( hash0[5], hash1[5], hash2[5], hash3[5],
|
||||
hash4[5], hash5[5], hash6[5], hash7[5], vhash );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
|
||||
|
||||
luffa_4way_init( &ctx.luffa, 512 );
|
||||
@@ -261,6 +263,7 @@ void x25x_8way_hash( void *output, const void *input )
|
||||
intrlv_8x64_512( vhash, hash0[10], hash1[10], hash2[10], hash3[10],
|
||||
hash4[10], hash5[10], hash6[10], hash7[10] );
|
||||
|
||||
|
||||
#else
|
||||
|
||||
init_echo( &ctx.echo, 512 );
|
||||
@@ -292,6 +295,8 @@ void x25x_8way_hash( void *output, const void *input )
|
||||
|
||||
#endif
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
hamsi512_8way_init( &ctx.hamsi );
|
||||
hamsi512_8way_update( &ctx.hamsi, vhash, 64 );
|
||||
hamsi512_8way_close( &ctx.hamsi, vhash );
|
||||
@@ -407,6 +412,8 @@ void x25x_8way_hash( void *output, const void *input )
|
||||
sph_tiger (&ctx.tiger, (const void*) hash7[17], 64);
|
||||
sph_tiger_close(&ctx.tiger, (void*) hash7[18]);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
intrlv_2x256( vhash, hash0[18], hash1[18], 256 );
|
||||
LYRA2X_2WAY( vhash, 32, vhash, 32, 1, 4, 4 );
|
||||
dintrlv_2x256( hash0[19], hash1[19], vhash, 256 );
|
||||
@@ -468,6 +475,8 @@ void x25x_8way_hash( void *output, const void *input )
|
||||
laneHash(512, (const BitSequence*)hash6[22], 512, (BitSequence*)hash6[23]);
|
||||
laneHash(512, (const BitSequence*)hash7[22], 512, (BitSequence*)hash7[23]);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
x25x_shuffle( hash0 );
|
||||
x25x_shuffle( hash1 );
|
||||
x25x_shuffle( hash2 );
|
||||
@@ -528,6 +537,8 @@ void x25x_8way_hash( void *output, const void *input )
|
||||
|
||||
blake2s_8way_init( &ctx.blake2s, 32 );
|
||||
blake2s_8way_full_blocks( &ctx.blake2s, output, vhashX, 64*24 );
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
|
||||
@@ -557,7 +568,7 @@ int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
|
||||
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x25x_8way_hash( hash, vdata );
|
||||
if ( x25x_8way_hash( hash, vdata, thr_id ) );
|
||||
|
||||
for ( int lane = 0; lane < 8; lane++ )
|
||||
if ( unlikely( ( hashd7[ lane ] <= targ32 ) && !bench ) )
|
||||
@@ -566,7 +577,7 @@ int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n + lane );
|
||||
submit_lane_solution( work, lane_hash, mythr, lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm512_add_epi32( *noncev,
|
||||
@@ -654,7 +665,7 @@ union _x25x_4way_ctx_overlay
|
||||
};
|
||||
typedef union _x25x_4way_ctx_overlay x25x_4way_ctx_overlay;
|
||||
|
||||
void x25x_4way_hash( void *output, const void *input )
|
||||
int x25x_4way_hash( void *output, const void *input, int thrid )
|
||||
{
|
||||
uint64_t vhash[8*4] __attribute__ ((aligned (128)));
|
||||
unsigned char hash0[25][64] __attribute__((aligned(64))) = {0};
|
||||
@@ -686,6 +697,8 @@ void x25x_4way_hash( void *output, const void *input )
|
||||
jh512_4way_close( &ctx.jh, vhash );
|
||||
dintrlv_4x64_512( hash0[4], hash1[4], hash2[4], hash3[4], vhash );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
keccak512_4way_init( &ctx.keccak );
|
||||
keccak512_4way_update( &ctx.keccak, vhash, 64 );
|
||||
keccak512_4way_close( &ctx.keccak, vhash );
|
||||
@@ -738,6 +751,8 @@ void x25x_4way_hash( void *output, const void *input )
|
||||
|
||||
intrlv_4x64_512( vhash, hash0[10], hash1[10], hash2[10], hash3[10] );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
hamsi512_4way_init( &ctx.hamsi );
|
||||
hamsi512_4way_update( &ctx.hamsi, vhash, 64 );
|
||||
hamsi512_4way_close( &ctx.hamsi, vhash );
|
||||
@@ -819,6 +834,8 @@ void x25x_4way_hash( void *output, const void *input )
|
||||
LYRA2RE( (void*)hash3[19], 32, (const void*)hash3[18], 32,
|
||||
(const void*)hash3[18], 32, 1, 4, 4 );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
sph_gost512_init(&ctx.gost);
|
||||
sph_gost512 (&ctx.gost, (const void*) hash0[19], 64);
|
||||
sph_gost512_close(&ctx.gost, (void*) hash0[20]);
|
||||
@@ -850,6 +867,8 @@ void x25x_4way_hash( void *output, const void *input )
|
||||
laneHash(512, (const BitSequence*)hash2[22], 512, (BitSequence*)hash2[23]);
|
||||
laneHash(512, (const BitSequence*)hash3[22], 512, (BitSequence*)hash3[23]);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
x25x_shuffle( hash0 );
|
||||
x25x_shuffle( hash1 );
|
||||
x25x_shuffle( hash2 );
|
||||
@@ -882,6 +901,8 @@ void x25x_4way_hash( void *output, const void *input )
|
||||
|
||||
blake2s_4way_init( &ctx.blake2s, 32 );
|
||||
blake2s_4way_full_blocks( &ctx.blake2s, output, vhashX, 64*24 );
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x25x_4way( struct work* work, uint32_t max_nonce,
|
||||
@@ -910,8 +931,7 @@ int scanhash_x25x_4way( struct work* work, uint32_t max_nonce,
|
||||
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
|
||||
do
|
||||
{
|
||||
x25x_4way_hash( hash, vdata );
|
||||
|
||||
if ( x25x_4way_hash( hash, vdata, thr_id ) )
|
||||
for ( int lane = 0; lane < 4; lane++ )
|
||||
if ( unlikely( hashd7[ lane ] <= targ32 && !bench ) )
|
||||
{
|
||||
@@ -919,7 +939,7 @@ int scanhash_x25x_4way( struct work* work, uint32_t max_nonce,
|
||||
if ( valid_hash( lane_hash, ptarget ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n + lane );
|
||||
submit_lane_solution( work, lane_hash, mythr, lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm256_add_epi32( *noncev,
|
||||
|
@@ -64,7 +64,7 @@ union _x25x_context_overlay
|
||||
};
|
||||
typedef union _x25x_context_overlay x25x_context_overlay;
|
||||
|
||||
void x25x_hash( void *output, const void *input )
|
||||
int x25x_hash( void *output, const void *input, int thrid )
|
||||
{
|
||||
unsigned char hash[25][64] __attribute__((aligned(64))) = {0};
|
||||
x25x_context_overlay ctx;
|
||||
@@ -99,6 +99,8 @@ void x25x_hash( void *output, const void *input )
|
||||
sph_keccak512(&ctx.keccak, (const void*) &hash[4], 64);
|
||||
sph_keccak512_close(&ctx.keccak, &hash[5]);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
init_luffa( &ctx.luffa, 512 );
|
||||
update_and_final_luffa( &ctx.luffa, (BitSequence*)&hash[6],
|
||||
(const BitSequence*)&hash[5], 64 );
|
||||
@@ -125,7 +127,9 @@ void x25x_hash( void *output, const void *input )
|
||||
sph_echo512_close( &ctx.echo, &hash[10] );
|
||||
#endif
|
||||
|
||||
sph_hamsi512_init(&ctx.hamsi);
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
sph_hamsi512_init(&ctx.hamsi);
|
||||
sph_hamsi512(&ctx.hamsi, (const void*) &hash[10], 64);
|
||||
sph_hamsi512_close(&ctx.hamsi, &hash[11]);
|
||||
|
||||
@@ -151,6 +155,8 @@ void x25x_hash( void *output, const void *input )
|
||||
sph_haval256_5(&ctx.haval,(const void*) &hash[16], 64);
|
||||
sph_haval256_5_close(&ctx.haval,&hash[17]);
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
sph_tiger_init(&ctx.tiger);
|
||||
sph_tiger (&ctx.tiger, (const void*) &hash[17], 64);
|
||||
sph_tiger_close(&ctx.tiger, (void*) &hash[18]);
|
||||
@@ -199,6 +205,8 @@ void x25x_hash( void *output, const void *input )
|
||||
blake2s_simple( (uint8_t*)&hash[24], (const void*)(&hash[0]), 64 * 24 );
|
||||
|
||||
memcpy(output, &hash[24], 32);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int scanhash_x25x( struct work *work, uint32_t max_nonce,
|
||||
@@ -222,7 +230,7 @@ int scanhash_x25x( struct work *work, uint32_t max_nonce,
|
||||
do
|
||||
{
|
||||
edata[19] = n;
|
||||
x25x_hash( hash64, edata );
|
||||
if ( x25x_hash( hash64, edata, thr_id ) );
|
||||
if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n );
|
||||
|
@@ -31,6 +31,7 @@
|
||||
#undef HUGEPAGE_SIZE
|
||||
#endif
|
||||
|
||||
/*
|
||||
static __inline uint32_t
|
||||
le32dec(const void *pp)
|
||||
{
|
||||
@@ -50,6 +51,7 @@ le32enc(void *pp, uint32_t x)
|
||||
p[2] = (x >> 16) & 0xff;
|
||||
p[3] = (x >> 24) & 0xff;
|
||||
}
|
||||
*/
|
||||
|
||||
static void *
|
||||
alloc_region(yescrypt_region_t * region, size_t size)
|
||||
@@ -154,7 +156,7 @@ int yescrypt_init_shared(yescrypt_shared_t * shared, const uint8_t * param, size
|
||||
if (yescrypt_kdf(&dummy, shared1,
|
||||
param, paramlen, NULL, 0, N, r, p, 0,
|
||||
YESCRYPT_RW | YESCRYPT_PARALLEL_SMIX | __YESCRYPT_INIT_SHARED_1,
|
||||
salt, sizeof(salt)))
|
||||
salt, sizeof(salt), 0 ) )
|
||||
goto out;
|
||||
|
||||
half1 = half2 = *shared;
|
||||
@@ -166,19 +168,19 @@ int yescrypt_init_shared(yescrypt_shared_t * shared, const uint8_t * param, size
|
||||
if (p > 1 && yescrypt_kdf(&half1, &half2.shared1,
|
||||
param, paramlen, salt, sizeof(salt), N, r, p, 0,
|
||||
YESCRYPT_RW | YESCRYPT_PARALLEL_SMIX | __YESCRYPT_INIT_SHARED_2,
|
||||
salt, sizeof(salt)))
|
||||
salt, sizeof(salt), 0 ))
|
||||
goto out;
|
||||
|
||||
if (yescrypt_kdf(&half2, &half1.shared1,
|
||||
param, paramlen, salt, sizeof(salt), N, r, p, 0,
|
||||
YESCRYPT_RW | YESCRYPT_PARALLEL_SMIX | __YESCRYPT_INIT_SHARED_1,
|
||||
salt, sizeof(salt)))
|
||||
salt, sizeof(salt), 0))
|
||||
goto out;
|
||||
|
||||
if (yescrypt_kdf(&half1, &half2.shared1,
|
||||
param, paramlen, salt, sizeof(salt), N, r, p, 0,
|
||||
YESCRYPT_RW | YESCRYPT_PARALLEL_SMIX | __YESCRYPT_INIT_SHARED_1,
|
||||
buf, buflen))
|
||||
buf, buflen, 0))
|
||||
goto out;
|
||||
|
||||
shared->mask1 = mask;
|
||||
|
@@ -1149,7 +1149,7 @@ yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
|
||||
const uint8_t * passwd, size_t passwdlen,
|
||||
const uint8_t * salt, size_t saltlen,
|
||||
uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
|
||||
uint8_t * buf, size_t buflen)
|
||||
uint8_t * buf, size_t buflen, int thrid )
|
||||
{
|
||||
uint8_t _ALIGN(128) sha256[32];
|
||||
yescrypt_region_t tmp;
|
||||
@@ -1157,6 +1157,7 @@ yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
|
||||
size_t B_size, V_size, XY_size, need;
|
||||
uint8_t * B, * S;
|
||||
salsa20_blk_t * V, * XY;
|
||||
int retval = 1;
|
||||
|
||||
/*
|
||||
* YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
|
||||
@@ -1312,6 +1313,12 @@ yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
|
||||
/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
|
||||
PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);
|
||||
|
||||
if ( work_restart[thrid].restart )
|
||||
{
|
||||
retval = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (t || flags)
|
||||
memcpy(sha256, B, sizeof(sha256));
|
||||
|
||||
@@ -1339,9 +1346,21 @@ yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
|
||||
}
|
||||
}
|
||||
|
||||
if ( work_restart[thrid].restart )
|
||||
{
|
||||
retval = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
|
||||
PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);
|
||||
|
||||
if ( work_restart[thrid].restart )
|
||||
{
|
||||
retval = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Except when computing classic scrypt, allow all computation so far
|
||||
* to be performed on the client. The final steps below match those of
|
||||
@@ -1370,9 +1389,10 @@ yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
if (free_region(&tmp))
|
||||
return -1;
|
||||
|
||||
/* Success! */
|
||||
return 0;
|
||||
return retval;
|
||||
}
|
||||
|
@@ -106,7 +106,8 @@ static const uint8_t* decode64_uint32(uint32_t* dst, uint32_t dstbits, const uin
|
||||
}
|
||||
|
||||
uint8_t* yescrypt_r(const yescrypt_shared_t* shared, yescrypt_local_t* local,
|
||||
const uint8_t* passwd, size_t passwdlen, const uint8_t* setting, uint8_t* buf, size_t buflen)
|
||||
const uint8_t* passwd, size_t passwdlen, const uint8_t* setting,
|
||||
uint8_t* buf, size_t buflen, int thrid )
|
||||
{
|
||||
uint8_t hash[HASH_SIZE];
|
||||
const uint8_t * src, * salt;
|
||||
@@ -210,7 +211,9 @@ uint8_t* yescrypt_r(const yescrypt_shared_t* shared, yescrypt_local_t* local,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen, N, r, p, 0, flags, hash, sizeof(hash))) {
|
||||
if ( yescrypt_kdf( shared, local, passwd, passwdlen, salt, saltlen, N, r, p,
|
||||
0, flags, hash, sizeof(hash), thrid ) == -1 )
|
||||
{
|
||||
printf("died10 ...");
|
||||
fflush(stdout);
|
||||
return NULL;
|
||||
@@ -237,7 +240,7 @@ uint8_t* yescrypt_r(const yescrypt_shared_t* shared, yescrypt_local_t* local,
|
||||
return buf;
|
||||
}
|
||||
|
||||
uint8_t* yescrypt(const uint8_t* passwd, const uint8_t* setting)
|
||||
uint8_t* yescrypt(const uint8_t* passwd, const uint8_t* setting, int thrid )
|
||||
{
|
||||
static uint8_t buf[4 + 1 + 5 + 5 + BYTES2CHARS(32) + 1 + HASH_LEN + 1];
|
||||
yescrypt_shared_t shared;
|
||||
@@ -252,7 +255,7 @@ uint8_t* yescrypt(const uint8_t* passwd, const uint8_t* setting)
|
||||
return NULL;
|
||||
}
|
||||
retval = yescrypt_r(&shared, &local,
|
||||
passwd, 80, setting, buf, sizeof(buf));
|
||||
passwd, 80, setting, buf, sizeof(buf), thrid );
|
||||
//printf("hashse='%s'\n", (char *)retval);
|
||||
if (yescrypt_free_local(&local)) {
|
||||
yescrypt_free_shared(&shared);
|
||||
@@ -329,7 +332,7 @@ uint8_t* yescrypt_gensalt(uint32_t N_log2, uint32_t r, uint32_t p, yescrypt_flag
|
||||
|
||||
static int yescrypt_bsty(const uint8_t * passwd, size_t passwdlen,
|
||||
const uint8_t * salt, size_t saltlen, uint64_t N, uint32_t r, uint32_t p,
|
||||
uint8_t * buf, size_t buflen)
|
||||
uint8_t * buf, size_t buflen, int thrid )
|
||||
{
|
||||
static __thread int initialized = 0;
|
||||
static __thread yescrypt_shared_t shared;
|
||||
@@ -349,7 +352,7 @@ static int yescrypt_bsty(const uint8_t * passwd, size_t passwdlen,
|
||||
}
|
||||
retval = yescrypt_kdf(&shared, &local,
|
||||
passwd, passwdlen, salt, saltlen, N, r, p, 0, YESCRYPT_FLAGS,
|
||||
buf, buflen);
|
||||
buf, buflen, thrid );
|
||||
#if 0
|
||||
if (yescrypt_free_local(&local)) {
|
||||
yescrypt_free_shared(&shared);
|
||||
@@ -370,16 +373,16 @@ char *yescrypt_client_key = NULL;
|
||||
int yescrypt_client_key_len = 0;
|
||||
|
||||
/* main hash 80 bytes input */
|
||||
void yescrypt_hash( const char *input, char *output, uint32_t len )
|
||||
int yescrypt_hash( const char *input, char *output, uint32_t len, int thrid )
|
||||
{
|
||||
yescrypt_bsty( (uint8_t*)input, len, (uint8_t*)input, len, YESCRYPT_N,
|
||||
YESCRYPT_R, YESCRYPT_P, (uint8_t*)output, 32 );
|
||||
return yescrypt_bsty( (uint8_t*)input, len, (uint8_t*)input, len, YESCRYPT_N,
|
||||
YESCRYPT_R, YESCRYPT_P, (uint8_t*)output, 32, thrid );
|
||||
}
|
||||
|
||||
/* for util.c test */
|
||||
void yescrypthash(void *output, const void *input)
|
||||
int yescrypthash(void *output, const void *input, int thrid)
|
||||
{
|
||||
yescrypt_hash((char*) input, (char*) output, 80);
|
||||
return yescrypt_hash((char*) input, (char*) output, 80, thrid);
|
||||
}
|
||||
|
||||
int scanhash_yescrypt( struct work *work, uint32_t max_nonce,
|
||||
@@ -392,13 +395,13 @@ int scanhash_yescrypt( struct work *work, uint32_t max_nonce,
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t last_nonce = max_nonce;
|
||||
uint32_t n = first_nonce;
|
||||
int thr_id = mythr->id; // thr_id arg is deprecated
|
||||
int thr_id = mythr->id;
|
||||
|
||||
for ( int k = 0; k < 19; k++ )
|
||||
be32enc( &endiandata[k], pdata[k] );
|
||||
endiandata[19] = n;
|
||||
do {
|
||||
yescrypt_hash((char*) endiandata, (char*) vhash, 80);
|
||||
if ( yescrypt_hash((char*) endiandata, (char*) vhash, 80, thr_id ) )
|
||||
if unlikely( valid_hash( vhash, ptarget ) && !opt_benchmark )
|
||||
{
|
||||
be32enc( pdata+19, n );
|
||||
|
@@ -38,12 +38,13 @@ extern "C" {
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h> /* for size_t */
|
||||
#include <stdbool.h>
|
||||
#include "miner.h"
|
||||
|
||||
//#define __SSE4_1__
|
||||
|
||||
void yescrypt_hash(const char* input, char* output, uint32_t len);
|
||||
int yescrypt_hash(const char* input, char* output, uint32_t len, int thrid );
|
||||
|
||||
void yescrypthash(void *output, const void *input);
|
||||
int yescrypthash(void *output, const void *input, int thrid );
|
||||
|
||||
/**
|
||||
* crypto_scrypt(passwd, passwdlen, salt, saltlen, N, r, p, buf, buflen):
|
||||
@@ -301,7 +302,7 @@ extern int yescrypt_kdf(const yescrypt_shared_t * __shared,
|
||||
const uint8_t * __salt, size_t __saltlen,
|
||||
uint64_t __N, uint32_t __r, uint32_t __p, uint32_t __t,
|
||||
yescrypt_flags_t __flags,
|
||||
uint8_t * __buf, size_t __buflen);
|
||||
uint8_t * __buf, size_t __buflen, int thrid);
|
||||
|
||||
/**
|
||||
* yescrypt_r(shared, local, passwd, passwdlen, setting, buf, buflen):
|
||||
@@ -321,7 +322,7 @@ extern uint8_t * yescrypt_r(const yescrypt_shared_t * __shared,
|
||||
yescrypt_local_t * __local,
|
||||
const uint8_t * __passwd, size_t __passwdlen,
|
||||
const uint8_t * __setting,
|
||||
uint8_t * __buf, size_t __buflen);
|
||||
uint8_t * __buf, size_t __buflen, int thrid);
|
||||
|
||||
/**
|
||||
* yescrypt(passwd, setting):
|
||||
@@ -339,7 +340,7 @@ extern uint8_t * yescrypt_r(const yescrypt_shared_t * __shared,
|
||||
*
|
||||
* MT-unsafe.
|
||||
*/
|
||||
extern uint8_t * yescrypt(const uint8_t * __passwd, const uint8_t * __setting);
|
||||
extern uint8_t * yescrypt(const uint8_t * __passwd, const uint8_t * __setting, int thrid );
|
||||
|
||||
/**
|
||||
* yescrypt_gensalt_r(N_log2, r, p, flags, src, srclen, buf, buflen):
|
||||
|
@@ -79,7 +79,7 @@ int main(int argc, const char * const *argv)
for (i = 0; i < sizeof(src); i++)
src.u8[i] = i * 3;

if (yespower_tls(src.u8, sizeof(src), &params, &dst)) {
if (!yespower_tls(src.u8, sizeof(src), &params, &dst)) {
puts("FAILED");
return 1;
}

@@ -51,9 +51,13 @@ int scanhash_yespower_r8g( struct work *work, uint32_t max_nonce,
be32enc( &endiandata[ i], pdata[ i ]);
endiandata[19] = n;

// do sha256 prehash
SHA256_Init( &sha256_prehash_ctx );
SHA256_Update( &sha256_prehash_ctx, endiandata, 64 );

do {
yespower_tls( (unsigned char *)endiandata, params.perslen,
&params, (yespower_binary_t*)hash );
&params, (yespower_binary_t*)hash, thr_id );

if unlikely( valid_hash( hash, ptarget ) && !opt_benchmark )
{
692
algo/yespower/yespower-4way.c
Normal file
@@ -0,0 +1,692 @@
|
||||
/*-
|
||||
* Copyright 2009 Colin Percival
|
||||
* Copyright 2013-2018 Alexander Peslyak
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* This file was originally written by Colin Percival as part of the Tarsnap
|
||||
* online backup system.
|
||||
*
|
||||
* This is a proof-of-work focused fork of yescrypt, including reference and
|
||||
* cut-down implementation of the obsolete yescrypt 0.5 (based off its first
|
||||
* submission to PHC back in 2014) and a new proof-of-work specific variation
|
||||
* known as yespower 1.0. The former is intended as an upgrade for
|
||||
* cryptocurrencies that already use yescrypt 0.5 and the latter may be used
|
||||
* as a further upgrade (hard fork) by those and other cryptocurrencies. The
|
||||
* version of algorithm to use is requested through parameters, allowing for
|
||||
* both algorithms to co-exist in client and miner implementations (such as in
|
||||
* preparation for a hard-fork).
|
||||
*
|
||||
* This is the reference implementation. Its purpose is to provide a simple
|
||||
* human- and machine-readable specification that implementations intended
|
||||
* for actual use should be tested against. It is deliberately mostly not
|
||||
* optimized, and it is not meant to be used in production. Instead, use
|
||||
* yespower-opt.c.
|
||||
*/
|
||||
/*
|
||||
#warning "This reference implementation is deliberately mostly not optimized. Use yespower-opt.c instead unless you're testing (against) the reference implementation on purpose."
|
||||
*/
|
||||
#include <errno.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "algo/sha/hmac-sha256-hash-4way.h"
|
||||
//#include "sysendian.h"
|
||||
|
||||
#include "yespower.h"
|
||||
|
||||
|
||||
#if defined(__AVX2__)
|
||||
|
||||
|
||||
static void blkcpy_8way( __m256i *dst, const __m256i *src, size_t count )
|
||||
{
|
||||
do {
|
||||
*dst++ = *src++;
|
||||
} while (--count);
|
||||
}
|
||||
|
||||
static void blkxor_8way( __m256i *dst, const __m256i *src, size_t count )
|
||||
{
|
||||
do {
|
||||
*dst++ ^= *src++;
|
||||
} while (--count);
|
||||
}
|
||||
|
||||
/**
|
||||
* salsa20(B):
|
||||
* Apply the Salsa20 core to the provided block.
|
||||
*/
|
||||
static void salsa20_8way( __m256i B[16], uint32_t rounds )
|
||||
{
|
||||
__m256i x[16];
|
||||
size_t i;
|
||||
|
||||
/* SIMD unshuffle */
|
||||
for ( i = 0; i < 16; i++ )
|
||||
x[i * 5 % 16] = B[i];
|
||||
|
||||
for ( i = 0; i < rounds; i += 2 )
|
||||
{
|
||||
#define R( a, b, c ) mm256_rol_32( _mm256_add_epi32( a, b ), c )
|
||||
/* Operate on columns */
|
||||
|
||||
x[ 4] = _mm256_xor_si256( x[ 4], R( x[ 0], x[12], 7 ) );
|
||||
x[ 8] = _mm256_xor_si256( x[ 8], R( x[ 4], x[ 0], 9 ) );
|
||||
x[12] = _mm256_xor_si256( x[12], R( x[ 8], x[ 4], 13 ) );
|
||||
x[ 0] = _mm256_xor_si256( x[ 0], R( x[12], x[ 8], 18 ) );
|
||||
|
||||
x[ 9] = _mm256_xor_si256( x[ 9], R( x[ 5], x[ 1], 7 ) );
|
||||
x[13] = _mm256_xor_si256( x[13], R( x[ 9], x[ 5], 9 ) );
|
||||
x[ 1] = _mm256_xor_si256( x[ 1], R( x[13], x[ 9], 13 ) );
|
||||
x[ 5] = _mm256_xor_si256( x[ 5], R( x[ 1], x[13], 18 ) );
|
||||
|
||||
x[14] = _mm256_xor_si256( x[14], R( x[10], x[ 6], 7 ) );
|
||||
x[ 2] = _mm256_xor_si256( x[ 2], R( x[14], x[10], 9 ) );
|
||||
x[ 6] = _mm256_xor_si256( x[ 6], R( x[ 2], x[14], 13 ) );
|
||||
x[10] = _mm256_xor_si256( x[10], R( x[ 6], x[ 2], 18 ) );
|
||||
|
||||
x[ 3] = _mm256_xor_si256( x[ 3], R( x[15], x[11], 7 ) );
|
||||
x[ 7] = _mm256_xor_si256( x[ 7], R( x[ 3], x[15], 9 ) );
|
||||
x[11] = _mm256_xor_si256( x[11], R( x[ 7], x[ 3], 13 ) );
|
||||
x[15] = _mm256_xor_si256( x[15], R( x[11], x[ 7], 18 ) );
|
||||
|
||||
/* Operate on rows */
|
||||
|
||||
x[ 1] = _mm256_xor_si256( x[ 1], R( x[ 0], x[ 3], 7 ) );
|
||||
x[ 2] = _mm256_xor_si256( x[ 2], R( x[ 1], x[ 0], 9 ) );
|
||||
x[ 3] = _mm256_xor_si256( x[ 3], R( x[ 2], x[ 1], 13 ) );
|
||||
x[ 0] = _mm256_xor_si256( x[ 0], R( x[ 3], x[ 2], 18 ) );
|
||||
|
||||
x[ 6] = _mm256_xor_si256( x[ 6], R( x[ 5], x[ 4], 7 ) );
|
||||
x[ 7] = _mm256_xor_si256( x[ 7], R( x[ 6], x[ 5], 9 ) );
|
||||
x[ 4] = _mm256_xor_si256( x[ 4], R( x[ 7], x[ 6], 13 ) );
|
||||
x[ 5] = _mm256_xor_si256( x[ 5], R( x[ 4], x[ 7], 18 ) );
|
||||
|
||||
x[11] = _mm256_xor_si256( x[11], R( x[10], x[ 9], 7 ) );
|
||||
x[ 8] = _mm256_xor_si256( x[ 8], R( x[11], x[10], 9 ) );
|
||||
x[ 9] = _mm256_xor_si256( x[ 9], R( x[ 8], x[11], 13 ) );
|
||||
x[10] = _mm256_xor_si256( x[10], R( x[ 9], x[ 8], 18 ) );
|
||||
|
||||
x[12] = _mm256_xor_si256( x[12], R( x[15], x[14], 7 ) );
|
||||
x[13] = _mm256_xor_si256( x[13], R( x[12], x[15], 9 ) );
|
||||
x[14] = _mm256_xor_si256( x[14], R( x[13], x[12], 13 ) );
|
||||
x[15] = _mm256_xor_si256( x[15], R( x[14], x[13], 18 ) );
|
||||
|
||||
#undef R
|
||||
}
|
||||
|
||||
/* SIMD shuffle */
|
||||
for (i = 0; i < 16; i++)
|
||||
B[i] = _mm256_add_epi32( B[i], x[i * 5 % 16] );
|
||||
}
|
||||
|
||||
/**
|
||||
* blockmix_salsa(B):
|
||||
* Compute B = BlockMix_{salsa20, 1}(B). The input B must be 128 bytes in
|
||||
* length.
|
||||
*/
|
||||
static void blockmix_salsa_8way( __m256i *B, uint32_t rounds )
|
||||
{
|
||||
__m256i X[16];
|
||||
size_t i;
|
||||
|
||||
/* 1: X <-- B_{2r - 1} */
|
||||
blkcpy_8way( X, &B[16], 16 );
|
||||
|
||||
/* 2: for i = 0 to 2r - 1 do */
|
||||
for ( i = 0; i < 2; i++ )
|
||||
{
|
||||
/* 3: X <-- H(X xor B_i) */
|
||||
blkxor_8way( X, &B[i * 16], 16 );
|
||||
salsa20_8way( X, rounds );
|
||||
|
||||
/* 4: Y_i <-- X */
|
||||
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
|
||||
blkcpy_8way( &B[i * 16], X, 16 );
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* These are tunable, but they must meet certain constraints and are part of
|
||||
* what defines a yespower version.
|
||||
*/
|
||||
#define PWXsimple 2
|
||||
#define PWXgather 4
|
||||
/* Version 0.5 */
|
||||
#define PWXrounds_0_5 6
|
||||
#define Swidth_0_5 8
|
||||
/* Version 1.0 */
|
||||
#define PWXrounds_1_0 3
|
||||
#define Swidth_1_0 11
|
||||
|
||||
/* Derived values. Not tunable on their own. */
|
||||
#define PWXbytes (PWXgather * PWXsimple * 8)
|
||||
#define PWXwords (PWXbytes / sizeof(uint32_t))
|
||||
#define rmin ((PWXbytes + 127) / 128)
|
||||
|
||||
/* Runtime derived values. Not tunable on their own. */
|
||||
#define Swidth_to_Sbytes1(Swidth) ((1 << Swidth) * PWXsimple * 8)
|
||||
#define Swidth_to_Smask(Swidth) (((1 << Swidth) - 1) * PWXsimple * 8)
|
||||
|
||||
typedef struct {
|
||||
__m256i (*S0)[2], (*S1)[2], (*S2)[2];
|
||||
__m256i *S;
|
||||
yespower_version_t version;
|
||||
uint32_t salsa20_rounds;
|
||||
uint32_t PWXrounds, Swidth, Sbytes, Smask;
|
||||
size_t w;
|
||||
} pwxform_8way_ctx_t __attribute__ ((aligned (128)));
|
||||
|
||||
/**
|
||||
* pwxform(B):
|
||||
* Transform the provided block using the provided S-boxes.
|
||||
*/
|
||||
static void pwxform_8way( __m256i *B, pwxform_8way_ctx_t *ctx )
|
||||
{
|
||||
__m256i (*X)[PWXsimple][2] = (__m256i (*)[PWXsimple][2])B;
|
||||
__m256i (*S0)[2] = ctx->S0, (*S1)[2] = ctx->S1, (*S2)[2] = ctx->S2;
|
||||
__m256i Smask = _mm256_set1_epi32( ctx->Smask );
|
||||
size_t w = ctx->w;
|
||||
size_t i, j, k;
|
||||
|
||||
/* 1: for i = 0 to PWXrounds - 1 do */
|
||||
for ( i = 0; i < ctx->PWXrounds; i++ )
|
||||
{
|
||||
/* 2: for j = 0 to PWXgather - 1 do */
|
||||
for ( j = 0; j < PWXgather; j++ )
|
||||
{
|
||||
// Are these pointers or data?
|
||||
__m256i xl = X[j][0][0];
|
||||
__m256i xh = X[j][0][1];
|
||||
__m256i (*p0)[2], (*p1)[2];
|
||||
|
||||
// 3: p0 <-- (lo(B_{j,0}) & Smask) / (PWXsimple * 8)
|
||||
|
||||
// playing with pointers
|
||||
/*
|
||||
p0 = S0 + (xl & Smask) / sizeof(*S0);
|
||||
// 4: p1 <-- (hi(B_{j,0}) & Smask) / (PWXsimple * 8)
|
||||
p1 = S1 + (xh & Smask) / sizeof(*S1);
|
||||
*/
|
||||
/* 5: for k = 0 to PWXsimple - 1 do */
|
||||
for ( k = 0; k < PWXsimple; k++ )
|
||||
{
|
||||
|
||||
// shift from 32 bit data to 64 bit data
|
||||
__m256i x0, x1, s00, s01, s10, s11;
|
||||
__m128i *p0k = (__m128i*)p0[k];
|
||||
__m128i *p1k = (__m128i*)p1[k];
|
||||
|
||||
|
||||
s00 = _mm256_add_epi64( _mm256_cvtepu32_epi64( p0k[0] ),
|
||||
_mm256_slli_epi64( _mm256_cvtepu32_epi64( p0k[2] ), 32 ) );
|
||||
s01 = _mm256_add_epi64( _mm256_cvtepu32_epi64( p0k[1] ),
|
||||
_mm256_slli_epi64( _mm256_cvtepu32_epi64( p0k[3] ), 32 ) );
|
||||
s10 = _mm256_add_epi64( _mm256_cvtepu32_epi64( p1k[0] ),
|
||||
_mm256_slli_epi64( _mm256_cvtepu32_epi64( p1k[2] ), 32 ) );
|
||||
s11 = _mm256_add_epi64( _mm256_cvtepu32_epi64( p1k[1] ),
|
||||
_mm256_slli_epi64( _mm256_cvtepu32_epi64( p1k[3] ), 32 ) );
|
||||
|
||||
__m128i *xx = (__m128i*)X[j][k];
|
||||
x0 = _mm256_mul_epu32( _mm256_cvtepu32_epi64( xx[0] ),
|
||||
_mm256_cvtepu32_epi64( xx[2] ) );
|
||||
x1 = _mm256_mul_epu32( _mm256_cvtepu32_epi64( xx[1] ),
|
||||
_mm256_cvtepu32_epi64( xx[3] ) );
|
||||
|
||||
x0 = _mm256_add_epi64( x0, s00 );
|
||||
x1 = _mm256_add_epi64( x1, s01 );
|
||||
|
||||
x0 = _mm256_xor_si256( x0, s10 );
|
||||
x1 = _mm256_xor_si256( x1, s11 );
|
||||
|
||||
X[j][k][0] = x0;
|
||||
X[j][k][1] = x1;
|
||||
}
|
||||
|
||||
if ( ctx->version != YESPOWER_0_5 &&
|
||||
( i == 0 || j < PWXgather / 2 ) )
|
||||
{
|
||||
if ( j & 1 )
|
||||
{
|
||||
for ( k = 0; k < PWXsimple; k++ )
|
||||
{
|
||||
S1[w][0] = X[j][k][0];
|
||||
S1[w][1] = X[j][k][1];
|
||||
w++;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for ( k = 0; k < PWXsimple; k++ )
|
||||
{
|
||||
S0[w + k][0] = X[j][k][0];
|
||||
S0[w + k][1] = X[j][k][1];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ( ctx->version != YESPOWER_0_5 )
|
||||
{
|
||||
/* 14: (S0, S1, S2) <-- (S2, S0, S1) */
|
||||
ctx->S0 = S2;
|
||||
ctx->S1 = S0;
|
||||
ctx->S2 = S1;
|
||||
/* 15: w <-- w mod 2^Swidth */
|
||||
ctx->w = w & ( ( 1 << ctx->Swidth ) * PWXsimple - 1 );
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* blockmix_pwxform(B, ctx, r):
|
||||
* Compute B = BlockMix_pwxform{salsa20, ctx, r}(B). The input B must be
|
||||
* 128r bytes in length.
|
||||
*/
|
||||
static void blockmix_pwxform_8way( uint32_t *B, pwxform_8way_ctx_t *ctx,
|
||||
size_t r )
|
||||
{
|
||||
__m256i X[PWXwords];
|
||||
size_t r1, i;
|
||||
|
||||
/* Convert 128-byte blocks to PWXbytes blocks */
|
||||
/* 1: r_1 <-- 128r / PWXbytes */
|
||||
r1 = 128 * r / PWXbytes;
|
||||
|
||||
/* 2: X <-- B'_{r_1 - 1} */
|
||||
blkcpy_8way( X, &B[ (r1 - 1) * PWXwords ], PWXwords );
|
||||
|
||||
/* 3: for i = 0 to r_1 - 1 do */
|
||||
for ( i = 0; i < r1; i++ )
|
||||
{
|
||||
/* 4: if r_1 > 1 */
|
||||
if ( r1 > 1 )
|
||||
{
|
||||
/* 5: X <-- X xor B'_i */
|
||||
blkxor_8way( X, &B[ i * PWXwords ], PWXwords );
|
||||
}
|
||||
|
||||
/* 7: X <-- pwxform(X) */
|
||||
pwxform_8way( X, ctx );
|
||||
|
||||
/* 8: B'_i <-- X */
|
||||
blkcpy_8way( &B[ i * PWXwords ], X, PWXwords );
|
||||
}
|
||||
|
||||
/* 10: i <-- floor((r_1 - 1) * PWXbytes / 64) */
|
||||
i = ( r1 - 1 ) * PWXbytes / 64;
|
||||
|
||||
/* 11: B_i <-- H(B_i) */
|
||||
salsa20_8way( &B[i * 16], ctx->salsa20_rounds );
|
||||
|
||||
#if 1 /* No-op with our current pwxform settings, but do it to make sure */
|
||||
/* 12: for i = i + 1 to 2r - 1 do */
|
||||
for ( i++; i < 2 * r; i++ )
|
||||
{
|
||||
/* 13: B_i <-- H(B_i xor B_{i-1}) */
|
||||
blkxor_8way( &B[i * 16], &B[ (i - 1) * 16 ], 16 );
|
||||
salsa20_8way( &B[i * 16], ctx->salsa20_rounds );
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
// This looks a lot like data dependent addressing
|
||||
|
||||
/**
|
||||
* integerify(B, r):
|
||||
* Return the result of parsing B_{2r-1} as a little-endian integer.
|
||||
*/
|
||||
static __m256i integerify8( const __m256i *B, size_t r )
|
||||
{
|
||||
/*
|
||||
* Our 32-bit words are in host byte order. Also, they are SIMD-shuffled, but
|
||||
* we only care about the least significant 32 bits anyway.
|
||||
*/
|
||||
const __m256i *X = &B[ (2 * r - 1) * 16 ];
|
||||
return X[0];
|
||||
}
|
||||
|
||||
/**
|
||||
* p2floor(x):
|
||||
* Largest power of 2 not greater than argument.
|
||||
*/
|
||||
static uint32_t p2floor8( uint32_t x )
|
||||
{
|
||||
uint32_t y;
|
||||
while ( ( y = x & (x - 1) ) )
|
||||
x = y;
|
||||
return x;
|
||||
}
|
||||
|
||||
/**
|
||||
* wrap(x, i):
|
||||
* Wrap x to the range 0 to i-1.
|
||||
*/
|
||||
static uint32_t wrap8( uint32_t x, uint32_t i )
|
||||
{
|
||||
uint32_t n = p2floor( i );
|
||||
return ( x & (n - 1) ) + (i - n);
|
||||
}
|
||||
|
||||
/**
|
||||
* smix1(B, r, N, V, X, ctx):
|
||||
* Compute first loop of B = SMix_r(B, N). The input B must be 128r bytes in
|
||||
* length; the temporary storage V must be 128rN bytes in length; the temporary
|
||||
* storage X must be 128r bytes in length.
|
||||
*/
|
||||
static void smix1_8way( __m256i *B, size_t r, uint32_t N,
|
||||
__m256i *V, __m256i *X, pwxform_8way_ctx_t *ctx )
|
||||
{
|
||||
size_t s = 32 * r;
|
||||
uint32_t i, j;
|
||||
size_t k;
|
||||
|
||||
/* 1: X <-- B */
|
||||
for ( k = 0; k < 2 * r; k++ )
|
||||
for ( i = 0; i < 16; i++ )
|
||||
X[ k * 16 + i ] = B[ k * 16 + ( i * 5 % 16 ) ];
|
||||
|
||||
if ( ctx->version != YESPOWER_0_5 )
|
||||
{
|
||||
for ( k = 1; k < r; k++ )
|
||||
{
|
||||
blkcpy_8way( &X[k * 32], &X[ (k - 1) * 32 ], 32 );
|
||||
blockmix_pwxform_8way( &X[k * 32], ctx, 1 );
|
||||
}
|
||||
}
|
||||
|
||||
/* 2: for i = 0 to N - 1 do */
|
||||
for ( i = 0; i < N; i++ )
|
||||
{
|
||||
/* 3: V_i <-- X */
|
||||
blkcpy_8way( &V[i * s], X, s );
|
||||
|
||||
if ( i > 1 )
|
||||
{
|
||||
|
||||
// is j int or vector? Integrify has data dependent addressing?
|
||||
|
||||
/* j <-- Wrap(Integerify(X), i) */
|
||||
// j = wrap8( integerify8( X, r ), i );
|
||||
|
||||
/* X <-- X xor V_j */
|
||||
blkxor_8way( X, &V[j * s], s );
|
||||
}
|
||||
|
||||
/* 4: X <-- H(X) */
|
||||
if ( V != ctx->S )
|
||||
blockmix_pwxform_8way( X, ctx, r );
|
||||
else
|
||||
blockmix_salsa_8way( X, ctx->salsa20_rounds );
|
||||
}
|
||||
|
||||
/* B' <-- X */
|
||||
for ( k = 0; k < 2 * r; k++ )
|
||||
for ( i = 0; i < 16; i++ )
|
||||
B[ k * 16 + ( i * 5 % 16 ) ] = X[ k * 16 + i ];
|
||||
}
|
||||
|
||||
/**
|
||||
* smix2(B, r, N, Nloop, V, X, ctx):
|
||||
* Compute second loop of B = SMix_r(B, N). The input B must be 128r bytes in
|
||||
* length; the temporary storage V must be 128rN bytes in length; the temporary
|
||||
* storage X must be 128r bytes in length. The value N must be a power of 2
|
||||
* greater than 1.
|
||||
*/
|
||||
static void smix2_8way( __m256i *B, size_t r, uint32_t N, uint32_t Nloop,
|
||||
__m256i *V, __m256i *X, pwxform_8way_ctx_t *ctx )
|
||||
{
|
||||
size_t s = 32 * r;
|
||||
uint32_t i, j;
|
||||
size_t k;
|
||||
|
||||
/* X <-- B */
|
||||
for ( k = 0; k < 2 * r; k++ )
|
||||
for ( i = 0; i < 16; i++ )
|
||||
X[ k * 16 + i ] = B[ k * 16 + ( i * 5 % 16 ) ];
|
||||
|
||||
/* 6: for i = 0 to N - 1 do */
|
||||
for ( i = 0; i < Nloop; i++ )
|
||||
{
|
||||
/* 7: j <-- Integerify(X) mod N */
|
||||
// j = integerify8(X, r) & (N - 1);
|
||||
|
||||
/* 8.1: X <-- X xor V_j */
|
||||
blkxor_8way( X, &V[j * s], s );
|
||||
/* V_j <-- X */
|
||||
if ( Nloop != 2 )
|
||||
blkcpy_8way( &V[j * s], X, s );
|
||||
|
||||
/* 8.2: X <-- H(X) */
|
||||
blockmix_pwxform_8way( X, ctx, r );
|
||||
}
|
||||
|
||||
/* 10: B' <-- X */
|
||||
for ( k = 0; k < 2 * r; k++ )
|
||||
for ( i = 0; i < 16; i++ )
|
||||
B[ k * 16 + ( i * 5 % 16 ) ] = X[ k * 16 + i ];
|
||||
}
|
||||
|
||||
/**
|
||||
* smix(B, r, N, p, t, V, X, ctx):
|
||||
* Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the
|
||||
* temporary storage V must be 128rN bytes in length; the temporary storage
|
||||
* X must be 128r bytes in length. The value N must be a power of 2 and at
|
||||
* least 16.
|
||||
*/
|
||||
static void smix_8way( __m256i *B, size_t r, uint32_t N,
|
||||
__m256i *V, __m256i *X, pwxform_8way_ctx_t *ctx)
|
||||
{
|
||||
uint32_t Nloop_all = (N + 2) / 3; /* 1/3, round up */
|
||||
uint32_t Nloop_rw = Nloop_all;
|
||||
|
||||
Nloop_all++; Nloop_all &= ~(uint32_t)1; /* round up to even */
|
||||
|
||||
if ( ctx->version == YESPOWER_0_5 )
|
||||
Nloop_rw &= ~(uint32_t)1; /* round down to even */
|
||||
else
|
||||
Nloop_rw++; Nloop_rw &= ~(uint32_t)1; /* round up to even */
|
||||
|
||||
smix1_8way( B, 1, ctx->Sbytes / 128, ctx->S, X, ctx );
|
||||
smix1_8way( B, r, N, V, X, ctx );
|
||||
smix2_8way( B, r, N, Nloop_rw /* must be > 2 */, V, X, ctx );
|
||||
smix2_8way( B, r, N, Nloop_all - Nloop_rw /* 0 or 2 */, V, X, ctx );
|
||||
}
|
||||
|
||||
/**
|
||||
* yespower(local, src, srclen, params, dst):
|
||||
* Compute yespower(src[0 .. srclen - 1], N, r), to be checked for "< target".
|
||||
*
|
||||
* Return 0 on success; or -1 on error.
|
||||
*/
|
||||
int yespower_8way( yespower_local_t *local, const __m256i *src, size_t srclen,
|
||||
const yespower_params_t *params, yespower_8way_binary_t *dst,
|
||||
int thrid )
|
||||
{
|
||||
yespower_version_t version = params->version;
|
||||
uint32_t N = params->N;
|
||||
uint32_t r = params->r;
|
||||
const uint8_t *pers = params->pers;
|
||||
size_t perslen = params->perslen;
|
||||
int retval = -1;
|
||||
size_t B_size, V_size;
|
||||
uint32_t *B, *V, *X, *S;
|
||||
pwxform_8way_ctx_t ctx;
|
||||
__m256i sha256[8];
|
||||
|
||||
/* Sanity-check parameters */
|
||||
if ( (version != YESPOWER_0_5 && version != YESPOWER_1_0 ) ||
|
||||
N < 1024 || N > 512 * 1024 || r < 8 || r > 32 ||
|
||||
(N & (N - 1)) != 0 || r < rmin ||
|
||||
(!pers && perslen) )
|
||||
{
|
||||
errno = EINVAL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Allocate memory */
|
||||
B_size = (size_t)128 * r;
|
||||
V_size = B_size * N;
|
||||
if ((V = malloc(V_size)) == NULL)
|
||||
return -1;
|
||||
if ((B = malloc(B_size)) == NULL)
|
||||
goto free_V;
|
||||
if ((X = malloc(B_size)) == NULL)
|
||||
goto free_B;
|
||||
ctx.version = version;
|
||||
if (version == YESPOWER_0_5) {
|
||||
ctx.salsa20_rounds = 8;
|
||||
ctx.PWXrounds = PWXrounds_0_5;
|
||||
ctx.Swidth = Swidth_0_5;
|
||||
ctx.Sbytes = 2 * Swidth_to_Sbytes1(ctx.Swidth);
|
||||
} else {
|
||||
ctx.salsa20_rounds = 2;
|
||||
ctx.PWXrounds = PWXrounds_1_0;
|
||||
ctx.Swidth = Swidth_1_0;
|
||||
ctx.Sbytes = 3 * Swidth_to_Sbytes1(ctx.Swidth);
|
||||
}
|
||||
if ((S = malloc(ctx.Sbytes)) == NULL)
|
||||
goto free_X;
|
||||
ctx.S = S;
|
||||
ctx.S0 = (__m256i (*)[2])S;
|
||||
ctx.S1 = ctx.S0 + (1 << ctx.Swidth) * PWXsimple;
|
||||
ctx.S2 = ctx.S1 + (1 << ctx.Swidth) * PWXsimple;
|
||||
ctx.Smask = Swidth_to_Smask(ctx.Swidth);
|
||||
ctx.w = 0;
|
||||
|
||||
// do prehash
|
||||
sha256_8way_full( sha256, src, srclen );
|
||||
|
||||
|
||||
// need flexible size, use malloc;
|
||||
__m256i vpers[128];
|
||||
|
||||
if ( version != YESPOWER_0_5 && perslen )
|
||||
for ( int i = 0; i < perslen/4 + 1; i++ )
|
||||
vpers[i] = _mm256_set1_epi32( pers[i] );
|
||||
|
||||
/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
|
||||
pbkdf2_sha256_8way( B, B_size, sha256, sizeof(sha256), vpers, perslen, 1 );
|
||||
|
||||
blkcpy_8way( sha256, B, sizeof(sha256) / sizeof(sha256[0] ) );
|
||||
|
||||
/* 3: B_i <-- MF(B_i, N) */
|
||||
smix_8way( B, r, N, V, X, &ctx );
|
||||
|
||||
if ( version == YESPOWER_0_5 )
|
||||
{
|
||||
/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
|
||||
pbkdf2_sha256_8way( dst, sizeof(*dst), sha256, sizeof(sha256),
|
||||
B, B_size, 1 );
|
||||
|
||||
if ( pers )
|
||||
{
|
||||
hmac_sha256_8way_full( dst, sizeof(*dst), vpers, perslen, sha256 );
|
||||
sha256_8way_full( dst, sha256, sizeof(sha256) );
|
||||
}
|
||||
}
|
||||
else
|
||||
hmac_sha256_8way_full( dst, B + B_size - 64, 64, sha256, sizeof(sha256) );
|
||||
|
||||
/* Success! */
|
||||
retval = 1;
|
||||
|
||||
/* Free memory */
|
||||
free(S);
|
||||
free_X:
|
||||
free(X);
|
||||
free_B:
|
||||
free(B);
|
||||
free_V:
|
||||
free(V);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
int yespower_8way_tls( const __m256i *src, size_t srclen,
|
||||
const yespower_params_t *params, yespower_8way_binary_t *dst, int trhid )
|
||||
{
|
||||
/* The reference implementation doesn't use thread-local storage */
|
||||
return yespower_8way( NULL, src, srclen, params, dst, trhid );
|
||||
}
|
||||
|
||||
int yespower_init_local8( yespower_local_t *local )
|
||||
{
|
||||
/* The reference implementation doesn't use the local structure */
|
||||
local->base = local->aligned = NULL;
|
||||
local->base_size = local->aligned_size = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int yespower_free_local8( yespower_local_t *local )
|
||||
{
|
||||
/* The reference implementation frees its memory in yespower() */
|
||||
(void)local; /* unused */
|
||||
return 0;
|
||||
}
|
||||
|
||||
int yespower_8way_hash( const char *input, char *output, uint32_t len,
|
||||
int thrid )
|
||||
{
|
||||
return yespower_8way_tls( input, len, &yespower_params,
|
||||
(yespower_binary_t*)output, thrid );
|
||||
}
|
||||
|
||||
int scanhash_yespower_8way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
uint32_t _ALIGN(128) hash[8*8];
|
||||
uint32_t _ALIGN(128) vdata[20*8];
|
||||
uint32_t _ALIGN(128) endiandata[20];
|
||||
uint32_t *pdata = work->data;
|
||||
uint32_t *ptarget = work->target;
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t last_nonce = max_nonce;
|
||||
uint32_t n = first_nonce;
|
||||
const int thr_id = mythr->id;
|
||||
|
||||
for ( int k = 0; k < 19; k++ )
|
||||
be32enc( &endiandata[k], pdata[k] );
|
||||
endiandata[19] = n;
|
||||
|
||||
// do sha256 prehash
|
||||
SHA256_Init( &sha256_prehash_ctx );
|
||||
SHA256_Update( &sha256_prehash_ctx, endiandata, 64 );
|
||||
|
||||
do {
|
||||
if ( yespower_hash( vdata, hash, 80, thr_id ) )
|
||||
if unlikely( valid_hash( hash, ptarget ) && !opt_benchmark )
|
||||
{
|
||||
be32enc( pdata+19, n );
|
||||
submit_solution( work, hash, mythr );
|
||||
}
|
||||
endiandata[19] = ++n;
|
||||
} while ( n < last_nonce && !work_restart[thr_id].restart );
|
||||
*hashes_done = n - first_nonce;
|
||||
pdata[19] = n;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif // AVX2
|
@@ -194,11 +194,13 @@ static int free_region(yespower_region_t *region)
|
||||
#define restrict
|
||||
#endif
|
||||
|
||||
/*
|
||||
#ifdef __GNUC__
|
||||
#define unlikely(exp) __builtin_expect(exp, 0)
|
||||
#else
|
||||
#define unlikely(exp) (exp)
|
||||
#endif
|
||||
*/
|
||||
|
||||
#ifdef __SSE__
|
||||
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
|
||||
@@ -1113,7 +1115,7 @@ static void smix(uint8_t *B, size_t r, uint32_t N,
|
||||
int yespower_b2b(yespower_local_t *local,
|
||||
const uint8_t *src, size_t srclen,
|
||||
const yespower_params_t *params,
|
||||
yespower_binary_t *dst)
|
||||
yespower_binary_t *dst, int thrid )
|
||||
{
|
||||
uint32_t N = params->N;
|
||||
uint32_t r = params->r;
|
||||
@@ -1168,17 +1170,25 @@ int yespower_b2b(yespower_local_t *local,
|
||||
srclen = 0;
|
||||
}
|
||||
|
||||
if ( work_restart[thrid].restart ) return false;
|
||||
|
||||
pbkdf2_blake2b_yp(init_hash, sizeof(init_hash), src, srclen, 1, B, 128);
|
||||
|
||||
if ( work_restart[thrid].restart ) return false;
|
||||
|
||||
memcpy(init_hash, B, sizeof(init_hash));
|
||||
smix_1_0(B, r, N, V, XY, &ctx);
|
||||
|
||||
if ( work_restart[thrid].restart ) return false;
|
||||
|
||||
hmac_blake2b_yp_hash((uint8_t *)dst, B + B_size - 64, 64, init_hash, sizeof(init_hash));
|
||||
|
||||
/* Success! */
|
||||
return 0;
|
||||
return 1;
|
||||
|
||||
fail:
|
||||
memset(dst, 0xff, sizeof(*dst));
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1189,7 +1199,7 @@ fail:
|
||||
* Return 0 on success; or -1 on error.
|
||||
*/
|
||||
int yespower_b2b_tls(const uint8_t *src, size_t srclen,
|
||||
const yespower_params_t *params, yespower_binary_t *dst)
|
||||
const yespower_params_t *params, yespower_binary_t *dst, int thrid )
|
||||
{
|
||||
static __thread int initialized = 0;
|
||||
static __thread yespower_local_t local;
|
||||
@@ -1199,7 +1209,7 @@ int yespower_b2b_tls(const uint8_t *src, size_t srclen,
|
||||
initialized = 1;
|
||||
}
|
||||
|
||||
return yespower_b2b(&local, src, srclen, params, dst);
|
||||
return yespower_b2b(&local, src, srclen, params, dst, thrid);
|
||||
}
|
||||
/*
|
||||
int yespower_init_local(yespower_local_t *local)
|
||||
|
@@ -30,13 +30,16 @@

#include "algo-gate-api.h"

static yespower_params_t yespower_params;
yespower_params_t yespower_params;

SHA256_CTX sha256_prehash_ctx;

// YESPOWER

void yespower_hash( const char *input, char *output, uint32_t len )
int yespower_hash( const char *input, char *output, uint32_t len, int thrid )
{
yespower_tls( input, len, &yespower_params, (yespower_binary_t*)output );
return yespower_tls( input, len, &yespower_params,
(yespower_binary_t*)output, thrid );
}

int scanhash_yespower( struct work *work, uint32_t max_nonce,

@@ -54,8 +57,13 @@ int scanhash_yespower( struct work *work, uint32_t max_nonce,
for ( int k = 0; k < 19; k++ )
be32enc( &endiandata[k], pdata[k] );
endiandata[19] = n;

// do sha256 prehash
SHA256_Init( &sha256_prehash_ctx );
SHA256_Update( &sha256_prehash_ctx, endiandata, 64 );

do {
yespower_hash( (char*)endiandata, (char*)vhash, 80 );
if ( yespower_hash( (char*)endiandata, (char*)vhash, 80, thr_id ) )
if unlikely( valid_hash( vhash, ptarget ) && !opt_benchmark )
{
be32enc( pdata+19, n );
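
The sha256_prehash_ctx lines above are the per-work half of a small optimization shared by several hunks in this change: the first 64 bytes of the 80-byte block header do not change while scanning nonces, so their SHA-256 state is absorbed once per work and reused for every nonce. A hedged sketch of the idea; the helper names and the static context are assumptions, only the 64/16-byte split comes from the hunks:

    #include <string.h>
    #include <openssl/sha.h>

    static SHA256_CTX prehash;   /* one per mining thread in practice */

    /* Called once when new work arrives: absorb the constant 64-byte prefix. */
    static void prehash_init( const unsigned char header80[80] )
    {
       SHA256_Init( &prehash );
       SHA256_Update( &prehash, header80, 64 );
    }

    /* Called per nonce: copy the saved state and hash only the final 16 bytes. */
    static void prehash_finish( const unsigned char header80[80],
                                unsigned char out32[32] )
    {
       SHA256_CTX c;
       memcpy( &c, &prehash, sizeof c );
       SHA256_Update( &c, header80 + 64, 80 - 64 );
       SHA256_Final( out32, &c );
    }
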
|
||||
@@ -70,9 +78,9 @@ int scanhash_yespower( struct work *work, uint32_t max_nonce,
|
||||
|
||||
// YESPOWER-B2B
|
||||
|
||||
void yespower_b2b_hash( const char *input, char *output, uint32_t len )
|
||||
int yespower_b2b_hash( const char *input, char *output, uint32_t len, int thrid )
|
||||
{
|
||||
yespower_b2b_tls( input, len, &yespower_params, (yespower_binary_t*)output );
|
||||
return yespower_b2b_tls( input, len, &yespower_params, (yespower_binary_t*)output, thrid );
|
||||
}
|
||||
|
||||
int scanhash_yespower_b2b( struct work *work, uint32_t max_nonce,
|
||||
@@ -85,13 +93,18 @@ int scanhash_yespower_b2b( struct work *work, uint32_t max_nonce,
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
uint32_t n = first_nonce;
|
||||
const uint32_t last_nonce = max_nonce;
|
||||
const int thr_id = mythr->id; // thr_id arg is deprecated
|
||||
const int thr_id = mythr->id;
|
||||
|
||||
for ( int k = 0; k < 19; k++ )
|
||||
be32enc( &endiandata[k], pdata[k] );
|
||||
endiandata[19] = n;
|
||||
|
||||
// do sha256 prehash
|
||||
SHA256_Init( &sha256_prehash_ctx );
|
||||
SHA256_Update( &sha256_prehash_ctx, endiandata, 64 );
|
||||
|
||||
do {
|
||||
yespower_b2b_hash( (char*) endiandata, (char*) vhash, 80 );
|
||||
if (yespower_b2b_hash( (char*) endiandata, (char*) vhash, 80, thr_id ) )
|
||||
if unlikely( valid_hash( vhash, ptarget ) && !opt_benchmark )
|
||||
{
|
||||
be32enc( pdata+19, n );
|
||||
@@ -151,7 +164,7 @@ bool register_yespowerr16_algo( algo_gate_t* gate )
|
||||
return true;
|
||||
};
|
||||
|
||||
/* not used
|
||||
/* not used, doesn't work
|
||||
bool register_yescrypt_05_algo( algo_gate_t* gate )
|
||||
{
|
||||
gate->optimizations = SSE2_OPT | SHA_OPT;
|
||||
@@ -165,6 +178,40 @@ bool register_yescrypt_05_algo( algo_gate_t* gate )
|
||||
return true;
|
||||
}
|
||||
|
||||
bool register_yescrypt_05_algo( algo_gate_t* gate )
|
||||
{
|
||||
gate->optimizations = SSE2_OPT | SHA_OPT;
|
||||
gate->scanhash = (void*)&scanhash_yespower;
|
||||
yespower_params.version = YESPOWER_0_5;
|
||||
|
||||
if ( opt_param_n ) yespower_params.N = opt_param_n;
|
||||
else yespower_params.N = 2048;
|
||||
|
||||
if ( opt_param_r ) yespower_params.r = opt_param_r;
|
||||
else yespower_params.r = 8;
|
||||
|
||||
if ( opt_param_key )
|
||||
{
|
||||
yespower_params.pers = opt_param_key;
|
||||
yespower_params.perslen = strlen( opt_param_key );
|
||||
}
|
||||
else
|
||||
{
|
||||
yespower_params.pers = NULL;
|
||||
yespower_params.perslen = 0;
|
||||
}
|
||||
|
||||
// YESCRYPT_P = 1;
|
||||
|
||||
applog( LOG_NOTICE,"Yescrypt parameters: N= %d, R= %d.",
|
||||
yespower_params.N, yespower_params.r );
|
||||
if ( yespower_params.pers )
|
||||
applog( LOG_NOTICE,"Key= \"%s\"\n", yespower_params.pers );
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
bool register_yescryptr8_05_algo( algo_gate_t* gate )
|
||||
{
|
||||
gate->optimizations = SSE2_OPT | SHA_OPT;
|
||||
|
@@ -96,6 +96,8 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include "algo/sha/hmac-sha256-hash.h"
|
||||
#include "algo/sha/hmac-sha256-hash-4way.h"
|
||||
|
||||
#include "yespower.h"
|
||||
#include "yespower-platform.c"
|
||||
|
||||
@@ -107,11 +109,13 @@
|
||||
#define restrict
|
||||
#endif
|
||||
|
||||
/*
|
||||
#ifdef __GNUC__
|
||||
#define unlikely(exp) __builtin_expect(exp, 0)
|
||||
#else
|
||||
#define unlikely(exp) (exp)
|
||||
#endif
|
||||
*/
|
||||
|
||||
#ifdef __SSE__
|
||||
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
|
||||
@@ -1023,7 +1027,7 @@ static void smix(uint8_t *B, size_t r, uint32_t N,
|
||||
int yespower(yespower_local_t *local,
|
||||
const uint8_t *src, size_t srclen,
|
||||
const yespower_params_t *params,
|
||||
yespower_binary_t *dst)
|
||||
yespower_binary_t *dst, int thrid )
|
||||
{
|
||||
yespower_version_t version = params->version;
|
||||
uint32_t N = params->N;
|
||||
@@ -1036,12 +1040,13 @@ int yespower(yespower_local_t *local,
|
||||
salsa20_blk_t *V, *XY;
|
||||
pwxform_ctx_t ctx;
|
||||
uint8_t sha256[32];
|
||||
SHA256_CTX sha256_ctx;
|
||||
|
||||
/* Sanity-check parameters */
|
||||
if ((version != YESPOWER_0_5 && version != YESPOWER_1_0) ||
|
||||
N < 1024 || N > 512 * 1024 || r < 8 || r > 32 ||
|
||||
(N & (N - 1)) != 0 ||
|
||||
(!pers && perslen)) {
|
||||
if ( (version != YESPOWER_0_5 && version != YESPOWER_1_0)
|
||||
|| N < 1024 || N > 512 * 1024 || r < 8 || r > 32
|
||||
|| (N & (N - 1)) != 0 || ( !pers && perslen ) )
|
||||
{
|
||||
errno = EINVAL;
|
||||
return -1;
|
||||
}
|
||||
@@ -1049,20 +1054,22 @@ int yespower(yespower_local_t *local,
|
||||
/* Allocate memory */
|
||||
B_size = (size_t)128 * r;
|
||||
V_size = B_size * N;
|
||||
if (version == YESPOWER_0_5) {
|
||||
if ( version == YESPOWER_0_5 )
|
||||
{
|
||||
XY_size = B_size * 2;
|
||||
Swidth = Swidth_0_5;
|
||||
ctx.Sbytes = 2 * Swidth_to_Sbytes1(Swidth);
|
||||
ctx.Sbytes = 2 * Swidth_to_Sbytes1( Swidth );
|
||||
} else {
|
||||
XY_size = B_size + 64;
|
||||
Swidth = Swidth_1_0;
|
||||
ctx.Sbytes = 3 * Swidth_to_Sbytes1(Swidth);
|
||||
ctx.Sbytes = 3 * Swidth_to_Sbytes1( Swidth );
|
||||
}
|
||||
need = B_size + V_size + XY_size + ctx.Sbytes;
|
||||
if (local->aligned_size < need) {
|
||||
if (free_region(local))
|
||||
if ( local->aligned_size < need )
|
||||
{
|
||||
if ( free_region( local ) )
|
||||
return -1;
|
||||
if (!alloc_region(local, need))
|
||||
if ( !alloc_region( local, need ) )
|
||||
return -1;
|
||||
}
|
||||
B = (uint8_t *)local->aligned;
|
||||
@@ -1070,43 +1077,85 @@ int yespower(yespower_local_t *local,
|
||||
XY = (salsa20_blk_t *)((uint8_t *)V + V_size);
|
||||
S = (uint8_t *)XY + XY_size;
|
||||
ctx.S0 = S;
|
||||
ctx.S1 = S + Swidth_to_Sbytes1(Swidth);
|
||||
ctx.S1 = S + Swidth_to_Sbytes1( Swidth );
|
||||
|
||||
SHA256_Buf(src, srclen, sha256);
|
||||
|
||||
if (version == YESPOWER_0_5) {
|
||||
PBKDF2_SHA256(sha256, sizeof(sha256), src, srclen, 1,
|
||||
B, B_size);
|
||||
memcpy(sha256, B, sizeof(sha256));
|
||||
smix(B, r, N, V, XY, &ctx);
|
||||
PBKDF2_SHA256(sha256, sizeof(sha256), B, B_size, 1,
|
||||
(uint8_t *)dst, sizeof(*dst));
|
||||
// copy prehash, do tail
|
||||
memcpy( &sha256_ctx, &sha256_prehash_ctx, sizeof sha256_ctx );
|
||||
SHA256_Update( &sha256_ctx, src+64, srclen-64 );
|
||||
SHA256_Final( sha256, &sha256_ctx );
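/* This is the consumer side of the prehash set up in scanhash: the copied
 * sha256_prehash_ctx already holds the state of the first 64 header bytes,
 * so only the remaining srclen-64 bytes (the nonce end of an 80-byte header)
 * are hashed per attempt, replacing the full SHA256_Buf(src, srclen, sha256)
 * call that is commented out below. */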
|
||||
|
||||
if (pers) {
|
||||
HMAC_SHA256_Buf(dst, sizeof(*dst), pers, perslen,
|
||||
sha256);
|
||||
SHA256_Buf(sha256, sizeof(sha256), (uint8_t *)dst);
|
||||
// SHA256_Buf(src, srclen, sha256);
|
||||
|
||||
if ( version == YESPOWER_0_5 )
|
||||
{
|
||||
PBKDF2_SHA256( sha256, sizeof(sha256), src, srclen, 1, B, B_size );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
memcpy( sha256, B, sizeof(sha256) );
|
||||
smix( B, r, N, V, XY, &ctx );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
PBKDF2_SHA256( sha256, sizeof(sha256), B, B_size, 1, (uint8_t *)dst,
|
||||
sizeof(*dst) );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
if ( pers )
|
||||
{
|
||||
src = pers;
|
||||
srclen = perslen;
|
||||
}
|
||||
else
|
||||
srclen = 0;
|
||||
|
||||
HMAC_SHA256_CTX ctx;
|
||||
HMAC_SHA256_Init( &ctx, dst, sizeof(*dst) );
|
||||
HMAC_SHA256_Update( &ctx, src, srclen );
|
||||
HMAC_SHA256_Final( sha256, &ctx );
|
||||
|
||||
// SHA256_CTX ctx;
|
||||
SHA256_Init( &sha256_ctx );
|
||||
SHA256_Update( &sha256_ctx, sha256, sizeof(sha256) );
|
||||
SHA256_Final( (unsigned char*)dst, &sha256_ctx );
|
||||
|
||||
|
||||
/*
|
||||
if ( pers )
|
||||
{
|
||||
HMAC_SHA256_Buf( dst, sizeof(*dst), pers, perslen, sha256 );
|
||||
SHA256_Buf( sha256, sizeof(sha256), (uint8_t *)dst );
|
||||
}
|
||||
} else {
|
||||
ctx.S2 = S + 2 * Swidth_to_Sbytes1(Swidth);
|
||||
*/
|
||||
}
|
||||
else
|
||||
{
|
||||
ctx.S2 = S + 2 * Swidth_to_Sbytes1( Swidth );
|
||||
ctx.w = 0;
|
||||
|
||||
if (pers) {
|
||||
if ( pers )
|
||||
{
|
||||
src = pers;
|
||||
srclen = perslen;
|
||||
} else {
|
||||
srclen = 0;
|
||||
}
|
||||
else
|
||||
srclen = 0;
|
||||
|
||||
PBKDF2_SHA256(sha256, sizeof(sha256), src, srclen, 1, B, 128);
|
||||
memcpy(sha256, B, sizeof(sha256));
|
||||
smix_1_0(B, r, N, V, XY, &ctx);
|
||||
HMAC_SHA256_Buf(B + B_size - 64, 64,
|
||||
sha256, sizeof(sha256), (uint8_t *)dst);
|
||||
PBKDF2_SHA256( sha256, sizeof(sha256), src, srclen, 1, B, 128 );
|
||||
memcpy( sha256, B, sizeof(sha256) );
|
||||
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
smix_1_0( B, r, N, V, XY, &ctx );
|
||||
|
||||
HMAC_SHA256_Buf( B + B_size - 64, 64, sha256, sizeof(sha256),
|
||||
(uint8_t *)dst );
|
||||
}
|
||||
|
||||
/* Success! */
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1117,7 +1166,7 @@ int yespower(yespower_local_t *local,
|
||||
* Return 0 on success; or -1 on error.
|
||||
*/
|
||||
int yespower_tls(const uint8_t *src, size_t srclen,
|
||||
const yespower_params_t *params, yespower_binary_t *dst)
|
||||
const yespower_params_t *params, yespower_binary_t *dst, int thrid )
|
||||
{
|
||||
static __thread int initialized = 0;
|
||||
static __thread yespower_local_t local;
|
||||
@@ -1128,7 +1177,7 @@ int yespower_tls(const uint8_t *src, size_t srclen,
|
||||
initialized = 1;
|
||||
}
|
||||
|
||||
return yespower(&local, src, srclen, params, dst);
|
||||
return yespower( &local, src, srclen, params, dst, thrid );
|
||||
}
|
||||
|
||||
int yespower_init_local(yespower_local_t *local)
|
||||
|
@@ -453,9 +453,8 @@ static void smix(uint32_t *B, size_t r, uint32_t N,
|
||||
*
|
||||
* Return 0 on success; or -1 on error.
|
||||
*/
|
||||
int yespower(yespower_local_t *local,
|
||||
const uint8_t *src, size_t srclen,
|
||||
const yespower_params_t *params, yespower_binary_t *dst)
|
||||
int yespower( yespower_local_t *local, const uint8_t *src, size_t srclen,
|
||||
const yespower_params_t *params, yespower_binary_t *dst, int thrid )
|
||||
{
|
||||
yespower_version_t version = params->version;
|
||||
uint32_t N = params->N;
|
||||
@@ -534,17 +533,16 @@ int yespower(yespower_local_t *local,
|
||||
|
||||
if (pers) {
|
||||
HMAC_SHA256_Buf(dst, sizeof(*dst), pers, perslen,
|
||||
return true;
|
||||
(uint8_t *)sha256);
|
||||
SHA256_Buf(sha256, sizeof(sha256), (uint8_t *)dst);
|
||||
}
|
||||
} else {
|
||||
HMAC_SHA256_Buf_P((uint8_t *)B + B_size - 64, 64,
|
||||
HMAC_SHA256_Buf((uint8_t *)B + B_size - 64, 64,
|
||||
sha256, sizeof(sha256), (uint8_t *)dst);
|
||||
}
|
||||
|
||||
/* Success! */
|
||||
retval = 0;
|
||||
retval = 1;
|
||||
|
||||
/* Free memory */
|
||||
free(S);
|
||||
@@ -559,10 +557,10 @@ free_V:
|
||||
}
|
||||
|
||||
int yespower_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst)
const yespower_params_t *params, yespower_binary_t *dst, int thrid )
{
/* The reference implementation doesn't use thread-local storage */
return yespower(NULL, src, srclen, params, dst);
return yespower(NULL, src, srclen, params, dst, thrid );
}

int yespower_init_local(yespower_local_t *local)
|
@@ -32,6 +32,9 @@
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h> /* for size_t */
|
||||
#include "miner.h"
|
||||
#include "simd-utils.h"
|
||||
#include <openssl/sha.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
@@ -73,6 +76,10 @@ typedef struct {
|
||||
unsigned char uc[32];
|
||||
} yespower_binary_t __attribute__ ((aligned (64)));
|
||||
|
||||
yespower_params_t yespower_params;
|
||||
|
||||
SHA256_CTX sha256_prehash_ctx;
|
||||
|
||||
/**
|
||||
* yespower_init_local(local):
|
||||
* Initialize the thread-local (RAM) data structure. Actual memory allocation
|
||||
@@ -109,11 +116,11 @@ extern int yespower_free_local(yespower_local_t *local);
|
||||
*/
|
||||
extern int yespower(yespower_local_t *local,
|
||||
const uint8_t *src, size_t srclen,
|
||||
const yespower_params_t *params, yespower_binary_t *dst);
|
||||
const yespower_params_t *params, yespower_binary_t *dst, int thrid);
|
||||
|
||||
extern int yespower_b2b(yespower_local_t *local,
|
||||
const uint8_t *src, size_t srclen,
|
||||
const yespower_params_t *params, yespower_binary_t *dst);
|
||||
const yespower_params_t *params, yespower_binary_t *dst, int thrid );
|
||||
|
||||
/**
|
||||
* yespower_tls(src, srclen, params, dst):
|
||||
@@ -125,10 +132,28 @@ extern int yespower_b2b(yespower_local_t *local,
|
||||
* MT-safe as long as dst is local to the thread.
|
||||
*/
|
||||
extern int yespower_tls(const uint8_t *src, size_t srclen,
|
||||
const yespower_params_t *params, yespower_binary_t *dst);
|
||||
const yespower_params_t *params, yespower_binary_t *dst, int thr_id);
|
||||
|
||||
extern int yespower_b2b_tls(const uint8_t *src, size_t srclen,
|
||||
const yespower_params_t *params, yespower_binary_t *dst);
|
||||
const yespower_params_t *params, yespower_binary_t *dst, int thr_id);
|
||||
|
||||
|
||||
#if defined(__AVX2__)
|
||||
|
||||
typedef struct
|
||||
{
|
||||
__m256i uc[8];
|
||||
} yespower_8way_binary_t __attribute__ ((aligned (128)));
|
||||
|
||||
extern int yespower_8way( yespower_local_t *local, const __m256i *src,
|
||||
size_t srclen, const yespower_params_t *params,
|
||||
yespower_8way_binary_t *dst, int thrid );
|
||||
|
||||
|
||||
extern int yespower_8way_tls( const __m256i *src, size_t srclen,
|
||||
const yespower_params_t *params, yespower_8way_binary_t *dst, int thr_id );
|
||||
|
||||
#endif // AVX2
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
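The only functional change in these yespower hunks is the extra thrid argument threaded through yespower(), yespower_tls() and the new 8-way variants, which tells the hash function which miner thread is calling it (and, by implication, lets it poll that thread's restart flag during a long hash). A minimal sketch of a scanhash-style caller follows; it assumes the usual cpuminer globals (work_restart[], yespower_params), omits the endian handling a real routine performs on the block header, and is not code from this patch.

   // Sketch only: how a scanhash loop could pass the thread id down and treat
   // a zero return as "no usable result" (e.g. the hash was abandoned early).
   #include <string.h>
   #include "miner.h"
   #include "yespower.h"

   int scanhash_yespower_sketch( struct work *work, uint32_t max_nonce,
                                 uint64_t *hashes_done, struct thr_info *mythr )
   {
      uint32_t edata[20] __attribute__ ((aligned (64)));
      yespower_binary_t hash;
      const uint32_t first_nonce = work->data[19];
      uint32_t n = first_nonce;
      const int thr_id = mythr->id;

      memcpy( edata, work->data, 80 );   // endian conversion omitted in this sketch

      do
      {
         edata[19] = n;   // trial nonce
         // thr_id is assumed to let yespower_tls() notice work_restart[thr_id]
         // and bail out; that behaviour is inferred from the new parameter.
         if ( yespower_tls( (uint8_t*)edata, 80, &yespower_params, &hash, thr_id )
              && valid_hash( &hash, work->target ) )
            submit_solution( work, &hash, mythr );
         n++;
      } while ( n < max_nonce && !work_restart[thr_id].restart );

      *hashes_done = n - first_nonce;
      work->data[19] = n;
      return 0;
   }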
configure  (vendored, 20 changed lines)
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.12.4.4.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.12.7.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -577,8 +577,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='cpuminer-opt'
PACKAGE_TARNAME='cpuminer-opt'
PACKAGE_VERSION='3.12.4.4'
PACKAGE_STRING='cpuminer-opt 3.12.4.4'
PACKAGE_VERSION='3.12.7'
PACKAGE_STRING='cpuminer-opt 3.12.7'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''

@@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then
  # Omit some internal or obsolete options to make the list less imposing.
  # This message is too long to be a string in the A/UX 3.1 sh.
  cat <<_ACEOF
\`configure' configures cpuminer-opt 3.12.4.4 to adapt to many kinds of systems.
\`configure' configures cpuminer-opt 3.12.7 to adapt to many kinds of systems.

Usage: $0 [OPTION]... [VAR=VALUE]...

@@ -1404,7 +1404,7 @@ fi

if test -n "$ac_init_help"; then
  case $ac_init_help in
     short | recursive ) echo "Configuration of cpuminer-opt 3.12.4.4:";;
     short | recursive ) echo "Configuration of cpuminer-opt 3.12.7:";;
   esac
  cat <<\_ACEOF

@@ -1509,7 +1509,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
  cat <<\_ACEOF
cpuminer-opt configure 3.12.4.4
cpuminer-opt configure 3.12.7
generated by GNU Autoconf 2.69

Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.

It was created by cpuminer-opt $as_me 3.12.4.4, which was
It was created by cpuminer-opt $as_me 3.12.7, which was
generated by GNU Autoconf 2.69. Invocation command line was

  $ $0 $@
@@ -2993,7 +2993,7 @@ fi

# Define the identity of the package.
 PACKAGE='cpuminer-opt'
 VERSION='3.12.4.4'
 VERSION='3.12.7'


cat >>confdefs.h <<_ACEOF
@@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by cpuminer-opt $as_me 3.12.4.4, which was
This file was extended by cpuminer-opt $as_me 3.12.7, which was
generated by GNU Autoconf 2.69. Invocation command line was

  CONFIG_FILES = $CONFIG_FILES
@@ -6756,7 +6756,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
cpuminer-opt config.status 3.12.4.4
cpuminer-opt config.status 3.12.7
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"

configure.ac

@@ -1,4 +1,4 @@
AC_INIT([cpuminer-opt], [3.12.4.4])
AC_INIT([cpuminer-opt], [3.12.7])

AC_PREREQ([2.59c])
AC_CANONICAL_SYSTEM
cpu-miner.c  (530 changed lines)
@@ -102,6 +102,7 @@ static int opt_fail_pause = 10;
|
||||
static int opt_time_limit = 0;
|
||||
int opt_timeout = 300;
|
||||
static int opt_scantime = 5;
|
||||
const int min_scantime = 1;
|
||||
//static const bool opt_time = true;
|
||||
enum algos opt_algo = ALGO_NULL;
|
||||
char* opt_param_key = NULL;
|
||||
@@ -160,7 +161,7 @@ uint32_t rejected_share_count = 0;
|
||||
uint32_t stale_share_count = 0;
|
||||
uint32_t solved_block_count = 0;
|
||||
double *thr_hashrates;
|
||||
double global_hashrate = 0;
|
||||
double global_hashrate = 0.;
|
||||
double stratum_diff = 0.;
|
||||
double net_diff = 0.;
|
||||
double net_hashrate = 0.;
|
||||
@@ -192,8 +193,11 @@ static uint64_t submit_sum = 0;
|
||||
static uint64_t accept_sum = 0;
|
||||
static uint64_t stale_sum = 0;
|
||||
static uint64_t reject_sum = 0;
|
||||
static uint64_t solved_sum = 0;
|
||||
static double norm_diff_sum = 0.;
|
||||
static uint32_t last_block_height = 0;
|
||||
static double highest_share = 0; // all shares include discard and reject
|
||||
static double lowest_share = 9e99; // lowest accepted
|
||||
//static bool new_job = false;
|
||||
static double last_targetdiff = 0.;
|
||||
#if !(defined(__WINDOWS__) || defined(_WIN64) || defined(_WIN32))
|
||||
@@ -216,6 +220,20 @@ char* lp_id;
|
||||
|
||||
static void workio_cmd_free(struct workio_cmd *wc);
|
||||
|
||||
static void format_affinity_map( char *map_str, uint64_t map )
|
||||
{
|
||||
int n = num_cpus < 64 ? num_cpus : 64;
|
||||
int i;
|
||||
|
||||
for ( i = 0; i < n; i++ )
|
||||
{
|
||||
if ( map & 1 ) map_str[i] = '!';
|
||||
else map_str[i] = '.';
|
||||
map >>= 1;
|
||||
}
|
||||
memset( &map_str[i], 0, 64 - i );
|
||||
}
|
||||
|
||||
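For reference, the new format_affinity_map() helper above simply renders the affinity mask one character per logical CPU, '!' for a set bit and '.' for a clear one, up to 64 CPUs. An illustrative call (not taken from the patch):

   char map_str[64];
   format_affinity_map( map_str, 0x5 );             // with num_cpus = 8 -> "!.!....."
   applog( LOG_INFO, "CPU affinity [%s]", map_str );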
#ifdef __linux /* Linux specific policy and affinity management */
|
||||
#include <sched.h>
|
||||
|
||||
@@ -436,9 +454,6 @@ static bool work_decode( const json_t *val, struct work *work )
|
||||
if ( !allow_mininginfo )
|
||||
net_diff = algo_gate.calc_network_diff( work );
|
||||
work->targetdiff = target_to_diff( work->target );
|
||||
// for api stats, on longpoll pools
|
||||
// This needs cleanup, stratum_diff doean't apply solo mining
|
||||
// and targetdiff is redundant, same as net_diff.
|
||||
stratum_diff = last_targetdiff = work->targetdiff;
|
||||
work->sharediff = 0;
|
||||
algo_gate.decode_extra_data( work, &net_blocks );
|
||||
@@ -489,6 +504,10 @@ static bool get_mininginfo( CURL *curl, struct work *work )
|
||||
if ( key && json_is_integer( key ) )
|
||||
net_blocks = json_integer_value( key );
|
||||
|
||||
if ( opt_debug )
|
||||
applog(LOG_INFO,"Mining info: diff %.5g, net_hashrate %f, height %d",
|
||||
net_diff, net_hashrate, net_blocks );
|
||||
|
||||
if ( !work->height )
|
||||
{
|
||||
// complete missing data from getwork
|
||||
@@ -890,16 +909,9 @@ static inline void sprintf_et( char *str, int seconds )
|
||||
sprintf( str, "%um%02us", min, sec );
|
||||
}
|
||||
|
||||
// Bitcoin formula for converting difficulty to an equivalent
|
||||
// number of hashes.
|
||||
//
|
||||
// https://en.bitcoin.it/wiki/Difficulty
|
||||
//
|
||||
// hash = diff * 2**32
|
||||
//
|
||||
// diff_to_hash = 2**32 = 0x100000000 = 4294967296;
|
||||
|
||||
const double diff_to_hash = 4294967296.;
|
||||
const double exp32 = 4294967296.; // 2**32
|
||||
const double exp48 = 4294967296. * 65536.; // 2**48
|
||||
const double exp64 = 4294967296. * 4294967296.; // 2**64
|
||||
|
||||
struct share_stats_t
|
||||
{
|
||||
@@ -943,6 +955,8 @@ void report_summary_log( bool force )
|
||||
uint64_t accepts = accept_sum; accept_sum = 0;
|
||||
uint64_t rejects = reject_sum; reject_sum = 0;
|
||||
uint64_t stales = stale_sum; stale_sum = 0;
|
||||
uint64_t solved = solved_sum; solved_sum = 0;
|
||||
|
||||
memcpy( &start_time, &five_min_start, sizeof start_time );
|
||||
memcpy( &five_min_start, &now, sizeof now );
|
||||
|
||||
@@ -953,12 +967,10 @@ void report_summary_log( bool force )
|
||||
|
||||
double share_time = (double)et.tv_sec + (double)et.tv_usec / 1e6;
|
||||
double ghrate = global_hashrate;
|
||||
|
||||
double shrate = share_time == 0. ? 0. : diff_to_hash * last_targetdiff
|
||||
double shrate = share_time == 0. ? 0. : exp32 * last_targetdiff
|
||||
* (double)(accepts) / share_time;
|
||||
double sess_hrate = uptime.tv_sec == 0. ? 0. : diff_to_hash * norm_diff_sum
|
||||
double sess_hrate = uptime.tv_sec == 0. ? 0. : exp32 * norm_diff_sum
|
||||
/ (double)uptime.tv_sec;
|
||||
|
||||
double submit_rate = share_time == 0. ? 0. : (double)submits*60. / share_time;
|
||||
char shr_units[4] = {0};
|
||||
char ghr_units[4] = {0};
|
||||
@@ -973,8 +985,8 @@ void report_summary_log( bool force )
|
||||
sprintf_et( et_str, et.tv_sec );
|
||||
sprintf_et( upt_str, uptime.tv_sec );
|
||||
|
||||
applog( LOG_NOTICE, "Periodic Report %s %s", et_str, upt_str );
|
||||
applog2( LOG_INFO, "%s: %s", algo_names[ opt_algo ], short_url );
|
||||
applog( LOG_BLUE, "%s: %s", algo_names[ opt_algo ], short_url );
|
||||
applog2( LOG_NOTICE, "Periodic Report %s %s", et_str, upt_str );
|
||||
applog2( LOG_INFO, "Share rate %.2f/min %.2f/min",
|
||||
submit_rate, (double)submitted_share_count*60. /
|
||||
( (double)uptime.tv_sec + (double)uptime.tv_usec / 1e6 ) );
|
||||
@@ -984,12 +996,12 @@ void report_summary_log( bool force )
|
||||
|
||||
if ( accepted_share_count < submitted_share_count )
|
||||
{
|
||||
double lost_ghrate = uptime.tv_sec == 0. ? 0.
|
||||
: diff_to_hash * last_targetdiff
|
||||
double lost_ghrate = uptime.tv_sec == 0 ? 0.
|
||||
: exp32 * last_targetdiff
|
||||
* (double)(submitted_share_count - accepted_share_count )
|
||||
/ (double)uptime.tv_sec;
|
||||
double lost_shrate = share_time == 0. ? 0.
|
||||
: diff_to_hash * last_targetdiff * (double)(submits - accepts )
|
||||
: exp32 * last_targetdiff * (double)(submits - accepts )
|
||||
/ share_time;
|
||||
char lshr_units[4] = {0};
|
||||
char lghr_units[4] = {0};
|
||||
@@ -1010,13 +1022,16 @@ void report_summary_log( bool force )
|
||||
applog2( LOG_INFO,"Rejected %6d %6d",
|
||||
rejects, rejected_share_count );
|
||||
if ( solved_block_count )
|
||||
applog2( LOG_INFO,"Blocks solved %6d",
|
||||
solved_block_count );
|
||||
applog2( LOG_INFO,"Blocks Solved %6d %6d",
|
||||
solved, solved_block_count );
|
||||
applog2( LOG_INFO, "Hi/Lo Share Diff %.5g / %.5g",
|
||||
highest_share, lowest_share );
|
||||
|
||||
}
|
||||
|
||||
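The periodic report above now estimates hash rate from accepted shares as exp32 * last_targetdiff * accepts / share_time, i.e. each accepted share is credited with targetdiff * 2^32 expected hashes. A worked instance with made-up numbers:

   // 30 shares accepted at target difficulty 0.5 over a 300 second interval:
   double shrate = 4294967296. * 0.5 * 30. / 300.;   // ~= 214.7 MH/s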
bool lowdiff_debug = false;
|
||||
|
||||
static int share_result( int result, struct work *null_work,
|
||||
static int share_result( int result, struct work *work,
|
||||
const char *reason )
|
||||
{
|
||||
double share_time = 0., share_ratio = 0.;
|
||||
@@ -1061,12 +1076,15 @@ static int share_result( int result, struct work *null_work,
|
||||
}
|
||||
|
||||
share_ratio = my_stats.net_diff == 0. ? 0. : my_stats.share_diff /
|
||||
my_stats.net_diff * 100.;
|
||||
|
||||
my_stats.net_diff;
|
||||
// check result
|
||||
if ( likely( result ) )
|
||||
{
|
||||
accepted_share_count++;
|
||||
if ( my_stats.share_diff < lowest_share )
|
||||
lowest_share = my_stats.share_diff;
|
||||
if ( my_stats.share_diff > highest_share )
|
||||
highest_share = my_stats.share_diff;
|
||||
sprintf( sres, "S%d", stale_share_count );
|
||||
sprintf( rres, "R%d", rejected_share_count );
|
||||
if unlikely( ( my_stats.net_diff > 0. )
|
||||
@@ -1087,9 +1105,11 @@ static int share_result( int result, struct work *null_work,
|
||||
{
|
||||
sprintf( ares, "A%d", accepted_share_count );
|
||||
sprintf( bres, "B%d", solved_block_count );
|
||||
if ( reason && strstr( reason, "Invalid job id" ) )
|
||||
stale = work ? work->data[ algo_gate.ntime_index ]
|
||||
!= g_work.data[ algo_gate.ntime_index ] : false;
|
||||
if ( reason ) stale = stale || strstr( reason, "Invalid job id" );
|
||||
if ( stale )
|
||||
{
|
||||
stale = true;
|
||||
stale_share_count++;
|
||||
sprintf( sres, "Stale %d", stale_share_count );
|
||||
sprintf( rres, "R%d", rejected_share_count );
|
||||
@@ -1099,7 +1119,7 @@ static int share_result( int result, struct work *null_work,
|
||||
rejected_share_count++;
|
||||
sprintf( sres, "S%d", stale_share_count );
|
||||
sprintf( rres, "Rejected %d" , rejected_share_count );
|
||||
lowdiff_debug = true;
|
||||
// lowdiff_debug = true;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1114,6 +1134,7 @@ static int share_result( int result, struct work *null_work,
|
||||
{
|
||||
accept_sum++;
|
||||
norm_diff_sum += my_stats.target_diff;
|
||||
if ( solved ) solved_sum++;
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -1125,95 +1146,59 @@ static int share_result( int result, struct work *null_work,
|
||||
|
||||
pthread_mutex_unlock( &stats_lock );
|
||||
|
||||
/*
|
||||
if ( likely( result ) )
|
||||
{
|
||||
if ( unlikely( solved ) )
|
||||
{
|
||||
sprintf( bres, "BLOCK SOLVED %d", solved_block_count );
|
||||
sprintf( ares, "A%d", accepted_share_count );
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf( bres, "B%d", solved_block_count );
|
||||
sprintf( ares, "Accepted %d", accepted_share_count );
|
||||
}
|
||||
sprintf( sres, "S%d", stale_share_count );
|
||||
sprintf( rres, "R%d", rejected_share_count );
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf( ares, "A%d", accepted_share_count );
|
||||
sprintf( bres, "B%d", solved_block_count );
|
||||
if ( stale )
|
||||
{
|
||||
sprintf( sres, "Stale %d", stale_share_count );
|
||||
sprintf( rres, "R%d", rejected_share_count );
|
||||
}
|
||||
else
|
||||
{
|
||||
sprintf( sres, "S%d", stale_share_count );
|
||||
sprintf( rres, "Rejected %d" , rejected_share_count );
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
if ( use_colors )
|
||||
{
|
||||
bcol = acol = scol = rcol = CL_WHT;
|
||||
bcol = acol = scol = rcol = CL_N;
|
||||
if ( likely( result ) )
|
||||
{
|
||||
acol = CL_GRN;
|
||||
if ( unlikely( solved ) ) bcol = CL_MAG;
|
||||
acol = CL_WHT CL_GRN;
|
||||
if ( unlikely( solved ) ) bcol = CL_WHT CL_MAG;
|
||||
}
|
||||
else if ( stale ) scol = CL_YL2;
|
||||
else rcol = CL_RED;
|
||||
else if ( stale ) scol = CL_WHT CL_YL2;
|
||||
else rcol = CL_WHT CL_RED;
|
||||
}
|
||||
else
|
||||
bcol = acol = scol = rcol = "\0";
|
||||
|
||||
applog( LOG_NOTICE, "%d %s%s %s%s %s%s %s%s" CL_WHT ", %.3f sec (%dms)",
|
||||
applog( LOG_NOTICE, "%d %s%s %s%s %s%s %s%s" CL_N ", %.3f sec (%dms)",
|
||||
my_stats.share_count, acol, ares, scol, sres, rcol, rres, bcol,
|
||||
bres, share_time, latency );
|
||||
|
||||
if ( !opt_quiet )
|
||||
{
|
||||
if ( have_stratum )
|
||||
applog2( LOG_NOTICE, "Diff %.5g (%.3g%), %sBlock %d, %sJob %s" CL_WHT,
|
||||
applog2( LOG_INFO, "Diff %.5g (%.3g), %sBlock %d" CL_N ", %sJob %s",
|
||||
my_stats.share_diff, share_ratio, bcol, stratum.block_height,
|
||||
scol, my_stats.job_id );
|
||||
else
|
||||
applog2( LOG_NOTICE, "Diff %.5g (%.3g%), %sBlock %d" CL_WHT,
|
||||
my_stats.share_diff, share_ratio, bcol, stratum.block_height );
|
||||
{
|
||||
uint64_t height = work ? work->height : last_block_height;
|
||||
applog2( LOG_INFO, "Diff %.5g (%.3g), %sBlock %d",
|
||||
my_stats.share_diff, share_ratio, bcol, height );
|
||||
}
|
||||
}
|
||||
|
||||
if ( unlikely( reason && !result ) )
|
||||
if ( unlikely( opt_debug || !( opt_quiet || result || stale ) ) )
|
||||
{
|
||||
if ( !( opt_quiet || stale ) )
|
||||
{
|
||||
uint32_t str[8];
|
||||
|
||||
if ( reason )
|
||||
applog( LOG_WARNING, "Reject reason: %s", reason );
|
||||
|
||||
uint32_t str1[8], str2[8];
|
||||
char str3[65];
|
||||
|
||||
// display share hash and target for troubleshooting
|
||||
diff_to_target( str1, my_stats.share_diff );
|
||||
for ( int i = 0; i < 8; i++ )
|
||||
be32enc( str2 + i, str1[7 - i] );
|
||||
bin2hex( str3, (unsigned char*)str2, 12 );
|
||||
applog2( LOG_INFO, "Share diff: %.5g, Hash: %s...",
|
||||
my_stats.share_diff, str3 );
|
||||
|
||||
diff_to_target( str1, my_stats.target_diff );
|
||||
for ( int i = 0; i < 8; i++ )
|
||||
be32enc( str2 + i, str1[7 - i] );
|
||||
bin2hex( str3, (unsigned char*)str2, 12 );
|
||||
applog2( LOG_INFO, "Target diff: %.5g, Targ: %s...",
|
||||
my_stats.target_diff, str3 );
|
||||
|
||||
// display share hash and target for troubleshooting
|
||||
diff_to_target( str, my_stats.share_diff );
|
||||
applog2( LOG_INFO, "Hash: %08x%08x%08x%08x...",
|
||||
str[7], str[6], str[5], str[4] );
|
||||
uint32_t *targ;
|
||||
if ( work )
|
||||
targ = work->target;
|
||||
else
|
||||
{
|
||||
diff_to_target( str, my_stats.target_diff );
|
||||
targ = &str[0];
|
||||
}
|
||||
|
||||
if ( unlikely( opt_reset_on_stale && stale ) )
|
||||
stratum_need_reset = true;
|
||||
applog2( LOG_INFO, "Target: %08x%08x%08x%08x...",
|
||||
targ[7], targ[6], targ[5], targ[4] );
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
@@ -1359,28 +1344,6 @@ char* std_malloc_txs_request( struct work *work )
|
||||
|
||||
static bool submit_upstream_work( CURL *curl, struct work *work )
|
||||
{
|
||||
|
||||
/* pass if the previous hash is not the current previous hash */
|
||||
/* Submit anyway, discardring here messes up the stats
|
||||
if ( !submit_old && memcmp( &work->data[1], &g_work.data[1], 32 ) )
|
||||
{
|
||||
applog( LOG_WARNING, "Stale work detected, discarding" );
|
||||
return true;
|
||||
}
|
||||
|
||||
if ( !have_stratum && allow_mininginfo )
|
||||
{
|
||||
struct work mining_info;
|
||||
get_mininginfo( curl, &mining_info );
|
||||
if ( work->height < mining_info.height )
|
||||
{
|
||||
applog( LOG_WARNING, "Block %u was already solved, current block %d",
|
||||
work->height, mining_info.height );
|
||||
return true;
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
if ( have_stratum )
|
||||
{
|
||||
char req[JSON_BUF_LEN];
|
||||
@@ -1501,47 +1464,61 @@ start:
|
||||
else
|
||||
rc = work_decode( json_object_get( val, "result" ), work );
|
||||
|
||||
if ( opt_protocol && rc )
|
||||
if ( rc )
|
||||
{
|
||||
timeval_subtract( &diff, &tv_end, &tv_start );
|
||||
applog( LOG_DEBUG, "got new work in %.2f ms",
|
||||
( 1000.0 * diff.tv_sec ) + ( 0.001 * diff.tv_usec ) );
|
||||
}
|
||||
|
||||
json_decref( val );
|
||||
// store work height in solo
|
||||
get_mininginfo(curl, work);
|
||||
|
||||
applog( LOG_BLUE, "New block %d, diff %.5g", work->height, net_diff );
|
||||
|
||||
if ( !opt_quiet && net_diff && net_hashrate )
|
||||
{
|
||||
double miner_hr = 0.;
|
||||
pthread_mutex_lock( &stats_lock );
|
||||
|
||||
for ( int i = 0; i < opt_n_threads; i++ )
|
||||
miner_hr += thr_hashrates[i];
|
||||
global_hashrate = miner_hr;
|
||||
|
||||
pthread_mutex_unlock( &stats_lock );
|
||||
|
||||
if ( miner_hr )
|
||||
if ( opt_protocol )
|
||||
{
|
||||
double net_hr = net_hashrate;
|
||||
char net_hr_units[4] = {0};
|
||||
char miner_hr_units[4] = {0};
|
||||
char net_ttf[32];
|
||||
char miner_ttf[32];
|
||||
|
||||
sprintf_et( net_ttf, net_diff * diff_to_hash / net_hr );
|
||||
sprintf_et( miner_ttf, net_diff * diff_to_hash / miner_hr );
|
||||
scale_hash_for_display ( &miner_hr, miner_hr_units );
|
||||
scale_hash_for_display ( &net_hr, net_hr_units );
|
||||
applog2(LOG_INFO, "Miner TTF @ %.2f %sh/s %s, net TTF @ %.2f %sh/s %s",
|
||||
miner_hr, miner_hr_units, miner_ttf,
|
||||
net_hr, net_hr_units, net_ttf );
|
||||
timeval_subtract( &diff, &tv_end, &tv_start );
|
||||
applog( LOG_DEBUG, "got new work in %.2f ms",
|
||||
( 1000.0 * diff.tv_sec ) + ( 0.001 * diff.tv_usec ) );
|
||||
}
|
||||
}
|
||||
|
||||
json_decref( val );
|
||||
// store work height in solo
|
||||
get_mininginfo(curl, work);
|
||||
|
||||
if ( work->height > last_block_height )
|
||||
{
|
||||
last_block_height = work->height;
|
||||
applog( LOG_BLUE, "New Block %d, Net Diff %.5g, Target Diff %.5g, Ntime %08x",
|
||||
work->height, net_diff, work->targetdiff,
|
||||
bswap_32( work->data[ algo_gate.ntime_index ] ) );
|
||||
|
||||
if ( !opt_quiet && net_diff && net_hashrate )
|
||||
{
|
||||
double miner_hr = 0.;
|
||||
pthread_mutex_lock( &stats_lock );
|
||||
|
||||
for ( int i = 0; i < opt_n_threads; i++ )
|
||||
miner_hr += thr_hashrates[i];
|
||||
global_hashrate = miner_hr;
|
||||
|
||||
pthread_mutex_unlock( &stats_lock );
|
||||
|
||||
if ( miner_hr )
|
||||
{
|
||||
double net_hr = net_hashrate;
|
||||
char net_hr_units[4] = {0};
|
||||
char miner_hr_units[4] = {0};
|
||||
char net_ttf[32];
|
||||
char miner_ttf[32];
|
||||
|
||||
sprintf_et( net_ttf, ( work->targetdiff * exp32 ) / net_hr );
|
||||
sprintf_et( miner_ttf, ( work->targetdiff * exp32 ) / miner_hr );
|
||||
scale_hash_for_display ( &miner_hr, miner_hr_units );
|
||||
scale_hash_for_display ( &net_hr, net_hr_units );
|
||||
applog2( LOG_INFO,
|
||||
"Miner TTF @ %.2f %sh/s %s, Net TTF @ %.2f %sh/s %s",
|
||||
miner_hr, miner_hr_units, miner_ttf,
|
||||
net_hr, net_hr_units, net_ttf );
|
||||
}
|
||||
}
|
||||
} // work->height > last_block_height
|
||||
else if ( memcmp( &work->data[1], &g_work.data[1], 32 ) )
|
||||
applog( LOG_BLUE, "New Work, Ntime %08lx",
|
||||
bswap_32( work->data[ algo_gate.ntime_index ] ) );
|
||||
} // rc
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
@@ -1592,8 +1569,6 @@ static bool workio_get_work( struct workio_cmd *wc, CURL *curl )
|
||||
if ( !tq_push(wc->thr->q, ret_work ) )
|
||||
free( ret_work );
|
||||
|
||||
report_summary_log( false );
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -1706,8 +1681,7 @@ static bool get_work(struct thr_info *thr, struct work *work)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool submit_work( struct thr_info *thr,
|
||||
const struct work *work_in )
|
||||
bool submit_work( struct thr_info *thr, const struct work *work_in )
|
||||
{
|
||||
struct workio_cmd *wc;
|
||||
|
||||
@@ -1731,34 +1705,23 @@ err_out:
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
// __float128?
|
||||
// Convert little endian 256 bit (38 decimal digits) unsigned integer to
|
||||
// double precision floating point with 15 decimal digits precision.
|
||||
// returns u * ( 2**256 )
|
||||
static inline double u256_to_double( const uint64_t *u )
|
||||
{
|
||||
const double exp64 = 4294967296.0 * 4294967296.0; // 2**64
|
||||
return ( ( u[3] * exp64 + u[2] ) * exp64 + u[1] ) * exp64 + u[0];
|
||||
}
|
||||
*/
|
||||
|
||||
void work_set_target_ratio( struct work* work, const void *hash )
|
||||
static void update_submit_stats( struct work *work, const void *hash )
|
||||
{
|
||||
if ( likely( hash ) )
|
||||
{
|
||||
double dhash = u256_to_double( (const uint64_t*)hash );
|
||||
if ( likely( dhash > 0. ) )
|
||||
work->sharediff = work->targetdiff *
|
||||
u256_to_double( (const uint64_t*)( work->target ) ) / dhash;
|
||||
}
|
||||
else
|
||||
work->sharediff = 0.;
|
||||
// work->sharediff = hash ? exp32 / ( (uint64_t*)hash )[3] : 0.;
|
||||
|
||||
// collect some share stats
|
||||
// Frequent share submission combined with high latency can caused
|
||||
// shares to be submitted faster than they are acked. If severe enough
|
||||
// it can overflow the queue and overwrite stats for a share.
|
||||
pthread_mutex_lock( &stats_lock );
|
||||
|
||||
submitted_share_count++;
|
||||
share_stats[ s_put_ptr ].share_count = submitted_share_count;
|
||||
gettimeofday( &share_stats[ s_put_ptr ].submit_time, NULL );
|
||||
share_stats[ s_put_ptr ].share_diff = work->sharediff;
|
||||
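This hunk appears to move the share difficulty calculation out of work_set_target_ratio() and replace the exact 256-bit ratio with the cheaper approximation sharediff ~= 2^32 / hash[255:192], using only the most significant 64-bit word of the little-endian hash. A worked instance with an illustrative hash value (not from the patch):

   const double exp32 = 4294967296.;              // 2^32
   uint64_t hash_hi  = 0x00000000ffff0000ULL;     // top 64 bits of a hypothetical hash
   double sharediff  = exp32 / (double)hash_hi;   // ~= 1.000015, i.e. roughly a diff-1 share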
@@ -1775,22 +1738,25 @@ void work_set_target_ratio( struct work* work, const void *hash )
|
||||
bool submit_solution( struct work *work, const void *hash,
|
||||
struct thr_info *thr )
|
||||
{
|
||||
if ( likely( submit_work( thr, work ) ) )
|
||||
work->sharediff = hash ? exp32 / ( (uint64_t*)hash )[3] : 0.;
|
||||
|
||||
if ( likely( submit_work( thr, work ) ) )
|
||||
{
|
||||
submitted_share_count++;
|
||||
work_set_target_ratio( work, hash );
|
||||
update_submit_stats( work, hash );
|
||||
|
||||
if ( !opt_quiet )
|
||||
{
|
||||
if ( have_stratum )
|
||||
applog( LOG_NOTICE, "%d submitted by thread %d, job %s",
|
||||
submitted_share_count, thr->id, work->job_id );
|
||||
else
|
||||
applog( LOG_NOTICE, "%d submitted by thread %d",
|
||||
submitted_share_count, thr->id );
|
||||
applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Job %s",
|
||||
submitted_share_count, work->sharediff, work->height,
|
||||
work->job_id );
|
||||
else
|
||||
applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Ntime %08x",
|
||||
submitted_share_count, work->sharediff, work->height,
|
||||
work->data[ algo_gate.ntime_index ] );
|
||||
}
|
||||
|
||||
if ( lowdiff_debug )
|
||||
if ( unlikely( lowdiff_debug ) )
|
||||
{
|
||||
uint32_t* h = (uint32_t*)hash;
|
||||
uint32_t* t = (uint32_t*)work->target;
|
||||
@@ -1802,27 +1768,30 @@ bool submit_solution( struct work *work, const void *hash,
|
||||
return true;
|
||||
}
|
||||
else
|
||||
applog( LOG_WARNING, "%d failed to submit share thread %d.",
|
||||
submitted_share_count, thr->id );
|
||||
applog( LOG_WARNING, "%d failed to submit share", submitted_share_count );
|
||||
return false;
|
||||
}
|
||||
|
||||
// deprecated, use submit_solution
|
||||
bool submit_lane_solution( struct work *work, const void *hash,
|
||||
struct thr_info *thr, const int lane )
|
||||
{
|
||||
if ( likely( submit_work( thr, work ) ) )
|
||||
{
|
||||
submitted_share_count++;
|
||||
work_set_target_ratio( work, hash );
|
||||
work->sharediff = hash ? exp32 / ( (uint64_t*)hash )[3] : 0.;
|
||||
|
||||
if ( likely( submit_work( thr, work ) ) )
|
||||
{
|
||||
update_submit_stats( work, hash );
|
||||
|
||||
if ( !opt_quiet )
|
||||
{
|
||||
if ( have_stratum )
|
||||
applog( LOG_NOTICE, "%d submitted by thread %d, lane %d, job %s",
|
||||
submitted_share_count, thr->id, lane, work->job_id );
|
||||
applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Job %s",
|
||||
submitted_share_count, work->sharediff, work->height,
|
||||
work->job_id );
|
||||
else
|
||||
applog( LOG_NOTICE, "%d submitted by thread %d, lane %d",
|
||||
submitted_share_count, thr->id, lane );
|
||||
applog( LOG_NOTICE, "%d Submitted Diff %.5g, Block %d, Ntime %08x",
|
||||
submitted_share_count, work->sharediff, work->height,
|
||||
work->data[ algo_gate.ntime_index ] );
|
||||
}
|
||||
|
||||
if ( lowdiff_debug )
|
||||
@@ -1837,8 +1806,8 @@ bool submit_lane_solution( struct work *work, const void *hash,
|
||||
return true;
|
||||
}
|
||||
else
|
||||
applog( LOG_WARNING, "%d failed to submit share, thread %d, lane %d.",
|
||||
submitted_share_count, thr->id, lane );
|
||||
applog( LOG_WARNING, "%d failed to submit share", submitted_share_count );
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -1991,6 +1960,8 @@ static void *miner_thread( void *userdata )
|
||||
if (!opt_benchmark && opt_priority == 0)
|
||||
{
|
||||
setpriority(PRIO_PROCESS, 0, 19);
|
||||
if ( !thr_id && !opt_quiet )
|
||||
applog(LOG_INFO, "Miner thread priority %d (nice 19)", opt_priority );
|
||||
drop_policy();
|
||||
}
|
||||
else
|
||||
@@ -2007,9 +1978,9 @@ static void *miner_thread( void *userdata )
|
||||
case 4: prio = -10; break;
|
||||
case 5: prio = -15;
|
||||
}
|
||||
if (opt_debug)
|
||||
applog(LOG_DEBUG, "Thread %d priority %d (nice %d)", thr_id,
|
||||
opt_priority, prio );
|
||||
if ( !( thr_id || opt_quiet ) )
|
||||
applog( LOG_INFO, "Miner thread priority %d (nice %d)",
|
||||
opt_priority, prio );
|
||||
#endif
|
||||
setpriority(PRIO_PROCESS, 0, prio);
|
||||
if ( opt_priority == 0 )
|
||||
@@ -2024,7 +1995,7 @@ static void *miner_thread( void *userdata )
|
||||
{
|
||||
affine_to_cpu_mask( thr_id, (uint128_t)1 << (thr_id % num_cpus) );
|
||||
if ( opt_debug )
|
||||
applog( LOG_DEBUG, "Binding thread %d to cpu %d.",
|
||||
applog( LOG_INFO, "Binding thread %d to cpu %d.",
|
||||
thr_id, thr_id % num_cpus,
|
||||
u128_hi64( (uint128_t)1 << (thr_id % num_cpus) ),
|
||||
u128_lo64( (uint128_t)1 << (thr_id % num_cpus) ) );
|
||||
@@ -2045,14 +2016,14 @@ static void *miner_thread( void *userdata )
|
||||
{
|
||||
#if AFFINITY_USES_UINT128
|
||||
if ( num_cpus > 64 )
|
||||
applog( LOG_DEBUG, "Binding thread %d to mask %016llx %016llx",
|
||||
applog( LOG_INFO, "Binding thread %d to mask %016llx %016llx",
|
||||
thr_id, u128_hi64( opt_affinity ),
|
||||
u128_lo64( opt_affinity ) );
|
||||
else
|
||||
applog( LOG_DEBUG, "Binding thread %d to mask %016llx",
|
||||
applog( LOG_INFO, "Binding thread %d to mask %016llx",
|
||||
thr_id, opt_affinity );
|
||||
#else
|
||||
applog( LOG_DEBUG, "Binding thread %d to mask %016llx",
|
||||
applog( LOG_INFO, "Binding thread %d to mask %016llx",
|
||||
thr_id, opt_affinity );
|
||||
#endif
|
||||
}
|
||||
@@ -2088,13 +2059,13 @@ static void *miner_thread( void *userdata )
|
||||
}
|
||||
else
|
||||
{
|
||||
int min_scantime = have_longpoll ? LP_SCANTIME : opt_scantime;
|
||||
int scantime = have_longpoll ? LP_SCANTIME : opt_scantime;
|
||||
pthread_mutex_lock( &g_work_lock );
|
||||
|
||||
if ( time(NULL) - g_work_time >= min_scantime
|
||||
if ( time(NULL) - g_work_time >= scantime
|
||||
|| *nonceptr >= end_nonce )
|
||||
{
|
||||
if ( unlikely( !get_work( mythr, &g_work ) ) )
|
||||
if ( unlikely( !get_work( mythr, &g_work ) ) )
|
||||
{
|
||||
applog( LOG_ERR, "work retrieval failed, exiting "
|
||||
"mining thread %d", thr_id );
|
||||
@@ -2102,7 +2073,8 @@ static void *miner_thread( void *userdata )
|
||||
goto out;
|
||||
}
|
||||
g_work_time = time(NULL);
|
||||
}
|
||||
restart_threads();
|
||||
}
|
||||
algo_gate.get_new_work( &work, &g_work, thr_id, &end_nonce );
|
||||
|
||||
pthread_mutex_unlock( &g_work_lock );
|
||||
@@ -2228,16 +2200,18 @@ static void *miner_thread( void *userdata )
|
||||
}
|
||||
|
||||
#if !(defined(__WINDOWS__) || defined(_WIN64) || defined(_WIN32))
|
||||
// Display CPU temperature and clock rate.
|
||||
if (!opt_quiet && mythr->id == 0 )
|
||||
{
|
||||
int temp = cpu_temp(0);
|
||||
static struct timeval cpu_temp_time = {0};
|
||||
timeval_subtract( &diff, &tv_end, &cpu_temp_time );
|
||||
int wait = temp >= 80 ? 30 : temp >= 70 ? 60 : 120;
|
||||
|
||||
if ( ( diff.tv_sec > wait ) || ( temp > hi_temp ) )
|
||||
{
|
||||
char tempstr[32];
|
||||
int lo_freq, hi_freq;
|
||||
float lo_freq = 0., hi_freq = 0.;
|
||||
linux_cpu_hilo_freq( &lo_freq, &hi_freq );
|
||||
memcpy( &cpu_temp_time, &tv_end, sizeof(cpu_temp_time) );
|
||||
if ( use_colors && ( temp >= 70 ) )
|
||||
@@ -2249,12 +2223,13 @@ static void *miner_thread( void *userdata )
|
||||
}
|
||||
else
|
||||
sprintf( tempstr, "%d C", temp );
|
||||
applog( LOG_INFO,"CPU temp: curr %s (max %d), Freq: %.3f/%.3f GHz",
|
||||
tempstr, hi_temp, (float)lo_freq / 1e6, (float)hi_freq/ 1e6 );
|
||||
applog( LOG_NOTICE,"CPU temp: curr %s (max %d), Freq: %.3f/%.3f GHz",
|
||||
tempstr, hi_temp, lo_freq / 1e6, hi_freq / 1e6 );
|
||||
if ( temp > hi_temp ) hi_temp = temp;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
// display hashrate
|
||||
if ( unlikely( opt_hash_meter ) )
|
||||
{
|
||||
@@ -2567,8 +2542,10 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
|
||||
algo_gate.build_extraheader( g_work, sctx );
|
||||
net_diff = algo_gate.calc_network_diff( g_work );
|
||||
algo_gate.set_work_data_endian( g_work );
|
||||
work_set_target( g_work, sctx->job.diff
|
||||
/ ( opt_target_factor * opt_diff_factor ) );
|
||||
g_work->height = sctx->block_height;
|
||||
g_work->targetdiff = sctx->job.diff
|
||||
/ ( opt_target_factor * opt_diff_factor );
|
||||
diff_to_target( g_work->target, g_work->targetdiff );
|
||||
|
||||
pthread_mutex_unlock( &sctx->work_lock );
|
||||
|
||||
@@ -2592,13 +2569,13 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
|
||||
pthread_mutex_unlock( &stats_lock );
|
||||
|
||||
if ( stratum_diff != sctx->job.diff )
|
||||
applog( LOG_BLUE, "New stratum diff %g, block %d, job %s",
|
||||
applog( LOG_BLUE, "New Diff %g, Block %d, Job %s",
|
||||
sctx->job.diff, sctx->block_height, g_work->job_id );
|
||||
else if ( last_block_height != sctx->block_height )
|
||||
applog( LOG_BLUE, "New block %d, job %s",
|
||||
sctx->block_height, g_work->job_id );
|
||||
applog( LOG_BLUE, "New Block %d, Job %s",
|
||||
sctx->block_height, g_work->job_id );
|
||||
else if ( g_work->job_id )
|
||||
applog( LOG_BLUE,"New job %s", g_work->job_id );
|
||||
applog( LOG_BLUE,"New Job %s", g_work->job_id );
|
||||
|
||||
// Update data and calculate new estimates.
|
||||
if ( ( stratum_diff != sctx->job.diff )
|
||||
@@ -2615,7 +2592,7 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
|
||||
if ( !opt_quiet )
|
||||
{
|
||||
applog2( LOG_INFO, "Diff: Net %.5g, Stratum %.5g, Target %.5g",
|
||||
net_diff, stratum_diff, last_targetdiff );
|
||||
net_diff, stratum_diff, g_work->targetdiff );
|
||||
|
||||
if ( likely( hr > 0. ) )
|
||||
{
|
||||
@@ -2623,10 +2600,10 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
|
||||
char block_ttf[32];
|
||||
char share_ttf[32];
|
||||
|
||||
sprintf_et( block_ttf, net_diff * diff_to_hash / hr );
|
||||
sprintf_et( share_ttf, last_targetdiff * diff_to_hash / hr );
|
||||
sprintf_et( block_ttf, ( net_diff * exp32 ) / hr );
|
||||
sprintf_et( share_ttf, g_work->targetdiff * exp32 / hr );
|
||||
scale_hash_for_display ( &hr, hr_units );
|
||||
applog2( LOG_INFO, "TTF @ %.2f %sh/s: block %s, share %s",
|
||||
applog2( LOG_INFO, "TTF @ %.2f %sh/s: Block %s, Share %s",
|
||||
hr, hr_units, block_ttf, share_ttf );
|
||||
|
||||
if ( !multipool && last_block_height > session_first_block )
|
||||
@@ -2639,14 +2616,14 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
|
||||
: et.tv_sec / ( last_block_height - session_first_block );
|
||||
if ( net_diff && net_ttf )
|
||||
{
|
||||
double net_hr = net_diff * diff_to_hash / net_ttf;
|
||||
char net_ttf_str[32];
|
||||
double net_hr = net_diff * exp32 / net_ttf;
|
||||
// char net_ttf_str[32];
|
||||
char net_hr_units[4] = {0};
|
||||
|
||||
sprintf_et( net_ttf_str, net_ttf );
|
||||
// sprintf_et( net_ttf_str, net_ttf );
|
||||
scale_hash_for_display ( &net_hr, net_hr_units );
|
||||
applog2( LOG_INFO, "Net TTF @ %.2f %sh/s: %s",
|
||||
net_hr, net_hr_units, net_ttf_str );
|
||||
applog2( LOG_INFO, "Net hash rate (est) %.2f %sh/s",
|
||||
net_hr, net_hr_units );
|
||||
}
|
||||
}
|
||||
} // hr > 0
|
||||
@@ -2657,12 +2634,12 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
|
||||
static void *stratum_thread(void *userdata )
|
||||
{
|
||||
struct thr_info *mythr = (struct thr_info *) userdata;
|
||||
char *s;
|
||||
char *s = NULL;
|
||||
|
||||
stratum.url = (char*) tq_pop(mythr->q, NULL);
|
||||
if (!stratum.url)
|
||||
goto out;
|
||||
applog( LOG_INFO, "Stratum connect %s", short_url );
|
||||
applog( LOG_BLUE, "Stratum connect %s", short_url );
|
||||
|
||||
while (1)
|
||||
{
|
||||
@@ -2722,30 +2699,26 @@ static void *stratum_thread(void *userdata )
|
||||
restart_threads();
|
||||
}
|
||||
|
||||
if ( stratum_socket_full( &stratum, opt_timeout ) )
|
||||
if ( likely( stratum_socket_full( &stratum, opt_timeout ) ) )
|
||||
{
|
||||
s = stratum_recv_line(&stratum);
|
||||
if ( !s )
|
||||
if ( likely( s = stratum_recv_line( &stratum ) ) )
|
||||
{
|
||||
if ( likely( !stratum_handle_method( &stratum, s ) ) )
|
||||
stratum_handle_response( s );
|
||||
free( s );
|
||||
}
|
||||
else
|
||||
{
|
||||
applog(LOG_WARNING, "Stratum connection interrupted");
|
||||
stratum_disconnect( &stratum );
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
s = NULL;
|
||||
applog(LOG_ERR, "Stratum connection timeout");
|
||||
stratum_disconnect( &stratum );
|
||||
}
|
||||
|
||||
if ( s )
|
||||
{
|
||||
if ( !stratum_handle_method( &stratum, s ) )
|
||||
stratum_handle_response( s );
|
||||
free( s );
|
||||
}
|
||||
else
|
||||
{
|
||||
// stratum_errors++;
|
||||
// check if this redundant
|
||||
stratum_disconnect( &stratum );
|
||||
}
|
||||
} // loop
|
||||
out:
|
||||
return NULL;
|
||||
@@ -3382,7 +3355,7 @@ bool check_cpu_capability ()
|
||||
" with VC++ 2013\n");
|
||||
#elif defined(__GNUC__)
|
||||
" with GCC");
|
||||
printf(" %d.%d.%d.\n", __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__);
|
||||
printf(" %d.%d.%d\n", __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__);
|
||||
#else
|
||||
printf(".\n");
|
||||
#endif
|
||||
@@ -3510,7 +3483,7 @@ int main(int argc, char *argv[])
|
||||
num_cpus += cpus;
|
||||
|
||||
if (opt_debug)
|
||||
applog(LOG_DEBUG, "Found %d cpus on cpu group %d", cpus, i);
|
||||
applog(LOG_DEBUG, "Found %d cpus on cpu group %d", cpus, i);
|
||||
}
|
||||
#else
|
||||
SYSTEM_INFO sysinfo;
|
||||
@@ -3530,7 +3503,6 @@ int main(int argc, char *argv[])
|
||||
if (num_cpus < 1)
|
||||
num_cpus = 1;
|
||||
|
||||
|
||||
if (!opt_n_threads)
|
||||
opt_n_threads = num_cpus;
|
||||
|
||||
@@ -3604,12 +3576,13 @@ int main(int argc, char *argv[])
|
||||
pthread_mutex_init( &stratum.sock_lock, NULL );
|
||||
pthread_mutex_init( &stratum.work_lock, NULL );
|
||||
|
||||
flags = !opt_benchmark
|
||||
&& ( strncmp( rpc_url, "https:", 6 )
|
||||
|| strncasecmp(rpc_url, "stratum+tcps://", 15 ) )
|
||||
? ( CURL_GLOBAL_ALL & ~CURL_GLOBAL_SSL )
|
||||
: CURL_GLOBAL_ALL;
|
||||
if ( curl_global_init( flags ) )
|
||||
flags = CURL_GLOBAL_ALL;
|
||||
if ( !opt_benchmark )
|
||||
if ( strncasecmp( rpc_url, "https:", 6 )
|
||||
&& strncasecmp( rpc_url, "stratum+tcps://", 15 ) )
|
||||
flags &= ~CURL_GLOBAL_SSL;
|
||||
|
||||
if ( curl_global_init( flags ) )
|
||||
{
|
||||
applog(LOG_ERR, "CURL initialization failed");
|
||||
return 1;
|
||||
@@ -3649,27 +3622,24 @@ int main(int argc, char *argv[])
|
||||
if (opt_priority > 0)
|
||||
{
|
||||
DWORD prio = NORMAL_PRIORITY_CLASS;
|
||||
switch (opt_priority) {
|
||||
case 1:
|
||||
prio = BELOW_NORMAL_PRIORITY_CLASS;
|
||||
switch (opt_priority)
|
||||
{
|
||||
case 1:
|
||||
prio = BELOW_NORMAL_PRIORITY_CLASS;
|
||||
break;
|
||||
case 3:
|
||||
prio = ABOVE_NORMAL_PRIORITY_CLASS;
|
||||
case 3:
|
||||
prio = ABOVE_NORMAL_PRIORITY_CLASS;
|
||||
break;
|
||||
case 4:
|
||||
prio = HIGH_PRIORITY_CLASS;
|
||||
case 4:
|
||||
prio = HIGH_PRIORITY_CLASS;
|
||||
break;
|
||||
case 5:
|
||||
prio = REALTIME_PRIORITY_CLASS;
|
||||
case 5:
|
||||
prio = REALTIME_PRIORITY_CLASS;
|
||||
}
|
||||
SetPriorityClass(GetCurrentProcess(), prio);
|
||||
}
|
||||
#endif
|
||||
|
||||
if ( num_cpus != opt_n_threads )
|
||||
applog( LOG_INFO,"%u CPU cores available, %u miner threads selected.",
|
||||
num_cpus, opt_n_threads );
|
||||
|
||||
// To be confirmed with more than 64 cpus
|
||||
if ( opt_affinity != -1 )
|
||||
{
|
||||
@@ -3701,9 +3671,13 @@ int main(int argc, char *argv[])
|
||||
*/
|
||||
}
|
||||
|
||||
applog( LOG_INFO, "Extranonce subscribe: %s",
|
||||
opt_extranonce ? "YES" : "NO" );
|
||||
|
||||
if ( !opt_quiet && ( opt_n_threads < num_cpus ) )
|
||||
{
|
||||
char affinity_map[64];
|
||||
format_affinity_map( affinity_map, opt_affinity );
|
||||
applog( LOG_INFO, "CPU affinity [%s]", affinity_map );
|
||||
}
|
||||
|
||||
#ifdef HAVE_SYSLOG_H
|
||||
if (use_syslog)
|
||||
openlog("cpuminer", LOG_PID, LOG_USER);
|
||||
@@ -3754,7 +3728,7 @@ int main(int argc, char *argv[])
|
||||
/* start longpoll thread */
|
||||
err = thread_create(thr, longpoll_thread);
|
||||
if (err) {
|
||||
applog(LOG_ERR, "long poll thread create failed");
|
||||
applog(LOG_ERR, "Long poll thread create failed");
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
@@ -3774,7 +3748,7 @@ int main(int argc, char *argv[])
|
||||
err = thread_create(thr, stratum_thread);
|
||||
if (err)
|
||||
{
|
||||
applog(LOG_ERR, "stratum thread create failed");
|
||||
applog(LOG_ERR, "Stratum thread create failed");
|
||||
return 1;
|
||||
}
|
||||
if (have_stratum)
|
||||
@@ -3815,18 +3789,16 @@ int main(int argc, char *argv[])
|
||||
return 1;
|
||||
err = thread_create(thr, miner_thread);
|
||||
if (err) {
|
||||
applog(LOG_ERR, "thread %d create failed", i);
|
||||
applog(LOG_ERR, "Miner thread %d create failed", i);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
applog(LOG_INFO, "%d miner threads started, "
|
||||
"using '%s' algorithm.",
|
||||
opt_n_threads,
|
||||
algo_names[opt_algo]);
|
||||
applog( LOG_INFO, "%d of %d miner threads started using '%s' algorithm",
|
||||
opt_n_threads, num_cpus, algo_names[opt_algo] );
|
||||
|
||||
/* main loop - simply wait for workio thread to exit */
|
||||
pthread_join(thr_info[work_thr_id].pth, NULL);
|
||||
applog(LOG_WARNING, "workio thread dead, exiting.");
|
||||
pthread_join( thr_info[work_thr_id].pth, NULL );
|
||||
applog( LOG_WARNING, "workio thread dead, exiting." );
|
||||
return 0;
|
||||
}
|
||||
|
miner.h  (36 changed lines)
@@ -312,6 +312,20 @@ int varint_encode( unsigned char *p, uint64_t n );
size_t address_to_script( unsigned char *out, size_t outsz, const char *addr );
int timeval_subtract( struct timeval *result, struct timeval *x,
                      struct timeval *y);

// Bitcoin formula for converting difficulty to an equivalent
// number of hashes.
//
// https://en.bitcoin.it/wiki/Difficulty
//
// hash = diff * 2**32
//
// diff_to_hash = 2**32 = 0x100000000 = 4294967296 = exp32;

const double exp32;   // 2**32
const double exp48;   // 2**48
const double exp64;   // 2**64

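As a quick sanity check of the formula in the comment above (numbers illustrative, not from the patch): a difficulty-1 block corresponds to about 2^32 expected hashes, so a miner running at 10 MH/s expects one roughly every

   double ttf_sec = 1.0 * 4294967296. / 10e6;   // ~= 429 s, a bit over 7 minutes

which is the kind of TTF estimate the exp32/exp48/exp64 constants are used for in cpu-miner.c.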
bool fulltest( const uint32_t *hash, const uint32_t *target );
bool valid_hash( const void*, const void* );

@@ -332,11 +346,15 @@ struct thr_info {

//struct thr_info *thr_info;

bool submit_solution( struct work *work, const void *hash,
                      struct thr_info *thr );
bool submit_lane_solution( struct work *work, const void *hash,
                           struct thr_info *thr, const int lane );
void test_hash_and_submit( struct work *work, const void *hash,
                           struct thr_info *thr );

bool submit_solution( struct work *work, const void *hash,
                      struct thr_info *thr );

// deprecated
bool submit_lane_solution( struct work *work, const void *hash,
                           struct thr_info *thr, const int lane );

bool submit_work( struct thr_info *thr, const struct work *work_in );

@@ -378,6 +396,7 @@ struct work {
   size_t xnonce2_len;
   unsigned char *xnonce2;
   bool sapling;
   bool stale;

   // x16rt
   uint32_t merkleroothash[8];
@@ -758,7 +777,7 @@ extern const int pk_buffer_size_max;
extern int pk_buffer_size;

static char const usage[] = "\
Usage: " PACKAGE_NAME " [OPTIONS]\n\
Usage: cpuminer [OPTIONS]\n\
Options:\n\
  -a, --algo=ALGO       specify the algorithm to use\n\
                          allium        Garlicoin (GRLC)\n\
@@ -853,8 +872,8 @@ Options:\n\
                          yespower-b2b  generic yespower + blake2b\n\
                          zr5           Ziftr\n\
  -N, --param-n         N parameter for scrypt based algos\n\
  -R, --patam-r         R parameter for scrypt based algos\n\
  -K, --param-key       Key parameter for algos that use it\n\
  -R, --param-r         R parameter for scrypt based algos\n\
  -K, --param-key       Key (pers) parameter for algos that use it\n\
  -o, --url=URL         URL of mining server\n\
  -O, --userpass=U:P    username:password pair for mining server\n\
  -u, --user=USERNAME   username for mining server\n\
@@ -871,7 +890,7 @@ Options:\n\
                          long polling is unavailable, in seconds (default: 5)\n\
      --randomize       Randomize scan range start to reduce duplicates\n\
      --reset-on-stale  Workaround reset stratum if too many stale shares\n\
  -f, --diff-factor     Divide req. difficulty by this factor (std is 1.0)\n\
  -f, --diff-factor     Divide req. difficulty by this factor (std is 1.0)\n\
  -m, --diff-multiplier Multiply difficulty by this factor (std is 1.0)\n\
      --hash-meter      Display thread hash rates\n\
      --coinbase-addr=ADDR  payout address for solo mining\n\
@@ -893,7 +912,6 @@ Options:\n\
"\
  -B, --background      run the miner in the background\n\
      --benchmark       run in offline benchmark mode\n\
      --cputest         debug hashes from cpu algorithms\n\
      --cpu-affinity    set process affinity to cpu core(s), mask 0x3 for cores 0 and 1\n\
      --cpu-priority    set process priority (default: 0 idle, 2 normal to 5 highest)\n\
  -b, --api-bind        IP/Port for the miner API (default: 127.0.0.1:4048)\n\
@@ -676,6 +676,14 @@ static inline void mm128_bswap32_intrlv80_4x32( void *d, const void *src )
   d[7] = *( (const uint32_t*)(s7) +(i) ); \
} while(0)

static inline void intrlv_8x32b( void *dst, const void *s0, const void *s1,
      const void *s2, const void *s3, const void *s4, const void *s5,
      const void *s6, const void *s7, const int bit_len )
{
   for ( int i = 0; i < bit_len/32; i++ )
      ILEAVE_8x32( i );
}
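The ILEAVE_8x32 macro and the new intrlv_8x32b() above gather word i of each of the eight source lanes into one contiguous group of eight words. A plain scalar sketch of that layout (illustrative only, assuming the lane-major ordering implied by the macro):

   static void intrlv_8x32_sketch( uint32_t *dst, const uint32_t *src[8], int bit_len )
   {
      for ( int i = 0; i < bit_len/32; i++ )
         for ( int lane = 0; lane < 8; lane++ )
            dst[ 8*i + lane ] = src[lane][i];   // word i of every lane sits together
   }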
static inline void intrlv_8x32( void *dst, const void *s0, const void *s1,
      const void *s2, const void *s3, const void *s4, const void *s5,
      const void *s6, const void *s7, const int bit_len )
@@ -730,6 +738,14 @@ static inline void intrlv_8x32_512( void *dst, const void *s0, const void *s1,
   *( (uint32_t*)(d7) +(i) ) = s[7]; \
} while(0)

static inline void dintrlv_8x32b( void *d0, void *d1, void *d2, void *d3,
      void *d4, void *d5, void *d6, void *d7, const void *src,
      const int bit_len )
{
   for ( int i = 0; i < bit_len/32; i++ )
      DLEAVE_8x32( i );
}

static inline void dintrlv_8x32( void *d0, void *d1, void *d2, void *d3,
      void *d4, void *d5, void *d6, void *d7, const void *src,
      const int bit_len )
sysinfos.c  (43 changed lines)
@@ -67,7 +67,6 @@
#define HWMON_ALT5 \
   "/sys/class/hwmon/hwmon0/device/temp1_input"


static inline float linux_cputemp(int core)
{
   float tc = 0.0;
@@ -97,49 +96,43 @@ static inline float linux_cputemp(int core)
   return tc;
}

#define CPUFREQ_PATH \

#define CPUFREQ_PATH0\
   "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq"

#define CPUFREQ_PATHn \
   "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_cur_freq"


// "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq"
static inline uint32_t linux_cpufreq(int core)
static inline float linux_cpufreq(int core)
{
   FILE *fd = fopen(CPUFREQ_PATH, "r");
   uint32_t freq = 0;
   FILE *fd = fopen( CPUFREQ_PATH0, "r" );
   long int freq = 0;

   if (!fd)
      return freq;

   if (!fscanf(fd, "%d", &freq))
      return freq;

   return freq;
   if ( !fd ) return (float)freq;
   if ( !fscanf( fd, "%ld", &freq ) ) freq = 0;
   fclose( fd );
   return (float)freq;
}

static inline void linux_cpu_hilo_freq( uint32_t* lo, uint32_t *hi )
static inline void linux_cpu_hilo_freq( float *lo, float *hi )
{
   uint64_t freq = 0, hi_freq = 0, lo_freq = 0xffffffffffffffff;
   long int freq = 0, hi_freq = 0, lo_freq = 0x7fffffff;

   for ( int i = 0; i < num_cpus; i++ )
   {
      char path[64];
      sprintf( path, CPUFREQ_PATHn, i );

      FILE *fd = fopen( path, "r" );
      if ( fd )
      if ( !fd ) return;
      else if ( fscanf( fd, "%ld", &freq ) )
      {
         if ( fscanf( fd, "%ld", &freq ) )
         {
            if ( freq > hi_freq ) hi_freq = freq;
            if ( freq < lo_freq ) lo_freq = freq;
         }
         if ( freq > hi_freq ) hi_freq = freq;
         if ( freq < lo_freq ) lo_freq = freq;
      }
      fclose( fd );
   }
   *hi = hi_freq;
   *lo = lo_freq;
   *hi = (float)hi_freq;
   *lo = (float)lo_freq;
}

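The reworked frequency helpers return the raw scaling_cur_freq readings (kHz) as floats; the caller in cpu-miner.c divides by 1e6 to display GHz. Typical use, with illustrative readings rather than values from the patch:

   float lo_freq = 0., hi_freq = 0.;
   linux_cpu_hilo_freq( &lo_freq, &hi_freq );    // e.g. 800000. and 4200000. (kHz)
   applog( LOG_NOTICE, "Freq: %.3f/%.3f GHz", lo_freq / 1e6, hi_freq / 1e6 );   // 0.800/4.200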
util.c  (91 changed lines)
@@ -983,6 +983,7 @@ int timeval_subtract(struct timeval *result, struct timeval *x,
   return x->tv_sec < y->tv_sec;
}

// deprecated, use test_hash_and_submit
// Use this when deinterleaved
// do 64 bit test 4 iterations
inline bool valid_hash( const void *hash, const void *target )
@@ -999,6 +1000,7 @@ inline bool valid_hash( const void *hash, const void *target )
   return true;
}

// deprecated, use test_hash_and_submit
bool fulltest( const uint32_t *hash, const uint32_t *target )
{
   int i;
@@ -1041,35 +1043,43 @@ bool fulltest( const uint32_t *hash, const uint32_t *target )

void diff_to_target(uint32_t *target, double diff)
{
   uint64_t m;
   int k;

   const double exp64 = (double)0xffffffffffffffff + 1.;
   for ( k = 3; k > 0 && diff > 1.0; k-- )
      diff /= exp64;
   uint64_t m;
   int k;

// for (k = 6; k > 0 && diff > 1.0; k--)
//    diff /= 4294967296.0;
   m = (uint64_t)( 0xffff0000 / diff );
   if unlikely( m == 0 && k == 3 )
      memset( target, 0xff, 32 );
   else
   {
      memset( target, 0, 32 );
      ((uint64_t*)target)[k] = m;
//    target[k] = (uint32_t)m;
//    target[k + 1] = (uint32_t)(m >> 32);
   }
   for (k = 6; k > 0 && diff > 1.0; k--)
      diff /= exp32;

//    diff /= 4294967296.0;

// m = (uint64_t)(4294901760.0 / diff);

   m = (uint64_t)(exp32 / diff);

   if (m == 0 && k == 6)
      memset(target, 0xff, 32);
   else {
      memset(target, 0, 32);
      target[k] = (uint32_t)m;
      target[k + 1] = (uint32_t)(m >> 32);
   }
}

// Only used by stratum pools
// deprecated
void work_set_target(struct work* work, double diff)
{
   diff_to_target( work->target, diff );
   work->targetdiff = diff;
}

// Only used by longpoll pools
double target_to_diff( uint32_t* target )
{
   uint64_t *targ = (uint64_t*)target;
   // extract 64 bits from target[ 240:176 ]
   uint64_t m = ( targ[3] << 16 ) | ( targ[2] >> 48 );
   return m ? (exp48-1.) / (double)m : 0.;
}

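The new target_to_diff() reads the 64 bits at target[240:176] and returns (2^48 - 1) / m. An illustrative value (not from the patch): a target whose only non-zero 32-bit word is target[6] = 0x10000000 gives m = 2^44, so

   uint32_t target[8] = { 0, 0, 0, 0, 0, 0, 0x10000000, 0 };
   double d = target_to_diff( target );   // (2^48 - 1) / 2^44 ~= 16.0

which should be roughly the difficulty the updated diff_to_target() encodes there for diff 16, i.e. the two functions round-trip to within rounding.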
/*
double target_to_diff(uint32_t* target)
{
   uchar* tgt = (uchar*) target;
@@ -1083,11 +1093,13 @@ double target_to_diff(uint32_t* target)
      (uint64_t)tgt[23] << 8 |
      (uint64_t)tgt[22] << 0;


   if (!m)
      return 0.;
   else
      return (double)0x0000ffff00000000/m;
}
*/

#ifdef WIN32
#define socket_blocks() (WSAGetLastError() == WSAEWOULDBLOCK)
@@ -1546,35 +1558,44 @@ bool stratum_authorize(struct stratum_ctx *sctx, const char *user, const char *p

   ret = true;

   if (!opt_extranonce)
   if ( !opt_extranonce )
      goto out;

   // subscribe to extranonce (optional)
   sprintf(s, "{\"id\": 3, \"method\": \"mining.extranonce.subscribe\", \"params\": []}");

   if (!stratum_send_line(sctx, s))
   if ( !stratum_send_line( sctx, s ) )
      goto out;

   if (!socket_full(sctx->sock, 3)) {
      applog(LOG_WARNING, "stratum extranonce subscribe timed out");
      goto out;
   if ( !socket_full( sctx->sock, 3 ) )
   {
      applog( LOG_WARNING, "Extranonce disabled, subscribe timed out" );
      opt_extranonce = false;
      goto out;
   }
   if ( !opt_quiet )
      applog( LOG_INFO, "Extranonce subscription enabled" );

   sret = stratum_recv_line(sctx);
   if (sret) {
      json_t *extra = JSON_LOADS(sret, &err);
      if (!extra) {
   sret = stratum_recv_line( sctx );
   if ( sret )
   {
      json_t *extra = JSON_LOADS( sret, &err );
      if ( !extra )
      {
         applog(LOG_WARNING, "JSON decode failed(%d): %s", err.line, err.text);
      } else {
         if (json_integer_value(json_object_get(extra, "id")) != 3) {
      }
      else
      {
         if ( json_integer_value(json_object_get( extra, "id" ) ) != 3 )
         {
            // we receive a standard method if extranonce is ignored
            if (!stratum_handle_method(sctx, sret))
               applog(LOG_WARNING, "Stratum answer id is not correct!");
            if ( !stratum_handle_method( sctx, sret ) )
               applog( LOG_WARNING, "Stratum answer id is not correct!" );
         }
         res_val = json_object_get(extra, "result");
         res_val = json_object_get( extra, "result" );
//       if (opt_debug && (!res_val || json_is_false(res_val)))
//          applog(LOG_DEBUG, "extranonce subscribe not supported");
         json_decref(extra);
         json_decref( extra );
      }
      free(sret);
   }