Compare commits

..

17 Commits

Author     SHA1        Message    Date
Jay D Dee  fb9163185a  v3.12.7    2020-03-20 16:30:12 -04:00
Jay D Dee  6e8b8ed34f  v3.12.6.1  2020-03-07 14:11:06 -05:00
Jay D Dee  c0aadbcc99  v3.12.6    2020-03-05 18:43:20 -05:00
Jay D Dee  3da149418a  v3.12.5    2020-03-01 13:18:17 -05:00
Jay D Dee  720610cce5  v3.12.4.6  2020-02-28 18:20:32 -05:00
Jay D Dee  cedcf4d070  v3.12.4.5  2020-02-28 02:42:22 -05:00
Jay D Dee  81b50c3c71  v3.12.4.4  2020-02-25 14:07:32 -05:00
Jay D Dee  0e1e88f53e  v3.12.4.3  2020-02-24 21:35:19 -05:00
Jay D Dee  45c77a5c81  v3.12.4.2  2020-02-23 15:31:06 -05:00
Jay D Dee  dbce7e0721  v3.12.4.1  2020-02-22 18:06:39 -05:00
Jay D Dee  6d66051de6  v3.12.4    2020-02-21 16:34:53 -05:00
Jay D Dee  b93be8816a  v3.12.3.1  2020-02-18 12:05:47 -05:00
Jay D Dee  19b0ac6d5c  v3.12.3    2020-02-13 04:25:33 -05:00
Jay D Dee  3da2b958cf  v3.12.2    2020-02-09 13:30:40 -05:00
Jay D Dee  dc2f8d81d3  v3.12.1    2020-02-07 20:18:20 -05:00
Jay D Dee  fc97ef174a  v3.12.0.1  2020-02-06 22:50:20 -05:00
Jay D Dee  13523a12f9  v3.12.0    2020-02-05 22:50:58 -05:00
119 changed files with 6919 additions and 13193 deletions

View File

@@ -21,15 +21,6 @@ cpuminer_SOURCES = \
api.c \
sysinfos.c \
algo-gate-api.c\
crypto/oaes_lib.c \
crypto/c_keccak.c \
crypto/c_groestl.c \
crypto/c_blake256.c \
crypto/c_jh.c \
crypto/c_skein.c \
crypto/hash.c \
crypto/aesb.c \
crypto/magimath.cpp \
algo/argon2/argon2a/argon2a.c \
algo/argon2/argon2a/ar2/argon2.c \
algo/argon2/argon2a/ar2/opt.c \
@@ -76,10 +67,6 @@ cpuminer_SOURCES = \
algo/bmw/bmw512-gate.c \
algo/bmw/bmw512.c \
algo/bmw/bmw512-4way.c \
algo/cryptonight/cryptolight.c \
algo/cryptonight/cryptonight-common.c\
algo/cryptonight/cryptonight-aesni.c\
algo/cryptonight/cryptonight.c\
algo/cubehash/cubehash_sse2.c\
algo/cubehash/cube-hash-2way.c \
algo/echo/sph_echo.c \
@@ -141,7 +128,8 @@ cpuminer_SOURCES = \
algo/lyra2/allium.c \
algo/lyra2/phi2-4way.c \
algo/lyra2/phi2.c \
algo/m7m.c \
algo/m7m/m7m.c \
algo/m7m/magimath.cpp \
algo/nist5/nist5-gate.c \
algo/nist5/nist5-4way.c \
algo/nist5/nist5.c \
@@ -175,6 +163,7 @@ cpuminer_SOURCES = \
algo/sha/sha256-hash-4way.c \
algo/sha/sha512-hash-4way.c \
algo/sha/hmac-sha256-hash.c \
algo/sha/hmac-sha256-hash-4way.c \
algo/sha/sha2.c \
algo/sha/sha256t-gate.c \
algo/sha/sha256t-4way.c \

View File

@@ -12,10 +12,24 @@ a false positive, they are flagged simply because they are cryptocurrency
miners. The source code is open for anyone to inspect. If you don't trust
the software, don't use it.
New thread:
https://bitcointalk.org/index.php?topic=5226770.msg53865575#msg53865575
Old thread:
https://bitcointalk.org/index.php?topic=1326803.0
mailto://jayddee246@gmail.com
This note is to confirm that bitcointalk users JayDDee and joblo are the
same person.
I created a new BCT user JayDDee to match my github user id.
The old thread has been locked but still contains useful information for
reading.
See file RELEASE_NOTES for change log and INSTALL_LINUX or INSTALL_WINDOWS
for compile instructions.
@@ -23,25 +37,25 @@ Requirements
------------
1. An x86_64 architecture CPU with a minimum of SSE2 support. This includes
Intel Core2 and newer and AMD equivalents. In order to take advantage of AES_NI
optimizations a CPU with AES_NI is required. This includes Intel Westmere
and newer and AMD equivalents. Further optimizations are available on some
algorithms for CPUs with AVX and AVX2, Sandybridge and Haswell respectively.
Intel Core2 and newer and AMD equivalents. Further optimizations are available
on some algorithms for CPUs with AES, AVX, AVX2, SHA, AVX512 and VAES.
Older CPUs are supported by cpuminer-multi by TPruvot but at reduced
performance.
ARM CPUs are not supported.
ARM and Aarch64 CPUs are not supported.
2. 64 bit Linux OS. Ubuntu and Fedora based distributions, including Mint and
Centos, are known to work and have all dependencies in their repositories.
Others may work but may require more effort. Older versions such as Centos 6
don't work due to missing features.
2. 64 bit Linux or Windows OS. Ubuntu and Fedora based distributions,
including Mint and Centos, are known to work and have all dependencies
in their repositories. Others may work but may require more effort. Older
versions such as Centos 6 don't work due to missing features.
64 bit Windows OS is supported with mingw_w64 and msys or pre-built binaries.
macOS, OS X and Android are not supported.
3. Stratum pool. Some algos may work wallet mining using getwork or GBT. YMMV.
3. Stratum pool supporting stratum+tcp:// or stratum+ssl:// protocols or
RPC getwork using http:// or https://.
GBT is YMMV.
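For example (a minimal sketch; the pool host, port and credentials are
illustrative, and -a, -o, -u and -p are cpuminer's standard algo, url,
user and password options):
cpuminer -a yespower -o stratum+ssl://pool.example.com:4233 -u wallet.worker -p x
cpuminer -a sha256d -o http://127.0.0.1:8332 -u user -p pass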
Supported Algorithms
--------------------
@@ -138,6 +152,27 @@ Supported Algorithms
yespower-b2b generic yespower + blake2b
zr5 Ziftr
Many variations of scrypt-based algos can be mined by specifying their
parameters:
scryptn2: --algo scrypt --param-n 1048576
cpupower: --algo yespower --param-key "CPUpower: The number of CPU working or available for proof-of-work mining"
power2b: --algo yespower-b2b --param-n 2048 --param-r 32 --param-key "Now I am become Death, the destroyer of worlds"
sugarchain: --algo yespower --param-n 2048 --param-r 32 --param-key "Satoshi Nakamoto 31/Oct/2008 Proof-of-work is essentially one-CPU-one-vote"
yespoweriots: --algo yespower --param-n 2048 --param-key "Iots is committed to the development of IOT"
yespowerlitb: --algo yespower --param-n 2048 --param-r 32 --param-key "LITBpower: The number of LITB working or available for proof-of-work mini"
yespoweric: --algo yespower --param-n 2048 --param-r 32 --param-key "IsotopeC"
yespowerurx: --algo yespower --param-n 2048 --param-r 32 --param-key "UraniumX"
yespowerltncg: --algo yespower --param-n 2048 --param-r 32 --param-key "LTNCGYES"
Errata
------

View File

@@ -65,10 +65,177 @@ If not what makes it happen or not happen?
Change Log
----------
v3.12.7
Issue #257: fixed a file descriptor leak which caused the CPU temperature
and frequency query to report zeros after mining for a couple of hours.
Issue #253: stale share reduction for yescrypt, sonoa.
v3.12.6.1
Issue #252: Fixed SSL mining (stratum+tcps://)
Issue #254: Fixed benchmark.
Issue #253: Implemented stale share reduction for yespower, x25x, x22i, x21s,
x16*, scryptn2, more to come.
v3.12.6
Issue #246: improved stale share detection for getwork.
Improved precision of target_to_diff conversion from 4 digits to 20+.
Display hash and target debug data for all rejected shares.
A graphical representation of CPU affinity is displayed when using --threads.
Added highest and lowest accepted share to summary log.
Other small changes to logs to improve consistency and clarity.
v3.12.5
Issues #246 & #251: fixed incorrect share diff for stratum and getwork,
fixed incorrect target diff for getwork. Stats should now be correct for
getwork as well as stratum.
Issue #252: Fixed stratum+tcps not using curl ssl.
Getwork: reduce stale blocks, faster response to new work.
Added ntime to new job/work logs.
README.md now lists the parameters for yespower variations that don't have
a specific algo name.
v3.12.4.6
Issue #246: fixed getwork repeated new block logs with same height. New work
for the same block is now reported as "New work" instead of "New block".
Also added a check that work is new before generating "New work" log.
Added target diff to getwork new block log.
Changed share ratio in share result log to a simple fraction, no longer a percentage.
Added debug log to display mininginfo, use -D.
v3.12.4.5
Issue #246: better stale share detection for getwork, and enhanced logging
of stale shares for stratum & getwork.
Issue #251: fixed incorrect share difficulty and share ratio in share
result log.
Changed submit log to include share diff and block height.
Small cosmetic changes to logs.
v3.12.4.4
Issue #246: Fixed net hashrate in getwork block log,
removed duplicate getwork block log,
other small tweaks to stats logs for getwork.
Issue #248: Fixed chronic stale shares with scrypt:1048576 (scryptn2).
v3.12.4.3
Fixed segfault in new block log for getwork.
Disabled silent discarding of stale work after the submit is logged.
v3.12.4.2
Issue #245: fixed getwork stale shares, solo mining with getwork now works.
Issue #246: implemented block and summary logs for getwork.
v3.12.4.1
Issue #245: fix scantime when mining solo with getwork.
Added debug logs for creation of stratum and longpoll threads, use -D to
enable.
v3.12.4
Issue #244: Change longpoll to ignore job id.
Lyra2rev2 AVX2 +3%, AVX512 +6%.
v3.12.3.1
Issue #241: Fixed regression that broke coinbase address in v3.11.7.
v3.12.3
Issue #238: Fixed skunk AVX2.
Issue #239: Faster AVX2 & AVX512 for skein +44%, skein2 +30%, plus marginal
increases for skunk, x16r, x16rv2, x16rt, x16rt-veil, x16s, x21s.
Faster anime VAES +57%, AVX512 +21%, AVX2 +3%.
Redesigned code responsible for #236.
v3.12.2
Fixed xevan, skein, skein2 AVX2, #238.
Reversed polarity of AVX2 vector bit test utilities, and all users, to be
logically and semantically correct. Follow up to issue #236.
v3.12.1
Fixed anime AVX2 low difficulty shares, git issue #236.
Periodic summary now reports lost hash rate due to rejected and stale shares,
displayed only when non-zero.
v3.12.0.1
Fixed hodl rejects, git issue #237.
Fixed debug code added in v3.12.0 to work with AVX2; it is now enabled only
after low difficulty shares have been seen, to avoid unnecessarily excessive
log output.
Added more digits of precision to diff in log output to help diagnose
low difficulty shares.
v3.12.0
Faster phi2 AVX2 +62%, AVX512 +150% on Intel CPUs. AMD Ryzen AVX2 is
YMMV due to its inferior AVX2 implementation.
Fixed Hodl stats, rejects are still an issue since v3.9.5, git issue #237.
API can now be enabled with "-b port" or "--api-bind port".
It will use the default address 127.0.0.1.
Editorial: Short form options should only be used on the command line to save
typing. Configuration files and scripts should always use the long form
"--api-bind addr:port" without relying on any defaults. This is a general
recommendation that applies to all options for any application.
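For example, a minimal sketch of a config file entry, assuming the JSON
config file format accepted with -c/--config (the address and port shown
are illustrative, not documented defaults):
{
  "api-bind" : "127.0.0.1:4048"
}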
Removed obsolete cryptonight, all variants, and supporting code for more
size reduction and faster compiling.
Tweaked the timing of the CPU temperature and frequency log (Linux only).
Added some debug code to collect more info about low difficulty rejects,
git issue #236.
v3.11.9
Fixed x16r invalid shares when Luffa was first in hash order.
API is disabled by default.
New startup message for status of stratum connection, API & extranonce.
New log report for CPU temperature, frequency of fastest and slowest cores.

View File

@@ -97,23 +97,24 @@ int null_scanhash()
return 0;
}
void null_hash()
int null_hash()
{
applog(LOG_WARNING,"SWERR: null_hash unsafe null function");
return 0;
};
/*
void null_hash_suw()
{
applog(LOG_WARNING,"SWERR: null_hash_suw unsafe null function");
};
*/
void init_algo_gate( algo_gate_t* gate )
{
gate->miner_thread_init = (void*)&return_true;
gate->scanhash = (void*)&null_scanhash;
gate->hash = (void*)&null_hash;
gate->hash_suw = (void*)&null_hash_suw;
// gate->hash_suw = (void*)&null_hash_suw;
gate->get_new_work = (void*)&std_get_new_work;
gate->get_nonceptr = (void*)&std_get_nonceptr;
gate->work_decode = (void*)&std_le_work_decode;
gate->decode_extra_data = (void*)&do_nothing;
gate->gen_merkle_root = (void*)&sha256d_gen_merkle_root;
@@ -129,7 +130,6 @@ void init_algo_gate( algo_gate_t* gate )
gate->resync_threads = (void*)&do_nothing;
gate->do_this_thread = (void*)&return_true;
gate->longpoll_rpc_call = (void*)&std_longpoll_rpc_call;
gate->stratum_handle_response = (void*)&std_stratum_handle_response;
gate->get_work_data_size = (void*)&std_get_work_data_size;
gate->optimizations = EMPTY_SET;
gate->ntime_index = STD_NTIME_INDEX;
@@ -168,9 +168,6 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
case ALGO_BLAKECOIN: register_blakecoin_algo ( gate ); break;
case ALGO_BMW512: register_bmw512_algo ( gate ); break;
case ALGO_C11: register_c11_algo ( gate ); break;
case ALGO_CRYPTOLIGHT: register_cryptolight_algo ( gate ); break;
case ALGO_CRYPTONIGHT: register_cryptonight_algo ( gate ); break;
case ALGO_CRYPTONIGHTV7: register_cryptonightv7_algo ( gate ); break;
case ALGO_DECRED: register_decred_algo ( gate ); break;
case ALGO_DEEP: register_deep_algo ( gate ); break;
case ALGO_DMD_GR: register_dmd_gr_algo ( gate ); break;
@@ -235,11 +232,6 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
case ALGO_X22I: register_x22i_algo ( gate ); break;
case ALGO_X25X: register_x25x_algo ( gate ); break;
case ALGO_XEVAN: register_xevan_algo ( gate ); break;
/* case ALGO_YESCRYPT: register_yescrypt_05_algo ( gate ); break;
case ALGO_YESCRYPTR8: register_yescryptr8_05_algo ( gate ); break;
case ALGO_YESCRYPTR16: register_yescryptr16_05_algo ( gate ); break;
case ALGO_YESCRYPTR32: register_yescryptr32_05_algo ( gate ); break;
*/
case ALGO_YESCRYPT: register_yescrypt_algo ( gate ); break;
case ALGO_YESCRYPTR8: register_yescryptr8_algo ( gate ); break;
case ALGO_YESCRYPTR8G: register_yescryptr8g_algo ( gate ); break;
@@ -266,25 +258,6 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
// restore warnings
#pragma GCC diagnostic pop
// override std defaults with jr2 defaults
bool register_json_rpc2( algo_gate_t *gate )
{
// gate->wait_for_diff = (void*)&do_nothing;
gate->get_new_work = (void*)&jr2_get_new_work;
gate->get_nonceptr = (void*)&jr2_get_nonceptr;
gate->stratum_gen_work = (void*)&jr2_stratum_gen_work;
gate->build_stratum_request = (void*)&jr2_build_stratum_request;
gate->submit_getwork_result = (void*)&jr2_submit_getwork_result;
gate->longpoll_rpc_call = (void*)&jr2_longpoll_rpc_call;
gate->work_decode = (void*)&jr2_work_decode;
gate->stratum_handle_response = (void*)&jr2_stratum_handle_response;
gate->nonce_index = JR2_NONCE_INDEX;
jsonrpc_2 = true; // still needed
opt_extranonce = false;
// have_gbt = false;
return true;
}
// run the alternate hash function for a specific algo
void exec_hash_function( int algo, void *output, const void *pdata )
{
@@ -305,39 +278,37 @@ void exec_hash_function( int algo, void *output, const void *pdata )
const char* const algo_alias_map[][2] =
{
// alias proper
{ "argon2d-crds", "argon2d250" },
{ "argon2d-dyn", "argon2d500" },
{ "argon2d-uis", "argon2d4096" },
{ "bcd", "x13bcd" },
{ "bitcore", "timetravel10" },
{ "bitzeny", "yescryptr8" },
{ "blake256r8", "blakecoin" },
{ "blake256r8vnl", "vanilla" },
{ "blake256r14", "blake" },
{ "blake256r14dcr", "decred" },
{ "cryptonote", "cryptonight" },
{ "cryptonight-light", "cryptolight" },
{ "diamond", "dmd-gr" },
{ "droplp", "drop" },
{ "espers", "hmq1725" },
{ "flax", "c11" },
{ "hsr", "x13sm3" },
{ "jackpot", "jha" },
{ "jane", "scryptjane" },
{ "lyra2", "lyra2re" },
{ "lyra2v2", "lyra2rev2" },
{ "lyra2v3", "lyra2rev3" },
{ "myrgr", "myr-gr" },
{ "myriad", "myr-gr" },
{ "neo", "neoscrypt" },
{ "phi", "phi1612" },
{ "sib", "x11gost" },
{ "timetravel8", "timetravel" },
{ "veil", "x16rt-veil" },
{ "x16r-hex", "hex" },
{ "yenten", "yescryptr16" },
{ "ziftr", "zr5" },
{ NULL, NULL }
{ "argon2d-crds", "argon2d250" },
{ "argon2d-dyn", "argon2d500" },
{ "argon2d-uis", "argon2d4096" },
{ "bcd", "x13bcd" },
{ "bitcore", "timetravel10" },
{ "bitzeny", "yescryptr8" },
{ "blake256r8", "blakecoin" },
{ "blake256r8vnl", "vanilla" },
{ "blake256r14", "blake" },
{ "blake256r14dcr", "decred" },
{ "diamond", "dmd-gr" },
{ "espers", "hmq1725" },
{ "flax", "c11" },
{ "hsr", "x13sm3" },
{ "jackpot", "jha" },
{ "jane", "scryptjane" },
{ "lyra2", "lyra2re" },
{ "lyra2v2", "lyra2rev2" },
{ "lyra2v3", "lyra2rev3" },
{ "myrgr", "myr-gr" },
{ "myriad", "myr-gr" },
{ "neo", "neoscrypt" },
{ "phi", "phi1612" },
{ "scryptn2", "scrypt:1048576" },
{ "sib", "x11gost" },
{ "timetravel8", "timetravel" },
{ "veil", "x16rt-veil" },
{ "x16r-hex", "hex" },
{ "yenten", "yescryptr16" },
{ "ziftr", "zr5" },
{ NULL, NULL }
};
// if arg is a valid alias for a known algo it is updated with the proper
@@ -350,7 +321,7 @@ void get_algo_alias( char** algo_or_alias )
if ( !strcasecmp( *algo_or_alias, algo_alias_map[i][ ALIAS ] ) )
{
// found valid alias, return proper name
*algo_or_alias = (const char*)( algo_alias_map[i][ PROPER ] );
*algo_or_alias = (char*)( algo_alias_map[i][ PROPER ] );
return;
}
}

View File

@@ -113,9 +113,10 @@ typedef struct
// mandatory functions, must be overwritten
int ( *scanhash ) ( struct work*, uint32_t, uint64_t*, struct thr_info* );
// not used anywhere
// optional unsafe, must be overwritten if algo uses function
void ( *hash ) ( void*, const void*, uint32_t ) ;
void ( *hash_suw ) ( void*, const void* );
int ( *hash ) ( void*, const void*, uint32_t ) ;
//void ( *hash_suw ) ( void*, const void* );
//optional, safe to use default in most cases
@@ -129,9 +130,6 @@ void ( *stratum_gen_work ) ( struct stratum_ctx*, struct work* );
// Get thread local copy of blockheader with unique nonce.
void ( *get_new_work ) ( struct work*, struct work*, int, uint32_t* );
// Return pointer to nonce in blockheader.
uint32_t *( *get_nonceptr ) ( uint32_t* );
// Decode getwork blockheader
bool ( *work_decode ) ( const json_t*, struct work* );
@@ -169,7 +167,6 @@ bool ( *do_this_thread ) ( int );
void ( *resync_threads ) ( struct work* );
json_t* (*longpoll_rpc_call) ( CURL*, int*, char* );
bool ( *stratum_handle_response ) ( json_t* );
set_t optimizations;
int ( *get_work_data_size ) ();
int ntime_index;
@@ -217,36 +214,27 @@ void four_way_not_tested();
int null_scanhash();
// displays warning
void null_hash ();
void null_hash_suw();
int null_hash ();
//void null_hash_suw();
// optional safe targets, default listed first unless noted.
uint32_t *std_get_nonceptr( uint32_t *work_data );
uint32_t *jr2_get_nonceptr( uint32_t *work_data );
void std_get_new_work( struct work *work, struct work *g_work, int thr_id,
uint32_t* end_nonce_ptr );
void jr2_get_new_work( struct work *work, struct work *g_work, int thr_id,
uint32_t* end_nonce_ptr );
void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *work );
void jr2_stratum_gen_work( struct stratum_ctx *sctx, struct work *work );
void sha256d_gen_merkle_root( char *merkle_root, struct stratum_ctx *sctx );
void SHA256_gen_merkle_root ( char *merkle_root, struct stratum_ctx *sctx );
bool std_le_work_decode( const json_t *val, struct work *work );
bool std_be_work_decode( const json_t *val, struct work *work );
bool jr2_work_decode( const json_t *val, struct work *work );
bool std_le_submit_getwork_result( CURL *curl, struct work *work );
bool std_be_submit_getwork_result( CURL *curl, struct work *work );
bool jr2_submit_getwork_result( CURL *curl, struct work *work );
void std_le_build_stratum_request( char *req, struct work *work );
void std_be_build_stratum_request( char *req, struct work *work );
void jr2_build_stratum_request ( char *req, struct work *work );
char* std_malloc_txs_request( struct work *work );
@@ -263,10 +251,10 @@ void std_build_block_header( struct work* g_work, uint32_t version,
void std_build_extraheader( struct work *work, struct stratum_ctx *sctx );
json_t* std_longpoll_rpc_call( CURL *curl, int *err, char *lp_url );
json_t* jr2_longpoll_rpc_call( CURL *curl, int *err );
//json_t* jr2_longpoll_rpc_call( CURL *curl, int *err );
bool std_stratum_handle_response( json_t *val );
bool jr2_stratum_handle_response( json_t *val );
//bool std_stratum_handle_response( json_t *val );
//bool jr2_stratum_handle_response( json_t *val );
bool std_ready_to_mine( struct work* work, struct stratum_ctx* stratum,
int thr_id );
@@ -288,7 +276,7 @@ bool register_algo( algo_gate_t *gate );
// Overrides a common set of functions used by RPC2 and other RPC2-specific
// init. Called by algo's register function before initializing algo-specific
// functions and data.
bool register_json_rpc2( algo_gate_t *gate );
//bool register_json_rpc2( algo_gate_t *gate );
// use this to call the hash function of an algo directly, ie util.c test.
void exec_hash_function( int algo, void *output, const void *pdata );

View File

@@ -153,7 +153,7 @@ bool register_decred_algo( algo_gate_t* gate )
gate->hash = (void*)&decred_hash;
#endif
gate->optimizations = AVX2_OPT;
gate->get_nonceptr = (void*)&decred_get_nonceptr;
// gate->get_nonceptr = (void*)&decred_get_nonceptr;
gate->decode_extra_data = (void*)&decred_decode_extradata;
gate->build_stratum_request = (void*)&decred_be_build_stratum_request;
gate->work_decode = (void*)&std_be_work_decode;

View File

@@ -138,7 +138,7 @@ void bmw512_2way_close( bmw512_2way_context *ctx, void *dst );
#if defined(__AVX2__)
// BMW-512 4 way 64
// BMW-512 64 bit 4 way
typedef struct {
__m256i buf[16];
@@ -149,7 +149,6 @@ typedef struct {
typedef bmw_4way_big_context bmw512_4way_context;
void bmw512_4way_init(void *cc);
void bmw512_4way_update(void *cc, const void *data, size_t len);
@@ -164,6 +163,7 @@ void bmw512_4way_addbits_and_close(
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// BMW-512 64 bit 8 way
typedef struct {
__m512i buf[16];
__m512i H[16];
@@ -171,6 +171,8 @@ typedef struct {
uint64_t bit_count;
} bmw512_8way_context __attribute__((aligned(128)));
void bmw512_8way_full( bmw512_8way_context *ctx, void *out, const void *data,
size_t len );
void bmw512_8way_init( bmw512_8way_context *ctx );
void bmw512_8way_update( bmw512_8way_context *ctx, const void *data,
size_t len );

View File

@@ -1507,6 +1507,93 @@ void bmw512_8way_close( bmw512_8way_context *ctx, void *dst )
casti_m512i( dst, u ) = h1[ v ];
}
void bmw512_8way_full( bmw512_8way_context *ctx, void *out, const void *data,
size_t len )
{
__m512i *vdata = (__m512i*)data;
__m512i *buf = ctx->buf;
__m512i htmp[16];
__m512i *H = ctx->H;
__m512i *h2 = htmp;
uint64_t bit_count = len * 8;
size_t ptr = 0;
const int buf_size = 128; // bytes of one lane, compatible with len
// Init
H[ 0] = m512_const1_64( 0x8081828384858687 );
H[ 1] = m512_const1_64( 0x88898A8B8C8D8E8F );
H[ 2] = m512_const1_64( 0x9091929394959697 );
H[ 3] = m512_const1_64( 0x98999A9B9C9D9E9F );
H[ 4] = m512_const1_64( 0xA0A1A2A3A4A5A6A7 );
H[ 5] = m512_const1_64( 0xA8A9AAABACADAEAF );
H[ 6] = m512_const1_64( 0xB0B1B2B3B4B5B6B7 );
H[ 7] = m512_const1_64( 0xB8B9BABBBCBDBEBF );
H[ 8] = m512_const1_64( 0xC0C1C2C3C4C5C6C7 );
H[ 9] = m512_const1_64( 0xC8C9CACBCCCDCECF );
H[10] = m512_const1_64( 0xD0D1D2D3D4D5D6D7 );
H[11] = m512_const1_64( 0xD8D9DADBDCDDDEDF );
H[12] = m512_const1_64( 0xE0E1E2E3E4E5E6E7 );
H[13] = m512_const1_64( 0xE8E9EAEBECEDEEEF );
H[14] = m512_const1_64( 0xF0F1F2F3F4F5F6F7 );
H[15] = m512_const1_64( 0xF8F9FAFBFCFDFEFF );
// Update
while ( len > 0 )
{
size_t clen;
clen = buf_size - ptr;
if ( clen > len )
clen = len;
memcpy_512( buf + (ptr>>3), vdata, clen >> 3 );
vdata = vdata + (clen>>3);
len -= clen;
ptr += clen;
if ( ptr == buf_size )
{
__m512i *ht;
compress_big_8way( buf, H, h2 );
ht = H;
H = h2;
h2 = ht;
ptr = 0;
}
}
if ( H != ctx->H )
memcpy_512( ctx->H, H, 16 );
// Close
{
__m512i h1[16], h2[16];
size_t u, v;
buf[ ptr>>3 ] = m512_const1_64( 0x80 );
ptr += 8;
if ( ptr > (buf_size - 8) )
{
memset_zero_512( buf + (ptr>>3), (buf_size - ptr) >> 3 );
compress_big_8way( buf, H, h1 );
ptr = 0;
H = h1;
}
memset_zero_512( buf + (ptr>>3), (buf_size - 8 - ptr) >> 3 );
buf[ (buf_size - 8) >> 3 ] = _mm512_set1_epi64( bit_count );
compress_big_8way( buf, H, h2 );
for ( u = 0; u < 16; u ++ )
buf[ u ] = h2[ u ];
compress_big_8way( buf, final_b8, h1 );
for (u = 0, v = 8; u < 8; u ++, v ++)
casti_m512i( out, u ) = h1[ v ];
}
}
#endif // AVX512
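// Illustrative usage sketch (vhash and vdata are hypothetical buffers, not
// from this file): bmw512_8way_full combines init, update and close in a
// single call, e.g. for eight interleaved 80 byte block headers:
//
//    bmw512_8way_context c;
//    bmw512_8way_full( &c, vhash, vdata, 80 );
//
// Lanes are interleaved 64 bits wide; vhash receives the 8 interleaved
// 64 byte digests.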
#ifdef __cplusplus

View File

@@ -1,371 +0,0 @@
// Copyright (c) 2012-2013 The Cryptonote developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "algo-gate-api.h"
#if defined(__arm__) || defined(_MSC_VER)
#ifndef NOASM
#define NOASM
#endif
#endif
#include "crypto/oaes_lib.h"
#include "crypto/c_keccak.h"
#include "crypto/c_groestl.h"
#include "crypto/c_blake256.h"
#include "crypto/c_jh.h"
#include "crypto/c_skein.h"
#include "crypto/int-util.h"
#include "crypto/hash-ops.h"
#if USE_INT128
#if __GNUC__ == 4 && __GNUC_MINOR__ >= 4 && __GNUC_MINOR__ < 6
typedef unsigned int uint128_t __attribute__ ((__mode__ (TI)));
#elif defined (_MSC_VER)
/* only for mingw64 on windows */
#undef USE_INT128
#define USE_INT128 (0)
#else
typedef __uint128_t uint128_t;
#endif
#endif
#define LITE 1
#if LITE /* cryptonight-light */
#define MEMORY (1 << 20)
#define ITER (1 << 19)
#else
#define MEMORY (1 << 21) /* 2 MiB */
#define ITER (1 << 20)
#endif
#define AES_BLOCK_SIZE 16
#define AES_KEY_SIZE 32 /*16*/
#define INIT_SIZE_BLK 8
#define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE)
#pragma pack(push, 1)
union cn_slow_hash_state {
union hash_state hs;
struct {
uint8_t k[64];
uint8_t init[INIT_SIZE_BYTE];
};
};
#pragma pack(pop)
static void do_blake_hash(const void* input, size_t len, char* output) {
blake256_hash((uint8_t*)output, input, len);
}
static void do_groestl_hash(const void* input, size_t len, char* output) {
groestl(input, len * 8, (uint8_t*)output);
}
static void do_jh_hash(const void* input, size_t len, char* output) {
int r = jh_hash(HASH_SIZE * 8, input, 8 * len, (uint8_t*)output);
assert(likely(SUCCESS == r));
}
static void do_skein_hash(const void* input, size_t len, char* output) {
int r = skein_hash(8 * HASH_SIZE, input, 8 * len, (uint8_t*)output);
assert(likely(SKEIN_SUCCESS == r));
}
extern int aesb_single_round(const uint8_t *in, uint8_t*out, const uint8_t *expandedKey);
extern int aesb_pseudo_round_mut(uint8_t *val, uint8_t *expandedKey);
#if !defined(_MSC_VER) && !defined(NOASM)
extern int fast_aesb_single_round(const uint8_t *in, uint8_t*out, const uint8_t *expandedKey);
extern int fast_aesb_pseudo_round_mut(uint8_t *val, uint8_t *expandedKey);
#else
#define fast_aesb_single_round aesb_single_round
#define fast_aesb_pseudo_round_mut aesb_pseudo_round_mut
#endif
#if defined(NOASM) || !defined(__x86_64__)
static uint64_t mul128(uint64_t multiplier, uint64_t multiplicand, uint64_t* product_hi) {
// multiplier = ab = a * 2^32 + b
// multiplicand = cd = c * 2^32 + d
// ab * cd = a * c * 2^64 + (a * d + b * c) * 2^32 + b * d
uint64_t a = hi_dword(multiplier);
uint64_t b = lo_dword(multiplier);
uint64_t c = hi_dword(multiplicand);
uint64_t d = lo_dword(multiplicand);
uint64_t ac = a * c;
uint64_t ad = a * d;
uint64_t bc = b * c;
uint64_t bd = b * d;
uint64_t adbc = ad + bc;
uint64_t adbc_carry = adbc < ad ? 1 : 0;
// multiplier * multiplicand = product_hi * 2^64 + product_lo
uint64_t product_lo = bd + (adbc << 32);
uint64_t product_lo_carry = product_lo < bd ? 1 : 0;
*product_hi = ac + (adbc >> 32) + (adbc_carry << 32) + product_lo_carry;
assert(ac <= *product_hi);
return product_lo;
}
#else
extern uint64_t mul128(uint64_t multiplier, uint64_t multiplicand, uint64_t* product_hi);
#endif
static void (* const extra_hashes[4])(const void *, size_t, char *) = {
do_blake_hash, do_groestl_hash, do_jh_hash, do_skein_hash
};
static inline size_t e2i(const uint8_t* a) {
#if !LITE
return ((uint32_t *)a)[0] & 0x1FFFF0;
#else
return ((uint32_t *)a)[0] & 0xFFFF0;
#endif
}
static inline void mul_sum_xor_dst(const uint8_t* a, uint8_t* c, uint8_t* dst) {
uint64_t hi, lo = mul128(((uint64_t*) a)[0], ((uint64_t*) dst)[0], &hi) + ((uint64_t*) c)[1];
hi += ((uint64_t*) c)[0];
((uint64_t*) c)[0] = ((uint64_t*) dst)[0] ^ hi;
((uint64_t*) c)[1] = ((uint64_t*) dst)[1] ^ lo;
((uint64_t*) dst)[0] = hi;
((uint64_t*) dst)[1] = lo;
}
static inline void xor_blocks(uint8_t* a, const uint8_t* b) {
#if USE_INT128
*((uint128_t*) a) ^= *((uint128_t*) b);
#else
((uint64_t*) a)[0] ^= ((uint64_t*) b)[0];
((uint64_t*) a)[1] ^= ((uint64_t*) b)[1];
#endif
}
static inline void xor_blocks_dst(const uint8_t* a, const uint8_t* b, uint8_t* dst) {
#if USE_INT128
*((uint128_t*) dst) = *((uint128_t*) a) ^ *((uint128_t*) b);
#else
((uint64_t*) dst)[0] = ((uint64_t*) a)[0] ^ ((uint64_t*) b)[0];
((uint64_t*) dst)[1] = ((uint64_t*) a)[1] ^ ((uint64_t*) b)[1];
#endif
}
struct cryptonight_ctx {
uint8_t _ALIGN(16) long_state[MEMORY];
union cn_slow_hash_state state;
uint8_t _ALIGN(16) text[INIT_SIZE_BYTE];
uint8_t _ALIGN(16) a[AES_BLOCK_SIZE];
uint8_t _ALIGN(16) b[AES_BLOCK_SIZE];
uint8_t _ALIGN(16) c[AES_BLOCK_SIZE];
oaes_ctx* aes_ctx;
};
static void cryptolight_hash_ctx(void* output, const void* input, int len, struct cryptonight_ctx* ctx)
{
len = 76;
hash_process(&ctx->state.hs, (const uint8_t*) input, len);
ctx->aes_ctx = (oaes_ctx*) oaes_alloc();
size_t i, j;
memcpy(ctx->text, ctx->state.init, INIT_SIZE_BYTE);
oaes_key_import_data(ctx->aes_ctx, ctx->state.hs.b, AES_KEY_SIZE);
for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE) {
aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 0], ctx->aes_ctx->key->exp_data);
aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 1], ctx->aes_ctx->key->exp_data);
aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 2], ctx->aes_ctx->key->exp_data);
aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 3], ctx->aes_ctx->key->exp_data);
aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 4], ctx->aes_ctx->key->exp_data);
aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 5], ctx->aes_ctx->key->exp_data);
aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 6], ctx->aes_ctx->key->exp_data);
aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 7], ctx->aes_ctx->key->exp_data);
memcpy(&ctx->long_state[i], ctx->text, INIT_SIZE_BYTE);
}
xor_blocks_dst(&ctx->state.k[0], &ctx->state.k[32], ctx->a);
xor_blocks_dst(&ctx->state.k[16], &ctx->state.k[48], ctx->b);
for (i = 0; likely(i < ITER / 4); ++i) {
/* Dependency chain: address -> read value ------+
* written value <-+ hard function (AES or MUL) <+
* next address <-+
*/
/* Iteration 1 */
j = e2i(ctx->a);
aesb_single_round(&ctx->long_state[j], ctx->c, ctx->a);
xor_blocks_dst(ctx->c, ctx->b, &ctx->long_state[j]);
/* Iteration 2 */
mul_sum_xor_dst(ctx->c, ctx->a, &ctx->long_state[e2i(ctx->c)]);
/* Iteration 3 */
j = e2i(ctx->a);
aesb_single_round(&ctx->long_state[j], ctx->b, ctx->a);
xor_blocks_dst(ctx->b, ctx->c, &ctx->long_state[j]);
/* Iteration 4 */
mul_sum_xor_dst(ctx->b, ctx->a, &ctx->long_state[e2i(ctx->b)]);
}
memcpy(ctx->text, ctx->state.init, INIT_SIZE_BYTE);
oaes_key_import_data(ctx->aes_ctx, &ctx->state.hs.b[32], AES_KEY_SIZE);
for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE) {
xor_blocks(&ctx->text[0 * AES_BLOCK_SIZE], &ctx->long_state[i + 0 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx->text[0 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
xor_blocks(&ctx->text[1 * AES_BLOCK_SIZE], &ctx->long_state[i + 1 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx->text[1 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
xor_blocks(&ctx->text[2 * AES_BLOCK_SIZE], &ctx->long_state[i + 2 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx->text[2 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
xor_blocks(&ctx->text[3 * AES_BLOCK_SIZE], &ctx->long_state[i + 3 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx->text[3 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
xor_blocks(&ctx->text[4 * AES_BLOCK_SIZE], &ctx->long_state[i + 4 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx->text[4 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
xor_blocks(&ctx->text[5 * AES_BLOCK_SIZE], &ctx->long_state[i + 5 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx->text[5 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
xor_blocks(&ctx->text[6 * AES_BLOCK_SIZE], &ctx->long_state[i + 6 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx->text[6 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
xor_blocks(&ctx->text[7 * AES_BLOCK_SIZE], &ctx->long_state[i + 7 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx->text[7 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
}
memcpy(ctx->state.init, ctx->text, INIT_SIZE_BYTE);
hash_permutation(&ctx->state.hs);
/*memcpy(hash, &state, 32);*/
extra_hashes[ctx->state.hs.b[0] & 3](&ctx->state, 200, output);
oaes_free((OAES_CTX **) &ctx->aes_ctx);
}
void cryptolight_hash(void* output, const void* input, int len) {
struct cryptonight_ctx *ctx = (struct cryptonight_ctx*)malloc(sizeof(struct cryptonight_ctx));
cryptolight_hash_ctx(output, input, len, ctx);
free(ctx);
}
#if defined(__AES__)
static void cryptolight_hash_ctx_aes_ni(void* output, const void* input,
int len, struct cryptonight_ctx* ctx)
{
hash_process(&ctx->state.hs, (const uint8_t*)input, len);
ctx->aes_ctx = (oaes_ctx*) oaes_alloc();
size_t i, j;
memcpy(ctx->text, ctx->state.init, INIT_SIZE_BYTE);
oaes_key_import_data(ctx->aes_ctx, ctx->state.hs.b, AES_KEY_SIZE);
for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE) {
fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 0], ctx->aes_ctx->key->exp_data);
fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 1], ctx->aes_ctx->key->exp_data);
fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 2], ctx->aes_ctx->key->exp_data);
fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 3], ctx->aes_ctx->key->exp_data);
fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 4], ctx->aes_ctx->key->exp_data);
fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 5], ctx->aes_ctx->key->exp_data);
fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 6], ctx->aes_ctx->key->exp_data);
fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 7], ctx->aes_ctx->key->exp_data);
memcpy(&ctx->long_state[i], ctx->text, INIT_SIZE_BYTE);
}
xor_blocks_dst(&ctx->state.k[0], &ctx->state.k[32], ctx->a);
xor_blocks_dst(&ctx->state.k[16], &ctx->state.k[48], ctx->b);
for (i = 0; likely(i < ITER / 4); ++i) {
/* Dependency chain: address -> read value ------+
* written value <-+ hard function (AES or MUL) <+
* next address <-+
*/
/* Iteration 1 */
j = e2i(ctx->a);
fast_aesb_single_round(&ctx->long_state[j], ctx->c, ctx->a);
xor_blocks_dst(ctx->c, ctx->b, &ctx->long_state[j]);
/* Iteration 2 */
mul_sum_xor_dst(ctx->c, ctx->a, &ctx->long_state[e2i(ctx->c)]);
/* Iteration 3 */
j = e2i(ctx->a);
fast_aesb_single_round(&ctx->long_state[j], ctx->b, ctx->a);
xor_blocks_dst(ctx->b, ctx->c, &ctx->long_state[j]);
/* Iteration 4 */
mul_sum_xor_dst(ctx->b, ctx->a, &ctx->long_state[e2i(ctx->b)]);
}
memcpy(ctx->text, ctx->state.init, INIT_SIZE_BYTE);
oaes_key_import_data(ctx->aes_ctx, &ctx->state.hs.b[32], AES_KEY_SIZE);
for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE) {
xor_blocks(&ctx->text[0 * AES_BLOCK_SIZE], &ctx->long_state[i + 0 * AES_BLOCK_SIZE]);
fast_aesb_pseudo_round_mut(&ctx->text[0 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
xor_blocks(&ctx->text[1 * AES_BLOCK_SIZE], &ctx->long_state[i + 1 * AES_BLOCK_SIZE]);
fast_aesb_pseudo_round_mut(&ctx->text[1 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
xor_blocks(&ctx->text[2 * AES_BLOCK_SIZE], &ctx->long_state[i + 2 * AES_BLOCK_SIZE]);
fast_aesb_pseudo_round_mut(&ctx->text[2 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
xor_blocks(&ctx->text[3 * AES_BLOCK_SIZE], &ctx->long_state[i + 3 * AES_BLOCK_SIZE]);
fast_aesb_pseudo_round_mut(&ctx->text[3 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
xor_blocks(&ctx->text[4 * AES_BLOCK_SIZE], &ctx->long_state[i + 4 * AES_BLOCK_SIZE]);
fast_aesb_pseudo_round_mut(&ctx->text[4 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
xor_blocks(&ctx->text[5 * AES_BLOCK_SIZE], &ctx->long_state[i + 5 * AES_BLOCK_SIZE]);
fast_aesb_pseudo_round_mut(&ctx->text[5 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
xor_blocks(&ctx->text[6 * AES_BLOCK_SIZE], &ctx->long_state[i + 6 * AES_BLOCK_SIZE]);
fast_aesb_pseudo_round_mut(&ctx->text[6 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
xor_blocks(&ctx->text[7 * AES_BLOCK_SIZE], &ctx->long_state[i + 7 * AES_BLOCK_SIZE]);
fast_aesb_pseudo_round_mut(&ctx->text[7 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
}
memcpy(ctx->state.init, ctx->text, INIT_SIZE_BYTE);
hash_permutation(&ctx->state.hs);
/*memcpy(hash, &state, 32);*/
extra_hashes[ctx->state.hs.b[0] & 3](&ctx->state, 200, output);
oaes_free((OAES_CTX **) &ctx->aes_ctx);
}
#endif
int scanhash_cryptolight( struct work *work,
uint32_t max_nonce, uint64_t *hashes_done, struct thr_info *mythr)
{
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t *nonceptr = (uint32_t*) (((char*)pdata) + 39);
uint32_t n = *nonceptr - 1;
const uint32_t first_nonce = n + 1;
//const uint32_t Htarg = ptarget[7];
uint32_t _ALIGN(32) hash[HASH_SIZE / 4];
int thr_id = mythr->id;
struct cryptonight_ctx *ctx = (struct cryptonight_ctx*)malloc(sizeof(struct cryptonight_ctx));
#if defined(__AES__)
do {
*nonceptr = ++n;
cryptolight_hash_ctx_aes_ni(hash, pdata, 76, ctx);
if (unlikely(hash[7] < ptarget[7])) {
*hashes_done = n - first_nonce + 1;
free(ctx);
return true;
}
} while (likely((n <= max_nonce && !work_restart[thr_id].restart)));
#else
do {
*nonceptr = ++n;
cryptolight_hash_ctx(hash, pdata, 76, ctx);
if (unlikely(hash[7] < ptarget[7])) {
*hashes_done = n - first_nonce + 1;
free(ctx);
return true;
}
} while (likely((n <= max_nonce && !work_restart[thr_id].restart)));
#endif
free(ctx);
*hashes_done = n - first_nonce + 1;
return 0;
}
bool register_cryptolight_algo( algo_gate_t* gate )
{
applog(LOG_WARNING,"Cryptonight algorithm and variants are no longer");
applog(LOG_WARNING,"supported by cpuminer-opt. Shares submitted will");
applog(LOG_WARNING,"likely be rejected. Proceed at your own risk.\n");
register_json_rpc2( gate );
gate->optimizations = SSE2_OPT | AES_OPT;
gate->scanhash = (void*)&scanhash_cryptolight;
gate->hash = (void*)&cryptolight_hash;
gate->hash_suw = (void*)&cryptolight_hash;
return true;
};

View File

@@ -1,357 +0,0 @@
#if defined(__AES__)
#include <x86intrin.h>
#include <memory.h>
#include "cryptonight.h"
#include "miner.h"
#include "crypto/c_keccak.h"
#include <immintrin.h>
static inline void ExpandAESKey256_sub1(__m128i *tmp1, __m128i *tmp2)
{
__m128i tmp4;
*tmp2 = _mm_shuffle_epi32(*tmp2, 0xFF);
tmp4 = _mm_slli_si128(*tmp1, 0x04);
*tmp1 = _mm_xor_si128(*tmp1, tmp4);
tmp4 = _mm_slli_si128(tmp4, 0x04);
*tmp1 = _mm_xor_si128(*tmp1, tmp4);
tmp4 = _mm_slli_si128(tmp4, 0x04);
*tmp1 = _mm_xor_si128(*tmp1, tmp4);
*tmp1 = _mm_xor_si128(*tmp1, *tmp2);
}
static inline void ExpandAESKey256_sub2(__m128i *tmp1, __m128i *tmp3)
{
__m128i tmp2, tmp4;
tmp4 = _mm_aeskeygenassist_si128(*tmp1, 0x00);
tmp2 = _mm_shuffle_epi32(tmp4, 0xAA);
tmp4 = _mm_slli_si128(*tmp3, 0x04);
*tmp3 = _mm_xor_si128(*tmp3, tmp4);
tmp4 = _mm_slli_si128(tmp4, 0x04);
*tmp3 = _mm_xor_si128(*tmp3, tmp4);
tmp4 = _mm_slli_si128(tmp4, 0x04);
*tmp3 = _mm_xor_si128(*tmp3, tmp4);
*tmp3 = _mm_xor_si128(*tmp3, tmp2);
}
// Special thanks to Intel for helping me
// with ExpandAESKey256() and its subroutines
static inline void ExpandAESKey256(char *keybuf)
{
__m128i tmp1, tmp2, tmp3, *keys;
keys = (__m128i *)keybuf;
tmp1 = _mm_load_si128((__m128i *)keybuf);
tmp3 = _mm_load_si128((__m128i *)(keybuf+0x10));
tmp2 = _mm_aeskeygenassist_si128(tmp3, 0x01);
ExpandAESKey256_sub1(&tmp1, &tmp2);
keys[2] = tmp1;
ExpandAESKey256_sub2(&tmp1, &tmp3);
keys[3] = tmp3;
tmp2 = _mm_aeskeygenassist_si128(tmp3, 0x02);
ExpandAESKey256_sub1(&tmp1, &tmp2);
keys[4] = tmp1;
ExpandAESKey256_sub2(&tmp1, &tmp3);
keys[5] = tmp3;
tmp2 = _mm_aeskeygenassist_si128(tmp3, 0x04);
ExpandAESKey256_sub1(&tmp1, &tmp2);
keys[6] = tmp1;
ExpandAESKey256_sub2(&tmp1, &tmp3);
keys[7] = tmp3;
tmp2 = _mm_aeskeygenassist_si128(tmp3, 0x08);
ExpandAESKey256_sub1(&tmp1, &tmp2);
keys[8] = tmp1;
ExpandAESKey256_sub2(&tmp1, &tmp3);
keys[9] = tmp3;
tmp2 = _mm_aeskeygenassist_si128(tmp3, 0x10);
ExpandAESKey256_sub1(&tmp1, &tmp2);
keys[10] = tmp1;
ExpandAESKey256_sub2(&tmp1, &tmp3);
keys[11] = tmp3;
tmp2 = _mm_aeskeygenassist_si128(tmp3, 0x20);
ExpandAESKey256_sub1(&tmp1, &tmp2);
keys[12] = tmp1;
ExpandAESKey256_sub2(&tmp1, &tmp3);
keys[13] = tmp3;
tmp2 = _mm_aeskeygenassist_si128(tmp3, 0x40);
ExpandAESKey256_sub1(&tmp1, &tmp2);
keys[14] = tmp1;
}
// align to 64 byte cache line
typedef struct
{
uint8_t long_state[MEMORY] __attribute((aligned(64)));
union cn_slow_hash_state state;
uint8_t text[INIT_SIZE_BYTE] __attribute((aligned(64)));
uint64_t a[AES_BLOCK_SIZE >> 3] __attribute__((aligned(64)));
uint64_t b[AES_BLOCK_SIZE >> 3] __attribute__((aligned(64)));
uint8_t c[AES_BLOCK_SIZE] __attribute__((aligned(64)));
} cryptonight_ctx;
static __thread cryptonight_ctx ctx;
void cryptonight_hash_aes( void *restrict output, const void *input, int len )
{
uint8_t ExpandedKey[256] __attribute__((aligned(64)));
__m128i *longoutput, *expkey, *xmminput;
size_t i, j;
keccak( (const uint8_t*)input, 76, (char*)&ctx.state.hs.b, 200 );
if ( cryptonightV7 && len < 43 )
return;
const uint64_t tweak = cryptonightV7
? *((const uint64_t*) (((const uint8_t*)input) + 35))
^ ctx.state.hs.w[24] : 0;
memcpy( ExpandedKey, ctx.state.hs.b, AES_KEY_SIZE );
ExpandAESKey256( ExpandedKey );
memcpy( ctx.text, ctx.state.init, INIT_SIZE_BYTE );
longoutput = (__m128i*)ctx.long_state;
xmminput = (__m128i*)ctx.text;
expkey = (__m128i*)ExpandedKey;
// prefetch expkey, xmminput and enough longoutput for 4 iterations
_mm_prefetch( xmminput, _MM_HINT_T0 );
_mm_prefetch( xmminput + 4, _MM_HINT_T0 );
_mm_prefetch( expkey, _MM_HINT_T0 );
_mm_prefetch( expkey + 4, _MM_HINT_T0 );
_mm_prefetch( expkey + 8, _MM_HINT_T0 );
for ( i = 0; i < 64; i += 16 )
{
__builtin_prefetch( longoutput + i, 1, 0 );
__builtin_prefetch( longoutput + i + 4, 1, 0 );
__builtin_prefetch( longoutput + i + 8, 1, 0 );
__builtin_prefetch( longoutput + i + 12, 1, 0 );
}
// n-4 iterations
for ( i = 0; likely( i < MEMORY_M128I - 4*INIT_SIZE_M128I );
i += INIT_SIZE_M128I )
{
// prefetch 4 iterations ahead.
__builtin_prefetch( longoutput + i + 64, 1, 0 );
__builtin_prefetch( longoutput + i + 68, 1, 0 );
for ( j = 0; j < 10; j++ )
{
xmminput[0] = _mm_aesenc_si128( xmminput[0], expkey[j] );
xmminput[1] = _mm_aesenc_si128( xmminput[1], expkey[j] );
xmminput[2] = _mm_aesenc_si128( xmminput[2], expkey[j] );
xmminput[3] = _mm_aesenc_si128( xmminput[3], expkey[j] );
xmminput[4] = _mm_aesenc_si128( xmminput[4], expkey[j] );
xmminput[5] = _mm_aesenc_si128( xmminput[5], expkey[j] );
xmminput[6] = _mm_aesenc_si128( xmminput[6], expkey[j] );
xmminput[7] = _mm_aesenc_si128( xmminput[7], expkey[j] );
}
_mm_store_si128( &( longoutput[i ] ), xmminput[0] );
_mm_store_si128( &( longoutput[i+1] ), xmminput[1] );
_mm_store_si128( &( longoutput[i+2] ), xmminput[2] );
_mm_store_si128( &( longoutput[i+3] ), xmminput[3] );
_mm_store_si128( &( longoutput[i+4] ), xmminput[4] );
_mm_store_si128( &( longoutput[i+5] ), xmminput[5] );
_mm_store_si128( &( longoutput[i+6] ), xmminput[6] );
_mm_store_si128( &( longoutput[i+7] ), xmminput[7] );
}
// last 4 iterations
for ( ; likely( i < MEMORY_M128I ); i += INIT_SIZE_M128I )
{
for ( j = 0; j < 10; j++ )
{
xmminput[0] = _mm_aesenc_si128( xmminput[0], expkey[j] );
xmminput[1] = _mm_aesenc_si128( xmminput[1], expkey[j] );
xmminput[2] = _mm_aesenc_si128( xmminput[2], expkey[j] );
xmminput[3] = _mm_aesenc_si128( xmminput[3], expkey[j] );
xmminput[4] = _mm_aesenc_si128( xmminput[4], expkey[j] );
xmminput[5] = _mm_aesenc_si128( xmminput[5], expkey[j] );
xmminput[6] = _mm_aesenc_si128( xmminput[6], expkey[j] );
xmminput[7] = _mm_aesenc_si128( xmminput[7], expkey[j] );
}
_mm_store_si128( &( longoutput[i ] ), xmminput[0] );
_mm_store_si128( &( longoutput[i+1] ), xmminput[1] );
_mm_store_si128( &( longoutput[i+2] ), xmminput[2] );
_mm_store_si128( &( longoutput[i+3] ), xmminput[3] );
_mm_store_si128( &( longoutput[i+4] ), xmminput[4] );
_mm_store_si128( &( longoutput[i+5] ), xmminput[5] );
_mm_store_si128( &( longoutput[i+6] ), xmminput[6] );
_mm_store_si128( &( longoutput[i+7] ), xmminput[7] );
}
ctx.a[0] = ((uint64_t *)ctx.state.k)[0] ^ ((uint64_t *)ctx.state.k)[4];
ctx.b[0] = ((uint64_t *)ctx.state.k)[2] ^ ((uint64_t *)ctx.state.k)[6];
ctx.a[1] = ((uint64_t *)ctx.state.k)[1] ^ ((uint64_t *)ctx.state.k)[5];
ctx.b[1] = ((uint64_t *)ctx.state.k)[3] ^ ((uint64_t *)ctx.state.k)[7];
uint64_t a[2] __attribute((aligned(16))),
b[2] __attribute((aligned(16))),
c[2] __attribute((aligned(16)));
a[0] = ctx.a[0];
a[1] = ctx.a[1];
__m128i b_x = _mm_load_si128( (__m128i*)ctx.b );
__m128i a_x = _mm_load_si128( (__m128i*)a );
__m128i* lsa = (__m128i*)&ctx.long_state[ a[0] & 0x1FFFF0 ];
__m128i c_x = _mm_load_si128( lsa );
uint64_t *nextblock;
uint64_t hi, lo;
// n-1 iterations
for( i = 0; __builtin_expect( i < 0x7ffff, 1 ); i++ )
{
c_x = _mm_aesenc_si128( c_x, a_x );
_mm_store_si128( (__m128i*)c, c_x );
b_x = _mm_xor_si128( b_x, c_x );
nextblock = (uint64_t *)&ctx.long_state[c[0] & 0x1FFFF0];
_mm_store_si128( lsa, b_x );
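// CryptonightV7 tweak: XOR two bits of scratchpad byte 11, selected from
// the 0x75310 table by bits of the byte itself.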
if ( cryptonightV7 )
{
const uint8_t tmp = ( (const uint8_t*)(lsa) )[11];
const uint8_t index = ( ( (tmp >> 3) & 6 ) | (tmp & 1) ) << 1;
((uint8_t*)(lsa))[11] = tmp ^ ( ( 0x75310 >> index) & 0x30 );
}
b[0] = nextblock[0];
b[1] = nextblock[1];
// hi,lo = 64bit x 64bit multiply of c[0] and b[0]
__asm__( "mulq %3\n\t"
: "=d" ( hi ),
"=a" ( lo )
: "%a" ( c[0] ),
"rm" ( b[0] )
: "cc" );
b_x = c_x;
a[0] += hi;
a[1] += lo;
nextblock[0] = a[0];
nextblock[1] = cryptonightV7 ? a[1] ^ tweak : a[1];
a[0] ^= b[0];
a[1] ^= b[1];
lsa = (__m128i*)&ctx.long_state[ a[0] & 0x1FFFF0 ];
a_x = _mm_load_si128( (__m128i*)a );
c_x = _mm_load_si128( lsa );
}
// abbreviated nth iteration
c_x = _mm_aesenc_si128( c_x, a_x );
_mm_store_si128( (__m128i*)c, c_x );
b_x = _mm_xor_si128( b_x, c_x );
nextblock = (uint64_t *)&ctx.long_state[c[0] & 0x1FFFF0];
_mm_store_si128( lsa, b_x );
if ( cryptonightV7 )
{
const uint8_t tmp = ( (const uint8_t*)(lsa) )[11];
const uint8_t index = ( ( (tmp >> 3) & 6 ) | (tmp & 1) ) << 1;
((uint8_t*)(lsa))[11] = tmp ^ ( ( 0x75310 >> index) & 0x30 );
}
b[0] = nextblock[0];
b[1] = nextblock[1];
__asm__( "mulq %3\n\t"
: "=d" ( hi ),
"=a" ( lo )
: "%a" ( c[0] ),
"rm" ( b[0] )
: "cc" );
a[0] += hi;
a[1] += lo;
nextblock[0] = a[0];
nextblock[1] = cryptonightV7 ? a[1] ^ tweak : a[1];
a[0] ^= b[0];
a[1] ^= b[1];
memcpy( ExpandedKey, &ctx.state.hs.b[32], AES_KEY_SIZE );
ExpandAESKey256( ExpandedKey );
memcpy( ctx.text, ctx.state.init, INIT_SIZE_BYTE );
// prefetch expkey, all of xmminput and enough longoutput for 4 loops
_mm_prefetch( xmminput, _MM_HINT_T0 );
_mm_prefetch( xmminput + 4, _MM_HINT_T0 );
for ( i = 0; i < 64; i += 16 )
{
_mm_prefetch( longoutput + i, _MM_HINT_T0 );
_mm_prefetch( longoutput + i + 4, _MM_HINT_T0 );
_mm_prefetch( longoutput + i + 8, _MM_HINT_T0 );
_mm_prefetch( longoutput + i + 12, _MM_HINT_T0 );
}
_mm_prefetch( expkey, _MM_HINT_T0 );
_mm_prefetch( expkey + 4, _MM_HINT_T0 );
_mm_prefetch( expkey + 8, _MM_HINT_T0 );
// n-4 iterations
for ( i = 0; likely( i < MEMORY_M128I - 4*INIT_SIZE_M128I );
i += INIT_SIZE_M128I )
{
// stay 4 iterations ahead.
_mm_prefetch( longoutput + i + 64, _MM_HINT_T0 );
_mm_prefetch( longoutput + i + 68, _MM_HINT_T0 );
xmminput[0] = _mm_xor_si128( longoutput[i ], xmminput[0] );
xmminput[1] = _mm_xor_si128( longoutput[i+1], xmminput[1] );
xmminput[2] = _mm_xor_si128( longoutput[i+2], xmminput[2] );
xmminput[3] = _mm_xor_si128( longoutput[i+3], xmminput[3] );
xmminput[4] = _mm_xor_si128( longoutput[i+4], xmminput[4] );
xmminput[5] = _mm_xor_si128( longoutput[i+5], xmminput[5] );
xmminput[6] = _mm_xor_si128( longoutput[i+6], xmminput[6] );
xmminput[7] = _mm_xor_si128( longoutput[i+7], xmminput[7] );
for( j = 0; j < 10; j++ )
{
xmminput[0] = _mm_aesenc_si128( xmminput[0], expkey[j] );
xmminput[1] = _mm_aesenc_si128( xmminput[1], expkey[j] );
xmminput[2] = _mm_aesenc_si128( xmminput[2], expkey[j] );
xmminput[3] = _mm_aesenc_si128( xmminput[3], expkey[j] );
xmminput[4] = _mm_aesenc_si128( xmminput[4], expkey[j] );
xmminput[5] = _mm_aesenc_si128( xmminput[5], expkey[j] );
xmminput[6] = _mm_aesenc_si128( xmminput[6], expkey[j] );
xmminput[7] = _mm_aesenc_si128( xmminput[7], expkey[j] );
}
}
// last 4 iterations
for ( ; likely( i < MEMORY_M128I ); i += INIT_SIZE_M128I )
{
xmminput[0] = _mm_xor_si128( longoutput[i ], xmminput[0] );
xmminput[1] = _mm_xor_si128( longoutput[i+1], xmminput[1] );
xmminput[2] = _mm_xor_si128( longoutput[i+2], xmminput[2] );
xmminput[3] = _mm_xor_si128( longoutput[i+3], xmminput[3] );
xmminput[4] = _mm_xor_si128( longoutput[i+4], xmminput[4] );
xmminput[5] = _mm_xor_si128( longoutput[i+5], xmminput[5] );
xmminput[6] = _mm_xor_si128( longoutput[i+6], xmminput[6] );
xmminput[7] = _mm_xor_si128( longoutput[i+7], xmminput[7] );
for( j = 0; j < 10; j++ )
{
xmminput[0] = _mm_aesenc_si128( xmminput[0], expkey[j] );
xmminput[1] = _mm_aesenc_si128( xmminput[1], expkey[j] );
xmminput[2] = _mm_aesenc_si128( xmminput[2], expkey[j] );
xmminput[3] = _mm_aesenc_si128( xmminput[3], expkey[j] );
xmminput[4] = _mm_aesenc_si128( xmminput[4], expkey[j] );
xmminput[5] = _mm_aesenc_si128( xmminput[5], expkey[j] );
xmminput[6] = _mm_aesenc_si128( xmminput[6], expkey[j] );
xmminput[7] = _mm_aesenc_si128( xmminput[7], expkey[j] );
}
}
memcpy( ctx.state.init, ctx.text, INIT_SIZE_BYTE);
keccakf( (uint64_t*)&ctx.state.hs.w, 24 );
extra_hashes[ctx.state.hs.b[0] & 3](&ctx.state, 200, output);
}
#endif

View File

@@ -1,133 +0,0 @@
// Copyright (c) 2012-2013 The Cryptonote developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
// Modified for CPUminer by Lucas Jones
#include "cpuminer-config.h"
#include "algo-gate-api.h"
#if defined(__AES__)
#include "algo/groestl/aes_ni/hash-groestl256.h"
#else
#include "crypto/c_groestl.h"
#endif
#include "crypto/c_blake256.h"
#include "crypto/c_jh.h"
#include "crypto/c_skein.h"
#include "cryptonight.h"
/*
#if defined __unix__ && (!defined __APPLE__)
#include <sys/mman.h>
#elif defined _WIN32
#include <windows.h>
#endif
*/
void do_blake_hash(const void* input, size_t len, char* output) {
blake256_hash((uint8_t*)output, input, len);
}
void do_groestl_hash(const void* input, size_t len, char* output) {
#if defined(__AES__)
hashState_groestl256 ctx;
init_groestl256( &ctx, 32 );
update_and_final_groestl256( &ctx, output, input, len * 8 );
#else
groestl(input, len * 8, (uint8_t*)output);
#endif
}
void do_jh_hash(const void* input, size_t len, char* output) {
jh_hash(32 * 8, input, 8 * len, (uint8_t*)output);
}
void do_skein_hash(const void* input, size_t len, char* output) {
skein_hash(8 * 32, input, 8 * len, (uint8_t*)output);
}
void (* const extra_hashes[4])( const void *, size_t, char *) =
{ do_blake_hash, do_groestl_hash, do_jh_hash, do_skein_hash };
void cryptonight_hash( void *restrict output, const void *input, int len )
{
#if defined(__AES__)
cryptonight_hash_aes( output, input, len );
#else
cryptonight_hash_ctx ( output, input, len );
#endif
}
void cryptonight_hash_suw( void *restrict output, const void *input )
{
#if defined(__AES__)
cryptonight_hash_aes( output, input, 76 );
#else
cryptonight_hash_ctx ( output, input, 76 );
#endif
}
bool cryptonightV7 = false;
int scanhash_cryptonight( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id;
uint32_t *nonceptr = (uint32_t*) (((char*)pdata) + 39);
uint32_t n = *nonceptr - 1;
const uint32_t first_nonce = n + 1;
const uint32_t Htarg = ptarget[7];
uint32_t hash[32 / 4] __attribute__((aligned(32)));
// if ( ( cryptonightV7 && ( *(uint8_t*)pdata < 7 ) )
// || ( !cryptonightV7 && ( *(uint8_t*)pdata == 7 ) ) )
// applog(LOG_WARNING,"Cryptonight variant mismatch, shares may be rejected.");
do
{
*nonceptr = ++n;
cryptonight_hash( hash, pdata, 76 );
if (unlikely( hash[7] < Htarg ))
{
*hashes_done = n - first_nonce + 1;
// work_set_target_ratio( work, hash );
return true;
}
} while (likely((n <= max_nonce && !work_restart[thr_id].restart)));
*hashes_done = n - first_nonce + 1;
return 0;
}
bool register_cryptonight_algo( algo_gate_t* gate )
{
applog(LOG_WARNING,"Cryptonight algorithm and variants are no longer");
applog(LOG_WARNING,"supported by cpuminer-opt. Shares submitted will");
applog(LOG_WARNING,"likely be rejected. Proceed at your own risk.\n");
cryptonightV7 = false;
register_json_rpc2( gate );
gate->optimizations = SSE2_OPT | AES_OPT;
gate->scanhash = (void*)&scanhash_cryptonight;
gate->hash = (void*)&cryptonight_hash;
gate->hash_suw = (void*)&cryptonight_hash_suw;
return true;
};
bool register_cryptonightv7_algo( algo_gate_t* gate )
{
applog(LOG_WARNING,"Cryptonight algorithm and variants are no longer");
applog(LOG_WARNING,"supported by cpuminer-opt. Shares submitted will");
applog(LOG_WARNING,"likely be rejected. Proceed at your own risk.\n");
cryptonightV7 = true;
register_json_rpc2( gate );
gate->optimizations = SSE2_OPT | AES_OPT;
gate->scanhash = (void*)&scanhash_cryptonight;
gate->hash = (void*)&cryptonight_hash;
gate->hash_suw = (void*)&cryptonight_hash_suw;
return true;
};

View File

@@ -1,310 +0,0 @@
// Copyright (c) 2012-2013 The Cryptonote developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
// Modified for CPUminer by Lucas Jones
#include "miner.h"
#include <memory.h>
#if defined(__arm__) || defined(_MSC_VER)
#ifndef NOASM
#define NOASM
#endif
#endif
#include "crypto/oaes_lib.h"
#include "crypto/c_keccak.h"
#include "crypto/c_groestl.h"
#include "crypto/c_blake256.h"
#include "crypto/c_jh.h"
#include "crypto/c_skein.h"
#include "crypto/int-util.h"
//#include "crypto/hash-ops.h"
#include "cryptonight.h"
#if USE_INT128
#if __GNUC__ == 4 && __GNUC_MINOR__ >= 4 && __GNUC_MINOR__ < 6
typedef unsigned int uint128_t __attribute__ ((__mode__ (TI)));
#elif defined (_MSC_VER)
/* only for mingw64 on windows */
#undef USE_INT128
#define USE_INT128 (0)
#else
typedef __uint128_t uint128_t;
#endif
#endif
#define LITE 0
#if LITE /* cryptonight-light */
#define MEMORY (1 << 20)
#define ITER (1 << 19)
#else
#define MEMORY (1 << 21) /* 2 MiB */
#define ITER (1 << 20)
#endif
#define AES_BLOCK_SIZE 16
#define AES_KEY_SIZE 32 /*16*/
#define INIT_SIZE_BLK 8
#define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE)
/*
#pragma pack(push, 1)
union cn_slow_hash_state {
union hash_state hs;
struct {
uint8_t k[64];
uint8_t init[INIT_SIZE_BYTE];
};
};
#pragma pack(pop)
static void do_blake_hash(const void* input, size_t len, char* output) {
blake256_hash((uint8_t*)output, input, len);
}
static void do_groestl_hash(const void* input, size_t len, char* output) {
groestl(input, len * 8, (uint8_t*)output);
}
static void do_jh_hash(const void* input, size_t len, char* output) {
int r = jh_hash(HASH_SIZE * 8, input, 8 * len, (uint8_t*)output);
assert(likely(SUCCESS == r));
}
static void do_skein_hash(const void* input, size_t len, char* output) {
int r = skein_hash(8 * HASH_SIZE, input, 8 * len, (uint8_t*)output);
assert(likely(SKEIN_SUCCESS == r));
}
*/
extern int aesb_single_round(const uint8_t *in, uint8_t*out, const uint8_t *expandedKey);
extern int aesb_pseudo_round_mut(uint8_t *val, uint8_t *expandedKey);
#if !defined(_MSC_VER) && !defined(NOASM)
extern int fast_aesb_single_round(const uint8_t *in, uint8_t*out, const uint8_t *expandedKey);
extern int fast_aesb_pseudo_round_mut(uint8_t *val, uint8_t *expandedKey);
#else
#define fast_aesb_single_round aesb_single_round
#define fast_aesb_pseudo_round_mut aesb_pseudo_round_mut
#endif
#if defined(NOASM) || !defined(__x86_64__)
static uint64_t mul128(uint64_t multiplier, uint64_t multiplicand, uint64_t* product_hi) {
// multiplier = ab = a * 2^32 + b
// multiplicand = cd = c * 2^32 + d
// ab * cd = a * c * 2^64 + (a * d + b * c) * 2^32 + b * d
uint64_t a = hi_dword(multiplier);
uint64_t b = lo_dword(multiplier);
uint64_t c = hi_dword(multiplicand);
uint64_t d = lo_dword(multiplicand);
uint64_t ac = a * c;
uint64_t ad = a * d;
uint64_t bc = b * c;
uint64_t bd = b * d;
uint64_t adbc = ad + bc;
uint64_t adbc_carry = adbc < ad ? 1 : 0;
// multiplier * multiplicand = product_hi * 2^64 + product_lo
uint64_t product_lo = bd + (adbc << 32);
uint64_t product_lo_carry = product_lo < bd ? 1 : 0;
*product_hi = ac + (adbc >> 32) + (adbc_carry << 32) + product_lo_carry;
assert(ac <= *product_hi);
return product_lo;
}
#else
extern uint64_t mul128(uint64_t multiplier, uint64_t multiplicand, uint64_t* product_hi);
#endif
/*
static void (* const extra_hashes[4])(const void *, size_t, char *) = {
do_blake_hash, do_groestl_hash, do_jh_hash, do_skein_hash
};
*/
static inline size_t e2i(const uint8_t* a) {
#if !LITE
return ((uint32_t *)a)[0] & 0x1FFFF0;
#else
return ((uint32_t *)a)[0] & 0xFFFF0;
#endif
}
static inline void mul_sum_xor_dst( const uint8_t* a, uint8_t* c, uint8_t* dst,
const uint64_t tweak )
{
uint64_t hi, lo = mul128(((uint64_t*) a)[0], ((uint64_t*) dst)[0], &hi) + ((uint64_t*) c)[1];
hi += ((uint64_t*) c)[0];
((uint64_t*) c)[0] = ((uint64_t*) dst)[0] ^ hi;
((uint64_t*) c)[1] = ((uint64_t*) dst)[1] ^ lo;
((uint64_t*) dst)[0] = hi;
((uint64_t*) dst)[1] = cryptonightV7 ? lo ^ tweak : lo;
}
static inline void xor_blocks(uint8_t* a, const uint8_t* b) {
#if USE_INT128
*((uint128_t*) a) ^= *((uint128_t*) b);
#else
((uint64_t*) a)[0] ^= ((uint64_t*) b)[0];
((uint64_t*) a)[1] ^= ((uint64_t*) b)[1];
#endif
}
static inline void xor_blocks_dst(const uint8_t* a, const uint8_t* b, uint8_t* dst) {
#if USE_INT128
*((uint128_t*) dst) = *((uint128_t*) a) ^ *((uint128_t*) b);
#else
((uint64_t*) dst)[0] = ((uint64_t*) a)[0] ^ ((uint64_t*) b)[0];
((uint64_t*) dst)[1] = ((uint64_t*) a)[1] ^ ((uint64_t*) b)[1];
#endif
}
typedef struct {
uint8_t _ALIGN(16) long_state[MEMORY];
union cn_slow_hash_state state;
uint8_t _ALIGN(16) text[INIT_SIZE_BYTE];
uint8_t _ALIGN(16) a[AES_BLOCK_SIZE];
uint8_t _ALIGN(16) b[AES_BLOCK_SIZE];
uint8_t _ALIGN(16) c[AES_BLOCK_SIZE];
oaes_ctx* aes_ctx;
} cryptonight_ctx;
static __thread cryptonight_ctx ctx;
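/* One context per mining thread, in thread-local storage: the 2 MiB
   long_state is reused across hashes instead of being reallocated. */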
void cryptonight_hash_ctx(void* output, const void* input, int len)
{
// hash_process(&ctx.state.hs, (const uint8_t*) input, len);
keccak( (const uint8_t*)input, 76, (char*)&ctx.state.hs.b, 200 );
if ( cryptonightV7 && len < 43 )
return;
const uint64_t tweak = cryptonightV7
? *((const uint64_t*) (((const uint8_t*)input) + 35))
^ ctx.state.hs.w[24] : 0;
ctx.aes_ctx = (oaes_ctx*) oaes_alloc();
__builtin_prefetch( ctx.text, 0, 3 );
__builtin_prefetch( ctx.text + 64, 0, 3 );
__builtin_prefetch( ctx.long_state, 1, 0 );
__builtin_prefetch( ctx.long_state + 64, 1, 0 );
__builtin_prefetch( ctx.long_state + 128, 1, 0 );
__builtin_prefetch( ctx.long_state + 192, 1, 0 );
__builtin_prefetch( ctx.long_state + 256, 1, 0 );
__builtin_prefetch( ctx.long_state + 320, 1, 0 );
__builtin_prefetch( ctx.long_state + 384, 1, 0 );
__builtin_prefetch( ctx.long_state + 448, 1, 0 );
size_t i, j;
memcpy(ctx.text, ctx.state.init, INIT_SIZE_BYTE);
oaes_key_import_data(ctx.aes_ctx, ctx.state.hs.b, AES_KEY_SIZE);
for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE) {
__builtin_prefetch( ctx.long_state + i + 512, 1, 0 );
__builtin_prefetch( ctx.long_state + i + 576, 1, 0 );
aesb_pseudo_round_mut(&ctx.text[AES_BLOCK_SIZE * 0], ctx.aes_ctx->key->exp_data);
aesb_pseudo_round_mut(&ctx.text[AES_BLOCK_SIZE * 1], ctx.aes_ctx->key->exp_data);
aesb_pseudo_round_mut(&ctx.text[AES_BLOCK_SIZE * 2], ctx.aes_ctx->key->exp_data);
aesb_pseudo_round_mut(&ctx.text[AES_BLOCK_SIZE * 3], ctx.aes_ctx->key->exp_data);
aesb_pseudo_round_mut(&ctx.text[AES_BLOCK_SIZE * 4], ctx.aes_ctx->key->exp_data);
aesb_pseudo_round_mut(&ctx.text[AES_BLOCK_SIZE * 5], ctx.aes_ctx->key->exp_data);
aesb_pseudo_round_mut(&ctx.text[AES_BLOCK_SIZE * 6], ctx.aes_ctx->key->exp_data);
aesb_pseudo_round_mut(&ctx.text[AES_BLOCK_SIZE * 7], ctx.aes_ctx->key->exp_data);
memcpy(&ctx.long_state[i], ctx.text, INIT_SIZE_BYTE);
}
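    /* a and b seed the main loop from the Keccak state:
       a = k[0..15] ^ k[32..47], b = k[16..31] ^ k[48..63]. */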
xor_blocks_dst(&ctx.state.k[0], &ctx.state.k[32], ctx.a);
xor_blocks_dst(&ctx.state.k[16], &ctx.state.k[48], ctx.b);
for (i = 0; likely(i < ITER / 4); ++i)
{
/* Dependency chain: address -> read value ------+
* written value <-+ hard function (AES or MUL) <+
* next address <-+
*/
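    /* CryptonightV7 adds Monero's VARIANT1_1 tweak after each AES
       half-round below: bits 4-5 of byte 11 of the written block are
       XOR-substituted via the packed table 0x75310, indexed by bits
       0, 4 and 5 of that same byte. */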
/* Iteration 1 */
j = e2i(ctx.a);
aesb_single_round(&ctx.long_state[j], ctx.c, ctx.a);
xor_blocks_dst(ctx.c, ctx.b, &ctx.long_state[j]);
if ( cryptonightV7 )
{
uint8_t *lsa = (uint8_t*)&ctx.long_state[((uint64_t *)(ctx.a))[0] & 0x1FFFF0];
const uint8_t tmp = lsa[11];
const uint8_t index = ( ( (tmp >> 3) & 6 ) | (tmp & 1) ) << 1;
lsa[11] = tmp ^ ( ( 0x75310 >> index) & 0x30 );
}
/* Iteration 2 */
mul_sum_xor_dst(ctx.c, ctx.a, &ctx.long_state[e2i(ctx.c)], tweak );
/* Iteration 3 */
j = e2i(ctx.a);
aesb_single_round(&ctx.long_state[j], ctx.b, ctx.a);
xor_blocks_dst(ctx.b, ctx.c, &ctx.long_state[j]);
if ( cryptonightV7 )
{
uint8_t *lsa = (uint8_t*)&ctx.long_state[((uint64_t *)(ctx.a))[0] & 0x1FFFF0];
const uint8_t tmp = lsa[11];
const uint8_t index = ( ( (tmp >> 3) & 6 ) | (tmp & 1) ) << 1;
lsa[11] = tmp ^ ( ( 0x75310 >> index) & 0x30 );
}
/* Iteration 4 */
mul_sum_xor_dst(ctx.b, ctx.a, &ctx.long_state[e2i(ctx.b)], tweak );
}
__builtin_prefetch( ctx.text, 0, 3 );
__builtin_prefetch( ctx.text + 64, 0, 3 );
__builtin_prefetch( ctx.long_state, 1, 0 );
__builtin_prefetch( ctx.long_state + 64, 1, 0 );
__builtin_prefetch( ctx.long_state + 128, 1, 0 );
__builtin_prefetch( ctx.long_state + 192, 1, 0 );
__builtin_prefetch( ctx.long_state + 256, 1, 0 );
__builtin_prefetch( ctx.long_state + 320, 1, 0 );
__builtin_prefetch( ctx.long_state + 384, 1, 0 );
__builtin_prefetch( ctx.long_state + 448, 1, 0 );
memcpy(ctx.text, ctx.state.init, INIT_SIZE_BYTE);
oaes_key_import_data(ctx.aes_ctx, &ctx.state.hs.b[32], AES_KEY_SIZE);
for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE) {
__builtin_prefetch( ctx.long_state + i + 512, 1, 0 );
__builtin_prefetch( ctx.long_state + i + 576, 1, 0 );
xor_blocks(&ctx.text[0 * AES_BLOCK_SIZE], &ctx.long_state[i + 0 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx.text[0 * AES_BLOCK_SIZE], ctx.aes_ctx->key->exp_data);
xor_blocks(&ctx.text[1 * AES_BLOCK_SIZE], &ctx.long_state[i + 1 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx.text[1 * AES_BLOCK_SIZE], ctx.aes_ctx->key->exp_data);
xor_blocks(&ctx.text[2 * AES_BLOCK_SIZE], &ctx.long_state[i + 2 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx.text[2 * AES_BLOCK_SIZE], ctx.aes_ctx->key->exp_data);
xor_blocks(&ctx.text[3 * AES_BLOCK_SIZE], &ctx.long_state[i + 3 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx.text[3 * AES_BLOCK_SIZE], ctx.aes_ctx->key->exp_data);
xor_blocks(&ctx.text[4 * AES_BLOCK_SIZE], &ctx.long_state[i + 4 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx.text[4 * AES_BLOCK_SIZE], ctx.aes_ctx->key->exp_data);
xor_blocks(&ctx.text[5 * AES_BLOCK_SIZE], &ctx.long_state[i + 5 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx.text[5 * AES_BLOCK_SIZE], ctx.aes_ctx->key->exp_data);
xor_blocks(&ctx.text[6 * AES_BLOCK_SIZE], &ctx.long_state[i + 6 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx.text[6 * AES_BLOCK_SIZE], ctx.aes_ctx->key->exp_data);
xor_blocks(&ctx.text[7 * AES_BLOCK_SIZE], &ctx.long_state[i + 7 * AES_BLOCK_SIZE]);
aesb_pseudo_round_mut(&ctx.text[7 * AES_BLOCK_SIZE], ctx.aes_ctx->key->exp_data);
}
memcpy(ctx.state.init, ctx.text, INIT_SIZE_BYTE);
// hash_permutation(&ctx.state.hs);
keccakf( (uint64_t*)&ctx.state.hs.w, 24 );
/*memcpy(hash, &state, 32);*/
extra_hashes[ctx.state.hs.b[0] & 3](&ctx.state, 200, output);
oaes_free((OAES_CTX **) &ctx.aes_ctx);
}
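A minimal caller sketch, for orientation only (hypothetical, not part of
the diff; note the function absorbs a fixed 76 bytes regardless of len,
which is only checked for the V7 tweak):
   static void example_cryptonight( const uint8_t *header /* >= 76 bytes */,
                                    uint8_t hash[32] )
   {
      cryptonight_hash_ctx( hash, header, 76 );
   }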

View File

@@ -1,51 +0,0 @@
#ifndef __CRYPTONIGHT_H_INCLUDED
#define __CRYPTONIGHT_H_INCLUDED
#include <stddef.h>
#include "crypto/oaes_lib.h"
#include "miner.h"
#define MEMORY (1 << 21) /* 2 MiB */
#define MEMORY_M128I (MEMORY >> 4) // 2 MiB / 16 = 128 ki * __m128i
#define ITER (1 << 20)
#define AES_BLOCK_SIZE 16
#define AES_KEY_SIZE 32 /*16*/
#define INIT_SIZE_BLK 8
#define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE) // 128
#define INIT_SIZE_M128I (INIT_SIZE_BYTE >> 4) // 8
#pragma pack(push, 1)
union hash_state {
uint8_t b[200];
uint64_t w[25];
};
#pragma pack(pop)
#pragma pack(push, 1)
union cn_slow_hash_state {
union hash_state hs;
struct {
uint8_t k[64];
uint8_t init[INIT_SIZE_BYTE];
};
};
#pragma pack(pop)
void do_blake_hash(const void* input, size_t len, char* output);
void do_groestl_hash(const void* input, size_t len, char* output);
void do_jh_hash(const void* input, size_t len, char* output);
void do_skein_hash(const void* input, size_t len, char* output);
void cryptonight_hash_ctx(void* output, const void* input, int len);
void keccakf(uint64_t st[25], int rounds);
extern void (* const extra_hashes[4])(const void *, size_t, char *);
int scanhash_cryptonight( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void cryptonight_hash_aes( void *restrict output, const void *input, int len );
extern bool cryptonightV7;
#endif

View File

@@ -179,14 +179,6 @@ int cube_4way_full( cube_4way_context *sp, void *output, int hashbitlen,
sp->rounds = 16;
sp->pos = 0;
h[ 0] = m512_const1_128( iv[0] );
h[ 1] = m512_const1_128( iv[1] );
h[ 2] = m512_const1_128( iv[2] );
h[ 3] = m512_const1_128( iv[3] );
h[ 4] = m512_const1_128( iv[4] );
h[ 5] = m512_const1_128( iv[5] );
h[ 6] = m512_const1_128( iv[6] );
h[ 7] = m512_const1_128( iv[7] );
h[ 0] = m512_const1_128( iv[0] );
h[ 1] = m512_const1_128( iv[1] );
h[ 2] = m512_const1_128( iv[2] );
@@ -447,14 +439,6 @@ int cube_2way_full( cube_2way_context *sp, void *output, int hashbitlen,
sp->rounds = 16;
sp->pos = 0;
h[ 0] = m256_const1_128( iv[0] );
h[ 1] = m256_const1_128( iv[1] );
h[ 2] = m256_const1_128( iv[2] );
h[ 3] = m256_const1_128( iv[3] );
h[ 4] = m256_const1_128( iv[4] );
h[ 5] = m256_const1_128( iv[5] );
h[ 6] = m256_const1_128( iv[6] );
h[ 7] = m256_const1_128( iv[7] );
h[ 0] = m256_const1_128( iv[0] );
h[ 1] = m256_const1_128( iv[1] );
h[ 2] = m256_const1_128( iv[2] );

View File

@@ -28,6 +28,27 @@ int cube_4way_update_close( cube_4way_context *sp, void *output,
int cube_4way_full( cube_4way_context *sp, void *output, int hashbitlen,
const void *data, size_t size );
int cube_4x256_full( cube_4way_context *sp, void *output, int hashbitlen,
const void *data, size_t size );
#define cube512_4way_init( sp ) cube_4way_init( sp, 512, 16, 32 )
#define cube512_4way_update cube_4way_update
#define cube512_4way_update_close cube_4way_update_close
#define cube512_4way_close cube_4way_close
#define cube512_4way_full( sp, output, data, size ) \
cube_4way_full( sp, output, 512, data, size )
#define cube512_4x256_full( sp, output, data, size ) \
cube_4x256_full( sp, output, 512, data, size )
#define cube256_4way_init( sp ) cube_4way_init( sp, 256, 16, 32 )
#define cube256_4way_update cube_4way_update
#define cube256_4way_update_close cube_4way_update_close
#define cube256_4way_close cube_4way_close
#define cube256_4way_full( sp, output, data, size ) \
cube_4way_full( sp, output, 256, data, size )
#define cube256_4x256_full( sp, output, data, size ) \
cube_4x256_full( sp, output, 256, data, size )
#endif
// 2x128, 2 way parallel SSE2

View File

@@ -22,18 +22,26 @@ typedef struct
} echo_4way_context __attribute__ ((aligned (64)));
int echo_4way_init( echo_4way_context *state, int hashbitlen );
#define echo512_4way_init( state ) echo_4way_init( state, 512 )
#define echo256_4way_init( state ) echo_4way_init( state, 256 )
int echo_4way_update( echo_4way_context *state, const void *data,
unsigned int databitlen);
#define echo512_4way_update echo_4way_update
int echo_4way_close( echo_4way_context *state, void *hashval );
#define echo512_4way_close echo_4way_close
int echo_4way_update_close( echo_4way_context *state, void *hashval,
const void *data, int databitlen );
#define echo512_4way_update_close echo_4way_update_close
int echo_4way_full( echo_4way_context *ctx, void *hashval, int nHashSize,
const void *data, int datalen );
#define echo512_4way_full( state, hashval, data, datalen ) \
echo_4way_full( state, hashval, 512, data, datalen )
#define echo256_4way_full( state, hashval, data, datalen ) \
echo_4way_full( state, hashval, 256, data, datalen )
#endif
#endif

View File

@@ -74,6 +74,14 @@ void sph_fugue512_close(void *cc, void *dst);
void sph_fugue512_addbits_and_close(
void *cc, unsigned ub, unsigned n, void *dst);
#define sph_fugue512_full( cc, dst, data, len ) \
do{ \
sph_fugue512_init( cc ); \
sph_fugue512( cc, data, len ); \
sph_fugue512_close( cc, dst ); \
}while(0)
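/* Usage sketch: one call replaces the init/update/close sequence:
      sph_fugue512_context cc;
      uint8_t digest[64];
      sph_fugue512_full( &cc, digest, msg, msg_len );
*/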
#ifdef __cplusplus
}
#endif

View File

@@ -144,7 +144,7 @@ int hodl_scanhash( struct work* work, uint32_t max_nonce,
#if defined(__AES__)
GenRandomGarbage( (CacheEntry*)hodl_scratchbuf, work->data, mythr->id );
pthread_barrier_wait( &hodl_barrier );
return scanhash_hodl_wolf( work, max_nonce, hashes_done, thr_info );
return scanhash_hodl_wolf( work, max_nonce, hashes_done, mythr );
#endif
return false;
}

View File

@@ -129,9 +129,10 @@ int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce,
if( FinalPoW[7] <= ptarget[7] )
{
pdata[20] = swab32( BlockHdr[20] );
pdata[21] = swab32( BlockHdr[21] );
*hashes_done = CollisionCount;
return(1);
pdata[21] = swab32( BlockHdr[21] );
*hashes_done = CollisionCount;
submit_solution( work, FinalPoW, mythr );
return(0);
}
}
}
@@ -198,7 +199,8 @@ int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce,
pdata[20] = swab32( BlockHdr[20] );
pdata[21] = swab32( BlockHdr[21] );
*hashes_done = CollisionCount;
return(1);
submit_solution( work, FinalPoW, mythr );
return(0);
}
}
}

View File

@@ -76,37 +76,34 @@ int scanhash_allium( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(128) hash[8];
uint32_t _ALIGN(128) endiandata[20];
uint32_t _ALIGN(128) edata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t nonce = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
const int thr_id = mythr->id;
if ( opt_benchmark )
ptarget[7] = 0x3ffff;
for ( int i = 0; i < 19; i++ )
be32enc( &endiandata[i], pdata[i] );
edata[i] = bswap_32( pdata[i] );
sph_blake256_init( &allium_ctx.blake );
sph_blake256( &allium_ctx.blake, endiandata, 64 );
sph_blake256( &allium_ctx.blake, edata, 64 );
do {
be32enc( &endiandata[19], nonce );
allium_hash( hash, endiandata );
if ( hash[7] <= Htarg )
if ( fulltest( hash, ptarget ) && !opt_benchmark )
edata[19] = nonce;
allium_hash( hash, edata );
if ( valid_hash( hash, ptarget ) && !opt_benchmark )
{
pdata[19] = nonce;
pdata[19] = bswap_32( nonce );
submit_solution( work, hash, mythr );
}
nonce++;
} while ( nonce < max_nonce && !work_restart[thr_id].restart );
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
*hashes_done = pdata[19] - first_nonce;
return 0;
}

View File

@@ -94,12 +94,12 @@ bool lyra2rev2_thread_init()
const int64_t ROW_LEN_BYTES = ROW_LEN_INT64 * 8;
int size = (int64_t)ROW_LEN_BYTES * 4; // nRows;
#if defined (LYRA2REV2_8WAY)
#if defined (LYRA2REV2_16WAY)
l2v2_wholeMatrix = _mm_malloc( 2 * size, 64 ); // 2 way
init_lyra2rev2_8way_ctx();
#elif defined (LYRA2REV2_4WAY)
init_lyra2rev2_16way_ctx();
#elif defined (LYRA2REV2_8WAY)
l2v2_wholeMatrix = _mm_malloc( size, 64 );
init_lyra2rev2_4way_ctx();
init_lyra2rev2_8way_ctx();
#else
l2v2_wholeMatrix = _mm_malloc( size, 64 );
init_lyra2rev2_ctx();
@@ -109,17 +109,17 @@ bool lyra2rev2_thread_init()
bool register_lyra2rev2_algo( algo_gate_t* gate )
{
#if defined (LYRA2REV2_8WAY)
#if defined (LYRA2REV2_16WAY)
gate->scanhash = (void*)&scanhash_lyra2rev2_16way;
gate->hash = (void*)&lyra2rev2_16way_hash;
#elif defined (LYRA2REV2_8WAY)
gate->scanhash = (void*)&scanhash_lyra2rev2_8way;
gate->hash = (void*)&lyra2rev2_8way_hash;
#elif defined (LYRA2REV2_4WAY)
gate->scanhash = (void*)&scanhash_lyra2rev2_4way;
gate->hash = (void*)&lyra2rev2_4way_hash;
#else
gate->scanhash = (void*)&scanhash_lyra2rev2;
gate->hash = (void*)&lyra2rev2_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT;
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
gate->miner_thread_init = (void*)&lyra2rev2_thread_init;
opt_target_factor = 256.0;
return true;
@@ -228,13 +228,14 @@ void phi2_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
bool register_phi2_algo( algo_gate_t* gate )
{
// init_phi2_ctx();
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT;
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT | VAES_OPT;
gate->get_work_data_size = (void*)&phi2_get_work_data_size;
gate->decode_extra_data = (void*)&phi2_decode_extra_data;
gate->build_extraheader = (void*)&phi2_build_extraheader;
opt_target_factor = 256.0;
#if defined(PHI2_4WAY)
#if defined(PHI2_8WAY)
gate->scanhash = (void*)&scanhash_phi2_8way;
#elif defined(PHI2_4WAY)
gate->scanhash = (void*)&scanhash_phi2_4way;
#else
init_phi2_ctx();

View File

@@ -51,30 +51,32 @@ bool init_lyra2rev3_ctx();
//////////////////////////////////
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define LYRA2REV2_8WAY 1
#define LYRA2REV2_16WAY 1
#elif defined(__AVX2__)
#define LYRA2REV2_4WAY 1
#define LYRA2REV2_8WAY 1
#endif
extern __thread uint64_t* l2v2_wholeMatrix;
bool register_lyra2rev2_algo( algo_gate_t* gate );
#if defined(LYRA2REV2_8WAY)
#if defined(LYRA2REV2_16WAY)
void lyra2rev2_16way_hash( void *state, const void *input );
int scanhash_lyra2rev2_16way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool init_lyra2rev2_16way_ctx();
#elif defined(LYRA2REV2_8WAY)
void lyra2rev2_8way_hash( void *state, const void *input );
int scanhash_lyra2rev2_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool init_lyra2rev2_8way_ctx();
#elif defined(LYRA2REV2_4WAY)
void lyra2rev2_4way_hash( void *state, const void *input );
int scanhash_lyra2rev2_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool init_lyra2rev2_4way_ctx();
#else
void lyra2rev2_hash( void *state, const void *input );
int scanhash_lyra2rev2( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
@@ -184,19 +186,26 @@ bool init_allium_ctx();
/////////////////////////////////////////
#if defined(__AVX2__) && defined(__AES__)
// #define PHI2_4WAY
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define PHI2_8WAY 1
#elif defined(__AVX2__) && defined(__AES__)
#define PHI2_4WAY 1
#endif
extern bool phi2_has_roots;
bool register_phi2_algo( algo_gate_t* gate );
#if defined(PHI2_4WAY)
#if defined(PHI2_8WAY)
void phi2_8way_hash( void *state, const void *input );
int scanhash_phi2_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined(PHI2_4WAY)
void phi2_hash_4way( void *state, const void *input );
int scanhash_phi2_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
//void init_phi2_ctx();
#else

View File

@@ -7,23 +7,227 @@
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/cubehash/cube-hash-2way.h"
#if defined (LYRA2REV2_8WAY)
#if defined (LYRA2REV2_16WAY)
typedef struct {
blake256_16way_context blake;
keccak256_8way_context keccak;
cubehashParam cube;
skein256_8way_context skein;
bmw256_16way_context bmw;
} lyra2v2_16way_ctx_holder __attribute__ ((aligned (64)));
static lyra2v2_16way_ctx_holder l2v2_16way_ctx;
bool init_lyra2rev2_16way_ctx()
{
keccak256_8way_init( &l2v2_16way_ctx.keccak );
cubehashInit( &l2v2_16way_ctx.cube, 256, 16, 32 );
skein256_8way_init( &l2v2_16way_ctx.skein );
bmw256_16way_init( &l2v2_16way_ctx.bmw );
return true;
}
void lyra2rev2_16way_hash( void *state, const void *input )
{
uint32_t vhash[8*16] __attribute__ ((aligned (128)));
uint32_t hash0[8] __attribute__ ((aligned (64)));
uint32_t hash1[8] __attribute__ ((aligned (64)));
uint32_t hash2[8] __attribute__ ((aligned (64)));
uint32_t hash3[8] __attribute__ ((aligned (64)));
uint32_t hash4[8] __attribute__ ((aligned (64)));
uint32_t hash5[8] __attribute__ ((aligned (64)));
uint32_t hash6[8] __attribute__ ((aligned (64)));
uint32_t hash7[8] __attribute__ ((aligned (64)));
uint32_t hash8[8] __attribute__ ((aligned (64)));
uint32_t hash9[8] __attribute__ ((aligned (64)));
uint32_t hash10[8] __attribute__ ((aligned (64)));
uint32_t hash11[8] __attribute__ ((aligned (64)));
uint32_t hash12[8] __attribute__ ((aligned (64)));
uint32_t hash13[8] __attribute__ ((aligned (64)));
uint32_t hash14[8] __attribute__ ((aligned (64)));
uint32_t hash15[8] __attribute__ ((aligned (64)));
lyra2v2_16way_ctx_holder ctx __attribute__ ((aligned (64)));
memcpy( &ctx, &l2v2_16way_ctx, sizeof(l2v2_16way_ctx) );
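// The first 64 header bytes were absorbed once in scanhash as a shared
// midstate; only the final 16 bytes, which carry the per-lane nonces,
// are hashed here (16 lanes interleaved, hence offset 64<<4).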
blake256_16way_update( &ctx.blake, input + (64<<4), 16 );
blake256_16way_close( &ctx.blake, vhash );
dintrlv_16x32( hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7,
hash8, hash9, hash10, hash11,
hash12, hash13, hash14, hash15, vhash, 256 );
intrlv_8x64( vhash, hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7, 256 );
keccak256_8way_update( &ctx.keccak, vhash, 32 );
keccak256_8way_close( &ctx.keccak, vhash );
dintrlv_8x64( hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7, vhash, 256 );
intrlv_8x64( vhash, hash8, hash9, hash10, hash11,
hash12, hash13, hash14, hash15, 256 );
keccak256_8way_init( &ctx.keccak );
keccak256_8way_update( &ctx.keccak, vhash, 32 );
keccak256_8way_close( &ctx.keccak, vhash );
dintrlv_8x64( hash8, hash9, hash10, hash11,
hash12, hash13, hash14, hash15, vhash, 256 );
cubehash_full( &ctx.cube, (byte*) hash0, 256, (const byte*) hash0, 32 );
cubehash_full( &ctx.cube, (byte*) hash1, 256, (const byte*) hash1, 32 );
cubehash_full( &ctx.cube, (byte*) hash2, 256, (const byte*) hash2, 32 );
cubehash_full( &ctx.cube, (byte*) hash3, 256, (const byte*) hash3, 32 );
cubehash_full( &ctx.cube, (byte*) hash4, 256, (const byte*) hash4, 32 );
cubehash_full( &ctx.cube, (byte*) hash5, 256, (const byte*) hash5, 32 );
cubehash_full( &ctx.cube, (byte*) hash6, 256, (const byte*) hash6, 32 );
cubehash_full( &ctx.cube, (byte*) hash7, 256, (const byte*) hash7, 32 );
cubehash_full( &ctx.cube, (byte*) hash8, 256, (const byte*) hash8, 32 );
cubehash_full( &ctx.cube, (byte*) hash9, 256, (const byte*) hash9, 32 );
cubehash_full( &ctx.cube, (byte*) hash10, 256, (const byte*) hash10, 32 );
cubehash_full( &ctx.cube, (byte*) hash11, 256, (const byte*) hash11, 32 );
cubehash_full( &ctx.cube, (byte*) hash12, 256, (const byte*) hash12, 32 );
cubehash_full( &ctx.cube, (byte*) hash13, 256, (const byte*) hash13, 32 );
cubehash_full( &ctx.cube, (byte*) hash14, 256, (const byte*) hash14, 32 );
cubehash_full( &ctx.cube, (byte*) hash15, 256, (const byte*) hash15, 32 );
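// Lyra2 is memory-bound, so it is run 2 lanes at a time: each pair is
// re-interleaved 2x256 for the 2-way Lyra2 core.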
intrlv_2x256( vhash, hash0, hash1, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash0, hash1, vhash, 256 );
intrlv_2x256( vhash, hash2, hash3, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash2, hash3, vhash, 256 );
intrlv_2x256( vhash, hash4, hash5, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash4, hash5, vhash, 256 );
intrlv_2x256( vhash, hash6, hash7, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash6, hash7, vhash, 256 );
intrlv_2x256( vhash, hash8, hash9, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash8, hash9, vhash, 256 );
intrlv_2x256( vhash, hash10, hash11, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash10, hash11, vhash, 256 );
intrlv_2x256( vhash, hash12, hash13, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash12, hash13, vhash, 256 );
intrlv_2x256( vhash, hash14, hash15, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash14, hash15, vhash, 256 );
intrlv_8x64( vhash, hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7, 256 );
skein256_8way_update( &ctx.skein, vhash, 32 );
skein256_8way_close( &ctx.skein, vhash );
dintrlv_8x64( hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7, vhash, 256 );
intrlv_8x64( vhash, hash8, hash9, hash10, hash11, hash12,
hash13, hash14, hash15, 256 );
skein256_8way_init( &ctx.skein );
skein256_8way_update( &ctx.skein, vhash, 32 );
skein256_8way_close( &ctx.skein, vhash );
dintrlv_8x64( hash8, hash9, hash10, hash11,
hash12, hash13, hash14, hash15, vhash, 256 );
cubehash_full( &ctx.cube, (byte*) hash0, 256, (const byte*) hash0, 32 );
cubehash_full( &ctx.cube, (byte*) hash1, 256, (const byte*) hash1, 32 );
cubehash_full( &ctx.cube, (byte*) hash2, 256, (const byte*) hash2, 32 );
cubehash_full( &ctx.cube, (byte*) hash3, 256, (const byte*) hash3, 32 );
cubehash_full( &ctx.cube, (byte*) hash4, 256, (const byte*) hash4, 32 );
cubehash_full( &ctx.cube, (byte*) hash5, 256, (const byte*) hash5, 32 );
cubehash_full( &ctx.cube, (byte*) hash6, 256, (const byte*) hash6, 32 );
cubehash_full( &ctx.cube, (byte*) hash7, 256, (const byte*) hash7, 32 );
cubehash_full( &ctx.cube, (byte*) hash8, 256, (const byte*) hash8, 32 );
cubehash_full( &ctx.cube, (byte*) hash9, 256, (const byte*) hash9, 32 );
cubehash_full( &ctx.cube, (byte*) hash10, 256, (const byte*) hash10, 32 );
cubehash_full( &ctx.cube, (byte*) hash11, 256, (const byte*) hash11, 32 );
cubehash_full( &ctx.cube, (byte*) hash12, 256, (const byte*) hash12, 32 );
cubehash_full( &ctx.cube, (byte*) hash13, 256, (const byte*) hash13, 32 );
cubehash_full( &ctx.cube, (byte*) hash14, 256, (const byte*) hash14, 32 );
cubehash_full( &ctx.cube, (byte*) hash15, 256, (const byte*) hash15, 32 );
intrlv_16x32( vhash, hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7,
hash8, hash9, hash10, hash11,
hash12, hash13, hash14, hash15, 256 );
bmw256_16way_update( &ctx.bmw, vhash, 32 );
bmw256_16way_close( &ctx.bmw, state );
}
int scanhash_lyra2rev2_16way( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*16] __attribute__ ((aligned (128)));
uint32_t vdata[20*16] __attribute__ ((aligned (64)));
uint32_t *hashd7 = &hash[7*16];
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 16;
uint32_t n = first_nonce;
const uint32_t targ32 = ptarget[7];
__m512i *noncev = (__m512i*)vdata + 19;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
if ( bench ) ptarget[7] = 0x0000ff;
mm512_bswap32_intrlv80_16x32( vdata, pdata );
*noncev = _mm512_set_epi32( n+15, n+14, n+13, n+12, n+11, n+10, n+ 9, n+ 8,
n+ 7, n+ 6, n+ 5, n+ 4, n+ 3, n+ 2, n+ 1, n );
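// vdata holds the byte-swapped header, but the nonce lanes stay in
// native order so they can be bumped with a vector add each pass;
// a winning nonce is therefore written back as bswap_32( n + lane ).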
blake256_16way_init( &l2v2_16way_ctx.blake );
blake256_16way_update( &l2v2_16way_ctx.blake, vdata, 64 );
do
{
lyra2rev2_16way_hash( hash, vdata );
for ( int lane = 0; lane < 16; lane++ )
if ( unlikely( hashd7[lane] <= targ32 ) )
{
extr_lane_16x32( lane_hash, hash, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
{
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
*noncev = _mm512_add_epi32( *noncev, m512_const1_32( 16 ) );
n += 16;
} while ( likely( (n < last_nonce) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#elif defined (LYRA2REV2_8WAY)
typedef struct {
blake256_8way_context blake;
keccak256_8way_context keccak;
cube_4way_context cube;
skein256_8way_context skein;
bmw256_8way_context bmw;
keccak256_4way_context keccak;
cubehashParam cube;
skein256_4way_context skein;
bmw256_8way_context bmw;
} lyra2v2_8way_ctx_holder __attribute__ ((aligned (64)));
static lyra2v2_8way_ctx_holder l2v2_8way_ctx;
bool init_lyra2rev2_8way_ctx()
{
keccak256_8way_init( &l2v2_8way_ctx.keccak );
cube_4way_init( &l2v2_8way_ctx.cube, 256, 16, 32 );
skein256_8way_init( &l2v2_8way_ctx.skein );
keccak256_4way_init( &l2v2_8way_ctx.keccak );
cubehashInit( &l2v2_8way_ctx.cube, 256, 16, 32 );
skein256_4way_init( &l2v2_8way_ctx.skein );
bmw256_8way_init( &l2v2_8way_ctx.bmw );
return true;
}
@@ -31,8 +235,6 @@ bool init_lyra2rev2_8way_ctx()
void lyra2rev2_8way_hash( void *state, const void *input )
{
uint32_t vhash[8*8] __attribute__ ((aligned (128)));
uint32_t vhashA[8*8] __attribute__ ((aligned (64)));
uint32_t vhashB[8*8] __attribute__ ((aligned (64)));
uint32_t hash0[8] __attribute__ ((aligned (64)));
uint32_t hash1[8] __attribute__ ((aligned (64)));
uint32_t hash2[8] __attribute__ ((aligned (64)));
@@ -47,103 +249,113 @@ void lyra2rev2_8way_hash( void *state, const void *input )
blake256_8way_update( &ctx.blake, input + (64<<3), 16 );
blake256_8way_close( &ctx.blake, vhash );
rintrlv_8x32_8x64( vhashA, vhash, 256 );
dintrlv_8x32( hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7, vhash, 256 );
keccak256_8way_update( &ctx.keccak, vhashA, 32 );
keccak256_8way_close( &ctx.keccak, vhash );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 256 );
keccak256_4way_update( &ctx.keccak, vhash, 32 );
keccak256_4way_close( &ctx.keccak, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 256 );
intrlv_4x64( vhash, hash4, hash5, hash6, hash7, 256 );
keccak256_4way_init( &ctx.keccak );
keccak256_4way_update( &ctx.keccak, vhash, 32 );
keccak256_4way_close( &ctx.keccak, vhash );
dintrlv_4x64( hash4, hash5, hash6, hash7, vhash, 256 );
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 256 );
cubehash_full( &ctx.cube, (byte*) hash0, 256, (const byte*) hash0, 32 );
cubehash_full( &ctx.cube, (byte*) hash1, 256, (const byte*) hash1, 32 );
cubehash_full( &ctx.cube, (byte*) hash2, 256, (const byte*) hash2, 32 );
cubehash_full( &ctx.cube, (byte*) hash3, 256, (const byte*) hash3, 32 );
cubehash_full( &ctx.cube, (byte*) hash4, 256, (const byte*) hash4, 32 );
cubehash_full( &ctx.cube, (byte*) hash5, 256, (const byte*) hash5, 32 );
cubehash_full( &ctx.cube, (byte*) hash6, 256, (const byte*) hash6, 32 );
cubehash_full( &ctx.cube, (byte*) hash7, 256, (const byte*) hash7, 32 );
cube_4way_update_close( &ctx.cube, vhashA, vhashA, 32 );
cube_4way_init( &ctx.cube, 256, 16, 32 );
cube_4way_update_close( &ctx.cube, vhashB, vhashB, 32 );
dintrlv_4x128( hash0, hash1, hash2, hash3, vhashA, 256 );
dintrlv_4x128( hash4, hash5, hash6, hash7, vhashB, 256 );
intrlv_2x256( vhash, hash0, hash1, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash0, hash1, vhash, 256 );
intrlv_2x256( vhash, hash2, hash3, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash2, hash3, vhash, 256 );
intrlv_2x256( vhash, hash4, hash5, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash4, hash5, vhash, 256 );
intrlv_2x256( vhash, hash6, hash7, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash6, hash7, vhash, 256 );
intrlv_8x64( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, 256 );
skein256_8way_update( &ctx.skein, vhash, 32 );
skein256_8way_close( &ctx.skein, vhash );
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 256 );
cube_4way_init( &ctx.cube, 256, 16, 32 );
cube_4way_update_close( &ctx.cube, vhashA, vhashA, 32 );
cube_4way_init( &ctx.cube, 256, 16, 32 );
cube_4way_update_close( &ctx.cube, vhashB, vhashB, 32 );
LYRA2REV2( l2v2_wholeMatrix, hash0, 32, hash0, 32, hash0, 32, 1, 4, 4 );
LYRA2REV2( l2v2_wholeMatrix, hash1, 32, hash1, 32, hash1, 32, 1, 4, 4 );
LYRA2REV2( l2v2_wholeMatrix, hash2, 32, hash2, 32, hash2, 32, 1, 4, 4 );
LYRA2REV2( l2v2_wholeMatrix, hash3, 32, hash3, 32, hash3, 32, 1, 4, 4 );
LYRA2REV2( l2v2_wholeMatrix, hash4, 32, hash4, 32, hash4, 32, 1, 4, 4 );
LYRA2REV2( l2v2_wholeMatrix, hash5, 32, hash5, 32, hash5, 32, 1, 4, 4 );
LYRA2REV2( l2v2_wholeMatrix, hash6, 32, hash6, 32, hash6, 32, 1, 4, 4 );
LYRA2REV2( l2v2_wholeMatrix, hash7, 32, hash7, 32, hash7, 32, 1, 4, 4 );
dintrlv_4x128( hash0, hash1, hash2, hash3, vhashA, 256 );
dintrlv_4x128( hash4, hash5, hash6, hash7, vhashB, 256 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 256 );
skein256_4way_update( &ctx.skein, vhash, 32 );
skein256_4way_close( &ctx.skein, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 256 );
intrlv_4x64( vhash, hash4, hash5, hash6, hash7, 256 );
skein256_4way_init( &ctx.skein );
skein256_4way_update( &ctx.skein, vhash, 32 );
skein256_4way_close( &ctx.skein, vhash );
dintrlv_4x64( hash4, hash5, hash6, hash7, vhash, 256 );
intrlv_8x32( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, 256 );
cubehash_full( &ctx.cube, (byte*) hash0, 256, (const byte*) hash0, 32 );
cubehash_full( &ctx.cube, (byte*) hash1, 256, (const byte*) hash1, 32 );
cubehash_full( &ctx.cube, (byte*) hash2, 256, (const byte*) hash2, 32 );
cubehash_full( &ctx.cube, (byte*) hash3, 256, (const byte*) hash3, 32 );
cubehash_full( &ctx.cube, (byte*) hash4, 256, (const byte*) hash4, 32 );
cubehash_full( &ctx.cube, (byte*) hash5, 256, (const byte*) hash5, 32 );
cubehash_full( &ctx.cube, (byte*) hash6, 256, (const byte*) hash6, 32 );
cubehash_full( &ctx.cube, (byte*) hash7, 256, (const byte*) hash7, 32 );
intrlv_8x32( vhash, hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7, 256 );
bmw256_8way_update( &ctx.bmw, vhash, 32 );
bmw256_8way_close( &ctx.bmw, state );
}
int scanhash_lyra2rev2_8way( struct work *work, uint32_t max_nonce,
int scanhash_lyra2rev2_8way( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[7<<3]);
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hashd7 = &hash[7*8];
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
uint32_t n = first_nonce;
const uint32_t Htarg = ptarget[7];
__m256i *noncev = (__m256i*)vdata + 19; // aligned
int thr_id = mythr->id;
const uint32_t targ32 = ptarget[7];
__m256i *noncev = (__m256i*)vdata + 19;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
if ( opt_benchmark )
( (uint32_t*)ptarget )[7] = 0x0000ff;
if ( bench ) ptarget[7] = 0x0000ff;
mm256_bswap32_intrlv80_8x32( vdata, pdata );
*noncev = _mm256_set_epi32( n+7, n+6, n+5, n+4, n+3, n+2, n+1, n );
blake256_8way_init( &l2v2_8way_ctx.blake );
blake256_8way_update( &l2v2_8way_ctx.blake, vdata, 64 );
do
{
*noncev = mm256_bswap_32( _mm256_set_epi32( n+7, n+6, n+5, n+4,
n+3, n+2, n+1, n ) );
lyra2rev2_8way_hash( hash, vdata );
pdata[19] = n;
for ( int lane = 0; lane < 8; lane++ ) if ( hash7[lane] <= Htarg )
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( hashd7[lane] <= targ32 ) )
{
extr_lane_8x32( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
*noncev = _mm256_add_epi32( *noncev, m256_const1_32( 8 ) );
n += 8;
} while ( (n < last_nonce) && !work_restart[thr_id].restart);
} while ( likely( (n < last_nonce) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#endif
/*
#elif defined (LYRA2REV2_4WAY)
typedef struct {
@@ -226,15 +438,16 @@ int scanhash_lyra2rev2_4way( struct work *work, uint32_t max_nonce,
{
uint32_t hash[8*4] __attribute__ ((aligned (64)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[7<<2]);
uint32_t *hashd7 = &(hash[7<<2]);
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
uint32_t n = first_nonce;
const uint32_t Htarg = ptarget[7];
__m128i *noncev = (__m128i*)vdata + 19; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t targ32 = ptarget[7];
__m128i *noncev = (__m128i*)vdata + 19;
int thr_id = mythr->id;
if ( opt_benchmark )
( (uint32_t*)ptarget )[7] = 0x0000ff;
@@ -249,21 +462,22 @@ int scanhash_lyra2rev2_4way( struct work *work, uint32_t max_nonce,
*noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) );
lyra2rev2_4way_hash( hash, vdata );
pdata[19] = n;
for ( int lane = 0; lane < 4; lane++ ) if ( hash7[lane] <= Htarg )
for ( int lane = 0; lane < 4; lane++ ) if ( hashd7[lane] <= targ32 )
{
extr_lane_4x32( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
if ( valid_hash( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 4;
} while ( (n < max_nonce-4) && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
} while ( (n < last_nonce) && !work_restart[thr_id].restart);
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#endif
*/

View File

@@ -99,7 +99,7 @@ int scanhash_lyra2rev2( struct work *work,
lyra2rev2_hash(hash, endiandata);
if (hash[7] <= Htarg )
if( fulltest( hash, ptarget ) && !opt_benchmark )
if( valid_hash( hash, ptarget ) && !opt_benchmark )
{
pdata[19] = nonce;
submit_solution( work, hash, mythr );

View File

@@ -130,15 +130,15 @@ int scanhash_lyra2rev3_16way( struct work *work, const uint32_t max_nonce,
{
uint32_t hash[8*16] __attribute__ ((aligned (128)));
uint32_t vdata[20*16] __attribute__ ((aligned (64)));
uint32_t *hash7 = &hash[7<<4];
uint32_t *hashd7 = &hash[7*16];
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
const uint32_t last_nonce = max_nonce - 16;
const uint32_t Htarg = ptarget[7];
__m512i *noncev = (__m512i*)vdata + 19; // aligned
const uint32_t targ32 = ptarget[7];
__m512i *noncev = (__m512i*)vdata + 19;
const int thr_id = mythr->id;
if ( opt_benchmark ) ( (uint32_t*)ptarget )[7] = 0x0000ff;
@@ -159,10 +159,10 @@ int scanhash_lyra2rev3_16way( struct work *work, const uint32_t max_nonce,
pdata[19] = n;
for ( int lane = 0; lane < 16; lane++ )
if ( unlikely( hash7[lane] <= Htarg ) )
if ( unlikely( hashd7[lane] <= targ32 ) )
{
extr_lane_16x32( lane_hash, hash, lane, 256 );
if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
if ( likely( valid_hash( lane_hash, ptarget ) && !opt_benchmark ) )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
@@ -170,6 +170,7 @@ int scanhash_lyra2rev3_16way( struct work *work, const uint32_t max_nonce,
}
n += 16;
} while ( likely( (n < last_nonce) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
@@ -194,7 +195,7 @@ bool init_lyra2rev3_8way_ctx()
void lyra2rev3_8way_hash( void *state, const void *input )
{
uint32_t vhash[8*8] __attribute__ ((aligned (64)));
uint32_t vhash[8*8] __attribute__ ((aligned (128)));
uint32_t hash0[8] __attribute__ ((aligned (64)));
uint32_t hash1[8] __attribute__ ((aligned (32)));
uint32_t hash2[8] __attribute__ ((aligned (32)));
@@ -250,17 +251,17 @@ void lyra2rev3_8way_hash( void *state, const void *input )
int scanhash_lyra2rev3_8way( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*8] __attribute__ ((aligned (64)));
uint32_t hash[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &hash[7<<3];
uint32_t *hashd7 = &hash[7*8];
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
uint32_t n = first_nonce;
const uint32_t Htarg = ptarget[7];
__m256i *noncev = (__m256i*)vdata + 19; // aligned
const uint32_t targ32 = ptarget[7];
__m256i *noncev = (__m256i*)vdata + 19;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
@@ -277,7 +278,7 @@ int scanhash_lyra2rev3_8way( struct work *work, const uint32_t max_nonce,
pdata[19] = n;
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( hash7[lane] <= Htarg ) )
if ( unlikely( hashd7[lane] <= targ32 ) )
{
extr_lane_8x32( lane_hash, hash, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
@@ -357,42 +358,41 @@ int scanhash_lyra2rev3_4way( struct work *work, const uint32_t max_nonce,
{
uint32_t hash[8*4] __attribute__ ((aligned (64)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[7<<2]);
uint32_t *hashd7 = &(hash[7*4]);
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
const uint32_t Htarg = ptarget[7];
__m128i *noncev = (__m128i*)vdata + 19; // aligned
const int thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t targ32 = ptarget[7];
__m128i *noncev = (__m128i*)vdata + 19;
const int thr_id = mythr->id;
if ( opt_benchmark )
( (uint32_t*)ptarget )[7] = 0x0000ff;
mm128_bswap32_intrlv80_4x32( vdata, pdata );
*noncev = _mm_set_epi32( n+3, n+2, n+1, n );
blake256_4way_init( &l2v3_4way_ctx.blake );
blake256_4way_update( &l2v3_4way_ctx.blake, vdata, 64 );
do
{
*noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) );
lyra2rev3_4way_hash( hash, vdata );
pdata[19] = n;
for ( int lane = 0; lane < 4; lane++ ) if ( hash7[lane] <= Htarg )
for ( int lane = 0; lane < 4; lane++ ) if ( hashd7[lane] <= targ32 )
{
extr_lane_4x32( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
if ( valid_hash( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
*noncev = _mm_add_epi32( *noncev, m128_const1_32( 4 ) );
n += 4;
} while ( (n < max_nonce-4) && !work_restart[thr_id].restart);
pdata[19] = n;
*hashes_done = n - first_nonce + 1;
return 0;
}

View File

@@ -88,7 +88,7 @@ int scanhash_lyra2rev3( struct work *work,
lyra2rev3_hash(hash, endiandata);
if (hash[7] <= Htarg )
if( fulltest( hash, ptarget ) && !opt_benchmark )
if( valid_hash( hash, ptarget ) && !opt_benchmark )
{
pdata[19] = nonce;
submit_solution( work, hash, mythr );

View File

@@ -56,7 +56,7 @@ int scanhash_lyra2z( struct work *work, uint32_t max_nonce,
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t nonce = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
int thr_id = mythr->id;
if (opt_benchmark)
ptarget[7] = 0x0000ff;
@@ -65,14 +65,13 @@ int scanhash_lyra2z( struct work *work, uint32_t max_nonce,
be32enc(&endiandata[i], pdata[i]);
}
lyra2z_midstate( endiandata );
lyra2z_midstate( endiandata );
do {
be32enc(&endiandata[19], nonce);
lyra2z_hash( hash, endiandata );
if ( hash[7] <= Htarg )
if ( fulltest( hash, ptarget ) && !opt_benchmark )
if ( valid_hash( hash, ptarget ) && !opt_benchmark )
{
pdata[19] = nonce;
submit_solution( work, hash, mythr );

View File

@@ -9,7 +9,7 @@ void lyra2z330_hash(void *state, const void *input, uint32_t height)
{
uint32_t _ALIGN(256) hash[16];
LYRA2Z( lyra2z330_wholeMatrix, hash, 32, input, 80, input, 80,
LYRA2Z( lyra2z330_wholeMatrix, hash, 32, input, 80, input, 80,
2, 330, 256 );
memcpy(state, hash, 32);
@@ -18,38 +18,40 @@ void lyra2z330_hash(void *state, const void *input, uint32_t height)
int scanhash_lyra2z330( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8] __attribute__ ((aligned (64)));
uint32_t endiandata[20] __attribute__ ((aligned (64)));
uint32_t hash[8] __attribute__ ((aligned (128)));
uint32_t edata[20] __attribute__ ((aligned (64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t nonce = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
const int thr_id = mythr->id;
if (opt_benchmark)
ptarget[7] = 0x0000ff;
casti_m128i( endiandata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) );
casti_m128i( endiandata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) );
casti_m128i( endiandata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) );
casti_m128i( endiandata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) );
casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
casti_m128i( edata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) );
casti_m128i( edata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) );
casti_m128i( edata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) );
casti_m128i( edata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) );
casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
do
{
be32enc( &endiandata[19], nonce );
lyra2z330_hash( hash, endiandata, work->height );
if ( hash[7] <= Htarg )
if ( fulltest( hash, ptarget ) && !opt_benchmark )
edata[19] = nonce;
LYRA2Z( lyra2z330_wholeMatrix, hash, 32, edata, 80, edata, 80,
2, 330, 256 );
// lyra2z330_hash( hash, edata, work->height );
if ( valid_hash( hash, ptarget ) && !opt_benchmark )
{
pdata[19] = nonce;
be32enc( pdata + 19, nonce );
submit_solution( work, hash, mythr );
}
nonce++;
} while ( nonce < max_nonce && !work_restart[thr_id].restart );
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
*hashes_done = nonce - first_nonce;
return 0;
}

View File

@@ -1,233 +1,501 @@
/**
* Phi-2 algo Implementation
*/
#include "lyra2-gate.h"
#if defined(PHI2_4WAY)
#include "algo/skein/skein-hash-4way.h"
#include "algo/jh/jh-hash-4way.h"
#include "algo/gost/sph_gost.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/echo/aes_ni/hash_api.h"
#include "lyra2.h"
#if defined(__VAES__)
#include "algo/echo/echo-hash-4way.h"
#elif defined(__AES__)
#include "algo/echo/aes_ni/hash_api.h"
#endif
#if defined(PHI2_8WAY)
typedef struct {
cubehashParam cube;
jh512_8way_context jh;
#if defined(__VAES__)
echo_4way_context echo;
#else
hashState_echo echo;
#endif
sph_gost512_context gost;
skein512_8way_context skein;
} phi2_8way_ctx_holder;
void phi2_8way_hash( void *state, const void *input )
{
unsigned char _ALIGN(128) hash[64*8];
unsigned char _ALIGN(128) hashA[64*2];
unsigned char _ALIGN(64) hash0[64];
unsigned char _ALIGN(64) hash1[64];
unsigned char _ALIGN(64) hash2[64];
unsigned char _ALIGN(64) hash3[64];
unsigned char _ALIGN(64) hash4[64];
unsigned char _ALIGN(64) hash5[64];
unsigned char _ALIGN(64) hash6[64];
unsigned char _ALIGN(64) hash7[64];
const int size = phi2_has_roots ? 144 : 80 ;
phi2_8way_ctx_holder ctx __attribute__ ((aligned (64)));
cubehash_full( &ctx.cube, (byte*)hash0, 512,
(const byte*)input, size );
cubehash_full( &ctx.cube, (byte*)hash1, 512,
(const byte*)input + 144, size );
cubehash_full( &ctx.cube, (byte*)hash2, 512,
(const byte*)input + 2*144, size );
cubehash_full( &ctx.cube, (byte*)hash3, 512,
(const byte*)input + 3*144, size );
cubehash_full( &ctx.cube, (byte*)hash4, 512,
(const byte*)input + 4*144, size );
cubehash_full( &ctx.cube, (byte*)hash5, 512,
(const byte*)input + 5*144, size );
cubehash_full( &ctx.cube, (byte*)hash6, 512,
(const byte*)input + 6*144, size );
cubehash_full( &ctx.cube, (byte*)hash7, 512,
(const byte*)input + 7*144, size );
intrlv_2x256( hashA, hash0, hash1, 512 );
LYRA2RE_2WAY( hash, 32, hashA, 32, 1, 8, 8 );
LYRA2RE_2WAY( hash + 2*32, 32, hashA + 2*32, 32, 1, 8, 8 );
dintrlv_2x256( hash0, hash1, hash, 512 );
intrlv_2x256( hashA, hash2, hash3, 512 );
LYRA2RE_2WAY( hash, 32, hashA, 32, 1, 8, 8 );
LYRA2RE_2WAY( hash + 2*32, 32, hashA + 2*32, 32, 1, 8, 8 );
dintrlv_2x256( hash2, hash3, hash, 512 );
intrlv_2x256( hashA, hash4, hash5, 512 );
LYRA2RE_2WAY( hash, 32, hashA, 32, 1, 8, 8 );
LYRA2RE_2WAY( hash + 2*32, 32, hashA + 2*32, 32, 1, 8, 8 );
dintrlv_2x256( hash4, hash5, hash, 512 );
intrlv_2x256( hashA, hash6, hash7, 512 );
LYRA2RE_2WAY( hash, 32, hashA, 32, 1, 8, 8 );
LYRA2RE_2WAY( hash + 2*32, 32, hashA + 2*32, 32, 1, 8, 8 );
dintrlv_2x256( hash6, hash7, hash, 512 );
intrlv_8x64_512( hash, hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7 );
jh512_8way_init( &ctx.jh );
jh512_8way_update( &ctx.jh, (const void*)hash, 64 );
jh512_8way_close( &ctx.jh, (void*)hash );
dintrlv_8x64_512( hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7, hash );
#if defined (__VAES__)
unsigned char _ALIGN(64) hashA0[64];
unsigned char _ALIGN(64) hashA1[64];
unsigned char _ALIGN(64) hashA2[64];
unsigned char _ALIGN(64) hashA3[64];
unsigned char _ALIGN(64) hashA4[64];
unsigned char _ALIGN(64) hashA5[64];
unsigned char _ALIGN(64) hashA6[64];
unsigned char _ALIGN(64) hashA7[64];
intrlv_4x128_512( hash, hash0, hash1, hash2, hash3 );
echo_4way_full( &ctx.echo, hash, 512, hash, 64 );
echo_4way_full( &ctx.echo, hash, 512, hash, 64 );
dintrlv_4x128_512( hashA0, hashA1, hashA2, hashA3, hash );
intrlv_4x128_512( hash, hash4, hash5, hash6, hash7 );
echo_4way_full( &ctx.echo, hash, 512, hash, 64 );
echo_4way_full( &ctx.echo, hash, 512, hash, 64 );
dintrlv_4x128_512( hashA4, hashA5, hashA6, hashA7, hash );
#endif
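// With VAES the two echo passes are precomputed 4-way for all 8 lanes
// above; lanes whose first hash byte is odd discard that result and
// take the scalar GOST path instead.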
if ( hash0[0] & 1 )
{
sph_gost512_init( &ctx.gost );
sph_gost512( &ctx.gost, (const void*)hash0, 64 );
sph_gost512_close( &ctx.gost, (void*)hash0 );
}
else
#if defined (__VAES__)
memcpy( hash0, hashA0, 64 );
#else
{
echo_full( &ctx.echo, (BitSequence *)hash0, 512,
(const BitSequence *)hash0, 64 );
echo_full( &ctx.echo, (BitSequence *)hash0, 512,
(const BitSequence *)hash0, 64 );
}
#endif
if ( hash1[0] & 1 )
{
sph_gost512_init( &ctx.gost );
sph_gost512( &ctx.gost, (const void*)hash1, 64 );
sph_gost512_close( &ctx.gost, (void*)hash1 );
}
else
#if defined (__VAES__)
memcpy( hash1, hashA1, 64 );
#else
{
echo_full( &ctx.echo, (BitSequence *)hash1, 512,
(const BitSequence *)hash1, 64 );
echo_full( &ctx.echo, (BitSequence *)hash1, 512,
(const BitSequence *)hash1, 64 );
}
#endif
if ( hash2[0] & 1 )
{
sph_gost512_init( &ctx.gost );
sph_gost512( &ctx.gost, (const void*)hash2, 64 );
sph_gost512_close( &ctx.gost, (void*)hash2 );
}
else
#if defined (__VAES__)
memcpy( hash2, hashA2, 64 );
#else
{
echo_full( &ctx.echo, (BitSequence *)hash2, 512,
(const BitSequence *)hash2, 64 );
echo_full( &ctx.echo, (BitSequence *)hash2, 512,
(const BitSequence *)hash2, 64 );
}
#endif
if ( hash3[0] & 1 )
{
sph_gost512_init( &ctx.gost );
sph_gost512( &ctx.gost, (const void*)hash3, 64 );
sph_gost512_close( &ctx.gost, (void*)hash3 );
}
else
#if defined (__VAES__)
memcpy( hash3, hashA3, 64 );
#else
{
echo_full( &ctx.echo, (BitSequence *)hash3, 512,
(const BitSequence *)hash3, 64 );
echo_full( &ctx.echo, (BitSequence *)hash3, 512,
(const BitSequence *)hash3, 64 );
}
#endif
if ( hash4[0] & 1 )
{
sph_gost512_init( &ctx.gost );
sph_gost512( &ctx.gost, (const void*)hash4, 64 );
sph_gost512_close( &ctx.gost, (void*)hash4 );
}
else
#if defined (__VAES__)
memcpy( hash4, hashA4, 64 );
#else
{
echo_full( &ctx.echo, (BitSequence *)hash4, 512,
(const BitSequence *)hash4, 64 );
echo_full( &ctx.echo, (BitSequence *)hash4, 512,
(const BitSequence *)hash4, 64 );
}
#endif
if ( hash5[0] & 1 )
{
sph_gost512_init( &ctx.gost );
sph_gost512( &ctx.gost, (const void*)hash5, 64 );
sph_gost512_close( &ctx.gost, (void*)hash5 );
}
else
#if defined (__VAES__)
memcpy( hash5, hashA5, 64 );
#else
{
echo_full( &ctx.echo, (BitSequence *)hash5, 512,
(const BitSequence *)hash5, 64 );
echo_full( &ctx.echo, (BitSequence *)hash5, 512,
(const BitSequence *)hash5, 64 );
}
#endif
if ( hash6[0] & 1 )
{
sph_gost512_init( &ctx.gost );
sph_gost512( &ctx.gost, (const void*)hash6, 64 );
sph_gost512_close( &ctx.gost, (void*)hash6 );
}
else
#if defined (__VAES__)
memcpy( hash6, hashA6, 64 );
#else
{
echo_full( &ctx.echo, (BitSequence *)hash6, 512,
(const BitSequence *)hash6, 64 );
echo_full( &ctx.echo, (BitSequence *)hash6, 512,
(const BitSequence *)hash6, 64 );
}
#endif
if ( hash7[0] & 1 )
{
sph_gost512_init( &ctx.gost );
sph_gost512( &ctx.gost, (const void*)hash7, 64 );
sph_gost512_close( &ctx.gost, (void*)hash7 );
}
else
#if defined (__VAES__)
memcpy( hash7, hashA7, 64 );
#else
{
echo_full( &ctx.echo, (BitSequence *)hash7, 512,
(const BitSequence *)hash7, 64 );
echo_full( &ctx.echo, (BitSequence *)hash7, 512,
(const BitSequence *)hash7, 64 );
}
#endif
intrlv_8x64_512( hash, hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7 );
skein512_8way_init( &ctx.skein );
skein512_8way_update( &ctx.skein, (const void*)hash, 64 );
skein512_8way_close( &ctx.skein, (void*)hash );
for ( int i = 0; i < 4; i++ )
casti_m512i( state, i ) = _mm512_xor_si512( casti_m512i( hash, i ),
casti_m512i( hash, i+4 ) );
}
int scanhash_phi2_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(128) hash[16*8];
uint32_t _ALIGN(128) edata[36*8];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t *hash7 = &(hash[49]);
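// 32-bit word 7 of lane l in the 8x64 interleaved hash is the high half
// of 64-bit word 3, at index (3*8 + l)*2 + 1 = 49 + 2*l, hence
// hash7[ lane<<1 ] below.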
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
if ( bench ) ptarget[7] = 0x00ff;
phi2_has_roots = false;
for ( int i = 0; i < 36; i++ )
{
be32enc( &edata[i], pdata[i] );
edata[ i + 36 ] = edata[ i + 2*36 ] = edata[ i + 3*36 ] =
edata[ i + 4*36 ] = edata[ i + 5*36 ] = edata[ i + 6*36 ] =
edata[ i + 7*36 ] = edata[ i ];
if ( i >= 20 && pdata[i] ) phi2_has_roots = true;
}
edata[ 19 ] = n;
edata[ 36 + 19 ] = n+1;
edata[ 2*36 + 19 ] = n+2;
edata[ 3*36 + 19 ] = n+3;
edata[ 4*36 + 19 ] = n+4;
edata[ 5*36 + 19 ] = n+5;
edata[ 6*36 + 19 ] = n+6;
edata[ 7*36 + 19 ] = n+7;
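// edata holds 8 contiguous 144-byte (36-word) big-endian headers, one
// per lane, each with its own nonce at word 19; all 8 nonces are
// advanced by 8 at the bottom of the loop.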
do {
phi2_8way_hash( hash, edata );
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( hash7[ lane<<1 ] <= Htarg && !bench ) )
{
uint64_t _ALIGN(64) lane_hash[8];
extr_lane_8x64( lane_hash, hash, lane, 256 );
if ( valid_hash( lane_hash, ptarget ) )
{
be32enc( pdata + 19, n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 8;
edata[ 19 ] += 8;
edata[ 36 + 19 ] += 8;
edata[ 2*36 + 19 ] += 8;
edata[ 3*36 + 19 ] += 8;
edata[ 4*36 + 19 ] += 8;
edata[ 5*36 + 19 ] += 8;
edata[ 6*36 + 19 ] += 8;
edata[ 7*36 + 19 ] += 8;
} while ( (n < last_nonce) && !work_restart[thr_id].restart);
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#elif defined(PHI2_4WAY)
typedef struct {
cubehashParam cube;
jh512_4way_context jh;
#if defined(__AES__)
hashState_echo echo;
// hashState_echo echo2;
#else
sph_echo512_context echo;
#endif
sph_gost512_context gost;
skein512_4way_context skein;
} phi2_ctx_holder;
/*
phi2_ctx_holder phi2_ctx;
} phi2_4way_ctx_holder;
void init_phi2_ctx()
phi2_4way_ctx_holder phi2_4way_ctx;
void phi2_4way_hash(void *state, const void *input)
{
cubehashInit( &phi2_ctx.cube, 512, 16, 32 );
sph_jh512_init(&phi2_ctx.jh);
init_echo( &phi2_ctx.echo1, 512 );
init_echo( &phi2_ctx.echo2, 512 );
sph_gost512_init(&phi2_ctx.gost);
sph_skein512_init(&phi2_ctx.skein);
};
*/
void phi2_hash_4way( void *state, const void *input )
{
uint32_t hash[4][16] __attribute__ ((aligned (64)));
uint32_t hashA[4][16] __attribute__ ((aligned (64)));
uint32_t hashB[4][16] __attribute__ ((aligned (64)));
uint32_t vhash[4*16] __attribute__ ((aligned (64)));
unsigned char _ALIGN(128) hash[64*4];
unsigned char _ALIGN(64) hash0[64];
unsigned char _ALIGN(64) hash1[64];
unsigned char _ALIGN(64) hash2[64];
unsigned char _ALIGN(64) hash3[64];
unsigned char _ALIGN(64) hash0A[64];
unsigned char _ALIGN(64) hash1A[64];
unsigned char _ALIGN(64) hash2A[64];
unsigned char _ALIGN(64) hash3A[64];
const int size = phi2_has_roots ? 144 : 80 ;
phi2_4way_ctx_holder ctx __attribute__ ((aligned (64)));
// unsigned char _ALIGN(128) hash[64];
// unsigned char _ALIGN(128) hashA[64];
// unsigned char _ALIGN(128) hashB[64];
cubehash_full( &ctx.cube, (byte*)hash0A, 512,
(const byte*)input, size );
cubehash_full( &ctx.cube, (byte*)hash1A, 512,
(const byte*)input + 144, size );
cubehash_full( &ctx.cube, (byte*)hash2A, 512,
(const byte*)input + 2*144, size );
cubehash_full( &ctx.cube, (byte*)hash3A, 512,
(const byte*)input + 3*144, size );
LYRA2RE( &hash0[ 0], 32, hash0A, 32, hash0A, 32, 1, 8, 8 );
LYRA2RE( &hash0[32], 32, hash0A+32, 32, hash0A+32, 32, 1, 8, 8 );
LYRA2RE( &hash1[ 0], 32, hash1A, 32, hash1A, 32, 1, 8, 8 );
LYRA2RE( &hash1[32], 32, hash1A+32, 32, hash1A+32, 32, 1, 8, 8 );
LYRA2RE( &hash2[ 0], 32, hash2A, 32, hash2A, 32, 1, 8, 8 );
LYRA2RE( &hash2[32], 32, hash2A+32, 32, hash2A+32, 32, 1, 8, 8 );
LYRA2RE( &hash3[ 0], 32, hash3A, 32, hash3A, 32, 1, 8, 8 );
LYRA2RE( &hash3[32], 32, hash3A+32, 32, hash3A+32, 32, 1, 8, 8 );
phi2_ctx_holder ctx __attribute__ ((aligned (64)));
// memcpy( &ctx, &phi2_ctx, sizeof(phi2_ctx) );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*)hashB[0], (const byte*)input,
phi2_has_roots ? 144 : 80 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*)hashB[1], (const byte*)input+144,
phi2_has_roots ? 144 : 80 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*)hashB[2], (const byte*)input+288,
phi2_has_roots ? 144 : 80 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*)hashB[3], (const byte*)input+432,
phi2_has_roots ? 144 : 80 );
LYRA2RE( &hashA[0][0], 32, &hashB[0][0], 32, &hashB[0][0], 32, 1, 8, 8 );
LYRA2RE( &hashA[0][8], 32, &hashB[0][8], 32, &hashB[0][8], 32, 1, 8, 8 );
LYRA2RE( &hashA[1][0], 32, &hashB[1][0], 32, &hashB[1][0], 32, 1, 8, 8 );
LYRA2RE( &hashA[1][8], 32, &hashB[1][8], 32, &hashB[1][8], 32, 1, 8, 8 );
LYRA2RE( &hashA[2][0], 32, &hashB[2][0], 32, &hashB[2][0], 32, 1, 8, 8 );
LYRA2RE( &hashA[2][8], 32, &hashB[2][8], 32, &hashB[2][8], 32, 1, 8, 8 );
LYRA2RE( &hashA[3][0], 32, &hashB[3][0], 32, &hashB[3][0], 32, 1, 8, 8 );
LYRA2RE( &hashA[3][8], 32, &hashB[3][8], 32, &hashB[3][8], 32, 1, 8, 8 );
intrlv_4x64( vhash, hashA[0], hashA[1], hashA[2], hashA[3], 512 );
intrlv_4x64_512( hash, hash0, hash1, hash2, hash3 );
jh512_4way_init( &ctx.jh );
jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhash );
jh512_4way_update( &ctx.jh, (const void*)hash, 64 );
jh512_4way_close( &ctx.jh, (void*)hash );
dintrlv_4x64( hash[0], hash[1], hash[2], hash[3], vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, hash );
if ( hash[0][0] & 1 )
if ( hash0[0] & 1 )
{
sph_gost512_init( &ctx.gost );
sph_gost512( &ctx.gost, (const void*)hash[0], 64 );
sph_gost512_close( &ctx.gost, (void*)hash[0] );
sph_gost512( &ctx.gost, (const void*)hash0, 64 );
sph_gost512_close( &ctx.gost, (void*)hash0 );
}
else
{
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash[0],
(const BitSequence *)hash[0], 512 );
echo_full( &ctx.echo, (BitSequence *)hash0, 512,
(const BitSequence *)hash0, 64 );
}
if ( hash[1][0] & 1 )
if ( hash1[0] & 1 )
{
sph_gost512_init( &ctx.gost );
sph_gost512( &ctx.gost, (const void*)hash[1], 64 );
sph_gost512_close( &ctx.gost, (void*)hash[1] );
sph_gost512( &ctx.gost, (const void*)hash1, 64 );
sph_gost512_close( &ctx.gost, (void*)hash1 );
}
else
{
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash[1],
(const BitSequence *)hash[1], 512 );
echo_full( &ctx.echo, (BitSequence *)hash1, 512,
(const BitSequence *)hash1, 64 );
}
if ( hash[2][0] & 1 )
if ( hash2[0] & 1 )
{
sph_gost512_init( &ctx.gost );
sph_gost512( &ctx.gost, (const void*)hash[2], 64 );
sph_gost512_close( &ctx.gost, (void*)hash[2] );
sph_gost512( &ctx.gost, (const void*)hash2, 64 );
sph_gost512_close( &ctx.gost, (void*)hash2 );
}
else
{
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash[2],
(const BitSequence *)hash[2], 512 );
echo_full( &ctx.echo, (BitSequence *)hash2, 512,
(const BitSequence *)hash2, 64 );
}
if ( hash[3][0] & 1 )
if ( hash3[0] & 1 )
{
sph_gost512_init( &ctx.gost );
sph_gost512( &ctx.gost, (const void*)hash[3], 64 );
sph_gost512_close( &ctx.gost, (void*)hash[3] );
sph_gost512( &ctx.gost, (const void*)hash3, 64 );
sph_gost512_close( &ctx.gost, (void*)hash3 );
}
else
{
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash[3],
(const BitSequence *)hash[3], 512 );
echo_full( &ctx.echo, (BitSequence *)hash3, 512,
(const BitSequence *)hash3, 64 );
}
intrlv_4x64( vhash, hash[0], hash[1], hash[2], hash[3], 512 );
intrlv_4x64_512( hash, hash0, hash1, hash2, hash3 );
skein512_4way_init( &ctx.skein );
skein512_4way( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhash );
skein512_4way_update( &ctx.skein, (const void*)hash, 64 );
skein512_4way_close( &ctx.skein, (void*)hash );
for (int i=0; i<4; i++)
{
( (uint64_t*)vhash )[i] ^= ( (uint64_t*)vhash )[i+4];
( (uint64_t*)vhash + 8 )[i] ^= ( (uint64_t*)vhash + 8 )[i+4];
( (uint64_t*)vhash + 16 )[i] ^= ( (uint64_t*)vhash + 16 )[i+4];
( (uint64_t*)vhash + 24 )[i] ^= ( (uint64_t*)vhash + 24 )[i+4];
}
// for ( int i = 0; i < 4; i++ )
// casti_m256i( vhash, i ) = _mm256_xor_si256( casti_m256i( vhash, i ),
// casti_m256i( vhash, i+4 ) );
memcpy( state, vhash, 128 );
for ( int i = 0; i < 4; i++ )
casti_m256i( state, i ) = _mm256_xor_si256( casti_m256i( hash, i ),
casti_m256i( hash, i+4 ) );
}
int scanhash_phi2_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(128) hash[8];
uint32_t _ALIGN(128) edata[36];
uint32_t vdata[4][36] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[25]);
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t _ALIGN(128) hash[16*4];
uint32_t _ALIGN(128) edata[36*4];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t *hash7 = &(hash[25]); // 3*8+1
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
uint32_t n = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
if(opt_benchmark){
ptarget[7] = 0x00ff;
}
// Data is not interleaved, but the hash is.
// Any non-zero data at index 20 or above sets roots true.
// Split the operations: bswap first, then set roots.
phi2_has_roots = false;
for ( int i=0; i < 36; i++ )
{
be32enc(&edata[i], pdata[i]);
if (i >= 20 && pdata[i]) phi2_has_roots = true;
}
/*
casti_m256i( vdata[0], 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) );
casti_m256i( vdata[0], 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) );
casti_m256i( vdata[0], 2 ) = mm256_bswap_32( casti_m256i( pdata, 2 ) );
casti_m256i( vdata[0], 3 ) = mm256_bswap_32( casti_m256i( pdata, 3 ) );
casti_m128i( vdata[0], 8 ) = mm128_bswap_32( casti_m128i( pdata, 8 ) );
phi2_has_roots = mm128_anybits1( casti_m128i( vdata[0], 5 ) ) ||
mm128_anybits1( casti_m128i( vdata[0], 6 ) ) ||
mm128_anybits1( casti_m128i( vdata[0], 7 ) ) ||
mm128_anybits1( casti_m128i( vdata[0], 8 ) );
*/
memcpy( vdata[0], edata, 144 );
memcpy( vdata[1], edata, 144 );
memcpy( vdata[2], edata, 144 );
memcpy( vdata[3], edata, 144 );
do {
be32enc( &vdata[0][19], n );
be32enc( &vdata[1][19], n+1 );
be32enc( &vdata[2][19], n+2 );
be32enc( &vdata[3][19], n+3 );
phi2_hash_4way( hash, vdata );
for ( int lane = 0; lane < 4; lane++ ) if ( hash7[ lane<<1 ] < Htarg )
{
extr_lane_4x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 4;
} while ( ( n < max_nonce - 4 ) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;
return 0;
}
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
if ( bench ) ptarget[7] = 0x00ff;
#endif // PHI2_4WAY
phi2_has_roots = false;
for ( int i = 0; i < 36; i++ )
{
be32enc( &edata[i], pdata[i] );
edata[ i+36 ] = edata[ i+72 ] = edata[ i+108 ] = edata[i];
if ( i >= 20 && pdata[i] ) phi2_has_roots = true;
}
edata[ 19 ] = n;
edata[ 36 + 19 ] = n+1;
edata[ 2*36 + 19 ] = n+2;
edata[ 3*36 + 19 ] = n+3;
do {
phi2_4way_hash( hash, edata );
for ( int lane = 0; lane < 4; lane++ )
if ( unlikely( hash7[ lane<<1 ] <= Htarg && !bench ) )
{
uint64_t _ALIGN(64) lane_hash[8];
extr_lane_4x64( lane_hash, hash, lane, 256 );
if ( valid_hash( lane_hash, ptarget ) )
{
be32enc( pdata + 19, n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
edata[ 19 ] += 4;
edata[ 36 + 19 ] += 4;
edata[ 2*36 + 19 ] += 4;
edata[ 3*36 + 19 ] += 4;
n += 4;
} while ( ( n < last_nonce ) && !work_restart[thr_id].restart );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#endif
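The branch in the middle of phi2 is data-dependent per lane: bit 0 of the first byte of the JH-512 output selects GOST-512 or Echo-512, which is why the 4-way code has to de-interleave before this step. A minimal scalar sketch of the selection, assuming hypothetical one-shot wrappers gost512() and echo512() (the miner itself uses sph_gost512* and echo_full):

/* Hedged sketch of phi2's per-lane branch; gost512() and echo512() are
   hypothetical one-shot wrappers, not the miner's real entry points. */
static void phi2_branch( unsigned char hash[64] )
{
    if ( hash[0] & 1 )              /* low bit of the JH-512 output */
        gost512( hash, hash, 64 );  /* odd lanes run GOST-512 */
    else
        echo512( hash, hash, 64 );  /* even lanes run Echo-512 */
}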

View File

@@ -99,7 +99,6 @@ int scanhash_phi2( struct work *work, uint32_t max_nonce,
uint32_t _ALIGN(128) edata[36];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
const int thr_id = mythr->id;

View File

@@ -311,7 +311,7 @@ bool register_m7m_algo( algo_gate_t *gate )
{
gate->optimizations = SHA_OPT;
init_m7m_ctx();
gate->scanhash = (void*)scanhash_m7m_hash;
gate->scanhash = (void*)&scanhash_m7m_hash;
gate->build_stratum_request = (void*)&std_be_build_stratum_request;
gate->work_decode = (void*)&std_be_work_decode;
gate->submit_getwork_result = (void*)&std_be_submit_getwork_result;
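The only functional content of this hunk is cosmetic: in C a function designator decays to a pointer, so scanhash_m7m_hash and &scanhash_m7m_hash yield the same value, and the edit merely makes the gate table use the explicit address-of form consistently. A tiny standalone check:

#include <stdio.h>

static int inc( int x ) { return x + 1; }

int main( void )
{
    int (*p)(int) = inc;     /* implicit decay to a function pointer */
    int (*q)(int) = &inc;    /* explicit address-of: same pointer    */
    printf( "%d\n", p == q );   /* prints 1 */
    return 0;
}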

View File

@@ -158,7 +158,7 @@ void zr5_get_new_work( struct work* work, struct work* g_work, int thr_id,
{
// ignore POK in first word
const int wkcmp_sz = 72; // (19-1) * sizeof(uint32_t)
uint32_t *nonceptr = algo_gate.get_nonceptr( work->data );
uint32_t *nonceptr = work->data + algo_gate.nonce_index;
if ( memcmp( &work->data[1], &g_work->data[1], wkcmp_sz )
|| ( *nonceptr >= *end_nonce_ptr ) )
{
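Replacing the gate call with direct arithmetic assumes the default get_nonceptr only ever returned work->data + nonce_index; under that assumption the two forms are interchangeable and the rewrite just drops one indirect call. A sketch of the equivalence:

#include <stdint.h>

#define NONCE_INDEX 19   /* nonce word of an 80-byte block header */

/* old indirect form, assuming this was all the gate function did */
static inline uint32_t *get_nonceptr( uint32_t *data )
{
    return data + NONCE_INDEX;
}
/* new direct form: uint32_t *nonceptr = data + NONCE_INDEX; */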

View File

@@ -1,18 +1,241 @@
#include "cpuminer-config.h"
#include "anime-gate.h"
#if defined (ANIME_4WAY)
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "algo/blake/blake-hash-4way.h"
#include "algo/bmw/bmw-hash-4way.h"
#include "algo/skein/skein-hash-4way.h"
#include "algo/jh/jh-hash-4way.h"
#include "algo/keccak/keccak-hash-4way.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#if defined(__VAES__)
#include "algo/groestl/groestl512-hash-4way.h"
#endif
#if defined (ANIME_8WAY)
typedef struct {
blake512_8way_context blake;
bmw512_8way_context bmw;
#if defined(__VAES__)
groestl512_4way_context groestl;
#else
hashState_groestl groestl;
#endif
jh512_8way_context jh;
skein512_8way_context skein;
keccak512_8way_context keccak;
} anime_8way_ctx_holder;
anime_8way_ctx_holder anime_8way_ctx __attribute__ ((aligned (64)));
void init_anime_8way_ctx()
{
blake512_8way_init( &anime_8way_ctx.blake );
bmw512_8way_init( &anime_8way_ctx.bmw );
#if defined(__VAES__)
groestl512_4way_init( &anime_8way_ctx.groestl, 64 );
#else
init_groestl( &anime_8way_ctx.groestl, 64 );
#endif
skein512_8way_init( &anime_8way_ctx.skein );
jh512_8way_init( &anime_8way_ctx.jh );
keccak512_8way_init( &anime_8way_ctx.keccak );
}
void anime_8way_hash( void *state, const void *input )
{
uint64_t vhash[8*8] __attribute__ ((aligned (128)));
uint64_t vhashA[8*8] __attribute__ ((aligned (64)));
uint64_t vhashB[8*8] __attribute__ ((aligned (64)));
uint64_t vhashC[8*8] __attribute__ ((aligned (64)));
#if !defined(__VAES__)
uint64_t hash0[8] __attribute__ ((aligned (64)));
uint64_t hash1[8] __attribute__ ((aligned (64)));
uint64_t hash2[8] __attribute__ ((aligned (64)));
uint64_t hash3[8] __attribute__ ((aligned (64)));
uint64_t hash4[8] __attribute__ ((aligned (64)));
uint64_t hash5[8] __attribute__ ((aligned (64)));
uint64_t hash6[8] __attribute__ ((aligned (64)));
uint64_t hash7[8] __attribute__ ((aligned (64)));
#endif
__m512i* vh = (__m512i*)vhash;
__m512i* vhA = (__m512i*)vhashA;
__m512i* vhB = (__m512i*)vhashB;
__m512i* vhC = (__m512i*)vhashC;
const __m512i bit3_mask = m512_const1_64( 8 );
const __m512i zero = _mm512_setzero_si512();
__mmask8 vh_mask;
anime_8way_ctx_holder ctx;
memcpy( &ctx, &anime_8way_ctx, sizeof(anime_8way_ctx) );
bmw512_8way_full( &ctx.bmw, vhash, input, 80 );
blake512_8way_full( &ctx.blake, vhash, vhash, 64 );
vh_mask = _mm512_cmpeq_epi64_mask( _mm512_and_si512( vh[0], bit3_mask ),
zero );
#if defined(__VAES__)
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
if ( ( vh_mask & 0x0f ) != 0x0f )
groestl512_4way_full( &ctx.groestl, vhashA, vhashA, 64 );
if ( ( vh_mask & 0xf0 ) != 0xf0 )
groestl512_4way_full( &ctx.groestl, vhashB, vhashB, 64 );
rintrlv_4x128_8x64( vhashC, vhashA, vhashB, 512 );
#else
dintrlv_8x64_512( hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7, vhash );
if ( hash0[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
if ( hash1[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
if ( hash2[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
if ( hash3[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
if ( hash4[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash4, (char*)hash4, 512 );
if ( hash5[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash5, (char*)hash5, 512 );
if ( hash6[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash6, (char*)hash6, 512 );
if ( hash7[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash7, (char*)hash7, 512 );
intrlv_8x64_512( vhashC, hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7 );
#endif
if ( vh_mask & 0xff )
skein512_8way_full( &ctx.skein, vhashB, vhash, 64 );
mm512_blend_hash_8x64( vh, vhC, vhB, vh_mask );
#if defined(__VAES__)
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
groestl512_4way_full( &ctx.groestl, vhashA, vhashA, 64 );
groestl512_4way_full( &ctx.groestl, vhashB, vhashB, 64 );
rintrlv_4x128_8x64( vhash, vhashA, vhashB, 512 );
#else
dintrlv_8x64_512( hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7, vhash );
groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
groestl512_full( &ctx.groestl, (char*)hash4, (char*)hash4, 512 );
groestl512_full( &ctx.groestl, (char*)hash5, (char*)hash5, 512 );
groestl512_full( &ctx.groestl, (char*)hash6, (char*)hash6, 512 );
groestl512_full( &ctx.groestl, (char*)hash7, (char*)hash7, 512 );
intrlv_8x64_512( vhash, hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7 );
#endif
jh512_8way_init( &ctx.jh );
jh512_8way_update( &ctx.jh, vhash, 64 );
jh512_8way_close( &ctx.jh, vhash );
vh_mask = _mm512_cmpeq_epi64_mask( _mm512_and_si512( vh[0], bit3_mask ),
zero );
if ( ( vh_mask & 0xff ) != 0xff )
blake512_8way_full( &ctx.blake, vhashA, vhash, 64 );
if ( vh_mask & 0xff )
bmw512_8way_full( &ctx.bmw, vhashB, vhash, 64 );
mm512_blend_hash_8x64( vh, vhA, vhB, vh_mask );
keccak512_8way_init( &ctx.keccak );
keccak512_8way_update( &ctx.keccak, vhash, 64 );
keccak512_8way_close( &ctx.keccak, vhash );
skein512_8way_full( &ctx.skein, vhash, vhash, 64 );
vh_mask = _mm512_cmpeq_epi64_mask( _mm512_and_si512( vh[0], bit3_mask ),
zero );
if ( ( vh_mask & 0xff ) != 0xff )
{
keccak512_8way_init( &ctx.keccak );
keccak512_8way_update( &ctx.keccak, vhash, 64 );
keccak512_8way_close( &ctx.keccak, vhashA );
}
if ( vh_mask & 0xff )
{
jh512_8way_init( &ctx.jh );
jh512_8way_update( &ctx.jh, vhash, 64 );
jh512_8way_close( &ctx.jh, vhashB );
}
casti_m512i( state,0 ) = _mm512_mask_blend_epi64( vh_mask, vhA[0], vhB[0] );
casti_m512i( state,1 ) = _mm512_mask_blend_epi64( vh_mask, vhA[1], vhB[1] );
casti_m512i( state,2 ) = _mm512_mask_blend_epi64( vh_mask, vhA[2], vhB[2] );
casti_m512i( state,3 ) = _mm512_mask_blend_epi64( vh_mask, vhA[3], vhB[3] );
}
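With eight 64-bit lanes packed in one __m512i, the scalar per-lane test (hash[0] & 8) collapses into a single vector compare returning an 8-bit __mmask8, one bit per lane; the branches above then fire only for the lane subsets that actually need each algorithm. A minimal sketch of the predicate, using the same AVX-512 intrinsics as the code above:

#include <immintrin.h>
#include <stdint.h>

/* Returns one bit per lane, set when bit 3 of that lane's low quadword is 0. */
static inline __mmask8 bit3_clear_mask( const uint64_t vhash[8] )
{
    const __m512i v = _mm512_loadu_si512( (const void*)vhash );
    return _mm512_cmpeq_epi64_mask( _mm512_and_si512( v, _mm512_set1_epi64( 8 ) ),
                                    _mm512_setzero_si512() );
}
/* ( mask & 0xff ) != 0xff  =>  at least one lane has bit 3 set
   ( mask & 0xff ) != 0     =>  at least one lane has bit 3 clear */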
int scanhash_anime_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint64_t hash64[4*8] __attribute__ ((aligned (64)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint64_t *hash64_q3 = &(hash64[3*8]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint64_t targ64_q3 = ((uint64_t*)ptarget)[3];
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
__m512i *noncev = (__m512i*)vdata + 9;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
*noncev = mm512_intrlv_blend_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ), *noncev );
do
{
anime_8way_hash( hash64, vdata );
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( hash64_q3[ lane ] <= targ64_q3 && !bench ) )
{
extr_lane_8x64( lane_hash, hash64, lane, 256 );
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
*noncev = _mm512_add_epi32( *noncev,
m512_const1_64( 0x0000000800000000 ) );
n += 8;
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
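hash64_q3 and targ64_q3 implement a cheap prefilter: the 256-bit hash is stored as four little-endian quadwords, so quadword 3 is the most significant and hash <= target is impossible whenever hash_q3 > targ_q3. Only survivors pay for the lane extraction and the full compare. Conceptually, the check valid_hash performs reduces to:

#include <stdint.h>
#include <stdbool.h>

/* Full 256-bit compare, most significant quadword first; the scan loop's
   hash64_q3[lane] <= targ64_q3 test is just the i == 3 step done early. */
static bool hash_le_target( const uint64_t h[4], const uint64_t t[4] )
{
    for ( int i = 3; i >= 0; i-- )
    {
        if ( h[i] < t[i] ) return true;
        if ( h[i] > t[i] ) return false;
    }
    return true;   /* equal still meets the target */
}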
#elif defined (ANIME_4WAY)
typedef struct {
blake512_4way_context blake;
@@ -23,18 +246,6 @@ typedef struct {
keccak512_4way_context keccak;
} anime_4way_ctx_holder;
anime_4way_ctx_holder anime_4way_ctx __attribute__ ((aligned (64)));
void init_anime_4way_ctx()
{
blake512_4way_init( &anime_4way_ctx.blake );
bmw512_4way_init( &anime_4way_ctx.bmw );
init_groestl( &anime_4way_ctx.groestl, 64 );
skein512_4way_init( &anime_4way_ctx.skein );
jh512_4way_init( &anime_4way_ctx.jh );
keccak512_4way_init( &anime_4way_ctx.keccak );
}
void anime_4way_hash( void *state, const void *input )
{
uint64_t hash0[8] __attribute__ ((aligned (64)));
@@ -48,81 +259,61 @@ void anime_4way_hash( void *state, const void *input )
__m256i* vhA = (__m256i*)vhashA;
__m256i* vhB = (__m256i*)vhashB;
__m256i vh_mask;
const uint32_t mask = 8;
int h_mask;
const __m256i bit3_mask = m256_const1_64( 8 );
const __m256i zero = _mm256_setzero_si256();
anime_4way_ctx_holder ctx;
memcpy( &ctx, &anime_4way_ctx, sizeof(anime_4way_ctx) );
bmw512_4way_init( &ctx.bmw );
bmw512_4way_update( &ctx.bmw, input, 80 );
bmw512_4way_close( &ctx.bmw, vhash );
blake512_4way_update( &ctx.blake, vhash, 64 );
blake512_4way_close( &ctx.blake, vhash );
blake512_4way_full( &ctx.blake, vhash, vhash, 64 );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero );
h_mask = _mm256_movemask_epi8( vh_mask );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
if ( hash0[0] & mask )
{
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(char*)hash0, 512 );
}
if ( hash1[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash1,
(char*)hash1, 512 );
}
if ( hash2[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash2,
(char*)hash2, 512 );
}
if ( hash3[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(char*)hash3, 512 );
}
// A
if ( hash0[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
if ( hash1[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
if ( hash2[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
if ( hash3[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
if ( mm256_anybits0( vh_mask ) )
{
skein512_4way_update( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhashB );
}
// B
if ( h_mask & 0xffffffff )
skein512_4way_full( &ctx.skein, vhashB, vhash, 64 );
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
jh512_4way_init( &ctx.jh );
jh512_4way_update( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhash );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero );
h_mask = _mm256_movemask_epi8( vh_mask );
if ( mm256_anybits1( vh_mask ) )
{
blake512_4way_init( &ctx.blake );
blake512_4way_update( &ctx.blake, vhash, 64 );
blake512_4way_close( &ctx.blake, vhashA );
}
if ( mm256_anybits0( vh_mask ) )
// A
if ( ( h_mask & 0xffffffff ) != 0xffffffff )
blake512_4way_full( &ctx.blake, vhashA, vhash, 64 );
// B
if ( h_mask & 0xffffffff )
{
bmw512_4way_init( &ctx.bmw );
bmw512_4way_update( &ctx.bmw, vhash, 64 );
@@ -131,90 +322,76 @@ void anime_4way_hash( void *state, const void *input )
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );
keccak512_4way_init( &ctx.keccak );
keccak512_4way_update( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
skein512_4way_init( &ctx.skein );
skein512_4way_update( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhash );
skein512_4way_full( &ctx.skein, vhash, vhash, 64 );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero );
h_mask = _mm256_movemask_epi8( vh_mask );
if ( mm256_anybits1( vh_mask ) )
// A
if ( ( h_mask & 0xffffffff ) != 0xffffffff )
{
keccak512_4way_init( &ctx.keccak );
keccak512_4way_update( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhashA );
}
if ( mm256_anybits0( vh_mask ) )
// B
if ( h_mask & 0xffffffff )
{
jh512_4way_init( &ctx.jh );
jh512_4way_update( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhashB );
}
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );
dintrlv_4x64( state, state+32, state+64, state+96, vhash, 256 );
casti_m256i( state, 0 ) = _mm256_blendv_epi8( vhA[0], vhB[0], vh_mask );
casti_m256i( state, 1 ) = _mm256_blendv_epi8( vhA[1], vhB[1], vh_mask );
casti_m256i( state, 2 ) = _mm256_blendv_epi8( vhA[2], vhB[2], vh_mask );
casti_m256i( state, 3 ) = _mm256_blendv_epi8( vhA[3], vhB[3], vh_mask );
}
int scanhash_anime_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*8] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint64_t hash64[4*4] __attribute__ ((aligned (64)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint64_t *hash64_q3 = &(hash64[3*4]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint64_t targ64_q3 = ((uint64_t*)ptarget)[3];
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 9; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t Htarg = ptarget[7];
uint64_t htmax[] = {
0,
0xF,
0xFF,
0xFFF,
0xFFFF,
0x10000000
};
uint32_t masks[] = {
0xFFFFFFFF,
0xFFFFFFF0,
0xFFFFFF00,
0xFFFFF000,
0xFFFF0000,
0
};
const uint32_t last_nonce = max_nonce - 4;
__m256i *noncev = (__m256i*)vdata + 9;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
mm256_bswap32_intrlv80_4x64( vdata, pdata );
*noncev = mm256_intrlv_blend_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
anime_4way_hash( hash64, vdata );
for (int m=0; m < 6; m++)
if (Htarg <= htmax[m])
for ( int lane = 0; lane < 4; lane++ )
if ( unlikely( hash64_q3[ lane ] <= targ64_q3 && !bench ) )
{
uint32_t mask = masks[m];
do
extr_lane_4x64( lane_hash, hash64, lane, 256 );
if ( valid_hash( lane_hash, ptarget ) )
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
anime_4way_hash( hash, vdata );
pdata[19] = n;
for ( int i = 0; i < 4; i++ )
if ( ( ( (hash+(i<<3))[7] & mask ) == 0 )
&& fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
}
n += 4;
} while ( ( n < max_nonce ) && !work_restart[thr_id].restart );
break;
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
*hashes_done = n - first_nonce + 1;
*noncev = _mm256_add_epi32( *noncev,
m256_const1_64( 0x0000000400000000 ) );
n += 4;
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
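After mm256_intrlv_blend_32 the four nonces occupy the high 32 bits of each 64-bit element, one per lane, so the whole batch advances with a single 32-bit add of the constant 4<<32; the byte swap moves out of the loop because only the nonce word changes between iterations. A sketch of the increment:

#include <immintrin.h>

/* Advances all four interleaved nonces by 4 in one instruction; the low
   dword of each 64-bit element (other header data) is left untouched. */
static inline __m256i bump_nonces( __m256i noncev )
{
    return _mm256_add_epi32( noncev,
                             _mm256_set1_epi64x( 0x0000000400000000LL ) );
}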

View File

@@ -2,8 +2,10 @@
bool register_anime_algo( algo_gate_t* gate )
{
#if defined (ANIME_4WAY)
init_anime_4way_ctx();
#if defined (ANIME_8WAY)
gate->scanhash = (void*)&scanhash_anime_8way;
gate->hash = (void*)&anime_8way_hash;
#elif defined (ANIME_4WAY)
gate->scanhash = (void*)&scanhash_anime_4way;
gate->hash = (void*)&anime_4way_hash;
#else
@@ -11,7 +13,7 @@ bool register_anime_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_anime;
gate->hash = (void*)&anime_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT | VAES_OPT;
return true;
};

View File

@@ -4,18 +4,25 @@
#include "algo-gate-api.h"
#include <stdint.h>
#if defined(__AVX2__) && defined(__AES__)
#define ANIME_4WAY
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define ANIME_8WAY 1
#elif defined(__AVX2__) && defined(__AES__)
#define ANIME_4WAY 1
#endif
bool register_anime_algo( algo_gate_t* gate );
#if defined(ANIME_4WAY)
#if defined(ANIME_8WAY)
void anime_8way_hash( void *state, const void *input );
int scanhash_anime_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined(ANIME_4WAY)
void anime_4way_hash( void *state, const void *input );
int scanhash_anime_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void init_anime_4way_ctx();
#endif
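The header now grades the build: the 8-way path needs the Skylake-X AVX-512 subset (F, VL, DQ and BW together), the 4-way path needs AVX2 plus AES-NI, and anything older falls back to the scalar code. The same ladder reduced to its skeleton (LANES is an illustrative name, not from the source):

/* Skeleton of the lane-width ladder used by the gate headers. */
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) \
 && defined(__AVX512BW__)
  #define LANES 8   /* 8x64-bit lanes in __m512i */
#elif defined(__AVX2__) && defined(__AES__)
  #define LANES 4   /* 4x64-bit lanes in __m256i */
#else
  #define LANES 1   /* scalar fallback */
#endif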

View File

@@ -126,49 +126,28 @@ int scanhash_anime( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr)
{
uint32_t hash[8] __attribute__ ((aligned (64)));
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t edata[20] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
int thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t Htarg = ptarget[7];
uint64_t htmax[] = {
0,
0xF,
0xFF,
0xFFF,
0xFFFF,
0x10000000
};
uint32_t masks[] = {
0xFFFFFFFF,
0xFFFFFFF0,
0xFFFFFF00,
0xFFFFF000,
0xFFFF0000,
0
};
const int thr_id = mythr->id;
const int bench = opt_benchmark;
swab32_array( edata, pdata, 20 );
swab32_array( endiandata, pdata, 20 );
for (int m=0; m < 6; m++)
if (Htarg <= htmax[m])
{
uint32_t mask = masks[m];
do
{
be32enc( &endiandata[19], n );
anime_hash( hash, endiandata );
pdata[19] = n;
if ( ( hash[7] & mask ) == 0 && fulltest( hash, ptarget ) )
submit_solution( work, hash, mythr );
n++;
} while ( ( n < max_nonce ) && !work_restart[thr_id].restart );
break;
}
*hashes_done = n - first_nonce + 1;
do
{
edata[19] = n;
anime_hash( hash, edata );
if ( valid_hash( hash, ptarget ) && !bench )
{
be32enc( &pdata[19], n );
submit_solution( work, hash, mythr );
}
n++;
} while ( ( n < max_nonce ) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce;
pdata[19] = n;
return 0;
}
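The deleted htmax/masks tables were a pre-check tuned to the target's magnitude: for a target word at or below 0xFF, for example, (hash[7] & 0xFFFFFF00) == 0 rejects almost every candidate with one AND. The rewrite trades that for a single valid_hash call per candidate. The retired trick, in isolation:

#include <stdint.h>

/* Retired prefilter: mask covers the bits that must be zero for hash[7]
   to possibly be <= Htarg, e.g. 0xFFFFFF00 when Htarg <= 0xFF. */
static inline int mask_prefilter( uint32_t hash7, uint32_t mask )
{
    return ( hash7 & mask ) == 0;
}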

File diff suppressed because it is too large

View File

@@ -72,12 +72,10 @@ void quark_8way_hash( void *state, const void *input )
memcpy( &ctx, &quark_8way_ctx, sizeof(quark_8way_ctx) );
blake512_8way_update( &ctx.blake, input, 80 );
blake512_8way_close( &ctx.blake, vhash );
bmw512_8way_update( &ctx.bmw, vhash, 64 );
bmw512_8way_close( &ctx.bmw, vhash );
blake512_8way_full( &ctx.blake, vhash, input, 80 );
bmw512_8way_full( &ctx.bmw, vhash, vhash, 64 );
vh_mask = _mm512_cmpeq_epi64_mask( _mm512_and_si512( vh[0], bit3_mask ),
zero );
@@ -86,70 +84,34 @@ void quark_8way_hash( void *state, const void *input )
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
if ( ( vh_mask & 0x0f ) != 0x0f )
{
groestl512_4way_init( &ctx.groestl, 64 );
groestl512_4way_update_close( &ctx.groestl, vhashA, vhashA, 512 );
}
if ( ( vh_mask & 0xf0 ) != 0xf0 )
{
groestl512_4way_init( &ctx.groestl, 64 );
groestl512_4way_update_close( &ctx.groestl, vhashB, vhashB, 512 );
}
rintrlv_4x128_8x64( vhashC, vhashA, vhashB, 512 );
if ( ( vh_mask & 0x0f ) != 0x0f )
groestl512_4way_full( &ctx.groestl, vhashA, vhashA, 64 );
if ( ( vh_mask & 0xf0 ) != 0xf0 )
groestl512_4way_full( &ctx.groestl, vhashB, vhashB, 64 );
rintrlv_4x128_8x64( vhashC, vhashA, vhashB, 512 );
#else
dintrlv_8x64( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
vhash, 512 );
if ( hash0[0] & mask )
{
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(char*)hash0, 512 );
}
if ( hash1[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash1,
(char*)hash1, 512 );
}
if ( hash2[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash2,
(char*)hash2, 512 );
}
if ( hash3[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(char*)hash3, 512 );
}
if ( hash4[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash4,
(char*)hash4, 512 );
}
if ( hash5[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash5,
(char*)hash5, 512 );
}
if ( hash6[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash6,
(char*)hash6, 512 );
}
if ( hash7[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash7,
(char*)hash7, 512 );
}
if ( hash0[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
if ( hash1[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
if ( hash2[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
if ( hash3[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
if ( hash4[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash4, (char*)hash4, 512 );
if ( hash5[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash5, (char*)hash5, 512 );
if ( hash6[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash6, (char*)hash6, 512 );
if ( hash7[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash7, (char*)hash7, 512 );
intrlv_8x64( vhashC, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, 512 );
@@ -157,10 +119,7 @@ void quark_8way_hash( void *state, const void *input )
#endif
if ( vh_mask & 0xff )
{
skein512_8way_update( &ctx.skein, vhash, 64 );
skein512_8way_close( &ctx.skein, vhashB );
}
skein512_8way_full( &ctx.skein, vhashB, vhash, 64 );
mm512_blend_hash_8x64( vh, vhC, vhB, vh_mask );
@@ -168,10 +127,10 @@ void quark_8way_hash( void *state, const void *input )
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
groestl512_4way_init( &ctx.groestl, 64 );
groestl512_4way_update_close( &ctx.groestl, vhashA, vhashA, 512 );
groestl512_4way_init( &ctx.groestl, 64 );
groestl512_4way_update_close( &ctx.groestl, vhashB, vhashB, 512 );
if ( ( vh_mask & 0x0f ) != 0x0f )
groestl512_4way_full( &ctx.groestl, vhashA, vhashA, 64 );
if ( ( vh_mask & 0xf0 ) != 0xf0 )
groestl512_4way_full( &ctx.groestl, vhashB, vhashB, 64 );
rintrlv_4x128_8x64( vhash, vhashA, vhashB, 512 );
@@ -180,22 +139,22 @@ void quark_8way_hash( void *state, const void *input )
dintrlv_8x64( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
vhash, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash4, (char*)hash4, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash5, (char*)hash5, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash6, (char*)hash6, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash7, (char*)hash7, 512 );
if ( hash0[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
if ( hash1[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
if ( hash2[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
if ( hash3[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
if ( hash4[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash4, (char*)hash4, 512 );
if ( hash5[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash5, (char*)hash5, 512 );
if ( hash6[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash6, (char*)hash6, 512 );
if ( hash7[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash7, (char*)hash7, 512 );
intrlv_8x64( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
512 );
@@ -209,27 +168,16 @@ void quark_8way_hash( void *state, const void *input )
zero );
if ( ( vh_mask & 0xff ) != 0xff )
{
blake512_8way_init( &ctx.blake );
blake512_8way_update( &ctx.blake, vhash, 64 );
blake512_8way_close( &ctx.blake, vhashA );
}
blake512_8way_full( &ctx.blake, vhashA, vhash, 64 );
if ( vh_mask & 0xff )
{
bmw512_8way_init( &ctx.bmw );
bmw512_8way_update( &ctx.bmw, vhash, 64 );
bmw512_8way_close( &ctx.bmw, vhashB );
}
bmw512_8way_full( &ctx.bmw, vhashB, vhash, 64 );
mm512_blend_hash_8x64( vh, vhA, vhB, vh_mask );
keccak512_8way_update( &ctx.keccak, vhash, 64 );
keccak512_8way_close( &ctx.keccak, vhash );
skein512_8way_init( &ctx.skein );
skein512_8way_update( &ctx.skein, vhash, 64 );
skein512_8way_close( &ctx.skein, vhash );
skein512_8way_full( &ctx.skein, vhash, vhash, 64 );
vh_mask = _mm512_cmpeq_epi64_mask( _mm512_and_si512( vh[0], bit3_mask ),
zero );
@@ -258,41 +206,44 @@ void quark_8way_hash( void *state, const void *input )
int scanhash_quark_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[24*8] __attribute__ ((aligned (64)));
uint64_t hash64[4*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[49]);
uint32_t *pdata = work->data;
uint64_t *hash64_q3 = &(hash64[3*8]);
uint32_t *ptarget = work->target;
const uint64_t targ64_q3 = ((uint64_t*)ptarget)[3];
uint32_t *pdata = work->data;
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
__m512i *noncev = (__m512i*)vdata + 9; // aligned
int thr_id = mythr->id;
const uint32_t Htarg = ptarget[7];
const uint32_t last_nonce = max_nonce - 8;
__m512i *noncev = (__m512i*)vdata + 9;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
*noncev = mm512_intrlv_blend_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ), *noncev );
do
{
*noncev = mm512_intrlv_blend_32( mm512_bswap_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ) ), *noncev );
quark_8way_hash( hash64, vdata );
quark_8way_hash( hash, vdata );
pdata[19] = n;
for ( int i = 0; i < 8; i++ )
if ( unlikely( hash7[ i<<1 ] <= Htarg ) )
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( hash64_q3[ lane ] <= targ64_q3 && !bench ) )
{
extr_lane_8x64( lane_hash, hash, i, 256 );
if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
extr_lane_8x64( lane_hash, hash64, lane, 256 );
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = n+i;
submit_lane_solution( work, lane_hash, mythr, i );
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
*noncev = _mm512_add_epi32( *noncev,
m512_const1_64( 0x0000000800000000 ) );
n += 8;
} while ( ( n < max_nonce-8 ) && !work_restart[thr_id].restart );
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
@@ -333,67 +284,47 @@ void quark_4way_hash( void *state, const void *input )
__m256i* vhA = (__m256i*)vhashA;
__m256i* vhB = (__m256i*)vhashB;
__m256i vh_mask;
int h_mask;
quark_4way_ctx_holder ctx;
const __m256i bit3_mask = m256_const1_64( 8 );
const uint32_t mask = 8;
const __m256i zero = _mm256_setzero_si256();
memcpy( &ctx, &quark_4way_ctx, sizeof(quark_4way_ctx) );
blake512_4way_update( &ctx.blake, input, 80 );
blake512_4way_close( &ctx.blake, vhash );
blake512_4way_full( &ctx.blake, vhash, input, 80 );
bmw512_4way_update( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero );
h_mask = _mm256_movemask_epi8( vh_mask );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
if ( hash0[0] & mask )
{
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(char*)hash0, 512 );
}
if ( hash1[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash1,
(char*)hash1, 512 );
}
if ( hash2[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash2,
(char*)hash2, 512 );
}
if ( hash3[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(char*)hash3, 512 );
}
// A
if ( hash0[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
if ( hash1[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
if ( hash2[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
if ( hash3[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
if ( mm256_anybits1( vh_mask ) )
{
skein512_4way_update( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhashB );
}
// B
if ( likely( h_mask & 0xffffffff ) )
skein512_4way_full( &ctx.skein, vhashB, vhash, 64 );
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
@@ -401,15 +332,13 @@ void quark_4way_hash( void *state, const void *input )
jh512_4way_close( &ctx.jh, vhash );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero );
h_mask = _mm256_movemask_epi8( vh_mask );
if ( mm256_anybits0( vh_mask ) )
{
blake512_4way_init( &ctx.blake );
blake512_4way_update( &ctx.blake, vhash, 64 );
blake512_4way_close( &ctx.blake, vhashA );
}
if ( mm256_anybits1( vh_mask ) )
// A
if ( likely( ( h_mask & 0xffffffff ) != 0xffffffff ) )
blake512_4way_full( &ctx.blake, vhashA, vhash, 64 );
// B
if ( likely( h_mask & 0xffffffff ) )
{
bmw512_4way_init( &ctx.bmw );
bmw512_4way_update( &ctx.bmw, vhash, 64 );
@@ -421,20 +350,20 @@ void quark_4way_hash( void *state, const void *input )
keccak512_4way_update( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
skein512_4way_init( &ctx.skein );
skein512_4way_update( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhash );
skein512_4way_full( &ctx.skein, vhash, vhash, 64 );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero );
h_mask = _mm256_movemask_epi8( vh_mask );
if ( mm256_anybits0( vh_mask ) )
// A
if ( likely( ( h_mask & 0xffffffff ) != 0xffffffff ) )
{
keccak512_4way_init( &ctx.keccak );
keccak512_4way_update( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhashA );
}
if ( mm256_anybits1( vh_mask ) )
// B
if ( likely( h_mask & 0xffffffff ) )
{
jh512_4way_init( &ctx.jh );
jh512_4way_update( &ctx.jh, vhash, 64 );
@@ -451,41 +380,44 @@ void quark_4way_hash( void *state, const void *input )
int scanhash_quark_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*8] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint64_t hash64[4*4] __attribute__ ((aligned (64)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[25]);
uint64_t *hash64_q3 = &(hash64[3*4]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint64_t targ64_q3 = ((uint64_t*)ptarget)[3];
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 9; // aligned
int thr_id = mythr->id;
const uint32_t Htarg = ptarget[7];
const uint32_t last_nonce = max_nonce - 4;
__m256i *noncev = (__m256i*)vdata + 9;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
mm256_bswap32_intrlv80_4x64( vdata, pdata );
*noncev = mm256_intrlv_blend_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
quark_4way_hash( hash64, vdata );
quark_4way_hash( hash, vdata );
pdata[19] = n;
for ( int i = 0; i < 4; i++ )
if ( unlikely( hash7[ i<<1 ] <= Htarg ) )
for ( int lane = 0; lane < 4; lane++ )
if ( hash64_q3[ lane ] <= targ64_q3 && !bench )
{
extr_lane_4x64( lane_hash, hash, i, 256 );
if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
extr_lane_4x64( lane_hash, hash64, lane, 256 );
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = n+i;
submit_lane_solution( work, lane_hash, mythr, i );
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
*noncev = _mm256_add_epi32( *noncev,
m256_const1_64( 0x0000000400000000 ) );
n += 4;
} while ( ( n < max_nonce ) && !work_restart[thr_id].restart );
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
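All of these kernels work on interleaved buffers: quadword i of lane l sits at index i*4 + l, so one 256-bit load feeds the same round of all four lanes at once. A scalar model of what intrlv_4x64 does (the real helpers use SIMD shuffles):

#include <stdint.h>

/* Scalar model of 4x64 interleaving; the miner's intrlv_4x64 performs the
   same permutation with vector shuffles. */
static void intrlv_4x64_model( uint64_t *v, const uint64_t *s0,
                               const uint64_t *s1, const uint64_t *s2,
                               const uint64_t *s3, int qwords )
{
    for ( int i = 0; i < qwords; i++ )
    {
        v[ i*4 + 0 ] = s0[i];
        v[ i*4 + 1 ] = s1[i];
        v[ i*4 + 2 ] = s2[i];
        v[ i*4 + 3 ] = s3[i];
    }
}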

View File

@@ -380,7 +380,7 @@ static inline void PBKDF2_SHA256_128_32_8way(uint32_t *tstate,
#endif /* HAVE_SHA256_8WAY */
#if defined(USE_ASM) && defined(__x86_64__)
//#if defined(USE_ASM) && defined(__x86_64__)
#define SCRYPT_MAX_WAYS 12
#define HAVE_SCRYPT_3WAY 1
@@ -394,113 +394,6 @@ void scrypt_core_3way(uint32_t *X, uint32_t *V, int N);
void scrypt_core_6way(uint32_t *X, uint32_t *V, int N);
#endif
#elif defined(USE_ASM) && defined(__i386__)
#define SCRYPT_MAX_WAYS 4
#define scrypt_best_throughput() 1
void scrypt_core(uint32_t *X, uint32_t *V, int N);
#elif defined(USE_ASM) && defined(__arm__) && defined(__APCS_32__)
void scrypt_core(uint32_t *X, uint32_t *V, int N);
#if defined(__ARM_NEON__)
#undef HAVE_SHA256_4WAY
#define SCRYPT_MAX_WAYS 3
#define HAVE_SCRYPT_3WAY 1
#define scrypt_best_throughput() 3
void scrypt_core_3way(uint32_t *X, uint32_t *V, int N);
#endif
#else
static inline void xor_salsa8(uint32_t B[16], const uint32_t Bx[16])
{
uint32_t x00,x01,x02,x03,x04,x05,x06,x07,x08,x09,x10,x11,x12,x13,x14,x15;
int i;
x00 = (B[ 0] ^= Bx[ 0]);
x01 = (B[ 1] ^= Bx[ 1]);
x02 = (B[ 2] ^= Bx[ 2]);
x03 = (B[ 3] ^= Bx[ 3]);
x04 = (B[ 4] ^= Bx[ 4]);
x05 = (B[ 5] ^= Bx[ 5]);
x06 = (B[ 6] ^= Bx[ 6]);
x07 = (B[ 7] ^= Bx[ 7]);
x08 = (B[ 8] ^= Bx[ 8]);
x09 = (B[ 9] ^= Bx[ 9]);
x10 = (B[10] ^= Bx[10]);
x11 = (B[11] ^= Bx[11]);
x12 = (B[12] ^= Bx[12]);
x13 = (B[13] ^= Bx[13]);
x14 = (B[14] ^= Bx[14]);
x15 = (B[15] ^= Bx[15]);
for (i = 0; i < 8; i += 2) {
#define R(a, b) (((a) << (b)) | ((a) >> (32 - (b))))
/* Operate on columns. */
x04 ^= R(x00+x12, 7); x09 ^= R(x05+x01, 7);
x14 ^= R(x10+x06, 7); x03 ^= R(x15+x11, 7);
x08 ^= R(x04+x00, 9); x13 ^= R(x09+x05, 9);
x02 ^= R(x14+x10, 9); x07 ^= R(x03+x15, 9);
x12 ^= R(x08+x04,13); x01 ^= R(x13+x09,13);
x06 ^= R(x02+x14,13); x11 ^= R(x07+x03,13);
x00 ^= R(x12+x08,18); x05 ^= R(x01+x13,18);
x10 ^= R(x06+x02,18); x15 ^= R(x11+x07,18);
/* Operate on rows. */
x01 ^= R(x00+x03, 7); x06 ^= R(x05+x04, 7);
x11 ^= R(x10+x09, 7); x12 ^= R(x15+x14, 7);
x02 ^= R(x01+x00, 9); x07 ^= R(x06+x05, 9);
x08 ^= R(x11+x10, 9); x13 ^= R(x12+x15, 9);
x03 ^= R(x02+x01,13); x04 ^= R(x07+x06,13);
x09 ^= R(x08+x11,13); x14 ^= R(x13+x12,13);
x00 ^= R(x03+x02,18); x05 ^= R(x04+x07,18);
x10 ^= R(x09+x08,18); x15 ^= R(x14+x13,18);
#undef R
}
B[ 0] += x00;
B[ 1] += x01;
B[ 2] += x02;
B[ 3] += x03;
B[ 4] += x04;
B[ 5] += x05;
B[ 6] += x06;
B[ 7] += x07;
B[ 8] += x08;
B[ 9] += x09;
B[10] += x10;
B[11] += x11;
B[12] += x12;
B[13] += x13;
B[14] += x14;
B[15] += x15;
}
static inline void scrypt_core(uint32_t *X, uint32_t *V, int N)
{
int i;
for (i = 0; i < N; i++) {
memcpy(&V[i * 32], X, 128);
xor_salsa8(&X[0], &X[16]);
xor_salsa8(&X[16], &X[0]);
}
for (i = 0; i < N; i++) {
uint32_t j = 32 * (X[16] & (N - 1));
for (uint8_t k = 0; k < 32; k++)
X[k] ^= V[j + k];
xor_salsa8(&X[0], &X[16]);
xor_salsa8(&X[16], &X[0]);
}
}
#endif
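The second loop above is the sequential-memory-hard step: X[16] is the first word of the second Salsa block, and because N is a power of two, X[16] & (N - 1) picks one of the N stored 128-byte states pseudo-randomly, forcing the whole scratchpad to stay resident. A usage sketch for the scalar core (the real scrypt_buffer_alloc over-allocates by 63 bytes so V can be 64-byte aligned; plain malloc suffices for the scalar path):

#include <stdint.h>
#include <stdlib.h>

/* Usage sketch: N = 1024 (Litecoin-style scrypt) needs a 128*N-byte
   scratchpad; X holds the two 16-word Salsa blocks and is hashed in place. */
static int scrypt_core_demo( uint32_t X[32] )
{
    const int N = 1024;
    uint32_t *V = malloc( (size_t)N * 32 * sizeof(uint32_t) );
    if ( !V ) return -1;
    scrypt_core( X, V, N );
    free( V );
    return 0;
}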
#ifndef SCRYPT_MAX_WAYS
#define SCRYPT_MAX_WAYS 1
#define scrypt_best_throughput() 1
@@ -511,8 +404,8 @@ unsigned char *scrypt_buffer_alloc(int N)
return (uchar*) malloc((size_t)N * SCRYPT_MAX_WAYS * 128 + 63);
}
static void scrypt_1024_1_1_256(const uint32_t *input, uint32_t *output,
uint32_t *midstate, unsigned char *scratchpad, int N)
static bool scrypt_1024_1_1_256(const uint32_t *input, uint32_t *output,
uint32_t *midstate, unsigned char *scratchpad, int N, int thr_id )
{
uint32_t tstate[8], ostate[8];
uint32_t X[32];
@@ -527,11 +420,13 @@ static void scrypt_1024_1_1_256(const uint32_t *input, uint32_t *output,
scrypt_core(X, V, N);
PBKDF2_SHA256_128_32(tstate, ostate, X, output);
return true;
}
#ifdef HAVE_SHA256_4WAY
static void scrypt_1024_1_1_256_4way(const uint32_t *input,
uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N)
static int scrypt_1024_1_1_256_4way(const uint32_t *input,
uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N,
int thrid )
{
uint32_t _ALIGN(128) tstate[4 * 8];
uint32_t _ALIGN(128) ostate[4 * 8];
@@ -545,32 +440,47 @@ static void scrypt_1024_1_1_256_4way(const uint32_t *input,
for (i = 0; i < 20; i++)
for (k = 0; k < 4; k++)
W[4 * i + k] = input[k * 20 + i];
for (i = 0; i < 8; i++)
for (i = 0; i < 8; i++)
for (k = 0; k < 4; k++)
tstate[4 * i + k] = midstate[i];
HMAC_SHA256_80_init_4way(W, tstate, ostate);
PBKDF2_SHA256_80_128_4way(tstate, ostate, W, W);
for (i = 0; i < 32; i++)
HMAC_SHA256_80_init_4way(W, tstate, ostate);
PBKDF2_SHA256_80_128_4way(tstate, ostate, W, W);
if ( work_restart[thrid].restart ) return 0;
for (i = 0; i < 32; i++)
for (k = 0; k < 4; k++)
X[k * 32 + i] = W[4 * i + k];
scrypt_core(X + 0 * 32, V, N);
scrypt_core(X + 0 * 32, V, N);
scrypt_core(X + 1 * 32, V, N);
scrypt_core(X + 2 * 32, V, N);
scrypt_core(X + 3 * 32, V, N);
for (i = 0; i < 32; i++)
if ( work_restart[thrid].restart ) return 0;
for (i = 0; i < 32; i++)
for (k = 0; k < 4; k++)
W[4 * i + k] = X[k * 32 + i];
PBKDF2_SHA256_128_32_4way(tstate, ostate, W, W);
for (i = 0; i < 8; i++)
PBKDF2_SHA256_128_32_4way(tstate, ostate, W, W);
for (i = 0; i < 8; i++)
for (k = 0; k < 4; k++)
output[k * 8 + i] = W[4 * i + k];
return 1;
}
#endif /* HAVE_SHA256_4WAY */
#ifdef HAVE_SCRYPT_3WAY
static void scrypt_1024_1_1_256_3way(const uint32_t *input,
uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N)
static int scrypt_1024_1_1_256_3way(const uint32_t *input,
uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N,
int thrid )
{
uint32_t _ALIGN(64) tstate[3 * 8], ostate[3 * 8];
uint32_t _ALIGN(64) X[3 * 32];
@@ -581,23 +491,34 @@ static void scrypt_1024_1_1_256_3way(const uint32_t *input,
memcpy(tstate + 0, midstate, 32);
memcpy(tstate + 8, midstate, 32);
memcpy(tstate + 16, midstate, 32);
HMAC_SHA256_80_init(input + 0, tstate + 0, ostate + 0);
HMAC_SHA256_80_init(input + 0, tstate + 0, ostate + 0);
HMAC_SHA256_80_init(input + 20, tstate + 8, ostate + 8);
HMAC_SHA256_80_init(input + 40, tstate + 16, ostate + 16);
PBKDF2_SHA256_80_128(tstate + 0, ostate + 0, input + 0, X + 0);
if ( work_restart[thrid].restart ) return 0;
PBKDF2_SHA256_80_128(tstate + 0, ostate + 0, input + 0, X + 0);
PBKDF2_SHA256_80_128(tstate + 8, ostate + 8, input + 20, X + 32);
PBKDF2_SHA256_80_128(tstate + 16, ostate + 16, input + 40, X + 64);
scrypt_core_3way(X, V, N);
if ( work_restart[thrid].restart ) return 0;
PBKDF2_SHA256_128_32(tstate + 0, ostate + 0, X + 0, output + 0);
scrypt_core_3way(X, V, N);
if ( work_restart[thrid].restart ) return 0;
PBKDF2_SHA256_128_32(tstate + 0, ostate + 0, X + 0, output + 0);
PBKDF2_SHA256_128_32(tstate + 8, ostate + 8, X + 32, output + 8);
PBKDF2_SHA256_128_32(tstate + 16, ostate + 16, X + 64, output + 16);
return 1;
}
#ifdef HAVE_SHA256_4WAY
static void scrypt_1024_1_1_256_12way(const uint32_t *input,
uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N)
static bool scrypt_1024_1_1_256_12way(const uint32_t *input,
uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N,
int thrid )
{
uint32_t _ALIGN(128) tstate[12 * 8];
uint32_t _ALIGN(128) ostate[12 * 8];
@@ -612,43 +533,60 @@ static void scrypt_1024_1_1_256_12way(const uint32_t *input,
for (i = 0; i < 20; i++)
for (k = 0; k < 4; k++)
W[128 * j + 4 * i + k] = input[80 * j + k * 20 + i];
for (j = 0; j < 3; j++)
for (j = 0; j < 3; j++)
for (i = 0; i < 8; i++)
for (k = 0; k < 4; k++)
tstate[32 * j + 4 * i + k] = midstate[i];
HMAC_SHA256_80_init_4way(W + 0, tstate + 0, ostate + 0);
HMAC_SHA256_80_init_4way(W + 0, tstate + 0, ostate + 0);
HMAC_SHA256_80_init_4way(W + 128, tstate + 32, ostate + 32);
HMAC_SHA256_80_init_4way(W + 256, tstate + 64, ostate + 64);
PBKDF2_SHA256_80_128_4way(tstate + 0, ostate + 0, W + 0, W + 0);
if ( work_restart[thrid].restart ) return 0;
PBKDF2_SHA256_80_128_4way(tstate + 0, ostate + 0, W + 0, W + 0);
PBKDF2_SHA256_80_128_4way(tstate + 32, ostate + 32, W + 128, W + 128);
PBKDF2_SHA256_80_128_4way(tstate + 64, ostate + 64, W + 256, W + 256);
for (j = 0; j < 3; j++)
if ( work_restart[thrid].restart ) return 0;
for (j = 0; j < 3; j++)
for (i = 0; i < 32; i++)
for (k = 0; k < 4; k++)
X[128 * j + k * 32 + i] = W[128 * j + 4 * i + k];
scrypt_core_3way(X + 0 * 96, V, N);
scrypt_core_3way(X + 0 * 96, V, N);
scrypt_core_3way(X + 1 * 96, V, N);
scrypt_core_3way(X + 2 * 96, V, N);
scrypt_core_3way(X + 3 * 96, V, N);
for (j = 0; j < 3; j++)
if ( work_restart[thrid].restart ) return 0;
for (j = 0; j < 3; j++)
for (i = 0; i < 32; i++)
for (k = 0; k < 4; k++)
W[128 * j + 4 * i + k] = X[128 * j + k * 32 + i];
PBKDF2_SHA256_128_32_4way(tstate + 0, ostate + 0, W + 0, W + 0);
PBKDF2_SHA256_128_32_4way(tstate + 0, ostate + 0, W + 0, W + 0);
PBKDF2_SHA256_128_32_4way(tstate + 32, ostate + 32, W + 128, W + 128);
PBKDF2_SHA256_128_32_4way(tstate + 64, ostate + 64, W + 256, W + 256);
for (j = 0; j < 3; j++)
for (j = 0; j < 3; j++)
for (i = 0; i < 8; i++)
for (k = 0; k < 4; k++)
output[32 * j + k * 8 + i] = W[128 * j + 4 * i + k];
return 1;
}
#endif /* HAVE_SHA256_4WAY */
#endif /* HAVE_SCRYPT_3WAY */
#ifdef HAVE_SCRYPT_6WAY
static void scrypt_1024_1_1_256_24way(const uint32_t *input,
uint32_t *output, uint32_t *midstate, unsigned char *scratchpad, int N)
static int scrypt_1024_1_1_256_24way( const uint32_t *input,
uint32_t *output, uint32_t *midstate,
unsigned char *scratchpad, int N, int thrid )
{
uint32_t _ALIGN(128) tstate[24 * 8];
uint32_t _ALIGN(128) ostate[24 * 8];
@@ -657,41 +595,60 @@ static void scrypt_1024_1_1_256_24way(const uint32_t *input,
uint32_t *V;
int i, j, k;
V = (uint32_t *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63));
V = (uint32_t *)( ( (uintptr_t)(scratchpad) + 63 ) & ~ (uintptr_t)(63) );
for (j = 0; j < 3; j++)
for (i = 0; i < 20; i++)
for (k = 0; k < 8; k++)
for ( j = 0; j < 3; j++ )
for ( i = 0; i < 20; i++ )
for ( k = 0; k < 8; k++ )
W[8 * 32 * j + 8 * i + k] = input[8 * 20 * j + k * 20 + i];
for (j = 0; j < 3; j++)
for (i = 0; i < 8; i++)
for (k = 0; k < 8; k++)
for ( j = 0; j < 3; j++ )
for ( i = 0; i < 8; i++ )
for ( k = 0; k < 8; k++ )
tstate[8 * 8 * j + 8 * i + k] = midstate[i];
HMAC_SHA256_80_init_8way(W + 0, tstate + 0, ostate + 0);
HMAC_SHA256_80_init_8way(W + 256, tstate + 64, ostate + 64);
HMAC_SHA256_80_init_8way(W + 512, tstate + 128, ostate + 128);
PBKDF2_SHA256_80_128_8way(tstate + 0, ostate + 0, W + 0, W + 0);
PBKDF2_SHA256_80_128_8way(tstate + 64, ostate + 64, W + 256, W + 256);
PBKDF2_SHA256_80_128_8way(tstate + 128, ostate + 128, W + 512, W + 512);
for (j = 0; j < 3; j++)
for (i = 0; i < 32; i++)
for (k = 0; k < 8; k++)
HMAC_SHA256_80_init_8way( W + 0, tstate + 0, ostate + 0 );
HMAC_SHA256_80_init_8way( W + 256, tstate + 64, ostate + 64 );
HMAC_SHA256_80_init_8way( W + 512, tstate + 128, ostate + 128 );
if ( work_restart[thrid].restart ) return 0;
PBKDF2_SHA256_80_128_8way( tstate + 0, ostate + 0, W + 0, W + 0 );
PBKDF2_SHA256_80_128_8way( tstate + 64, ostate + 64, W + 256, W + 256 );
PBKDF2_SHA256_80_128_8way( tstate + 128, ostate + 128, W + 512, W + 512 );
if ( work_restart[thrid].restart ) return 0;
for ( j = 0; j < 3; j++ )
for ( i = 0; i < 32; i++ )
for ( k = 0; k < 8; k++ )
X[8 * 32 * j + k * 32 + i] = W[8 * 32 * j + 8 * i + k];
scrypt_core_6way(X + 0 * 32, V, N);
scrypt_core_6way(X + 6 * 32, V, N);
scrypt_core_6way(X + 12 * 32, V, N);
scrypt_core_6way(X + 18 * 32, V, N);
for (j = 0; j < 3; j++)
for (i = 0; i < 32; i++)
for (k = 0; k < 8; k++)
scrypt_core_6way( X + 0 * 32, V, N );
scrypt_core_6way( X + 6 * 32, V, N );
if ( work_restart[thrid].restart ) return 0;
scrypt_core_6way( X + 12 * 32, V, N );
scrypt_core_6way( X + 18 * 32, V, N );
if ( work_restart[thrid].restart ) return 0;
for ( j = 0; j < 3; j++ )
for ( i = 0; i < 32; i++ )
for ( k = 0; k < 8; k++ )
W[8 * 32 * j + 8 * i + k] = X[8 * 32 * j + k * 32 + i];
PBKDF2_SHA256_128_32_8way(tstate + 0, ostate + 0, W + 0, W + 0);
PBKDF2_SHA256_128_32_8way(tstate + 64, ostate + 64, W + 256, W + 256);
PBKDF2_SHA256_128_32_8way(tstate + 128, ostate + 128, W + 512, W + 512);
for (j = 0; j < 3; j++)
for (i = 0; i < 8; i++)
for (k = 0; k < 8; k++)
PBKDF2_SHA256_128_32_8way( tstate + 0, ostate + 0, W + 0, W + 0 );
PBKDF2_SHA256_128_32_8way( tstate + 64, ostate + 64, W + 256, W + 256 );
PBKDF2_SHA256_128_32_8way( tstate + 128, ostate + 128, W + 512, W + 512 );
for ( j = 0; j < 3; j++ )
for ( i = 0; i < 8; i++ )
for ( k = 0; k < 8; k++ )
output[8 * 8 * j + k * 8 + i] = W[8 * 32 * j + 8 * i + k];
return 1;
}
#endif /* HAVE_SCRYPT_6WAY */
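Every *_1024_1_1_256* variant above gained the same instrumentation: the return type changed from void to a success flag, and work_restart is polled between the expensive phases so a thread holding stale work bails out instead of finishing it. The pattern in isolation (the stand-in struct is illustrative; the real flag lives in the miner's headers):

/* Sketch of the early-abort pattern added above. */
struct restart_flag { volatile int restart; };
extern struct restart_flag work_restart[];   /* per-thread new-work flags */

static int hash_in_phases( int thrid )
{
    /* ... PBKDF2 pre-hash ... */
    if ( work_restart[thrid].restart ) return 0;   /* stale: abandon */
    /* ... scrypt_core passes ... */
    if ( work_restart[thrid].restart ) return 0;
    /* ... PBKDF2 finalization ... */
    return 1;                                      /* result usable */
}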
@@ -703,16 +660,18 @@ extern int scanhash_scrypt( struct work *work, uint32_t max_nonce,
uint32_t data[SCRYPT_MAX_WAYS * 20], hash[SCRYPT_MAX_WAYS * 8];
uint32_t midstate[8];
uint32_t n = pdata[19] - 1;
const uint32_t Htarg = ptarget[7];
int thr_id = mythr->id; // thr_id arg is deprecated
int throughput = scrypt_best_throughput();
int i;
volatile uint8_t *restart = &(work_restart[thr_id].restart);
#ifdef HAVE_SHA256_4WAY
if (sha256_use_4way())
throughput *= 4;
#endif
// applog(LOG_INFO,"Scrypt throughput %d",throughput);
for (i = 0; i < throughput; i++)
memcpy(data + i * 20, pdata, 80);
@@ -720,46 +679,50 @@ extern int scanhash_scrypt( struct work *work, uint32_t max_nonce,
sha256_transform(midstate, data, 0);
do {
bool rc = true;
for (i = 0; i < throughput; i++)
data[i * 20 + 19] = ++n;
#if defined(HAVE_SHA256_4WAY)
if (throughput == 4)
scrypt_1024_1_1_256_4way(data, hash, midstate,
scratchbuf, scratchbuf_size );
rc = scrypt_1024_1_1_256_4way(data, hash, midstate,
scratchbuf, scratchbuf_size, thr_id );
else
#endif
#if defined(HAVE_SCRYPT_3WAY) && defined(HAVE_SHA256_4WAY)
if (throughput == 12)
scrypt_1024_1_1_256_12way(data, hash, midstate,
scratchbuf, scratchbuf_size );
rc = scrypt_1024_1_1_256_12way(data, hash, midstate,
scratchbuf, scratchbuf_size, thr_id );
else
#endif
#if defined(HAVE_SCRYPT_6WAY)
if (throughput == 24)
scrypt_1024_1_1_256_24way(data, hash, midstate,
scratchbuf, scratchbuf_size );
rc = scrypt_1024_1_1_256_24way(data, hash, midstate,
scratchbuf, scratchbuf_size, thr_id );
else
#endif
#if defined(HAVE_SCRYPT_3WAY)
if (throughput == 3)
scrypt_1024_1_1_256_3way(data, hash, midstate,
scratchbuf, scratchbuf_size );
rc = scrypt_1024_1_1_256_3way(data, hash, midstate,
scratchbuf, scratchbuf_size, thr_id );
else
#endif
scrypt_1024_1_1_256(data, hash, midstate, scratchbuf,
scratchbuf_size );
rc = scrypt_1024_1_1_256(data, hash, midstate, scratchbuf,
scratchbuf_size, thr_id );
for (i = 0; i < throughput; i++) {
if (unlikely(hash[i * 8 + 7] <= Htarg && fulltest(hash + i * 8, ptarget))) {
if ( rc )
for ( i = 0; i < throughput; i++ )
{
if ( unlikely( valid_hash( hash + i * 8, ptarget ) ) )
{
pdata[19] = data[i * 20 + 19];
submit_solution( work, hash, mythr );
submit_solution( work, hash + i * 8, mythr );
}
}
} while (likely(n < max_nonce && !work_restart[thr_id].restart));
}
} while ( likely( ( n < ( max_nonce - throughput ) ) && !(*restart) ) );
*hashes_done = n - pdata[19] + 1;
*hashes_done = n - pdata[19];
pdata[19] = n;
return 0;
}
@@ -778,7 +741,6 @@ bool register_scrypt_algo( algo_gate_t* gate )
gate->optimizations = SSE2_OPT | AVX2_OPT;
gate->miner_thread_init =(void*)&scrypt_miner_thread_init;
gate->scanhash = (void*)&scanhash_scrypt;
// gate->hash = (void*)&scrypt_1024_1_1_256_24way;
opt_target_factor = 65536.0;
if ( !opt_param_n )

View File

@@ -0,0 +1,440 @@
/*-
* Copyright 2005,2007,2009 Colin Percival
* Copyright 2020 JayDDee246@gmail.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/types.h>
#include <stdint.h>
#include <string.h>
#include "hmac-sha256-hash-4way.h"
#include "compat.h"
// HMAC 4-way SSE2
/**
* hmac_sha256_4way_full(digest, K, Klen, in, len):
* Compute the HMAC-SHA256 of ${len} bytes from ${in} using the key ${K} of
* length ${Klen}, and write the result to ${digest}. All buffers are 4-way
* interleaved; lengths are in bytes per lane.
*/
void
hmac_sha256_4way_full( void *digest, const void *K, size_t Klen,
const void *in, size_t len )
{
hmac_sha256_4way_context ctx;
hmac_sha256_4way_init( &ctx, K, Klen );
hmac_sha256_4way_update( &ctx, in, len );
hmac_sha256_4way_close( &ctx, digest );
}
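As a usage sketch: every buffer here carries four independent streams interleaved 32 bits at a time, and the length arguments count bytes per lane. Assuming the repo's intrlv_4x32/dintrlv_4x32 helpers behave as they are used elsewhere in this codebase (last argument in bits), a call could look like:

/* Sketch only: helper signatures assumed, not verified. */
uint8_t key[4][32], msg[4][64], mac[4][32];
uint8_t vkey[32*4] __attribute__ ((aligned (64)));
uint8_t vmsg[64*4] __attribute__ ((aligned (64)));
uint8_t vmac[32*4] __attribute__ ((aligned (64)));
intrlv_4x32( vkey, key[0], key[1], key[2], key[3], 32*8 );
intrlv_4x32( vmsg, msg[0], msg[1], msg[2], msg[3], 64*8 );
hmac_sha256_4way_full( vmac, vkey, 32, vmsg, 64 ); /* lengths per lane */
dintrlv_4x32( mac[0], mac[1], mac[2], mac[3], vmac, 32*8 );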
/* Initialize an HMAC-SHA256 operation with the given key. */
void
hmac_sha256_4way_init( hmac_sha256_4way_context *ctx, const void *_K,
size_t Klen )
{
unsigned char pad[64*4] __attribute__ ((aligned (64)));
unsigned char khash[32*4] __attribute__ ((aligned (64)));
const unsigned char * K = _K;
size_t i;
/* If Klen > 64, the key is really SHA256(K). */
if ( Klen > 64 )
{
sha256_4way_init( &ctx->ictx );
sha256_4way_update( &ctx->ictx, K, Klen );
sha256_4way_close( &ctx->ictx, khash );
K = khash;
Klen = 32;
}
/* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
sha256_4way_init( &ctx->ictx );
memset( pad, 0x36, 64*4 );
for ( i = 0; i < Klen/4; i++ ) /* one __m128i per 32-bit key word per lane */
casti_m128i( pad, i ) = _mm_xor_si128( casti_m128i( pad, i ),
casti_m128i( K, i ) );
sha256_4way_update( &ctx->ictx, pad, 64 );
/* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
sha256_4way_init( &ctx->octx );
memset( pad, 0x5c, 64*4 );
for ( i = 0; i < Klen/4; i++ )
casti_m128i( pad, i ) = _mm_xor_si128( casti_m128i( pad, i ),
casti_m128i( K, i ) );
sha256_4way_update( &ctx->octx, pad, 64 );
}
/* Add bytes to the HMAC-SHA256 operation. */
void
hmac_sha256_4way_update( hmac_sha256_4way_context *ctx, const void *in,
size_t len )
{
/* Feed data to the inner SHA256 operation. */
sha256_4way_update( &ctx->ictx, in, len );
}
/* Finish an HMAC-SHA256 operation. */
void
hmac_sha256_4way_close( hmac_sha256_4way_context *ctx, void *digest )
{
unsigned char ihash[32*4] __attribute__ ((aligned (64)));
/* Finish the inner SHA256 operation. */
sha256_4way_close( &ctx->ictx, ihash );
/* Feed the inner hash to the outer SHA256 operation. */
sha256_4way_update( &ctx->octx, ihash, 32 );
/* Finish the outer SHA256 operation. */
sha256_4way_close( &ctx->octx, digest );
}
/**
* pbkdf2_sha256_4way(buf, dkLen, passwd, passwdlen, salt, saltlen, c):
* Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
* write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
*/
void
pbkdf2_sha256_4way( uint8_t *buf, size_t dkLen,
const uint8_t *passwd, size_t passwdlen,
const uint8_t *salt, size_t saltlen, uint64_t c )
{
hmac_sha256_4way_context PShctx, hctx;
uint8_t _ALIGN(128) T[32*4];
uint8_t _ALIGN(128) U[32*4];
__m128i ivec;
size_t i, clen;
uint64_t j;
int k;
/* Compute HMAC state after processing P and S. */
hmac_sha256_4way_init( &PShctx, passwd, passwdlen );
hmac_sha256_4way_update( &PShctx, salt, saltlen );
/* Iterate through the blocks. */
for ( i = 0; i * 32 < dkLen; i++ )
{
/* Generate INT(i + 1). */
ivec = _mm_set1_epi32( bswap_32( i+1 ) );
/* Compute U_1 = PRF(P, S || INT(i)). */
memcpy( &hctx, &PShctx, sizeof(hmac_sha256_4way_context) );
hmac_sha256_4way_update( &hctx, &ivec, 4 );
hmac_sha256_4way_close( &hctx, U );
/* T_i = U_1 ... */
memcpy( T, U, 32*4 );
for ( j = 2; j <= c; j++ )
{
/* Compute U_j. */
hmac_sha256_4way_init( &hctx, passwd, passwdlen );
hmac_sha256_4way_update( &hctx, U, 32 );
hmac_sha256_4way_close( &hctx, U );
/* ... xor U_j ... */
for ( k = 0; k < 8; k++ )
casti_m128i( T, k ) = _mm_xor_si128( casti_m128i( T, k ),
casti_m128i( U, k ) );
}
/* Copy as many bytes as necessary into buf. */
clen = dkLen - i * 32;
if ( clen > 32 )
clen = 32;
memcpy( &buf[ i*32*4 ], T, clen*4 );
}
}
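The i*32*4 and clen*4 strides above show that the output buffer is interleaved the same way as the inputs: each 32-byte output block carries all 4 lanes. A hedged usage sketch, where vpasswd/vsalt stand in for already-interleaved buffers:

/* Sketch: derive 32 bytes per lane for 4 lanes in one call. */
uint8_t vpasswd[32*4], vsalt[16*4];   /* 4-way interleaved inputs */
size_t passwdlen = 32, saltlen = 16;  /* bytes per lane */
uint8_t dk[32*4] __attribute__ ((aligned (64)));
pbkdf2_sha256_4way( dk, 32, vpasswd, passwdlen, vsalt, saltlen, 2048 );
/* lane k's output word j sits at ((uint32_t*)dk)[ j*4 + k ] */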
#if defined(__AVX2__)
// HMAC 8-way AVX2
void
hmac_sha256_8way_full( void *digest, const void *K, size_t Klen,
const void *in, size_t len )
{
hmac_sha256_8way_context ctx;
hmac_sha256_8way_init( &ctx, K, Klen );
hmac_sha256_8way_update( &ctx, in, len );
hmac_sha256_8way_close( &ctx, digest );
}
/* Initialize an HMAC-SHA256 operation with the given key. */
void
hmac_sha256_8way_init( hmac_sha256_8way_context *ctx, const void *_K,
size_t Klen )
{
unsigned char pad[64*8] __attribute__ ((aligned (128)));
unsigned char khash[32*8] __attribute__ ((aligned (128)));
const unsigned char * K = _K;
size_t i;
/* If Klen > 64, the key is really SHA256(K). */
if ( Klen > 64 )
{
sha256_8way_init( &ctx->ictx );
sha256_8way_update( &ctx->ictx, K, Klen );
sha256_8way_close( &ctx->ictx, khash );
K = khash;
Klen = 32;
}
/* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
sha256_8way_init( &ctx->ictx );
memset( pad, 0x36, 64*8);
for ( i = 0; i < Klen/4; i++ )
casti_m256i( pad, i ) = _mm256_xor_si256( casti_m256i( pad, i ),
casti_m256i( K, i ) );
sha256_8way_update( &ctx->ictx, pad, 64 );
/* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
sha256_8way_init( &ctx->octx );
memset( pad, 0x5c, 64*8 );
for ( i = 0; i < Klen/4; i++ )
casti_m256i( pad, i ) = _mm256_xor_si256( casti_m256i( pad, i ),
casti_m256i( K, i ) );
sha256_8way_update( &ctx->octx, pad, 64 );
}
void
hmac_sha256_8way_update( hmac_sha256_8way_context *ctx, const void *in,
size_t len )
{
/* Feed data to the inner SHA256 operation. */
sha256_8way_update( &ctx->ictx, in, len );
}
/* Finish an HMAC-SHA256 operation. */
void
hmac_sha256_8way_close( hmac_sha256_8way_context *ctx, void *digest )
{
unsigned char ihash[32*8] __attribute__ ((aligned (128)));
/* Finish the inner SHA256 operation. */
sha256_8way_close( &ctx->ictx, ihash );
/* Feed the inner hash to the outer SHA256 operation. */
sha256_8way_update( &ctx->octx, ihash, 32 );
/* Finish the outer SHA256 operation. */
sha256_8way_close( &ctx->octx, digest );
}
/**
* pbkdf2_sha256_8way(buf, dkLen, passwd, passwdlen, salt, saltlen, c):
* Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
* write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
*/
void
pbkdf2_sha256_8way( uint8_t *buf, size_t dkLen, const uint8_t *passwd,
size_t passwdlen, const uint8_t *salt, size_t saltlen,
uint64_t c )
{
hmac_sha256_8way_context PShctx, hctx;
uint8_t _ALIGN(128) T[32*8];
uint8_t _ALIGN(128) U[32*8];
size_t i, clen;
uint64_t j;
int k;
/* Compute HMAC state after processing P and S. */
hmac_sha256_8way_init( &PShctx, passwd, passwdlen );
// saltlen can be an odd number of bytes
hmac_sha256_8way_update( &PShctx, salt, saltlen );
/* Iterate through the blocks. */
for ( i = 0; i * 32 < dkLen; i++ )
{
__m256i ivec = _mm256_set1_epi32( bswap_32( i+1 ) );
/* Compute U_1 = PRF(P, S || INT(i)). */
memcpy( &hctx, &PShctx, sizeof(hmac_sha256_8way_context) );
hmac_sha256_8way_update( &hctx, &ivec, 4 );
hmac_sha256_8way_close( &hctx, U );
/* T_i = U_1 ... */
memcpy( T, U, 32*8 );
for ( j = 2; j <= c; j++ )
{
/* Compute U_j. */
hmac_sha256_8way_init( &hctx, passwd, passwdlen );
hmac_sha256_8way_update( &hctx, U, 32 );
hmac_sha256_8way_close( &hctx, U );
/* ... xor U_j ... */
for ( k = 0; k < 8; k++ )
casti_m256i( T, k ) = _mm256_xor_si256( casti_m256i( T, k ),
casti_m256i( U, k ) );
}
/* Copy as many bytes as necessary into buf. */
clen = dkLen - i * 32;
if ( clen > 32 )
clen = 32;
memcpy( &buf[ i*32*8 ], T, clen*8 );
}
}
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// HMAC 16-way AVX512
void
hmac_sha256_16way_full( void *digest, const void *K, size_t Klen,
const void *in, size_t len )
{
hmac_sha256_16way_context ctx;
hmac_sha256_16way_init( &ctx, K, Klen );
hmac_sha256_16way_update( &ctx, in, len );
hmac_sha256_16way_close( &ctx, digest );
}
void
hmac_sha256_16way_init( hmac_sha256_16way_context *ctx, const void *_K,
size_t Klen )
{
unsigned char pad[64*16] __attribute__ ((aligned (128)));
unsigned char khash[32*16] __attribute__ ((aligned (128)));
const unsigned char * K = _K;
size_t i;
/* If Klen > 64, the key is really SHA256(K). */
if ( Klen > 64 )
{
sha256_16way_init( &ctx->ictx );
sha256_16way_update( &ctx->ictx, K, Klen );
sha256_16way_close( &ctx->ictx, khash );
K = khash;
Klen = 32;
}
/* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
sha256_16way_init( &ctx->ictx );
memset( pad, 0x36, 64*16 );
for ( i = 0; i < Klen/4; i++ ) /* one __m512i per 32-bit key word per lane */
casti_m512i( pad, i ) = _mm512_xor_si512( casti_m512i( pad, i ),
casti_m512i( K, i ) );
sha256_16way_update( &ctx->ictx, pad, 64 );
/* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
sha256_16way_init( &ctx->octx );
memset( pad, 0x5c, 64*16 );
for ( i = 0; i < Klen/4; i++ )
casti_m512i( pad, i ) = _mm512_xor_si512( casti_m512i( pad, i ),
casti_m512i( K, i ) );
sha256_16way_update( &ctx->octx, pad, 64 );
}
void
hmac_sha256_16way_update( hmac_sha256_16way_context *ctx, const void *in,
size_t len )
{
/* Feed data to the inner SHA256 operation. */
sha256_16way_update( &ctx->ictx, in, len );
}
/* Finish an HMAC-SHA256 operation. */
void
hmac_sha256_16way_close( hmac_sha256_16way_context *ctx, void *digest )
{
unsigned char ihash[32*16] __attribute__ ((aligned (128)));
/* Finish the inner SHA256 operation. */
sha256_16way_close( &ctx->ictx, ihash );
/* Feed the inner hash to the outer SHA256 operation. */
sha256_16way_update( &ctx->octx, ihash, 32 );
/* Finish the outer SHA256 operation. */
sha256_16way_close( &ctx->octx, digest );
}
/**
* pbkdf2_sha256_16way(buf, dkLen, passwd, passwdlen, salt, saltlen, c):
* Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
* write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
*/
void
pbkdf2_sha256_16way( uint8_t *buf, size_t dkLen,
const uint8_t *passwd, size_t passwdlen,
const uint8_t *salt, size_t saltlen, uint64_t c )
{
hmac_sha256_16way_context PShctx, hctx;
uint8_t _ALIGN(128) T[32*16];
uint8_t _ALIGN(128) U[32*16];
__m512i ivec;
size_t i, clen;
uint64_t j;
int k;
/* Compute HMAC state after processing P and S. */
hmac_sha256_16way_init( &PShctx, passwd, passwdlen );
hmac_sha256_16way_update( &PShctx, salt, saltlen );
/* Iterate through the blocks. */
for ( i = 0; i * 32 < dkLen; i++ )
{
/* Generate INT(i + 1). */
ivec = _mm512_set1_epi32( bswap_32( i+1 ) );
/* Compute U_1 = PRF(P, S || INT(i)). */
memcpy( &hctx, &PShctx, sizeof(hmac_sha256_16way_context) );
hmac_sha256_16way_update( &hctx, &ivec, 4 );
hmac_sha256_16way_close( &hctx, U );
/* T_i = U_1 ... */
memcpy( T, U, 32*16 );
for ( j = 2; j <= c; j++ )
{
/* Compute U_j. */
hmac_sha256_16way_init( &hctx, passwd, passwdlen );
hmac_sha256_16way_update( &hctx, U, 32 );
hmac_sha256_16way_close( &hctx, U );
/* ... xor U_j ... */
for ( k = 0; k < 8; k++ )
casti_m512i( T, k ) = _mm512_xor_si512( casti_m512i( T, k ),
casti_m512i( U, k ) );
}
/* Copy as many bytes as necessary into buf. */
clen = dkLen - i * 32;
if ( clen > 32 )
clen = 32;
memcpy( &buf[ i*32*16 ], T, clen*16 );
}
}
#endif // AVX512
#endif // AVX2

View File

@@ -0,0 +1,107 @@
/*-
* Copyright 2005,2007,2009 Colin Percival
* Copyright 2020 JayDDee@gmail.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: src/lib/libmd/sha256_Y.h,v 1.2 2006/01/17 15:35:56 phk Exp $
*/
#ifndef HMAC_SHA256_4WAY_H__
#define HMAC_SHA256_4WAY_H__
// Tested only 8-way, with a null pers (personalization) string
#include <sys/types.h>
#include <stdint.h>
#include "simd-utils.h"
#include "sha-hash-4way.h"
typedef struct _hmac_sha256_4way_context
{
sha256_4way_context ictx;
sha256_4way_context octx;
} hmac_sha256_4way_context;
//void SHA256_Buf( const void *, size_t len, uint8_t digest[32] );
void hmac_sha256_4way_init( hmac_sha256_4way_context *, const void *, size_t );
void hmac_sha256_4way_update( hmac_sha256_4way_context *, const void *,
size_t );
void hmac_sha256_4way_close( hmac_sha256_4way_context *, void* );
void hmac_sha256_4way_full( void*, const void *, size_t Klen, const void *,
size_t len );
/**
* pbkdf2_sha256_4way(buf, dkLen, passwd, passwdlen, salt, saltlen, c):
* Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
* write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
*/
void pbkdf2_sha256_4way( uint8_t *, size_t, const uint8_t *, size_t,
const uint8_t *, size_t, uint64_t );
#if defined(__AVX2__)
typedef struct _hmac_sha256_8way_context
{
sha256_8way_context ictx;
sha256_8way_context octx;
} hmac_sha256_8way_context;
//void SHA256_Buf( const void *, size_t len, uint8_t digest[32] );
void hmac_sha256_8way_init( hmac_sha256_8way_context *, const void *, size_t );
void hmac_sha256_8way_update( hmac_sha256_8way_context *, const void *,
size_t );
void hmac_sha256_8way_close( hmac_sha256_8way_context *, void* );
void hmac_sha256_8way_full( void*, const void *, size_t Klen, const void *,
size_t len );
void pbkdf2_sha256_8way( uint8_t *, size_t, const uint8_t *, size_t,
const uint8_t *, size_t, uint64_t );
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
typedef struct _hmac_sha256_16way_context
{
sha256_16way_context ictx;
sha256_16way_context octx;
} hmac_sha256_16way_context;
//void SHA256_Buf( const void *, size_t len, uint8_t digest[32] );
void hmac_sha256_16way_init( hmac_sha256_16way_context *,
const void *, size_t );
void hmac_sha256_16way_update( hmac_sha256_16way_context *, const void *,
size_t );
void hmac_sha256_16way_close( hmac_sha256_16way_context *, void* );
void hmac_sha256_16way_full( void*, const void *, size_t Klen, const void *,
size_t len );
void pbkdf2_sha256_16way( uint8_t *, size_t, const uint8_t *, size_t,
const uint8_t *, size_t, uint64_t );
#endif // AVX512
#endif // AVX2
#endif // HMAC_SHA256_4WAY_H__

View File

@@ -81,16 +81,17 @@ HMAC_SHA256_Init( HMAC_SHA256_CTX *ctx, const void *_K, size_t Klen )
/* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
SHA256_Init( &ctx->ictx );
memset( pad, 0x36, 64 );
for ( i = 0; i < Klen; i++ )
pad[i] ^= K[i];
for ( i = 0; i < Klen; i++ ) pad[i] = K[i] ^ 0x36;
memset( pad + Klen, 0x36, 64 - Klen );
SHA256_Update( &ctx->ictx, pad, 64 );
/* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
SHA256_Init( &ctx->octx );
memset(pad, 0x5c, 64);
for ( i = 0; i < Klen; i++ )
pad[i] ^= K[i];
for ( i = 0; i < Klen; i++ ) pad[i] = K[i] ^ 0x5c;
memset( pad + Klen, 0x5c, 64 - Klen );
SHA256_Update( &ctx->octx, pad, 64 );
}
@@ -161,7 +162,13 @@ PBKDF2_SHA256( const uint8_t *passwd, size_t passwdlen, const uint8_t *salt,
HMAC_SHA256_Final( U, &hctx );
/* ... xor U_j ... */
for ( k = 0; k < 32; k++ )
// _mm256_xor_si256( *(__m256i*)T, *(__m256i*)U );
// _mm_xor_si128( ((__m128i*)T)[0], ((__m128i*)U)[0] );
// _mm_xor_si128( ((__m128i*)T)[1], ((__m128i*)U)[1] );
// for ( k = 0; k < 4; k++ ) T[k] ^= U[k];
for ( k = 0; k < 32; k++ )
T[k] ^= U[k];
}

View File

@@ -58,6 +58,7 @@ void sha256_4way_init( sha256_4way_context *sc );
void sha256_4way_update( sha256_4way_context *sc, const void *data,
size_t len );
void sha256_4way_close( sha256_4way_context *sc, void *dst );
void sha256_4way_full( void *dst, const void *data, size_t len );
#endif // SSE2
@@ -75,6 +76,7 @@ typedef struct {
void sha256_8way_init( sha256_8way_context *sc );
void sha256_8way_update( sha256_8way_context *sc, const void *data, size_t len );
void sha256_8way_close( sha256_8way_context *sc, void *dst );
void sha256_8way_full( void *dst, const void *data, size_t len );
#endif // AVX2
@@ -92,6 +94,7 @@ typedef struct {
void sha256_16way_init( sha256_16way_context *sc );
void sha256_16way_update( sha256_16way_context *sc, const void *data, size_t len );
void sha256_16way_close( sha256_16way_context *sc, void *dst );
void sha256_16way_full( void *dst, const void *data, size_t len );
#endif // AVX512
@@ -110,6 +113,7 @@ void sha512_4way_init( sha512_4way_context *sc);
void sha512_4way_update( sha512_4way_context *sc, const void *data,
size_t len );
void sha512_4way_close( sha512_4way_context *sc, void *dst );
void sha512_4way_full( void *dst, const void *data, size_t len );
#endif // AVX2
@@ -128,6 +132,7 @@ void sha512_8way_init( sha512_8way_context *sc);
void sha512_8way_update( sha512_8way_context *sc, const void *data,
size_t len );
void sha512_8way_close( sha512_8way_context *sc, void *dst );
void sha512_8way_full( void *dst, const void *data, size_t len );
#endif // AVX512

View File

@@ -330,6 +330,14 @@ void sha256_4way_close( sha256_4way_context *sc, void *dst )
mm128_block_bswap_32( dst, sc->val );
}
void sha256_4way_full( void *dst, const void *data, size_t len )
{
sha256_4way_context ctx;
sha256_4way_init( &ctx );
sha256_4way_update( &ctx, data, len );
sha256_4way_close( &ctx, dst );
}
#if defined(__AVX2__)
// SHA-256 8 way
@@ -498,6 +506,10 @@ void sha256_8way_init( sha256_8way_context *sc )
*/
}
// Need to handle odd byte lengths for yespower.
// Assume only the last update has an odd length.
void sha256_8way_update( sha256_8way_context *sc, const void *data, size_t len )
{
__m256i *vdata = (__m256i*)data;
@@ -564,6 +576,13 @@ void sha256_8way_close( sha256_8way_context *sc, void *dst )
mm256_block_bswap_32( dst, sc->val );
}
void sha256_8way_full( void *dst, const void *data, size_t len )
{
sha256_8way_context ctx;
sha256_8way_init( &ctx );
sha256_8way_update( &ctx, data, len );
sha256_8way_close( &ctx, dst );
}
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
@@ -791,6 +810,14 @@ void sha256_16way_close( sha256_16way_context *sc, void *dst )
mm512_block_bswap_32( dst, sc->val );
}
void sha256_16way_full( void *dst, const void *data, size_t len )
{
sha256_16way_context ctx;
sha256_16way_init( &ctx );
sha256_16way_update( &ctx, data, len );
sha256_16way_close( &ctx, dst );
}
#endif // AVX512
#endif // __AVX2__
#endif // __SSE2__

View File

@@ -33,7 +33,7 @@
#include <stddef.h>
#include <string.h>
#ifdef __AES__
#if defined(__AES__)
#include "sph_shavite.h"
#include "simd-utils.h"

View File

@@ -35,6 +35,8 @@
#include "sph_shavite.h"
#if !defined(__AES__)
#ifdef __cplusplus
extern "C"{
#endif
@@ -1762,3 +1764,6 @@ sph_shavite512_sw_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst
#ifdef __cplusplus
}
#endif
#endif // !AES

View File

@@ -262,15 +262,9 @@ void sph_shavite384_close(void *cc, void *dst);
void sph_shavite384_addbits_and_close(
void *cc, unsigned ub, unsigned n, void *dst);
// Always define sw but only define aesni when available
// Define fptrs for aesni or sw, not both.
void sph_shavite512_sw_init(void *cc);
void sph_shavite512_sw(void *cc, const void *data, size_t len);
void sph_shavite512_sw_close(void *cc, void *dst);
void sph_shavite512_sw_addbits_and_close(
void *cc, unsigned ub, unsigned n, void *dst);
// Don't call these directly from application code; use the macros below.
#ifdef __AES__
void sph_shavite512_aesni_init(void *cc);
void sph_shavite512_aesni(void *cc, const void *data, size_t len);
void sph_shavite512_aesni_close(void *cc, void *dst);
@@ -285,6 +279,13 @@ void sph_shavite512_aesni_addbits_and_close(
#else
void sph_shavite512_sw_init(void *cc);
void sph_shavite512_sw(void *cc, const void *data, size_t len);
void sph_shavite512_sw_close(void *cc, void *dst);
void sph_shavite512_sw_addbits_and_close(
void *cc, unsigned ub, unsigned n, void *dst);
#define sph_shavite512_init sph_shavite512_sw_init
#define sph_shavite512 sph_shavite512_sw
#define sph_shavite512_close sph_shavite512_sw_close
@@ -293,6 +294,20 @@ void sph_shavite512_aesni_addbits_and_close(
#endif
// Use these macros from application code.
#define shavite512_context sph_shavite512_context
#define shavite512_init sph_shavite512_init
#define shavite512_update sph_shavite512
#define shavite512_close sph_shavite512_close
#define shavite512_full( cc, dst, data, len ) \
do{ \
shavite512_init( cc ); \
shavite512_update( cc, data, len ); \
shavite512_close( cc, dst ); \
}while(0)
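This one-shot macro is what lets the repeated init/update/close triples later in this diff collapse to a single call per lane. Usage is simply (hdr and hash are illustrative locals):

shavite512_context ctx;
uint8_t hdr[80], hash[64];
shavite512_full( &ctx, hash, hdr, 80 ); /* init + update + close */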
#ifdef __cplusplus
}
#endif

View File

@@ -13,18 +13,18 @@
#if defined (SKEIN_8WAY)
static __thread skein512_8way_context skein512_8way_ctx
__attribute__ ((aligned (64)));
void skeinhash_8way( void *state, const void *input )
{
uint64_t vhash64[8*8] __attribute__ ((aligned (128)));
skein512_8way_context ctx_skein;
memcpy( &ctx_skein, &skein512_8way_ctx, sizeof( ctx_skein ) );
uint32_t vhash32[16*8] __attribute__ ((aligned (128)));
sha256_8way_context ctx_sha256;
skein512_8way_init( &ctx_skein );
skein512_8way_update( &ctx_skein, input, 80 );
skein512_8way_close( &ctx_skein, vhash64 );
skein512_8way_final16( &ctx_skein, vhash64, input + (64*8) );
rintrlv_8x64_8x32( vhash32, vhash64, 512 );
sha256_8way_init( &ctx_sha256 );
@@ -36,63 +36,70 @@ int scanhash_skein_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[20*8] __attribute__ ((aligned (128)));
uint32_t hash[16*8] __attribute__ ((aligned (64)));
uint32_t hash[8*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[7<<3]);
uint32_t *hash_d7 = &(hash[7*8]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t targ_d7 = ptarget[7];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
uint32_t n = first_nonce;
__m512i *noncev = (__m512i*)vdata + 9; // aligned
int thr_id = mythr->id;
__m512i *noncev = (__m512i*)vdata + 9;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
*noncev = mm512_intrlv_blend_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ), *noncev );
skein512_8way_prehash64( &skein512_8way_ctx, vdata );
do
{
*noncev = mm512_intrlv_blend_32( mm512_bswap_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ) ), *noncev );
skeinhash_8way( hash, vdata );
for ( int lane = 0; lane < 8; lane++ )
if ( hash7[ lane ] <= Htarg )
if ( unlikely( hash_d7[ lane ] <= targ_d7 ) && !bench )
{
extr_lane_8x32( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) )
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = n + lane;
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
*noncev = _mm512_add_epi32( *noncev,
m512_const1_64( 0x0000000800000000 ) );
n += 8;
} while ( (n < max_nonce-8) && !work_restart[thr_id].restart );
} while ( likely( (n < last_nonce) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#elif defined (SKEIN_4WAY)
static __thread skein512_4way_context skein512_4way_ctx
__attribute__ ((aligned (64)));
void skeinhash_4way( void *state, const void *input )
{
uint64_t vhash64[8*4] __attribute__ ((aligned (128)));
skein512_4way_context ctx_skein;
memcpy( &ctx_skein, &skein512_4way_ctx, sizeof( ctx_skein ) );
#if defined(__SHA__)
uint32_t hash0[16] __attribute__ ((aligned (64)));
uint32_t hash1[16] __attribute__ ((aligned (64)));
uint32_t hash2[16] __attribute__ ((aligned (64)));
uint32_t hash3[16] __attribute__ ((aligned (64)));
SHA256_CTX ctx_sha256;
SHA256_CTX ctx_sha256;
#else
uint32_t vhash32[16*4] __attribute__ ((aligned (64)));
sha256_4way_context ctx_sha256;
#endif
skein512_4way_init( &ctx_skein );
skein512_4way_update( &ctx_skein, input, 80 );
skein512_4way_close( &ctx_skein, vhash64 );
skein512_4way_final16( &ctx_skein, vhash64, input + (64*4) );
#if defined(__SHA__)
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 512 );
@@ -127,38 +134,43 @@ int scanhash_skein_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t hash[16*4] __attribute__ ((aligned (64)));
uint32_t hash[8*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[7<<2]);
uint32_t *hash_d7 = &(hash[7<<2]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t targ_d7 = ptarget[7];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
uint32_t n = first_nonce;
__m256i *noncev = (__m256i*)vdata + 9; // aligned
int thr_id = mythr->id;
__m256i *noncev = (__m256i*)vdata + 9;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
mm256_bswap32_intrlv80_4x64( vdata, pdata );
skein512_4way_prehash64( &skein512_4way_ctx, vdata );
*noncev = mm256_intrlv_blend_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
skeinhash_4way( hash, vdata );
for ( int lane = 0; lane < 4; lane++ )
if ( hash7[ lane ] <= Htarg )
if ( unlikely( ( hash_d7[ lane ] <= targ_d7 ) && !bench ) )
{
extr_lane_4x32( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) )
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = n + lane;
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
*noncev = _mm256_add_epi32( *noncev,
m256_const1_64( 0x0000000400000000 ) );
n += 4;
} while ( (n < max_nonce-4) && !work_restart[thr_id].restart );
} while ( likely( (n < last_nonce) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}

View File

@@ -4,14 +4,16 @@
bool register_skein_algo( algo_gate_t* gate )
{
gate->optimizations = AVX2_OPT | AVX512_OPT | SHA_OPT;
#if defined (SKEIN_8WAY)
gate->optimizations = AVX2_OPT | AVX512_OPT;
gate->scanhash = (void*)&scanhash_skein_8way;
gate->hash = (void*)&skeinhash_8way;
#elif defined (SKEIN_4WAY)
gate->optimizations = AVX2_OPT | AVX512_OPT | SHA_OPT;
gate->scanhash = (void*)&scanhash_skein_4way;
gate->hash = (void*)&skeinhash_4way;
#else
gate->optimizations = AVX2_OPT | AVX512_OPT | SHA_OPT;
gate->scanhash = (void*)&scanhash_skein;
gate->hash = (void*)&skeinhash;
#endif

View File

@@ -654,6 +654,160 @@ skein_big_close_8way( skein512_8way_context *sc, unsigned ub, unsigned n,
memcpy_512( dst, buf, out_len >> 3 );
}
void skein512_8way_full( skein512_8way_context *sc, void *out, const void *data,
size_t len )
{
__m512i h0, h1, h2, h3, h4, h5, h6, h7;
__m512i *vdata = (__m512i*)data;
__m512i *buf = sc->buf;
size_t ptr = 0;
unsigned first;
uint64_t bcount = 0;
const int buf_size = 64; // block size in bytes per lane
// Init
h0 = m512_const1_64( 0x4903ADFF749C51CE );
h1 = m512_const1_64( 0x0D95DE399746DF03 );
h2 = m512_const1_64( 0x8FD1934127C79BCE );
h3 = m512_const1_64( 0x9A255629FF352CB1 );
h4 = m512_const1_64( 0x5DB62599DF6CA7B0 );
h5 = m512_const1_64( 0xEABE394CA9D5C3F4 );
h6 = m512_const1_64( 0x991112C71A75B523 );
h7 = m512_const1_64( 0xAE18A40B660FCC33 );
// Update
if ( len <= buf_size - ptr )
{
memcpy_512( buf + (ptr>>3), vdata, len>>3 );
ptr += len;
}
else
{
first = ( bcount == 0 ) << 7;
do {
size_t clen;
if ( ptr == buf_size )
{
bcount ++;
UBI_BIG_8WAY( 96 + first, 0 );
first = 0;
ptr = 0;
}
clen = buf_size - ptr;
if ( clen > len )
clen = len;
memcpy_512( buf + (ptr>>3), vdata, clen>>3 );
ptr += clen;
vdata += (clen>>3);
len -= clen;
} while ( len > 0 );
}
// Close
unsigned et;
memset_zero_512( buf + (ptr>>3), (buf_size - ptr) >> 3 );
et = 352 + ((bcount == 0) << 7);
UBI_BIG_8WAY( et, ptr );
memset_zero_512( buf, buf_size >> 3 );
bcount = 0;
UBI_BIG_8WAY( 510, 8 );
casti_m512i( out, 0 ) = h0;
casti_m512i( out, 1 ) = h1;
casti_m512i( out, 2 ) = h2;
casti_m512i( out, 3 ) = h3;
casti_m512i( out, 4 ) = h4;
casti_m512i( out, 5 ) = h5;
casti_m512i( out, 6 ) = h6;
casti_m512i( out, 7 ) = h7;
}
void
skein512_8way_prehash64( skein512_8way_context *sc, const void *data )
{
__m512i *vdata = (__m512i*)data;
__m512i *buf = sc->buf;
buf[0] = vdata[0];
buf[1] = vdata[1];
buf[2] = vdata[2];
buf[3] = vdata[3];
buf[4] = vdata[4];
buf[5] = vdata[5];
buf[6] = vdata[6];
buf[7] = vdata[7];
register __m512i h0 = m512_const1_64( 0x4903ADFF749C51CE );
register __m512i h1 = m512_const1_64( 0x0D95DE399746DF03 );
register __m512i h2 = m512_const1_64( 0x8FD1934127C79BCE );
register __m512i h3 = m512_const1_64( 0x9A255629FF352CB1 );
register __m512i h4 = m512_const1_64( 0x5DB62599DF6CA7B0 );
register __m512i h5 = m512_const1_64( 0xEABE394CA9D5C3F4 );
register __m512i h6 = m512_const1_64( 0x991112C71A75B523 );
register __m512i h7 = m512_const1_64( 0xAE18A40B660FCC33 );
uint64_t bcount = 1;
UBI_BIG_8WAY( 224, 0 );
sc->h0 = h0;
sc->h1 = h1;
sc->h2 = h2;
sc->h3 = h3;
sc->h4 = h4;
sc->h5 = h5;
sc->h6 = h6;
sc->h7 = h7;
}
void
skein512_8way_final16( skein512_8way_context *sc, void *output,
const void *data )
{
__m512i *in = (__m512i*)data;
__m512i *buf = sc->buf;
__m512i *out = (__m512i*)output;
register __m512i h0 = sc->h0;
register __m512i h1 = sc->h1;
register __m512i h2 = sc->h2;
register __m512i h3 = sc->h3;
register __m512i h4 = sc->h4;
register __m512i h5 = sc->h5;
register __m512i h6 = sc->h6;
register __m512i h7 = sc->h7;
const __m512i zero = m512_zero;
buf[0] = in[0];
buf[1] = in[1];
buf[2] = zero;
buf[3] = zero;
buf[4] = zero;
buf[5] = zero;
buf[6] = zero;
buf[7] = zero;
uint64_t bcount = 1;
UBI_BIG_8WAY( 352, 16 );
buf[0] = zero;
buf[1] = zero;
bcount = 0;
UBI_BIG_8WAY( 510, 8 );
out[0] = h0;
out[1] = h1;
out[2] = h2;
out[3] = h3;
out[4] = h4;
out[5] = h5;
out[6] = h6;
out[7] = h7;
}
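prehash64 and final16 split the 80-byte header at the 64-byte block boundary: prehash64 runs the nonce-free first block once (tweak 224 = first-block flag + 64 bytes) and caches the chaining values in the context, while final16 restores them and pays only for the 16-byte tail plus the output block. The intended per-work/per-nonce pattern, condensed from the callers in this diff (n and last_nonce are illustrative):

/* Sketch of the call pattern: prehash once per work,
   final16 once per nonce batch. */
uint32_t vdata[20*8] __attribute__ ((aligned (128)));
uint64_t vhash[8*8]  __attribute__ ((aligned (128)));
skein512_8way_context ctx;
uint32_t n = 0, last_nonce = 0;           /* set from the work range */
skein512_8way_prehash64( &ctx, vdata );   /* first 64 bytes, once */
do
{
   /* advance the 8 interleaved nonces inside vdata here */
   skein512_8way_final16( &ctx, vhash, vdata + (16*8) ); /* 16-byte tail */
   /* check the 8 lanes of vhash for a solution */
   n += 8;
} while ( n < last_nonce );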
void
skein256_8way_update(void *cc, const void *data, size_t len)
{
@@ -709,6 +863,7 @@ void skein512_4way_init( skein512_4way_context *sc )
sc->ptr = 0;
}
// Do not use for 128 byte data length
static void
skein_big_core_4way( skein512_4way_context *sc, const void *data,
size_t len )
@@ -794,6 +949,156 @@ skein_big_close_4way( skein512_4way_context *sc, unsigned ub, unsigned n,
memcpy_256( dst, buf, out_len >> 3 );
}
void
skein512_4way_full( skein512_4way_context *sc, void *out, const void *data,
size_t len )
{
__m256i h0, h1, h2, h3, h4, h5, h6, h7;
__m256i *vdata = (__m256i*)data;
__m256i *buf = sc->buf;
size_t ptr = 0;
unsigned first;
const int buf_size = 64; // block size in bytes per lane
uint64_t bcount = 0;
h0 = m256_const1_64( 0x4903ADFF749C51CE );
h1 = m256_const1_64( 0x0D95DE399746DF03 );
h2 = m256_const1_64( 0x8FD1934127C79BCE );
h3 = m256_const1_64( 0x9A255629FF352CB1 );
h4 = m256_const1_64( 0x5DB62599DF6CA7B0 );
h5 = m256_const1_64( 0xEABE394CA9D5C3F4 );
h6 = m256_const1_64( 0x991112C71A75B523 );
h7 = m256_const1_64( 0xAE18A40B660FCC33 );
// Update
if ( len <= buf_size - ptr )
{
memcpy_256( buf + (ptr>>3), vdata, len>>3 );
ptr += len;
}
else
{
first = ( bcount == 0 ) << 7;
do {
size_t clen;
if ( ptr == buf_size )
{
bcount ++;
UBI_BIG_4WAY( 96 + first, 0 );
first = 0;
ptr = 0;
}
clen = buf_size - ptr;
if ( clen > len )
clen = len;
memcpy_256( buf + (ptr>>3), vdata, clen>>3 );
ptr += clen;
vdata += (clen>>3);
len -= clen;
} while ( len > 0 );
}
// Close
unsigned et;
memset_zero_256( buf + (ptr>>3), (buf_size - ptr) >> 3 );
et = 352 + ((bcount == 0) << 7);
UBI_BIG_4WAY( et, ptr );
memset_zero_256( buf, buf_size >> 3 );
bcount = 0;
UBI_BIG_4WAY( 510, 8 );
casti_m256i( out, 0 ) = h0;
casti_m256i( out, 1 ) = h1;
casti_m256i( out, 2 ) = h2;
casti_m256i( out, 3 ) = h3;
casti_m256i( out, 4 ) = h4;
casti_m256i( out, 5 ) = h5;
casti_m256i( out, 6 ) = h6;
casti_m256i( out, 7 ) = h7;
}
void
skein512_4way_prehash64( skein512_4way_context *sc, const void *data )
{
__m256i *vdata = (__m256i*)data;
__m256i *buf = sc->buf;
buf[0] = vdata[0];
buf[1] = vdata[1];
buf[2] = vdata[2];
buf[3] = vdata[3];
buf[4] = vdata[4];
buf[5] = vdata[5];
buf[6] = vdata[6];
buf[7] = vdata[7];
register __m256i h0 = m256_const1_64( 0x4903ADFF749C51CE );
register __m256i h1 = m256_const1_64( 0x0D95DE399746DF03 );
register __m256i h2 = m256_const1_64( 0x8FD1934127C79BCE );
register __m256i h3 = m256_const1_64( 0x9A255629FF352CB1 );
register __m256i h4 = m256_const1_64( 0x5DB62599DF6CA7B0 );
register __m256i h5 = m256_const1_64( 0xEABE394CA9D5C3F4 );
register __m256i h6 = m256_const1_64( 0x991112C71A75B523 );
register __m256i h7 = m256_const1_64( 0xAE18A40B660FCC33 );
uint64_t bcount = 1;
UBI_BIG_4WAY( 224, 0 );
sc->h0 = h0;
sc->h1 = h1;
sc->h2 = h2;
sc->h3 = h3;
sc->h4 = h4;
sc->h5 = h5;
sc->h6 = h6;
sc->h7 = h7;
}
void
skein512_4way_final16( skein512_4way_context *sc, void *out, const void *data )
{
__m256i *vdata = (__m256i*)data;
__m256i *buf = sc->buf;
register __m256i h0 = sc->h0;
register __m256i h1 = sc->h1;
register __m256i h2 = sc->h2;
register __m256i h3 = sc->h3;
register __m256i h4 = sc->h4;
register __m256i h5 = sc->h5;
register __m256i h6 = sc->h6;
register __m256i h7 = sc->h7;
const __m256i zero = m256_zero;
buf[0] = vdata[0];
buf[1] = vdata[1];
buf[2] = zero;
buf[3] = zero;
buf[4] = zero;
buf[5] = zero;
buf[6] = zero;
buf[7] = zero;
uint64_t bcount = 1;
UBI_BIG_4WAY( 352, 16 );
buf[0] = zero;
buf[1] = zero;
bcount = 0;
UBI_BIG_4WAY( 510, 8 );
casti_m256i( out, 0 ) = h0;
casti_m256i( out, 1 ) = h1;
casti_m256i( out, 2 ) = h2;
casti_m256i( out, 3 ) = h3;
casti_m256i( out, 4 ) = h4;
casti_m256i( out, 5 ) = h5;
casti_m256i( out, 6 ) = h6;
casti_m256i( out, 7 ) = h7;
}
void
skein256_4way_update(void *cc, const void *data, size_t len)
{
@@ -806,6 +1111,9 @@ skein256_4way_close(void *cc, void *dst)
skein_big_close_4way(cc, 0, 0, dst, 32);
}
// Do not use with 128 byte data
void
skein512_4way_update(void *cc, const void *data, size_t len)
{

View File

@@ -63,10 +63,16 @@ typedef struct
typedef skein_8way_big_context skein512_8way_context;
typedef skein_8way_big_context skein256_8way_context;
void skein512_8way_full( skein512_8way_context *sc, void *out,
const void *data, size_t len );
void skein512_8way_init( skein512_8way_context *sc );
void skein512_8way_update( void *cc, const void *data, size_t len );
void skein512_8way_close( void *cc, void *dst );
void skein512_8way_prehash64( skein512_8way_context *sc, const void *data );
void skein512_8way_final16( skein512_8way_context *sc, void *out,
const void *data );
void skein256_8way_init( skein256_8way_context *sc );
void skein256_8way_update( void *cc, const void *data, size_t len );
void skein256_8way_close( void *cc, void *dst );
@@ -85,6 +91,8 @@ typedef skein_4way_big_context skein512_4way_context;
typedef skein_4way_big_context skein256_4way_context;
void skein512_4way_init( skein512_4way_context *sc );
void skein512_4way_full( skein512_4way_context *sc, void *out,
const void *data, size_t len );
void skein512_4way_update( void *cc, const void *data, size_t len );
void skein512_4way_close( void *cc, void *dst );
@@ -92,6 +100,10 @@ void skein256_4way_init( skein256_4way_context *sc );
void skein256_4way_update( void *cc, const void *data, size_t len );
void skein256_4way_close( void *cc, void *dst );
void skein512_4way_prehash64( skein512_4way_context *sc, const void *data );
void skein512_4way_final16( skein512_4way_context *sc, void *out,
const void *data );
#ifdef __cplusplus
}
#endif

View File

@@ -5,114 +5,126 @@
#if defined(SKEIN_8WAY)
static __thread skein512_8way_context skein512_8way_ctx
__attribute__ ((aligned (64)));
void skein2hash_8way( void *output, const void *input )
{
skein512_8way_context ctx;
uint64_t hash[16*8] __attribute__ ((aligned (128)));
skein512_8way_context ctx;
memcpy( &ctx, &skein512_8way_ctx, sizeof( ctx ) );
skein512_8way_init( &ctx );
skein512_8way_update( &ctx, input, 80 );
skein512_8way_close( &ctx, hash );
skein512_8way_init( &ctx );
skein512_8way_update( &ctx, hash, 64 );
skein512_8way_close( &ctx, output );
skein512_8way_final16( &ctx, hash, input + (64*8) );
skein512_8way_full( &ctx, output, hash, 64 );
}
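For reference, skein2 is simply two chained Skein-512 passes over the 80-byte header; the split above lets the first pass reuse the cached midstate while only the second runs in full over the 64-byte intermediate. A scalar equivalent using the sph API (illustrative locals):

/* skein2(header) = skein512( skein512( header, 80 ), 64 ) */
sph_skein512_context c;
uint8_t header[80], h1[64], h2[64];
sph_skein512_init( &c );
sph_skein512( &c, header, 80 );
sph_skein512_close( &c, h1 );
sph_skein512_init( &c );
sph_skein512( &c, h1, 64 );
sph_skein512_close( &c, h2 );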
int scanhash_skein2_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[16*8] __attribute__ ((aligned (128)));
uint64_t hash[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[49]);
uint64_t *hashq3 = &(hash[3*8]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint64_t targq3 = ((uint64_t*)ptarget)[3];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
uint32_t n = first_nonce;
__m512i *noncev = (__m512i*)vdata + 9; // aligned
int thr_id = mythr->id;
__m512i *noncev = (__m512i*)vdata + 9;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
skein512_8way_context ctx;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
*noncev = mm512_intrlv_blend_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ), *noncev );
skein512_8way_prehash64( &ctx, vdata );
do
{
*noncev = mm512_intrlv_blend_32( mm512_bswap_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ) ), *noncev );
skein2hash_8way( hash, vdata );
skein512_8way_final16( &ctx, hash, vdata + (16*8) );
skein512_8way_full( &ctx, hash, hash, 64 );
for ( int lane = 0; lane < 8; lane++ )
if ( hash7[ lane<<1 ] <= Htarg )
if ( unlikely( hashq3[ lane ] <= targq3 && !bench ) )
{
extr_lane_8x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
if ( valid_hash( lane_hash, ptarget ) && !bench )
{
pdata[19] = n + lane;
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
*noncev = _mm512_add_epi32( *noncev,
m512_const1_64( 0x0000000800000000 ) );
n += 8;
} while ( (n < max_nonce-8) && !work_restart[thr_id].restart );
} while ( likely( (n < last_nonce) && !work_restart[thr_id].restart ) );
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#elif defined(SKEIN_4WAY)
static __thread skein512_4way_context skein512_4way_ctx
__attribute__ ((aligned (64)));
void skein2hash_4way( void *output, const void *input )
{
skein512_4way_context ctx;
memcpy( &ctx, &skein512_4way_ctx, sizeof( ctx ) );
uint64_t hash[16*4] __attribute__ ((aligned (64)));
skein512_4way_init( &ctx );
skein512_4way_update( &ctx, input, 80 );
skein512_4way_close( &ctx, hash );
skein512_4way_init( &ctx );
skein512_4way_update( &ctx, hash, 64 );
skein512_4way_close( &ctx, output );
skein512_4way_final16( &ctx, hash, input + (64*4) );
skein512_4way_full( &ctx, output, hash, 64 );
}
int scanhash_skein2_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[16*4] __attribute__ ((aligned (64)));
uint64_t hash[8*4] __attribute__ ((aligned (64)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[25]);
uint64_t *hash_q3 = &(hash[3*4]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint64_t targ_q3 = ((uint64_t*)ptarget)[3];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
uint32_t n = first_nonce;
__m256i *noncev = (__m256i*)vdata + 9; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
__m256i *noncev = (__m256i*)vdata + 9;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
skein512_4way_context ctx;
mm256_bswap32_intrlv80_4x64( vdata, pdata );
skein512_4way_prehash64( &ctx, vdata );
*noncev = mm256_intrlv_blend_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
skein2hash_4way( hash, vdata );
skein512_4way_final16( &ctx, hash, vdata + (16*4) );
skein512_4way_full( &ctx, hash, hash, 64 );
for ( int lane = 0; lane < 4; lane++ )
if ( hash7[ lane<<1 ] <= Htarg )
if ( hash_q3[ lane ] <= targ_q3 )
{
extr_lane_4x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
if ( valid_hash( lane_hash, ptarget ) && !bench )
{
pdata[19] = n + lane;
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
*noncev = _mm256_add_epi32( *noncev,
m256_const1_64( 0x0000000400000000 ) );
n += 4;
} while ( (n < max_nonce) && !work_restart[thr_id].restart );
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}

View File

@@ -120,6 +120,13 @@ void sph_whirlpool(void *cc, const void *data, size_t len);
*/
void sph_whirlpool_close(void *cc, void *dst);
#define sph_whirlpool512_full( cc, dst, data, len ) \
do{ \
sph_whirlpool_init( cc ); \
sph_whirlpool( cc, data, len ); \
sph_whirlpool_close( cc, dst ); \
}while(0)
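Like shavite512_full earlier in this diff, this wraps the init/update/close triple in one call; since the macro re-inits cc every time, a single context can be reused across invocations (data and hash are illustrative locals):

sph_whirlpool_context wctx;   /* reusable across calls */
uint8_t data[64], hash[64];
sph_whirlpool512_full( &wctx, hash, data, 64 );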
/**
* WHIRLPOOL-0 uses the same structure than plain WHIRLPOOL.
*/

View File

@@ -35,8 +35,7 @@ void skunk_8way_hash( void *output, const void *input )
skunk_8way_ctx_holder ctx __attribute__ ((aligned (64)));
memcpy( &ctx, &skunk_8way_ctx, sizeof(skunk_8way_ctx) );
skein512_8way_update( &ctx.skein, input, 80 );
skein512_8way_close( &ctx.skein, vhash );
skein512_8way_final16( &ctx.skein, vhash, input );
dintrlv_8x64( hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, vhash, 512 );
@@ -104,35 +103,35 @@ int scanhash_skunk_8way( struct work *work, uint32_t max_nonce,
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
uint32_t n = first_nonce;
__m512i *noncev = (__m512i*)vdata + 9; // aligned
const uint32_t Htarg = ptarget[7];
int thr_id = mythr->id;
__m512i *noncev = (__m512i*)vdata + 9;
const int thr_id = mythr->id;
volatile uint8_t *restart = &(work_restart[thr_id].restart);
const bool bench = opt_benchmark;
if ( opt_benchmark )
((uint32_t*)ptarget)[7] = 0x0cff;
if ( bench ) ptarget[7] = 0x0fff;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
skein512_8way_prehash64( &skunk_8way_ctx.skein, vdata );
*noncev = mm512_intrlv_blend_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ), *noncev );
do
{
*noncev = mm512_intrlv_blend_32( mm512_bswap_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ) ), *noncev );
skunk_8way_hash( hash, vdata );
pdata[19] = n;
for ( int i = 0; i < 8; i++ )
if ( unlikely( (hash+(i<<3))[7] <= Htarg ) )
if ( likely( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark ) )
if ( unlikely( valid_hash( hash+(i<<3), ptarget ) && !bench ) )
{
pdata[19] = n+i;
pdata[19] = bswap_32( n+i );
submit_lane_solution( work, hash+(i<<3), mythr, i );
}
*noncev = _mm512_add_epi32( *noncev,
m512_const1_64( 0x0000000800000000 ) );
n +=8;
} while ( likely( ( n < max_nonce-8 ) && !(*restart) ) );
} while ( likely( ( n < last_nonce ) && !( *restart ) ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
@@ -159,17 +158,16 @@ static __thread skunk_4way_ctx_holder skunk_4way_ctx;
void skunk_4way_hash( void *output, const void *input )
{
uint64_t vhash[8*4] __attribute__ ((aligned (128)));
uint64_t hash0[8] __attribute__ ((aligned (64)));
uint64_t hash1[8] __attribute__ ((aligned (64)));
uint64_t hash2[8] __attribute__ ((aligned (64)));
uint64_t hash3[8] __attribute__ ((aligned (64)));
uint64_t vhash[8*4] __attribute__ ((aligned (64)));
skunk_4way_ctx_holder ctx __attribute__ ((aligned (64)));
memcpy( &ctx, &skunk_4way_ctx, sizeof(skunk_4way_ctx) );
skein512_4way_update( &ctx.skein, input, 80 );
skein512_4way_close( &ctx.skein, vhash );
skein512_4way_final16( &ctx.skein, vhash, input + (64*4) );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash0, (const byte*)hash0, 64 );
@@ -213,40 +211,40 @@ void skunk_4way_hash( void *output, const void *input )
int scanhash_skunk_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*8] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t hash[4*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
uint32_t n = first_nonce;
__m256i *noncev = (__m256i*)vdata + 9; // aligned
const uint32_t Htarg = ptarget[7];
int thr_id = mythr->id; // thr_id arg is deprecated
volatile uint8_t *restart = &(work_restart[thr_id].restart);
__m256i *noncev = (__m256i*)vdata + 9;
const int thr_id = mythr->id;
volatile uint8_t *restart = &( work_restart[ thr_id ].restart );
const bool bench = opt_benchmark;
if ( opt_benchmark )
((uint32_t*)ptarget)[7] = 0x0cff;
if ( bench ) ptarget[7] = 0x0fff;
mm256_bswap32_intrlv80_4x64( vdata, pdata );
skein512_4way_prehash64( &skunk_4way_ctx.skein, vdata );
*noncev = mm256_intrlv_blend_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
skunk_4way_hash( hash, vdata );
pdata[19] = n;
for ( int i = 0; i < 4; i++ )
if ( (hash+(i<<3))[7] <= Htarg )
if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
if ( unlikely( valid_hash( hash+(i<<3), ptarget ) && !bench ) )
{
pdata[19] = n+i;
pdata[19] = bswap_32( n + i );
submit_lane_solution( work, hash+(i<<3), mythr, i );
}
*noncev = _mm256_add_epi32( *noncev,
m256_const1_64( 0x0000000400000000 ) );
n +=4;
} while ( ( n < max_nonce ) && !(*restart) );
*hashes_done = n - first_nonce + 1;
} while ( likely( ( n < last_nonce ) && !( *restart ) ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}

View File

@@ -30,9 +30,6 @@
#include "algo/groestl/aes_ni/hash-groestl.h"
#endif
static __thread uint32_t s_ntime = UINT32_MAX;
static __thread char hashOrder[X16R_HASH_FUNC_COUNT + 1] = { 0 };
static void hex_getAlgoString(const uint32_t* prevblock, char *output)
{
char *sptr = output;
@@ -50,6 +47,7 @@ static void hex_getAlgoString(const uint32_t* prevblock, char *output)
*sptr = '\0';
}
/*
union _hex_context_overlay
{
#if defined(__AES__)
@@ -66,7 +64,7 @@ union _hex_context_overlay
sph_keccak512_context keccak;
hashState_luffa luffa;
cubehashParam cube;
sph_shavite512_context shavite;
shavite512_context shavite;
hashState_sd simd;
sph_hamsi512_context hamsi;
sph_fugue512_context fugue;
@@ -75,18 +73,19 @@ union _hex_context_overlay
SHA512_CTX sha512;
};
typedef union _hex_context_overlay hex_context_overlay;
*/
static __thread hex_context_overlay hex_ctx;
static __thread x16r_context_overlay hex_ctx;
void hex_hash( void* output, const void* input )
int hex_hash( void* output, const void* input, int thrid )
{
uint32_t _ALIGN(128) hash[16];
hex_context_overlay ctx;
x16r_context_overlay ctx;
memcpy( &ctx, &hex_ctx, sizeof(ctx) );
void *in = (void*) input;
int size = 80;
char elem = hashOrder[0];
char elem = x16r_hash_order[0];
uint8_t algo = elem >= 'A' ? elem - 'A' + 10 : elem - '0';
for ( int i = 0; i < 16; i++ )
@@ -160,9 +159,7 @@ void hex_hash( void* output, const void* input )
}
break;
case SHAVITE:
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in, size );
sph_shavite512_close( &ctx.shavite, hash );
shavite512_full( &ctx.shavite, hash, in, size );
break;
case SIMD:
init_sd( &ctx.simd, 512 );
@@ -190,9 +187,7 @@ void hex_hash( void* output, const void* input )
sph_hamsi512_close( &ctx.hamsi, hash );
break;
case FUGUE:
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in, size );
sph_fugue512_close( &ctx.fugue, hash );
sph_fugue512_full( &ctx.fugue, hash, in, size );
break;
case SHABAL:
if ( i == 0 )
@@ -206,13 +201,12 @@ void hex_hash( void* output, const void* input )
break;
case WHIRLPOOL:
if ( i == 0 )
sph_whirlpool( &ctx.whirlpool, in+64, 16 );
else
{
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in, size );
sph_whirlpool( &ctx.whirlpool, in+64, 16 );
sph_whirlpool_close( &ctx.whirlpool, hash );
}
sph_whirlpool_close( &ctx.whirlpool, hash );
else
sph_whirlpool512_full( &ctx.whirlpool, hash, in, size );
break;
case SHA_512:
SHA512_Init( &ctx.sha512 );
@@ -220,11 +214,15 @@ void hex_hash( void* output, const void* input )
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
break;
}
if ( work_restart[thrid].restart ) return 0;
algo = (uint8_t)hash[0] % X16R_HASH_FUNC_COUNT;
in = (void*) hash;
size = 64;
}
memcpy(output, hash, 32);
return 1;
}
int scanhash_hex( struct work *work, uint32_t max_nonce,
@@ -235,7 +233,7 @@ int scanhash_hex( struct work *work, uint32_t max_nonce,
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
const uint32_t last_nonce = max_nonce;
const int thr_id = mythr->id;
uint32_t nonce = first_nonce;
volatile uint8_t *restart = &(work_restart[thr_id].restart);
@@ -244,17 +242,18 @@ int scanhash_hex( struct work *work, uint32_t max_nonce,
mm128_bswap32_80( edata, pdata );
static __thread uint32_t s_ntime = UINT32_MAX;
uint32_t ntime = swab32(pdata[17]);
if ( s_ntime != ntime )
{
hex_getAlgoString( (const uint32_t*) (&edata[1]), hashOrder );
hex_getAlgoString( (const uint32_t*) (&edata[1]), x16r_hash_order );
s_ntime = ntime;
if ( opt_debug && !thr_id )
applog( LOG_INFO, "hash order %s (%08x)", hashOrder, ntime );
applog( LOG_INFO, "hash order %s (%08x)", x16r_hash_order, ntime );
}
// Do midstate prehash on hash functions with block size <= 64 bytes.
const char elem = hashOrder[0];
const char elem = x16r_hash_order[0];
const uint8_t algo = elem >= 'A' ? elem - 'A' + 10 : elem - '0';
switch ( algo )
{
@@ -291,8 +290,7 @@ int scanhash_hex( struct work *work, uint32_t max_nonce,
do
{
edata[19] = nonce;
hex_hash( hash32, edata );
if ( hex_hash( hash32, edata, thr_id ) )
if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
{
be32enc( &pdata[19], nonce );

View File

@@ -80,7 +80,7 @@ void x16r_8way_prehash( void *vdata, void *pdata )
// Called by wrapper hash function to optionally continue hashing and
// convert to final hash.
void x16r_8way_hash_generic( void* output, const void* input )
int x16r_8way_hash_generic( void* output, const void* input, int thrid )
{
uint32_t vhash[20*8] __attribute__ ((aligned (128)));
uint32_t hash0[20] __attribute__ ((aligned (64)));
@@ -287,30 +287,14 @@ void x16r_8way_hash_generic( void* output, const void* input )
shavite512_4way_full( &ctx.shavite, vhash, vhash, size );
dintrlv_4x128_512( hash4, hash5, hash6, hash7, vhash );
#else
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in0, size );
sph_shavite512_close( &ctx.shavite, hash0 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in1, size );
sph_shavite512_close( &ctx.shavite, hash1 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in2, size );
sph_shavite512_close( &ctx.shavite, hash2 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in3, size );
sph_shavite512_close( &ctx.shavite, hash3 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in4, size );
sph_shavite512_close( &ctx.shavite, hash4 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in5, size );
sph_shavite512_close( &ctx.shavite, hash5 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in6, size );
sph_shavite512_close( &ctx.shavite, hash6 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in7, size );
sph_shavite512_close( &ctx.shavite, hash7 );
shavite512_full( &ctx.shavite, hash0, in0, size );
shavite512_full( &ctx.shavite, hash1, in1, size );
shavite512_full( &ctx.shavite, hash2, in2, size );
shavite512_full( &ctx.shavite, hash3, in3, size );
shavite512_full( &ctx.shavite, hash4, in4, size );
shavite512_full( &ctx.shavite, hash5, in5, size );
shavite512_full( &ctx.shavite, hash6, in6, size );
shavite512_full( &ctx.shavite, hash7, in7, size );
#endif
break;
case SIMD:
@@ -363,30 +347,14 @@ void x16r_8way_hash_generic( void* output, const void* input )
hash7, vhash );
break;
case FUGUE:
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in0, size );
sph_fugue512_close( &ctx.fugue, hash0 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in1, size );
sph_fugue512_close( &ctx.fugue, hash1 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in2, size );
sph_fugue512_close( &ctx.fugue, hash2 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in3, size );
sph_fugue512_close( &ctx.fugue, hash3 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in4, size );
sph_fugue512_close( &ctx.fugue, hash4 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in5, size );
sph_fugue512_close( &ctx.fugue, hash5 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in6, size );
sph_fugue512_close( &ctx.fugue, hash6 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in7, size );
sph_fugue512_close( &ctx.fugue, hash7 );
sph_fugue512_full( &ctx.fugue, hash0, in0, size );
sph_fugue512_full( &ctx.fugue, hash1, in1, size );
sph_fugue512_full( &ctx.fugue, hash2, in2, size );
sph_fugue512_full( &ctx.fugue, hash3, in3, size );
sph_fugue512_full( &ctx.fugue, hash4, in4, size );
sph_fugue512_full( &ctx.fugue, hash5, in5, size );
sph_fugue512_full( &ctx.fugue, hash6, in6, size );
sph_fugue512_full( &ctx.fugue, hash7, in7, size );
break;
case SHABAL:
intrlv_8x32( vhash, in0, in1, in2, in3, in4, in5, in6, in7,
@@ -431,30 +399,14 @@ void x16r_8way_hash_generic( void* output, const void* input )
}
else
{
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in0, size );
sph_whirlpool_close( &ctx.whirlpool, hash0 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in1, size );
sph_whirlpool_close( &ctx.whirlpool, hash1 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in2, size );
sph_whirlpool_close( &ctx.whirlpool, hash2 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in3, size );
sph_whirlpool_close( &ctx.whirlpool, hash3 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in4, size );
sph_whirlpool_close( &ctx.whirlpool, hash4 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in5, size );
sph_whirlpool_close( &ctx.whirlpool, hash5 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in6, size );
sph_whirlpool_close( &ctx.whirlpool, hash6 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in7, size );
sph_whirlpool_close( &ctx.whirlpool, hash7 );
sph_whirlpool512_full( &ctx.whirlpool, hash0, in0, size );
sph_whirlpool512_full( &ctx.whirlpool, hash1, in1, size );
sph_whirlpool512_full( &ctx.whirlpool, hash2, in2, size );
sph_whirlpool512_full( &ctx.whirlpool, hash3, in3, size );
sph_whirlpool512_full( &ctx.whirlpool, hash4, in4, size );
sph_whirlpool512_full( &ctx.whirlpool, hash5, in5, size );
sph_whirlpool512_full( &ctx.whirlpool, hash6, in6, size );
sph_whirlpool512_full( &ctx.whirlpool, hash7, in7, size );
}
break;
case SHA_512:
@@ -472,6 +424,9 @@ void x16r_8way_hash_generic( void* output, const void* input )
hash7, vhash );
break;
}
if ( work_restart[thrid].restart ) return 0;
size = 64;
}
@@ -483,14 +438,17 @@ void x16r_8way_hash_generic( void* output, const void* input )
memcpy( output+320, hash5, 64 );
memcpy( output+384, hash6, 64 );
memcpy( output+448, hash7, 64 );
return 1;
}
// x16-r,-s,-rt wrapper called directly by scanhash to repackage 512 bit
// hash to 256 bit final hash.
void x16r_8way_hash( void* output, const void* input )
int x16r_8way_hash( void* output, const void* input, int thrid )
{
uint8_t hash[64*8] __attribute__ ((aligned (128)));
x16r_8way_hash_generic( hash, input );
if ( !x16r_8way_hash_generic( hash, input, thrid ) )
return 0;
memcpy( output, hash, 32 );
memcpy( output+32, hash+64, 32 );
@@ -500,7 +458,9 @@ void x16r_8way_hash( void* output, const void* input )
memcpy( output+160, hash+320, 32 );
memcpy( output+192, hash+384, 32 );
memcpy( output+224, hash+448, 32 );
}
return 1;
}
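The change from void to int hash functions above sets a contract worth spelling out: every round polls work_restart and bails out with 0, and scanhash only inspects lanes when the call returns 1, so stale work is abandoned mid-chain instead of being hashed to completion. A minimal standalone sketch of that contract, with a toy mixing loop standing in for the real 16-algorithm chain (all names below are hypothetical, not from the repository):

   #include <stdint.h>
   #include <stdio.h>

   static volatile uint8_t restart_flag = 0;  /* stands in for work_restart[thrid].restart */

   /* Toy 16-round hash: returns 0 if aborted by the restart flag, 1 on completion. */
   static int toy_hash( uint32_t *out, const uint32_t *in )
   {
      uint32_t h = 0x811c9dc5u;
      for ( int i = 0; i < 16; i++ )
      {
         h = ( h ^ in[ i % 20 ] ) * 16777619u;  /* FNV-style round, one per "algorithm" */
         if ( restart_flag ) return 0;          /* abandon stale work mid-chain */
      }
      *out = h;
      return 1;
   }

   int main(void)
   {
      uint32_t header[20] = {0}, h;
      for ( uint32_t nonce = 0; nonce < 4; nonce++ )
      {
         header[19] = nonce;
         if ( toy_hash( &h, header ) )          /* validate only completed hashes */
            printf( "nonce %u -> %08x\n", nonce, h );
      }
      return 0;
   }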
// x16r only
int scanhash_x16r_8way( struct work *work, uint32_t max_nonce,
@@ -540,8 +500,7 @@ int scanhash_x16r_8way( struct work *work, uint32_t max_nonce,
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
x16r_8way_hash( hash, vdata );
if ( x16r_8way_hash( hash, vdata, thr_id ) )
for ( int i = 0; i < 8; i++ )
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
{
@@ -576,8 +535,7 @@ void x16r_4way_prehash( void *vdata, void *pdata )
break;
case SKEIN:
mm256_bswap32_intrlv80_4x64( vdata, pdata );
skein512_4way_init( &x16r_ctx.skein );
skein512_4way_update( &x16r_ctx.skein, vdata, 64 );
skein512_4way_prehash64( &x16r_ctx.skein, vdata );
break;
case LUFFA:
mm128_bswap32_80( edata, pdata );
@@ -614,7 +572,7 @@ void x16r_4way_prehash( void *vdata, void *pdata )
}
}
void x16r_4way_hash_generic( void* output, const void* input )
int x16r_4way_hash_generic( void* output, const void* input, int thrid )
{
uint32_t vhash[20*4] __attribute__ ((aligned (128)));
uint32_t hash0[20] __attribute__ ((aligned (64)));
@@ -692,14 +650,12 @@ void x16r_4way_hash_generic( void* output, const void* input )
break;
case SKEIN:
if ( i == 0 )
skein512_4way_final16( &ctx.skein, vhash, input + (64*4) );
else
{
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
skein512_4way_full( &ctx.skein, vhash, vhash, size );
}
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
break;
case LUFFA:
@@ -755,18 +711,10 @@ void x16r_4way_hash_generic( void* output, const void* input )
}
break;
case SHAVITE:
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in0, size );
sph_shavite512_close( &ctx.shavite, hash0 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in1, size );
sph_shavite512_close( &ctx.shavite, hash1 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in2, size );
sph_shavite512_close( &ctx.shavite, hash2 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in3, size );
sph_shavite512_close( &ctx.shavite, hash3 );
shavite512_full( &ctx.shavite, hash0, in0, size );
shavite512_full( &ctx.shavite, hash1, in1, size );
shavite512_full( &ctx.shavite, hash2, in2, size );
shavite512_full( &ctx.shavite, hash3, in3, size );
break;
case SIMD:
intrlv_2x128( vhash, in0, in1, size<<3 );
@@ -799,18 +747,10 @@ void x16r_4way_hash_generic( void* output, const void* input )
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
break;
case FUGUE:
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in0, size );
sph_fugue512_close( &ctx.fugue, hash0 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in1, size );
sph_fugue512_close( &ctx.fugue, hash1 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in2, size );
sph_fugue512_close( &ctx.fugue, hash2 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in3, size );
sph_fugue512_close( &ctx.fugue, hash3 );
sph_fugue512_full( &ctx.fugue, hash0, in0, size );
sph_fugue512_full( &ctx.fugue, hash1, in1, size );
sph_fugue512_full( &ctx.fugue, hash2, in2, size );
sph_fugue512_full( &ctx.fugue, hash3, in3, size );
break;
case SHABAL:
intrlv_4x32( vhash, in0, in1, in2, in3, size<<3 );
@@ -841,18 +781,10 @@ void x16r_4way_hash_generic( void* output, const void* input )
}
else
{
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in0, size );
sph_whirlpool_close( &ctx.whirlpool, hash0 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in1, size );
sph_whirlpool_close( &ctx.whirlpool, hash1 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in2, size );
sph_whirlpool_close( &ctx.whirlpool, hash2 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in3, size );
sph_whirlpool_close( &ctx.whirlpool, hash3 );
sph_whirlpool512_full( &ctx.whirlpool, hash0, in0, size );
sph_whirlpool512_full( &ctx.whirlpool, hash1, in1, size );
sph_whirlpool512_full( &ctx.whirlpool, hash2, in2, size );
sph_whirlpool512_full( &ctx.whirlpool, hash3, in3, size );
}
break;
case SHA_512:
@@ -869,23 +801,31 @@ void x16r_4way_hash_generic( void* output, const void* input )
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
break;
}
if ( work_restart[thrid].restart ) return 0;
size = 64;
}
memcpy( output, hash0, 64 );
memcpy( output+64, hash1, 64 );
memcpy( output+128, hash2, 64 );
memcpy( output+192, hash3, 64 );
return 1;
}
void x16r_4way_hash( void* output, const void* input )
int x16r_4way_hash( void* output, const void* input, int thrid )
{
uint8_t hash[64*4] __attribute__ ((aligned (64)));
x16r_4way_hash_generic( hash, input );
if ( !x16r_4way_hash_generic( hash, input, thrid ) )
return 0;
memcpy( output, hash, 32 );
memcpy( output+32, hash+64, 32 );
memcpy( output+64, hash+128, 32 );
memcpy( output+96, hash+192, 32 );
return 1;
}
int scanhash_x16r_4way( struct work *work, uint32_t max_nonce,
@@ -924,7 +864,7 @@ int scanhash_x16r_4way( struct work *work, uint32_t max_nonce,
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
x16r_4way_hash( hash, vdata );
if ( x16r_4way_hash( hash, vdata, thr_id ) )
for ( int i = 0; i < 4; i++ )
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
{

View File

@@ -121,7 +121,7 @@ union _x16r_8way_context_overlay
echo_4way_context echo;
#else
hashState_groestl groestl;
sph_shavite512_context shavite;
shavite512_context shavite;
hashState_echo echo;
#endif
} __attribute__ ((aligned (64)));
@@ -131,8 +131,8 @@ typedef union _x16r_8way_context_overlay x16r_8way_context_overlay;
extern __thread x16r_8way_context_overlay x16r_ctx;
void x16r_8way_prehash( void *, void * );
void x16r_8way_hash_generic( void *, const void * );
void x16r_8way_hash( void *, const void * );
int x16r_8way_hash_generic( void *, const void *, int );
int x16r_8way_hash( void *, const void *, int );
int scanhash_x16r_8way( struct work *, uint32_t ,
uint64_t *, struct thr_info * );
extern __thread x16r_8way_context_overlay x16r_ctx;
@@ -152,7 +152,7 @@ union _x16r_4way_context_overlay
luffa_2way_context luffa;
hashState_luffa luffa1;
cubehashParam cube;
sph_shavite512_context shavite;
shavite512_context shavite;
simd_2way_context simd;
hamsi512_4way_context hamsi;
sph_fugue512_context fugue;
@@ -166,8 +166,8 @@ typedef union _x16r_4way_context_overlay x16r_4way_context_overlay;
extern __thread x16r_4way_context_overlay x16r_ctx;
void x16r_4way_prehash( void *, void * );
void x16r_4way_hash_generic( void *, const void * );
void x16r_4way_hash( void *, const void * );
int x16r_4way_hash_generic( void *, const void *, int );
int x16r_4way_hash( void *, const void *, int );
int scanhash_x16r_4way( struct work *, uint32_t,
uint64_t *, struct thr_info * );
extern __thread x16r_4way_context_overlay x16r_ctx;
@@ -191,7 +191,7 @@ union _x16r_context_overlay
sph_keccak512_context keccak;
hashState_luffa luffa;
cubehashParam cube;
sph_shavite512_context shavite;
shavite512_context shavite;
hashState_sd simd;
sph_hamsi512_context hamsi;
sph_fugue512_context fugue;
@@ -205,26 +205,26 @@ typedef union _x16r_context_overlay x16r_context_overlay;
extern __thread x16r_context_overlay x16_ctx;
void x16r_prehash( void *, void * );
void x16r_hash_generic( void *, const void * );
void x16r_hash( void *, const void * );
int x16r_hash_generic( void *, const void *, int );
int x16r_hash( void *, const void *, int );
int scanhash_x16r( struct work *, uint32_t, uint64_t *, struct thr_info * );
// x16Rv2
#if defined(X16RV2_8WAY)
void x16rv2_8way_hash( void *state, const void *input );
int x16rv2_8way_hash( void *state, const void *input, int thrid );
int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined(X16RV2_4WAY)
void x16rv2_4way_hash( void *state, const void *input );
int x16rv2_4way_hash( void *state, const void *input, int thrid );
int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#else
void x16rv2_hash( void *state, const void *input );
int x16rv2_hash( void *state, const void *input, int thr_id );
int scanhash_x16rv2( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
@@ -254,21 +254,21 @@ int scanhash_x16rt( struct work *work, uint32_t max_nonce,
// x21s
#if defined(X16R_8WAY)
void x21s_8way_hash( void *state, const void *input );
int x21s_8way_hash( void *state, const void *input, int thrid );
int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool x21s_8way_thread_init();
#elif defined(X16R_4WAY)
void x21s_4way_hash( void *state, const void *input );
int x21s_4way_hash( void *state, const void *input, int thrid );
int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool x21s_4way_thread_init();
#else
void x21s_hash( void *state, const void *input );
int x21s_hash( void *state, const void *input, int thr_id );
int scanhash_x21s( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool x21s_thread_init();
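The _x16r_*_context_overlay unions declared in this header are a deliberate space trick: only one of the sixteen algorithm contexts is live at any point in the chain, so they can all share one aligned allocation sized to the largest member. A standalone illustration of the idea with toy context types (the real members are the sph / 4-way states shown above):

   #include <stdio.h>

   typedef struct { unsigned long long state[25]; } toy_keccak_ctx;        /* "large" context */
   typedef struct { unsigned char buf[64]; unsigned count; } toy_blake_ctx; /* "small" context */

   /* Overlay: storage for whichever context the current round needs. */
   typedef union
   {
      toy_keccak_ctx keccak;
      toy_blake_ctx  blake;
   } ctx_overlay __attribute__ ((aligned (64)));

   int main(void)
   {
      printf( "members: %zu and %zu bytes; overlay: %zu (max plus padding, not sum)\n",
              sizeof(toy_keccak_ctx), sizeof(toy_blake_ctx), sizeof(ctx_overlay) );
      return 0;
   }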

View File

@@ -48,7 +48,7 @@ void x16r_prehash( void *edata, void *pdata )
}
}
void x16r_hash_generic( void* output, const void* input )
int x16r_hash_generic( void* output, const void* input, int thrid )
{
uint32_t _ALIGN(128) hash[16];
x16r_context_overlay ctx;
@@ -124,9 +124,7 @@ void x16r_hash_generic( void* output, const void* input )
(byte*)in, size );
break;
case SHAVITE:
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in, size );
sph_shavite512_close( &ctx.shavite, hash );
shavite512_full( &ctx.shavite, hash, in, size );
break;
case SIMD:
simd_full( &ctx.simd, (BitSequence *)hash,
@@ -134,7 +132,8 @@ void x16r_hash_generic( void* output, const void* input )
break;
case ECHO:
#if defined(__AES__)
echo_full( &ctx.echo, hash, 512, in, size );
echo_full( &ctx.echo, (BitSequence*)hash, 512,
(const BitSequence*)in, size );
#else
sph_echo512_init( &ctx.echo );
sph_echo512( &ctx.echo, in, size );
@@ -152,9 +151,7 @@ void x16r_hash_generic( void* output, const void* input )
sph_hamsi512_close( &ctx.hamsi, hash );
break;
case FUGUE:
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in, size );
sph_fugue512_close( &ctx.fugue, hash );
sph_fugue512_full( &ctx.fugue, hash, in, size );
break;
case SHABAL:
if ( i == 0 )
@@ -168,13 +165,12 @@ void x16r_hash_generic( void* output, const void* input )
break;
case WHIRLPOOL:
if ( i == 0 )
{
sph_whirlpool( &ctx.whirlpool, in+64, 16 );
sph_whirlpool_close( &ctx.whirlpool, hash );
}
else
sph_whirlpool512_full( &ctx.whirlpool, hash, in, size );
break;
case SHA_512:
SHA512_Init( &ctx.sha512 );
@@ -182,18 +178,24 @@ void x16r_hash_generic( void* output, const void* input )
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
break;
}
if ( work_restart[thrid].restart ) return 0;
in = (void*) hash;
size = 64;
}
memcpy( output, hash, 64 );
return 1;
}
void x16r_hash( void* output, const void* input )
int x16r_hash( void* output, const void* input, int thrid )
{
uint8_t hash[64] __attribute__ ((aligned (64)));
x16r_hash_generic( hash, input );
if ( !x16r_hash_generic( hash, input, thrid ) )
return 0;
memcpy( output, hash, 32 );
return 1;
}
int scanhash_x16r( struct work *work, uint32_t max_nonce,
@@ -227,8 +229,7 @@ int scanhash_x16r( struct work *work, uint32_t max_nonce,
do
{
edata[19] = nonce;
x16r_hash( hash32, edata );
if ( x16r_hash( hash32, edata, thr_id ) )
if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
{
pdata[19] = bswap_32( nonce );
@@ -237,7 +238,7 @@ int scanhash_x16r( struct work *work, uint32_t max_nonce,
nonce++;
} while ( nonce < max_nonce && !(*restart) );
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
*hashes_done = pdata[19] - first_nonce;
return 0;
}
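Every loop above indexes x16r_hash_order with the same two-line decode: the 16-round order is a string of hex digits, '0'..'9' selecting the first ten functions and 'A'..'F' the rest. That decode can be exercised on its own; the order string below is a made-up example, since a real one is derived from the previous block:

   #include <stdint.h>
   #include <stdio.h>

   int main(void)
   {
      const char order[] = "86C5A2F40D9E13B7";   /* hypothetical 16-round hash order */
      for ( int i = 0; i < 16; i++ )
      {
         const char elem = order[i];
         /* same decode as the miner: 'A'..'F' -> 10..15, '0'..'9' -> 0..9 */
         const uint8_t algo = elem >= 'A' ? elem - 'A' + 10 : elem - '0';
         printf( "round %2d -> algorithm %2d\n", i, algo );
      }
      return 0;
   }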

View File

@@ -41,8 +41,7 @@ int scanhash_x16rt_8way( struct work *work, uint32_t max_nonce,
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
x16r_8way_hash( hash, vdata );
if ( x16r_8way_hash( hash, vdata, thr_id ) )
for ( int i = 0; i < 8; i++ )
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
{
@@ -95,7 +94,7 @@ int scanhash_x16rt_4way( struct work *work, uint32_t max_nonce,
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
x16r_4way_hash( hash, vdata );
if ( x16r_4way_hash( hash, vdata, thr_id ) )
for ( int i = 0; i < 4; i++ )
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
{

View File

@@ -36,8 +36,7 @@ int scanhash_x16rt( struct work *work, uint32_t max_nonce,
do
{
edata[19] = nonce;
x16r_hash( hash32, edata );
if ( x16r_hash( hash32, edata, thr_id ) )
if ( valid_hash( hash32, ptarget ) && !bench )
{
pdata[19] = bswap_32( nonce );
@@ -46,7 +45,7 @@ int scanhash_x16rt( struct work *work, uint32_t max_nonce,
nonce++;
} while ( nonce < max_nonce && !(*restart) );
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
*hashes_done = pdata[19] - first_nonce;
return 0;
}

View File

@@ -35,9 +35,6 @@
#if defined (X16RV2_8WAY)
static __thread uint32_t s_ntime = UINT32_MAX;
static __thread char hashOrder[X16R_HASH_FUNC_COUNT + 1] = { 0 };
union _x16rv2_8way_context_overlay
{
blake512_8way_context blake;
@@ -60,7 +57,7 @@ union _x16rv2_8way_context_overlay
echo_4way_context echo;
#else
hashState_groestl groestl;
sph_shavite512_context shavite;
shavite512_context shavite;
hashState_echo echo;
#endif
} __attribute__ ((aligned (64)));
@@ -68,7 +65,7 @@ union _x16rv2_8way_context_overlay
typedef union _x16rv2_8way_context_overlay x16rv2_8way_context_overlay;
static __thread x16rv2_8way_context_overlay x16rv2_ctx;
void x16rv2_8way_hash( void* output, const void* input )
int x16rv2_8way_hash( void* output, const void* input, int thrid )
{
uint32_t vhash[24*8] __attribute__ ((aligned (128)));
uint32_t hash0[24] __attribute__ ((aligned (64)));
@@ -96,7 +93,7 @@ void x16rv2_8way_hash( void* output, const void* input )
for ( int i = 0; i < 16; i++ )
{
const char elem = hashOrder[i];
const char elem = x16r_hash_order[i];
const uint8_t algo = elem >= 'A' ? elem - 'A' + 10 : elem - '0';
switch ( algo )
@@ -374,30 +371,14 @@ void x16rv2_8way_hash( void* output, const void* input )
shavite512_4way_full( &ctx.shavite, vhash, vhash, size );
dintrlv_4x128_512( hash4, hash5, hash6, hash7, vhash );
#else
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in0, size );
sph_shavite512_close( &ctx.shavite, hash0 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in1, size );
sph_shavite512_close( &ctx.shavite, hash1 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in2, size );
sph_shavite512_close( &ctx.shavite, hash2 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in3, size );
sph_shavite512_close( &ctx.shavite, hash3 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in4, size );
sph_shavite512_close( &ctx.shavite, hash4 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in5, size );
sph_shavite512_close( &ctx.shavite, hash5 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in6, size );
sph_shavite512_close( &ctx.shavite, hash6 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in7, size );
sph_shavite512_close( &ctx.shavite, hash7 );
shavite512_full( &ctx.shavite, hash0, in0, size );
shavite512_full( &ctx.shavite, hash1, in1, size );
shavite512_full( &ctx.shavite, hash2, in2, size );
shavite512_full( &ctx.shavite, hash3, in3, size );
shavite512_full( &ctx.shavite, hash4, in4, size );
shavite512_full( &ctx.shavite, hash5, in5, size );
shavite512_full( &ctx.shavite, hash6, in6, size );
shavite512_full( &ctx.shavite, hash7, in7, size );
#endif
break;
case SIMD:
@@ -451,30 +432,14 @@ void x16rv2_8way_hash( void* output, const void* input )
hash7, vhash );
break;
case FUGUE:
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in0, size );
sph_fugue512_close( &ctx.fugue, hash0 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in1, size );
sph_fugue512_close( &ctx.fugue, hash1 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in2, size );
sph_fugue512_close( &ctx.fugue, hash2 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in3, size );
sph_fugue512_close( &ctx.fugue, hash3 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in4, size );
sph_fugue512_close( &ctx.fugue, hash4 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in5, size );
sph_fugue512_close( &ctx.fugue, hash5 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in6, size );
sph_fugue512_close( &ctx.fugue, hash6 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in7, size );
sph_fugue512_close( &ctx.fugue, hash7 );
sph_fugue512_full( &ctx.fugue, hash0, in0, size );
sph_fugue512_full( &ctx.fugue, hash1, in1, size );
sph_fugue512_full( &ctx.fugue, hash2, in2, size );
sph_fugue512_full( &ctx.fugue, hash3, in3, size );
sph_fugue512_full( &ctx.fugue, hash4, in4, size );
sph_fugue512_full( &ctx.fugue, hash5, in5, size );
sph_fugue512_full( &ctx.fugue, hash6, in6, size );
sph_fugue512_full( &ctx.fugue, hash7, in7, size );
break;
case SHABAL:
intrlv_8x32( vhash, in0, in1, in2, in3, in4, in5, in6, in7,
@@ -519,30 +484,14 @@ void x16rv2_8way_hash( void* output, const void* input )
}
else
{
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in0, size );
sph_whirlpool_close( &ctx.whirlpool, hash0 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in1, size );
sph_whirlpool_close( &ctx.whirlpool, hash1 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in2, size );
sph_whirlpool_close( &ctx.whirlpool, hash2 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in3, size );
sph_whirlpool_close( &ctx.whirlpool, hash3 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in4, size );
sph_whirlpool_close( &ctx.whirlpool, hash4 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in5, size );
sph_whirlpool_close( &ctx.whirlpool, hash5 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in6, size );
sph_whirlpool_close( &ctx.whirlpool, hash6 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in7, size );
sph_whirlpool_close( &ctx.whirlpool, hash7 );
sph_whirlpool512_full( &ctx.whirlpool, hash0, in0, size );
sph_whirlpool512_full( &ctx.whirlpool, hash1, in1, size );
sph_whirlpool512_full( &ctx.whirlpool, hash2, in2, size );
sph_whirlpool512_full( &ctx.whirlpool, hash3, in3, size );
sph_whirlpool512_full( &ctx.whirlpool, hash4, in4, size );
sph_whirlpool512_full( &ctx.whirlpool, hash5, in5, size );
sph_whirlpool512_full( &ctx.whirlpool, hash6, in6, size );
sph_whirlpool512_full( &ctx.whirlpool, hash7, in7, size );
}
break;
case SHA_512:
@@ -614,6 +563,9 @@ void x16rv2_8way_hash( void* output, const void* input )
hash7, vhash );
break;
}
if ( work_restart[thrid].restart ) return 0;
size = 64;
}
@@ -625,6 +577,7 @@ void x16rv2_8way_hash( void* output, const void* input )
memcpy( output+160, hash5, 32 );
memcpy( output+192, hash6, 32 );
memcpy( output+224, hash7, 32 );
return 1;
}
int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,
@@ -651,17 +604,19 @@ int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,
bedata1[0] = bswap_32( pdata[1] );
bedata1[1] = bswap_32( pdata[2] );
static __thread uint32_t s_ntime = UINT32_MAX;
const uint32_t ntime = bswap_32( pdata[17] );
if ( s_ntime != ntime )
{
x16_r_s_getAlgoString( (const uint8_t*)bedata1, hashOrder );
x16_r_s_getAlgoString( (const uint8_t*)bedata1, x16r_hash_order );
s_ntime = ntime;
if ( opt_debug && !thr_id )
applog( LOG_INFO, "hash order %s (%08x)", hashOrder, ntime );
applog( LOG_INFO, "hash order %s (%08x)", x16r_hash_order, ntime );
}
// Do midstate prehash on hash functions with block size <= 64 bytes.
const char elem = hashOrder[0];
const char elem = x16r_hash_order[0];
const uint8_t algo = elem >= 'A' ? elem - 'A' + 10 : elem - '0';
switch ( algo )
{
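The midstate prehash noted in the comment above works because the first 64 bytes of the 80 byte block header are constant for a given job: the first block of the leading function is absorbed once, and each nonce clones that state and absorbs only the final 16 bytes. A standalone sketch with a toy chaining state (the real code does the equivalent per algorithm, e.g. skein512_4way_prehash64 paired with skein512_4way_final16):

   #include <stdint.h>
   #include <stdio.h>
   #include <string.h>

   typedef struct { uint64_t s; } toy_ctx;             /* toy chaining state */

   static void toy_absorb( toy_ctx *c, const uint8_t *p, size_t n )
   {
      for ( size_t i = 0; i < n; i++ )
         c->s = ( c->s ^ p[i] ) * 0x100000001b3ull;    /* FNV-1a style mixing */
   }

   int main(void)
   {
      uint8_t header[80] = {0};
      toy_ctx mid = { 0xcbf29ce484222325ull };
      toy_absorb( &mid, header, 64 );                  /* prehash the constant 64 bytes once */

      for ( uint32_t nonce = 0; nonce < 3; nonce++ )
      {
         toy_ctx c = mid;                              /* clone the midstate per nonce */
         memcpy( header + 76, &nonce, 4 );             /* nonce lives in the last 16 bytes */
         toy_absorb( &c, header + 64, 16 );
         printf( "nonce %u -> %016llx\n", nonce, (unsigned long long)c.s );
      }
      return 0;
   }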
@@ -718,8 +673,7 @@ int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
x16rv2_8way_hash( hash, vdata );
if ( x16rv2_8way_hash( hash, vdata, thr_id ) )
for ( int i = 0; i < 8; i++ )
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
{
@@ -737,9 +691,6 @@ int scanhash_x16rv2_8way( struct work *work, uint32_t max_nonce,
#elif defined (X16RV2_4WAY)
static __thread uint32_t s_ntime = UINT32_MAX;
static __thread char hashOrder[X16R_HASH_FUNC_COUNT + 1] = { 0 };
union _x16rv2_4way_context_overlay
{
blake512_4way_context blake;
@@ -751,7 +702,7 @@ union _x16rv2_4way_context_overlay
keccak512_4way_context keccak;
luffa_2way_context luffa;
cubehashParam cube;
sph_shavite512_context shavite;
shavite512_context shavite;
simd_2way_context simd;
hamsi512_4way_context hamsi;
sph_fugue512_context fugue;
@@ -770,7 +721,7 @@ inline void padtiger512( uint32_t* hash )
for ( int i = 6; i < 16; i++ ) hash[i] = 0;
}
void x16rv2_4way_hash( void* output, const void* input )
int x16rv2_4way_hash( void* output, const void* input, int thrid )
{
uint32_t hash0[20] __attribute__ ((aligned (64)));
uint32_t hash1[20] __attribute__ ((aligned (64)));
@@ -789,7 +740,7 @@ void x16rv2_4way_hash( void* output, const void* input )
for ( int i = 0; i < 16; i++ )
{
const char elem = hashOrder[i];
const char elem = x16r_hash_order[i];
const uint8_t algo = elem >= 'A' ? elem - 'A' + 10 : elem - '0';
switch ( algo )
@@ -835,47 +786,47 @@ void x16rv2_4way_hash( void* output, const void* input )
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
break;
case KECCAK:
if ( i == 0 )
{
sph_tiger( &ctx.tiger, in0 + 64, 16 );
sph_tiger_close( &ctx.tiger, hash0 );
memcpy( &ctx, &x16rv2_ctx, sizeof(ctx) );
sph_tiger( &ctx.tiger, in1 + 64, 16 );
sph_tiger_close( &ctx.tiger, hash1 );
memcpy( &ctx, &x16rv2_ctx, sizeof(ctx) );
sph_tiger( &ctx.tiger, in2 + 64, 16 );
sph_tiger_close( &ctx.tiger, hash2 );
memcpy( &ctx, &x16rv2_ctx, sizeof(ctx) );
sph_tiger( &ctx.tiger, in3 + 64, 16 );
sph_tiger_close( &ctx.tiger, hash3 );
}
else
{
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in0, size );
sph_tiger_close( &ctx.tiger, hash0 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in1, size );
sph_tiger_close( &ctx.tiger, hash1 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in2, size );
sph_tiger_close( &ctx.tiger, hash2 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in3, size );
sph_tiger_close( &ctx.tiger, hash3 );
}
for ( int i = (24/4); i < (64/4); i++ )
hash0[i] = hash1[i] = hash2[i] = hash3[i] = 0;
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
keccak512_4way_init( &ctx.keccak );
keccak512_4way_update( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case SKEIN:
if ( i == 0 )
skein512_4way_final16( &ctx.skein, vhash, input + (64*4) );
else
{
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
@@ -886,46 +837,46 @@ void x16rv2_4way_hash( void* output, const void* input )
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
break;
case LUFFA:
if ( i == 0 )
{
sph_tiger( &ctx.tiger, in0 + 64, 16 );
sph_tiger_close( &ctx.tiger, hash0 );
memcpy( &ctx, &x16rv2_ctx, sizeof(ctx) );
sph_tiger( &ctx.tiger, in1 + 64, 16 );
sph_tiger_close( &ctx.tiger, hash1 );
memcpy( &ctx, &x16rv2_ctx, sizeof(ctx) );
sph_tiger( &ctx.tiger, in2 + 64, 16 );
sph_tiger_close( &ctx.tiger, hash2 );
memcpy( &ctx, &x16rv2_ctx, sizeof(ctx) );
sph_tiger( &ctx.tiger, in3 + 64, 16 );
sph_tiger_close( &ctx.tiger, hash3 );
}
else
{
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in0, size );
sph_tiger_close( &ctx.tiger, hash0 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in1, size );
sph_tiger_close( &ctx.tiger, hash1 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in2, size );
sph_tiger_close( &ctx.tiger, hash2 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in3, size );
sph_tiger_close( &ctx.tiger, hash3 );
}
for ( int i = (24/4); i < (64/4); i++ )
hash0[i] = hash1[i] = hash2[i] = hash3[i] = 0;
intrlv_2x128( vhash, hash0, hash1, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
break;
case CUBEHASH:
if ( i == 0 )
@@ -959,18 +910,10 @@ void x16rv2_4way_hash( void* output, const void* input )
}
break;
case SHAVITE:
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in0, size );
sph_shavite512_close( &ctx.shavite, hash0 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in1, size );
sph_shavite512_close( &ctx.shavite, hash1 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in2, size );
sph_shavite512_close( &ctx.shavite, hash2 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in3, size );
sph_shavite512_close( &ctx.shavite, hash3 );
shavite512_full( &ctx.shavite, hash0, in0, size );
shavite512_full( &ctx.shavite, hash1, in1, size );
shavite512_full( &ctx.shavite, hash2, in2, size );
shavite512_full( &ctx.shavite, hash3, in3, size );
break;
case SIMD:
intrlv_2x128( vhash, in0, in1, size<<3 );
@@ -1003,18 +946,10 @@ void x16rv2_4way_hash( void* output, const void* input )
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
break;
case FUGUE:
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in0, size );
sph_fugue512_close( &ctx.fugue, hash0 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in1, size );
sph_fugue512_close( &ctx.fugue, hash1 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in2, size );
sph_fugue512_close( &ctx.fugue, hash2 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in3, size );
sph_fugue512_close( &ctx.fugue, hash3 );
sph_fugue512_full( &ctx.fugue, hash0, in0, size );
sph_fugue512_full( &ctx.fugue, hash1, in1, size );
sph_fugue512_full( &ctx.fugue, hash2, in2, size );
sph_fugue512_full( &ctx.fugue, hash3, in3, size );
break;
case SHABAL:
intrlv_4x32( vhash, in0, in1, in2, in3, size<<3 );
@@ -1045,18 +980,10 @@ void x16rv2_4way_hash( void* output, const void* input )
}
else
{
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in0, size );
sph_whirlpool_close( &ctx.whirlpool, hash0 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in1, size );
sph_whirlpool_close( &ctx.whirlpool, hash1 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in2, size );
sph_whirlpool_close( &ctx.whirlpool, hash2 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in3, size );
sph_whirlpool_close( &ctx.whirlpool, hash3 );
sph_whirlpool512_full( &ctx.whirlpool, hash0, in0, size );
sph_whirlpool512_full( &ctx.whirlpool, hash1, in1, size );
sph_whirlpool512_full( &ctx.whirlpool, hash2, in2, size );
sph_whirlpool512_full( &ctx.whirlpool, hash3, in3, size );
}
break;
case SHA_512:
@@ -1099,12 +1026,16 @@ void x16rv2_4way_hash( void* output, const void* input )
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
}
if ( work_restart[thrid].restart ) return 0;
size = 64;
}
memcpy( output, hash0, 32 );
memcpy( output+32, hash1, 32 );
memcpy( output+64, hash2, 32 );
memcpy( output+96, hash3, 32 );
return 1;
}
int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
@@ -1121,7 +1052,7 @@ int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
const uint32_t last_nonce = max_nonce - 4;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
__m256i *noncev = (__m256i*)vdata + 9; // aligned
__m256i *noncev = (__m256i*)vdata + 9;
volatile uint8_t *restart = &(work_restart[thr_id].restart);
const bool bench = opt_benchmark;
@@ -1130,17 +1061,19 @@ int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
bedata1[0] = bswap_32( pdata[1] );
bedata1[1] = bswap_32( pdata[2] );
static __thread uint32_t s_ntime = UINT32_MAX;
const uint32_t ntime = bswap_32(pdata[17]);
if ( s_ntime != ntime )
{
x16_r_s_getAlgoString( (const uint8_t*)bedata1, hashOrder );
x16_r_s_getAlgoString( (const uint8_t*)bedata1, x16r_hash_order );
s_ntime = ntime;
if ( opt_debug && !thr_id )
applog( LOG_DEBUG, "hash order %s (%08x)", hashOrder, ntime );
applog( LOG_INFO, "hash order %s (%08x)", x16r_hash_order, ntime );
}
// Do midstate prehash on hash functions with block size <= 64 bytes.
const char elem = hashOrder[0];
const char elem = x16r_hash_order[0];
const uint8_t algo = elem >= 'A' ? elem - 'A' + 10 : elem - '0';
switch ( algo )
{
@@ -1159,8 +1092,7 @@ int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
break;
case SKEIN:
mm256_bswap32_intrlv80_4x64( vdata, pdata );
skein512_4way_init( &x16rv2_ctx.skein );
skein512_4way_update( &x16rv2_ctx.skein, vdata, 64 );
skein512_4way_prehash64( &x16r_ctx.skein, vdata );
break;
case CUBEHASH:
mm128_bswap32_80( edata, pdata );
@@ -1194,7 +1126,7 @@ int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
do
{
x16rv2_4way_hash( hash, vdata );
if ( x16rv2_4way_hash( hash, vdata, thr_id ) )
for ( int i = 0; i < 4; i++ )
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
{

View File

@@ -34,7 +34,6 @@
#endif
static __thread uint32_t s_ntime = UINT32_MAX;
static __thread char hashOrder[X16R_HASH_FUNC_COUNT + 1] = { 0 };
union _x16rv2_context_overlay
{
@@ -52,7 +51,7 @@ union _x16rv2_context_overlay
sph_keccak512_context keccak;
hashState_luffa luffa;
cubehashParam cube;
sph_shavite512_context shavite;
shavite512_context shavite;
hashState_sd simd;
sph_hamsi512_context hamsi;
sph_fugue512_context fugue;
@@ -68,22 +67,16 @@ inline void padtiger512(uint32_t* hash) {
for (int i = (24/4); i < (64/4); i++) hash[i] = 0;
}
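padtiger512 exists because x16rv2 splices Tiger, a 192 bit (24 byte) digest, in front of keccak, luffa and sha512, which all expect a full 64 byte input: words 24/4 = 6 through 64/4 - 1 = 15 are zeroed so the short digest presents as a padded 512 bit block. A quick standalone check of that padding:

   #include <stdint.h>
   #include <stdio.h>

   static inline void padtiger512( uint32_t *hash )
   {
      for ( int i = (24/4); i < (64/4); i++ ) hash[i] = 0;   /* zero bytes 24..63 */
   }

   int main(void)
   {
      uint32_t h[16];
      for ( int i = 0; i < 16; i++ ) h[i] = 0xdeadbeefu;     /* pretend leftover state */
      padtiger512( h );
      for ( int i = 0; i < 16; i++ )
         printf( "word %2d = %08x\n", i, h[i] );             /* words 6..15 print as zero */
      return 0;
   }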
void x16rv2_hash( void* output, const void* input )
int x16rv2_hash( void* output, const void* input, int thrid )
{
uint32_t _ALIGN(128) hash[16];
x16rv2_context_overlay ctx;
void *in = (void*) input;
int size = 80;
/*
if ( s_ntime == UINT32_MAX )
{
const uint8_t* in8 = (uint8_t*) input;
x16_r_s_getAlgoString( &in8[4], hashOrder );
}
*/
for ( int i = 0; i < 16; i++ )
{
const char elem = hashOrder[i];
const char elem = x16r_hash_order[i];
const uint8_t algo = elem >= 'A' ? elem - 'A' + 10 : elem - '0';
switch ( algo )
@@ -143,9 +136,7 @@ void x16rv2_hash( void* output, const void* input )
(const byte*)in, size );
break;
case SHAVITE:
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in, size );
sph_shavite512_close( &ctx.shavite, hash );
shavite512_full( &ctx.shavite, hash, in, size );
break;
case SIMD:
init_sd( &ctx.simd, 512 );
@@ -169,9 +160,7 @@ void x16rv2_hash( void* output, const void* input )
sph_hamsi512_close( &ctx.hamsi, hash );
break;
case FUGUE:
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in, size );
sph_fugue512_close( &ctx.fugue, hash );
sph_fugue512_full( &ctx.fugue, hash, in, size );
break;
case SHABAL:
sph_shabal512_init( &ctx.shabal );
@@ -179,9 +168,7 @@ void x16rv2_hash( void* output, const void* input )
sph_shabal512_close( &ctx.shabal, hash );
break;
case WHIRLPOOL:
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in, size );
sph_whirlpool_close( &ctx.whirlpool, hash );
sph_whirlpool512_full( &ctx.whirlpool, hash, in, size );
break;
case SHA_512:
sph_tiger_init( &ctx.tiger );
@@ -193,58 +180,61 @@ void x16rv2_hash( void* output, const void* input )
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
break;
}
if ( work_restart[thrid].restart ) return 0;
in = (void*) hash;
size = 64;
}
memcpy(output, hash, 32);
return 1;
}
int scanhash_x16rv2( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(128) hash32[8];
uint32_t _ALIGN(128) endiandata[20];
uint32_t _ALIGN(128) edata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
int thr_id = mythr->id; // thr_id arg is deprecated
const int thr_id = mythr->id;
uint32_t nonce = first_nonce;
volatile uint8_t *restart = &(work_restart[thr_id].restart);
const bool bench = opt_benchmark;
casti_m128i( endiandata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) );
casti_m128i( endiandata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) );
casti_m128i( endiandata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) );
casti_m128i( endiandata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) );
casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
casti_m128i( edata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) );
casti_m128i( edata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) );
casti_m128i( edata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) );
casti_m128i( edata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) );
casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
static __thread uint32_t s_ntime = UINT32_MAX;
if ( s_ntime != pdata[17] )
{
uint32_t ntime = swab32(pdata[17]);
x16_r_s_getAlgoString( (const uint8_t*) (&endiandata[1]), hashOrder );
x16_r_s_getAlgoString( (const uint8_t*) (&edata[1]), x16r_hash_order );
s_ntime = ntime;
if ( opt_debug && !thr_id )
applog( LOG_DEBUG, "hash order %s (%08x)", hashOrder, ntime );
applog( LOG_DEBUG, "hash order %s (%08x)",
x16r_hash_order, ntime );
}
if ( opt_benchmark )
ptarget[7] = 0x0cff;
if ( bench ) ptarget[7] = 0x0cff;
do
{
be32enc( &endiandata[19], nonce );
x16rv2_hash( hash32, endiandata );
if ( hash32[7] <= Htarg )
if (fulltest( hash32, ptarget ) && !opt_benchmark )
edata[19] = nonce;
if ( x16rv2_hash( hash32, edata, thr_id ) )
if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
{
pdata[19] = nonce;
pdata[19] = bswap_32( nonce );
submit_solution( work, hash32, mythr );
}
nonce++;
} while ( nonce < max_nonce && !(*restart) );
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
*hashes_done = pdata[19] - first_nonce;
return 0;
}
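The rewritten scanhash above also tidies the endian handling: the header is byte swapped into edata once per job, the nonce is written raw into word 19 for hashing, and only a found share swaps it back into pdata. A hedged standalone sketch of that pattern, with a plain loop in place of the casti_m128i / mm128_bswap_32 vector swaps:

   #include <stdint.h>
   #include <stdio.h>

   static inline uint32_t bswap32u( uint32_t x )
   {
      return (x >> 24) | ((x >> 8) & 0xff00u) | ((x << 8) & 0xff0000u) | (x << 24);
   }

   int main(void)
   {
      uint32_t pdata[20], edata[20];
      for ( int i = 0; i < 20; i++ ) pdata[i] = 0x11223344u + (uint32_t)i;

      for ( int i = 0; i < 20; i++ )        /* one-time big-endian copy of the header */
         edata[i] = bswap32u( pdata[i] );

      uint32_t nonce = 42;
      edata[19] = nonce;                    /* hash input uses the raw counter... */
      pdata[19] = bswap32u( nonce );        /* ...the share reports it byte swapped */
      printf( "edata[19]=%08x pdata[19]=%08x\n", edata[19], pdata[19] );
      return 0;
   }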

View File

@@ -30,7 +30,7 @@ union _x21s_8way_context_overlay
typedef union _x21s_8way_context_overlay x21s_8way_context_overlay;
void x21s_8way_hash( void* output, const void* input )
int x21s_8way_hash( void* output, const void* input, int thrid )
{
uint32_t vhash[16*8] __attribute__ ((aligned (128)));
uint8_t shash[64*8] __attribute__ ((aligned (64)));
@@ -44,7 +44,8 @@ void x21s_8way_hash( void* output, const void* input )
uint32_t *hash7 = (uint32_t*)( shash+448 );
x21s_8way_context_overlay ctx;
x16r_8way_hash_generic( shash, input );
if ( !x16r_8way_hash_generic( shash, input, thrid ) )
return 0;
intrlv_8x32_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7 );
@@ -124,6 +125,8 @@ void x21s_8way_hash( void* output, const void* input )
sha256_8way_init( &ctx.sha256 );
sha256_8way_update( &ctx.sha256, vhash, 64 );
sha256_8way_close( &ctx.sha256, output );
return 1;
}
int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,
@@ -166,8 +169,7 @@ int scanhash_x21s_8way( struct work *work, uint32_t max_nonce,
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
x21s_8way_hash( hash, vdata );
if ( x21s_8way_hash( hash, vdata, thr_id ) )
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( hash7[lane] <= Htarg ) )
{
@@ -215,7 +217,7 @@ union _x21s_4way_context_overlay
typedef union _x21s_4way_context_overlay x21s_4way_context_overlay;
void x21s_4way_hash( void* output, const void* input )
int x21s_4way_hash( void* output, const void* input, int thrid )
{
uint32_t vhash[16*4] __attribute__ ((aligned (64)));
uint8_t shash[64*4] __attribute__ ((aligned (64)));
@@ -225,8 +227,9 @@ void x21s_4way_hash( void* output, const void* input )
uint32_t *hash2 = (uint32_t*)( shash+128 );
uint32_t *hash3 = (uint32_t*)( shash+192 );
x16r_4way_hash_generic( shash, input );
if ( !x16r_4way_hash_generic( shash, input, thrid ) )
return 0;
intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
haval256_5_4way_init( &ctx.haval );
@@ -299,6 +302,8 @@ void x21s_4way_hash( void* output, const void* input )
dintrlv_4x32( output, output+32, output+64,output+96, vhash, 256 );
#endif
return 1;
}
int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,
@@ -337,7 +342,7 @@ int scanhash_x21s_4way( struct work *work, uint32_t max_nonce,
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
x21s_4way_hash( hash, vdata );
if ( x21s_4way_hash( hash, vdata, thr_id ) )
for ( int i = 0; i < 4; i++ )
if ( unlikely( valid_hash( hash + (i<<3), ptarget ) && !bench ) )
{

View File

@@ -27,12 +27,13 @@ union _x21s_context_overlay
};
typedef union _x21s_context_overlay x21s_context_overlay;
void x21s_hash( void* output, const void* input )
int x21s_hash( void* output, const void* input, int thrid )
{
uint32_t _ALIGN(128) hash[16];
x21s_context_overlay ctx;
x16r_hash_generic( hash, input );
if ( !x16r_hash_generic( hash, input, thrid ) )
return 0;
sph_haval256_5_init( &ctx.haval );
sph_haval256_5( &ctx.haval, (const void*) hash, 64 );
@@ -54,6 +55,8 @@ void x21s_hash( void* output, const void* input )
SHA256_Final( (unsigned char*)hash, &ctx.sha256 );
memcpy( output, hash, 32 );
return 1;
}
int scanhash_x21s( struct work *work, uint32_t max_nonce,
@@ -87,8 +90,7 @@ int scanhash_x21s( struct work *work, uint32_t max_nonce,
do
{
edata[19] = nonce;
x21s_hash( hash32, edata );
if ( x21s_hash( hash32, edata, thr_id ) )
if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
{
pdata[19] = bswap_32( nonce );
@@ -97,7 +99,7 @@ int scanhash_x21s( struct work *work, uint32_t max_nonce,
nonce++;
} while ( nonce < max_nonce && !(*restart) );
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
*hashes_done = pdata[19] - first_nonce;
return 0;
}

File diff suppressed because it is too large

View File

@@ -4,14 +4,14 @@ bool register_sonoa_algo( algo_gate_t* gate )
{
#if defined (SONOA_8WAY)
gate->scanhash = (void*)&scanhash_sonoa_8way;
gate->hash = (void*)&sonoa_8way_hash;
// gate->hash = (void*)&sonoa_8way_hash;
#elif defined (SONOA_4WAY)
gate->scanhash = (void*)&scanhash_sonoa_4way;
gate->hash = (void*)&sonoa_4way_hash;
// gate->hash = (void*)&sonoa_4way_hash;
#else
init_sonoa_ctx();
gate->scanhash = (void*)&scanhash_sonoa;
gate->hash = (void*)&sonoa_hash;
// gate->hash = (void*)&sonoa_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT | VAES_OPT;
return true;
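Commenting out gate->hash above is a consequence of the signature change: the generic hash slot still expects a void function of (void*, const void*), while the sonoa functions now return int and take a thread id, so only scanhash is registered and it calls the hash directly. A simplified standalone sketch of the gate pattern (toy types and flags, not the real algo_gate_t):

   #include <stdbool.h>
   #include <stdio.h>

   enum { SSE2_OPT = 1, AES_OPT = 2, AVX2_OPT = 4 };   /* toy capability flags */

   typedef struct
   {
      int      (*scanhash)( void );                    /* dispatch slot used per algo */
      unsigned optimizations;
   } toy_gate_t;

   static int scanhash_demo( void ) { printf( "scanning...\n" ); return 0; }

   static bool register_demo_algo( toy_gate_t *gate )
   {
      gate->scanhash      = scanhash_demo;             /* hash is called inside scanhash */
      gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
      return true;
   }

   int main(void)
   {
      toy_gate_t gate;
      if ( register_demo_algo( &gate ) )
         gate.scanhash();
      return 0;
   }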

View File

@@ -14,19 +14,19 @@ bool register_sonoa_algo( algo_gate_t* gate );
#if defined(SONOA_8WAY)
void sonoa_8way_hash( void *state, const void *input );
int sonoa_8way_hash( void *state, const void *input, int thrid );
int scanhash_sonoa_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined(SONOA_4WAY)
void sonoa_4way_hash( void *state, const void *input );
int sonoa_4way_hash( void *state, const void *input, int thrid );
int scanhash_sonoa_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#else
void sonoa_hash( void *state, const void *input );
int sonoa_hash( void *state, const void *input, int thrid );
int scanhash_sonoa( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void init_sonoa_ctx();
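init_sonoa_ctx() pairs with the memcpy at the top of sonoa_hash: the sixteen contexts are initialized once into a static holder, and each call clones that holder rather than re-running every *_init. A standalone sketch of the init-once / clone-per-call pattern (toy context and hypothetical names):

   #include <stdint.h>
   #include <stdio.h>
   #include <string.h>

   typedef struct { uint32_t iv[8]; } toy_ctx_holder;

   static toy_ctx_holder demo_ctx;                     /* filled once at startup */

   static void init_demo_ctx( void )
   {
      for ( int i = 0; i < 8; i++ )
         demo_ctx.iv[i] = 0x6a09e667u + (uint32_t)i;   /* pretend IV derivation */
   }

   static uint32_t demo_hash( const char *input, size_t len )
   {
      toy_ctx_holder ctx;
      memcpy( &ctx, &demo_ctx, sizeof(demo_ctx) );     /* clone beats re-initializing */
      uint32_t h = ctx.iv[0];
      for ( size_t i = 0; i < len; i++ )
         h = h * 31u + (uint8_t)input[i];
      return h;
   }

   int main(void)
   {
      init_demo_ctx();
      printf( "%08x\n", demo_hash( "block header", 12 ) );
      return 0;
   }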

View File

@@ -83,27 +83,27 @@ void init_sonoa_ctx()
sph_haval256_5_init(&sonoa_ctx.haval);
};
void sonoa_hash( void *state, const void *input )
int sonoa_hash( void *state, const void *input, int thrid )
{
uint8_t hash[128] __attribute__ ((aligned (64)));
sonoa_ctx_holder ctx __attribute__ ((aligned (64)));
memcpy( &ctx, &sonoa_ctx, sizeof(sonoa_ctx) );
sph_blake512(&ctx.blake, input, 80);
sph_blake512_close(&ctx.blake, hash);
sph_bmw512(&ctx.bmw, hash, 64);
sph_bmw512_close(&ctx.bmw, hash);
#if defined(__AES__)
update_and_final_groestl( &ctx.groestl, (char*)hash,
(const char*)hash, 512 );
#else
sph_groestl512(&ctx.groestl, hash, 64);
sph_groestl512_close(&ctx.groestl, hash);
#endif
sph_skein512(&ctx.skein, hash, 64);
sph_skein512_close(&ctx.skein, hash);
sph_jh512(&ctx.jh, hash, 64);
@@ -112,510 +112,489 @@ void sonoa_hash( void *state, const void *input )
sph_keccak512(&ctx.keccak, hash, 64);
sph_keccak512_close(&ctx.keccak, hash);
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
(const BitSequence*)hash, 64 );
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
(const byte*)hash, 64 );
sph_shavite512(&ctx.shavite, hash, 64);
sph_shavite512_close(&ctx.shavite, hash);
update_final_sd( &ctx.simd, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#if defined(__AES__)
update_final_echo ( &ctx.echo, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#else
sph_echo512(&ctx.echo, hash, 64);
sph_echo512_close(&ctx.echo, hash);
#endif
if ( work_restart[thrid].restart ) return 0;
//
sph_bmw512_init( &ctx.bmw);
sph_bmw512(&ctx.bmw, hash, 64);
sph_bmw512_close(&ctx.bmw, hash);
#if defined(__AES__)
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash,
(const char*)hash, 512 );
#else
sph_groestl512_init(&ctx.groestl );
sph_groestl512(&ctx.groestl, hash, 64);
sph_groestl512_close(&ctx.groestl, hash);
#endif
sph_skein512_init( &ctx.skein);
sph_skein512(&ctx.skein, hash, 64);
sph_skein512_close(&ctx.skein, hash);
sph_jh512_init( &ctx.jh);
sph_jh512(&ctx.jh, hash, 64);
sph_jh512_close(&ctx.jh, hash);
sph_keccak512_init( &ctx.keccak );
sph_keccak512(&ctx.keccak, hash, 64);
sph_keccak512_close(&ctx.keccak, hash);
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
(const BitSequence*)hash, 64 );
cubehashInit( &ctx.cubehash, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
(const byte*)hash, 64 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512(&ctx.shavite, hash, 64);
sph_shavite512_close(&ctx.shavite, hash);
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#if defined(__AES__)
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#else
sph_echo512_init( &ctx.echo );
sph_echo512(&ctx.echo, hash, 64);
sph_echo512_close(&ctx.echo, hash);
#endif
sph_hamsi512(&ctx.hamsi, hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash);
if ( work_restart[thrid].restart ) return 0;
//
sph_bmw512_init( &ctx.bmw);
sph_bmw512(&ctx.bmw, hash, 64);
sph_bmw512_close(&ctx.bmw, hash);
#if defined(__AES__)
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash,
(const char*)hash, 512 );
#else
sph_groestl512_init(&ctx.groestl );
sph_groestl512(&ctx.groestl, hash, 64);
sph_groestl512_close(&ctx.groestl, hash);
#endif
sph_skein512_init( &ctx.skein);
sph_skein512(&ctx.skein, hash, 64);
sph_skein512_close(&ctx.skein, hash);
sph_jh512_init( &ctx.jh);
sph_jh512(&ctx.jh, hash, 64);
sph_jh512_close(&ctx.jh, hash);
sph_keccak512_init( &ctx.keccak );
sph_keccak512(&ctx.keccak, hash, 64);
sph_keccak512_close(&ctx.keccak, hash);
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
(const BitSequence*)hash, 64 );
cubehashInit( &ctx.cubehash, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cubehash, (byte*)hash,
(const byte*)hash, 64 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512(&ctx.shavite, hash, 64);
sph_shavite512_close(&ctx.shavite, hash);
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#if defined(__AES__)
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#else
sph_echo512_init( &ctx.echo );
sph_echo512(&ctx.echo, hash, 64);
sph_echo512_close(&ctx.echo, hash);
#endif
sph_hamsi512_init( &ctx.hamsi );
sph_hamsi512(&ctx.hamsi, hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash);
sph_fugue512(&ctx.fugue, hash, 64);
sph_fugue512_close(&ctx.fugue, hash);
if ( work_restart[thrid].restart ) return 0;
//
sph_bmw512_init( &ctx.bmw);
sph_bmw512(&ctx.bmw, hash, 64);
sph_bmw512_close(&ctx.bmw, hash);
#if defined(__AES__)
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash,
(const char*)hash, 512 );
#else
sph_groestl512_init(&ctx.groestl );
sph_groestl512(&ctx.groestl, hash, 64);
sph_groestl512_close(&ctx.groestl, hash);
#endif
sph_skein512_init( &ctx.skein);
sph_skein512(&ctx.skein, hash, 64);
sph_skein512_close(&ctx.skein, hash);
sph_jh512_init( &ctx.jh);
sph_jh512(&ctx.jh, hash, 64);
sph_jh512_close(&ctx.jh, hash);
sph_keccak512_init( &ctx.keccak );
sph_keccak512(&ctx.keccak, hash, 64);
sph_keccak512_close(&ctx.keccak, hash);
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
(const BitSequence*)hash, 64 );
cubehashInit( &ctx.cubehash, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
(const byte*)hash, 64 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512(&ctx.shavite, hash, 64);
sph_shavite512_close(&ctx.shavite, hash);
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#if defined(__AES__)
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#else
sph_echo512_init( &ctx.echo );
sph_echo512(&ctx.echo, hash, 64);
sph_echo512_close(&ctx.echo, hash);
#endif
sph_hamsi512_init( &ctx.hamsi );
sph_hamsi512(&ctx.hamsi, hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash);
sph_fugue512_init( &ctx.fugue );
sph_fugue512(&ctx.fugue, hash, 64);
sph_fugue512_close(&ctx.fugue, hash);
sph_shabal512(&ctx.shabal, hash, 64);
sph_shabal512_close(&ctx.shabal, hash);
sph_hamsi512_init( &ctx.hamsi );
sph_hamsi512(&ctx.hamsi, hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash);
#if defined(__AES__)
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#else
sph_echo512_init( &ctx.echo );
sph_echo512(&ctx.echo, hash, 64);
sph_echo512_close(&ctx.echo, hash);
#endif
sph_shavite512_init( &ctx.shavite );
sph_shavite512(&ctx.shavite, hash, 64);
sph_shavite512_close(&ctx.shavite, hash);
if ( work_restart[thrid].restart ) return 0;
//
sph_bmw512_init( &ctx.bmw);
sph_bmw512(&ctx.bmw, hash, 64);
sph_bmw512_close(&ctx.bmw, hash);
sph_shabal512_init( &ctx.shabal );
sph_shabal512(&ctx.shabal, hash, 64);
sph_shabal512_close(&ctx.shabal, hash);
#if defined(__AES__)
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash,
(const char*)hash, 512 );
#else
sph_groestl512_init(&ctx.groestl );
sph_groestl512(&ctx.groestl, hash, 64);
sph_groestl512_close(&ctx.groestl, hash);
#endif
sph_skein512_init( &ctx.skein);
sph_skein512(&ctx.skein, hash, 64);
sph_skein512_close(&ctx.skein, hash);
sph_jh512_init( &ctx.jh);
sph_jh512(&ctx.jh, hash, 64);
sph_jh512_close(&ctx.jh, hash);
sph_keccak512_init( &ctx.keccak );
sph_keccak512(&ctx.keccak, hash, 64);
sph_keccak512_close(&ctx.keccak, hash);
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
(const BitSequence*)hash, 64 );
cubehashInit( &ctx.cubehash, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
(const byte*)hash, 64 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512(&ctx.shavite, hash, 64);
sph_shavite512_close(&ctx.shavite, hash);
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#if defined(__AES__)
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#else
sph_echo512_init( &ctx.echo );
sph_echo512(&ctx.echo, hash, 64);
sph_echo512_close(&ctx.echo, hash);
#endif
sph_hamsi512_init( &ctx.hamsi );
sph_hamsi512(&ctx.hamsi, hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash);
sph_fugue512_init( &ctx.fugue );
sph_fugue512(&ctx.fugue, hash, 64);
sph_fugue512_close(&ctx.fugue, hash);
sph_shabal512_init( &ctx.shabal );
sph_shabal512(&ctx.shabal, hash, 64);
sph_shabal512_close(&ctx.shabal, hash);
sph_whirlpool(&ctx.whirlpool, hash, 64);
sph_whirlpool_close(&ctx.whirlpool, hash);
if ( work_restart[thrid].restart ) return 0;
//
sph_bmw512_init( &ctx.bmw);
sph_bmw512(&ctx.bmw, hash, 64);
sph_bmw512_close(&ctx.bmw, hash);
#if defined(__AES__)
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash,
(const char*)hash, 512 );
#else
sph_groestl512_init(&ctx.groestl );
sph_groestl512(&ctx.groestl, hash, 64);
sph_groestl512_close(&ctx.groestl, hash);
#endif
sph_skein512_init( &ctx.skein);
sph_skein512(&ctx.skein, hash, 64);
sph_skein512_close(&ctx.skein, hash);
sph_jh512_init( &ctx.jh);
sph_jh512(&ctx.jh, hash, 64);
sph_jh512_close(&ctx.jh, hash);
sph_keccak512_init( &ctx.keccak );
sph_keccak512(&ctx.keccak, hash, 64);
sph_keccak512_close(&ctx.keccak, hash);
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
(const BitSequence*)hash, 64 );
cubehashInit( &ctx.cubehash, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
(const byte*)hash, 64 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512(&ctx.shavite, hash, 64);
sph_shavite512_close(&ctx.shavite, hash);
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#if defined(__AES__)
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash,
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#else
sph_echo512_init( &ctx.echo );
sph_echo512(&ctx.echo, hash, 64);
sph_echo512_close(&ctx.echo, hash);
sph_echo512_init( &ctx.echo );
sph_echo512(&ctx.echo, hash, 64);
sph_echo512_close(&ctx.echo, hash);
#endif
sph_hamsi512_init( &ctx.hamsi );
sph_hamsi512(&ctx.hamsi, hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash);
sph_hamsi512_init( &ctx.hamsi );
sph_hamsi512(&ctx.hamsi, hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash);
sph_fugue512_init( &ctx.fugue );
sph_fugue512(&ctx.fugue, hash, 64);
sph_fugue512_close(&ctx.fugue, hash);
sph_fugue512_init( &ctx.fugue );
sph_fugue512(&ctx.fugue, hash, 64);
sph_fugue512_close(&ctx.fugue, hash);
sph_shabal512_init( &ctx.shabal );
sph_shabal512(&ctx.shabal, hash, 64);
sph_shabal512_close(&ctx.shabal, hash);
sph_shabal512_init( &ctx.shabal );
sph_shabal512(&ctx.shabal, hash, 64);
sph_shabal512_close(&ctx.shabal, hash);
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool(&ctx.whirlpool, hash, 64);
sph_whirlpool_close(&ctx.whirlpool, hash);
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool(&ctx.whirlpool, hash, 64);
sph_whirlpool_close(&ctx.whirlpool, hash);
SHA512_Update( &ctx.sha512, hash, 64 );
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
SHA512_Update( &ctx.sha512, hash, 64 );
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool(&ctx.whirlpool, hash, 64);
sph_whirlpool_close(&ctx.whirlpool, hash);
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool(&ctx.whirlpool, hash, 64);
sph_whirlpool_close(&ctx.whirlpool, hash);
if ( work_restart[thrid].restart ) return 0;
//
sph_bmw512_init( &ctx.bmw);
sph_bmw512(&ctx.bmw, hash, 64);
sph_bmw512_close(&ctx.bmw, hash);
sph_bmw512_init( &ctx.bmw);
sph_bmw512(&ctx.bmw, hash, 64);
sph_bmw512_close(&ctx.bmw, hash);
#if defined(__AES__)
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash,
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash,
(const char*)hash, 512 );
#else
sph_groestl512_init(&ctx.groestl );
sph_groestl512(&ctx.groestl, hash, 64);
sph_groestl512_close(&ctx.groestl, hash);
sph_groestl512_init(&ctx.groestl );
sph_groestl512(&ctx.groestl, hash, 64);
sph_groestl512_close(&ctx.groestl, hash);
#endif
sph_skein512_init( &ctx.skein);
sph_skein512(&ctx.skein, hash, 64);
sph_skein512_close(&ctx.skein, hash);
sph_skein512_init( &ctx.skein);
sph_skein512(&ctx.skein, hash, 64);
sph_skein512_close(&ctx.skein, hash);
sph_jh512_init( &ctx.jh);
sph_jh512(&ctx.jh, hash, 64);
sph_jh512_close(&ctx.jh, hash);
sph_jh512_init( &ctx.jh);
sph_jh512(&ctx.jh, hash, 64);
sph_jh512_close(&ctx.jh, hash);
sph_keccak512_init( &ctx.keccak );
sph_keccak512(&ctx.keccak, hash, 64);
sph_keccak512_close(&ctx.keccak, hash);
sph_keccak512_init( &ctx.keccak );
sph_keccak512(&ctx.keccak, hash, 64);
sph_keccak512_close(&ctx.keccak, hash);
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
(const BitSequence*)hash, 64 );
cubehashInit( &ctx.cubehash, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
cubehashInit( &ctx.cubehash, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cubehash, (byte*) hash,
(const byte*)hash, 64 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512(&ctx.shavite, hash, 64);
sph_shavite512_close(&ctx.shavite, hash);
sph_shavite512_init( &ctx.shavite );
sph_shavite512(&ctx.shavite, hash, 64);
sph_shavite512_close(&ctx.shavite, hash);
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash,
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#if defined(__AES__)
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash,
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#else
sph_echo512_init( &ctx.echo );
sph_echo512(&ctx.echo, hash, 64);
sph_echo512_close(&ctx.echo, hash);
sph_echo512_init( &ctx.echo );
sph_echo512(&ctx.echo, hash, 64);
sph_echo512_close(&ctx.echo, hash);
#endif
sph_hamsi512_init( &ctx.hamsi );
sph_hamsi512(&ctx.hamsi, hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash);
sph_hamsi512_init( &ctx.hamsi );
sph_hamsi512(&ctx.hamsi, hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash);
sph_fugue512_init( &ctx.fugue );
sph_fugue512(&ctx.fugue, hash, 64);
sph_fugue512_close(&ctx.fugue, hash);
sph_fugue512_init( &ctx.fugue );
sph_fugue512(&ctx.fugue, hash, 64);
sph_fugue512_close(&ctx.fugue, hash);
sph_shabal512_init( &ctx.shabal );
sph_shabal512(&ctx.shabal, hash, 64);
sph_shabal512_close(&ctx.shabal, hash);
sph_shabal512_init( &ctx.shabal );
sph_shabal512(&ctx.shabal, hash, 64);
sph_shabal512_close(&ctx.shabal, hash);
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool(&ctx.whirlpool, hash, 64);
sph_whirlpool_close(&ctx.whirlpool, hash);
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool(&ctx.whirlpool, hash, 64);
sph_whirlpool_close(&ctx.whirlpool, hash);
SHA512_Init( &ctx.sha512 );
SHA512_Update( &ctx.sha512, hash, 64 );
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
SHA512_Init( &ctx.sha512 );
SHA512_Update( &ctx.sha512, hash, 64 );
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
sph_haval256_5(&ctx.haval,(const void*) hash, 64);
sph_haval256_5_close(&ctx.haval, hash);
sph_haval256_5(&ctx.haval,(const void*) hash, 64);
sph_haval256_5_close(&ctx.haval, hash);
memcpy(state, hash, 32);
return 1;
}
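The tail of sonoa_hash above, once the duplicated diff rendering is collapsed, is one 64-byte buffer pushed through successive rounds of the same primitives, with a work-restart check between rounds. A minimal, self-contained sketch of that chaining pattern (the round functions here are placeholders, not the project's API):

#include <string.h>
#include <stddef.h>

/* Sketch of the chained-round structure used by sonoa_hash: round 0
   consumes the 80-byte block header, every later round re-hashes the
   same 64-byte digest in place, and the loop bails out early when the
   stratum work changes so the thread never finishes a stale hash. */
typedef void (*round_fn)( void *out, const void *in, size_t len );

static int chained_hash_sketch( void *state, const void *input,
                                round_fn rounds[], int n_rounds,
                                volatile int *restart )
{
   unsigned char hash[64];

   rounds[0]( hash, input, 80 );              // 80-byte header in
   for ( int i = 1; i < n_rounds; i++ )
   {
      if ( *restart ) return 0;               // abandon stale work early
      rounds[i]( hash, hash, 64 );            // 64-byte digest back in
   }
   memcpy( state, hash, 32 );                 // only 256 bits are compared
   return 1;
}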
int scanhash_sonoa( struct work *work, uint32_t max_nonce,
                    uint64_t *hashes_done, struct thr_info *mythr )
{
-   uint32_t _ALIGN(128) hash32[8];
-   uint32_t _ALIGN(128) endiandata[20];
+   uint32_t edata[20] __attribute__((aligned(64)));
+   uint32_t hash64[8] __attribute__((aligned(64)));
    uint32_t *pdata = work->data;
    uint32_t *ptarget = work->target;
+   uint32_t n = pdata[19];
    const uint32_t first_nonce = pdata[19];
-   const uint32_t Htarg = ptarget[7];
-   uint32_t n = pdata[19] - 1;
-   int thr_id = mythr->id;   // thr_id arg is deprecated
+   const int thr_id = mythr->id;
+   const bool bench = opt_benchmark;
-   uint64_t htmax[] =
-   {
-      0,
-      0xF,
-      0xFF,
-      0xFFF,
-      0xFFFF,
-      0x10000000
-   };
-   uint32_t masks[] =
-   {
-      0xFFFFFFFF,
-      0xFFFFFFF0,
-      0xFFFFFF00,
-      0xFFFFF000,
-      0xFFFF0000,
-      0
-   };
-   // we need bigendian data...
-   casti_m128i( endiandata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) );
-   casti_m128i( endiandata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) );
-   casti_m128i( endiandata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) );
-   casti_m128i( endiandata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) );
-   casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
-   for ( int m = 0; m < 6; m++ ) if ( Htarg <= htmax[m] )
-   {
-      uint32_t mask = masks[m];
-      do
-      {
-         pdata[19] = ++n;
-         be32enc(&endiandata[19], n);
-         sonoa_hash(hash32, endiandata);
-         if ( !( hash32[7] & mask ) )
-         if ( fulltest( hash32, ptarget ) && !opt_benchmark )
-            submit_solution( work, hash32, mythr );
-      } while (n < max_nonce && !work_restart[thr_id].restart);
-      break;
-   }
-   *hashes_done = n - first_nonce + 1;
+   mm128_bswap32_80( edata, pdata );
+   do
+   {
+      edata[19] = n;
+      if ( sonoa_hash( hash64, edata, thr_id ) )
+      if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
+      {
+         pdata[19] = bswap_32( n );
+         submit_solution( work, hash64, mythr );
+      }
+      n++;
+   } while ( n < max_nonce && !work_restart[thr_id].restart );
+   *hashes_done = n - first_nonce;
    pdata[19] = n;
    return 0;
}
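The rewritten scanner byte-swaps the whole 80-byte header once with mm128_bswap32_80 and then rewrites only word 19 (the nonce) per attempt, swapping a found nonce back before storing it. A portable, self-contained sketch of that endian handling, assuming the conventional semantics of the project's bswap helpers:

#include <stdint.h>

/* What mm128_bswap32_80 and the bswap_32 calls above are assumed to do,
   in plain C: swap all 20 header words up front so only the nonce slot
   changes inside the hot loop. */
static inline uint32_t bswap32_sketch( uint32_t x )
{
   return ( x << 24 ) | ( ( x & 0x0000ff00 ) << 8 ) |
          ( ( x & 0x00ff0000 ) >> 8 ) | ( x >> 24 );
}

static void prep_header_sketch( uint32_t *edata, const uint32_t *pdata )
{
   for ( int i = 0; i < 20; i++ )   // the real helper does this with SSE2
      edata[i] = bswap32_sketch( pdata[i] );
}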

View File

@@ -74,9 +74,7 @@ void x17_8way_hash( void *state, const void *input )
blake512_8way_full( &ctx.blake, vhash, input, 80 );
-   bmw512_8way_init( &ctx.bmw );
-   bmw512_8way_update( &ctx.bmw, vhash, 64 );
-   bmw512_8way_close( &ctx.bmw, vhash );
+   bmw512_8way_full( &ctx.bmw, vhash, vhash, 64 );
#if defined(__VAES__)
@@ -106,9 +104,7 @@ void x17_8way_hash( void *state, const void *input )
#endif
-   skein512_8way_init( &ctx.skein );
-   skein512_8way_update( &ctx.skein, vhash, 64 );
-   skein512_8way_close( &ctx.skein, vhash );
+   skein512_8way_full( &ctx.skein, vhash, vhash, 64 );
jh512_8way_init( &ctx.jh );
jh512_8way_update( &ctx.jh, vhash, 64 );
@@ -136,30 +132,14 @@ void x17_8way_hash( void *state, const void *input )
dintrlv_4x128_512( hash0, hash1, hash2, hash3, vhashA );
dintrlv_4x128_512( hash4, hash5, hash6, hash7, vhashB );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash0, 64 );
-   sph_shavite512_close( &ctx.shavite, hash0 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash1, 64 );
-   sph_shavite512_close( &ctx.shavite, hash1 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash2, 64 );
-   sph_shavite512_close( &ctx.shavite, hash2 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash3, 64 );
-   sph_shavite512_close( &ctx.shavite, hash3 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash4, 64 );
-   sph_shavite512_close( &ctx.shavite, hash4 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash5, 64 );
-   sph_shavite512_close( &ctx.shavite, hash5 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash6, 64 );
-   sph_shavite512_close( &ctx.shavite, hash6 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash7, 64 );
-   sph_shavite512_close( &ctx.shavite, hash7 );
+   shavite512_full( &ctx.shavite, hash0, hash0, 64 );
+   shavite512_full( &ctx.shavite, hash1, hash1, 64 );
+   shavite512_full( &ctx.shavite, hash2, hash2, 64 );
+   shavite512_full( &ctx.shavite, hash3, hash3, 64 );
+   shavite512_full( &ctx.shavite, hash4, hash4, 64 );
+   shavite512_full( &ctx.shavite, hash5, hash5, 64 );
+   shavite512_full( &ctx.shavite, hash6, hash6, 64 );
+   shavite512_full( &ctx.shavite, hash7, hash7, 64 );
intrlv_4x128_512( vhashA, hash0, hash1, hash2, hash3 );
intrlv_4x128_512( vhashB, hash4, hash5, hash6, hash7 );
@@ -210,30 +190,14 @@ void x17_8way_hash( void *state, const void *input )
dintrlv_8x64_512( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
vhash );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash0, 64 );
-   sph_fugue512_close( &ctx.fugue, hash0 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash1, 64 );
-   sph_fugue512_close( &ctx.fugue, hash1 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash2, 64 );
-   sph_fugue512_close( &ctx.fugue, hash2 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash3, 64 );
-   sph_fugue512_close( &ctx.fugue, hash3 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash4, 64 );
-   sph_fugue512_close( &ctx.fugue, hash4 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash5, 64 );
-   sph_fugue512_close( &ctx.fugue, hash5 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash6, 64 );
-   sph_fugue512_close( &ctx.fugue, hash6 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash7, 64 );
-   sph_fugue512_close( &ctx.fugue, hash7 );
+   sph_fugue512_full( &ctx.fugue, hash0, hash0, 64 );
+   sph_fugue512_full( &ctx.fugue, hash1, hash1, 64 );
+   sph_fugue512_full( &ctx.fugue, hash2, hash2, 64 );
+   sph_fugue512_full( &ctx.fugue, hash3, hash3, 64 );
+   sph_fugue512_full( &ctx.fugue, hash4, hash4, 64 );
+   sph_fugue512_full( &ctx.fugue, hash5, hash5, 64 );
+   sph_fugue512_full( &ctx.fugue, hash6, hash6, 64 );
+   sph_fugue512_full( &ctx.fugue, hash7, hash7, 64 );
intrlv_8x32_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7 );
@@ -245,30 +209,14 @@ void x17_8way_hash( void *state, const void *input )
dintrlv_8x32_512( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
vhash );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash0, 64 );
-   sph_whirlpool_close( &ctx.whirlpool, hash0 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash1, 64 );
-   sph_whirlpool_close( &ctx.whirlpool, hash1 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash2, 64 );
-   sph_whirlpool_close( &ctx.whirlpool, hash2 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash3, 64 );
-   sph_whirlpool_close( &ctx.whirlpool, hash3 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash4, 64 );
-   sph_whirlpool_close( &ctx.whirlpool, hash4 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash5, 64 );
-   sph_whirlpool_close( &ctx.whirlpool, hash5 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash6, 64 );
-   sph_whirlpool_close( &ctx.whirlpool, hash6 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash7, 64 );
-   sph_whirlpool_close( &ctx.whirlpool, hash7 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash0, hash0, 64 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash1, hash1, 64 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash2, hash2, 64 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash3, hash3, 64 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash4, hash4, 64 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash5, hash5, 64 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash6, hash6, 64 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash7, hash7, 64 );
intrlv_8x64_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7 );
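The intrlv/dintrlv calls that bracket each scalar stage convert between the SIMD lane layout and contiguous per-lane buffers. A 2-lane, 64-bit toy version of the interleave, with the layout assumed from the call sites (intrlv_8x64_512 above is the 8-lane case):

#include <stdint.h>

/* n-way SIMD hashing keeps 64-bit word j of lane i at v[ j*n + i ],
   while the scalar sph_* code wants each lane contiguous; this is the
   2-lane transform, written out for clarity. */
static void intrlv_2x64_sketch( uint64_t *v, const uint64_t *h0,
                                const uint64_t *h1, int words )
{
   for ( int j = 0; j < words; j++ )
   {
      v[ 2*j ]     = h0[j];   // lane 0
      v[ 2*j + 1 ] = h1[j];   // lane 1
   }
}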
@@ -287,18 +235,18 @@ void x17_8way_hash( void *state, const void *input )
int scanhash_x17_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
-   uint32_t hash[8*16] __attribute__ ((aligned (128)));
-   uint32_t vdata[24*8] __attribute__ ((aligned (64)));
+   uint32_t hash32[8*8] __attribute__ ((aligned (128)));
+   uint32_t vdata[20*8] __attribute__ ((aligned (64)));
    uint32_t lane_hash[8] __attribute__ ((aligned (64)));
-   uint32_t *hash7 = &(hash[7<<3]);
+   uint32_t *hash32_d7 = &(hash32[7*8]);
    uint32_t *pdata = work->data;
    const uint32_t *ptarget = work->target;
    const uint32_t first_nonce = pdata[19];
    const uint32_t last_nonce = max_nonce - 8;
-   __m512i *noncev = (__m512i*)vdata + 9;   // aligned
+   __m512i *noncev = (__m512i*)vdata + 9;
    uint32_t n = first_nonce;
    const int thr_id = mythr->id;
-   const uint32_t Htarg = ptarget[7];
+   const uint32_t targ32_d7 = ptarget[7];
    const bool bench = opt_benchmark;
    mm512_bswap32_intrlv80_8x64( vdata, pdata );
@@ -307,12 +255,12 @@ int scanhash_x17_8way( struct work *work, uint32_t max_nonce,
                      n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
    do
    {
-      x17_8way_hash( hash, vdata );
+      x17_8way_hash( hash32, vdata );
       for ( int lane = 0; lane < 8; lane++ )
-      if ( unlikely( ( hash7[ lane ] <= Htarg ) && !bench ) )
+      if ( unlikely( ( hash32_d7[ lane ] <= targ32_d7 ) && !bench ) )
       {
-         extr_lane_8x32( lane_hash, hash, lane, 256 );
+         extr_lane_8x32( lane_hash, hash32, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) ) )
{
pdata[19] = bswap_32( n + lane );
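The scan loop above filters on a single hash word before paying for lane extraction: word 7 holds the most significant 32 bits of the little-endian 256-bit hash, so the hash32_d7 compare rejects almost every lane with one instruction. A sketch of the full comparison that valid_hash (and the old fulltest) is assumed to perform once the cheap filter passes:

#include <stdint.h>

/* Full 256-bit little-endian comparison, highest word first; the
   word-7 quick check above is just this loop's first iteration done
   across all lanes at once. */
static int check_lane_sketch( const uint32_t *lane_hash,
                              const uint32_t *target )
{
   for ( int i = 7; i >= 0; i-- )
   {
      if ( lane_hash[i] < target[i] ) return 1;
      if ( lane_hash[i] > target[i] ) return 0;
   }
   return 1;   // equal to the target still qualifies
}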
@@ -378,9 +326,7 @@ void x17_4way_hash( void *state, const void *input )
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
-   skein512_4way_init( &ctx.skein );
-   skein512_4way_update( &ctx.skein, vhash, 64 );
-   skein512_4way_close( &ctx.skein, vhash );
+   skein512_4way_full( &ctx.skein, vhash, vhash, 64 );
jh512_4way_init( &ctx.jh );
jh512_4way_update( &ctx.jh, vhash, 64 );
@@ -424,18 +370,10 @@ void x17_4way_hash( void *state, const void *input )
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash0, 64 );
-   sph_fugue512_close( &ctx.fugue, hash0 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash1, 64 );
-   sph_fugue512_close( &ctx.fugue, hash1 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash2, 64 );
-   sph_fugue512_close( &ctx.fugue, hash2 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash3, 64 );
-   sph_fugue512_close( &ctx.fugue, hash3 );
+   sph_fugue512_full( &ctx.fugue, hash0, hash0, 64 );
+   sph_fugue512_full( &ctx.fugue, hash1, hash1, 64 );
+   sph_fugue512_full( &ctx.fugue, hash2, hash2, 64 );
+   sph_fugue512_full( &ctx.fugue, hash3, hash3, 64 );
intrlv_4x32_512( vhash, hash0, hash1, hash2, hash3 );
@@ -445,18 +383,10 @@ void x17_4way_hash( void *state, const void *input )
dintrlv_4x32_512( hash0, hash1, hash2, hash3, vhash );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash0, 64 );
-   sph_whirlpool_close( &ctx.whirlpool, hash0 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash1, 64 );
-   sph_whirlpool_close( &ctx.whirlpool, hash1 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash2, 64 );
-   sph_whirlpool_close( &ctx.whirlpool, hash2 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash3, 64 );
-   sph_whirlpool_close( &ctx.whirlpool, hash3 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash0, hash0, 64 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash1, hash1, 64 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash2, hash2, 64 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash3, hash3, 64 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
@@ -474,18 +404,18 @@ void x17_4way_hash( void *state, const void *input )
int scanhash_x17_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
-   uint32_t hash[16*4] __attribute__ ((aligned (64)));
+   uint32_t hash32[8*4] __attribute__ ((aligned (64)));
    uint32_t vdata[20*4] __attribute__ ((aligned (64)));
    uint32_t lane_hash[8] __attribute__ ((aligned (64)));
-   uint32_t *hash7 = &(hash[7<<2]);
+   uint32_t *hash32_d7 = &(hash32[ 7*4 ]);
    uint32_t *pdata = work->data;
    const uint32_t *ptarget = work->target;
    const uint32_t first_nonce = pdata[19];
-   const uint32_t last_nonce = max_nonce -4;
-   __m256i *noncev = (__m256i*)vdata + 9;   // aligned
+   const uint32_t last_nonce = max_nonce - 4;
+   __m256i *noncev = (__m256i*)vdata + 9;
    uint32_t n = first_nonce;
    const int thr_id = mythr->id;
-   const uint32_t Htarg = ptarget[7];
+   const uint32_t targ32_d7 = ptarget[7];
    const bool bench = opt_benchmark;
    mm256_bswap32_intrlv80_4x64( vdata, pdata );
@@ -493,12 +423,12 @@ int scanhash_x17_4way( struct work *work, uint32_t max_nonce,
       _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
    do
    {
-      x17_4way_hash( hash, vdata );
+      x17_4way_hash( hash32, vdata );
       for ( int lane = 0; lane < 4; lane++ )
-      if ( unlikely( hash7[ lane ] <= Htarg && !bench ) )
+      if ( unlikely( hash32_d7[ lane ] <= targ32_d7 && !bench ) )
       {
-         extr_lane_4x32( lane_hash, hash, lane, 256 );
+         extr_lane_4x32( lane_hash, hash32, lane, 256 );
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = bswap_32( n + lane );

View File

@@ -169,8 +169,8 @@ int scanhash_x17( struct work *work, uint32_t max_nonce,
submit_solution( work, hash64, mythr );
}
n++;
-   } while ( n < max_nonce && !work_restart[thr_id].restart);
-   *hashes_done = n - first_nonce + 1;
+   } while ( n < max_nonce && !work_restart[thr_id].restart );
+   *hashes_done = n - first_nonce;
pdata[19] = n;
return 0;
}
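Dropping the "+ 1" fixes an off-by-one in the reported hash count: with first_nonce = 100 and the loop exiting at n = 108 after hashing nonces 100 through 107, n - first_nonce is exactly 8, while the old expression reported 9.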

View File

@@ -76,9 +76,7 @@ void xevan_8way_hash( void *output, const void *input )
blake512_8way_full( &ctx.blake, vhash, input, 80 );
memset( &vhash[8<<3], 0, 64<<3 );
-   bmw512_8way_init( &ctx.bmw );
-   bmw512_8way_update( &ctx.bmw, vhash, dataLen );
-   bmw512_8way_close( &ctx.bmw, vhash );
+   bmw512_8way_full( &ctx.bmw, vhash, vhash, dataLen );
#if defined(__VAES__)
@@ -108,9 +106,7 @@ void xevan_8way_hash( void *output, const void *input )
#endif
-   skein512_8way_init( &ctx.skein );
-   skein512_8way_update( &ctx.skein, vhash, dataLen );
-   skein512_8way_close( &ctx.skein, vhash );
+   skein512_8way_full( &ctx.skein, vhash, vhash, dataLen );
jh512_8way_init( &ctx.jh );
jh512_8way_update( &ctx.jh, vhash, dataLen );
@@ -138,30 +134,14 @@ void xevan_8way_hash( void *output, const void *input )
dintrlv_4x128( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 );
dintrlv_4x128( hash4, hash5, hash6, hash7, vhashB, dataLen<<3 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash0, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash0 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash1, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash1 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash2, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash2 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash3, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash3 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash4, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash4 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash5, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash5 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash6, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash6 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash7, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash7 );
+   shavite512_full( &ctx.shavite, hash0, hash0, dataLen );
+   shavite512_full( &ctx.shavite, hash1, hash1, dataLen );
+   shavite512_full( &ctx.shavite, hash2, hash2, dataLen );
+   shavite512_full( &ctx.shavite, hash3, hash3, dataLen );
+   shavite512_full( &ctx.shavite, hash4, hash4, dataLen );
+   shavite512_full( &ctx.shavite, hash5, hash5, dataLen );
+   shavite512_full( &ctx.shavite, hash6, hash6, dataLen );
+   shavite512_full( &ctx.shavite, hash7, hash7, dataLen );
intrlv_4x128( vhashA, hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x128( vhashB, hash4, hash5, hash6, hash7, dataLen<<3 );
@@ -212,30 +192,14 @@ void xevan_8way_hash( void *output, const void *input )
dintrlv_8x64( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
vhash, dataLen<<3 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash0, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash0 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash1, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash1 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash2, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash2 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash3, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash3 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash4, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash4 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash5, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash5 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash6, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash6 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash7, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash7 );
+   sph_fugue512_full( &ctx.fugue, hash0, hash0, dataLen );
+   sph_fugue512_full( &ctx.fugue, hash1, hash1, dataLen );
+   sph_fugue512_full( &ctx.fugue, hash2, hash2, dataLen );
+   sph_fugue512_full( &ctx.fugue, hash3, hash3, dataLen );
+   sph_fugue512_full( &ctx.fugue, hash4, hash4, dataLen );
+   sph_fugue512_full( &ctx.fugue, hash5, hash5, dataLen );
+   sph_fugue512_full( &ctx.fugue, hash6, hash6, dataLen );
+   sph_fugue512_full( &ctx.fugue, hash7, hash7, dataLen );
intrlv_8x32( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, dataLen<<3 );
@@ -247,30 +211,14 @@ void xevan_8way_hash( void *output, const void *input )
dintrlv_8x32( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
vhash, dataLen<<3 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash0, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash0 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash1, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash1 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash2, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash2 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash3, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash3 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash4, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash4 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash5, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash5 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash6, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash6 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash7, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash7 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash0, hash0, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash1, hash1, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash2, hash2, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash3, hash3, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash4, hash4, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash5, hash5, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash6, hash6, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash7, hash7, dataLen );
intrlv_8x64( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, dataLen<<3 );
@@ -291,9 +239,7 @@ void xevan_8way_hash( void *output, const void *input )
blake512_8way_full( &ctx.blake, vhash, vhash, dataLen );
-   bmw512_8way_init( &ctx.bmw );
-   bmw512_8way_update( &ctx.bmw, vhash, dataLen );
-   bmw512_8way_close( &ctx.bmw, vhash );
+   bmw512_8way_full( &ctx.bmw, vhash, vhash, dataLen );
#if defined(__VAES__)
@@ -323,9 +269,7 @@ void xevan_8way_hash( void *output, const void *input )
#endif
-   skein512_8way_init( &ctx.skein );
-   skein512_8way_update( &ctx.skein, vhash, dataLen );
-   skein512_8way_close( &ctx.skein, vhash );
+   skein512_8way_full( &ctx.skein, vhash, vhash, dataLen );
jh512_8way_init( &ctx.jh );
jh512_8way_update( &ctx.jh, vhash, dataLen );
@@ -353,30 +297,14 @@ void xevan_8way_hash( void *output, const void *input )
dintrlv_4x128( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 );
dintrlv_4x128( hash4, hash5, hash6, hash7, vhashB, dataLen<<3 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash0, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash0 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash1, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash1 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash2, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash2 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash3, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash3 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash4, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash4 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash5, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash5 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash6, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash6 );
-   sph_shavite512_init( &ctx.shavite );
-   sph_shavite512( &ctx.shavite, hash7, dataLen );
-   sph_shavite512_close( &ctx.shavite, hash7 );
+   shavite512_full( &ctx.shavite, hash0, hash0, dataLen );
+   shavite512_full( &ctx.shavite, hash1, hash1, dataLen );
+   shavite512_full( &ctx.shavite, hash2, hash2, dataLen );
+   shavite512_full( &ctx.shavite, hash3, hash3, dataLen );
+   shavite512_full( &ctx.shavite, hash4, hash4, dataLen );
+   shavite512_full( &ctx.shavite, hash5, hash5, dataLen );
+   shavite512_full( &ctx.shavite, hash6, hash6, dataLen );
+   shavite512_full( &ctx.shavite, hash7, hash7, dataLen );
intrlv_4x128( vhashA, hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x128( vhashB, hash4, hash5, hash6, hash7, dataLen<<3 );
@@ -427,30 +355,14 @@ void xevan_8way_hash( void *output, const void *input )
dintrlv_8x64( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
vhash, dataLen<<3 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash0, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash0 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash1, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash1 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash2, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash2 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash3, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash3 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash4, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash4 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash5, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash5 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash6, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash6 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash7, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash7 );
+   sph_fugue512_full( &ctx.fugue, hash0, hash0, dataLen );
+   sph_fugue512_full( &ctx.fugue, hash1, hash1, dataLen );
+   sph_fugue512_full( &ctx.fugue, hash2, hash2, dataLen );
+   sph_fugue512_full( &ctx.fugue, hash3, hash3, dataLen );
+   sph_fugue512_full( &ctx.fugue, hash4, hash4, dataLen );
+   sph_fugue512_full( &ctx.fugue, hash5, hash5, dataLen );
+   sph_fugue512_full( &ctx.fugue, hash6, hash6, dataLen );
+   sph_fugue512_full( &ctx.fugue, hash7, hash7, dataLen );
intrlv_8x32( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, dataLen<<3 );
@@ -462,30 +374,14 @@ void xevan_8way_hash( void *output, const void *input )
dintrlv_8x32( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
vhash, dataLen<<3 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash0, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash0 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash1, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash1 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash2, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash2 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash3, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash3 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash4, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash4 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash5, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash5 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash6, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash6 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash7, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash7 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash0, hash0, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash1, hash1, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash2, hash2, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash3, hash3, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash4, hash4, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash5, hash5, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash6, hash6, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash7, hash7, dataLen );
intrlv_8x64( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, dataLen<<3 );
@@ -504,40 +400,43 @@ void xevan_8way_hash( void *output, const void *input )
int scanhash_xevan_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
-   uint32_t hash[8*16] __attribute__ ((aligned (128)));
-   uint32_t vdata[24*8] __attribute__ ((aligned (64)));
+   uint32_t hash[8*8] __attribute__ ((aligned (128)));
+   uint32_t vdata[20*8] __attribute__ ((aligned (64)));
    uint32_t lane_hash[8] __attribute__ ((aligned (64)));
-   uint32_t *hash7 = &(hash[7<<3]);
+   uint32_t *hashd7 = &(hash[7*8]);
    uint32_t *pdata = work->data;
    const uint32_t *ptarget = work->target;
    const uint32_t first_nonce = pdata[19];
    const uint32_t last_nonce = max_nonce - 8;
-   __m512i *noncev = (__m512i*)vdata + 9;   // aligned
+   __m512i *noncev = (__m512i*)vdata + 9;
    uint32_t n = first_nonce;
    const int thr_id = mythr->id;
-   const uint32_t Htarg = ptarget[7];
+   const uint32_t targ32 = ptarget[7];
    const bool bench = opt_benchmark;
    mm512_bswap32_intrlv80_8x64( vdata, pdata );
-   *noncev = mm512_intrlv_blend_32(
-              _mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
-                                n+3, 0, n+2, 0, n+1, 0, n,   0 ), *noncev );
    do
    {
+      *noncev = mm512_intrlv_blend_32( mm512_bswap_32(
+                 _mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
+                                   n+3, 0, n+2, 0, n+1, 0, n,   0 ) ), *noncev );
       xevan_8way_hash( hash, vdata );
       for ( int lane = 0; lane < 8; lane++ )
-      if unlikely( ( hash7[ lane ] <= Htarg ) )
+      if ( unlikely( ( hashd7[ lane ] <= targ32 ) && !bench ) )
       {
          extr_lane_8x32( lane_hash, hash, lane, 256 );
-         if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
+         if ( likely( valid_hash( lane_hash, ptarget ) ) )
          {
-            pdata[19] = n + lane;
+            pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
*noncev = _mm512_add_epi32( *noncev,
m512_const1_64( 0x0000000800000000 ) );
n += 8;
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
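The nonce bookkeeping above works directly on the interleaved header copies. A scalar model of the AVX-512 update, under the 8x64 interleave assumed from the call sites (64-bit word w of lane i at index w*8 + i, with the nonce in the high half of header word pair 18-19, i.e. 64-bit word 9):

#include <stdint.h>

/* Scalar equivalent of _mm512_add_epi32( *noncev,
   m512_const1_64( 0x0000000800000000 ) ): adding that constant to each
   64-bit element bumps only its high 32 bits (the nonce) by 8, so all
   eight lanes advance past the batch just hashed. */
static void bump_nonces_sketch( uint64_t *vdata64 )
{
   for ( int lane = 0; lane < 8; lane++ )
      vdata64[ 9*8 + lane ] += 0x0000000800000000ULL;
}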
@@ -578,8 +477,6 @@ void xevan_4way_hash( void *output, const void *input )
const int dataLen = 128;
xevan_4way_context_overlay ctx __attribute__ ((aligned (64)));
-   // parallel 4 way
blake512_4way_full( &ctx.blake, vhash, input, 80 );
memset( &vhash[8<<2], 0, 64<<2 );
@@ -598,9 +495,7 @@ void xevan_4way_hash( void *output, const void *input )
// Parallel 4way
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
-   skein512_4way_init( &ctx.skein );
-   skein512_4way_update( &ctx.skein, vhash, dataLen );
-   skein512_4way_close( &ctx.skein, vhash );
+   skein512_4way_full( &ctx.skein, vhash, vhash, dataLen );
jh512_4way_init( &ctx.jh );
jh512_4way_update( &ctx.jh, vhash, dataLen );
@@ -618,15 +513,11 @@ void xevan_4way_hash( void *output, const void *input )
cube_2way_full( &ctx.cube, vhashA, 512, vhashA, dataLen );
cube_2way_full( &ctx.cube, vhashB, 512, vhashB, dataLen );
-   shavite512_2way_init( &ctx.shavite );
-   shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, dataLen );
-   shavite512_2way_init( &ctx.shavite );
-   shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, dataLen );
+   shavite512_2way_full( &ctx.shavite, vhashA, vhashA, dataLen );
+   shavite512_2way_full( &ctx.shavite, vhashB, vhashB, dataLen );
-   simd_2way_init( &ctx.simd, 512 );
-   simd_2way_update_close( &ctx.simd, vhashA, vhashA, dataLen<<3 );
-   simd_2way_init( &ctx.simd, 512 );
-   simd_2way_update_close( &ctx.simd, vhashB, vhashB, dataLen<<3 );
+   simd512_2way_full( &ctx.simd, vhashA, vhashA, dataLen );
+   simd512_2way_full( &ctx.simd, vhashB, vhashB, dataLen );
dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 );
dintrlv_2x128( hash2, hash3, vhashB, dataLen<<3 );
@@ -649,18 +540,10 @@ void xevan_4way_hash( void *output, const void *input )
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash0, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash0 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash1, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash1 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash2, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash2 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash3, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash3 );
+   sph_fugue512_full( &ctx.fugue, hash0, hash0, 64 );
+   sph_fugue512_full( &ctx.fugue, hash1, hash1, 64 );
+   sph_fugue512_full( &ctx.fugue, hash2, hash2, 64 );
+   sph_fugue512_full( &ctx.fugue, hash3, hash3, 64 );
// Parallel 4way 32 bit
intrlv_4x32( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
@@ -672,18 +555,10 @@ void xevan_4way_hash( void *output, const void *input )
dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
// Serial
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash0, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash0 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash1, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash1 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash2, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash2 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash3, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash3 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash0, hash0, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash1, hash1, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash2, hash2, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash3, hash3, dataLen );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
@@ -718,9 +593,7 @@ void xevan_4way_hash( void *output, const void *input )
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
-   skein512_4way_init( &ctx.skein );
-   skein512_4way_update( &ctx.skein, vhash, dataLen );
-   skein512_4way_close( &ctx.skein, vhash );
+   skein512_4way_full( &ctx.skein, vhash, vhash, dataLen );
jh512_4way_init( &ctx.jh );
jh512_4way_update( &ctx.jh, vhash, dataLen );
@@ -738,15 +611,11 @@ void xevan_4way_hash( void *output, const void *input )
cube_2way_full( &ctx.cube, vhashA, 512, vhashA, dataLen );
cube_2way_full( &ctx.cube, vhashB, 512, vhashB, dataLen );
-   shavite512_2way_init( &ctx.shavite );
-   shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, dataLen );
-   shavite512_2way_init( &ctx.shavite );
-   shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, dataLen );
+   shavite512_2way_full( &ctx.shavite, vhashA, vhashA, dataLen );
+   shavite512_2way_full( &ctx.shavite, vhashB, vhashB, dataLen );
-   simd_2way_init( &ctx.simd, 512 );
-   simd_2way_update_close( &ctx.simd, vhashA, vhashA, dataLen<<3 );
-   simd_2way_init( &ctx.simd, 512 );
-   simd_2way_update_close( &ctx.simd, vhashB, vhashB, dataLen<<3 );
+   simd512_2way_full( &ctx.simd, vhashA, vhashA, dataLen );
+   simd512_2way_full( &ctx.simd, vhashB, vhashB, dataLen );
dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 );
dintrlv_2x128( hash2, hash3, vhashB, dataLen<<3 );
@@ -768,18 +637,10 @@ void xevan_4way_hash( void *output, const void *input )
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash0, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash0 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash1, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash1 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash2, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash2 );
-   sph_fugue512_init( &ctx.fugue );
-   sph_fugue512( &ctx.fugue, hash3, dataLen );
-   sph_fugue512_close( &ctx.fugue, hash3 );
+   sph_fugue512_full( &ctx.fugue, hash0, hash0, 64 );
+   sph_fugue512_full( &ctx.fugue, hash1, hash1, 64 );
+   sph_fugue512_full( &ctx.fugue, hash2, hash2, 64 );
+   sph_fugue512_full( &ctx.fugue, hash3, hash3, 64 );
intrlv_4x32( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
@@ -789,18 +650,10 @@ void xevan_4way_hash( void *output, const void *input )
dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash0, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash0 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash1, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash1 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash2, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash2 );
-   sph_whirlpool_init( &ctx.whirlpool );
-   sph_whirlpool( &ctx.whirlpool, hash3, dataLen );
-   sph_whirlpool_close( &ctx.whirlpool, hash3 );
+   sph_whirlpool512_full( &ctx.whirlpool, hash0, hash0, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash1, hash1, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash2, hash2, dataLen );
+   sph_whirlpool512_full( &ctx.whirlpool, hash3, hash3, dataLen );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
@@ -818,41 +671,43 @@ void xevan_4way_hash( void *output, const void *input )
int scanhash_xevan_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
-   uint32_t hash[4*16] __attribute__ ((aligned (64)));
-   uint32_t vdata[24*4] __attribute__ ((aligned (64)));
-   uint32_t lane_hash[8] __attribute__ ((aligned (32)));
-   uint32_t *hash7 = &(hash[7<<2]);
+   uint32_t hash[16*4] __attribute__ ((aligned (128)));
+   uint32_t vdata[20*4] __attribute__ ((aligned (64)));
+   uint32_t lane_hash[8] __attribute__ ((aligned (64)));
+   uint32_t *hashd7 = &(hash[7<<2]);
    uint32_t *pdata = work->data;
    uint32_t *ptarget = work->target;
    int thr_id = mythr->id;
-   __m256i *noncev = (__m256i*)vdata + 9;   // aligned
-   const uint32_t Htarg = ptarget[7];
+   __m256i *noncev = (__m256i*)vdata + 9;
+   const uint32_t targ32 = ptarget[7];
    const uint32_t first_nonce = pdata[19];
+   const uint32_t last_nonce = max_nonce - 4;
    uint32_t n = first_nonce;
+   const bool bench = opt_benchmark;
-   if ( opt_benchmark )
-      ptarget[7] = 0x0cff;
+   if ( bench ) ptarget[7] = 0x0cff;
    mm256_bswap32_intrlv80_4x64( vdata, pdata );
-   *noncev = mm256_intrlv_blend_32(
-              _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
    do {
+      *noncev = mm256_intrlv_blend_32( mm256_bswap_32(
+                 _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
       xevan_4way_hash( hash, vdata );
       for ( int lane = 0; lane < 4; lane++ )
-      if ( hash7[ lane ] <= Htarg )
+      if ( unlikely( hashd7[ lane ] <= targ32 ) && !bench )
       {
          extr_lane_4x32( lane_hash, hash, lane, 256 );
-         if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
+         if ( valid_hash( lane_hash, ptarget ) )
          {
-            pdata[19] = n + lane;
+            pdata[19] = bswap_32( n + lane );
             submit_lane_solution( work, lane_hash, mythr, lane );
          }
       }
       *noncev = _mm256_add_epi32( *noncev,
                                   m256_const1_64( 0x0000000400000000 ) );
       n += 4;
-   } while ( ( n < max_nonce-4 ) && !work_restart[thr_id].restart );
-   *hashes_done = n - first_nonce + 1;
+   } while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
    pdata[19] = n;
+   *hashes_done = n - first_nonce;
return 0;
}
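The new last_nonce bound exists because the 4-way scanner consumes four nonces per pass. A one-line sketch of the arithmetic:

/* A batch starting at n covers n .. n+lanes-1, so the final pass must
   start no later than max_nonce - lanes to stay inside the assigned
   nonce range (lanes = 4 here, 8 in the AVX-512 path). */
static uint32_t last_start_sketch( uint32_t max_nonce, uint32_t lanes )
{
   return max_nonce - lanes;
}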

View File

@@ -56,8 +56,6 @@ typedef struct {
} xevan_ctx_holder;
xevan_ctx_holder xevan_ctx __attribute__ ((aligned (64)));
-static __thread sph_blake512_context xevan_blake_mid
-                                     __attribute__ ((aligned (64)));
void init_xevan_ctx()
{
@@ -85,34 +83,23 @@ void init_xevan_ctx()
#endif
};
-void xevan_blake512_midstate( const void* input )
-{
-    memcpy( &xevan_blake_mid, &xevan_ctx.blake, sizeof xevan_blake_mid );
-    sph_blake512( &xevan_blake_mid, input, 64 );
-}
void xevan_hash(void *output, const void *input)
{
   uint32_t _ALIGN(64) hash[32];   // 128 bytes required
const int dataLen = 128;
   xevan_ctx_holder ctx __attribute__ ((aligned (64)));
   memcpy( &ctx, &xevan_ctx, sizeof(xevan_ctx) );
-   const int midlen = 64;            // bytes
-   const int tail = 80 - midlen;     // 16
-   memcpy( &ctx.blake, &xevan_blake_mid, sizeof xevan_blake_mid );
-   sph_blake512( &ctx.blake, input + midlen, tail );
-   sph_blake512_close(&ctx.blake, hash);
+   sph_blake512( &ctx.blake, input, 80 );
+   sph_blake512_close( &ctx.blake, hash );
memset(&hash[16], 0, 64);
sph_bmw512(&ctx.bmw, hash, dataLen);
sph_bmw512_close(&ctx.bmw, hash);
#if defined(__AES__)
   update_and_final_groestl( &ctx.groestl, (char*)hash,
                             (const char*)hash, dataLen*8 );
#else
sph_groestl512(&ctx.groestl, hash, dataLen);
sph_groestl512_close(&ctx.groestl, hash);
@@ -127,20 +114,20 @@ void xevan_hash(void *output, const void *input)
sph_keccak512(&ctx.keccak, hash, dataLen);
sph_keccak512_close(&ctx.keccak, hash);
   update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
                           (const BitSequence*)hash, dataLen );
   cubehashUpdateDigest( &ctx.cubehash, (byte*)hash,
                         (const byte*) hash, dataLen );
sph_shavite512(&ctx.shavite, hash, dataLen);
sph_shavite512_close(&ctx.shavite, hash);
   update_final_sd( &ctx.simd, (BitSequence *)hash,
(const BitSequence *)hash, dataLen*8 );
#if defined(__AES__)
   update_final_echo( &ctx.echo, (BitSequence *) hash,
(const BitSequence *) hash, dataLen*8 );
#else
sph_echo512(&ctx.echo, hash, dataLen);
@@ -159,15 +146,15 @@ void xevan_hash(void *output, const void *input)
sph_whirlpool(&ctx.whirlpool, hash, dataLen);
sph_whirlpool_close(&ctx.whirlpool, hash);
   SHA512_Update( &ctx.sha512, hash, dataLen );
   SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
sph_haval256_5(&ctx.haval,(const void*) hash, dataLen);
sph_haval256_5_close(&ctx.haval, hash);
memset(&hash[8], 0, dataLen - 32);
   memcpy( &ctx, &xevan_ctx, sizeof(xevan_ctx) );
sph_blake512(&ctx.blake, hash, dataLen);
sph_blake512_close(&ctx.blake, hash);
@@ -176,11 +163,11 @@ void xevan_hash(void *output, const void *input)
sph_bmw512_close(&ctx.bmw, hash);
#if defined(__AES__)
   update_and_final_groestl( &ctx.groestl, (char*)hash,
                             (const BitSequence*)hash, dataLen*8 );
#else
sph_groestl512(&ctx.groestl, hash, dataLen);
   sph_groestl512_close(&ctx.groestl, hash);
#endif
sph_skein512(&ctx.skein, hash, dataLen);
@@ -191,24 +178,25 @@ void xevan_hash(void *output, const void *input)
sph_keccak512(&ctx.keccak, hash, dataLen);
sph_keccak512_close(&ctx.keccak, hash);
   update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
                           (const BitSequence*)hash, dataLen );
   cubehashUpdateDigest( &ctx.cubehash, (byte*)hash,
                         (const byte*) hash, dataLen );
sph_shavite512(&ctx.shavite, hash, dataLen);
sph_shavite512_close(&ctx.shavite, hash);
   update_final_sd( &ctx.simd, (BitSequence *)hash,
(const BitSequence *)hash, dataLen*8 );
#if defined(__AES__)
   update_final_echo( &ctx.echo, (BitSequence *) hash,
(const BitSequence *) hash, dataLen*8 );
#else
   sph_echo512(&ctx.echo, hash, dataLen);
   sph_echo512_close(&ctx.echo, hash);
#endif
sph_hamsi512(&ctx.hamsi, hash, dataLen);
@@ -223,8 +211,8 @@ void xevan_hash(void *output, const void *input)
sph_whirlpool(&ctx.whirlpool, hash, dataLen);
sph_whirlpool_close(&ctx.whirlpool, hash);
   SHA512_Update( &ctx.sha512, hash, dataLen );
   SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
sph_haval256_5(&ctx.haval,(const void*) hash, dataLen);
sph_haval256_5_close(&ctx.haval, hash);
@@ -233,41 +221,33 @@ void xevan_hash(void *output, const void *input)
}
int scanhash_xevan( struct work *work, uint32_t max_nonce,
                    uint64_t *hashes_done, struct thr_info *mythr )
{
-   uint32_t _ALIGN(64) hash[8];
-   uint32_t _ALIGN(64) endiandata[20];
+   uint32_t edata[20] __attribute__((aligned(64)));
+   uint32_t hash64[8] __attribute__((aligned(64)));
    uint32_t *pdata = work->data;
    uint32_t *ptarget = work->target;
-   int thr_id = mythr->id;   // thr_id arg is deprecated
-   const uint32_t Htarg = ptarget[7];
+   uint32_t n = pdata[19];
    const uint32_t first_nonce = pdata[19];
-   uint32_t nonce = first_nonce;
-   volatile uint8_t *restart = &(work_restart[thr_id].restart);
+   const int thr_id = mythr->id;
+   const bool bench = opt_benchmark;

-   if (opt_benchmark)
-      ptarget[7] = 0x0cff;
-   for (int k=0; k < 19; k++)
-      be32enc(&endiandata[k], pdata[k]);
-   xevan_blake512_midstate( endiandata );
-   do {
-      be32enc(&endiandata[19], nonce);
-      xevan_hash(hash, endiandata);
-      if (hash[7] <= Htarg )
-      if ( fulltest( hash, ptarget ) && !opt_benchmark )
-      {
-         pdata[19] = nonce;
-         submit_solution( work, hash, mythr );
-      }
-      nonce++;
-   } while ( nonce < max_nonce && !(*restart) );
-   pdata[19] = nonce;
-   *hashes_done = pdata[19] - first_nonce + 1;
-   return 0;
+   mm128_bswap32_80( edata, pdata );
+   do
+   {
+      edata[19] = n;
+      xevan_hash( hash64, edata );
+      if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
+      {
+         pdata[19] = bswap_32( n );
+         submit_solution( work, hash64, mythr );
+      }
+      n++;
+   } while ( n < max_nonce && !work_restart[thr_id].restart );
+   pdata[19] = n;
+   *hashes_done = n - first_nonce;
+   return 0;
}
#endif
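This revision deletes the Blake-512 midstate shortcut rather than extending it. For reference, a sketch of the removed idea: the first 64 of the 80 header bytes are constant per work unit, so their compression can be done once and only the nonce-bearing 16-byte tail re-hashed per attempt. Names below are illustrative and the include path may differ from this tree's algo/blake/sph_blake.h:

#include "sph_blake.h"   // assumed location of the sph Blake-512 API

static sph_blake512_context blake_mid_sketch;

/* Once per work unit: absorb the constant 64-byte prefix. */
static void midstate_init_sketch( const void *header80 )
{
   sph_blake512_init( &blake_mid_sketch );
   sph_blake512( &blake_mid_sketch, header80, 64 );
}

/* Per nonce: copy the saved state and hash only the 16-byte tail. */
static void midstate_hash_sketch( void *out, const void *header80 )
{
   sph_blake512_context c = blake_mid_sketch;           // resume midstate
   sph_blake512( &c, (const char*)header80 + 64, 16 );  // nonce lives here
   sph_blake512_close( &c, out );
}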

View File

@@ -62,7 +62,7 @@ union _x22i_8way_ctx_overlay
};
typedef union _x22i_8way_ctx_overlay x22i_8way_ctx_overlay;
-void x22i_8way_hash( void *output, const void *input )
+int x22i_8way_hash( void *output, const void *input, int thrid )
{
uint64_t vhash[8*8] __attribute__ ((aligned (128)));
uint64_t vhashA[8*8] __attribute__ ((aligned (64)));
@@ -87,64 +87,40 @@ void x22i_8way_hash( void *output, const void *input )
unsigned char hashA7[64] __attribute__((aligned(32))) = {0};
x22i_8way_ctx_overlay ctx;
-   blake512_8way_init( &ctx.blake );
-   blake512_8way_update( &ctx.blake, input, 80 );
-   blake512_8way_close( &ctx.blake, vhash );
+   blake512_8way_full( &ctx.blake, vhash, input, 80 );
-   bmw512_8way_init( &ctx.bmw );
-   bmw512_8way_update( &ctx.bmw, vhash, 64 );
-   bmw512_8way_close( &ctx.bmw, vhash );
+   bmw512_8way_full( &ctx.bmw, vhash, vhash, 64 );
#if defined(__VAES__)
   rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
-   groestl512_4way_init( &ctx.groestl, 64 );
-   groestl512_4way_update_close( &ctx.groestl, vhashA, vhashA, 512 );
-   groestl512_4way_init( &ctx.groestl, 64 );
-   groestl512_4way_update_close( &ctx.groestl, vhashB, vhashB, 512 );
+   groestl512_4way_full( &ctx.groestl, vhashA, vhashA, 64 );
+   groestl512_4way_full( &ctx.groestl, vhashB, vhashB, 64 );
   rintrlv_4x128_8x64( vhash, vhashA, vhashB, 512 );
#else
   dintrlv_8x64_512( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
                     vhash );
-   init_groestl( &ctx.groestl, 64 );
-   update_and_final_groestl( &ctx.groestl, (char*)hash0,
-                             (const char*)hash0, 512 );
-   init_groestl( &ctx.groestl, 64 );
-   update_and_final_groestl( &ctx.groestl, (char*)hash1,
-                             (const char*)hash1, 512 );
-   init_groestl( &ctx.groestl, 64 );
-   update_and_final_groestl( &ctx.groestl, (char*)hash2,
-                             (const char*)hash2, 512 );
-   init_groestl( &ctx.groestl, 64 );
-   update_and_final_groestl( &ctx.groestl, (char*)hash3,
-                             (const char*)hash3, 512 );
-   init_groestl( &ctx.groestl, 64 );
-   update_and_final_groestl( &ctx.groestl, (char*)hash4,
-                             (const char*)hash4, 512 );
-   init_groestl( &ctx.groestl, 64 );
-   update_and_final_groestl( &ctx.groestl, (char*)hash5,
-                             (const char*)hash5, 512 );
-   init_groestl( &ctx.groestl, 64 );
-   update_and_final_groestl( &ctx.groestl, (char*)hash6,
-                             (const char*)hash6, 512 );
-   init_groestl( &ctx.groestl, 64 );
-   update_and_final_groestl( &ctx.groestl, (char*)hash7,
-                             (const char*)hash7, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash4, (char*)hash4, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash5, (char*)hash5, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash6, (char*)hash6, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash7, (char*)hash7, 512 );
   intrlv_8x64_512( vhash, hash0, hash1, hash2, hash3,
                    hash4, hash5, hash6, hash7 );
#endif
-   skein512_8way_init( &ctx.skein );
-   skein512_8way_update( &ctx.skein, vhash, 64 );
-   skein512_8way_close( &ctx.skein, vhash );
+   skein512_8way_full( &ctx.skein, vhash, vhash, 64 );
jh512_8way_init( &ctx.jh );
jh512_8way_update( &ctx.jh, vhash, 64 );
jh512_8way_close( &ctx.jh, vhash );
@@ -153,24 +129,20 @@ void x22i_8way_hash( void *output, const void *input )
keccak512_8way_update( &ctx.keccak, vhash, 64 );
keccak512_8way_close( &ctx.keccak, vhash );
+   if ( work_restart[thrid].restart ) return 0;
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
-   luffa_4way_init( &ctx.luffa, 512 );
-   luffa_4way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
-   luffa_4way_init( &ctx.luffa, 512 );
-   luffa_4way_update_close( &ctx.luffa, vhashB, vhashB, 64 );
+   luffa512_4way_full( &ctx.luffa, vhashA, vhashA, 64 );
+   luffa512_4way_full( &ctx.luffa, vhashB, vhashB, 64 );
-   cube_4way_init( &ctx.cube, 512, 16, 32 );
-   cube_4way_update_close( &ctx.cube, vhashA, vhashA, 64 );
-   cube_4way_init( &ctx.cube, 512, 16, 32 );
-   cube_4way_update_close( &ctx.cube, vhashB, vhashB, 64 );
+   cube_4way_full( &ctx.cube, vhashA, 512, vhashA, 64 );
+   cube_4way_full( &ctx.cube, vhashB, 512, vhashB, 64 );
#if defined(__VAES__)
-   shavite512_4way_init( &ctx.shavite );
-   shavite512_4way_update_close( &ctx.shavite, vhashA, vhashA, 64 );
-   shavite512_4way_init( &ctx.shavite );
-   shavite512_4way_update_close( &ctx.shavite, vhashB, vhashB, 64 );
+   shavite512_4way_full( &ctx.shavite, vhashA, vhashA, 64 );
+   shavite512_4way_full( &ctx.shavite, vhashB, vhashB, 64 );
#else
@@ -207,17 +179,13 @@ void x22i_8way_hash( void *output, const void *input )
#endif
-   simd_4way_init( &ctx.simd, 512 );
-   simd_4way_update_close( &ctx.simd, vhashA, vhashA, 512 );
-   simd_4way_init( &ctx.simd, 512 );
-   simd_4way_update_close( &ctx.simd, vhashB, vhashB, 512 );
+   simd512_4way_full( &ctx.simd, vhashA, vhashA, 64 );
+   simd512_4way_full( &ctx.simd, vhashB, vhashB, 64 );
#if defined(__VAES__)
-   echo_4way_init( &ctx.echo, 512 );
-   echo_4way_update_close( &ctx.echo, vhashA, vhashA, 512 );
-   echo_4way_init( &ctx.echo, 512 );
-   echo_4way_update_close( &ctx.echo, vhashB, vhashB, 512 );
+   echo_4way_full( &ctx.echo, vhashA, 512, vhashA, 64 );
+   echo_4way_full( &ctx.echo, vhashB, 512, vhashB, 64 );
rintrlv_4x128_8x64( vhash, vhashA, vhashB, 512 );
@@ -226,36 +194,30 @@ void x22i_8way_hash( void *output, const void *input )
dintrlv_4x128_512( hash0, hash1, hash2, hash3, vhashA );
dintrlv_4x128_512( hash4, hash5, hash6, hash7, vhashB );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash0,
(const BitSequence*)hash0, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash1,
(const BitSequence*)hash1, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash2,
(const BitSequence*)hash2, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash3,
(const BitSequence*)hash3, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash4,
(const BitSequence*)hash4, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash5,
(const BitSequence*)hash5, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash6,
(const BitSequence*)hash6, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash7,
(const BitSequence*)hash7, 512 );
echo_full( &ctx.echo, (BitSequence *)hash0, 512,
(const BitSequence *)hash0, 64 );
echo_full( &ctx.echo, (BitSequence *)hash1, 512,
(const BitSequence *)hash1, 64 );
echo_full( &ctx.echo, (BitSequence *)hash2, 512,
(const BitSequence *)hash2, 64 );
echo_full( &ctx.echo, (BitSequence *)hash3, 512,
(const BitSequence *)hash3, 64 );
echo_full( &ctx.echo, (BitSequence *)hash4, 512,
(const BitSequence *)hash4, 64 );
echo_full( &ctx.echo, (BitSequence *)hash5, 512,
(const BitSequence *)hash5, 64 );
echo_full( &ctx.echo, (BitSequence *)hash6, 512,
(const BitSequence *)hash6, 64 );
echo_full( &ctx.echo, (BitSequence *)hash7, 512,
(const BitSequence *)hash7, 64 );
intrlv_8x64_512( vhash, hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7 );
#endif
if ( work_restart[thrid].restart ) return 0;
hamsi512_8way_init( &ctx.hamsi );
hamsi512_8way_update( &ctx.hamsi, vhash, 64 );
hamsi512_8way_close( &ctx.hamsi, vhash );
@@ -388,6 +350,8 @@ void x22i_8way_hash( void *output, const void *input )
sph_tiger (&ctx.tiger, (const void*) hash7, 64);
sph_tiger_close(&ctx.tiger, (void*) hashA7);
if ( work_restart[thrid].restart ) return 0;
memset( hash0, 0, 64 );
memset( hash1, 0, 64 );
memset( hash2, 0, 64 );
@@ -441,8 +405,58 @@ void x22i_8way_hash( void *output, const void *input )
sha256_8way_init( &ctx.sha256 );
sha256_8way_update( &ctx.sha256, vhash, 64 );
sha256_8way_close( &ctx.sha256, output );
return 1;
}
int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hashd7 = &(hash[7*8]);
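// 8x32 interleaved layout: 32-bit word w of lane l lives at hash[ w*8 + l ],
// so hashd7[ lane ] is hash word 7 of that lane, the word tested against
// the target below.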
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
__m512i *noncev = (__m512i*)vdata + 9;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const uint32_t targ32 = ptarget[7];
const bool bench = opt_benchmark;
if ( bench ) ptarget[7] = 0x08ff;
InitializeSWIFFTX();
mm512_bswap32_intrlv80_8x64( vdata, pdata );
*noncev = mm512_intrlv_blend_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
if ( x22i_8way_hash( hash, vdata, thr_id ) )
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( ( hashd7[ lane ] <= targ32 ) && !bench ) )
{
extr_lane_8x32( lane_hash, hash, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) ) )
{
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
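// Nonces sit in the high 32 bits of each interleaved 64-bit lane, so adding
// 0x0000000800000000 per 64-bit element advances all 8 lane nonces by 8.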
*noncev = _mm512_add_epi32( *noncev,
m512_const1_64( 0x0000000800000000 ) );
n += 8;
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
/*
int scanhash_x22i_8way( struct work* work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
@@ -488,6 +502,7 @@ int scanhash_x22i_8way( struct work* work, uint32_t max_nonce,
*hashes_done = n - first_nonce;
return 0;
}
*/
#elif defined(X22I_4WAY)
@@ -516,7 +531,7 @@ union _x22i_4way_ctx_overlay
};
typedef union _x22i_4way_ctx_overlay x22i_ctx_overlay;
void x22i_4way_hash( void *output, const void *input )
int x22i_4way_hash( void *output, const void *input, int thrid )
{
uint64_t hash0[8*4] __attribute__ ((aligned (64)));
uint64_t hash1[8*4] __attribute__ ((aligned (64)));
@@ -531,33 +546,21 @@ void x22i_4way_hash( void *output, const void *input )
unsigned char hashA3[64] __attribute__((aligned(32))) = {0};
x22i_ctx_overlay ctx;
blake512_4way_init( &ctx.blake );
blake512_4way_update( &ctx.blake, input, 80 );
blake512_4way_close( &ctx.blake, vhash );
blake512_4way_full( &ctx.blake, vhash, input, 80 );
bmw512_4way_init( &ctx.bmw );
bmw512_4way_update( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(const char*)hash0, 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash1,
(const char*)hash1, 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash2,
(const char*)hash2, 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(const char*)hash3, 512 );
groestl512_full( &ctx.groestl, (char*)hash0, (const char*)hash0, 512 );
groestl512_full( &ctx.groestl, (char*)hash1, (const char*)hash1, 512 );
groestl512_full( &ctx.groestl, (char*)hash2, (const char*)hash2, 512 );
groestl512_full( &ctx.groestl, (char*)hash3, (const char*)hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
skein512_4way_init( &ctx.skein );
skein512_4way_update( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhash );
skein512_4way_full( &ctx.skein, vhash, vhash, 64 );
jh512_4way_init( &ctx.jh );
jh512_4way_update( &ctx.jh, vhash, 64 );
@@ -567,46 +570,38 @@ void x22i_4way_hash( void *output, const void *input )
keccak512_4way_update( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
if ( work_restart[thrid].restart ) return 0;
rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashB, vhashB, 64 );
luffa512_2way_full( &ctx.luffa, vhashA, vhashA, 64 );
luffa512_2way_full( &ctx.luffa, vhashB, vhashB, 64 );
cube_2way_init( &ctx.cube, 512, 16, 32 );
cube_2way_update_close( &ctx.cube, vhashA, vhashA, 64 );
cube_2way_init( &ctx.cube, 512, 16, 32 );
cube_2way_update_close( &ctx.cube, vhashB, vhashB, 64 );
cube_2way_full( &ctx.cube, vhashA, 512, vhashA, 64 );
cube_2way_full( &ctx.cube, vhashB, 512, vhashB, 64 );
shavite512_2way_full( &ctx.shavite, vhashA, vhashA, 64 );
shavite512_2way_full( &ctx.shavite, vhashB, vhashB, 64 );
shavite512_2way_init( &ctx.shavite );
shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, 64 );
shavite512_2way_init( &ctx.shavite );
shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashA, vhashA, 512 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );
simd512_2way_full( &ctx.simd, vhashA, vhashA, 64 );
simd512_2way_full( &ctx.simd, vhashB, vhashB, 64 );
dintrlv_2x128_512( hash0, hash1, vhashA );
dintrlv_2x128_512( hash2, hash3, vhashB );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash0,
(const BitSequence*)hash0, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash1,
(const BitSequence*)hash1, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash2,
(const BitSequence*)hash2, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash3,
(const BitSequence*)hash3, 512 );
echo_full( &ctx.echo, (BitSequence *)hash0, 512,
(const BitSequence *)hash0, 64 );
echo_full( &ctx.echo, (BitSequence *)hash1, 512,
(const BitSequence *)hash1, 64 );
echo_full( &ctx.echo, (BitSequence *)hash2, 512,
(const BitSequence *)hash2, 64 );
echo_full( &ctx.echo, (BitSequence *)hash3, 512,
(const BitSequence *)hash3, 64 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
if ( work_restart[thrid].restart ) return 0;
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way_update( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
@@ -652,6 +647,8 @@ void x22i_4way_hash( void *output, const void *input )
sha512_4way_close( &ctx.sha512, vhash );
dintrlv_4x64_512( &hash0[24], &hash1[24], &hash2[24], &hash3[24], vhash );
if ( work_restart[thrid].restart ) return 0;
ComputeSingleSWIFFTX((unsigned char*)hash0, (unsigned char*)hashA0);
ComputeSingleSWIFFTX((unsigned char*)hash1, (unsigned char*)hashA1);
ComputeSingleSWIFFTX((unsigned char*)hash2, (unsigned char*)hashA2);
@@ -684,6 +681,8 @@ void x22i_4way_hash( void *output, const void *input )
sph_tiger (&ctx.tiger, (const void*) hash3, 64);
sph_tiger_close(&ctx.tiger, (void*) hashA3);
if ( work_restart[thrid].restart ) return 0;
memset( hash0, 0, 64 );
memset( hash1, 0, 64 );
memset( hash2, 0, 64 );
@@ -716,50 +715,53 @@ void x22i_4way_hash( void *output, const void *input )
sha256_4way_init( &ctx.sha256 );
sha256_4way_update( &ctx.sha256, vhash, 64 );
sha256_4way_close( &ctx.sha256, output );
}
return 1;
}
int scanhash_x22i_4way( struct work* work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*16] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[7<<2]);
uint32_t hash[8*4] __attribute__ ((aligned (64)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hashd7 = &(hash[ 7*4 ]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 9; // aligned
const uint32_t last_nonce = max_nonce - 4;
__m256i *noncev = (__m256i*)vdata + 9;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const uint32_t Htarg = ptarget[7];
const uint32_t targ32 = ptarget[7];
const bool bench = opt_benchmark;
if ( bench ) ptarget[7] = 0x08ff;
if (opt_benchmark)
((uint32_t*)ptarget)[7] = 0x08ff;
InitializeSWIFFTX();
mm256_bswap32_intrlv80_4x64( vdata, pdata );
*noncev = mm256_intrlv_blend_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
x22i_4way_hash( hash, vdata );
if ( x22i_4way_hash( hash, vdata, thr_id ) )
for ( int lane = 0; lane < 4; lane++ )
if unlikely( ( hash7[ lane ] <= Htarg ) )
if ( unlikely( hashd7[ lane ] <= targ32 && !bench ) )
{
extr_lane_4x32( lane_hash, hash, lane, 256 );
if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm256_add_epi32( *noncev,
m256_const1_64( 0x0000000400000000 ) );
n += 4;
} while ( likely( ( n < max_nonce - 4 ) && !work_restart[thr_id].restart ) );
*hashes_done = n - first_nonce + 1;
} while ( likely( ( n <= last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}

View File

@@ -16,19 +16,19 @@ bool register_x22i_algo( algo_gate_t* gate );
#if defined(X22I_8WAY)
void x22i_8way_hash( void *state, const void *input );
int x22i_8way_hash( void *state, const void *input, int thrid );
int scanhash_x22i_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined(X22I_4WAY)
void x22i_4way_hash( void *state, const void *input );
int x22i_4way_hash( void *state, const void *input, int thrid );
int scanhash_x22i_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#else
void x22i_hash( void *state, const void *input );
int x22i_hash( void *state, const void *input, int thrid );
int scanhash_x22i( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
@@ -44,19 +44,19 @@ bool register_x25i_algo( algo_gate_t* gate );
#if defined(X25X_8WAY)
void x25x_8way_hash( void *state, const void *input );
int x25x_8way_hash( void *state, const void *input, int thrid );
int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined(X25X_4WAY)
void x25x_4way_hash( void *state, const void *input );
int x25x_4way_hash( void *state, const void *input, int thrid );
int scanhash_x25x_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#else
void x25x_hash( void *state, const void *input );
int x25x_hash( void *state, const void *input, int thrid );
int scanhash_x25x( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );

View File

@@ -59,7 +59,7 @@ union _x22i_context_overlay
};
typedef union _x22i_context_overlay x22i_context_overlay;
void x22i_hash( void *output, const void *input )
int x22i_hash( void *output, const void *input, int thrid )
{
unsigned char hash[64 * 4] __attribute__((aligned(64))) = {0};
unsigned char hash2[65] __attribute__((aligned(64))) = {0};
@@ -95,6 +95,8 @@ void x22i_hash( void *output, const void *input )
sph_keccak512(&ctx.keccak, (const void*) hash, 64);
sph_keccak512_close(&ctx.keccak, hash);
if ( work_restart[thrid].restart ) return 0;
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
(const BitSequence*)hash, 64 );
@@ -121,6 +123,8 @@ void x22i_hash( void *output, const void *input )
sph_echo512_close( &ctx.echo, hash );
#endif
if ( work_restart[thrid].restart ) return 0;
sph_hamsi512_init(&ctx.hamsi);
sph_hamsi512(&ctx.hamsi, (const void*) hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash);
@@ -143,6 +147,8 @@ void x22i_hash( void *output, const void *input )
ComputeSingleSWIFFTX((unsigned char*)hash, (unsigned char*)hash2);
if ( work_restart[thrid].restart ) return 0;
memset(hash, 0, 64);
sph_haval256_5_init(&ctx.haval);
sph_haval256_5(&ctx.haval,(const void*) hash2, 64);
@@ -165,42 +171,42 @@ void x22i_hash( void *output, const void *input )
SHA256_Final( (unsigned char*) hash, &ctx.sha256 );
memcpy(output, hash, 32);
return 1;
}
int scanhash_x22i( struct work* work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
int scanhash_x22i( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr)
{
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t hash[8] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
uint32_t n = first_nonce;
uint32_t edata[20] __attribute__((aligned(64)));
uint32_t hash64[8] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = n;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
if (opt_benchmark)
((uint32_t*)ptarget)[7] = 0x08ff;
for (int k=0; k < 20; k++)
be32enc(&endiandata[k], pdata[k]);
if ( bench ) ptarget[7] = 0x08ff;
mm128_bswap32_80( edata, pdata );
InitializeSWIFFTX();
do
{
pdata[19] = ++n;
be32enc( &endiandata[19], n );
x22i_hash( hash, endiandata );
if ( hash[7] < Htarg )
if ( fulltest( hash, ptarget ) && !opt_benchmark )
submit_solution( work, hash, mythr );
} while ( n < max_nonce && !work_restart[thr_id].restart );
*hashes_done = pdata[19] - first_nonce;
return 0;
edata[19] = n;
if ( x22i_hash( hash64, edata, thr_id ) )
if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
{
pdata[19] = bswap_32( n );
submit_solution( work, hash64, mythr );
}
n++;
} while ( n < max_nonce && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce;
pdata[19] = n;
return 0;
}
#endif

View File

@@ -94,7 +94,7 @@ union _x25x_8way_ctx_overlay
};
typedef union _x25x_8way_ctx_overlay x25x_8way_ctx_overlay;
void x25x_8way_hash( void *output, const void *input )
int x25x_8way_hash( void *output, const void *input, int thrid )
{
uint64_t vhash[8*8] __attribute__ ((aligned (128)));
unsigned char hash0[25][64] __attribute__((aligned(64))) = {0};
@@ -179,13 +179,15 @@ void x25x_8way_hash( void *output, const void *input )
jh512_8way_close( &ctx.jh, vhash );
dintrlv_8x64_512( hash0[4], hash1[4], hash2[4], hash3[4],
hash4[4], hash5[4], hash6[4], hash7[4], vhash );
keccak512_8way_init( &ctx.keccak );
keccak512_8way_update( &ctx.keccak, vhash, 64 );
keccak512_8way_close( &ctx.keccak, vhash );
dintrlv_8x64_512( hash0[5], hash1[5], hash2[5], hash3[5],
hash4[5], hash5[5], hash6[5], hash7[5], vhash );
if ( work_restart[thrid].restart ) return 0;
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
luffa_4way_init( &ctx.luffa, 512 );
@@ -261,6 +263,7 @@ void x25x_8way_hash( void *output, const void *input )
intrlv_8x64_512( vhash, hash0[10], hash1[10], hash2[10], hash3[10],
hash4[10], hash5[10], hash6[10], hash7[10] );
#else
init_echo( &ctx.echo, 512 );
@@ -292,6 +295,8 @@ void x25x_8way_hash( void *output, const void *input )
#endif
if ( work_restart[thrid].restart ) return 0;
hamsi512_8way_init( &ctx.hamsi );
hamsi512_8way_update( &ctx.hamsi, vhash, 64 );
hamsi512_8way_close( &ctx.hamsi, vhash );
@@ -407,6 +412,8 @@ void x25x_8way_hash( void *output, const void *input )
sph_tiger (&ctx.tiger, (const void*) hash7[17], 64);
sph_tiger_close(&ctx.tiger, (void*) hash7[18]);
if ( work_restart[thrid].restart ) return 0;
intrlv_2x256( vhash, hash0[18], hash1[18], 256 );
LYRA2X_2WAY( vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash0[19], hash1[19], vhash, 256 );
@@ -468,6 +475,8 @@ void x25x_8way_hash( void *output, const void *input )
laneHash(512, (const BitSequence*)hash6[22], 512, (BitSequence*)hash6[23]);
laneHash(512, (const BitSequence*)hash7[22], 512, (BitSequence*)hash7[23]);
if ( work_restart[thrid].restart ) return 0;
x25x_shuffle( hash0 );
x25x_shuffle( hash1 );
x25x_shuffle( hash2 );
@@ -528,8 +537,59 @@ void x25x_8way_hash( void *output, const void *input )
blake2s_8way_init( &ctx.blake2s, 32 );
blake2s_8way_full_blocks( &ctx.blake2s, output, vhashX, 64*24 );
return 1;
}
int scanhash_x25x_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hashd7 = &(hash[7*8]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
__m512i *noncev = (__m512i*)vdata + 9;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const uint32_t targ32 = ptarget[7];
const bool bench = opt_benchmark;
if ( bench ) ptarget[7] = 0x08ff;
InitializeSWIFFTX();
mm512_bswap32_intrlv80_8x64( vdata, pdata );
*noncev = mm512_intrlv_blend_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
if ( x25x_8way_hash( hash, vdata, thr_id ) )
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( ( hashd7[ lane ] <= targ32 ) && !bench ) )
{
extr_lane_8x32( lane_hash, hash, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) ) )
{
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm512_add_epi32( *noncev,
m512_const1_64( 0x0000000800000000 ) );
n += 8;
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
/*
int scanhash_x25x_8way( struct work* work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
@@ -574,6 +634,7 @@ int scanhash_x25x_8way( struct work* work, uint32_t max_nonce,
*hashes_done = n - first_nonce;
return 0;
}
*/
#elif defined(X25X_4WAY)
@@ -604,7 +665,7 @@ union _x25x_4way_ctx_overlay
};
typedef union _x25x_4way_ctx_overlay x25x_4way_ctx_overlay;
void x25x_4way_hash( void *output, const void *input )
int x25x_4way_hash( void *output, const void *input, int thrid )
{
uint64_t vhash[8*4] __attribute__ ((aligned (128)));
unsigned char hash0[25][64] __attribute__((aligned(64))) = {0};
@@ -614,9 +675,7 @@ void x25x_4way_hash( void *output, const void *input )
unsigned char vhashX[24][64*4] __attribute__ ((aligned (64)));
x25x_4way_ctx_overlay ctx __attribute__ ((aligned (64)));
blake512_4way_init( &ctx.blake );
blake512_4way_update( &ctx.blake, input, 80 );
blake512_4way_close( &ctx.blake, vhash );
blake512_4way_full( &ctx.blake, vhash, input, 80 );
dintrlv_4x64_512( hash0[0], hash1[0], hash2[0], hash3[0], vhash );
bmw512_4way_init( &ctx.bmw );
@@ -624,24 +683,13 @@ void x25x_4way_hash( void *output, const void *input )
bmw512_4way_close( &ctx.bmw, vhash );
dintrlv_4x64_512( hash0[1], hash1[1], hash2[1], hash3[1], vhash );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0[2],
(const char*)hash0[1], 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash1[2],
(const char*)hash1[1], 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash2[2],
(const char*)hash2[1], 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3[2],
(const char*)hash3[1], 512 );
groestl512_full( &ctx.groestl, (char*)hash0[2], (const char*)hash0[1], 512 );
groestl512_full( &ctx.groestl, (char*)hash1[2], (const char*)hash1[1], 512 );
groestl512_full( &ctx.groestl, (char*)hash2[2], (const char*)hash2[1], 512 );
groestl512_full( &ctx.groestl, (char*)hash3[2], (const char*)hash3[1], 512 );
intrlv_4x64_512( vhash, hash0[2], hash1[2], hash2[2], hash3[2] );
skein512_4way_init( &ctx.skein );
skein512_4way_update( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhash );
skein512_4way_full( &ctx.skein, vhash, vhash, 64 );
dintrlv_4x64_512( hash0[3], hash1[3], hash2[3], hash3[3], vhash );
jh512_4way_init( &ctx.jh );
@@ -649,37 +697,27 @@ void x25x_4way_hash( void *output, const void *input )
jh512_4way_close( &ctx.jh, vhash );
dintrlv_4x64_512( hash0[4], hash1[4], hash2[4], hash3[4], vhash );
if ( work_restart[thrid].restart ) return 0;
keccak512_4way_init( &ctx.keccak );
keccak512_4way_update( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
dintrlv_4x64_512( hash0[5], hash1[5], hash2[5], hash3[5], vhash );
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash0[6],
(const BitSequence*)hash0[5], 64 );
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash1[6],
(const BitSequence*)hash1[5], 64 );
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash2[6],
(const BitSequence*)hash2[5], 64 );
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash3[6],
(const BitSequence*)hash3[5], 64 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash0[7],
(const byte*)hash0[6], 64 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash1[7],
(const byte*)hash1[6], 64 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash2[7],
(const byte*)hash2[6], 64 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash3[7],
(const byte*)hash3[6], 64 );
luffa_full( &ctx.luffa, (BitSequence*)hash0[6], 512,
(const BitSequence*)hash0[5], 64 );
luffa_full( &ctx.luffa, (BitSequence*)hash1[6], 512,
(const BitSequence*)hash1[5], 64 );
luffa_full( &ctx.luffa, (BitSequence*)hash2[6], 512,
(const BitSequence*)hash2[5], 64 );
luffa_full( &ctx.luffa, (BitSequence*)hash3[6], 512,
(const BitSequence*)hash3[5], 64 );
cubehash_full( &ctx.cube, (byte*)hash0[7], 512, (const byte*)hash0[6], 64 );
cubehash_full( &ctx.cube, (byte*)hash1[7], 512, (const byte*)hash1[6], 64 );
cubehash_full( &ctx.cube, (byte*)hash2[7], 512, (const byte*)hash2[6], 64 );
cubehash_full( &ctx.cube, (byte*)hash3[7], 512, (const byte*)hash3[6], 64 );
sph_shavite512_init(&ctx.shavite);
sph_shavite512(&ctx.shavite, (const void*) hash0[7], 64);
sph_shavite512_close(&ctx.shavite, hash0[8]);
@@ -693,34 +731,28 @@ void x25x_4way_hash( void *output, const void *input )
sph_shavite512(&ctx.shavite, (const void*) hash3[7], 64);
sph_shavite512_close(&ctx.shavite, hash3[8]);
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence*)hash0[9],
(const BitSequence*)hash0[8], 512 );
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence*)hash1[9],
(const BitSequence*)hash1[8], 512 );
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence*)hash2[9],
(const BitSequence*)hash2[8], 512 );
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence*)hash3[9],
(const BitSequence*)hash3[8], 512 );
simd_full( &ctx.simd, (BitSequence*)hash0[9],
(const BitSequence*)hash0[8], 512 );
simd_full( &ctx.simd, (BitSequence*)hash1[9],
(const BitSequence*)hash1[8], 512 );
simd_full( &ctx.simd, (BitSequence*)hash2[9],
(const BitSequence*)hash2[8], 512 );
simd_full( &ctx.simd, (BitSequence*)hash3[9],
(const BitSequence*)hash3[8], 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash0[10],
(const BitSequence*)hash0[9], 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash1[10],
(const BitSequence*)hash1[9], 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash2[10],
(const BitSequence*)hash2[9], 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash3[10],
(const BitSequence*)hash3[9], 512 );
echo_full( &ctx.echo, (BitSequence *)hash0[10], 512,
(const BitSequence *)hash0[ 9], 64 );
echo_full( &ctx.echo, (BitSequence *)hash1[10], 512,
(const BitSequence *)hash1[ 9], 64 );
echo_full( &ctx.echo, (BitSequence *)hash2[10], 512,
(const BitSequence *)hash2[ 9], 64 );
echo_full( &ctx.echo, (BitSequence *)hash3[10], 512,
(const BitSequence *)hash3[ 9], 64 );
intrlv_4x64_512( vhash, hash0[10], hash1[10], hash2[10], hash3[10] );
if ( work_restart[thrid].restart ) return 0;
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way_update( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
@@ -802,6 +834,8 @@ void x25x_4way_hash( void *output, const void *input )
LYRA2RE( (void*)hash3[19], 32, (const void*)hash3[18], 32,
(const void*)hash3[18], 32, 1, 4, 4 );
if ( work_restart[thrid].restart ) return 0;
sph_gost512_init(&ctx.gost);
sph_gost512 (&ctx.gost, (const void*) hash0[19], 64);
sph_gost512_close(&ctx.gost, (void*) hash0[20]);
@@ -833,6 +867,8 @@ void x25x_4way_hash( void *output, const void *input )
laneHash(512, (const BitSequence*)hash2[22], 512, (BitSequence*)hash2[23]);
laneHash(512, (const BitSequence*)hash3[22], 512, (BitSequence*)hash3[23]);
if ( work_restart[thrid].restart ) return 0;
x25x_shuffle( hash0 );
x25x_shuffle( hash1 );
x25x_shuffle( hash2 );
@@ -865,48 +901,52 @@ void x25x_4way_hash( void *output, const void *input )
blake2s_4way_init( &ctx.blake2s, 32 );
blake2s_4way_full_blocks( &ctx.blake2s, output, vhashX, 64*24 );
return 1;
}
int scanhash_x25x_4way( struct work* work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[16*4] __attribute__ ((aligned (128)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[7<<2]);
uint32_t hash[8*4] __attribute__ ((aligned (64)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hashd7 = &(hash[ 7*4 ]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 9; // aligned
uint32_t n = first_nonce;
const uint32_t last_nonce = max_nonce - 4;
__m256i *noncev = (__m256i*)vdata + 9;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const uint32_t Htarg = ptarget[7];
const uint32_t targ32 = ptarget[7];
const bool bench = opt_benchmark;
if (opt_benchmark)
((uint32_t*)ptarget)[7] = 0x08ff;
if ( bench ) ptarget[7] = 0x08ff;
InitializeSWIFFTX();
mm256_bswap32_intrlv80_4x64( vdata, pdata );
*noncev = mm256_intrlv_blend_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
x25x_4way_hash( hash, vdata );
for ( int lane = 0; lane < 4; lane++ ) if ( hash7[lane] <= Htarg )
if ( x25x_4way_hash( hash, vdata, thr_id ) )
for ( int lane = 0; lane < 4; lane++ )
if ( unlikely( hashd7[ lane ] <= targ32 && !bench ) )
{
extr_lane_4x32( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm256_add_epi32( *noncev,
m256_const1_64( 0x0000000400000000 ) );
n += 4;
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
} while ( likely( ( n <= last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}

View File

@@ -64,7 +64,7 @@ union _x25x_context_overlay
};
typedef union _x25x_context_overlay x25x_context_overlay;
void x25x_hash( void *output, const void *input )
int x25x_hash( void *output, const void *input, int thrid )
{
unsigned char hash[25][64] __attribute__((aligned(64))) = {0};
x25x_context_overlay ctx;
@@ -99,6 +99,8 @@ void x25x_hash( void *output, const void *input )
sph_keccak512(&ctx.keccak, (const void*) &hash[4], 64);
sph_keccak512_close(&ctx.keccak, &hash[5]);
if ( work_restart[thrid].restart ) return 0;
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)&hash[6],
(const BitSequence*)&hash[5], 64 );
@@ -125,7 +127,9 @@ void x25x_hash( void *output, const void *input )
sph_echo512_close( &ctx.echo, &hash[10] );
#endif
sph_hamsi512_init(&ctx.hamsi);
if ( work_restart[thrid].restart ) return 0;
sph_hamsi512_init(&ctx.hamsi);
sph_hamsi512(&ctx.hamsi, (const void*) &hash[10], 64);
sph_hamsi512_close(&ctx.hamsi, &hash[11]);
@@ -151,6 +155,8 @@ void x25x_hash( void *output, const void *input )
sph_haval256_5(&ctx.haval,(const void*) &hash[16], 64);
sph_haval256_5_close(&ctx.haval,&hash[17]);
if ( work_restart[thrid].restart ) return 0;
sph_tiger_init(&ctx.tiger);
sph_tiger (&ctx.tiger, (const void*) &hash[17], 64);
sph_tiger_close(&ctx.tiger, (void*) &hash[18]);
@@ -199,44 +205,42 @@ void x25x_hash( void *output, const void *input )
blake2s_simple( (uint8_t*)&hash[24], (const void*)(&hash[0]), 64 * 24 );
memcpy(output, &hash[24], 32);
return 1;
}
int scanhash_x25x( struct work* work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
int scanhash_x25x( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr)
{
uint32_t edata[20] __attribute__((aligned(64)));
uint32_t hash[8] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
uint32_t n = first_nonce;
uint32_t hash64[8] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = n;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
if (opt_benchmark)
((uint32_t*)ptarget)[7] = 0x08ff;
if ( bench ) ptarget[7] = 0x08ff;
mm128_bswap32_80( edata, pdata );
for (int k=0; k < 20; k++)
be32enc(&edata[k], pdata[k]);
InitializeSWIFFTX();
do
{
pdata[19] = ++n;
be32enc( &edata[19], n );
x25x_hash( hash, edata );
if ( hash[7] < Htarg )
if ( fulltest( hash, ptarget ) && !opt_benchmark )
submit_solution( work, hash, mythr );
} while ( n < max_nonce && !work_restart[thr_id].restart );
*hashes_done = pdata[19] - first_nonce;
return 0;
edata[19] = n;
if ( x25x_hash( hash64, edata, thr_id ) )
if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
{
pdata[19] = bswap_32( n );
submit_solution( work, hash64, mythr );
}
n++;
} while ( n < max_nonce && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce;
pdata[19] = n;
return 0;
}
#endif

View File

@@ -31,6 +31,7 @@
#undef HUGEPAGE_SIZE
#endif
/*
static __inline uint32_t
le32dec(const void *pp)
{
@@ -50,6 +51,7 @@ le32enc(void *pp, uint32_t x)
p[2] = (x >> 16) & 0xff;
p[3] = (x >> 24) & 0xff;
}
*/
static void *
alloc_region(yescrypt_region_t * region, size_t size)
@@ -154,7 +156,7 @@ int yescrypt_init_shared(yescrypt_shared_t * shared, const uint8_t * param, size
if (yescrypt_kdf(&dummy, shared1,
param, paramlen, NULL, 0, N, r, p, 0,
YESCRYPT_RW | YESCRYPT_PARALLEL_SMIX | __YESCRYPT_INIT_SHARED_1,
salt, sizeof(salt)))
salt, sizeof(salt), 0 ) )
goto out;
half1 = half2 = *shared;
@@ -166,19 +168,19 @@ int yescrypt_init_shared(yescrypt_shared_t * shared, const uint8_t * param, size
if (p > 1 && yescrypt_kdf(&half1, &half2.shared1,
param, paramlen, salt, sizeof(salt), N, r, p, 0,
YESCRYPT_RW | YESCRYPT_PARALLEL_SMIX | __YESCRYPT_INIT_SHARED_2,
salt, sizeof(salt)))
salt, sizeof(salt), 0 ))
goto out;
if (yescrypt_kdf(&half2, &half1.shared1,
param, paramlen, salt, sizeof(salt), N, r, p, 0,
YESCRYPT_RW | YESCRYPT_PARALLEL_SMIX | __YESCRYPT_INIT_SHARED_1,
salt, sizeof(salt)))
salt, sizeof(salt), 0))
goto out;
if (yescrypt_kdf(&half1, &half2.shared1,
param, paramlen, salt, sizeof(salt), N, r, p, 0,
YESCRYPT_RW | YESCRYPT_PARALLEL_SMIX | __YESCRYPT_INIT_SHARED_1,
buf, buflen))
buf, buflen, 0))
goto out;
shared->mask1 = mask;

View File

@@ -1149,7 +1149,7 @@ yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
const uint8_t * passwd, size_t passwdlen,
const uint8_t * salt, size_t saltlen,
uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
uint8_t * buf, size_t buflen)
uint8_t * buf, size_t buflen, int thrid )
{
uint8_t _ALIGN(128) sha256[32];
yescrypt_region_t tmp;
@@ -1157,6 +1157,7 @@ yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
size_t B_size, V_size, XY_size, need;
uint8_t * B, * S;
salsa20_blk_t * V, * XY;
int retval = 1;
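/* retval convention: 1 = success, 0 = aborted by work restart,
-1 = error (see out: below). */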
/*
* YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
@@ -1312,6 +1313,12 @@ yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);
if ( work_restart[thrid].restart )
{
retval = 0;
goto out;
}
if (t || flags)
memcpy(sha256, B, sizeof(sha256));
@@ -1339,9 +1346,21 @@ yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
}
}
if ( work_restart[thrid].restart )
{
retval = 0;
goto out;
}
/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);
if ( work_restart[thrid].restart )
{
retval = 0;
goto out;
}
/*
* Except when computing classic scrypt, allow all computation so far
* to be performed on the client. The final steps below match those of
@@ -1370,9 +1389,10 @@ yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
}
}
out:
if (free_region(&tmp))
return -1;
/* Success! */
return 0;
return retval;
}

View File

@@ -106,7 +106,8 @@ static const uint8_t* decode64_uint32(uint32_t* dst, uint32_t dstbits, const uin
}
uint8_t* yescrypt_r(const yescrypt_shared_t* shared, yescrypt_local_t* local,
const uint8_t* passwd, size_t passwdlen, const uint8_t* setting, uint8_t* buf, size_t buflen)
const uint8_t* passwd, size_t passwdlen, const uint8_t* setting,
uint8_t* buf, size_t buflen, int thrid )
{
uint8_t hash[HASH_SIZE];
const uint8_t * src, * salt;
@@ -210,7 +211,9 @@ uint8_t* yescrypt_r(const yescrypt_shared_t* shared, yescrypt_local_t* local,
return NULL;
}
if (yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen, N, r, p, 0, flags, hash, sizeof(hash))) {
if ( yescrypt_kdf( shared, local, passwd, passwdlen, salt, saltlen, N, r, p,
0, flags, hash, sizeof(hash), thrid ) == -1 )
{
printf("died10 ...");
fflush(stdout);
return NULL;
@@ -237,7 +240,7 @@ uint8_t* yescrypt_r(const yescrypt_shared_t* shared, yescrypt_local_t* local,
return buf;
}
uint8_t* yescrypt(const uint8_t* passwd, const uint8_t* setting)
uint8_t* yescrypt(const uint8_t* passwd, const uint8_t* setting, int thrid )
{
static uint8_t buf[4 + 1 + 5 + 5 + BYTES2CHARS(32) + 1 + HASH_LEN + 1];
yescrypt_shared_t shared;
@@ -252,7 +255,7 @@ uint8_t* yescrypt(const uint8_t* passwd, const uint8_t* setting)
return NULL;
}
retval = yescrypt_r(&shared, &local,
passwd, 80, setting, buf, sizeof(buf));
passwd, 80, setting, buf, sizeof(buf), thrid );
//printf("hashse='%s'\n", (char *)retval);
if (yescrypt_free_local(&local)) {
yescrypt_free_shared(&shared);
@@ -329,7 +332,7 @@ uint8_t* yescrypt_gensalt(uint32_t N_log2, uint32_t r, uint32_t p, yescrypt_flag
static int yescrypt_bsty(const uint8_t * passwd, size_t passwdlen,
const uint8_t * salt, size_t saltlen, uint64_t N, uint32_t r, uint32_t p,
uint8_t * buf, size_t buflen)
uint8_t * buf, size_t buflen, int thrid )
{
static __thread int initialized = 0;
static __thread yescrypt_shared_t shared;
@@ -349,7 +352,7 @@ static int yescrypt_bsty(const uint8_t * passwd, size_t passwdlen,
}
retval = yescrypt_kdf(&shared, &local,
passwd, passwdlen, salt, saltlen, N, r, p, 0, YESCRYPT_FLAGS,
buf, buflen);
buf, buflen, thrid );
#if 0
if (yescrypt_free_local(&local)) {
yescrypt_free_shared(&shared);
@@ -370,16 +373,16 @@ char *yescrypt_client_key = NULL;
int yescrypt_client_key_len = 0;
/* main hash 80 bytes input */
void yescrypt_hash( const char *input, char *output, uint32_t len )
int yescrypt_hash( const char *input, char *output, uint32_t len, int thrid )
{
yescrypt_bsty( (uint8_t*)input, len, (uint8_t*)input, len, YESCRYPT_N,
YESCRYPT_R, YESCRYPT_P, (uint8_t*)output, 32 );
return yescrypt_bsty( (uint8_t*)input, len, (uint8_t*)input, len, YESCRYPT_N,
YESCRYPT_R, YESCRYPT_P, (uint8_t*)output, 32, thrid );
}
/* for util.c test */
void yescrypthash(void *output, const void *input)
int yescrypthash(void *output, const void *input, int thrid)
{
yescrypt_hash((char*) input, (char*) output, 80);
return yescrypt_hash((char*) input, (char*) output, 80, thrid);
}
int scanhash_yescrypt( struct work *work, uint32_t max_nonce,
@@ -392,13 +395,13 @@ int scanhash_yescrypt( struct work *work, uint32_t max_nonce,
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce;
uint32_t n = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
int thr_id = mythr->id;
for ( int k = 0; k < 19; k++ )
be32enc( &endiandata[k], pdata[k] );
endiandata[19] = n;
do {
yescrypt_hash((char*) endiandata, (char*) vhash, 80);
if ( yescrypt_hash((char*) endiandata, (char*) vhash, 80, thr_id ) )
if unlikely( valid_hash( vhash, ptarget ) && !opt_benchmark )
{
be32enc( pdata+19, n );

View File

@@ -38,12 +38,13 @@ extern "C" {
#include <stdint.h>
#include <stdlib.h> /* for size_t */
#include <stdbool.h>
#include "miner.h"
//#define __SSE4_1__
void yescrypt_hash(const char* input, char* output, uint32_t len);
int yescrypt_hash(const char* input, char* output, uint32_t len, int thrid );
void yescrypthash(void *output, const void *input);
int yescrypthash(void *output, const void *input, int thrid );
/**
* crypto_scrypt(passwd, passwdlen, salt, saltlen, N, r, p, buf, buflen):
@@ -301,7 +302,7 @@ extern int yescrypt_kdf(const yescrypt_shared_t * __shared,
const uint8_t * __salt, size_t __saltlen,
uint64_t __N, uint32_t __r, uint32_t __p, uint32_t __t,
yescrypt_flags_t __flags,
uint8_t * __buf, size_t __buflen);
uint8_t * __buf, size_t __buflen, int thrid);
/**
* yescrypt_r(shared, local, passwd, passwdlen, setting, buf, buflen):
@@ -321,7 +322,7 @@ extern uint8_t * yescrypt_r(const yescrypt_shared_t * __shared,
yescrypt_local_t * __local,
const uint8_t * __passwd, size_t __passwdlen,
const uint8_t * __setting,
uint8_t * __buf, size_t __buflen);
uint8_t * __buf, size_t __buflen, int thrid);
/**
* yescrypt(passwd, setting):
@@ -339,7 +340,7 @@ extern uint8_t * yescrypt_r(const yescrypt_shared_t * __shared,
*
* MT-unsafe.
*/
extern uint8_t * yescrypt(const uint8_t * __passwd, const uint8_t * __setting);
extern uint8_t * yescrypt(const uint8_t * __passwd, const uint8_t * __setting, int thrid );
/**
* yescrypt_gensalt_r(N_log2, r, p, flags, src, srclen, buf, buflen):

View File

@@ -79,7 +79,7 @@ int main(int argc, const char * const *argv)
for (i = 0; i < sizeof(src); i++)
src.u8[i] = i * 3;
if (yespower_tls(src.u8, sizeof(src), &params, &dst)) {
if (!yespower_tls(src.u8, sizeof(src), &params, &dst)) {
puts("FAILED");
return 1;
}

View File

@@ -51,9 +51,13 @@ int scanhash_yespower_r8g( struct work *work, uint32_t max_nonce,
be32enc( &endiandata[ i], pdata[ i ]);
endiandata[19] = n;
// do sha256 prehash
SHA256_Init( &sha256_prehash_ctx );
SHA256_Update( &sha256_prehash_ctx, endiandata, 64 );
do {
yespower_tls( (unsigned char *)endiandata, params.perslen,
&params, (yespower_binary_t*)hash );
&params, (yespower_binary_t*)hash, thr_id );
if unlikely( valid_hash( hash, ptarget ) && !opt_benchmark )
{
@@ -73,6 +77,7 @@ bool register_yescryptr8g_algo( algo_gate_t* gate )
gate->optimizations = SSE2_OPT | SHA_OPT;
gate->scanhash = (void*)&scanhash_yespower_r8g;
gate->hash = (void*)&yespower_tls;
pk_buffer_size = 26;
opt_sapling = true;
opt_target_factor = 65536.0;
return true;

View File

@@ -0,0 +1,692 @@
/*-
* Copyright 2009 Colin Percival
* Copyright 2013-2018 Alexander Peslyak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*
* This is a proof-of-work focused fork of yescrypt, including reference and
* cut-down implementation of the obsolete yescrypt 0.5 (based off its first
* submission to PHC back in 2014) and a new proof-of-work specific variation
* known as yespower 1.0. The former is intended as an upgrade for
* cryptocurrencies that already use yescrypt 0.5 and the latter may be used
* as a further upgrade (hard fork) by those and other cryptocurrencies. The
* version of algorithm to use is requested through parameters, allowing for
* both algorithms to co-exist in client and miner implementations (such as in
* preparation for a hard-fork).
*
* This is the reference implementation. Its purpose is to provide a simple
* human- and machine-readable specification that implementations intended
* for actual use should be tested against. It is deliberately mostly not
* optimized, and it is not meant to be used in production. Instead, use
* yespower-opt.c.
*/
/*
#warning "This reference implementation is deliberately mostly not optimized. Use yespower-opt.c instead unless you're testing (against) the reference implementation on purpose."
*/
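/*
 * Version selection happens through the parameters. A hedged caller
 * sketch (field names and constants as consumed by yespower_8way()
 * below; the N/r values are only placeholders within the sanity-check
 * ranges):
 *
 *   yespower_params_t params = {
 *       .version = YESPOWER_1_0,
 *       .N = 2048, .r = 8,
 *       .pers = (const uint8_t *)"client key", .perslen = 10,
 *   };
 */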
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "algo/sha/hmac-sha256-hash-4way.h"
//#include "sysendian.h"
#include "yespower.h"
#if defined(__AVX2__)
static void blkcpy_8way( __m256i *dst, const __m256i *src, size_t count )
{
do {
*dst++ = *src++;
} while (--count);
}
static void blkxor_8way( __m256i *dst, const __m256i *src, size_t count )
{
do {
*dst++ ^= *src++;
} while (--count);
}
/**
* salsa20(B):
* Apply the Salsa20 core to the provided block.
*/
static void salsa20_8way( __m256i B[16], uint32_t rounds )
{
__m256i x[16];
size_t i;
/* SIMD unshuffle */
for ( i = 0; i < 16; i++ )
x[i * 5 % 16] = B[i];
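// The i*5 mod 16 map realizes scrypt's SIMD-shuffled Salsa20 word order;
// the store loop at the end of this function applies the same map in the
// opposite direction to reshuffle.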
for ( i = 0; i < rounds; i += 2 )
{
#define R( a, b, c ) mm256_rol_32( _mm256_add_epi32( a, b ), c )
/* Operate on columns */
x[ 4] = _mm256_xor_si256( x[ 4], R( x[ 0], x[12], 7 ) );
x[ 8] = _mm256_xor_si256( x[ 8], R( x[ 4], x[ 0], 9 ) );
x[12] = _mm256_xor_si256( x[12], R( x[ 8], x[ 4], 13 ) );
x[ 0] = _mm256_xor_si256( x[ 0], R( x[12], x[ 8], 18 ) );
x[ 9] = _mm256_xor_si256( x[ 9], R( x[ 5], x[ 1], 7 ) );
x[13] = _mm256_xor_si256( x[13], R( x[ 9], x[ 5], 9 ) );
x[ 1] = _mm256_xor_si256( x[ 1], R( x[13], x[ 9], 13 ) );
x[ 5] = _mm256_xor_si256( x[ 5], R( x[ 1], x[13], 18 ) );
x[14] = _mm256_xor_si256( x[14], R( x[10], x[ 6], 7 ) );
x[ 2] = _mm256_xor_si256( x[ 2], R( x[14], x[10], 9 ) );
x[ 6] = _mm256_xor_si256( x[ 6], R( x[ 2], x[14], 13 ) );
x[10] = _mm256_xor_si256( x[10], R( x[ 6], x[ 2], 18 ) );
x[ 3] = _mm256_xor_si256( x[ 3], R( x[15], x[11], 7 ) );
x[ 7] = _mm256_xor_si256( x[ 7], R( x[ 3], x[15], 9 ) );
x[11] = _mm256_xor_si256( x[11], R( x[ 7], x[ 3], 13 ) );
x[15] = _mm256_xor_si256( x[15], R( x[11], x[ 7], 18 ) );
/* Operate on rows */
x[ 1] = _mm256_xor_si256( x[ 1], R( x[ 0], x[ 3], 7 ) );
x[ 2] = _mm256_xor_si256( x[ 2], R( x[ 1], x[ 0], 9 ) );
x[ 3] = _mm256_xor_si256( x[ 3], R( x[ 2], x[ 1], 13 ) );
x[ 0] = _mm256_xor_si256( x[ 0], R( x[ 3], x[ 2], 18 ) );
x[ 6] = _mm256_xor_si256( x[ 6], R( x[ 5], x[ 4], 7 ) );
x[ 7] = _mm256_xor_si256( x[ 7], R( x[ 6], x[ 5], 9 ) );
x[ 4] = _mm256_xor_si256( x[ 4], R( x[ 7], x[ 6], 13 ) );
x[ 5] = _mm256_xor_si256( x[ 5], R( x[ 4], x[ 7], 18 ) );
x[11] = _mm256_xor_si256( x[11], R( x[10], x[ 9], 7 ) );
x[ 8] = _mm256_xor_si256( x[ 8], R( x[11], x[10], 9 ) );
x[ 9] = _mm256_xor_si256( x[ 9], R( x[ 8], x[11], 13 ) );
x[10] = _mm256_xor_si256( x[10], R( x[ 9], x[ 8], 18 ) );
x[12] = _mm256_xor_si256( x[12], R( x[15], x[14], 7 ) );
x[13] = _mm256_xor_si256( x[13], R( x[12], x[15], 9 ) );
x[14] = _mm256_xor_si256( x[14], R( x[13], x[12], 13 ) );
x[15] = _mm256_xor_si256( x[15], R( x[14], x[13], 18 ) );
#undef R
}
/* SIMD shuffle */
for (i = 0; i < 16; i++)
B[i] = _mm256_add_epi32( B[i], x[i * 5 % 16] );
}
/**
* blockmix_salsa(B):
* Compute B = BlockMix_{salsa20, 1}(B). The input B must be 128 bytes in
* length.
*/
static void blockmix_salsa_8way( __m256i *B, uint32_t rounds )
{
__m256i X[16];
size_t i;
/* 1: X <-- B_{2r - 1} */
blkcpy_8way( X, &B[16], 16 );
/* 2: for i = 0 to 2r - 1 do */
for ( i = 0; i < 2; i++ )
{
/* 3: X <-- H(X xor B_i) */
blkxor_8way( X, &B[i * 16], 16 );
salsa20_8way( X, rounds );
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
blkcpy_8way( &B[i * 16], X, 16 );
}
}
/*
* These are tunable, but they must meet certain constraints and are part of
* what defines a yespower version.
*/
#define PWXsimple 2
#define PWXgather 4
/* Version 0.5 */
#define PWXrounds_0_5 6
#define Swidth_0_5 8
/* Version 1.0 */
#define PWXrounds_1_0 3
#define Swidth_1_0 11
/* Derived values. Not tunable on their own. */
#define PWXbytes (PWXgather * PWXsimple * 8)
#define PWXwords (PWXbytes / sizeof(uint32_t))
#define rmin ((PWXbytes + 127) / 128)
/* Runtime derived values. Not tunable on their own. */
#define Swidth_to_Sbytes1(Swidth) ((1 << Swidth) * PWXsimple * 8)
#define Swidth_to_Smask(Swidth) (((1 << Swidth) - 1) * PWXsimple * 8)
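/* Worked example for yespower 1.0: Swidth = 11 gives
   Swidth_to_Sbytes1(11) = 2048 * 2 * 8 = 32 KiB per S-box, so
   ctx.Sbytes = 3 * 32 KiB = 96 KiB covering S0..S2 (see below). */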
typedef struct {
__m256i (*S0)[2], (*S1)[2], (*S2)[2];
__m256i *S;
yespower_version_t version;
uint32_t salsa20_rounds;
uint32_t PWXrounds, Swidth, Sbytes, Smask;
size_t w;
} pwxform_8way_ctx_t __attribute__ ((aligned (128)));
/**
* pwxform(B):
* Transform the provided block using the provided S-boxes.
*/
static void pwxform_8way( __m256i *B, pwxform_8way_ctx_t *ctx )
{
__m256i (*X)[PWXsimple][2] = (__m256i (*)[PWXsimple][2])B;
__m256i (*S0)[2] = ctx->S0, (*S1)[2] = ctx->S1, (*S2)[2] = ctx->S2;
__m256i Smask = _mm256_set1_epi32( ctx->Smask );
size_t w = ctx->w;
size_t i, j, k;
/* 1: for i = 0 to PWXrounds - 1 do */
for ( i = 0; i < ctx->PWXrounds; i++ )
{
/* 2: for j = 0 to PWXgather - 1 do */
for ( j = 0; j < PWXgather; j++ )
{
// Are these pointers or data?
__m256i xl = X[j][0][0];
__m256i xh = X[j][0][1];
__m256i (*p0)[2], (*p1)[2];
// 3: p0 <-- (lo(B_{j,0}) & Smask) / (PWXsimple * 8)
// playing with pointers
/*
p0 = S0 + (xl & Smask) / sizeof(*S0);
// 4: p1 <-- (hi(B_{j,0}) & Smask) / (PWXsimple * 8)
p1 = S1 + (xh & Smask) / sizeof(*S1);
*/
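// NOTE: with the index computation above commented out, p0 and p1 are
// read uninitialized below; xl/xh differ per lane, so this S-box lookup
// needs per-lane gathers rather than one shared pointer.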
/* 5: for k = 0 to PWXsimple - 1 do */
for ( k = 0; k < PWXsimple; k++ )
{
// shift from 32 bit data to 64 bit data
__m256i x0, x1, s00, s01, s10, s11;
__m128i *p0k = (__m128i*)p0[k];
__m128i *p1k = (__m128i*)p1[k];
s00 = _mm256_add_epi64( _mm256_cvtepu32_epi64( p0k[0] ),
_mm256_slli_epi64( _mm256_cvtepu32_epi64( p0k[2] ), 32 ) );
s01 = _mm256_add_epi64( _mm256_cvtepu32_epi64( p0k[1] ),
_mm256_slli_epi64( _mm256_cvtepu32_epi64( p0k[3] ), 32 ) );
s10 = _mm256_add_epi64( _mm256_cvtepu32_epi64( p1k[0] ),
_mm256_slli_epi64( _mm256_cvtepu32_epi64( p1k[2] ), 32 ) );
s11 = _mm256_add_epi64( _mm256_cvtepu32_epi64( p1k[1] ),
_mm256_slli_epi64( _mm256_cvtepu32_epi64( p1k[3] ), 32 ) );
__m128i *xx = (__m128i*)X[j][k];
x0 = _mm256_mul_epu32( _mm256_cvtepu32_epi64( xx[0] ),
_mm256_cvtepu32_epi64( xx[2] ) );
x1 = _mm256_mul_epu32( _mm256_cvtepu32_epi64( xx[1] ),
_mm256_cvtepu32_epi64( xx[3] ) );
x0 = _mm256_add_epi64( x0, s00 );
x1 = _mm256_add_epi64( x1, s01 );
x0 = _mm256_xor_si256( x0, s10 );
x1 = _mm256_xor_si256( x1, s11 );
X[j][k][0] = x0;
X[j][k][1] = x1;
}
if ( ctx->version != YESPOWER_0_5 &&
( i == 0 || j < PWXgather / 2 ) )
{
if ( j & 1 )
{
for ( k = 0; k < PWXsimple; k++ )
{
S1[w][0] = X[j][k][0];
S1[w][1] = X[j][k][1];
w++;
}
}
else
{
for ( k = 0; k < PWXsimple; k++ )
{
S0[w + k][0] = X[j][k][0];
S0[w + k][1] = X[j][k][1];
}
}
}
}
}
if ( ctx->version != YESPOWER_0_5 )
{
/* 14: (S0, S1, S2) <-- (S2, S0, S1) */
ctx->S0 = S2;
ctx->S1 = S0;
ctx->S2 = S1;
/* 15: w <-- w mod 2^Swidth */
ctx->w = w & ( ( 1 << ctx->Swidth ) * PWXsimple - 1 );
}
}
/**
* blockmix_pwxform(B, ctx, r):
* Compute B = BlockMix_pwxform{salsa20, ctx, r}(B). The input B must be
* 128r bytes in length.
*/
static void blockmix_pwxform_8way( __m256i *B, pwxform_8way_ctx_t *ctx,
size_t r )
{
__m256i X[PWXwords];
size_t r1, i;
/* Convert 128-byte blocks to PWXbytes blocks */
/* 1: r_1 <-- 128r / PWXbytes */
r1 = 128 * r / PWXbytes;
/* 2: X <-- B'_{r_1 - 1} */
blkcpy_8way( X, &B[ (r1 - 1) * PWXwords ], PWXwords );
/* 3: for i = 0 to r_1 - 1 do */
for ( i = 0; i < r1; i++ )
{
/* 4: if r_1 > 1 */
if ( r1 > 1 )
{
/* 5: X <-- X xor B'_i */
blkxor_8way( X, &B[ i * PWXwords ], PWXwords );
}
/* 7: X <-- pwxform(X) */
pwxform_8way( X, ctx );
/* 8: B'_i <-- X */
blkcpy_8way( &B[ i * PWXwords ], X, PWXwords );
}
/* 10: i <-- floor((r_1 - 1) * PWXbytes / 64) */
i = ( r1 - 1 ) * PWXbytes / 64;
/* 11: B_i <-- H(B_i) */
salsa20_8way( &B[i * 16], ctx->salsa20_rounds );
#if 1 /* No-op with our current pwxform settings, but do it to make sure */
/* 12: for i = i + 1 to 2r - 1 do */
for ( i++; i < 2 * r; i++ )
{
/* 13: B_i <-- H(B_i xor B_{i-1}) */
blkxor_8way( &B[i * 16], &B[ (i - 1) * 16 ], 16 );
salsa20_8way( &B[i * 16], ctx->salsa20_rounds );
}
#endif
}
// This looks a lot like data dependent addressing
/**
* integerify(B, r):
* Return the result of parsing B_{2r-1} as a little-endian integer.
*/
static __m256i integerify8( const __m256i *B, size_t r )
{
/*
* Our 32-bit words are in host byte order. Also, they are SIMD-shuffled, but
* we only care about the least significant 32 bits anyway.
*/
const __m256i *X = &B[ (2 * r - 1) * 16 ];
return X[0];
}
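// Unlike the scalar integerify, which returns one integer, this returns
// word 0 of the last block for all 8 lanes at once; each lane then implies
// a different V_j, which is the data-dependent addressing problem flagged
// above.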
/**
* p2floor(x):
* Largest power of 2 not greater than argument.
*/
static uint32_t p2floor8( uint32_t x )
{
uint32_t y;
while ( ( y = x & (x - 1) ) )
x = y;
return x;
}
/**
* wrap(x, i):
* Wrap x to the range 0 to i-1.
*/
static uint32_t wrap8( uint32_t x, uint32_t i )
{
uint32_t n = p2floor8( i );
return ( x & (n - 1) ) + (i - n);
}
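/* e.g. wrap8(37, 48): n = p2floor8(48) = 32, so (37 & 31) + (48 - 32)
   = 5 + 16 = 21. */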
/**
* smix1(B, r, N, V, X, ctx):
* Compute first loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage X must be 128r bytes in length.
*/
static void smix1_8way( __m256i *B, size_t r, uint32_t N,
__m256i *V, __m256i *X, pwxform_8way_ctx_t *ctx )
{
size_t s = 32 * r;
uint32_t i, j;
size_t k;
/* 1: X <-- B */
for ( k = 0; k < 2 * r; k++ )
for ( i = 0; i < 16; i++ )
X[ k * 16 + i ] = B[ k * 16 + ( i * 5 % 16 ) ];
if ( ctx->version != YESPOWER_0_5 )
{
for ( k = 1; k < r; k++ )
{
blkcpy_8way( &X[k * 32], &X[ (k - 1) * 32 ], 32 );
blockmix_pwxform_8way( &X[k * 32], ctx, 1 );
}
}
/* 2: for i = 0 to N - 1 do */
for ( i = 0; i < N; i++ )
{
/* 3: V_i <-- X */
blkcpy_8way( &V[i * s], X, s );
if ( i > 1 )
{
// is j int or vector? Integerify has data-dependent addressing?
/* j <-- Wrap(Integerify(X), i) */
// j = wrap8( integerify8( X, r ), i );
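// NOTE: with the line above commented out, j is read uninitialized by the
// blkxor below; each of the 8 lanes implies its own j, so a real
// implementation needs per-lane gathers here.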
/* X <-- X xor V_j */
blkxor_8way( X, &V[j * s], s );
}
/* 4: X <-- H(X) */
if ( V != ctx->S )
blockmix_pwxform_8way( X, ctx, r );
else
blockmix_salsa_8way( X, ctx->salsa20_rounds );
}
/* B' <-- X */
for ( k = 0; k < 2 * r; k++ )
for ( i = 0; i < 16; i++ )
B[ k * 16 + ( i * 5 % 16 ) ] = X[ k * 16 + i ];
}
/**
* smix2(B, r, N, Nloop, V, X, ctx):
* Compute second loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage X must be 128r bytes in length. The value N must be a power of 2
* greater than 1.
*/
static void smix2_8way( __m256i *B, size_t r, uint32_t N, uint32_t Nloop,
__m256i *V, __m256i *X, pwxform_8way_ctx_t *ctx )
{
size_t s = 32 * r;
uint32_t i, j;
size_t k;
/* X <-- B */
for ( k = 0; k < 2 * r; k++ )
for ( i = 0; i < 16; i++ )
X[ k * 16 + i ] = B[ k * 16 + ( i * 5 % 16 ) ];
/* 6: for i = 0 to N - 1 do */
for ( i = 0; i < Nloop; i++ )
{
/* 7: j <-- Integerify(X) mod N */
// j = integerify8(X, r) & (N - 1);
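// NOTE: same issue as in smix1_8way: j is left uninitialized while the
// per-lane indexing question is unresolved.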
/* 8.1: X <-- X xor V_j */
blkxor_8way( X, &V[j * s], s );
/* V_j <-- X */
if ( Nloop != 2 )
blkcpy_8way( &V[j * s], X, s );
/* 8.2: X <-- H(X) */
blockmix_pwxform_8way( X, ctx, r );
}
/* 10: B' <-- X */
for ( k = 0; k < 2 * r; k++ )
for ( i = 0; i < 16; i++ )
B[ k * 16 + ( i * 5 % 16 ) ] = X[ k * 16 + i ];
}
/**
* smix(B, r, N, p, t, V, X, ctx):
* Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the
* temporary storage V must be 128rN bytes in length; the temporary storage
* X must be 128r bytes in length. The value N must be a power of 2 and at
* least 16.
*/
static void smix_8way( __m256i *B, size_t r, uint32_t N,
__m256i *V, __m256i *X, pwxform_8way_ctx_t *ctx)
{
uint32_t Nloop_all = (N + 2) / 3; /* 1/3, round up */
uint32_t Nloop_rw = Nloop_all;
Nloop_all++; Nloop_all &= ~(uint32_t)1; /* round up to even */
if ( ctx->version == YESPOWER_0_5 )
Nloop_rw &= ~(uint32_t)1; /* round down to even */
else
{ Nloop_rw++; Nloop_rw &= ~(uint32_t)1; } /* round up to even */
smix1_8way( B, 1, ctx->Sbytes / 128, ctx->S, X, ctx );
smix1_8way( B, r, N, V, X, ctx );
smix2_8way( B, r, N, Nloop_rw /* must be > 2 */, V, X, ctx );
smix2_8way( B, r, N, Nloop_all - Nloop_rw /* 0 or 2 */, V, X, ctx );
}
/**
* yespower(local, src, srclen, params, dst):
* Compute yespower(src[0 .. srclen - 1], N, r), to be checked for "< target".
*
* Return 0 on success; or -1 on error.
*/
int yespower_8way( yespower_local_t *local, const __m256i *src, size_t srclen,
const yespower_params_t *params, yespower_8way_binary_t *dst,
int thrid )
{
yespower_version_t version = params->version;
uint32_t N = params->N;
uint32_t r = params->r;
const uint8_t *pers = params->pers;
size_t perslen = params->perslen;
int retval = -1;
size_t B_size, V_size;
uint32_t *B, *V, *X, *S;
pwxform_8way_ctx_t ctx;
__m256i sha256[8];
/* Sanity-check parameters */
if ( (version != YESPOWER_0_5 && version != YESPOWER_1_0 ) ||
N < 1024 || N > 512 * 1024 || r < 8 || r > 32 ||
(N & (N - 1)) != 0 ||
(!pers && perslen) )
{
errno = EINVAL;
return -1;
}
/* Allocate memory */
B_size = (size_t)128 * r;
V_size = B_size * N;
if ((V = malloc(V_size)) == NULL)
return -1;
if ((B = malloc(B_size)) == NULL)
goto free_V;
if ((X = malloc(B_size)) == NULL)
goto free_B;
ctx.version = version;
if (version == YESPOWER_0_5) {
ctx.salsa20_rounds = 8;
ctx.PWXrounds = PWXrounds_0_5;
ctx.Swidth = Swidth_0_5;
ctx.Sbytes = 2 * Swidth_to_Sbytes1(ctx.Swidth);
} else {
ctx.salsa20_rounds = 2;
ctx.PWXrounds = PWXrounds_1_0;
ctx.Swidth = Swidth_1_0;
ctx.Sbytes = 3 * Swidth_to_Sbytes1(ctx.Swidth);
}
if ((S = malloc(ctx.Sbytes)) == NULL)
goto free_X;
ctx.S = S;
ctx.S0 = (__m256i (*)[2])S;
ctx.S1 = ctx.S0 + (1 << ctx.Swidth) * PWXsimple;
ctx.S2 = ctx.S1 + (1 << ctx.Swidth) * PWXsimple;
ctx.Smask = Swidth_to_Smask(ctx.Swidth);
ctx.w = 0;
// do prehash
sha256_8way_full( sha256, src, srclen );
// vpers needs a flexible size; the fixed array is a placeholder for a
// malloc'd buffer.
__m256i vpers[128];
if ( version != YESPOWER_0_5 && perslen )
   for ( int i = 0; i < perslen/4 + 1; i++ )
      vpers[i] = _mm256_set1_epi32( ((const uint32_t*)pers)[i] );
/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
pbkdf2_sha256_8way( B, B_size, sha256, sizeof(sha256), vpers, perslen, 1 );
blkcpy_8way( sha256, B, sizeof(sha256) / sizeof(sha256[0] ) );
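// As in the scalar code, the leading 32 bytes of each lane's B
// overwrite the SHA-256 digest, becoming the password for the final
// PBKDF2/HMAC pass.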
/* 3: B_i <-- MF(B_i, N) */
smix_8way( B, r, N, V, X, &ctx );
if ( version == YESPOWER_0_5 )
{
/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
pbkdf2_sha256_8way( dst, sizeof(*dst), sha256, sizeof(sha256),
B, B_size, 1 );
if ( pers )
{
hmac_sha256_8way_full( dst, sizeof(*dst), vpers, perslen, sha256 );
sha256_8way_full( dst, sha256, sizeof(sha256) );
}
}
else
hmac_sha256_8way_full( dst, B + B_size - 64, 64, sha256, sizeof(sha256) );
/* Success! */
retval = 1;
/* Free memory */
free(S);
free_X:
free(X);
free_B:
free(B);
free_V:
free(V);
return retval;
}
int yespower_8way_tls( const __m256i *src, size_t srclen,
   const yespower_params_t *params, yespower_8way_binary_t *dst, int thrid )
{
/* The reference implementation doesn't use thread-local storage */
   return yespower_8way( NULL, src, srclen, params, dst, thrid );
}
int yespower_init_local8( yespower_local_t *local )
{
/* The reference implementation doesn't use the local structure */
local->base = local->aligned = NULL;
local->base_size = local->aligned_size = 0;
return 0;
}
int yespower_free_local8( yespower_local_t *local )
{
/* The reference implementation frees its memory in yespower() */
(void)local; /* unused */
return 0;
}
int yespower_8way_hash( const char *input, char *output, uint32_t len,
                        int thrid )
{
   return yespower_8way_tls( (const __m256i*)input, len, &yespower_params,
                             (yespower_8way_binary_t*)output, thrid );
}
int scanhash_yespower_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(128) hash[8*8];
uint32_t _ALIGN(128) vdata[20*8];
uint32_t _ALIGN(128) endiandata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
for ( int k = 0; k < 19; k++ )
be32enc( &endiandata[k], pdata[k] );
endiandata[19] = n;
// do sha256 prehash
SHA256_Init( &sha256_prehash_ctx );
SHA256_Update( &sha256_prehash_ctx, endiandata, 64 );
do {
if ( yespower_8way_hash( (char*)vdata, (char*)hash, 80, thr_id ) )
if unlikely( valid_hash( hash, ptarget ) && !opt_benchmark )
{
be32enc( pdata+19, n );
submit_solution( work, hash, mythr );
}
endiandata[19] = ++n;
} while ( n < last_nonce && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce;
pdata[19] = n;
return 0;
}
#endif // AVX2

View File

@@ -194,11 +194,13 @@ static int free_region(yespower_region_t *region)
#define restrict
#endif
/*
#ifdef __GNUC__
#define unlikely(exp) __builtin_expect(exp, 0)
#else
#define unlikely(exp) (exp)
#endif
*/
#ifdef __SSE__
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
@@ -1113,7 +1115,7 @@ static void smix(uint8_t *B, size_t r, uint32_t N,
int yespower_b2b(yespower_local_t *local,
const uint8_t *src, size_t srclen,
const yespower_params_t *params,
yespower_binary_t *dst)
yespower_binary_t *dst, int thrid )
{
uint32_t N = params->N;
uint32_t r = params->r;
@@ -1168,17 +1170,25 @@ int yespower_b2b(yespower_local_t *local,
srclen = 0;
}
if ( work_restart[thrid].restart ) return false;
pbkdf2_blake2b_yp(init_hash, sizeof(init_hash), src, srclen, 1, B, 128);
if ( work_restart[thrid].restart ) return false;
memcpy(init_hash, B, sizeof(init_hash));
smix_1_0(B, r, N, V, XY, &ctx);
if ( work_restart[thrid].restart ) return false;
hmac_blake2b_yp_hash((uint8_t *)dst, B + B_size - 64, 64, init_hash, sizeof(init_hash));
/* Success! */
return 0;
return 1;
fail:
memset(dst, 0xff, sizeof(*dst));
return -1;
return 0;
}
/**
@@ -1189,7 +1199,7 @@ fail:
* Return 0 on success; or -1 on error.
*/
int yespower_b2b_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst)
const yespower_params_t *params, yespower_binary_t *dst, int thrid )
{
static __thread int initialized = 0;
static __thread yespower_local_t local;
@@ -1199,7 +1209,7 @@ int yespower_b2b_tls(const uint8_t *src, size_t srclen,
initialized = 1;
}
return yespower_b2b(&local, src, srclen, params, dst);
return yespower_b2b(&local, src, srclen, params, dst, thrid);
}
/*
int yespower_init_local(yespower_local_t *local)

View File

@@ -30,13 +30,16 @@
#include "algo-gate-api.h"
static yespower_params_t yespower_params;
yespower_params_t yespower_params;
SHA256_CTX sha256_prehash_ctx;
// YESPOWER
void yespower_hash( const char *input, char *output, uint32_t len )
int yespower_hash( const char *input, char *output, uint32_t len, int thrid )
{
yespower_tls( input, len, &yespower_params, (yespower_binary_t*)output );
return yespower_tls( input, len, &yespower_params,
(yespower_binary_t*)output, thrid );
}
int scanhash_yespower( struct work *work, uint32_t max_nonce,
@@ -54,8 +57,13 @@ int scanhash_yespower( struct work *work, uint32_t max_nonce,
for ( int k = 0; k < 19; k++ )
be32enc( &endiandata[k], pdata[k] );
endiandata[19] = n;
// do sha256 prehash
SHA256_Init( &sha256_prehash_ctx );
SHA256_Update( &sha256_prehash_ctx, endiandata, 64 );
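// The first 64 bytes of the 80-byte header are constant for a given
// job, so their SHA-256 midstate is computed once here; yespower()
// then only hashes the 16-byte tail that contains the nonce.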
do {
yespower_hash( (char*)endiandata, (char*)vhash, 80 );
if ( yespower_hash( (char*)endiandata, (char*)vhash, 80, thr_id ) )
if unlikely( valid_hash( vhash, ptarget ) && !opt_benchmark )
{
be32enc( pdata+19, n );
@@ -70,9 +78,9 @@ int scanhash_yespower( struct work *work, uint32_t max_nonce,
// YESPOWER-B2B
void yespower_b2b_hash( const char *input, char *output, uint32_t len )
int yespower_b2b_hash( const char *input, char *output, uint32_t len, int thrid )
{
yespower_b2b_tls( input, len, &yespower_params, (yespower_binary_t*)output );
return yespower_b2b_tls( input, len, &yespower_params, (yespower_binary_t*)output, thrid );
}
int scanhash_yespower_b2b( struct work *work, uint32_t max_nonce,
@@ -85,13 +93,18 @@ int scanhash_yespower_b2b( struct work *work, uint32_t max_nonce,
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
const uint32_t last_nonce = max_nonce;
const int thr_id = mythr->id; // thr_id arg is deprecated
const int thr_id = mythr->id;
for ( int k = 0; k < 19; k++ )
be32enc( &endiandata[k], pdata[k] );
endiandata[19] = n;
// do sha256 prehash
SHA256_Init( &sha256_prehash_ctx );
SHA256_Update( &sha256_prehash_ctx, endiandata, 64 );
do {
yespower_b2b_hash( (char*) endiandata, (char*) vhash, 80 );
if (yespower_b2b_hash( (char*) endiandata, (char*) vhash, 80, thr_id ) )
if unlikely( valid_hash( vhash, ptarget ) && !opt_benchmark )
{
be32enc( pdata+19, n );
@@ -151,7 +164,7 @@ bool register_yespowerr16_algo( algo_gate_t* gate )
return true;
};
/* not used
/* not used, doesn't work
bool register_yescrypt_05_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | SHA_OPT;
@@ -165,6 +178,40 @@ bool register_yescrypt_05_algo( algo_gate_t* gate )
return true;
}
bool register_yescrypt_05_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | SHA_OPT;
gate->scanhash = (void*)&scanhash_yespower;
yespower_params.version = YESPOWER_0_5;
if ( opt_param_n ) yespower_params.N = opt_param_n;
else yespower_params.N = 2048;
if ( opt_param_r ) yespower_params.r = opt_param_r;
else yespower_params.r = 8;
if ( opt_param_key )
{
yespower_params.pers = opt_param_key;
yespower_params.perslen = strlen( opt_param_key );
}
else
{
yespower_params.pers = NULL;
yespower_params.perslen = 0;
}
// YESCRYPT_P = 1;
applog( LOG_NOTICE,"Yescrypt parameters: N= %d, R= %d.",
yespower_params.N, yespower_params.r );
if ( yespower_params.pers )
applog( LOG_NOTICE,"Key= \"%s\"\n", yespower_params.pers );
return true;
}
bool register_yescryptr8_05_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | SHA_OPT;

View File

@@ -96,6 +96,8 @@
#include <stdlib.h>
#include <string.h>
#include "algo/sha/hmac-sha256-hash.h"
#include "algo/sha/hmac-sha256-hash-4way.h"
#include "yespower.h"
#include "yespower-platform.c"
@@ -107,11 +109,13 @@
#define restrict
#endif
/*
#ifdef __GNUC__
#define unlikely(exp) __builtin_expect(exp, 0)
#else
#define unlikely(exp) (exp)
#endif
*/
#ifdef __SSE__
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
@@ -1023,7 +1027,7 @@ static void smix(uint8_t *B, size_t r, uint32_t N,
int yespower(yespower_local_t *local,
const uint8_t *src, size_t srclen,
const yespower_params_t *params,
yespower_binary_t *dst)
yespower_binary_t *dst, int thrid )
{
yespower_version_t version = params->version;
uint32_t N = params->N;
@@ -1036,12 +1040,13 @@ int yespower(yespower_local_t *local,
salsa20_blk_t *V, *XY;
pwxform_ctx_t ctx;
uint8_t sha256[32];
SHA256_CTX sha256_ctx;
/* Sanity-check parameters */
if ((version != YESPOWER_0_5 && version != YESPOWER_1_0) ||
N < 1024 || N > 512 * 1024 || r < 8 || r > 32 ||
(N & (N - 1)) != 0 ||
(!pers && perslen)) {
if ( (version != YESPOWER_0_5 && version != YESPOWER_1_0)
|| N < 1024 || N > 512 * 1024 || r < 8 || r > 32
|| (N & (N - 1)) != 0 || ( !pers && perslen ) )
{
errno = EINVAL;
return -1;
}
@@ -1049,20 +1054,22 @@ int yespower(yespower_local_t *local,
/* Allocate memory */
B_size = (size_t)128 * r;
V_size = B_size * N;
if (version == YESPOWER_0_5) {
if ( version == YESPOWER_0_5 )
{
XY_size = B_size * 2;
Swidth = Swidth_0_5;
ctx.Sbytes = 2 * Swidth_to_Sbytes1(Swidth);
ctx.Sbytes = 2 * Swidth_to_Sbytes1( Swidth );
} else {
XY_size = B_size + 64;
Swidth = Swidth_1_0;
ctx.Sbytes = 3 * Swidth_to_Sbytes1(Swidth);
ctx.Sbytes = 3 * Swidth_to_Sbytes1( Swidth );
}
need = B_size + V_size + XY_size + ctx.Sbytes;
if (local->aligned_size < need) {
if (free_region(local))
if ( local->aligned_size < need )
{
if ( free_region( local ) )
return -1;
if (!alloc_region(local, need))
if ( !alloc_region( local, need ) )
return -1;
}
B = (uint8_t *)local->aligned;
@@ -1070,43 +1077,85 @@ int yespower(yespower_local_t *local,
XY = (salsa20_blk_t *)((uint8_t *)V + V_size);
S = (uint8_t *)XY + XY_size;
ctx.S0 = S;
ctx.S1 = S + Swidth_to_Sbytes1(Swidth);
ctx.S1 = S + Swidth_to_Sbytes1( Swidth );
SHA256_Buf(src, srclen, sha256);
if (version == YESPOWER_0_5) {
PBKDF2_SHA256(sha256, sizeof(sha256), src, srclen, 1,
B, B_size);
memcpy(sha256, B, sizeof(sha256));
smix(B, r, N, V, XY, &ctx);
PBKDF2_SHA256(sha256, sizeof(sha256), B, B_size, 1,
(uint8_t *)dst, sizeof(*dst));
// copy prehash, do tail
memcpy( &sha256_ctx, &sha256_prehash_ctx, sizeof sha256_ctx );
SHA256_Update( &sha256_ctx, src+64, srclen-64 );
SHA256_Final( sha256, &sha256_ctx );
if (pers) {
HMAC_SHA256_Buf(dst, sizeof(*dst), pers, perslen,
sha256);
SHA256_Buf(sha256, sizeof(sha256), (uint8_t *)dst);
// SHA256_Buf(src, srclen, sha256);
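// The memcpy above resumes from sha256_prehash_ctx, the midstate of the
// constant first 64 header bytes computed once per job in scanhash, so
// only the srclen - 64 tail bytes are hashed per nonce.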
if ( version == YESPOWER_0_5 )
{
PBKDF2_SHA256( sha256, sizeof(sha256), src, srclen, 1, B, B_size );
if ( work_restart[thrid].restart ) return 0;
memcpy( sha256, B, sizeof(sha256) );
smix( B, r, N, V, XY, &ctx );
if ( work_restart[thrid].restart ) return 0;
PBKDF2_SHA256( sha256, sizeof(sha256), B, B_size, 1, (uint8_t *)dst,
sizeof(*dst) );
if ( work_restart[thrid].restart ) return 0;
if ( pers )
{
src = pers;
srclen = perslen;
}
else
srclen = 0;
HMAC_SHA256_CTX ctx;
HMAC_SHA256_Init( &ctx, dst, sizeof(*dst) );
HMAC_SHA256_Update( &ctx, src, srclen );
HMAC_SHA256_Final( sha256, &ctx );
// SHA256_CTX ctx;
SHA256_Init( &sha256_ctx );
SHA256_Update( &sha256_ctx, sha256, sizeof(sha256) );
SHA256_Final( (unsigned char*)dst, &sha256_ctx );
/*
if ( pers )
{
HMAC_SHA256_Buf( dst, sizeof(*dst), pers, perslen, sha256 );
SHA256_Buf( sha256, sizeof(sha256), (uint8_t *)dst );
}
} else {
ctx.S2 = S + 2 * Swidth_to_Sbytes1(Swidth);
*/
}
else
{
ctx.S2 = S + 2 * Swidth_to_Sbytes1( Swidth );
ctx.w = 0;
if (pers) {
if ( pers )
{
src = pers;
srclen = perslen;
} else {
srclen = 0;
}
else
srclen = 0;
PBKDF2_SHA256(sha256, sizeof(sha256), src, srclen, 1, B, 128);
memcpy(sha256, B, sizeof(sha256));
smix_1_0(B, r, N, V, XY, &ctx);
HMAC_SHA256_Buf(B + B_size - 64, 64,
sha256, sizeof(sha256), (uint8_t *)dst);
PBKDF2_SHA256( sha256, sizeof(sha256), src, srclen, 1, B, 128 );
memcpy( sha256, B, sizeof(sha256) );
if ( work_restart[thrid].restart ) return 0;
smix_1_0( B, r, N, V, XY, &ctx );
HMAC_SHA256_Buf( B + B_size - 64, 64, sha256, sizeof(sha256),
(uint8_t *)dst );
}
/* Success! */
return 0;
return 1;
}
/**
@@ -1117,7 +1166,7 @@ int yespower(yespower_local_t *local,
* Return 0 on success; or -1 on error.
*/
int yespower_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst)
const yespower_params_t *params, yespower_binary_t *dst, int thrid )
{
static __thread int initialized = 0;
static __thread yespower_local_t local;
@@ -1128,7 +1177,7 @@ int yespower_tls(const uint8_t *src, size_t srclen,
initialized = 1;
}
return yespower(&local, src, srclen, params, dst);
return yespower( &local, src, srclen, params, dst, thrid );
}
int yespower_init_local(yespower_local_t *local)

View File

@@ -453,9 +453,8 @@ static void smix(uint32_t *B, size_t r, uint32_t N,
*
* Return 0 on success; or -1 on error.
*/
int yespower(yespower_local_t *local,
const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst)
int yespower( yespower_local_t *local, const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst, int thrid )
{
yespower_version_t version = params->version;
uint32_t N = params->N;
@@ -534,17 +533,16 @@ int yespower(yespower_local_t *local,
if (pers) {
HMAC_SHA256_Buf(dst, sizeof(*dst), pers, perslen,
(uint8_t *)sha256);
SHA256_Buf(sha256, sizeof(sha256), (uint8_t *)dst);
}
} else {
HMAC_SHA256_Buf_P((uint8_t *)B + B_size - 64, 64,
HMAC_SHA256_Buf((uint8_t *)B + B_size - 64, 64,
sha256, sizeof(sha256), (uint8_t *)dst);
}
/* Success! */
retval = 0;
retval = 1;
/* Free memory */
free(S);
@@ -559,10 +557,10 @@ free_V:
}
int yespower_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst)
const yespower_params_t *params, yespower_binary_t *dst, int thrid )
{
/* The reference implementation doesn't use thread-local storage */
return yespower(NULL, src, srclen, params, dst);
return yespower(NULL, src, srclen, params, dst, thrid );
}
int yespower_init_local(yespower_local_t *local)

View File

@@ -32,6 +32,9 @@
#include <stdint.h>
#include <stdlib.h> /* for size_t */
#include "miner.h"
#include "simd-utils.h"
#include <openssl/sha.h>
#ifdef __cplusplus
extern "C" {
@@ -73,6 +76,10 @@ typedef struct {
unsigned char uc[32];
} yespower_binary_t __attribute__ ((aligned (64)));
yespower_params_t yespower_params;
SHA256_CTX sha256_prehash_ctx;
/**
* yespower_init_local(local):
* Initialize the thread-local (RAM) data structure. Actual memory allocation
@@ -109,11 +116,11 @@ extern int yespower_free_local(yespower_local_t *local);
*/
extern int yespower(yespower_local_t *local,
const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
const yespower_params_t *params, yespower_binary_t *dst, int thrid);
extern int yespower_b2b(yespower_local_t *local,
const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
const yespower_params_t *params, yespower_binary_t *dst, int thrid );
/**
* yespower_tls(src, srclen, params, dst):
@@ -125,10 +132,28 @@ extern int yespower_b2b(yespower_local_t *local,
* MT-safe as long as dst is local to the thread.
*/
extern int yespower_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
const yespower_params_t *params, yespower_binary_t *dst, int thr_id);
extern int yespower_b2b_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
const yespower_params_t *params, yespower_binary_t *dst, int thr_id);
#if defined(__AVX2__)
typedef struct
{
__m256i uc[8];
} yespower_8way_binary_t __attribute__ ((aligned (128)));
extern int yespower_8way( yespower_local_t *local, const __m256i *src,
size_t srclen, const yespower_params_t *params,
yespower_8way_binary_t *dst, int thrid );
extern int yespower_8way_tls( const __m256i *src, size_t srclen,
const yespower_params_t *params, yespower_8way_binary_t *dst, int thr_id );
#endif // AVX2
#ifdef __cplusplus
}

20
configure vendored
View File

@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.11.9.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.12.7.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -577,8 +577,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='cpuminer-opt'
PACKAGE_TARNAME='cpuminer-opt'
PACKAGE_VERSION='3.11.9'
PACKAGE_STRING='cpuminer-opt 3.11.9'
PACKAGE_VERSION='3.12.7'
PACKAGE_STRING='cpuminer-opt 3.12.7'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''
@@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures cpuminer-opt 3.11.9 to adapt to many kinds of systems.
\`configure' configures cpuminer-opt 3.12.7 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1404,7 +1404,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of cpuminer-opt 3.11.9:";;
short | recursive ) echo "Configuration of cpuminer-opt 3.12.7:";;
esac
cat <<\_ACEOF
@@ -1509,7 +1509,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
cpuminer-opt configure 3.11.9
cpuminer-opt configure 3.12.7
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by cpuminer-opt $as_me 3.11.9, which was
It was created by cpuminer-opt $as_me 3.12.7, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2993,7 +2993,7 @@ fi
# Define the identity of the package.
PACKAGE='cpuminer-opt'
VERSION='3.11.9'
VERSION='3.12.7'
cat >>confdefs.h <<_ACEOF
@@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by cpuminer-opt $as_me 3.11.9, which was
This file was extended by cpuminer-opt $as_me 3.12.7, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -6756,7 +6756,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
cpuminer-opt config.status 3.11.9
cpuminer-opt config.status 3.12.7
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"

View File

@@ -1,4 +1,4 @@
AC_INIT([cpuminer-opt], [3.11.9])
AC_INIT([cpuminer-opt], [3.12.7])
AC_PREREQ([2.59c])
AC_CANONICAL_SYSTEM

File diff suppressed because it is too large

View File

@@ -1,170 +0,0 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2013, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 20/12/2007
*/
#include <stdint.h>
#if defined(__cplusplus)
extern "C"
{
#endif
#define TABLE_ALIGN 32
#define WPOLY 0x011b
#define N_COLS 4
#define AES_BLOCK_SIZE 16
#define RC_LENGTH (5 * (AES_BLOCK_SIZE / 4 - 2))
#if defined(_MSC_VER)
#define ALIGN __declspec(align(TABLE_ALIGN))
#elif defined(__GNUC__)
#define ALIGN __attribute__ ((aligned(16)))
#else
#define ALIGN
#endif
#define rf1(r,c) (r)
#define word_in(x,c) (*((uint32_t*)(x)+(c)))
#define word_out(x,c,v) (*((uint32_t*)(x)+(c)) = (v))
#define s(x,c) x[c]
#define si(y,x,c) (s(y,c) = word_in(x, c))
#define so(y,x,c) word_out(y, c, s(x,c))
#define state_in(y,x) si(y,x,0); si(y,x,1); si(y,x,2); si(y,x,3)
#define state_out(y,x) so(y,x,0); so(y,x,1); so(y,x,2); so(y,x,3)
#define round(y,x,k) \
y[0] = (k)[0] ^ (t_fn[0][x[0] & 0xff] ^ t_fn[1][(x[1] >> 8) & 0xff] ^ t_fn[2][(x[2] >> 16) & 0xff] ^ t_fn[3][x[3] >> 24]); \
y[1] = (k)[1] ^ (t_fn[0][x[1] & 0xff] ^ t_fn[1][(x[2] >> 8) & 0xff] ^ t_fn[2][(x[3] >> 16) & 0xff] ^ t_fn[3][x[0] >> 24]); \
y[2] = (k)[2] ^ (t_fn[0][x[2] & 0xff] ^ t_fn[1][(x[3] >> 8) & 0xff] ^ t_fn[2][(x[0] >> 16) & 0xff] ^ t_fn[3][x[1] >> 24]); \
y[3] = (k)[3] ^ (t_fn[0][x[3] & 0xff] ^ t_fn[1][(x[0] >> 8) & 0xff] ^ t_fn[2][(x[1] >> 16) & 0xff] ^ t_fn[3][x[2] >> 24]);
#define to_byte(x) ((x) & 0xff)
#define bval(x,n) to_byte((x) >> (8 * (n)))
#define fwd_var(x,r,c)\
( r == 0 ? ( c == 0 ? s(x,0) : c == 1 ? s(x,1) : c == 2 ? s(x,2) : s(x,3))\
: r == 1 ? ( c == 0 ? s(x,1) : c == 1 ? s(x,2) : c == 2 ? s(x,3) : s(x,0))\
: r == 2 ? ( c == 0 ? s(x,2) : c == 1 ? s(x,3) : c == 2 ? s(x,0) : s(x,1))\
: ( c == 0 ? s(x,3) : c == 1 ? s(x,0) : c == 2 ? s(x,1) : s(x,2)))
#define fwd_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(f,n),fwd_var,rf1,c))
#define sb_data(w) {\
w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), w(0xc5),\
w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), w(0xab), w(0x76),\
w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), w(0x59), w(0x47), w(0xf0),\
w(0xad), w(0xd4), w(0xa2), w(0xaf), w(0x9c), w(0xa4), w(0x72), w(0xc0),\
w(0xb7), w(0xfd), w(0x93), w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc),\
w(0x34), w(0xa5), w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15),\
w(0x04), w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a),\
w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), w(0x75),\
w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), w(0x5a), w(0xa0),\
w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), w(0xe3), w(0x2f), w(0x84),\
w(0x53), w(0xd1), w(0x00), w(0xed), w(0x20), w(0xfc), w(0xb1), w(0x5b),\
w(0x6a), w(0xcb), w(0xbe), w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf),\
w(0xd0), w(0xef), w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85),\
w(0x45), w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8),\
w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), w(0xf5),\
w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), w(0xf3), w(0xd2),\
w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), w(0x97), w(0x44), w(0x17),\
w(0xc4), w(0xa7), w(0x7e), w(0x3d), w(0x64), w(0x5d), w(0x19), w(0x73),\
w(0x60), w(0x81), w(0x4f), w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88),\
w(0x46), w(0xee), w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb),\
w(0xe0), w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c),\
w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), w(0x79),\
w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), w(0x4e), w(0xa9),\
w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), w(0x7a), w(0xae), w(0x08),\
w(0xba), w(0x78), w(0x25), w(0x2e), w(0x1c), w(0xa6), w(0xb4), w(0xc6),\
w(0xe8), w(0xdd), w(0x74), w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a),\
w(0x70), w(0x3e), w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e),\
w(0x61), w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e),\
w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), w(0x94),\
w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), w(0x28), w(0xdf),\
w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), w(0xe6), w(0x42), w(0x68),\
w(0x41), w(0x99), w(0x2d), w(0x0f), w(0xb0), w(0x54), w(0xbb), w(0x16) }
#define rc_data(w) {\
w(0x01), w(0x02), w(0x04), w(0x08), w(0x10),w(0x20), w(0x40), w(0x80),\
w(0x1b), w(0x36) }
#define bytes2word(b0, b1, b2, b3) (((uint32_t)(b3) << 24) | \
((uint32_t)(b2) << 16) | ((uint32_t)(b1) << 8) | (b0))
#define h0(x) (x)
#define w0(p) bytes2word(p, 0, 0, 0)
#define w1(p) bytes2word(0, p, 0, 0)
#define w2(p) bytes2word(0, 0, p, 0)
#define w3(p) bytes2word(0, 0, 0, p)
#define u0(p) bytes2word(f2(p), p, p, f3(p))
#define u1(p) bytes2word(f3(p), f2(p), p, p)
#define u2(p) bytes2word(p, f3(p), f2(p), p)
#define u3(p) bytes2word(p, p, f3(p), f2(p))
#define v0(p) bytes2word(fe(p), f9(p), fd(p), fb(p))
#define v1(p) bytes2word(fb(p), fe(p), f9(p), fd(p))
#define v2(p) bytes2word(fd(p), fb(p), fe(p), f9(p))
#define v3(p) bytes2word(f9(p), fd(p), fb(p), fe(p))
#define f2(x) ((x<<1) ^ (((x>>7) & 1) * WPOLY))
#define f4(x) ((x<<2) ^ (((x>>6) & 1) * WPOLY) ^ (((x>>6) & 2) * WPOLY))
#define f8(x) ((x<<3) ^ (((x>>5) & 1) * WPOLY) ^ (((x>>5) & 2) * WPOLY) ^ (((x>>5) & 4) * WPOLY))
#define f3(x) (f2(x) ^ x)
#define f9(x) (f8(x) ^ x)
#define fb(x) (f8(x) ^ f2(x) ^ x)
#define fd(x) (f8(x) ^ f4(x) ^ x)
#define fe(x) (f8(x) ^ f4(x) ^ f2(x))
#define t_dec(m,n) t_##m##n
#define t_set(m,n) t_##m##n
#define t_use(m,n) t_##m##n
#define d_4(t,n,b,e,f,g,h) ALIGN const t n[4][256] = { b(e), b(f), b(g), b(h) }
#define four_tables(x,tab,vf,rf,c) \
(tab[0][bval(vf(x,0,c),rf(0,c))] \
^ tab[1][bval(vf(x,1,c),rf(1,c))] \
^ tab[2][bval(vf(x,2,c),rf(2,c))] \
^ tab[3][bval(vf(x,3,c),rf(3,c))])
d_4(uint32_t, t_dec(f,n), sb_data, u0, u1, u2, u3);
void aesb_single_round(const uint8_t *in, uint8_t *out, uint8_t *expandedKey)
{
round(((uint32_t*) out), ((uint32_t*) in), ((uint32_t*) expandedKey));
}
void aesb_pseudo_round_mut(uint8_t *val, uint8_t *expandedKey)
{
uint32_t b1[4];
round(b1, ((uint32_t*) val), ((const uint32_t *) expandedKey));
round(((uint32_t*) val), b1, ((const uint32_t *) expandedKey) + 1 * N_COLS);
round(b1, ((uint32_t*) val), ((const uint32_t *) expandedKey) + 2 * N_COLS);
round(((uint32_t*) val), b1, ((const uint32_t *) expandedKey) + 3 * N_COLS);
round(b1, ((uint32_t*) val), ((const uint32_t *) expandedKey) + 4 * N_COLS);
round(((uint32_t*) val), b1, ((const uint32_t *) expandedKey) + 5 * N_COLS);
round(b1, ((uint32_t*) val), ((const uint32_t *) expandedKey) + 6 * N_COLS);
round(((uint32_t*) val), b1, ((const uint32_t *) expandedKey) + 7 * N_COLS);
round(b1, ((uint32_t*) val), ((const uint32_t *) expandedKey) + 8 * N_COLS);
round(((uint32_t*) val), b1, ((const uint32_t *) expandedKey) + 9 * N_COLS);
}
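/*
 * aesb_pseudo_round_mut applies ten keyed T-table rounds in place,
 * ping-ponging between val and the b1 scratch block. Note all ten use
 * the full MixColumns round above; there is no separate final round.
 */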
#if defined(__cplusplus)
}
#endif

View File

@@ -1,326 +0,0 @@
/*
* The blake256_* and blake224_* functions are largely copied from
* blake256_light.c and blake224_light.c from the BLAKE website:
*
* http://131002.net/blake/
*
* The hmac_* functions implement HMAC-BLAKE-256 and HMAC-BLAKE-224.
* HMAC is specified by RFC 2104.
*/
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include "c_blake256.h"
#define U8TO32(p) \
(((uint32_t)((p)[0]) << 24) | ((uint32_t)((p)[1]) << 16) | \
((uint32_t)((p)[2]) << 8) | ((uint32_t)((p)[3]) ))
#define U32TO8(p, v) \
(p)[0] = (uint8_t)((v) >> 24); (p)[1] = (uint8_t)((v) >> 16); \
(p)[2] = (uint8_t)((v) >> 8); (p)[3] = (uint8_t)((v) );
const uint8_t sigma[][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15},
{14,10, 4, 8, 9,15,13, 6, 1,12, 0, 2,11, 7, 5, 3},
{11, 8,12, 0, 5, 2,15,13,10,14, 3, 6, 7, 1, 9, 4},
{ 7, 9, 3, 1,13,12,11,14, 2, 6, 5,10, 4, 0,15, 8},
{ 9, 0, 5, 7, 2, 4,10,15,14, 1,11,12, 6, 8, 3,13},
{ 2,12, 6,10, 0,11, 8, 3, 4,13, 7, 5,15,14, 1, 9},
{12, 5, 1,15,14,13, 4,10, 0, 7, 6, 3, 9, 2, 8,11},
{13,11, 7,14,12, 1, 3, 9, 5, 0,15, 4, 8, 6, 2,10},
{ 6,15,14, 9,11, 3, 0, 8,12, 2,13, 7, 1, 4,10, 5},
{10, 2, 8, 4, 7, 6, 1, 5,15,11, 9,14, 3,12,13, 0},
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15},
{14,10, 4, 8, 9,15,13, 6, 1,12, 0, 2,11, 7, 5, 3},
{11, 8,12, 0, 5, 2,15,13,10,14, 3, 6, 7, 1, 9, 4},
{ 7, 9, 3, 1,13,12,11,14, 2, 6, 5,10, 4, 0,15, 8}
};
const uint32_t cst[16] = {
0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344,
0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89,
0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C,
0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917
};
static const uint8_t padding[] = {
0x80,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};
void blake256_compress(state *S, const uint8_t *block) {
uint32_t v[16], m[16], i;
#define ROT(x,n) (((x)<<(32-n))|((x)>>(n)))
#define G(a,b,c,d,e) \
v[a] += (m[sigma[i][e]] ^ cst[sigma[i][e+1]]) + v[b]; \
v[d] = ROT(v[d] ^ v[a],16); \
v[c] += v[d]; \
v[b] = ROT(v[b] ^ v[c],12); \
v[a] += (m[sigma[i][e+1]] ^ cst[sigma[i][e]])+v[b]; \
v[d] = ROT(v[d] ^ v[a], 8); \
v[c] += v[d]; \
v[b] = ROT(v[b] ^ v[c], 7);
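/* One BLAKE-256 G mixing step: two message words, pre-XORed with the
   sigma-permuted constants, are injected between the add/rotate/xor
   operations, with rotations of 16, 12, 8 and 7 bits. */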
for (i = 0; i < 16; ++i) m[i] = U8TO32(block + i * 4);
for (i = 0; i < 8; ++i) v[i] = S->h[i];
v[ 8] = S->s[0] ^ 0x243F6A88;
v[ 9] = S->s[1] ^ 0x85A308D3;
v[10] = S->s[2] ^ 0x13198A2E;
v[11] = S->s[3] ^ 0x03707344;
v[12] = 0xA4093822;
v[13] = 0x299F31D0;
v[14] = 0x082EFA98;
v[15] = 0xEC4E6C89;
if (S->nullt == 0) {
v[12] ^= S->t[0];
v[13] ^= S->t[0];
v[14] ^= S->t[1];
v[15] ^= S->t[1];
}
for (i = 0; i < 14; ++i) {
G(0, 4, 8, 12, 0);
G(1, 5, 9, 13, 2);
G(2, 6, 10, 14, 4);
G(3, 7, 11, 15, 6);
G(3, 4, 9, 14, 14);
G(2, 7, 8, 13, 12);
G(0, 5, 10, 15, 8);
G(1, 6, 11, 12, 10);
}
for (i = 0; i < 16; ++i) S->h[i % 8] ^= v[i];
for (i = 0; i < 8; ++i) S->h[i] ^= S->s[i % 4];
}
void blake256_init(state *S) {
S->h[0] = 0x6A09E667;
S->h[1] = 0xBB67AE85;
S->h[2] = 0x3C6EF372;
S->h[3] = 0xA54FF53A;
S->h[4] = 0x510E527F;
S->h[5] = 0x9B05688C;
S->h[6] = 0x1F83D9AB;
S->h[7] = 0x5BE0CD19;
S->t[0] = S->t[1] = S->buflen = S->nullt = 0;
S->s[0] = S->s[1] = S->s[2] = S->s[3] = 0;
}
void blake224_init(state *S) {
S->h[0] = 0xC1059ED8;
S->h[1] = 0x367CD507;
S->h[2] = 0x3070DD17;
S->h[3] = 0xF70E5939;
S->h[4] = 0xFFC00B31;
S->h[5] = 0x68581511;
S->h[6] = 0x64F98FA7;
S->h[7] = 0xBEFA4FA4;
S->t[0] = S->t[1] = S->buflen = S->nullt = 0;
S->s[0] = S->s[1] = S->s[2] = S->s[3] = 0;
}
// datalen = number of bits
void blake256_update(state *S, const uint8_t *data, uint64_t datalen) {
int left = S->buflen >> 3;
int fill = 64 - left;
if (left && (((datalen >> 3) & 0x3F) >= (unsigned) fill)) {
memcpy((void *) (S->buf + left), (void *) data, fill);
S->t[0] += 512;
if (S->t[0] == 0) S->t[1]++;
blake256_compress(S, S->buf);
data += fill;
datalen -= (fill << 3);
left = 0;
}
while (datalen >= 512) {
S->t[0] += 512;
if (S->t[0] == 0) S->t[1]++;
blake256_compress(S, data);
data += 64;
datalen -= 512;
}
if (datalen > 0) {
memcpy((void *) (S->buf + left), (void *) data, (size_t) (datalen >> 3));
S->buflen = (left << 3) + (int) datalen;
} else {
S->buflen = 0;
}
}
// datalen = number of bits
void blake224_update(state *S, const uint8_t *data, uint64_t datalen) {
blake256_update(S, data, datalen);
}
void blake256_final_h(state *S, uint8_t *digest, uint8_t pa, uint8_t pb) {
uint8_t msglen[8];
uint32_t lo = S->t[0] + S->buflen, hi = S->t[1];
if (lo < (unsigned) S->buflen) hi++;
U32TO8(msglen + 0, hi);
U32TO8(msglen + 4, lo);
if (S->buflen == 440) { /* one padding byte */
S->t[0] -= 8;
blake256_update(S, &pa, 8);
} else {
if (S->buflen < 440) { /* enough space to fill the block */
if (S->buflen == 0) S->nullt = 1;
S->t[0] -= 440 - S->buflen;
blake256_update(S, padding, 440 - S->buflen);
} else { /* need 2 compressions */
S->t[0] -= 512 - S->buflen;
blake256_update(S, padding, 512 - S->buflen);
S->t[0] -= 440;
blake256_update(S, padding + 1, 440);
S->nullt = 1;
}
blake256_update(S, &pb, 8);
S->t[0] -= 8;
}
S->t[0] -= 64;
blake256_update(S, msglen, 64);
U32TO8(digest + 0, S->h[0]);
U32TO8(digest + 4, S->h[1]);
U32TO8(digest + 8, S->h[2]);
U32TO8(digest + 12, S->h[3]);
U32TO8(digest + 16, S->h[4]);
U32TO8(digest + 20, S->h[5]);
U32TO8(digest + 24, S->h[6]);
U32TO8(digest + 28, S->h[7]);
}
void blake256_final(state *S, uint8_t *digest) {
blake256_final_h(S, digest, 0x81, 0x01);
}
void blake224_final(state *S, uint8_t *digest) {
blake256_final_h(S, digest, 0x80, 0x00);
}
// inlen = number of bytes
void blake256_hash(uint8_t *out, const uint8_t *in, uint64_t inlen) {
state S;
blake256_init(&S);
blake256_update(&S, in, inlen * 8);
blake256_final(&S, out);
}
// inlen = number of bytes
void blake224_hash(uint8_t *out, const uint8_t *in, uint64_t inlen) {
state S;
blake224_init(&S);
blake224_update(&S, in, inlen * 8);
blake224_final(&S, out);
}
// keylen = number of bytes
void hmac_blake256_init(hmac_state *S, const uint8_t *_key, uint64_t keylen) {
const uint8_t *key = _key;
uint8_t keyhash[32];
uint8_t pad[64];
uint64_t i;
if (keylen > 64) {
blake256_hash(keyhash, key, keylen);
key = keyhash;
keylen = 32;
}
blake256_init(&S->inner);
memset(pad, 0x36, 64);
for (i = 0; i < keylen; ++i) {
pad[i] ^= key[i];
}
blake256_update(&S->inner, pad, 512);
blake256_init(&S->outer);
memset(pad, 0x5c, 64);
for (i = 0; i < keylen; ++i) {
pad[i] ^= key[i];
}
blake256_update(&S->outer, pad, 512);
memset(keyhash, 0, 32);
}
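// Standard RFC 2104 construction: the key is XORed with the 0x36 ipad
// for the inner state and the 0x5c opad for the outer state; keys
// longer than the 64-byte block are first hashed down to 32 bytes.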
// keylen = number of bytes
void hmac_blake224_init(hmac_state *S, const uint8_t *_key, uint64_t keylen) {
const uint8_t *key = _key;
uint8_t keyhash[32];
uint8_t pad[64];
uint64_t i;
if (keylen > 64) {
blake256_hash(keyhash, key, keylen);
key = keyhash;
keylen = 28;
}
blake224_init(&S->inner);
memset(pad, 0x36, 64);
for (i = 0; i < keylen; ++i) {
pad[i] ^= key[i];
}
blake224_update(&S->inner, pad, 512);
blake224_init(&S->outer);
memset(pad, 0x5c, 64);
for (i = 0; i < keylen; ++i) {
pad[i] ^= key[i];
}
blake224_update(&S->outer, pad, 512);
memset(keyhash, 0, 32);
}
// datalen = number of bits
void hmac_blake256_update(hmac_state *S, const uint8_t *data, uint64_t datalen) {
// update the inner state
blake256_update(&S->inner, data, datalen);
}
// datalen = number of bits
void hmac_blake224_update(hmac_state *S, const uint8_t *data, uint64_t datalen) {
// update the inner state
blake224_update(&S->inner, data, datalen);
}
void hmac_blake256_final(hmac_state *S, uint8_t *digest) {
uint8_t ihash[32];
blake256_final(&S->inner, ihash);
blake256_update(&S->outer, ihash, 256);
blake256_final(&S->outer, digest);
memset(ihash, 0, 32);
}
void hmac_blake224_final(hmac_state *S, uint8_t *digest) {
uint8_t ihash[32];
blake224_final(&S->inner, ihash);
blake224_update(&S->outer, ihash, 224);
blake224_final(&S->outer, digest);
memset(ihash, 0, 32);
}
// keylen = number of bytes; inlen = number of bytes
void hmac_blake256_hash(uint8_t *out, const uint8_t *key, uint64_t keylen, const uint8_t *in, uint64_t inlen) {
hmac_state S;
hmac_blake256_init(&S, key, keylen);
hmac_blake256_update(&S, in, inlen * 8);
hmac_blake256_final(&S, out);
}
// keylen = number of bytes; inlen = number of bytes
void hmac_blake224_hash(uint8_t *out, const uint8_t *key, uint64_t keylen, const uint8_t *in, uint64_t inlen) {
hmac_state S;
hmac_blake224_init(&S, key, keylen);
hmac_blake224_update(&S, in, inlen * 8);
hmac_blake224_final(&S, out);
}

View File

@@ -1,43 +0,0 @@
#ifndef _BLAKE256_H_
#define _BLAKE256_H_
#include <stdint.h>
typedef struct {
uint32_t h[8], s[4], t[2];
int buflen, nullt;
uint8_t buf[64];
} state;
typedef struct {
state inner;
state outer;
} hmac_state;
void blake256_init(state *);
void blake224_init(state *);
void blake256_update(state *, const uint8_t *, uint64_t);
void blake224_update(state *, const uint8_t *, uint64_t);
void blake256_final(state *, uint8_t *);
void blake224_final(state *, uint8_t *);
void blake256_hash(uint8_t *, const uint8_t *, uint64_t);
void blake224_hash(uint8_t *, const uint8_t *, uint64_t);
/* HMAC functions: */
void hmac_blake256_init(hmac_state *, const uint8_t *, uint64_t);
void hmac_blake224_init(hmac_state *, const uint8_t *, uint64_t);
void hmac_blake256_update(hmac_state *, const uint8_t *, uint64_t);
void hmac_blake224_update(hmac_state *, const uint8_t *, uint64_t);
void hmac_blake256_final(hmac_state *, uint8_t *);
void hmac_blake224_final(hmac_state *, uint8_t *);
void hmac_blake256_hash(uint8_t *, const uint8_t *, uint64_t, const uint8_t *, uint64_t);
void hmac_blake224_hash(uint8_t *, const uint8_t *, uint64_t, const uint8_t *, uint64_t);
#endif /* _BLAKE256_H_ */

View File

@@ -1,360 +0,0 @@
/* hash.c April 2012
* Groestl ANSI C code optimised for 32-bit machines
* Author: Thomas Krinninger
*
* This work is based on the implementation of
* Soeren S. Thomsen and Krystian Matusiewicz
*
*
*/
#include "c_groestl.h"
#include "groestl_tables.h"
#define P_TYPE 0
#define Q_TYPE 1
const uint8_t shift_Values[2][8] = {{0,1,2,3,4,5,6,7},{1,3,5,7,0,2,4,6}};
const uint8_t indices_cyclic[15] = {0,1,2,3,4,5,6,7,0,1,2,3,4,5,6};
#define ROTATE_COLUMN_DOWN(v1, v2, amount_bytes, temp_var) {temp_var = (v1<<(8*amount_bytes))|(v2>>(8*(4-amount_bytes))); \
v2 = (v2<<(8*amount_bytes))|(v1>>(8*(4-amount_bytes))); \
v1 = temp_var;}
#define COLUMN(x,y,i,c0,c1,c2,c3,c4,c5,c6,c7,tv1,tv2,tu,tl,t) \
tu = T[2*(uint32_t)x[4*c0+0]]; \
tl = T[2*(uint32_t)x[4*c0+0]+1]; \
tv1 = T[2*(uint32_t)x[4*c1+1]]; \
tv2 = T[2*(uint32_t)x[4*c1+1]+1]; \
ROTATE_COLUMN_DOWN(tv1,tv2,1,t) \
tu ^= tv1; \
tl ^= tv2; \
tv1 = T[2*(uint32_t)x[4*c2+2]]; \
tv2 = T[2*(uint32_t)x[4*c2+2]+1]; \
ROTATE_COLUMN_DOWN(tv1,tv2,2,t) \
tu ^= tv1; \
tl ^= tv2; \
tv1 = T[2*(uint32_t)x[4*c3+3]]; \
tv2 = T[2*(uint32_t)x[4*c3+3]+1]; \
ROTATE_COLUMN_DOWN(tv1,tv2,3,t) \
tu ^= tv1; \
tl ^= tv2; \
tl ^= T[2*(uint32_t)x[4*c4+0]]; \
tu ^= T[2*(uint32_t)x[4*c4+0]+1]; \
tv1 = T[2*(uint32_t)x[4*c5+1]]; \
tv2 = T[2*(uint32_t)x[4*c5+1]+1]; \
ROTATE_COLUMN_DOWN(tv1,tv2,1,t) \
tl ^= tv1; \
tu ^= tv2; \
tv1 = T[2*(uint32_t)x[4*c6+2]]; \
tv2 = T[2*(uint32_t)x[4*c6+2]+1]; \
ROTATE_COLUMN_DOWN(tv1,tv2,2,t) \
tl ^= tv1; \
tu ^= tv2; \
tv1 = T[2*(uint32_t)x[4*c7+3]]; \
tv2 = T[2*(uint32_t)x[4*c7+3]+1]; \
ROTATE_COLUMN_DOWN(tv1,tv2,3,t) \
tl ^= tv1; \
tu ^= tv2; \
y[i] = tu; \
y[i+1] = tl;
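/* COLUMN assembles one 64-bit state column from eight T-table lookups.
   Each table entry supplies a 64-bit value split across two 32-bit
   words (tu, tl); ROTATE_COLUMN_DOWN realigns entries for rows shifted
   by 1..3 bytes, and rows 4..7 reuse the same tables with the halves
   swapped. */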
/* compute one round of P (short variants) */
static void RND512P(uint8_t *x, uint32_t *y, uint32_t r) {
uint32_t temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp;
uint32_t* x32 = (uint32_t*)x;
x32[ 0] ^= 0x00000000^r;
x32[ 2] ^= 0x00000010^r;
x32[ 4] ^= 0x00000020^r;
x32[ 6] ^= 0x00000030^r;
x32[ 8] ^= 0x00000040^r;
x32[10] ^= 0x00000050^r;
x32[12] ^= 0x00000060^r;
x32[14] ^= 0x00000070^r;
COLUMN(x,y, 0, 0, 2, 4, 6, 9, 11, 13, 15, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
COLUMN(x,y, 2, 2, 4, 6, 8, 11, 13, 15, 1, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
COLUMN(x,y, 4, 4, 6, 8, 10, 13, 15, 1, 3, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
COLUMN(x,y, 6, 6, 8, 10, 12, 15, 1, 3, 5, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
COLUMN(x,y, 8, 8, 10, 12, 14, 1, 3, 5, 7, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
COLUMN(x,y,10, 10, 12, 14, 0, 3, 5, 7, 9, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
COLUMN(x,y,12, 12, 14, 0, 2, 5, 7, 9, 11, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
COLUMN(x,y,14, 14, 0, 2, 4, 7, 9, 11, 13, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
}
/* compute one round of Q (short variants) */
static void RND512Q(uint8_t *x, uint32_t *y, uint32_t r) {
uint32_t temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp;
uint32_t* x32 = (uint32_t*)x;
x32[ 0] = ~x32[ 0];
x32[ 1] ^= 0xffffffff^r;
x32[ 2] = ~x32[ 2];
x32[ 3] ^= 0xefffffff^r;
x32[ 4] = ~x32[ 4];
x32[ 5] ^= 0xdfffffff^r;
x32[ 6] = ~x32[ 6];
x32[ 7] ^= 0xcfffffff^r;
x32[ 8] = ~x32[ 8];
x32[ 9] ^= 0xbfffffff^r;
x32[10] = ~x32[10];
x32[11] ^= 0xafffffff^r;
x32[12] = ~x32[12];
x32[13] ^= 0x9fffffff^r;
x32[14] = ~x32[14];
x32[15] ^= 0x8fffffff^r;
COLUMN(x,y, 0, 2, 6, 10, 14, 1, 5, 9, 13, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
COLUMN(x,y, 2, 4, 8, 12, 0, 3, 7, 11, 15, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
COLUMN(x,y, 4, 6, 10, 14, 2, 5, 9, 13, 1, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
COLUMN(x,y, 6, 8, 12, 0, 4, 7, 11, 15, 3, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
COLUMN(x,y, 8, 10, 14, 2, 6, 9, 13, 1, 5, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
COLUMN(x,y,10, 12, 0, 4, 8, 11, 15, 3, 7, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
COLUMN(x,y,12, 14, 2, 6, 10, 13, 1, 5, 9, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
COLUMN(x,y,14, 0, 4, 8, 12, 15, 3, 7, 11, temp_v1, temp_v2, temp_upper_value, temp_lower_value, temp);
}
/* compute compression function (short variants) */
static void F512(uint32_t *h, const uint32_t *m) {
int i;
uint32_t Ptmp[2*COLS512];
uint32_t Qtmp[2*COLS512];
uint32_t y[2*COLS512];
uint32_t z[2*COLS512];
for (i = 0; i < 2*COLS512; i++) {
z[i] = m[i];
Ptmp[i] = h[i]^m[i];
}
/* compute Q(m) */
RND512Q((uint8_t*)z, y, 0x00000000);
RND512Q((uint8_t*)y, z, 0x01000000);
RND512Q((uint8_t*)z, y, 0x02000000);
RND512Q((uint8_t*)y, z, 0x03000000);
RND512Q((uint8_t*)z, y, 0x04000000);
RND512Q((uint8_t*)y, z, 0x05000000);
RND512Q((uint8_t*)z, y, 0x06000000);
RND512Q((uint8_t*)y, z, 0x07000000);
RND512Q((uint8_t*)z, y, 0x08000000);
RND512Q((uint8_t*)y, Qtmp, 0x09000000);
/* compute P(h+m) */
RND512P((uint8_t*)Ptmp, y, 0x00000000);
RND512P((uint8_t*)y, z, 0x00000001);
RND512P((uint8_t*)z, y, 0x00000002);
RND512P((uint8_t*)y, z, 0x00000003);
RND512P((uint8_t*)z, y, 0x00000004);
RND512P((uint8_t*)y, z, 0x00000005);
RND512P((uint8_t*)z, y, 0x00000006);
RND512P((uint8_t*)y, z, 0x00000007);
RND512P((uint8_t*)z, y, 0x00000008);
RND512P((uint8_t*)y, Ptmp, 0x00000009);
/* compute P(h+m) + Q(m) + h */
for (i = 0; i < 2*COLS512; i++) {
h[i] ^= Ptmp[i]^Qtmp[i];
}
}
/* digest up to msglen bytes of input (full blocks only) */
static void Transform(groestlHashState *ctx,
const uint8_t *input,
int msglen) {
/* digest message, one block at a time */
for (; msglen >= SIZE512;
msglen -= SIZE512, input += SIZE512) {
F512(ctx->chaining,(uint32_t*)input);
/* increment block counter */
ctx->block_counter1++;
if (ctx->block_counter1 == 0) ctx->block_counter2++;
}
}
/* given state h, do h <- P(h)+h */
static void OutputTransformation(groestlHashState *ctx) {
int j;
uint32_t temp[2*COLS512];
uint32_t y[2*COLS512];
uint32_t z[2*COLS512];
for (j = 0; j < 2*COLS512; j++) {
temp[j] = ctx->chaining[j];
}
RND512P((uint8_t*)temp, y, 0x00000000);
RND512P((uint8_t*)y, z, 0x00000001);
RND512P((uint8_t*)z, y, 0x00000002);
RND512P((uint8_t*)y, z, 0x00000003);
RND512P((uint8_t*)z, y, 0x00000004);
RND512P((uint8_t*)y, z, 0x00000005);
RND512P((uint8_t*)z, y, 0x00000006);
RND512P((uint8_t*)y, z, 0x00000007);
RND512P((uint8_t*)z, y, 0x00000008);
RND512P((uint8_t*)y, temp, 0x00000009);
for (j = 0; j < 2*COLS512; j++) {
ctx->chaining[j] ^= temp[j];
}
}
/* initialise context */
static void Init(groestlHashState* ctx) {
int i = 0;
/* allocate memory for state and data buffer */
for(;i<(SIZE512/sizeof(uint32_t));i++)
{
ctx->chaining[i] = 0;
}
/* set initial value */
ctx->chaining[2*COLS512-1] = u32BIG((uint32_t)HASH_BIT_LEN);
/* set other variables */
ctx->buf_ptr = 0;
ctx->block_counter1 = 0;
ctx->block_counter2 = 0;
ctx->bits_in_last_byte = 0;
}
/* update state with databitlen bits of input */
static void Update(groestlHashState* ctx,
const BitSequence* input,
DataLength databitlen) {
int index = 0;
int msglen = (int)(databitlen/8);
int rem = (int)(databitlen%8);
/* if the buffer contains data that has not yet been digested, first
add data to buffer until full */
if (ctx->buf_ptr) {
while (ctx->buf_ptr < SIZE512 && index < msglen) {
ctx->buffer[(int)ctx->buf_ptr++] = input[index++];
}
if (ctx->buf_ptr < SIZE512) {
/* buffer still not full, return */
if (rem) {
ctx->bits_in_last_byte = rem;
ctx->buffer[(int)ctx->buf_ptr++] = input[index];
}
return;
}
/* digest buffer */
ctx->buf_ptr = 0;
Transform(ctx, ctx->buffer, SIZE512);
}
/* digest bulk of message */
Transform(ctx, input+index, msglen-index);
index += ((msglen-index)/SIZE512)*SIZE512;
/* store remaining data in buffer */
while (index < msglen) {
ctx->buffer[(int)ctx->buf_ptr++] = input[index++];
}
/* if non-integral number of bytes have been supplied, store
remaining bits in last byte, together with information about
number of bits */
if (rem) {
ctx->bits_in_last_byte = rem;
ctx->buffer[(int)ctx->buf_ptr++] = input[index];
}
}
#define BILB ctx->bits_in_last_byte
/* finalise: process remaining data (including padding), perform
output transformation, and write hash result to 'output' */
static void Final(groestlHashState* ctx,
BitSequence* output) {
int i, j = 0, hashbytelen = HASH_BIT_LEN/8;
uint8_t *s = (BitSequence*)ctx->chaining;
/* pad with '1'-bit and first few '0'-bits */
if (BILB) {
ctx->buffer[(int)ctx->buf_ptr-1] &= ((1<<BILB)-1)<<(8-BILB);
ctx->buffer[(int)ctx->buf_ptr-1] ^= 0x1<<(7-BILB);
BILB = 0;
}
else ctx->buffer[(int)ctx->buf_ptr++] = 0x80;
/* pad with '0'-bits */
if (ctx->buf_ptr > SIZE512-LENGTHFIELDLEN) {
/* padding requires two blocks */
while (ctx->buf_ptr < SIZE512) {
ctx->buffer[(int)ctx->buf_ptr++] = 0;
}
/* digest first padding block */
Transform(ctx, ctx->buffer, SIZE512);
ctx->buf_ptr = 0;
}
while (ctx->buf_ptr < SIZE512-LENGTHFIELDLEN) {
ctx->buffer[(int)ctx->buf_ptr++] = 0;
}
/* length padding */
ctx->block_counter1++;
if (ctx->block_counter1 == 0) ctx->block_counter2++;
ctx->buf_ptr = SIZE512;
while (ctx->buf_ptr > SIZE512-(int)sizeof(uint32_t)) {
ctx->buffer[(int)--ctx->buf_ptr] = (uint8_t)ctx->block_counter1;
ctx->block_counter1 >>= 8;
}
while (ctx->buf_ptr > SIZE512-LENGTHFIELDLEN) {
ctx->buffer[(int)--ctx->buf_ptr] = (uint8_t)ctx->block_counter2;
ctx->block_counter2 >>= 8;
}
/* digest final padding block */
Transform(ctx, ctx->buffer, SIZE512);
/* perform output transformation */
OutputTransformation(ctx);
/* store hash result in output */
for (i = SIZE512-hashbytelen; i < SIZE512; i++,j++) {
output[j] = s[i];
}
/* zeroise relevant variables and deallocate memory */
for (i = 0; i < COLS512; i++) {
ctx->chaining[i] = 0;
}
for (i = 0; i < SIZE512; i++) {
ctx->buffer[i] = 0;
}
}
/* hash bit sequence */
void groestl(const BitSequence* data,
DataLength databitlen,
BitSequence* hashval) {
groestlHashState context;
/* initialise */
Init(&context);
/* process message */
Update(&context, data, databitlen);
/* finalise */
Final(&context, hashval);
}
/*
static int crypto_hash(unsigned char *out,
const unsigned char *in,
unsigned long long len)
{
groestl(in, 8*len, out);
return 0;
}
*/

View File

@@ -1,60 +0,0 @@
#ifndef __hash_h
#define __hash_h
/*
#include "crypto_uint8.h"
#include "crypto_uint32.h"
#include "crypto_uint64.h"
#include "crypto_hash.h"
typedef crypto_uint8 uint8_t;
typedef crypto_uint32 uint32_t;
typedef crypto_uint64 uint64_t;
*/
#include <stdint.h>
#include "hash.h"
/* some sizes (number of bytes) */
#define ROWS 8
#define LENGTHFIELDLEN ROWS
#define COLS512 8
#define SIZE512 (ROWS*COLS512)
#define ROUNDS512 10
#define HASH_BIT_LEN 256
#define ROTL32(v, n) ((((v)<<(n))|((v)>>(32-(n))))&li_32(ffffffff))
#define li_32(h) 0x##h##u
#define EXT_BYTE(var,n) ((uint8_t)((uint32_t)(var) >> (8*n)))
#define u32BIG(a) \
((ROTL32(a,8) & li_32(00FF00FF)) | \
(ROTL32(a,24) & li_32(FF00FF00)))
/* NIST API begin */
typedef struct {
uint32_t chaining[SIZE512/sizeof(uint32_t)]; /* actual state */
uint32_t block_counter1,
block_counter2; /* message block counter(s) */
BitSequence buffer[SIZE512]; /* data buffer */
int buf_ptr; /* data buffer pointer */
int bits_in_last_byte; /* no. of message bits in last byte of
data buffer */
} groestlHashState;
/*void Init(hashState*);
void Update(hashState*, const BitSequence*, DataLength);
void Final(hashState*, BitSequence*); */
void groestl(const BitSequence*, DataLength, BitSequence*);
/* NIST API end */
/*
int crypto_hash(unsigned char *out,
const unsigned char *in,
unsigned long long len);
*/
#endif /* __hash_h */

View File

@@ -1,366 +0,0 @@
/*This program gives the 64-bit optimized bitslice implementation of JH using ANSI C
--------------------------------
Performance
Microprocessor: Intel CORE 2 processor (Core 2 Duo Mobile T6600 2.2GHz)
Operating System: 64-bit Ubuntu 10.04 (Linux kernel 2.6.32-22-generic)
Speed for long message:
1) 45.8 cycles/byte compiler: Intel C++ Compiler 11.1 compilation option: icc -O2
2) 56.8 cycles/byte compiler: gcc 4.4.3 compilation option: gcc -O3
--------------------------------
Last Modified: January 16, 2011
*/
#include "c_jh.h"
#include <stdint.h>
#include <string.h>
/*typedef unsigned long long uint64;*/
typedef uint64_t uint64;
/*define data alignment for different C compilers*/
#if defined(__GNUC__)
#define DATA_ALIGN16(x) x __attribute__ ((aligned(16)))
#else
#define DATA_ALIGN16(x) __declspec(align(16)) x
#endif
typedef struct {
int hashbitlen; /*the message digest size*/
unsigned long long databitlen; /*the message size in bits*/
unsigned long long datasize_in_buffer; /*the size of the message remained in buffer; assumed to be multiple of 8bits except for the last partial block at the end of the message*/
DATA_ALIGN16(uint64 x[8][2]); /*the 1024-bit state, ( x[i][0] || x[i][1] ) is the ith row of the state in the pseudocode*/
unsigned char buffer[64]; /*the 512-bit message block to be hashed;*/
} hashState;
/*The initial hash value H(0)*/
const unsigned char JH224_H0[128]={0x2d,0xfe,0xdd,0x62,0xf9,0x9a,0x98,0xac,0xae,0x7c,0xac,0xd6,0x19,0xd6,0x34,0xe7,0xa4,0x83,0x10,0x5,0xbc,0x30,0x12,0x16,0xb8,0x60,0x38,0xc6,0xc9,0x66,0x14,0x94,0x66,0xd9,0x89,0x9f,0x25,0x80,0x70,0x6f,0xce,0x9e,0xa3,0x1b,0x1d,0x9b,0x1a,0xdc,0x11,0xe8,0x32,0x5f,0x7b,0x36,0x6e,0x10,0xf9,0x94,0x85,0x7f,0x2,0xfa,0x6,0xc1,0x1b,0x4f,0x1b,0x5c,0xd8,0xc8,0x40,0xb3,0x97,0xf6,0xa1,0x7f,0x6e,0x73,0x80,0x99,0xdc,0xdf,0x93,0xa5,0xad,0xea,0xa3,0xd3,0xa4,0x31,0xe8,0xde,0xc9,0x53,0x9a,0x68,0x22,0xb4,0xa9,0x8a,0xec,0x86,0xa1,0xe4,0xd5,0x74,0xac,0x95,0x9c,0xe5,0x6c,0xf0,0x15,0x96,0xd,0xea,0xb5,0xab,0x2b,0xbf,0x96,0x11,0xdc,0xf0,0xdd,0x64,0xea,0x6e};
const unsigned char JH256_H0[128]={0xeb,0x98,0xa3,0x41,0x2c,0x20,0xd3,0xeb,0x92,0xcd,0xbe,0x7b,0x9c,0xb2,0x45,0xc1,0x1c,0x93,0x51,0x91,0x60,0xd4,0xc7,0xfa,0x26,0x0,0x82,0xd6,0x7e,0x50,0x8a,0x3,0xa4,0x23,0x9e,0x26,0x77,0x26,0xb9,0x45,0xe0,0xfb,0x1a,0x48,0xd4,0x1a,0x94,0x77,0xcd,0xb5,0xab,0x26,0x2,0x6b,0x17,0x7a,0x56,0xf0,0x24,0x42,0xf,0xff,0x2f,0xa8,0x71,0xa3,0x96,0x89,0x7f,0x2e,0x4d,0x75,0x1d,0x14,0x49,0x8,0xf7,0x7d,0xe2,0x62,0x27,0x76,0x95,0xf7,0x76,0x24,0x8f,0x94,0x87,0xd5,0xb6,0x57,0x47,0x80,0x29,0x6c,0x5c,0x5e,0x27,0x2d,0xac,0x8e,0xd,0x6c,0x51,0x84,0x50,0xc6,0x57,0x5,0x7a,0xf,0x7b,0xe4,0xd3,0x67,0x70,0x24,0x12,0xea,0x89,0xe3,0xab,0x13,0xd3,0x1c,0xd7,0x69};
const unsigned char JH384_H0[128]={0x48,0x1e,0x3b,0xc6,0xd8,0x13,0x39,0x8a,0x6d,0x3b,0x5e,0x89,0x4a,0xde,0x87,0x9b,0x63,0xfa,0xea,0x68,0xd4,0x80,0xad,0x2e,0x33,0x2c,0xcb,0x21,0x48,0xf,0x82,0x67,0x98,0xae,0xc8,0x4d,0x90,0x82,0xb9,0x28,0xd4,0x55,0xea,0x30,0x41,0x11,0x42,0x49,0x36,0xf5,0x55,0xb2,0x92,0x48,0x47,0xec,0xc7,0x25,0xa,0x93,0xba,0xf4,0x3c,0xe1,0x56,0x9b,0x7f,0x8a,0x27,0xdb,0x45,0x4c,0x9e,0xfc,0xbd,0x49,0x63,0x97,0xaf,0xe,0x58,0x9f,0xc2,0x7d,0x26,0xaa,0x80,0xcd,0x80,0xc0,0x8b,0x8c,0x9d,0xeb,0x2e,0xda,0x8a,0x79,0x81,0xe8,0xf8,0xd5,0x37,0x3a,0xf4,0x39,0x67,0xad,0xdd,0xd1,0x7a,0x71,0xa9,0xb4,0xd3,0xbd,0xa4,0x75,0xd3,0x94,0x97,0x6c,0x3f,0xba,0x98,0x42,0x73,0x7f};
const unsigned char JH512_H0[128]={0x6f,0xd1,0x4b,0x96,0x3e,0x0,0xaa,0x17,0x63,0x6a,0x2e,0x5,0x7a,0x15,0xd5,0x43,0x8a,0x22,0x5e,0x8d,0xc,0x97,0xef,0xb,0xe9,0x34,0x12,0x59,0xf2,0xb3,0xc3,0x61,0x89,0x1d,0xa0,0xc1,0x53,0x6f,0x80,0x1e,0x2a,0xa9,0x5,0x6b,0xea,0x2b,0x6d,0x80,0x58,0x8e,0xcc,0xdb,0x20,0x75,0xba,0xa6,0xa9,0xf,0x3a,0x76,0xba,0xf8,0x3b,0xf7,0x1,0x69,0xe6,0x5,0x41,0xe3,0x4a,0x69,0x46,0xb5,0x8a,0x8e,0x2e,0x6f,0xe6,0x5a,0x10,0x47,0xa7,0xd0,0xc1,0x84,0x3c,0x24,0x3b,0x6e,0x71,0xb1,0x2d,0x5a,0xc1,0x99,0xcf,0x57,0xf6,0xec,0x9d,0xb1,0xf8,0x56,0xa7,0x6,0x88,0x7c,0x57,0x16,0xb1,0x56,0xe3,0xc2,0xfc,0xdf,0xe6,0x85,0x17,0xfb,0x54,0x5a,0x46,0x78,0xcc,0x8c,0xdd,0x4b};
/*42 round constants, each round constant is 32-byte (256-bit)*/
const unsigned char E8_bitslice_roundconstant[42][32]={
{0x72,0xd5,0xde,0xa2,0xdf,0x15,0xf8,0x67,0x7b,0x84,0x15,0xa,0xb7,0x23,0x15,0x57,0x81,0xab,0xd6,0x90,0x4d,0x5a,0x87,0xf6,0x4e,0x9f,0x4f,0xc5,0xc3,0xd1,0x2b,0x40},
{0xea,0x98,0x3a,0xe0,0x5c,0x45,0xfa,0x9c,0x3,0xc5,0xd2,0x99,0x66,0xb2,0x99,0x9a,0x66,0x2,0x96,0xb4,0xf2,0xbb,0x53,0x8a,0xb5,0x56,0x14,0x1a,0x88,0xdb,0xa2,0x31},
{0x3,0xa3,0x5a,0x5c,0x9a,0x19,0xe,0xdb,0x40,0x3f,0xb2,0xa,0x87,0xc1,0x44,0x10,0x1c,0x5,0x19,0x80,0x84,0x9e,0x95,0x1d,0x6f,0x33,0xeb,0xad,0x5e,0xe7,0xcd,0xdc},
{0x10,0xba,0x13,0x92,0x2,0xbf,0x6b,0x41,0xdc,0x78,0x65,0x15,0xf7,0xbb,0x27,0xd0,0xa,0x2c,0x81,0x39,0x37,0xaa,0x78,0x50,0x3f,0x1a,0xbf,0xd2,0x41,0x0,0x91,0xd3},
{0x42,0x2d,0x5a,0xd,0xf6,0xcc,0x7e,0x90,0xdd,0x62,0x9f,0x9c,0x92,0xc0,0x97,0xce,0x18,0x5c,0xa7,0xb,0xc7,0x2b,0x44,0xac,0xd1,0xdf,0x65,0xd6,0x63,0xc6,0xfc,0x23},
{0x97,0x6e,0x6c,0x3,0x9e,0xe0,0xb8,0x1a,0x21,0x5,0x45,0x7e,0x44,0x6c,0xec,0xa8,0xee,0xf1,0x3,0xbb,0x5d,0x8e,0x61,0xfa,0xfd,0x96,0x97,0xb2,0x94,0x83,0x81,0x97},
{0x4a,0x8e,0x85,0x37,0xdb,0x3,0x30,0x2f,0x2a,0x67,0x8d,0x2d,0xfb,0x9f,0x6a,0x95,0x8a,0xfe,0x73,0x81,0xf8,0xb8,0x69,0x6c,0x8a,0xc7,0x72,0x46,0xc0,0x7f,0x42,0x14},
{0xc5,0xf4,0x15,0x8f,0xbd,0xc7,0x5e,0xc4,0x75,0x44,0x6f,0xa7,0x8f,0x11,0xbb,0x80,0x52,0xde,0x75,0xb7,0xae,0xe4,0x88,0xbc,0x82,0xb8,0x0,0x1e,0x98,0xa6,0xa3,0xf4},
{0x8e,0xf4,0x8f,0x33,0xa9,0xa3,0x63,0x15,0xaa,0x5f,0x56,0x24,0xd5,0xb7,0xf9,0x89,0xb6,0xf1,0xed,0x20,0x7c,0x5a,0xe0,0xfd,0x36,0xca,0xe9,0x5a,0x6,0x42,0x2c,0x36},
{0xce,0x29,0x35,0x43,0x4e,0xfe,0x98,0x3d,0x53,0x3a,0xf9,0x74,0x73,0x9a,0x4b,0xa7,0xd0,0xf5,0x1f,0x59,0x6f,0x4e,0x81,0x86,0xe,0x9d,0xad,0x81,0xaf,0xd8,0x5a,0x9f},
{0xa7,0x5,0x6,0x67,0xee,0x34,0x62,0x6a,0x8b,0xb,0x28,0xbe,0x6e,0xb9,0x17,0x27,0x47,0x74,0x7,0x26,0xc6,0x80,0x10,0x3f,0xe0,0xa0,0x7e,0x6f,0xc6,0x7e,0x48,0x7b},
{0xd,0x55,0xa,0xa5,0x4a,0xf8,0xa4,0xc0,0x91,0xe3,0xe7,0x9f,0x97,0x8e,0xf1,0x9e,0x86,0x76,0x72,0x81,0x50,0x60,0x8d,0xd4,0x7e,0x9e,0x5a,0x41,0xf3,0xe5,0xb0,0x62},
{0xfc,0x9f,0x1f,0xec,0x40,0x54,0x20,0x7a,0xe3,0xe4,0x1a,0x0,0xce,0xf4,0xc9,0x84,0x4f,0xd7,0x94,0xf5,0x9d,0xfa,0x95,0xd8,0x55,0x2e,0x7e,0x11,0x24,0xc3,0x54,0xa5},
{0x5b,0xdf,0x72,0x28,0xbd,0xfe,0x6e,0x28,0x78,0xf5,0x7f,0xe2,0xf,0xa5,0xc4,0xb2,0x5,0x89,0x7c,0xef,0xee,0x49,0xd3,0x2e,0x44,0x7e,0x93,0x85,0xeb,0x28,0x59,0x7f},
{0x70,0x5f,0x69,0x37,0xb3,0x24,0x31,0x4a,0x5e,0x86,0x28,0xf1,0x1d,0xd6,0xe4,0x65,0xc7,0x1b,0x77,0x4,0x51,0xb9,0x20,0xe7,0x74,0xfe,0x43,0xe8,0x23,0xd4,0x87,0x8a},
{0x7d,0x29,0xe8,0xa3,0x92,0x76,0x94,0xf2,0xdd,0xcb,0x7a,0x9,0x9b,0x30,0xd9,0xc1,0x1d,0x1b,0x30,0xfb,0x5b,0xdc,0x1b,0xe0,0xda,0x24,0x49,0x4f,0xf2,0x9c,0x82,0xbf},
{0xa4,0xe7,0xba,0x31,0xb4,0x70,0xbf,0xff,0xd,0x32,0x44,0x5,0xde,0xf8,0xbc,0x48,0x3b,0xae,0xfc,0x32,0x53,0xbb,0xd3,0x39,0x45,0x9f,0xc3,0xc1,0xe0,0x29,0x8b,0xa0},
{0xe5,0xc9,0x5,0xfd,0xf7,0xae,0x9,0xf,0x94,0x70,0x34,0x12,0x42,0x90,0xf1,0x34,0xa2,0x71,0xb7,0x1,0xe3,0x44,0xed,0x95,0xe9,0x3b,0x8e,0x36,0x4f,0x2f,0x98,0x4a},
{0x88,0x40,0x1d,0x63,0xa0,0x6c,0xf6,0x15,0x47,0xc1,0x44,0x4b,0x87,0x52,0xaf,0xff,0x7e,0xbb,0x4a,0xf1,0xe2,0xa,0xc6,0x30,0x46,0x70,0xb6,0xc5,0xcc,0x6e,0x8c,0xe6},
{0xa4,0xd5,0xa4,0x56,0xbd,0x4f,0xca,0x0,0xda,0x9d,0x84,0x4b,0xc8,0x3e,0x18,0xae,0x73,0x57,0xce,0x45,0x30,0x64,0xd1,0xad,0xe8,0xa6,0xce,0x68,0x14,0x5c,0x25,0x67},
{0xa3,0xda,0x8c,0xf2,0xcb,0xe,0xe1,0x16,0x33,0xe9,0x6,0x58,0x9a,0x94,0x99,0x9a,0x1f,0x60,0xb2,0x20,0xc2,0x6f,0x84,0x7b,0xd1,0xce,0xac,0x7f,0xa0,0xd1,0x85,0x18},
{0x32,0x59,0x5b,0xa1,0x8d,0xdd,0x19,0xd3,0x50,0x9a,0x1c,0xc0,0xaa,0xa5,0xb4,0x46,0x9f,0x3d,0x63,0x67,0xe4,0x4,0x6b,0xba,0xf6,0xca,0x19,0xab,0xb,0x56,0xee,0x7e},
{0x1f,0xb1,0x79,0xea,0xa9,0x28,0x21,0x74,0xe9,0xbd,0xf7,0x35,0x3b,0x36,0x51,0xee,0x1d,0x57,0xac,0x5a,0x75,0x50,0xd3,0x76,0x3a,0x46,0xc2,0xfe,0xa3,0x7d,0x70,0x1},
{0xf7,0x35,0xc1,0xaf,0x98,0xa4,0xd8,0x42,0x78,0xed,0xec,0x20,0x9e,0x6b,0x67,0x79,0x41,0x83,0x63,0x15,0xea,0x3a,0xdb,0xa8,0xfa,0xc3,0x3b,0x4d,0x32,0x83,0x2c,0x83},
{0xa7,0x40,0x3b,0x1f,0x1c,0x27,0x47,0xf3,0x59,0x40,0xf0,0x34,0xb7,0x2d,0x76,0x9a,0xe7,0x3e,0x4e,0x6c,0xd2,0x21,0x4f,0xfd,0xb8,0xfd,0x8d,0x39,0xdc,0x57,0x59,0xef},
{0x8d,0x9b,0xc,0x49,0x2b,0x49,0xeb,0xda,0x5b,0xa2,0xd7,0x49,0x68,0xf3,0x70,0xd,0x7d,0x3b,0xae,0xd0,0x7a,0x8d,0x55,0x84,0xf5,0xa5,0xe9,0xf0,0xe4,0xf8,0x8e,0x65},
{0xa0,0xb8,0xa2,0xf4,0x36,0x10,0x3b,0x53,0xc,0xa8,0x7,0x9e,0x75,0x3e,0xec,0x5a,0x91,0x68,0x94,0x92,0x56,0xe8,0x88,0x4f,0x5b,0xb0,0x5c,0x55,0xf8,0xba,0xbc,0x4c},
{0xe3,0xbb,0x3b,0x99,0xf3,0x87,0x94,0x7b,0x75,0xda,0xf4,0xd6,0x72,0x6b,0x1c,0x5d,0x64,0xae,0xac,0x28,0xdc,0x34,0xb3,0x6d,0x6c,0x34,0xa5,0x50,0xb8,0x28,0xdb,0x71},
{0xf8,0x61,0xe2,0xf2,0x10,0x8d,0x51,0x2a,0xe3,0xdb,0x64,0x33,0x59,0xdd,0x75,0xfc,0x1c,0xac,0xbc,0xf1,0x43,0xce,0x3f,0xa2,0x67,0xbb,0xd1,0x3c,0x2,0xe8,0x43,0xb0},
{0x33,0xa,0x5b,0xca,0x88,0x29,0xa1,0x75,0x7f,0x34,0x19,0x4d,0xb4,0x16,0x53,0x5c,0x92,0x3b,0x94,0xc3,0xe,0x79,0x4d,0x1e,0x79,0x74,0x75,0xd7,0xb6,0xee,0xaf,0x3f},
{0xea,0xa8,0xd4,0xf7,0xbe,0x1a,0x39,0x21,0x5c,0xf4,0x7e,0x9,0x4c,0x23,0x27,0x51,0x26,0xa3,0x24,0x53,0xba,0x32,0x3c,0xd2,0x44,0xa3,0x17,0x4a,0x6d,0xa6,0xd5,0xad},
{0xb5,0x1d,0x3e,0xa6,0xaf,0xf2,0xc9,0x8,0x83,0x59,0x3d,0x98,0x91,0x6b,0x3c,0x56,0x4c,0xf8,0x7c,0xa1,0x72,0x86,0x60,0x4d,0x46,0xe2,0x3e,0xcc,0x8,0x6e,0xc7,0xf6},
{0x2f,0x98,0x33,0xb3,0xb1,0xbc,0x76,0x5e,0x2b,0xd6,0x66,0xa5,0xef,0xc4,0xe6,0x2a,0x6,0xf4,0xb6,0xe8,0xbe,0xc1,0xd4,0x36,0x74,0xee,0x82,0x15,0xbc,0xef,0x21,0x63},
{0xfd,0xc1,0x4e,0xd,0xf4,0x53,0xc9,0x69,0xa7,0x7d,0x5a,0xc4,0x6,0x58,0x58,0x26,0x7e,0xc1,0x14,0x16,0x6,0xe0,0xfa,0x16,0x7e,0x90,0xaf,0x3d,0x28,0x63,0x9d,0x3f},
{0xd2,0xc9,0xf2,0xe3,0x0,0x9b,0xd2,0xc,0x5f,0xaa,0xce,0x30,0xb7,0xd4,0xc,0x30,0x74,0x2a,0x51,0x16,0xf2,0xe0,0x32,0x98,0xd,0xeb,0x30,0xd8,0xe3,0xce,0xf8,0x9a},
{0x4b,0xc5,0x9e,0x7b,0xb5,0xf1,0x79,0x92,0xff,0x51,0xe6,0x6e,0x4,0x86,0x68,0xd3,0x9b,0x23,0x4d,0x57,0xe6,0x96,0x67,0x31,0xcc,0xe6,0xa6,0xf3,0x17,0xa,0x75,0x5},
{0xb1,0x76,0x81,0xd9,0x13,0x32,0x6c,0xce,0x3c,0x17,0x52,0x84,0xf8,0x5,0xa2,0x62,0xf4,0x2b,0xcb,0xb3,0x78,0x47,0x15,0x47,0xff,0x46,0x54,0x82,0x23,0x93,0x6a,0x48},
{0x38,0xdf,0x58,0x7,0x4e,0x5e,0x65,0x65,0xf2,0xfc,0x7c,0x89,0xfc,0x86,0x50,0x8e,0x31,0x70,0x2e,0x44,0xd0,0xb,0xca,0x86,0xf0,0x40,0x9,0xa2,0x30,0x78,0x47,0x4e},
{0x65,0xa0,0xee,0x39,0xd1,0xf7,0x38,0x83,0xf7,0x5e,0xe9,0x37,0xe4,0x2c,0x3a,0xbd,0x21,0x97,0xb2,0x26,0x1,0x13,0xf8,0x6f,0xa3,0x44,0xed,0xd1,0xef,0x9f,0xde,0xe7},
{0x8b,0xa0,0xdf,0x15,0x76,0x25,0x92,0xd9,0x3c,0x85,0xf7,0xf6,0x12,0xdc,0x42,0xbe,0xd8,0xa7,0xec,0x7c,0xab,0x27,0xb0,0x7e,0x53,0x8d,0x7d,0xda,0xaa,0x3e,0xa8,0xde},
{0xaa,0x25,0xce,0x93,0xbd,0x2,0x69,0xd8,0x5a,0xf6,0x43,0xfd,0x1a,0x73,0x8,0xf9,0xc0,0x5f,0xef,0xda,0x17,0x4a,0x19,0xa5,0x97,0x4d,0x66,0x33,0x4c,0xfd,0x21,0x6a},
{0x35,0xb4,0x98,0x31,0xdb,0x41,0x15,0x70,0xea,0x1e,0xf,0xbb,0xed,0xcd,0x54,0x9b,0x9a,0xd0,0x63,0xa1,0x51,0x97,0x40,0x72,0xf6,0x75,0x9d,0xbf,0x91,0x47,0x6f,0xe2}};
static void E8(hashState *state); /*The bijective function E8, in bitslice form*/
static void F8(hashState *state); /*The compression function F8 */
/*The API functions*/
static HashReturn Init(hashState *state, int hashbitlen);
static HashReturn Update(hashState *state, const BitSequence *data, DataLength databitlen);
static HashReturn Final(hashState *state, BitSequence *hashval);
HashReturn jh_hash(int hashbitlen, const BitSequence *data, DataLength databitlen, BitSequence *hashval);
/*swapping bit 2i with bit 2i+1 of 64-bit x*/
#define SWAP1(x) (x) = ((((x) & 0x5555555555555555ULL) << 1) | (((x) & 0xaaaaaaaaaaaaaaaaULL) >> 1));
/*swapping bits 4i||4i+1 with bits 4i+2||4i+3 of 64-bit x*/
#define SWAP2(x) (x) = ((((x) & 0x3333333333333333ULL) << 2) | (((x) & 0xccccccccccccccccULL) >> 2));
/*swapping bits 8i||8i+1||8i+2||8i+3 with bits 8i+4||8i+5||8i+6||8i+7 of 64-bit x*/
#define SWAP4(x) (x) = ((((x) & 0x0f0f0f0f0f0f0f0fULL) << 4) | (((x) & 0xf0f0f0f0f0f0f0f0ULL) >> 4));
/*swapping bits 16i||16i+1||......||16i+7 with bits 16i+8||16i+9||......||16i+15 of 64-bit x*/
#define SWAP8(x) (x) = ((((x) & 0x00ff00ff00ff00ffULL) << 8) | (((x) & 0xff00ff00ff00ff00ULL) >> 8));
/*swapping bits 32i||32i+1||......||32i+15 with bits 32i+16||32i+17||......||32i+31 of 64-bit x*/
#define SWAP16(x) (x) = ((((x) & 0x0000ffff0000ffffULL) << 16) | (((x) & 0xffff0000ffff0000ULL) >> 16));
/*swapping bits 64i||64i+1||......||64i+31 with bits 64i+32||64i+33||......||64i+63 of 64-bit x*/
#define SWAP32(x) (x) = (((x) << 32) | ((x) >> 32));
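/*Example: for x = 0x0123456789abcdefULL,
  SWAP8(x)  gives 0x23016745ab89efcd (adjacent bytes swapped),
  SWAP16(x) gives 0x45670123cdef89ab (16-bit halves of each 32-bit word swapped),
  SWAP32(x) gives 0x89abcdef01234567 (32-bit halves swapped).
  One swap width is used per round, cycling through the seven swap layers
  every seven rounds of E8 below.*/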
/*The MDS transform*/
#define L(m0,m1,m2,m3,m4,m5,m6,m7) \
(m4) ^= (m1); \
(m5) ^= (m2); \
(m6) ^= (m0) ^ (m3); \
(m7) ^= (m0); \
(m0) ^= (m5); \
(m1) ^= (m6); \
(m2) ^= (m4) ^ (m7); \
(m3) ^= (m4);
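/*Note: L updates (m4..m7) from (m0..m3) and then (m0..m3) from the new
  (m4..m7), so either half can be recovered from the other and the
  transform is invertible.*/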
/*Two Sboxes are computed in parallel; each Sbox implements S0 and S1, selected by a constant bit*/
/*Computing two Sboxes in parallel helps fully utilize the available instruction-level parallelism*/
#define SS(m0,m1,m2,m3,m4,m5,m6,m7,cc0,cc1) \
m3 = ~(m3); \
m7 = ~(m7); \
m0 ^= ((~(m2)) & (cc0)); \
m4 ^= ((~(m6)) & (cc1)); \
temp0 = (cc0) ^ ((m0) & (m1));\
temp1 = (cc1) ^ ((m4) & (m5));\
m0 ^= ((m2) & (m3)); \
m4 ^= ((m6) & (m7)); \
m3 ^= ((~(m1)) & (m2)); \
m7 ^= ((~(m5)) & (m6)); \
m1 ^= ((m0) & (m2)); \
m5 ^= ((m4) & (m6)); \
m2 ^= ((m0) & (~(m3))); \
m6 ^= ((m4) & (~(m7))); \
m0 ^= ((m1) | (m3)); \
m4 ^= ((m5) | (m7)); \
m3 ^= ((m1) & (m2)); \
m7 ^= ((m5) & (m6)); \
m1 ^= (temp0 & (m0)); \
m5 ^= (temp1 & (m4)); \
m2 ^= temp0; \
m6 ^= temp1;
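/*Note: SS uses the temporaries temp0 and temp1, which must be declared by
  the caller (they are locals of E8 below).*/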
/*The bijective function E8, in bitslice form*/
static void E8(hashState *state)
{
uint64 i,roundnumber,temp0,temp1;
for (roundnumber = 0; roundnumber < 42; roundnumber = roundnumber+7) {
/*round roundnumber+0: Sbox, MDS and Swapping layers*/
for (i = 0; i < 2; i++) {
SS(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i],((uint64*)E8_bitslice_roundconstant[roundnumber+0])[i],((uint64*)E8_bitslice_roundconstant[roundnumber+0])[i+2] );
L(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i]);
SWAP1(state->x[1][i]); SWAP1(state->x[3][i]); SWAP1(state->x[5][i]); SWAP1(state->x[7][i]);
}
/*round roundnumber+1: Sbox, MDS and Swapping layers*/
for (i = 0; i < 2; i++) {
SS(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i],((uint64*)E8_bitslice_roundconstant[roundnumber+1])[i],((uint64*)E8_bitslice_roundconstant[roundnumber+1])[i+2] );
L(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i]);
SWAP2(state->x[1][i]); SWAP2(state->x[3][i]); SWAP2(state->x[5][i]); SWAP2(state->x[7][i]);
}
/*round roundnumber+2: Sbox, MDS and Swapping layers*/
for (i = 0; i < 2; i++) {
SS(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i],((uint64*)E8_bitslice_roundconstant[roundnumber+2])[i],((uint64*)E8_bitslice_roundconstant[roundnumber+2])[i+2] );
L(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i]);
SWAP4(state->x[1][i]); SWAP4(state->x[3][i]); SWAP4(state->x[5][i]); SWAP4(state->x[7][i]);
}
/*round roundnumber+3: Sbox, MDS and Swapping layers*/
for (i = 0; i < 2; i++) {
SS(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i],((uint64*)E8_bitslice_roundconstant[roundnumber+3])[i],((uint64*)E8_bitslice_roundconstant[roundnumber+3])[i+2] );
L(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i]);
SWAP8(state->x[1][i]); SWAP8(state->x[3][i]); SWAP8(state->x[5][i]); SWAP8(state->x[7][i]);
}
/*round roundnumber+4: Sbox, MDS and Swapping layers*/
for (i = 0; i < 2; i++) {
SS(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i],((uint64*)E8_bitslice_roundconstant[roundnumber+4])[i],((uint64*)E8_bitslice_roundconstant[roundnumber+4])[i+2] );
L(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i]);
SWAP16(state->x[1][i]); SWAP16(state->x[3][i]); SWAP16(state->x[5][i]); SWAP16(state->x[7][i]);
}
/*round roundnumber+5: Sbox, MDS and Swapping layers*/
for (i = 0; i < 2; i++) {
SS(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i],((uint64*)E8_bitslice_roundconstant[roundnumber+5])[i],((uint64*)E8_bitslice_roundconstant[roundnumber+5])[i+2] );
L(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i]);
SWAP32(state->x[1][i]); SWAP32(state->x[3][i]); SWAP32(state->x[5][i]); SWAP32(state->x[7][i]);
}
/*round roundnumber+6: Sbox and MDS layers*/
for (i = 0; i < 2; i++) {
SS(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i],((uint64*)E8_bitslice_roundconstant[roundnumber+6])[i],((uint64*)E8_bitslice_roundconstant[roundnumber+6])[i+2] );
L(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i]);
}
/*round roundnumber+6: swapping layer*/
for (i = 1; i < 8; i = i+2) {
temp0 = state->x[i][0]; state->x[i][0] = state->x[i][1]; state->x[i][1] = temp0;
}
}
}
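/*Round structure: each pass of the outer loop performs seven consecutive
  rounds that share the same Sbox and MDS layers and differ only in the
  swapping layer: widths 1, 2, 4, 8, 16 and 32 bits within each 64-bit
  word, then an exchange of the two 64-bit words of each odd-indexed row.
  Six passes of seven rounds give the 42 rounds of E8.*/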
/*The compression function F8 */
static void F8(hashState *state)
{
uint64 i;
/*xor the 512-bit message with the first half of the 1024-bit hash state*/
for (i = 0; i < 8; i++) state->x[i >> 1][i & 1] ^= ((uint64*)state->buffer)[i];
/*the bijective function E8 */
E8(state);
/*xor the 512-bit message with the second half of the 1024-bit hash state*/
for (i = 0; i < 8; i++) state->x[(8+i) >> 1][(8+i) & 1] ^= ((uint64*)state->buffer)[i];
}
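/*Note: the message block is XORed into the first half of the state before
  E8 and into the second half after E8; this is the JH mode of operation.*/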
/*before hashing a message, initialize the hash state as H0 */
static HashReturn Init(hashState *state, int hashbitlen)
{
state->databitlen = 0;
state->datasize_in_buffer = 0;
/*record the message digest size*/
state->hashbitlen = hashbitlen;
/*load the initial hash value into the state*/
switch (hashbitlen)
{
case 224: memcpy(state->x,JH224_H0,128); break;
case 256: memcpy(state->x,JH256_H0,128); break;
case 384: memcpy(state->x,JH384_H0,128); break;
case 512: memcpy(state->x,JH512_H0,128); break;
}
return(SUCCESS);
}
/*hash each 512-bit message block, except the last partial block*/
static HashReturn Update(hashState *state, const BitSequence *data, DataLength databitlen)
{
DataLength index; /*the offset into data of the next block to be compressed*/
state->databitlen += databitlen;
index = 0;
/*if there is remaining data in the buffer, fill it to a full message block first*/
/*we assume that the size of the data in the buffer is a multiple of 8 bits if it is not at the end of a message*/
/*There is data in the buffer, but the incoming data is insufficient for a full block*/
if ( (state->datasize_in_buffer > 0 ) && (( state->datasize_in_buffer + databitlen) < 512) ) {
/*copy only the incoming bytes; copying the remaining buffer capacity would read past the end of data*/
if ( (databitlen & 7) == 0 )
memcpy(state->buffer + (state->datasize_in_buffer >> 3), data, (size_t) (databitlen >> 3) );
else memcpy(state->buffer + (state->datasize_in_buffer >> 3), data, (size_t) ((databitlen >> 3) + 1) );
state->datasize_in_buffer += databitlen;
databitlen = 0;
}
/*There is data in the buffer, and the incoming data is sufficient for a full block*/
if ( (state->datasize_in_buffer > 0 ) && (( state->datasize_in_buffer + databitlen) >= 512) ) {
memcpy(state->buffer + (state->datasize_in_buffer >> 3), data, (size_t) (64-(state->datasize_in_buffer >> 3)) );
index = 64-(state->datasize_in_buffer >> 3);
databitlen = databitlen - (512 - state->datasize_in_buffer);
F8(state);
state->datasize_in_buffer = 0;
}
/*hash the remaining full message blocks*/
for ( ; databitlen >= 512; index = index+64, databitlen = databitlen - 512) {
memcpy(state->buffer, data+index, 64);
F8(state);
}
/*store the partial block into the buffer; if part of the last byte is not part of the message, that part is assumed to consist of 0 bits*/
if ( databitlen > 0) {
if ((databitlen & 7) == 0)
memcpy(state->buffer, data+index, (databitlen & 0x1ff) >> 3);
else
memcpy(state->buffer, data+index, ((databitlen & 0x1ff) >> 3)+1);
state->datasize_in_buffer = databitlen;
}
return(SUCCESS);
}
/*pad the message, process the padded block(s), truncate the hash value H to obtain the message digest*/
static HashReturn Final(hashState *state, BitSequence *hashval)
{
unsigned int i;
if ( (state->databitlen & 0x1ff) == 0 ) {
/*pad the message when databitlen is a multiple of 512 bits, then process the padded block*/
memset(state->buffer, 0, 64);
state->buffer[0] = 0x80;
state->buffer[63] = state->databitlen & 0xff;
state->buffer[62] = (state->databitlen >> 8) & 0xff;
state->buffer[61] = (state->databitlen >> 16) & 0xff;
state->buffer[60] = (state->databitlen >> 24) & 0xff;
state->buffer[59] = (state->databitlen >> 32) & 0xff;
state->buffer[58] = (state->databitlen >> 40) & 0xff;
state->buffer[57] = (state->databitlen >> 48) & 0xff;
state->buffer[56] = (state->databitlen >> 56) & 0xff;
F8(state);
}
else {
/*set the rest of the bytes in the buffer to 0*/
if ( (state->datasize_in_buffer & 7) == 0)
for (i = (state->databitlen & 0x1ff) >> 3; i < 64; i++) state->buffer[i] = 0;
else
for (i = ((state->databitlen & 0x1ff) >> 3)+1; i < 64; i++) state->buffer[i] = 0;
/*pad and process the partial block when databitlen is not a multiple of 512 bits, then hash the padded blocks*/
state->buffer[((state->databitlen & 0x1ff) >> 3)] |= 1 << (7- (state->databitlen & 7));
F8(state);
memset(state->buffer, 0, 64);
state->buffer[63] = state->databitlen & 0xff;
state->buffer[62] = (state->databitlen >> 8) & 0xff;
state->buffer[61] = (state->databitlen >> 16) & 0xff;
state->buffer[60] = (state->databitlen >> 24) & 0xff;
state->buffer[59] = (state->databitlen >> 32) & 0xff;
state->buffer[58] = (state->databitlen >> 40) & 0xff;
state->buffer[57] = (state->databitlen >> 48) & 0xff;
state->buffer[56] = (state->databitlen >> 56) & 0xff;
F8(state);
}
/*truncate the final hash value to generate the message digest*/
switch(state->hashbitlen) {
case 224: memcpy(hashval,(unsigned char*)state->x+64+36,28); break;
case 256: memcpy(hashval,(unsigned char*)state->x+64+32,32); break;
case 384: memcpy(hashval,(unsigned char*)state->x+64+16,48); break;
case 512: memcpy(hashval,(unsigned char*)state->x+64,64); break;
}
return(SUCCESS);
}
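/*Note: each digest is taken from the trailing bytes of the 1024-bit (128
  byte) state: offset 64+36 leaves the last 28 bytes (224 bits), 64+32 the
  last 32 bytes, 64+16 the last 48 bytes, and 64 the last 64 bytes.*/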
/* hash a message,
three inputs: message digest size in bits (hashbitlen); message (data); message length in bits (databitlen)
one output: message digest (hashval)
*/
HashReturn jh_hash(int hashbitlen, const BitSequence *data, DataLength databitlen, BitSequence *hashval)
{
hashState state;
if ( hashbitlen == 224 || hashbitlen == 256 || hashbitlen == 384 || hashbitlen == 512 ) {
Init(&state, hashbitlen);
Update(&state, data, databitlen);
Final(&state, hashval);
return SUCCESS;
}
else
return(BAD_HASHLEN);
}
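/*Usage sketch (illustrative only; jh_hash_example is a hypothetical driver,
  not part of the reference code). It computes the JH-512 digest of the
  3-byte message "abc"; note that databitlen is given in bits, so 3 bytes
  is passed as 24.*/
#if 0
static void jh_hash_example(void)
{
    BitSequence digest[64];                        /* 512 bits = 64 bytes */
    const BitSequence msg[3] = { 'a', 'b', 'c' };
    if ( jh_hash(512, msg, 24, digest) == SUCCESS ) {
        /* digest now holds the JH-512 hash of "abc" */
    }
}
#endif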