Compare commits

...

4 Commits

Author SHA1 Message Date
Jay D Dee
72330eb5a7 v3.9.9 2019-10-10 19:58:34 -04:00
Jay D Dee
789c8b70bc v3.9.8.1 2019-10-01 14:17:36 -04:00
Jay D Dee
01550d94a2 v3.9.8 2019-09-26 22:37:26 -04:00
Jay D Dee
a042fb7612 v3.9.7 2019-08-03 10:39:54 -04:00
65 changed files with 5947 additions and 2507 deletions

View File

@@ -18,7 +18,6 @@ dist_man_MANS = cpuminer.1
cpuminer_SOURCES = \
cpu-miner.c \
util.c \
uint256.cpp \
api.c \
sysinfos.c \
algo-gate-api.c\
@@ -263,6 +262,8 @@ cpuminer_SOURCES = \
algo/x16/x16r-gate.c \
algo/x16/x16r.c \
algo/x16/x16r-4way.c \
algo/x16/x16rv2.c \
algo/x16/x16rv2-4way.c \
algo/x16/x16rt.c \
algo/x16/x16rt-4way.c \
algo/x16/hex.c \
@@ -281,7 +282,9 @@ cpuminer_SOURCES = \
algo/yescrypt/yescrypt.c \
algo/yescrypt/sha256_Y.c \
algo/yescrypt/yescrypt-best.c \
algo/yespower/yespower.c \
algo/yespower/yespower-gate.c \
algo/yespower/yespower-blake2b.c \
algo/yespower/crypto/blake2b-yp.c \
algo/yespower/sha256_p.c \
algo/yespower/yespower-opt.c

View File

@@ -24,7 +24,7 @@ Requirements
1. An x86_64 architecture CPU with a minimum of SSE2 support. This includes
Intel Core2 and newer and AMD equivalents. In order to take advantage of AES_NI
optimizations a CPU with AES_NI is required. This includes Intel Westbridge
optimizations a CPU with AES_NI is required. This includes Intel Westmere
and newer and AMD equivalents. Further optimizations are available on some
algorithms for CPUs with AVX and AVX2, Sandybridge and Haswell respectively.
@@ -87,10 +87,12 @@ Supported Algorithms
neoscrypt NeoScrypt(128, 2, 1)
nist5 Nist5
pentablake Pentablake
phi1612 phi, LUX coin (original algo)
phi2 LUX coin (new algo)
phi1612 phi
phi2 Luxcoin (LUX)
phi2-lux identical to phi2
pluck Pluck:128 (Supcoin)
polytimos Ninja
power2b MicroBitcoin (MBC)
quark Quark
qubit Qubit
scrypt scrypt(1024, 1, 1) (default)
@@ -120,7 +122,8 @@ Supported Algorithms
x13sm3 hsr (Hshare)
x14 X14
x15 X15
x16r Ravencoin (RVN)
x16r Ravencoin (RVN) (original algo)
x16rv2 Ravencoin (RVN) (new algo)
x16rt Gincoin (GIN)
x16rt_veil Veil (VEIL)
x16s Pigeoncoin (PGN)
@@ -133,6 +136,7 @@ Supported Algorithms
yescryptr32 WAVI
yespower Cryply
yespowerr16 Yenten (YTN)
yespower-b2b generic yespower + blake2b
zr5 Ziftr
Errata
@@ -155,14 +159,15 @@ Benchmark testing does not work for x11evo.
Bugs
----
Users are encouraged to post their bug reports on the Bitcoin Talk
forum at:
Users are encouraged to post their bug reports using git issues or on the
Bitcoin Talk forum at:
https://bitcointalk.org/index.php?topic=1326803.0
All problem reports must be accompanied by a proper definition.
All problem reports must be accompanied by a proper problem definition.
This should include how the problem occurred, the command line and
output from the miner showing the startup and any errors.
output from the miner showing the startup messages and any errors.
A history is also useful, ie whether it worked before.
Donations
---------

View File

@@ -6,9 +6,6 @@ This feature requires recent SW including GCC version 5 or higher and
openssl version 1.1 or higher. It may also require using "-march=znver1"
compile flag.
cpuminer-opt is a console program, if you're using a mouse you're doing it
wrong.
Security warning
----------------
@@ -34,10 +31,64 @@ Intel Core2 or newer, or AMD Steamroller or newer CPU. ARM CPUs are not
supported.
64 bit Linux or Windows operating system. Apple and Android are not supported.
FreeBSD YMMV.
Change Log
----------
v3.9.9
Added power2b algo for MicroBitcoin.
Added generic yespower-b2b (yespower + blake2b) algo to be used with
the parameters introduced in v3.9.7 for yespower & yescrypt.
Display additional info when a share is rejected.
Some low level enhancements and minor tweaking of log output.
RELEASE_NOTES (this file) and README.md added to Windows release package.
v3.9.8.1
Summary log report will be generated on stratum diff change or after 5 minutes,
whichever comes first, to prevent incorrect data in the report.
Removed phi2-lux alias (introduced in v3.9.8) due to Luxcoin's planned fork
to a new algo. The new Luxcoin algo is not supported by cpuminer-opt.
Until the fork Luxcoin can be mined using phi2 algo.
--hide-diff option is deprecated and has no effect. It will be removed in a
future release.
v3.9.8
Changes to log output to provide data more relevant to actual mining
performance.
phi2 can now handle pools with a mix of coins that use and don't use roots.
phi2-lux added as an alias for phi2 as they are identical except for roots.
Add x16rv2 algo for Ravencoin fork.
v3.9.7
Command line option changes:
"-R" is no longer used as a shortcut for "--retry-pause", users must
use the long option.
New options:
-N, --param-n: set the N parameter for yescrypt, yespower or scrypt algos
-R, --param-r: set the R parameter for yescrypt or yespower algos, scrypt is
hardcoded with R=1
-K, --param-key: set the client key/pers parameter for yescrypt/yespower algos.
These options can be used to mine yescrypt or yespower variations using
the generic yescrypt or yespower algo name and specifying the parameters
manually. They can even be used to mine variations that aren't formally
supported by a unique algo name. Existing algos can continue to be mined
using their original name without parameters.
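For example, a yespower variant with N=2048, R=32 and a client key could be
mined through the generic algo name (illustrative command, pool details
hypothetical):

cpuminer -a yespower -N 2048 -R 32 -K "Client Key" -o stratum+tcp://pool:port -u wallet -p x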
v3.9.6.2
New algo blake2b.

View File

@@ -122,7 +122,6 @@ void init_algo_gate( algo_gate_t* gate )
gate->stratum_gen_work = (void*)&std_stratum_gen_work;
gate->build_stratum_request = (void*)&std_le_build_stratum_request;
gate->malloc_txs_request = (void*)&std_malloc_txs_request;
gate->set_target = (void*)&std_set_target;
gate->submit_getwork_result = (void*)&std_le_submit_getwork_result;
gate->build_block_header = (void*)&std_build_block_header;
gate->build_extraheader = (void*)&std_build_extraheader;
@@ -205,6 +204,7 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
case ALGO_PHI2: register_phi2_algo ( gate ); break;
case ALGO_PLUCK: register_pluck_algo ( gate ); break;
case ALGO_POLYTIMOS: register_polytimos_algo ( gate ); break;
case ALGO_POWER2B: register_power2b_algo ( gate ); break;
case ALGO_QUARK: register_quark_algo ( gate ); break;
case ALGO_QUBIT: register_qubit_algo ( gate ); break;
case ALGO_SCRYPT: register_scrypt_algo ( gate ); break;
@@ -234,6 +234,7 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
case ALGO_X14: register_x14_algo ( gate ); break;
case ALGO_X15: register_x15_algo ( gate ); break;
case ALGO_X16R: register_x16r_algo ( gate ); break;
case ALGO_X16RV2: register_x16rv2_algo ( gate ); break;
case ALGO_X16RT: register_x16rt_algo ( gate ); break;
case ALGO_X16RT_VEIL: register_x16rt_veil_algo ( gate ); break;
case ALGO_X16S: register_x16s_algo ( gate ); break;
@@ -251,6 +252,7 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
case ALGO_YESCRYPTR32: register_yescryptr32_algo ( gate ); break;
case ALGO_YESPOWER: register_yespower_algo ( gate ); break;
case ALGO_YESPOWERR16: register_yespowerr16_algo ( gate ); break;
case ALGO_YESPOWER_B2B: register_yespower_b2b_algo ( gate ); break;
case ALGO_ZR5: register_zr5_algo ( gate ); break;
default:
applog(LOG_ERR,"FAIL: algo_gate registration failed, unknown algo %s.\n", algo_names[opt_algo] );
@@ -337,7 +339,6 @@ const char* const algo_alias_map[][2] =
{ "myriad", "myr-gr" },
{ "neo", "neoscrypt" },
{ "phi", "phi1612" },
// { "sia", "blake2b" },
{ "sib", "x11gost" },
{ "timetravel8", "timetravel" },
{ "veil", "x16rt-veil" },
@@ -365,40 +366,3 @@ void get_algo_alias( char** algo_or_alias )
#undef ALIAS
#undef PROPER
bool submit_solution( struct work *work, void *hash,
struct thr_info *thr )
{
work_set_target_ratio( work, hash );
if ( submit_work( thr, work ) )
{
if ( !opt_quiet )
applog( LOG_BLUE, "Share %d submitted by thread %d, job %s.",
accepted_share_count + rejected_share_count + 1,
thr->id, work->job_id );
return true;
}
else
applog( LOG_WARNING, "Failed to submit share." );
return false;
}
bool submit_lane_solution( struct work *work, void *hash,
struct thr_info *thr, int lane )
{
work_set_target_ratio( work, hash );
if ( submit_work( thr, work ) )
{
if ( !opt_quiet )
// applog( LOG_BLUE, "Share %d submitted by thread %d, lane %d.",
// accepted_share_count + rejected_share_count + 1,
// thr->id, lane );
applog( LOG_BLUE, "Share %d submitted by thread %d, lane %d, job %s.",
accepted_share_count + rejected_share_count + 1, thr->id,
lane, work->job_id );
return true;
}
else
applog( LOG_WARNING, "Failed to submit share." );
return false;
}

View File

@@ -85,14 +85,16 @@
typedef uint32_t set_t;
#define EMPTY_SET 0
#define SSE2_OPT 1
#define AES_OPT 2
#define SSE42_OPT 4
#define AVX_OPT 8
#define AVX2_OPT 0x10
#define SHA_OPT 0x20
#define AVX512_OPT 0x40
#define EMPTY_SET 0
#define SSE2_OPT 1
#define AES_OPT 2
#define SSE42_OPT 4
#define AVX_OPT 8 // Sandybridge
#define AVX2_OPT 0x10 // Haswell
#define SHA_OPT 0x20 // sha256 (Ryzen, Ice Lake)
#define AVX512_OPT 0x40 // AVX512- F, VL, DQ, BW (Skylake-X)
#define VAES_OPT 0x80 // VAES (Ice Lake)
// return set containing all elements from sets a & b
inline set_t set_union ( set_t a, set_t b ) { return a | b; }
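// Usage sketch: a CPU's capabilities and an algo's requirements can each be
// held in a single set_t, and a support check reduces to a mask test, e.g.
// with hypothetical values:
//
// set_t cpu_caps = SSE2_OPT | AES_OPT | AVX_OPT;
// bool cpu_has_aes = ( set_union( cpu_caps, AES_OPT ) == cpu_caps );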
@@ -132,7 +134,6 @@ void ( *decode_extra_data ) ( struct work*, uint64_t* );
void ( *wait_for_diff ) ( struct stratum_ctx* );
int64_t ( *get_max64 ) ();
bool ( *work_decode ) ( const json_t*, struct work* );
void ( *set_target) ( struct work*, double );
bool ( *submit_getwork_result ) ( CURL*, struct work* );
void ( *gen_merkle_root ) ( char*, struct stratum_ctx* );
void ( *build_extraheader ) ( struct work*, struct stratum_ctx* );
@@ -193,15 +194,6 @@ void four_way_not_tested();
// always returns failure
int null_scanhash();
// Allow algos to submit from scanhash loop.
bool submit_solution( struct work *work, void *hash,
struct thr_info *thr );
bool submit_lane_solution( struct work *work, void *hash,
struct thr_info *thr, int lane );
bool submit_work( struct thr_info *thr, const struct work *work_in );
// displays warning
void null_hash ();
void null_hash_suw();
@@ -232,10 +224,6 @@ int64_t get_max64_0x3fffffLL();
int64_t get_max64_0x1ffff();
int64_t get_max64_0xffffLL();
void std_set_target( struct work *work, double job_diff );
void alt_set_target( struct work* work, double job_diff );
void scrypt_set_target( struct work *work, double job_diff );
bool std_le_work_decode( const json_t *val, struct work *work );
bool std_be_work_decode( const json_t *val, struct work *work );
bool jr2_work_decode( const json_t *val, struct work *work );
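// Note on this refactor: every removed per-algo set_target callback computed
// work_set_target( work, job_diff / ( factor * opt_diff_factor ) ) with a
// per-algo factor; registrations now record that factor in the global
// opt_target_factor instead, so the core can presumably derive the target in
// one place, along the lines of:
//
// work_set_target( work, job_diff / ( opt_target_factor * opt_diff_factor ) );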

View File

@@ -85,8 +85,9 @@ bool register_argon2_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_argon2;
gate->hash = (void*)&argon2hash;
gate->gen_merkle_root = (void*)&SHA256_gen_merkle_root;
gate->set_target = (void*)&scrypt_set_target;
gate->get_max64 = (void*)&argon2_get_max64;
opt_target_factor = 65536.0;
return true;
};

View File

@@ -67,8 +67,8 @@ bool register_argon2d_crds_algo( algo_gate_t* gate )
{
gate->scanhash = (void*)&scanhash_argon2d_crds;
gate->hash = (void*)&argon2d_crds_hash;
gate->set_target = (void*)&scrypt_set_target;
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
opt_target_factor = 65536.0;
return true;
}
@@ -135,8 +135,8 @@ bool register_argon2d_dyn_algo( algo_gate_t* gate )
{
gate->scanhash = (void*)&scanhash_argon2d_dyn;
gate->hash = (void*)&argon2d_dyn_hash;
gate->set_target = (void*)&scrypt_set_target;
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
opt_target_factor = 65536.0;
return true;
}
@@ -184,9 +184,9 @@ int64_t get_max64_0x1ff() { return 0x1ff; }
bool register_argon2d4096_algo( algo_gate_t* gate )
{
gate->scanhash = (void*)&scanhash_argon2d4096;
gate->set_target = (void*)&scrypt_set_target;
gate->get_max64 = (void*)&get_max64_0x1ff;
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
opt_target_factor = 65536.0;
return true;
}

View File

@@ -116,7 +116,7 @@ void decred_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
// block header suffix from coinb2 (stake version)
memcpy( &g_work->data[44],
&sctx->job.coinbase[ sctx->job.coinbase_size-4 ], 4 );
sctx->bloc_height = g_work->data[32];
sctx->block_height = g_work->data[32];
//applog_hex(work->data, 180);
//applog_hex(&work->data[36], 36);
}

View File

@@ -5,8 +5,8 @@ int64_t bmw512_get_max64() { return 0x7ffffLL; }
bool register_bmw512_algo( algo_gate_t* gate )
{
gate->optimizations = AVX2_OPT;
gate->set_target = (void*)&alt_set_target;
gate->get_max64 = (void*)&bmw512_get_max64;
opt_target_factor = 256.0;
#if defined (BMW512_4WAY)
gate->scanhash = (void*)&scanhash_bmw512_4way;
gate->hash = (void*)&bmw512hash_4way;

View File

@@ -94,19 +94,14 @@ int scanhash_groestl( struct work *work, uint32_t max_nonce,
return 0;
}
void groestl_set_target( struct work* work, double job_diff )
{
work_set_target( work, job_diff / (256.0 * opt_diff_factor) );
}
bool register_dmd_gr_algo( algo_gate_t* gate )
{
init_groestl_ctx();
gate->optimizations = SSE2_OPT | AES_OPT;
gate->scanhash = (void*)&scanhash_groestl;
gate->hash = (void*)&groestlhash;
gate->set_target = (void*)&groestl_set_target;
gate->get_max64 = (void*)&get_max64_0x3ffff;
opt_target_factor = 256.0;
return true;
};

View File

@@ -15,11 +15,6 @@ pthread_barrier_t hodl_barrier;
// need to be passed.
unsigned char *hodl_scratchbuf = NULL;
void hodl_set_target( struct work* work, double diff )
{
diff_to_target(work->target, diff / 8388608.0 );
}
void hodl_le_build_stratum_request( char* req, struct work* work,
struct stratum_ctx *sctx )
{
@@ -170,7 +165,6 @@ bool register_hodl_algo( algo_gate_t* gate )
gate->scanhash = (void*)&hodl_scanhash;
gate->get_new_work = (void*)&hodl_get_new_work;
gate->longpoll_rpc_call = (void*)&hodl_longpoll_rpc_call;
gate->set_target = (void*)&hodl_set_target;
gate->build_stratum_request = (void*)&hodl_le_build_stratum_request;
gate->malloc_txs_request = (void*)&hodl_malloc_txs_request;
gate->build_block_header = (void*)&hodl_build_block_header;
@@ -179,6 +173,7 @@ bool register_hodl_algo( algo_gate_t* gate )
gate->work_cmp_size = 76;
hodl_scratchbuf = (unsigned char*)malloc( 1 << 30 );
allow_getwork = false;
opt_target_factor = 8388608.0;
return ( hodl_scratchbuf != NULL );
}

View File

@@ -12,7 +12,7 @@ bool register_jha_algo( algo_gate_t* gate )
gate->hash = (void*)&jha_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->set_target = (void*)&scrypt_set_target;
opt_target_factor = 65536.0;
return true;
};

View File

@@ -1,18 +1,13 @@
#include "keccak-gate.h"
void keccak_set_target( struct work* work, double job_diff )
{
work_set_target( work, job_diff / (128.0 * opt_diff_factor) );
}
int64_t keccak_get_max64() { return 0x7ffffLL; }
bool register_keccak_algo( algo_gate_t* gate )
{
gate->optimizations = AVX2_OPT;
gate->gen_merkle_root = (void*)&SHA256_gen_merkle_root;
gate->set_target = (void*)&keccak_set_target;
gate->get_max64 = (void*)&keccak_get_max64;
opt_target_factor = 128.0;
#if defined (KECCAK_4WAY)
gate->scanhash = (void*)&scanhash_keccak_4way;
gate->hash = (void*)&keccakhash_4way;
@@ -23,17 +18,12 @@ bool register_keccak_algo( algo_gate_t* gate )
return true;
};
void keccakc_set_target( struct work* work, double job_diff )
{
work_set_target( work, job_diff / (256.0 * opt_diff_factor) );
}
bool register_keccakc_algo( algo_gate_t* gate )
{
gate->optimizations = AVX2_OPT;
gate->gen_merkle_root = (void*)&sha256d_gen_merkle_root;
gate->set_target = (void*)&keccakc_set_target;
gate->get_max64 = (void*)&keccak_get_max64;
opt_target_factor = 256.0;
#if defined (KECCAK_4WAY)
gate->scanhash = (void*)&scanhash_keccak_4way;
gate->hash = (void*)&keccakhash_4way;

View File

@@ -71,7 +71,7 @@ bool register_lyra2rev3_algo( algo_gate_t* gate )
#endif
gate->optimizations = SSE2_OPT | SSE42_OPT | AVX2_OPT;
gate->miner_thread_init = (void*)&lyra2rev3_thread_init;
gate->set_target = (void*)&alt_set_target;
opt_target_factor = 256.0;
return true;
};
@@ -105,7 +105,7 @@ bool register_lyra2rev2_algo( algo_gate_t* gate )
#endif
gate->optimizations = SSE2_OPT | AES_OPT | SSE42_OPT | AVX2_OPT;
gate->miner_thread_init = (void*)&lyra2rev2_thread_init;
gate->set_target = (void*)&alt_set_target;
opt_target_factor = 256.0;
return true;
};
@@ -128,7 +128,7 @@ bool register_lyra2z_algo( algo_gate_t* gate )
#endif
gate->optimizations = SSE42_OPT | AVX2_OPT;
gate->get_max64 = (void*)&get_max64_0xffffLL;
gate->set_target = (void*)&alt_set_target;
opt_target_factor = 256.0;
return true;
};
@@ -148,7 +148,7 @@ bool register_lyra2h_algo( algo_gate_t* gate )
#endif
gate->optimizations = SSE42_OPT | AVX2_OPT;
gate->get_max64 = (void*)&get_max64_0xffffLL;
gate->set_target = (void*)&alt_set_target;
opt_target_factor = 256.0;
return true;
};
@@ -168,8 +168,8 @@ bool register_allium_algo( algo_gate_t* gate )
gate->hash = (void*)&allium_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | SSE42_OPT | AVX2_OPT;
gate->set_target = (void*)&alt_set_target;
gate->get_max64 = (void*)&allium_get_max64_0xFFFFLL;
opt_target_factor = 256.0;
return true;
};
@@ -182,6 +182,7 @@ int phi2_get_work_data_size() { return phi2_use_roots ? 144 : 128; }
void phi2_decode_extra_data( struct work *work )
{
phi2_use_roots = false;
if ( work->data[0] & ( 1<<30 ) ) phi2_use_roots = true;
else for ( int i = 20; i < 36; i++ )
{
@@ -213,8 +214,8 @@ bool register_phi2_algo( algo_gate_t* gate )
gate->get_work_data_size = (void*)&phi2_get_work_data_size;
gate->decode_extra_data = (void*)&phi2_decode_extra_data;
gate->build_extraheader = (void*)&phi2_build_extraheader;
gate->set_target = (void*)&alt_set_target;
gate->get_max64 = (void*)&get_max64_0xffffLL;
opt_target_factor = 256.0;
#if defined(PHI2_4WAY)
gate->scanhash = (void*)&scanhash_phi2_4way;
#else

View File

@@ -118,11 +118,6 @@ int64_t lyra2re_get_max64 ()
return 0xffffLL;
}
void lyra2re_set_target ( struct work* work, double job_diff )
{
work_set_target(work, job_diff / (128.0 * opt_diff_factor) );
}
bool register_lyra2re_algo( algo_gate_t* gate )
{
init_lyra2re_ctx();
@@ -130,7 +125,7 @@ bool register_lyra2re_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_lyra2re;
gate->hash = (void*)&lyra2re_hash;
gate->get_max64 = (void*)&lyra2re_get_max64;
gate->set_target = (void*)&lyra2re_set_target;
opt_target_factor = 128.0;
return true;
};

View File

@@ -53,11 +53,6 @@ int scanhash_lyra2z330( struct work *work, uint32_t max_nonce,
return 0;
}
void lyra2z330_set_target( struct work* work, double job_diff )
{
work_set_target( work, job_diff / (256.0 * opt_diff_factor) );
}
bool lyra2z330_thread_init()
{
const int64_t ROW_LEN_INT64 = BLOCK_LEN_INT64 * 256; // nCols
@@ -76,7 +71,7 @@ bool register_lyra2z330_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_lyra2z330;
gate->hash = (void*)&lyra2z330_hash;
gate->get_max64 = (void*)&get_max64_0xffffLL;
gate->set_target = (void*)&lyra2z330_set_target;
opt_target_factor = 256.0;
return true;
};

View File

@@ -263,10 +263,9 @@ inline void absorbBlockBlake2Safe( uint64_t *State, const uint64_t *In,
#if defined (__AVX2__)
register __m256i state0, state1, state2, state3;
const __m256i zero = m256_zero;
state0 = zero;
state1 = zero;
state0 =
state1 = m256_zero;
state2 = m256_const_64( 0xa54ff53a5f1d36f1ULL, 0x3c6ef372fe94f82bULL,
0xbb67ae8584caa73bULL, 0x6a09e667f3bcc908ULL );
state3 = m256_const_64( 0x5be0cd19137e2179ULL, 0x1f83d9abfb41bd6bULL,
@@ -290,12 +289,11 @@ inline void absorbBlockBlake2Safe( uint64_t *State, const uint64_t *In,
#elif defined (__SSE2__)
__m128i state0, state1, state2, state3, state4, state5, state6, state7;
const __m128i zero = m128_zero;
state0 = zero;
state1 = zero;
state2 = zero;
state3 = zero;
state0 =
state1 =
state2 =
state3 = m128_zero;
state4 = m128_const_64( 0xbb67ae8584caa73bULL, 0x6a09e667f3bcc908ULL );
state5 = m128_const_64( 0xa54ff53a5f1d36f1ULL, 0x3c6ef372fe94f82bULL );
state6 = m128_const_64( 0x9b05688c2b3e6c1fULL, 0x510e527fade682d1ULL );

View File

@@ -323,9 +323,9 @@ bool register_m7m_algo( algo_gate_t *gate )
gate->build_stratum_request = (void*)&std_be_build_stratum_request;
gate->work_decode = (void*)&std_be_work_decode;
gate->submit_getwork_result = (void*)&std_be_submit_getwork_result;
gate->set_target = (void*)&scrypt_set_target;
gate->get_max64 = (void*)&get_max64_0x1ffff;
gate->set_work_data_endian = (void*)&set_work_data_big_endian;
opt_target_factor = 65536.0;
return true;
}

View File

@@ -10,8 +10,8 @@ bool register_hmq1725_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_hmq1725;
gate->hash = (void*)&hmq1725hash;
#endif
gate->set_target = (void*)&scrypt_set_target;
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
opt_target_factor = 65536.0;
return true;
};

View File

@@ -409,14 +409,3 @@ int scanhash_hmq1725( struct work *work, uint32_t max_nonce,
pdata[19] = n;
return 0;
}
/*
bool register_hmq1725_algo( algo_gate_t* gate )
{
init_hmq1725_ctx();
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->set_target = (void*)&scrypt_set_target;
gate->scanhash = (void*)&scanhash_hmq1725;
gate->hash = (void*)&hmq1725hash;
return true;
};
*/

View File

@@ -41,6 +41,7 @@ void lbry_le_build_stratum_request( char *req, struct work *work,
free(xnonce2str);
}
/*
void lbry_build_block_header( struct work* g_work, uint32_t version,
uint32_t *prevhash, uint32_t *merkle_root,
uint32_t ntime, uint32_t nbits )
@@ -63,6 +64,7 @@ void lbry_build_block_header( struct work* g_work, uint32_t version,
g_work->data[ LBRY_NBITS_INDEX ] = nbits;
g_work->data[28] = 0x80000000;
}
*/
void lbry_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
{
@@ -92,11 +94,6 @@ void lbry_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
g_work->data[28] = 0x80000000;
}
void lbry_set_target( struct work* work, double job_diff )
{
work_set_target( work, job_diff / (256.0 * opt_diff_factor) );
}
int64_t lbry_get_max64() { return 0x1ffffLL; }
int lbry_get_work_data_size() { return LBRY_WORK_DATA_SIZE; }
@@ -119,11 +116,11 @@ bool register_lbry_algo( algo_gate_t* gate )
gate->build_stratum_request = (void*)&lbry_le_build_stratum_request;
// gate->build_block_header = (void*)&build_block_header;
gate->build_extraheader = (void*)&lbry_build_extraheader;
gate->set_target = (void*)&lbry_set_target;
gate->ntime_index = LBRY_NTIME_INDEX;
gate->nbits_index = LBRY_NBITS_INDEX;
gate->nonce_index = LBRY_NONCE_INDEX;
gate->get_work_data_size = (void*)&lbry_get_work_data_size;
opt_target_factor = 256.0;
return true;
}

View File

@@ -1089,13 +1089,13 @@ bool register_neoscrypt_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_neoscrypt;
gate->hash = (void*)&neoscrypt;
gate->get_max64 = (void*)&get_neoscrypt_max64;
gate->set_target = (void*)&scrypt_set_target;
gate->wait_for_diff = (void*)&neoscrypt_wait_for_diff;
gate->build_stratum_request = (void*)&std_be_build_stratum_request;
gate->work_decode = (void*)&std_be_work_decode;
gate->submit_getwork_result = (void*)&std_be_submit_getwork_result;
gate->set_work_data_endian = (void*)&set_work_data_big_endian;
gate->get_work_data_size = (void*)&neoscrypt_get_work_data_size;
opt_target_factor = 65536.0;
return true;
};

View File

@@ -503,8 +503,8 @@ bool register_pluck_algo( algo_gate_t* gate )
gate->miner_thread_init = (void*)&pluck_miner_thread_init;
gate->scanhash = (void*)&scanhash_pluck;
gate->hash = (void*)&pluck_hash;
gate->set_target = (void*)&scrypt_set_target;
gate->get_max64 = (void*)&pluck_get_max64;
opt_target_factor = 65536.0;
return true;
};

View File

@@ -698,8 +698,8 @@ static void scrypt_1024_1_1_256_24way(const uint32_t *input,
extern int scanhash_scrypt( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t data[SCRYPT_MAX_WAYS * 20], hash[SCRYPT_MAX_WAYS * 8];
uint32_t midstate[8];
uint32_t n = pdata[19] - 1;
@@ -783,13 +783,18 @@ bool register_scrypt_algo( algo_gate_t* gate )
gate->miner_thread_init =(void*)&scrypt_miner_thread_init;
gate->scanhash = (void*)&scanhash_scrypt;
// gate->hash = (void*)&scrypt_1024_1_1_256_24way;
gate->set_target = (void*)&scrypt_set_target;
gate->get_max64 = (void*)&scrypt_get_max64;
opt_target_factor = 65536.0;
if ( !opt_scrypt_n )
if ( !opt_param_n )
{
opt_param_n = 1024;
scratchbuf_size = 1024;
}
else
scratchbuf_size = opt_scrypt_n;
scratchbuf_size = opt_param_n;
applog(LOG_INFO,"Scrypt paramaters: N= %d, R= 1.", opt_param_n );
return true;
};

View File

@@ -55,6 +55,7 @@ typedef uint32_t scrypt_mix_word_t;
#include "scrypt-jane-romix-template.h"
#endif
/* cpu agnostic */
#define SCRYPT_ROMIX_FN scrypt_ROMix_basic
#define SCRYPT_MIX_FN chacha_core_basic

View File

@@ -1,9 +1,11 @@
#if !defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_HAVE_ROMIX)
/*
#if defined(SCRYPT_CHOOSE_COMPILETIME)
#undef SCRYPT_ROMIX_FN
#define SCRYPT_ROMIX_FN scrypt_ROMix
#endif
*/
#undef SCRYPT_HAVE_ROMIX
#define SCRYPT_HAVE_ROMIX

View File

@@ -240,24 +240,24 @@ bool register_scryptjane_algo( algo_gate_t* gate )
{
gate->scanhash = (void*)&scanhash_scryptjane;
gate->hash = (void*)&scryptjanehash;
gate->set_target = (void*)&scrypt_set_target;
gate->get_max64 = (void*)&get_max64_0x40LL;
opt_target_factor = 65536.0;
// figure out if arg is N or Nfactor
if ( !opt_scrypt_n )
if ( !opt_param_n )
{
applog( LOG_ERR, "The N factor must be specified in the form algo:nf");
return false;
}
else if ( opt_scrypt_n < 32 )
else if ( opt_param_n < 32 )
{
// arg is Nfactor, calculate N
sj_N = 1 << ( opt_scrypt_n + 1 );
sj_N = 1 << ( opt_param_n + 1 );
}
else
{
// arg is N
sj_N = opt_scrypt_n;
sj_N = opt_param_n;
}
return true;
}

View File

@@ -200,6 +200,7 @@ void sm3_4way_compress( __m128i *digest, __m128i *block )
T = _mm_set1_epi32( 0x7A879D8AUL );
for( j =16; j < 64; j++ )
{
// AVX512 _mm_rol_epi32 doesn't like using a variable for the second arg.
SS1 = mm128_rol_32( _mm_add_epi32( _mm_add_epi32( mm128_rol_32(A,12), E ),
mm128_rol_32( T, j&31 ) ), 7 );
SS2 = _mm_xor_si128( SS1, mm128_rol_32( A, 12 ) );

View File

@@ -120,19 +120,13 @@ int scanhash_fresh( struct work *work,
return 0;
}
void fresh_set_target( struct work* work, double job_diff )
{
work_set_target( work, job_diff / (256.0 * opt_diff_factor) );
}
bool register_fresh_algo( algo_gate_t* gate )
{
algo_not_tested();
gate->scanhash = (void*)&scanhash_fresh;
gate->hash = (void*)&freshhash;
gate->set_target = (void*)&fresh_set_target;
gate->get_max64 = (void*)&get_max64_0x3ffff;
opt_target_factor = 256.0;
return true;
};

View File

@@ -1,10 +1,5 @@
#include "timetravel-gate.h"
void tt8_set_target( struct work* work, double job_diff )
{
work_set_target( work, job_diff / (256.0 * opt_diff_factor) );
}
bool register_timetravel_algo( algo_gate_t* gate )
{
#ifdef TIMETRAVEL_4WAY
@@ -16,9 +11,9 @@ bool register_timetravel_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_timetravel;
gate->hash = (void*)&timetravel_hash;
#endif
gate->set_target = (void*)&tt8_set_target;
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->get_max64 = (void*)&get_max64_0xffffLL;
opt_target_factor = 256.0;
return true;
};

View File

@@ -1,10 +1,5 @@
#include "timetravel10-gate.h"
void tt10_set_target( struct work* work, double job_diff )
{
work_set_target( work, job_diff / (256.0 * opt_diff_factor) );
}
bool register_timetravel10_algo( algo_gate_t* gate )
{
#ifdef TIMETRAVEL10_4WAY
@@ -16,9 +11,9 @@ bool register_timetravel10_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_timetravel10;
gate->hash = (void*)&timetravel10_hash;
#endif
gate->set_target = (void*)&tt10_set_target;
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->get_max64 = (void*)&get_max64_0xffffLL;
opt_target_factor = 256.0;
return true;
};

View File

@@ -249,7 +249,6 @@ bool register_drop_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_drop;
gate->hash = (void*)&droplp_hash_pok;
gate->get_new_work = (void*)&drop_get_new_work;
gate->set_target = (void*)&scrypt_set_target;
gate->build_stratum_request = (void*)&std_be_build_stratum_request;
gate->work_decode = (void*)&std_be_work_decode;
gate->submit_getwork_result = (void*)&std_be_submit_getwork_result;
@@ -257,6 +256,7 @@ bool register_drop_algo( algo_gate_t* gate )
gate->decode_extra_data = (void*)&drop_display_pok;
gate->get_work_data_size = (void*)&drop_get_work_data_size;
gate->work_cmp_size = 72;
opt_target_factor = 65536.0;
return true;
};

View File

@@ -68,21 +68,7 @@ void x16r_4way_hash( void* output, const void* input )
int size = 80;
dintrlv_4x64( hash0, hash1, hash2, hash3, input, 640 );
/*
if ( s_ntime == UINT32_MAX )
{
const uint8_t* tmp = (uint8_t*) in0;
x16_r_s_getAlgoString( &tmp[4], hashOrder );
}
*/
// Input data is both 64 bit interleaved (input)
// and deinterleaved in inp0-3.
// If the first function uses 64 bit data it is not required to interleave inp
// first. It may use the interleaved data in the form most convenient, ie 4way 64 bit.
// All other functions assume data is deinterleaved in hash0-3
// All functions must exit with data deinterleaved in hash0-3.
// Alias in0-3 points to either inp0-3 or hash0-3 according to
// its hashOrder position. Size is also set accordingly.
for ( int i = 0; i < 16; i++ )
{
const char elem = hashOrder[i];

View File

@@ -42,8 +42,23 @@ bool register_x16r_algo( algo_gate_t* gate )
gate->hash = (void*)&x16r_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->set_target = (void*)&alt_set_target;
x16_r_s_getAlgoString = (void*)&x16r_getAlgoString;
opt_target_factor = 256.0;
return true;
};
bool register_x16rv2_algo( algo_gate_t* gate )
{
#if defined (X16R_4WAY)
gate->scanhash = (void*)&scanhash_x16rv2_4way;
gate->hash = (void*)&x16rv2_4way_hash;
#else
gate->scanhash = (void*)&scanhash_x16rv2;
gate->hash = (void*)&x16rv2_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
x16_r_s_getAlgoString = (void*)&x16r_getAlgoString;
opt_target_factor = 256.0;
return true;
};
@@ -57,8 +72,8 @@ bool register_x16s_algo( algo_gate_t* gate )
gate->hash = (void*)&x16r_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->set_target = (void*)&alt_set_target;
x16_r_s_getAlgoString = (void*)&x16s_getAlgoString;
opt_target_factor = 256.0;
return true;
};
@@ -189,7 +204,7 @@ bool register_x16rt_algo( algo_gate_t* gate )
gate->hash = (void*)&x16rt_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->set_target = (void*)&alt_set_target;
opt_target_factor = 256.0;
return true;
};
@@ -203,8 +218,8 @@ bool register_x16rt_veil_algo( algo_gate_t* gate )
gate->hash = (void*)&x16rt_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->set_target = (void*)&alt_set_target;
gate->build_extraheader = (void*)&veil_build_extraheader;
opt_target_factor = 256.0;
return true;
};
@@ -212,19 +227,13 @@ bool register_x16rt_veil_algo( algo_gate_t* gate )
//
// HEX
void hex_set_target( struct work* work, double job_diff )
{
work_set_target( work, job_diff / (128.0 * opt_diff_factor) );
}
bool register_hex_algo( algo_gate_t* gate )
{
gate->scanhash = (void*)&scanhash_hex;
gate->hash = (void*)&hex_hash;
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->gen_merkle_root = (void*)&SHA256_gen_merkle_root;
gate->set_target = (void*)&hex_set_target;
opt_target_factor = 128.0;
return true;
};
@@ -244,8 +253,8 @@ bool register_x21s_algo( algo_gate_t* gate )
gate->miner_thread_init = (void*)&x21s_thread_init;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | SHA_OPT;
gate->set_target = (void*)&alt_set_target;
x16_r_s_getAlgoString = (void*)&x16s_getAlgoString;
opt_target_factor = 256.0;
return true;
};

View File

@@ -38,6 +38,7 @@ void x16rt_getAlgoString( const uint32_t *timeHash, char *output );
void x16rt_getTimeHash( const uint32_t timeStamp, void* timeHash );
bool register_x16r_algo( algo_gate_t* gate );
bool register_x16rv2_algo( algo_gate_t* gate );
bool register_x16s_algo( algo_gate_t* gate );
bool register_x16rt_algo( algo_gate_t* gate );
bool register_hex_algo( algo_gate_t* gate );
@@ -49,6 +50,10 @@ void x16r_4way_hash( void *state, const void *input );
int scanhash_x16r_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void x16rv2_4way_hash( void *state, const void *input );
int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void x16rt_4way_hash( void *state, const void *input );
int scanhash_x16rt_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
@@ -64,6 +69,10 @@ void x16r_hash( void *state, const void *input );
int scanhash_x16r( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void x16rv2_hash( void *state, const void *input );
int scanhash_x16rv2( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void x16rt_hash( void *state, const void *input );
int scanhash_x16rt( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );

384
algo/x16/x16rv2-4way.c Normal file
View File

@@ -0,0 +1,384 @@
/**
* x16r algo implementation
*
* Implementation by tpruvot@github Jan 2018
* Optimized by JayDDee@github Jan 2018
*/
#include "x16r-gate.h"
#if defined (X16R_4WAY)
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "algo/blake/blake-hash-4way.h"
#include "algo/bmw/bmw-hash-4way.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#include "algo/skein/skein-hash-4way.h"
#include "algo/jh/jh-hash-4way.h"
#include "algo/keccak/keccak-hash-4way.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/luffa/luffa-hash-2way.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/simd/simd-hash-2way.h"
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/hamsi/hamsi-hash-4way.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/shabal/shabal-hash-4way.h"
#include "algo/whirlpool/sph_whirlpool.h"
#include "algo/sha/sha-hash-4way.h"
#include "algo/tiger/sph_tiger.h"
static __thread uint32_t s_ntime = UINT32_MAX;
static __thread char hashOrder[X16R_HASH_FUNC_COUNT + 1] = { 0 };
union _x16rv2_4way_context_overlay
{
blake512_4way_context blake;
bmw512_4way_context bmw;
hashState_echo echo;
hashState_groestl groestl;
skein512_4way_context skein;
jh512_4way_context jh;
keccak512_4way_context keccak;
luffa_2way_context luffa;
cubehashParam cube;
sph_shavite512_context shavite;
simd_2way_context simd;
hamsi512_4way_context hamsi;
sph_fugue512_context fugue;
shabal512_4way_context shabal;
sph_whirlpool_context whirlpool;
sha512_4way_context sha512;
sph_tiger_context tiger;
};
typedef union _x16rv2_4way_context_overlay x16rv2_4way_context_overlay;
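// The union overlays all the hash contexts in one allocation; only one
// member is live at a time since the functions below run one at a time.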
// Pad the 24-byte Tiger hash to 64 bytes
inline void padtiger512( uint32_t* hash )
{
for ( int i = 6; i < 16; i++ ) hash[i] = 0;
}
void x16rv2_4way_hash( void* output, const void* input )
{
uint32_t hash0[24] __attribute__ ((aligned (64)));
uint32_t hash1[24] __attribute__ ((aligned (64)));
uint32_t hash2[24] __attribute__ ((aligned (64)));
uint32_t hash3[24] __attribute__ ((aligned (64)));
uint32_t vhash[24*4] __attribute__ ((aligned (64)));
x16rv2_4way_context_overlay ctx;
void *in0 = (void*) hash0;
void *in1 = (void*) hash1;
void *in2 = (void*) hash2;
void *in3 = (void*) hash3;
int size = 80;
dintrlv_4x64( hash0, hash1, hash2, hash3, input, 640 );
for ( int i = 0; i < 16; i++ )
{
const char elem = hashOrder[i];
const uint8_t algo = elem >= 'A' ? elem - 'A' + 10 : elem - '0';
switch ( algo )
{
case BLAKE:
blake512_4way_init( &ctx.blake );
if ( i == 0 )
blake512_4way( &ctx.blake, input, size );
else
{
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
blake512_4way( &ctx.blake, vhash, size );
}
blake512_4way_close( &ctx.blake, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case BMW:
bmw512_4way_init( &ctx.bmw );
if ( i == 0 )
bmw512_4way( &ctx.bmw, input, size );
else
{
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
bmw512_4way( &ctx.bmw, vhash, size );
}
bmw512_4way_close( &ctx.bmw, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case GROESTL:
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(const char*)in0, size<<3 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash1,
(const char*)in1, size<<3 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash2,
(const char*)in2, size<<3 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(const char*)in3, size<<3 );
break;
case SKEIN:
skein512_4way_init( &ctx.skein );
if ( i == 0 )
skein512_4way( &ctx.skein, input, size );
else
{
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
skein512_4way( &ctx.skein, vhash, size );
}
skein512_4way_close( &ctx.skein, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case JH:
jh512_4way_init( &ctx.jh );
if ( i == 0 )
jh512_4way( &ctx.jh, input, size );
else
{
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
jh512_4way( &ctx.jh, vhash, size );
}
jh512_4way_close( &ctx.jh, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case KECCAK:
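// x16rv2 change vs x16r: Keccak (like Luffa and SHA512 below) is preceded
// by a Tiger pass whose 24-byte digest is zero-padded to 64 bytes before
// being fed to the 4-way Keccak.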
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in0, size );
sph_tiger_close( &ctx.tiger, hash0 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in1, size );
sph_tiger_close( &ctx.tiger, hash1 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in2, size );
sph_tiger_close( &ctx.tiger, hash2 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in3, size );
sph_tiger_close( &ctx.tiger, hash3 );
for ( int i = (24/4); i < (64/4); i++ )
hash0[i] = hash1[i] = hash2[i] = hash3[i] = 0;
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
keccak512_4way_init( &ctx.keccak );
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case LUFFA:
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in0, size );
sph_tiger_close( &ctx.tiger, hash0 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in1, size );
sph_tiger_close( &ctx.tiger, hash1 );
for ( int i = (24/4); i < (64/4); i++ )
hash0[i] = hash1[i] = 0;
intrlv_2x128( vhash, hash0, hash1, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in2, size );
sph_tiger_close( &ctx.tiger, hash2 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in3, size );
sph_tiger_close( &ctx.tiger, hash3 );
for ( int i = (24/4); i < (64/4); i++ )
hash2[i] = hash3[i] = 0;
intrlv_2x128( vhash, hash2, hash3, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
break;
case CUBEHASH:
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash0,
(const byte*)in0, size );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash1,
(const byte*)in1, size );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash2,
(const byte*)in2, size );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash3,
(const byte*)in3, size );
break;
case SHAVITE:
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in0, size );
sph_shavite512_close( &ctx.shavite, hash0 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in1, size );
sph_shavite512_close( &ctx.shavite, hash1 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in2, size );
sph_shavite512_close( &ctx.shavite, hash2 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in3, size );
sph_shavite512_close( &ctx.shavite, hash3 );
break;
case SIMD:
intrlv_2x128( vhash, in0, in1, size<<3 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, size<<3 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, in2, in3, size<<3 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, size<<3 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
break;
case ECHO:
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash0,
(const BitSequence*)in0, size<<3 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash1,
(const BitSequence*)in1, size<<3 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash2,
(const BitSequence*)in2, size<<3 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash3,
(const BitSequence*)in3, size<<3 );
break;
case HAMSI:
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, size );
hamsi512_4way_close( &ctx.hamsi, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case FUGUE:
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in0, size );
sph_fugue512_close( &ctx.fugue, hash0 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in1, size );
sph_fugue512_close( &ctx.fugue, hash1 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in2, size );
sph_fugue512_close( &ctx.fugue, hash2 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in3, size );
sph_fugue512_close( &ctx.fugue, hash3 );
break;
case SHABAL:
intrlv_4x32( vhash, in0, in1, in2, in3, size<<3 );
shabal512_4way_init( &ctx.shabal );
shabal512_4way( &ctx.shabal, vhash, size );
shabal512_4way_close( &ctx.shabal, vhash );
dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case WHIRLPOOL:
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in0, size );
sph_whirlpool_close( &ctx.whirlpool, hash0 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in1, size );
sph_whirlpool_close( &ctx.whirlpool, hash1 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in2, size );
sph_whirlpool_close( &ctx.whirlpool, hash2 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in3, size );
sph_whirlpool_close( &ctx.whirlpool, hash3 );
break;
case SHA_512:
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in0, size );
sph_tiger_close( &ctx.tiger, hash0 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in1, size );
sph_tiger_close( &ctx.tiger, hash1 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in2, size );
sph_tiger_close( &ctx.tiger, hash2 );
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in3, size );
sph_tiger_close( &ctx.tiger, hash3 );
for ( int i = (24/4); i < (64/4); i++ )
hash0[i] = hash1[i] = hash2[i] = hash3[i] = 0;
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
sha512_4way_init( &ctx.sha512 );
sha512_4way( &ctx.sha512, vhash, 64 );
sha512_4way_close( &ctx.sha512, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
}
size = 64;
}
memcpy( output, hash0, 32 );
memcpy( output+32, hash1, 32 );
memcpy( output+64, hash2, 32 );
memcpy( output+96, hash3, 32 );
}
int scanhash_x16rv2_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr)
{
uint32_t hash[4*16] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
__m256i *noncev = (__m256i*)vdata + 9; // aligned
volatile uint8_t *restart = &(work_restart[thr_id].restart);
casti_m256i( endiandata, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) );
casti_m256i( endiandata, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) );
casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
if ( s_ntime != endiandata[17] )
{
uint32_t ntime = swab32(pdata[17]);
x16_r_s_getAlgoString( (const uint8_t*) (&endiandata[1]), hashOrder );
s_ntime = ntime;
if ( opt_debug && !thr_id )
applog( LOG_DEBUG, "hash order %s (%08x)", hashOrder, ntime );
}
if ( opt_benchmark )
ptarget[7] = 0x0cff;
uint64_t *edata = (uint64_t*)endiandata;
intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
x16rv2_4way_hash( hash, vdata );
pdata[19] = n;
for ( int i = 0; i < 4; i++ ) if ( (hash+(i<<3))[7] <= Htarg )
if( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
}
n += 4;
} while ( ( n < max_nonce ) && !(*restart) );
*hashes_done = n - first_nonce + 1;
return 0;
}
#endif

247
algo/x16/x16rv2.c Normal file
View File

@@ -0,0 +1,247 @@
/**
* x16r algo implementation
*
* Implementation by tpruvot@github Jan 2018
* Optimized by JayDDee@github Jan 2018
*/
#include "x16r-gate.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "algo/blake/sph_blake.h"
#include "algo/bmw/sph_bmw.h"
#include "algo/groestl/sph_groestl.h"
#include "algo/jh/sph_jh.h"
#include "algo/keccak/sph_keccak.h"
#include "algo/skein/sph_skein.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/luffa/luffa_for_sse2.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/simd/nist.h"
#include "algo/echo/sph_echo.h"
#include "algo/hamsi/sph_hamsi.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/shabal/sph_shabal.h"
#include "algo/whirlpool/sph_whirlpool.h"
#include <openssl/sha.h>
#include "algo/tiger/sph_tiger.h"
#if defined(__AES__)
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#endif
static __thread uint32_t s_ntime = UINT32_MAX;
static __thread char hashOrder[X16R_HASH_FUNC_COUNT + 1] = { 0 };
union _x16rv2_context_overlay
{
#if defined(__AES__)
hashState_echo echo;
hashState_groestl groestl;
#else
sph_groestl512_context groestl;
sph_echo512_context echo;
#endif
sph_blake512_context blake;
sph_bmw512_context bmw;
sph_skein512_context skein;
sph_jh512_context jh;
sph_keccak512_context keccak;
hashState_luffa luffa;
cubehashParam cube;
sph_shavite512_context shavite;
hashState_sd simd;
sph_hamsi512_context hamsi;
sph_fugue512_context fugue;
sph_shabal512_context shabal;
sph_whirlpool_context whirlpool;
SHA512_CTX sha512;
sph_tiger_context tiger;
};
typedef union _x16rv2_context_overlay x16rv2_context_overlay;
// Pad the 24-byte Tiger hash to 64 bytes
inline void padtiger512(uint32_t* hash) {
for (int i = (24/4); i < (64/4); i++) hash[i] = 0;
}
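// x16rv2 follows the same dynamic hash-order scheme as x16r, but inserts a
// Tiger + padtiger512 step ahead of the Keccak, Luffa and SHA512 stages
// (see the KECCAK, LUFFA and SHA_512 cases below).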
void x16rv2_hash( void* output, const void* input )
{
uint32_t _ALIGN(128) hash[16];
x16rv2_context_overlay ctx;
void *in = (void*) input;
int size = 80;
/*
if ( s_ntime == UINT32_MAX )
{
const uint8_t* in8 = (uint8_t*) input;
x16_r_s_getAlgoString( &in8[4], hashOrder );
}
*/
for ( int i = 0; i < 16; i++ )
{
const char elem = hashOrder[i];
const uint8_t algo = elem >= 'A' ? elem - 'A' + 10 : elem - '0';
switch ( algo )
{
case BLAKE:
sph_blake512_init( &ctx.blake );
sph_blake512( &ctx.blake, in, size );
sph_blake512_close( &ctx.blake, hash );
break;
case BMW:
sph_bmw512_init( &ctx.bmw );
sph_bmw512(&ctx.bmw, in, size);
sph_bmw512_close(&ctx.bmw, hash);
break;
case GROESTL:
#if defined(__AES__)
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash,
(const char*)in, size<<3 );
#else
sph_groestl512_init( &ctx.groestl );
sph_groestl512( &ctx.groestl, in, size );
sph_groestl512_close(&ctx.groestl, hash);
#endif
break;
case SKEIN:
sph_skein512_init( &ctx.skein );
sph_skein512( &ctx.skein, in, size );
sph_skein512_close( &ctx.skein, hash );
break;
case JH:
sph_jh512_init( &ctx.jh );
sph_jh512(&ctx.jh, in, size );
sph_jh512_close(&ctx.jh, hash );
break;
case KECCAK:
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in, size );
sph_tiger_close( &ctx.tiger, hash );
padtiger512( hash );
sph_keccak512_init( &ctx.keccak );
sph_keccak512( &ctx.keccak, hash, 64 );
sph_keccak512_close( &ctx.keccak, hash );
break;
case LUFFA:
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in, size );
sph_tiger_close( &ctx.tiger, hash );
padtiger512( hash );
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
(const BitSequence*)hash, 64 );
break;
case CUBEHASH:
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash,
(const byte*)in, size );
break;
case SHAVITE:
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in, size );
sph_shavite512_close( &ctx.shavite, hash );
break;
case SIMD:
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash,
(const BitSequence*)in, size<<3 );
break;
case ECHO:
#if defined(__AES__)
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash,
(const BitSequence*)in, size<<3 );
#else
sph_echo512_init( &ctx.echo );
sph_echo512( &ctx.echo, in, size );
sph_echo512_close( &ctx.echo, hash );
#endif
break;
case HAMSI:
sph_hamsi512_init( &ctx.hamsi );
sph_hamsi512( &ctx.hamsi, in, size );
sph_hamsi512_close( &ctx.hamsi, hash );
break;
case FUGUE:
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in, size );
sph_fugue512_close( &ctx.fugue, hash );
break;
case SHABAL:
sph_shabal512_init( &ctx.shabal );
sph_shabal512( &ctx.shabal, in, size );
sph_shabal512_close( &ctx.shabal, hash );
break;
case WHIRLPOOL:
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in, size );
sph_whirlpool_close( &ctx.whirlpool, hash );
break;
case SHA_512:
sph_tiger_init( &ctx.tiger );
sph_tiger( &ctx.tiger, in, size );
sph_tiger_close( &ctx.tiger, hash );
padtiger512( hash );
SHA512_Init( &ctx.sha512 );
SHA512_Update( &ctx.sha512, hash, 64 );
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
break;
}
in = (void*) hash;
size = 64;
}
memcpy(output, hash, 32);
}
int scanhash_x16rv2( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(128) hash32[8];
uint32_t _ALIGN(128) endiandata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
int thr_id = mythr->id; // thr_id arg is deprecated
uint32_t nonce = first_nonce;
volatile uint8_t *restart = &(work_restart[thr_id].restart);
casti_m128i( endiandata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) );
casti_m128i( endiandata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) );
casti_m128i( endiandata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) );
casti_m128i( endiandata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) );
casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
if ( s_ntime != pdata[17] )
{
uint32_t ntime = swab32(pdata[17]);
x16_r_s_getAlgoString( (const uint8_t*) (&endiandata[1]), hashOrder );
s_ntime = ntime;
if ( opt_debug && !thr_id )
applog( LOG_DEBUG, "hash order %s (%08x)", hashOrder, ntime );
}
if ( opt_benchmark )
ptarget[7] = 0x0cff;
do
{
be32enc( &endiandata[19], nonce );
x16rv2_hash( hash32, endiandata );
if ( hash32[7] <= Htarg )
if (fulltest( hash32, ptarget ) && !opt_benchmark )
{
pdata[19] = nonce;
submit_solution( work, hash32, mythr );
}
nonce++;
} while ( nonce < max_nonce && !(*restart) );
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
return 0;
}

View File

@@ -1,10 +1,5 @@
#include "xevan-gate.h"
void xevan_set_target( struct work* work, double job_diff )
{
work_set_target( work, job_diff / (256.0 * opt_diff_factor) );
}
bool register_xevan_algo( algo_gate_t* gate )
{
#if defined (XEVAN_4WAY)
@@ -17,8 +12,8 @@ bool register_xevan_algo( algo_gate_t* gate )
gate->hash = (void*)&xevan_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->set_target = (void*)&xevan_set_target;
gate->get_max64 = (void*)&get_max64_0xffffLL;
opt_target_factor = 256.0;
return true;
};

View File

@@ -26,9 +26,9 @@ bool register_x20r_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_x20r;
gate->hash = (void*)&x20r_hash;
#endif
gate->set_target = (void*)&alt_set_target;
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
x20_r_s_getAlgoString = (void*)&x20r_getAlgoString;
opt_target_factor = 256.;
return true;
};

View File

@@ -431,18 +431,38 @@ void yescrypt_gate_base(algo_gate_t *gate )
gate->optimizations = SSE2_OPT | SHA_OPT;
gate->scanhash = (void*)&scanhash_yescrypt;
gate->hash = (void*)&yescrypt_hash;
gate->set_target = (void*)&scrypt_set_target;
opt_target_factor = 65536.0;
}
bool register_yescrypt_algo( algo_gate_t* gate )
{
yescrypt_gate_base( gate );
gate->get_max64 = (void*)&yescrypt_get_max64;
yescrypt_client_key = NULL;
yescrypt_client_key_len = 0;
YESCRYPT_N = 2048;
YESCRYPT_R = 8;
if ( opt_param_n ) YESCRYPT_N = opt_param_n;
else YESCRYPT_N = 2048;
if ( opt_param_r ) YESCRYPT_R = opt_param_r;
else YESCRYPT_R = 8;
if ( opt_param_key )
{
yescrypt_client_key = opt_param_key;
yescrypt_client_key_len = strlen( opt_param_key );
}
else
{
yescrypt_client_key = NULL;
yescrypt_client_key_len = 0;
}
YESCRYPT_P = 1;
applog( LOG_NOTICE,"Yescrypt parameters: N= %d, R= %d.", YESCRYPT_N,
YESCRYPT_R );
if ( yescrypt_client_key )
applog( LOG_NOTICE,"Key= \"%s\"\n", yescrypt_client_key );
return true;
}
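As the registration above shows, the generic yescrypt algo defaults to N=2048,
R=8 and no client key when no -N/-R/-K options are given; any of the three can
be overridden on the command line, and the chosen values are echoed at startup
by the applog calls. For example (illustrative, key value hypothetical):

cpuminer -a yescrypt -N 4096 -R 16 -K "Client Key"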

Binary file not shown.

View File

@@ -0,0 +1,322 @@
/*
* Copyright 2009 Colin Percival, 2014 savale
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*/
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <algo/yespower/crypto/sph_types.h>
#include <algo/yespower/utils/sysendian.h>
#include "blake2b-yp.h"
// Cyclic right rotation.
#ifndef ROTR64
#define ROTR64(x, y) (((x) >> (y)) ^ ((x) << (64 - (y))))
#endif
// Little-endian byte access.
#define B2B_GET64(p) \
(((uint64_t) ((uint8_t *) (p))[0]) ^ \
(((uint64_t) ((uint8_t *) (p))[1]) << 8) ^ \
(((uint64_t) ((uint8_t *) (p))[2]) << 16) ^ \
(((uint64_t) ((uint8_t *) (p))[3]) << 24) ^ \
(((uint64_t) ((uint8_t *) (p))[4]) << 32) ^ \
(((uint64_t) ((uint8_t *) (p))[5]) << 40) ^ \
(((uint64_t) ((uint8_t *) (p))[6]) << 48) ^ \
(((uint64_t) ((uint8_t *) (p))[7]) << 56))
// G Mixing function.
#define B2B_G(a, b, c, d, x, y) { \
v[a] = v[a] + v[b] + x; \
v[d] = ROTR64(v[d] ^ v[a], 32); \
v[c] = v[c] + v[d]; \
v[b] = ROTR64(v[b] ^ v[c], 24); \
v[a] = v[a] + v[b] + y; \
v[d] = ROTR64(v[d] ^ v[a], 16); \
v[c] = v[c] + v[d]; \
v[b] = ROTR64(v[b] ^ v[c], 63); }
// Initialization Vector.
static const uint64_t blake2b_iv[8] = {
0x6A09E667F3BCC908, 0xBB67AE8584CAA73B,
0x3C6EF372FE94F82B, 0xA54FF53A5F1D36F1,
0x510E527FADE682D1, 0x9B05688C2B3E6C1F,
0x1F83D9ABFB41BD6B, 0x5BE0CD19137E2179
};
// Compression function. "last" flag indicates last block.
static void blake2b_compress(blake2b_yp_ctx *ctx, int last)
{
const uint8_t sigma[12][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
};
int i;
uint64_t v[16], m[16];
// init work variables
for (i = 0; i < 8; i++) {
v[i] = ctx->h[i];
v[i + 8] = blake2b_iv[i];
}
v[12] ^= ctx->t[0]; // low 64 bits of offset
v[13] ^= ctx->t[1]; // high 64 bits
// last block flag set ?
if (last) {
v[14] = ~v[14];
}
// get little-endian words
for (i = 0; i < 16; i++) {
m[i] = B2B_GET64(&ctx->b[8 * i]);
}
// twelve rounds
for (i = 0; i < 12; i++) {
B2B_G( 0, 4, 8, 12, m[sigma[i][ 0]], m[sigma[i][ 1]]);
B2B_G( 1, 5, 9, 13, m[sigma[i][ 2]], m[sigma[i][ 3]]);
B2B_G( 2, 6, 10, 14, m[sigma[i][ 4]], m[sigma[i][ 5]]);
B2B_G( 3, 7, 11, 15, m[sigma[i][ 6]], m[sigma[i][ 7]]);
B2B_G( 0, 5, 10, 15, m[sigma[i][ 8]], m[sigma[i][ 9]]);
B2B_G( 1, 6, 11, 12, m[sigma[i][10]], m[sigma[i][11]]);
B2B_G( 2, 7, 8, 13, m[sigma[i][12]], m[sigma[i][13]]);
B2B_G( 3, 4, 9, 14, m[sigma[i][14]], m[sigma[i][15]]);
}
for(i = 0; i < 8; ++i) {
ctx->h[i] ^= v[i] ^ v[i + 8];
}
}
// Initialize the hashing context "ctx" with optional key "key".
// 1 <= outlen <= 64 gives the digest size in bytes.
// Secret key (also <= 64 bytes) is optional (keylen = 0).
int blake2b_yp_init(blake2b_yp_ctx *ctx, size_t outlen,
const void *key, size_t keylen) // (keylen=0: no key)
{
size_t i;
// illegal parameters
if (outlen == 0 || outlen > 64 || keylen > 64) {
return -1;
}
// state, "param block"
for (i = 0; i < 8; i++) {
ctx->h[i] = blake2b_iv[i];
}
ctx->h[0] ^= 0x01010000 ^ (keylen << 8) ^ outlen;
ctx->t[0] = 0; // input count low word
ctx->t[1] = 0; // input count high word
ctx->c = 0; // pointer within buffer
ctx->outlen = outlen;
// zero input block
for (i = keylen; i < 128; i++) {
ctx->b[i] = 0;
}
if (keylen > 0) {
blake2b_yp_update(ctx, key, keylen);
ctx->c = 128; // at the end
}
return 0;
}
// Add "inlen" bytes from "in" into the hash.
void blake2b_yp_update(blake2b_yp_ctx *ctx,
const void *in, size_t inlen) // data bytes
{
size_t i;
for (i = 0; i < inlen; i++) {
if (ctx->c == 128) { // buffer full ?
ctx->t[0] += ctx->c; // add counters
if (ctx->t[0] < ctx->c) // carry overflow ?
ctx->t[1]++; // high word
blake2b_compress(ctx, 0); // compress (not last)
ctx->c = 0; // counter to zero
}
ctx->b[ctx->c++] = ((const uint8_t *) in)[i];
}
}
// Generate the message digest (size given in init).
// Result placed in "out".
void blake2b_yp_final(blake2b_yp_ctx *ctx, void *out)
{
size_t i;
ctx->t[0] += ctx->c; // mark last block offset
// carry overflow
if (ctx->t[0] < ctx->c) {
ctx->t[1]++; // high word
}
// fill up with zeros
while (ctx->c < 128) {
ctx->b[ctx->c++] = 0;
}
blake2b_compress(ctx, 1); // final block flag = 1
// little endian convert and store
for (i = 0; i < ctx->outlen; i++) {
((uint8_t *) out)[i] =
(ctx->h[i >> 3] >> (8 * (i & 7))) & 0xFF;
}
}
// inlen = number of bytes
void blake2b_yp_hash(void *out, const void *in, size_t inlen) {
blake2b_yp_ctx ctx;
blake2b_yp_init(&ctx, 32, NULL, 0);
blake2b_yp_update(&ctx, in, inlen);
blake2b_yp_final(&ctx, out);
}
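// Illustrative sketch only, not part of the original file: a minimal caller
// of the one-shot helper above. The "abc" message and the hex printing are
// made up for the example; blake2b_yp_hash() and its fixed 32 byte digest
// size come from the source above.
/*
#include <stdio.h>
#include "blake2b-yp.h"
int main()
{
   uint8_t digest[32];
   const char msg[] = "abc";
   blake2b_yp_hash( digest, msg, sizeof(msg) - 1 );  // unkeyed, 32 byte out
   for ( int i = 0; i < 32; i++ ) printf( "%02x", digest[i] );
   printf( "\n" );
   return 0;
}
*/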
// keylen = number of bytes
void hmac_blake2b_yp_init(hmac_yp_ctx *hctx, const void *_key, size_t keylen) {
const uint8_t *key = _key;
uint8_t keyhash[32];
uint8_t pad[64];
uint64_t i;
if (keylen > 64) {
blake2b_yp_hash(keyhash, key, keylen);
key = keyhash;
keylen = 32;
}
blake2b_yp_init(&hctx->inner, 32, NULL, 0);
memset(pad, 0x36, 64);
for (i = 0; i < keylen; ++i) {
pad[i] ^= key[i];
}
blake2b_yp_update(&hctx->inner, pad, 64);
blake2b_yp_init(&hctx->outer, 32, NULL, 0);
memset(pad, 0x5c, 64);
for (i = 0; i < keylen; ++i) {
pad[i] ^= key[i];
}
blake2b_yp_update(&hctx->outer, pad, 64);
memset(keyhash, 0, 32);
}
// datalen = number of bytes
void hmac_blake2b_yp_update(hmac_yp_ctx *hctx, const void *data, size_t datalen) {
// update the inner state
blake2b_yp_update(&hctx->inner, data, datalen);
}
void hmac_blake2b_yp_final(hmac_yp_ctx *hctx, uint8_t *digest) {
uint8_t ihash[32];
blake2b_yp_final(&hctx->inner, ihash);
blake2b_yp_update(&hctx->outer, ihash, 32);
blake2b_yp_final(&hctx->outer, digest);
memset(ihash, 0, 32);
}
// keylen = number of bytes; inlen = number of bytes
void hmac_blake2b_yp_hash(void *out, const void *key, size_t keylen, const void *in, size_t inlen) {
hmac_yp_ctx hctx;
hmac_blake2b_yp_init(&hctx, key, keylen);
hmac_blake2b_yp_update(&hctx, in, inlen);
hmac_blake2b_yp_final(&hctx, out);
}
void pbkdf2_blake2b_yp(const uint8_t * passwd, size_t passwdlen, const uint8_t * salt,
size_t saltlen, uint64_t c, uint8_t * buf, size_t dkLen)
{
hmac_yp_ctx PShctx, hctx;
size_t i;
uint8_t ivec[4];
uint8_t U[32];
uint8_t T[32];
uint64_t j;
int k;
size_t clen;
/* Compute HMAC state after processing P and S. */
hmac_blake2b_yp_init(&PShctx, passwd, passwdlen);
hmac_blake2b_yp_update(&PShctx, salt, saltlen);
/* Iterate through the blocks. */
for (i = 0; i * 32 < dkLen; i++) {
/* Generate INT(i + 1). */
be32enc(ivec, (uint32_t)(i + 1));
/* Compute U_1 = PRF(P, S || INT(i)). */
memcpy(&hctx, &PShctx, sizeof(hmac_yp_ctx));
hmac_blake2b_yp_update(&hctx, ivec, 4);
hmac_blake2b_yp_final(&hctx, U);
/* T_i = U_1 ... */
memcpy(T, U, 32);
for (j = 2; j <= c; j++) {
/* Compute U_j. */
hmac_blake2b_yp_init(&hctx, passwd, passwdlen);
hmac_blake2b_yp_update(&hctx, U, 32);
hmac_blake2b_yp_final(&hctx, U);
/* ... xor U_j ... */
for (k = 0; k < 32; k++) {
T[k] ^= U[k];
}
}
/* Copy as many bytes as necessary into buf. */
clen = dkLen - i * 32;
if (clen > 32) {
clen = 32;
}
memcpy(&buf[i * 32], T, clen);
}
/* Clean PShctx, since we never called _Final on it. */
memset(&PShctx, 0, sizeof(hmac_yp_ctx));
}
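// Illustrative sketch only: deriving a 64 byte key with the routine above.
// The password, salt and iteration count are made-up example values; a
// dkLen of 64 exercises two 32 byte PRF blocks of the loop above.
/*
uint8_t dk[64];
pbkdf2_blake2b_yp( (const uint8_t*)"password", 8,
                   (const uint8_t*)"salt", 4,
                   1000,                   // iteration count c
                   dk, sizeof(dk) );       // dkLen
*/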

View File

@@ -0,0 +1,42 @@
#pragma once
#ifndef __BLAKE2B_H__
#define __BLAKE2B_H__
#include <stddef.h>
#include <stdint.h>
#if defined(_MSC_VER) || defined(__x86_64__) || defined(__x86__)
#define NATIVE_LITTLE_ENDIAN
#endif
// state context
typedef struct {
uint8_t b[128]; // input buffer
uint64_t h[8]; // chained state
uint64_t t[2]; // total number of bytes
size_t c; // pointer for b[]
size_t outlen; // digest size
} blake2b_yp_ctx;
typedef struct {
blake2b_yp_ctx inner;
blake2b_yp_ctx outer;
} hmac_yp_ctx;
#if defined(__cplusplus)
extern "C" {
#endif
int blake2b_yp_init(blake2b_yp_ctx *ctx, size_t outlen, const void *key, size_t keylen);
void blake2b_yp_update(blake2b_yp_ctx *ctx, const void *in, size_t inlen);
void blake2b_yp_final(blake2b_yp_ctx *ctx, void *out);
void blake2b_yp_hash(void *out, const void *in, size_t inlen);
void hmac_blake2b_yp_hash(void *out, const void *key, size_t keylen, const void *in, size_t inlen);
void pbkdf2_blake2b_yp(const uint8_t * passwd, size_t passwdlen, const uint8_t * salt,
size_t saltlen, uint64_t c, uint8_t * buf, size_t dkLen);
#if defined(__cplusplus)
}
#endif
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1 @@
#define insecure_memzero(buf, len) /* empty */

View File

@@ -0,0 +1,94 @@
/*-
* Copyright 2007-2014 Colin Percival
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _SYSENDIAN_H_
#define _SYSENDIAN_H_
#include <stdint.h>
/* Avoid namespace collisions with BSD <sys/endian.h>. */
#define be32dec libcperciva_be32dec
#define be32enc libcperciva_be32enc
#define be64enc libcperciva_be64enc
#define le32dec libcperciva_le32dec
#define le32enc libcperciva_le32enc
static inline uint32_t
be32dec(const void * pp)
{
const uint8_t * p = (uint8_t const *)pp;
return ((uint32_t)(p[3]) + ((uint32_t)(p[2]) << 8) +
((uint32_t)(p[1]) << 16) + ((uint32_t)(p[0]) << 24));
}
static inline void
be32enc(void * pp, uint32_t x)
{
uint8_t * p = (uint8_t *)pp;
p[3] = x & 0xff;
p[2] = (x >> 8) & 0xff;
p[1] = (x >> 16) & 0xff;
p[0] = (x >> 24) & 0xff;
}
static inline void
be64enc(void * pp, uint64_t x)
{
uint8_t * p = (uint8_t *)pp;
p[7] = x & 0xff;
p[6] = (x >> 8) & 0xff;
p[5] = (x >> 16) & 0xff;
p[4] = (x >> 24) & 0xff;
p[3] = (x >> 32) & 0xff;
p[2] = (x >> 40) & 0xff;
p[1] = (x >> 48) & 0xff;
p[0] = (x >> 56) & 0xff;
}
static inline uint32_t
le32dec(const void * pp)
{
const uint8_t * p = (uint8_t const *)pp;
return ((uint32_t)(p[0]) + ((uint32_t)(p[1]) << 8) +
((uint32_t)(p[2]) << 16) + ((uint32_t)(p[3]) << 24));
}
static inline void
le32enc(void * pp, uint32_t x)
{
uint8_t * p = (uint8_t *)pp;
p[0] = x & 0xff;
p[1] = (x >> 8) & 0xff;
p[2] = (x >> 16) & 0xff;
p[3] = (x >> 24) & 0xff;
}
#endif /* !_SYSENDIAN_H_ */

File diff suppressed because it is too large

View File

@@ -70,6 +70,45 @@ int scanhash_yespower( struct work *work, uint32_t max_nonce,
return 0;
}
void yespower_b2b_hash( const char *input, char *output, uint32_t len )
{
yespower_b2b_tls( input, len, &yespower_params, (yespower_binary_t*)output );
}
int scanhash_yespower_b2b( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(64) vhash[8];
uint32_t _ALIGN(64) endiandata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
for (int k = 0; k < 19; k++)
be32enc(&endiandata[k], pdata[k]);
do {
be32enc(&endiandata[19], n);
yespower_b2b_hash((char*) endiandata, (char*) vhash, 80);
if ( vhash[7] < Htarg && fulltest( vhash, ptarget )
&& !opt_benchmark )
{
pdata[19] = n;
submit_solution( work, vhash, mythr );
}
n++;
} while (n < max_nonce && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
return 0;
}
int64_t yespower_get_max64()
{
return 0xfffLL;
@@ -78,15 +117,34 @@ int64_t yespower_get_max64()
bool register_yespower_algo( algo_gate_t* gate )
{
yespower_params.version = YESPOWER_1_0;
yespower_params.N = 2048;
yespower_params.r = 32;
yespower_params.pers = NULL;
yespower_params.perslen = 0;
if ( opt_param_n ) yespower_params.N = opt_param_n;
else yespower_params.N = 2048;
if ( opt_param_r ) yespower_params.r = opt_param_r;
else yespower_params.r = 32;
if ( opt_param_key )
{
yespower_params.pers = opt_param_key;
yespower_params.perslen = strlen( opt_param_key );
}
else
{
yespower_params.pers = NULL;
yespower_params.perslen = 0;
}
applog( LOG_NOTICE,"Yespower parameters: N= %d, R= %d.", yespower_params.N,
yespower_params.r );
if ( yespower_params.pers )
applog( LOG_NOTICE,"Key= \"%s\"\n", yespower_params.pers );
gate->optimizations = SSE2_OPT;
gate->get_max64 = (void*)&yespower_get_max64;
gate->scanhash = (void*)&scanhash_yespower;
gate->hash = (void*)&yespower_hash;
gate->set_target = (void*)&scrypt_set_target;
opt_target_factor = 65536.0;
return true;
};
@@ -101,7 +159,7 @@ bool register_yespowerr16_algo( algo_gate_t* gate )
gate->get_max64 = (void*)&yespower_get_max64;
gate->scanhash = (void*)&scanhash_yespower;
gate->hash = (void*)&yespower_hash;
gate->set_target = (void*)&scrypt_set_target;
opt_target_factor = 65536.0;
return true;
};
@@ -120,13 +178,13 @@ bool register_yescrypt_05_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | SHA_OPT;
gate->scanhash = (void*)&scanhash_yespower;
gate->set_target = (void*)&scrypt_set_target;
gate->get_max64 = (void*)&yescrypt_05_get_max64;
yespower_params.version = YESPOWER_0_5;
yespower_params.N = 2048;
yespower_params.r = 8;
yespower_params.pers = NULL;
yespower_params.perslen = 0;
opt_target_factor = 65536.0;
return true;
}
@@ -134,13 +192,13 @@ bool register_yescryptr8_05_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | SHA_OPT;
gate->scanhash = (void*)&scanhash_yespower;
gate->set_target = (void*)&scrypt_set_target;
gate->get_max64 = (void*)&yescrypt_05_get_max64;
yespower_params.version = YESPOWER_0_5;
yespower_params.N = 2048;
yespower_params.r = 8;
yespower_params.pers = "Client Key";
yespower_params.perslen = 10;
opt_target_factor = 65536.0;
return true;
}
@@ -148,13 +206,13 @@ bool register_yescryptr16_05_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | SHA_OPT;
gate->scanhash = (void*)&scanhash_yespower;
gate->set_target = (void*)&scrypt_set_target;
gate->get_max64 = (void*)&yescryptr16_05_get_max64;
yespower_params.version = YESPOWER_0_5;
yespower_params.N = 4096;
yespower_params.r = 16;
yespower_params.pers = NULL;
yespower_params.perslen = 0;
opt_target_factor = 65536.0;
return true;
}
@@ -162,13 +220,76 @@ bool register_yescryptr32_05_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | SHA_OPT;
gate->scanhash = (void*)&scanhash_yespower;
gate->set_target = (void*)&scrypt_set_target;
gate->get_max64 = (void*)&yescryptr16_05_get_max64;
yespower_params.version = YESPOWER_0_5;
yespower_params.N = 4096;
yespower_params.r = 32;
yespower_params.pers = "WaviBanana";
yespower_params.perslen = 10;
opt_target_factor = 65536.0;
return true;
}
bool register_power2b_algo( algo_gate_t* gate )
{
yespower_params.version = YESPOWER_1_0;
yespower_params.N = 2048;
yespower_params.r = 32;
yespower_params.pers = "Now I am become Death, the destroyer of worlds";
yespower_params.perslen = 46;
applog( LOG_NOTICE,"yespower-b2b parameters: N= %d, R= %d.", yespower_params.N,
yespower_params.r );
applog( LOG_NOTICE,"Key= \"%s\"", yespower_params.pers );
applog( LOG_NOTICE,"Key length= %d\n", yespower_params.perslen );
gate->optimizations = SSE2_OPT;
gate->get_max64 = (void*)&yespower_get_max64;
gate->scanhash = (void*)&scanhash_yespower_b2b;
gate->hash = (void*)&yespower_b2b_hash;
opt_target_factor = 65536.0;
return true;
};
// Generic yespower + blake2b
bool register_yespower_b2b_algo( algo_gate_t* gate )
{
yespower_params.version = YESPOWER_1_0;
if ( !( opt_param_n && opt_param_r ) )
{
applog(LOG_ERR,"Yespower-b2b N & R parameters are required");
return false;
}
yespower_params.N = opt_param_n;
yespower_params.r = opt_param_r;
if ( opt_param_key )
{
yespower_params.pers = opt_param_key;
yespower_params.perslen = strlen( opt_param_key );
}
else
{
yespower_params.pers = NULL;
yespower_params.perslen = 0;
}
applog( LOG_NOTICE,"Yespower-b2b parameters: N= %d, R= %d",
yespower_params.N, yespower_params.r );
if ( yespower_params.pers )
{
applog( LOG_NOTICE,"Key= \"%s\"", yespower_params.pers );
applog( LOG_NOTICE,"Key length= %d\n", yespower_params.perslen );
}
gate->optimizations = SSE2_OPT;
gate->get_max64 = (void*)&yespower_get_max64;
gate->scanhash = (void*)&scanhash_yespower_b2b;
gate->hash = (void*)&yespower_b2b_hash;
opt_target_factor = 65536.0;
return true;
};
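// Since yespower-b2b refuses to start without N and R, both must be given on
// the command line. Hypothetical invocation; the key, pool URL and
// credentials are placeholders, only -a/-N/-R/-K and the usual -o/-u/-p
// options are real:
//
//   cpuminer -a yespower-b2b -N 2048 -R 32 -K "personalization string" \
//            -o stratum+tcp://pool.example.com:3333 -u wallet -p x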

View File

@@ -111,6 +111,10 @@ extern int yespower(yespower_local_t *local,
const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
extern int yespower_b2b(yespower_local_t *local,
const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
/**
* yespower_tls(src, srclen, params, dst):
* Compute yespower(src[0 .. srclen - 1], N, r), to be checked for "< target".
@@ -123,6 +127,9 @@ extern int yespower(yespower_local_t *local,
extern int yespower_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
extern int yespower_b2b_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
#ifdef __cplusplus
}
#endif

configure
View File

@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.6.2.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.9.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -577,8 +577,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='cpuminer-opt'
PACKAGE_TARNAME='cpuminer-opt'
PACKAGE_VERSION='3.9.6.2'
PACKAGE_STRING='cpuminer-opt 3.9.6.2'
PACKAGE_VERSION='3.9.9'
PACKAGE_STRING='cpuminer-opt 3.9.9'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''
@@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures cpuminer-opt 3.9.6.2 to adapt to many kinds of systems.
\`configure' configures cpuminer-opt 3.9.9 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1404,7 +1404,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of cpuminer-opt 3.9.6.2:";;
short | recursive ) echo "Configuration of cpuminer-opt 3.9.9:";;
esac
cat <<\_ACEOF
@@ -1509,7 +1509,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
cpuminer-opt configure 3.9.6.2
cpuminer-opt configure 3.9.9
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by cpuminer-opt $as_me 3.9.6.2, which was
It was created by cpuminer-opt $as_me 3.9.9, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2993,7 +2993,7 @@ fi
# Define the identity of the package.
PACKAGE='cpuminer-opt'
VERSION='3.9.6.2'
VERSION='3.9.9'
cat >>confdefs.h <<_ACEOF
@@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by cpuminer-opt $as_me 3.9.6.2, which was
This file was extended by cpuminer-opt $as_me 3.9.9, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -6756,7 +6756,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
cpuminer-opt config.status 3.9.6.2
cpuminer-opt config.status 3.9.9
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"

View File

@@ -1,4 +1,4 @@
AC_INIT([cpuminer-opt], [3.9.6.2])
AC_INIT([cpuminer-opt], [3.9.9])
AC_PREREQ([2.59c])
AC_CANONICAL_SYSTEM

File diff suppressed because it is too large

miner.h
View File

@@ -310,6 +310,7 @@ struct thr_api {
#define CL_WHT "\x1B[01;37m" /* white */
void applog(int prio, const char *fmt, ...);
void applog2(int prio, const char *fmt, ...);
void restart_threads(void);
extern json_t *json_rpc_call( CURL *curl, const char *url, const char *userpass,
const char *rpc_req, int *curl_err, int flags );
@@ -331,6 +332,24 @@ extern void diff_to_target(uint32_t *target, double diff);
double hash_target_ratio( uint32_t* hash, uint32_t* target );
void work_set_target_ratio( struct work* work, uint32_t* hash );
struct thr_info {
int id;
pthread_t pth;
pthread_attr_t attr;
struct thread_q *q;
struct cpu_info cpu;
};
//struct thr_info *thr_info;
bool submit_solution( struct work *work, void *hash,
struct thr_info *thr );
bool submit_lane_solution( struct work *work, void *hash,
struct thr_info *thr, int lane );
//bool submit_work( struct thr_info *thr, const struct work *work_in );
void get_currentalgo( char* buf, int sz );
bool has_sha();
@@ -355,7 +374,7 @@ struct work {
uint32_t target[8];
double targetdiff;
double shareratio;
// double shareratio;
double sharediff;
int height;
@@ -420,7 +439,7 @@ struct stratum_ctx {
struct work work __attribute__ ((aligned (64)));
pthread_mutex_t work_lock;
int bloc_height;
int block_height;
} __attribute__ ((aligned (64)));
bool stratum_socket_full(struct stratum_ctx *sctx, int timeout);
@@ -471,6 +490,9 @@ void print_hash_tests(void);
void scale_hash_for_display ( double* hashrate, char* units );
void report_summary_log( bool force );
/*
struct thr_info {
int id;
pthread_t pth;
@@ -478,6 +500,7 @@ struct thr_info {
struct thread_q *q;
struct cpu_info cpu;
};
*/
struct work_restart {
volatile uint8_t restart;
@@ -549,6 +572,7 @@ enum algos {
ALGO_PHI2,
ALGO_PLUCK,
ALGO_POLYTIMOS,
ALGO_POWER2B,
ALGO_QUARK,
ALGO_QUBIT,
ALGO_SCRYPT,
@@ -578,6 +602,7 @@ enum algos {
ALGO_X14,
ALGO_X15,
ALGO_X16R,
ALGO_X16RV2,
ALGO_X16RT,
ALGO_X16RT_VEIL,
ALGO_X16S,
@@ -590,6 +615,7 @@ enum algos {
ALGO_YESCRYPTR32,
ALGO_YESPOWER,
ALGO_YESPOWERR16,
ALGO_YESPOWER_B2B,
ALGO_ZR5,
ALGO_COUNT
};
@@ -643,6 +669,7 @@ static const char* const algo_names[] = {
"phi2",
"pluck",
"polytimos",
"power2b",
"quark",
"qubit",
"scrypt",
@@ -672,6 +699,7 @@ static const char* const algo_names[] = {
"x14",
"x15",
"x16r",
"x16rv2",
"x16rt",
"x16rt-veil",
"x16s",
@@ -684,6 +712,7 @@ static const char* const algo_names[] = {
"yescryptr32",
"yespower",
"yespowerr16",
"yespower-b2b",
"zr5",
"\0"
};
@@ -726,11 +755,15 @@ extern uint32_t opt_work_size;
extern double *thr_hashrates;
extern double global_hashrate;
extern double stratum_diff;
extern bool opt_reset_on_stale;
extern double net_diff;
extern double net_hashrate;
extern int opt_pluck_n;
extern int opt_scrypt_n;
extern int opt_param_n;
extern int opt_param_r;
extern char* opt_param_key;
extern double opt_diff_factor;
extern double opt_target_factor;
extern bool opt_randomize;
extern bool allow_mininginfo;
extern time_t g_work_time;
@@ -796,10 +829,11 @@ Options:\n\
neoscrypt NeoScrypt(128, 2, 1)\n\
nist5 Nist5\n\
pentablake 5 x blake512\n\
phi1612 phi, LUX coin (original algo)\n\
phi2 LUX (new algo)\n\
phi1612 phi\n\
phi2\n\
pluck Pluck:128 (Supcoin)\n\
polytimos\n\
power2b MicroBitcoin (MBC)\n\
quark Quark\n\
qubit Qubit\n\
scrypt scrypt(1024, 1, 1) (default)\n\
@@ -829,7 +863,8 @@ Options:\n\
x13sm3 hsr (Hshare)\n\
x14 X14\n\
x15 X15\n\
x16r Ravencoin (RVN)\n\
x16r\n\
x16rv2 Ravencoin (RVN)\n\
x16rt Gincoin (GIN)\n\
x16rt-veil Veil (VEIL)\n\
x16s Pigeoncoin (PGN)\n\
@@ -842,7 +877,11 @@ Options:\n\
yescryptr32 WAVI\n\
yespower Cryply\n\
yespowerr16 Yenten (YTN)\n\
yespower-b2b generic yespower + blake2b\n\
zr5 Ziftr\n\
-N, --param-n N parameter for scrypt based algos\n\
-R, --param-r R parameter for scrypt based algos\n\
-K, --param-key Key parameter for algos that use it\n\
-o, --url=URL URL of mining server\n\
-O, --userpass=U:P username:password pair for mining server\n\
-u, --user=USERNAME username for mining server\n\
@@ -852,13 +891,14 @@ Options:\n\
-t, --threads=N number of miner threads (default: number of processors)\n\
-r, --retries=N number of times to retry if a network call fails\n\
(default: retry indefinitely)\n\
-R, --retry-pause=N time to pause between retries, in seconds (default: 30)\n\
--retry-pause=N time to pause between retries, in seconds (default: 30)\n\
--time-limit=N maximum time [s] to mine before exiting the program.\n\
-T, --timeout=N timeout for long poll and stratum (default: 300 seconds)\n\
-s, --scantime=N upper bound on time spent scanning current work when\n\
long polling is unavailable, in seconds (default: 5)\n\
--randomize Randomize scan range start to reduce duplicates\n\
-f, --diff-factor Divide req. difficulty by this factor (std is 1.0)\n\
--reset-on-stale Workaround reset stratum if too many stale shares\n\
-f, --diff-factor Divide req. difficulty by this factor (std is 1.0)\n\
-m, --diff-multiplier Multiply difficulty by this factor (std is 1.0)\n\
--hash-meter Display thread hash rates\n\
--hide-diff Do not display changes in difficulty\n\
@@ -927,6 +967,7 @@ static struct option const options[] = {
{ "hash-meter", 0, NULL, 1014 },
{ "hide-diff", 0, NULL, 1013 },
{ "help", 0, NULL, 'h' },
{ "key", 1, NULL, 'K' },
{ "no-gbt", 0, NULL, 1011 },
{ "no-getwork", 0, NULL, 1010 },
{ "no-longpoll", 0, NULL, 1003 },
@@ -936,14 +977,18 @@ static struct option const options[] = {
{ "max-temp", 1, NULL, 1060 },
{ "max-diff", 1, NULL, 1061 },
{ "max-rate", 1, NULL, 1062 },
{ "param-key", 1, NULL, 'K' },
{ "param-n", 1, NULL, 'N' },
{ "param-r", 1, NULL, 'R' },
{ "pass", 1, NULL, 'p' },
{ "protocol", 0, NULL, 'P' },
{ "protocol-dump", 0, NULL, 'P' },
{ "proxy", 1, NULL, 'x' },
{ "quiet", 0, NULL, 'q' },
{ "retries", 1, NULL, 'r' },
{ "retry-pause", 1, NULL, 'R' },
{ "retry-pause", 1, NULL, 1025 },
{ "randomize", 0, NULL, 1024 },
{ "reset-on-stale", 0, NULL, 1026 },
{ "scantime", 1, NULL, 's' },
#ifdef HAVE_SYSLOG_H
{ "syslog", 0, NULL, 'S' },

View File

@@ -36,7 +36,7 @@
// MMX: 64 bit vectors
// SSE2: 128 bit vectors (64 bit CPUs only, such as Intel Core2).
// AVX2: 256 bit vectors (Starting with Intel Haswell and AMD Ryzen)
// AVX512: 512 bit vectors (still under development)
// AVX512: 512 bit vectors (Starting with SkylakeX)
//
// Most functions are available at the stated levels but in rare cases
// a higher level feature may be required with no compatible alternative.
@@ -138,24 +138,17 @@
// improve high level code readability without the penalty of function
// overhead.
//
// A major restructuring is taking place shifting the focus from pointers
// to registers. Previously pointer casting used memory to provide transparency
// leaving it up to the compiler to manage everything and it does a very good
// job. The focus has shifted to register arguments for more control
// over the actual instructions assuming the data is in a register and the
// the compiler just needs to manage the registers.
//
// Rather than use pointers to provide type transparency,
// specific instructions are used to access specific data as specific types.
// Previously pointers were cast and the compiler was left to find a way
// to get the data from wherever it happened to be to the correct registers.
// These utilities avoid memory accesses and assume data is in a register
// argument. Vector constants, in particular, are generated with opcodes instead
// of being read from memory.
//
// The utilities defined here make use of features like register aliasing
// to optimize operations. Many operations have specialized versions as
// well as more generic versions. It is preferable to use a specialized
// version whenever possible a sthey can take advantage of certain
// optimizations not available to the generic version. Specically the generic
// version usually has a second argument used is some extra calculations.
// version whenever possible as they can take advantage of certain
// optimizations not available to the generic version. The generic
// version will often have an additional argument used in some extra
// calculations.
//
///////////////////////////////////////////////////////
@@ -165,9 +158,6 @@
#include <stdlib.h>
#include <stdbool.h>
// Various types and overlays
#include "simd-utils/simd-types.h"
// 64 and 128 bit integers.
#include "simd-utils/simd-int.h"
@@ -175,7 +165,6 @@
// 64 bit vectors
#include "simd-utils/simd-64.h"
//#include "simd-utils/intrlv-mmx.h"
#if defined(__SSE2__)
@@ -189,17 +178,19 @@
#if defined(__AVX2__)
// Utilities that require AVX2 are defined in simd-256.h.
// Skylake-X has all these
#if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// 512 bit vectors
#include "simd-utils/simd-512.h"
#endif // MMX
#endif // SSE2
#endif // AVX
#endif // AVX2
#endif // AVX512
#endif // AVX2
#endif // AVX
#endif // SSE2
#endif // MMX
#include "simd-utils/intrlv.h"

View File

@@ -1,45 +1,11 @@
#if !defined(INTERLEAVE_H__)
#define INTERLEAVE_H__ 1
// philosophical discussion
//
// transitions:
//
// int32 <-> int64
// uint64_t = (uint64_t)int32_lo | ( (uint64_t)int32_hi << 32 )
// Efficient transition and post processing, 32 bit granularity is lost.
// Not practical.
//
// int32 <-> m64
// More complex, 32 bit granularity maintained, limited number of mmx regs.
// int32 <-> int64 <-> m64 might be more efficient.
//
// int32 <-> m128
// Expensive, current implementation.
//
// int32 <-> m256
// Very expensive multi stage, current implementation.
//
// int64/m64 <-> m128
// Efficient, agnostic to native element size. Common.
//
// m128 <-> m256
// Expensive for a single instruction, unavoidable. Common.
//
// Multi stage options
//
// int32 <-> int64 -> m128
// More efficient than insert32, granularity maintained. Common.
//
// int64 <-> m128 -> m256
// Unavoidable, reasonably efficient. Common
//
// int32 <-> int64 -> m128 -> m256
// Seems inevitable, most efficient despite number of stages. Common.
//
// It seems the best approach is to avoid transitions and use the native type
// of the data: 64 & 32 bit use integer, 128 bit use m128i.
//////////////////////////////////////////////////////////////////////////
//
// Utilities to interleave and deinterleave multiple data for parallel
// processing using SIMD. Utilities are grouped by data size.
//
////////////////////////////////
//
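// As a mental model only (the real routines below are unrolled and use SIMD
// where it helps), 4x32 interleaving is equivalent to this scalar loop:
// word i of lane L lands at dst[ 4*i + L ].
/*
static void intrlv_4x32_model( uint32_t *d, const uint32_t *s0,
                               const uint32_t *s1, const uint32_t *s2,
                               const uint32_t *s3, const int nwords )
{
   for ( int i = 0; i < nwords; i++ )
   {
      d[ 4*i     ] = s0[i];
      d[ 4*i + 1 ] = s1[i];
      d[ 4*i + 2 ] = s2[i];
      d[ 4*i + 3 ] = s3[i];
   }
}
*/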
@@ -262,8 +228,6 @@ static inline void dintrlv_4x32_512( void *dst0, void *dst1, void *dst2,
d0[15] = s[ 60]; d1[15] = s[ 61]; d2[15] = s[ 62]; d3[15] = s[ 63];
}
#undef DLEAVE_4x32
static inline void extr_lane_4x32( void *d, const void *s,
const int lane, const int bit_len )
{
@@ -308,6 +272,7 @@ static inline void mm128_intrlv_4x32x( void *dst, void *src0, void *src1,
}
}
// Double buffered source to reduce latency
static inline void mm128_bswap32_intrlv80_4x32( void *d, void *src )
{
__m128i sx = mm128_bswap_32( casti_m128i( src,0 ) );
@@ -469,21 +434,18 @@ static inline void extr_lane_8x32( void *d, const void *s,
#if defined(__AVX2__)
// There are alignment problems with the source buffer on Windows,
// so the 256 bit bswap can't be used.
static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src )
{
__m256i s0 = mm256_bswap_32( casti_m256i( src,0 ) );
__m256i s1 = mm256_bswap_32( casti_m256i( src,1 ) );
__m128i s2 = mm128_bswap_32( casti_m128i( src,4 ) );
const __m256i zero = m256_zero;
const __m256i one = m256_one_32;
const __m256i two = _mm256_add_epi32( one, one );
const __m256i three = _mm256_add_epi32( two, one );
const __m256i four = _mm256_add_epi32( two, two );
casti_m256i( d, 0 ) = _mm256_permutevar8x32_epi32( s0, zero );
casti_m256i( d, 0 ) = _mm256_broadcastd_epi32(
_mm256_castsi256_si128( s0 ) );
casti_m256i( d, 1 ) = _mm256_permutevar8x32_epi32( s0, one );
casti_m256i( d, 2 ) = _mm256_permutevar8x32_epi32( s0, two );
casti_m256i( d, 3 ) = _mm256_permutevar8x32_epi32( s0, three );
@@ -494,7 +456,8 @@ static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src )
_mm256_add_epi32( four, two ) );
casti_m256i( d, 7 ) = _mm256_permutevar8x32_epi32( s0,
_mm256_add_epi32( four, three ) );
casti_m256i( d, 8 ) = _mm256_permutevar8x32_epi32( s1, zero );
casti_m256i( d, 8 ) = _mm256_broadcastd_epi32(
_mm256_castsi256_si128( s1 ) );
casti_m256i( d, 9 ) = _mm256_permutevar8x32_epi32( s1, one );
casti_m256i( d,10 ) = _mm256_permutevar8x32_epi32( s1, two );
casti_m256i( d,11 ) = _mm256_permutevar8x32_epi32( s1, three );
@@ -505,8 +468,7 @@ static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src )
_mm256_add_epi32( four, two ) );
casti_m256i( d,15 ) = _mm256_permutevar8x32_epi32( s1,
_mm256_add_epi32( four, three ) );
casti_m256i( d,16 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s2 ), zero );
casti_m256i( d,16 ) = _mm256_broadcastd_epi32( s2 );
casti_m256i( d,17 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s2 ), one );
casti_m256i( d,18 ) = _mm256_permutevar8x32_epi32(
@@ -650,7 +612,7 @@ static inline void dintrlv_16x32_512( void *d00, void *d01, void *d02,
#undef DLEAVE_16x32
static inline void extr_lane_16x32( void *d, const void *s,
const int lane, const int bit_len )
{
((uint32_t*)d)[ 0] = ((uint32_t*)s)[ lane ];
((uint32_t*)d)[ 1] = ((uint32_t*)s)[ lane+16 ];
@@ -682,41 +644,41 @@ static inline void mm512_bswap32_intrlv80_16x32( void *d, void *src )
const __m512i three = _mm512_add_epi32( two, one );
__m512i x = _mm512_add_epi32( three, three );
casti_m512i( d, 0 ) = _mm512_permutexvar_epi32( s0, m512_zero );
casti_m512i( d, 1 ) = _mm512_permutexvar_epi32( s0, one );
casti_m512i( d, 2 ) = _mm512_permutexvar_epi32( s0, two );
casti_m512i( d, 3 ) = _mm512_permutexvar_epi32( s0, three );
casti_m512i( d, 4 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( two, two ) );
casti_m512i( d, 5 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( three, two ) );
casti_m512i( d, 6 ) = _mm512_permutexvar_epi32( s0, x );
casti_m512i( d, 7 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, one ) );
casti_m512i( d, 8 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, two ) );
casti_m512i( d, 0 ) = _mm512_broadcastd_epi32(
_mm512_castsi512_si128( s0 ) );
casti_m512i( d, 1 ) = _mm512_permutexvar_epi32( one, s0 );
casti_m512i( d, 2 ) = _mm512_permutexvar_epi32( two, s0 );
casti_m512i( d, 3 ) = _mm512_permutexvar_epi32( three, s0 );
casti_m512i( d, 4 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( two, two ), s0 );
casti_m512i( d, 5 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( three, two ), s0 );
casti_m512i( d, 6 ) = _mm512_permutexvar_epi32( x, s0 );
casti_m512i( d, 7 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, one ), s0 );
casti_m512i( d, 8 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, two ), s0 );
x = _mm512_add_epi32( x, three );
casti_m512i( d, 9 ) = _mm512_permutexvar_epi32( s0, x );
casti_m512i( d,10 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, one ) );
casti_m512i( d,11 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, two ) );
casti_m512i( d, 9 ) = _mm512_permutexvar_epi32( x, s0 );
casti_m512i( d,10 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, one ), s0 );
casti_m512i( d,11 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, two ), s0 );
x = _mm512_add_epi32( x, three );
casti_m512i( d,12 ) = _mm512_permutexvar_epi32( s0, x );
casti_m512i( d,13 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, one ) );
casti_m512i( d,14 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, two ) );
casti_m512i( d,15 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, three ) );
casti_m512i( d,16 ) = _mm512_permutexvar_epi32(
_mm512_castsi128_si512( s1 ), m512_zero );
casti_m512i( d,17 ) = _mm512_permutexvar_epi32(
_mm512_castsi128_si512( s1 ), one );
casti_m512i( d,18 ) = _mm512_permutexvar_epi32(
_mm512_castsi128_si512( s1 ), two );
casti_m512i( d,19 ) = _mm512_permutexvar_epi32(
_mm512_castsi128_si512( s1 ), three );
casti_m512i( d,12 ) = _mm512_permutexvar_epi32( x, s0 );
casti_m512i( d,13 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, one ), s0 );
casti_m512i( d,14 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, two ), s0 );
casti_m512i( d,15 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, three ), s0 );
casti_m512i( d,16 ) = _mm512_broadcastd_epi32( s1 );
casti_m512i( d,17 ) = _mm512_permutexvar_epi32( one,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d,18 ) = _mm512_permutexvar_epi32( two,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d,19 ) = _mm512_permutexvar_epi32( three,
_mm512_castsi128_si512( s1 ) );
}
#endif // AVX512
@@ -874,17 +836,6 @@ static inline void extr_lane_4x64( void *d, const void *s,
((uint64_t*)d)[ 5] = ((uint64_t*)s)[ lane+20 ];
((uint64_t*)d)[ 6] = ((uint64_t*)s)[ lane+24 ];
((uint64_t*)d)[ 7] = ((uint64_t*)s)[ lane+28 ];
/*
if ( bit_len <= 256 ) return;
((uint64_t*)d)[ 8] = ((uint64_t*)s)[ lane+32 ];
((uint64_t*)d)[ 9] = ((uint64_t*)s)[ lane+36 ];
((uint64_t*)d)[10] = ((uint64_t*)s)[ lane+40 ];
((uint64_t*)d)[11] = ((uint64_t*)s)[ lane+44 ];
((uint64_t*)d)[12] = ((uint64_t*)s)[ lane+48 ];
((uint64_t*)d)[13] = ((uint64_t*)s)[ lane+52 ];
((uint64_t*)d)[14] = ((uint64_t*)s)[ lane+56 ];
((uint64_t*)d)[15] = ((uint64_t*)s)[ lane+60 ];
*/
}
#if defined(__AVX2__)
@@ -991,17 +942,6 @@ static inline void extr_lane_8x64( void *d, const void *s,
((uint64_t*)d)[ 5] = ((uint64_t*)s)[ lane+ 40 ];
((uint64_t*)d)[ 6] = ((uint64_t*)s)[ lane+ 48 ];
((uint64_t*)d)[ 7] = ((uint64_t*)s)[ lane+ 56 ];
/*
if ( bit_len <= 256 ) return;
((uint64_t*)d)[ 8] = ((uint64_t*)s)[ lane+ 64 ];
((uint64_t*)d)[ 9] = ((uint64_t*)s)[ lane+ 72 ];
((uint64_t*)d)[10] = ((uint64_t*)s)[ lane+ 80 ];
((uint64_t*)d)[11] = ((uint64_t*)s)[ lane+ 88 ];
((uint64_t*)d)[12] = ((uint64_t*)s)[ lane+ 96 ];
((uint64_t*)d)[13] = ((uint64_t*)s)[ lane+104 ];
((uint64_t*)d)[14] = ((uint64_t*)s)[ lane+112 ];
((uint64_t*)d)[15] = ((uint64_t*)s)[ lane+120 ];
*/
}
#if defined(__AVX512F__) && defined(__AVX512VL__)
@@ -1009,26 +949,23 @@ static inline void extr_lane_8x64( void *d, const void *s,
static inline void mm512_bswap32_intrlv80_8x64( void *dst, void *src )
{
__m512i *d = (__m512i*)dst;
__m512i s0 = mm512_bswap_32( casti_m512i(src, 0 ) );
__m128i s1 = mm128_bswap_32( casti_m128i(src, 4 ) );
// const __m512i zero = m512_zero;
__m512i s0 = mm512_bswap_32( casti_m512i( src, 0 ) );
__m128i s1 = mm128_bswap_32( casti_m128i( src, 4 ) );
const __m512i one = m512_one_64;
const __m512i two = _mm512_add_epi64( one, one );
const __m512i three = _mm512_add_epi64( two, one );
const __m512i four = _mm512_add_epi64( two, two );
d[0] = _mm512_permutexvar_epi64( s0, m512_zero );
d[1] = _mm512_permutexvar_epi64( s0, one );
d[2] = _mm512_permutexvar_epi64( s0, two );
d[3] = _mm512_permutexvar_epi64( s0, three );
d[4] = _mm512_permutexvar_epi64( s0, four );
d[5] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, one ) );
d[6] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, two ) );
d[7] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, three ) );
d[8] = _mm512_permutexvar_epi64(
_mm512_castsi128_si512( s1 ), m512_zero );
d[9] = _mm512_permutexvar_epi64(
_mm512_castsi128_si512( s1 ), one );
d[0] = _mm512_broadcastq_epi64( _mm512_castsi512_si128( s0 ) );
d[1] = _mm512_permutexvar_epi64( one, s0 );
d[2] = _mm512_permutexvar_epi64( two, s0 );
d[3] = _mm512_permutexvar_epi64( three, s0 );
d[4] = _mm512_permutexvar_epi64( four, s0 );
d[5] = _mm512_permutexvar_epi64( _mm512_add_epi64( four, one ), s0 );
d[6] = _mm512_permutexvar_epi64( _mm512_add_epi64( four, two ), s0 );
d[7] = _mm512_permutexvar_epi64( _mm512_add_epi64( four, three ), s0 );
d[8] = _mm512_broadcastq_epi64( s1 );
d[9] = _mm512_permutexvar_epi64( one, _mm512_castsi128_si512( s1 ) );
}
#endif // AVX512
@@ -1175,6 +1112,44 @@ static inline void dintrlv_4x128_512( void *dst0, void *dst1, void *dst2,
}
// 2x256 (AVX512)
#if defined (__AVX__)
static inline void intrlv_2x256( void *dst, const void *src0,
const void *src1, int bit_len )
{
__m256i *d = (__m256i*)dst;
const __m256i *s0 = (const __m256i*)src0;
const __m256i *s1 = (const __m256i*)src1;
d[ 0] = s0[0]; d[ 1] = s1[0];
if ( bit_len <= 256 ) return;
d[ 2] = s0[1]; d[ 3] = s1[1];
if ( bit_len <= 512 ) return;
d[ 4] = s0[2];
if ( bit_len <= 640 ) return;
d[ 5] = s1[2];
d[ 6] = s0[3]; d[ 7] = s1[3];
}
// No 80 byte dintrlv
static inline void dintrlv_2x256( void *dst0, void *dst1,
const void *src, int bit_len )
{
__m256i *d0 = (__m256i*)dst0;
__m256i *d1 = (__m256i*)dst1;
const __m256i *s = (const __m256i*)src;
d0[0] = s[ 0]; d1[0] = s[ 1];
if ( bit_len <= 256 ) return;
d0[1] = s[ 2]; d1[1] = s[ 3];
if ( bit_len <= 512 ) return;
d0[2] = s[ 4]; d1[2] = s[ 5];
d0[3] = s[ 6]; d1[3] = s[ 7];
}
#endif // AVX
///////////////////////////
//
// Re-intereleaving

View File

@@ -19,18 +19,19 @@
//
// Constants are an issue with simd. Simply put, immediate constants don't
// exist. All simd constants either reside in memory or a register and
// must be loaded or generated at run time.
// must be loaded from memory or generated using instructions at run time.
//
// Due to the cost of generating constants it is often more efficient to
// define a local const for repeated references to the same constant.
//
// Some constant values can be generated using shortcuts. Zero for example
// is as simple as XORing any register with itself, and is implemented
// in the setzero instrinsic. These shortcuts must be implemented using ASM
// in the setzero intrinsic. These shortcuts must be implemented using ASM
// due to doing things the compiler would complain about. Another single
// instruction constant is -1, defined below. Others may be added as the need
// arises. Even single instruction constants are less efficient than local
// register variables so the advice above stands.
// register variables so the advice above stands. These pseudo-constants
// do not perform any memory accesses.
//
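// In practice the advice above means generating a constant once and reusing
// the register-resident copy; a minimal sketch, assuming caller vectors x0
// and x1:
//
//    const __m128i k_one = m128_one_64;       // one opcode, no memory load
//    x0 = _mm_add_epi64( x0, k_one );
//    x1 = _mm_add_epi64( x1, k_one );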
// One common use for simd constants is as a control index for some simd
// instructions like blend and shuffle. The utilities below do not take this
@@ -40,135 +41,142 @@
#define m128_zero _mm_setzero_si128()
static inline __m128i m128_one_128_fn()
static inline __m128i mm128_one_128_fn()
{
register __m128i a;
asm( "movq $1, %0\n\t"
: "=x"(a) );
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return a;
}
#define m128_one_128 m128_one_128_fn()
#define m128_one_128 mm128_one_128_fn()
static inline __m128i m128_one_64_fn()
static inline __m128i mm128_one_64_fn()
{
register uint64_t one = 1;
register __m128i a;
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r"(one) );
return _mm_shuffle_epi32( a, 0x04 );
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x44 );
}
#define m128_one_64 m128_one_64_fn()
#define m128_one_64 mm128_one_64_fn()
static inline __m128i m128_one_32_fn()
static inline __m128i mm128_one_32_fn()
{
register uint32_t one = 1;
register __m128i a;
__m128i a;
const uint32_t one = 1;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r"(one) );
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_32 m128_one_32_fn()
#define m128_one_32 mm128_one_32_fn()
static inline __m128i m128_one_16_fn()
static inline __m128i mm128_one_16_fn()
{
register uint32_t one = 0x00010001;
register __m128i a;
__m128i a;
const uint32_t one = 0x00010001;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r"(one) );
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_16 m128_one_16_fn()
#define m128_one_16 mm128_one_16_fn()
static inline __m128i m128_one_8_fn()
static inline __m128i mm128_one_8_fn()
{
register uint32_t one = 0x01010101;
register __m128i a;
__m128i a;
const uint32_t one = 0x01010101;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r"(one) );
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_8 m128_one_8_fn()
#define m128_one_8 mm128_one_8_fn()
static inline __m128i m128_neg1_fn()
static inline __m128i mm128_neg1_fn()
{
__m128i a;
asm( "pcmpeqd %0, %0\n\t"
: "=x"(a) );
: "=x" (a) );
return a;
}
#define m128_neg1 m128_neg1_fn()
#define m128_neg1 mm128_neg1_fn()
// move uint64_t to low bits of __m128i, zeros the rest
static inline __m128i mm128_mov64_128( uint64_t n )
{
register __m128i a;
__m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (n) );
: "r" (n) );
return a;
}
static inline __m128i mm128_mov32_128( uint32_t n )
{
register __m128i a;
__m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (n) );
: "r" (n) );
return a;
}
static inline uint64_t mm128_mov128_64( __m128i a )
{
register uint64_t n;
uint64_t n;
asm( "movq %1, %0\n\t"
: "=x" (n)
: "r" (a) );
: "r" (a) );
return n;
}
static inline uint32_t mm128_mov128_32( __m128i a )
{
register uint32_t n;
uint32_t n;
asm( "movd %1, %0\n\t"
: "=x" (n)
: "r" (a) );
: "r" (a) );
return n;
}
static inline __m128i m128_const1_64( const uint64_t n )
{
__m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (n) );
return _mm_shuffle_epi32( a, 0x44 );
}
static inline __m128i m128_const1_32( const uint32_t n )
{
__m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (n) );
return _mm_shuffle_epi32( a, 0x00 );
}
#if defined(__SSE4_1__)
// Alternative to _mm_set_epi64x, doesn't use memory.
static inline __m128i m128_const_64( const uint64_t hi, const uint64_t lo )
{
register __m128i a;
__m128i a;
asm( "movq %2, %0\n\t"
"pinsrq $1, %1, %0\n\t"
: "=x"(a)
: "r"(hi), "r"(lo) );
return a;
}
static inline __m128i m128_const1_64( const uint64_t n )
{
register __m128i a;
asm( "movq %1, %0\n\t"
"pinsrq $1, %1, %0\n\t"
: "=x"(a)
: "r"(n) );
: "=x" (a)
: "r" (hi), "r" (lo) );
return a;
}
#else
// #define m128_one_128 _mm_set_epi64x( 0ULL, 1ULL )
#define m128_const_64 _mm_set_epi64x
#define m128_const1_64 _mm_set1_epi64x
#endif
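// One place these builders pay off is shuffle masks; a hedged sketch, where
// v is an assumed caller vector and the mask is the standard 32 bit
// byte-swap control for _mm_shuffle_epi8 (SSSE3):
//
//    const __m128i bswap32 = m128_const_64( 0x0c0d0e0f08090a0bULL,
//                                           0x0405060700010203ULL );
//    v = _mm_shuffle_epi8( v, bswap32 );   // byte-swap each 32 bit lane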
@@ -263,46 +271,6 @@ do { \
#endif
// Gather and scatter data.
// Surprise, they don't use vector instructions. Several reasons why.
// Since scalar data elements are being manipulated scalar instructions
// are most appropriate and can bypass vector registers. They are faster
// and more efficient on a per instruction basis due to the higher clock
// speed and greater availability of execution resources. It's good for
// interleaving data buffers for parallel processing.
// May suffer overhead if data is already in a vector register. This can
// usually be easily avoided by the coder. Sometimes _mm_set is simply better.
// These macros are likely to be used when transposing matrices rather than
// conversions of a single vector.
// Gather data elements into contiguous memory for vector use.
// Source args are appropriately sized value integers, destination arg is a
// type agnostic pointer.
// Vector alignment is not required, though likely. Appropriate integer
// alignment satisfies these macros.
// rewrite using insert
#define mm128_gather_64( d, s0, s1 ) \
((uint64_t*)d)[0] = (uint64_t)s0; \
((uint64_t*)d)[1] = (uint64_t)s1;
#define mm128_gather_32( d, s0, s1, s2, s3 ) \
((uint32_t*)d)[0] = (uint32_t)s0; \
((uint32_t*)d)[1] = (uint32_t)s1; \
((uint32_t*)d)[2] = (uint32_t)s2; \
((uint32_t*)d)[3] = (uint32_t)s3;
// Scatter data from contiguous memory.
#define mm128_scatter_64( d0, d1, s ) \
*( (uint64_t*)d0) = ((uint64_t*)s)[0]; \
*( (uint64_t*)d1) = ((uint64_t*)s)[1];
#define mm128_scatter_32( d0, d1, d2, d3, s ) \
*( (uint32_t*)d0) = ((uint32_t*)s)[0]; \
*( (uint32_t*)d1) = ((uint32_t*)s)[1]; \
*( (uint32_t*)d2) = ((uint32_t*)s)[2]; \
*( (uint32_t*)d3) = ((uint32_t*)s)[3];
// Memory functions
// Mostly for convenience, avoids calculating bytes.
// Assumes data is alinged and integral.
@@ -329,8 +297,19 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
// AVX512 has implemented bit rotation for 128 bit vectors with
// 64 and 32 bit elements.
//
// Rotate each element of v by c bits
// The compiler doesn't like it when a variable is used for the last arg of
// _mm_rol_epi32, which must be an "8 bit immediate".
// sm3-hash-4way.c fails to compile.
/*
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm128_ror_64( v, c ) _mm_ror_epi64( v, c )
#define mm128_rol_64( v, c ) _mm_rol_epi64( v, c )
#define mm128_ror_32( v, c ) _mm_ror_epi32( v, c )
#define mm128_rol_32( v, c ) _mm_rol_epi32( v, c )
#else
*/
#define mm128_ror_64( v, c ) \
_mm_or_si128( _mm_srli_epi64( v, c ), _mm_slli_epi64( v, 64-(c) ) )
@@ -344,6 +323,8 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
#define mm128_rol_32( v, c ) \
_mm_or_si128( _mm_slli_epi32( v, c ), _mm_srli_epi32( v, 32-(c) ) )
//#endif // AVX512 else
#define mm128_ror_16( v, c ) \
_mm_or_si128( _mm_srli_epi16( v, c ), _mm_slli_epi16( v, 16-(c) ) )
@@ -384,6 +365,22 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
#define mm128_brol( v, c ) \
_mm_or_si128( _mm_slli_si128( v, c ), _mm_srli_si128( v, 16-(c) ) )
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm128_invert_32( v ) _mm_shuffle_epi32( v, 0x1b )
#if defined(__SSSE3__)
#define mm128_invert_16( v ) \
   _mm_shuffle_epi8( v, m128_const_64( 0x0100030205040706, \
                                       0x09080b0a0d0c0f0e ) )
#define mm128_invert_8( v ) \
   _mm_shuffle_epi8( v, m128_const_64( 0x0001020304050607, \
                                       0x08090a0b0c0d0e0f ) )
#endif // SSSE3
//
// Rotate elements within lanes.
@@ -565,57 +562,73 @@ do { \
#define mm128_ror1x64_256( v1, v2 ) \
do { \
__m128i t = _mm_srli_si128( v1, 8 ) | _mm_slli_si128( v2, 8 ); \
v2 = _mm_srli_si128( v2, 8 ) | _mm_slli_si128( v1, 8 ); \
__m128i t = _mm_or_si128( _mm_srli_si128( v1, 8 ), \
_mm_slli_si128( v2, 8 ) ); \
v2 = _mm_or_si128( _mm_srli_si128( v2, 8 ), \
_mm_slli_si128( v1, 8 ) ); \
v1 = t; \
} while(0)
#define mm128_rol1x64_256( v1, v2 ) \
do { \
__m128i t = _mm_slli_si128( v1, 8 ) | _mm_srli_si128( v2, 8 ); \
v2 = _mm_slli_si128( v2, 8 ) | _mm_srli_si128( v1, 8 ); \
__m128i t = _mm_or_si128( _mm_slli_si128( v1, 8 ), \
_mm_srli_si128( v2, 8 ) ); \
v2 = _mm_or_si128( _mm_slli_si128( v2, 8 ), \
_mm_srli_si128( v1, 8 ) ); \
v1 = t; \
} while(0)
#define mm128_ror1x32_256( v1, v2 ) \
do { \
__m128i t = _mm_srli_si128( v1, 4 ) | _mm_slli_si128( v2, 12 ); \
v2 = _mm_srli_si128( v2, 4 ) | _mm_slli_si128( v1, 12 ); \
__m128i t = _mm_or_si128( _mm_srli_si128( v1, 4 ), \
_mm_slli_si128( v2, 12 ) ); \
v2 = _mm_or_si128( _mm_srli_si128( v2, 4 ), \
_mm_slli_si128( v1, 12 ) ); \
v1 = t; \
} while(0)
#define mm128_rol1x32_256( v1, v2 ) \
do { \
__m128i t = _mm_slli_si128( v1, 4 ) | _mm_srli_si128( v2, 12 ); \
v2 = _mm_slli_si128( v2, 4 ) | _mm_srli_si128( v1, 12 ); \
__m128i t = _mm_or_si128( _mm_slli_si128( v1, 4 ), \
_mm_srli_si128( v2, 12 ) ); \
v2 = _mm_or_si128( _mm_slli_si128( v2, 4 ), \
_mm_srli_si128( v1, 12 ) ); \
v1 = t; \
} while(0)
#define mm128_ror1x16_256( v1, v2 ) \
do { \
__m128i t = _mm_srli_si128( v1, 2 ) | _mm_slli_si128( v2, 14 ); \
v2 = _mm_srli_si128( v2, 2 ) | _mm_slli_si128( v1, 14 ); \
__m128i t = _mm_or_si128( _mm_srli_si128( v1, 2 ), \
_mm_slli_si128( v2, 14 ) ); \
v2 = _mm_or_si128( _mm_srli_si128( v2, 2 ), \
_mm_slli_si128( v1, 14 ) ); \
v1 = t; \
} while(0)
#define mm128_rol1x16_256( v1, v2 ) \
do { \
__m128i t = _mm_slli_si128( v1, 2 ) | _mm_srli_si128( v2, 14 ); \
v2 = _mm_slli_si128( v2, 2 ) | _mm_srli_si128( v1, 14 ); \
__m128i t = _mm_or_si128( _mm_slli_si128( v1, 2 ), \
_mm_srli_si128( v2, 14 ) ); \
v2 = _mm_or_si128( _mm_slli_si128( v2, 2 ), \
_mm_srli_si128( v1, 14 ) ); \
v1 = t; \
} while(0)
#define mm128_ror1x8_256( v1, v2 ) \
do { \
__m128i t = _mm_srli_si128( v1, 1 ) | _mm_slli_si128( v2, 15 ); \
v2 = _mm_srli_si128( v2, 1 ) | _mm_slli_si128( v1, 15 ); \
__m128i t = _mm_or_si128( _mm_srli_si128( v1, 1 ), \
_mm_slli_si128( v2, 15 ) ); \
v2 = _mm_or_si128( _mm_srli_si128( v2, 1 ), \
_mm_slli_si128( v1, 15 ) ); \
v1 = t; \
} while(0)
#define mm128_rol1x8_256( v1, v2 ) \
do { \
__m128i t = _mm_slli_si128( v1, 1 ) | _mm_srli_si128( v2, 15 ); \
v2 = _mm_slli_si128( v2, 1 ) | _mm_srli_si128( v1, 15 ); \
__m128i t = _mm_or_si128( _mm_slli_si128( v1, 1 ), \
_mm_srli_si128( v2, 15 ) ); \
v2 = _mm_or_si128( _mm_slli_si128( v2, 1 ), \
_mm_srli_si128( v1, 15 ) ); \
v1 = t; \
} while(0)

View File

@@ -14,109 +14,174 @@
// is limited because 256 bit vectors are less likely to be used when 512
// is available.
// Set instructions load memory resident constants; these macros avoid memory.
// Cost: 4 pinsert + 1 vinsert, estimated 8 clocks latency.
#if defined(__AVX2__)
#define m256_const_128( hi, lo ) \
_mm256_inserti128_si256( _mm256_castsi128_si256( lo ), hi, 1 )
#define m256_const_64( i3, i2, i1, i0 ) \
m256_const_128( m128_const_64( i3, i2 ), m128_const_64( i1, i0 ) )
/*
#define m256_const_64( i3, i2, i1, i0 ) \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_const_64( i1, i0 ) ), \
m128_const_64( i3, i2 ), 1 )
*/
#else // AVX
#define m256_const_64( i3, i2, i1, i0 ) _mm256_set_epi64x( i3, i2, i1, i0 )
#endif
static inline __m256i m256_const1_64( uint64_t i )
{
__m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (i) );
return _mm256_broadcastq_epi64( a );
}
static inline __m256i m256_const1_32( uint32_t i )
{
__m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (i) );
return _mm256_broadcastd_epi32( a );
}
static inline __m256i m256_const1_16( uint16_t i )
{
__m128i a;
asm( "movw %1, %0\n\t"
: "=x" (a)
: "r" (i) );
return _mm256_broadcastw_epi16( a );
}
static inline __m256i m256_const1_8( uint8_t i )
{
__m128i a;
asm( "movb %1, %0\n\t"
: "=x" (a)
: "r" (i) );
return _mm256_broadcastb_epi8( a );
}
//
// All SIMD constant macros are actually functions containing executable
// code and therefore can't be used as compile time initializers.
#define m256_zero _mm256_setzero_si256()
#define m256_one_256 \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_zero, 1 )
#define m256_one_128 \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_one_128, 1 )
// set instructions load memory resident constants, this avoids mem.
// cost 4 pinsert + 1 vinsert, estimate 8 clocks latency.
#define m256_const_64( i3, i2, i1, i0 ) \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_const_64( i1, i0 ) ), \
m128_const_64( i3, i2 ), 1 )
static inline __m256i m256_const1_64( uint64_t i )
{
register __m128i a;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r"(i) );
return _mm256_broadcastq_epi64( a );
}
#if defined(__AVX2__)
// Don't call the function directly, use the macro so it appears to be
// a constant identifier instead of a function:
// __m256i foo = m256_one_64;
static inline __m256i m256_one_64_fn()
static inline __m256i mm256_one_256_fn()
{
register uint64_t one = 1;
register __m128i a;
__m256i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return a;
}
#define m256_one_256 mm256_one_256_fn()
static inline __m256i mm256_one_128_fn()
{
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastsi128_si256( a );
}
#define m256_one_128 mm256_one_128_fn()
static inline __m256i mm256_one_64_fn()
{
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_64 m256_one_64_fn()
#define m256_one_64 mm256_one_64_fn()
static inline __m256i m256_one_32_fn()
static inline __m256i mm256_one_32_fn()
{
register uint64_t one = 0x0000000100000001;
register __m128i a;
__m128i a;
const uint64_t one = 0x0000000100000001;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_32 m256_one_32_fn()
#define m256_one_32 mm256_one_32_fn()
static inline __m256i m256_one_16_fn()
static inline __m256i mm256_one_16_fn()
{
register uint64_t one = 0x0001000100010001;
register __m128i a;
__m128i a;
const uint64_t one = 0x0001000100010001;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_16 m256_one_16_fn()
#define m256_one_16 mm256_one_16_fn()
static inline __m256i m256_one_8_fn()
static inline __m256i mm256_one_8_fn()
{
register uint64_t one = 0x0101010101010101;
register __m128i a;
__m128i a;
const uint64_t one = 0x0101010101010101;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_8 m256_one_8_fn()
#define m256_one_8 mm256_one_8_fn()
static inline __m256i m256_neg1_fn()
static inline __m256i mm256_neg1_fn()
{
register __m256i a;
__m256i a;
asm( "vpcmpeqq %0, %0, %0\n\t"
: "=x"(a) );
return a;
}
#define m256_neg1 m256_neg1_fn()
#define m256_neg1 mm256_neg1_fn()
#else // AVX
#define m256_one_256 m256_const_64( m128_zero, m128_one )
#define m256_one_256 \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_zero, 1 )
#define m256_one_128 \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_one_128, 1 )
#define m256_one_64 _mm256_set1_epi64x( 1ULL )
#define m256_one_32 _mm256_set1_epi64x( 0x0000000100000001ULL )
#define m256_one_16 _mm256_set1_epi64x( 0x0001000100010001ULL )
#define m256_one_8 _mm256_set1_epi64x( 0x0101010101010101ULL )
// AVX doesn't have inserti128 but insertf128 will do.
static inline __m256i m256_neg1_fn()
static inline __m256i mm256_neg1_fn()
{
__m128i a = m128_neg1;
return _mm256_insertf128_si256( _mm256_castsi128_si256( a ), a, 1 );
}
#define m256_neg1 m256_neg1_fn()
#define m256_neg1 mm256_neg1_fn()
#endif // AVX2 else AVX
@@ -142,7 +207,7 @@ do { \
__m128i hi = _mm256_extracti128_si256( src, 1 ); \
a0 = mm256_mov256_64( src ); \
a1 = _mm_extract_epi64( _mm256_castsi256_si128( src ), 1 ); \
a2 = _mm_extract_epi64( hi, 0 ); \
a2 = mm128_mov128_64( hi ); \
a3 = _mm_extract_epi64( hi, 1 ); \
} while(0)
@@ -166,7 +231,7 @@ do { \
// Move integer to lower bits of vector, upper bits set to zero.
static inline __m256i mm256_mov64_256( uint64_t n )
{
register __m128i a;
__m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (n) );
@@ -175,14 +240,14 @@ static inline __m256i mm256_mov64_256( uint64_t n )
static inline __m256i mm256_mov32_256( uint32_t n )
{
register __m128i a;
__m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (n) );
return _mm256_castsi128_si256( a );
}
// Return lo bits of vector as integer.
#define mm256_mov256_64( a ) mm128_mov128_64( _mm256_castsi256_si128( a ) )
#define mm256_mov256_32( a ) mm128_mov128_32( _mm256_castsi256_si128( a ) )
@@ -246,44 +311,6 @@ static inline __m256i mm256_mov32_256( uint32_t n )
#define casto_m256i(p,o) (((__m256i*)(p))+(o))
// Gather scatter
#define mm256_gather_64( d, s0, s1, s2, s3 ) \
((uint64_t*)(d))[0] = (uint64_t)(s0); \
((uint64_t*)(d))[1] = (uint64_t)(s1); \
((uint64_t*)(d))[2] = (uint64_t)(s2); \
((uint64_t*)(d))[3] = (uint64_t)(s3);
#define mm256_gather_32( d, s0, s1, s2, s3, s4, s5, s6, s7 ) \
((uint32_t*)(d))[0] = (uint32_t)(s0); \
((uint32_t*)(d))[1] = (uint32_t)(s1); \
((uint32_t*)(d))[2] = (uint32_t)(s2); \
((uint32_t*)(d))[3] = (uint32_t)(s3); \
((uint32_t*)(d))[4] = (uint32_t)(s4); \
((uint32_t*)(d))[5] = (uint32_t)(s5); \
((uint32_t*)(d))[6] = (uint32_t)(s6); \
((uint32_t*)(d))[7] = (uint32_t)(s7);
// Scatter data from contiguous memory.
// All arguments are pointers
#define mm256_scatter_64( d0, d1, d2, d3, s ) \
*((uint64_t*)(d0)) = ((uint64_t*)(s))[0]; \
*((uint64_t*)(d1)) = ((uint64_t*)(s))[1]; \
*((uint64_t*)(d2)) = ((uint64_t*)(s))[2]; \
*((uint64_t*)(d3)) = ((uint64_t*)(s))[3];
#define mm256_scatter_32( d0, d1, d2, d3, d4, d5, d6, d7, s ) \
*((uint32_t*)(d0)) = ((uint32_t*)(s))[0]; \
*((uint32_t*)(d1)) = ((uint32_t*)(s))[1]; \
*((uint32_t*)(d2)) = ((uint32_t*)(s))[2]; \
*((uint32_t*)(d3)) = ((uint32_t*)(s))[3]; \
*((uint32_t*)(d4)) = ((uint32_t*)(s))[4]; \
*((uint32_t*)(d5)) = ((uint32_t*)(s))[5]; \
*((uint32_t*)(d6)) = ((uint32_t*)(s))[6]; \
*((uint32_t*)(d7)) = ((uint32_t*)(s))[7];
//
// Memory functions
// n = number of 256 bit (32 byte) vectors
@@ -339,10 +366,20 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
// The only bit shift for more than 64 bits is with __int128.
//
// AVX512 has bit rotate for 256 bit vectors with 64 or 32 bit elements
// but it is of little value.
//
// Rotate each element of v by c bits.
// The compiler doesn't like it when a variable is used for the last arg of
// _mm_rol_epi32, it must be an "8 bit immediate".
/*
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm256_ror_64( v, c ) _mm256_ror_epi64( v, c )
#define mm256_rol_64( v, c ) _mm256_rol_epi64( v, c )
#define mm256_ror_32( v, c ) _mm256_ror_epi32( v, c )
#define mm256_rol_32( v, c ) _mm256_rol_epi32( v, c )
#else
*/
#define mm256_ror_64( v, c ) \
_mm256_or_si256( _mm256_srli_epi64( v, c ), \
_mm256_slli_epi64( v, 64-(c) ) )
@@ -359,6 +396,9 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
_mm256_or_si256( _mm256_slli_epi32( v, c ), \
_mm256_srli_epi32( v, 32-(c) ) )
// #endif // AVX512 else
#define mm256_ror_16( v, c ) \
_mm256_or_si256( _mm256_srli_epi16( v, c ), \
_mm256_slli_epi16( v, 16-(c) ) )
@@ -394,6 +434,19 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
_mm256_set1_epi32( 32 ), c ) ) )
// AVX512 can do 16 bit elements.
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm256_rorv_16( v, c ) \
_mm256_or_si256( \
_mm256_srlv_epi16( v, _mm256_set1_epi16( c ) ), \
_mm256_sllv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )
#define mm256_rolv_16( v, c ) \
_mm256_or_si256( \
_mm256_sllv_epi16( v, _mm256_set1_epi16( c ) ), \
_mm256_srlv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )
#endif // AVX512
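// Illustrative sketch (function name hypothetical, not part of the source):
// unlike the immediate forms above, the rorv/rolv wrappers accept a run time
// rotate count, at the cost of building the count vector on every call.
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
static inline __m256i ror16_var( __m256i v, int c )
{
   return mm256_rorv_16( v, c );   // c may be a variable here
}
#endif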
//
// Rotate elements across all lanes.
@@ -432,7 +485,7 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
0x0000000000000007, 0x0000000600000005 )
// AVX512 can do 16 & 8 bit elements.
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// Rotate 256 bit vector by one 16 bit element.
#define mm256_ror_1x16( v ) \
@@ -445,17 +498,50 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
0x000e000d000c000b, 0x000a000900080007, \
0x0006000500040003, 0x000200010000000f ), v )
#if defined (__AVX512VBMI__)

// Rotate 256 bit vector by one byte.
#define mm256_ror_1x8( v ) _mm256_permutexvar_epi8( m256_const_64( \
     0x001f1e1d1c1b1a19, 0x1817161514131211, \
     0x100f0e0d0c0b0a09, 0x0807060504030201 ), v )

#define mm256_rol_1x8( v ) _mm256_permutexvar_epi8( m256_const_64( \
     0x1e1d1c1b1a191817, 0x161514131211100f, \
     0x0e0d0c0b0a090807, 0x060504030201001f ), v )
#endif // VBMI
#endif // AVX512
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm256_invert_64( v ) _mm256_permute4x64_epi64( v, 0x1b )

#define mm256_invert_32( v ) _mm256_permutevar8x32_epi32( v, \
     m256_const_64( 0x0000000000000001, 0x0000000200000003, \
                    0x0000000400000005, 0x0000000600000007 ) )
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// Invert vector: {7,6,5,4,3,2,1,0} -> {0,1,2,3,4,5,6,7}
#define mm256_invert_16( v ) \
_mm256_permutexvar_epi16( m256_const_64( 0x0000000100020003, \
0x0004000500060007, \
0x00080009000a000b, \
0x000c000d000e000f ), v )
#if defined(__AVX512VBMI__)
#define mm256_invert_8( v ) \
_mm256_permutexvar_epi8( m256_const_64( 0x0001020304050607, \
0x08090a0b0c0d0e0f, \
0x1011121314151617, \
0x18191a1b1c1d1e1f ), v )
#endif // VBMI
#endif // AVX512
//
// Rotate elements within lanes of 256 bit vector.

View File

@@ -1,35 +1,32 @@
#if !defined(SIMD_512_H__)
#define SIMD_512_H__ 1
#if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
////////////////////////////////////////////////////////////////////////
//
//                            AVX-512
//
// The baseline for these utilities is AVX512F, AVX512DQ, AVX512BW
// and AVX512VL, first available in quantity in Skylake-X.
// Some utilities may require additional features available in subsequent
// architectures and are noted.
// AVX512 intrinsics have a few peculiarities with permutes and shuffles
// that are inconsistent with previous AVX2 implementations.
//
// _mm512_permutex_epi64 only shuffles within 256 bit lanes. Permute
// usually shuffles across all lanes.
//
// permutexvar has args reversed, index is first arg. Previously all
// permutes and shuffles have the source vector first.
//
// _mm512_permutexvar_epi8 requires AVX512-VBMI, larger elements don't.
// It also performs the same op as _mm512_shuffle_epi8.
//
// _mm512_shuffle_epi8 shuffles across the entire 512 bits. Shuffle usually
// doesn't cross 128 bit lane boundaries.
//////////////////////////////////////////////////////////////
//
@@ -40,6 +37,74 @@
//
// Experimental, not fully tested.
//
// Pseudo constants.
//
// Vector constants are not really constants and can't be used as compile time
// initializers. They contain executable instructions to generate values at
// run time. They are very slow. If the same constant will be used repeatedly
// in a function it's better to define it once in a local register variable
// and use the variable for references.
// The simpler the constant, the more efficient its generation. Zero is
// the fastest, then all elements set the same, then different 64 bit
// elements; different smaller elements are the slowest. Caching a
// constant that is used multiple times is always faster.
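//
// A minimal illustrative sketch (hypothetical, not part of the source):
// generate a multi element constant once and reuse it, rather than
// regenerating it at every reference.
//
//    const __m512i k = m512_const_64( 7, 6, 5, 4, 3, 2, 1, 0 );
//    x = _mm512_add_epi64( x, k );
//    y = _mm512_add_epi64( y, k );   // reuse, no second generation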
#define m512_const_256( hi, lo ) \
_mm512_inserti64x4( _mm512_castsi256_si512( lo ), hi, 1 )
#define m512_const_128( i3, i2, i1, i0 ) \
_mm512_inserti64x4( _mm512_castsi256_si512( m256_const_128( i1, i0 ) ), \
m256_const_128( i3,i2 ), 1 )
#define m512_const_64( i7, i6, i5, i4, i3, i2, i1, i0 ) \
m512_const_256( m256_const_64( i7,i6,i5,i4 ), \
m256_const_64( i3,i2,i1,i0 ) )
static inline __m512i m512_const1_256( __m256i v )
{
return _mm512_broadcast_i64x4( v );
}
static inline __m512i m512_const1_128( __m128i v )
{
return _mm512_broadcast_i64x2( v );
}
static inline __m512i m512_const1_64( uint64_t i )
{
__m128i a;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r"(i) );
return _mm512_broadcastq_epi64( a );
}
static inline __m512i m512_const1_32( uint32_t i )
{
__m128i a;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r"(i) );
return _mm512_broadcastd_epi32( a );
}
static inline __m512i m512_const1_16( uint16_t i )
{
__m128i a;
const uint32_t w = i;   // movw can't target an xmm register, widen and use movd
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r"(w) );
return _mm512_broadcastw_epi16( a );
}
static inline __m512i m512_const1_8( uint8_t i )
{
__m128i a;
const uint32_t b = i;   // movb can't target an xmm register, widen and use movd
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r"(b) );
return _mm512_broadcastb_epi8( a );
}
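// Illustrative sketch (function and names hypothetical, not in the source):
// broadcast constants are typically hoisted out of loops and reused.
static inline void xor_pad_8( __m512i *blk, const int n )
{
   const __m512i k36 = m512_const1_8( 0x36 );   // generated once
   for ( int i = 0; i < n; i++ )
      blk[i] = _mm512_xor_si512( blk[i], k36 );
}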
//
// Pseudo constants.
@@ -47,84 +112,106 @@
// _mm512_setzero_si512 uses xor instruction. If needed frequently
// in a function is it better to define a register variable (const?)
// initialized to zero.
// It isn't clear to me yet how set or set1 actually work.
#define m512_zero _mm512_setzero_si512()
/*
#define m512_one_512 _mm512_set_epi64( 0ULL, 0ULL, 0ULL, 0ULL, \
0ULL, 0ULL, 0ULL, 1ULL )
#define m512_one_256 _mm512_set4_epi64( 0ULL, 0ULL, 0ULL, 1ULL )
#define m512_one_128 _mm512_set4_epi64( 0ULL, 1ULL, 0ULL, 1ULL )
#define m512_one_64 _mm512_set1_epi64( 1ULL )
#define m512_one_32 _mm512_set1_epi32( 1UL )
#define m512_one_16 _mm512_set1_epi16( 1U )
#define m512_one_8 _mm512_set1_epi8( 1U )
#define m512_neg1 _mm512_set1_epi64( 0xFFFFFFFFFFFFFFFFULL )
*/
static inline __m512i mm512_one_512_fn()
{
__m512i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return a;
}
#define m512_one_512 mm512_one_512_fn()
static inline __m512i mm512_one_256_fn()
{
__m256i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcast_i64x4( a );
}
#define m512_one_256 mm512_one_256_fn()
static inline __m512i mm512_one_128_fn()
{
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcast_i64x2( a );
}
#define m512_one_128 mm512_one_128_fn()
static inline __m512i mm512_one_64_fn()
{
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcastq_epi64( a );
}
#define m512_one_64 mm512_one_64_fn()
static inline __m512i mm512_one_32_fn()
{
__m128i a;
const uint64_t one = 0x0000000100000001;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcastq_epi64( a );
}
#define m512_one_32 mm512_one_32_fn()
static inline __m512i mm512_one_16_fn()
{
__m128i a;
const uint64_t one = 0x0001000100010001;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcastq_epi64( a );
}
#define m512_one_16 mm512_one_16_fn()
static inline __m512i mm512_one_8_fn()
{
__m128i a;
const uint64_t one = 0x0101010101010101;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcastq_epi64( a );
}
#define m512_one_8 mm512_one_8_fn()
static inline __m512i mm512_neg1_fn()
{
__m512i a;
asm( "vpcmpeqq %0, %0, %0\n\t"
:"=x"(a) );
return a;
}
#define m512_neg1 mm512_neg1_fn()
//
@@ -135,15 +222,13 @@ static inline __m512i m512_neg1_fn()
#define mm512_negate_32( x ) _mm512_sub_epi32( m512_zero, x )
#define mm512_negate_16( x ) _mm512_sub_epi16( m512_zero, x )
//
// Pointer casting
@@ -163,72 +248,8 @@ static inline __m512i m512_neg1_fn()
// returns p+o as pointer to vector
#define casto_m512i(p,o) (((__m512i*)(p))+(o))
// Gather scatter
#define mm512_gather_64( d, s0, s1, s2, s3, s4, s5, s6, s7 ) \
((uint64_t*)(d))[0] = (uint64_t)(s0); \
((uint64_t*)(d))[1] = (uint64_t)(s1); \
((uint64_t*)(d))[2] = (uint64_t)(s2); \
((uint64_t*)(d))[3] = (uint64_t)(s3); \
((uint64_t*)(d))[4] = (uint64_t)(s4); \
((uint64_t*)(d))[5] = (uint64_t)(s5); \
((uint64_t*)(d))[6] = (uint64_t)(s6); \
((uint64_t*)(d))[7] = (uint64_t)(s7);
#define mm512_gather_32( d, s00, s01, s02, s03, s04, s05, s06, s07, \
s08, s09, s10, s11, s12, s13, s14, s15 ) \
((uint32_t*)(d))[ 0] = (uint32_t)(s00); \
((uint32_t*)(d))[ 1] = (uint32_t)(s01); \
((uint32_t*)(d))[ 2] = (uint32_t)(s02); \
((uint32_t*)(d))[ 3] = (uint32_t)(s03); \
((uint32_t*)(d))[ 4] = (uint32_t)(s04); \
((uint32_t*)(d))[ 5] = (uint32_t)(s05); \
((uint32_t*)(d))[ 6] = (uint32_t)(s06); \
((uint32_t*)(d))[ 7] = (uint32_t)(s07); \
((uint32_t*)(d))[ 8] = (uint32_t)(s08); \
((uint32_t*)(d))[ 9] = (uint32_t)(s09); \
((uint32_t*)(d))[10] = (uint32_t)(s10); \
((uint32_t*)(d))[11] = (uint32_t)(s11); \
((uint32_t*)(d))[12] = (uint32_t)(s12); \
((uint32_t*)(d))[13] = (uint32_t)(s13); \
((uint32_t*)(d))[14] = (uint32_t)(s14); \
((uint32_t*)(d))[15] = (uint32_t)(s15);
// Scatter data from contiguous memory.
// All arguments are pointers
#define mm512_scatter_64( d0, d1, d2, d3, d4, d5, d6, d7, s ) \
*((uint64_t*)(d0)) = ((uint64_t*)(s))[0]; \
*((uint64_t*)(d1)) = ((uint64_t*)(s))[1]; \
*((uint64_t*)(d2)) = ((uint64_t*)(s))[2]; \
*((uint64_t*)(d3)) = ((uint64_t*)(s))[3]; \
*((uint64_t*)(d4)) = ((uint64_t*)(s))[4]; \
*((uint64_t*)(d5)) = ((uint64_t*)(s))[5]; \
*((uint64_t*)(d6)) = ((uint64_t*)(s))[6]; \
*((uint64_t*)(d7)) = ((uint64_t*)(s))[7];
#define mm512_scatter_32( d00, d01, d02, d03, d04, d05, d06, d07, \
d08, d09, d10, d11, d12, d13, d14, d15, s ) \
*((uint32_t*)(d00)) = ((uint32_t*)(s))[ 0]; \
*((uint32_t*)(d01)) = ((uint32_t*)(s))[ 1]; \
*((uint32_t*)(d02)) = ((uint32_t*)(s))[ 2]; \
*((uint32_t*)(d03)) = ((uint32_t*)(s))[ 3]; \
*((uint32_t*)(d04)) = ((uint32_t*)(s))[ 4]; \
*((uint32_t*)(d05)) = ((uint32_t*)(s))[ 5]; \
*((uint32_t*)(d06)) = ((uint32_t*)(s))[ 6]; \
*((uint32_t*)(d07)) = ((uint32_t*)(s))[ 7]; \
*((uint32_t*)(d08)) = ((uint32_t*)(s))[ 8]; \
*((uint32_t*)(d09)) = ((uint32_t*)(s))[ 9]; \
*((uint32_t*)(d10)) = ((uint32_t*)(s))[10]; \
*((uint32_t*)(d11)) = ((uint32_t*)(s))[11]; \
*((uint32_t*)(d12)) = ((uint32_t*)(s))[12]; \
*((uint32_t*)(d13)) = ((uint32_t*)(s))[13]; \
*((uint32_t*)(d14)) = ((uint32_t*)(s))[14]; \
*((uint32_t*)(d15)) = ((uint32_t*)(s))[15];
// Sum 4 values, fewer dependencies than sequential addition.
#define mm512_add4_64( a, b, c, d ) \
_mm512_add_epi64( _mm512_add_epi64( a, b ), _mm512_add_epi64( c, d ) )
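// Illustration (not in the source): mm512_add4_64 evaluates (a+b) + (c+d),
// a two level tree whose inner adds can issue in parallel, instead of the
// three level serial chain ((a+b)+c)+d.
//
//    __m512i sum = mm512_add4_64( x0, x1, x2, x3 );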
@@ -246,17 +267,32 @@ static inline __m512i m512_neg1_fn()
_mm512_xor_si512( _mm512_xor_si512( a, b ), _mm512_xor_si512( c, d ) )
// Vector size conversion
#define mm256_extr_lo256_512( a ) _mm512_castsi512_si256( a )
#define mm256_extr_hi256_512( a ) _mm512_extracti64x4_epi64( a, 1 )
#define mm512_concat_256( hi, lo ) \
_mm512_inserti64x4( _mm512_castsi256_si512( lo ), hi, 1 )
// Horizontal vector testing
#define mm512_allbits0( a ) _mm512_cmpeq_epi64_mask( a, m512_zero )
#define mm512_allbits1( a ) _mm512_cmpeq_epi64_mask( a, m512_neg1 )
#define mm512_anybits0( a ) _mm512_cmpneq_epi64_mask( a, m512_neg1 )
#define mm512_anybits1( a ) _mm512_cmpneq_epi64_mask( a, m512_zero )
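// Hypothetical usage (not in the source): these tests return an 8 bit mask,
// one bit per 64 bit lane, so compare against 0xff for an "all lanes" test.
//
//    if ( mm512_allbits0( v ) == 0xff ) { /* v is entirely zero */ }
//    if ( mm512_anybits1( v ) )         { /* at least one bit is set */ }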
//
// Bit rotations.
// AVX512F has built-in fixed and variable bit rotation for 64 & 32 bit
// elements and can be called directly.
//
// _mm512_rol_epi64, _mm512_ror_epi64, _mm512_rol_epi32, _mm512_ror_epi32
// _mm512_rolv_epi64, _mm512_rorv_epi64, _mm512_rolv_epi32, _mm512_rorv_epi32
//
// Here is a fixed bit rotate for 16 bit elements:
#define mm512_ror_16( v, c ) \
_mm512_or_si512( _mm512_srli_epi16( v, c ), \
_mm512_slli_epi16( v, 16-(c) ) )
@@ -264,6 +300,36 @@ static inline __m512i m512_neg1_fn()
#define mm512_rol_16( v, c ) \
_mm512_or_si512( _mm512_slli_epi16( v, c ), \
_mm512_srli_epi16( v, 16-(c) ) )
// Rotations using a vector control index are very slow due to overhead
// to generate the index vector. Repeated rotations using the same index
// are better handled by the calling function where the index only needs
// to be generated once then reused very efficiently.
// Permutes and shuffles using an immediate index are significantly faster.
//
// Swap bytes in vector elements, vectorized endian conversion.
#define mm512_bswap_64( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x38393A3B3C3D3E3F, 0x3031323334353637, \
0x28292A2B2C2D2E2F, 0x2021222324252627, \
0x18191A1B1C1D1E1F, 0x1011121314151617, \
0x08090A0B0C0D0E0F, 0x0001020304050607 ) )
#define mm512_bswap_32( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233 ) )
#define mm512_bswap_16( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3F3C3D3A3B3839, 0x3637343532333031, \
0x2E2F2C2D2A2B2829, 0x2627242522232021, \
0x1E1F1C1D1A1B1819, 0x1617141512131011, \
0x0E0F0C0D0A0B0809, 0x0607040502030001 ) )
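// Illustrative sketch (hypothetical helper, not in the source): when byte
// swapping many vectors, hoist the shuffle index out of the loop per the
// note above so the constant is generated only once.
static inline void bswap32_block( __m512i *dst, const __m512i *src,
                                  const int n )
{
   const __m512i bswap_idx = m512_const_64(
                       0x3C3D3E3F38393A3B, 0x3435363730313233,
                       0x3C3D3E3F38393A3B, 0x3435363730313233,
                       0x3C3D3E3F38393A3B, 0x3435363730313233,
                       0x3C3D3E3F38393A3B, 0x3435363730313233 );
   for ( int i = 0; i < n; i++ )
      dst[i] = _mm512_shuffle_epi8( src[i], bswap_idx );
}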
//
// Rotate elements in 512 bit vector.
@@ -283,61 +349,61 @@ static inline __m512i m512_neg1_fn()
#define mm512_ror_x32( v, n ) _mm512_alignr_epi32( v, v, n )
#define mm512_ror_1x16( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
0x0000001F001E001D, 0x001C001B001A0019, \
0x0018001700160015, 0x0014001300120011, \
0x0010000F000E000D, 0x000C000B000A0009, \
0x0008000700060005, 0x0004000300020001 ), v )
#define mm512_rol_1x16( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
0x001E001D001C001B, 0x001A001900180017, \
0x0016001500140013, 0x001200110010000F, \
0x000E000D000C000B, 0x000A000900080007, \
0x0006000500040003, 0x000200010000001F ), v )
#define mm512_ror_1x8( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x003F3E3D3C3B3A39, 0x3837363534333231, \
0x302F2E2D2C2B2A29, 0x2827262524232221, \
0x201F1E1D1C1B1A19, 0x1817161514131211, \
0x100F0E0D0C0B0A09, 0x0807060504030201 ) )
#define mm512_rol_1x8( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3B3A393837, 0x363534333231302F, \
0x2E2D2C2B2A292827, 0x262524232221201F, \
0x1E1D1C1B1A191817, 0x161514131211100F, \
0x0E0D0C0B0A090807, 0x060504030201003F ) )
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm512_invert_128( v ) _mm512_shuffle_i64x2( v, v, 0x1b )
#define mm512_invert_64( v ) \
_mm512_permutexvar_epi64( m512_const_64( 0,1,2,3,4,5,6,7 ), v )
#define mm512_invert_32( v ) \
_mm512_permutexvar_epi32( m512_const_64( \
0x0000000000000001, 0x0000000200000003, \
0x0000000400000005, 0x0000000600000007, \
0x0000000800000009, 0x0000000a0000000b, \
0x0000000c0000000d, 0x0000000e0000000f ), v )
#define mm512_invert_16( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
0x0000000100020003, 0x0004000500060007, \
0x00080009000A000B, 0x000C000D000E000F, \
0x0010001100120013, 0x0014001500160017, \
0x00180019001A001B, 0x001C001D001E001F ), v )
#define mm512_invert_8( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x0001020304050607, 0x08090A0B0C0D0E0F, \
0x1011121314151617, 0x18191A1B1C1D1E1F, \
0x2021222324252627, 0x28292A2B2C2D2E2F, \
0x3031323334353637, 0x38393A3B3C3D3E3F ) )
//
// Rotate elements within 256 bit lanes of 512 bit vector.
@@ -351,38 +417,46 @@ static inline __m512i m512_neg1_fn()
// Rotate 256 bit lanes by one 32 bit element
#define mm512_ror1x32_256( v ) \
_mm512_permutexvar_epi32( m512_const_64( \
0x000000080000000f, 0x0000000e0000000d, \
0x0000000c0000000b, 0x0000000a00000009, \
0x0000000000000007, 0x0000000600000005, \
0x0000000400000003, 0x0000000200000001 ), v )
#define mm512_rol1x32_256( v ) \
_mm512_permutexvar_epi32( m512_const_64( \
0x0000000e0000000d, 0x0000000c0000000b, \
0x0000000a00000009, 0x000000080000000f, \
0x0000000600000005, 0x0000000400000003, \
0x0000000200000001, 0x0000000000000007 ), v )
#define mm512_ror1x16_256( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
0x0010001F001E001D, 0x001C001B001A0019, \
0x0018001700160015, 0x0014001300120011, \
0x0000000F000E000D, 0x000C000B000A0009, \
0x0008000700060005, 0x0004000300020001 ), v )
#define mm512_rol1x16_256( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
0x001E001D001C001B, 0x001A001900180017, \
0x0016001500140013, 0x001200110000000F, \
0x000E000D000C000B, 0x000A000900080007, \
0x0006000500040003, 0x000200010000001F ), v )
#define mm512_ror1x8_256( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x203F3E3D3C3B3A39, 0x3837363534333231, \
0x302F2E2D2C2B2A29, 0x2827262524232221, \
0x001F1E1D1C1B1A19, 0x1817161514131211, \
0x100F0E0D0C0B0A09, 0x0807060504030201 ) )
#define mm512_rol1x8_256( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3B3A393837, 0x363534333231302F, \
0x2E2D2C2B2A292827, 0x262524232221203F, \
0x1E1D1C1B1A191817, 0x161514131211100F, \
0x0E0D0C0B0A090807, 0x060504030201001F ) )
//
// Rotate elements within 128 bit lanes of 512 bit vector.
@@ -395,28 +469,28 @@ static inline __m512i m512_neg1_fn()
#define mm512_rol1x32_128( v ) _mm512_shuffle_epi32( v, 0x93 )
#define mm512_ror1x16_128( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
0x0018001F001E001D, 0x001C001B001A0019, \
0x0010001700160015, 0x0014001300120011, \
0x0008000F000E000D, 0x000C000B000A0009, \
0x0000000700060005, 0x0004000300020001 ), v )
#define mm512_rol1x16_128( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
0x001E001D001C001B, 0x001A00190018001F, \
0x0016001500140013, 0x0012001100100017, \
0x000E000D000C000B, 0x000A00090008000F, \
0x0006000500040003, 0x0002000100000007 ), v )
#define mm512_ror1x8_128( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x303F3E3D3C3B3A39, 0x3837363534333231, \
0x202F2E2D2C2B2A29, 0x2827262524232221, \
0x101F1E1D1C1B1A19, 0x1817161514131211, \
0x000F0E0D0C0B0A09, 0x0807060504030201 ) )
#define mm512_rol1x8_128( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3B3A393837, 0x363534333231303F, \
0x2E2D2C2B2A292827, 0x262524232221202F, \
0x1E1D1C1B1A191817, 0x161514131211101F, \
@@ -437,90 +511,64 @@ static inline __m512i m512_neg1_fn()
// Swap 32 bit elements in each 64 bit lane
#define mm512_swap32_64( v ) _mm512_shuffle_epi32( v, 0xb1 )
// _mm512_set_epi8 doesn't seem to work
// Rotate each 64 bit lane by one 16 bit element.
#define mm512_ror1x16_64( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
0x001c001f001e001d, 0x0018001b001a0019, \
0x0014001700160015, 0x0010001300120011, \
0x000c000f000e000d, 0x0008000b000a0009, \
0x0004000700060005, 0x0000000300020001 ), v )
#define mm512_rol1x16_64( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
0x001e001d001c001f, 0x001a00190018001b, \
0x0016001500140017, 0x0012001100100013, \
0x000e000d000c000f, 0x000a00090008000b, \
0x0006000500040007, 0x0002000100000003 ), v )
// Rotate each 64 bit lane by one byte.
#define mm512_ror1x8_64( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x383F3E3D3C3B3A39, 0x3037363534333231, \
0x282F2E2D2C2B2A29, 0x2027262524232221, \
0x181F1E1D1C1B1A19, 0x1017161514131211, \
0x080F0E0D0C0B0A09, 0x0007060504030201 ) )
#define mm512_rol1x8_64( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3B3A39383F, 0x3635343332313037, \
0x2E2D2C2B2A29282F, 0x2625242322212027, \
0x1E1D1C1B1A19181F, 0x1615141312111017, \
0x0E0D0C0B0A09080F, 0x0605040302010007 ) )
//
// Rotate elements within 32 bit lanes.
#define mm512_swap16_32( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
0x001e001f001c001d, 0x001a001b00180019, \
0x0016001700140015, 0x0012001300100011, \
0x000e000f000c000d, 0x000a000b00080009, \
0x0006000700040005, 0x0002000300000001 ), v )
#define mm512_ror1x8_32( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3C3F3E3D383B3A39, 0x3437363530333231, \
0x2C2F2E2D282B2A29, 0x2427262520232221, \
0x1C1F1E1D181B1A19, 0x1417161510131211, \
0x0C0F0E0D080B0A09, 0x0407060500030201 ) )
#define mm512_rol1x8_32( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3F3A39383B, 0x3635343732313033, \
0x2E2D2C2F2A29282B, 0x2625242722212023, \
0x1E1D1C1F1A19181B, 0x1615141712111013, \
0x0E0D0C0F0A09080B, 0x0605040702010003 ) )
//
// Rotate elements from 2 512 bit vectors in place, source arguments
// are overwritten.
// These can all be done with 2 permutex2var instructions but they are
// slower than either xor or alignr and require AVX512VBMI.
#define mm512_swap512_1024(v1, v2) \
v1 = _mm512_xor_si512(v1, v2); \

View File

@@ -33,7 +33,8 @@
// cast all arguments as they're likely to be uint64_t
// Bitwise not: ~(a)
//#define mm64_not( a ) _mm_xor_si64( (__m64)a, m64_neg1 )
#define mm64_not( a ) ( (__m64)( ~( (uint64_t)(a) ) ) )
// Unary negate elements
#define mm64_negate_32( v ) _mm_sub_pi32( m64_zero, (__m64)v )

View File

@@ -1,5 +1,5 @@
#if !defined(SIMD_INT_H__)
#define SIMD_INT_H__ 1
///////////////////////////////////
//
@@ -13,6 +13,8 @@
// Some utilities are also provided for smaller integers, most notably
// bit rotation.
// MMX has no extract instruction for 32 bit elements so it's done manually:
// lo is trivial, hi is a simple shift.
// Input may be uint64_t or __m64, returns uint32_t.
@@ -34,7 +36,7 @@
(uint32_t)( ( (uint32_t)(x) << (c) ) | ( (uint32_t)(x) >> (32-(c)) ) )
#define u16_ror_16( x, c ) \
(uint16_t)( ( (uint16_t)(x) >> (c) ) | ( (uint16_t)(x) << (16-(c)) ) )
#define u16_rol_16( x, c ) \
(uint16_t)( ( (uint16_t)(x) << (c) ) | ( (uint16_t)(x) >> (16-(c)) ) )
#define u8_ror_8( x, c ) \
(uint8_t) ( ( (uint8_t) (x) >> (c) ) | ( (uint8_t) (x) << ( 8-(c)) ) )
@@ -56,18 +58,45 @@ static inline void memset_zero_64( uint64_t *src, int n )
static inline void memset_64( uint64_t *dst, const uint64_t a, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }
///////////////////////////////////////
//
// 128 bit integers
//
// 128 bit integers are inefficient and not a shortcut for __m128i.
// Native type __int128 supported starting with GCC-4.8.
//
// __int128 uses two 64 bit GPRs to hold the data. The main benefits are
// for 128 bit arithmetic. Vectors are preferred when 128 bit arith
// is not required. int128 also works better with other integer sizes.
// Vectors benefit from wider registers.
//
// For safety use typecasting on all numeric arguments.
//
// Use typecasting for conversion to/from 128 bit vector:
// __m128i v128 = (__m128i)my_int128;
// __m256i v256 = _mm256_set_m128i( (__m128i)my_int128, (__m128i)my_int128 );
// my_int128 = (uint128_t)_mm256_extracti128_si256( v256, 1 );
// No real need or use.
//#define u128_neg1 ((uint128_t)(-1))
// Compiler check for __int128 support
// Configure also has a test for int128.
#if ( __GNUC__ > 4 ) || ( ( __GNUC__ == 4 ) && ( __GNUC_MINOR__ >= 8 ) )
#define GCC_INT128 1
#endif
#if !defined(GCC_INT128)
#warning "__int128 not supported, requires GCC-4.8 or newer."
#endif
#if defined(GCC_INT128)
// Familiar looking type names
typedef __int128 int128_t;
typedef unsigned __int128 uint128_t;
// Maybe useful for making constants.
#define mk_uint128( hi, lo ) \
( ( (uint128_t)(hi) << 64 ) | ( (uint128_t)(lo) ) )
@@ -92,6 +121,6 @@ static inline void memset_64( uint64_t *dst, const uint64_t a, int n )
#endif // GCC_INT128
#endif // SIMD_INT_H__

View File

@@ -1,389 +0,0 @@
//////////////////////////////////////
//
// Type abstraction overlays designed for use in highly optimized
// straight line code operating on array structures. It uses direct
// struct member access instead of indexing to access array elements.
// Ex: array.u32_3 instead of array[3].
//
// Vector types are used to represent arrays. 64 and 128 bit vectors have
// corresponding 64 and 128 bit integer types.
//
// Data accesses are not tied to memory as arrays are. These structures
// can operate comfortably as register variables.
//
// Although the abstraction makes for transparent usage there is overhead.
// Extra move instructions are required when an operation requires a
// different register type. Additionally, 128 bit operations, uint128_t
// and AES, can't be done in parallel with a 256 bit or larger vector.
// They require additional move instructions on top of the lack of
// improvement from parallelism.
//
// Move instruction overhead is required when moving among gpr, mmx
// and xmm registers. The number of extra moves is usually the number
// of elements in the vector. If both are the same size only one move
// is required. The number is doubled if the data is moved back.
//
// xmm and ymm registers are special, they are aliased. xmm registers
// overlay the lower 128 bits of the ymm registers. Accessing the data
// in the lower half of a ymm register by an xmm argument is free.
// The upper 128 bits need to be extracted and inserted like with other
// different sized data types.
//
// Integer types can be converted to differently sized integers without
// penalty.
//
// Conversions with penalty should be avoided as much as possible by grouping
// operations requiring the same register set.
//
// There are two algorithms for extracting and inserting data.
//
// There is the straightforward iterative method where each element is
// extracted or inserted in turn. The compiler evidently takes a different
// approach, based on the assembly code generated by a set intrinsic.
// To extract 64 bit or smaller elements from a 256 bit vector it
// first extracts the upper 128 bits into a second xmm register. This
// eliminates a dependency between the upper and lower elements allowing
// the CPU more opportunity at multiple operations per clock.
// This adds one additional instruction to the process. With AVX512
// another stage is added by first splitting the 512 bit vector into
// 2 256 bit vectors.
//
// xmm/ymm aliasing makes accessing low half trivial and without cost.
// Accessing the upper half requires a move from the upper half of
// the source register to the lower half of the destination.
// It's a bigger issue with GPRs as there is no aliasing.
//
// Theoretically memory resident data could bypass the move and load
// the data directly into the desired register type. However this
// ignores the overhead to ensure coherency between register and memory
// which is significantly more.
//
// Overlay avoids pointer dereferences and favours register move over
// memory load, notwithstanding compiler optimization.
//
// The syntax is ugly but can be abstracted with macros.
// Universal 64 bit overlay
// Avoids arrays and pointers, suitable as register variable.
// Conversions are transparent but not free, cost is one MOV instruction.
// Facilitates manipulating 32 bit data in 64 bit pairs.
// Allows full use of 64 bit registers for 32 bit data, effectively doubling
// the size of the register set.
// Potentially up to 50% reduction in instructions depending on rate of
// conversion.
///////////////////////////////////////////////////////
//
// 128 bit integer
//
// Native type __int128 supported starting with GCC-4.8.
//
// __int128 uses two 64 bit GPRs to hold the data. The main benefits are
// for 128 bit arithmetic. Vectors are preferred when 128 bit arith
// is not required. int128 also works better with other integer sizes.
// Vectors benefit from wider registers.
//
// For safety use typecasting on all numeric arguments.
//
// Use typecasting for conversion to/from 128 bit vector:
// __m128i v128 = (__m128i)my_int128l
// __m256i v256 = _mm256_set_m128i( (__m128i)my_int128, (__m128i)my_int128 );
// my_int128 = (uint128_t)_mm256_extracti128_si256( v256, 1 );
// Compiler check for __int128 support
// Configure also has a test for int128.
#if ( __GNUC__ > 4 ) || ( ( __GNUC__ == 4 ) && ( __GNUC_MINOR__ >= 8 ) )
#define GCC_INT128 1
#endif
#if !defined(GCC_INT128)
#warning "__int128 not supported, requires GCC-4.8 or newer."
#endif
#if defined(GCC_INT128)
// Familiar looking type names
typedef __int128 int128_t;
typedef unsigned __int128 uint128_t;
#endif
/////////////////////////////////////
//
// MMX 64 bit vector
//
// Emulates uint32_t[2]
struct _regarray_u32x2
{
uint32_t _0; uint32_t _1;
};
typedef struct _regarray_u32x2 regarray_u32x2;
// Emulates uint16_t[4]
struct _regarray_u16x4
{
uint16_t _0; uint16_t _1; uint16_t _2; uint16_t _3;
};
typedef struct _regarray_u16x4 regarray_u16x4;
// Emulates uint8_t[8]
struct _regarray_u8x8
{
uint8_t _0; uint8_t _1; uint8_t _2; uint8_t _3;
uint8_t _4; uint8_t _5; uint8_t _6; uint8_t _7;
};
typedef struct _regarray_u8x8 regarray_u8x8;
// universal 64 bit overlay
union _regarray_64
{
regarray_u32x2 u32_; // uint32_t[2]
regarray_u16x4 u16_; // uint16_t[4]
regarray_u8x8 u8_; // uint8_t[8]
uint64_t u64;
__m64 v64;
};
typedef union _regarray_64 regarray_64;
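// Hypothetical illustration (not part of the original file): swap the two
// 32 bit halves of a 64 bit value entirely in registers.
static inline uint64_t swap32x2( uint64_t x )
{
   regarray_64 r;
   r.u64 = x;
   const uint32_t t = r.u32_._0;
   r.u32_._0 = r.u32_._1;
   r.u32_._1 = t;
   return r.u64;
}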
/////
//
// SSE2
// Universal 128 bit overlay
//
// Avoids arrays and pointers, suitable as register variable.
// Designed for speed in straight line code with no loops.
//
// Conversions are transparent but not free, cost is one MOV instruction
// in each direction, except for lower half of ymm to/from xmm which are
// free.
//
// Facilitates two dimensional vectoring.
//
// 128 bit integer and AES can't be done in parallel. AES suffers extraction
// and insertion of the upper 128 bits. uint128_t suffers 4 times the cost
// with 2 64 bit extractions and 2 insertions for each 128 bit lane with
// single stage ymm <--> gpr for a total of 8 moves.
//
// Two stage conversion is possible which helps CPU instruction scheduling
// by removing a register dependency between the upper and lower 128 at the
// cost of two extra instructions (128 bit extract and insert). The compiler
// seems to prefer the 2 staged approach when using the set intrinsic.
// Use macros to simplify array access emulation.
// emulated array type: uint64_t a[4];
// array indexing: a[0], a[1]
// overlay emulation: a.u64_0, a.u64_1
// without macro: a.u64_._0, a.u64_._1
struct _regarray_u64x2
{
uint64_t _0; uint64_t _1;
};
typedef struct _regarray_u64x2 regarray_u64x2;
struct _regarray_v64x2
{
__m64 _0; __m64 _1;
};
typedef struct _regarray_v64x2 regarray_v64x2;
struct _regarray_u32x4
{
uint32_t _0; uint32_t _1; uint32_t _2; uint32_t _3;
};
typedef struct _regarray_u32x4 regarray_u32x4;
struct _regarray_u16x8
{
uint16_t _0; uint16_t _1; uint16_t _2; uint16_t _3;
uint16_t _4; uint16_t _5; uint16_t _6; uint16_t _7;
};
typedef struct _regarray_u16x8 regarray_u16x8;
struct _regarray_u8x16
{
uint8_t _0; uint8_t _1; uint8_t _2; uint8_t _3;
uint8_t _4; uint8_t _5; uint8_t _6; uint8_t _7;
uint8_t _8; uint8_t _9; uint8_t _a; uint8_t _b;
uint8_t _c; uint8_t _d; uint8_t _e; uint8_t _f;
};
typedef struct _regarray_u8x16 regarray_u8x16;
union _register_array_m128v
{
#if defined(GCC_INT128)
uint128_t u128;
#endif
__m128i v128;
regarray_u64x2 u64_; // uint64_t[2]
regarray_v64x2 v64_; // __m64[2]
regarray_u32x4 u32_; // uint32_t[4]
regarray_u16x8 u16_; // uint16_t[8]
regarray_u8x16 u8_; // uint8_t[16]
};
typedef union _register_array_m128v register_array_m128v;
///////////////////
//
// AVX2
//
struct _regarray_v128x2
{
__m128i _0; __m128i _1;
};
typedef struct _regarray_v128x2 regarray_v128x2;
struct _regarray_u128x2
{
uint128_t _0; uint128_t _1;
};
typedef struct _regarray_u128x2 regarray_u128x2;
struct _regarray_u64x4
{
uint64_t _0; uint64_t _1; uint64_t _2; uint64_t _3;
};
typedef struct _regarray_u64x4 regarray_u64x4;
struct _regarray_v64x4
{
__m64 _0; __m64 _1; __m64 _2; __m64 _3;
};
typedef struct _regarray_v64x4 regarray_v64x4;
struct _regarray_u32x8
{
uint32_t _0; uint32_t _1; uint32_t _2; uint32_t _3;
uint32_t _4; uint32_t _5; uint32_t _6; uint32_t _7;
};
typedef struct _regarray_u32x8 regarray_u32x8;
struct _regarray_u16x16
{
uint16_t _0; uint16_t _1; uint16_t _2; uint16_t _3;
uint16_t _4; uint16_t _5; uint16_t _6; uint16_t _7;
uint16_t _8; uint16_t _9; uint16_t _a; uint16_t _b;
uint16_t _c; uint16_t _d; uint16_t _e; uint16_t _f;
};
typedef struct _regarray_u16x16 regarray_u16x16;
struct _regarray_u8x32
{
uint8_t _00; uint8_t _01; uint8_t _02; uint8_t _03;
uint8_t _04; uint8_t _05; uint8_t _06; uint8_t _07;
uint8_t _08; uint8_t _09; uint8_t _0a; uint8_t _0b;
uint8_t _0c; uint8_t _0d; uint8_t _0e; uint8_t _0f;
uint8_t _10; uint8_t _11; uint8_t _12; uint8_t _13;
uint8_t _14; uint8_t _15; uint8_t _16; uint8_t _17;
uint8_t _18; uint8_t _19; uint8_t _1a; uint8_t _1b;
uint8_t _1c; uint8_t _1d; uint8_t _1e; uint8_t _1f;
};
typedef struct _regarray_u8x32 regarray_u8x32;
union _regarray_v256
{
__m256i v256;
#if defined(GCC_INT128)
regarray_u128x2 u128_; // uint128_t[2]
#endif
regarray_v128x2 v128_; // __m128i[2]
regarray_v64x4 v64_;
regarray_u64x4 u64_;
regarray_u32x8 u32_;
regarray_u16x16 u16_;
regarray_u8x32 u8_;
};
typedef union _regarray_v256 regarray_v256;
////////////
//
// Abstraction macros to allow easy readability.
// Users may define their own list to suit their preferences
// such as, upper case hex, leading zeros, multidimensional,
// alphabetic, day of week, etc..
#define v128_0 v128_._0
#define v128_1 v128_._1
#define u128_0 u128_._0
#define u128_1 u128_._1
#define v64_0 v64_._0
#define v64_1 v64_._1
#define v64_2 v64_._2
#define v64_3 v64_._3
#define u64_0 u64_._0
#define u64_1 u64_._1
#define u64_2 u64_._2
#define u64_3 u64_._3
#define u32_0 u32_._0
#define u32_1 u32_._1
#define u32_2 u32_._2
#define u32_3 u32_._3
#define u32_4 u32_._4
#define u32_5 u32_._5
#define u32_6 u32_._6
#define u32_7 u32_._7
#define u16_0 u16_._0
#define u16_1 u16_._1
#define u16_2 u16_._2
#define u16_3 u16_._3
#define u16_4 u16_._4
#define u16_5 u16_._5
#define u16_6 u16_._6
#define u16_7 u16_._7
#define u16_8 u16_._8
#define u16_9 u16_._9
#define u16_a u16_._a
#define u16_b u16_._b
#define u16_c u16_._c
#define u16_d u16_._d
#define u16_e u16_._e
#define u16_f u16_._f
#define u8_00 u8_._00
#define u8_01 u8_._01
#define u8_02 u8_._02
#define u8_03 u8_._03
#define u8_04 u8_._04
#define u8_05 u8_._05
#define u8_06 u8_._06
#define u8_07 u8_._07
#define u8_08 u8_._08
#define u8_09 u8_._09
#define u8_0a u8_._0a
#define u8_0b u8_._0b
#define u8_0c u8_._0c
#define u8_0d u8_._0d
#define u8_0e u8_._0e
#define u8_0f u8_._0f
#define u8_10 u8_._10
#define u8_11 u8_._11
#define u8_12 u8_._12
#define u8_13 u8_._13
#define u8_14 u8_._14
#define u8_15 u8_._15
#define u8_16 u8_._16
#define u8_17 u8_._17
#define u8_18 u8_._18
#define u8_19 u8_._19
#define u8_1a u8_._1a
#define u8_1b u8_._1b
#define u8_1c u8_._1c
#define u8_1d u8_._1d
#define u8_1e u8_._1e
#define u8_1f u8_._1f
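// Hypothetical usage (not part of the original file): with the macros the
// overlay reads almost like array indexing.
//
//    regarray_v256 r;
//    r.v256 = _mm256_set1_epi64x( 42 );
//    uint64_t lo = r.u64_0;        // expands to r.u64_._0
//    r.u32_7 = 0xdeadbeefU;        // highest 32 bit element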

View File

@@ -1,40 +0,0 @@
#include "uint256.h"
#ifdef __cplusplus
extern "C"{
#endif
#include "miner.h"
// compute the diff ratio between a found hash and the target
double hash_target_ratio(uint32_t* hash, uint32_t* target)
{
uint256 h, t;
double dhash;
if (!opt_showdiff)
return 0.0;
memcpy(&t, (void*) target, 32);
memcpy(&h, (void*) hash, 32);
dhash = h.getdouble();
if (dhash > 0.)
return t.getdouble() / dhash;
else
return dhash;
}
// store ratio in work struct
void work_set_target_ratio( struct work* work, uint32_t* hash )
{
// only if the option is enabled (to reduce cpu usage)
if (opt_showdiff) {
work->shareratio = hash_target_ratio(hash, work->target);
work->sharediff = work->targetdiff * work->shareratio;
}
}
#ifdef __cplusplus
}
#endif

uint256.h
View File

@@ -1,784 +0,0 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_UINT256_H
#define BITCOIN_UINT256_H
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <string>
#include <vector>
typedef long long int64;
typedef unsigned long long uint64;
inline int Testuint256AdHoc(std::vector<std::string> vArg);
/** Base class without constructors for uint256 and uint160.
* This makes the compiler let you use it in a union.
*/
template<unsigned int BITS>
class base_uint
{
protected:
enum { WIDTH=BITS/32 };
uint32_t pn[WIDTH];
public:
bool operator!() const
{
for (int i = 0; i < WIDTH; i++)
if (pn[i] != 0)
return false;
return true;
}
const base_uint operator~() const
{
base_uint ret;
for (int i = 0; i < WIDTH; i++)
ret.pn[i] = ~pn[i];
return ret;
}
const base_uint operator-() const
{
base_uint ret;
for (int i = 0; i < WIDTH; i++)
ret.pn[i] = ~pn[i];
ret++;
return ret;
}
double getdouble() const
{
double ret = 0.0;
double fact = 1.0;
for (int i = 0; i < WIDTH; i++) {
ret += fact * pn[i];
fact *= 4294967296.0;
}
return ret;
}
base_uint& operator=(uint64 b)
{
pn[0] = (unsigned int)b;
pn[1] = (unsigned int)(b >> 32);
for (int i = 2; i < WIDTH; i++)
pn[i] = 0;
return *this;
}
base_uint& operator^=(const base_uint& b)
{
for (int i = 0; i < WIDTH; i++)
pn[i] ^= b.pn[i];
return *this;
}
base_uint& operator&=(const base_uint& b)
{
for (int i = 0; i < WIDTH; i++)
pn[i] &= b.pn[i];
return *this;
}
base_uint& operator|=(const base_uint& b)
{
for (int i = 0; i < WIDTH; i++)
pn[i] |= b.pn[i];
return *this;
}
base_uint& operator^=(uint64 b)
{
pn[0] ^= (unsigned int)b;
pn[1] ^= (unsigned int)(b >> 32);
return *this;
}
base_uint& operator|=(uint64 b)
{
pn[0] |= (unsigned int)b;
pn[1] |= (unsigned int)(b >> 32);
return *this;
}
base_uint& operator<<=(unsigned int shift)
{
base_uint a(*this);
for (int i = 0; i < WIDTH; i++)
pn[i] = 0;
int k = shift / 32;
shift = shift % 32;
for (int i = 0; i < WIDTH; i++)
{
if (i+k+1 < WIDTH && shift != 0)
pn[i+k+1] |= (a.pn[i] >> (32-shift));
if (i+k < WIDTH)
pn[i+k] |= (a.pn[i] << shift);
}
return *this;
}
base_uint& operator>>=(unsigned int shift)
{
base_uint a(*this);
for (int i = 0; i < WIDTH; i++)
pn[i] = 0;
int k = shift / 32;
shift = shift % 32;
for (int i = 0; i < WIDTH; i++)
{
if (i-k-1 >= 0 && shift != 0)
pn[i-k-1] |= (a.pn[i] << (32-shift));
if (i-k >= 0)
pn[i-k] |= (a.pn[i] >> shift);
}
return *this;
}
base_uint& operator+=(const base_uint& b)
{
uint64 carry = 0;
for (int i = 0; i < WIDTH; i++)
{
uint64 n = carry + pn[i] + b.pn[i];
pn[i] = n & 0xffffffff;
carry = n >> 32;
}
return *this;
}
base_uint& operator-=(const base_uint& b)
{
*this += -b;
return *this;
}
base_uint& operator+=(uint64 b64)
{
base_uint b;
b = b64;
*this += b;
return *this;
}
base_uint& operator-=(uint64 b64)
{
base_uint b;
b = b64;
*this += -b;
return *this;
}
base_uint& operator++()
{
// prefix operator
int i = 0;
while (++pn[i] == 0 && i < WIDTH-1)
i++;
return *this;
}
const base_uint operator++(int)
{
// postfix operator
const base_uint ret = *this;
++(*this);
return ret;
}
base_uint& operator--()
{
// prefix operator
int i = 0;
while (--pn[i] == -1 && i < WIDTH-1)
i++;
return *this;
}
const base_uint operator--(int)
{
// postfix operator
const base_uint ret = *this;
--(*this);
return ret;
}
friend inline bool operator<(const base_uint& a, const base_uint& b)
{
for (int i = base_uint::WIDTH-1; i >= 0; i--)
{
if (a.pn[i] < b.pn[i])
return true;
else if (a.pn[i] > b.pn[i])
return false;
}
return false;
}
friend inline bool operator<=(const base_uint& a, const base_uint& b)
{
for (int i = base_uint::WIDTH-1; i >= 0; i--)
{
if (a.pn[i] < b.pn[i])
return true;
else if (a.pn[i] > b.pn[i])
return false;
}
return true;
}
friend inline bool operator>(const base_uint& a, const base_uint& b)
{
for (int i = base_uint::WIDTH-1; i >= 0; i--)
{
if (a.pn[i] > b.pn[i])
return true;
else if (a.pn[i] < b.pn[i])
return false;
}
return false;
}
friend inline bool operator>=(const base_uint& a, const base_uint& b)
{
for (int i = base_uint::WIDTH-1; i >= 0; i--)
{
if (a.pn[i] > b.pn[i])
return true;
else if (a.pn[i] < b.pn[i])
return false;
}
return true;
}
friend inline bool operator==(const base_uint& a, const base_uint& b)
{
for (int i = 0; i < base_uint::WIDTH; i++)
if (a.pn[i] != b.pn[i])
return false;
return true;
}
friend inline bool operator==(const base_uint& a, uint64 b)
{
if (a.pn[0] != (unsigned int)b)
return false;
if (a.pn[1] != (unsigned int)(b >> 32))
return false;
for (int i = 2; i < base_uint::WIDTH; i++)
if (a.pn[i] != 0)
return false;
return true;
}
friend inline bool operator!=(const base_uint& a, const base_uint& b)
{
return (!(a == b));
}
friend inline bool operator!=(const base_uint& a, uint64 b)
{
return (!(a == b));
}
std::string GetHex() const
{
char psz[sizeof(pn)*2 + 1];
for (unsigned int i = 0; i < sizeof(pn); i++)
sprintf(psz + i*2, "%02x", ((unsigned char*)pn)[sizeof(pn) - i - 1]);
return std::string(psz, psz + sizeof(pn)*2);
}
void SetHex(const char* psz)
{
for (int i = 0; i < WIDTH; i++)
pn[i] = 0;
// skip leading spaces
while (isspace(*psz))
psz++;
// skip 0x
if (psz[0] == '0' && tolower(psz[1]) == 'x')
psz += 2;
// hex string to uint
static const unsigned char phexdigit[256] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9,0,0,0,0,0,0, 0,0xa,0xb,0xc,0xd,0xe,0xf,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0xa,0xb,0xc,0xd,0xe,0xf,0,0,0,0,0,0,0,0,0 };
const char* pbegin = psz;
while (phexdigit[(unsigned char)*psz] || *psz == '0')
psz++;
psz--;
unsigned char* p1 = (unsigned char*)pn;
unsigned char* pend = p1 + WIDTH * 4;
while (psz >= pbegin && p1 < pend)
{
*p1 = phexdigit[(unsigned char)*psz--];
if (psz >= pbegin)
{
*p1 |= (phexdigit[(unsigned char)*psz--] << 4);
p1++;
}
}
}
void SetHex(const std::string& str)
{
SetHex(str.c_str());
}
std::string ToString() const
{
return (GetHex());
}
unsigned char* begin()
{
return (unsigned char*)&pn[0];
}
unsigned char* end()
{
return (unsigned char*)&pn[WIDTH];
}
const unsigned char* begin() const
{
return (unsigned char*)&pn[0];
}
const unsigned char* end() const
{
return (unsigned char*)&pn[WIDTH];
}
unsigned int size() const
{
return sizeof(pn);
}
uint64 Get64(int n=0) const
{
return pn[2*n] | (uint64)pn[2*n+1] << 32;
}
// unsigned int GetSerializeSize(int nType=0, int nVersion=PROTOCOL_VERSION) const
unsigned int GetSerializeSize(int nType, int nVersion) const
{
return sizeof(pn);
}
template<typename Stream>
// void Serialize(Stream& s, int nType=0, int nVersion=PROTOCOL_VERSION) const
void Serialize(Stream& s, int nType, int nVersion) const
{
s.write((char*)pn, sizeof(pn));
}
template<typename Stream>
// void Unserialize(Stream& s, int nType=0, int nVersion=PROTOCOL_VERSION)
void Unserialize(Stream& s, int nType, int nVersion)
{
s.read((char*)pn, sizeof(pn));
}
friend class uint160;
friend class uint256;
friend inline int Testuint256AdHoc(std::vector<std::string> vArg);
};
typedef base_uint<160> base_uint160;
typedef base_uint<256> base_uint256;
//
// uint160 and uint256 could be implemented as templates, but to keep
// compile errors and debugging cleaner, they're copy and pasted.
//
//////////////////////////////////////////////////////////////////////////////
//
// uint160
//
/** 160-bit unsigned integer */
class uint160 : public base_uint160
{
public:
typedef base_uint160 basetype;
uint160()
{
for (int i = 0; i < WIDTH; i++)
pn[i] = 0;
}
uint160(const basetype& b)
{
for (int i = 0; i < WIDTH; i++)
pn[i] = b.pn[i];
}
uint160& operator=(const basetype& b)
{
for (int i = 0; i < WIDTH; i++)
pn[i] = b.pn[i];
return *this;
}
uint160(uint64 b)
{
pn[0] = (unsigned int)b;
pn[1] = (unsigned int)(b >> 32);
for (int i = 2; i < WIDTH; i++)
pn[i] = 0;
}
uint160& operator=(uint64 b)
{
pn[0] = (unsigned int)b;
pn[1] = (unsigned int)(b >> 32);
for (int i = 2; i < WIDTH; i++)
pn[i] = 0;
return *this;
}
explicit uint160(const std::string& str)
{
SetHex(str);
}
explicit uint160(const std::vector<unsigned char>& vch)
{
if (vch.size() == sizeof(pn))
memcpy(pn, &vch[0], sizeof(pn));
else
*this = 0;
}
};
inline bool operator==(const uint160& a, uint64 b) { return (base_uint160)a == b; }
inline bool operator!=(const uint160& a, uint64 b) { return (base_uint160)a != b; }
inline const uint160 operator<<(const base_uint160& a, unsigned int shift) { return uint160(a) <<= shift; }
inline const uint160 operator>>(const base_uint160& a, unsigned int shift) { return uint160(a) >>= shift; }
inline const uint160 operator<<(const uint160& a, unsigned int shift) { return uint160(a) <<= shift; }
inline const uint160 operator>>(const uint160& a, unsigned int shift) { return uint160(a) >>= shift; }
inline const uint160 operator^(const base_uint160& a, const base_uint160& b) { return uint160(a) ^= b; }
inline const uint160 operator&(const base_uint160& a, const base_uint160& b) { return uint160(a) &= b; }
inline const uint160 operator|(const base_uint160& a, const base_uint160& b) { return uint160(a) |= b; }
inline const uint160 operator+(const base_uint160& a, const base_uint160& b) { return uint160(a) += b; }
inline const uint160 operator-(const base_uint160& a, const base_uint160& b) { return uint160(a) -= b; }
inline bool operator<(const base_uint160& a, const uint160& b) { return (base_uint160)a < (base_uint160)b; }
inline bool operator<=(const base_uint160& a, const uint160& b) { return (base_uint160)a <= (base_uint160)b; }
inline bool operator>(const base_uint160& a, const uint160& b) { return (base_uint160)a > (base_uint160)b; }
inline bool operator>=(const base_uint160& a, const uint160& b) { return (base_uint160)a >= (base_uint160)b; }
inline bool operator==(const base_uint160& a, const uint160& b) { return (base_uint160)a == (base_uint160)b; }
inline bool operator!=(const base_uint160& a, const uint160& b) { return (base_uint160)a != (base_uint160)b; }
inline const uint160 operator^(const base_uint160& a, const uint160& b) { return (base_uint160)a ^ (base_uint160)b; }
inline const uint160 operator&(const base_uint160& a, const uint160& b) { return (base_uint160)a & (base_uint160)b; }
inline const uint160 operator|(const base_uint160& a, const uint160& b) { return (base_uint160)a | (base_uint160)b; }
inline const uint160 operator+(const base_uint160& a, const uint160& b) { return (base_uint160)a + (base_uint160)b; }
inline const uint160 operator-(const base_uint160& a, const uint160& b) { return (base_uint160)a - (base_uint160)b; }
inline bool operator<(const uint160& a, const base_uint160& b) { return (base_uint160)a < (base_uint160)b; }
inline bool operator<=(const uint160& a, const base_uint160& b) { return (base_uint160)a <= (base_uint160)b; }
inline bool operator>(const uint160& a, const base_uint160& b) { return (base_uint160)a > (base_uint160)b; }
inline bool operator>=(const uint160& a, const base_uint160& b) { return (base_uint160)a >= (base_uint160)b; }
inline bool operator==(const uint160& a, const base_uint160& b) { return (base_uint160)a == (base_uint160)b; }
inline bool operator!=(const uint160& a, const base_uint160& b) { return (base_uint160)a != (base_uint160)b; }
inline const uint160 operator^(const uint160& a, const base_uint160& b) { return (base_uint160)a ^ (base_uint160)b; }
inline const uint160 operator&(const uint160& a, const base_uint160& b) { return (base_uint160)a & (base_uint160)b; }
inline const uint160 operator|(const uint160& a, const base_uint160& b) { return (base_uint160)a | (base_uint160)b; }
inline const uint160 operator+(const uint160& a, const base_uint160& b) { return (base_uint160)a + (base_uint160)b; }
inline const uint160 operator-(const uint160& a, const base_uint160& b) { return (base_uint160)a - (base_uint160)b; }
inline bool operator<(const uint160& a, const uint160& b) { return (base_uint160)a < (base_uint160)b; }
inline bool operator<=(const uint160& a, const uint160& b) { return (base_uint160)a <= (base_uint160)b; }
inline bool operator>(const uint160& a, const uint160& b) { return (base_uint160)a > (base_uint160)b; }
inline bool operator>=(const uint160& a, const uint160& b) { return (base_uint160)a >= (base_uint160)b; }
inline bool operator==(const uint160& a, const uint160& b) { return (base_uint160)a == (base_uint160)b; }
inline bool operator!=(const uint160& a, const uint160& b) { return (base_uint160)a != (base_uint160)b; }
inline const uint160 operator^(const uint160& a, const uint160& b) { return (base_uint160)a ^ (base_uint160)b; }
inline const uint160 operator&(const uint160& a, const uint160& b) { return (base_uint160)a & (base_uint160)b; }
inline const uint160 operator|(const uint160& a, const uint160& b) { return (base_uint160)a | (base_uint160)b; }
inline const uint160 operator+(const uint160& a, const uint160& b) { return (base_uint160)a + (base_uint160)b; }
inline const uint160 operator-(const uint160& a, const uint160& b) { return (base_uint160)a - (base_uint160)b; }
//////////////////////////////////////////////////////////////////////////////
//
// uint256
//
/** 256-bit unsigned integer */
class uint256 : public base_uint256
{
public:
typedef base_uint256 basetype;
uint256()
{
for (int i = 0; i < WIDTH; i++)
pn[i] = 0;
}
uint256(const basetype& b)
{
for (int i = 0; i < WIDTH; i++)
pn[i] = b.pn[i];
}
uint256& operator=(const basetype& b)
{
for (int i = 0; i < WIDTH; i++)
pn[i] = b.pn[i];
return *this;
}
uint256(uint64 b)
{
pn[0] = (unsigned int)b;
pn[1] = (unsigned int)(b >> 32);
for (int i = 2; i < WIDTH; i++)
pn[i] = 0;
}
uint256& operator=(uint64 b)
{
pn[0] = (unsigned int)b;
pn[1] = (unsigned int)(b >> 32);
for (int i = 2; i < WIDTH; i++)
pn[i] = 0;
return *this;
}
explicit uint256(const std::string& str)
{
SetHex(str);
}
explicit uint256(const std::vector<unsigned char>& vch)
{
if (vch.size() == sizeof(pn))
memcpy(pn, &vch[0], sizeof(pn));
else
*this = 0;
}
};
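As a quick orientation -- illustrative only; the helper and variable names below are invented, not from the diff -- the constructors above behave like this:

    #include <string>
    #include <vector>

    static void uint256_ctor_demo()   /* hypothetical helper */
    {
        uint256 a(0xdeadbeefULL);   /* pn[0..1] from the 64-bit value, rest zeroed */
        uint256 b("0x1234abcd");    /* explicit ctor, parsed via SetHex() */
        std::vector<unsigned char> raw(32, 0x11);
        uint256 c(raw);             /* copied only when exactly sizeof(pn) == 32 bytes */
        uint256 d = b;              /* word-by-word copy through base_uint256 */
        (void)a; (void)c; (void)d;
    }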
inline bool operator==(const uint256& a, uint64 b) { return (base_uint256)a == b; }
inline bool operator!=(const uint256& a, uint64 b) { return (base_uint256)a != b; }
inline const uint256 operator<<(const base_uint256& a, unsigned int shift) { return uint256(a) <<= shift; }
inline const uint256 operator>>(const base_uint256& a, unsigned int shift) { return uint256(a) >>= shift; }
inline const uint256 operator<<(const uint256& a, unsigned int shift) { return uint256(a) <<= shift; }
inline const uint256 operator>>(const uint256& a, unsigned int shift) { return uint256(a) >>= shift; }
inline const uint256 operator^(const base_uint256& a, const base_uint256& b) { return uint256(a) ^= b; }
inline const uint256 operator&(const base_uint256& a, const base_uint256& b) { return uint256(a) &= b; }
inline const uint256 operator|(const base_uint256& a, const base_uint256& b) { return uint256(a) |= b; }
inline const uint256 operator+(const base_uint256& a, const base_uint256& b) { return uint256(a) += b; }
inline const uint256 operator-(const base_uint256& a, const base_uint256& b) { return uint256(a) -= b; }
inline bool operator<(const base_uint256& a, const uint256& b) { return (base_uint256)a < (base_uint256)b; }
inline bool operator<=(const base_uint256& a, const uint256& b) { return (base_uint256)a <= (base_uint256)b; }
inline bool operator>(const base_uint256& a, const uint256& b) { return (base_uint256)a > (base_uint256)b; }
inline bool operator>=(const base_uint256& a, const uint256& b) { return (base_uint256)a >= (base_uint256)b; }
inline bool operator==(const base_uint256& a, const uint256& b) { return (base_uint256)a == (base_uint256)b; }
inline bool operator!=(const base_uint256& a, const uint256& b) { return (base_uint256)a != (base_uint256)b; }
inline const uint256 operator^(const base_uint256& a, const uint256& b) { return (base_uint256)a ^ (base_uint256)b; }
inline const uint256 operator&(const base_uint256& a, const uint256& b) { return (base_uint256)a & (base_uint256)b; }
inline const uint256 operator|(const base_uint256& a, const uint256& b) { return (base_uint256)a | (base_uint256)b; }
inline const uint256 operator+(const base_uint256& a, const uint256& b) { return (base_uint256)a + (base_uint256)b; }
inline const uint256 operator-(const base_uint256& a, const uint256& b) { return (base_uint256)a - (base_uint256)b; }
inline bool operator<(const uint256& a, const base_uint256& b) { return (base_uint256)a < (base_uint256)b; }
inline bool operator<=(const uint256& a, const base_uint256& b) { return (base_uint256)a <= (base_uint256)b; }
inline bool operator>(const uint256& a, const base_uint256& b) { return (base_uint256)a > (base_uint256)b; }
inline bool operator>=(const uint256& a, const base_uint256& b) { return (base_uint256)a >= (base_uint256)b; }
inline bool operator==(const uint256& a, const base_uint256& b) { return (base_uint256)a == (base_uint256)b; }
inline bool operator!=(const uint256& a, const base_uint256& b) { return (base_uint256)a != (base_uint256)b; }
inline const uint256 operator^(const uint256& a, const base_uint256& b) { return (base_uint256)a ^ (base_uint256)b; }
inline const uint256 operator&(const uint256& a, const base_uint256& b) { return (base_uint256)a & (base_uint256)b; }
inline const uint256 operator|(const uint256& a, const base_uint256& b) { return (base_uint256)a | (base_uint256)b; }
inline const uint256 operator+(const uint256& a, const base_uint256& b) { return (base_uint256)a + (base_uint256)b; }
inline const uint256 operator-(const uint256& a, const base_uint256& b) { return (base_uint256)a - (base_uint256)b; }
inline bool operator<(const uint256& a, const uint256& b) { return (base_uint256)a < (base_uint256)b; }
inline bool operator<=(const uint256& a, const uint256& b) { return (base_uint256)a <= (base_uint256)b; }
inline bool operator>(const uint256& a, const uint256& b) { return (base_uint256)a > (base_uint256)b; }
inline bool operator>=(const uint256& a, const uint256& b) { return (base_uint256)a >= (base_uint256)b; }
inline bool operator==(const uint256& a, const uint256& b) { return (base_uint256)a == (base_uint256)b; }
inline bool operator!=(const uint256& a, const uint256& b) { return (base_uint256)a != (base_uint256)b; }
inline const uint256 operator^(const uint256& a, const uint256& b) { return (base_uint256)a ^ (base_uint256)b; }
inline const uint256 operator&(const uint256& a, const uint256& b) { return (base_uint256)a & (base_uint256)b; }
inline const uint256 operator|(const uint256& a, const uint256& b) { return (base_uint256)a | (base_uint256)b; }
inline const uint256 operator+(const uint256& a, const uint256& b) { return (base_uint256)a + (base_uint256)b; }
inline const uint256 operator-(const uint256& a, const uint256& b) { return (base_uint256)a - (base_uint256)b; }
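The exhaustive base/derived overload grid above exists so that mixed expressions resolve without ambiguity. A small sketch of miner-style use (variable names are illustrative, not from the source):

    uint256 target = ~uint256(0) >> 20;   /* difficulty-style target: top 20 bits clear */
    uint256 h(0xab12cd34ULL);
    bool accepted = (h <= target);        /* picks the uint256/uint256 operator<= above */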
#ifdef TEST_UINT256
inline int Testuint256AdHoc(std::vector<std::string> vArg)
{
uint256 g(0);
printf("%s\n", g.ToString().c_str());
g--; printf("g--\n");
printf("%s\n", g.ToString().c_str());
g--; printf("g--\n");
printf("%s\n", g.ToString().c_str());
g++; printf("g++\n");
printf("%s\n", g.ToString().c_str());
g++; printf("g++\n");
printf("%s\n", g.ToString().c_str());
g++; printf("g++\n");
printf("%s\n", g.ToString().c_str());
g++; printf("g++\n");
printf("%s\n", g.ToString().c_str());
uint256 a(7);
printf("a=7\n");
printf("%s\n", a.ToString().c_str());
uint256 b;
printf("b undefined\n");
printf("%s\n", b.ToString().c_str());
int c = 3;
a = c;
a.pn[3] = 15;
printf("%s\n", a.ToString().c_str());
uint256 k(c);
a = 5;
a.pn[3] = 15;
printf("%s\n", a.ToString().c_str());
b = 1;
b <<= 52;
a |= b;
a ^= 0x500;
printf("a %s\n", a.ToString().c_str());
a = a | b | (uint256)0x1000;
printf("a %s\n", a.ToString().c_str());
printf("b %s\n", b.ToString().c_str());
a = 0xfffffffe;
a.pn[4] = 9;
printf("%s\n", a.ToString().c_str());
a++;
printf("%s\n", a.ToString().c_str());
a++;
printf("%s\n", a.ToString().c_str());
a++;
printf("%s\n", a.ToString().c_str());
a++;
printf("%s\n", a.ToString().c_str());
a--;
printf("%s\n", a.ToString().c_str());
a--;
printf("%s\n", a.ToString().c_str());
a--;
printf("%s\n", a.ToString().c_str());
uint256 d = a--;
printf("%s\n", d.ToString().c_str());
printf("%s\n", a.ToString().c_str());
a--;
printf("%s\n", a.ToString().c_str());
a--;
printf("%s\n", a.ToString().c_str());
d = a;
printf("%s\n", d.ToString().c_str());
for (int i = uint256::WIDTH-1; i >= 0; i--) printf("%08x", d.pn[i]); printf("\n");
uint256 neg = d;
neg = ~neg;
printf("%s\n", neg.ToString().c_str());
uint256 e = uint256("0xABCDEF123abcdef12345678909832180000011111111");
printf("\n");
printf("%s\n", e.ToString().c_str());
printf("\n");
uint256 x1 = uint256("0xABCDEF123abcdef12345678909832180000011111111");
uint256 x2;
printf("%s\n", x1.ToString().c_str());
for (int i = 0; i < 270; i += 4)
{
x2 = x1 << i;
printf("%s\n", x2.ToString().c_str());
}
printf("\n");
printf("%s\n", x1.ToString().c_str());
for (int i = 0; i < 270; i += 4)
{
x2 = x1;
x2 >>= i;
printf("%s\n", x2.ToString().c_str());
}
for (int i = 0; i < 100; i++)
{
uint256 k = (~uint256(0) >> i);
printf("%s\n", k.ToString().c_str());
}
for (int i = 0; i < 100; i++)
{
uint256 k = (~uint256(0) << i);
printf("%s\n", k.ToString().c_str());
}
return (0);
}
#endif
#endif
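To actually run the ad-hoc test, a minimal driver along these lines would work; it is hypothetical and not part of the repository, and the header name "uint256.h" is an assumption:

    // hypothetical driver, not part of the repository
    #define TEST_UINT256
    #include "uint256.h"        // assumed header name
    #include <string>
    #include <vector>

    int main()
    {
        return Testuint256AdHoc(std::vector<std::string>());
    }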

89
util.c
View File

@@ -80,6 +80,86 @@ struct thread_q {
pthread_cond_t cond;
};
void applog2( int prio, const char *fmt, ... )
{
va_list ap;
va_start(ap, fmt);
#ifdef HAVE_SYSLOG_H
if (use_syslog) {
va_list ap2;
char *buf;
int len;
/* custom colors to syslog prio */
if (prio > LOG_DEBUG) {
switch (prio) {
case LOG_BLUE: prio = LOG_NOTICE; break;
}
}
va_copy(ap2, ap);
len = vsnprintf(NULL, 0, fmt, ap2) + 1;
va_end(ap2);
buf = alloca(len);
if (vsnprintf(buf, len, fmt, ap) >= 0)
syslog(prio, "%s", buf);
}
#else
/* no syslog support: dead branch, so the else below is always taken */
if (0) {}
#endif
else {
const char* color = "";
char *f;
int len;
/* unlike applog(), no timestamp is printed; the tm code is kept
   commented out below for reference */
// struct tm tm;
// time_t now = time(NULL);
// localtime_r(&now, &tm);
switch (prio) {
case LOG_ERR: color = CL_RED; break;
case LOG_WARNING: color = CL_YLW; break;
case LOG_NOTICE: color = CL_WHT; break;
case LOG_INFO: color = ""; break;
case LOG_DEBUG: color = CL_GRY; break;
case LOG_BLUE:
prio = LOG_NOTICE;
color = CL_CYN;
break;
}
if (!use_colors)
color = "";
len = 64 + (int) strlen(fmt) + 2;
f = (char*) malloc(len);
sprintf(f, " %s %s%s\n",
// sprintf(f, "[%d-%02d-%02d %02d:%02d:%02d]%s %s%s\n",
// tm.tm_year + 1900,
// tm.tm_mon + 1,
// tm.tm_mday,
// tm.tm_hour,
// tm.tm_min,
// tm.tm_sec,
color,
fmt,
use_colors ? CL_N : ""
);
pthread_mutex_lock(&applog_lock);
vfprintf(stdout, f, ap); /* single write under applog_lock keeps lines intact */
fflush(stdout);
free(f);
pthread_mutex_unlock(&applog_lock);
}
va_end(ap);
}
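applog2() is applog() minus the timestamp prefix (see the commented-out tm block above), which makes it handy for continuation lines. A hypothetical call site; the variables are illustrative:

    applog(  LOG_INFO, "New block %d", height );             /* timestamped line   */
    applog2( LOG_INFO, "  diff %g, target %s", d, tgt );     /* aligned follow-up  */
    applog2( LOG_BLUE, "Stratum difficulty set to %g", d );  /* cyan via LOG_BLUE  */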
void applog(int prio, const char *fmt, ...)
{
va_list ap;
@@ -1818,7 +1898,7 @@ static bool stratum_notify(struct stratum_ctx *sctx, json_t *params)
hex2bin(sctx->job.proofoffullnode, prooffullnode, 32);
}
sctx->bloc_height = getblocheight(sctx);
sctx->block_height = getblocheight(sctx);
for (i = 0; i < sctx->job.merkle_count; i++)
free(sctx->job.merkle[i]);
@@ -1853,13 +1933,6 @@ static bool stratum_set_difficulty(struct stratum_ctx *sctx, json_t *params)
pthread_mutex_lock(&sctx->work_lock);
sctx->next_diff = diff;
pthread_mutex_unlock(&sctx->work_lock);
/* store for api stats */
stratum_diff = diff;
if ( !opt_quiet )
applog(LOG_BLUE, "Stratum difficulty set to %g", diff);
return true;
}

View File

@@ -24,6 +24,8 @@ ln -s $LOCAL_LIB/gmp/gmp.h ./gmp.h
# make release directory and copy selected DLLs.
mkdir release
cp README.txt release/
cp README.md release/
cp RELEASE_NOTES release/
cp /usr/x86_64-w64-mingw32/lib/zlib1.dll release/
cp /usr/x86_64-w64-mingw32/lib/libwinpthread-1.dll release/
cp /usr/lib/gcc/x86_64-w64-mingw32/7.3-win32/libstdc++-6.dll release/