Compare commits

...

4 Commits

Author      SHA1        Message   Date
Jay D Dee   6f49ba09b7  v3.9.6    2019-07-17 17:54:38 -04:00
Jay D Dee   e2d5762ef2  v3.9.5.4  2019-07-15 17:00:26 -04:00
Jay D Dee   e625ed5420  v3.9.5.3  2019-07-12 10:42:38 -04:00
Jay D Dee   9abc19a30a  v3.9.5.2  2019-07-04 12:12:11 -04:00
106 changed files with 5064 additions and 4377 deletions

View File

@@ -71,6 +71,9 @@ cpuminer_SOURCES = \
algo/bmw/bmw256-hash-4way.c \
algo/bmw/bmw512-hash-4way.c \
algo/bmw/bmw256.c \
algo/bmw/bmw512-gate.c \
algo/bmw/bmw512.c \
algo/bmw/bmw512-4way.c \
algo/cryptonight/cryptolight.c \
algo/cryptonight/cryptonight-common.c\
algo/cryptonight/cryptonight-aesni.c\
@@ -238,6 +241,8 @@ cpuminer_SOURCES = \
algo/x13/skunk-4way.c \
algo/x13/skunk.c \
algo/x13/drop.c \
algo/x13/x13bcd-4way.c \
algo/x13/x13bcd.c \
algo/x14/x14-gate.c \
algo/x14/x14.c \
algo/x14/x14-4way.c \
@@ -254,6 +259,8 @@ cpuminer_SOURCES = \
algo/x16/x16r-gate.c \
algo/x16/x16r.c \
algo/x16/x16r-4way.c \
algo/x16/x16rt.c \
algo/x16/x16rt-4way.c \
algo/x17/x17-gate.c \
algo/x17/x17.c \
algo/x17/x17-4way.c \

View File

@@ -58,6 +58,7 @@ Supported Algorithms
blakecoin blake256r8
blake2s Blake-2 S
bmw BMW 256
bmw512 BMW 512
c11 Chaincoin
decred
deep Deepcoin (DCN)
@@ -113,11 +114,14 @@ Supported Algorithms
x11gost sib (SibCoin)
x12 Galaxie Cash (GCH)
x13 X13
x13bcd bcd
x13sm3 hsr (Hshare)
x14 X14
x15 X15
x16r Ravencoin (RVN)
x16rt Gincoin (GIN)
x16rt_veil Veil (VEIL)
x16s Pigeoncoin (PGN)
x17
xevan Bitsend (BSD)
yescrypt Globalboost-Y (BSTY)

View File

@@ -38,6 +38,25 @@ supported.
Change Log
----------
v3.9.6
New algos: bmw512, x16rt, x16rt-veil (alias veil), x13bcd (alias bcd).
v3.9.5.4
Fixed poor sha256q AVX2 performance.
Fixed skein2 buffer overflow and restored bswap-interleave optimization.
More restructuring.
v3.9.5.3
Fixed a crash when mining hodl with aes-sse42.
More restructuring and share report tweaks.
v3.9.5.2
Reverted the bswap-interleave optimization because it caused crashes on Windows.
v3.9.5.1
Fixed skein2 crash on Windows.

View File

@@ -170,6 +170,7 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
case ALGO_BLAKECOIN: register_blakecoin_algo ( gate ); break;
// case ALGO_BLAKE2B: register_blake2b_algo ( gate ); break;
case ALGO_BLAKE2S: register_blake2s_algo ( gate ); break;
case ALGO_BMW512: register_bmw512_algo ( gate ); break;
case ALGO_C11: register_c11_algo ( gate ); break;
case ALGO_CRYPTOLIGHT: register_cryptolight_algo ( gate ); break;
case ALGO_CRYPTONIGHT: register_cryptonight_algo ( gate ); break;
@@ -227,10 +228,13 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
case ALGO_X11GOST: register_x11gost_algo ( gate ); break;
case ALGO_X12: register_x12_algo ( gate ); break;
case ALGO_X13: register_x13_algo ( gate ); break;
case ALGO_X13BCD: register_x13bcd_algo ( gate ); break;
case ALGO_X13SM3: register_x13sm3_algo ( gate ); break;
case ALGO_X14: register_x14_algo ( gate ); break;
case ALGO_X15: register_x15_algo ( gate ); break;
case ALGO_X16R: register_x16r_algo ( gate ); break;
case ALGO_X16RT: register_x16rt_algo ( gate ); break;
case ALGO_X16RT_VEIL: register_x16rt_veil_algo ( gate ); break;
case ALGO_X16S: register_x16s_algo ( gate ); break;
case ALGO_X17: register_x17_algo ( gate ); break;
case ALGO_XEVAN: register_xevan_algo ( gate ); break;
@@ -327,7 +331,6 @@ const char* const algo_alias_map[][2] =
{ "lyra2", "lyra2re" },
{ "lyra2v2", "lyra2rev2" },
{ "lyra2v3", "lyra2rev3" },
{ "lyra2zoin", "lyra2z330" },
{ "myrgr", "myr-gr" },
{ "myriad", "myr-gr" },
{ "neo", "neoscrypt" },
@@ -335,11 +338,9 @@ const char* const algo_alias_map[][2] =
// { "sia", "blake2b" },
{ "sib", "x11gost" },
{ "timetravel8", "timetravel" },
{ "ziftr", "zr5" },
{ "veil", "x16rt-veil" },
{ "yenten", "yescryptr16" },
{ "yescryptr8k", "yescrypt" },
{ "zcoin", "lyra2z" },
{ "zoin", "lyra2z330" },
{ "ziftr", "zr5" },
{ NULL, NULL }
};
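The alias table above maps user-facing names to canonical algorithm names; the relevant addition here is veil -> x16rt-veil. A minimal sketch of how such a table is typically consulted before gate registration follows. The helper name is hypothetical; cpuminer-opt's actual lookup function and calling convention may differ.

// Hypothetical helper: replace an alias with its canonical algo name.
// Walks algo_alias_map until the NULL sentinel.
#include <string.h>

static const char* resolve_algo_alias( const char* name )
{
   for ( int i = 0; algo_alias_map[i][0]; i++ )
      if ( !strcasecmp( name, algo_alias_map[i][0] ) )
         return algo_alias_map[i][1];   // e.g. "veil" -> "x16rt-veil"
   return name;                          // not an alias, use as given
}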

View File

@@ -36,35 +36,31 @@ void argon2d_crds_hash( void *output, const void *input )
int scanhash_argon2d_crds( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(64) endiandata[20];
uint32_t _ALIGN(64) hash[8];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id; // thr_id arg is deprecated
uint32_t _ALIGN(64) endiandata[20];
uint32_t _ALIGN(64) hash[8];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
uint32_t nonce = first_nonce;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
swab32_array( endiandata, pdata, 20 );
uint32_t nonce = first_nonce;
do {
be32enc(&endiandata[19], nonce);
argon2d_crds_hash( hash, endiandata );
if ( hash[7] <= Htarg && fulltest( hash, ptarget ) && !opt_benchmark )
{
pdata[19] = nonce;
submit_solution( work, hash, mythr );
}
nonce++;
} while (nonce < max_nonce && !work_restart[thr_id].restart);
swab32_array( endiandata, pdata, 20 );
do {
be32enc(&endiandata[19], nonce);
argon2d_crds_hash( hash, endiandata );
if ( hash[7] <= Htarg && fulltest( hash, ptarget ) )
{
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce;
work_set_target_ratio(work, hash);
return 1;
}
nonce++;
} while (nonce < max_nonce && !work_restart[thr_id].restart);
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
return 0;
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
return 0;
}
bool register_argon2d_crds_algo( algo_gate_t* gate )
@@ -107,35 +103,32 @@ void argon2d_dyn_hash( void *output, const void *input )
int scanhash_argon2d_dyn( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(64) endiandata[20];
uint32_t _ALIGN(64) hash[8];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id; // thr_id arg is deprecated
uint32_t _ALIGN(64) endiandata[20];
uint32_t _ALIGN(64) hash[8];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
uint32_t nonce = first_nonce;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
swab32_array( endiandata, pdata, 20 );
uint32_t nonce = first_nonce;
do
{
be32enc(&endiandata[19], nonce);
argon2d_dyn_hash( hash, endiandata );
if ( hash[7] <= Htarg && fulltest( hash, ptarget ) && !opt_benchmark )
{
pdata[19] = nonce;
submit_solution( work, hash, mythr );
}
nonce++;
} while (nonce < max_nonce && !work_restart[thr_id].restart);
swab32_array( endiandata, pdata, 20 );
do {
be32enc(&endiandata[19], nonce);
argon2d_dyn_hash( hash, endiandata );
if ( hash[7] <= Htarg && fulltest( hash, ptarget ) )
{
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce;
work_set_target_ratio(work, hash);
return 1;
}
nonce++;
} while (nonce < max_nonce && !work_restart[thr_id].restart);
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
return 0;
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
return 0;
}
bool register_argon2d_dyn_algo( algo_gate_t* gate )
@@ -171,11 +164,10 @@ int scanhash_argon2d4096( struct work *work, uint32_t max_nonce,
be32enc( &endiandata[19], n );
argon2d_hash_raw( t_cost, m_cost, parallelism, (char*) endiandata, 80,
(char*) endiandata, 80, (char*) vhash, 32, ARGON2_VERSION_13 );
if ( vhash[7] < Htarg && fulltest( vhash, ptarget ) )
if ( vhash[7] < Htarg && fulltest( vhash, ptarget ) && !opt_benchmark )
{
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
return true;
submit_solution( work, vhash, mythr );
}
n++;
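The argon2d scanhash routines all change in the same way here: the old bodies (work_set_target_ratio plus an early return) stopped at the first share, while the new bodies call submit_solution for each qualifying nonce and keep scanning until max_nonce or a work restart. Condensed to the essential pattern, with identifiers taken from the code above:

   do {
      be32enc( &endiandata[19], nonce );
      argon2d_dyn_hash( hash, endiandata );          // or argon2d_crds_hash
      if ( hash[7] <= Htarg && fulltest( hash, ptarget ) && !opt_benchmark )
      {
         pdata[19] = nonce;
         submit_solution( work, hash, mythr );       // report the share, don't return
      }
      nonce++;
   } while ( nonce < max_nonce && !work_restart[thr_id].restart );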

View File

@@ -27,25 +27,19 @@ int scanhash_blake_4way( struct work *work, uint32_t max_nonce,
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t HTarget = ptarget[7];
uint32_t _ALIGN(32) edata[20];
__m128i *noncev = (__m128i*)vdata + 19; // aligned
uint32_t n = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
if (opt_benchmark)
HTarget = 0x7f;
// we need big endian data...
swab32_array( edata, pdata, 20 );
mm128_intrlv_4x32( vdata, edata, edata, edata, edata, 640 );
mm128_bswap32_intrlv80_4x32( vdata, pdata );
blake256r14_4way_init( &blake_4w_ctx );
blake256r14_4way( &blake_4w_ctx, vdata, 64 );
uint32_t *noncep = vdata + 76; // 19*4
do {
be32enc( noncep, n );
be32enc( noncep +1, n+1 );
be32enc( noncep +2, n+2 );
be32enc( noncep +3, n+3 );
*noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) );
blakehash_4way( hash, vdata );
@@ -76,9 +70,9 @@ void blakehash_8way( void *state, const void *input )
memcpy( &ctx, &blake_8w_ctx, sizeof ctx );
blake256r14_8way( &ctx, input + (64<<3), 16 );
blake256r14_8way_close( &ctx, vhash );
mm256_dintrlv_8x32( state, state+ 32, state+ 64, state+ 96,
state+128, state+160, state+192, state+224,
vhash, 256 );
_dintrlv_8x32( state, state+ 32, state+ 64, state+ 96,
state+128, state+160, state+192, state+224,
vhash, 256 );
}
int scanhash_blake_8way( struct work *work, uint32_t max_nonce,
@@ -90,32 +84,21 @@ int scanhash_blake_8way( struct work *work, uint32_t max_nonce,
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t HTarget = ptarget[7];
uint32_t _ALIGN(32) edata[20];
uint32_t n = first_nonce;
__m256i *noncev = (__m256i*)vdata + 19; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
if (opt_benchmark)
HTarget = 0x7f;
// we need big endian data...
swab32_array( edata, pdata, 20 );
mm256_intrlv_8x32( vdata, edata, edata, edata, edata,
edata, edata, edata, edata, 640 );
mm256_bswap32_intrlv80_8x32( vdata, pdata );
blake256r14_8way_init( &blake_8w_ctx );
blake256r14_8way( &blake_8w_ctx, vdata, 64 );
uint32_t *noncep = vdata + 152; // 19*8
do {
be32enc( noncep, n );
be32enc( noncep +1, n+1 );
be32enc( noncep +2, n+2 );
be32enc( noncep +3, n+3 );
be32enc( noncep +4, n+4 );
be32enc( noncep +5, n+5 );
be32enc( noncep +6, n+6 );
be32enc( noncep +7, n+7 );
*noncev = mm256_bswap_32( _mm256_set_epi32( n+7, n+6, n+5, n+4,
n+3, n+2, n+1, n ) );
pdata[19] = n;
blakehash_8way( hash, vdata );
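In these blake scanhash loops the separate swab32_array plus mm128_intrlv_4x32 / mm256_intrlv_8x32 calls, and the per-lane be32enc nonce stores, are folded into two helpers: a bswap32-interleave routine that prepares the 80-byte header once, and a single vector store through noncev that updates all lanes per iteration. A minimal sketch of what the 4x32 interleave helper presumably does; this is an assumption inferred from the swab32_array/interleave pair it replaces, not the actual definition:

// Assumed semantics: byte-swap each of the 20 header words and broadcast
// it into all 4 lanes of the 4x32 interleaved buffer.
static inline void bswap32_intrlv80_4x32_sketch( uint32_t *vdata,
                                                 const uint32_t *pdata )
{
   for ( int i = 0; i < 20; i++ )
   {
      const uint32_t w = __builtin_bswap32( pdata[i] );
      vdata[ 4*i + 0 ] = w;
      vdata[ 4*i + 1 ] = w;
      vdata[ 4*i + 2 ] = w;
      vdata[ 4*i + 3 ] = w;
   }
}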

View File

@@ -412,34 +412,16 @@ do { \
V5 = H5; \
V6 = H6; \
V7 = H7; \
V8 = _mm_xor_si128( S0, _mm_set_epi32( CS0, CS0, CS0, CS0 ) ); \
V9 = _mm_xor_si128( S1, _mm_set_epi32( CS1, CS1, CS1, CS1 ) ); \
VA = _mm_xor_si128( S2, _mm_set_epi32( CS2, CS2, CS2, CS2 ) ); \
VB = _mm_xor_si128( S3, _mm_set_epi32( CS3, CS3, CS3, CS3 ) ); \
VC = _mm_xor_si128( _mm_set_epi32( T0, T0, T0, T0 ), \
_mm_set_epi32( CS4, CS4, CS4, CS4 ) ); \
VD = _mm_xor_si128( _mm_set_epi32( T0, T0, T0, T0 ), \
_mm_set_epi32( CS5, CS5, CS5, CS5 ) ); \
VE = _mm_xor_si128( _mm_set_epi32( T1, T1, T1, T1 ) \
, _mm_set_epi32( CS6, CS6, CS6, CS6 ) ); \
VF = _mm_xor_si128( _mm_set_epi32( T1, T1, T1, T1 ), \
_mm_set_epi32( CS7, CS7, CS7, CS7 ) ); \
M[0x0] = mm128_bswap_32( *(buf + 0) ); \
M[0x1] = mm128_bswap_32( *(buf + 1) ); \
M[0x2] = mm128_bswap_32( *(buf + 2) ); \
M[0x3] = mm128_bswap_32( *(buf + 3) ); \
M[0x4] = mm128_bswap_32( *(buf + 4) ); \
M[0x5] = mm128_bswap_32( *(buf + 5) ); \
M[0x6] = mm128_bswap_32( *(buf + 6) ); \
M[0x7] = mm128_bswap_32( *(buf + 7) ); \
M[0x8] = mm128_bswap_32( *(buf + 8) ); \
M[0x9] = mm128_bswap_32( *(buf + 9) ); \
M[0xA] = mm128_bswap_32( *(buf + 10) ); \
M[0xB] = mm128_bswap_32( *(buf + 11) ); \
M[0xC] = mm128_bswap_32( *(buf + 12) ); \
M[0xD] = mm128_bswap_32( *(buf + 13) ); \
M[0xE] = mm128_bswap_32( *(buf + 14) ); \
M[0xF] = mm128_bswap_32( *(buf + 15) ); \
V8 = _mm_xor_si128( S0, _mm_set1_epi32( CS0 ) ); \
V9 = _mm_xor_si128( S1, _mm_set1_epi32( CS1 ) ); \
VA = _mm_xor_si128( S2, _mm_set1_epi32( CS2 ) ); \
VB = _mm_xor_si128( S3, _mm_set1_epi32( CS3 ) ); \
VC = _mm_xor_si128( _mm_set1_epi32( T0 ), _mm_set1_epi32( CS4 ) ); \
VD = _mm_xor_si128( _mm_set1_epi32( T0 ), _mm_set1_epi32( CS5 ) ); \
VE = _mm_xor_si128( _mm_set1_epi32( T1 ), _mm_set1_epi32( CS6 ) ); \
VF = _mm_xor_si128( _mm_set1_epi32( T1 ), _mm_set1_epi32( CS7 ) ); \
mm128_block_bswap_32( M, buf ); \
mm128_block_bswap_32( M+8, buf+8 ); \
for (r = 0; r < rounds; r ++) \
ROUND_S_4WAY(r); \
H0 = _mm_xor_si128( _mm_xor_si128( \
@@ -464,6 +446,54 @@ do { \
// current impl
#if defined(__SSSE3__)
#define BLAKE256_4WAY_BLOCK_BSWAP32 do \
{ \
__m128i shuf_bswap32 = _mm_set_epi64x( 0x0c0d0e0f08090a0b, \
0x0405060700010203 ); \
M0 = _mm_shuffle_epi8( buf[ 0], shuf_bswap32 ); \
M1 = _mm_shuffle_epi8( buf[ 1], shuf_bswap32 ); \
M2 = _mm_shuffle_epi8( buf[ 2], shuf_bswap32 ); \
M3 = _mm_shuffle_epi8( buf[ 3], shuf_bswap32 ); \
M4 = _mm_shuffle_epi8( buf[ 4], shuf_bswap32 ); \
M5 = _mm_shuffle_epi8( buf[ 5], shuf_bswap32 ); \
M6 = _mm_shuffle_epi8( buf[ 6], shuf_bswap32 ); \
M7 = _mm_shuffle_epi8( buf[ 7], shuf_bswap32 ); \
M8 = _mm_shuffle_epi8( buf[ 8], shuf_bswap32 ); \
M9 = _mm_shuffle_epi8( buf[ 9], shuf_bswap32 ); \
MA = _mm_shuffle_epi8( buf[10], shuf_bswap32 ); \
MB = _mm_shuffle_epi8( buf[11], shuf_bswap32 ); \
MC = _mm_shuffle_epi8( buf[12], shuf_bswap32 ); \
MD = _mm_shuffle_epi8( buf[13], shuf_bswap32 ); \
ME = _mm_shuffle_epi8( buf[14], shuf_bswap32 ); \
MF = _mm_shuffle_epi8( buf[15], shuf_bswap32 ); \
} while(0)
#else // SSE2
#define BLAKE256_4WAY_BLOCK_BSWAP32 do \
{ \
M0 = mm128_bswap_32( buf[0] ); \
M1 = mm128_bswap_32( buf[1] ); \
M2 = mm128_bswap_32( buf[2] ); \
M3 = mm128_bswap_32( buf[3] ); \
M4 = mm128_bswap_32( buf[4] ); \
M5 = mm128_bswap_32( buf[5] ); \
M6 = mm128_bswap_32( buf[6] ); \
M7 = mm128_bswap_32( buf[7] ); \
M8 = mm128_bswap_32( buf[8] ); \
M9 = mm128_bswap_32( buf[9] ); \
MA = mm128_bswap_32( buf[10] ); \
MB = mm128_bswap_32( buf[11] ); \
MC = mm128_bswap_32( buf[12] ); \
MD = mm128_bswap_32( buf[13] ); \
ME = mm128_bswap_32( buf[14] ); \
MF = mm128_bswap_32( buf[15] ); \
} while(0)
#endif // SSSE3 else SSE2
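The SSSE3 path replaces per-word byte swaps with one _mm_shuffle_epi8 per vector; the mask 0x0c0d0e0f08090a0b / 0x0405060700010203 simply reverses the four bytes inside each 32-bit lane. A small standalone check of that mask, illustrative only and not part of the miner:

// build with -mssse3 (or any -march that implies it)
#include <stdint.h>
#include <stdio.h>
#include <tmmintrin.h>

int main( void )
{
   const __m128i shuf_bswap32 = _mm_set_epi64x( 0x0c0d0e0f08090a0b,
                                                0x0405060700010203 );
   __m128i v = _mm_set_epi32( 0x44434241, 0x34333231, 0x24232221, 0x14131211 );
   __m128i r = _mm_shuffle_epi8( v, shuf_bswap32 );
   uint32_t out[4];
   _mm_storeu_si128( (__m128i*)out, r );
   // each 32-bit lane is byte-reversed: 0x14131211 -> 0x11121314, etc.
   printf( "%08x %08x %08x %08x\n", out[0], out[1], out[2], out[3] );
   return 0;
}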
#define COMPRESS32_4WAY( rounds ) \
do { \
__m128i M0, M1, M2, M3, M4, M5, M6, M7; \
@@ -486,22 +516,7 @@ do { \
VD = _mm_xor_si128( _mm_set1_epi32( T0 ), _mm_set1_epi32( CS5 ) ); \
VE = _mm_xor_si128( _mm_set1_epi32( T1 ), _mm_set1_epi32( CS6 ) ); \
VF = _mm_xor_si128( _mm_set1_epi32( T1 ), _mm_set1_epi32( CS7 ) ); \
M0 = mm128_bswap_32( buf[ 0] ); \
M1 = mm128_bswap_32( buf[ 1] ); \
M2 = mm128_bswap_32( buf[ 2] ); \
M3 = mm128_bswap_32( buf[ 3] ); \
M4 = mm128_bswap_32( buf[ 4] ); \
M5 = mm128_bswap_32( buf[ 5] ); \
M6 = mm128_bswap_32( buf[ 6] ); \
M7 = mm128_bswap_32( buf[ 7] ); \
M8 = mm128_bswap_32( buf[ 8] ); \
M9 = mm128_bswap_32( buf[ 9] ); \
MA = mm128_bswap_32( buf[10] ); \
MB = mm128_bswap_32( buf[11] ); \
MC = mm128_bswap_32( buf[12] ); \
MD = mm128_bswap_32( buf[13] ); \
ME = mm128_bswap_32( buf[14] ); \
MF = mm128_bswap_32( buf[15] ); \
BLAKE256_4WAY_BLOCK_BSWAP32; \
ROUND_S_4WAY(0); \
ROUND_S_4WAY(1); \
ROUND_S_4WAY(2); \
@@ -519,14 +534,14 @@ do { \
ROUND_S_4WAY(2); \
ROUND_S_4WAY(3); \
} \
H0 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( V8, V0 ), S0 ), H0 ); \
H1 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( V9, V1 ), S1 ), H1 ); \
H2 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( VA, V2 ), S2 ), H2 ); \
H3 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( VB, V3 ), S3 ), H3 ); \
H4 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( VC, V4 ), S0 ), H4 ); \
H5 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( VD, V5 ), S1 ), H5 ); \
H6 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( VE, V6 ), S2 ), H6 ); \
H7 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( VF, V7 ), S3 ), H7 ); \
H0 = mm128_xor4( V8, V0, S0, H0 ); \
H1 = mm128_xor4( V9, V1, S1, H1 ); \
H2 = mm128_xor4( VA, V2, S2, H2 ); \
H3 = mm128_xor4( VB, V3, S3, H3 ); \
H4 = mm128_xor4( VC, V4, S0, H4 ); \
H5 = mm128_xor4( VD, V5, S1, H5 ); \
H6 = mm128_xor4( VE, V6, S2, H6 ); \
H7 = mm128_xor4( VF, V7, S3, H7 ); \
} while (0)
#endif
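The eight chained _mm_xor_si128 feedback lines collapse into a presumed 4-input helper. Going by the expression it replaces, mm128_xor4 (and its 256-bit counterpart mm256_xor4 used below) is most likely nothing more than the obvious definition; a sketch under that assumption:

#define mm128_xor4( a, b, c, d ) \
   _mm_xor_si128( _mm_xor_si128( a, b ), _mm_xor_si128( c, d ) )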
@@ -607,6 +622,7 @@ do { \
__m256i M8, M9, MA, MB, MC, MD, ME, MF; \
__m256i V0, V1, V2, V3, V4, V5, V6, V7; \
__m256i V8, V9, VA, VB, VC, VD, VE, VF; \
__m256i shuf_bswap32; \
V0 = H0; \
V1 = H1; \
V2 = H2; \
@@ -623,22 +639,24 @@ do { \
VD = _mm256_xor_si256( _mm256_set1_epi32( T0 ), _mm256_set1_epi32( CS5 ) ); \
VE = _mm256_xor_si256( _mm256_set1_epi32( T1 ), _mm256_set1_epi32( CS6 ) ); \
VF = _mm256_xor_si256( _mm256_set1_epi32( T1 ), _mm256_set1_epi32( CS7 ) ); \
M0 = mm256_bswap_32( * buf ); \
M1 = mm256_bswap_32( *(buf+1) ); \
M2 = mm256_bswap_32( *(buf+2) ); \
M3 = mm256_bswap_32( *(buf+3) ); \
M4 = mm256_bswap_32( *(buf+4) ); \
M5 = mm256_bswap_32( *(buf+5) ); \
M6 = mm256_bswap_32( *(buf+6) ); \
M7 = mm256_bswap_32( *(buf+7) ); \
M8 = mm256_bswap_32( *(buf+8) ); \
M9 = mm256_bswap_32( *(buf+9) ); \
MA = mm256_bswap_32( *(buf+10) ); \
MB = mm256_bswap_32( *(buf+11) ); \
MC = mm256_bswap_32( *(buf+12) ); \
MD = mm256_bswap_32( *(buf+13) ); \
ME = mm256_bswap_32( *(buf+14) ); \
MF = mm256_bswap_32( *(buf+15) ); \
shuf_bswap32 = _mm256_set_epi64x( 0x0c0d0e0f08090a0b, 0x0405060700010203, \
0x0c0d0e0f08090a0b, 0x0405060700010203 ); \
M0 = _mm256_shuffle_epi8( * buf , shuf_bswap32 ); \
M1 = _mm256_shuffle_epi8( *(buf+ 1), shuf_bswap32 ); \
M2 = _mm256_shuffle_epi8( *(buf+ 2), shuf_bswap32 ); \
M3 = _mm256_shuffle_epi8( *(buf+ 3), shuf_bswap32 ); \
M4 = _mm256_shuffle_epi8( *(buf+ 4), shuf_bswap32 ); \
M5 = _mm256_shuffle_epi8( *(buf+ 5), shuf_bswap32 ); \
M6 = _mm256_shuffle_epi8( *(buf+ 6), shuf_bswap32 ); \
M7 = _mm256_shuffle_epi8( *(buf+ 7), shuf_bswap32 ); \
M8 = _mm256_shuffle_epi8( *(buf+ 8), shuf_bswap32 ); \
M9 = _mm256_shuffle_epi8( *(buf+ 9), shuf_bswap32 ); \
MA = _mm256_shuffle_epi8( *(buf+10), shuf_bswap32 ); \
MB = _mm256_shuffle_epi8( *(buf+11), shuf_bswap32 ); \
MC = _mm256_shuffle_epi8( *(buf+12), shuf_bswap32 ); \
MD = _mm256_shuffle_epi8( *(buf+13), shuf_bswap32 ); \
ME = _mm256_shuffle_epi8( *(buf+14), shuf_bswap32 ); \
MF = _mm256_shuffle_epi8( *(buf+15), shuf_bswap32 ); \
ROUND_S_8WAY(0); \
ROUND_S_8WAY(1); \
ROUND_S_8WAY(2); \
@@ -656,22 +674,14 @@ do { \
ROUND_S_8WAY(2); \
ROUND_S_8WAY(3); \
} \
H0 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( V8, V0 ), \
S0 ), H0 ); \
H1 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( V9, V1 ), \
S1 ), H1 ); \
H2 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( VA, V2 ), \
S2 ), H2 ); \
H3 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( VB, V3 ), \
S3 ), H3 ); \
H4 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( VC, V4 ), \
S0 ), H4 ); \
H5 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( VD, V5 ), \
S1 ), H5 ); \
H6 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( VE, V6 ), \
S2 ), H6 ); \
H7 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( VF, V7 ), \
S3 ), H7 ); \
H0 = mm256_xor4( V8, V0, S0, H0 ); \
H1 = mm256_xor4( V9, V1, S1, H1 ); \
H2 = mm256_xor4( VA, V2, S2, H2 ); \
H3 = mm256_xor4( VB, V3, S3, H3 ); \
H4 = mm256_xor4( VC, V4, S0, H4 ); \
H5 = mm256_xor4( VD, V5, S1, H5 ); \
H6 = mm256_xor4( VE, V6, S2, H6 ); \
H7 = mm256_xor4( VF, V7, S3, H7 ); \
} while (0)
@@ -685,6 +695,7 @@ static void
blake32_4way_init( blake_4way_small_context *ctx, const uint32_t *iv,
const uint32_t *salt, int rounds )
{
__m128i zero = m128_zero;
casti_m128i( ctx->H, 0 ) = _mm_set1_epi32( iv[0] );
casti_m128i( ctx->H, 1 ) = _mm_set1_epi32( iv[1] );
casti_m128i( ctx->H, 2 ) = _mm_set1_epi32( iv[2] );
@@ -694,16 +705,10 @@ blake32_4way_init( blake_4way_small_context *ctx, const uint32_t *iv,
casti_m128i( ctx->H, 6 ) = _mm_set1_epi32( iv[6] );
casti_m128i( ctx->H, 7 ) = _mm_set1_epi32( iv[7] );
casti_m128i( ctx->S, 0 ) = m128_zero;
casti_m128i( ctx->S, 1 ) = m128_zero;
casti_m128i( ctx->S, 2 ) = m128_zero;
casti_m128i( ctx->S, 3 ) = m128_zero;
/*
sc->S[0] = _mm_set1_epi32( salt[0] );
sc->S[1] = _mm_set1_epi32( salt[1] );
sc->S[2] = _mm_set1_epi32( salt[2] );
sc->S[3] = _mm_set1_epi32( salt[3] );
*/
casti_m128i( ctx->S, 0 ) = zero;
casti_m128i( ctx->S, 1 ) = zero;
casti_m128i( ctx->S, 2 ) = zero;
casti_m128i( ctx->S, 3 ) = zero;
ctx->T0 = ctx->T1 = 0;
ctx->ptr = 0;
ctx->rounds = rounds;
@@ -796,14 +801,7 @@ blake32_4way_close( blake_4way_small_context *ctx, unsigned ub, unsigned n,
blake32_4way( ctx, buf, 64 );
}
casti_m128i( dst, 0 ) = mm128_bswap_32( casti_m128i( ctx->H, 0 ) );
casti_m128i( dst, 1 ) = mm128_bswap_32( casti_m128i( ctx->H, 1 ) );
casti_m128i( dst, 2 ) = mm128_bswap_32( casti_m128i( ctx->H, 2 ) );
casti_m128i( dst, 3 ) = mm128_bswap_32( casti_m128i( ctx->H, 3 ) );
casti_m128i( dst, 4 ) = mm128_bswap_32( casti_m128i( ctx->H, 4 ) );
casti_m128i( dst, 5 ) = mm128_bswap_32( casti_m128i( ctx->H, 5 ) );
casti_m128i( dst, 6 ) = mm128_bswap_32( casti_m128i( ctx->H, 6 ) );
casti_m128i( dst, 7 ) = mm128_bswap_32( casti_m128i( ctx->H, 7 ) );
mm128_block_bswap_32( (__m128i*)dst, (__m128i*)ctx->H );
}
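blake32_4way_close now hands the eight H vectors to mm128_block_bswap_32 instead of byte-swapping them one by one, and the same helper appears twice in COMPRESS32_4WAY for the 16-word message block. Assuming it simply covers eight consecutive vectors, which is exactly what the per-element swaps it replaces did, it amounts to:

// Sketch only: eight consecutive __m128i vectors, each byte-swapped per
// 32-bit lane using mm128_bswap_32 as elsewhere in this file.
static inline void block_bswap_32_sketch( __m128i *d, const __m128i *s )
{
   for ( int i = 0; i < 8; i++ )
      d[i] = mm128_bswap_32( s[i] );
}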
#if defined (__AVX2__)
@@ -816,11 +814,21 @@ static void
blake32_8way_init( blake_8way_small_context *sc, const sph_u32 *iv,
const sph_u32 *salt, int rounds )
{
int i;
for ( i = 0; i < 8; i++ )
sc->H[i] = _mm256_set1_epi32( iv[i] );
for ( i = 0; i < 4; i++ )
sc->S[i] = _mm256_set1_epi32( salt[i] );
__m256i zero = m256_zero;
casti_m256i( sc->H, 0 ) = _mm256_set1_epi32( iv[0] );
casti_m256i( sc->H, 1 ) = _mm256_set1_epi32( iv[1] );
casti_m256i( sc->H, 2 ) = _mm256_set1_epi32( iv[2] );
casti_m256i( sc->H, 3 ) = _mm256_set1_epi32( iv[3] );
casti_m256i( sc->H, 4 ) = _mm256_set1_epi32( iv[4] );
casti_m256i( sc->H, 5 ) = _mm256_set1_epi32( iv[5] );
casti_m256i( sc->H, 6 ) = _mm256_set1_epi32( iv[6] );
casti_m256i( sc->H, 7 ) = _mm256_set1_epi32( iv[7] );
casti_m256i( sc->S, 0 ) = zero;
casti_m256i( sc->S, 1 ) = zero;
casti_m256i( sc->S, 2 ) = zero;
casti_m256i( sc->S, 3 ) = zero;
sc->T0 = sc->T1 = 0;
sc->ptr = 0;
sc->rounds = rounds;
@@ -872,14 +880,10 @@ static void
blake32_8way_close( blake_8way_small_context *sc, unsigned ub, unsigned n,
void *dst, size_t out_size_w32 )
{
// union {
__m256i buf[16];
// sph_u32 dummy;
// } u;
size_t ptr, k;
__m256i buf[16];
size_t ptr;
unsigned bit_len;
sph_u32 th, tl;
__m256i *out;
ptr = sc->ptr;
bit_len = ((unsigned)ptr << 3);
@@ -923,9 +927,7 @@ blake32_8way_close( blake_8way_small_context *sc, unsigned ub, unsigned n,
*(buf+(60>>2)) = mm256_bswap_32( _mm256_set1_epi32( tl ) );
blake32_8way( sc, buf, 64 );
}
out = (__m256i*)dst;
for ( k = 0; k < out_size_w32; k++ )
out[k] = mm256_bswap_32( sc->H[k] );
mm256_block_bswap_32( (__m256i*)dst, (__m256i*)sc->H );
}
#endif

View File

@@ -16,9 +16,9 @@ void blake2s_8way_hash( void *output, const void *input )
blake2s_8way_update( &ctx, input + (64<<3), 16 );
blake2s_8way_final( &ctx, vhash, BLAKE2S_OUTBYTES );
mm256_dintrlv_8x32( output, output+ 32, output+ 64, output+ 96,
output+128, output+160, output+192, output+224,
vhash, 256 );
dintrlv_8x32( output, output+ 32, output+ 64, output+ 96,
output+128, output+160, output+192, output+224,
vhash, 256 );
}
int scanhash_blake2s_8way( struct work *work, uint32_t max_nonce,
@@ -28,28 +28,19 @@ int scanhash_blake2s_8way( struct work *work, uint32_t max_nonce,
uint32_t hash[8*8] __attribute__ ((aligned (32)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t _ALIGN(64) edata[20];
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 19; // aligned
uint32_t n = first_nonce;
uint32_t *noncep = vdata + 152; // 19*8
int thr_id = mythr->id; // thr_id arg is deprecated
swab32_array( edata, pdata, 20 );
mm256_intrlv_8x32( vdata, edata, edata, edata, edata,
edata, edata, edata, edata, 640 );
mm256_bswap32_intrlv80_8x32( vdata, pdata );
blake2s_8way_init( &blake2s_8w_ctx, BLAKE2S_OUTBYTES );
blake2s_8way_update( &blake2s_8w_ctx, vdata, 64 );
do {
be32enc( noncep, n );
be32enc( noncep +1, n+1 );
be32enc( noncep +2, n+2 );
be32enc( noncep +3, n+3 );
be32enc( noncep +4, n+4 );
be32enc( noncep +5, n+5 );
be32enc( noncep +6, n+6 );
be32enc( noncep +7, n+7 );
*noncev = mm256_bswap_32( _mm256_set_epi32( n+7, n+6, n+5, n+4,
n+3, n+2, n+1, n ) );
pdata[19] = n;
blake2s_8way_hash( hash, vdata );
@@ -94,23 +85,18 @@ int scanhash_blake2s_4way( struct work *work, uint32_t max_nonce,
uint32_t hash[8*4] __attribute__ ((aligned (32)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t _ALIGN(64) edata[20];
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
__m128i *noncev = (__m128i*)vdata + 19; // aligned
uint32_t n = first_nonce;
uint32_t *noncep = vdata + 76; // 19*4
int thr_id = mythr->id; // thr_id arg is deprecated
swab32_array( edata, pdata, 20 );
mm128_intrlv_4x32( vdata, edata, edata, edata, edata, 640 );
mm128_bswap32_intrlv80_4x32( vdata, pdata );
blake2s_4way_init( &blake2s_4w_ctx, BLAKE2S_OUTBYTES );
blake2s_4way_update( &blake2s_4w_ctx, vdata, 64 );
do {
be32enc( noncep, n );
be32enc( noncep +1, n+1 );
be32enc( noncep +2, n+2 );
be32enc( noncep +3, n+3 );
*noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) );
pdata[19] = n;
blake2s_4way_hash( hash, vdata );

View File

@@ -412,18 +412,18 @@ static const sph_u64 CB[16] = {
V5 = H5; \
V6 = H6; \
V7 = H7; \
V8 = _mm256_xor_si256( S0, _mm256_set_epi64x( CB0, CB0, CB0, CB0 ) ); \
V9 = _mm256_xor_si256( S1, _mm256_set_epi64x( CB1, CB1, CB1, CB1 ) ); \
VA = _mm256_xor_si256( S2, _mm256_set_epi64x( CB2, CB2, CB2, CB2 ) ); \
VB = _mm256_xor_si256( S3, _mm256_set_epi64x( CB3, CB3, CB3, CB3 ) ); \
VC = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \
_mm256_set_epi64x( CB4, CB4, CB4, CB4 ) ); \
VD = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \
_mm256_set_epi64x( CB5, CB5, CB5, CB5 ) ); \
VE = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \
_mm256_set_epi64x( CB6, CB6, CB6, CB6 ) ); \
VF = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \
_mm256_set_epi64x( CB7, CB7, CB7, CB7 ) ); \
V8 = _mm256_xor_si256( S0, _mm256_set_epi64x( CB0, CB0, CB0, CB0 ) ); \
V9 = _mm256_xor_si256( S1, _mm256_set_epi64x( CB1, CB1, CB1, CB1 ) ); \
VA = _mm256_xor_si256( S2, _mm256_set_epi64x( CB2, CB2, CB2, CB2 ) ); \
VB = _mm256_xor_si256( S3, _mm256_set_epi64x( CB3, CB3, CB3, CB3 ) ); \
VC = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \
_mm256_set_epi64x( CB4, CB4, CB4, CB4 ) ); \
VD = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \
_mm256_set_epi64x( CB5, CB5, CB5, CB5 ) ); \
VE = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \
_mm256_set_epi64x( CB6, CB6, CB6, CB6 ) ); \
VF = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \
_mm256_set_epi64x( CB7, CB7, CB7, CB7 ) ); \
M[0x0] = mm256_bswap_64( *(buf+0) ); \
M[0x1] = mm256_bswap_64( *(buf+1) ); \
M[0x2] = mm256_bswap_64( *(buf+2) ); \
@@ -464,80 +464,76 @@ static const sph_u64 CB[16] = {
//current impl
#define COMPRESS64_4WAY do { \
__m256i M0, M1, M2, M3, M4, M5, M6, M7; \
__m256i M8, M9, MA, MB, MC, MD, ME, MF; \
__m256i V0, V1, V2, V3, V4, V5, V6, V7; \
__m256i V8, V9, VA, VB, VC, VD, VE, VF; \
V0 = H0; \
V1 = H1; \
V2 = H2; \
V3 = H3; \
V4 = H4; \
V5 = H5; \
V6 = H6; \
V7 = H7; \
V8 = _mm256_xor_si256( S0, _mm256_set_epi64x( CB0, CB0, CB0, CB0 ) ); \
V9 = _mm256_xor_si256( S1, _mm256_set_epi64x( CB1, CB1, CB1, CB1 ) ); \
VA = _mm256_xor_si256( S2, _mm256_set_epi64x( CB2, CB2, CB2, CB2 ) ); \
VB = _mm256_xor_si256( S3, _mm256_set_epi64x( CB3, CB3, CB3, CB3 ) ); \
VC = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \
_mm256_set_epi64x( CB4, CB4, CB4, CB4 ) ); \
VD = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \
_mm256_set_epi64x( CB5, CB5, CB5, CB5 ) ); \
VE = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \
_mm256_set_epi64x( CB6, CB6, CB6, CB6 ) ); \
VF = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \
_mm256_set_epi64x( CB7, CB7, CB7, CB7 ) ); \
M0 = mm256_bswap_64( *(buf + 0) ); \
M1 = mm256_bswap_64( *(buf + 1) ); \
M2 = mm256_bswap_64( *(buf + 2) ); \
M3 = mm256_bswap_64( *(buf + 3) ); \
M4 = mm256_bswap_64( *(buf + 4) ); \
M5 = mm256_bswap_64( *(buf + 5) ); \
M6 = mm256_bswap_64( *(buf + 6) ); \
M7 = mm256_bswap_64( *(buf + 7) ); \
M8 = mm256_bswap_64( *(buf + 8) ); \
M9 = mm256_bswap_64( *(buf + 9) ); \
MA = mm256_bswap_64( *(buf + 10) ); \
MB = mm256_bswap_64( *(buf + 11) ); \
MC = mm256_bswap_64( *(buf + 12) ); \
MD = mm256_bswap_64( *(buf + 13) ); \
ME = mm256_bswap_64( *(buf + 14) ); \
MF = mm256_bswap_64( *(buf + 15) ); \
ROUND_B_4WAY(0); \
ROUND_B_4WAY(1); \
ROUND_B_4WAY(2); \
ROUND_B_4WAY(3); \
ROUND_B_4WAY(4); \
ROUND_B_4WAY(5); \
ROUND_B_4WAY(6); \
ROUND_B_4WAY(7); \
ROUND_B_4WAY(8); \
ROUND_B_4WAY(9); \
ROUND_B_4WAY(0); \
ROUND_B_4WAY(1); \
ROUND_B_4WAY(2); \
ROUND_B_4WAY(3); \
ROUND_B_4WAY(4); \
ROUND_B_4WAY(5); \
H0 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S0, V0 ), V8 ), H0 ); \
H1 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S1, V1 ), V9 ), H1 ); \
H2 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S2, V2 ), VA ), H2 ); \
H3 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S3, V3 ), VB ), H3 ); \
H4 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S0, V4 ), VC ), H4 ); \
H5 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S1, V5 ), VD ), H5 ); \
H6 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S2, V6 ), VE ), H6 ); \
H7 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S3, V7 ), VF ), H7 ); \
} while (0)
#define COMPRESS64_4WAY do \
{ \
__m256i M0, M1, M2, M3, M4, M5, M6, M7; \
__m256i M8, M9, MA, MB, MC, MD, ME, MF; \
__m256i V0, V1, V2, V3, V4, V5, V6, V7; \
__m256i V8, V9, VA, VB, VC, VD, VE, VF; \
__m256i shuf_bswap64; \
V0 = H0; \
V1 = H1; \
V2 = H2; \
V3 = H3; \
V4 = H4; \
V5 = H5; \
V6 = H6; \
V7 = H7; \
V8 = _mm256_xor_si256( S0, _mm256_set1_epi64x( CB0 ) ); \
V9 = _mm256_xor_si256( S1, _mm256_set1_epi64x( CB1 ) ); \
VA = _mm256_xor_si256( S2, _mm256_set1_epi64x( CB2 ) ); \
VB = _mm256_xor_si256( S3, _mm256_set1_epi64x( CB3 ) ); \
VC = _mm256_xor_si256( _mm256_set1_epi64x( T0 ), \
_mm256_set1_epi64x( CB4 ) ); \
VD = _mm256_xor_si256( _mm256_set1_epi64x( T0 ), \
_mm256_set1_epi64x( CB5 ) ); \
VE = _mm256_xor_si256( _mm256_set1_epi64x( T1 ), \
_mm256_set1_epi64x( CB6 ) ); \
VF = _mm256_xor_si256( _mm256_set1_epi64x( T1 ), \
_mm256_set1_epi64x( CB7 ) ); \
shuf_bswap64 = _mm256_set_epi64x( 0x08090a0b0c0d0e0f, 0x0001020304050607, \
0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
M0 = _mm256_shuffle_epi8( *(buf+ 0), shuf_bswap64 ); \
M1 = _mm256_shuffle_epi8( *(buf+ 1), shuf_bswap64 ); \
M2 = _mm256_shuffle_epi8( *(buf+ 2), shuf_bswap64 ); \
M3 = _mm256_shuffle_epi8( *(buf+ 3), shuf_bswap64 ); \
M4 = _mm256_shuffle_epi8( *(buf+ 4), shuf_bswap64 ); \
M5 = _mm256_shuffle_epi8( *(buf+ 5), shuf_bswap64 ); \
M6 = _mm256_shuffle_epi8( *(buf+ 6), shuf_bswap64 ); \
M7 = _mm256_shuffle_epi8( *(buf+ 7), shuf_bswap64 ); \
M8 = _mm256_shuffle_epi8( *(buf+ 8), shuf_bswap64 ); \
M9 = _mm256_shuffle_epi8( *(buf+ 9), shuf_bswap64 ); \
MA = _mm256_shuffle_epi8( *(buf+10), shuf_bswap64 ); \
MB = _mm256_shuffle_epi8( *(buf+11), shuf_bswap64 ); \
MC = _mm256_shuffle_epi8( *(buf+12), shuf_bswap64 ); \
MD = _mm256_shuffle_epi8( *(buf+13), shuf_bswap64 ); \
ME = _mm256_shuffle_epi8( *(buf+14), shuf_bswap64 ); \
MF = _mm256_shuffle_epi8( *(buf+15), shuf_bswap64 ); \
ROUND_B_4WAY(0); \
ROUND_B_4WAY(1); \
ROUND_B_4WAY(2); \
ROUND_B_4WAY(3); \
ROUND_B_4WAY(4); \
ROUND_B_4WAY(5); \
ROUND_B_4WAY(6); \
ROUND_B_4WAY(7); \
ROUND_B_4WAY(8); \
ROUND_B_4WAY(9); \
ROUND_B_4WAY(0); \
ROUND_B_4WAY(1); \
ROUND_B_4WAY(2); \
ROUND_B_4WAY(3); \
ROUND_B_4WAY(4); \
ROUND_B_4WAY(5); \
H0 = mm256_xor4( V8, V0, S0, H0 ); \
H1 = mm256_xor4( V9, V1, S1, H1 ); \
H2 = mm256_xor4( VA, V2, S2, H2 ); \
H3 = mm256_xor4( VB, V3, S3, H3 ); \
H4 = mm256_xor4( VC, V4, S0, H4 ); \
H5 = mm256_xor4( VD, V5, S1, H5 ); \
H6 = mm256_xor4( VE, V6, S2, H6 ); \
H7 = mm256_xor4( VF, V7, S3, H7 ); \
} while (0)
#endif
@@ -547,13 +543,23 @@ static void
blake64_4way_init( blake_4way_big_context *sc, const sph_u64 *iv,
const sph_u64 *salt )
{
int i;
for ( i = 0; i < 8; i++ )
sc->H[i] = _mm256_set1_epi64x( iv[i] );
for ( i = 0; i < 4; i++ )
sc->S[i] = _mm256_set1_epi64x( salt[i] );
sc->T0 = sc->T1 = 0;
sc->ptr = 0;
__m256i zero = m256_zero;
casti_m256i( sc->H, 0 ) = _mm256_set1_epi64x( iv[0] );
casti_m256i( sc->H, 1 ) = _mm256_set1_epi64x( iv[1] );
casti_m256i( sc->H, 2 ) = _mm256_set1_epi64x( iv[2] );
casti_m256i( sc->H, 3 ) = _mm256_set1_epi64x( iv[3] );
casti_m256i( sc->H, 4 ) = _mm256_set1_epi64x( iv[4] );
casti_m256i( sc->H, 5 ) = _mm256_set1_epi64x( iv[5] );
casti_m256i( sc->H, 6 ) = _mm256_set1_epi64x( iv[6] );
casti_m256i( sc->H, 7 ) = _mm256_set1_epi64x( iv[7] );
casti_m256i( sc->S, 0 ) = zero;
casti_m256i( sc->S, 1 ) = zero;
casti_m256i( sc->S, 2 ) = zero;
casti_m256i( sc->S, 3 ) = zero;
sc->T0 = sc->T1 = 0;
sc->ptr = 0;
}
static void
@@ -604,15 +610,11 @@ static void
blake64_4way_close( blake_4way_big_context *sc,
unsigned ub, unsigned n, void *dst, size_t out_size_w64)
{
// union {
__m256i buf[16];
// sph_u64 dummy;
// } u;
size_t ptr, k;
__m256i buf[16];
size_t ptr;
unsigned bit_len;
uint64_t z, zz;
sph_u64 th, tl;
__m256i *out;
ptr = sc->ptr;
bit_len = ((unsigned)ptr << 3);
@@ -665,9 +667,7 @@ blake64_4way_close( blake_4way_big_context *sc,
blake64_4way( sc, buf, 128 );
}
out = (__m256i*)dst;
for ( k = 0; k < out_size_w64; k++ )
out[k] = mm256_bswap_64( sc->H[k] );
mm256_block_bswap_64( (__m256i*)dst, sc->H );
}
void

View File

@@ -29,23 +29,18 @@ int scanhash_blakecoin_4way( struct work *work, uint32_t max_nonce,
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t HTarget = ptarget[7];
uint32_t _ALIGN(32) edata[20];
uint32_t n = first_nonce;
__m128i *noncev = (__m128i*)vdata + 19; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
if ( opt_benchmark )
HTarget = 0x7f;
swab32_array( edata, pdata, 20 );
mm128_intrlv_4x32( vdata, edata, edata, edata, edata, 640 );
mm128_bswap32_intrlv80_4x32( vdata, pdata );
blake256r8_4way_init( &blakecoin_4w_ctx );
blake256r8_4way( &blakecoin_4w_ctx, vdata, 64 );
uint32_t *noncep = vdata + 76; // 19*4
do {
be32enc( noncep, n );
be32enc( noncep +1, n+1 );
be32enc( noncep +2, n+2 );
be32enc( noncep +3, n+3 );
*noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) );
pdata[19] = n;
blakecoin_4way_hash( hash, vdata );
@@ -79,9 +74,8 @@ void blakecoin_8way_hash( void *state, const void *input )
blake256r8_8way( &ctx, input + (64<<3), 16 );
blake256r8_8way_close( &ctx, vhash );
mm256_dintrlv_8x32( state, state+ 32, state+ 64, state+ 96,
state+128, state+160, state+192, state+224,
vhash, 256 );
dintrlv_8x32( state, state+ 32, state+ 64, state+ 96, state+128,
state+160, state+192, state+224, vhash, 256 );
}
int scanhash_blakecoin_8way( struct work *work, uint32_t max_nonce,
@@ -93,29 +87,19 @@ int scanhash_blakecoin_8way( struct work *work, uint32_t max_nonce,
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t HTarget = ptarget[7];
uint32_t _ALIGN(32) edata[20];
uint32_t n = first_nonce;
uint32_t *noncep = vdata + 152; // 19*8
__m256i *noncev = (__m256i*)vdata + 19; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
if ( opt_benchmark )
HTarget = 0x7f;
// we need big endian data...
swab32_array( edata, pdata, 20 );
mm256_intrlv_8x32( vdata, edata, edata, edata, edata,
edata, edata, edata, edata, 640 );
mm256_bswap32_intrlv80_8x32( vdata, pdata );
blake256r8_8way_init( &blakecoin_8w_ctx );
blake256r8_8way( &blakecoin_8w_ctx, vdata, 64 );
do {
be32enc( noncep, n );
be32enc( noncep +1, n+1 );
be32enc( noncep +2, n+2 );
be32enc( noncep +3, n+3 );
be32enc( noncep +4, n+4 );
be32enc( noncep +5, n+5 );
be32enc( noncep +6, n+6 );
be32enc( noncep +7, n+7 );
*noncev = mm256_bswap_32( _mm256_set_epi32( n+7, n+6, n+5, n+4,
n+3, n+2, n+1, n ) );
pdata[19] = n;
blakecoin_8way_hash( hash, vdata );

View File

@@ -10,13 +10,8 @@
#include "blake-hash-4way.h"
#include "sph_blake.h"
//#define DEBUG_ALGO
extern void pentablakehash_4way( void *output, const void *input )
{
// unsigned char _ALIGN(32) hash[128];
// // same as uint32_t hashA[16], hashB[16];
// #define hashB hash+64
uint64_t hash0[8] __attribute__ ((aligned (64)));
uint64_t hash1[8] __attribute__ ((aligned (64)));
@@ -29,22 +24,7 @@ extern void pentablakehash_4way( void *output, const void *input )
blake512_4way_init( &ctx );
blake512_4way( &ctx, input, 80 );
blake512_4way_close( &ctx, vhash );
/*
uint64_t sin0[10], sin1[10], sin2[10], sin3[10];
mm256_deinterleave_4x64( sin0, sin1, sin2, sin3, input, 640 );
sph_blake512_context ctx2_blake;
sph_blake512_init(&ctx2_blake);
sph_blake512(&ctx2_blake, sin0, 80);
sph_blake512_close(&ctx2_blake, (void*) hash);
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
uint64_t* hash64 = (uint64_t*)hash;
for( int i = 0; i < 8; i++ )
{
if ( hash0[i] != hash64[i] )
printf("hash mismatch %u\n",i);
}
*/
blake512_4way_init( &ctx );
blake512_4way( &ctx, vhash, 64 );
blake512_4way_close( &ctx, vhash );
@@ -61,42 +41,10 @@ for( int i = 0; i < 8; i++ )
blake512_4way( &ctx, vhash, 64 );
blake512_4way_close( &ctx, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
memcpy( output, hash0, 32 );
memcpy( output+32, hash1, 32 );
memcpy( output+64, hash2, 32 );
memcpy( output+96, hash3, 32 );
/*
uint64_t sin0[10] __attribute__ ((aligned (64)));
uint64_t sin1[10] __attribute__ ((aligned (64)));
uint64_t sin2[10] __attribute__ ((aligned (64)));
uint64_t sin3[10] __attribute__ ((aligned (64)));
sph_blake512_context ctx_blake;
sph_blake512_init(&ctx_blake);
sph_blake512(&ctx_blake, input, 80);
sph_blake512_close(&ctx_blake, hash);
sph_blake512_init(&ctx_blake);
sph_blake512(&ctx_blake, hash, 64);
sph_blake512_close(&ctx_blake, hash);
sph_blake512_init(&ctx_blake);
sph_blake512(&ctx_blake, hash, 64);
sph_blake512_close(&ctx_blake, hash);
sph_blake512_init(&ctx_blake);
sph_blake512(&ctx_blake, hash, 64);
sph_blake512_close(&ctx_blake, hash);
sph_blake512_init(&ctx_blake);
sph_blake512(&ctx_blake, hash, 64);
sph_blake512_close(&ctx_blake, hash);
memcpy(output, hash, 32);
*/
}
int scanhash_pentablake_4way( struct work *work,
@@ -137,7 +85,7 @@ int scanhash_pentablake_4way( struct work *work,
swab32_array( endiandata, pdata, 20 );
uint64_t *edata = (uint64_t*)endiandata;
mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
for ( int m=0; m < 6; m++ )
{

View File

@@ -113,50 +113,27 @@ static const uint32_t IV256[] = {
#define expand1s( qt, M, H, i ) \
_mm_add_epi32( \
_mm_add_epi32( \
_mm_add_epi32( \
_mm_add_epi32( \
_mm_add_epi32( ss1( qt[ (i)-16 ] ), \
ss2( qt[ (i)-15 ] ) ), \
_mm_add_epi32( ss3( qt[ (i)-14 ] ), \
ss0( qt[ (i)-13 ] ) ) ), \
_mm_add_epi32( \
_mm_add_epi32( ss1( qt[ (i)-12 ] ), \
ss2( qt[ (i)-11 ] ) ), \
_mm_add_epi32( ss3( qt[ (i)-10 ] ), \
ss0( qt[ (i)- 9 ] ) ) ) ), \
_mm_add_epi32( \
_mm_add_epi32( \
_mm_add_epi32( ss1( qt[ (i)- 8 ] ), \
ss2( qt[ (i)- 7 ] ) ), \
_mm_add_epi32( ss3( qt[ (i)- 6 ] ), \
ss0( qt[ (i)- 5 ] ) ) ), \
_mm_add_epi32( \
_mm_add_epi32( ss1( qt[ (i)- 4 ] ), \
ss2( qt[ (i)- 3 ] ) ), \
_mm_add_epi32( ss3( qt[ (i)- 2 ] ), \
ss0( qt[ (i)- 1 ] ) ) ) ) ), \
_mm_add_epi32( mm128_add4_32( \
mm128_add4_32( ss1( qt[ (i)-16 ] ), ss2( qt[ (i)-15 ] ), \
ss3( qt[ (i)-14 ] ), ss0( qt[ (i)-13 ] ) ), \
mm128_add4_32( ss1( qt[ (i)-12 ] ), ss2( qt[ (i)-11 ] ), \
ss3( qt[ (i)-10 ] ), ss0( qt[ (i)- 9 ] ) ), \
mm128_add4_32( ss1( qt[ (i)- 8 ] ), ss2( qt[ (i)- 7 ] ), \
ss3( qt[ (i)- 6 ] ), ss0( qt[ (i)- 5 ] ) ), \
mm128_add4_32( ss1( qt[ (i)- 4 ] ), ss2( qt[ (i)- 3 ] ), \
ss3( qt[ (i)- 2 ] ), ss0( qt[ (i)- 1 ] ) ) ), \
add_elt_s( M, H, (i)-16 ) )
#define expand2s( qt, M, H, i) \
_mm_add_epi32( \
_mm_add_epi32( \
_mm_add_epi32( \
_mm_add_epi32( \
_mm_add_epi32( qt[ (i)-16 ], rs1( qt[ (i)-15 ] ) ), \
_mm_add_epi32( qt[ (i)-14 ], rs2( qt[ (i)-13 ] ) ) ), \
_mm_add_epi32( \
_mm_add_epi32( qt[ (i)-12 ], rs3( qt[ (i)-11 ] ) ), \
_mm_add_epi32( qt[ (i)-10 ], rs4( qt[ (i)- 9 ] ) ) ) ), \
_mm_add_epi32( \
_mm_add_epi32( \
_mm_add_epi32( qt[ (i)- 8 ], rs5( qt[ (i)- 7 ] ) ), \
_mm_add_epi32( qt[ (i)- 6 ], rs6( qt[ (i)- 5 ] ) ) ), \
_mm_add_epi32( \
_mm_add_epi32( qt[ (i)- 4 ], rs7( qt[ (i)- 3 ] ) ), \
_mm_add_epi32( ss4( qt[ (i)- 2 ] ), \
ss5( qt[ (i)- 1 ] ) ) ) ) ), \
_mm_add_epi32( mm128_add4_32( \
mm128_add4_32( qt[ (i)-16 ], rs1( qt[ (i)-15 ] ), \
qt[ (i)-14 ], rs2( qt[ (i)-13 ] ) ), \
mm128_add4_32( qt[ (i)-12 ], rs3( qt[ (i)-11 ] ), \
qt[ (i)-10 ], rs4( qt[ (i)- 9 ] ) ), \
mm128_add4_32( qt[ (i)- 8 ], rs5( qt[ (i)- 7 ] ), \
qt[ (i)- 6 ], rs6( qt[ (i)- 5 ] ) ), \
mm128_add4_32( qt[ (i)- 4 ], rs7( qt[ (i)- 3 ] ), \
ss4( qt[ (i)- 2 ] ), ss5( qt[ (i)- 1 ] ) ) ), \
add_elt_s( M, H, (i)-16 ) )
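The BMW expansion macros get the same treatment as the XOR feedback above: deeply nested two-input adds are grouped through mm128_add4_32 (the 8-way and 512-bit code further down uses mm256_add4_32 and mm256_add4_64 the same way), presumably plain 4-input adders along the lines of this sketch:

#define mm128_add4_32( a, b, c, d ) \
   _mm_add_epi32( _mm_add_epi32( a, b ), _mm_add_epi32( c, d ) )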
#define Ws0 \
@@ -357,17 +334,11 @@ void compress_small( const __m128i *M, const __m128i H[16], __m128i dH[16] )
qt[30] = expand2s( qt, M, H, 30 );
qt[31] = expand2s( qt, M, H, 31 );
xl = _mm_xor_si128(
_mm_xor_si128( _mm_xor_si128( qt[16], qt[17] ),
_mm_xor_si128( qt[18], qt[19] ) ),
_mm_xor_si128( _mm_xor_si128( qt[20], qt[21] ),
_mm_xor_si128( qt[22], qt[23] ) ) );
xh = _mm_xor_si128( xl,
_mm_xor_si128(
_mm_xor_si128( _mm_xor_si128( qt[24], qt[25] ),
_mm_xor_si128( qt[26], qt[27] ) ),
_mm_xor_si128( _mm_xor_si128( qt[28], qt[29] ),
_mm_xor_si128( qt[30], qt[31] ) )));
xl = _mm_xor_si128( mm128_xor4( qt[16], qt[17], qt[18], qt[19] ),
mm128_xor4( qt[20], qt[21], qt[22], qt[23] ) );
xh = _mm_xor_si128( xl, _mm_xor_si128(
mm128_xor4( qt[24], qt[25], qt[26], qt[27] ),
mm128_xor4( qt[28], qt[29], qt[30], qt[31] ) ) );
dH[ 0] = _mm_add_epi32(
_mm_xor_si128( M[0],
@@ -695,22 +666,15 @@ bmw256_4way_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
#define expand2s8( qt, M, H, i) \
_mm256_add_epi32( \
_mm256_add_epi32( \
_mm256_add_epi32( \
_mm256_add_epi32( \
_mm256_add_epi32( qt[ (i)-16 ], r8s1( qt[ (i)-15 ] ) ), \
_mm256_add_epi32( qt[ (i)-14 ], r8s2( qt[ (i)-13 ] ) ) ), \
_mm256_add_epi32( \
_mm256_add_epi32( qt[ (i)-12 ], r8s3( qt[ (i)-11 ] ) ), \
_mm256_add_epi32( qt[ (i)-10 ], r8s4( qt[ (i)- 9 ] ) ) ) ), \
_mm256_add_epi32( \
_mm256_add_epi32( \
_mm256_add_epi32( qt[ (i)- 8 ], r8s5( qt[ (i)- 7 ] ) ), \
_mm256_add_epi32( qt[ (i)- 6 ], r8s6( qt[ (i)- 5 ] ) ) ), \
_mm256_add_epi32( \
_mm256_add_epi32( qt[ (i)- 4 ], r8s7( qt[ (i)- 3 ] ) ), \
_mm256_add_epi32( s8s4( qt[ (i)- 2 ] ), \
s8s5( qt[ (i)- 1 ] ) ) ) ) ), \
mm256_add4_32( \
mm256_add4_32( qt[ (i)-16 ], r8s1( qt[ (i)-15 ] ), \
qt[ (i)-14 ], r8s2( qt[ (i)-13 ] ) ), \
mm256_add4_32( qt[ (i)-12 ], r8s3( qt[ (i)-11 ] ), \
qt[ (i)-10 ], r8s4( qt[ (i)- 9 ] ) ), \
mm256_add4_32( qt[ (i)- 8 ], r8s5( qt[ (i)- 7 ] ), \
qt[ (i)- 6 ], r8s6( qt[ (i)- 5 ] ) ), \
mm256_add4_32( qt[ (i)- 4 ], r8s7( qt[ (i)- 3 ] ), \
s8s4( qt[ (i)- 2 ] ), s8s5( qt[ (i)- 1 ] ) ) ), \
add_elt_s8( M, H, (i)-16 ) )
@@ -913,16 +877,11 @@ void compress_small_8way( const __m256i *M, const __m256i H[16],
qt[31] = expand2s8( qt, M, H, 31 );
xl = _mm256_xor_si256(
_mm256_xor_si256( _mm256_xor_si256( qt[16], qt[17] ),
_mm256_xor_si256( qt[18], qt[19] ) ),
_mm256_xor_si256( _mm256_xor_si256( qt[20], qt[21] ),
_mm256_xor_si256( qt[22], qt[23] ) ) );
xh = _mm256_xor_si256( xl,
_mm256_xor_si256(
_mm256_xor_si256( _mm256_xor_si256( qt[24], qt[25] ),
_mm256_xor_si256( qt[26], qt[27] ) ),
_mm256_xor_si256( _mm256_xor_si256( qt[28], qt[29] ),
_mm256_xor_si256( qt[30], qt[31] ) )));
mm256_xor4( qt[16], qt[17], qt[18], qt[19] ),
mm256_xor4( qt[20], qt[21], qt[22], qt[23] ) );
xh = _mm256_xor_si256( xl, _mm256_xor_si256(
mm256_xor4( qt[24], qt[25], qt[26], qt[27] ),
mm256_xor4( qt[28], qt[29], qt[30], qt[31] ) ) );
dH[ 0] = _mm256_add_epi32(
_mm256_xor_si256( M[0],

algo/bmw/bmw512-4way.c (new file, 59 lines)
View File


@@ -0,0 +1,59 @@
#include "bmw512-gate.h"
#ifdef BMW512_4WAY
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
//#include "sph_keccak.h"
#include "bmw-hash-4way.h"
void bmw512hash_4way(void *state, const void *input)
{
bmw512_4way_context ctx;
bmw512_4way_init( &ctx );
bmw512_4way( &ctx, input, 80 );
bmw512_4way_close( &ctx, state );
}
int scanhash_bmw512_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t hash[16*4] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[25]); // 3*8+1
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 9; // aligned
// const uint32_t Htarg = ptarget[7];
int thr_id = mythr->id; // thr_id arg is deprecated
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do {
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
bmw512hash_4way( hash, vdata );
for ( int lane = 0; lane < 4; lane++ )
if ( ( ( hash7[ lane<<1 ] & 0xFFFFFF00 ) == 0 ) )
{
extr_lane_4x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 4;
} while ( (n < max_nonce-4) && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
return 0;
}
#endif
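scanhash_bmw512_4way works directly on 4x64 interleaved hash state, so the quick difficulty check reads lane data in place rather than de-interleaving first. Assuming the usual layout implied by extr_lane_4x64 (64-bit word w of lane l at 64-bit index w*4 + l), the indexing in the loop above follows directly:

// 4x64 interleaving (assumed, matching extr_lane_4x64):
//   uint64 index of lane l, word w  =  w*4 + l
//   uint32 index                    =  w*8 + 2*l  (+1 for the high half)
// 64-bit word 3 holds 32-bit hash words 6..7, so lane 0's word 7 sits at
// 3*8 + 1 = 25 -- hence hash7 = &hash[25] and the "3*8+1" comment -- and
// lane l's word 7 is two uint32 slots further on, hence hash7[ lane<<1 ].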

algo/bmw/bmw512-gate.c (new file, 20 lines)
View File

@@ -0,0 +1,20 @@
#include "bmw512-gate.h"
int64_t bmw512_get_max64() { return 0x7ffffLL; }
bool register_bmw512_algo( algo_gate_t* gate )
{
gate->optimizations = AVX2_OPT;
gate->set_target = (void*)&alt_set_target;
gate->get_max64 = (void*)&bmw512_get_max64;
#if defined (BMW512_4WAY)
gate->scanhash = (void*)&scanhash_bmw512_4way;
gate->hash = (void*)&bmw512hash_4way;
#else
gate->scanhash = (void*)&scanhash_bmw512;
gate->hash = (void*)&bmw512hash;
#endif
return true;
};

algo/bmw/bmw512-gate.h (new file, 23 lines)
View File

@@ -0,0 +1,23 @@
#ifndef BMW512_GATE_H__
#define BMW512_GATE_H__
#include "algo-gate-api.h"
#include <stdint.h>
#if defined(__AVX2__)
#define BMW512_4WAY 1
#endif
#if defined(BMW512_4WAY)
void bmw512hash_4way( void *state, const void *input );
int scanhash_bmw512_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
void bmw512hash( void *state, const void *input );
int scanhash_bmw512( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif

View File

@@ -569,28 +569,20 @@ void bmw512_2way_close( bmw_2way_big_context *ctx, void *dst )
#define sb0(x) \
_mm256_xor_si256( _mm256_xor_si256( _mm256_srli_epi64( (x), 1), \
_mm256_slli_epi64( (x), 3) ), \
_mm256_xor_si256( mm256_rol_64( (x), 4), \
mm256_rol_64( (x), 37) ) )
mm256_xor4( _mm256_srli_epi64( (x), 1), _mm256_slli_epi64( (x), 3), \
mm256_rol_64( (x), 4), mm256_rol_64( (x),37) )
#define sb1(x) \
_mm256_xor_si256( _mm256_xor_si256( _mm256_srli_epi64( (x), 1), \
_mm256_slli_epi64( (x), 2) ), \
_mm256_xor_si256( mm256_rol_64( (x), 13), \
mm256_rol_64( (x), 43) ) )
mm256_xor4( _mm256_srli_epi64( (x), 1), _mm256_slli_epi64( (x), 2), \
mm256_rol_64( (x),13), mm256_rol_64( (x),43) )
#define sb2(x) \
_mm256_xor_si256( _mm256_xor_si256( _mm256_srli_epi64( (x), 2), \
_mm256_slli_epi64( (x), 1) ), \
_mm256_xor_si256( mm256_rol_64( (x), 19), \
mm256_rol_64( (x), 53) ) )
mm256_xor4( _mm256_srli_epi64( (x), 2), _mm256_slli_epi64( (x), 1), \
mm256_rol_64( (x),19), mm256_rol_64( (x),53) )
#define sb3(x) \
_mm256_xor_si256( _mm256_xor_si256( _mm256_srli_epi64( (x), 2), \
_mm256_slli_epi64( (x), 2) ), \
_mm256_xor_si256( mm256_rol_64( (x), 28), \
mm256_rol_64( (x), 59) ) )
mm256_xor4( _mm256_srli_epi64( (x), 2), _mm256_slli_epi64( (x), 2), \
mm256_rol_64( (x),28), mm256_rol_64( (x),59) )
#define sb4(x) \
_mm256_xor_si256( (x), _mm256_srli_epi64( (x), 1 ) )
@@ -618,55 +610,32 @@ void bmw512_2way_close( bmw_2way_big_context *ctx, void *dst )
rol_off_64( M, j, 10 ) ), \
_mm256_set1_epi64x( ( (j) + 16 ) * 0x0555555555555555ULL ) ), \
H[ ( (j)+7 ) & 0xF ] )
#define expand1b( qt, M, H, i ) \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( sb1( qt[ (i)-16 ] ), \
sb2( qt[ (i)-15 ] ) ), \
_mm256_add_epi64( sb3( qt[ (i)-14 ] ), \
sb0( qt[ (i)-13 ] ) ) ), \
_mm256_add_epi64( \
_mm256_add_epi64( sb1( qt[ (i)-12 ] ), \
sb2( qt[ (i)-11 ] ) ), \
_mm256_add_epi64( sb3( qt[ (i)-10 ] ), \
sb0( qt[ (i)- 9 ] ) ) ) ), \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( sb1( qt[ (i)- 8 ] ), \
sb2( qt[ (i)- 7 ] ) ), \
_mm256_add_epi64( sb3( qt[ (i)- 6 ] ), \
sb0( qt[ (i)- 5 ] ) ) ), \
_mm256_add_epi64( \
_mm256_add_epi64( sb1( qt[ (i)- 4 ] ), \
sb2( qt[ (i)- 3 ] ) ), \
_mm256_add_epi64( sb3( qt[ (i)- 2 ] ), \
sb0( qt[ (i)- 1 ] ) ) ) ) ), \
_mm256_add_epi64( mm256_add4_64( \
mm256_add4_64( sb1( qt[ (i)-16 ] ), sb2( qt[ (i)-15 ] ), \
sb3( qt[ (i)-14 ] ), sb0( qt[ (i)-13 ] )), \
mm256_add4_64( sb1( qt[ (i)-12 ] ), sb2( qt[ (i)-11 ] ), \
sb3( qt[ (i)-10 ] ), sb0( qt[ (i)- 9 ] )), \
mm256_add4_64( sb1( qt[ (i)- 8 ] ), sb2( qt[ (i)- 7 ] ), \
sb3( qt[ (i)- 6 ] ), sb0( qt[ (i)- 5 ] )), \
mm256_add4_64( sb1( qt[ (i)- 4 ] ), sb2( qt[ (i)- 3 ] ), \
sb3( qt[ (i)- 2 ] ), sb0( qt[ (i)- 1 ] ) ) ), \
add_elt_b( M, H, (i)-16 ) )
#define expand2b( qt, M, H, i) \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( qt[ (i)-16 ], rb1( qt[ (i)-15 ] ) ), \
_mm256_add_epi64( qt[ (i)-14 ], rb2( qt[ (i)-13 ] ) ) ), \
_mm256_add_epi64( \
_mm256_add_epi64( qt[ (i)-12 ], rb3( qt[ (i)-11 ] ) ), \
_mm256_add_epi64( qt[ (i)-10 ], rb4( qt[ (i)- 9 ] ) ) ) ), \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( qt[ (i)- 8 ], rb5( qt[ (i)- 7 ] ) ), \
_mm256_add_epi64( qt[ (i)- 6 ], rb6( qt[ (i)- 5 ] ) ) ), \
_mm256_add_epi64( \
_mm256_add_epi64( qt[ (i)- 4 ], rb7( qt[ (i)- 3 ] ) ), \
_mm256_add_epi64( sb4( qt[ (i)- 2 ] ), \
sb5( qt[ (i)- 1 ] ) ) ) ) ), \
_mm256_add_epi64( mm256_add4_64( \
mm256_add4_64( qt[ (i)-16 ], rb1( qt[ (i)-15 ] ), \
qt[ (i)-14 ], rb2( qt[ (i)-13 ] ) ), \
mm256_add4_64( qt[ (i)-12 ], rb3( qt[ (i)-11 ] ), \
qt[ (i)-10 ], rb4( qt[ (i)- 9 ] ) ), \
mm256_add4_64( qt[ (i)- 8 ], rb5( qt[ (i)- 7 ] ), \
qt[ (i)- 6 ], rb6( qt[ (i)- 5 ] ) ), \
mm256_add4_64( qt[ (i)- 4 ], rb7( qt[ (i)- 3 ] ), \
sb4( qt[ (i)- 2 ] ), sb5( qt[ (i)- 1 ] ) ) ), \
add_elt_b( M, H, (i)-16 ) )
#define Wb0 \
_mm256_add_epi64( \
_mm256_add_epi64( \
@@ -864,95 +833,90 @@ void compress_big( const __m256i *M, const __m256i H[16], __m256i dH[16] )
qt[30] = expand2b( qt, M, H, 30 );
qt[31] = expand2b( qt, M, H, 31 );
xl = _mm256_xor_si256(
_mm256_xor_si256( _mm256_xor_si256( qt[16], qt[17] ),
_mm256_xor_si256( qt[18], qt[19] ) ),
_mm256_xor_si256( _mm256_xor_si256( qt[20], qt[21] ),
_mm256_xor_si256( qt[22], qt[23] ) ) );
xh = _mm256_xor_si256( xl,
_mm256_xor_si256(
_mm256_xor_si256( _mm256_xor_si256( qt[24], qt[25] ),
_mm256_xor_si256( qt[26], qt[27] ) ),
_mm256_xor_si256( _mm256_xor_si256( qt[28], qt[29] ),
_mm256_xor_si256( qt[30], qt[31] ) )));
xl = _mm256_xor_si256(
mm256_xor4( qt[16], qt[17], qt[18], qt[19] ),
mm256_xor4( qt[20], qt[21], qt[22], qt[23] ) );
xh = _mm256_xor_si256( xl, _mm256_xor_si256(
mm256_xor4( qt[24], qt[25], qt[26], qt[27] ),
mm256_xor4( qt[28], qt[29], qt[30], qt[31] ) ) );
dH[ 0] = _mm256_add_epi64(
_mm256_xor_si256( M[0],
_mm256_xor_si256( _mm256_slli_epi64( xh, 5 ),
_mm256_srli_epi64( qt[16], 5 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[24] ), qt[ 0] ));
_mm256_xor_si256( M[0],
_mm256_xor_si256( _mm256_slli_epi64( xh, 5 ),
_mm256_srli_epi64( qt[16], 5 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[24] ), qt[ 0] ) );
dH[ 1] = _mm256_add_epi64(
_mm256_xor_si256( M[1],
_mm256_xor_si256( _mm256_srli_epi64( xh, 7 ),
_mm256_slli_epi64( qt[17], 8 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[25] ), qt[ 1] ));
_mm256_xor_si256( M[1],
_mm256_xor_si256( _mm256_srli_epi64( xh, 7 ),
_mm256_slli_epi64( qt[17], 8 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[25] ), qt[ 1] ) );
dH[ 2] = _mm256_add_epi64(
_mm256_xor_si256( M[2],
_mm256_xor_si256( _mm256_srli_epi64( xh, 5 ),
_mm256_slli_epi64( qt[18], 5 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[26] ), qt[ 2] ));
_mm256_xor_si256( M[2],
_mm256_xor_si256( _mm256_srli_epi64( xh, 5 ),
_mm256_slli_epi64( qt[18], 5 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[26] ), qt[ 2] ) );
dH[ 3] = _mm256_add_epi64(
_mm256_xor_si256( M[3],
_mm256_xor_si256( _mm256_srli_epi64( xh, 1 ),
_mm256_slli_epi64( qt[19], 5 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[27] ), qt[ 3] ));
_mm256_xor_si256( M[3],
_mm256_xor_si256( _mm256_srli_epi64( xh, 1 ),
_mm256_slli_epi64( qt[19], 5 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[27] ), qt[ 3] ) );
dH[ 4] = _mm256_add_epi64(
_mm256_xor_si256( M[4],
_mm256_xor_si256( _mm256_srli_epi64( xh, 3 ),
_mm256_slli_epi64( qt[20], 0 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[28] ), qt[ 4] ));
_mm256_xor_si256( M[4],
_mm256_xor_si256( _mm256_srli_epi64( xh, 3 ),
_mm256_slli_epi64( qt[20], 0 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[28] ), qt[ 4] ) );
dH[ 5] = _mm256_add_epi64(
_mm256_xor_si256( M[5],
_mm256_xor_si256( _mm256_slli_epi64( xh, 6 ),
_mm256_srli_epi64( qt[21], 6 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[29] ), qt[ 5] ));
_mm256_xor_si256( M[5],
_mm256_xor_si256( _mm256_slli_epi64( xh, 6 ),
_mm256_srli_epi64( qt[21], 6 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[29] ), qt[ 5] ) );
dH[ 6] = _mm256_add_epi64(
_mm256_xor_si256( M[6],
_mm256_xor_si256( _mm256_srli_epi64( xh, 4 ),
_mm256_slli_epi64( qt[22], 6 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[30] ), qt[ 6] ));
_mm256_xor_si256( M[6],
_mm256_xor_si256( _mm256_srli_epi64( xh, 4 ),
_mm256_slli_epi64( qt[22], 6 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[30] ), qt[ 6] ) );
dH[ 7] = _mm256_add_epi64(
_mm256_xor_si256( M[7],
_mm256_xor_si256( _mm256_srli_epi64( xh, 11 ),
_mm256_slli_epi64( qt[23], 2 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[31] ), qt[ 7] ));
_mm256_xor_si256( M[7],
_mm256_xor_si256( _mm256_srli_epi64( xh, 11 ),
_mm256_slli_epi64( qt[23], 2 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[31] ), qt[ 7] ) );
dH[ 8] = _mm256_add_epi64( _mm256_add_epi64(
mm256_rol_64( dH[4], 9 ),
mm256_rol_64( dH[4], 9 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[24] ), M[ 8] )),
_mm256_xor_si256( _mm256_slli_epi64( xl, 8 ),
_mm256_xor_si256( qt[23], qt[ 8] ) ) );
dH[ 9] = _mm256_add_epi64( _mm256_add_epi64(
mm256_rol_64( dH[5], 10 ),
mm256_rol_64( dH[5], 10 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[25] ), M[ 9] )),
_mm256_xor_si256( _mm256_srli_epi64( xl, 6 ),
_mm256_xor_si256( qt[16], qt[ 9] ) ) );
dH[10] = _mm256_add_epi64( _mm256_add_epi64(
mm256_rol_64( dH[6], 11 ),
mm256_rol_64( dH[6], 11 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[26] ), M[10] )),
_mm256_xor_si256( _mm256_slli_epi64( xl, 6 ),
_mm256_xor_si256( qt[17], qt[10] ) ) );
dH[11] = _mm256_add_epi64( _mm256_add_epi64(
mm256_rol_64( dH[7], 12 ),
mm256_rol_64( dH[7], 12 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[27] ), M[11] )),
_mm256_xor_si256( _mm256_slli_epi64( xl, 4 ),
_mm256_xor_si256( qt[18], qt[11] ) ) );
dH[12] = _mm256_add_epi64( _mm256_add_epi64(
mm256_rol_64( dH[0], 13 ),
mm256_rol_64( dH[0], 13 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[28] ), M[12] )),
_mm256_xor_si256( _mm256_srli_epi64( xl, 3 ),
_mm256_xor_si256( qt[19], qt[12] ) ) );
dH[13] = _mm256_add_epi64( _mm256_add_epi64(
mm256_rol_64( dH[1], 14 ),
mm256_rol_64( dH[1], 14 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[29] ), M[13] )),
_mm256_xor_si256( _mm256_srli_epi64( xl, 4 ),
_mm256_xor_si256( qt[20], qt[13] ) ) );
dH[14] = _mm256_add_epi64( _mm256_add_epi64(
mm256_rol_64( dH[2], 15 ),
mm256_rol_64( dH[2], 15 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[30] ), M[14] )),
_mm256_xor_si256( _mm256_srli_epi64( xl, 7 ),
_mm256_xor_si256( qt[21], qt[14] ) ) );
dH[15] = _mm256_add_epi64( _mm256_add_epi64(
mm256_rol_64( dH[3], 16 ),
mm256_rol_64( dH[3], 16 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[31] ), M[15] )),
_mm256_xor_si256( _mm256_srli_epi64( xl, 2 ),
_mm256_xor_si256( qt[22], qt[15] ) ) );

algo/bmw/bmw512.c (new file, 53 lines)
View File

@@ -0,0 +1,53 @@
#include "algo-gate-api.h"
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "sph_bmw.h"
void bmw512hash(void *state, const void *input)
{
sph_bmw512_context ctx;
uint32_t hash[32];
sph_bmw512_init( &ctx );
sph_bmw512( &ctx,input, 80 );
sph_bmw512_close( &ctx, hash );
memcpy( state, hash, 32 );
}
int scanhash_bmw512( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19] - 1;
const uint32_t first_nonce = pdata[19];
//const uint32_t Htarg = ptarget[7];
int thr_id = mythr->id; // thr_id arg is deprecated
uint32_t _ALIGN(32) hash64[8];
uint32_t endiandata[32];
for (int i=0; i < 19; i++)
be32enc(&endiandata[i], pdata[i]);
do {
pdata[19] = ++n;
be32enc(&endiandata[19], n);
bmw512hash(hash64, endiandata);
if (((hash64[7]&0xFFFFFF00)==0) &&
fulltest(hash64, ptarget)) {
*hashes_done = n - first_nonce + 1;
return true;
}
} while (n < max_nonce && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
return 0;
}
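In both the scalar and the 4-way scanhash, (hash[7] & 0xFFFFFF00) == 0 is a cheap pre-filter: word 7 is the most significant 32-bit word in the order fulltest compares, so requiring its top 24 bits to be zero rejects almost every nonce before the full 256-bit comparison. Roughly:

   // Accept for the full test only when the top 24 bits of the hash are zero,
   // i.e. the 256-bit value is below 2^232 -- far cheaper than comparing all
   // eight words against the target:
   if ( ( hash64[7] & 0xFFFFFF00 ) == 0 && fulltest( hash64, ptarget ) )
      { /* candidate share, word-for-word comparison already passed */ }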

View File

@@ -242,6 +242,8 @@ void cryptolight_hash(void* output, const void* input, int len) {
free(ctx);
}
#if defined(__AES__)
static void cryptolight_hash_ctx_aes_ni(void* output, const void* input,
int len, struct cryptonight_ctx* ctx)
{
@@ -312,6 +314,8 @@ static void cryptolight_hash_ctx_aes_ni(void* output, const void* input,
oaes_free((OAES_CTX **) &ctx->aes_ctx);
}
#endif
int scanhash_cryptolight( struct work *work,
uint32_t max_nonce, uint64_t *hashes_done, struct thr_info *mythr)
{

View File

@@ -7,6 +7,7 @@
// 2x128
/*
// The result of hashing 10 rounds of initial data which consists of params
// zero padded.
static const uint64_t IV256[] =
@@ -24,13 +25,14 @@ static const uint64_t IV512[] =
0x148FE485FCD398D9, 0xB64445321B017BEF, 0x2FF5781C6A536159, 0x0DBADEA991FA7934,
0xA5A70E75D65C8A2B, 0xBC796576B1C62456, 0xE7989AF11921C8F7, 0xD43E3B447795D246
};
*/
static void transform_2way( cube_2way_context *sp )
{
int r;
const int rounds = sp->rounds;
__m256i x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3;
__m256i x0, x1, x2, x3, x4, x5, x6, x7, y0, y1;
x0 = _mm256_load_si256( (__m256i*)sp->h );
x1 = _mm256_load_si256( (__m256i*)sp->h + 1 );
@@ -47,18 +49,12 @@ static void transform_2way( cube_2way_context *sp )
x5 = _mm256_add_epi32( x1, x5 );
x6 = _mm256_add_epi32( x2, x6 );
x7 = _mm256_add_epi32( x3, x7 );
y0 = x2;
y1 = x3;
y2 = x0;
y3 = x1;
x0 = _mm256_xor_si256( _mm256_slli_epi32( y0, 7 ),
_mm256_srli_epi32( y0, 25 ) );
x1 = _mm256_xor_si256( _mm256_slli_epi32( y1, 7 ),
_mm256_srli_epi32( y1, 25 ) );
x2 = _mm256_xor_si256( _mm256_slli_epi32( y2, 7 ),
_mm256_srli_epi32( y2, 25 ) );
x3 = _mm256_xor_si256( _mm256_slli_epi32( y3, 7 ),
_mm256_srli_epi32( y3, 25 ) );
y0 = x0;
y1 = x1;
x0 = mm256_rol_32( x2, 7 );
x1 = mm256_rol_32( x3, 7 );
x2 = mm256_rol_32( y0, 7 );
x3 = mm256_rol_32( y1, 7 );
x0 = _mm256_xor_si256( x0, x4 );
x1 = _mm256_xor_si256( x1, x5 );
x2 = _mm256_xor_si256( x2, x6 );
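
(The hunk above replaces open-coded shift/XOR rotations with the mm256_rol_32 helper and drops two temporaries. A rotate-left helper is presumably equivalent to the removed form; a minimal sketch under that assumption — the actual simd-utils definition may differ, e.g. it could use VPROLD where AVX-512 is available:)

// Hypothetical equivalent of the rotate helper used above: rotate each
// 32-bit lane left by c bits.
#define mm256_rol_32_ref( v, c ) \
   _mm256_or_si256( _mm256_slli_epi32( v, c ), _mm256_srli_epi32( v, 32-(c) ) )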
@@ -71,18 +67,12 @@ static void transform_2way( cube_2way_context *sp )
x5 = _mm256_add_epi32( x1, x5 );
x6 = _mm256_add_epi32( x2, x6 );
x7 = _mm256_add_epi32( x3, x7 );
y0 = x1;
y1 = x0;
y2 = x3;
y3 = x2;
x0 = _mm256_xor_si256( _mm256_slli_epi32( y0, 11 ),
_mm256_srli_epi32( y0, 21 ) );
x1 = _mm256_xor_si256( _mm256_slli_epi32( y1, 11 ),
_mm256_srli_epi32( y1, 21 ) );
x2 = _mm256_xor_si256( _mm256_slli_epi32( y2, 11 ),
_mm256_srli_epi32( y2, 21 ) );
x3 = _mm256_xor_si256( _mm256_slli_epi32( y3, 11 ),
_mm256_srli_epi32( y3, 21 ) );
y0 = x0;
y1 = x2;
x0 = mm256_rol_32( x1, 11 );
x1 = mm256_rol_32( y0, 11 );
x2 = mm256_rol_32( x3, 11 );
x3 = mm256_rol_32( y1, 11 );
x0 = _mm256_xor_si256( x0, x4 );
x1 = _mm256_xor_si256( x1, x5 );
x2 = _mm256_xor_si256( x2, x6 );
@@ -107,23 +97,40 @@ static void transform_2way( cube_2way_context *sp )
int cube_2way_init( cube_2way_context *sp, int hashbitlen, int rounds,
int blockbytes )
{
const uint64_t* iv = hashbitlen == 512 ? IV512 : IV256;
__m128i* h = (__m128i*)sp->h;
sp->hashlen = hashbitlen/128;
sp->blocksize = blockbytes/16;
sp->rounds = rounds;
sp->pos = 0;
__m256i* h = (__m256i*)sp->h;
h[0] = _mm256_set_epi64x( iv[ 1], iv[ 0], iv[ 1], iv[ 0] );
h[1] = _mm256_set_epi64x( iv[ 3], iv[ 2], iv[ 3], iv[ 2] );
h[2] = _mm256_set_epi64x( iv[ 5], iv[ 4], iv[ 5], iv[ 4] );
h[3] = _mm256_set_epi64x( iv[ 7], iv[ 6], iv[ 7], iv[ 6] );
h[4] = _mm256_set_epi64x( iv[ 9], iv[ 8], iv[ 9], iv[ 8] );
h[5] = _mm256_set_epi64x( iv[11], iv[10], iv[11], iv[10] );
h[6] = _mm256_set_epi64x( iv[13], iv[12], iv[13], iv[12] );
h[7] = _mm256_set_epi64x( iv[15], iv[14], iv[15], iv[14] );
if ( hashbitlen == 512 )
{
h[ 0] = m128_const_64( 0x4167D83E2D538B8B, 0x50F494D42AEA2A61 );
h[ 2] = m128_const_64( 0x50AC5695CC39968E, 0xC701CF8C3FEE2313 );
h[ 4] = m128_const_64( 0x825B453797CF0BEF, 0xA647A8B34D42C787 );
h[ 6] = m128_const_64( 0xA23911AED0E5CD33, 0xF22090C4EEF864D2 );
h[ 8] = m128_const_64( 0xB64445321B017BEF, 0x148FE485FCD398D9 );
h[10] = m128_const_64( 0x0DBADEA991FA7934, 0x2FF5781C6A536159 );
h[12] = m128_const_64( 0xBC796576B1C62456, 0xA5A70E75D65C8A2B );
h[14] = m128_const_64( 0xD43E3B447795D246, 0xE7989AF11921C8F7 );
h[1] = h[ 0]; h[ 3] = h[ 2]; h[ 5] = h[ 4]; h[ 7] = h[ 6];
h[9] = h[ 8]; h[11] = h[10]; h[13] = h[12]; h[15] = h[14];
}
else
{
h[ 0] = m128_const_64( 0x35481EAE63117E71, 0xCCD6F29FEA2BD4B4 );
h[ 2] = m128_const_64( 0xF4CC12BE7E624131, 0xE5D94E6322512D5B );
h[ 4] = m128_const_64( 0x3361DA8CD0720C35, 0x42AF2070C2D0B696 );
h[ 6] = m128_const_64( 0x40E5FBAB4680AC00, 0x8EF8AD8328CCECA4 );
h[ 8] = m128_const_64( 0xF0B266796C859D41, 0x6107FBD5D89041C3 );
h[10] = m128_const_64( 0x93CB628565C892FD, 0x5FA2560309392549 );
h[12] = m128_const_64( 0x85254725774ABFDD, 0x9E4B4E602AF2B5AE );
h[14] = m128_const_64( 0xD6032C0A9CDAF8AF, 0x4AB6AAD615815AEB );
h[1] = h[ 0]; h[ 3] = h[ 2]; h[ 5] = h[ 4]; h[ 7] = h[ 6];
h[9] = h[ 8]; h[11] = h[10]; h[13] = h[12]; h[15] = h[14];
}
return 0;
}
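
(The init change above bakes the precomputed IVs — 10 rounds over the zero-padded params, per the comment — directly into 128-bit constants instead of expanding the IV arrays at run time. m128_const_64 presumably just builds a 128-bit vector from two 64-bit immediates; a plausible equivalent and the correspondence to the removed load, shown as a sketch:)

// Hypothetical equivalent of the helper (actual simd-utils code may differ).
#define m128_const_64_ref( i1, i0 )  _mm_set_epi64x( i1, i0 )

// With the state now addressed as __m128i lanes, each old 256-bit register is
// written as two identical halves, e.g. for the first IV512 pair:
//   old: h256[0] = _mm256_set_epi64x( iv[1], iv[0], iv[1], iv[0] );
//   new: h[0] = h[1] = m128_const_64( iv[1], iv[0] );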
@@ -165,7 +172,7 @@ int cube_2way_close( cube_2way_context *sp, void *output )
for ( i = 0; i < 10; ++i ) transform_2way( sp );
for ( i = 0; i < sp->hashlen; i++ ) hash[i] = sp->h[i];
memcpy( hash, sp->h, sp->hashlen<<5 );
return 0;
}
@@ -198,7 +205,7 @@ int cube_2way_update_close( cube_2way_context *sp, void *output,
for ( i = 0; i < 10; ++i ) transform_2way( sp );
for ( i = 0; i < sp->hashlen; i++ ) hash[i] = sp->h[i];
memcpy( hash, sp->h, sp->hashlen<<5 );
return 0;
}

View File

@@ -16,24 +16,6 @@
#include "simd-utils.h"
#include <stdio.h>
// The result of hashing 10 rounds of initial data which is params and
// mostly zeros.
static const uint64_t IV256[] =
{
0xCCD6F29FEA2BD4B4, 0x35481EAE63117E71, 0xE5D94E6322512D5B, 0xF4CC12BE7E624131,
0x42AF2070C2D0B696, 0x3361DA8CD0720C35, 0x8EF8AD8328CCECA4, 0x40E5FBAB4680AC00,
0x6107FBD5D89041C3, 0xF0B266796C859D41, 0x5FA2560309392549, 0x93CB628565C892FD,
0x9E4B4E602AF2B5AE, 0x85254725774ABFDD, 0x4AB6AAD615815AEB, 0xD6032C0A9CDAF8AF
};
static const uint64_t IV512[] =
{
0x50F494D42AEA2A61, 0x4167D83E2D538B8B, 0xC701CF8C3FEE2313, 0x50AC5695CC39968E,
0xA647A8B34D42C787, 0x825B453797CF0BEF, 0xF22090C4EEF864D2, 0xA23911AED0E5CD33,
0x148FE485FCD398D9, 0xB64445321B017BEF, 0x2FF5781C6A536159, 0x0DBADEA991FA7934,
0xA5A70E75D65C8A2B, 0xBC796576B1C62456, 0xE7989AF11921C8F7, 0xD43E3B447795D246
};
static void transform( cubehashParam *sp )
{
int r;
@@ -53,26 +35,22 @@ static void transform( cubehashParam *sp )
x2 = _mm256_add_epi32( x0, x2 );
x3 = _mm256_add_epi32( x1, x3 );
y0 = x0;
x0 = _mm256_xor_si256( _mm256_slli_epi32( x1, 7 ),
_mm256_srli_epi32( x1, 25 ) );
x1 = _mm256_xor_si256( _mm256_slli_epi32( y0, 7 ),
_mm256_srli_epi32( y0, 25 ) );
x0 = mm256_rol_32( x1, 7 );
x1 = mm256_rol_32( y0, 7 );
x0 = _mm256_xor_si256( x0, x2 );
x1 = _mm256_xor_si256( x1, x3 );
x2 = _mm256_shuffle_epi32( x2, 0x4e );
x3 = _mm256_shuffle_epi32( x3, 0x4e );
x2 = mm256_swap64_128( x2 );
x3 = mm256_swap64_128( x3 );
x2 = _mm256_add_epi32( x0, x2 );
x3 = _mm256_add_epi32( x1, x3 );
y0 = _mm256_permute4x64_epi64( x0, 0x4e );
y1 = _mm256_permute4x64_epi64( x1, 0x4e );
x0 = _mm256_xor_si256( _mm256_slli_epi32( y0, 11 ),
_mm256_srli_epi32( y0, 21 ) );
x1 = _mm256_xor_si256( _mm256_slli_epi32( y1, 11 ),
_mm256_srli_epi32( y1, 21 ) );
y0 = mm256_swap_128( x0 );
y1 = mm256_swap_128( x1 );
x0 = mm256_rol_32( y0, 11 );
x1 = mm256_rol_32( y1, 11 );
x0 = _mm256_xor_si256( x0, x2 );
x1 = _mm256_xor_si256( x1, x3 );
x2 = _mm256_shuffle_epi32( x2, 0xb1 );
x3 = _mm256_shuffle_epi32( x3, 0xb1 );
x2 = mm256_swap32_64( x2 );
x3 = mm256_swap32_64( x3 );
}
_mm256_store_si256( (__m256i*)sp->x, x0 );
@@ -147,37 +125,58 @@ static void transform( cubehashParam *sp )
#endif
} // transform
/*
// The result of hashing 10 rounds of initial data which is params and
// mostly zeros.
static const uint64_t IV256[] =
{
0xCCD6F29FEA2BD4B4, 0x35481EAE63117E71, 0xE5D94E6322512D5B, 0xF4CC12BE7E624131,
0x42AF2070C2D0B696, 0x3361DA8CD0720C35, 0x8EF8AD8328CCECA4, 0x40E5FBAB4680AC00,
0x6107FBD5D89041C3, 0xF0B266796C859D41, 0x5FA2560309392549, 0x93CB628565C892FD,
0x9E4B4E602AF2B5AE, 0x85254725774ABFDD, 0x4AB6AAD615815AEB, 0xD6032C0A9CDAF8AF
};
static const uint64_t IV512[] =
{
0x50F494D42AEA2A61, 0x4167D83E2D538B8B, 0xC701CF8C3FEE2313, 0x50AC5695CC39968E,
0xA647A8B34D42C787, 0x825B453797CF0BEF, 0xF22090C4EEF864D2, 0xA23911AED0E5CD33,
0x148FE485FCD398D9, 0xB64445321B017BEF, 0x2FF5781C6A536159, 0x0DBADEA991FA7934,
0xA5A70E75D65C8A2B, 0xBC796576B1C62456, 0xE7989AF11921C8F7, 0xD43E3B447795D246
};
*/
int cubehashInit(cubehashParam *sp, int hashbitlen, int rounds, int blockbytes)
{
const uint64_t* iv = hashbitlen == 512 ? IV512 : IV256;
__m128i *x = (__m128i*)sp->x;
sp->hashlen = hashbitlen/128;
sp->blocksize = blockbytes/16;
sp->rounds = rounds;
sp->pos = 0;
#if defined(__AVX2__)
__m256i* x = (__m256i*)sp->x;
if ( hashbitlen == 512 )
{
x[0] = _mm256_set_epi64x( iv[ 3], iv[ 2], iv[ 1], iv[ 0] );
x[1] = _mm256_set_epi64x( iv[ 7], iv[ 6], iv[ 5], iv[ 4] );
x[2] = _mm256_set_epi64x( iv[11], iv[10], iv[ 9], iv[ 8] );
x[3] = _mm256_set_epi64x( iv[15], iv[14], iv[13], iv[12] );
x[0] = m128_const_64( 0x4167D83E2D538B8B, 0x50F494D42AEA2A61 );
x[1] = m128_const_64( 0x50AC5695CC39968E, 0xC701CF8C3FEE2313 );
x[2] = m128_const_64( 0x825B453797CF0BEF, 0xA647A8B34D42C787 );
x[3] = m128_const_64( 0xA23911AED0E5CD33, 0xF22090C4EEF864D2 );
x[4] = m128_const_64( 0xB64445321B017BEF, 0x148FE485FCD398D9 );
x[5] = m128_const_64( 0x0DBADEA991FA7934, 0x2FF5781C6A536159 );
x[6] = m128_const_64( 0xBC796576B1C62456, 0xA5A70E75D65C8A2B );
x[7] = m128_const_64( 0xD43E3B447795D246, 0xE7989AF11921C8F7 );
}
else
{
x[0] = m128_const_64( 0x35481EAE63117E71, 0xCCD6F29FEA2BD4B4 );
x[1] = m128_const_64( 0xF4CC12BE7E624131, 0xE5D94E6322512D5B );
x[2] = m128_const_64( 0x3361DA8CD0720C35, 0x42AF2070C2D0B696 );
x[3] = m128_const_64( 0x40E5FBAB4680AC00, 0x8EF8AD8328CCECA4 );
x[4] = m128_const_64( 0xF0B266796C859D41, 0x6107FBD5D89041C3 );
x[5] = m128_const_64( 0x93CB628565C892FD, 0x5FA2560309392549 );
x[6] = m128_const_64( 0x85254725774ABFDD, 0x9E4B4E602AF2B5AE );
x[7] = m128_const_64( 0xD6032C0A9CDAF8AF, 0x4AB6AAD615815AEB );
}
#else
__m128i* x = (__m128i*)sp->x;
x[0] = _mm_set_epi64x( iv[ 1], iv[ 0] );
x[1] = _mm_set_epi64x( iv[ 3], iv[ 2] );
x[2] = _mm_set_epi64x( iv[ 5], iv[ 4] );
x[3] = _mm_set_epi64x( iv[ 7], iv[ 6] );
x[4] = _mm_set_epi64x( iv[ 9], iv[ 8] );
x[5] = _mm_set_epi64x( iv[11], iv[10] );
x[6] = _mm_set_epi64x( iv[13], iv[12] );
x[7] = _mm_set_epi64x( iv[15], iv[14] );
#endif
return SUCCESS;
}

View File

@@ -47,10 +47,6 @@ void myriad_4way_hash( void *output, const void *input )
sha256_4way( &ctx.sha, vhash, 64 );
sha256_4way_close( &ctx.sha, output );
// sha256_4way_close( &ctx.sha, vhash );
// mm128_dintrlv_4x32( output, output+32, output+64, output+96,
// vhash, 256 );
}
int scanhash_myriad_4way( struct work *work, uint32_t max_nonce,
@@ -68,18 +64,10 @@ int scanhash_myriad_4way( struct work *work, uint32_t max_nonce,
__m128i *noncev = (__m128i*)vdata + 19; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
/*
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t _ALIGN(64) endiandata[20];
const uint32_t first_nonce = pdata[19];
uint32_t nonce = first_nonce;
*/
if ( opt_benchmark )
( (uint32_t*)ptarget )[7] = 0x0000ff;
mm128_bswap_intrlv80_4x32( vdata, pdata );
mm128_bswap32_intrlv80_4x32( vdata, pdata );
do {
*noncev = mm128_bswap_32( _mm_set_epi32( n+3,n+2,n+1,n ) );
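
(The rename above, bswap to bswap32, refers to the helper that byte-swaps the 80-byte block header and interleaves it into 4 lanes of 32-bit words. A rough scalar reference under that assumption — the real simd-utils version is vectorized and may differ:)

// Hypothetical scalar reference for mm128_bswap32_intrlv80_4x32: big-endian
// convert each of the 20 header words and replicate it into all 4 lanes.
static inline void bswap32_intrlv80_4x32_ref( uint32_t *dst, const uint32_t *src )
{
   for ( int i = 0; i < 20; i++ )
   {
      const uint32_t w = __builtin_bswap32( src[i] );   // GCC builtin
      dst[4*i+0] = dst[4*i+1] = dst[4*i+2] = dst[4*i+3] = w;
   }
}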

View File

@@ -531,16 +531,17 @@ static const sph_u32 T512[64][16] = {
#define INPUT_BIG \
do { \
const __m256i zero = _mm256_setzero_si256(); \
__m256i db = *buf; \
const sph_u32 *tp = &T512[0][0]; \
m0 = m256_zero; \
m1 = m256_zero; \
m2 = m256_zero; \
m3 = m256_zero; \
m4 = m256_zero; \
m5 = m256_zero; \
m6 = m256_zero; \
m7 = m256_zero; \
m0 = zero; \
m1 = zero; \
m2 = zero; \
m3 = zero; \
m4 = zero; \
m5 = zero; \
m6 = zero; \
m7 = zero; \
for ( int u = 0; u < 64; u++ ) \
{ \
__m256i dm = _mm256_and_si256( db, m256_one_64 ) ; \
@@ -913,9 +914,7 @@ void hamsi512_4way( hamsi_4way_big_context *sc, const void *data, size_t len )
void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst )
{
__m256i *out = (__m256i*)dst;
__m256i pad[1];
size_t u;
int ch, cl;
sph_enc32be( &ch, sc->count_high );
@@ -925,8 +924,8 @@ void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst )
0UL, 0x80UL, 0UL, 0x80UL );
hamsi_big( sc, sc->buf, 1 );
hamsi_big_final( sc, pad );
for ( u = 0; u < 8; u ++ )
out[u] = mm256_bswap_32( sc->h[u] );
mm256_block_bswap_32( (__m256i*)dst, sc->h );
}
#ifdef __cplusplus

View File

@@ -83,7 +83,7 @@ void ExpandAESKey256(__m128i *keys, const __m128i *KeyBuf)
keys[14] = tmp1;
}
#ifdef __SSE4_2__
#if defined(__SSE4_2__)
//#ifdef __AVX__
#define AESENC(i,j) \
@@ -151,7 +151,7 @@ void AES256CBC(__m128i** data, const __m128i** next, __m128i ExpandedKey[][16],
}
}
#else // NO SSE4.2
#else // NO AVX
static inline __m128i AES256Core(__m128i State, const __m128i *ExpandedKey)
{

View File

@@ -166,7 +166,7 @@ bool register_hodl_algo( algo_gate_t* gate )
// return false;
// }
pthread_barrier_init( &hodl_barrier, NULL, opt_n_threads );
gate->optimizations = AES_OPT | SSE42_OPT | AVX2_OPT;
gate->optimizations = AES_OPT | AVX_OPT | AVX2_OPT;
gate->scanhash = (void*)&hodl_scanhash;
gate->get_new_work = (void*)&hodl_get_new_work;
gate->longpoll_rpc_call = (void*)&hodl_longpoll_rpc_call;

View File

@@ -17,7 +17,7 @@ void GenerateGarbageCore( CacheEntry *Garbage, int ThreadID, int ThreadCount,
const uint32_t StartChunk = ThreadID * Chunk;
const uint32_t EndChunk = StartChunk + Chunk;
#ifdef __SSE4_2__
#if defined(__SSE4_2__)
//#ifdef __AVX__
uint64_t* TempBufs[ SHA512_PARALLEL_N ] ;
uint64_t* desination[ SHA512_PARALLEL_N ];
@@ -64,7 +64,7 @@ void Rev256(uint32_t *Dest, const uint32_t *Src)
int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
#ifdef __SSE4_2__
#if defined(__SSE4_2__)
//#ifdef __AVX__
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
@@ -140,7 +140,7 @@ int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce,
return(0);
#else // no SSE4.2
#else // no AVX
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
@@ -148,6 +148,7 @@ int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce,
CacheEntry *Garbage = (CacheEntry*)hodl_scratchbuf;
CacheEntry Cache;
uint32_t CollisionCount = 0;
int threadNumber = mythr->id;
swab32_array( BlockHdr, pdata, 20 );
// Search for pattern in pseudorandom data

@@ -205,7 +206,7 @@ int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce,
*hashes_done = CollisionCount;
return(0);
#endif // SSE4.2 else
#endif // AVX else
}

View File

@@ -23,6 +23,7 @@ typedef struct
__m256i h[8];
__m256i w[80];
#elif defined(__SSE4_2__)
//#elif defined(__AVX__)
__m128i h[8];
__m128i w[80];
#else
@@ -32,7 +33,8 @@ typedef struct
#ifdef __AVX2__
#define SHA512_PARALLEL_N 8
#elif defined(__SSE$_2__)
#elif defined(__SSE4_2__)
//#elif defined(__AVX__)
#define SHA512_PARALLEL_N 4
#else
#define SHA512_PARALLEL_N 1 // dummy value

View File

@@ -1,6 +1,6 @@
#ifndef __AVX2__
#ifdef __SSE4_2__
#if defined(__SSE4_2__)
//#ifdef __AVX__
//Dependencies

View File

@@ -6,7 +6,7 @@
void ExpandAESKey256(__m128i *keys, const __m128i *KeyBuf);
#ifdef __SSE4_2__
#if defined(__SSE4_2__)
//#ifdef __AVX__
#define AES_PARALLEL_N 8

View File

@@ -3,7 +3,6 @@
#include <stdint.h>
#include <string.h>
#include <stdio.h>
//#include "avxdefs.h"
#if defined(JHA_4WAY)
@@ -13,9 +12,6 @@
#include "algo/keccak/keccak-hash-4way.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
//static __thread keccak512_4way_context jha_kec_mid
// __attribute__ ((aligned (64)));
void jha_hash_4way( void *out, const void *input )
{
uint64_t hash0[8] __attribute__ ((aligned (64)));
@@ -46,7 +42,7 @@ void jha_hash_4way( void *out, const void *input )
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256(
vh[0], _mm256_set1_epi64x( 1 ) ), m256_zero );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
init_groestl( &ctx_groestl, 64 );
update_and_final_groestl( &ctx_groestl, (char*)hash0,
(char*)hash0, 512 );
@@ -59,7 +55,7 @@ void jha_hash_4way( void *out, const void *input )
init_groestl( &ctx_groestl, 64 );
update_and_final_groestl( &ctx_groestl, (char*)hash3,
(char*)hash3, 512 );
mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
skein512_4way_init( &ctx_skein );
skein512_4way( &ctx_skein, vhash, 64 );
@@ -79,8 +75,6 @@ void jha_hash_4way( void *out, const void *input )
for ( int i = 0; i < 8; i++ )
casti_m256i( out, i ) = _mm256_blendv_epi8( vhA[i], vhB[i], vh_mask );
}
// mm256_dintrlv_4x64( out, out+32, out+64, out+96, vhash, 256 );
}
int scanhash_jha_4way( struct work *work, uint32_t max_nonce,
@@ -115,12 +109,7 @@ int scanhash_jha_4way( struct work *work, uint32_t max_nonce,
0
};
// for ( int i=0; i < 19; i++ )
// be32enc( &endiandata[i], pdata[i] );
// uint64_t *edata = (uint64_t*)endiandata;
// mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
for ( int m = 0; m < 6; m++ )
{
@@ -130,26 +119,17 @@ int scanhash_jha_4way( struct work *work, uint32_t max_nonce,
do {
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
// be32enc( noncep, n );
// be32enc( noncep+2, n+1 );
// be32enc( noncep+4, n+2 );
// be32enc( noncep+6, n+3 );
jha_hash_4way( hash, vdata );
pdata[19] = n;
// for ( int i = 0; i < 4; i++ )
// if ( ( !( (hash+(i<<3))[7] & mask ) == 0 )
// && fulltest( hash+(i<<3), ptarget ) )
for ( int i = 0; i < 4; i++ ) if ( !( (hash7[i] & mask ) == 0 ) )
{
mm256_extr_lane_4x64( lane_hash, hash, i, 256 );
extr_lane_4x64( lane_hash, hash, i, 256 );
if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;
submit_lane_solution( work, lane_hash, mythr, i );
// nonces[ num_found++ ] = n+i;
// work_set_target_ratio( work, hash+(i<<3) );
}
}
n += 4;

View File

@@ -20,7 +20,7 @@ int scanhash_keccak_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t hash[8*4] __attribute__ ((aligned (32)));
uint32_t hash[16*4] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[25]); // 3*8+1
uint32_t *pdata = work->data;
@@ -31,7 +31,7 @@ int scanhash_keccak_4way( struct work *work, uint32_t max_nonce,
// const uint32_t Htarg = ptarget[7];
int thr_id = mythr->id; // thr_id arg is deprecated
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do {
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
@@ -41,7 +41,7 @@ int scanhash_keccak_4way( struct work *work, uint32_t max_nonce,
for ( int lane = 0; lane < 4; lane++ )
if ( ( ( hash7[ lane<<1 ] & 0xFFFFFF00 ) == 0 ) )
{
mm256_extr_lane_4x64( lane_hash, hash, lane, 256 );
extr_lane_4x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) )
{
pdata[19] = n + lane;
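
(extr_lane_4x64, renamed from mm256_extr_lane_4x64, pulls one lane's result out of the 4-way interleaved hash so it can be fulltest-ed. A scalar sketch of that layout, assuming word i of lane l sits at index 4*i+l:)

// Hypothetical scalar reference for extr_lane_4x64 (256 bits extracted here).
static inline void extr_lane_4x64_ref( void *dst, const void *src,
                                        const int lane, const int bit_len )
{
   uint64_t *d = (uint64_t*)dst;
   const uint64_t *s = (const uint64_t*)src;
   for ( int i = 0; i < bit_len/64; i++ )
      d[i] = s[ 4*i + lane ];
}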

View File

@@ -77,6 +77,24 @@ static const sph_u32 V_INIT[5][8] = {
}
};
#if SPH_LUFFA_PARALLEL
static const sph_u64 RCW010[8] = {
SPH_C64(0xb6de10ed303994a6), SPH_C64(0x70f47aaec0e65299),
SPH_C64(0x0707a3d46cc33a12), SPH_C64(0x1c1e8f51dc56983e),
SPH_C64(0x707a3d451e00108f), SPH_C64(0xaeb285627800423d),
SPH_C64(0xbaca15898f5b7882), SPH_C64(0x40a46f3e96e1db12)
};
static const sph_u64 RCW014[8] = {
SPH_C64(0x01685f3de0337818), SPH_C64(0x05a17cf4441ba90d),
SPH_C64(0xbd09caca7f34d442), SPH_C64(0xf4272b289389217f),
SPH_C64(0x144ae5cce5a8bce6), SPH_C64(0xfaa7ae2b5274baf4),
SPH_C64(0x2e48f1c126889ba7), SPH_C64(0xb923c7049a226e9d)
};
#else
static const sph_u32 RC00[8] = {
SPH_C32(0x303994a6), SPH_C32(0xc0e65299),
SPH_C32(0x6cc33a12), SPH_C32(0xdc56983e),
@@ -105,20 +123,18 @@ static const sph_u32 RC14[8] = {
SPH_C32(0x2e48f1c1), SPH_C32(0xb923c704)
};
#if SPH_LUFFA_PARALLEL
static const sph_u64 RCW010[8] = {
SPH_C64(0xb6de10ed303994a6), SPH_C64(0x70f47aaec0e65299),
SPH_C64(0x0707a3d46cc33a12), SPH_C64(0x1c1e8f51dc56983e),
SPH_C64(0x707a3d451e00108f), SPH_C64(0xaeb285627800423d),
SPH_C64(0xbaca15898f5b7882), SPH_C64(0x40a46f3e96e1db12)
static const sph_u32 RC30[8] = {
SPH_C32(0xb213afa5), SPH_C32(0xc84ebe95),
SPH_C32(0x4e608a22), SPH_C32(0x56d858fe),
SPH_C32(0x343b138f), SPH_C32(0xd0ec4e3d),
SPH_C32(0x2ceb4882), SPH_C32(0xb3ad2208)
};
static const sph_u64 RCW014[8] = {
SPH_C64(0x01685f3de0337818), SPH_C64(0x05a17cf4441ba90d),
SPH_C64(0xbd09caca7f34d442), SPH_C64(0xf4272b289389217f),
SPH_C64(0x144ae5cce5a8bce6), SPH_C64(0xfaa7ae2b5274baf4),
SPH_C64(0x2e48f1c126889ba7), SPH_C64(0xb923c7049a226e9d)
static const sph_u32 RC34[8] = {
SPH_C32(0xe028c9bf), SPH_C32(0x44756f91),
SPH_C32(0x7e8fce32), SPH_C32(0x956548be),
SPH_C32(0xfe191be2), SPH_C32(0x3cb226e5),
SPH_C32(0x5944a28e), SPH_C32(0xa1c4c355)
};
#endif
@@ -137,19 +153,6 @@ static const sph_u32 RC24[8] = {
SPH_C32(0x36eda57f), SPH_C32(0x703aace7)
};
static const sph_u32 RC30[8] = {
SPH_C32(0xb213afa5), SPH_C32(0xc84ebe95),
SPH_C32(0x4e608a22), SPH_C32(0x56d858fe),
SPH_C32(0x343b138f), SPH_C32(0xd0ec4e3d),
SPH_C32(0x2ceb4882), SPH_C32(0xb3ad2208)
};
static const sph_u32 RC34[8] = {
SPH_C32(0xe028c9bf), SPH_C32(0x44756f91),
SPH_C32(0x7e8fce32), SPH_C32(0x956548be),
SPH_C32(0xfe191be2), SPH_C32(0x3cb226e5),
SPH_C32(0x5944a28e), SPH_C32(0xa1c4c355)
};
#if SPH_LUFFA_PARALLEL

View File

@@ -44,11 +44,11 @@ void allium_4way_hash( void *state, const void *input )
blake256_4way( &ctx.blake, input + (64<<2), 16 );
blake256_4way_close( &ctx.blake, vhash32 );
mm256_rintrlv_4x32_4x64( vhash64, vhash32, 256 );
rintrlv_4x32_4x64( vhash64, vhash32, 256 );
keccak256_4way( &ctx.keccak, vhash64, 32 );
keccak256_4way_close( &ctx.keccak, vhash64 );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
LYRA2RE( hash0, 32, hash0, 32, hash0, 32, 1, 8, 8 );
LYRA2RE( hash1, 32, hash1, 32, hash1, 32, 1, 8, 8 );
@@ -68,12 +68,12 @@ void allium_4way_hash( void *state, const void *input )
LYRA2RE( hash2, 32, hash2, 32, hash2, 32, 1, 8, 8 );
LYRA2RE( hash3, 32, hash3, 32, hash3, 32, 1, 8, 8 );
mm256_intrlv_4x64( vhash64, hash0, hash1, hash2, hash3, 256 );
intrlv_4x64( vhash64, hash0, hash1, hash2, hash3, 256 );
skein256_4way( &ctx.skein, vhash64, 32 );
skein256_4way_close( &ctx.skein, vhash64 );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
update_and_final_groestl256( &ctx.groestl, state, hash0, 256 );
memcpy( &ctx.groestl, &allium_4way_ctx.groestl,
@@ -103,7 +103,7 @@ int scanhash_allium_4way( struct work *work, uint32_t max_nonce,
if ( opt_benchmark )
( (uint32_t*)ptarget )[7] = 0x0000ff;
mm128_bswap_intrlv80_4x32( vdata, pdata );
mm128_bswap32_intrlv80_4x32( vdata, pdata );
blake256_4way_init( &allium_4way_ctx.blake );
blake256_4way( &allium_4way_ctx.blake, vdata, 64 );

View File

@@ -5,7 +5,7 @@
#include <memory.h>
#include <mm_malloc.h>
#include "lyra2.h"
#include "algo/blake/sph_blake.h"
//#include "algo/blake/sph_blake.h"
#include "algo/blake/blake-hash-4way.h"
__thread uint64_t* lyra2h_4way_matrix;
@@ -64,7 +64,7 @@ int scanhash_lyra2h_4way( struct work *work, uint32_t max_nonce,
if ( opt_benchmark )
ptarget[7] = 0x0000ff;
mm128_bswap_intrlv80_4x32( vdata, pdata );
mm128_bswap32_intrlv80_4x32( vdata, pdata );
lyra2h_4way_midstate( vdata );
do {

View File

@@ -42,12 +42,12 @@ void lyra2rev2_4way_hash( void *state, const void *input )
blake256_4way( &ctx.blake, input + (64<<2), 16 );
blake256_4way_close( &ctx.blake, vhash );
mm256_rintrlv_4x32_4x64( vhash64, vhash, 256 );
rintrlv_4x32_4x64( vhash64, vhash, 256 );
keccak256_4way( &ctx.keccak, vhash64, 32 );
keccak256_4way_close( &ctx.keccak, vhash64 );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash0, (const byte*) hash0, 32 );
cubehashInit( &ctx.cube, 256, 16, 32 );
@@ -62,12 +62,12 @@ void lyra2rev2_4way_hash( void *state, const void *input )
LYRA2REV2( l2v2_wholeMatrix, hash2, 32, hash2, 32, hash2, 32, 1, 4, 4 );
LYRA2REV2( l2v2_wholeMatrix, hash3, 32, hash3, 32, hash3, 32, 1, 4, 4 );
mm256_intrlv_4x64( vhash64, hash0, hash1, hash2, hash3, 256 );
intrlv_4x64( vhash64, hash0, hash1, hash2, hash3, 256 );
skein256_4way( &ctx.skein, vhash64, 32 );
skein256_4way_close( &ctx.skein, vhash64 );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
cubehashInit( &ctx.cube, 256, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash0, (const byte*) hash0, 32 );
@@ -102,7 +102,7 @@ int scanhash_lyra2rev2_4way( struct work *work, uint32_t max_nonce,
if ( opt_benchmark )
( (uint32_t*)ptarget )[7] = 0x0000ff;
mm128_bswap_intrlv80_4x32( vdata, pdata );
mm128_bswap32_intrlv80_4x32( vdata, pdata );
blake256_4way_init( &l2v2_4way_ctx.blake );
blake256_4way( &l2v2_4way_ctx.blake, vdata, 64 );

View File

@@ -41,7 +41,7 @@ void lyra2rev3_8way_hash( void *state, const void *input )
blake256_8way( &ctx.blake, input, 80 );
blake256_8way_close( &ctx.blake, vhash );
mm256_dintrlv_8x32( hash0, hash1, hash2, hash3,
dintrlv_8x32( hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7, vhash, 256 );
LYRA2REV3( l2v3_wholeMatrix, hash0, 32, hash0, 32, hash0, 32, 1, 4, 4 );
@@ -78,7 +78,7 @@ void lyra2rev3_8way_hash( void *state, const void *input )
LYRA2REV3( l2v3_wholeMatrix, hash6, 32, hash6, 32, hash6, 32, 1, 4, 4 );
LYRA2REV3( l2v3_wholeMatrix, hash7, 32, hash7, 32, hash7, 32, 1, 4, 4 );
mm256_intrlv_8x32( vhash, hash0, hash1, hash2, hash3,
intrlv_8x32( vhash, hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7, 256 );
bmw256_8way( &ctx.bmw, vhash, 32 );
@@ -104,7 +104,7 @@ int scanhash_lyra2rev3_8way( struct work *work, uint32_t max_nonce,
if ( opt_benchmark )
( (uint32_t*)ptarget )[7] = 0x0000ff;
mm256_bswap_intrlv80_8x32( vdata, pdata );
mm256_bswap32_intrlv80_8x32( vdata, pdata );
do
{
*noncev = mm256_bswap_32( _mm256_set_epi32( n+7, n+6, n+5, n+4,
@@ -115,7 +115,7 @@ int scanhash_lyra2rev3_8way( struct work *work, uint32_t max_nonce,
for ( int lane = 0; lane < 8; lane++ ) if ( hash7[lane] <= Htarg )
{
mm256_extr_lane_8x32( lane_hash, hash, lane, 256 );
extr_lane_8x32( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;
@@ -204,7 +204,7 @@ int scanhash_lyra2rev3_4way( struct work *work, uint32_t max_nonce,
if ( opt_benchmark )
( (uint32_t*)ptarget )[7] = 0x0000ff;
mm128_bswap_intrlv80_4x32( vdata, pdata );
mm128_bswap32_intrlv80_4x32( vdata, pdata );
do
{
*noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) );

View File

@@ -60,7 +60,7 @@ int scanhash_lyra2z_4way( struct work *work, uint32_t max_nonce,
if ( opt_benchmark )
ptarget[7] = 0x0000ff;
mm128_bswap_intrlv80_4x32( vdata, pdata );
mm128_bswap32_intrlv80_4x32( vdata, pdata );
lyra2z_4way_midstate( vdata );
do {
@@ -119,8 +119,8 @@ void lyra2z_8way_hash( void *state, const void *input )
blake256_8way( &ctx_blake, input + (64*8), 16 );
blake256_8way_close( &ctx_blake, vhash );
mm256_dintrlv_8x32( hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7, vhash, 256 );
dintrlv_8x32( hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7, vhash, 256 );
LYRA2Z( lyra2z_8way_matrix, hash0, 32, hash0, 32, hash0, 32, 8, 8, 8 );
LYRA2Z( lyra2z_8way_matrix, hash1, 32, hash1, 32, hash1, 32, 8, 8, 8 );
@@ -157,7 +157,7 @@ int scanhash_lyra2z_8way( struct work *work, uint32_t max_nonce,
if ( opt_benchmark )
ptarget[7] = 0x0000ff;
mm256_bswap_intrlv80_8x32( vdata, pdata );
mm256_bswap32_intrlv80_8x32( vdata, pdata );
lyra2z_8way_midstate( vdata );
do {

View File

@@ -69,13 +69,13 @@ void phi2_hash_4way( void *state, const void *input )
LYRA2RE( &hashA[3][0], 32, &hashB[3][0], 32, &hashB[3][0], 32, 1, 8, 8 );
LYRA2RE( &hashA[3][8], 32, &hashB[3][8], 32, &hashB[3][8], 32, 1, 8, 8 );
mm256_intrlv_4x64( vhash, hashA[0], hashA[1], hashA[2], hashA[3], 512 );
intrlv_4x64( vhash, hashA[0], hashA[1], hashA[2], hashA[3], 512 );
jh512_4way_init( &ctx.jh );
jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhash );
mm256_dintrlv_4x64( hash[0], hash[1], hash[2], hash[3], vhash, 512 );
dintrlv_4x64( hash[0], hash[1], hash[2], hash[3], vhash, 512 );
if ( hash[0][0] & 1 )
{
@@ -141,7 +141,7 @@ void phi2_hash_4way( void *state, const void *input )
(const BitSequence *)hash[3], 512 );
}
mm256_intrlv_4x64( vhash, hash[0], hash[1], hash[2], hash[3], 512 );
intrlv_4x64( vhash, hash[0], hash[1], hash[2], hash[3], 512 );
skein512_4way_init( &ctx.skein );
skein512_4way( &ctx.skein, vhash, 64 );
@@ -217,7 +217,7 @@ int scanhash_phi2_4way( struct work *work, uint32_t max_nonce,
for ( int lane = 0; lane < 4; lane++ ) if ( hash7[ lane<<1 ] < Htarg )
{
mm256_extr_lane_4x64( lane_hash, hash, lane, 256 );
extr_lane_4x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;

View File

@@ -323,7 +323,7 @@ int scanhash_m7m_hash( struct work* work, uint64_t max_nonce,
mpz_clears(magipi, magisw, product, bns0, bns1, NULL);
*hashes_done = n - first_nonce + 1;
return rc;
return 0;
}
bool register_m7m_algo( algo_gate_t *gate )

View File

@@ -12,9 +12,6 @@
#include "algo/keccak/keccak-hash-4way.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
// no improvement with midstate
//static __thread blake512_4way_context ctx_mid;
void nist5hash_4way( void *out, const void *input )
{
uint64_t hash0[8] __attribute__ ((aligned (64)));
@@ -28,14 +25,11 @@ void nist5hash_4way( void *out, const void *input )
skein512_4way_context ctx_skein;
keccak512_4way_context ctx_keccak;
// memcpy( &ctx_blake, &ctx_mid, sizeof(ctx_mid) );
// blake512_4way( &ctx_blake, input + (64<<2), 16 );
blake512_4way_init( &ctx_blake );
blake512_4way( &ctx_blake, input, 80 );
blake512_4way_close( &ctx_blake, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
init_groestl( &ctx_groestl, 64 );
update_and_final_groestl( &ctx_groestl, (char*)hash0,
@@ -50,7 +44,7 @@ void nist5hash_4way( void *out, const void *input )
update_and_final_groestl( &ctx_groestl, (char*)hash3,
(const char*)hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
jh512_4way_init( &ctx_jh );
jh512_4way( &ctx_jh, vhash, 64 );
@@ -72,13 +66,12 @@ int scanhash_nist5_4way( struct work *work, uint32_t max_nonce,
uint32_t *hash7 = &(hash[25]);
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
uint32_t *noncep = vdata + 73; // 9*8 + 1
__m256i *noncev = (__m256i*)vdata + 9; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
uint64_t htmax[] = { 0,
@@ -95,15 +88,7 @@ int scanhash_nist5_4way( struct work *work, uint32_t max_nonce,
0xFFFF0000,
0 };
// we need bigendian data...
swab32_array( endiandata, pdata, 20 );
uint64_t *edata = (uint64_t*)endiandata;
mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
// precalc midstate
// blake512_4way_init( &ctx_mid );
// blake512_4way( &ctx_mid, vdata, 64 );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
for ( int m=0; m < 6; m++ )
{
@@ -112,17 +97,15 @@ int scanhash_nist5_4way( struct work *work, uint32_t max_nonce,
uint32_t mask = masks[m];
do {
be32enc( noncep, n );
be32enc( noncep+2, n+1 );
be32enc( noncep+4, n+2 );
be32enc( noncep+6, n+3 );
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
nist5hash_4way( hash, vdata );
for ( int lane = 0; lane < 4; lane++ )
if ( ( hash7[ lane ] & mask ) == 0 )
{
mm256_extr_lane_4x64( lane_hash, hash, lane, 256 );
extr_lane_4x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;

View File

@@ -50,6 +50,7 @@ void anime_4way_hash( void *state, const void *input )
__m256i vh_mask;
const uint32_t mask = 8;
const __m256i bit3_mask = _mm256_set1_epi64x( 8 );
const __m256i zero = _mm256_setzero_si256();
anime_4way_ctx_holder ctx;
memcpy( &ctx, &anime_4way_ctx, sizeof(anime_4way_ctx) );
@@ -59,10 +60,9 @@ void anime_4way_hash( void *state, const void *input )
blake512_4way( &ctx.blake, vhash, 64 );
blake512_4way_close( &ctx.blake, vhash );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ),
m256_zero );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
if ( hash0[0] & mask )
{
@@ -88,7 +88,7 @@ void anime_4way_hash( void *state, const void *input )
(char*)hash3, 512 );
}
mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
if ( mm256_anybits0( vh_mask ) )
{
@@ -98,7 +98,7 @@ void anime_4way_hash( void *state, const void *input )
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -109,13 +109,12 @@ void anime_4way_hash( void *state, const void *input )
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhash );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ),
m256_zero );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero );
if ( mm256_anybits1( vh_mask ) )
{
@@ -139,8 +138,7 @@ void anime_4way_hash( void *state, const void *input )
skein512_4way( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhash );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ),
m256_zero );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero );
if ( mm256_anybits1( vh_mask ) )
{
@@ -157,7 +155,7 @@ void anime_4way_hash( void *state, const void *input )
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );
mm256_dintrlv_4x64( state, state+32, state+64, state+96, vhash, 256 );
dintrlv_4x64( state, state+32, state+64, state+96, vhash, 256 );
}
int scanhash_anime_4way( struct work *work, uint32_t max_nonce,
@@ -189,7 +187,7 @@ int scanhash_anime_4way( struct work *work, uint32_t max_nonce,
0
};
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
for (int m=0; m < 6; m++)
if (Htarg <= htmax[m])

View File

@@ -67,7 +67,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
bmw512_4way( &ctx.bmw, input, 80 );
bmw512_4way_close( &ctx.bmw, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash0, 64 );
@@ -84,7 +84,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
// first fork, A is groestl serial, B is skein parallel.
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], vmask ),
m256_zero );
@@ -116,7 +116,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
(char*)hash3, 512 );
// }
mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
// B
@@ -158,7 +158,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash0,
@@ -186,7 +186,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
cubehashUpdateDigest( &ctx.cube, (BitSequence *)hash3,
(const BitSequence *)hash3, 64 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
// A= keccak parallel, B= jh parallel
@@ -209,7 +209,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512 ( &ctx.shavite, hash0, 64 );
@@ -240,7 +240,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
// A is whirlpool serial, B is haval parallel.
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], vmask ),
m256_zero );
@@ -271,7 +271,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
sph_whirlpool_close( &ctx.whirlpool, hash3 );
// }
mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
// B
@@ -285,7 +285,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -300,13 +300,13 @@ extern void hmq1725_4way_hash(void *state, const void *input)
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *)hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
blake512_4way_init( &ctx.blake );
blake512_4way( &ctx.blake, vhash, 64 );
blake512_4way_close( &ctx.blake, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// shavite & luffa, both serial, select individually.
@@ -362,13 +362,13 @@ extern void hmq1725_4way_hash(void *state, const void *input)
(const BitSequence *)hash3, 64 );
}
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -438,13 +438,13 @@ extern void hmq1725_4way_hash(void *state, const void *input)
(const BitSequence *)hash3, 512 );
}
mm128_intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
shabal512_4way_init( &ctx.shabal );
shabal512_4way( &ctx.shabal, vhash, 64 );
shabal512_4way_close( &ctx.shabal, vhash );
mm128_dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash0, 64 );
@@ -461,7 +461,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
// A = fugue serial, B = sha512 parallel
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], vmask ),
m256_zero );
@@ -491,7 +491,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
sph_fugue512_close( &ctx.fugue, hash3 );
// }
mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
// if ( mm256_any_clr_256( vh_mask ) )
// {
@@ -502,7 +502,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -513,7 +513,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
sha512_4way_init( &ctx.sha512 );
sha512_4way( &ctx.sha512, vhash, 64 );
@@ -524,7 +524,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], vmask ),
m256_zero );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// if ( mm256_any_set_256( vh_mask ) ) //4
// {
@@ -559,7 +559,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
sph_whirlpool_close( &ctx.whirlpool, hash3 );
// }
mm256_intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, 512 );
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );
@@ -589,7 +589,7 @@ int scanhash_hmq1725_4way( struct work *work, uint32_t max_nonce,
uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
0xFFFFF000, 0xFFFF0000, 0 };
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
for ( int m = 0; m < 6; m++ ) if ( Htarg <= htmax[m] )
{
uint32_t mask = masks[ m ];

View File

@@ -51,6 +51,7 @@ void quark_4way_hash( void *state, const void *input )
quark_4way_ctx_holder ctx;
const __m256i bit3_mask = _mm256_set1_epi64x( 8 );
const uint32_t mask = 8;
const __m256i zero = _mm256_setzero_si256();
memcpy( &ctx, &quark_4way_ctx, sizeof(quark_4way_ctx) );
@@ -60,10 +61,9 @@ void quark_4way_hash( void *state, const void *input )
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ),
m256_zero );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
if ( hash0[0] & mask )
{
@@ -89,7 +89,7 @@ void quark_4way_hash( void *state, const void *input )
(char*)hash3, 512 );
}
mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
if ( mm256_anybits0( vh_mask ) )
{
@@ -99,7 +99,7 @@ void quark_4way_hash( void *state, const void *input )
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -110,13 +110,12 @@ void quark_4way_hash( void *state, const void *input )
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhash );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ),
m256_zero );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero );
if ( mm256_anybits1( vh_mask ) )
{
@@ -141,8 +140,7 @@ void quark_4way_hash( void *state, const void *input )
skein512_4way( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhash );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ),
m256_zero );
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero );
if ( mm256_anybits1( vh_mask ) )
{
@@ -179,7 +177,7 @@ int scanhash_quark_4way( struct work *work, uint32_t max_nonce,
__m256i *noncev = (__m256i*)vdata + 9; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
@@ -191,7 +189,7 @@ int scanhash_quark_4way( struct work *work, uint32_t max_nonce,
for ( int i = 0; i < 4; i++ )
if ( ( hash7[ i<<1 ] & 0xFFFFFF00 ) == 0 )
{
mm256_extr_lane_4x64( lane_hash, hash, i, 256 );
extr_lane_4x64( lane_hash, hash, i, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;

View File

@@ -39,7 +39,7 @@ void deep_2way_hash( void *output, const void *input )
memcpy( &ctx, &deep_2way_ctx, sizeof(deep_2way_ctx) );
luffa_2way_update( &ctx.luffa, input + (64<<1), 16 );
luffa_2way_close( &ctx.luffa, vhash );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
cubehashUpdateDigest( &ctx.cube, (byte*)hash0,
(const byte*) hash0, 64 );
@@ -86,7 +86,7 @@ int scanhash_deep_2way( struct work *work,uint32_t max_nonce,
casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
uint64_t *edata = (uint64_t*)endiandata;
mm256_intrlv_2x128( (uint64_t*)vdata, edata, edata, 640 );
intrlv_2x128( (uint64_t*)vdata, edata, edata, 640 );
luffa_2way_init( &deep_2way_ctx.luffa, 512 );
luffa_2way_update( &deep_2way_ctx.luffa, vdata, 64 );

View File

@@ -41,7 +41,7 @@ void qubit_2way_hash( void *output, const void *input )
memcpy( &ctx, &qubit_2way_ctx, sizeof(qubit_2way_ctx) );
luffa_2way_update( &ctx.luffa, input + (64<<1), 16 );
luffa_2way_close( &ctx.luffa, vhash );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
cubehashUpdateDigest( &ctx.cube, (byte*)hash0,
(const byte*) hash0, 64 );
@@ -55,9 +55,9 @@ void qubit_2way_hash( void *output, const void *input )
sph_shavite512( &ctx.shavite, hash1, 64 );
sph_shavite512_close( &ctx.shavite, hash1 );
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
(const BitSequence *) hash0, 512 );
@@ -92,7 +92,7 @@ int scanhash_qubit_2way( struct work *work,uint32_t max_nonce,
casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
uint64_t *edata = (uint64_t*)endiandata;
mm256_intrlv_2x128( (uint64_t*)vdata, edata, edata, 640 );
intrlv_2x128( (uint64_t*)vdata, edata, edata, 640 );
luffa_2way_init( &qubit_2way_ctx.luffa, 512 );
luffa_2way_update( &qubit_2way_ctx.luffa, vdata, 64 );

View File

@@ -40,9 +40,9 @@ void lbry_8way_hash( void* output, const void* input )
sha256_8way_close( &ctx_sha256, vhashA );
// reinterleave to do sha512 4-way 64 bit twice.
mm256_dintrlv_8x32( h0, h1, h2, h3, h4, h5, h6, h7, vhashA, 256 );
mm256_intrlv_4x64( vhashA, h0, h1, h2, h3, 256 );
mm256_intrlv_4x64( vhashB, h4, h5, h6, h7, 256 );
dintrlv_8x32( h0, h1, h2, h3, h4, h5, h6, h7, vhashA, 256 );
intrlv_4x64( vhashA, h0, h1, h2, h3, 256 );
intrlv_4x64( vhashB, h4, h5, h6, h7, 256 );
sha512_4way_init( &ctx_sha512 );
sha512_4way( &ctx_sha512, vhashA, 32 );
@@ -53,9 +53,9 @@ void lbry_8way_hash( void* output, const void* input )
sha512_4way_close( &ctx_sha512, vhashB );
// back to 8-way 32 bit
mm256_dintrlv_4x64( h0, h1, h2, h3, vhashA, 512 );
mm256_dintrlv_4x64( h4, h5, h6, h7, vhashB, 512 );
mm256_intrlv_8x32( vhashA, h0, h1, h2, h3, h4, h5, h6, h7, 512 );
dintrlv_4x64( h0, h1, h2, h3, vhashA, 512 );
dintrlv_4x64( h4, h5, h6, h7, vhashB, 512 );
intrlv_8x32( vhashA, h0, h1, h2, h3, h4, h5, h6, h7, 512 );
ripemd160_8way_init( &ctx_ripemd );
ripemd160_8way( &ctx_ripemd, vhashA, 32 );
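
(The comments above describe the lane-width conversion: sha256 runs 8 lanes of 32-bit words while sha512 runs 4 lanes of 64-bit words, so the 8x32 hashes are deinterleaved, regrouped as two 4x64 batches, then regrouped back. A scalar sketch of the 4x64 interleave, assuming the usual word-i-of-lane-l at index 4*i+l layout:)

// Hypothetical scalar reference for intrlv_4x64.
static inline void intrlv_4x64_ref( uint64_t *dst,
        const uint64_t *s0, const uint64_t *s1,
        const uint64_t *s2, const uint64_t *s3, const int bit_len )
{
   for ( int i = 0; i < bit_len/64; i++ )
   {
      dst[4*i+0] = s0[i];  dst[4*i+1] = s1[i];
      dst[4*i+2] = s2[i];  dst[4*i+3] = s3[i];
   }
}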
@@ -97,11 +97,15 @@ int scanhash_lbry_8way( struct work *work, uint32_t max_nonce,
0xFFFFF000, 0xFFFF0000, 0 };
// we need bigendian data...
casti_m256i( edata, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) );
casti_m256i( edata, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) );
casti_m256i( edata, 2 ) = mm256_bswap_32( casti_m256i( pdata, 2 ) );
casti_m256i( edata, 3 ) = mm256_bswap_32( casti_m256i( pdata, 3 ) );
mm256_intrlv_8x32( vdata, edata, edata, edata, edata,
casti_m128i( edata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) );
casti_m128i( edata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) );
casti_m128i( edata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) );
casti_m128i( edata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) );
casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
casti_m128i( edata, 5 ) = mm128_bswap_32( casti_m128i( pdata, 5 ) );
casti_m128i( edata, 6 ) = mm128_bswap_32( casti_m128i( pdata, 6 ) );
casti_m128i( edata, 7 ) = mm128_bswap_32( casti_m128i( pdata, 7 ) );
intrlv_8x32( vdata, edata, edata, edata, edata,
edata, edata, edata, edata, 1024 );
sha256_8way_init( &sha256_8w_mid );
sha256_8way( &sha256_8w_mid, vdata, LBRY_MIDSTATE );
@@ -118,7 +122,7 @@ int scanhash_lbry_8way( struct work *work, uint32_t max_nonce,
for ( int i = 0; i < 8; i++ ) if ( !( hash7[ i ] & mask ) )
{
// deinterleave hash for lane
mm256_extr_lane_8x32( lane_hash, hash, i, 256 );
extr_lane_8x32( lane_hash, hash, i, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[27] = n + i;

View File

@@ -196,9 +196,9 @@ SPH_XCAT( HASH, _addbits_and_close )(void *cc, unsigned ub, unsigned n,
ptr = (unsigned)sc->count & (SPH_BLEN - 1U);
#ifdef PW01
sc->buf[ptr>>3] = _mm256_set1_epi64x( 0x100 >> 8 );
sc->buf[ptr>>3] = m256_const1_64( 0x100 >> 8 );
#else
sc->buf[ptr>>3] = _mm256_set1_epi64x( 0x80 );
sc->buf[ptr>>3] = m256_const1_64( 0x80 );
#endif
ptr += 8;

View File

@@ -86,8 +86,7 @@ static const sph_u32 K256[64] = {
// SHA-256 4 way
#define SHA2s_MEXP( a, b, c, d ) \
_mm_add_epi32( _mm_add_epi32( _mm_add_epi32( \
SSG2_1( W[a] ), W[b] ), SSG2_0( W[c] ) ), W[d] );
mm128_add4_32( SSG2_1( W[a] ), W[b], SSG2_0( W[c] ), W[d] );
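
(mm128_add4_32 sums four vectors; pairing the adds presumably shortens the dependency chain relative to the serial three-add form being removed. A plausible definition, offered as an assumption rather than the actual simd-utils code:)

// Hypothetical 4-operand add: (a+b) and (c+d) can issue in parallel.
#define mm128_add4_32_ref( a, b, c, d ) \
   _mm_add_epi32( _mm_add_epi32( a, b ), _mm_add_epi32( c, d ) )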
#define CHs(X, Y, Z) \
_mm_xor_si128( _mm_and_si128( _mm_xor_si128( Y, Z ), X ), Z )
@@ -115,9 +114,8 @@ static const sph_u32 K256[64] = {
#define SHA2s_4WAY_STEP(A, B, C, D, E, F, G, H, i, j) \
do { \
register __m128i T1, T2; \
T1 = _mm_add_epi32( _mm_add_epi32( _mm_add_epi32( \
_mm_add_epi32( H, BSG2_1(E) ), CHs(E, F, G) ), \
_mm_set1_epi32( K256[( (j)+(i) )] ) ), W[i] ); \
T1 = _mm_add_epi32( H, mm128_add4_32( BSG2_1(E), CHs(E, F, G), \
_mm_set1_epi32( K256[( (j)+(i) )] ), W[i] ) ); \
T2 = _mm_add_epi32( BSG2_0(A), MAJs(A, B, C) ); \
D = _mm_add_epi32( D, T1 ); \
H = _mm_add_epi32( T1, T2 ); \
@@ -129,22 +127,8 @@ sha256_4way_round( __m128i *in, __m128i r[8] )
register __m128i A, B, C, D, E, F, G, H;
__m128i W[16];
W[ 0] = mm128_bswap_32( in[ 0] );
W[ 1] = mm128_bswap_32( in[ 1] );
W[ 2] = mm128_bswap_32( in[ 2] );
W[ 3] = mm128_bswap_32( in[ 3] );
W[ 4] = mm128_bswap_32( in[ 4] );
W[ 5] = mm128_bswap_32( in[ 5] );
W[ 6] = mm128_bswap_32( in[ 6] );
W[ 7] = mm128_bswap_32( in[ 7] );
W[ 8] = mm128_bswap_32( in[ 8] );
W[ 9] = mm128_bswap_32( in[ 9] );
W[10] = mm128_bswap_32( in[10] );
W[11] = mm128_bswap_32( in[11] );
W[12] = mm128_bswap_32( in[12] );
W[13] = mm128_bswap_32( in[13] );
W[14] = mm128_bswap_32( in[14] );
W[15] = mm128_bswap_32( in[15] );
mm128_block_bswap_32( W, in );
mm128_block_bswap_32( W+8, in+8 );
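
(mm128_block_bswap_32 replaces the sixteen per-word bswap lines with two eight-vector block swaps. A sketch of what such a block helper might do, assuming SSSE3 PSHUFB is available; the actual implementation may differ:)

// Hypothetical reference for mm128_block_bswap_32: byte-swap the 32-bit words
// of eight consecutive __m128i vectors. Requires SSSE3 for _mm_shuffle_epi8.
static inline void mm128_block_bswap_32_ref( __m128i *d, const __m128i *s )
{
   const __m128i bswap_mask =
       _mm_set_epi8( 12,13,14,15,  8, 9,10,11,  4, 5, 6, 7,  0, 1, 2, 3 );
   for ( int i = 0; i < 8; i++ )
      d[i] = _mm_shuffle_epi8( s[i], bswap_mask );
}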
A = r[0];
B = r[1];
@@ -266,7 +250,7 @@ void sha256_4way( sha256_4way_context *sc, const void *data, size_t len )
void sha256_4way_close( sha256_4way_context *sc, void *dst )
{
unsigned ptr, u;
unsigned ptr;
uint32_t low, high;
const int buf_size = 64;
const int pad = buf_size - 8;
@@ -294,8 +278,7 @@ void sha256_4way_close( sha256_4way_context *sc, void *dst )
mm128_bswap_32( _mm_set1_epi32( low ) );
sha256_4way_round( sc->buf, sc->val );
for ( u = 0; u < 8; u ++ )
((__m128i*)dst)[u] = mm128_bswap_32( sc->val[u] );
mm128_block_bswap_32( dst, sc->val );
}
#if defined(__AVX2__)
@@ -326,15 +309,13 @@ void sha256_4way_close( sha256_4way_context *sc, void *dst )
mm256_ror_32(x, 17), mm256_ror_32(x, 19) ), _mm256_srli_epi32(x, 10) )
#define SHA2x_MEXP( a, b, c, d ) \
_mm256_add_epi32( _mm256_add_epi32( _mm256_add_epi32( \
SSG2_1x( W[a] ), W[b] ), SSG2_0x( W[c] ) ), W[d] );
mm256_add4_32( SSG2_1x( W[a] ), W[b], SSG2_0x( W[c] ), W[d] );
#define SHA2s_8WAY_STEP(A, B, C, D, E, F, G, H, i, j) \
do { \
register __m256i T1, T2; \
T1 = _mm256_add_epi32( _mm256_add_epi32( _mm256_add_epi32( \
_mm256_add_epi32( H, BSG2_1x(E) ), CHx(E, F, G) ), \
_mm256_set1_epi32( K256[( (j)+(i) )] ) ), W[i] ); \
T1 = _mm256_add_epi32( H, mm256_add4_32( BSG2_1x(E), CHx(E, F, G), \
_mm256_set1_epi32( K256[( (j)+(i) )] ), W[i] ) ); \
T2 = _mm256_add_epi32( BSG2_0x(A), MAJx(A, B, C) ); \
D = _mm256_add_epi32( D, T1 ); \
H = _mm256_add_epi32( T1, T2 ); \
@@ -346,22 +327,8 @@ sha256_8way_round( __m256i *in, __m256i r[8] )
register __m256i A, B, C, D, E, F, G, H;
__m256i W[16];
W[ 0] = mm256_bswap_32( in[ 0] );
W[ 1] = mm256_bswap_32( in[ 1] );
W[ 2] = mm256_bswap_32( in[ 2] );
W[ 3] = mm256_bswap_32( in[ 3] );
W[ 4] = mm256_bswap_32( in[ 4] );
W[ 5] = mm256_bswap_32( in[ 5] );
W[ 6] = mm256_bswap_32( in[ 6] );
W[ 7] = mm256_bswap_32( in[ 7] );
W[ 8] = mm256_bswap_32( in[ 8] );
W[ 9] = mm256_bswap_32( in[ 9] );
W[10] = mm256_bswap_32( in[10] );
W[11] = mm256_bswap_32( in[11] );
W[12] = mm256_bswap_32( in[12] );
W[13] = mm256_bswap_32( in[13] );
W[14] = mm256_bswap_32( in[14] );
W[15] = mm256_bswap_32( in[15] );
mm256_block_bswap_32( W , in );
mm256_block_bswap_32( W+8, in+8 );
A = r[0];
B = r[1];
@@ -484,7 +451,7 @@ void sha256_8way( sha256_8way_context *sc, const void *data, size_t len )
void sha256_8way_close( sha256_8way_context *sc, void *dst )
{
unsigned ptr, u;
unsigned ptr;
uint32_t low, high;
const int buf_size = 64;
const int pad = buf_size - 8;
@@ -513,8 +480,7 @@ void sha256_8way_close( sha256_8way_context *sc, void *dst )
sha256_8way_round( sc->buf, sc->val );
for ( u = 0; u < 8; u ++ )
((__m256i*)dst)[u] = mm256_bswap_32( sc->val[u] );
mm256_block_bswap_32( dst, sc->val );
}
@@ -596,9 +562,8 @@ static const sph_u64 K512[80] = {
#define SHA3_4WAY_STEP(A, B, C, D, E, F, G, H, i) \
do { \
register __m256i T1, T2; \
T1 = _mm256_add_epi64( _mm256_add_epi64( _mm256_add_epi64( \
_mm256_add_epi64( H, BSG5_1(E) ), CH(E, F, G) ), \
_mm256_set1_epi64x( K512[i] ) ), W[i] ); \
T1 = _mm256_add_epi64( H, mm256_add4_64( BSG5_1(E), CH(E, F, G), \
_mm256_set1_epi64x( K512[i] ), W[i] ) ); \
T2 = _mm256_add_epi64( BSG5_0(A), MAJ(A, B, C) ); \
D = _mm256_add_epi64( D, T1 ); \
H = _mm256_add_epi64( T1, T2 ); \
@@ -611,11 +576,12 @@ sha512_4way_round( __m256i *in, __m256i r[8] )
register __m256i A, B, C, D, E, F, G, H;
__m256i W[80];
for ( i = 0; i < 16; i++ )
W[i] = mm256_bswap_64( in[i] );
mm256_block_bswap_64( W , in );
mm256_block_bswap_64( W+8, in+8 );
for ( i = 16; i < 80; i++ )
W[i] = _mm256_add_epi64( _mm256_add_epi64( _mm256_add_epi64(
SSG5_1( W[ i-2 ] ), W[ i-7 ] ), SSG5_0( W[ i-15 ] ) ), W[ i-16 ] );
W[i] = mm256_add4_64( SSG5_1( W[ i- 2 ] ), W[ i- 7 ],
SSG5_0( W[ i-15 ] ), W[ i-16 ] );
A = r[0];
B = r[1];
@@ -689,12 +655,12 @@ void sha512_4way( sha512_4way_context *sc, const void *data, size_t len )
void sha512_4way_close( sha512_4way_context *sc, void *dst )
{
unsigned ptr, u;
unsigned ptr;
const int buf_size = 128;
const int pad = buf_size - 16;
ptr = (unsigned)sc->count & (buf_size - 1U);
sc->buf[ ptr>>3 ] = _mm256_set1_epi64x( 0x80 );
sc->buf[ ptr>>3 ] = m256_const1_64( 0x80 );
ptr += 8;
if ( ptr > pad )
{
@@ -711,8 +677,7 @@ void sha512_4way_close( sha512_4way_context *sc, void *dst )
mm256_bswap_64( _mm256_set1_epi64x( sc->count << 3 ) );
sha512_4way_round( sc->buf, sc->val );
for ( u = 0; u < 8; u ++ )
((__m256i*)dst)[u] = mm256_bswap_64( sc->val[u] );
mm256_block_bswap_64( dst, sc->val );
}
#endif // __AVX2__

View File

@@ -59,7 +59,7 @@ int scanhash_sha256q_8way( struct work *work, uint32_t max_nonce,
0 };
// Need big endian data
mm256_bswap_intrlv80_8x32( vdata, pdata );
mm256_bswap32_intrlv80_8x32( vdata, pdata );
sha256_8way_init( &sha256_ctx8 );
sha256_8way( &sha256_ctx8, vdata, 64 );
@@ -80,7 +80,7 @@ int scanhash_sha256q_8way( struct work *work, uint32_t max_nonce,
if ( !( hash7[ lane ] & mask ) )
{
// deinterleave hash for lane
mm256_extr_lane_8x32( lane_hash, hash, lane, 256 );
extr_lane_8x32( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
@@ -152,7 +152,7 @@ int scanhash_sha256q_4way( struct work *work, uint32_t max_nonce,
0xFFFF0000,
0 };
mm128_bswap_intrlv80_4x32( vdata, pdata );
mm128_bswap32_intrlv80_4x32( vdata, pdata );
sha256_4way_init( &sha256_ctx4 );
sha256_4way( &sha256_ctx4, vdata, 64 );

View File

@@ -72,7 +72,7 @@ int scanhash_sha256t_11way( struct work *work, uint32_t max_nonce,
casti_m256i( dataz, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) );
casti_m128i( dataz, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
mm256_intrlv_8x32( datax, dataz, dataz, dataz, dataz,
intrlv_8x32( datax, dataz, dataz, dataz, dataz,
dataz, dataz, dataz, dataz, 640 );
mm64_interleave_2x32( datay, dataz, dataz, 640 );
@@ -99,7 +99,7 @@ int scanhash_sha256t_11way( struct work *work, uint32_t max_nonce,
for ( i = 0; i < 8; i++ ) if ( !( hash7[ i ] & mask ) )
{
// deinterleave hash for lane
mm256_extr_lane_8x32( lane_hash, hashx, i, 256 );
extr_lane_8x32( lane_hash, hashx, i, 256 );
if ( fulltest( lane_hash, ptarget ) )
{
pdata[19] = n + i;
@@ -186,8 +186,9 @@ int scanhash_sha256t_8way( struct work *work, uint32_t max_nonce,
0xFFFF0000,
0 };
// Need big endian data
mm256_bswap_intrlv80_8x32( vdata, pdata );
mm256_bswap32_intrlv80_8x32( vdata, pdata );
sha256_8way_init( &sha256_ctx8 );
sha256_8way( &sha256_ctx8, vdata, 64 );
@@ -204,7 +205,7 @@ int scanhash_sha256t_8way( struct work *work, uint32_t max_nonce,
if ( !( hash7[ lane ] & mask ) )
{
// deinterleave hash for lane
mm256_extr_lane_8x32( lane_hash, hash, lane, 256 );
extr_lane_8x32( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;
@@ -271,7 +272,7 @@ int scanhash_sha256t_4way( struct work *work, uint32_t max_nonce,
0xFFFF0000,
0 };
mm128_bswap_intrlv80_4x32( vdata, pdata );
mm128_bswap32_intrlv80_4x32( vdata, pdata );
sha256_4way_init( &sha256_ctx4 );
sha256_4way( &sha256_ctx4, vdata, 64 );

View File

@@ -11,7 +11,7 @@ bool register_sha256t_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_sha256t_4way;
gate->hash = (void*)&sha256t_4way_hash;
#else
gate->optimizations = SHA_OPT;
gate->optimizations = SHA_OPT;
gate->scanhash = (void*)&scanhash_sha256t;
gate->hash = (void*)&sha256t_hash;
#endif
@@ -21,7 +21,11 @@ gate->optimizations = SHA_OPT;
bool register_sha256q_algo( algo_gate_t* gate )
{
#if defined(SHA256T_4WAY)
#if defined(SHA256T_8WAY)
gate->optimizations = SSE2_OPT | AVX2_OPT | SHA_OPT;
gate->scanhash = (void*)&scanhash_sha256q_8way;
gate->hash = (void*)&sha256q_8way_hash;
#elif defined(SHA256T_4WAY)
gate->optimizations = SSE2_OPT | AVX2_OPT | SHA_OPT;
gate->scanhash = (void*)&scanhash_sha256q_4way;
gate->hash = (void*)&sha256q_4way_hash;

View File

@@ -8,7 +8,7 @@
#if !defined(__SHA__)
#if defined(__AVX2__)
#define SHA256T_8WAY
#elif defined(__SSE2__)
#elif defined(__SSE2__)
#define SHA256T_4WAY
#endif
#endif
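Note: with the width test in the header and the dispatch in the gate, each algorithm's registration reduces to a single #if ladder. A condensed, assumed illustration of that pattern (the scalar fallback branch is not visible in the hunk above, so it is only hinted at here):

    bool register_sha256q_algo_sketch( algo_gate_t *gate )
    {
    #if defined(SHA256T_8WAY)
        gate->scanhash = (void*)&scanhash_sha256q_8way;   // AVX2, 8 lanes
        gate->hash     = (void*)&sha256q_8way_hash;
    #elif defined(SHA256T_4WAY)
        gate->scanhash = (void*)&scanhash_sha256q_4way;   // SSE2, 4 lanes
        gate->hash     = (void*)&sha256q_4way_hash;
    #else
        /* scalar / SHA-NI fallback registered here */
    #endif
        gate->optimizations = SSE2_OPT | AVX2_OPT | SHA_OPT;
        return true;
    }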

View File

@@ -20,6 +20,7 @@ static const uint32_t IV512[] =
static void
c512_2way( shavite512_2way_context *ctx, const void *msg )
{
const __m128i zero = _mm_setzero_si128();
__m256i p0, p1, p2, p3, x;
__m256i k00, k01, k02, k03, k10, k11, k12, k13;
__m256i *m = (__m256i*)msg;
@@ -33,24 +34,24 @@ c512_2way( shavite512_2way_context *ctx, const void *msg )
// round
k00 = m[0];
x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k00 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k00 ), zero );
k01 = m[1];
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ), zero );
k02 = m[2];
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ), zero );
k03 = m[3];
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ), zero );
p0 = _mm256_xor_si256( p0, x );
k10 = m[4];
x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k10 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k10 ), zero );
k11 = m[5];
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ), zero );
k12 = m[6];
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ), zero );
k13 = m[7];
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ), zero );
p2 = _mm256_xor_si256( p2, x );
@@ -59,129 +60,129 @@ c512_2way( shavite512_2way_context *ctx, const void *msg )
// round 1, 5, 9
k00 = _mm256_xor_si256( k13, mm256_ror1x32_128(
mm256_aesenc_2x128( k00 ) ) );
mm256_aesenc_2x128( k00, zero ) ) );
if ( r == 0 )
k00 = _mm256_xor_si256( k00, _mm256_set_epi32(
~ctx->count3, ctx->count2, ctx->count1, ctx->count0,
~ctx->count3, ctx->count2, ctx->count1, ctx->count0 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k00 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k00 ), zero );
k01 = _mm256_xor_si256( k00,
mm256_ror1x32_128( mm256_aesenc_2x128( k01 ) ) );
mm256_ror1x32_128( mm256_aesenc_2x128( k01, zero ) ) );
if ( r == 1 )
k01 = _mm256_xor_si256( k01, _mm256_set_epi32(
~ctx->count0, ctx->count1, ctx->count2, ctx->count3,
~ctx->count0, ctx->count1, ctx->count2, ctx->count3 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ), zero );
k02 = _mm256_xor_si256( k01,
mm256_ror1x32_128( mm256_aesenc_2x128( k02 ) ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) );
mm256_ror1x32_128( mm256_aesenc_2x128( k02, zero ) ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ), zero );
k03 = _mm256_xor_si256( k02,
mm256_ror1x32_128( mm256_aesenc_2x128( k03 ) ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) );
mm256_ror1x32_128( mm256_aesenc_2x128( k03, zero ) ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ), zero );
p3 = _mm256_xor_si256( p3, x );
k10 = _mm256_xor_si256( k03,
mm256_ror1x32_128( mm256_aesenc_2x128( k10 ) ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k10 ) );
mm256_ror1x32_128( mm256_aesenc_2x128( k10, zero ) ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k10 ), zero );
k11 = _mm256_xor_si256( k10,
mm256_ror1x32_128( mm256_aesenc_2x128( k11 ) ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) );
mm256_ror1x32_128( mm256_aesenc_2x128( k11, zero ) ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ), zero );
k12 = _mm256_xor_si256( k11,
mm256_ror1x32_128( mm256_aesenc_2x128( k12 ) ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) );
mm256_ror1x32_128( mm256_aesenc_2x128( k12, zero ) ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ), zero );
k13 = _mm256_xor_si256( k12,
mm256_ror1x32_128( mm256_aesenc_2x128( k13 ) ) );
mm256_ror1x32_128( mm256_aesenc_2x128( k13, zero ) ) );
if ( r == 2 )
k13 = _mm256_xor_si256( k13, _mm256_set_epi32(
~ctx->count1, ctx->count0, ctx->count3, ctx->count2,
~ctx->count1, ctx->count0, ctx->count3, ctx->count2 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ), zero );
p1 = _mm256_xor_si256( p1, x );
// round 2, 6, 10
k00 = _mm256_xor_si256( k00, mm256_ror2x256hi_1x32( k12, k13 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k00 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k00 ), zero );
k01 = _mm256_xor_si256( k01, mm256_ror2x256hi_1x32( k13, k00 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ), zero );
k02 = _mm256_xor_si256( k02, mm256_ror2x256hi_1x32( k00, k01 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ), zero );
k03 = _mm256_xor_si256( k03, mm256_ror2x256hi_1x32( k01, k02 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ), zero );
p2 = _mm256_xor_si256( p2, x );
k10 = _mm256_xor_si256( k10, mm256_ror2x256hi_1x32( k02, k03 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k10 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k10 ), zero );
k11 = _mm256_xor_si256( k11, mm256_ror2x256hi_1x32( k03, k10 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ), zero );
k12 = _mm256_xor_si256( k12, mm256_ror2x256hi_1x32( k10, k11 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ), zero );
k13 = _mm256_xor_si256( k13, mm256_ror2x256hi_1x32( k11, k12 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ), zero );
p0 = _mm256_xor_si256( p0, x );
// round 3, 7, 11
k00 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k00 ) ), k13 );
x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k00 ) );
mm256_aesenc_2x128( k00, zero ) ), k13 );
x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k00 ), zero );
k01 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k01 ) ), k00 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) );
mm256_aesenc_2x128( k01, zero ) ), k00 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ), zero );
k02 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k02 ) ), k01 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) );
mm256_aesenc_2x128( k02, zero ) ), k01 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ), zero );
k03 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k03 ) ), k02 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) );
mm256_aesenc_2x128( k03, zero ) ), k02 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ), zero );
p1 = _mm256_xor_si256( p1, x );
k10 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k10 ) ), k03 );
x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k10 ) );
mm256_aesenc_2x128( k10, zero ) ), k03 );
x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k10 ), zero );
k11 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k11 ) ), k10 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) );
mm256_aesenc_2x128( k11, zero ) ), k10 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ), zero );
k12 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k12 ) ), k11 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) );
mm256_aesenc_2x128( k12, zero ) ), k11 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ), zero );
k13 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k13 ) ), k12 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) );
mm256_aesenc_2x128( k13, zero ) ), k12 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ), zero );
p3 = _mm256_xor_si256( p3, x );
// round 4, 8, 12
k00 = _mm256_xor_si256( k00, mm256_ror2x256hi_1x32( k12, k13 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k00 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k00 ), zero );
k01 = _mm256_xor_si256( k01, mm256_ror2x256hi_1x32( k13, k00 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ), zero );
k02 = _mm256_xor_si256( k02, mm256_ror2x256hi_1x32( k00, k01 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ), zero );
k03 = _mm256_xor_si256( k03, mm256_ror2x256hi_1x32( k01, k02 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ), zero );
p0 = _mm256_xor_si256( p0, x );
k10 = _mm256_xor_si256( k10, mm256_ror2x256hi_1x32( k02, k03 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k10 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k10 ), zero );
k11 = _mm256_xor_si256( k11, mm256_ror2x256hi_1x32( k03, k10 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ), zero );
k12 = _mm256_xor_si256( k12, mm256_ror2x256hi_1x32( k10, k11 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ), zero );
k13 = _mm256_xor_si256( k13, mm256_ror2x256hi_1x32( k11, k12 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ), zero );
p2 = _mm256_xor_si256( p2, x );
@@ -190,36 +191,36 @@ c512_2way( shavite512_2way_context *ctx, const void *msg )
// round 13
k00 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k00 ) ), k13 );
x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k00 ) );
mm256_aesenc_2x128( k00, zero ) ), k13 );
x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k00 ), zero );
k01 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k01 ) ), k00 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) );
mm256_aesenc_2x128( k01, zero ) ), k00 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ), zero );
k02 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k02 ) ), k01 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) );
mm256_aesenc_2x128( k02, zero ) ), k01 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ), zero );
k03 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k03 ) ), k02 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) );
mm256_aesenc_2x128( k03, zero ) ), k02 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ), zero );
p3 = _mm256_xor_si256( p3, x );
k10 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k10 ) ), k03 );
x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k10 ) );
mm256_aesenc_2x128( k10, zero ) ), k03 );
x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k10 ), zero );
k11 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k11 ) ), k10 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) );
mm256_aesenc_2x128( k11, zero ) ), k10 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ), zero );
k12 = mm256_ror1x32_128( mm256_aesenc_2x128( k12 ) );
k12 = mm256_ror1x32_128( mm256_aesenc_2x128( k12, zero ) );
k12 = _mm256_xor_si256( k12, _mm256_xor_si256( k11, _mm256_set_epi32(
~ctx->count2, ctx->count3, ctx->count0, ctx->count1,
~ctx->count2, ctx->count3, ctx->count0, ctx->count1 ) ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ), zero );
k13 = _mm256_xor_si256( mm256_ror1x32_128(
mm256_aesenc_2x128( k13 ) ), k12 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) );
mm256_aesenc_2x128( k13, zero ) ), k12 );
x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ), zero );
p1 = _mm256_xor_si256( p1, x );
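Note: mm256_aesenc_2x128 now takes the round key as an explicit argument instead of reaching for a global zero constant, and the caller hoists const __m128i zero = _mm_setzero_si128() once per compression call. A minimal sketch, assuming the helper simply runs one AES round per 128-bit half; on VAES hardware a single _mm256_aesenc_epi128 would cover both halves in one instruction.

    #include <immintrin.h>

    // Hypothetical stand-in for mm256_aesenc_2x128: one AES round on each
    // 128-bit half of x, both with the same round key k.
    static inline __m256i aesenc_2x128_sketch( __m256i x, __m128i k )
    {
        __m128i lo = _mm256_castsi256_si128( x );
        __m128i hi = _mm256_extracti128_si256( x, 1 );
        lo = _mm_aesenc_si128( lo, k );
        hi = _mm_aesenc_si128( hi, k );
        return _mm256_inserti128_si256( _mm256_castsi128_si256( lo ), hi, 1 );
    }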

View File

@@ -87,6 +87,7 @@ static const sph_u32 IV512[] = {
static void
c512( sph_shavite_big_context *sc, const void *msg )
{
const __m128i zero = _mm_setzero_si128();
__m128i p0, p1, p2, p3, x;
__m128i k00, k01, k02, k03, k10, k11, k12, k13;
__m128i *m = (__m128i*)msg;
@@ -101,38 +102,38 @@ c512( sph_shavite_big_context *sc, const void *msg )
// round
k00 = m[0];
x = _mm_xor_si128( p1, k00 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k01 = m[1];
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k02 = m[2];
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k03 = m[3];
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
p0 = _mm_xor_si128( p0, x );
k10 = m[4];
x = _mm_xor_si128( p3, k10 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k11 = m[5];
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k12 = m[6];
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k13 = m[7];
x = _mm_xor_si128( x, k13 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
p2 = _mm_xor_si128( p2, x );
for ( r = 0; r < 3; r ++ )
{
// round 1, 5, 9
k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) );
k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, zero ) );
k00 = _mm_xor_si128( k00, k13 );
if ( r == 0 )
@@ -140,8 +141,8 @@ c512( sph_shavite_big_context *sc, const void *msg )
~sc->count3, sc->count2, sc->count1, sc->count0 ) );
x = _mm_xor_si128( p0, k00 );
x = _mm_aesenc_si128( x, m128_zero );
k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, zero ) );
k01 = _mm_xor_si128( k01, k00 );
if ( r == 1 )
@@ -149,32 +150,32 @@ c512( sph_shavite_big_context *sc, const void *msg )
~sc->count0, sc->count1, sc->count2, sc->count3 ) );
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, m128_zero );
k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, zero ) );
k02 = _mm_xor_si128( k02, k01 );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, m128_zero );
k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, zero ) );
k03 = _mm_xor_si128( k03, k02 );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
p3 = _mm_xor_si128( p3, x );
k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) );
k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, zero ) );
k10 = _mm_xor_si128( k10, k03 );
x = _mm_xor_si128( p2, k10 );
x = _mm_aesenc_si128( x, m128_zero );
k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, zero ) );
k11 = _mm_xor_si128( k11, k10 );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, m128_zero );
k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, zero ) );
k12 = _mm_xor_si128( k12, k11 );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, m128_zero );
k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, zero ) );
k13 = _mm_xor_si128( k13, k12 );
if ( r == 2 )
@@ -182,78 +183,78 @@ c512( sph_shavite_big_context *sc, const void *msg )
~sc->count1, sc->count0, sc->count3, sc->count2 ) );
x = _mm_xor_si128( x, k13 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
p1 = _mm_xor_si128( p1, x );
// round 2, 6, 10
k00 = _mm_xor_si128( k00, mm128_ror256hi_1x32( k12, k13 ) );
x = _mm_xor_si128( p3, k00 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k01 = _mm_xor_si128( k01, mm128_ror256hi_1x32( k13, k00 ) );
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k02 = _mm_xor_si128( k02, mm128_ror256hi_1x32( k00, k01 ) );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k03 = _mm_xor_si128( k03, mm128_ror256hi_1x32( k01, k02 ) );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
p2 = _mm_xor_si128( p2, x );
k10 = _mm_xor_si128( k10, mm128_ror256hi_1x32( k02, k03 ) );
x = _mm_xor_si128( p1, k10 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k11 = _mm_xor_si128( k11, mm128_ror256hi_1x32( k03, k10 ) );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k12 = _mm_xor_si128( k12, mm128_ror256hi_1x32( k10, k11 ) );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k13 = _mm_xor_si128( k13, mm128_ror256hi_1x32( k11, k12 ) );
x = _mm_xor_si128( x, k13 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
p0 = _mm_xor_si128( p0, x );
// round 3, 7, 11
k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) );
k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, zero ) );
k00 = _mm_xor_si128( k00, k13 );
x = _mm_xor_si128( p2, k00 );
x = _mm_aesenc_si128( x, m128_zero );
k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, zero ) );
k01 = _mm_xor_si128( k01, k00 );
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, m128_zero );
k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, zero ) );
k02 = _mm_xor_si128( k02, k01 );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, m128_zero );
k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, zero ) );
k03 = _mm_xor_si128( k03, k02 );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
p1 = _mm_xor_si128( p1, x );
k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) );
k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, zero ) );
k10 = _mm_xor_si128( k10, k03 );
x = _mm_xor_si128( p0, k10 );
x = _mm_aesenc_si128( x, m128_zero );
k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, zero ) );
k11 = _mm_xor_si128( k11, k10 );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, m128_zero );
k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, zero ) );
k12 = _mm_xor_si128( k12, k11 );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, m128_zero );
k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, zero ) );
k13 = _mm_xor_si128( k13, k12 );
x = _mm_xor_si128( x, k13 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
p3 = _mm_xor_si128( p3, x );
@@ -261,73 +262,73 @@ c512( sph_shavite_big_context *sc, const void *msg )
k00 = _mm_xor_si128( k00, mm128_ror256hi_1x32( k12, k13 ) );
x = _mm_xor_si128( p1, k00 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k01 = _mm_xor_si128( k01, mm128_ror256hi_1x32( k13, k00 ) );
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k02 = _mm_xor_si128( k02, mm128_ror256hi_1x32( k00, k01 ) );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k03 = _mm_xor_si128( k03, mm128_ror256hi_1x32( k01, k02 ) );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
p0 = _mm_xor_si128( p0, x );
k10 = _mm_xor_si128( k10, mm128_ror256hi_1x32( k02, k03 ) );
x = _mm_xor_si128( p3, k10 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k11 = _mm_xor_si128( k11, mm128_ror256hi_1x32( k03, k10 ) );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k12 = _mm_xor_si128( k12, mm128_ror256hi_1x32( k10, k11 ) );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
k13 = _mm_xor_si128( k13, mm128_ror256hi_1x32( k11, k12 ) );
x = _mm_xor_si128( x, k13 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
p2 = _mm_xor_si128( p2, x );
}
// round 13
k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) );
k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, zero ) );
k00 = _mm_xor_si128( k00, k13 );
x = _mm_xor_si128( p0, k00 );
x = _mm_aesenc_si128( x, m128_zero );
k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, zero ) );
k01 = _mm_xor_si128( k01, k00 );
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, m128_zero );
k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, zero ) );
k02 = _mm_xor_si128( k02, k01 );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, m128_zero );
k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, zero ) );
k03 = _mm_xor_si128( k03, k02 );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
p3 = _mm_xor_si128( p3, x );
k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) );
k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, zero ) );
k10 = _mm_xor_si128( k10, k03 );
x = _mm_xor_si128( p2, k10 );
x = _mm_aesenc_si128( x, m128_zero );
k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, zero ) );
k11 = _mm_xor_si128( k11, k10 );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, m128_zero );
k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, zero ) );
k12 = _mm_xor_si128( k12, _mm_xor_si128( k11, _mm_set_epi32(
~sc->count2, sc->count3, sc->count0, sc->count1 ) ) );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, m128_zero );
k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) );
x = _mm_aesenc_si128( x, zero );
k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, zero ) );
k13 = _mm_xor_si128( k13, k12 );
x = _mm_xor_si128( x, k13 );
x = _mm_aesenc_si128( x, m128_zero );
x = _mm_aesenc_si128( x, zero );
p1 = _mm_xor_si128( p1, x );

View File

@@ -342,6 +342,7 @@ void fft128_2way( void *a )
void fft128_2way_msg( uint16_t *a, const uint8_t *x, int final )
{
const __m256i zero = _mm256_setzero_si256();
static const m256_v16 Tweak = {{ 0,0,0,0,0,0,0,1, 0,0,0,0,0,0,0,1, }};
static const m256_v16 FinalTweak = {{ 0,0,0,0,0,1,0,1, 0,0,0,0,0,1,0,1, }};
@@ -352,10 +353,10 @@ void fft128_2way_msg( uint16_t *a, const uint8_t *x, int final )
#define UNPACK( i ) \
do { \
__m256i t = X[i]; \
A[2*i] = _mm256_unpacklo_epi8( t, m256_zero ); \
A[2*i] = _mm256_unpacklo_epi8( t, zero ); \
A[2*i+8] = _mm256_mullo_epi16( A[2*i], FFT128_Twiddle[2*i].v256 ); \
A[2*i+8] = REDUCE(A[2*i+8]); \
A[2*i+1] = _mm256_unpackhi_epi8( t, m256_zero ); \
A[2*i+1] = _mm256_unpackhi_epi8( t, zero ); \
A[2*i+9] = _mm256_mullo_epi16(A[2*i+1], FFT128_Twiddle[2*i+1].v256 ); \
A[2*i+9] = REDUCE(A[2*i+9]); \
} while(0)
@@ -365,10 +366,10 @@ do { \
do { \
__m256i t = X[i]; \
__m256i tmp; \
A[2*i] = _mm256_unpacklo_epi8( t, m256_zero ); \
A[2*i] = _mm256_unpacklo_epi8( t, zero ); \
A[2*i+8] = _mm256_mullo_epi16( A[ 2*i ], FFT128_Twiddle[ 2*i ].v256 ); \
A[2*i+8] = REDUCE( A[ 2*i+8 ] ); \
tmp = _mm256_unpackhi_epi8( t, m256_zero ); \
tmp = _mm256_unpackhi_epi8( t, zero ); \
A[2*i+1] = _mm256_add_epi16( tmp, tw ); \
A[2*i+9] = _mm256_mullo_epi16( _mm256_sub_epi16( tmp, tw ), \
FFT128_Twiddle[ 2*i+1 ].v256 );\
@@ -392,6 +393,7 @@ do { \
void fft256_2way_msg( uint16_t *a, const uint8_t *x, int final )
{
const __m256i zero = _mm256_setzero_si256();
static const m256_v16 Tweak = {{ 0,0,0,0,0,0,0,1, 0,0,0,0,0,0,0,1, }};
static const m256_v16 FinalTweak = {{ 0,0,0,0,0,1,0,1, 0,0,0,0,0,1,0,1, }};
@@ -402,11 +404,11 @@ void fft256_2way_msg( uint16_t *a, const uint8_t *x, int final )
#define UNPACK( i ) \
do { \
__m256i t = X[i]; \
A[ 2*i ] = _mm256_unpacklo_epi8( t, m256_zero ); \
A[ 2*i ] = _mm256_unpacklo_epi8( t, zero ); \
A[ 2*i + 16 ] = _mm256_mullo_epi16( A[ 2*i ], \
FFT256_Twiddle[ 2*i ].v256 ); \
A[ 2*i + 16 ] = REDUCE( A[ 2*i + 16 ] ); \
A[ 2*i + 1 ] = _mm256_unpackhi_epi8( t, m256_zero ); \
A[ 2*i + 1 ] = _mm256_unpackhi_epi8( t, zero ); \
A[ 2*i + 17 ] = _mm256_mullo_epi16( A[ 2*i + 1 ], \
FFT256_Twiddle[ 2*i + 1 ].v256 ); \
A[ 2*i + 17 ] = REDUCE( A[ 2*i + 17 ] ); \
@@ -417,11 +419,11 @@ do { \
do { \
__m256i t = X[i]; \
__m256i tmp; \
A[ 2*i ] = _mm256_unpacklo_epi8( t, m256_zero ); \
A[ 2*i ] = _mm256_unpacklo_epi8( t, zero ); \
A[ 2*i + 16 ] = _mm256_mullo_epi16( A[ 2*i ], \
FFT256_Twiddle[ 2*i ].v256 ); \
A[ 2*i + 16 ] = REDUCE( A[ 2*i + 16 ] ); \
tmp = _mm256_unpackhi_epi8( t, m256_zero ); \
tmp = _mm256_unpackhi_epi8( t, zero ); \
A[ 2*i + 1 ] = _mm256_add_epi16( tmp, tw ); \
A[ 2*i + 17 ] = _mm256_mullo_epi16( _mm256_sub_epi16( tmp, tw ), \
FFT256_Twiddle[ 2*i + 1 ].v256 ); \
@@ -446,6 +448,8 @@ do { \
fft128_2way( a+256 );
}
#define c1_16( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }}
void rounds512_2way( uint32_t *state, const uint8_t *msg, uint16_t *fft )
{
register __m256i S0l, S1l, S2l, S3l;
@@ -453,7 +457,8 @@ void rounds512_2way( uint32_t *state, const uint8_t *msg, uint16_t *fft )
__m256i *S = (__m256i*) state;
__m256i *M = (__m256i*) msg;
__m256i *W = (__m256i*) fft;
static const m256_v16 code[] = { mm256_const1_16(185), mm256_const1_16(233) };
static const m256_v16 code[] = { c1_16(185), c1_16(233) };
S0l = _mm256_xor_si256( S[0], M[0] );
S0h = _mm256_xor_si256( S[1], M[1] );
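Note: the UNPACK macros above widen message bytes to 16-bit words by unpacking against a zero vector, which is now a local const rather than the global m256_zero. A minimal sketch of that widening step, with hypothetical names:

    #include <immintrin.h>

    // lo receives bytes 0..7 of each 128-bit lane, hi receives bytes 8..15,
    // each zero-extended to a 16-bit word ready for the FFT twiddle multiply.
    static inline void widen_bytes_sketch( __m256i t, __m256i *lo, __m256i *hi )
    {
        const __m256i zero = _mm256_setzero_si256();
        *lo = _mm256_unpacklo_epi8( t, zero );
        *hi = _mm256_unpackhi_epi8( t, zero );
    }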

View File

@@ -12,7 +12,7 @@
void skeinhash_4way( void *state, const void *input )
{
uint64_t vhash64[8*4] __attribute__ ((aligned (64)));
uint64_t vhash64[16*4] __attribute__ ((aligned (64)));
skein512_4way_context ctx_skein;
#if defined(__SHA__)
uint32_t hash0[16] __attribute__ ((aligned (64)));
@@ -30,7 +30,7 @@ void skeinhash_4way( void *state, const void *input )
skein512_4way_close( &ctx_skein, vhash64 );
#if defined(__SHA__)
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 512 );
SHA256_Init( &ctx_sha256 );
SHA256_Update( &ctx_sha256, (unsigned char*)hash0, 64 );
@@ -50,7 +50,7 @@ void skeinhash_4way( void *state, const void *input )
intrlv_4x32( state, hash0, hash1, hash2, hash3, 256 );
#else
mm256_rintrlv_4x64_4x32( vhash32, vhash64, 512 );
rintrlv_4x64_4x32( vhash32, vhash64, 512 );
sha256_4way_init( &ctx_sha256 );
sha256_4way( &ctx_sha256, vhash32, 64 );
@@ -62,7 +62,7 @@ int scanhash_skein_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t hash[8*4] __attribute__ ((aligned (64)));
uint32_t hash[16*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[7<<2]);
uint32_t *pdata = work->data;
@@ -73,7 +73,7 @@ int scanhash_skein_4way( struct work *work, uint32_t max_nonce,
__m256i *noncev = (__m256i*)vdata + 9; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
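Note: the renamed mm256_bswap32_intrlv80_4x64 byte-swaps the 80-byte header once and interleaves it for four lanes; inside the scan loop only the nonce word changes. A sketch of that per-iteration nonce update, assuming a shuffle-based 32-bit byte swap and an illustrative blend mask (the real mask is whatever mm256_intrlv_blend_32 uses):

    #include <immintrin.h>
    #include <stdint.h>

    // Assumed 32-bit byte swap per dword (AVX2).
    static inline __m256i bswap_32_sketch( __m256i v )
    {
        const __m256i m = _mm256_set_epi8(
            12,13,14,15,  8, 9,10,11,  4, 5, 6, 7,  0, 1, 2, 3,
            12,13,14,15,  8, 9,10,11,  4, 5, 6, 7,  0, 1, 2, 3 );
        return _mm256_shuffle_epi8( v, m );
    }

    // Four consecutive nonces, big-endian, merged into the interleaved header.
    static inline __m256i next_nonces_sketch( __m256i noncev, uint32_t n )
    {
        __m256i x = bswap_32_sketch(
                        _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) );
        return _mm256_blend_epi32( x, noncev, 0x55 );  // mask is illustrative
    }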

View File

@@ -279,10 +279,7 @@ do { \
_mm256_xor_si256( k2, k3 ) ), \
_mm256_xor_si256( _mm256_xor_si256( k4, k5 ), \
_mm256_xor_si256( k6, k7 ) ) ), \
_mm256_set_epi64x( SPH_C64(0x1BD11BDAA9FC1A22), \
SPH_C64(0x1BD11BDAA9FC1A22), \
SPH_C64(0x1BD11BDAA9FC1A22), \
SPH_C64(0x1BD11BDAA9FC1A22) ) ); \
m256_const1_64( 0x1BD11BDAA9FC1A22) ); \
t2 = t0 ^ t1; \
} while (0)
@@ -294,13 +291,11 @@ do { \
w3 = _mm256_add_epi64( w3, SKBI(k,s,3) ); \
w4 = _mm256_add_epi64( w4, SKBI(k,s,4) ); \
w5 = _mm256_add_epi64( w5, _mm256_add_epi64( SKBI(k,s,5), \
_mm256_set_epi64x( SKBT(t,s,0), SKBT(t,s,0), \
SKBT(t,s,0), SKBT(t,s,0) ) ) ); \
m256_const1_64( SKBT(t,s,0) ) ) ); \
w6 = _mm256_add_epi64( w6, _mm256_add_epi64( SKBI(k,s,6), \
_mm256_set_epi64x( SKBT(t,s,1), SKBT(t,s,1), \
SKBT(t,s,1), SKBT(t,s,1) ) ) ); \
m256_const1_64( SKBT(t,s,1) ) ) ); \
w7 = _mm256_add_epi64( w7, _mm256_add_epi64( SKBI(k,s,7), \
_mm256_set_epi64x( s, s, s, s ) ) ); \
m256_const1_64( s ) ) ); \
} while (0)
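Note: the _mm256_set_epi64x calls with four identical operands are replaced by m256_const1_64, which presumably amounts to a 64-bit broadcast; a one-line sketch:

    // Assumed equivalent of m256_const1_64, e.g. for the Skein parity constant:
    //   __m256i kparity = const1_64_sketch( 0x1BD11BDAA9FC1A22ULL );
    static inline __m256i const1_64_sketch( uint64_t x )
    {
        return _mm256_set1_epi64x( (long long)x );
    }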

View File

@@ -8,7 +8,7 @@
void skein2hash_4way( void *output, const void *input )
{
skein512_4way_context ctx;
uint64_t hash[8*4] __attribute__ ((aligned (64)));
uint64_t hash[16*4] __attribute__ ((aligned (64)));
skein512_4way_init( &ctx );
skein512_4way( &ctx, input, 80 );
@@ -22,8 +22,7 @@ void skein2hash_4way( void *output, const void *input )
int scanhash_skein2_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*4] __attribute__ ((aligned (64)));
uint32_t edata[20] __attribute__ ((aligned (64)));
uint32_t hash[16*4] __attribute__ ((aligned (64)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[25]);
@@ -32,32 +31,21 @@ int scanhash_skein2_4way( struct work *work, uint32_t max_nonce,
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
// __m256i *noncev = (__m256i*)vdata + 9; // aligned
__m256i *noncev = (__m256i*)vdata + 9; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
uint32_t *noncep = vdata + 73; // 9*8 + 1
swab32_array( edata, pdata, 20 );
mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 );
// mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do
{
be32enc( noncep, n );
be32enc( noncep+2, n+1 );
be32enc( noncep+4, n+2 );
be32enc( noncep+6, n+3 );
// *noncev = mm256_intrlv_blend_32( mm256_bswap_32(
// _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
skein2hash_4way( hash, vdata );
for ( int lane = 0; lane < 4; lane++ )
if ( hash7[ lane<<1 ] <= Htarg )
{
mm256_extr_lane_4x64( lane_hash, hash, lane, 256 );
extr_lane_4x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;

View File

@@ -69,7 +69,7 @@ void c11_4way_hash( void *state, const void *input )
bmw512_4way_close( &ctx.bmw, vhash );
// Serial
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 3 Groestl
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -81,7 +81,7 @@ void c11_4way_hash( void *state, const void *input )
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
// 4way
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
// 4 JH
jh512_4way( &ctx.jh, vhash, 64 );
@@ -96,16 +96,16 @@ void c11_4way_hash( void *state, const void *input )
skein512_4way_close( &ctx.skein, vhash );
// Serial
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 7 Luffa
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
mm256_intrlv_2x128( vhashB, hash2, hash3, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhashB, hash2, hash3, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashB, vhashB, 64 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhashB, 512 );
// 8 Cubehash
cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 );
@@ -133,13 +133,13 @@ void c11_4way_hash( void *state, const void *input )
sph_shavite512_close( &ctx.shavite, hash3 );
// 10 Simd
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
mm256_intrlv_2x128( vhashB, hash2, hash3, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhashB, hash2, hash3, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhashB, 512 );
// 11 Echo
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -165,7 +165,6 @@ int scanhash_c11_4way( struct work *work, uint32_t max_nonce,
{
uint32_t hash[4*8] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
@@ -178,7 +177,7 @@ int scanhash_c11_4way( struct work *work, uint32_t max_nonce,
uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
0xFFFFF000, 0xFFFF0000, 0 };
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
for (int m=0; m < 6; m++)
if (Htarg <= htmax[m])
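Note: the shortened intrlv_4x64 / dintrlv_4x64 names are used at every switch between the 4-way Blake/BMW/Skein/JH/Keccak stages and the serial Groestl/CubeHash/SHAvite/ECHO stages. The layout they imply is word-interleaved, lane-major; a plain-C sketch under that assumption (the real routines are vectorized and take void pointers):

    #include <stdint.h>

    // Lane j's k-th 64-bit word sits at v[4*k + j] in the interleaved buffer.
    static void intrlv_4x64_sketch( uint64_t *v, const uint64_t *h0,
            const uint64_t *h1, const uint64_t *h2, const uint64_t *h3,
            int bit_len )
    {
        for ( int k = 0; k < bit_len / 64; k++ )
        {
            v[ 4*k     ] = h0[k];
            v[ 4*k + 1 ] = h1[k];
            v[ 4*k + 2 ] = h2[k];
            v[ 4*k + 3 ] = h3[k];
        }
    }

    static void dintrlv_4x64_sketch( uint64_t *h0, uint64_t *h1, uint64_t *h2,
            uint64_t *h3, const uint64_t *v, int bit_len )
    {
        for ( int k = 0; k < bit_len / 64; k++ )
        {
            h0[k] = v[ 4*k     ];
            h1[k] = v[ 4*k + 1 ];
            h2[k] = v[ 4*k + 2 ];
            h3[k] = v[ 4*k + 3 ];
        }
    }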

View File

@@ -87,19 +87,16 @@ void timetravel_4way_hash(void *output, const void *input)
blake512_4way( &ctx.blake, vhashA, dataLen );
blake512_4way_close( &ctx.blake, vhashB );
if ( i == 7 )
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashB, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 );
break;
case 1:
bmw512_4way( &ctx.bmw, vhashA, dataLen );
bmw512_4way_close( &ctx.bmw, vhashB );
if ( i == 7 )
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashB, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 );
break;
case 2:
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashA, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 );
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(char*)hash0, dataLen<<3 );
reinit_groestl( &ctx.groestl );
@@ -112,47 +109,40 @@ void timetravel_4way_hash(void *output, const void *input)
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(char*)hash3, dataLen<<3 );
if ( i != 7 )
mm256_intrlv_4x64( vhashB,
hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 );
break;
case 3:
skein512_4way( &ctx.skein, vhashA, dataLen );
skein512_4way_close( &ctx.skein, vhashB );
if ( i == 7 )
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashB, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 );
break;
case 4:
jh512_4way( &ctx.jh, vhashA, dataLen );
jh512_4way_close( &ctx.jh, vhashB );
if ( i == 7 )
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashB, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 );
break;
case 5:
keccak512_4way( &ctx.keccak, vhashA, dataLen );
keccak512_4way_close( &ctx.keccak, vhashB );
if ( i == 7 )
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashB, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 );
break;
case 6:
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashA, dataLen<<3 );
mm256_intrlv_2x128( vhashA, hash0, hash1, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 );
intrlv_2x128( vhashA, hash0, hash1, dataLen<<3 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, dataLen );
mm256_dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 );
mm256_intrlv_2x128( vhashA, hash2, hash3, dataLen<<3 );
dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 );
intrlv_2x128( vhashA, hash2, hash3, dataLen<<3 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, dataLen );
mm256_dintrlv_2x128( hash2, hash3, vhashA, dataLen<<3 );
dintrlv_2x128( hash2, hash3, vhashA, dataLen<<3 );
if ( i != 7 )
mm256_intrlv_4x64( vhashB,
hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 );
break;
case 7:
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashA, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 );
cubehashUpdateDigest( &ctx.cube, (byte*)hash0,
(const byte*)hash0, dataLen );
memcpy( &ctx.cube, &tt8_4way_ctx.cube, sizeof(cubehashParam) );
@@ -165,8 +155,7 @@ void timetravel_4way_hash(void *output, const void *input)
cubehashUpdateDigest( &ctx.cube, (byte*)hash3,
(const byte*)hash3, dataLen );
if ( i != 7 )
mm256_intrlv_4x64( vhashB,
hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 );
break;
default:
applog(LOG_ERR,"SWERR: timetravel invalid permutation");
@@ -215,7 +204,7 @@ int scanhash_timetravel_4way( struct work *work, uint32_t max_nonce,
}
uint64_t *edata = (uint64_t*)endiandata;
mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
do
{

View File

@@ -93,19 +93,16 @@ void timetravel10_4way_hash(void *output, const void *input)
blake512_4way( &ctx.blake, vhashA, dataLen );
blake512_4way_close( &ctx.blake, vhashB );
if ( i == 9 )
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashB, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 );
break;
case 1:
bmw512_4way( &ctx.bmw, vhashA, dataLen );
bmw512_4way_close( &ctx.bmw, vhashB );
if ( i == 9 )
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashB, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 );
break;
case 2:
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashA, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 );
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(char*)hash0, dataLen<<3 );
reinit_groestl( &ctx.groestl );
@@ -118,46 +115,40 @@ void timetravel10_4way_hash(void *output, const void *input)
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(char*)hash3, dataLen<<3 );
if ( i != 9 )
mm256_intrlv_4x64( vhashB,
hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 );
break;
case 3:
skein512_4way( &ctx.skein, vhashA, dataLen );
skein512_4way_close( &ctx.skein, vhashB );
if ( i == 9 )
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashB, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 );
break;
case 4:
jh512_4way( &ctx.jh, vhashA, dataLen );
jh512_4way_close( &ctx.jh, vhashB );
if ( i == 9 )
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashB, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 );
break;
case 5:
keccak512_4way( &ctx.keccak, vhashA, dataLen );
keccak512_4way_close( &ctx.keccak, vhashB );
if ( i == 9 )
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashB, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 );
break;
case 6:
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashA, dataLen<<3 );
mm256_intrlv_2x128( vhashA, hash0, hash1, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 );
intrlv_2x128( vhashA, hash0, hash1, dataLen<<3 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, dataLen );
mm256_dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 );
mm256_intrlv_2x128( vhashA, hash2, hash3, dataLen<<3 );
dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 );
intrlv_2x128( vhashA, hash2, hash3, dataLen<<3 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, dataLen );
mm256_dintrlv_2x128( hash2, hash3, vhashA, dataLen<<3 );
dintrlv_2x128( hash2, hash3, vhashA, dataLen<<3 );
if ( i != 9 )
mm256_intrlv_4x64( vhashB,
hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 );
break;
case 7:
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashA, dataLen<<3 );
cubehashUpdateDigest( &ctx.cube, (byte*)hash0,
(const byte*)hash0, dataLen );
@@ -171,12 +162,10 @@ void timetravel10_4way_hash(void *output, const void *input)
cubehashUpdateDigest( &ctx.cube, (byte*)hash3,
(const byte*)hash3, dataLen );
if ( i != 9 )
mm256_intrlv_4x64( vhashB,
hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 );
break;
case 8:
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashA, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 );
sph_shavite512( &ctx.shavite, hash0, dataLen );
sph_shavite512_close( &ctx.shavite, hash0 );
memcpy( &ctx.shavite, &tt10_4way_ctx.shavite, sizeof ctx.shavite );
@@ -189,22 +178,19 @@ void timetravel10_4way_hash(void *output, const void *input)
sph_shavite512( &ctx.shavite, hash3, dataLen );
sph_shavite512_close( &ctx.shavite, hash3 );
if ( i != 9 )
mm256_intrlv_4x64( vhashB,
hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 );
break;
case 9:
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhashA, dataLen<<3 );
mm256_intrlv_2x128( vhashA, hash0, hash1, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 );
intrlv_2x128( vhashA, hash0, hash1, dataLen<<3 );
simd_2way_update_close( &ctx.simd, vhashA, vhashA, dataLen<<3 );
mm256_dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 );
mm256_intrlv_2x128( vhashA, hash2, hash3, dataLen<<3 );
dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 );
intrlv_2x128( vhashA, hash2, hash3, dataLen<<3 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashA, vhashA, dataLen<<3 );
mm256_dintrlv_2x128( hash2, hash3, vhashA, dataLen<<3 );
dintrlv_2x128( hash2, hash3, vhashA, dataLen<<3 );
if ( i != 9 )
mm256_intrlv_4x64( vhashB,
hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 );
break;
default:
applog(LOG_ERR,"SWERR: timetravel invalid permutation");
@@ -253,7 +239,7 @@ int scanhash_timetravel10_4way( struct work *work,
}
uint64_t *edata = (uint64_t*)endiandata;
mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
do
{

View File

@@ -37,7 +37,7 @@ void tribus_hash_4way(void *state, const void *input)
keccak512_4way( &ctx_keccak, vhash, 64 );
keccak512_4way_close( &ctx_keccak, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// hash echo serially
init_echo( &ctx_echo, 512 );
@@ -86,7 +86,7 @@ int scanhash_tribus_4way( struct work *work, uint32_t max_nonce,
0xFFFF0000,
0 };
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
// precalc midstate
// doing it one way then interleaving would be faster but too


View File

@@ -69,7 +69,7 @@ void x11_4way_hash( void *state, const void *input )
bmw512_4way_close( &ctx.bmw, vhash );
// Serial
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 3 Groestl
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -81,7 +81,7 @@ void x11_4way_hash( void *state, const void *input )
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
// 4way
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
// 4 Skein
skein512_4way( &ctx.skein, vhash, 64 );
@@ -95,16 +95,16 @@ void x11_4way_hash( void *state, const void *input )
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 7 Luffa parallel 2 way 128 bit
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
mm256_intrlv_2x128( vhashB, hash2, hash3, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhashB, hash2, hash3, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashB, vhashB, 64 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhashB, 512 );
// 8 Cubehash
cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 );
@@ -132,13 +132,13 @@ void x11_4way_hash( void *state, const void *input )
sph_shavite512_close( &ctx.shavite, hash3 );
// 10 Simd
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
mm256_intrlv_2x128( vhashB, hash2, hash3, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhashB, hash2, hash3, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhashB, 512 );
// 11 Echo
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -164,7 +164,6 @@ int scanhash_x11_4way( struct work *work, uint32_t max_nonce,
{
uint32_t hash[4*8] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
@@ -177,7 +176,7 @@ int scanhash_x11_4way( struct work *work, uint32_t max_nonce,
uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
0xFFFFF000, 0xFFFF0000, 0 };
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
for (int m=0; m < 6; m++)
if (Htarg <= htmax[m])
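Note: the 2-way Luffa and SIMD stages pair lanes 0/1 and 2/3 with intrlv_2x128 / dintrlv_2x128; the assumed layout simply alternates 128-bit blocks of the two lanes, sketched here with hypothetical names:

    #include <immintrin.h>

    // Hypothetical plain form of the 2x128 (de)interleave used for Luffa/SIMD.
    static void intrlv_2x128_sketch( __m128i *v, const __m128i *h0,
                                     const __m128i *h1, int bit_len )
    {
        for ( int k = 0; k < bit_len / 128; k++ )
        {
            v[ 2*k     ] = h0[k];
            v[ 2*k + 1 ] = h1[k];
        }
    }

    static void dintrlv_2x128_sketch( __m128i *h0, __m128i *h1,
                                      const __m128i *v, int bit_len )
    {
        for ( int k = 0; k < bit_len / 128; k++ )
        {
            h0[k] = v[ 2*k     ];
            h1[k] = v[ 2*k + 1 ];
        }
    }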

View File

@@ -87,19 +87,16 @@ void x11evo_4way_hash( void *state, const void *input )
case 0:
blake512_4way( &ctx.blake, input, 80 );
blake512_4way_close( &ctx.blake, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhash, 64<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 );
break;
case 1:
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );
if ( i >= len-1 )
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhash, 64<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 );
break;
case 2:
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhash, 64<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 );
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(char*)hash0, 512 );
reinit_groestl( &ctx.groestl );
@@ -112,47 +109,40 @@ void x11evo_4way_hash( void *state, const void *input )
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(char*)hash3, 512 );
if ( i < len-1 )
mm256_intrlv_4x64( vhash,
hash0, hash1, hash2, hash3, 64<<3 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 64<<3 );
break;
case 3:
skein512_4way( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhash );
if ( i >= len-1 )
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhash, 64<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 );
break;
case 4:
jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhash );
if ( i >= len-1 )
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhash, 64<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 );
break;
case 5:
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
if ( i >= len-1 )
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhash, 64<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 );
break;
case 6:
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhash, 64<<3 );
mm256_intrlv_2x128( vhash, hash0, hash1, 64<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 );
intrlv_2x128( vhash, hash0, hash1, 64<<3 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 64<<3 );
mm256_intrlv_2x128( vhash, hash2, hash3, 64<<3 );
dintrlv_2x128( hash0, hash1, vhash, 64<<3 );
intrlv_2x128( vhash, hash2, hash3, 64<<3 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 64<<3 );
dintrlv_2x128( hash2, hash3, vhash, 64<<3 );
if ( i < len-1 )
mm256_intrlv_4x64( vhash,
hash0, hash1, hash2, hash3, 64<<3 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 64<<3 );
break;
case 7:
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhash, 64<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 );
cubehashUpdateDigest( &ctx.cube, (byte*)hash0,
(const byte*) hash0, 64 );
memcpy( &ctx.cube, &x11evo_4way_ctx.cube, sizeof(cubehashParam) );
@@ -165,12 +155,10 @@ void x11evo_4way_hash( void *state, const void *input )
cubehashUpdateDigest( &ctx.cube, (byte*)hash3,
(const byte*) hash3, 64 );
if ( i < len-1 )
mm256_intrlv_4x64( vhash,
hash0, hash1, hash2, hash3, 64<<3 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 64<<3 );
break;
case 8:
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhash, 64<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 );
sph_shavite512( &ctx.shavite, hash0, 64 );
sph_shavite512_close( &ctx.shavite, hash0 );
memcpy( &ctx.shavite, &x11evo_4way_ctx.shavite,
@@ -186,26 +174,22 @@ void x11evo_4way_hash( void *state, const void *input )
sph_shavite512( &ctx.shavite, hash3, 64 );
sph_shavite512_close( &ctx.shavite, hash3 );
if ( i < len-1 )
mm256_intrlv_4x64( vhash,
hash0, hash1, hash2, hash3, 64<<3 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 64<<3 );
break;
case 9:
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhash, 64<<3 );
mm256_intrlv_2x128( vhash, hash0, hash1, 64<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 );
intrlv_2x128( vhash, hash0, hash1, 64<<3 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 64<<3 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 64<<3 );
mm256_intrlv_2x128( vhash, hash2, hash3, 64<<3 );
dintrlv_2x128( hash0, hash1, vhash, 64<<3 );
intrlv_2x128( vhash, hash2, hash3, 64<<3 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 64<<3 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 64<<3 );
dintrlv_2x128( hash2, hash3, vhash, 64<<3 );
if ( i < len-1 )
mm256_intrlv_4x64( vhash,
hash0, hash1, hash2, hash3, 64<<3 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 64<<3 );
break;
case 10:
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3,
vhash, 64<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
(const BitSequence *) hash0, 512 );
memcpy( &ctx.echo, &x11evo_4way_ctx.echo, sizeof(hashState_echo) );
@@ -218,8 +202,7 @@ void x11evo_4way_hash( void *state, const void *input )
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *) hash3, 512 );
if ( i < len-1 )
mm256_intrlv_4x64( vhash,
hash0, hash1, hash2, hash3, 64<<3 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 64<<3 );
break;
}
}
@@ -269,7 +252,7 @@ int scanhash_x11evo_4way( struct work* work, uint32_t max_nonce,
}
uint64_t *edata = (uint64_t*)endiandata;
mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
do
{

View File

@@ -70,7 +70,7 @@ void x11gost_4way_hash( void *state, const void *input )
bmw512_4way_close( &ctx.bmw, vhash );
// Serial
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
memcpy( &ctx.groestl, &x11gost_4way_ctx.groestl,
@@ -84,7 +84,7 @@ void x11gost_4way_hash( void *state, const void *input )
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
// 4way
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
skein512_4way( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhash );
@@ -96,7 +96,7 @@ void x11gost_4way_hash( void *state, const void *input )
keccak512_4way_close( &ctx.keccak, vhash );
// Serial
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
sph_gost512( &ctx.gost, hash0, 64 );
sph_gost512_close( &ctx.gost, hash0 );
@@ -110,13 +110,13 @@ void x11gost_4way_hash( void *state, const void *input )
sph_gost512( &ctx.gost, hash3, 64 );
sph_gost512_close( &ctx.gost, hash3 );
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, hash2, hash3, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 );
memcpy( &ctx.cube, &x11gost_4way_ctx.cube, sizeof(cubehashParam) );
@@ -141,12 +141,12 @@ void x11gost_4way_hash( void *state, const void *input )
sph_shavite512( &ctx.shavite, hash3, 64 );
sph_shavite512_close( &ctx.shavite, hash3 );
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, hash2, hash3, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
(const BitSequence *) hash0, 512 );
@@ -183,7 +183,7 @@ int scanhash_x11gost_4way( struct work *work, uint32_t max_nonce,
uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
0xFFFFF000, 0xFFFF0000, 0 };
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
for (int m=0; m < 6; m++)
if (Htarg <= htmax[m])
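Note: the htmax[] / masks[] pair visible above is a cheap pre-filter: the first htmax entry at or above Htarg selects a mask, and a candidate only reaches the full fulltest() comparison when (hash7 & mask) is zero. A sketch of that selection; the htmax values are the ones conventionally paired with this masks table and are assumed here, since the hunk does not show them.

    #include <stdint.h>

    static uint32_t pick_mask_sketch( uint32_t Htarg )
    {
        const uint32_t htmax[] = { 0, 0xF, 0xFF, 0xFFF, 0xFFFF, 0x10000000 };
        const uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
                                   0xFFFFF000, 0xFFFF0000, 0 };
        for ( int m = 0; m < 6; m++ )
            if ( Htarg <= htmax[m] )
                return masks[m];      // easier target -> looser screening mask
        return 0xFFFFFFFF;
    }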

View File

@@ -33,7 +33,6 @@ typedef struct {
simd_2way_context simd;
hashState_echo echo;
hamsi512_4way_context hamsi;
// sph_fugue512_context fugue;
} x12_4way_ctx_holder;
x12_4way_ctx_holder x12_4way_ctx __attribute__ ((aligned (64)));
@@ -52,7 +51,6 @@ void init_x12_4way_ctx()
simd_2way_init( &x12_4way_ctx.simd, 512 );
init_echo( &x12_4way_ctx.echo, 512 );
hamsi512_4way_init( &x12_4way_ctx.hamsi );
// sph_fugue512_init( &x12_4way_ctx.fugue );
};
void x12_4way_hash( void *state, const void *input )
@@ -74,7 +72,7 @@ void x12_4way_hash( void *state, const void *input )
bmw512_4way_close( &ctx.bmw, vhash );
// Serial
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 3 Groestl
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -86,7 +84,7 @@ void x12_4way_hash( void *state, const void *input )
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
// Parallel 4way 64 bit
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
// 4 Skein
skein512_4way( &ctx.skein, vhash, 64 );
@@ -101,16 +99,16 @@ void x12_4way_hash( void *state, const void *input )
keccak512_4way_close( &ctx.keccak, vhash );
// Serial
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 7 Luffa
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, hash2, hash3, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
intrlv_2x128( hash2, hash3, vhash, 512 );
// 8 Cubehash
cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 );
@@ -138,13 +136,13 @@ void x12_4way_hash( void *state, const void *input )
sph_shavite512_close( &ctx.shavite, hash3 );
// 10 Simd
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, hash2, hash3, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
// 11 Echo
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -160,11 +158,11 @@ void x12_4way_hash( void *state, const void *input )
(const BitSequence *) hash3, 512 );
// 12 Hamsi parallel 4way 32 bit
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( state, state+32, state+64, state+96, vhash, 256 );
dintrlv_4x64( state, state+32, state+64, state+96, vhash, 256 );
}
int scanhash_x12_4way( struct work *work, uint32_t max_nonce,
@@ -189,7 +187,7 @@ int scanhash_x12_4way( struct work *work, uint32_t max_nonce,
swab32_array( endiandata, pdata, 20 );
uint64_t *edata = (uint64_t*)endiandata;
mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
for ( int m=0; m < 6; m++ )
if ( Htarg <= htmax[m] )

View File

@@ -53,7 +53,7 @@ void phi1612_4way_hash( void *state, const void *input )
jh512_4way_close( &ctx.jh, vhash );
// Serial to the end
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// Cubehash
cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 );
@@ -124,7 +124,7 @@ int scanhash_phi1612_4way( struct work *work, uint32_t max_nonce,
if ( opt_benchmark )
( (uint32_t*)ptarget )[7] = 0x0cff;
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do {
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(

View File

@@ -33,7 +33,7 @@ void skunk_4way_hash( void *output, const void *input )
skein512_4way( &ctx.skein, input, 80 );
skein512_4way_close( &ctx.skein, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash0, (const byte*)hash0, 64 );
memcpy( &ctx.cube, &skunk_4way_ctx.cube, sizeof(cubehashParam) );
@@ -90,7 +90,7 @@ int scanhash_skunk_4way( struct work *work, uint32_t max_nonce,
if ( opt_benchmark )
((uint32_t*)ptarget)[7] = 0x0cff;
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(

View File

@@ -74,7 +74,7 @@ void x13_4way_hash( void *state, const void *input )
bmw512_4way_close( &ctx.bmw, vhash );
// Serial
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 3 Groestl
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -86,7 +86,7 @@ void x13_4way_hash( void *state, const void *input )
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
// Parallel 4way 64 bit
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
// 4 Skein
skein512_4way( &ctx.skein, vhash, 64 );
@@ -101,16 +101,16 @@ void x13_4way_hash( void *state, const void *input )
keccak512_4way_close( &ctx.keccak, vhash );
// Serial
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 7 Luffa
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, hash2, hash3, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
// 8 Cubehash
cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 );
@@ -138,13 +138,13 @@ void x13_4way_hash( void *state, const void *input )
sph_shavite512_close( &ctx.shavite, hash3 );
// 10 Simd
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, hash2, hash3, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
// 11 Echo
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -160,10 +160,10 @@ void x13_4way_hash( void *state, const void *input )
(const BitSequence *) hash3, 512 );
// 12 Hamsi parallel 4way 32 bit
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 13 Fugue serial
sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -201,7 +201,7 @@ int scanhash_x13_4way( struct work *work, uint32_t max_nonce,
uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
0xFFFFF000, 0xFFFF0000, 0 };
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
for ( int m=0; m < 6; m++ )
if ( Htarg <= htmax[m] )

algo/x13/x13bcd-4way.c (new file, 283 lines)
View File

@@ -0,0 +1,283 @@
#include "x13sm3-gate.h"
#if defined(X13SM3_4WAY)
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "algo/blake/blake-hash-4way.h"
#include "algo/bmw/bmw-hash-4way.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#include "algo/skein/skein-hash-4way.h"
#include "algo/jh/jh-hash-4way.h"
#include "algo/keccak/keccak-hash-4way.h"
//#include "algo/luffa/luffa-hash-2way.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/simd/simd-hash-2way.h"
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/sm3/sm3-hash-4way.h"
#include "algo/hamsi/hamsi-hash-4way.h"
#include "algo/fugue/sph_fugue.h"
typedef struct {
blake512_4way_context blake;
bmw512_4way_context bmw;
hashState_groestl groestl;
skein512_4way_context skein;
jh512_4way_context jh;
keccak512_4way_context keccak;
// luffa_2way_context luffa;
cubehashParam cube;
sph_shavite512_context shavite;
simd_2way_context simd;
hashState_echo echo;
sm3_4way_ctx_t sm3;
hamsi512_4way_context hamsi;
sph_fugue512_context fugue;
} x13bcd_4way_ctx_holder;
x13bcd_4way_ctx_holder x13bcd_4way_ctx __attribute__ ((aligned (64)));
static __thread blake512_4way_context x13bcd_ctx_mid;
void init_x13bcd_4way_ctx()
{
blake512_4way_init( &x13bcd_4way_ctx.blake );
bmw512_4way_init( &x13bcd_4way_ctx.bmw );
init_groestl( &x13bcd_4way_ctx.groestl, 64 );
skein512_4way_init( &x13bcd_4way_ctx.skein );
jh512_4way_init( &x13bcd_4way_ctx.jh );
keccak512_4way_init( &x13bcd_4way_ctx.keccak );
// luffa_2way_init( &x13bcd_4way_ctx.luffa, 512 );
cubehashInit( &x13bcd_4way_ctx.cube, 512, 16, 32 );
sph_shavite512_init( &x13bcd_4way_ctx.shavite );
simd_2way_init( &x13bcd_4way_ctx.simd, 512 );
init_echo( &x13bcd_4way_ctx.echo, 512 );
sm3_4way_init( &x13bcd_4way_ctx.sm3 );
hamsi512_4way_init( &x13bcd_4way_ctx.hamsi );
sph_fugue512_init( &x13bcd_4way_ctx.fugue );
};
void x13bcd_4way_hash( void *state, const void *input )
{
uint64_t hash0[8] __attribute__ ((aligned (64)));
uint64_t hash1[8] __attribute__ ((aligned (64)));
uint64_t hash2[8] __attribute__ ((aligned (64)));
uint64_t hash3[8] __attribute__ ((aligned (64)));
uint64_t vhash[8*4] __attribute__ ((aligned (64)));
x13bcd_4way_ctx_holder ctx;
memcpy( &ctx, &x13bcd_4way_ctx, sizeof(x13bcd_4way_ctx) );
// Blake
memcpy( &ctx.blake, &x13bcd_ctx_mid, sizeof(x13bcd_ctx_mid) );
blake512_4way( &ctx.blake, input + (64<<2), 16 );
// blake512_4way( &ctx.blake, input, 80 );
blake512_4way_close( &ctx.blake, vhash );
// Bmw
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );
// Serial
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// Groestl
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
// Parallel 4way
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
// Skein
skein512_4way( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhash );
// JH
jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhash );
// Keccak
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
// SM3 parallel 32 bit
uint32_t sm3_vhash[32*4] __attribute__ ((aligned (64)));
memset( sm3_vhash, 0, sizeof sm3_vhash );
uint32_t sm3_hash0[32] __attribute__ ((aligned (32)));
memset( sm3_hash0, 0, sizeof sm3_hash0 );
uint32_t sm3_hash1[32] __attribute__ ((aligned (32)));
memset( sm3_hash1, 0, sizeof sm3_hash1 );
uint32_t sm3_hash2[32] __attribute__ ((aligned (32)));
memset( sm3_hash2, 0, sizeof sm3_hash2 );
uint32_t sm3_hash3[32] __attribute__ ((aligned (32)));
memset( sm3_hash3, 0, sizeof sm3_hash3 );
sm3_4way( &ctx.sm3, vhash, 64 );
sm3_4way_close( &ctx.sm3, sm3_vhash );
dintrlv_4x32( hash0, hash1, hash2, hash3, sm3_vhash, 512 );
/*
// Luffa
intrlv_2x128( vhash, hash0, hash1, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
*/
// Cubehash
cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 );
memcpy( &ctx.cube, &x13bcd_4way_ctx.cube, sizeof(cubehashParam) );
cubehashUpdateDigest( &ctx.cube, (byte*)hash1, (const byte*) hash1, 64 );
memcpy( &ctx.cube, &x13bcd_4way_ctx.cube, sizeof(cubehashParam) );
cubehashUpdateDigest( &ctx.cube, (byte*)hash2, (const byte*) hash2, 64 );
memcpy( &ctx.cube, &x13bcd_4way_ctx.cube, sizeof(cubehashParam) );
cubehashUpdateDigest( &ctx.cube, (byte*)hash3, (const byte*) hash3, 64 );
// Shavite
sph_shavite512( &ctx.shavite, hash0, 64 );
sph_shavite512_close( &ctx.shavite, hash0 );
memcpy( &ctx.shavite, &x13bcd_4way_ctx.shavite,
sizeof(sph_shavite512_context) );
sph_shavite512( &ctx.shavite, hash1, 64 );
sph_shavite512_close( &ctx.shavite, hash1 );
memcpy( &ctx.shavite, &x13bcd_4way_ctx.shavite,
sizeof(sph_shavite512_context) );
sph_shavite512( &ctx.shavite, hash2, 64 );
sph_shavite512_close( &ctx.shavite, hash2 );
memcpy( &ctx.shavite, &x13bcd_4way_ctx.shavite,
sizeof(sph_shavite512_context) );
sph_shavite512( &ctx.shavite, hash3, 64 );
sph_shavite512_close( &ctx.shavite, hash3 );
// Simd
intrlv_2x128( vhash, hash0, hash1, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
// Echo
update_final_echo( &ctx.echo, (BitSequence *)hash0,
(const BitSequence *) hash0, 512 );
memcpy( &ctx.echo, &x13bcd_4way_ctx.echo, sizeof(hashState_echo) );
update_final_echo( &ctx.echo, (BitSequence *)hash1,
(const BitSequence *) hash1, 512 );
memcpy( &ctx.echo, &x13bcd_4way_ctx.echo, sizeof(hashState_echo) );
update_final_echo( &ctx.echo, (BitSequence *)hash2,
(const BitSequence *) hash2, 512 );
memcpy( &ctx.echo, &x13bcd_4way_ctx.echo, sizeof(hashState_echo) );
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *) hash3, 512 );
/*
intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
// SM3 parallel 32 bit
uint32_t sm3_vhash[32*4] __attribute__ ((aligned (64)));
memset( sm3_vhash, 0, sizeof sm3_vhash );
uint32_t sm3_hash0[32] __attribute__ ((aligned (32)));
memset( sm3_hash0, 0, sizeof sm3_hash0 );
uint32_t sm3_hash1[32] __attribute__ ((aligned (32)));
memset( sm3_hash1, 0, sizeof sm3_hash1 );
uint32_t sm3_hash2[32] __attribute__ ((aligned (32)));
memset( sm3_hash2, 0, sizeof sm3_hash2 );
uint32_t sm3_hash3[32] __attribute__ ((aligned (32)));
memset( sm3_hash3, 0, sizeof sm3_hash3 );
sm3_4way( &ctx.sm3, vhash, 64 );
sm3_4way_close( &ctx.sm3, sm3_vhash );
dintrlv_4x32( hash0, hash1, hash2, hash3, sm3_vhash, 512 );
*/
// Hamsi parallel 4x32x2
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// Fugue serial
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x13bcd_4way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x13bcd_4way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x13bcd_4way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
memcpy( state, hash0, 32 );
memcpy( state+32, hash1, 32 );
memcpy( state+64, hash2, 32 );
memcpy( state+96, hash3, 32 );
}
int scanhash_x13bcd_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*8] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 9; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t Htarg = ptarget[7];
uint64_t htmax[] = { 0, 0xF, 0xFF,
0xFFF, 0xFFFF, 0x10000000 };
uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
0xFFFFF000, 0xFFFF0000, 0 };
mm256_bswap32_intrlv80_4x64( vdata, pdata );
blake512_4way_init( &x13bcd_ctx_mid );
blake512_4way( &x13bcd_ctx_mid, vdata, 64 );
for ( int m=0; m < 6; m++ )
if ( Htarg <= htmax[m] )
{
uint32_t mask = masks[m];
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
x13bcd_4way_hash( hash, vdata );
pdata[19] = n;
for ( int i = 0; i < 4; i++ )
if ( ( ( (hash+(i<<3))[7] & mask ) == 0 ) )
if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
}
n += 4;
} while ( ( n < max_nonce ) && !work_restart[thr_id].restart );
break;
}
*hashes_done = n - first_nonce + 1;
return 0;
}
#endif
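x13bcd_4way keeps a per-thread Blake-512 midstate: scanhash runs blake512_4way once over the first 64 of the 80 interleaved header bytes when new work arrives, and each nonce pass only hashes the remaining 16 bytes. A condensed sketch of that pattern is below; scan_setup and per_nonce_blake are hypothetical names, the API is the one used above.
// Condensed sketch of the Blake-512 midstate reuse above.
// scan_setup() / per_nonce_blake() are illustrative names only.
#include <string.h>
#include "algo/blake/blake-hash-4way.h"
static __thread blake512_4way_context mid;       // per-thread midstate
static void scan_setup( const void *vdata )      // once per new work
{
   blake512_4way_init( &mid );
   blake512_4way( &mid, vdata, 64 );             // first 64 of 80 bytes
}
static void per_nonce_blake( void *vhash, const void *vdata )
{
   blake512_4way_context ctx;
   memcpy( &ctx, &mid, sizeof(mid) );            // resume from midstate
   // 4-way interleaved data: a byte offset of 64 in one lane is
   // 64*4 = (64<<2) bytes into the interleaved buffer.
   blake512_4way( &ctx, (const char*)vdata + (64<<2), 16 );
   blake512_4way_close( &ctx, vhash );
}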

algo/x13/x13bcd.c (new file, 258 lines)
View File

@@ -0,0 +1,258 @@
#include "x13sm3-gate.h"
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "algo/groestl/sph_groestl.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/luffa/sph_luffa.h"
#include "algo/cubehash/sph_cubehash.h"
#include "algo/simd/sph_simd.h"
#include "algo/echo/sph_echo.h"
#include "algo/hamsi/sph_hamsi.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/sm3/sph_sm3.h"
//#include "algo/luffa/luffa_for_sse2.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/simd/nist.h"
#include "algo/blake/sse2/blake.c"
#include "algo/bmw/sse2/bmw.c"
#include "algo/keccak/sse2/keccak.c"
#include "algo/skein/sse2/skein.c"
#include "algo/jh/sse2/jh_sse2_opt64.h"
#ifndef NO_AES_NI
#include "algo/groestl/aes_ni/hash-groestl.h"
#include "algo/echo/aes_ni/hash_api.h"
#endif
typedef struct {
#ifdef NO_AES_NI
sph_groestl512_context groestl;
sph_echo512_context echo;
#else
hashState_echo echo;
hashState_groestl groestl;
#endif
// hashState_luffa luffa;
cubehashParam cube;
sph_shavite512_context shavite;
hashState_sd simd;
sm3_ctx_t sm3;
sph_hamsi512_context hamsi;
sph_fugue512_context fugue;
} x13bcd_ctx_holder;
x13bcd_ctx_holder x13bcd_ctx;
void init_x13bcd_ctx()
{
#ifdef NO_AES_NI
sph_groestl512_init(&x13bcd_ctx.groestl);
sph_echo512_init(&x13bcd_ctx.echo);
#else
init_echo(&x13bcd_ctx.echo, 512);
init_groestl(&x13bcd_ctx.groestl, 64 );
#endif
// init_luffa(&x13bcd_ctx.luffa,512);
cubehashInit(&x13bcd_ctx.cube,512,16,32);
sph_shavite512_init(&x13bcd_ctx.shavite);
init_sd(&x13bcd_ctx.simd,512);
sm3_init( &x13bcd_ctx.sm3 );
sph_hamsi512_init(&x13bcd_ctx.hamsi);
sph_fugue512_init(&x13bcd_ctx.fugue);
};
void x13bcd_hash(void *output, const void *input)
{
unsigned char hash[128] __attribute__ ((aligned (32)));
x13bcd_ctx_holder ctx;
memcpy(&ctx, &x13bcd_ctx, sizeof(x13bcd_ctx));
unsigned char hashbuf[128];
size_t hashptr;
sph_u64 hashctA;
sph_u64 hashctB;
//---blake1---
DECL_BLK;
BLK_I;
BLK_W;
BLK_C;
//---bmw2---
DECL_BMW;
BMW_I;
BMW_U;
#define M(x) sph_dec64le_aligned(data + 8 * (x))
#define H(x) (h[x])
#define dH(x) (dh[x])
BMW_C;
#undef M
#undef H
#undef dH
//---groestl----
#ifdef NO_AES_NI
sph_groestl512 (&ctx.groestl, hash, 64);
sph_groestl512_close(&ctx.groestl, hash);
#else
update_and_final_groestl( &ctx.groestl, (char*)hash,
(const char*)hash, 512 );
#endif
//---skein4---
DECL_SKN;
SKN_I;
SKN_U;
SKN_C;
//---jh5------
DECL_JH;
JH_H;
//---keccak6---
DECL_KEC;
KEC_I;
KEC_U;
KEC_C;
uint32_t sm3_hash[32] __attribute__ ((aligned (32)));
memset(sm3_hash, 0, sizeof sm3_hash);
sph_sm3(&ctx.sm3, hash, 64);
sph_sm3_close(&ctx.sm3, sm3_hash);
cubehashUpdateDigest( &ctx.cube, (byte*) hash,
(const byte*)sm3_hash, 64 );
/*
//--- luffa7
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
(const BitSequence*)hash, 64 );
// 8 Cube
cubehashUpdateDigest( &ctx.cube, (byte*) hash,
(const byte*)hash, 64 );
*/
// 9 Shavite
sph_shavite512( &ctx.shavite, hash, 64);
sph_shavite512_close( &ctx.shavite, hash);
// 10 Simd
update_final_sd( &ctx.simd, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
//11---echo---
#ifdef NO_AES_NI
sph_echo512(&ctx.echo, hash, 64);
sph_echo512_close(&ctx.echo, hash);
#else
update_final_echo ( &ctx.echo, (BitSequence *)hash,
(const BitSequence *)hash, 512 );
#endif
/*
uint32_t sm3_hash[32] __attribute__ ((aligned (32)));
memset(sm3_hash, 0, sizeof sm3_hash);
sph_sm3(&ctx.sm3, hash, 64);
sph_sm3_close(&ctx.sm3, sm3_hash);
sph_hamsi512(&ctx.hamsi, sm3_hash, 64);
*/
sph_hamsi512(&ctx.hamsi, hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash);
sph_fugue512(&ctx.fugue, hash, 64);
sph_fugue512_close(&ctx.fugue, hash);
asm volatile ("emms");
memcpy(output, hash, 32);
}
int scanhash_x13bcd( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr)
{
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t hash64[8] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19] - 1;
const uint32_t first_nonce = pdata[19];
int thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t Htarg = ptarget[7];
uint64_t htmax[] = {
0,
0xF,
0xFF,
0xFFF,
0xFFFF,
0x10000000
};
uint32_t masks[] = {
0xFFFFFFFF,
0xFFFFFFF0,
0xFFFFFF00,
0xFFFFF000,
0xFFFF0000,
0
};
// we need bigendian data...
swab32_array( endiandata, pdata, 20 );
#ifdef DEBUG_ALGO
if (Htarg != 0)
printf("[%d] Htarg=%X\n", thr_id, Htarg);
#endif
for (int m=0; m < 6; m++) {
if (Htarg <= htmax[m]) {
uint32_t mask = masks[m];
do {
pdata[19] = ++n;
be32enc(&endiandata[19], n);
x13bcd_hash(hash64, endiandata);
#ifndef DEBUG_ALGO
if ((!(hash64[7] & mask)) && fulltest(hash64, ptarget)) {
*hashes_done = n - first_nonce + 1;
return true;
}
#else
if (!(n % 0x1000) && !thr_id) printf(".");
if (!(hash64[7] & mask)) {
printf("[%d]",thr_id);
if (fulltest(hash64, ptarget)) {
work_set_target_ratio( work, hash64 );
*hashes_done = n - first_nonce + 1;
return true;
}
}
#endif
} while (n < max_nonce && !work_restart[thr_id].restart);
// see blake.c for the rationale behind the htmax => mask loop
break;
}
}
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
return 0;
}
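The htmax/masks pair used by the scanhash loops above is a cheap pre-filter: the widest mask whose htmax still covers the target's high word is selected once, then fulltest() is only called for candidates whose masked high word is zero. A standalone sketch of the idea (select_mask and worth_fulltest are illustrative names):
// Sketch of the htmax/masks pre-filter; illustrative names only.
#include <stdint.h>
#include <stdbool.h>
static uint32_t select_mask( uint32_t Htarg )
{
   const uint64_t htmax[] = { 0, 0xF, 0xFF, 0xFFF, 0xFFFF, 0x10000000 };
   const uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
                              0xFFFFF000, 0xFFFF0000, 0 };
   for ( int m = 0; m < 6; m++ )
      if ( Htarg <= htmax[m] ) return masks[m];
   return 0;
}
// Only candidates that pass this cheap check are worth a fulltest().
static bool worth_fulltest( const uint32_t *hash, uint32_t mask )
{
   return ( hash[7] & mask ) == 0;
}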

View File

@@ -81,7 +81,7 @@ void x13sm3_4way_hash( void *state, const void *input )
bmw512_4way_close( &ctx.bmw, vhash );
// Serial
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// Groestl
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -93,7 +93,7 @@ void x13sm3_4way_hash( void *state, const void *input )
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
// Parallel 4way
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
// Skein
skein512_4way( &ctx.skein, vhash, 64 );
@@ -108,16 +108,16 @@ void x13sm3_4way_hash( void *state, const void *input )
keccak512_4way_close( &ctx.keccak, vhash );
// Serial to the end
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// Luffa
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, hash2, hash3, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
// Cubehash
cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 );
@@ -145,13 +145,13 @@ void x13sm3_4way_hash( void *state, const void *input )
sph_shavite512_close( &ctx.shavite, hash3 );
// Simd
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, hash2, hash3, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
// Echo
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -185,10 +185,10 @@ void x13sm3_4way_hash( void *state, const void *input )
dintrlv_4x32( hash0, hash1, hash2, hash3, sm3_vhash, 512 );
// Hamsi parallel 4x32x2
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// Fugue serial
sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -226,7 +226,7 @@ int scanhash_x13sm3_4way( struct work *work, uint32_t max_nonce,
uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
0xFFFFF000, 0xFFFF0000, 0 };
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
blake512_4way_init( &x13sm3_ctx_mid );
blake512_4way( &x13sm3_ctx_mid, vdata, 64 );

View File

@@ -16,3 +16,19 @@ bool register_x13sm3_algo( algo_gate_t* gate )
return true;
};
bool register_x13bcd_algo( algo_gate_t* gate )
{
#if defined (X13SM3_4WAY)
init_x13bcd_4way_ctx();
gate->scanhash = (void*)&scanhash_x13bcd_4way;
gate->hash = (void*)&x13bcd_4way_hash;
#else
init_x13bcd_ctx();
gate->scanhash = (void*)&scanhash_x13bcd;
gate->hash = (void*)&x13bcd_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->get_max64 = (void*)&get_max64_0x3ffff;
return true;
};

View File

@@ -10,23 +10,31 @@
bool register_x13sm3_algo( algo_gate_t* gate );
bool register_x13bcd_algo( algo_gate_t* gate );
#if defined(X13SM3_4WAY)
void x13sm3_4way_hash( void *state, const void *input );
int scanhash_x13sm3_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void init_x13sm3_4way_ctx();
void x13bcd_4way_hash( void *state, const void *input );
int scanhash_x13bcd_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void init_x13bcd_4way_ctx();
#endif
void x13sm3_hash( void *state, const void *input );
int scanhash_x13sm3( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void init_x13sm3_ctx();
void x13bcd_hash( void *state, const void *input );
int scanhash_x13bcd( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void init_x13bcd_ctx();
#endif

View File

@@ -39,7 +39,7 @@ void polytimos_4way_hash( void *output, const void *input )
// Need to convert from 64 bit interleaved to 32 bit interleaved.
uint32_t vhash32[16*4];
mm256_rintrlv_4x64_4x32( vhash32, vhash, 512 );
rintrlv_4x64_4x32( vhash32, vhash, 512 );
shabal512_4way_init( &ctx.shabal );
shabal512_4way( &ctx.shabal, vhash32, 64 );
shabal512_4way_close( &ctx.shabal, vhash32 );
@@ -58,15 +58,15 @@ void polytimos_4way_hash( void *output, const void *input )
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *) hash3, 512 );
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, hash2, hash3, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -117,7 +117,7 @@ int scanhash_polytimos_4way( struct work *work, uint32_t max_nonce,
if ( opt_benchmark )
ptarget[7] = 0x0cff;
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do {
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );

View File

@@ -40,7 +40,7 @@ void veltor_4way_hash( void *output, const void *input )
skein512_4way( &ctx.skein, input, 80 );
skein512_4way_close( &ctx.skein, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
sph_shavite512( &ctx.shavite, hash0, 64 );
sph_shavite512_close( &ctx.shavite, hash0 );
@@ -94,7 +94,7 @@ int scanhash_veltor_4way( struct work *work, uint32_t max_nonce,
if ( opt_benchmark )
ptarget[7] = 0x0cff;
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do
{

View File

@@ -78,7 +78,7 @@ void x14_4way_hash( void *state, const void *input )
bmw512_4way_close( &ctx.bmw, vhash );
// Serial
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 3 Groestl
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -90,7 +90,7 @@ void x14_4way_hash( void *state, const void *input )
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
// Parallel 4way
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
// 4 Skein
skein512_4way( &ctx.skein, vhash, 64 );
@@ -105,16 +105,16 @@ void x14_4way_hash( void *state, const void *input )
keccak512_4way_close( &ctx.keccak, vhash );
// Serial
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 7 Luffa
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, hash2, hash3, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
// 8 Cubehash
cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 );
@@ -142,13 +142,13 @@ void x14_4way_hash( void *state, const void *input )
sph_shavite512_close( &ctx.shavite, hash3 );
// 10 Simd
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, hash2, hash3, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
// 11 Echo
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -164,10 +164,10 @@ void x14_4way_hash( void *state, const void *input )
(const BitSequence *) hash3, 512 );
// 12 Hamsi parallel 4way 32 bit
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 13 Fugue serial
sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -205,7 +205,7 @@ int scanhash_x14_4way( struct work *work, uint32_t max_nonce,
uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
0xFFFFF000, 0xFFFF0000, 0 };
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
for ( int m=0; m < 6; m++ )
if ( Htarg <= htmax[m] )

View File

@@ -81,7 +81,7 @@ void x15_4way_hash( void *state, const void *input )
bmw512_4way_close( &ctx.bmw, vhash );
// Serial
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 3 Groestl
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -93,7 +93,7 @@ void x15_4way_hash( void *state, const void *input )
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
// Parallel 4way
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
// 4 Skein
skein512_4way( &ctx.skein, vhash, 64 );
@@ -108,16 +108,16 @@ void x15_4way_hash( void *state, const void *input )
keccak512_4way_close( &ctx.keccak, vhash );
// Serial to the end
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 7 Luffa
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, hash2, hash3, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
// 8 Cubehash
cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 );
@@ -145,13 +145,13 @@ void x15_4way_hash( void *state, const void *input )
sph_shavite512_close( &ctx.shavite, hash3 );
// 10 Simd
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
intrlv_2x128( vhash, hash0, hash1, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, hash2, hash3, 512 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, hash2, hash3, 512 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
// 11 Echo
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -167,10 +167,10 @@ void x15_4way_hash( void *state, const void *input )
(const BitSequence *) hash3, 512 );
// 12 Hamsi parallel 4way 32 bit
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 13 Fugue
sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -230,7 +230,8 @@ int scanhash_x15_4way( struct work *work, uint32_t max_nonce,
uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
0xFFFFF000, 0xFFFF0000, 0 };
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
for ( int m=0; m < 6; m++ )
if ( Htarg <= htmax[m] )

View File

@@ -67,7 +67,7 @@ void x16r_4way_hash( void* output, const void* input )
void *in3 = (void*) hash3;
int size = 80;
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, input, 640 );
dintrlv_4x64( hash0, hash1, hash2, hash3, input, 640 );
if ( s_ntime == UINT32_MAX )
{
@@ -96,11 +96,11 @@ void x16r_4way_hash( void* output, const void* input )
blake512_4way( &ctx.blake, input, size );
else
{
mm256_intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
blake512_4way( &ctx.blake, vhash, size );
}
blake512_4way_close( &ctx.blake, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case BMW:
bmw512_4way_init( &ctx.bmw );
@@ -108,11 +108,11 @@ void x16r_4way_hash( void* output, const void* input )
bmw512_4way( &ctx.bmw, input, size );
else
{
mm256_intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
bmw512_4way( &ctx.bmw, vhash, size );
}
bmw512_4way_close( &ctx.bmw, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case GROESTL:
init_groestl( &ctx.groestl, 64 );
@@ -134,11 +134,11 @@ void x16r_4way_hash( void* output, const void* input )
skein512_4way( &ctx.skein, input, size );
else
{
mm256_intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
skein512_4way( &ctx.skein, vhash, size );
}
skein512_4way_close( &ctx.skein, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case JH:
jh512_4way_init( &ctx.jh );
@@ -146,11 +146,11 @@ void x16r_4way_hash( void* output, const void* input )
jh512_4way( &ctx.jh, input, size );
else
{
mm256_intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
jh512_4way( &ctx.jh, vhash, size );
}
jh512_4way_close( &ctx.jh, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case KECCAK:
keccak512_4way_init( &ctx.keccak );
@@ -158,21 +158,21 @@ void x16r_4way_hash( void* output, const void* input )
keccak512_4way( &ctx.keccak, input, size );
else
{
mm256_intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
keccak512_4way( &ctx.keccak, vhash, size );
}
keccak512_4way_close( &ctx.keccak, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case LUFFA:
mm256_intrlv_2x128( vhash, in0, in1, size<<3 );
intrlv_2x128( vhash, in0, in1, size<<3 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, size );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, in2, in3, size<<3 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, in2, in3, size<<3 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, size);
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
break;
case CUBEHASH:
cubehashInit( &ctx.cube, 512, 16, 32 );
@@ -203,14 +203,14 @@ void x16r_4way_hash( void* output, const void* input )
sph_shavite512_close( &ctx.shavite, hash3 );
break;
case SIMD:
mm256_intrlv_2x128( vhash, in0, in1, size<<3 );
intrlv_2x128( vhash, in0, in1, size<<3 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, size<<3 );
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
mm256_intrlv_2x128( vhash, in2, in3, size<<3 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, in2, in3, size<<3 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, size<<3 );
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
break;
case ECHO:
init_echo( &ctx.echo, 512 );
@@ -227,11 +227,11 @@ void x16r_4way_hash( void* output, const void* input )
(const BitSequence*)in3, size<<3 );
break;
case HAMSI:
mm256_intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, size );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case FUGUE:
sph_fugue512_init( &ctx.fugue );
@@ -269,11 +269,11 @@ void x16r_4way_hash( void* output, const void* input )
sph_whirlpool_close( &ctx.whirlpool, hash3 );
break;
case SHA_512:
mm256_intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
sha512_4way_init( &ctx.sha512 );
sha512_4way( &ctx.sha512, vhash, size );
sha512_4way_close( &ctx.sha512, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
}
size = 64;
@@ -316,7 +316,7 @@ int scanhash_x16r_4way( struct work *work, uint32_t max_nonce,
ptarget[7] = 0x0cff;
uint64_t *edata = (uint64_t*)endiandata;
mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
do
{

View File

@@ -62,3 +62,149 @@ bool register_x16s_algo( algo_gate_t* gate )
return true;
};
////////////////
//
// X16RT
void x16rt_getTimeHash( const uint32_t timeStamp, void* timeHash )
{
int32_t maskedTime = timeStamp & 0xffffff80;
sha256d( (unsigned char*)timeHash, (const unsigned char*)( &maskedTime ),
sizeof( maskedTime ) );
}
void x16rt_getAlgoString( const uint32_t *timeHash, char *output)
{
char *sptr = output;
uint8_t* data = (uint8_t*)timeHash;
for (uint8_t j = 0; j < X16R_HASH_FUNC_COUNT; j++) {
uint8_t b = (15 - j) >> 1; // 16 ascii hex chars, reversed
uint8_t algoDigit = (j & 1) ? data[b] & 0xF : data[b] >> 4;
if (algoDigit >= 10)
sprintf(sptr, "%c", 'A' + (algoDigit - 10));
else
sprintf(sptr, "%u", (uint32_t) algoDigit);
sptr++;
}
*sptr = '\0';
}
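Together these two helpers derive the x16rt hash order from the block time instead of the previous block hash: the timestamp is masked to a 128-second window, double-SHA256'd, and the first 8 bytes are read as 16 hex digits in reverse nibble order. A short usage sketch, relying only on the declarations added to x16r-gate.h (example_order_from_time is a hypothetical caller):
// Usage sketch for the two helpers above; hypothetical caller only.
#include "x16r-gate.h"
static void example_order_from_time( uint32_t ntime )
{
   uint32_t timeHash[8];
   char order[ X16R_HASH_FUNC_COUNT + 1 ];
   // ntime values sharing the top 25 bits (a 128 second window)
   // produce the same hash order.
   x16rt_getTimeHash( ntime, timeHash );
   x16rt_getAlgoString( timeHash, order );
   // order now holds 16 hex digits, one per function of the x16 chain,
   // in the same 0..F encoding used by x16r.
}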
void x16rt_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
{
uchar merkle_tree[64] = { 0 };
size_t t;
algo_gate.gen_merkle_root( merkle_tree, sctx );
// Increment extranonce2
for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ );
// Assemble block header
// algo_gate.build_block_header( g_work, le32dec( sctx->job.version ),
// (uint32_t*) sctx->job.prevhash, (uint32_t*) merkle_tree,
// le32dec( sctx->job.ntime ), le32dec(sctx->job.nbits) );
int i;
memset( g_work->data, 0, sizeof(g_work->data) );
g_work->data[0] = le32dec( sctx->job.version );
if ( have_stratum )
for ( i = 0; i < 8; i++ )
g_work->data[ 1+i ] = le32dec( (uint32_t*)sctx->job.prevhash + i );
else
for (i = 0; i < 8; i++)
g_work->data[ 8-i ] = le32dec( (uint32_t*)sctx->job.prevhash + i );
g_work->data[ algo_gate.ntime_index ] = le32dec( sctx->job.ntime );
g_work->data[ algo_gate.nbits_index ] = le32dec( sctx->job.nbits );
g_work->data[20] = 0x80000000;
g_work->data[31] = 0x00000280;
for ( i = 0; i < 8; i++ )
g_work->merkleroothash[7 - i] = be32dec((uint32_t *)merkle_tree + i);
for ( i = 0; i < 8; i++ )
g_work->witmerkleroothash[7 - i] = be32dec((uint32_t *)merkle_tree + i);
for ( i = 0; i < 8; i++ )
g_work->denom10[i] = le32dec((uint32_t *)sctx->job.denom10 + i);
for ( i = 0; i < 8; i++ )
g_work->denom100[i] = le32dec((uint32_t *)sctx->job.denom100 + i);
for ( i = 0; i < 8; i++ )
g_work->denom1000[i] = le32dec((uint32_t *)sctx->job.denom1000 + i);
for ( i = 0; i < 8; i++ )
g_work->denom10000[i] = le32dec((uint32_t *)sctx->job.denom10000 + i);
uint32_t pofnhash[8];
memset(pofnhash, 0x00, 32);
char denom10_str [ 2 * sizeof( g_work->denom10 ) + 1 ];
char denom100_str [ 2 * sizeof( g_work->denom100 ) + 1 ];
char denom1000_str [ 2 * sizeof( g_work->denom1000 ) + 1 ];
char denom10000_str [ 2 * sizeof( g_work->denom10000 ) + 1 ];
char merkleroot_str [ 2 * sizeof( g_work->merkleroothash ) + 1 ];
char witmerkleroot_str[ 2 * sizeof( g_work->witmerkleroothash ) + 1 ];
char pofn_str [ 2 * sizeof( pofnhash ) + 1 ];
cbin2hex( denom10_str, (char*) g_work->denom10, 32 );
cbin2hex( denom100_str, (char*) g_work->denom100, 32 );
cbin2hex( denom1000_str, (char*) g_work->denom1000, 32 );
cbin2hex( denom10000_str, (char*) g_work->denom10000, 32 );
cbin2hex( merkleroot_str, (char*) g_work->merkleroothash, 32 );
cbin2hex( witmerkleroot_str, (char*) g_work->witmerkleroothash, 32 );
cbin2hex( pofn_str, (char*) pofnhash, 32 );
if ( true )
{
char* data;
data = (char*)malloc( 2 + strlen( denom10_str ) * 4 + 16 * 4
+ strlen( merkleroot_str ) * 3 );
// Build the block header veildatahash in hex
sprintf( data, "%s%s%s%s%s%s%s%s%s%s%s%s",
merkleroot_str, witmerkleroot_str, "04",
"0a00000000000000", denom10_str,
"6400000000000000", denom100_str,
"e803000000000000", denom1000_str,
"1027000000000000", denom10000_str, pofn_str );
// Convert the hex to binary
uint32_t test[100];
hex2bin( (unsigned char*)(&test), data, 257);
// Compute the sha256d of the binary
uint32_t _ALIGN(64) hash[8];
sha256d( (unsigned char*)hash, (unsigned char*)&(test), 257);
// assign the veildatahash in the blockheader
for ( i = 0; i < 8; i++ )
g_work->data[16 - i] = le32dec(hash + i);
free(data);
}
}
bool register_x16rt_algo( algo_gate_t* gate )
{
#if defined (X16R_4WAY)
gate->scanhash = (void*)&scanhash_x16rt_4way;
gate->hash = (void*)&x16rt_4way_hash;
#else
gate->scanhash = (void*)&scanhash_x16rt;
gate->hash = (void*)&x16rt_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->set_target = (void*)&alt_set_target;
return true;
};
bool register_x16rt_veil_algo( algo_gate_t* gate )
{
#if defined (X16R_4WAY)
gate->scanhash = (void*)&scanhash_x16rt_4way;
gate->hash = (void*)&x16rt_4way_hash;
#else
gate->scanhash = (void*)&scanhash_x16rt;
gate->hash = (void*)&x16rt_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->set_target = (void*)&alt_set_target;
gate->build_extraheader = (void*)&x16rt_build_extraheader;
return true;
};

View File

@@ -4,6 +4,7 @@
#include "algo-gate-api.h"
#include "simd-utils.h"
#include <stdint.h>
#include <unistd.h>
#if defined(__AVX2__) && defined(__AES__)
#define X16R_4WAY
@@ -30,11 +31,15 @@ enum x16r_Algo {
};
void (*x16_r_s_getAlgoString) ( const uint8_t*, char* );
void x16r_getAlgoString( const uint8_t* prevblock, char *output );
void x16s_getAlgoString( const uint8_t* prevblock, char *output );
void x16r_getAlgoString( const uint8_t *prevblock, char *output );
void x16s_getAlgoString( const uint8_t *prevblock, char *output );
void x16rt_getAlgoString( const uint32_t *timeHash, char *output );
void x16rt_getTimeHash( const uint32_t timeStamp, void* timeHash );
bool register_x16r_algo( algo_gate_t* gate );
bool register_x16s_algo( algo_gate_t* gate );
bool register_x16rt_algo( algo_gate_t* gate );
#if defined(X16R_4WAY)
@@ -42,11 +47,18 @@ void x16r_4way_hash( void *state, const void *input );
int scanhash_x16r_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void x16rt_4way_hash( void *state, const void *input );
int scanhash_x16rt_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
void x16r_hash( void *state, const void *input );
int scanhash_x16r( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void x16rt_hash( void *state, const void *input );
int scanhash_x16rt( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif

algo/x16/x16rt-4way.c (new file, 353 lines)
View File

@@ -0,0 +1,353 @@
#include "x16r-gate.h"
#if defined (X16R_4WAY)
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "algo/blake/blake-hash-4way.h"
#include "algo/bmw/bmw-hash-4way.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#include "algo/skein/skein-hash-4way.h"
#include "algo/jh/jh-hash-4way.h"
#include "algo/keccak/keccak-hash-4way.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/luffa/luffa-hash-2way.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/simd/simd-hash-2way.h"
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/hamsi/hamsi-hash-4way.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/shabal/shabal-hash-4way.h"
#include "algo/whirlpool/sph_whirlpool.h"
#include "algo/sha/sha2-hash-4way.h"
static __thread uint32_t s_ntime = UINT32_MAX;
static __thread bool s_implemented = false;
static __thread char hashOrder[X16R_HASH_FUNC_COUNT + 1] = { 0 };
union _x16rt_4way_context_overlay
{
blake512_4way_context blake;
bmw512_4way_context bmw;
hashState_echo echo;
hashState_groestl groestl;
skein512_4way_context skein;
jh512_4way_context jh;
keccak512_4way_context keccak;
luffa_2way_context luffa;
cubehashParam cube;
sph_shavite512_context shavite;
simd_2way_context simd;
hamsi512_4way_context hamsi;
sph_fugue512_context fugue;
shabal512_4way_context shabal;
sph_whirlpool_context whirlpool;
sha512_4way_context sha512;
};
typedef union _x16rt_4way_context_overlay x16rt_4way_context_overlay;
void x16rt_4way_hash( void* output, const void* input )
{
uint32_t hash0[24] __attribute__ ((aligned (64)));
uint32_t hash1[24] __attribute__ ((aligned (64)));
uint32_t hash2[24] __attribute__ ((aligned (64)));
uint32_t hash3[24] __attribute__ ((aligned (64)));
uint32_t vhash[24*4] __attribute__ ((aligned (64)));
x16rt_4way_context_overlay ctx;
void *in0 = (void*) hash0;
void *in1 = (void*) hash1;
void *in2 = (void*) hash2;
void *in3 = (void*) hash3;
int size = 80;
dintrlv_4x64( hash0, hash1, hash2, hash3, input, 640 );
/*
void *in = (void*) input;
uint32_t *in32 = (uint32_t*) hash0;
uint32_t ntime = in32[17];
if ( s_ntime == UINT32_MAX )
{
uint32_t _ALIGN(64) timeHash[8];
x16rt_getTimeHash(ntime, &timeHash);
x16rt_getAlgoString(&timeHash[0], hashOrder);
}
*/
// Input data is both 64 bit interleaved (input)
// and deinterleaved in hash0-3.
// If the first function uses 64 bit data it is not required to interleave
// the input first; it may use the interleaved data directly, ie 4way 64 bit.
// All other functions assume data is deinterleaved in hash0-3.
// All functions must exit with data deinterleaved in hash0-3.
// Aliases in0-3 point to hash0-3; size is 80 bytes for the first
// function in hashOrder and 64 bytes for all later ones.
for ( int i = 0; i < 16; i++ )
{
const char elem = hashOrder[i];
const uint8_t algo = elem >= 'A' ? elem - 'A' + 10 : elem - '0';
switch ( algo )
{
case BLAKE:
blake512_4way_init( &ctx.blake );
if ( i == 0 )
blake512_4way( &ctx.blake, input, size );
else
{
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
blake512_4way( &ctx.blake, vhash, size );
}
blake512_4way_close( &ctx.blake, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case BMW:
bmw512_4way_init( &ctx.bmw );
if ( i == 0 )
bmw512_4way( &ctx.bmw, input, size );
else
{
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
bmw512_4way( &ctx.bmw, vhash, size );
}
bmw512_4way_close( &ctx.bmw, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case GROESTL:
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(const char*)in0, size<<3 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash1,
(const char*)in1, size<<3 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash2,
(const char*)in2, size<<3 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(const char*)in3, size<<3 );
break;
case SKEIN:
skein512_4way_init( &ctx.skein );
if ( i == 0 )
skein512_4way( &ctx.skein, input, size );
else
{
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
skein512_4way( &ctx.skein, vhash, size );
}
skein512_4way_close( &ctx.skein, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case JH:
jh512_4way_init( &ctx.jh );
if ( i == 0 )
jh512_4way( &ctx.jh, input, size );
else
{
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
jh512_4way( &ctx.jh, vhash, size );
}
jh512_4way_close( &ctx.jh, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case KECCAK:
keccak512_4way_init( &ctx.keccak );
if ( i == 0 )
keccak512_4way( &ctx.keccak, input, size );
else
{
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
keccak512_4way( &ctx.keccak, vhash, size );
}
keccak512_4way_close( &ctx.keccak, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case LUFFA:
intrlv_2x128( vhash, in0, in1, size<<3 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, size );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, in2, in3, size<<3 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, size);
dintrlv_2x128( hash2, hash3, vhash, 512 );
break;
case CUBEHASH:
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash0,
(const byte*)in0, size );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash1,
(const byte*)in1, size );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash2,
(const byte*)in2, size );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash3,
(const byte*)in3, size );
break;
case SHAVITE:
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in0, size );
sph_shavite512_close( &ctx.shavite, hash0 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in1, size );
sph_shavite512_close( &ctx.shavite, hash1 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in2, size );
sph_shavite512_close( &ctx.shavite, hash2 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in3, size );
sph_shavite512_close( &ctx.shavite, hash3 );
break;
case SIMD:
intrlv_2x128( vhash, in0, in1, size<<3 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, size<<3 );
dintrlv_2x128( hash0, hash1, vhash, 512 );
intrlv_2x128( vhash, in2, in3, size<<3 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhash, vhash, size<<3 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
break;
case ECHO:
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash0,
(const BitSequence*)in0, size<<3 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash1,
(const BitSequence*)in1, size<<3 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash2,
(const BitSequence*)in2, size<<3 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash3,
(const BitSequence*)in3, size<<3 );
break;
case HAMSI:
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, size );
hamsi512_4way_close( &ctx.hamsi, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case FUGUE:
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in0, size );
sph_fugue512_close( &ctx.fugue, hash0 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in1, size );
sph_fugue512_close( &ctx.fugue, hash1 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in2, size );
sph_fugue512_close( &ctx.fugue, hash2 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in3, size );
sph_fugue512_close( &ctx.fugue, hash3 );
break;
case SHABAL:
intrlv_4x32( vhash, in0, in1, in2, in3, size<<3 );
shabal512_4way_init( &ctx.shabal );
shabal512_4way( &ctx.shabal, vhash, size );
shabal512_4way_close( &ctx.shabal, vhash );
dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );
break;
case WHIRLPOOL:
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in0, size );
sph_whirlpool_close( &ctx.whirlpool, hash0 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in1, size );
sph_whirlpool_close( &ctx.whirlpool, hash1 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in2, size );
sph_whirlpool_close( &ctx.whirlpool, hash2 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in3, size );
sph_whirlpool_close( &ctx.whirlpool, hash3 );
break;
case SHA_512:
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
sha512_4way_init( &ctx.sha512 );
sha512_4way( &ctx.sha512, vhash, size );
sha512_4way_close( &ctx.sha512, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
break;
}
size = 64;
}
memcpy( output, hash0, 32 );
memcpy( output+32, hash1, 32 );
memcpy( output+64, hash2, 32 );
memcpy( output+96, hash3, 32 );
}
int scanhash_x16rt_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr)
{
uint32_t hash[4*16] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t _ALIGN(64) timeHash[4*8];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
__m256i *noncev = (__m256i*)vdata + 9; // aligned
volatile uint8_t *restart = &(work_restart[thr_id].restart);
casti_m256i( endiandata, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) );
casti_m256i( endiandata, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) );
casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
uint32_t ntime = swab32( pdata[17] );
if ( s_ntime != ntime )
{
x16rt_getTimeHash( ntime, &timeHash );
x16rt_getAlgoString( &timeHash[0], hashOrder );
s_ntime = ntime;
s_implemented = true;
if ( opt_debug && !thr_id )
applog( LOG_INFO, "hash order: %s time: (%08x) time hash: (%08x)",
hashOrder, ntime, timeHash );
}
if ( !s_implemented )
{
applog( LOG_WARNING, "s not implemented");
sleep(1);
return 0;
}
if ( opt_benchmark )
ptarget[7] = 0x0cff;
uint64_t *edata = (uint64_t*)endiandata;
intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
x16rt_4way_hash( hash, vdata );
pdata[19] = n;
for ( int i = 0; i < 4; i++ ) if ( (hash+(i<<3))[7] <= Htarg )
if( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
}
n += 4;
} while ( ( n < max_nonce ) && !(*restart) );
*hashes_done = n - first_nonce + 1;
return 0;
}
#endif
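
The 4-way scanner above recomputes the hash order only when the timestamp changes: x16rt_getTimeHash() digests the (masked) ntime and x16rt_getAlgoString() expands that digest into the 16-character hashOrder string, one hex digit per round. Below is a minimal sketch of the nibble-to-character mapping implied by the decoder in the hash loop ( elem >= 'A' ? elem - 'A' + 10 : elem - '0' ); the function name, signature and the exact bytes consumed are assumptions, the real helpers live in x16r-gate.c.

#include <stdint.h>

// Sketch only: build a 16-character order string (plus terminator, so the
// buffer needs 17 bytes as hashOrder does above), one hex digit per
// algorithm slot, from the first 8 bytes of the time hash. Each digit is
// later decoded back to an index 0..15 by x16rt_hash()/x16rt_4way_hash().
static void x16rt_algo_string_sketch( const uint8_t *time_hash, char *order )
{
   for ( int i = 0; i < 16; i++ )
   {
      const uint8_t nibble = ( i & 1 ) ? ( time_hash[ i >> 1 ] & 0x0f )
                                       : ( time_hash[ i >> 1 ] >> 4 );
      order[i] = nibble < 10 ? '0' + nibble : 'A' + ( nibble - 10 );
   }
   order[16] = '\0';
}

Assuming the enumeration follows the X16R specification order (BLAKE = 0 through SHA_512 = 15), a digit 'B' (11) in the resulting string would select HAMSI for that round.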

239
algo/x16/x16rt.c Normal file
View File

@@ -0,0 +1,239 @@
#include "x16r-gate.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "algo/blake/sph_blake.h"
#include "algo/bmw/sph_bmw.h"
#include "algo/groestl/sph_groestl.h"
#include "algo/jh/sph_jh.h"
#include "algo/keccak/sph_keccak.h"
#include "algo/skein/sph_skein.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/luffa/luffa_for_sse2.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/simd/nist.h"
#include "algo/echo/sph_echo.h"
#include "algo/hamsi/sph_hamsi.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/shabal/sph_shabal.h"
#include "algo/whirlpool/sph_whirlpool.h"
#include <openssl/sha.h>
#if defined(__AES__)
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#endif
static __thread uint32_t s_ntime = UINT32_MAX;
static __thread bool s_implemented = false;
static __thread char hashOrder[X16R_HASH_FUNC_COUNT + 1] = { 0 };
union _x16rt_context_overlay
{
#if defined(__AES__)
hashState_echo echo;
hashState_groestl groestl;
#else
sph_groestl512_context groestl;
sph_echo512_context echo;
#endif
sph_blake512_context blake;
sph_bmw512_context bmw;
sph_skein512_context skein;
sph_jh512_context jh;
sph_keccak512_context keccak;
hashState_luffa luffa;
cubehashParam cube;
sph_shavite512_context shavite;
hashState_sd simd;
sph_hamsi512_context hamsi;
sph_fugue512_context fugue;
sph_shabal512_context shabal;
sph_whirlpool_context whirlpool;
SHA512_CTX sha512;
};
typedef union _x16rt_context_overlay x16rt_context_overlay;
void x16rt_hash( void* output, const void* input )
{
uint32_t _ALIGN(128) hash[16];
x16rt_context_overlay ctx;
int size = 80;
void *in = (void*) input;
/*
void *in = (void*) input;
uint32_t *in32 = (uint32_t*) in;
uint32_t ntime = in32[17];
if ( s_ntime == UINT32_MAX )
{
uint32_t _ALIGN(64) timeHash[8];
x16rt_getTimeHash(ntime, &timeHash);
x16rt_getAlgoString(&timeHash[0], hashOrder);
}
*/
for ( int i = 0; i < 16; i++ )
{
const char elem = hashOrder[i];
const uint8_t algo = elem >= 'A' ? elem - 'A' + 10 : elem - '0';
switch ( algo )
{
case BLAKE:
sph_blake512_init( &ctx.blake );
sph_blake512( &ctx.blake, in, size );
sph_blake512_close( &ctx.blake, hash );
break;
case BMW:
sph_bmw512_init( &ctx.bmw );
sph_bmw512(&ctx.bmw, in, size);
sph_bmw512_close(&ctx.bmw, hash);
break;
case GROESTL:
#if defined(__AES__)
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash,
(const char*)in, size<<3 );
#else
sph_groestl512_init( &ctx.groestl );
sph_groestl512( &ctx.groestl, in, size );
sph_groestl512_close(&ctx.groestl, hash);
#endif
break;
case SKEIN:
sph_skein512_init( &ctx.skein );
sph_skein512( &ctx.skein, in, size );
sph_skein512_close( &ctx.skein, hash );
break;
case JH:
sph_jh512_init( &ctx.jh );
sph_jh512(&ctx.jh, in, size );
sph_jh512_close(&ctx.jh, hash );
break;
case KECCAK:
sph_keccak512_init( &ctx.keccak );
sph_keccak512( &ctx.keccak, in, size );
sph_keccak512_close( &ctx.keccak, hash );
break;
case LUFFA:
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
(const BitSequence*)in, size );
break;
case CUBEHASH:
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash,
(const byte*)in, size );
break;
case SHAVITE:
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, in, size );
sph_shavite512_close( &ctx.shavite, hash );
break;
case SIMD:
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash,
(const BitSequence*)in, size<<3 );
break;
case ECHO:
#if defined(__AES__)
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence *)hash,
(const BitSequence*)in, size<<3 );
#else
sph_echo512_init( &ctx.echo );
sph_echo512( &ctx.echo, in, size );
sph_echo512_close( &ctx.echo, hash );
#endif
break;
case HAMSI:
sph_hamsi512_init( &ctx.hamsi );
sph_hamsi512( &ctx.hamsi, in, size );
sph_hamsi512_close( &ctx.hamsi, hash );
break;
case FUGUE:
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, in, size );
sph_fugue512_close( &ctx.fugue, hash );
break;
case SHABAL:
sph_shabal512_init( &ctx.shabal );
sph_shabal512( &ctx.shabal, in, size );
sph_shabal512_close( &ctx.shabal, hash );
break;
case WHIRLPOOL:
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, in, size );
sph_whirlpool_close( &ctx.whirlpool, hash );
break;
case SHA_512:
SHA512_Init( &ctx.sha512 );
SHA512_Update( &ctx.sha512, in, size );
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
break;
}
in = (void*) hash;
size = 64;
}
memcpy(output, hash, 32);
}
int scanhash_x16rt( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(128) hash32[8];
uint32_t _ALIGN(128) endiandata[20];
uint32_t _ALIGN(64) timeHash[8];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
int thr_id = mythr->id; // thr_id arg is deprecated
uint32_t nonce = first_nonce;
volatile uint8_t *restart = &(work_restart[thr_id].restart);
casti_m128i( endiandata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) );
casti_m128i( endiandata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) );
casti_m128i( endiandata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) );
casti_m128i( endiandata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) );
casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
uint32_t ntime = swab32( pdata[17] );
if ( s_ntime != ntime )
{
x16rt_getTimeHash( ntime, &timeHash );
x16rt_getAlgoString( &timeHash[0], hashOrder );
s_ntime = ntime;
s_implemented = true;
if ( opt_debug && !thr_id )
applog( LOG_INFO, "hash order: %s time: (%08x) time hash: (%08x)",
hashOrder, ntime, timeHash );
}
if ( !s_implemented )
{
applog( LOG_WARNING, "s not implemented");
sleep(1);
return 0;
}
if ( opt_benchmark )
ptarget[7] = 0x0cff;
do
{
be32enc( &endiandata[19], nonce );
x16rt_hash( hash32, endiandata );
if ( hash32[7] <= Htarg )
if (fulltest( hash32, ptarget ) && !opt_benchmark )
{
pdata[19] = nonce;
submit_solution( work, hash32, mythr );
}
nonce++;
} while ( nonce < max_nonce && !(*restart) );
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
return 0;
}
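
Before the nonce loop, scanhash_x16rt converts the 80-byte block header to big endian with five 128-bit mm128_bswap_32 stores (the 4-way variant earlier does the same with two 256-bit stores plus one 128-bit store). The scalar equivalent, shown here only for reference, is the be32enc loop that the yescrypt and yespower scanners later in this diff still use; __builtin_bswap32 is the compiler builtin, not a helper from this codebase.

#include <stdint.h>

// Reference sketch: 20 x 32-bit words = 80 bytes, each word swapped to
// big endian before the nonce at word 19 is iterated.
static void bswap_header_80( uint32_t *endiandata, const uint32_t *pdata )
{
   for ( int k = 0; k < 20; k++ )
      endiandata[k] = __builtin_bswap32( pdata[k] );
}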

View File

@@ -69,7 +69,7 @@ void sonoa_4way_hash( void *state, const void *input )
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -80,7 +80,7 @@ void sonoa_4way_hash( void *state, const void *input )
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
skein512_4way_init( &ctx.skein );
skein512_4way( &ctx.skein, vhash, 64 );
@@ -94,7 +94,7 @@ void sonoa_4way_hash( void *state, const void *input )
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -116,8 +116,8 @@ void sonoa_4way_hash( void *state, const void *input )
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );
dintrlv_2x128_512( hash0, hash1, vhashA );
dintrlv_2x128_512( hash2, hash3, vhashB );
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -134,13 +134,13 @@ void sonoa_4way_hash( void *state, const void *input )
// 2
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
bmw512_4way_init( &ctx.bmw );
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -151,7 +151,7 @@ void sonoa_4way_hash( void *state, const void *input )
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
skein512_4way_init( &ctx.skein );
skein512_4way( &ctx.skein, vhash, 64 );
@@ -165,7 +165,7 @@ void sonoa_4way_hash( void *state, const void *input )
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -187,8 +187,8 @@ void sonoa_4way_hash( void *state, const void *input )
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );
dintrlv_2x128_512( hash0, hash1, vhashA );
dintrlv_2x128_512( hash2, hash3, vhashB );
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -203,7 +203,7 @@ void sonoa_4way_hash( void *state, const void *input )
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *) hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
@@ -215,7 +215,7 @@ void sonoa_4way_hash( void *state, const void *input )
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -226,7 +226,7 @@ void sonoa_4way_hash( void *state, const void *input )
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
skein512_4way_init( &ctx.skein );
skein512_4way( &ctx.skein, vhash, 64 );
@@ -240,7 +240,7 @@ void sonoa_4way_hash( void *state, const void *input )
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -262,8 +262,8 @@ void sonoa_4way_hash( void *state, const void *input )
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );
dintrlv_2x128_512( hash0, hash1, vhashA );
dintrlv_2x128_512( hash2, hash3, vhashB );
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -278,13 +278,13 @@ void sonoa_4way_hash( void *state, const void *input )
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *) hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -300,13 +300,13 @@ void sonoa_4way_hash( void *state, const void *input )
sph_fugue512_close( &ctx.fugue, hash3 );
// 4
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
bmw512_4way_init( &ctx.bmw );
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -317,7 +317,7 @@ void sonoa_4way_hash( void *state, const void *input )
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
skein512_4way_init( &ctx.skein );
skein512_4way( &ctx.skein, vhash, 64 );
@@ -331,7 +331,7 @@ void sonoa_4way_hash( void *state, const void *input )
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -353,8 +353,8 @@ void sonoa_4way_hash( void *state, const void *input )
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );
dintrlv_2x128_512( hash0, hash1, vhashA );
dintrlv_2x128_512( hash2, hash3, vhashB );
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -369,13 +369,13 @@ void sonoa_4way_hash( void *state, const void *input )
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *) hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -390,19 +390,19 @@ void sonoa_4way_hash( void *state, const void *input )
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x32_512( vhash, hash0, hash1, hash2, hash3 );
shabal512_4way_init( &ctx.shabal );
shabal512_4way( &ctx.shabal, vhash, 64 );
shabal512_4way_close( &ctx.shabal, vhash );
mm256_rintrlv_4x32_4x64( vhashB, vhash, 512 );
rintrlv_4x32_4x64( vhashB, vhash, 512 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhashB, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -417,8 +417,8 @@ void sonoa_4way_hash( void *state, const void *input )
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *) hash3, 512 );
mm256_intrlv_2x128( vhashA, hash0, hash1, 512 );
mm256_intrlv_2x128( vhashB, hash2, hash3, 512 );
intrlv_2x128_512( vhashA, hash0, hash1 );
intrlv_2x128_512( vhashB, hash2, hash3 );
shavite512_2way_init( &ctx.shavite );
shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, 64 );
@@ -426,19 +426,19 @@ void sonoa_4way_hash( void *state, const void *input )
shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 );
// 5
mm256_rintrlv_2x128_4x64( vhash, vhashA, vhashB, 512 );
rintrlv_2x128_4x64( vhash, vhashA, vhashB, 512 );
bmw512_4way_init( &ctx.bmw );
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );
mm256_rintrlv_4x64_4x32( vhashB, vhash, 512 );
rintrlv_4x64_4x32( vhashB, vhash, 512 );
shabal512_4way_init( &ctx.shabal );
shabal512_4way( &ctx.shabal, vhashB, 64 );
shabal512_4way_close( &ctx.shabal, vhash );
dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x32_512( hash0, hash1, hash2, hash3, vhash );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -449,7 +449,7 @@ void sonoa_4way_hash( void *state, const void *input )
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
skein512_4way_init( &ctx.skein );
skein512_4way( &ctx.skein, vhash, 64 );
@@ -463,7 +463,7 @@ void sonoa_4way_hash( void *state, const void *input )
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -485,8 +485,8 @@ void sonoa_4way_hash( void *state, const void *input )
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );
dintrlv_2x128_512( hash0, hash1, vhashA );
dintrlv_2x128_512( hash2, hash3, vhashB );
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -501,13 +501,13 @@ void sonoa_4way_hash( void *state, const void *input )
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *) hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -522,13 +522,13 @@ void sonoa_4way_hash( void *state, const void *input )
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x32_512( vhash, hash0, hash1, hash2, hash3 );
shabal512_4way_init( &ctx.shabal );
shabal512_4way( &ctx.shabal, vhash, 64 );
shabal512_4way_close( &ctx.shabal, vhash );
dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x32_512( hash0, hash1, hash2, hash3, vhash );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash0, 64 );
@@ -545,13 +545,13 @@ void sonoa_4way_hash( void *state, const void *input )
// 6
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
bmw512_4way_init( &ctx.bmw );
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -562,7 +562,7 @@ void sonoa_4way_hash( void *state, const void *input )
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
skein512_4way_init( &ctx.skein );
skein512_4way( &ctx.skein, vhash, 64 );
@@ -576,7 +576,7 @@ void sonoa_4way_hash( void *state, const void *input )
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -598,8 +598,8 @@ void sonoa_4way_hash( void *state, const void *input )
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );
dintrlv_2x128_512( hash0, hash1, vhashA );
dintrlv_2x128_512( hash2, hash3, vhashB );
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -614,13 +614,13 @@ void sonoa_4way_hash( void *state, const void *input )
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *) hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -635,13 +635,13 @@ void sonoa_4way_hash( void *state, const void *input )
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x32_512( vhash, hash0, hash1, hash2, hash3 );
shabal512_4way_init( &ctx.shabal );
shabal512_4way( &ctx.shabal, vhash, 64 );
shabal512_4way_close( &ctx.shabal, vhash );
dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x32_512( hash0, hash1, hash2, hash3, vhash );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash0, 64 );
@@ -656,13 +656,13 @@ void sonoa_4way_hash( void *state, const void *input )
sph_whirlpool( &ctx.whirlpool, hash3, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash3 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
sha512_4way_init( &ctx.sha512 );
sha512_4way( &ctx.sha512, vhash, 64 );
sha512_4way_close( &ctx.sha512, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash0, 64 );
@@ -679,13 +679,13 @@ void sonoa_4way_hash( void *state, const void *input )
// 7
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
bmw512_4way_init( &ctx.bmw );
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -696,7 +696,7 @@ void sonoa_4way_hash( void *state, const void *input )
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
skein512_4way_init( &ctx.skein );
skein512_4way( &ctx.skein, vhash, 64 );
@@ -710,7 +710,7 @@ void sonoa_4way_hash( void *state, const void *input )
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -732,8 +732,8 @@ void sonoa_4way_hash( void *state, const void *input )
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );
mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );
dintrlv_2x128_512( hash0, hash1, vhashA );
dintrlv_2x128_512( hash2, hash3, vhashB );
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -748,13 +748,13 @@ void sonoa_4way_hash( void *state, const void *input )
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *) hash3, 512 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -769,13 +769,13 @@ void sonoa_4way_hash( void *state, const void *input )
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x32_512( vhash, hash0, hash1, hash2, hash3 );
shabal512_4way_init( &ctx.shabal );
shabal512_4way( &ctx.shabal, vhash, 64 );
shabal512_4way_close( &ctx.shabal, vhash );
dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x32_512( hash0, hash1, hash2, hash3, vhash );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash0, 64 );
@@ -790,13 +790,13 @@ void sonoa_4way_hash( void *state, const void *input )
sph_whirlpool( &ctx.whirlpool, hash3, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash3 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
sha512_4way_init( &ctx.sha512 );
sha512_4way( &ctx.sha512, vhash, 64 );
sha512_4way_close( &ctx.sha512, vhash );
mm256_rintrlv_4x64_4x32( vhashB, vhash, 512 );
rintrlv_4x64_4x32( vhashB, vhash, 512 );
haval256_5_4way_init( &ctx.haval );
haval256_5_4way( &ctx.haval, vhashB, 64 );
@@ -806,7 +806,7 @@ void sonoa_4way_hash( void *state, const void *input )
int scanhash_sonoa_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*8] __attribute__ ((aligned (64)));
uint32_t hash[4*16] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[7<<2]);
@@ -816,14 +816,14 @@ int scanhash_sonoa_4way( struct work *work, uint32_t max_nonce,
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 9; // aligned
const uint32_t Htarg = ptarget[7];
int thr_id = mythr->id; // thr_id arg is deprecated
int thr_id = mythr->id;
uint64_t htmax[] = { 0, 0xF, 0xFF,
0xFFF, 0xFFFF, 0x10000000 };
uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
0xFFFFF000, 0xFFFF0000, 0 };
// Need big endian data
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
for ( int m=0; m < 6; m++ ) if ( Htarg <= htmax[m] )
{
uint32_t mask = masks[m];
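
Most of this hunk, and the x17 and xevan hunks that follow, is a mechanical rename of the AVX2 (de)interleave helpers: the mm256_-prefixed names become plain intrlv_/dintrlv_/rintrlv_ names, a _512 suffix replaces the trailing bit-length argument for the fixed-width case, and mm256_bswap_intrlv80_4x64 becomes mm256_bswap32_intrlv80_4x64. For reference, a scalar sketch of what the 4x64 layout means, assuming the usual convention that 64-bit word i of all four lanes is stored contiguously; the real helpers in simd-utils use vector moves rather than loops.

#include <stdint.h>

// Sketch: interleave four 512-bit hashes into one 4-way buffer and back.
static void intrlv_4x64_512_ref( void *dst, const void *s0, const void *s1,
                                 const void *s2, const void *s3 )
{
   uint64_t *d = (uint64_t*)dst;
   const uint64_t *src[4] = { s0, s1, s2, s3 };
   for ( int i = 0; i < 8; i++ )            // 8 x 64-bit words = 512 bits
      for ( int lane = 0; lane < 4; lane++ )
         d[ 4*i + lane ] = src[lane][i];    // word i of every lane adjacent
}

static void dintrlv_4x64_512_ref( void *d0, void *d1, void *d2, void *d3,
                                  const void *src )
{
   const uint64_t *s = (const uint64_t*)src;
   uint64_t *dst[4] = { d0, d1, d2, d3 };
   for ( int i = 0; i < 8; i++ )
      for ( int lane = 0; lane < 4; lane++ )
         dst[lane][i] = s[ 4*i + lane ];
}

The 4x32 and 2x128 variants used around Shabal, Luffa and SIMD follow the same pattern with 32-bit and 128-bit words respectively.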

View File

@@ -68,7 +68,7 @@ void x17_4way_hash( void *state, const void *input )
bmw512_4way_close( &ctx.bmw, vhash );
// Serialize
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
// 3 Groestl
init_groestl( &ctx.groestl, 64 );
@@ -81,7 +81,7 @@ void x17_4way_hash( void *state, const void *input )
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
// Parallellize
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
// 4 Skein parallel 4 way 64 bit
skein512_4way_init( &ctx.skein );
@@ -99,7 +99,7 @@ void x17_4way_hash( void *state, const void *input )
keccak512_4way_close( &ctx.keccak, vhash );
// 7 Luffa parallel 2 way 128 bit
mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -124,8 +124,8 @@ void x17_4way_hash( void *state, const void *input )
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );
mm256_dintrlv_2x128_512( hash0, hash1, vhashA );
mm256_dintrlv_2x128_512( hash2, hash3, vhashB );
dintrlv_2x128_512( hash0, hash1, vhashA );
dintrlv_2x128_512( hash2, hash3, vhashB );
// 11 Echo serial
init_echo( &ctx.echo, 512 );
@@ -142,13 +142,13 @@ void x17_4way_hash( void *state, const void *input )
(const BitSequence *) hash3, 512 );
// 12 Hamsi parallel 4 way 64 bit
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
// 13 Fugue serial
sph_fugue512_init( &ctx.fugue );
@@ -165,13 +165,13 @@ void x17_4way_hash( void *state, const void *input )
sph_fugue512_close( &ctx.fugue, hash3 );
// 14 Shabal, parallel 4 way 32 bit
intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x32_512( vhash, hash0, hash1, hash2, hash3 );
shabal512_4way_init( &ctx.shabal );
shabal512_4way( &ctx.shabal, vhash, 64 );
shabal512_4way_close( &ctx.shabal, vhash );
dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );
dintrlv_4x32_512( hash0, hash1, hash2, hash3, vhash );
// 15 Whirlpool serial
sph_whirlpool_init( &ctx.whirlpool );
@@ -188,14 +188,14 @@ void x17_4way_hash( void *state, const void *input )
sph_whirlpool_close( &ctx.whirlpool, hash3 );
// 16 SHA512 parallel 64 bit
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
sha512_4way_init( &ctx.sha512 );
sha512_4way( &ctx.sha512, vhash, 64 );
sha512_4way_close( &ctx.sha512, vhash );
// 17 Haval parallel 32 bit
mm256_rintrlv_4x64_4x32( vhashB, vhash, 512 );
rintrlv_4x64_4x32( vhashB, vhash, 512 );
haval256_5_4way_init( &ctx.haval );
haval256_5_4way( &ctx.haval, vhashB, 64 );
@@ -205,7 +205,7 @@ void x17_4way_hash( void *state, const void *input )
int scanhash_x17_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*8] __attribute__ ((aligned (64)));
uint32_t hash[4*16] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[7<<2]);
@@ -222,7 +222,7 @@ int scanhash_x17_4way( struct work *work, uint32_t max_nonce,
0xFFFFF000, 0xFFFF0000, 0 };
// Need big endian data
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
for ( int m = 0; m < 6; m++ ) if ( Htarg <= htmax[m] )
{
uint32_t mask = masks[ m ];
@@ -232,7 +232,7 @@ int scanhash_x17_4way( struct work *work, uint32_t max_nonce,
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
x17_4way_hash( hash, vdata );
for ( int lane = 0; lane < 4; lane++ )
for ( int lane = 0; lane < 4; lane++ )
if ( ( hash7[ lane ] & mask ) == 0 )
{
extr_lane_4x32( lane_hash, hash, lane, 256 );
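
Because of that 4x32 layout, hash7 ( &hash[7<<2] ) points at the four lanes' word 7 stored side by side, so the cheap mask test above touches a single contiguous run; only a passing lane is expanded with extr_lane_4x32() and run through fulltest(). A scalar sketch of the lane extraction, under the same layout assumption as the interleave sketch earlier:

#include <stdint.h>

// Sketch: pull one lane's 256-bit hash out of a 4x32 interleaved buffer.
static void extr_lane_4x32_ref( uint32_t *lane_hash, const uint32_t *hash,
                                int lane )
{
   for ( int i = 0; i < 8; i++ )            // 8 x 32-bit words = 256 bits
      lane_hash[i] = hash[ 4*i + lane ];
}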

View File

@@ -71,7 +71,7 @@ void xevan_4way_hash( void *output, const void *input )
bmw512_4way_close( &ctx.bmw, vhash );
// Serial
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0,
@@ -87,7 +87,7 @@ void xevan_4way_hash( void *output, const void *input )
dataLen<<3 );
// Parallel 4way
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
skein512_4way_init( &ctx.skein );
skein512_4way( &ctx.skein, vhash, dataLen );
@@ -101,7 +101,7 @@ void xevan_4way_hash( void *output, const void *input )
keccak512_4way( &ctx.keccak, vhash, dataLen );
keccak512_4way_close( &ctx.keccak, vhash );
mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, dataLen<<3 );
rintrlv_4x64_2x128( vhashA, vhashB, vhash, dataLen<<3 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, dataLen );
@@ -123,8 +123,8 @@ void xevan_4way_hash( void *output, const void *input )
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashB, vhashB, dataLen<<3 );
mm256_dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 );
mm256_dintrlv_2x128( hash2, hash3, vhashB, dataLen<<3 );
dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 );
dintrlv_2x128( hash2, hash3, vhashB, dataLen<<3 );
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -139,13 +139,13 @@ void xevan_4way_hash( void *output, const void *input )
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *) hash3, dataLen<<3 );
// Parallel
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, dataLen );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash0, dataLen );
@@ -183,19 +183,19 @@ void xevan_4way_hash( void *output, const void *input )
sph_whirlpool( &ctx.whirlpool, hash3, dataLen );
sph_whirlpool_close( &ctx.whirlpool, hash3 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
sha512_4way_init( &ctx.sha512 );
sha512_4way( &ctx.sha512, vhash, dataLen );
sha512_4way_close( &ctx.sha512, vhash );
mm256_rintrlv_4x64_4x32( vhashA, vhash, dataLen<<3 );
rintrlv_4x64_4x32( vhashA, vhash, dataLen<<3 );
haval256_5_4way_init( &ctx.haval );
haval256_5_4way( &ctx.haval, vhashA, dataLen );
haval256_5_4way_close( &ctx.haval, vhashA );
mm256_rintrlv_4x32_4x64( vhash, vhashA, dataLen<<3 );
rintrlv_4x32_4x64( vhash, vhashA, dataLen<<3 );
memset( &vhash[ 4<<2 ], 0, (dataLen-32) << 2 );
@@ -207,7 +207,7 @@ void xevan_4way_hash( void *output, const void *input )
bmw512_4way( &ctx.bmw, vhash, dataLen );
bmw512_4way_close( &ctx.bmw, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0,
@@ -222,7 +222,7 @@ void xevan_4way_hash( void *output, const void *input )
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3,
dataLen<<3 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
skein512_4way_init( &ctx.skein );
skein512_4way( &ctx.skein, vhash, dataLen );
@@ -236,7 +236,7 @@ void xevan_4way_hash( void *output, const void *input )
keccak512_4way( &ctx.keccak, vhash, dataLen );
keccak512_4way_close( &ctx.keccak, vhash );
mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, dataLen<<3 );
rintrlv_4x64_2x128( vhashA, vhashB, vhash, dataLen<<3 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, dataLen );
@@ -258,8 +258,8 @@ void xevan_4way_hash( void *output, const void *input )
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashB, vhashB, dataLen<<3 );
mm256_dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 );
mm256_dintrlv_2x128( hash2, hash3, vhashB, dataLen<<3 );
dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 );
dintrlv_2x128( hash2, hash3, vhashB, dataLen<<3 );
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -274,13 +274,13 @@ void xevan_4way_hash( void *output, const void *input )
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *) hash3, dataLen<<3 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, dataLen );
hamsi512_4way_close( &ctx.hamsi, vhash );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash0, dataLen );
@@ -316,13 +316,13 @@ void xevan_4way_hash( void *output, const void *input )
sph_whirlpool( &ctx.whirlpool, hash3, dataLen );
sph_whirlpool_close( &ctx.whirlpool, hash3 );
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
sha512_4way_init( &ctx.sha512 );
sha512_4way( &ctx.sha512, vhash, dataLen );
sha512_4way_close( &ctx.sha512, vhash );
mm256_rintrlv_4x64_4x32( vhashA, vhash, dataLen<<3 );
rintrlv_4x64_4x32( vhashA, vhash, dataLen<<3 );
haval256_5_4way_init( &ctx.haval );
haval256_5_4way( &ctx.haval, vhashA, dataLen );
@@ -332,7 +332,7 @@ void xevan_4way_hash( void *output, const void *input )
int scanhash_xevan_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*8] __attribute__ ((aligned (64)));
uint32_t hash[4*16] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[7<<2]);
@@ -348,7 +348,7 @@ int scanhash_xevan_4way( struct work *work, uint32_t max_nonce,
if ( opt_benchmark )
ptarget[7] = 0x0cff;
mm256_bswap_intrlv80_4x64( vdata, pdata );
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do {
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0,n+2, 0,n+1, 0, n, 0 ) ), *noncev );

View File

@@ -399,15 +399,15 @@ int scanhash_yescrypt( struct work *work, uint32_t max_nonce,
be32enc(&endiandata[k], pdata[k]);
do {
be32enc(&endiandata[19], n);
yescrypt_hash((char*) endiandata, (char*) vhash, 80);
if (vhash[7] < Htarg && fulltest(vhash, ptarget)) {
work_set_target_ratio( work, vhash );
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
return true;
}
n++;
be32enc(&endiandata[19], n);
yescrypt_hash((char*) endiandata, (char*) vhash, 80);
if (vhash[7] < Htarg && fulltest(vhash, ptarget )
&& !opt_benchmark )
{
pdata[19] = n;
submit_solution( work, vhash, mythr );
}
n++;
} while (n < max_nonce && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;

View File

@@ -53,15 +53,15 @@ int scanhash_yespower( struct work *work, uint32_t max_nonce,
for (int k = 0; k < 19; k++)
be32enc(&endiandata[k], pdata[k]);
do {
be32enc(&endiandata[19], n);
yespower_hash((char*) endiandata, (char*) vhash, 80);
if (vhash[7] < Htarg && fulltest(vhash, ptarget)) {
work_set_target_ratio( work, vhash );
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
return true;
}
n++;
be32enc(&endiandata[19], n);
yespower_hash((char*) endiandata, (char*) vhash, 80);
if ( vhash[7] < Htarg && fulltest( vhash, ptarget )
&& !opt_benchmark )
{
pdata[19] = n;
submit_solution( work, vhash, mythr );
}
n++;
} while (n < max_nonce && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
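
Both the yescrypt and yespower loops above lose the early return true on a matching hash. Like the 4-way scanners earlier in this diff they now hand the share to submit_solution() and keep scanning, so several shares can be reported per call and hashes_done is always accounted at the end of the range. A stripped-down sketch of the shape these scalar scanners now share; algo_hash() is a placeholder for the per-algorithm hash call, the other helpers are the ones used throughout this codebase.

do
{
   be32enc( &endiandata[19], n );
   algo_hash( (char*)endiandata, (char*)vhash, 80 );    // placeholder name
   if ( vhash[7] < Htarg && fulltest( vhash, ptarget ) && !opt_benchmark )
   {
      pdata[19] = n;
      submit_solution( work, vhash, mythr );            // report, don't return
   }
   n++;
} while ( n < max_nonce && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;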

20
configure vendored
View File

@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.5.1.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.6.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -577,8 +577,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='cpuminer-opt'
PACKAGE_TARNAME='cpuminer-opt'
PACKAGE_VERSION='3.9.5.1'
PACKAGE_STRING='cpuminer-opt 3.9.5.1'
PACKAGE_VERSION='3.9.6'
PACKAGE_STRING='cpuminer-opt 3.9.6'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''
@@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures cpuminer-opt 3.9.5.1 to adapt to many kinds of systems.
\`configure' configures cpuminer-opt 3.9.6 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1404,7 +1404,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of cpuminer-opt 3.9.5.1:";;
short | recursive ) echo "Configuration of cpuminer-opt 3.9.6:";;
esac
cat <<\_ACEOF
@@ -1509,7 +1509,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
cpuminer-opt configure 3.9.5.1
cpuminer-opt configure 3.9.6
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by cpuminer-opt $as_me 3.9.5.1, which was
It was created by cpuminer-opt $as_me 3.9.6, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2993,7 +2993,7 @@ fi
# Define the identity of the package.
PACKAGE='cpuminer-opt'
VERSION='3.9.5.1'
VERSION='3.9.6'
cat >>confdefs.h <<_ACEOF
@@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by cpuminer-opt $as_me 3.9.5.1, which was
This file was extended by cpuminer-opt $as_me 3.9.6, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -6756,7 +6756,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
cpuminer-opt config.status 3.9.5.1
cpuminer-opt config.status 3.9.6
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"

View File

@@ -1,4 +1,4 @@
AC_INIT([cpuminer-opt], [3.9.5.1])
AC_INIT([cpuminer-opt], [3.9.6])
AC_PREREQ([2.59c])
AC_CANONICAL_SYSTEM

View File

@@ -178,7 +178,7 @@ static char const short_options[] =
#endif
"a:b:Bc:CDf:hm:n:p:Px:qr:R:s:t:T:o:u:O:V";
static struct work g_work = {{ 0 }};
static struct work g_work __attribute__ ((aligned (64))) = {{ 0 }};
//static struct work tmp_work;
time_t g_work_time = 0;
static pthread_mutex_t g_work_lock;
@@ -843,12 +843,13 @@ void scale_hash_for_display ( double* hashrate, char* units )
const uint64_t diff2hash = 0x40000000ULL;
static struct timeval five_min_start;
static double shash_sum = 0.;
static double bhash_sum = 0.;
static double time_sum = 0.;
static double latency_sum = 0.;
static uint64_t submits_sum = 0;
static struct timeval five_min_start;
static double shash_sum = 0.;
static double bhash_sum = 0.;
static double time_sum = 0.;
static double latency_sum = 0.;
static uint64_t submit_sum = 0;
static uint64_t reject_sum = 0;
struct share_stats_t
{
@@ -860,18 +861,22 @@ struct share_stats_t
// with more and more parallelism the chances of submitting multiple
// shares in a very short time grows.
#define s_stats_size 4
#define s_stats_size 8
static struct share_stats_t share_stats[ s_stats_size ];
static int s_get_ptr = 0, s_put_ptr = 0;
static struct timeval last_submit_time = {0};
static inline int stats_ptr_incr( int p )
{
return ++p < s_stats_size ? p : 0;
}
static int share_result( int result, struct work *null_work,
const char *reason )
{
double share_time, share_hash, block_hash, share_size;
double hashcount = 0.;
double hashrate = 0.;
uint64_t latency;
double share_time = 0., share_hash = 0., block_hash = 0., share_size = 0.;
double hashcount = 0., hashrate = 0.;
uint64_t latency = 0;
struct share_stats_t my_stats = {0};
struct timeval ack_time, latency_tv, et;
char hr[32];
@@ -879,37 +884,29 @@ static int share_result( int result, struct work *null_work,
char shr[32];
char shr_units[4] = {0};
char diffstr[32];
const char *sres;
bool solved;
const char *sres = NULL;
bool solved = false;
// Mutex while accessing global counters.
// Mutex while we grab a snapshot of the global counters.
pthread_mutex_lock( &stats_lock );
// There is a window where a second share could be submitted
// before receiving the response for this one. When this happens
// te second share will be processed from [1] on the next pass.
memcpy( &my_stats, &share_stats[ s_get_ptr], sizeof my_stats );
memset( &share_stats[ s_get_ptr ], 0, sizeof my_stats );
s_get_ptr++;
if ( s_get_ptr >= s_stats_size )
s_get_ptr = 0;
/*
if ( share_stats[0].submit_time.tv_sec )
// When submit_work detects a buffer overflow it discards the stats for
// the new share. When we catch up we may get acks for shares with
// no stats. Leaving the get pointer un-incremented will resync with the
// put pointer.
if ( share_stats[ s_get_ptr ].submit_time.tv_sec )
{
memcpy( &my_stats, &share_stats[0], sizeof my_stats );
memset( &share_stats[0], 0, sizeof my_stats );
}
else if ( share_stats[1].submit_time.tv_sec )
{
memcpy( &my_stats, &share_stats[1], sizeof my_stats );
memset( &share_stats[1], 0, sizeof my_stats );
memcpy( &my_stats, &share_stats[ s_get_ptr], sizeof my_stats );
memset( &share_stats[ s_get_ptr ], 0, sizeof my_stats );
s_get_ptr = stats_ptr_incr( s_get_ptr );
pthread_mutex_unlock( &stats_lock );
}
else
{
memcpy( &my_stats, &share_stats[2], sizeof my_stats );
memset( &share_stats[2], 0, sizeof my_stats );
pthread_mutex_unlock( &stats_lock );
applog(LOG_WARNING,"Pending shares overflow, stats for share are lost.");
}
*/
for ( int i = 0; i < opt_n_threads; i++ )
{
hashcount += thr_hashcount[i];
@@ -919,12 +916,16 @@ static int share_result( int result, struct work *null_work,
global_hashrate = hashrate;
// calculate latency and share time.
gettimeofday( &ack_time, NULL );
timeval_subtract( &latency_tv, &ack_time, &my_stats.submit_time );
latency = ( latency_tv.tv_sec * 1000 + latency_tv.tv_usec / 1000 );
timeval_subtract( &et, &my_stats.submit_time, &last_submit_time );
share_time = (double)et.tv_sec + ( (double)et.tv_usec / 1000000. );
memcpy( &last_submit_time, &my_stats.submit_time, sizeof last_submit_time );
if ( my_stats.submit_time.tv_sec )
{
gettimeofday( &ack_time, NULL );
timeval_subtract( &latency_tv, &ack_time, &my_stats.submit_time );
latency = ( latency_tv.tv_sec * 1000 + latency_tv.tv_usec / 1000 );
timeval_subtract( &et, &my_stats.submit_time, &last_submit_time );
share_time = (double)et.tv_sec + ( (double)et.tv_usec / 1000000. );
memcpy( &last_submit_time, &my_stats.submit_time,
sizeof last_submit_time );
}
// calculate share hashrate and size
share_hash = my_stats.share_diff * diff2hash;
@@ -938,10 +939,13 @@ static int share_result( int result, struct work *null_work,
solved_block_count += solved ? 1 : 0 ;
// update counters for 5 minute summary report
pthread_mutex_lock( &stats_lock );
shash_sum += share_hash;
bhash_sum += block_hash;
time_sum += share_time;
submits_sum ++;
submit_sum ++;
reject_sum += (uint64_t)!result;
latency_sum += latency;
pthread_mutex_unlock( &stats_lock );
@@ -961,32 +965,38 @@ static int share_result( int result, struct work *null_work,
// colour code the share diff to highlight high value.
if ( solved )
sprintf( diffstr, "%s%.3g%s", CL_MAG, my_stats.share_diff, CL_WHT );
else if ( my_stats.share_diff > (my_stats.net_diff*0.1) )
else if ( my_stats.share_diff > ( my_stats.net_diff * 0.1 ) )
sprintf( diffstr, "%s%.3g%s", CL_GRN, my_stats.share_diff, CL_WHT );
else if ( my_stats.share_diff > (my_stats.net_diff*0.01) )
else if ( my_stats.share_diff > ( my_stats.net_diff * 0.01 ) )
sprintf( diffstr, "%s%.3g%s", CL_CYN, my_stats.share_diff, CL_WHT );
else
sprintf( diffstr, "%.3g", my_stats.share_diff );
if ( hashrate && share_hash_rate > (768.*hashrate) )
sprintf( shr, "%s%.2f %sH/s%s", CL_MAG, scaled_shr, shr_units,
CL_WHT );
else if ( share_hash_rate > (32.*hashrate) )
sprintf( shr, "%s%.2f %sH/s%s", CL_GRN, scaled_shr, shr_units,
CL_WHT );
else if ( share_hash_rate > 2.0*hashrate )
sprintf( shr, "%s%.2f %sH/s%s", CL_CYN, scaled_shr, shr_units,
CL_WHT );
else if ( share_hash_rate > 0.5*hashrate )
sprintf( shr, "%.2f %sH/s", scaled_shr, shr_units );
else
sprintf( shr, "%s%.2f %sH/s%s", CL_YLW, scaled_shr, shr_units,
CL_WHT );
if ( hashrate ) // don't colour share hash rate without reference rate.
{
if ( share_hash_rate > 768. * hashrate )
sprintf( shr, "%s%.2f %sH/s%s", CL_MAG, scaled_shr, shr_units,
CL_WHT );
else if ( share_hash_rate > 32. * hashrate )
sprintf( shr, "%s%.2f %sH/s%s", CL_GRN, scaled_shr, shr_units,
CL_WHT );
else if ( share_hash_rate > 2.0 * hashrate )
sprintf( shr, "%s%.2f %sH/s%s", CL_CYN, scaled_shr, shr_units,
CL_WHT );
else if ( share_hash_rate > 0.5 * hashrate )
sprintf( shr, "%.2f %sH/s", scaled_shr, shr_units );
else
sprintf( shr, "%s%.2f %sH/s%s", CL_YLW, scaled_shr, shr_units,
CL_WHT );
}
else
sprintf( shr, "%.2f %sH/s", scaled_shr, shr_units );
}
else
else // monochrome
{
sres = ( solved ? "BLOCK SOLVED" : result ? "Accepted" : "Rejected" );
sprintf( diffstr, "%.3g", my_stats.share_diff );
sprintf( shr, "%.2f %sH/s", scaled_shr, shr_units );
}
scale_hash_for_display ( &hashrate, hr_units );
@@ -999,8 +1009,7 @@ static int share_result( int result, struct work *null_work,
sres, diffstr, share_time, accepted_share_count,
rejected_share_count, solved_block_count );
if ( have_stratum && result && my_stats.share_diff && my_stats.net_diff
&& !opt_quiet )
if ( have_stratum && result && !opt_quiet )
{
applog( LOG_NOTICE, "Miner %s %sH/s, Share %s, Latency %d ms.",
hr, hr_units, shr, latency );
@@ -1602,37 +1611,16 @@ bool submit_work(struct thr_info *thr, const struct work *work_in)
// collect some share stats
pthread_mutex_lock( &stats_lock );
gettimeofday( &share_stats[ s_put_ptr ].submit_time, NULL );
share_stats[ s_put_ptr ].share_diff = work_in->sharediff;
share_stats[ s_put_ptr ].net_diff = net_diff;
strcpy( share_stats[ s_put_ptr ].job_id, work_in->job_id );
s_put_ptr++;
if ( s_put_ptr >= s_stats_size )
s_put_ptr = 0;
/*
if ( share_stats[0].submit_time.tv_sec == 0 )
{
gettimeofday( &share_stats[0].submit_time, NULL );
share_stats[0].share_diff = work_in->sharediff;
share_stats[0].net_diff = net_diff;
strcpy( share_stats[0].job_id, work_in->job_id );
// if buffer full discard stats and don't increment pointer.
// We're on the clock so let share_result report it.
if ( share_stats[ s_put_ptr ].submit_time.tv_sec == 0 )
{
gettimeofday( &share_stats[ s_put_ptr ].submit_time, NULL );
share_stats[ s_put_ptr ].share_diff = work_in->sharediff;
share_stats[ s_put_ptr ].net_diff = net_diff;
strcpy( share_stats[ s_put_ptr ].job_id, work_in->job_id );
s_put_ptr = stats_ptr_incr( s_put_ptr );
}
else if ( share_stats[1].submit_time.tv_sec == 0 )
{ // previous share hasn't been confirmed yet.
gettimeofday( &share_stats[1].submit_time, NULL );
share_stats[1].share_diff = work_in->sharediff;
share_stats[1].net_diff = net_diff;
strcpy( share_stats[1].job_id, work_in->job_id );
}
else
{ // previous share hasn't been confirmed yet.
gettimeofday( &share_stats[2].submit_time, NULL );
share_stats[2].share_diff = work_in->sharediff;
share_stats[2].net_diff = net_diff;
strcpy( share_stats[2].job_id, work_in->job_id );
}
*/
pthread_mutex_unlock( &stats_lock );
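
This hunk and the share_result() hunk earlier are the producer and consumer sides of the same change: share statistics now sit in a ring of s_stats_size (8) slots addressed by independent put and get pointers, replacing the hard-coded three-slot scheme that had been commented out. submit_work() fills the slot at the put pointer, or silently drops the stats when that slot is still occupied; share_result() drains from the get pointer and, when it finds an empty slot after an overflow, reports the loss and leaves the pointer alone so the two indices resynchronise. A self-contained sketch of that discipline, with a hypothetical cut-down stats struct (the real struct share_stats_t also carries net_diff and job_id):

#include <stdbool.h>
#include <string.h>
#include <sys/time.h>

struct stats_sk { struct timeval submit_time; double share_diff; };

#define RING_SIZE 8
static struct stats_sk ring[ RING_SIZE ];
static int put_ptr = 0, get_ptr = 0;

static inline int ring_incr( int p ) { return ++p < RING_SIZE ? p : 0; }

// Producer (submit_work): only advance the put pointer when a slot was free.
static void ring_put( const struct stats_sk *s )
{
   if ( ring[ put_ptr ].submit_time.tv_sec == 0 )
   {
      ring[ put_ptr ] = *s;
      put_ptr = ring_incr( put_ptr );
   }                                        // else: full, stats discarded
}

// Consumer (share_result): an empty slot means the matching put was
// dropped, so don't advance and the pointers line up again.
static bool ring_get( struct stats_sk *s )
{
   if ( ring[ get_ptr ].submit_time.tv_sec == 0 )
      return false;
   *s = ring[ get_ptr ];
   memset( &ring[ get_ptr ], 0, sizeof ring[ get_ptr ] );
   get_ptr = ring_incr( get_ptr );
   return true;
}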
@@ -1811,10 +1799,11 @@ void std_get_new_work( struct work* work, struct work* g_work, int thr_id,
// or
// || ( !benchmark && strcmp( work->job_id, g_work->job_id ) ) ) )
// For now leave it as is, it seems stable.
// strtoul seems to work.
if ( memcmp( work->data, g_work->data, algo_gate.work_cmp_size )
&& ( clean_job || ( *nonceptr >= *end_nonce_ptr )
|| ( work->job_id != g_work->job_id ) ) )
&& ( clean_job || ( *nonceptr >= *end_nonce_ptr )
|| strtoul( work->job_id, NULL, 16 )
!= strtoul( g_work->job_id, NULL, 16 ) ) )
{
work_free( work );
work_copy( work, g_work );
@@ -1862,9 +1851,9 @@ bool std_ready_to_mine( struct work* work, struct stratum_ctx* stratum,
static void *miner_thread( void *userdata )
{
struct work work __attribute__ ((aligned (64))) ;
struct thr_info *mythr = (struct thr_info *) userdata;
int thr_id = mythr->id;
struct work work;
uint32_t max_nonce;
struct timeval et;
struct timeval time_now;
@@ -2099,9 +2088,6 @@ static void *miner_thread( void *userdata )
break;
}
if ( !opt_quiet )
// applog( LOG_BLUE, "Share %d submitted by thread %d.",
// accepted_share_count + rejected_share_count + 1,
// mythr->id );
applog( LOG_BLUE, "Share %d submitted by thread %d, job %s.",
accepted_share_count + rejected_share_count + 1,
mythr->id, work.job_id );
@@ -2129,34 +2115,53 @@ static void *miner_thread( void *userdata )
pthread_mutex_unlock( &stats_lock );
else
{
// collect and reset counters
// collect and reset global counters
double hash = shash_sum; shash_sum = 0.;
double bhash = bhash_sum; bhash_sum = 0.;
double time = time_sum; time_sum = 0.;
uint64_t submits = submits_sum; submits_sum = 0;
uint64_t submits = submit_sum; submit_sum = 0;
uint64_t rejects = reject_sum; reject_sum = 0;
uint64_t latency = latency_sum; latency_sum = 0;
memcpy( &five_min_start, &time_now, sizeof time_now );
pthread_mutex_unlock( &stats_lock );
double ghrate = global_hashrate;
double shrate = time == 0. ? 0. : hash / time;
double scaled_shrate = shrate;
double avg_share = bhash == 0. ? 0. : hash / bhash * 100.;
double ghrate = global_hashrate;
double scaled_ghrate = ghrate;
double shrate = time == 0. ? 0. : hash / time;
double scaled_shrate = shrate;
double avg_share = bhash == 0. ? 0. : hash / bhash * 100.;
uint64_t avg_latency = 0;
double latency_pc = 0.;
double rejects_pc = 0.;
double submit_rate = 0.;
char shr[32];
char shr_units[4] = {0};
char ghr[32];
char ghr_units[4] = {0};
int temp = cpu_temp(0);
char timestr[32];
char tempstr[32];
latency = submits ? latency / submits : 0;
if ( submits )
avg_latency = latency / submits;
if ( time != 0. )
{
submit_rate = (double)submits*60. / time;
rejects_pc = (double)rejects / (time*10.);
latency_pc = (double)latency / ( time*10.);
}
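/* Worked example of the summary arithmetic above, assuming latency_sum is
   accumulated in milliseconds by share_result() and time_sum in seconds,
   over a reporting window of time = 300 s with submits = 15 and
   latency = 1200 ms:
      avg_latency = 1200 / 15          = 80 ms per share
      submit_rate = 15 * 60 / 300      = 3.0 shares per minute
      latency_pc  = 1200 / (300 * 10)  = 0.4 % of wall time spent waiting */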
scale_hash_for_display( &scaled_shrate, shr_units );
scale_hash_for_display( &scaled_ghrate, ghr_units );
sprintf( ghr, "%.2f %sH/s", scaled_ghrate, ghr_units );
if ( use_colors )
{
if ( shrate > (32.*ghrate) )
if ( shrate > (128.*ghrate) )
sprintf( shr, "%s%.2f %sH/s%s", CL_MAG, scaled_shrate,
shr_units, CL_WHT );
else if ( shrate > (8.*ghrate) )
else if ( shrate > (16.*ghrate) )
sprintf( shr, "%s%.2f %sH/s%s", CL_GRN, scaled_shrate,
shr_units, CL_WHT );
else if ( shrate > 2.0*ghrate )
@@ -2168,52 +2173,99 @@ static void *miner_thread( void *userdata )
sprintf( shr, "%s%.2f %sH/s%s", CL_YLW, scaled_shrate,
shr_units, CL_WHT );
if ( temp >= 80 ) sprintf( timestr, "%s%d C%s",
if ( temp >= 80 ) sprintf( tempstr, "%s%d C%s",
CL_RED, temp, CL_WHT );
else if (temp >=70 ) sprintf( timestr, "%s%d C%s",
else if (temp >=70 ) sprintf( tempstr, "%s%d C%s",
CL_YLW, temp, CL_WHT );
else sprintf( timestr, "%d C", temp );
else sprintf( tempstr, "%d C", temp );
}
else
{
sprintf( shr, "%.2f %sH/s", scaled_shrate, shr_units );
sprintf( tempstr, "%d C", temp );
}
applog(LOG_NOTICE,"Submitted %d shares in %dm%02ds.",
(uint64_t)submits, et.tv_sec / 60, et.tv_sec % 60 );
applog(LOG_NOTICE,"%d rejects (%.2f%%), %.5f%% block share.",
rejects, rejects_pc, avg_share );
applog(LOG_NOTICE,"Avg hashrate: Miner %s, Share %s.", ghr, shr );
#if ((defined(_WIN64) || defined(__WINDOWS__)))
applog(LOG_NOTICE,"Shares/min: %.2f, latency %d ms (%.2f%%).",
submit_rate, avg_latency, latency_pc );
#else
applog(LOG_NOTICE,"Shares/min: %.2f, latency %d ms (%.2f%%), temp: %s.",
submit_rate, avg_latency, latency_pc, tempstr );
#endif
/*
applog(LOG_NOTICE,"Submitted %d shares in %dm%02ds, %.5f%% block share.",
(uint64_t)submits, et.tv_sec / 60, et.tv_sec % 60, avg_share );
#if ((defined(_WIN64) || defined(__WINDOWS__)))
applog(LOG_NOTICE,"Share hashrate %s, latency %d ms.",
shr, latency );
applog(LOG_NOTICE,"Share hashrate %s, latency %d ms (%.2f%%).",
shr, avg_latency, latency_pc );
#else
applog(LOG_NOTICE,"Share hashrate %s, latency %d ms, temp %s.",
shr, latency, timestr );
applog(LOG_NOTICE,"Share hashrate %s, latency %d ms (%.2f%%), temp %s.",
shr, avg_latency, latency_pc, tempstr );
#endif
// applog(LOG_NOTICE,"Performance index: %s.", hixstr );
*/
applog(LOG_INFO,"- - - - - - - - - - - - - - - - - - - - - - - - - - -");
}
// display hashrate
if ( opt_hash_meter )
if ( !opt_quiet )
{
char hc[16];
char hr[16];
char hc_units[2] = {0,0};
char hr_units[2] = {0,0};
double hashcount = thr_hashcount[thr_id];
double hashrate = thr_hashrates[thr_id];
if ( hashcount )
double hashcount;
double hashrate;
if ( opt_hash_meter )
{
scale_hash_for_display( &hashcount, hc_units );
scale_hash_for_display( &hashrate, hr_units );
if ( hc_units[0] )
sprintf( hc, "%.2f", hashcount );
else // no fractions of a hash
sprintf( hc, "%.0f", hashcount );
sprintf( hr, "%.2f", hashrate );
applog( LOG_INFO, "CPU #%d: %s %sH, %s %sH/s",
thr_id, hc, hc_units, hr, hr_units );
hashcount = thr_hashcount[thr_id];
hashrate = thr_hashrates[thr_id];
if ( hashcount != 0. )
{
scale_hash_for_display( &hashcount, hc_units );
scale_hash_for_display( &hashrate, hr_units );
if ( hc_units[0] )
sprintf( hc, "%.2f", hashcount );
else // no fractions of a hash
sprintf( hc, "%.0f", hashcount );
sprintf( hr, "%.2f", hashrate );
applog( LOG_INFO, "CPU #%d: %s %sH, %s %sH/s",
thr_id, hc, hc_units, hr, hr_units );
}
}
if ( thr_id == 0 )
{
hashcount = 0.;
hashrate = 0.;
for ( i = 0; i < opt_n_threads; i++ )
{
hashrate += thr_hashrates[i];
hashcount += thr_hashcount[i];
}
if ( hashcount != 0. )
{
scale_hash_for_display( &hashcount, hc_units );
scale_hash_for_display( &hashrate, hr_units );
if ( hc_units[0] )
sprintf( hc, "%.2f", hashcount );
else // no fractions of a hash
sprintf( hc, "%.0f", hashcount );
sprintf( hr, "%.2f", hashrate );
applog( LOG_NOTICE, "Miner perf: %s %sH, %s %sH/s.",
hc, hc_units, hr, hr_units );
}
}
}
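// A hedged sketch (not part of this commit) of what the scale_hash_for_display
// helper used above is assumed to do: divide by 1000 until the value fits a
// display field and report the matching metric prefix, so 1234567.0 H/s prints
// as "1.23 MH/s".  The real helper is defined elsewhere in the source; the
// _sketch suffix marks this as illustration only.
//
//    static void scale_for_display_sketch( double *h, char *units )
//    {
//       static const char prefix[] = { 0, 'k', 'M', 'G', 'T', 'P' };
//       int i = 0;
//       while ( ( *h >= 1000.0 ) && ( i < 5 ) ) { *h /= 1000.0; i++; }
//       units[0] = prefix[i];
//    }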
// Display benchmark total
// Update hashrate for API if no shares accepted yet.
if ( ( opt_benchmark || !accepted_share_count )
@@ -2226,7 +2278,7 @@ static void *miner_thread( void *userdata )
hashrate += thr_hashrates[i];
hashcount += thr_hashcount[i];
}
if ( hashcount )
if ( hashcount != 0. )
{
global_hashcount = hashcount;
global_hashrate = hashrate;
@@ -2478,8 +2530,8 @@ static bool stratum_handle_response( char *buf )
val = JSON_LOADS( buf, &err );
if (!val)
{
applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
{
applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
goto out;
}
res_val = json_object_get( val, "result" );
@@ -2488,8 +2540,8 @@ static bool stratum_handle_response( char *buf )
id_val = json_object_get( val, "id" );
if ( !id_val || json_is_null(id_val) )
goto out;
if ( !algo_gate.stratum_handle_response( val ) )
goto out;
if ( !algo_gate.stratum_handle_response( val ) )
goto out;
ret = true;
out:
if (val)

42
miner.h
View File

@@ -313,6 +313,7 @@ void applog(int prio, const char *fmt, ...);
void restart_threads(void);
extern json_t *json_rpc_call( CURL *curl, const char *url, const char *userpass,
const char *rpc_req, int *curl_err, int flags );
extern void cbin2hex(char *out, const char *in, size_t len);
void bin2hex( char *s, const unsigned char *p, size_t len );
char *abin2hex( const unsigned char *p, size_t len );
bool hex2bin( unsigned char *p, const char *hexstr, size_t len );
@@ -330,6 +331,7 @@ extern void diff_to_target(uint32_t *target, double diff);
double hash_target_ratio( uint32_t* hash, uint32_t* target );
void work_set_target_ratio( struct work* work, uint32_t* hash );
void get_currentalgo( char* buf, int sz );
bool has_sha();
bool has_aes_ni();
@@ -349,7 +351,7 @@ void cpu_brand_string( char* s );
float cpu_temp( int core );
struct work {
uint32_t data[48];
uint32_t data[48] __attribute__ ((aligned (64)));
uint32_t target[8];
double targetdiff;
@@ -363,6 +365,14 @@ struct work {
char *job_id;
size_t xnonce2_len;
unsigned char *xnonce2;
// x16rt
uint32_t merkleroothash[8];
uint32_t witmerkleroothash[8];
uint32_t denom10[8];
uint32_t denom100[8];
uint32_t denom1000[8];
uint32_t denom10000[8];
} __attribute__ ((aligned (64)));
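// Presumably the explicit 64 byte alignment on work.data and on the struct
// matches the cache line and the widest vector type in use ( __m512i ), so
// the interleave routines can use aligned SIMD loads on the block header.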
struct stratum_job {
@@ -376,9 +386,15 @@ struct stratum_job {
unsigned char version[4];
unsigned char nbits[4];
unsigned char ntime[4];
bool clean;
double diff;
unsigned char extra[64];
bool clean;
// for x16rt
unsigned char extra[64];
unsigned char denom10[32];
unsigned char denom100[32];
unsigned char denom1000[32];
unsigned char denom10000[32];
unsigned char proofoffullnode[32];
} __attribute__ ((aligned (64)));
@@ -401,7 +417,7 @@ struct stratum_ctx {
unsigned char *xnonce1;
size_t xnonce2_size;
struct stratum_job job;
struct work work;
struct work work __attribute__ ((aligned (64)));
pthread_mutex_t work_lock;
int bloc_height;
@@ -498,6 +514,7 @@ enum algos {
// ALGO_BLAKE2B,
ALGO_BLAKE2S,
ALGO_BMW,
ALGO_BMW512,
ALGO_C11,
ALGO_CRYPTOLIGHT,
ALGO_CRYPTONIGHT,
@@ -555,10 +572,13 @@ enum algos {
ALGO_X11GOST,
ALGO_X12,
ALGO_X13,
ALGO_X13BCD,
ALGO_X13SM3,
ALGO_X14,
ALGO_X15,
ALGO_X16R,
ALGO_X16RT,
ALGO_X16RT_VEIL,
ALGO_X16S,
ALGO_X17,
ALGO_XEVAN,
@@ -586,6 +606,7 @@ static const char* const algo_names[] = {
// "blake2b",
"blake2s",
"bmw",
"bmw512",
"c11",
"cryptolight",
"cryptonight",
@@ -643,10 +664,13 @@ static const char* const algo_names[] = {
"x11gost",
"x12",
"x13",
"x13bcd",
"x13sm3",
"x14",
"x15",
"x16r",
"x16rt",
"x16rt-veil",
"x16s",
"x17",
"xevan",
@@ -736,6 +760,7 @@ Options:\n\
blakecoin blake256r8\n\
blake2s Blake-2 S\n\
bmw BMW 256\n\
bmw512 BMW 512\n\
c11 Chaincoin\n\
cryptolight Cryptonight-light\n\
cryptonight Cryptonote legacy\n\
@@ -782,7 +807,7 @@ Options:\n\
skein2 Double Skein (Woodcoin)\n\
skunk Signatum (SIGT)\n\
sonoa Sono\n\
timetravel timetravel8, Machinecoin (MAC)\n\
timetravel timetravel8, Machinecoin (MAC)\n\
timetravel10 Bitcore (BTX)\n\
tribus Denarius (DNR)\n\
vanilla blake256r8vnl (VCash)\n\
@@ -794,20 +819,23 @@ Options:\n\
x11gost sib (SibCoin)\n\
x12 Galaxie Cash (GCH)\n\
x13 X13\n\
x13bcd bcd \n\
x13sm3 hsr (Hshare)\n\
x14 X14\n\
x15 X15\n\
x16r Ravencoin (RVN)\n\
x16rt Gincoin (GIN)\n\
x16rt-veil Veil (VEIL)\n\
x16s Pigeoncoin (PGN)\n\
x17\n\
xevan Bitsend (BSD)\n\
yescrypt Globlboost-Y (BSTY)\n\
yescrypt Globalboost-Y (BSTY)\n\
yescryptr8 BitZeny (ZNY)\n\
yescryptr16 Eli\n\
yescryptr32 WAVI\n\
yespower Cryply\n\
yespowerr16 Yenten (YTN)\n\
zr5 Ziftr\n\
zr5 Ziftr\n\
-o, --url=URL URL of mining server\n\
-O, --userpass=U:P username:password pair for mining server\n\
-u, --user=USERNAME username for mining server\n\

View File

@@ -174,33 +174,26 @@
#if defined(__MMX__)
// 64 bit vectors
#include "simd-utils/simd-mmx.h"
#include "simd-utils/intrlv-mmx.h"
#include "simd-utils/simd-64.h"
//#include "simd-utils/intrlv-mmx.h"
#if defined(__SSE2__)
// 128 bit vectors
#include "simd-utils/simd-sse2.h"
#include "simd-utils/intrlv-sse2.h"
#include "simd-utils/simd-128.h"
#if defined(__AVX__)
// 256 bit vector basics
#include "simd-utils/simd-avx.h"
#include "simd-utils/intrlv-avx.h"
#include "simd-utils/simd-256.h"
#if defined(__AVX2__)
// 256 bit everything else
#include "simd-utils/simd-avx2.h"
#include "simd-utils/intrlv-avx2.h"
// Skylake-X has all these
#if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// 512 bit vectors
#include "simd-utils/simd-avx512.h"
#include "simd-utils/intrlv-avx512.h"
#include "simd-utils/simd-512.h"
#endif // MMX
#endif // SSE2
@@ -208,7 +201,6 @@
#endif // AVX2
#endif // AVX512
// Picks implementation based on available CPU features.
#include "simd-utils/intrlv-selector.h"
#include "simd-utils/intrlv.h"
#endif // SIMD_UTILS_H__

View File

@@ -1,867 +0,0 @@
#if !defined(INTRLV_AVX_H__)
#define INTRLV_AVX_H__ 1
#if defined(__AVX__)
// Convenient short cuts for local use only
// Extract 64 bits from the low 128 bits of 256 bit vector.
#define extr64_cast128_256( a, n ) \
_mm_extract_epi64( _mm256_castsi256_si128( a ), n )
// Extract 32 bits from the low 128 bits of 256 bit vector.
#define extr32_cast128_256( a, n ) \
_mm_extract_epi32( _mm256_castsi256_si128( a ), n )
///////////////////////////////////////////////////////////
//
// AVX 256 Bit Vectors
//
// 256 bit interleaving can be done with AVX.
#define mm256_put_64( s0, s1, s2, s3) \
_mm256_set_epi64x( *((const uint64_t*)(s3)), *((const uint64_t*)(s2)), \
*((const uint64_t*)(s1)), *((const uint64_t*)(s0)) )
#define mm256_put_32( s00, s01, s02, s03, s04, s05, s06, s07 ) \
_mm256_set_epi32( *((const uint32_t*)(s07)), *((const uint32_t*)(s06)), \
*((const uint32_t*)(s05)), *((const uint32_t*)(s04)), \
*((const uint32_t*)(s03)), *((const uint32_t*)(s02)), \
*((const uint32_t*)(s01)), *((const uint32_t*)(s00)) )
#define mm256_get_64( s, i0, i1, i2, i3 ) \
_mm256_set_epi64x( ((const uint64_t*)(s))[i3], ((const uint64_t*)(s))[i2], \
((const uint64_t*)(s))[i1], ((const uint64_t*)(s))[i0] )
#define mm256_get_32( s, i0, i1, i2, i3, i4, i5, i6, i7 ) \
_mm256_set_epi32( ((const uint32_t*)(s))[i7], ((const uint32_t*)(s))[i6], \
((const uint32_t*)(s))[i5], ((const uint32_t*)(s))[i4], \
((const uint32_t*)(s))[i3], ((const uint32_t*)(s))[i2], \
((const uint32_t*)(s))[i1], ((const uint32_t*)(s))[i0] )
/*
// Blend 2 vectors alternating hi & lo: { hi[n], lo[n-1], ... hi[1], lo[0] }
#define mm256_intrlv_blend_128( hi, lo ) \
_mm256_blend_epi32( hi, lo, 0x0f )
#define mm256_intrlv_blend_64( hi, lo ) \
_mm256_blend_epi32( hi, lo, 0x33 )
#define mm256_intrlv_blend_32( hi, lo ) \
_mm256_blend_epi32( hi, lo, 0x55 )
*/
// Interleave 8x32_256
#define mm256_intrlv_8x32_256( d, s0, s1, s2, s3, s4, s5, s6, s7 ) \
{ \
__m128i s0hi = mm128_extr_hi128_256( s0 ); \
__m128i s1hi = mm128_extr_hi128_256( s1 ); \
__m128i s2hi = mm128_extr_hi128_256( s2 ); \
__m128i s3hi = mm128_extr_hi128_256( s3 ); \
__m128i s4hi = mm128_extr_hi128_256( s4 ); \
__m128i s5hi = mm128_extr_hi128_256( s5 ); \
__m128i s6hi = mm128_extr_hi128_256( s6 ); \
__m128i s7hi = mm128_extr_hi128_256( s7 ); \
casti_m256i( d,0 ) = _mm256_set_epi32( \
extr32_cast128_256(s7,0), extr32_cast128_256(s6,0), \
extr32_cast128_256(s5,0), extr32_cast128_256(s4,0), \
extr32_cast128_256(s3,0), extr32_cast128_256(s2,0), \
extr32_cast128_256(s1,0), extr32_cast128_256(s0,0) ); \
casti_m256i( d,1 ) = _mm256_set_epi32( \
extr32_cast128_256(s7,1), extr32_cast128_256(s6,1), \
extr32_cast128_256(s5,1), extr32_cast128_256(s4,1), \
extr32_cast128_256(s3,1), extr32_cast128_256(s2,1), \
extr32_cast128_256(s1,1), extr32_cast128_256(s0,1) ); \
casti_m256i( d,2 ) = _mm256_set_epi32( \
extr32_cast128_256(s7,2), extr32_cast128_256(s6,2), \
extr32_cast128_256(s5,2), extr32_cast128_256(s4,2), \
extr32_cast128_256(s3,2), extr32_cast128_256(s2,2), \
extr32_cast128_256(s1,2), extr32_cast128_256(s0,2) ); \
casti_m256i( d,3 ) = _mm256_set_epi32( \
extr32_cast128_256(s7,3), extr32_cast128_256(s6,3), \
extr32_cast128_256(s5,3), extr32_cast128_256(s4,3), \
extr32_cast128_256(s3,3), extr32_cast128_256(s2,3), \
extr32_cast128_256(s1,3), extr32_cast128_256(s0,3) ); \
casti_m256i( d,4 ) = _mm256_set_epi32( \
mm128_extr_32(s7hi,0), mm128_extr_32(s6hi,0), \
mm128_extr_32(s5hi,0), mm128_extr_32(s4hi,0), \
mm128_extr_32(s3hi,0), mm128_extr_32(s2hi,0), \
mm128_extr_32(s1hi,0), mm128_extr_32(s0hi,0) ); \
casti_m256i( d,5 ) = _mm256_set_epi32( \
mm128_extr_32(s7hi,1), mm128_extr_32(s6hi,1), \
mm128_extr_32(s5hi,1), mm128_extr_32(s4hi,1), \
mm128_extr_32(s3hi,1), mm128_extr_32(s2hi,1), \
mm128_extr_32(s1hi,1), mm128_extr_32(s0hi,1) ); \
casti_m256i( d,6 ) = _mm256_set_epi32( \
mm128_extr_32(s7hi,2), mm128_extr_32(s6hi,2), \
mm128_extr_32(s5hi,2), mm128_extr_32(s4hi,2), \
mm128_extr_32(s3hi,2), mm128_extr_32(s2hi,2), \
mm128_extr_32(s1hi,2), mm128_extr_32(s0hi,2) ); \
casti_m256i( d,7 ) = _mm256_set_epi32( \
mm128_extr_32(s7hi,3), mm128_extr_32(s6hi,3), \
mm128_extr_32(s5hi,3), mm128_extr_32(s4hi,3), \
mm128_extr_32(s3hi,3), mm128_extr_32(s2hi,3), \
mm128_extr_32(s1hi,3), mm128_extr_32(s0hi,3) ); \
} while(0)
#define mm256_intrlv_8x32_128( d, s0, s1, s2, s3, s4, s5, s6, s7 ) \
{ \
casti_m256i( d,0 ) = _mm256_set_epi32( \
mm128_extr_32(s7,0), mm128_extr_32(s6,0), \
mm128_extr_32(s5,0), mm128_extr_32(s4,0), \
mm128_extr_32(s3,0), mm128_extr_32(s2,0), \
mm128_extr_32(s1,0), mm128_extr_32(s0,0) ); \
casti_m256i( d,1 ) = _mm256_set_epi32( \
mm128_extr_32(s7,1), mm128_extr_32(s6,1), \
mm128_extr_32(s5,1), mm128_extr_32(s4,1), \
mm128_extr_32(s3,1), mm128_extr_32(s2,1), \
mm128_extr_32(s1,1), mm128_extr_32(s0,1) ); \
casti_m256i( d,2 ) = _mm256_set_epi32( \
mm128_extr_32(s7,2), mm128_extr_32(s6,2), \
mm128_extr_32(s5,2), mm128_extr_32(s4,2), \
mm128_extr_32(s3,2), mm128_extr_32(s2,2), \
mm128_extr_32(s1,2), mm128_extr_32(s0,2) ); \
casti_m256i( d,3 ) = _mm256_set_epi32( \
mm128_extr_32(s7,3), mm128_extr_32(s6,3), \
mm128_extr_32(s5,3), mm128_extr_32(s4,3), \
mm128_extr_32(s3,3), mm128_extr_32(s2,3), \
mm128_extr_32(s1,3), mm128_extr_32(s0,3) ); \
} while(0)
/*
#define mm256_bswap_intrlv_8x32_256( d, src ) \
do { \
__m256i s0 = mm256_bswap_32( src ); \
__m128i s1 = _mm256_extracti128_si256( s0, 1 ); \
casti_m256i( d, 0 ) = _mm256_set1_epi32( _mm_extract_epi32( \
_mm256_castsi256_si128( s0 ), 0 ) ); \
casti_m256i( d, 1 ) = _mm256_set1_epi32( _mm_extract_epi32( \
_mm256_castsi256_si128( s0 ), 1 ) ); \
casti_m256i( d, 2 ) = _mm256_set1_epi32( _mm_extract_epi32( \
_mm256_castsi256_si128( s0 ), 2 ) ); \
casti_m256i( d, 3 ) = _mm256_set1_epi32( _mm_extract_epi32( \
_mm256_castsi256_si128( s0 ), 3 ) ); \
casti_m256i( d, 4 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 0 ) ); \
casti_m256i( d, 5 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 1 ) ); \
casti_m256i( d, 6 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 2 ) ); \
casti_m256i( d, 7 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 3 ) ); \
} while(0)
#define mm256_bswap_intrlv_8x32_128( d, src ) \
do { \
__m128i ss = mm128_bswap_32( src ); \
casti_m256i( d, 0 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 0 ) ); \
casti_m256i( d, 1 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 1 ) ); \
casti_m256i( d, 2 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 2 ) ); \
casti_m256i( d, 3 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 3 ) ); \
} while(0)
*/
#define mm256_dintrlv_8x32_256( d0, d1, d2, d3, d4, d5, d6, d7, s ) \
do { \
__m256i s0 = casti_m256i(s,0); \
__m256i s1 = casti_m256i(s,1); \
__m256i s2 = casti_m256i(s,2); \
__m256i s3 = casti_m256i(s,3); \
__m256i s4 = casti_m256i(s,4); \
__m256i s5 = casti_m256i(s,5); \
__m256i s6 = casti_m256i(s,6); \
__m256i s7 = casti_m256i(s,7); \
__m128i s0hi = _mm256_extracti128_si256( s0, 1 ); \
__m128i s1hi = _mm256_extracti128_si256( s1, 1 ); \
__m128i s2hi = _mm256_extracti128_si256( s2, 1 ); \
__m128i s3hi = _mm256_extracti128_si256( s3, 1 ); \
__m128i s4hi = _mm256_extracti128_si256( s4, 1 ); \
__m128i s5hi = _mm256_extracti128_si256( s5, 1 ); \
__m128i s6hi = _mm256_extracti128_si256( s6, 1 ); \
__m128i s7hi = _mm256_extracti128_si256( s7, 1 ); \
d0 = _mm256_set_epi32( \
extr32_cast128_256( s7, 0 ), extr32_cast128_256( s6, 0 ), \
extr32_cast128_256( s5, 0 ), extr32_cast128_256( s4, 0 ), \
extr32_cast128_256( s3, 0 ), extr32_cast128_256( s2, 0 ), \
extr32_cast128_256( s1, 0 ), extr32_cast128_256( s0, 0 ) );\
d1 = _mm256_set_epi32( \
extr32_cast128_256( s7, 1 ), extr32_cast128_256( s6, 1 ), \
extr32_cast128_256( s5, 1 ), extr32_cast128_256( s4, 1 ), \
extr32_cast128_256( s3, 1 ), extr32_cast128_256( s2, 1 ), \
extr32_cast128_256( s1, 1 ), extr32_cast128_256( s0, 1 ) );\
d2 = _mm256_set_epi32( \
extr32_cast128_256( s7, 2 ), extr32_cast128_256( s6, 2 ), \
extr32_cast128_256( s5, 2 ), extr32_cast128_256( s4, 2 ), \
extr32_cast128_256( s3, 2 ), extr32_cast128_256( s2, 2 ), \
extr32_cast128_256( s1, 2 ), extr32_cast128_256( s0, 2 ) );\
d3 = _mm256_set_epi32( \
extr32_cast128_256( s7, 3 ), extr32_cast128_256( s6, 3 ), \
extr32_cast128_256( s5, 3 ), extr32_cast128_256( s4, 3 ), \
extr32_cast128_256( s3, 3 ), extr32_cast128_256( s2, 3 ), \
extr32_cast128_256( s1, 3 ), extr32_cast128_256( s0, 3 ) );\
d4 = _mm256_set_epi32( \
_mm_extract_epi32( s7hi, 0 ), _mm_extract_epi32( s6hi, 0 ), \
_mm_extract_epi32( s5hi, 0 ), _mm_extract_epi32( s4hi, 0 ), \
_mm_extract_epi32( s3hi, 0 ), _mm_extract_epi32( s2hi, 0 ), \
_mm_extract_epi32( s1hi, 0 ), _mm_extract_epi32( s0hi, 0 ) ); \
d5 = _mm256_set_epi32( \
_mm_extract_epi32( s7hi, 1 ), _mm_extract_epi32( s6hi, 1 ), \
_mm_extract_epi32( s5hi, 1 ), _mm_extract_epi32( s4hi, 1 ), \
_mm_extract_epi32( s3hi, 1 ), _mm_extract_epi32( s2hi, 1 ), \
_mm_extract_epi32( s1hi, 1 ), _mm_extract_epi32( s0hi, 1 ) ); \
d6 = _mm256_set_epi32( \
_mm_extract_epi32( s7hi, 2 ), _mm_extract_epi32( s6hi, 2 ), \
_mm_extract_epi32( s5hi, 2 ), _mm_extract_epi32( s4hi, 2 ), \
_mm_extract_epi32( s3hi, 2 ), _mm_extract_epi32( s2hi, 2 ), \
_mm_extract_epi32( s1hi, 2 ), _mm_extract_epi32( s0hi, 2 ) ); \
d7 = _mm256_set_epi32( \
_mm_extract_epi32( s7hi, 3 ), _mm_extract_epi32( s6hi, 3 ), \
_mm_extract_epi32( s5hi, 3 ), _mm_extract_epi32( s4hi, 3 ), \
_mm_extract_epi32( s3hi, 3 ), _mm_extract_epi32( s2hi, 3 ), \
_mm_extract_epi32( s1hi, 3 ), _mm_extract_epi32( s0hi, 3 ) ); \
} while(0)
#define mm128_dintrlv_8x32_128( d0, d1, d2, d3, d4, d5, d6, d7, s ) \
do { \
__m128i s0 = casti_m128i(s,0); \
__m128i s1 = casti_m128i(s,1); \
__m128i s2 = casti_m128i(s,2); \
__m128i s3 = casti_m128i(s,3); \
d0 = _mm_set_epi32( \
_mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \
_mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \
d1 = _mm_set_epi32( \
_mm_extract_epi32( s3, 1 ), _mm_extract_epi32( s2, 0 ), \
_mm_extract_epi32( s1, 1 ), _mm_extract_epi32( s0, 0 ) ); \
d2 = _mm_set_epi32( \
_mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \
_mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \
d3 = _mm_set_epi32( \
_mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \
_mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \
d4 = _mm_set_epi32( \
_mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \
_mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \
d5 = _mm_set_epi32( \
_mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \
_mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \
d6 = _mm_set_epi32( \
_mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \
_mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \
d7 = _mm_set_epi32( \
_mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \
_mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \
} while(0)
#define mm256_intrlv_4x64_256( d, s0, s1, s2, s3 ) \
do { \
__m128i s0hi = _mm256_extracti128_si256( s0, 1 ); \
__m128i s1hi = _mm256_extracti128_si256( s1, 1 ); \
__m128i s2hi = _mm256_extracti128_si256( s2, 1 ); \
__m128i s3hi = _mm256_extracti128_si256( s3, 1 ); \
casti_m256i( d,0 ) = _mm256_set_epi64x( \
extr64_cast128_256( s3, 0 ), extr64_cast128_256( s2, 0 ), \
extr64_cast128_256( s1, 0 ), extr64_cast128_256( s0, 0 ) ); \
casti_m256i( d,1 ) = _mm256_set_epi64x( \
extr64_cast128_256( s3, 1 ), extr64_cast128_256( s2, 1 ), \
extr64_cast128_256( s1, 1 ), extr64_cast128_256( s0, 1 ) ); \
casti_m256i( d,2 ) = _mm256_set_epi64x( \
_mm_extract_epi64( s3hi,0 ), _mm_extract_epi64( s2hi,0 ), \
_mm_extract_epi64( s1hi,0 ), _mm_extract_epi64( s0hi,0 ) ); \
casti_m256i( d,3 ) = _mm256_set_epi64x( \
_mm_extract_epi64( s3hi,1 ), _mm_extract_epi64( s2hi,1 ), \
_mm_extract_epi64( s1hi,1 ), _mm_extract_epi64( s0hi,1 ) ); \
} while(0)
#define mm256_intrlv_4x64_128( d, s0, s1, s2, s3 ) \
do { \
casti_m256i( d,0 ) = _mm256_set_epi64x( \
_mm_extract_epi64( s3, 0 ), _mm_extract_epi64( s2, 0 ), \
_mm_extract_epi64( s1, 0 ), _mm_extract_epi64( s0, 0 ) ); \
casti_m256i( d,1 ) = _mm256_set_epi64x( \
_mm_extract_epi64( s3, 1 ), _mm_extract_epi64( s2, 1 ), \
_mm_extract_epi64( s1, 1 ), _mm_extract_epi64( s0, 1 ) ); \
} while(0)
/*
#define mm256_bswap_intrlv_4x64_256( d, src ) \
do { \
__m256i s0 = mm256_bswap_32( src ); \
__m128i s1 = _mm256_extracti128_si256( s0, 1 ); \
casti_m256i( d,0 ) = _mm256_set1_epi64x( _mm_extract_epi64( \
_mm256_castsi256_si128( s0 ), 0 ) ); \
casti_m256i( d,1 ) = _mm256_set1_epi64x( _mm_extract_epi64( \
_mm256_castsi256_si128( s0 ), 1 ) ); \
casti_m256i( d,2 ) = _mm256_set1_epi64x( _mm_extract_epi64( s1, 0 ) ); \
casti_m256i( d,3 ) = _mm256_set1_epi64x( _mm_extract_epi64( s1, 1 ) ); \
} while(0)
#define mm256_bswap_intrlv_4x64_128( d, src ) \
do { \
__m128i ss = mm128_bswap_32( src ); \
casti_m256i( d,0 ) = _mm256_set1_epi64x( _mm_extract_epi64( ss, 0 ) ); \
casti_m256i( d,1 ) = _mm256_set1_epi64x( _mm_extract_epi64( ss, 1 ) ); \
} while(0)
*/
// 4 lanes of 256 bits using 64 bit interleaving (standard final hash size)
static inline void mm256_dintrlv_4x64_256( void *d0, void *d1, void *d2,
void *d3, const int n, const void *src )
{
__m256i s0 = *( (__m256i*) src ); // s[0][1:0]
__m256i s1 = *( (__m256i*)(src+32) ); // s[1][1:0]
__m256i s2 = *( (__m256i*)(src+64) ); // s[2][1:0]
__m256i s3 = *( (__m256i*)(src+96) ); // s[3][1:0]
__m128i s0hi = _mm256_extracti128_si256( s0, 1 ); // s[0][3:2]
__m128i s1hi = _mm256_extracti128_si256( s1, 1 ); // s[1][3:2]
__m128i s2hi = _mm256_extracti128_si256( s2, 1 ); // s[2][3:2]
__m128i s3hi = _mm256_extracti128_si256( s3, 1 ); // s[3][3:2]
casti_m256i( d0,n ) = _mm256_set_epi64x(
extr64_cast128_256( s3, 0 ), extr64_cast128_256( s2, 0 ),
extr64_cast128_256( s1, 0 ), extr64_cast128_256( s0, 0 ) );
casti_m256i( d1,n ) = _mm256_set_epi64x(
extr64_cast128_256( s3, 1 ), extr64_cast128_256( s2, 1 ),
extr64_cast128_256( s1, 1 ), extr64_cast128_256( s0, 1 ) );
casti_m256i( d2,n ) = _mm256_set_epi64x(
_mm_extract_epi64( s3hi, 0 ), _mm_extract_epi64( s2hi, 0 ),
_mm_extract_epi64( s1hi, 0 ), _mm_extract_epi64( s0hi, 0 ) );
casti_m256i( d3,n ) = _mm256_set_epi64x(
_mm_extract_epi64( s3hi, 1 ), _mm_extract_epi64( s2hi, 1 ),
_mm_extract_epi64( s1hi, 1 ), _mm_extract_epi64( s0hi, 1 ) );
}
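// For reference ( an illustration, not part of this file ): 4x64 interleaving
// is a simple round robin of 64 bit words, lane l word i lives at index
// 4*i + l of the interleaved buffer.  A scalar model of the deinterleave done
// above:
static inline void dintrlv_4x64_scalar( uint64_t *d0, uint64_t *d1,
               uint64_t *d2, uint64_t *d3, const uint64_t *s, int words )
{
   for ( int i = 0; i < words; i++ )
   {
      d0[i] = s[ 4*i     ];
      d1[i] = s[ 4*i + 1 ];
      d2[i] = s[ 4*i + 2 ];
      d3[i] = s[ 4*i + 3 ];
   }
}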
// quarter avx2 block, 16 bytes * 4 lanes
// 4 lanes of 128 bits using 64 bit interleaving
// Used for last 16 bytes of 80 byte input, only used for testing.
static inline void mm128_dintrlv_4x64_128( void *d0, void *d1, void *d2,
void *d3, const int n, const void *src )
{
__m256i s0 = *( (__m256i*) src );
__m256i s1 = *( (__m256i*)(src+32) );
__m128i s0hi = _mm256_extracti128_si256( s0, 1 );
__m128i s1hi = _mm256_extracti128_si256( s1, 1 );
casti_m128i( d0,n ) = _mm_set_epi64x( extr64_cast128_256( s1 , 0 ),
extr64_cast128_256( s0 , 0 ) );
casti_m128i( d1,n ) = _mm_set_epi64x( extr64_cast128_256( s1 , 1 ),
extr64_cast128_256( s0 , 1 ) );
casti_m128i( d2,n ) = _mm_set_epi64x( _mm_extract_epi64( s1hi, 0 ),
_mm_extract_epi64( s0hi, 0 ) );
casti_m128i( d3,n ) = _mm_set_epi64x( _mm_extract_epi64( s1hi, 1 ),
_mm_extract_epi64( s0hi, 1 ) );
}
/*
static inline void mm256_dintrlv_2x128x256( void *d0, void *d1,
const int n, const void *s )
{
casti_m256i( d0,n ) = mm256_get_64( s, 0, 1, 4, 5 );
casti_m256i( d1,n ) = mm256_get_64( s, 2, 3, 6, 7 );
}
*/
//
#define mm256_intrlv_4x32_256( d, s0, s1, s2, s3 ) \
do { \
casti_m256i( d,0 ) = _mm256_set_epi32( \
mm128_extr_32( s3, 1 ), mm128_extr_32( s2, 1 ), \
mm128_extr_32( s1, 1 ), mm128_extr_32( s0, 1 ), \
mm128_extr_32( s3, 0 ), mm128_extr_32( s2, 0 ), \
mm128_extr_32( s1, 0 ), mm128_extr_32( s0, 0 ) ); \
casti_m256i( d,1 ) = _mm256_set_epi32( \
mm128_extr_32( s3, 3 ), mm128_extr_32( s2, 3 ), \
mm128_extr_32( s1, 3 ), mm128_extr_32( s0, 3 ), \
mm128_extr_32( s3, 2 ), mm128_extr_32( s2, 2 ), \
mm128_extr_32( s1, 2 ), mm128_extr_32( s0, 2 ) ); \
casti_m256i( d,2 ) = _mm256_set_epi32( \
mm128_extr_32( s3, 5 ), mm128_extr_32( s2, 5 ), \
mm128_extr_32( s1, 5 ), mm128_extr_32( s0, 5 ), \
mm128_extr_32( s3, 4 ), mm128_extr_32( s2, 4 ), \
mm128_extr_32( s1, 4 ), mm128_extr_32( s0, 4 ) ); \
casti_m256i( d,3 ) = _mm256_set_epi32( \
mm128_extr_32( s3, 7 ), mm128_extr_32( s2, 7 ), \
mm128_extr_32( s1, 7 ), mm128_extr_32( s0, 7 ), \
mm128_extr_32( s3, 6 ), mm128_extr_32( s2, 6 ), \
mm128_extr_32( s1, 6 ), mm128_extr_32( s0, 6 ) ); \
} while(0)
// 256 bit versions of common 128 bit functions.
static inline void mm256_intrlv_4x32( void *d, const void *s0,
const void *s1, const void *s2, const void *s3, int bit_len )
{
mm256_intrlv_4x32_256( d ,casti_m256i(s0,0), casti_m256i(s1,0),
casti_m256i(s2,0), casti_m256i(s3,0) );
if ( bit_len <= 256 ) return;
mm256_intrlv_4x32_256( d+128 ,casti_m256i(s0,1), casti_m256i(s1,1),
casti_m256i(s2,1), casti_m256i(s3,1) );
if ( bit_len <= 512 ) return;
if ( bit_len <= 640 )
{
mm128_intrlv_4x32_128( d+256, casti_m128i(s0,4), casti_m128i(s1,4),
casti_m128i(s2,4), casti_m128i(s3,4) );
return;
}
mm256_intrlv_4x32_256( d+256 ,casti_m256i(s0,2), casti_m256i(s1,2),
casti_m256i(s2,2), casti_m256i(s3,2) );
mm256_intrlv_4x32_256( d+384 ,casti_m256i(s0,3), casti_m256i(s1,3),
casti_m256i(s2,3), casti_m256i(s3,3) );
}
static inline void mm256_dintrlv_4x32_256( void *d0, void *d1, void *d2,
void *d3, const void *src )
{
__m256i s0 = *(__m256i*) src;
__m256i s1 = *(__m256i*)(src+32);
__m256i s2 = *(__m256i*)(src+64);
__m256i s3 = *(__m256i*)(src+96);
*(__m256i*)d0 = _mm256_set_epi32(
_mm256_extract_epi32( s3,4 ), _mm256_extract_epi32( s3,0 ),
_mm256_extract_epi32( s2,4 ), _mm256_extract_epi32( s2,0 ),
_mm256_extract_epi32( s1,4 ), _mm256_extract_epi32( s1,0 ),
_mm256_extract_epi32( s0,4 ), _mm256_extract_epi32( s0,0 ) );
*(__m256i*)d1 = _mm256_set_epi32(
_mm256_extract_epi32( s3,5 ), _mm256_extract_epi32( s3,1 ),
_mm256_extract_epi32( s2,5 ), _mm256_extract_epi32( s2,1 ),
_mm256_extract_epi32( s1,5 ), _mm256_extract_epi32( s1,1 ),
_mm256_extract_epi32( s0,5 ), _mm256_extract_epi32( s0,1 ) );
*(__m256i*)d2 = _mm256_set_epi32(
_mm256_extract_epi32( s3,6 ), _mm256_extract_epi32( s3,2 ),
_mm256_extract_epi32( s2,6 ), _mm256_extract_epi32( s2,2 ),
_mm256_extract_epi32( s1,6 ), _mm256_extract_epi32( s1,2 ),
_mm256_extract_epi32( s0,6 ), _mm256_extract_epi32( s0,2 ) );
*(__m256i*)d3 = _mm256_set_epi32(
_mm256_extract_epi32( s3,7 ), _mm256_extract_epi32( s3,3 ),
_mm256_extract_epi32( s2,7 ), _mm256_extract_epi32( s2,3 ),
_mm256_extract_epi32( s1,7 ), _mm256_extract_epi32( s1,3 ),
_mm256_extract_epi32( s0,7 ), _mm256_extract_epi32( s0,3 ) );
}
static inline void mm256_dintrlv_4x32( void *d0, void *d1, void *d2,
void *d3, const void *s, int bit_len )
{
mm256_dintrlv_4x32_256( d0 , d1 , d2 , d3 , s );
if ( bit_len <= 256 ) return;
mm256_dintrlv_4x32_256( d0+ 32, d1+ 32, d2+ 32, d3+ 32, s+128 );
if ( bit_len <= 512 ) return;
if ( bit_len <= 640 )
{
mm128_dintrlv_4x32_128( d0+ 64, d1+ 64, d2+ 64, d3+ 64, s+256 );
return;
}
mm256_dintrlv_4x32_256( d0+ 64, d1+ 64, d2+ 64, d3+ 64, s+256 );
mm256_dintrlv_4x32_256( d0+ 96, d1+ 96, d2+ 96, d3+ 96, s+384 );
}
static inline void mm256_extr_lane_4x32( void *d, const void *s,
const int lane, const int bit_len )
{
casti_m256i( d, 0 ) = mm256_get_32( s, lane , lane+ 4, lane+ 8, lane+12,
lane+16, lane+20, lane+24, lane+28 );
if ( bit_len <= 256 ) return;
casti_m256i( d, 1 ) = mm256_get_32( s, lane+32, lane+36, lane+40, lane+44,
lane+48, lane+52, lane+56, lane+60 );
}
// Interleave 8 source buffers containing 32 bit data into the destination
// vector
static inline void mm256_intrlv_8x32( void *d, const void *s0,
const void *s1, const void *s2, const void *s3, const void *s4,
const void *s5, const void *s6, const void *s7, int bit_len )
{
mm256_intrlv_8x32_256( d , casti_m256i( s0,0 ), casti_m256i( s1,0 ),
casti_m256i( s2,0 ), casti_m256i( s3,0 ), casti_m256i( s4,0 ),
casti_m256i( s5,0 ), casti_m256i( s6,0 ), casti_m256i( s7,0 ) );
if ( bit_len <= 256 ) return;
mm256_intrlv_8x32_256( d+256, casti_m256i( s0,1 ), casti_m256i( s1,1 ),
casti_m256i( s2,1 ), casti_m256i( s3,1 ), casti_m256i( s4,1 ),
casti_m256i( s5,1 ), casti_m256i( s6,1 ), casti_m256i( s7,1 ) );
if ( bit_len <= 512 ) return;
if ( bit_len <= 640 )
{
mm256_intrlv_8x32_128( d+512, casti_m128i( s0,4 ), casti_m128i( s1,4 ),
casti_m128i( s2,4 ), casti_m128i( s3,4 ), casti_m128i( s4,4 ),
casti_m128i( s5,4 ), casti_m128i( s6,4 ), casti_m128i( s7,4 ) );
return;
}
mm256_intrlv_8x32_256( d+512, casti_m256i( s0,2 ), casti_m256i( s1,2 ),
casti_m256i( s2,2 ), casti_m256i( s3,2 ), casti_m256i( s4,2 ),
casti_m256i( s5,2 ), casti_m256i( s6,2 ), casti_m256i( s7,2 ) );
mm256_intrlv_8x32_256( d+768, casti_m256i( s0,3 ), casti_m256i( s1,3 ),
casti_m256i( s2,3 ), casti_m256i( s3,3 ), casti_m256i( s4,3 ),
casti_m256i( s5,3 ), casti_m256i( s6,3 ), casti_m256i( s7,3 ) );
// bit_len == 1024
}
// A couple of mining specific functions.
/*
// Interleave 80 bytes of 32 bit data for 8 lanes.
static inline void mm256_bswap_intrlv80_8x32( void *d, const void *s )
{
mm256_bswap_intrlv_8x32_256( d , casti_m256i( s, 0 ) );
mm256_bswap_intrlv_8x32_256( d+256, casti_m256i( s, 1 ) );
mm256_bswap_intrlv_8x32_128( d+512, casti_m128i( s, 4 ) );
}
*/
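// Hedged usage sketch ( h0..h7, n and the 640 bit length are illustrative,
// not code from this file ): a typical 8 way scanhash interleaves the 80 byte
// block header once per job, then each iteration only rewrites the nonce word
// ( header word 19 ) in every lane, ignoring any byte swapping the algorithm
// needs:
//
//    uint32_t vdata[20*8] __attribute__ ((aligned (64)));
//    mm256_intrlv_8x32( vdata, h0, h1, h2, h3, h4, h5, h6, h7, 640 );
//    for ( int l = 0; l < 8; l++ )
//       vdata[ 19*8 + l ] = n + l;      // one nonce per lane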
// Deinterleave 8 buffers of 32 bit data from the source buffer.
// Sub-function can be called directly for 32 byte final hash.
static inline void mm256_dintrlv_8x32( void *d0, void *d1, void *d2,
void *d3, void *d4, void *d5, void *d6, void *d7,
const void *s, int bit_len )
{
mm256_dintrlv_8x32_256( casti_m256i(d0,0), casti_m256i(d1,0),
casti_m256i(d2,0), casti_m256i(d3,0), casti_m256i(d4,0),
casti_m256i(d5,0), casti_m256i(d6,0), casti_m256i(d7,0), s );
if ( bit_len <= 256 ) return;
mm256_dintrlv_8x32_256( casti_m256i(d0,1), casti_m256i(d1,1),
casti_m256i(d2,1), casti_m256i(d3,1), casti_m256i(d4,1),
casti_m256i(d5,1), casti_m256i(d6,1), casti_m256i(d7,1), s+256 );
if ( bit_len <= 512 ) return;
// short block, final 16 bytes of input data
if ( bit_len <= 640 )
{
mm128_dintrlv_8x32_128( casti_m128i(d0,2), casti_m128i(d1,2),
casti_m128i(d2,2), casti_m128i(d3,2), casti_m128i(d4,2),
casti_m128i(d5,2), casti_m128i(d6,2), casti_m128i(d7,2), s+512 );
return;
}
// bitlen == 1024
mm256_dintrlv_8x32_256( casti_m256i(d0,2), casti_m256i(d1,2),
casti_m256i(d2,2), casti_m256i(d3,2), casti_m256i(d4,2),
casti_m256i(d5,2), casti_m256i(d6,2), casti_m256i(d7,2), s+512 );
mm256_dintrlv_8x32_256( casti_m256i(d0,3), casti_m256i(d1,3),
casti_m256i(d2,3), casti_m256i(d3,3), casti_m256i(d4,3),
casti_m256i(d5,3), casti_m256i(d6,3), casti_m256i(d7,3), s+768 );
}
static inline void mm256_extr_lane_8x32( void *d, const void *s,
const int lane, const int bit_len )
{
casti_m256i( d,0 ) = mm256_get_32(s, lane , lane+ 8, lane+ 16, lane+ 24,
lane+32, lane+ 40, lane+ 48, lane+ 56 );
if ( bit_len <= 256 ) return;
casti_m256i( d,1 ) = mm256_get_32(s, lane+64, lane+ 72, lane+ 80, lane+ 88,
lane+96, lane+104, lane+112, lane+120 );
// bit_len == 512
}
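// Hedged usage sketch ( vhash, lane, ptarget and fulltest are assumed from
// the rest of the source, not this file ): after an 8 way hash the results
// are still interleaved, so a candidate lane is extracted before testing it
// against the target:
//
//    uint32_t lane_hash[8] __attribute__ ((aligned (32)));
//    mm256_extr_lane_8x32( lane_hash, vhash, lane, 256 );
//    if ( fulltest( lane_hash, ptarget ) )
//       ...   // submit this lane's nonce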
// Interleave 4 source buffers containing 64 bit data into the destination
// buffer. Only bit_len 256, 512, 640 & 1024 are supported.
static inline void mm256_intrlv_4x64( void *d, const void *s0,
const void *s1, const void *s2, const void *s3, int bit_len )
{
mm256_intrlv_4x64_256( d , casti_m256i(s0,0), casti_m256i(s1,0),
casti_m256i(s2,0), casti_m256i(s3,0) );
if ( bit_len <= 256 ) return;
mm256_intrlv_4x64_256( d+128, casti_m256i(s0,1), casti_m256i(s1,1),
casti_m256i(s2,1), casti_m256i(s3,1) );
if ( bit_len <= 512 ) return;
if ( bit_len <= 640 )
{
mm256_intrlv_4x64_128( d+256, casti_m128i(s0,4), casti_m128i(s1,4),
casti_m128i(s2,4), casti_m128i(s3,4) );
return;
}
// bit_len == 1024
mm256_intrlv_4x64_256( d+256, casti_m256i(s0,2), casti_m256i(s1,2),
casti_m256i(s2,2), casti_m256i(s3,2) );
mm256_intrlv_4x64_256( d+384, casti_m256i(s0,3), casti_m256i(s1,3),
casti_m256i(s2,3), casti_m256i(s3,3) );
}
/*
// Interleave 80 bytes of 32 bit data for 8 lanes.
static inline void mm256_bswap_intrlv80_4x64( void *d, const void *s )
{
mm256_bswap_intrlv_4x64_256( d , casti_m256i( s, 0 ) );
mm256_bswap_intrlv_4x64_256( d+128, casti_m256i( s, 1 ) );
mm256_bswap_intrlv_4x64_128( d+256, casti_m128i( s, 4 ) );
}
// Blend 32 byte lanes of hash from 2 sources according to control mask.
// macro due to 256 bit value arg.
#define mm256_blend_hash_4x64( dst, a, b, mask ) \
do { \
dst[0] = _mm256_blendv_epi8( a[0], b[0], mask ); \
dst[1] = _mm256_blendv_epi8( a[1], b[1], mask ); \
dst[2] = _mm256_blendv_epi8( a[2], b[2], mask ); \
dst[3] = _mm256_blendv_epi8( a[3], b[3], mask ); \
dst[4] = _mm256_blendv_epi8( a[4], b[4], mask ); \
dst[5] = _mm256_blendv_epi8( a[5], b[5], mask ); \
dst[6] = _mm256_blendv_epi8( a[6], b[6], mask ); \
dst[7] = _mm256_blendv_epi8( a[7], b[7], mask ); \
} while(0)
*/
// Deinterleave 4 buffers of 64 bit data from the source buffer.
// bit_len must be 256, 512, 640 or 1024 bits.
// Requires overrun padding for 640 bit len.
static inline void mm256_dintrlv_4x64( void *d0, void *d1, void *d2,
void *d3, const void *s, int bit_len )
{
mm256_dintrlv_4x64_256( d0, d1, d2, d3, 0, s );
if ( bit_len <= 256 ) return;
mm256_dintrlv_4x64_256( d0, d1, d2, d3, 1, s+128 );
if ( bit_len <= 512 ) return;
// short block, final 16 bytes of input data
if ( bit_len <= 640 )
{
mm128_dintrlv_4x64_128( d0, d1, d2, d3, 4, s+256 );
return;
}
// bit_len == 1024
mm256_dintrlv_4x64_256( d0, d1, d2, d3, 2, s+256 );
mm256_dintrlv_4x64_256( d0, d1, d2, d3, 3, s+384 );
}
// extract and deinterleave specified lane.
#define mm256_extr_lane_4x64_256 \
casti_m256i( d, 0 ) = mm256_get_64( s, lane, lane+4, lane+8, lane+12 )
static inline void mm256_extr_lane_4x64( void *d, const void *s,
const int lane, const int bit_len )
{
casti_m256i( d, 0 ) = mm256_get_64( s, lane, lane+4, lane+8, lane+12 );
if ( bit_len <= 256 ) return;
casti_m256i( d, 1 ) = mm256_get_64( s, lane+16, lane+20, lane+24, lane+28 );
return;
}
// Convert from 4x32 SSE2 interleaving to 4x64 AVX2.
// Can't do it in place
static inline void mm256_rintrlv_4x32_4x64( void *dst, void *src,
int bit_len )
{
__m256i* d = (__m256i*)dst;
uint32_t *s = (uint32_t*)src;
d[0] = _mm256_set_epi32( s[ 7],s[ 3],s[ 6],s[ 2],s[ 5],s[ 1],s[ 4],s[ 0] );
d[1] = _mm256_set_epi32( s[15],s[11],s[14],s[10],s[13],s[ 9],s[12],s[ 8] );
d[2] = _mm256_set_epi32( s[23],s[19],s[22],s[18],s[21],s[17],s[20],s[16] );
d[3] = _mm256_set_epi32( s[31],s[27],s[30],s[26],s[29],s[25],s[28],s[24] );
if ( bit_len <= 256 ) return;
d[4] = _mm256_set_epi32( s[39],s[35],s[38],s[34],s[37],s[33],s[36],s[32] );
d[5] = _mm256_set_epi32( s[47],s[43],s[46],s[42],s[45],s[41],s[44],s[40] );
d[6] = _mm256_set_epi32( s[55],s[51],s[54],s[50],s[53],s[49],s[52],s[48] );
d[7] = _mm256_set_epi32( s[63],s[59],s[62],s[58],s[61],s[57],s[60],s[56] );
if ( bit_len <= 512 ) return;
d[8] = _mm256_set_epi32( s[71],s[67],s[70],s[66],s[69],s[65],s[68],s[64] );
d[9] = _mm256_set_epi32( s[79],s[75],s[78],s[74],s[77],s[73],s[76],s[72] );
if ( bit_len <= 640 ) return;
d[10] = _mm256_set_epi32(s[87],s[83],s[86],s[82],s[85],s[81],s[84],s[80]);
d[11] = _mm256_set_epi32(s[95],s[91],s[94],s[90],s[93],s[89],s[92],s[88]);
d[12] = _mm256_set_epi32(s[103],s[99],s[102],s[98],s[101],s[97],s[100],s[96]);
d[13] = _mm256_set_epi32(s[111],s[107],s[110],s[106],s[109],s[105],s[108],s[104]);
d[14] = _mm256_set_epi32(s[119],s[115],s[118],s[114],s[117],s[113],s[116],s[112]);
d[15] = _mm256_set_epi32(s[127],s[123],s[126],s[122],s[125],s[121],s[124],s[120]);
// bit_len == 1024
}
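// The conversion above is presumably for algorithms that chain a stage using
// 4x32 ( 32 bit word ) interleaving into one expecting 4x64 ( 64 bit word )
// interleaving; re-ordering the words directly avoids a full deinterleave and
// re-interleave between stages.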
// Convert from 4x64 AVX2 interleaving (256 bit vectors) to 4x32 (128 bit vectors) for AVX.
// bit_len must be multiple of 64
static inline void mm256_rintrlv_4x64_4x32( void *dst, void *src,
int bit_len )
{
__m256i *d = (__m256i*)dst;
uint32_t *s = (uint32_t*)src;
d[0] = _mm256_set_epi32( s[ 7],s[ 5],s[ 3],s[ 1],s[ 6],s[ 4],s[ 2],s[ 0] );
d[1] = _mm256_set_epi32( s[15],s[13],s[11],s[ 9],s[14],s[12],s[10],s[ 8] );
d[2] = _mm256_set_epi32( s[23],s[21],s[19],s[17],s[22],s[20],s[18],s[16] );
d[3] = _mm256_set_epi32( s[31],s[29],s[27],s[25],s[30],s[28],s[26],s[24] );
if ( bit_len <= 256 ) return;
d[4] = _mm256_set_epi32( s[39],s[37],s[35],s[33],s[38],s[36],s[34],s[32] );
d[5] = _mm256_set_epi32( s[47],s[45],s[43],s[41],s[46],s[44],s[42],s[40] );
d[6] = _mm256_set_epi32( s[55],s[53],s[51],s[49],s[54],s[52],s[50],s[48] );
d[7] = _mm256_set_epi32( s[63],s[61],s[59],s[57],s[62],s[60],s[58],s[56] );
if ( bit_len <= 512 ) return;
d[8] = _mm256_set_epi32( s[71],s[69],s[67],s[65],s[70],s[68],s[66],s[64] );
d[9] = _mm256_set_epi32( s[79],s[77],s[75],s[73],s[78],s[76],s[74],s[72] );
if ( bit_len <= 640 ) return;
d[10] = _mm256_set_epi32( s[87],s[85],s[83],s[81],s[86],s[84],s[82],s[80] );
d[11] = _mm256_set_epi32( s[95],s[93],s[91],s[89],s[94],s[92],s[90],s[88] );
d[12] = _mm256_set_epi32( s[103],s[101],s[99],s[97],s[102],s[100],s[98],s[96] );
d[13] = _mm256_set_epi32( s[111],s[109],s[107],s[105],s[110],s[108],s[106],s[104] );
d[14] = _mm256_set_epi32( s[119],s[117],s[115],s[113],s[118],s[116],s[114],s[112] );
d[15] = _mm256_set_epi32( s[127],s[125],s[123],s[121],s[126],s[124],s[122],s[120] );
// bit_len == 1024
}
static inline void mm256_rintrlv_4x64_2x128( void *dst0, void *dst1,
const void *src, int bit_len )
{
__m256i* d0 = (__m256i*)dst0;
__m256i* d1 = (__m256i*)dst1;
uint64_t *s = (uint64_t*)src;
d0[0] = _mm256_set_epi64x( s[ 5], s[ 1], s[ 4], s[ 0] );
d1[0] = _mm256_set_epi64x( s[ 7], s[ 3], s[ 6], s[ 2] );
d0[1] = _mm256_set_epi64x( s[13], s[ 9], s[12], s[ 8] );
d1[1] = _mm256_set_epi64x( s[15], s[11], s[14], s[10] );
if ( bit_len <= 256 ) return;
d0[2] = _mm256_set_epi64x( s[21], s[17], s[20], s[16] );
d1[2] = _mm256_set_epi64x( s[23], s[19], s[22], s[18] );
d0[3] = _mm256_set_epi64x( s[29], s[25], s[28], s[24] );
d1[3] = _mm256_set_epi64x( s[31], s[27], s[30], s[26] );
if ( bit_len <= 512 ) return;
d0[4] = _mm256_set_epi64x( s[37], s[33], s[36], s[32] );
d1[4] = _mm256_set_epi64x( s[39], s[35], s[38], s[34] );
d0[5] = _mm256_set_epi64x( s[45], s[41], s[44], s[40] );
d1[5] = _mm256_set_epi64x( s[47], s[43], s[46], s[42] );
d0[6] = _mm256_set_epi64x( s[53], s[49], s[52], s[48] );
d1[6] = _mm256_set_epi64x( s[55], s[51], s[54], s[50] );
d0[7] = _mm256_set_epi64x( s[61], s[57], s[60], s[56] );
d1[7] = _mm256_set_epi64x( s[63], s[59], s[62], s[58] );
}
static inline void mm256_rintrlv_2x128_4x64( void *dst, const void *src0,
const void *src1, int bit_len )
{
__m256i* d = (__m256i*)dst;
uint64_t *s0 = (uint64_t*)src0;
uint64_t *s1 = (uint64_t*)src1;
d[ 0] = _mm256_set_epi64x( s1[2], s1[0], s0[2], s0[0] );
d[ 1] = _mm256_set_epi64x( s1[3], s1[1], s0[3], s0[1] );
d[ 2] = _mm256_set_epi64x( s1[6], s1[4], s0[6], s0[4] );
d[ 3] = _mm256_set_epi64x( s1[7], s1[5], s0[7], s0[5] );
if ( bit_len <= 256 ) return;
d[ 4] = _mm256_set_epi64x( s1[10], s1[ 8], s0[10], s0[ 8] );
d[ 5] = _mm256_set_epi64x( s1[11], s1[ 9], s0[11], s0[ 9] );
d[ 6] = _mm256_set_epi64x( s1[14], s1[12], s0[14], s0[12] );
d[ 7] = _mm256_set_epi64x( s1[15], s1[13], s0[15], s0[13] );
if ( bit_len <= 512 ) return;
d[ 8] = _mm256_set_epi64x( s1[18], s1[16], s0[18], s0[16] );
d[ 9] = _mm256_set_epi64x( s1[19], s1[17], s0[19], s0[17] );
d[10] = _mm256_set_epi64x( s1[22], s1[20], s0[22], s0[20] );
d[11] = _mm256_set_epi64x( s1[23], s1[21], s0[23], s0[21] );
d[12] = _mm256_set_epi64x( s1[26], s1[24], s0[26], s0[24] );
d[13] = _mm256_set_epi64x( s1[27], s1[25], s0[27], s0[25] );
d[14] = _mm256_set_epi64x( s1[30], s1[28], s0[30], s0[28] );
d[15] = _mm256_set_epi64x( s1[31], s1[29], s0[31], s0[29] );
}
static inline void mm256_intrlv_2x128( const void *d, const void *s0,
void *s1, const int bit_len )
{
__m128i s1hi = _mm256_extracti128_si256( casti_m256i( s1,0 ), 1 );
__m128i s0hi = _mm256_extracti128_si256( casti_m256i( s0,0 ), 1 );
casti_m256i( d,0 ) = mm256_concat_128(
_mm256_castsi256_si128( casti_m256i( s1,0 ) ),
_mm256_castsi256_si128( casti_m256i( s0,0 ) ) );
casti_m256i( d,1 ) = mm256_concat_128( s1hi, s0hi );
if ( bit_len <= 256 ) return;
s0hi = _mm256_extracti128_si256( casti_m256i( s0,1 ), 1 );
s1hi = _mm256_extracti128_si256( casti_m256i( s1,1 ), 1 );
casti_m256i( d,2 ) = mm256_concat_128(
_mm256_castsi256_si128( casti_m256i( s1,1 ) ),
_mm256_castsi256_si128( casti_m256i( s0,1 ) ) );
casti_m256i( d,3 ) = mm256_concat_128( s1hi, s0hi );
if ( bit_len <= 512 ) return;
if ( bit_len <= 640 )
{
casti_m256i( d,4 ) = mm256_concat_128(
_mm256_castsi256_si128( casti_m256i( s1,2 ) ),
_mm256_castsi256_si128( casti_m256i( s0,2 ) ) );
return;
}
s0hi = _mm256_extracti128_si256( casti_m256i( s0,2 ), 1 );
s1hi = _mm256_extracti128_si256( casti_m256i( s1,2 ), 1 );
casti_m256i( d,4 ) = mm256_concat_128(
_mm256_castsi256_si128( casti_m256i( s1,2 ) ),
_mm256_castsi256_si128( casti_m256i( s0,2 ) ) );
casti_m256i( d,5 ) = mm256_concat_128( s1hi, s0hi );
s0hi = _mm256_extracti128_si256( casti_m256i( s0,3 ), 1 );
s1hi = _mm256_extracti128_si256( casti_m256i( s1,3 ), 1 );
casti_m256i( d,6 ) = mm256_concat_128(
_mm256_castsi256_si128( casti_m256i( s1,3 ) ),
_mm256_castsi256_si128( casti_m256i( s0,3 ) ) );
casti_m256i( d,7 ) = mm256_concat_128( s1hi, s0hi );
}
// 512 is the bit length used by most algos, so eliminate the conditionals.
static inline void mm256_dintrlv_2x128_512( void *dst0, void *dst1,
const void *s )
{
__m256i *d0 = (__m256i*)dst0;
__m256i *d1 = (__m256i*)dst1;
__m256i s0 = casti_m256i( s, 0 );
__m256i s1 = casti_m256i( s, 1 );
d0[0] = _mm256_permute2x128_si256( s0, s1, 0x20 );
d1[0] = _mm256_permute2x128_si256( s0, s1, 0x31 );
s0 = casti_m256i( s, 2 );
s1 = casti_m256i( s, 3 );
d0[1] = _mm256_permute2x128_si256( s0, s1, 0x20 );
d1[1] = _mm256_permute2x128_si256( s0, s1, 0x31 );
}
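// Control byte reference for _mm256_permute2x128_si256( a, b, imm ) as used
// above: the low nibble of imm selects the low 128 bit half of the result and
// the high nibble the high half, with 0/1 picking the low/high half of a and
// 2/3 the low/high half of b.  So 0x20 gathers the two low halves
// { b.lo, a.lo } and 0x31 the two high halves { b.hi, a.hi }, which is exactly
// a 2 lane 128 bit deinterleave.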
// Phase out usage for all 512 bit data lengths
static inline void mm256_dintrlv_2x128( void *dst0, void *dst1, const void *s,
int bit_len )
{
__m256i *d0 = (__m256i*)dst0;
__m256i *d1 = (__m256i*)dst1;
__m256i s0 = casti_m256i( s, 0 );
__m256i s1 = casti_m256i( s, 1 );
d0[0] = _mm256_permute2x128_si256( s0, s1, 0x20 );
d1[0] = _mm256_permute2x128_si256( s0, s1, 0x31 );
if ( bit_len <= 256 ) return;
s0 = casti_m256i( s, 2 );
s1 = casti_m256i( s, 3 );
d0[1] = _mm256_permute2x128_si256( s0, s1, 0x20 );
d1[1] = _mm256_permute2x128_si256( s0, s1, 0x31 );
if ( bit_len <= 512 ) return;
s0 = casti_m256i( s, 4 );
s1 = casti_m256i( s, 5 );
d0[2] = _mm256_permute2x128_si256( s0, s1, 0x20 );
d1[2] = _mm256_permute2x128_si256( s0, s1, 0x31 );
s0 = casti_m256i( s, 6 );
s1 = casti_m256i( s, 7 );
d0[3] = _mm256_permute2x128_si256( s0, s1, 0x20 );
d1[3] = _mm256_permute2x128_si256( s0, s1, 0x31 );
}
#undef extr64_cast128_256
#undef extr32_cast128_256
#endif // AVX
#endif // INTRLV_AVX_H__

View File

@@ -1,104 +0,0 @@
#if !defined(INTRLV_AVX2_H__)
#define INTRLV_AVX2_H__ 1
#if defined(__AVX2__)
///////////////////////////////////////////////////////////
//
// AVX2 256 Bit Vectors
//
// A few functions that need AVX2 for 256 bit.
// Blend 2 vectors alternating hi & lo: { hi[n], lo[n-1], ... hi[1], lo[0] }
#define mm256_intrlv_blend_128( hi, lo ) \
_mm256_blend_epi32( hi, lo, 0x0f )
#define mm256_intrlv_blend_64( hi, lo ) \
_mm256_blend_epi32( hi, lo, 0x33 )
#define mm256_intrlv_blend_32( hi, lo ) \
_mm256_blend_epi32( hi, lo, 0x55 )
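// Mask reference for _mm256_blend_epi32( hi, lo, imm ) as used above: each
// immediate bit selects one 32 bit element, 1 taking it from lo and 0 from hi,
// so 0x0f alternates 128 bit halves, 0x33 alternates 64 bit elements and 0x55
// alternates 32 bit elements.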
#define mm256_bswap_intrlv_8x32_256( d, src ) \
do { \
__m256i s0 = mm256_bswap_32( src ); \
__m128i s1 = _mm256_extracti128_si256( s0, 1 ); \
casti_m256i( d, 0 ) = _mm256_set1_epi32( _mm_extract_epi32( \
_mm256_castsi256_si128( s0 ), 0 ) ); \
casti_m256i( d, 1 ) = _mm256_set1_epi32( _mm_extract_epi32( \
_mm256_castsi256_si128( s0 ), 1 ) ); \
casti_m256i( d, 2 ) = _mm256_set1_epi32( _mm_extract_epi32( \
_mm256_castsi256_si128( s0 ), 2 ) ); \
casti_m256i( d, 3 ) = _mm256_set1_epi32( _mm_extract_epi32( \
_mm256_castsi256_si128( s0 ), 3 ) ); \
casti_m256i( d, 4 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 0 ) ); \
casti_m256i( d, 5 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 1 ) ); \
casti_m256i( d, 6 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 2 ) ); \
casti_m256i( d, 7 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 3 ) ); \
} while(0)
#define mm256_bswap_intrlv_8x32_128( d, src ) \
do { \
__m128i ss = mm128_bswap_32( src ); \
casti_m256i( d, 0 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 0 ) ); \
casti_m256i( d, 1 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 1 ) ); \
casti_m256i( d, 2 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 2 ) ); \
casti_m256i( d, 3 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 3 ) ); \
} while(0)
#define mm256_bswap_intrlv_4x64_256( d, src ) \
do { \
__m256i s0 = mm256_bswap_32( src ); \
__m128i s1 = _mm256_extracti128_si256( s0, 1 ); \
casti_m256i( d,0 ) = _mm256_set1_epi64x( _mm_extract_epi64( \
_mm256_castsi256_si128( s0 ), 0 ) ); \
casti_m256i( d,1 ) = _mm256_set1_epi64x( _mm_extract_epi64( \
_mm256_castsi256_si128( s0 ), 1 ) ); \
casti_m256i( d,2 ) = _mm256_set1_epi64x( _mm_extract_epi64( s1, 0 ) ); \
casti_m256i( d,3 ) = _mm256_set1_epi64x( _mm_extract_epi64( s1, 1 ) ); \
} while(0)
#define mm256_bswap_intrlv_4x64_128( d, src ) \
do { \
__m128i ss = mm128_bswap_32( src ); \
casti_m256i( d,0 ) = _mm256_set1_epi64x( _mm_extract_epi64( ss, 0 ) ); \
casti_m256i( d,1 ) = _mm256_set1_epi64x( _mm_extract_epi64( ss, 1 ) ); \
} while(0)
// A couple of mining specific functions.
// Interleave 80 bytes of 32 bit data for 8 lanes.
static inline void mm256_bswap_intrlv80_8x32( void *d, const void *s )
{
mm256_bswap_intrlv_8x32_256( d , casti_m256i( s, 0 ) );
mm256_bswap_intrlv_8x32_256( d+256, casti_m256i( s, 1 ) );
mm256_bswap_intrlv_8x32_128( d+512, casti_m128i( s, 4 ) );
}
// Interleave 80 bytes of 32 bit data for 8 lanes.
static inline void mm256_bswap_intrlv80_4x64( void *d, const void *s )
{
mm256_bswap_intrlv_4x64_256( d , casti_m256i( s, 0 ) );
mm256_bswap_intrlv_4x64_256( d+128, casti_m256i( s, 1 ) );
mm256_bswap_intrlv_4x64_128( d+256, casti_m128i( s, 4 ) );
}
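// Hedged usage sketch ( not code from this file ): a 4x64 scanhash calls this
// once per job to byte swap and interleave the 80 byte header into a 320 byte
// buffer ( 10 qwords per lane ), then only the interleaved element holding the
// nonce ( header words 18/19, qword 9 of each lane ) is rewritten each
// iteration before hashing the 4 lanes.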
// Blend 32 byte lanes of hash from 2 sources according to control mask.
// macro due to 256 bit value arg.
#define mm256_blend_hash_4x64( dst, a, b, mask ) \
do { \
dst[0] = _mm256_blendv_epi8( a[0], b[0], mask ); \
dst[1] = _mm256_blendv_epi8( a[1], b[1], mask ); \
dst[2] = _mm256_blendv_epi8( a[2], b[2], mask ); \
dst[3] = _mm256_blendv_epi8( a[3], b[3], mask ); \
dst[4] = _mm256_blendv_epi8( a[4], b[4], mask ); \
dst[5] = _mm256_blendv_epi8( a[5], b[5], mask ); \
dst[6] = _mm256_blendv_epi8( a[6], b[6], mask ); \
dst[7] = _mm256_blendv_epi8( a[7], b[7], mask ); \
} while(0)
#endif // AVX2
#endif // INTRLV_AVX2_H__

View File

@@ -1,679 +0,0 @@
#if !defined(INTRLV_AVX512_H__)
#define INTRLV_AVX512_H__ 1
#if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// SSE2 functions used in AVX512 interleaving
// AVX512 block is 64 * 64 bytes
// quarter avx512 block, 16 bytes * 16 lanes
static inline void mm128_dintrlv_16x32x128( void *d00, void *d01,
void *d02, void *d03, void *d04, void *d05, void *d06, void *d07,
void *d08, void *d09, void *d10, void *d11, void *d12, void *d13,
void *d14, void *d15, const int n, const void *s )
{
cast_m128i( d00 ) = mm128_get_32( s, 0, 16, 32, 48 );
cast_m128i( d01 ) = mm128_get_32( s, 1, 17, 33, 49 );
cast_m128i( d02 ) = mm128_get_32( s, 2, 18, 34, 50 );
cast_m128i( d03 ) = mm128_get_32( s, 3, 19, 35, 51 );
cast_m128i( d04 ) = mm128_get_32( s, 4, 20, 36, 52 );
cast_m128i( d05 ) = mm128_get_32( s, 5, 21, 37, 53 );
cast_m128i( d06 ) = mm128_get_32( s, 6, 22, 38, 54 );
cast_m128i( d07 ) = mm128_get_32( s, 7, 23, 39, 55 );
cast_m128i( d08 ) = mm128_get_32( s, 8, 24, 40, 56 );
cast_m128i( d09 ) = mm128_get_32( s, 9, 25, 41, 57 );
cast_m128i( d10 ) = mm128_get_32( s, 10, 26, 42, 58 );
cast_m128i( d11 ) = mm128_get_32( s, 11, 27, 43, 59 );
cast_m128i( d12 ) = mm128_get_32( s, 12, 28, 44, 60 );
cast_m128i( d13 ) = mm128_get_32( s, 13, 29, 45, 61 );
cast_m128i( d14 ) = mm128_get_32( s, 14, 30, 46, 62 );
cast_m128i( d15 ) = mm128_get_32( s, 15, 31, 47, 63 );
}
// quarter avx512 block, 32 bytes * 8 lanes
// 8 lanes of 128 bits using 64 bit interleaving
// Used for last 16 bytes of 80 byte input, only used for testing.
static inline void mm128_dintrlv_8x64x128( void *d0, void *d1, void *d2,
void *d3, void *d4, void *d5, void *d6, void *d7,
const int n, const void *s )
{
casti_m128i( d0,n ) = mm128_get_64( s, 0, 8 );
casti_m128i( d1,n ) = mm128_get_64( s, 1, 9 );
casti_m128i( d2,n ) = mm128_get_64( s, 2, 10 );
casti_m128i( d3,n ) = mm128_get_64( s, 3, 11 );
casti_m128i( d4,n ) = mm128_get_64( s, 4, 12 );
casti_m128i( d5,n ) = mm128_get_64( s, 5, 13 );
casti_m128i( d6,n ) = mm128_get_64( s, 6, 14 );
casti_m128i( d7,n ) = mm128_get_64( s, 7, 15 );
}
static inline void mm128_dintrlv_4x128x128( void *d0, void *d1, void *d2,
void *d3, const int n, const void *s )
{
casti_m128i( d0,n ) = mm128_get_64( s, 0, 1 );
casti_m128i( d1,n ) = mm128_get_64( s, 2, 3 );
casti_m128i( d2,n ) = mm128_get_64( s, 4, 5 );
casti_m128i( d3,n ) = mm128_get_64( s, 5, 7 );
}
// AVX2 functions used in AVX512 interleaving
static inline void mm256_dintrlv_16x32x256( void *d00, void *d01,
void *d02, void *d03, void *d04, void *d05,
void *d06, void *d07, void *d08, void *d09,
void *d10, void *d11, void *d12, void *d13,
void *d14, void *d15, const int n, const void *s )
{
casti_m256i( d00,n ) = mm256_get_32( s, 0, 16, 32, 48, 64, 80, 96,112 );
casti_m256i( d01,n ) = mm256_get_32( s, 1, 17, 33, 49, 65, 81, 97,113 );
casti_m256i( d02,n ) = mm256_get_32( s, 2, 18, 34, 50, 66, 82, 98,114 );
casti_m256i( d03,n ) = mm256_get_32( s, 3, 19, 35, 51, 67, 83, 99,115 );
casti_m256i( d04,n ) = mm256_get_32( s, 4, 20, 36, 52, 68, 84,100,116 );
casti_m256i( d05,n ) = mm256_get_32( s, 5, 21, 37, 53, 69, 85,101,117 );
casti_m256i( d06,n ) = mm256_get_32( s, 6, 22, 38, 54, 70, 86,102,118 );
casti_m256i( d07,n ) = mm256_get_32( s, 7, 23, 39, 55, 71, 87,103,119 );
casti_m256i( d08,n ) = mm256_get_32( s, 8, 24, 40, 56, 72, 88,104,120 );
casti_m256i( d09,n ) = mm256_get_32( s, 9, 25, 41, 57, 73, 89,105,121 );
casti_m256i( d10,n ) = mm256_get_32( s, 10, 26, 42, 58, 74, 90,106,122 );
casti_m256i( d11,n ) = mm256_get_32( s, 11, 27, 43, 59, 75, 91,107,123 );
casti_m256i( d12,n ) = mm256_get_32( s, 12, 28, 44, 60, 76, 92,108,124 );
casti_m256i( d13,n ) = mm256_get_32( s, 13, 29, 45, 61, 77, 93,109,125 );
casti_m256i( d14,n ) = mm256_get_32( s, 14, 30, 46, 62, 78, 94,110,126 );
casti_m256i( d15,n ) = mm256_get_32( s, 15, 31, 47, 63, 79, 95,111,127 );
}
// 8 lanes of 256 bits using 64 bit interleaving (standard final hash size)
static inline void mm256_dintrlv_8x64x256( void *d0, void *d1, void *d2,
void *d3, void *d4, void *d5, void *d6, void *d7,
const int n, const void *s )
{
casti_m256i( d0,n ) = mm256_get_64( s, 0, 8, 16, 24 );
casti_m256i( d1,n ) = mm256_get_64( s, 1, 9, 17, 25 );
casti_m256i( d2,n ) = mm256_get_64( s, 2, 10, 18, 26 );
casti_m256i( d3,n ) = mm256_get_64( s, 3, 11, 19, 27 );
casti_m256i( d4,n ) = mm256_get_64( s, 4, 12, 20, 28 );
casti_m256i( d5,n ) = mm256_get_64( s, 5, 13, 21, 29 );
casti_m256i( d6,n ) = mm256_get_64( s, 6, 14, 22, 30 );
casti_m256i( d7,n ) = mm256_get_64( s, 7, 15, 23, 31 );
}
static inline void mm256_dintrlv_4x128x256( void *d0, void *d1, void *d2,
void *d3, const int n, const void *s )
{
casti_m256i( d0,n ) = mm256_get_64( s, 0, 1, 8, 9 );
casti_m256i( d1,n ) = mm256_get_64( s, 2, 3, 10, 11 );
casti_m256i( d2,n ) = mm256_get_64( s, 4, 5, 12, 13 );
casti_m256i( d3,n ) = mm256_get_64( s, 6, 7, 14, 15 );
}
// AVX 512 helper functions.
//
// Macro functions returning vector.
// Abstracted typecasting, avoid temp pointers.
// Source arguments may be any 64 or 32 byte aligned pointer as appropriate.
#define mm512_put_64( s0, s1, s2, s3, s4, s5, s6, s7 ) \
_mm512_set_epi64( *((const uint64_t*)(s7)), *((const uint64_t*)(s6)), \
*((const uint64_t*)(s5)), *((const uint64_t*)(s4)), \
*((const uint64_t*)(s3)), *((const uint64_t*)(s2)), \
*((const uint64_t*)(s1)), *((const uint64_t*)(s0)) )
#define mm512_put_32( s00, s01, s02, s03, s04, s05, s06, s07, \
s08, s09, s10, s11, s12, s13, s14, s15 ) \
_mm512_set_epi32( *((const uint32_t*)(s15)), *((const uint32_t*)(s14)), \
*((const uint32_t*)(s13)), *((const uint32_t*)(s12)), \
*((const uint32_t*)(s11)), *((const uint32_t*)(s10)), \
*((const uint32_t*)(s09)), *((const uint32_t*)(s08)), \
*((const uint32_t*)(s07)), *((const uint32_t*)(s06)), \
*((const uint32_t*)(s05)), *((const uint32_t*)(s04)), \
*((const uint32_t*)(s03)), *((const uint32_t*)(s02)), \
*((const uint32_t*)(s01)), *((const uint32_t*)(s00)) )
#define mm512_get_64( s, i0, i1, i2, i3, i4, i5, i6, i7 ) \
_mm512_set_epi64( ((const uint64_t*)(s))[i7], ((const uint64_t*)(s))[i6], \
((const uint64_t*)(s))[i5], ((const uint64_t*)(s))[i4], \
((const uint64_t*)(s))[i3], ((const uint64_t*)(s))[i2], \
((const uint64_t*)(s))[i1], ((const uint64_t*)(s))[i0] )
#define mm512_get_32( s, i00, i01, i02, i03, i04, i05, i06, i07, \
i08, i09, i10, i11, i12, i13, i14, i15 ) \
_mm512_set_epi32( ((const uint32_t*)(s))[i15], ((const uint32_t*)(s))[i14], \
((const uint32_t*)(s))[i13], ((const uint32_t*)(s))[i12], \
((const uint32_t*)(s))[i11], ((const uint32_t*)(s))[i10], \
((const uint32_t*)(s))[i09], ((const uint32_t*)(s))[i08], \
((const uint32_t*)(s))[i07], ((const uint32_t*)(s))[i06], \
((const uint32_t*)(s))[i05], ((const uint32_t*)(s))[i04], \
((const uint32_t*)(s))[i03], ((const uint32_t*)(s))[i02], \
((const uint32_t*)(s))[i01], ((const uint32_t*)(s))[i00] )
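// In the deinterleave helpers below, mm512_get_32 acts as a software gather:
// with 16 lane 32 bit interleaving, lane l's words sit at indices l, l+16,
// l+32, ... so one call with a stride 16 index list rebuilds a contiguous
// 64 byte chunk of a single lane.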
// AVX512 has no blend, it can be done with permute2xvar but at what cost?
// It can also be done with shifting and mask-or'ing for 3 instructions with
// 1 dependency. Finally it can be done with 1 _mm512_set but with 8 64 bit
// array index calculations and 8 pointer reads.
// Blend 2 vectors alternating hi & lo: { hi[n], lo[n-1], ... hi[1], lo[0] }
#define mm512_interleave_blend_128( hi, lo ) \
_mm256_permute2xvar_epi64( hi, lo, _mm512_set_epi64( \
0x7, 0x6, 0x5, 0x4, 0xb, 0xa, 0x9, 0x8 )
#define mm512_interleave_blend_64( hi, lo ) \
_mm256_permute2xvar_epi64( hi, lo, _mm512_set_epi64( \
0x7, 0x6, 0xd, 0xc, 0x3, 0x2, 0x9, 0x8 )
#define mm512_interleave_blend_32( hi, lo ) \
_mm256_permute2xvar_epi32( hi, lo, _mm512_set_epi32( \
0x0f, 0x1e, 0x0d, 0x1c, 0x0b, 0x1a, 0x09, 0x18, \
0x07, 0x16, 0x05, 0x14, 0x03, 0x12, 0x01, 0x10 )
//
static inline void mm512_intrlv_16x32x512( void *d, const void *s00,
const void *s01, const void *s02, const void *s03, const void *s04,
const void *s05, const void *s06, const void *s07, const void *s08,
const void *s09, const void *s10, const void *s11, const void *s12,
const void *s13, const void *s14, const void *s15 )
{
casti_m512i( d, 0 ) = mm512_put_32(
s00, s01, s02, s03, s04, s05, s06, s07,
s08, s09, s10, s11, s12, s13, s14, s15 );
casti_m512i( d, 1 ) = mm512_put_32(
s00+ 4, s01+ 4, s02+ 4, s03+ 4, s04+ 4, s05+ 4, s06+ 4, s07+ 4,
s08+ 4, s09+ 4, s10+ 4, s11+ 4, s12+ 4, s13+ 4, s14+ 4, s15+ 4 );
casti_m512i( d, 2 ) = mm512_put_32(
s00+ 8, s01+ 8, s02+ 8, s03+ 8, s04+ 8, s05+ 8, s06+ 8, s07+ 8,
s08+ 8, s09+ 8, s10+ 8, s11+ 8, s12+ 8, s13+ 8, s14+ 8, s15+ 8 );
casti_m512i( d, 3 ) = mm512_put_32(
s00+12, s01+12, s02+12, s03+12, s04+12, s05+12, s06+12, s07+12,
s08+12, s09+12, s10+12, s11+12, s12+12, s13+12, s14+12, s15+12 );
casti_m512i( d, 4 ) = mm512_put_32(
s00+16, s01+16, s02+16, s03+16, s04+16, s05+16, s06+16, s07+16,
s08+16, s09+16, s10+16, s11+16, s12+16, s13+16, s14+16, s15+16 );
casti_m512i( d, 5 ) = mm512_put_32(
s00+20, s01+20, s02+20, s03+20, s04+20, s05+20, s06+20, s07+20,
s08+20, s09+20, s10+20, s11+20, s12+20, s13+20, s14+20, s15+20 );
casti_m512i( d, 6 ) = mm512_put_32(
s00+24, s01+24, s02+24, s03+24, s04+24, s05+24, s06+24, s07+24,
s08+24, s09+24, s10+24, s11+24, s12+24, s13+24, s14+24, s15+24 );
casti_m512i( d, 7 ) = mm512_put_32(
s00+28, s01+28, s02+28, s03+28, s04+28, s05+28, s06+28, s07+28,
s08+28, s09+28, s10+28, s11+28, s12+28, s13+28, s14+28, s15+28 );
casti_m512i( d, 8 ) = mm512_put_32(
s00+32, s01+28, s02+28, s03+28, s04+32, s05+28, s06+28, s07+28,
s08+32, s09+28, s10+28, s11+28, s12+32, s13+28, s14+28, s15+28 );
casti_m512i( d, 9 ) = mm512_put_32(
s00+36, s01+28, s02+28, s03+28, s04+36, s05+28, s06+28, s07+28,
s08+36, s09+28, s10+28, s11+28, s12+36, s13+28, s14+28, s15+28 );
casti_m512i( d,10 ) = mm512_put_32(
s00+40, s01+28, s02+28, s03+28, s04+40, s05+28, s06+28, s07+28,
s08+40, s09+28, s10+28, s11+28, s12+40, s13+28, s14+28, s15+28 );
casti_m512i( d,11 ) = mm512_put_32(
s00+44, s01+28, s02+28, s03+28, s04+44, s05+28, s06+28, s07+28,
s08+44, s09+28, s10+28, s11+28, s12+44, s13+28, s14+28, s15+28 );
casti_m512i( d,12 ) = mm512_put_32(
s00+48, s01+28, s02+28, s03+28, s04+48, s05+28, s06+28, s07+28,
s08+48, s09+28, s10+28, s11+28, s12+48, s13+28, s14+28, s15+28 );
casti_m512i( d,13 ) = mm512_put_32(
s00+52, s01+28, s02+28, s03+28, s04+52, s05+28, s06+28, s07+28,
s08+52, s09+28, s10+28, s11+28, s12+52, s13+28, s14+28, s15+28 );
casti_m512i( d,14 ) = mm512_put_32(
s00+56, s01+28, s02+28, s03+28, s04+56, s05+28, s06+28, s07+28,
s08+56, s09+28, s10+28, s11+28, s12+56, s13+28, s14+28, s15+28 );
casti_m512i( d,15 ) = mm512_put_32(
s00+60, s01+28, s02+28, s03+28, s04+60, s05+28, s06+28, s07+28,
s08+60, s09+28, s10+28, s11+28, s12+60, s13+28, s14+28, s15+28 );
}
static inline void mm512_intrlv_16x32x256( void *d, const void *s00,
const void *s01, const void *s02, const void *s03, const void *s04,
const void *s05, const void *s06, const void *s07, const void *s08,
const void *s09, const void *s10, const void *s11, const void *s12,
const void *s13, const void *s14, const void *s15 )
{
casti_m512i( d, 0 ) = mm512_put_32(
s00, s01, s02, s03, s04, s05, s06, s07,
s08, s09, s10, s11, s12, s13, s14, s15 );
casti_m512i( d, 1 ) = mm512_put_32(
s00+ 4, s01+ 4, s02+ 4, s03+ 4, s04+ 4, s05+ 4, s06+ 4, s07+ 4,
s08+ 4, s09+ 4, s10+ 4, s11+ 4, s12+ 4, s13+ 4, s14+ 4, s15+ 4 );
casti_m512i( d, 2 ) = mm512_put_32(
s00+ 8, s01+ 8, s02+ 8, s03+ 8, s04+ 8, s05+ 8, s06+ 8, s07+ 8,
s08+ 8, s09+ 8, s10+ 8, s11+ 8, s12+ 8, s13+ 8, s14+ 8, s15+ 8 );
casti_m512i( d, 3 ) = mm512_put_32(
s00+12, s01+12, s02+12, s03+12, s04+12, s05+12, s06+12, s07+12,
s08+12, s09+12, s10+12, s11+12, s12+12, s13+12, s14+12, s15+12 );
casti_m512i( d, 4 ) = mm512_put_32(
s00+16, s01+16, s02+16, s03+16, s04+16, s05+16, s06+16, s07+16,
s08+16, s09+16, s10+16, s11+16, s12+16, s13+16, s14+16, s15+16 );
casti_m512i( d, 5 ) = mm512_put_32(
s00+20, s01+20, s02+20, s03+20, s04+20, s05+20, s06+20, s07+20,
s08+20, s09+20, s10+20, s11+20, s12+20, s13+20, s14+20, s15+20 );
casti_m512i( d, 6 ) = mm512_put_32(
s00+24, s01+24, s02+24, s03+24, s04+24, s05+24, s06+24, s07+24,
s08+24, s09+24, s10+24, s11+24, s12+24, s13+24, s14+24, s15+24 );
casti_m512i( d, 7 ) = mm512_put_32(
s00+28, s01+28, s02+28, s03+28, s04+28, s05+28, s06+28, s07+28,
s08+28, s09+28, s10+28, s11+28, s12+28, s13+28, s14+28, s15+28 );
}
// Last 16 bytes of input
static inline void mm512_intrlv_16x32x128( void *d, const void *s00,
const void *s01, const void *s02, const void *s03, const void *s04,
const void *s05, const void *s06, const void *s07, const void *s08,
const void *s09, const void *s10, const void *s11, const void *s12,
const void *s13, const void *s14, const void *s15 )
{
casti_m512i( d, 0 ) = mm512_put_32(
s00, s01, s02, s03, s04, s05, s06, s07,
s08, s09, s10, s11, s12, s13, s14, s15 );
casti_m512i( d, 1 ) = mm512_put_32(
s00+ 4, s01+ 4, s02+ 4, s03+ 4, s04+ 4, s05+ 4, s06+ 4, s07+ 4,
s08+ 4, s09+ 4, s10+ 4, s11+ 4, s12+ 4, s13+ 4, s14+ 4, s15+ 4 );
casti_m512i( d, 2 ) = mm512_put_32(
s00+ 8, s01+ 8, s02+ 8, s03+ 8, s04+ 8, s05+ 8, s06+ 8, s07+ 8,
s08+ 8, s09+ 8, s10+ 8, s11+ 8, s12+ 8, s13+ 8, s14+ 8, s15+ 8 );
casti_m512i( d, 3 ) = mm512_put_32(
s00+12, s01+12, s02+12, s03+12, s04+12, s05+12, s06+12, s07+12,
s08+12, s09+12, s10+12, s11+12, s12+12, s13+12, s14+12, s15+12 );
}
// can be called directly for 64 byte hash.
static inline void mm512_dintrlv_16x32x512( void *d00, void *d01,
void *d02, void *d03, void *d04, void *d05, void *d06,
void *d07, void *d08, void *d09, void *d10, void *d11,
void *d12, void *d13, void *d14, void *d15, const int n,
const void *s )
{
casti_m512i(d00,n) = mm512_get_32( s, 0, 16, 32, 48, 64, 80, 96,112,
128,144,160,176,192,208,224,240 );
casti_m512i(d01,n) = mm512_get_32( s, 1, 17, 33, 49, 65, 81, 97,113,
129,145,161,177,193,209,225,241 );
casti_m512i(d02,n) = mm512_get_32( s, 2, 18, 34, 50, 66, 82, 98,114,
130,146,162,178,194,210,226,242 );
casti_m512i(d03,n) = mm512_get_32( s, 3, 19, 35, 51, 67, 83, 99,115,
131,147,163,179,195,211,227,243 );
casti_m512i(d04,n) = mm512_get_32( s, 4, 20, 36, 52, 68, 84,100,116,
132,148,164,180,196,212,228,244 );
casti_m512i(d05,n) = mm512_get_32( s, 5, 21, 37, 53, 69, 85,101,117,
133,149,165,181,197,213,229,245 );
casti_m512i(d06,n) = mm512_get_32( s, 6, 22, 38, 54, 70, 86,102,118,
134,150,166,182,198,214,230,246 );
casti_m512i(d07,n) = mm512_get_32( s, 7, 23, 39, 55, 71, 87,103,119,
135,151,167,183,199,215,231,247 );
casti_m512i(d08,n) = mm512_get_32( s, 8, 24, 40, 56, 72, 88,104,120,
136,152,168,184,200,216,232,248 );
casti_m512i(d09,n) = mm512_get_32( s, 9, 25, 41, 57, 73, 89,105,121,
137,153,169,185,201,217,233,249 );
casti_m512i(d10,n) = mm512_get_32( s, 10, 26, 42, 58, 74, 90,106,122,
138,154,170,186,202,218,234,250 );
casti_m512i(d11,n) = mm512_get_32( s, 11, 27, 43, 59, 75, 91,107,123,
139,155,171,187,203,219,235,251 );
casti_m512i(d12,n) = mm512_get_32( s, 12, 28, 44, 60, 76, 92,108,124,
140,156,172,188,204,220,236,252 );
casti_m512i(d13,n) = mm512_get_32( s, 13, 29, 45, 61, 77, 93,109,125,
141,157,173,189,205,221,237,253 );
casti_m512i(d14,n) = mm512_get_32( s, 14, 30, 46, 62, 78, 94,110,126,
142,158,174,190,206,222,238,254 );
casti_m512i(d15,n) = mm512_get_32( s, 15, 31, 47, 63, 79, 95,111,127,
143,159,175,191,207,223,239,255 );
}
static inline void mm512_intrlv_8x64x512( void *d, const void *s0,
const void *s1, const void *s2, const void *s3,
const void *s4, const void *s5, const void *s6,
const void *s7 )
{
casti_m512i( d,0 ) = mm512_put_64( s0, s1, s2, s3,
s4, s5, s6, s7 );
casti_m512i( d,1 ) = mm512_put_64( s0+ 8, s1+ 8, s2+ 8, s3+ 8,
s4+ 8, s5+ 8, s6+ 8, s7+ 8 );
casti_m512i( d,2 ) = mm512_put_64( s0+16, s1+16, s2+16, s3+16,
s4+16, s5+16, s6+16, s7+16 );
casti_m512i( d,3 ) = mm512_put_64( s0+24, s1+24, s2+24, s3+24,
s4+24, s5+24, s6+24, s7+24 );
casti_m512i( d,4 ) = mm512_put_64( s0+32, s1+32, s2+32, s3+32,
s4+32, s5+32, s6+32, s7+32 );
casti_m512i( d,5 ) = mm512_put_64( s0+40, s1+40, s2+40, s3+40,
s4+40, s5+40, s6+40, s7+40 );
casti_m512i( d,6 ) = mm512_put_64( s0+48, s1+48, s2+48, s3+48,
s4+48, s5+48, s6+48, s7+48 );
casti_m512i( d,7 ) = mm512_put_64( s0+56, s1+56, s2+56, s3+56,
s4+56, s5+56, s6+56, s7+56 );
}
static inline void mm512_intrlv_8x64x256( void *d, const void *s0,
const void *s1, const void *s2, const void *s3,
const void *s4, const void *s5, const void *s6,
const void *s7 )
{
casti_m512i( d,0 ) = mm512_put_64( s0, s1, s2, s3,
s4, s5, s6, s7 );
casti_m512i( d,1 ) = mm512_put_64( s0+ 8, s1+ 8, s2+ 8, s3+ 8,
s4+ 8, s5+ 8, s6+ 8, s7+ 8 );
casti_m512i( d,2 ) = mm512_put_64( s0+16, s1+16, s2+16, s3+16,
s4+16, s5+16, s6+16, s7+16 );
casti_m512i( d,3 ) = mm512_put_64( s0+24, s1+24, s2+24, s3+24,
s4+24, s5+24, s6+24, s7+24 );
}
// 8 lanes of 512 bits using 64 bit interleaving (typical intermediate hash)
static inline void mm512_dintrlv_8x64x512( void *d0, void *d1, void *d2,
void *d3, void *d4, void *d5, void *d6, void *d7,
const int n, const void *s )
{
casti_m512i( d0,n ) = mm512_get_64( s, 0, 8, 16, 24, 32, 40, 48, 56 );
casti_m512i( d1,n ) = mm512_get_64( s, 1, 9, 17, 25, 33, 41, 49, 57 );
casti_m512i( d2,n ) = mm512_get_64( s, 2, 10, 18, 26, 34, 42, 50, 58 );
casti_m512i( d3,n ) = mm512_get_64( s, 3, 11, 19, 27, 35, 43, 51, 59 );
casti_m512i( d4,n ) = mm512_get_64( s, 4, 12, 20, 28, 36, 44, 52, 60 );
casti_m512i( d5,n ) = mm512_get_64( s, 5, 13, 21, 29, 37, 45, 53, 61 );
casti_m512i( d6,n ) = mm512_get_64( s, 6, 14, 22, 30, 38, 46, 54, 62 );
casti_m512i( d7,n ) = mm512_get_64( s, 7, 15, 23, 31, 39, 47, 55, 63 );
}
static inline void mm512_dintrlv_4x128x512( void *d0, void *d1, void *d2,
void *d3, const int n, const void *s )
{
casti_m512i( d0,n ) = mm512_get_64( s, 0, 1, 8, 9, 16, 17, 24, 25 );
casti_m512i( d1,n ) = mm512_get_64( s, 2, 3, 10, 11, 18, 19, 26, 27 );
casti_m512i( d2,n ) = mm512_get_64( s, 4, 5, 12, 13, 20, 21, 28, 29 );
casti_m512i( d3,n ) = mm512_get_64( s, 6, 7, 14, 15, 22, 23, 30, 31 );
}
// AVX-512 user facing functions.
static inline void mm512_intrlv_16x32( void *d, const void *s00,
const void *s01, const void *s02, const void *s03, const void *s04,
const void *s05, const void *s06, const void *s07, const void *s08,
const void *s09, const void *s10, const void *s11, const void *s12,
const void *s13, const void *s14, const void *s15, int bit_len )
{
if ( bit_len <= 256 )
{
mm512_intrlv_16x32x256( d, s00, s01, s02, s03, s04, s05, s06, s07,
s08, s09, s10, s11, s12, s13, s14, s15 );
return;
}
mm512_intrlv_16x32x512( d, s00, s01, s02, s03, s04, s05, s06, s07,
s08, s09, s10, s11, s12, s13, s14, s15 );
if ( bit_len <= 512 ) return;
if ( bit_len <= 640 )
{
mm512_intrlv_16x32x128( d+1024, s00+64, s01+64, s02+64, s03+64,
s04+64, s05+64, s06+64, s07+64, s08+64, s09+64,
s10+64, s11+64, s12+64, s13+64, s14+64, s15+64 );
return;
}
mm512_intrlv_16x32x512( d+1024, s00+64, s01+64, s02+64, s03+64,
s04+64, s05+64, s06+64, s07+64, s08+64, s09+64,
s10+64, s11+64, s12+64, s13+64, s14+64, s15+64 );
// bit_len == 1024
}
// sub-functions can be called directly for 32 & 64 byte hash.
static inline void mm512_dintrlv_16x32( void *d00, void *d01, void *d02,
void *d03, void *d04, void *d05, void *d06, void *d07, void *d08,
void *d09, void *d10, void *d11, void *d12, void *d13, void *d14,
void *d15, const void *src, const int bit_len )
{
if ( bit_len <= 256 )
{
mm256_dintrlv_16x32x256( d00, d01, d02, d03, d04, d05, d06, d07,
d08, d09, d10, d11, d12, d13, d14, d15,
0,src );
return;
}
mm512_dintrlv_16x32x512( d00, d01, d02, d03, d04, d05, d06, d07,
d08, d09, d10, d11, d12, d13, d14, d15,
0, src );
if ( bit_len <= 512 ) return;
if ( bit_len <= 640 )
{
// short block, final 16 bytes of input data.
mm128_dintrlv_16x32x128( d00, d01, d02, d03, d04, d05, d06, d07,
d08, d09, d10, d11, d12, d13, d14, d15,
1, src+1024 );
return;
}
// bit_len == 1024
mm512_dintrlv_16x32x512( d00, d01, d02, d03, d04, d05, d06, d07,
d08, d09, d10, d11, d12, d13, d14, d15,
1, src+1024 );
}
static inline void mm512_extr_lane_16x32( void *dst, const void *src,
const int lane, const int bit_len )
{
if ( bit_len <= 256 )
{
cast_m256i( dst ) = mm256_get_32( src, lane, lane+16, lane+32, lane+48,
lane+64, lane+80, lane+96, lane+112 );
return;
}
cast_m512i( dst ) = mm512_get_32( src, lane, lane+ 16, lane+ 32, lane+ 48,
lane+ 64, lane+ 80, lane+ 96, lane+112, lane+128, lane+144,
lane+160, lane+176, lane+192, lane+208, lane+224, lane+240 );
}
//
static inline void mm512_intrlv_8x64( void *d, const void *s0,
const void *s1, const void *s2, const void *s3,
const void *s4, const void *s5, const void *s6,
const void *s7, int bit_len )
{
if ( bit_len <= 256 )
{
mm512_intrlv_8x64x256( d, s0, s1, s2, s3, s4, s5, s6, s7 );
return;
}
mm512_intrlv_8x64x512( d, s0, s1, s2, s3, s4, s5, s6, s7 );
if ( bit_len <= 512 ) return;
if ( bit_len <= 640 )
{
casti_m512i( d, 8 ) = mm512_put_64( s0+64, s1+64, s2+64, s3+64,
s4+64, s5+64, s6+64, s7+64 );
casti_m512i( d, 9 ) = mm512_put_64( s0+72, s1+72, s2+72, s3+72,
s4+72, s5+72, s6+72, s7+72 );
return;
}
// bitlen == 1024
mm512_intrlv_8x64x512( d+512, s0+64, s1+64, s2+64, s3+64,
s4+64, s5+64, s6+64, s7+64 );
}
static inline void mm512_dintrlv_8x64( void *d0, void *d1, void *d2,
void *d3, void *d4, void *d5, void *d6, void *d7,
const void *s, const int bit_len )
{
if ( bit_len <= 256 )
{
mm256_dintrlv_8x64x256( d0, d1, d2, d3, d4, d5, d6, d7, 0, s );
return;
}
mm512_dintrlv_8x64x512( d0, d1, d2, d3, d4, d5, d6, d7, 0, s );
if ( bit_len <= 512 ) return;
if ( bit_len <= 640 )
{
// short block, final 16 bytes of input data.
mm128_dintrlv_8x64x128( d0, d1, d2, d3, d4, d5, d6, d7, 1, s+512 );
return;
}
// bit_len == 1024
mm512_dintrlv_8x64x512( d0, d1, d2, d3, d4, d5, d6, d7, 1, s+512 );
}
// Extract one lane from 64 bit interleaved data
static inline void mm512_extr_lane_8x64( void *d, const void *s,
const int lane, const int bit_len )
{
if ( bit_len <= 256 )
{
cast_m256i( d ) = mm256_get_64( s, lane, lane+8, lane+16, lane+24 );
return;
}
// else bit_len == 512
cast_m512i( d ) = mm512_get_64( s, lane , lane+ 8, lane+16, lane+24,
lane+32, lane+40, lane+48, lane+56 );
}
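// Example (illustrative sketch, not from the original source): round trip
// through the 8x64 helpers above. Eight 64 byte buffers are interleaved into
// one vector buffer and one lane is extracted back out; in the miner the
// interleaved buffer is what the 8-way hash routines consume between these
// two calls. out0 must be 64 byte aligned; assumes the header's usual
// includes (immintrin.h, stdint.h) are in scope.
static inline void example_rt_8x64( void *out0, const void *in0,
            const void *in1, const void *in2, const void *in3,
            const void *in4, const void *in5, const void *in6,
            const void *in7 )
{
   uint64_t v[ 8*8 ] __attribute__ ((aligned (64)));    // 8 lanes x 64 bytes
   mm512_intrlv_8x64( v, in0, in1, in2, in3, in4, in5, in6, in7, 512 );
   mm512_extr_lane_8x64( out0, v, 0, 512 );             // recovers in0
}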
//
static inline void mm512_intrlv_4x128( void *d, const void *s0,
const void *s1, const void *s2, const void *s3, const int bit_len )
{
casti_m512i( d, 0 ) = mm512_put_64( s0, s0+8, s1, s1+8,
s2, s2+8, s3, s3+8 );
casti_m512i( d, 1 ) = mm512_put_64( s0+16, s0+24, s1+16, s1+24,
s2+16, s2+24, s3+16, s3+24 );
if ( bit_len <= 256 ) return;
casti_m512i( d, 2 ) = mm512_put_64( s0+32, s0+40, s1+32, s1+40,
s2+32, s2+40, s3+32, s3+40 );
casti_m512i( d, 3 ) = mm512_put_64( s0+48, s0+56, s1+48, s1+56,
s2+48, s2+56, s3+48, s3+56 );
if ( bit_len <= 512 ) return;
casti_m512i( d, 4 ) = mm512_put_64( s0+64, s0+72, s1+64, s1+72,
s2+64, s2+72, s3+64, s3+72 );
if ( bit_len <= 640 ) return;
casti_m512i( d, 5 ) = mm512_put_64( s0+ 80, s0+ 88, s1+ 80, s1+ 88,
s2+ 80, s2+ 88, s3+ 80, s3+ 88 );
casti_m512i( d, 6 ) = mm512_put_64( s0+ 96, s0+104, s1+ 96, s1+104,
s2+ 96, s2+104, s3+ 96, s3+104 );
casti_m512i( d, 7 ) = mm512_put_64( s0+112, s0+120, s1+112, s1+120,
s2+112, s2+120, s3+112, s3+120 );
// bit_len == 1024
}
static inline void mm512_dintrlv_4x128( void *d0, void *d1, void *d2,
void *d3, const void *s, const int bit_len )
{
if ( bit_len <= 256 )
{
mm256_dintrlv_4x128x256( d0, d1, d2, d3, 0, s );
return;
}
mm512_dintrlv_4x128x512( d0, d1, d2, d3, 0, s );
if ( bit_len <= 512 ) return;
if ( bit_len <= 640 )
{
mm128_dintrlv_4x128x128( d0, d1, d2, d3, 1, s+256 );
return;
}
// bit_len == 1024
mm512_dintrlv_4x128x512( d0, d1, d2, d3, 1, s+256 );
}
// input one 8x64 buffer and return 2*4*128
static inline void mm512_rintrlv_8x64_4x128( void *dst0, void *dst1,
const void *src, int bit_len )
{
__m512i* d0 = (__m512i*)dst0;
__m512i* d1 = (__m512i*)dst1;
uint64_t *s = (uint64_t*)src;
d0[0] = _mm512_set_epi64( s[ 11], s[ 3], s[ 10], s[ 2],
s[ 9], s[ 1], s[ 8], s[ 0] );
d0[1] = _mm512_set_epi64( s[ 27], s[ 19], s[ 26], s[ 18],
s[ 25], s[ 17], s[ 24], s[ 16] );
d0[2] = _mm512_set_epi64( s[ 15], s[ 7], s[ 14], s[ 6],
s[ 13], s[ 5], s[ 12], s[ 4] );
d0[3] = _mm512_set_epi64( s[ 31], s[ 23], s[ 30], s[ 22],
s[ 29], s[ 21], s[ 28], s[ 20] );
d1[0] = _mm512_set_epi64( s[ 43], s[ 35], s[ 42], s[ 34],
s[ 41], s[ 33], s[ 40], s[ 32] );
d1[1] = _mm512_set_epi64( s[ 59], s[ 51], s[ 58], s[ 50],
s[ 57], s[ 49], s[ 56], s[ 48] );
d1[2] = _mm512_set_epi64( s[ 47], s[ 39], s[ 46], s[ 38],
s[ 45], s[ 37], s[ 44], s[ 36] );
d1[3] = _mm512_set_epi64( s[ 63], s[ 55], s[ 62], s[ 54],
s[ 61], s[ 53], s[ 60], s[ 52] );
if ( bit_len <= 512 ) return;
d0[4] = _mm512_set_epi64( s[ 75], s[ 67], s[ 74], s[ 66],
s[ 73], s[ 65], s[ 72], s[ 64] );
d0[5] = _mm512_set_epi64( s[ 91], s[ 83], s[ 90], s[ 82],
s[ 89], s[ 81], s[ 88], s[ 80] );
d0[6] = _mm512_set_epi64( s[ 79], s[ 71], s[ 78], s[ 70],
s[ 77], s[ 69], s[ 76], s[ 68] );
d0[7] = _mm512_set_epi64( s[ 95], s[ 87], s[ 94], s[ 86],
s[ 93], s[ 85], s[ 92], s[ 84] );
d1[4] = _mm512_set_epi64( s[107], s[ 99], s[106], s[ 98],
s[105], s[ 97], s[104], s[ 96] );
d1[5] = _mm512_set_epi64( s[123], s[115], s[122], s[114],
s[121], s[113], s[120], s[112] );
d1[6] = _mm512_set_epi64( s[111], s[103], s[110], s[102],
s[109], s[101], s[108], s[100] );
d1[7] = _mm512_set_epi64( s[127], s[119], s[126], s[118],
s[125], s[117], s[124], s[116] );
}
// input 2 4x128 return 8x64
static inline void mm512_rintrlv_4x128_8x64( void *dst, const void *src0,
const void *src1, int bit_len )
{
__m512i* d = (__m512i*)dst;
uint64_t *s0 = (uint64_t*)src0;
uint64_t *s1 = (uint64_t*)src1;
d[0] = _mm512_set_epi64( s1[ 6], s1[ 4], s1[ 2], s1[ 0],
s0[ 6], s0[ 4], s0[ 2], s0[ 0] );
d[1] = _mm512_set_epi64( s1[ 7], s1[ 5], s1[ 3], s1[ 1],
s0[ 7], s0[ 5], s0[ 3], s0[ 1] );
d[2] = _mm512_set_epi64( s1[14], s1[12], s1[10], s1[ 8],
s0[14], s0[12], s0[10], s0[ 8] );
d[3] = _mm512_set_epi64( s1[15], s1[13], s1[11], s1[ 9],
s0[15], s0[13], s0[11], s0[ 9] );
d[4] = _mm512_set_epi64( s1[22], s1[20], s1[18], s1[16],
s0[22], s0[20], s0[18], s0[16] );
d[5] = _mm512_set_epi64( s1[23], s1[21], s1[19], s1[17],
s0[23], s0[21], s0[19], s0[17] );
d[6] = _mm512_set_epi64( s1[30], s1[28], s1[26], s1[24],
s0[30], s0[28], s0[26], s0[24] );
d[7] = _mm512_set_epi64( s1[31], s1[29], s1[27], s1[25],
s0[31], s0[29], s0[27], s0[25] );
if ( bit_len <= 512 ) return;
d[ 8] = _mm512_set_epi64( s1[38], s1[36], s1[34], s1[32],
s0[38], s0[36], s0[34], s0[32] );
d[ 9] = _mm512_set_epi64( s1[39], s1[37], s1[35], s1[33],
s0[39], s0[37], s0[35], s0[33] );
d[10] = _mm512_set_epi64( s1[46], s1[44], s1[42], s1[40],
s0[46], s0[44], s0[42], s0[40] );
d[11] = _mm512_set_epi64( s1[47], s1[45], s1[43], s1[41],
s0[47], s0[45], s0[43], s0[41] );
d[12] = _mm512_set_epi64( s1[54], s1[52], s1[50], s1[48],
s0[54], s0[52], s0[50], s0[48] );
d[13] = _mm512_set_epi64( s1[55], s1[53], s1[51], s1[49],
s0[55], s0[53], s0[51], s0[49] );
d[14] = _mm512_set_epi64( s1[62], s1[60], s1[58], s1[56],
s0[62], s0[60], s0[58], s0[56] );
d[15] = _mm512_set_epi64( s1[63], s1[61], s1[59], s1[57],
s0[63], s0[61], s0[59], s0[57] );
}
static inline void mm512_extr_lane_4x128( void *d, const void *s,
const int lane, const int bit_len )
{
int l = lane<<1;
if ( bit_len <= 256 )
{
cast_m256i( d ) = mm256_get_64( s, l, l+1, l+8, l+9 );
return;
}
// else bit_len == 512
cast_m512i( d ) = mm512_get_64( s, l , l+ 1, l+ 8, l+ 9,
l+16, l+17, l+24, l+25 );
}
#endif // AVX512
#endif // INTRLV_AVX512_H__

View File

@@ -1,126 +0,0 @@
#if !defined(INTRLV_MMX_H__)
#define INTRLV_MMX_H__ 1
#if defined(__MMX__)
//////////////////////////////////////////////////////
//
// MMX 64 bit vectors
#define mm64_put_32( s0, s1 ) \
_mm_set_pi32( *((const uint32_t*)(s1)), *((const uint32_t*)(s0)) )
#define mm64_get_32( s, i0, i1 ) \
_mm_set_pi32( ((const uint32_t*)(s))[i1], ((const uint32_t*)(s))[i0] )
// 1 MMX block, 8 bytes * 2 lanes
static inline void mm64_intrlv_2x32( void *d, const void *s0,
const void *s1, int len )
{
casti_m64( d, 0 ) = mm64_put_32( s0 , s1 );
casti_m64( d, 1 ) = mm64_put_32( s0+ 4, s1+ 4 );
casti_m64( d, 2 ) = mm64_put_32( s0+ 8, s1+ 8 );
casti_m64( d, 3 ) = mm64_put_32( s0+ 12, s1+ 12 );
casti_m64( d, 4 ) = mm64_put_32( s0+ 16, s1+ 16 );
casti_m64( d, 5 ) = mm64_put_32( s0+ 20, s1+ 20 );
casti_m64( d, 6 ) = mm64_put_32( s0+ 24, s1+ 24 );
casti_m64( d, 7 ) = mm64_put_32( s0+ 28, s1+ 28 );
if ( len <= 256 ) return;
casti_m64( d, 8 ) = mm64_put_32( s0+ 32, s1+ 32 );
casti_m64( d, 9 ) = mm64_put_32( s0+ 36, s1+ 36 );
casti_m64( d,10 ) = mm64_put_32( s0+ 40, s1+ 40 );
casti_m64( d,11 ) = mm64_put_32( s0+ 44, s1+ 44 );
casti_m64( d,12 ) = mm64_put_32( s0+ 48, s1+ 48 );
casti_m64( d,13 ) = mm64_put_32( s0+ 52, s1+ 52 );
casti_m64( d,14 ) = mm64_put_32( s0+ 56, s1+ 56 );
casti_m64( d,15 ) = mm64_put_32( s0+ 60, s1+ 60 );
if ( len <= 512 ) return;
casti_m64( d,16 ) = mm64_put_32( s0+ 64, s1+ 64 );
casti_m64( d,17 ) = mm64_put_32( s0+ 68, s1+ 68 );
casti_m64( d,18 ) = mm64_put_32( s0+ 72, s1+ 72 );
casti_m64( d,19 ) = mm64_put_32( s0+ 76, s1+ 76 );
if ( len <= 640 ) return;
casti_m64( d,20 ) = mm64_put_32( s0+ 80, s1+ 80 );
casti_m64( d,21 ) = mm64_put_32( s0+ 84, s1+ 84 );
casti_m64( d,22 ) = mm64_put_32( s0+ 88, s1+ 88 );
casti_m64( d,23 ) = mm64_put_32( s0+ 92, s1+ 92 );
casti_m64( d,24 ) = mm64_put_32( s0+ 96, s1+ 96 );
casti_m64( d,25 ) = mm64_put_32( s0+100, s1+100 );
casti_m64( d,26 ) = mm64_put_32( s0+104, s1+104 );
casti_m64( d,27 ) = mm64_put_32( s0+108, s1+108 );
casti_m64( d,28 ) = mm64_put_32( s0+112, s1+112 );
casti_m64( d,29 ) = mm64_put_32( s0+116, s1+116 );
casti_m64( d,30 ) = mm64_put_32( s0+120, s1+120 );
casti_m64( d,31 ) = mm64_put_32( s0+124, s1+124 );
}
static inline void mm64_dintrlv_2x32( void *d00, void *d01, const int n,
const void *s, int len )
{
casti_m64( d00,0 ) = mm64_get_32( s, 0, 2 );
casti_m64( d01,0 ) = mm64_get_32( s, 1, 3 );
casti_m64( d00,1 ) = mm64_get_32( s, 4, 6 );
casti_m64( d01,1 ) = mm64_get_32( s, 5, 7 );
casti_m64( d00,2 ) = mm64_get_32( s, 8, 10 );
casti_m64( d01,2 ) = mm64_get_32( s, 9, 11 );
casti_m64( d00,3 ) = mm64_get_32( s, 12, 14 );
casti_m64( d01,3 ) = mm64_get_32( s, 13, 15 );
if ( len <= 256 ) return;
casti_m64( d00,4 ) = mm64_get_32( s, 16, 18 );
casti_m64( d01,4 ) = mm64_get_32( s, 17, 19 );
casti_m64( d00,5 ) = mm64_get_32( s, 20, 22 );
casti_m64( d01,5 ) = mm64_get_32( s, 21, 23 );
casti_m64( d00,6 ) = mm64_get_32( s, 24, 26 );
casti_m64( d01,6 ) = mm64_get_32( s, 25, 27 );
casti_m64( d00,7 ) = mm64_get_32( s, 28, 30 );
casti_m64( d01,7 ) = mm64_get_32( s, 29, 31 );
if ( len <= 512 ) return;
casti_m64( d00,8 ) = mm64_get_32( s, 32, 34 );
casti_m64( d01,8 ) = mm64_get_32( s, 33, 35 );
casti_m64( d00,9 ) = mm64_get_32( s, 36, 38 );
casti_m64( d01,9 ) = mm64_get_32( s, 37, 39 );
if ( len <= 640 ) return;
casti_m64( d00,10 ) = mm64_get_32( s, 40, 42 );
casti_m64( d01,10 ) = mm64_get_32( s, 41, 43 );
casti_m64( d00,11 ) = mm64_get_32( s, 44, 46 );
casti_m64( d01,11 ) = mm64_get_32( s, 45, 47 );
casti_m64( d00,12 ) = mm64_get_32( s, 48, 50 );
casti_m64( d01,12 ) = mm64_get_32( s, 49, 51 );
casti_m64( d00,13 ) = mm64_get_32( s, 52, 54 );
casti_m64( d01,13 ) = mm64_get_32( s, 53, 55 );
casti_m64( d00,14 ) = mm64_get_32( s, 56, 58 );
casti_m64( d01,14 ) = mm64_get_32( s, 57, 59 );
casti_m64( d00,15 ) = mm64_get_32( s, 60, 62 );
casti_m64( d01,15 ) = mm64_get_32( s, 61, 63 );
}
static inline void mm64_extr_lane_2x32( void *d, const void *s,
const int lane, const int bit_len )
{
casti_m64( d, 0 ) = mm64_get_32( s, lane , lane+ 4 );
casti_m64( d, 1 ) = mm64_get_32( s, lane+ 8, lane+12 );
casti_m64( d, 2 ) = mm64_get_32( s, lane+16, lane+20 );
casti_m64( d, 3 ) = mm64_get_32( s, lane+24, lane+28 );
if ( bit_len <= 256 ) return;
casti_m64( d, 4 ) = mm64_get_32( s, lane+32, lane+36 );
casti_m64( d, 5 ) = mm64_get_32( s, lane+40, lane+44 );
casti_m64( d, 6 ) = mm64_get_32( s, lane+48, lane+52 );
casti_m64( d, 7 ) = mm64_get_32( s, lane+56, lane+60 );
// bit_len == 512
}
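// Example (illustrative sketch, not from the original source): the MMX
// helpers cover the narrow 2-lane case in the same interleave/deinterleave
// style. As with any MMX usage the caller remains responsible for _mm_empty()
// before returning to FPU code.
static inline void example_rt_2x32( void *o0, void *o1,
                                    const void *i0, const void *i1 )
{
   uint32_t v[ 2*16 ] __attribute__ ((aligned (8)));    // 2 lanes x 64 bytes
   mm64_intrlv_2x32( v, i0, i1, 512 );
   mm64_dintrlv_2x32( o0, o1, 0, v, 512 );              // recovers i0 and i1
}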
#endif // MMX
#endif // INTRLV_MMX_H__

View File

@@ -1,77 +0,0 @@
#if !defined(INTRLV_SELECTOR_H__)
#define INTRLV_SELECTOR_H__
//////////////////////////////////////////////////////////////
//
// Generic interface for interleaving data for parallel processing.
//
// Best tech is chosen automatically.
/*
#if defined(__AVX512F__)
#define intrlv_4x128 mm512_intrlv_4x128
#define dintrlv_4x128 mm512_dintrlv_4x128
#define intrlv_8x64 mm512_intrlv_8x64
#define dintrlv_8x64 mm512_dintrlv_8x64
#define extr_lane_8x64 mm512_extr_lane_8x64
#define intrlv_16x32 mm512_intrlv_16x32
#define dintrlv_16x32 mm512_dintrlv_16x32
#define extr_lane_16x32 mm512_extr_lane_16x32
#define intrlv_2x128 mm512_intrlv_2x128
#define dintrlv_2x128 mm512_dintrlv_2x128
#define intrlv_4x64 mm512_intrlv_4x64
#define dintrlv_4x64 mm512_dintrlv_4x64
#define extr_lane_4x64 mm512_extr_lane_4x64
#define intrlv_8x32 mm512_intrlv_8x32
#define dintrlv_8x32 mm512_dintrlv_8x32
#define extr_lane_8x32 mm512_extr_lane_8x32
#elif defined(__AVX__)
*/
#if defined(__AVX__)
#define intrlv_2x128 mm256_intrlv_2x128
#define dintrlv_2x128 mm256_dintrlv_2x128
#define intrlv_4x64 mm256_intrlv_4x64
#define dintrlv_4x64 mm256_dintrlv_4x64
#define extr_lane_4x64 mm256_extr_lane_4x64
#define intrlv_8x32 mm256_intrlv_8x32
#define dintrlv_8x32 mm256_dintrlv_8x32
#define extr_lane_8x32 mm256_extr_lane_8x32
#define intrlv_4x32 mm256_intrlv_4x32
#define dintrlv_4x32 mm256_dintrlv_4x32
#define extr_lane_4x32 mm256_extr_lane_4x32
#else
#define intrlv_2x128 mm128_intrlv_2x128
#define dintrlv_2x128 mm128_dintrlv_2x128
#define intrlv_4x64 mm128_intrlv_4x64
#define dintrlv_4x64 mm128_dintrlv_4x64
#define extr_lane_4x64 mm128_extr_lane_4x64
#define intrlv_8x32 mm128_intrlv_8x32
#define dintrlv_8x32 mm128_dintrlv_8x32
#define extr_lane_8x32 mm128_extr_lane_8x32
#define intrlv_2x64 mm128_intrlv_2x64
#define dintrlv_2x64 mm128_dintrlv_2x64
#define extr_lane_2x64 mm128_extr_lane_2x64
#define intrlv_4x32 mm128_intrlv_4x32
#define dintrlv_4x32 mm128_dintrlv_4x32
#define extr_lane_4x32 mm128_extr_lane_4x32
#endif
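// Example (illustrative sketch, not from the original source): code written
// against the generic names compiles unchanged whichever mapping above is
// selected. The 4x64 signature is assumed to mirror the 8x64 and 4x32 forms
// shown elsewhere in this changeset.
static inline void example_generic_4x64( void *vdata, const void *d0,
            const void *d1, const void *d2, const void *d3 )
{
   intrlv_4x64( vdata, d0, d1, d2, d3, 640 );   // four 80 byte inputs
}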
#endif // INTRLV_SELECTOR_H__

View File

@@ -1,192 +0,0 @@
#if !defined(INTRLV_SSE2_H__)
#define INTRLV_SSE2_H__ 1
// Don't call _mm_extract_epi32 directly, it needs SSE4.1.
// Use mm128_extr_32 wrapper instead, it has both SSE4.1 & SSE2 covered.
#if defined(__SSE2__)
///////////////////////////////////////////////////////////////
//
// SSE2 128 bit vectors
// Macros to abstract typecasting
// Interleave lanes
#define mm128_put_64( s0, s1) \
_mm_set_epi64x( *((const uint64_t*)(s1)), *((const uint64_t*)(s0)) )
#define mm128_put_32( s0, s1, s2, s3 ) \
_mm_set_epi32( *((const uint32_t*)(s3)), *((const uint32_t*)(s2)), \
*((const uint32_t*)(s1)), *((const uint32_t*)(s0)) )
// Deinterleave lanes
#define mm128_get_64( s, i0, i1 ) \
_mm_set_epi64x( ((const uint64_t*)(s))[i1], ((const uint64_t*)(s))[i0] )
#define mm128_get_32( s, i0, i1, i2, i3 ) \
_mm_set_epi32( ((const uint32_t*)(s))[i3], ((const uint32_t*)(s))[i2], \
((const uint32_t*)(s))[i1], ((const uint32_t*)(s))[i0] )
// blend 2 vectors while interleaving: { hi[n], lo[n-1], ... hi[1], lo[0] }
#define mm128_intrlv_blend_64( hi, lo ) \
_mm_blend_epi16( hi, lo, 0x0f )
#define mm128_intrlv_blend_32( hi, lo ) \
_mm_blend_epi16( hi, lo, 0x33 )
// 1 sse2 block, 16 x 16 bytes
#define mm128_intrlv_4x32_128( d, s0, s1, s2, s3 )\
do { \
casti_m128i( d,0 ) = _mm_set_epi32( \
mm128_extr_32( s3, 0 ), mm128_extr_32( s2, 0 ), \
mm128_extr_32( s1, 0 ), mm128_extr_32( s0, 0 ) ); \
casti_m128i( d,1 ) = _mm_set_epi32( \
mm128_extr_32( s3, 1 ), mm128_extr_32( s2, 1 ), \
mm128_extr_32( s1, 1 ), mm128_extr_32( s0, 1 ) ); \
casti_m128i( d,2 ) = _mm_set_epi32( \
mm128_extr_32( s3, 2 ), mm128_extr_32( s2, 2 ), \
mm128_extr_32( s1, 2 ), mm128_extr_32( s0, 2 ) ); \
casti_m128i( d,3 ) = _mm_set_epi32( \
mm128_extr_32( s3, 3 ), mm128_extr_32( s2, 3 ), \
mm128_extr_32( s1, 3 ), mm128_extr_32( s0, 3 ) ); \
} while(0)
static inline void mm128_dintrlv_4x32_128( void *d0, void *d1, void *d2,
void *d3, const void *src )
{
__m128i s0 = *(__m128i*) src;
__m128i s1 = *(__m128i*)(src+16);
__m128i s2 = *(__m128i*)(src+32);
__m128i s3 = *(__m128i*)(src+48);
*(__m128i*)d0 = _mm_set_epi32(
mm128_extr_32( s3,0 ), mm128_extr_32( s2,0 ),
mm128_extr_32( s1,0 ), mm128_extr_32( s0,0 ) );
*(__m128i*)d1 = _mm_set_epi32(
mm128_extr_32( s3,1 ), mm128_extr_32( s2,1 ),
mm128_extr_32( s1,1 ), mm128_extr_32( s0,1 ) );
*(__m128i*)d2 = _mm_set_epi32(
mm128_extr_32( s3,2 ), mm128_extr_32( s2,2 ),
mm128_extr_32( s1,2 ), mm128_extr_32( s0,2 ) );
*(__m128i*)d3 = _mm_set_epi32(
mm128_extr_32( s3,3 ), mm128_extr_32( s2,3 ),
mm128_extr_32( s1,3 ), mm128_extr_32( s0,3 ) );
}
static inline void mm128_intrlv_2x64x128( void *d, const void *s0,
const void *s1 )
{
casti_m128i( d,0 ) = mm128_put_64( s0, s1 );
casti_m128i( d,1 ) = mm128_put_64( s0+ 8, s1+ 8 );
casti_m128i( d,2 ) = mm128_put_64( s0+16, s1+16 );
casti_m128i( d,3 ) = mm128_put_64( s0+24, s1+24 );
}
#define mm128_bswap_intrlv_4x32_128( d, src ) \
do { \
__m128i ss = mm128_bswap_32( src );\
casti_m128i( d,0 ) = _mm_set1_epi32( mm128_extr_32( ss, 0 ) ); \
casti_m128i( d,1 ) = _mm_set1_epi32( mm128_extr_32( ss, 1 ) ); \
casti_m128i( d,2 ) = _mm_set1_epi32( mm128_extr_32( ss, 2 ) ); \
casti_m128i( d,3 ) = _mm_set1_epi32( mm128_extr_32( ss, 3 ) ); \
} while(0)
//
// User functions.
// interleave 4 arrays of 32 bit elements for 128 bit processing
// bit_len must be 256, 512 or 640 bits.
static inline void mm128_intrlv_4x32( void *d, const void *s0,
const void *s1, const void *s2, const void *s3, int bit_len )
{
mm128_intrlv_4x32_128( d , casti_m128i(s0,0), casti_m128i(s1,0),
casti_m128i(s2,0), casti_m128i(s3,0) );
mm128_intrlv_4x32_128( d+ 64, casti_m128i(s0,1), casti_m128i(s1,1),
casti_m128i(s2,1), casti_m128i(s3,1) );
if ( bit_len <= 256 ) return;
mm128_intrlv_4x32_128( d+128, casti_m128i(s0,2), casti_m128i(s1,2),
casti_m128i(s2,2), casti_m128i(s3,2) );
mm128_intrlv_4x32_128( d+192, casti_m128i(s0,3), casti_m128i(s1,3),
casti_m128i(s2,3), casti_m128i(s3,3) );
if ( bit_len <= 512 ) return;
mm128_intrlv_4x32_128( d+256, casti_m128i(s0,4), casti_m128i(s1,4),
casti_m128i(s2,4), casti_m128i(s3,4) );
if ( bit_len <= 640 ) return;
mm128_intrlv_4x32_128( d+320, casti_m128i(s0,5), casti_m128i(s1,5),
casti_m128i(s2,5), casti_m128i(s3,5) );
mm128_intrlv_4x32_128( d+384, casti_m128i(s0,6), casti_m128i(s1,6),
casti_m128i(s2,6), casti_m128i(s3,6) );
mm128_intrlv_4x32_128( d+448, casti_m128i(s0,7), casti_m128i(s1,7),
casti_m128i(s2,7), casti_m128i(s3,7) );
// bit_len == 1024
}
// Still used by decred due to odd data size: 180 bytes
// bit_len must be multiple of 32
static inline void mm128_intrlv_4x32x( void *dst, void *src0, void *src1,
void *src2, void *src3, int bit_len )
{
uint32_t *d = (uint32_t*)dst;
uint32_t *s0 = (uint32_t*)src0;
uint32_t *s1 = (uint32_t*)src1;
uint32_t *s2 = (uint32_t*)src2;
uint32_t *s3 = (uint32_t*)src3;
for ( int i = 0; i < bit_len >> 5; i++, d += 4 )
{
*d = *(s0+i);
*(d+1) = *(s1+i);
*(d+2) = *(s2+i);
*(d+3) = *(s3+i);
}
}
static inline void mm128_dintrlv_4x32( void *d0, void *d1, void *d2,
void *d3, const void *s, int bit_len )
{
mm128_dintrlv_4x32_128( d0 , d1 , d2 , d3 , s );
mm128_dintrlv_4x32_128( d0+ 16, d1+ 16, d2+ 16, d3+ 16, s+ 64 );
if ( bit_len <= 256 ) return;
mm128_dintrlv_4x32_128( d0+ 32, d1+ 32, d2+ 32, d3+ 32, s+128 );
mm128_dintrlv_4x32_128( d0+ 48, d1+ 48, d2+ 48, d3+ 48, s+192 );
if ( bit_len <= 512 ) return;
mm128_dintrlv_4x32_128( d0+ 64, d1+ 64, d2+ 64, d3+ 64, s+256 );
if ( bit_len <= 640 ) return;
mm128_dintrlv_4x32_128( d0+ 80, d1+ 80, d2+ 80, d3+ 80, s+320 );
mm128_dintrlv_4x32_128( d0+ 96, d1+ 96, d2+ 96, d3+ 96, s+384 );
mm128_dintrlv_4x32_128( d0+112, d1+112, d2+112, d3+112, s+448 );
// bit_len == 1024
}
// extract and deinterleave specified lane.
static inline void mm128_extr_lane_4x32( void *d, const void *s,
const int lane, const int bit_len )
{
casti_m128i( d, 0 ) =
mm128_get_32( s, lane , lane+ 4, lane+ 8, lane+12 );
casti_m128i( d, 1 ) =
mm128_get_32( s, lane+16, lane+20, lane+24, lane+28 );
if ( bit_len <= 256 ) return;
casti_m128i( d, 2 ) =
mm128_get_32( s, lane+32, lane+36, lane+40, lane+44 );
casti_m128i( d, 3 ) =
mm128_get_32( s, lane+48, lane+52, lane+56, lane+60 );
// bit_len == 512
}
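// Example (illustrative sketch, not from the original source): interleave
// four 64 byte inputs and pull a single lane back out. In the miner a 4-way
// hash runs on v between these two calls. lane2 must be 16 byte aligned.
static inline void example_extr_4x32( void *lane2, const void *i0,
            const void *i1, const void *i2, const void *i3 )
{
   uint32_t v[ 4*16 ] __attribute__ ((aligned (16)));   // 4 lanes x 64 bytes
   mm128_intrlv_4x32( v, i0, i1, i2, i3, 512 );
   mm128_extr_lane_4x32( lane2, v, 2, 512 );            // recovers i2
}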
// Interleave 80 bytes of 32 bit data for 4 lanes.
static inline void mm128_bswap_intrlv80_4x32( void *d, const void *s )
{
mm128_bswap_intrlv_4x32_128( d , casti_m128i( s, 0 ) );
mm128_bswap_intrlv_4x32_128( d+ 64, casti_m128i( s, 1 ) );
mm128_bswap_intrlv_4x32_128( d+128, casti_m128i( s, 2 ) );
mm128_bswap_intrlv_4x32_128( d+192, casti_m128i( s, 3 ) );
mm128_bswap_intrlv_4x32_128( d+256, casti_m128i( s, 4 ) );
}
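// Example (illustrative sketch, not from the original source): prepare one
// 80 byte block header for 4-way scanning. The header is byte swapped and
// broadcast to all 4 lanes in one step; the caller then writes a different
// nonce per lane. With 4x32 interleaving dword n of lane l sits at element
// n*4 + l of the output, which must be 16 byte aligned and 320 bytes long.
static inline void example_prep80_4x32( uint32_t *vdata, const void *pdata )
{
   mm128_bswap_intrlv80_4x32( vdata, pdata );
}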
#endif // SSE2
#endif // INTRLV_SSE2_H__

1326
simd-utils/intrlv.h Normal file

File diff suppressed because it is too large.

View File

@@ -1,5 +1,5 @@
#if !defined(SIMD_SSE2_H__)
#define SIMD_SSE2_H__ 1
#if !defined(SIMD_128_H__)
#define SIMD_128_H__ 1
#if defined(__SSE2__)
@@ -15,69 +15,158 @@
//
// 128 bit operations are enhanced with uint128 which adds 128 bit integer
// support for arithmetic and other operations. Casting to uint128_t is not
// free, it requires a move from mmx to gpr but is often the only way or
// the more efficient way for certain operations.
// Compile time constant initializers are type agnostic and can have
// a pointer handle of almost any type. All arguments must be scalar constants.
// up to 64 bits. These initializers should only be used at compile time
// to initialize vector arrays. All data reside in memory.
// efficient but is sometimes the only way for certain operations.
//
// These are of limited use; it is often simpler to use uint64_t arrays
// and cast as required.
#define mm128_const_64( x1, x0 ) {{ x1, x0 }}
#define mm128_const1_64( x ) {{ x, x }}
#define mm128_const_32( x3, x2, x1, x0 ) {{ x3, x2, x1, x0 }}
#define mm128_const1_32( x ) {{ x,x,x,x }}
#define mm128_const_16( x7, x6, x5, x4, x3, x2, x1, x0 ) \
{{ x7, x6, x5, x4, x3, x2, x1, x0 }}
#define mm128_const1_16( x ) {{ x,x,x,x, x,x,x,x }}
#define mm128_const_8( x15, x14, x13, x12, x11, x10, x09, x08, \
x07, x06, x05, x04, x03, x02, x01, x00 ) \
{{ x15, x14, x13, x12, x11, x10, x09, x08, \
x07, x06, x05, x04, x03, x02, x01, x00 }}
#define mm128_const1_8( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }}
// Compile time constants, use only for compile time initializing.
#define c128_zero mm128_const1_64( 0ULL )
#define c128_one_128 mm128_const_64( 0ULL, 1ULL )
#define c128_one_64 mm128_const1_64( 1ULL )
#define c128_one_32 mm128_const1_32( 1UL )
#define c128_one_16 mm128_const1_16( 1U )
#define c128_one_8 mm128_const1_8( 1U )
#define c128_neg1 mm128_const1_64( 0xFFFFFFFFFFFFFFFFULL )
#define c128_neg1_64 mm128_const1_64( 0xFFFFFFFFFFFFFFFFULL )
#define c128_neg1_32 mm128_const1_32( 0xFFFFFFFFUL )
#define c128_neg1_16 mm128_const1_32( 0xFFFFU )
#define c128_neg1_8 mm128_const1_32( 0xFFU )
// Constants are an issue with simd. Simply put, immediate constants don't
// exist. All simd constants either reside in memory or a register.
// The distinction is made below with c128 being memory resident defined
// at compile time and m128 being register defined at run time.
//
// All run time constants must be generated using their component elements
// incurring significant overhead. The more elements the more overhead
// both in instructions and in GP register usage. Whenever possible use
// 64 bit constant elements regardless of the actual element size.
//
// Due to the cost of generating constants they should not be regenerated
// in the same function. Instead, define a local const.
//
// Some constant values can be generated using shortcuts. Zero for example
// is as simple as XORing any register with itself, and is implemented
// in the setzero intrinsic. These shortcuts must be implemented in asm
// due to doing things the compiler would complain about. Another single
// instruction constant is -1, defined below. Others may be added as the need
// arises. Even single instruction constants are less efficient than local
// register variables so the advice above stands.
//
// One common use for simd constants is as a control index for some simd
// instructions like blend and shuffle. The utilities below do not take this
// into account. Those that generate a simd constant should not be used
// repeatedly. It may be better for the application to reimplement the
// utility to better suit its usage.
//
// More tips:
//
// Conversions from integer to vector should be avoided whenever possible.
// Extract, insert, set and set1 instructions should be avoided.
// In addition to the issues with constants, set is also very inefficient with
// variables.
// Converting integer data to perform a couple of vector operations
// then converting back to integer should be avoided. Converting data in
// registers should also be avoided. Conversion should be limited to buffers
// in memory where the data is loaded directly to vector registers, bypassing
// the integer to vector conversion.
//
// Pseudo constants.
//
// These can't be used for compile time initialization.
// These should be used for all simple vectors.
//
// _mm_setzero_si128 uses pxor instruction, it's unclear what _mm_set_epi does.
// Clearly it's faster than reading a memory resident constant. Assume set
// is also faster.
// If a pseudo constant is used often in a function it may be preferable
// to define a register variable to represent that constant.
// register __m128i zero = mm_setzero_si128().
// This reduces any references to a move instruction.
// Repeated usage of any simd pseudo-constant should use a locally defined
// const rather than recomputing it for every reference.
#define m128_zero _mm_setzero_si128()
#define m128_one_128 _mm_set_epi64x( 0ULL, 1ULL )
#define m128_one_64 _mm_set1_epi64x( 1ULL )
#define m128_one_32 _mm_set1_epi32( 1UL )
#define m128_one_16 _mm_set1_epi16( 1U )
#define m128_one_8 _mm_set1_epi8( 1U )
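// Example (illustrative sketch, not from the original source): hoist a pseudo
// constant into a local so the register is materialised once per call rather
// than once per use, as recommended above.
static inline __m128i example_neg_sum_64( __m128i a, __m128i b )
{
   const __m128i zero = m128_zero;              // one pxor for both uses
   return _mm_add_epi64( _mm_sub_epi64( zero, a ),
                         _mm_sub_epi64( zero, b ) );    // -(a+b)
}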
// As suggested by Intel...
// Arg passing for simd registers is assumed to be first output arg,
// then input args, then locals. This is probably wrong, gcc likely picks
// whichever register is currently holding the variable, or whichever
// register is available to hold it. Nevertheless, all args are specified
// by their arg number and local variables use registers starting at
// last arg + 1, by type.
// Output args don't need to be listed as clobbered.
#define m128_neg1 _mm_set1_epi64x( 0xFFFFFFFFFFFFFFFFULL )
static inline __m128i m128_one_64_fn()
{
__m128i a;
asm( "pxor %0, %0\n\t"
"pcmpeqd %%xmm1, %%xmm1\n\t"
"psubq %%xmm1, %0\n\t"
: "=x"(a)
:
: "xmm1" );
return a;
}
#define m128_one_64 m128_one_64_fn()
static inline __m128i m128_one_32_fn()
{
__m128i a;
asm( "pxor %0, %0\n\t"
"pcmpeqd %%xmm1, %%xmm1\n\t"
"psubd %%xmm1, %0\n\t"
: "=x"(a)
:
: "xmm1" );
return a;
}
#define m128_one_32 m128_one_32_fn()
static inline __m128i m128_one_16_fn()
{
__m128i a;
asm( "pxor %0, %0\n\t"
"pcmpeqd %%xmm1, %%xmm1\n\t"
"psubw %%xmm1, %0\n\t"
: "=x"(a)
:
: "xmm1" );
return a;
}
#define m128_one_16 m128_one_16_fn()
static inline __m128i m128_one_8_fn()
{
__m128i a;
asm( "pxor %0, %0\n\t"
"pcmpeqd %%xmm1, %%xmm1\n\t"
"psubb %%xmm1, %0\n\t"
: "=x"(a)
:
: "xmm1" );
return a;
}
#define m128_one_8 m128_one_8_fn()
static inline __m128i m128_neg1_fn()
{
__m128i a;
asm( "pcmpeqd %0, %0\n\t"
: "=x"(a) );
return a;
}
#define m128_neg1 m128_neg1_fn()
#if defined(__SSE4_1__)
static inline __m128i m128_one_128_fn()
{
__m128i a;
const uint64_t hi = 0, lo = 1;
asm( "pinsrq $0, %1, %0\n\t"
"pinsrq $1, %2, %0\n\t"
: "=x"(a)
: "r"(lo), "r"(hi) );
return a;
}
#define m128_one_128 m128_one_128_fn()
// alternative to _mm_set_epi64x, doesn't use mem,
// cost = 2 pinsrt, estimate 4 clocks.
static inline __m128i m128_const_64( uint64_t hi, uint64_t lo )
{
__m128i a;
asm( "pinsrq $0, %2, %0\n\t"
"pinsrq $1, %1, %0\n\t"
: "=x"(a)
: "r"(hi), "r"(lo) );
return a;
}
#else
#define m128_one_128 _mm_set_epi64x( 0ULL, 1ULL )
#define m128_const_64 _mm_set_epi64x
#endif
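// Example (illustrative sketch, not from the original source): m128_const_64
// is convenient for building shuffle controls at run time without a memory
// load, e.g. the 32 bit byte swap control used further down.
static inline __m128i example_bswap32_ctl()
{
   return m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 );
}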
//
// Basic operations without equivalent SIMD intrinsic
@@ -90,9 +179,21 @@
#define mm128_negate_32( v ) _mm_sub_epi32( m128_zero, v )
#define mm128_negate_16( v ) _mm_sub_epi16( m128_zero, v )
// Use uint128_t for most arithmetic, bit shift, comparison operations
// spanning all 128 bits. Some extractions are also more efficient
// casting __m128i as uint128_t and using standard operators.
// Add 4 values, fewer dependencies than sequential addition.
#define mm128_add4_64( a, b, c, d ) \
_mm_add_epi64( _mm_add_epi64( a, b ), _mm_add_epi64( c, d ) )
#define mm128_add4_32( a, b, c, d ) \
_mm_add_epi32( _mm_add_epi32( a, b ), _mm_add_epi32( c, d ) )
#define mm128_add4_16( a, b, c, d ) \
_mm_add_epi16( _mm_add_epi16( a, b ), _mm_add_epi16( c, d ) )
#define mm128_add4_8( a, b, c, d ) \
_mm_add_epi8( _mm_add_epi8( a, b ), _mm_add_epi8( c, d ) )
#define mm128_xor4( a, b, c, d ) \
_mm_xor_si128( _mm_xor_si128( a, b ), _mm_xor_si128( c, d ) )
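// Example (illustrative sketch, not from the original source): the 4-input
// helpers shorten reduction trees, e.g. folding four vectors into one XOR
// accumulator with only two dependent steps instead of three.
static inline __m128i example_fold4( __m128i a, __m128i b, __m128i c,
                                     __m128i d )
{
   return mm128_xor4( a, b, c, d );             // (a^b) ^ (c^d)
}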
// This isn't cheap, not suitable for bulk usage.
#define mm128_extr_4x32( a0, a1, a2, a3, src ) \
@@ -105,6 +206,16 @@ do { \
// Horizontal vector testing
#if defined(__SSE4_1__)
#define mm128_allbits0( a ) _mm_testz_si128( a, a )
#define mm128_allbits1( a ) _mm_testc_si128( a, m128_neg1 )
#define mm128_allbitsne( a ) _mm_testnzc_si128( a, m128_neg1 )
#define mm128_anybits0 mm128_allbitsne
#define mm128_anybits1 mm128_allbitsne
#else // SSE2
// Bit-wise test of entire vector, useful to test results of cmp.
#define mm128_anybits0( a ) (((uint128_t)(a))+1)
#define mm128_anybits1( a ) (uint128_t)(a)
@@ -112,6 +223,8 @@ do { \
#define mm128_allbits0( a ) ( !mm128_anybits1(a) )
#define mm128_allbits1( a ) ( !mm128_anybits0(a) )
#endif // SSE41 else SSE2
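// Example (illustrative sketch, not from the original source): the horizontal
// tests are normally applied to a compare result, e.g. checking that every
// 32 bit element of two vectors matches.
static inline int example_all_equal_32( __m128i a, __m128i b )
{
   return mm128_allbits1( _mm_cmpeq_epi32( a, b ) );
}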
//
// Vector pointer cast
@@ -139,6 +252,7 @@ do { \
#else
// Doesn't work with register variables.
#define mm128_extr_64(a,n) (((uint64_t*)&a)[n])
#define mm128_extr_32(a,n) (((uint32_t*)&a)[n])
@@ -209,7 +323,7 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n )
// Bit rotations
// AVX512 has implemented bit rotation for 128 bit vectors with
// 64 and 32 bit elements. Not really useful.
// 64 and 32 bit elements.
//
// Rotate each element of v by c bits
@@ -233,25 +347,29 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n )
_mm_or_si128( _mm_slli_epi16( v, c ), _mm_srli_epi16( v, 16-(c) ) )
//
// Rotate elements across all lanes
// Rotate vector elements across all lanes
#define mm128_swap_64( v ) _mm_shuffle_epi32( v, 0x4e )
#define mm128_ror_1x32( v ) _mm_shuffle_epi32( v, 0x39 )
#define mm128_rol_1x32( v ) _mm_shuffle_epi32( v, 0x93 )
#if defined (__SSSE3__)
// no SSE2 implementation, no current users
#define mm128_ror_1x16( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 1, 0,15,14,13,12,11,10 \
9, 8, 7, 6, 5, 4, 3, 2 ) )
_mm_shuffle_epi8( v, m128_const_64( 0x01000f0e0d0c0b0a, \
0x0908070605040302 ) )
#define mm128_rol_1x16( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 13,12,11,10, 9, 8, 7, 6, \
5, 4, 3, 2, 1, 0,15,14 ) )
_mm_shuffle_epi8( v, m128_const_64( 0x0d0c0b0a09080706, \
0x0504030201000f0e ) )
#define mm128_ror_1x8( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 0,15,14,13,12,11,10, 9, \
8, 7, 6, 5, 4, 3, 2, 1 ) )
_mm_shuffle_epi8( v, m128_const_64( 0x000f0e0d0c0b0a09, \
0x0807060504030201 ) )
#define mm128_rol_1x8( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 14,13,12,11,10, 9, 8, 7, \
6, 5, 4, 3, 2, 1, 0,15 ) )
_mm_shuffle_epi8( v, m128_const_64( 0x0e0d0c0b0a090807, \
0x060504030201000f ) )
#endif // SSSE3
// Rotate 16 byte (128 bit) vector by c bytes.
// Less efficient using shift but more versatile. Use only for odd number
@@ -262,30 +380,18 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n )
#define mm128_brol( v, c ) \
_mm_or_si128( _mm_slli_si128( v, c ), _mm_srli_si128( v, 16-(c) ) )
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm128_invert_32( v ) _mm_shuffle_epi32( v, 0x1b )
#define mm128_invert_16( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 1, 0, 3, 2, 5, 4, 7, 6, \
9, 8, 11,10, 13,12, 15,14 ) )
#define mm128_invert_8( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, \
8, 9,10,11,12,13,14,15 ) )
//
// Rotate elements within lanes.
#define mm128_swap32_64( v ) _mm_shuffle_epi32( v, 0xb1 )
#define mm128_ror16_64( v ) _mm_shuffle_epi8( v, \
_mm_set_epi8( 9, 8,15,14,13,12,11,10, 1, 0, 7, 6, 5, 4, 3, 2 )
m128_const_64( 0x09080f0e0d0c0b0a, 0x0100070605040302 ) )
#define mm128_rol16_64( v ) _mm_shuffle_epi8( v, \
_mm_set_epi8( 13,12,11,10, 9, 8,15,14, 5, 4, 3, 2, 1, 0, 7, 6 )
m128_const_64( 0x0d0c0b0a09080f0e, 0x0504030201000706 ) )
#define mm128_swap16_32( v ) _mm_shuffle_epi8( v, \
_mm_set_epi8( 13,12,15,14, 9,8,11,10, 5,4,7,6, 1,0,3,2 )
m128_const_64( 0x0d0c0f0e09080b0a, 0x0504070601000302 ) )
//
// Endian byte swap.
@@ -293,16 +399,43 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n )
#if defined(__SSSE3__)
#define mm128_bswap_64( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 8, 9,10,11,12,13,14,15, \
0, 1, 2, 3, 4, 5, 6, 7 ) )
_mm_shuffle_epi8( v, m128_const_64( 0x08090a0b0c0d0e0f, \
0x0001020304050607 ) )
#define mm128_bswap_32( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 12,13,14,15, 8, 9,10,11, \
4, 5, 6, 7, 0, 1, 2, 3 ) )
_mm_shuffle_epi8( v, m128_const_64( 0x0c0d0e0f08090a0b, \
0x0405060700010203 ) )
#define mm128_bswap_16( v ) \
_mm_shuffle_epi8( v, _mm_set_epi8( 14,15, 12,13, 10,11, 8, 9, \
6, 7, 4, 5, 2, 3, 0, 1 ) )
#define mm128_bswap_16( v ) _mm_shuffle_epi8( v, \
m128_const_64( 0x0e0f0c0d0a0b0809, 0x0607040502030001 ) )
// 8 byte qword * 8 qwords * 2 lanes = 128 bytes
#define mm128_block_bswap_64( d, s ) do \
{ \
__m128i ctl = m128_const_64( 0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
casti_m128i( d, 0 ) = _mm_shuffle_epi8( casti_m128i( s, 0 ), ctl ); \
casti_m128i( d, 1 ) = _mm_shuffle_epi8( casti_m128i( s, 1 ), ctl ); \
casti_m128i( d, 2 ) = _mm_shuffle_epi8( casti_m128i( s, 2 ), ctl ); \
casti_m128i( d, 3 ) = _mm_shuffle_epi8( casti_m128i( s, 3 ), ctl ); \
casti_m128i( d, 4 ) = _mm_shuffle_epi8( casti_m128i( s, 4 ), ctl ); \
casti_m128i( d, 5 ) = _mm_shuffle_epi8( casti_m128i( s, 5 ), ctl ); \
casti_m128i( d, 6 ) = _mm_shuffle_epi8( casti_m128i( s, 6 ), ctl ); \
casti_m128i( d, 7 ) = _mm_shuffle_epi8( casti_m128i( s, 7 ), ctl ); \
} while(0)
// 4 byte dword * 8 dwords * 4 lanes = 128 bytes
#define mm128_block_bswap_32( d, s ) do \
{ \
__m128i ctl = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 ); \
casti_m128i( d, 0 ) = _mm_shuffle_epi8( casti_m128i( s, 0 ), ctl ); \
casti_m128i( d, 1 ) = _mm_shuffle_epi8( casti_m128i( s, 1 ), ctl ); \
casti_m128i( d, 2 ) = _mm_shuffle_epi8( casti_m128i( s, 2 ), ctl ); \
casti_m128i( d, 3 ) = _mm_shuffle_epi8( casti_m128i( s, 3 ), ctl ); \
casti_m128i( d, 4 ) = _mm_shuffle_epi8( casti_m128i( s, 4 ), ctl ); \
casti_m128i( d, 5 ) = _mm_shuffle_epi8( casti_m128i( s, 5 ), ctl ); \
casti_m128i( d, 6 ) = _mm_shuffle_epi8( casti_m128i( s, 6 ), ctl ); \
casti_m128i( d, 7 ) = _mm_shuffle_epi8( casti_m128i( s, 7 ), ctl ); \
} while(0)
#else // SSE2
@@ -326,16 +459,41 @@ static inline __m128i mm128_bswap_16( __m128i v )
return _mm_or_si128( _mm_slli_epi16( v, 8 ), _mm_srli_epi16( v, 8 ) );
}
static inline void mm128_block_bswap_64( __m128i *d, __m128i *s )
{
d[0] = mm128_bswap_64( s[0] );
d[1] = mm128_bswap_64( s[1] );
d[2] = mm128_bswap_64( s[2] );
d[3] = mm128_bswap_64( s[3] );
d[4] = mm128_bswap_64( s[4] );
d[5] = mm128_bswap_64( s[5] );
d[6] = mm128_bswap_64( s[6] );
d[7] = mm128_bswap_64( s[7] );
}
static inline void mm128_block_bswap_32( __m128i *d, __m128i *s )
{
d[0] = mm128_bswap_32( s[0] );
d[1] = mm128_bswap_32( s[1] );
d[2] = mm128_bswap_32( s[2] );
d[3] = mm128_bswap_32( s[3] );
d[4] = mm128_bswap_32( s[4] );
d[5] = mm128_bswap_32( s[5] );
d[6] = mm128_bswap_32( s[6] );
d[7] = mm128_bswap_32( s[7] );
}
#endif // SSSE3 else SSE2
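// Example (illustrative sketch, not from the original source): both block
// byte swap variants above present the same interface, so a caller can
// convert a 128 byte buffer (e.g. two 64 byte message blocks) without caring
// whether the SSSE3 or the SSE2 path was compiled in. Buffers must be 16 byte
// aligned.
static inline void example_bswap_block64( void *d, void *s )
{
   mm128_block_bswap_64( d, s );                // 8 vectors, 128 bytes
}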
//
// Rotate in place concatenated 128 bit vectors as one 256 bit vector.
// Swap 128 bit vectors.
#define mm128_swap128_256(v1, v2) \
v1 = _mm_xor_si128(v1, v2); \
v2 = _mm_xor_si128(v1, v2); \
v1 = _mm_xor_si128(v1, v2);
#define mm128_swap128_256( v1, v2 ) \
v1 = _mm_xor_si128( v1, v2 ); \
v2 = _mm_xor_si128( v1, v2 ); \
v1 = _mm_xor_si128( v1, v2 );
// Concatenate v1 & v2 and rotate as one 256 bit vector.
#if defined(__SSE4_1__)
@@ -457,4 +615,4 @@ do { \
#endif // SSE4.1 else SSE2
#endif // __SSE2__
#endif // SIMD_SSE2_H__
#endif // SIMD_128_H__

Some files were not shown because too many files have changed in this diff.