Jay D Dee
2020-06-18 17:30:26 -04:00
parent 51a1d91abd
commit cdd587537e
27 changed files with 455 additions and 567 deletions

View File

@@ -78,7 +78,6 @@ void decred_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
uint32_t extraheader[32] = { 0 };
int headersize = 0;
uint32_t* extradata = (uint32_t*) sctx->xnonce1;
size_t t;
int i;
// getwork over stratum, getwork merkle + header passed in coinb1
@@ -87,9 +86,6 @@ void decred_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
sizeof(extraheader) );
memcpy( extraheader, &sctx->job.coinbase[32], headersize );
// Increment extranonce2
for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ );
// Assemble block header
memset( g_work->data, 0, sizeof(g_work->data) );
g_work->data[0] = le32dec( sctx->job.version );
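The loop removed from this hunk is the usual little-endian, carry-propagating increment of extranonce2; the same idiom is deleted from the other build_extraheader functions later in this commit, presumably because the increment now happens in one shared place. A minimal standalone sketch of what that loop does (function name is illustrative, the loop body is taken verbatim from the removed line):

#include <stddef.h>
#include <stdint.h>

// Increment a little-endian multi-byte counter in place.
// The loop stops as soon as a byte does not wrap to zero, i.e. when
// there is no carry left to propagate into the next byte.
static void increment_extranonce2( uint8_t *xnonce2, size_t xnonce2_size )
{
   for ( size_t t = 0; t < xnonce2_size && !( ++xnonce2[t] ); t++ );
}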

View File

@@ -99,13 +99,13 @@ void hodl_build_block_header( struct work* g_work, uint32_t version,
// called only by thread 0, saves a backup of g_work
void hodl_get_new_work( struct work* work, struct work* g_work)
{
pthread_mutex_lock( &g_work_lock );
pthread_rwlock_rdlock( &g_work_lock );
work_free( &hodl_work );
work_copy( &hodl_work, g_work );
hodl_work.data[ algo_gate.nonce_index ] = ( clock() + rand() ) % 9999;
pthread_mutex_unlock( &g_work_lock );
pthread_rwlock_unlock( &g_work_lock );
}
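The commit replaces the global g_work mutex with a pthread read-write lock: threads that only copy g_work (as hodl_get_new_work does here) take the shared read lock, so several of them can snapshot the current work concurrently. A minimal sketch of the pattern, assuming the thread that refreshes g_work takes the write lock elsewhere; work_copy and work_free are the miner's own helpers as used in the hunk above, the function names are illustrative:

#include <pthread.h>

static pthread_rwlock_t g_work_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct work g_work;

// Many miner threads: shared access while copying the current work.
void get_work_snapshot( struct work *dst )
{
   pthread_rwlock_rdlock( &g_work_lock );
   work_copy( dst, &g_work );
   pthread_rwlock_unlock( &g_work_lock );
}

// Single updater (e.g. the stratum thread): exclusive access while replacing it.
void publish_new_work( const struct work *src )
{
   pthread_rwlock_wrlock( &g_work_lock );
   work_free( &g_work );
   work_copy( &g_work, src );
   pthread_rwlock_unlock( &g_work_lock );
}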
json_t *hodl_longpoll_rpc_call( CURL *curl, int *err, char* lp_url )
@@ -159,11 +159,10 @@ bool register_hodl_algo( algo_gate_t* gate )
applog( LOG_ERR, "Only CPUs with AES are supported, use legacy version.");
return false;
#endif
// if ( TOTAL_CHUNKS % opt_n_threads )
// {
// applog(LOG_ERR,"Thread count must be power of 2.");
// return false;
// }
if ( GARBAGE_SIZE % opt_n_threads )
applog( LOG_WARNING,"WARNING: Thread count must be a power of 2. Miner may crash or produce invalid hashes!" );
pthread_barrier_init( &hodl_barrier, NULL, opt_n_threads );
gate->optimizations = SSE42_OPT | AES_OPT | AVX2_OPT;
gate->scanhash = (void*)&hodl_scanhash;
@@ -175,7 +174,7 @@ bool register_hodl_algo( algo_gate_t* gate )
gate->resync_threads = (void*)&hodl_resync_threads;
gate->do_this_thread = (void*)&hodl_do_this_thread;
gate->work_cmp_size = 76;
hodl_scratchbuf = (unsigned char*)malloc( 1 << 30 );
hodl_scratchbuf = (unsigned char*)_mm_malloc( 1 << 30, 64 );
allow_getwork = false;
opt_target_factor = 8388608.0;
return ( hodl_scratchbuf != NULL );
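Switching from malloc to _mm_malloc gives the 1 GiB HOdl scratch buffer 64-byte alignment, matching the aligned(64) attribute added to the stack Cache array in the next hunk, so aligned SIMD loads into that memory are safe. A small sketch of the allocate/release pair, assuming the buffer is eventually released with _mm_free (the free site is not shown in this diff; function names are illustrative):

#include <immintrin.h>
#include <stdbool.h>

static unsigned char *hodl_scratchbuf = NULL;

bool hodl_alloc_scratch( void )
{
   // 1 << 30 bytes, aligned to a 64-byte (cache line) boundary.
   hodl_scratchbuf = (unsigned char*)_mm_malloc( 1 << 30, 64 );
   return hodl_scratchbuf != NULL;
}

void hodl_free_scratch( void )
{
   _mm_free( hodl_scratchbuf );   // memory from _mm_malloc must be freed with _mm_free
   hodl_scratchbuf = NULL;
}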

View File

@@ -70,7 +70,7 @@ int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce,
uint32_t *ptarget = work->target;
int threadNumber = mythr->id;
CacheEntry *Garbage = (CacheEntry*)hodl_scratchbuf;
CacheEntry Cache[AES_PARALLEL_N];
CacheEntry Cache[AES_PARALLEL_N] __attribute__ ((aligned (64)));
__m128i* data[AES_PARALLEL_N];
const __m128i* next[AES_PARALLEL_N];
uint32_t CollisionCount = 0;

View File

@@ -215,9 +215,6 @@ void phi2_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
size_t t;
algo_gate.gen_merkle_root( merkle_tree, sctx );
// Increment extranonce2
for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ );
// Assemble block header
algo_gate.build_block_header( g_work, le32dec( sctx->job.version ),
(uint32_t*) sctx->job.prevhash, (uint32_t*) merkle_tree,
le32dec( sctx->job.ntime ), le32dec(sctx->job.nbits), NULL );
@@ -225,7 +222,6 @@ void phi2_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
g_work->data[ 20+t ] = ((uint32_t*)sctx->job.extra)[t];
}
bool register_phi2_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT | VAES_OPT;

View File

@@ -156,7 +156,7 @@ int scanhash_zr5( struct work *work, uint32_t max_nonce,
void zr5_get_new_work( struct work* work, struct work* g_work, int thr_id,
uint32_t* end_nonce_ptr )
{
pthread_mutex_lock( &g_work_lock );
pthread_rwlock_rdlock( &g_work_lock );
// ignore POK in first word
const int wkcmp_sz = 72; // (19-1) * sizeof(uint32_t)
@@ -174,7 +174,7 @@ void zr5_get_new_work( struct work* work, struct work* g_work, int thr_id,
else
++(*nonceptr);
pthread_mutex_unlock( &g_work_lock );
pthread_rwlock_unlock( &g_work_lock );
}
void zr5_display_pok( struct work* work )

View File

@@ -69,13 +69,9 @@ void lbry_build_block_header( struct work* g_work, uint32_t version,
void lbry_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
{
unsigned char merkle_root[64] = { 0 };
size_t t;
int i;
algo_gate.gen_merkle_root( merkle_root, sctx );
// Increment extranonce2
for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ );
// Assemble block header
memset( g_work->data, 0, sizeof(g_work->data) );
g_work->data[0] = le32dec( sctx->job.version );

View File

@@ -227,7 +227,7 @@ bool initialize_torture_garden()
}
// Produce a 32-byte hash from 80-byte input data
int minotaur_hash( void *output, const void *input )
int minotaur_hash( void *output, const void *input, int thr_id )
{
unsigned char hash[64] __attribute__ ((aligned (64)));

View File

@@ -135,18 +135,16 @@ void x16rt_getAlgoString( const uint32_t *timeHash, char *output)
void veil_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
{
uint32_t merkleroothash[8];
uint32_t witmerkleroothash[8];
uint32_t denom10[8];
uint32_t denom100[8];
uint32_t denom1000[8];
uint32_t denom10000[8];
int i;
uchar merkle_tree[64] = { 0 };
size_t t;
algo_gate.gen_merkle_root( merkle_tree, sctx );
// Increment extranonce2
for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ );
// Assemble block header
// algo_gate.build_block_header( g_work, le32dec( sctx->job.version ),
// (uint32_t*) sctx->job.prevhash, (uint32_t*) merkle_tree,
// le32dec( sctx->job.ntime ), le32dec(sctx->job.nbits) );
int i;
memset( g_work->data, 0, sizeof(g_work->data) );
g_work->data[0] = le32dec( sctx->job.version );
@@ -164,35 +162,35 @@ void veil_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
g_work->data[31] = 0x00000280;
for ( i = 0; i < 8; i++ )
g_work->merkleroothash[7 - i] = be32dec((uint32_t *)merkle_tree + i);
merkleroothash[7 - i] = be32dec((uint32_t *)merkle_tree + i);
for ( i = 0; i < 8; i++ )
g_work->witmerkleroothash[7 - i] = be32dec((uint32_t *)merkle_tree + i);
witmerkleroothash[7 - i] = be32dec((uint32_t *)merkle_tree + i);
for ( i = 0; i < 8; i++ )
g_work->denom10[i] = le32dec((uint32_t *)sctx->job.denom10 + i);
denom10[i] = le32dec((uint32_t *)sctx->job.denom10 + i);
for ( i = 0; i < 8; i++ )
g_work->denom100[i] = le32dec((uint32_t *)sctx->job.denom100 + i);
denom100[i] = le32dec((uint32_t *)sctx->job.denom100 + i);
for ( i = 0; i < 8; i++ )
g_work->denom1000[i] = le32dec((uint32_t *)sctx->job.denom1000 + i);
denom1000[i] = le32dec((uint32_t *)sctx->job.denom1000 + i);
for ( i = 0; i < 8; i++ )
g_work->denom10000[i] = le32dec((uint32_t *)sctx->job.denom10000 + i);
denom10000[i] = le32dec((uint32_t *)sctx->job.denom10000 + i);
uint32_t pofnhash[8];
memset(pofnhash, 0x00, 32);
char denom10_str [ 2 * sizeof( g_work->denom10 ) + 1 ];
char denom100_str [ 2 * sizeof( g_work->denom100 ) + 1 ];
char denom1000_str [ 2 * sizeof( g_work->denom1000 ) + 1 ];
char denom10000_str [ 2 * sizeof( g_work->denom10000 ) + 1 ];
char merkleroot_str [ 2 * sizeof( g_work->merkleroothash ) + 1 ];
char witmerkleroot_str[ 2 * sizeof( g_work->witmerkleroothash ) + 1 ];
char denom10_str [ 2 * sizeof( denom10 ) + 1 ];
char denom100_str [ 2 * sizeof( denom100 ) + 1 ];
char denom1000_str [ 2 * sizeof( denom1000 ) + 1 ];
char denom10000_str [ 2 * sizeof( denom10000 ) + 1 ];
char merkleroot_str [ 2 * sizeof( merkleroothash ) + 1 ];
char witmerkleroot_str[ 2 * sizeof( witmerkleroothash ) + 1 ];
char pofn_str [ 2 * sizeof( pofnhash ) + 1 ];
cbin2hex( denom10_str, (char*) g_work->denom10, 32 );
cbin2hex( denom100_str, (char*) g_work->denom100, 32 );
cbin2hex( denom1000_str, (char*) g_work->denom1000, 32 );
cbin2hex( denom10000_str, (char*) g_work->denom10000, 32 );
cbin2hex( merkleroot_str, (char*) g_work->merkleroothash, 32 );
cbin2hex( witmerkleroot_str, (char*) g_work->witmerkleroothash, 32 );
cbin2hex( denom10_str, (char*) denom10, 32 );
cbin2hex( denom100_str, (char*) denom100, 32 );
cbin2hex( denom1000_str, (char*) denom1000, 32 );
cbin2hex( denom10000_str, (char*) denom10000, 32 );
cbin2hex( merkleroot_str, (char*) merkleroothash, 32 );
cbin2hex( witmerkleroot_str, (char*) witmerkleroothash, 32 );
cbin2hex( pofn_str, (char*) pofnhash, 32 );
if ( true )

View File

@@ -58,7 +58,7 @@ union _sonoa_8way_context_overlay
typedef union _sonoa_8way_context_overlay sonoa_8way_context_overlay;
int sonoa_8way_hash( void *state, const void *input, int thrid )
int sonoa_8way_hash( void *state, const void *input, int thr_id )
{
uint64_t vhash[8*8] __attribute__ ((aligned (128)));
uint64_t vhashA[8*8] __attribute__ ((aligned (64)));
@@ -186,7 +186,7 @@ int sonoa_8way_hash( void *state, const void *input, int thrid )
#endif
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
// 2
bmw512_8way_full( &ctx.bmw, vhash, vhash, 64 );
@@ -302,7 +302,7 @@ int sonoa_8way_hash( void *state, const void *input, int thrid )
hamsi512_8way_update( &ctx.hamsi, vhash, 64 );
hamsi512_8way_close( &ctx.hamsi, vhash );
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
// 3
bmw512_8way_full( &ctx.bmw, vhash, vhash, 64 );
@@ -432,7 +432,7 @@ int sonoa_8way_hash( void *state, const void *input, int thrid )
sph_fugue512_full( &ctx.fugue, hash6, hash6, 64 );
sph_fugue512_full( &ctx.fugue, hash7, hash7, 64 );
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
// 4
intrlv_8x64_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
@@ -630,7 +630,7 @@ int sonoa_8way_hash( void *state, const void *input, int thrid )
#endif
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
// 5
bmw512_8way_full( &ctx.bmw, vhash, vhash, 64 );
@@ -783,7 +783,7 @@ int sonoa_8way_hash( void *state, const void *input, int thrid )
sph_whirlpool512_full( &ctx.whirlpool, hash6, hash6, 64 );
sph_whirlpool512_full( &ctx.whirlpool, hash7, hash7, 64 );
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
// 6
intrlv_8x64_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
@@ -952,7 +952,7 @@ int sonoa_8way_hash( void *state, const void *input, int thrid )
sph_whirlpool512_full( &ctx.whirlpool, hash6, hash6, 64 );
sph_whirlpool512_full( &ctx.whirlpool, hash7, hash7, 64 );
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
// 7
intrlv_8x64_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
@@ -1117,49 +1117,6 @@ int sonoa_8way_hash( void *state, const void *input, int thrid )
return 1;
}
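With the thrid parameter renamed to thr_id throughout, the hash functions keep polling work_restart[thr_id].restart between algorithm stages and return 0 to abandon an interrupted hash; the caller treats a zero return as "no hash produced" and skips the lane checks, as the per-algo scanhash below (deleted by this commit) still shows. A stripped-down sketch of that contract, where stage_a, stage_b and check_lanes_and_submit are illustrative placeholders for the real algorithm chain and submission logic:

// Returns 1 on completion, 0 if an external restart was requested mid-way.
int example_hash( void *state, const void *input, int thr_id )
{
   stage_a( state, input );
   if ( work_restart[thr_id].restart ) return 0;   // abandoned: output is incomplete
   stage_b( state, state );
   return 1;
}

// Caller: only inspect the output when the hash actually finished.
if ( example_hash( hash, vdata, thr_id ) )
   check_lanes_and_submit( hash );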
int scanhash_sonoa_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*16] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hashd7 = &(hash[7<<3]);
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
__m512i *noncev = (__m512i*)vdata + 9; // aligned
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const uint32_t targ32 = ptarget[7];
mm512_bswap32_intrlv80_8x64( vdata, pdata );
*noncev = mm512_intrlv_blend_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
if ( sonoa_8way_hash( hash, vdata, thr_id ) )
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( hashd7[ lane ] <= targ32 ) )
{
extr_lane_8x32( lane_hash, hash, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) && !opt_benchmark ) )
{
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm512_add_epi32( *noncev,
m512_const1_64( 0x0000000800000000 ) );
n += 8;
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#elif defined(SONOA_4WAY)
@@ -1186,7 +1143,7 @@ union _sonoa_4way_context_overlay
typedef union _sonoa_4way_context_overlay sonoa_4way_context_overlay;
int sonoa_4way_hash( void *state, const void *input, int thrid )
int sonoa_4way_hash( void *state, const void *input, int thr_id )
{
uint64_t hash0[8] __attribute__ ((aligned (64)));
uint64_t hash1[8] __attribute__ ((aligned (64)));
@@ -1250,7 +1207,7 @@ int sonoa_4way_hash( void *state, const void *input, int thrid )
echo_full( &ctx.echo, (BitSequence *)hash3, 512,
(const BitSequence *)hash3, 64 );
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
// 2
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
@@ -1310,7 +1267,7 @@ int sonoa_4way_hash( void *state, const void *input, int thrid )
hamsi512_4way_update( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
// 3
bmw512_4way_init( &ctx.bmw );
@@ -1375,7 +1332,7 @@ int sonoa_4way_hash( void *state, const void *input, int thrid )
sph_fugue512_full( &ctx.fugue, hash2, hash2, 64 );
sph_fugue512_full( &ctx.fugue, hash3, hash3, 64 );
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
// 4
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
@@ -1472,7 +1429,7 @@ int sonoa_4way_hash( void *state, const void *input, int thrid )
shavite512_2way_init( &ctx.shavite );
shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 );
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
// 5
rintrlv_2x128_4x64( vhash, vhashA, vhashB, 512 );
@@ -1557,7 +1514,7 @@ int sonoa_4way_hash( void *state, const void *input, int thrid )
sph_whirlpool512_full( &ctx.whirlpool, hash2, hash2, 64 );
sph_whirlpool512_full( &ctx.whirlpool, hash3, hash3, 64 );
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
// 6
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
@@ -1650,7 +1607,7 @@ int sonoa_4way_hash( void *state, const void *input, int thrid )
sph_whirlpool512_full( &ctx.whirlpool, hash2, hash2, 64 );
sph_whirlpool512_full( &ctx.whirlpool, hash3, hash3, 64 );
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
// 7
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
@@ -1745,46 +1702,4 @@ int sonoa_4way_hash( void *state, const void *input, int thrid )
return 1;
}
int scanhash_sonoa_4way( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*16] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hashd7 = &( hash[7<<2] );
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
const uint32_t targ32 = ptarget[7];
uint32_t n = first_nonce;
__m256i *noncev = (__m256i*)vdata + 9;
const int thr_id = mythr->id;
mm256_bswap32_intrlv80_4x64( vdata, pdata );
*noncev = mm256_intrlv_blend_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
if ( sonoa_4way_hash( hash, vdata, thr_id ) )
for ( int lane = 0; lane < 4; lane++ )
if ( unlikely( hashd7[ lane ] <= targ32 ) )
{
extr_lane_4x32( lane_hash, hash, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) && !opt_benchmark ) )
{
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm256_add_epi32( *noncev,
m256_const1_64( 0x0000000400000000 ) );
n += 4;
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#endif

View File

@@ -3,14 +3,13 @@
bool register_sonoa_algo( algo_gate_t* gate )
{
#if defined (SONOA_8WAY)
gate->scanhash = (void*)&scanhash_sonoa_8way;
gate->scanhash = (void*)&scanhash_8way_64in_32out;
gate->hash = (void*)&sonoa_8way_hash;
#elif defined (SONOA_4WAY)
gate->scanhash = (void*)&scanhash_sonoa_4way;
gate->scanhash = (void*)&scanhash_4way_64in_32out;
gate->hash = (void*)&sonoa_4way_hash;
#else
init_sonoa_ctx();
// gate->scanhash = (void*)&scanhash_sonoa;
gate->hash = (void*)&sonoa_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT | VAES_OPT;
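register_sonoa_algo (and the x17 and xevan gates later in this commit) now point gate->scanhash at the shared scanhash_8way_64in_32out / scanhash_4way_64in_32out drivers instead of per-algorithm copies, which is what allows the deletions above. Judging from the removed scanhash_sonoa_4way body, the shared 4-way driver presumably has roughly this shape; this is a hedged reconstruction, not the actual implementation, and it assumes algo_gate.hash is the registered hash function with the new (state, input, thr_id) signature:

int scanhash_4way_64in_32out( struct work *work, uint32_t max_nonce,
                              uint64_t *hashes_done, struct thr_info *mythr )
{
   uint32_t hash[4*16] __attribute__ ((aligned (64)));
   uint32_t vdata[20*4] __attribute__ ((aligned (64)));
   uint32_t lane_hash[8] __attribute__ ((aligned (32)));
   uint32_t *hashd7 = &( hash[7<<2] );      // per-lane copies of hash word 7
   uint32_t *pdata = work->data;
   const uint32_t *ptarget = work->target;
   const uint32_t first_nonce = pdata[19];
   const uint32_t last_nonce = max_nonce - 4;
   const uint32_t targ32 = ptarget[7];
   uint32_t n = first_nonce;
   __m256i *noncev = (__m256i*)vdata + 9;
   const int thr_id = mythr->id;

   mm256_bswap32_intrlv80_4x64( vdata, pdata );
   *noncev = mm256_intrlv_blend_32(
                 _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
   do
   {
      // The hash function returns 0 when a restart interrupted it mid-chain.
      if ( algo_gate.hash( hash, vdata, thr_id ) )
         for ( int lane = 0; lane < 4; lane++ )
            if ( unlikely( hashd7[ lane ] <= targ32 ) )
            {
               extr_lane_4x32( lane_hash, hash, lane, 256 );
               if ( likely( valid_hash( lane_hash, ptarget ) && !opt_benchmark ) )
               {
                  pdata[19] = bswap_32( n + lane );
                  submit_solution( work, lane_hash, mythr );
               }
            }
      *noncev = _mm256_add_epi32( *noncev,
                                  m256_const1_64( 0x0000000400000000 ) );
      n += 4;
   } while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
   pdata[19] = n;
   *hashes_done = n - first_nonce;
   return 0;
}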

View File

@@ -14,21 +14,15 @@ bool register_sonoa_algo( algo_gate_t* gate );
#if defined(SONOA_8WAY)
int sonoa_8way_hash( void *state, const void *input, int thrid );
int scanhash_sonoa_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
int sonoa_8way_hash( void *state, const void *input, int thr_id );
#elif defined(SONOA_4WAY)
int sonoa_4way_hash( void *state, const void *input, int thrid );
int scanhash_sonoa_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
int sonoa_4way_hash( void *state, const void *input, int thr_id );
#else
int sonoa_hash( void *state, const void *input, int thrid );
int scanhash_sonoa( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
int sonoa_hash( void *state, const void *input, int thr_id );
void init_sonoa_ctx();
#endif

View File

@@ -83,7 +83,7 @@ void init_sonoa_ctx()
sph_haval256_5_init(&sonoa_ctx.haval);
};
int sonoa_hash( void *state, const void *input, int thrid )
int sonoa_hash( void *state, const void *input, int thr_id )
{
uint8_t hash[128] __attribute__ ((aligned (64)));
sonoa_ctx_holder ctx __attribute__ ((aligned (64)));
@@ -132,7 +132,7 @@ int sonoa_hash( void *state, const void *input, int thrid )
sph_echo512_close(&ctx.echo, hash);
#endif
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
//
sph_bmw512_init( &ctx.bmw);
@@ -190,7 +190,7 @@ int sonoa_hash( void *state, const void *input, int thrid )
sph_hamsi512(&ctx.hamsi, hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash);
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
//
sph_bmw512_init( &ctx.bmw);
@@ -252,7 +252,7 @@ int sonoa_hash( void *state, const void *input, int thrid )
sph_fugue512(&ctx.fugue, hash, 64);
sph_fugue512_close(&ctx.fugue, hash);
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
//
sph_bmw512_init( &ctx.bmw);
@@ -336,7 +336,7 @@ int sonoa_hash( void *state, const void *input, int thrid )
sph_shavite512(&ctx.shavite, hash, 64);
sph_shavite512_close(&ctx.shavite, hash);
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
//
sph_bmw512_init( &ctx.bmw);
@@ -410,7 +410,7 @@ int sonoa_hash( void *state, const void *input, int thrid )
sph_whirlpool(&ctx.whirlpool, hash, 64);
sph_whirlpool_close(&ctx.whirlpool, hash);
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
//
sph_bmw512_init( &ctx.bmw);
sph_bmw512(&ctx.bmw, hash, 64);
@@ -487,7 +487,7 @@ int sonoa_hash( void *state, const void *input, int thrid )
sph_whirlpool(&ctx.whirlpool, hash, 64);
sph_whirlpool_close(&ctx.whirlpool, hash);
if ( work_restart[thrid].restart ) return 0;
if ( work_restart[thr_id].restart ) return 0;
//
sph_bmw512_init( &ctx.bmw);

View File

@@ -57,7 +57,7 @@ union _x17_8way_context_overlay
} __attribute__ ((aligned (64)));
typedef union _x17_8way_context_overlay x17_8way_context_overlay;
int x17_8way_hash( void *state, const void *input )
int x17_8way_hash( void *state, const void *input, int thr_id )
{
uint64_t vhash[8*8] __attribute__ ((aligned (128)));
uint64_t vhashA[8*8] __attribute__ ((aligned (64)));
@@ -234,50 +234,6 @@ int x17_8way_hash( void *state, const void *input )
return 1;
}
int scanhash_x17_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash32[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash32_d7 = &(hash32[7*8]);
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
__m512i *noncev = (__m512i*)vdata + 9;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const uint32_t targ32_d7 = ptarget[7];
const bool bench = opt_benchmark;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
*noncev = mm512_intrlv_blend_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
x17_8way_hash( hash32, vdata );
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( ( hash32_d7[ lane ] <= targ32_d7 ) && !bench ) )
{
extr_lane_8x32( lane_hash, hash32, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) ) )
{
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm512_add_epi32( *noncev,
m512_const1_64( 0x0000000800000000 ) );
n += 8;
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#elif defined(X17_4WAY)
union _x17_4way_context_overlay
@@ -302,7 +258,7 @@ union _x17_4way_context_overlay
};
typedef union _x17_4way_context_overlay x17_4way_context_overlay;
int x17_4way_hash( void *state, const void *input )
int x17_4way_hash( void *state, const void *input, int thr_id )
{
uint64_t vhash[8*4] __attribute__ ((aligned (64)));
uint64_t vhashA[8*4] __attribute__ ((aligned (64)));
@@ -405,47 +361,4 @@ int x17_4way_hash( void *state, const void *input )
return 1;
}
int scanhash_x17_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash32[8*4] __attribute__ ((aligned (64)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash32_d7 = &(hash32[ 7*4 ]);
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
__m256i *noncev = (__m256i*)vdata + 9;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const uint32_t targ32_d7 = ptarget[7];
const bool bench = opt_benchmark;
mm256_bswap32_intrlv80_4x64( vdata, pdata );
*noncev = mm256_intrlv_blend_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
x17_4way_hash( hash32, vdata );
for ( int lane = 0; lane < 4; lane++ )
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 && !bench ) )
{
extr_lane_4x32( lane_hash, hash32, lane, 256 );
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm256_add_epi32( *noncev,
m256_const1_64( 0x0000000400000000 ) );
n += 4;
} while ( likely( ( n <= last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#endif

View File

@@ -3,10 +3,10 @@
bool register_x17_algo( algo_gate_t* gate )
{
#if defined (X17_8WAY)
gate->scanhash = (void*)&scanhash_x17_8way;
gate->scanhash = (void*)&scanhash_8way_64in_32out;
gate->hash = (void*)&x17_8way_hash;
#elif defined (X17_4WAY)
gate->scanhash = (void*)&scanhash_x17_4way;
gate->scanhash = (void*)&scanhash_4way_64in_32out;
gate->hash = (void*)&x17_4way_hash;
#else
gate->hash = (void*)&x17_hash;

View File

@@ -14,14 +14,11 @@ bool register_x17_algo( algo_gate_t* gate );
#if defined(X17_8WAY)
int x17_8way_hash( void *state, const void *input );
int scanhash_x17_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
int x17_8way_hash( void *state, const void *input, int thr_id );
#elif defined(X17_4WAY)
int x17_4way_hash( void *state, const void *input );
int scanhash_x17_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
int x17_4way_hash( void *state, const void *input, int thr_id );
#endif

View File

@@ -57,7 +57,7 @@ union _xevan_8way_context_overlay
} __attribute__ ((aligned (64)));
typedef union _xevan_8way_context_overlay xevan_8way_context_overlay;
int xevan_8way_hash( void *output, const void *input )
int xevan_8way_hash( void *output, const void *input, int thr_id )
{
uint64_t vhash[16<<3] __attribute__ ((aligned (128)));
uint64_t vhashA[16<<3] __attribute__ ((aligned (64)));
@@ -399,50 +399,6 @@ int xevan_8way_hash( void *output, const void *input )
return 1;
}
int scanhash_xevan_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hashd7 = &(hash[7*8]);
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
__m512i *noncev = (__m512i*)vdata + 9;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const uint32_t targ32 = ptarget[7];
const bool bench = opt_benchmark;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
*noncev = mm512_intrlv_blend_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
xevan_8way_hash( hash, vdata );
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( ( hashd7[ lane ] <= targ32 ) && !bench ) )
{
extr_lane_8x32( lane_hash, hash, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) ) )
{
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm512_add_epi32( *noncev,
m512_const1_64( 0x0000000800000000 ) );
n += 8;
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#elif defined(XEVAN_4WAY)
union _xevan_4way_context_overlay
@@ -467,7 +423,7 @@ union _xevan_4way_context_overlay
};
typedef union _xevan_4way_context_overlay xevan_4way_context_overlay;
int xevan_4way_hash( void *output, const void *input )
int xevan_4way_hash( void *output, const void *input, int thr_id )
{
uint64_t hash0[16] __attribute__ ((aligned (64)));
uint64_t hash1[16] __attribute__ ((aligned (64)));
@@ -672,47 +628,4 @@ int xevan_4way_hash( void *output, const void *input )
return 1;
}
int scanhash_xevan_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[16*4] __attribute__ ((aligned (128)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hashd7 = &(hash[7<<2]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id;
__m256i *noncev = (__m256i*)vdata + 9;
const uint32_t targ32 = ptarget[7];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
uint32_t n = first_nonce;
const bool bench = opt_benchmark;
if ( bench ) ptarget[7] = 0x0cff;
mm256_bswap32_intrlv80_4x64( vdata, pdata );
*noncev = mm256_intrlv_blend_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do {
xevan_4way_hash( hash, vdata );
for ( int lane = 0; lane < 4; lane++ )
if ( unlikely( hashd7[ lane ] <= targ32 ) && ! bench )
{
extr_lane_4x32( lane_hash, hash, lane, 256 );
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm256_add_epi32( *noncev,
m256_const1_64( 0x0000000400000000 ) );
n += 4;
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#endif

View File

@@ -3,10 +3,10 @@
bool register_xevan_algo( algo_gate_t* gate )
{
#if defined (XEVAN_8WAY)
gate->scanhash = (void*)&scanhash_xevan_8way;
gate->scanhash = (void*)&scanhash_8way_64in_32out;
gate->hash = (void*)&xevan_8way_hash;
#elif defined (XEVAN_4WAY)
gate->scanhash = (void*)&scanhash_xevan_4way;
gate->scanhash = (void*)&scanhash_4way_64in_32out;
gate->hash = (void*)&xevan_4way_hash;
#else
init_xevan_ctx();

View File

@@ -14,16 +14,11 @@ bool register_xevan_algo( algo_gate_t* gate );
#if defined(XEVAN_8WAY)
int xevan_8way_hash( void *state, const void *input );
int scanhash_xevan_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
int xevan_8way_hash( void *state, const void *input, int thr_id );
#elif defined(XEVAN_4WAY)
int xevan_4way_hash( void *state, const void *input );
int scanhash_xevan_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
//void init_xevan_4way_ctx();
int xevan_4way_hash( void *state, const void *input, int thr_id );
#else