mirror of https://github.com/JayDDee/cpuminer-opt.git
v3.9.4
@@ -48,8 +48,8 @@ void anime_4way_hash( void *state, const void *input )
__m256i* vhA = (__m256i*)vhashA;
__m256i* vhB = (__m256i*)vhashB;
__m256i vh_mask;
const uint32_t mask = 8;
const __m256i bit3_mask = _mm256_set1_epi64x( 8 );
int i;
anime_4way_ctx_holder ctx;
memcpy( &ctx, &anime_4way_ctx, sizeof(anime_4way_ctx) );

@@ -62,27 +62,44 @@ void anime_4way_hash( void *state, const void *input )
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ),
m256_zero );

mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(char*)hash0, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash1,
(char*)hash1, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash2,
(char*)hash2, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(char*)hash3, 512 );
mm256_interleave_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

skein512_4way( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhashB );
if ( hash0[0] & mask )
{
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(char*)hash0, 512 );
}
if ( hash1[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash1,
(char*)hash1, 512 );
}
if ( hash2[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash2,
(char*)hash2, 512 );
}
if ( hash3[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(char*)hash3, 512 );
}

for ( i = 0; i < 8; i++ )
vh[i] = _mm256_blendv_epi8( vhA[i], vhB[i], vh_mask );
mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );

if ( mm256_anybits0( vh_mask ) )
{
skein512_4way( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhashB );
}

mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );

mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
reinit_groestl( &ctx.groestl );
@@ -91,7 +108,8 @@ void anime_4way_hash( void *state, const void *input )
update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhash );
@@ -99,16 +117,20 @@ void anime_4way_hash( void *state, const void *input )
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ),
m256_zero );

if ( mm256_anybits1( vh_mask ) )
{
blake512_4way_init( &ctx.blake );
blake512_4way( &ctx.blake, vhash, 64 );
blake512_4way_close( &ctx.blake, vhashA );

}
if ( mm256_anybits0( vh_mask ) )
{
bmw512_4way_init( &ctx.bmw );
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhashB );
}

for ( i = 0; i < 8; i++ )
vh[i] = _mm256_blendv_epi8( vhA[i], vhB[i], vh_mask );
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );

keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
@@ -120,33 +142,35 @@ void anime_4way_hash( void *state, const void *input )
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ),
m256_zero );

keccak512_4way_init( &ctx.keccak );
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhashA );
if ( mm256_anybits1( vh_mask ) )
{
keccak512_4way_init( &ctx.keccak );
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhashA );
}
if ( mm256_anybits0( vh_mask ) )
{
jh512_4way_init( &ctx.jh );
jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhashB );
}

jh512_4way_init( &ctx.jh );
jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhashB );
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );

for ( i = 0; i < 8; i++ )
vh[i] = _mm256_blendv_epi8( vhA[i], vhB[i], vh_mask );

mm256_deinterleave_4x64( state, state+32, state+64, state+96, vhash, 256 );
mm256_dintrlv_4x64( state, state+32, state+64, state+96, vhash, 256 );
}

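Note on the selection pattern used throughout this hunk: vh_mask is built once per fork and then drives a byte-wise blend between the two candidate results. A minimal self-contained sketch of the idea (illustrative only; blend_by_bit3 is a hypothetical helper, not part of cpuminer-opt, and it is assumed to behave like the mm256_blend_hash_4x64()/blendv loop above):

#include <immintrin.h>

// Select, per 64-bit lane, between candidate hashes a and b depending on
// bit 3 of that lane's first 64-bit word in h0.
static inline void blend_by_bit3( __m256i *dst, const __m256i *a,
                                  const __m256i *b, __m256i h0 )
{
    const __m256i bit3 = _mm256_set1_epi64x( 8 );
    // A lane is all-ones when bit 3 is clear, all-zeros when it is set.
    const __m256i m = _mm256_cmpeq_epi64( _mm256_and_si256( h0, bit3 ),
                                          _mm256_setzero_si256() );
    // blendv takes b where the mask byte's high bit is set, a otherwise,
    // so lanes with bit 3 clear receive the b candidate.
    for ( int i = 0; i < 8; i++ )
        dst[i] = _mm256_blendv_epi8( a[i], b[i], m );
}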
int scanhash_anime_4way( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done)
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*8] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
uint32_t *nonces = work->nonces;
int num_found = 0;
uint32_t *noncep = vdata + 73; // 9*8 + 1
__m256i *noncev = (__m256i*)vdata + 9; // aligned
/* int */ thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t Htarg = ptarget[7];
uint64_t htmax[] = {
0,
@@ -165,10 +189,7 @@ int scanhash_anime_4way( int thr_id, struct work *work, uint32_t max_nonce,
0
};

swab32_array( endiandata, pdata, 20 );

uint64_t *edata = (uint64_t*)endiandata;
mm256_interleave_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
mm256_bswap_intrlv80_4x64( vdata, pdata );

for (int m=0; m < 6; m++)
if (Htarg <= htmax[m])
@@ -177,30 +198,26 @@ int scanhash_anime_4way( int thr_id, struct work *work, uint32_t max_nonce,

do
{
be32enc( noncep, n );
be32enc( noncep+2, n+1 );
be32enc( noncep+4, n+2 );
be32enc( noncep+6, n+3 );
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );

anime_4way_hash( hash, vdata );
pdata[19] = n;

for ( int i = 0; i < 4; i++ )
if ( ( ( (hash+(i<<3))[7] & mask ) == 0 )
&& fulltest( hash+(i<<3), ptarget ) )
&& fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;
nonces[ num_found++ ] = n+i;
work_set_target_ratio( work, hash+(i<<3) );
submit_solution( work, hash+(i<<3), mythr, i );
}
n += 4;
} while ( ( num_found == 0 ) && ( n < max_nonce )
&& !work_restart[thr_id].restart );
} while ( ( n < max_nonce ) && !work_restart[thr_id].restart );
break;
}

*hashes_done = n - first_nonce + 1;
return num_found;
return 0;
}

#endif

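The vector nonce update above replaces the four be32enc() stores through noncep. For reference, a scalar sketch of what it writes (illustrative only; be32store and set_lane_nonces are hypothetical names, and the index arithmetic assumes the 4x64 interleaved layout implied by the noncep = vdata + 73 comment):

#include <stdint.h>

// Store a 32-bit value big-endian, like be32enc().
static inline void be32store( uint32_t *p, uint32_t x )
{
    uint8_t *b = (uint8_t*)p;
    b[0] = x >> 24; b[1] = x >> 16; b[2] = x >> 8; b[3] = x;
}

// In 4x64 interleaved data, 64-bit word w of lane l sits at index w*4 + l,
// so lane l's 32-bit nonce (upper half of the header word-pair 18/19,
// i.e. 64-bit word 9) is uint32_t index (9*4 + l)*2 + 1 = 73 + 2*l.
static inline void set_lane_nonces( uint32_t *vdata, uint32_t n )
{
    for ( uint32_t l = 0; l < 4; l++ )
        be32store( vdata + 73 + 2*l, n + l );
}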
@@ -13,19 +13,15 @@ bool register_anime_algo( algo_gate_t* gate );
#if defined(ANIME_4WAY)

void anime_4way_hash( void *state, const void *input );

int scanhash_anime_4way( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done );

uint64_t *hashes_done, struct thr_info *mythr );
void init_anime_4way_ctx();

#endif

void anime_hash( void *state, const void *input );

int scanhash_anime( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done );

uint64_t *hashes_done, struct thr_info *mythr );
void init_anime_ctx();

#endif

@@ -46,20 +46,6 @@ void init_anime_ctx()
void anime_hash( void *state, const void *input )
{
unsigned char hash[128] __attribute__ ((aligned (32)));
/*
uint64_t hash0[8] __attribute__ ((aligned (64)));
uint64_t hash1[8] __attribute__ ((aligned (64)));
uint64_t hash2[8] __attribute__ ((aligned (64)));
uint64_t hash3[8] __attribute__ ((aligned (64)));
uint64_t vhash[8*4] __attribute__ ((aligned (64)));
uint64_t vhashA[8*4] __attribute__ ((aligned (64)));
uint64_t vhashB[8*4] __attribute__ ((aligned (64)));
__m256i* vh = (__m256i*)vhash;
__m256i* vhA = (__m256i*)vhashA;
__m256i* vhB = (__m256i*)vhashB;
__m256i vh_mask;
__m256i bit3_mask; bit3_mask = _mm256_set1_epi64x( 8 );
*/
uint32_t mask = 8;
anime_ctx_holder ctx;
memcpy( &ctx, &anime_ctx, sizeof(anime_ctx) );
@@ -134,7 +120,7 @@ void anime_hash( void *state, const void *input )
}

int scanhash_anime( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done)
uint64_t *hashes_done, struct thr_info *mythr)
{
uint32_t hash[8] __attribute__ ((aligned (64)));
uint32_t endiandata[20] __attribute__((aligned(64)));
@@ -142,6 +128,7 @@ int scanhash_anime( int thr_id, struct work *work, uint32_t max_nonce,
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
/* int */ thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t Htarg = ptarget[7];
uint64_t htmax[] = {
0,

algo/quark/hmq1725-4way.c (new file, 618 lines)
@@ -0,0 +1,618 @@
#include "hmq1725-gate.h"
|
||||
|
||||
#if defined(HMQ1725_4WAY)
|
||||
|
||||
#include <string.h>
|
||||
#include <stdint.h>
|
||||
#include "algo/blake/blake-hash-4way.h"
|
||||
#include "algo/bmw/bmw-hash-4way.h"
|
||||
#include "algo/groestl/aes_ni/hash-groestl.h"
|
||||
#include "algo/skein/skein-hash-4way.h"
|
||||
#include "algo/jh/jh-hash-4way.h"
|
||||
#include "algo/keccak/keccak-hash-4way.h"
|
||||
#include "algo/luffa/luffa_for_sse2.h"
|
||||
#include "algo/cubehash/cubehash_sse2.h"
|
||||
#include "algo/simd/nist.h"
|
||||
#include "algo/shavite/sph_shavite.h"
|
||||
#include "algo/simd/simd-hash-2way.h"
|
||||
#include "algo/echo/aes_ni/hash_api.h"
|
||||
#include "algo/hamsi/hamsi-hash-4way.h"
|
||||
#include "algo/fugue/sph_fugue.h"
|
||||
#include "algo/shabal/shabal-hash-4way.h"
|
||||
#include "algo/whirlpool/sph_whirlpool.h"
|
||||
#include "algo/haval/haval-hash-4way.h"
|
||||
#include "algo/sha/sha2-hash-4way.h"
|
||||
|
||||
union _hmq1725_4way_context_overlay
|
||||
{
|
||||
blake512_4way_context blake;
|
||||
bmw512_4way_context bmw;
|
||||
hashState_groestl groestl;
|
||||
skein512_4way_context skein;
|
||||
jh512_4way_context jh;
|
||||
keccak512_4way_context keccak;
|
||||
hashState_luffa luffa;
|
||||
cubehashParam cube;
|
||||
sph_shavite512_context shavite;
|
||||
hashState_sd simd;
|
||||
hashState_echo echo;
|
||||
hamsi512_4way_context hamsi;
|
||||
sph_fugue512_context fugue;
|
||||
shabal512_4way_context shabal;
|
||||
sph_whirlpool_context whirlpool;
|
||||
sha512_4way_context sha512;
|
||||
haval256_5_4way_context haval;
|
||||
};
|
||||
typedef union _hmq1725_4way_context_overlay hmq1725_4way_context_overlay;
|
||||
|
||||
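// Note: the union overlays all of the per-stage contexts in one aligned
// block. Only one stage's context is live at any time -- each *_init()
// call in hmq1725_4way_hash() recreates the state it needs -- so the
// working set is the size of the largest member rather than their sum.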
extern void hmq1725_4way_hash(void *state, const void *input)
{
// why so big? only really need 8, haval thing uses 16.
uint32_t hash0 [32] __attribute__ ((aligned (64)));
uint32_t hash1 [32] __attribute__ ((aligned (64)));
uint32_t hash2 [32] __attribute__ ((aligned (64)));
uint32_t hash3 [32] __attribute__ ((aligned (64)));
uint32_t vhash [32<<2] __attribute__ ((aligned (64)));
uint32_t vhashA[32<<2] __attribute__ ((aligned (64)));
uint32_t vhashB[32<<2] __attribute__ ((aligned (64)));
hmq1725_4way_context_overlay ctx __attribute__ ((aligned (64)));
__m256i vh_mask;
const __m256i vmask = _mm256_set1_epi64x( 24 );
const uint32_t mask = 24;
__m256i* vh = (__m256i*)vhash;
__m256i* vhA = (__m256i*)vhashA;
__m256i* vhB = (__m256i*)vhashB;

bmw512_4way_init( &ctx.bmw );
bmw512_4way( &ctx.bmw, input, 80 );
bmw512_4way_close( &ctx.bmw, vhash );

mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash0, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash0 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash1, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash1 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash2, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash2 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash3, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash3 );

// first fork, A is groestl serial, B is skein parallel.

mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], vmask ),
m256_zero );

// A

// if ( hash0[0] & mask )
// {
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(char*)hash0, 512 );
// }
// if ( hash1[0] & mask )
// {
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash1,
(char*)hash1, 512 );
// }
// if ( hash2[0] & mask )
// {
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash2,
(char*)hash2, 512 );
// }
// if ( hash3[0] & mask )
// {
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(char*)hash3, 512 );
// }

mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );

// B

// if ( mm256_any_clr_256( vh_mask ) )
// {
skein512_4way_init( &ctx.skein );
skein512_4way( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhashB );
// }

mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );

jh512_4way_init( &ctx.jh );
jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhash );

keccak512_4way_init( &ctx.keccak );
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );

// second fork, A = blake parallel, B= bmw parallel.

vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], vmask ),
m256_zero );

// if ( mm256_any_set_256( vh_mask ) )
// {
blake512_4way_init( &ctx.blake );
blake512_4way( &ctx.blake, vhash, 64 );
blake512_4way_close( &ctx.blake, vhashA );
// }

// if ( mm256_any_clr_256( vh_mask ) )
// {
bmw512_4way_init( &ctx.bmw );
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhashB );
// }

mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );

mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash0,
(const BitSequence*)hash0, 64 );
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash1,
(const BitSequence*)hash1, 64 );
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash2,
(const BitSequence*)hash2, 64 );
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash3,
(const BitSequence*)hash3, 64 );

cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (BitSequence *)hash0,
(const BitSequence *)hash0, 64 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (BitSequence *)hash1,
(const BitSequence *)hash1, 64 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (BitSequence *)hash2,
(const BitSequence *)hash2, 64 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (BitSequence *)hash3,
(const BitSequence *)hash3, 64 );

mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

// A= keccak parallel, B= jh parallel

vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], vmask ),
m256_zero );

// if ( mm256_any_set_256( vh_mask ) )
// {
keccak512_4way_init( &ctx.keccak );
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhashA );
// }

// if ( mm256_any_clr_256( vh_mask ) )
// {
jh512_4way_init( &ctx.jh );
jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhashB );
// }

mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );

mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

sph_shavite512_init( &ctx.shavite );
sph_shavite512 ( &ctx.shavite, hash0, 64 );
sph_shavite512_close( &ctx.shavite, hash0 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512 ( &ctx.shavite, hash1, 64 );
sph_shavite512_close( &ctx.shavite, hash1 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512 ( &ctx.shavite, hash2, 64 );
sph_shavite512_close( &ctx.shavite, hash2 );
sph_shavite512_init( &ctx.shavite );
sph_shavite512 ( &ctx.shavite, hash3, 64 );
sph_shavite512_close( &ctx.shavite, hash3 );

init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash0,
(const BitSequence *)hash0, 512 );
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash1,
(const BitSequence *)hash1, 512 );
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash2,
(const BitSequence *)hash2, 512 );
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash3,
(const BitSequence *)hash3, 512 );

// A is whirlpool serial, B is haval parallel.

mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], vmask ),
m256_zero );
// A

// if ( hash0[0] & mask )
// {
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash0, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash0 );
// }
// if ( hash1[0] & mask )
// {
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash1, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash1 );
// }
// if ( hash2[0] & mask )
// {
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash2, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash2 );
// }
// if ( hash3[0] & mask )
// {
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash3, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash3 );
// }

mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );

// B

// if ( mm256_any_clr_256( vh_mask ) )
// {
haval256_5_4way_init( &ctx.haval );
haval256_5_4way( &ctx.haval, vhash, 64 );
haval256_5_4way_close( &ctx.haval, vhashB );
memset( &vhashB[8<<2], 0, 32<<2);
// }

mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );

mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
(const BitSequence *)hash0, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash1,
(const BitSequence *)hash1, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash2,
(const BitSequence *)hash2, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *)hash3, 512 );

mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

blake512_4way_init( &ctx.blake );
blake512_4way( &ctx.blake, vhash, 64 );
blake512_4way_close( &ctx.blake, vhash );

mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

// shavite & luffa, both serial, select individually.

if ( hash0[0] & mask )
{
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, hash0, 64 ); //
sph_shavite512_close( &ctx.shavite, hash0 ); //8
}
else
{
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence *)hash0,
(const BitSequence *)hash0, 64 );
}

if ( hash1[0] & mask )
{
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, hash1, 64 ); //
sph_shavite512_close( &ctx.shavite, hash1 ); //8
}
else
{
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence *)hash1,
(const BitSequence *)hash1, 64 );
}

if ( hash2[0] & mask )
{
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, hash2, 64 ); //
sph_shavite512_close( &ctx.shavite, hash2 ); //8
}
else
{
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence *)hash2,
(const BitSequence *)hash2, 64 );
}

if ( hash3[0] & mask )
{
sph_shavite512_init( &ctx.shavite );
sph_shavite512( &ctx.shavite, hash3, 64 ); //
sph_shavite512_close( &ctx.shavite, hash3 ); //8
}
else
{
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence *)hash3,
(const BitSequence *)hash3, 64 );
}

mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );

mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );

// A echo, B sd both serial

if ( hash0[0] & mask ) //4
{
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
(const BitSequence *)hash0, 512 );
}
else
{
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash0,
(const BitSequence *)hash0, 512 );
}

if ( hash1[0] & mask ) //4
{
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash1,
(const BitSequence *)hash1, 512 );
}
else
{
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash1,
(const BitSequence *)hash1, 512 );
}

if ( hash2[0] & mask ) //4
{
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash2,
(const BitSequence *)hash2, 512 );
}
else
{
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash2,
(const BitSequence *)hash2, 512 );
}

if ( hash3[0] & mask ) //4
{
init_echo( &ctx.echo, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *)hash3, 512 );
}
else
{
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence *)hash3,
(const BitSequence *)hash3, 512 );
}

mm128_intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );

shabal512_4way_init( &ctx.shabal );
shabal512_4way( &ctx.shabal, vhash, 64 );
shabal512_4way_close( &ctx.shabal, vhash );

mm128_dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );

sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash0, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash0 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash1, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash1 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash2, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash2 );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash3, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash3 );

// A = fugue serial, B = sha512 parallel

mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], vmask ),
m256_zero );

// if ( hash0[0] & mask )
// {
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
// }
// if ( hash1[0] & mask )
// {
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
// }
// if ( hash2[0] & mask )
// {
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
// }
// if ( hash3[0] & mask )
// {
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
// }

mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );

// if ( mm256_any_clr_256( vh_mask ) )
// {
sha512_4way_init( &ctx.sha512 );
sha512_4way( &ctx.sha512, vhash, 64 );
sha512_4way_close( &ctx.sha512, vhashB );
// }

mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );

mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );

mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

sha512_4way_init( &ctx.sha512 );
sha512_4way( &ctx.sha512, vhash, 64 );
sha512_4way_close( &ctx.sha512, vhash );

// A = haval parallel, B = Whirlpool serial

vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], vmask ),
m256_zero );

mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

// if ( mm256_any_set_256( vh_mask ) ) //4
// {
haval256_5_4way_init( &ctx.haval );
haval256_5_4way( &ctx.haval, vhash, 64 );
haval256_5_4way_close( &ctx.haval, vhashA );
memset( &vhashA[8<<2], 0, 32<<2 );
// }

// if ( !( hash0[0] & mask ) )
// {
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash0, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash0 );
// }
// if ( !( hash1[0] & mask ) )
// {
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash1, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash1 );
// }
// if ( !( hash2[0] & mask ) )
// {
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash2, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash2 );
// }
// if ( !( hash3[0] & mask ) )
// {
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, hash3, 64 );
sph_whirlpool_close( &ctx.whirlpool, hash3 );
// }

mm256_intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, 512 );

mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );

bmw512_4way_init( &ctx.bmw );
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );

memcpy(state, vhash, 32<<2 );
}

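// Note: unlike the scalar hmq1725hash() in hmq1725.c later in this commit,
// which picks one branch per hash with if/else, this 4-way routine generally
// computes both candidate stages for all four lanes and then selects per
// lane with mm256_blend_hash_4x64(); the commented-out per-lane and
// any-set/any-clear guards above mark where conditional skipping was
// considered.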
int scanhash_hmq1725_4way( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*8] __attribute__ ((aligned (64)));
// uint32_t *hash7 = &(hash[7<<2]);
// uint32_t lane_hash[8];
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19] - 1;
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 9; // aligned
/* int */ thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t Htarg = ptarget[7];
uint64_t htmax[] = { 0, 0xF, 0xFF,
0xFFF, 0xFFFF, 0x10000000 };
uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
0xFFFFF000, 0xFFFF0000, 0 };

mm256_bswap_intrlv80_4x64( vdata, pdata );
for ( int m = 0; m < 6; m++ ) if ( Htarg <= htmax[m] )
{
uint32_t mask = masks[ m ];
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
hmq1725_4way_hash( hash, vdata );
for ( int i = 0; i < 4; i++ )
if ( ( (hash+(i<<3))[7] & mask ) == 0 )
{
if ( fulltest( (hash+(i<<3)), ptarget ) && !opt_benchmark )
{
pdata[19] = n + i;
submit_solution( work, (hash+(i<<3)), mythr, i );
}
}
n += 4;
} while ( ( n < max_nonce-4 ) && !work_restart[thr_id].restart );
break;
}
*hashes_done = n - first_nonce + 1;
return 0;
}

#endif // HMQ1725_4WAY

algo/quark/hmq1725-gate.c (new file, 17 lines)
@@ -0,0 +1,17 @@
#include "hmq1725-gate.h"
|
||||
|
||||
bool register_hmq1725_algo( algo_gate_t* gate )
|
||||
{
|
||||
#if defined(HMQ1725_4WAY)
|
||||
gate->scanhash = (void*)&scanhash_hmq1725_4way;
|
||||
gate->hash = (void*)&hmq1725_4way_hash;
|
||||
#else
|
||||
init_hmq1725_ctx();
|
||||
gate->scanhash = (void*)&scanhash_hmq1725;
|
||||
gate->hash = (void*)&hmq1725hash;
|
||||
#endif
|
||||
gate->set_target = (void*)&scrypt_set_target;
|
||||
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
|
||||
return true;
|
||||
};
|
||||
|
||||
algo/quark/hmq1725-gate.h (new file, 28 lines)
@@ -0,0 +1,28 @@
#ifndef HMQ1725_GATE_H__
#define HMQ1725_GATE_H__ 1

#include "algo-gate-api.h"
#include <stdint.h>

#if defined(__AVX2__) && defined(__AES__)
// #define HMQ1725_4WAY
#endif

bool register_hmq1725_algo( algo_gate_t* gate );

#if defined(HMQ1725_4WAY)

void hmq1725_4way_hash( void *state, const void *input );
int scanhash_hmq1725_4way( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );

#else

void hmq1725hash( void *state, const void *input );
int scanhash_hmq1725( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void init_hmq1725_ctx();

#endif

#endif // HMQ1725_GATE_H__
algo/quark/hmq1725.c (new file, 422 lines)
@@ -0,0 +1,422 @@
#include "hmq1725-gate.h"
|
||||
#include <string.h>
|
||||
#include <stdint.h>
|
||||
#include "algo/blake/sph_blake.h"
|
||||
#include "algo/bmw/sph_bmw.h"
|
||||
#include "algo/groestl/sph_groestl.h"
|
||||
#include "algo/jh/sph_jh.h"
|
||||
#include "algo/keccak/sph_keccak.h"
|
||||
#include "algo/skein/sph_skein.h"
|
||||
#include "algo/luffa/sph_luffa.h"
|
||||
#include "algo/cubehash/sph_cubehash.h"
|
||||
#include "algo/shavite/sph_shavite.h"
|
||||
#include "algo/simd/sph_simd.h"
|
||||
#include "algo/echo/sph_echo.h"
|
||||
#include "algo/hamsi/sph_hamsi.h"
|
||||
#include "algo/fugue/sph_fugue.h"
|
||||
#include "algo/shabal/sph_shabal.h"
|
||||
#include "algo/whirlpool/sph_whirlpool.h"
|
||||
#include "algo/haval/sph-haval.h"
|
||||
#include <openssl/sha.h>
|
||||
#if defined(__AES__)
|
||||
#include "algo/groestl/aes_ni/hash-groestl.h"
|
||||
#include "algo/echo/aes_ni/hash_api.h"
|
||||
#endif
|
||||
#include "algo/luffa/luffa_for_sse2.h"
|
||||
#include "algo/cubehash/cubehash_sse2.h"
|
||||
#include "algo/simd/nist.h"
|
||||
#include "algo/jh/sse2/jh_sse2_opt64.h"
|
||||
|
||||
typedef struct {
|
||||
sph_blake512_context blake1, blake2;
|
||||
sph_bmw512_context bmw1, bmw2, bmw3;
|
||||
sph_skein512_context skein1, skein2;
|
||||
sph_jh512_context jh1, jh2;
|
||||
sph_keccak512_context keccak1, keccak2;
|
||||
hashState_luffa luffa1, luffa2;
|
||||
cubehashParam cube;
|
||||
sph_shavite512_context shavite1, shavite2;
|
||||
hashState_sd simd1, simd2;
|
||||
sph_hamsi512_context hamsi1;
|
||||
sph_fugue512_context fugue1, fugue2;
|
||||
sph_shabal512_context shabal1;
|
||||
sph_whirlpool_context whirlpool1, whirlpool2, whirlpool3, whirlpool4;
|
||||
SHA512_CTX sha1, sha2;
|
||||
sph_haval256_5_context haval1, haval2;
|
||||
#if defined(__AES__)
|
||||
hashState_echo echo1, echo2;
|
||||
hashState_groestl groestl1, groestl2;
|
||||
#else
|
||||
sph_groestl512_context groestl1, groestl2;
|
||||
sph_echo512_context echo1, echo2;
|
||||
#endif
|
||||
} hmq1725_ctx_holder;
|
||||
|
||||
static hmq1725_ctx_holder hmq1725_ctx __attribute__ ((aligned (64)));
|
||||
static __thread sph_bmw512_context hmq_bmw_mid __attribute__ ((aligned (64)));
|
||||
|
||||
void init_hmq1725_ctx()
|
||||
{
|
||||
sph_blake512_init(&hmq1725_ctx.blake1);
|
||||
sph_blake512_init(&hmq1725_ctx.blake2);
|
||||
|
||||
sph_bmw512_init(&hmq1725_ctx.bmw1);
|
||||
sph_bmw512_init(&hmq1725_ctx.bmw2);
|
||||
sph_bmw512_init(&hmq1725_ctx.bmw3);
|
||||
|
||||
sph_skein512_init(&hmq1725_ctx.skein1);
|
||||
sph_skein512_init(&hmq1725_ctx.skein2);
|
||||
|
||||
sph_jh512_init(&hmq1725_ctx.jh1);
|
||||
sph_jh512_init(&hmq1725_ctx.jh2);
|
||||
|
||||
sph_keccak512_init(&hmq1725_ctx.keccak1);
|
||||
sph_keccak512_init(&hmq1725_ctx.keccak2);
|
||||
|
||||
init_luffa( &hmq1725_ctx.luffa1, 512 );
|
||||
init_luffa( &hmq1725_ctx.luffa2, 512 );
|
||||
|
||||
cubehashInit( &hmq1725_ctx.cube, 512, 16, 32 );
|
||||
|
||||
sph_shavite512_init(&hmq1725_ctx.shavite1);
|
||||
sph_shavite512_init(&hmq1725_ctx.shavite2);
|
||||
|
||||
init_sd( &hmq1725_ctx.simd1, 512 );
|
||||
init_sd( &hmq1725_ctx.simd2, 512 );
|
||||
|
||||
sph_hamsi512_init(&hmq1725_ctx.hamsi1);
|
||||
|
||||
sph_fugue512_init(&hmq1725_ctx.fugue1);
|
||||
sph_fugue512_init(&hmq1725_ctx.fugue2);
|
||||
|
||||
sph_shabal512_init(&hmq1725_ctx.shabal1);
|
||||
|
||||
sph_whirlpool_init(&hmq1725_ctx.whirlpool1);
|
||||
sph_whirlpool_init(&hmq1725_ctx.whirlpool2);
|
||||
sph_whirlpool_init(&hmq1725_ctx.whirlpool3);
|
||||
sph_whirlpool_init(&hmq1725_ctx.whirlpool4);
|
||||
|
||||
SHA512_Init( &hmq1725_ctx.sha1 );
|
||||
SHA512_Init( &hmq1725_ctx.sha2 );
|
||||
|
||||
sph_haval256_5_init(&hmq1725_ctx.haval1);
|
||||
sph_haval256_5_init(&hmq1725_ctx.haval2);
|
||||
|
||||
#if defined(__AES__)
|
||||
init_echo( &hmq1725_ctx.echo1, 512 );
|
||||
init_echo( &hmq1725_ctx.echo2, 512 );
|
||||
init_groestl( &hmq1725_ctx.groestl1, 64 );
|
||||
init_groestl( &hmq1725_ctx.groestl2, 64 );
|
||||
#else
|
||||
sph_groestl512_init( &hmq1725_ctx.groestl1 );
|
||||
sph_groestl512_init( &hmq1725_ctx.groestl2 );
|
||||
sph_echo512_init( &hmq1725_ctx.echo1 );
|
||||
sph_echo512_init( &hmq1725_ctx.echo2 );
|
||||
#endif
|
||||
}
|
||||
|
||||
void hmq_bmw512_midstate( const void* input )
|
||||
{
|
||||
memcpy( &hmq_bmw_mid, &hmq1725_ctx.bmw1, sizeof hmq_bmw_mid );
|
||||
sph_bmw512( &hmq_bmw_mid, input, 64 );
|
||||
}
|
||||
|
||||
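// Note: only the last 16 bytes of the 80-byte block header change while a
// work unit is scanned, so the BMW512 state after absorbing the first 64
// bytes (the midstate) is computed once per work unit here and re-used by
// hmq1725hash(), which then only hashes the 16-byte tail for each nonce.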
__thread hmq1725_ctx_holder h_ctx __attribute__ ((aligned (64)));

extern void hmq1725hash(void *state, const void *input)
{
const uint32_t mask = 24;
uint32_t hashA[32] __attribute__((aligned(64)));
uint32_t hashB[32] __attribute__((aligned(64)));
const int midlen = 64; // bytes
const int tail = 80 - midlen; // 16

memcpy(&h_ctx, &hmq1725_ctx, sizeof(hmq1725_ctx));

memcpy( &h_ctx.bmw1, &hmq_bmw_mid, sizeof hmq_bmw_mid );
sph_bmw512( &h_ctx.bmw1, input + midlen, tail );
sph_bmw512_close(&h_ctx.bmw1, hashA); //1

sph_whirlpool (&h_ctx.whirlpool1, hashA, 64); //0
sph_whirlpool_close(&h_ctx.whirlpool1, hashB); //1

if ( hashB[0] & mask ) //1
{
#if defined(__AES__)
update_and_final_groestl( &h_ctx.groestl1, (char*)hashA,
(const char*)hashB, 512 );
#else
sph_groestl512 (&h_ctx.groestl1, hashB, 64); //1
sph_groestl512_close(&h_ctx.groestl1, hashA); //2
#endif
}
else
{
sph_skein512 (&h_ctx.skein1, hashB, 64); //1
sph_skein512_close(&h_ctx.skein1, hashA); //2
}

sph_jh512 (&h_ctx.jh1, hashA, 64); //3
sph_jh512_close(&h_ctx.jh1, hashB); //4

sph_keccak512 (&h_ctx.keccak1, hashB, 64); //2
sph_keccak512_close(&h_ctx.keccak1, hashA); //3

if ( hashA[0] & mask ) //4
{
sph_blake512 (&h_ctx.blake1, hashA, 64); //
sph_blake512_close(&h_ctx.blake1, hashB); //5
}
else
{
sph_bmw512 (&h_ctx.bmw2, hashA, 64); //4
sph_bmw512_close(&h_ctx.bmw2, hashB); //5
}

update_and_final_luffa( &h_ctx.luffa1, (BitSequence*)hashA,
(const BitSequence*)hashB, 64 );

cubehashUpdateDigest( &h_ctx.cube, (BitSequence *)hashB,
(const BitSequence *)hashA, 64 );

if ( hashB[0] & mask ) //7
{
sph_keccak512 (&h_ctx.keccak2, hashB, 64); //
sph_keccak512_close(&h_ctx.keccak2, hashA); //8
}
else
{
sph_jh512 (&h_ctx.jh2, hashB, 64); //7
sph_jh512_close(&h_ctx.jh2, hashA); //8
}

sph_shavite512 (&h_ctx.shavite1, hashA, 64); //3
sph_shavite512_close(&h_ctx.shavite1, hashB); //4

update_final_sd( &h_ctx.simd1, (BitSequence *)hashA,
(const BitSequence *)hashB, 512 );

if ( hashA[0] & mask ) //4
{
sph_whirlpool (&h_ctx.whirlpool2, hashA, 64); //
sph_whirlpool_close(&h_ctx.whirlpool2, hashB); //5
}
else
{
sph_haval256_5 (&h_ctx.haval1, hashA, 64); //4
sph_haval256_5_close(&h_ctx.haval1, hashB); //5
memset(&hashB[8], 0, 32);
}

#if defined(__AES__)
update_final_echo ( &h_ctx.echo1, (BitSequence *)hashA,
(const BitSequence *)hashB, 512 );
#else
sph_echo512 (&h_ctx.echo1, hashB, 64); //5
sph_echo512_close(&h_ctx.echo1, hashA); //6
#endif

sph_blake512 (&h_ctx.blake2, hashA, 64); //6
sph_blake512_close(&h_ctx.blake2, hashB); //7

if ( hashB[0] & mask ) //7
{
sph_shavite512 (&h_ctx.shavite2, hashB, 64); //
sph_shavite512_close(&h_ctx.shavite2, hashA); //8
}
else
{
update_and_final_luffa( &h_ctx.luffa2, (BitSequence *)hashA,
(const BitSequence *)hashB, 64 );
}

sph_hamsi512 (&h_ctx.hamsi1, hashA, 64); //3
sph_hamsi512_close(&h_ctx.hamsi1, hashB); //4

sph_fugue512 (&h_ctx.fugue1, hashB, 64); //2 ////
sph_fugue512_close(&h_ctx.fugue1, hashA); //3

if ( hashA[0] & mask ) //4
{
#if defined(__AES__)
update_final_echo ( &h_ctx.echo2, (BitSequence *)hashB,
(const BitSequence *)hashA, 512 );
#else
sph_echo512 (&h_ctx.echo2, hashA, 64); //
sph_echo512_close(&h_ctx.echo2, hashB); //5
#endif
}
else
{
update_final_sd( &h_ctx.simd2, (BitSequence *)hashB,
(const BitSequence *)hashA, 512 );
}

sph_shabal512 (&h_ctx.shabal1, hashB, 64); //5
sph_shabal512_close(&h_ctx.shabal1, hashA); //6

sph_whirlpool (&h_ctx.whirlpool3, hashA, 64); //6
sph_whirlpool_close(&h_ctx.whirlpool3, hashB); //7

if ( hashB[0] & mask ) //7
{
sph_fugue512 (&h_ctx.fugue2, hashB, 64); //
sph_fugue512_close(&h_ctx.fugue2, hashA); //8
}
else
{
SHA512_Update( &h_ctx.sha1, hashB, 64 );
SHA512_Final( (unsigned char*) hashA, &h_ctx.sha1 );
}

#if defined(__AES__)
update_and_final_groestl( &h_ctx.groestl2, (char*)hashB,
(const char*)hashA, 512 );
#else
sph_groestl512 (&h_ctx.groestl2, hashA, 64); //3
sph_groestl512_close(&h_ctx.groestl2, hashB); //4
#endif

SHA512_Update( &h_ctx.sha2, hashB, 64 );
SHA512_Final( (unsigned char*) hashA, &h_ctx.sha2 );

if ( hashA[0] & mask ) //4
{
sph_haval256_5 (&h_ctx.haval2, hashA, 64); //
sph_haval256_5_close(&h_ctx.haval2, hashB); //5
memset(&hashB[8], 0, 32);
}
else
{
sph_whirlpool (&h_ctx.whirlpool4, hashA, 64); //4
sph_whirlpool_close(&h_ctx.whirlpool4, hashB); //5
}

sph_bmw512 (&h_ctx.bmw3, hashB, 64); //5
sph_bmw512_close(&h_ctx.bmw3, hashA); //6

memcpy(state, hashA, 32);
}

int scanhash_hmq1725( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
// uint32_t endiandata[32] __attribute__((aligned(64)));
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t hash64[8] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19] - 1;
const uint32_t first_nonce = pdata[19];
/* int */ thr_id = mythr->id; // thr_id arg is deprecated
//const uint32_t Htarg = ptarget[7];

//we need bigendian data...
// for (int k = 0; k < 32; k++)
for (int k = 0; k < 20; k++)
be32enc(&endiandata[k], pdata[k]);

hmq_bmw512_midstate( endiandata );

// if (opt_debug)
// {
// applog(LOG_DEBUG, "Thr: %02d, firstN: %08x, maxN: %08x, ToDo: %d", thr_id, first_nonce, max_nonce, max_nonce-first_nonce);
// }

/* I'm too lazy to put the loop in an inline function... so dirty copy'n'paste.... */
/* I know that I could set a variable, but I don't know how the compiler would optimize it, and I don't want the CPU to have to load the value into a register every time. */
if (ptarget[7]==0) {
do {
pdata[19] = ++n;
be32enc(&endiandata[19], n);
hmq1725hash(hash64, endiandata);
if (((hash64[7]&0xFFFFFFFF)==0) &&
fulltest(hash64, ptarget)) {
*hashes_done = n - first_nonce + 1;
return true;
}
} while (n < max_nonce && !work_restart[thr_id].restart);
}
else if (ptarget[7]<=0xF)
{
do {
pdata[19] = ++n;
be32enc(&endiandata[19], n);
hmq1725hash(hash64, endiandata);
if (((hash64[7]&0xFFFFFFF0)==0) &&
fulltest(hash64, ptarget)) {
*hashes_done = n - first_nonce + 1;
return true;
}
} while (n < max_nonce && !work_restart[thr_id].restart);
}
else if (ptarget[7]<=0xFF)
{
do {
pdata[19] = ++n;
be32enc(&endiandata[19], n);
hmq1725hash(hash64, endiandata);
if (((hash64[7]&0xFFFFFF00)==0) &&
fulltest(hash64, ptarget)) {
*hashes_done = n - first_nonce + 1;
return true;
}
} while (n < max_nonce && !work_restart[thr_id].restart);
}
else if (ptarget[7]<=0xFFF)
{
do {
pdata[19] = ++n;
be32enc(&endiandata[19], n);
hmq1725hash(hash64, endiandata);
if (((hash64[7]&0xFFFFF000)==0) &&
fulltest(hash64, ptarget)) {
*hashes_done = n - first_nonce + 1;
return true;
}
} while (n < max_nonce && !work_restart[thr_id].restart);

}
else if (ptarget[7]<=0xFFFF)
{
do {
pdata[19] = ++n;
be32enc(&endiandata[19], n);
hmq1725hash(hash64, endiandata);
if (((hash64[7]&0xFFFF0000)==0) &&
fulltest(hash64, ptarget)) {
*hashes_done = n - first_nonce + 1;
return true;
}
} while (n < max_nonce && !work_restart[thr_id].restart);

}
else
{
do {
pdata[19] = ++n;
be32enc(&endiandata[19], n);
hmq1725hash(hash64, endiandata);
if (fulltest(hash64, ptarget)) {
*hashes_done = n - first_nonce + 1;
return true;
}
} while (n < max_nonce && !work_restart[thr_id].restart);
}

*hashes_done = n - first_nonce + 1;
pdata[19] = n;
return 0;
}
/*
bool register_hmq1725_algo( algo_gate_t* gate )
{
init_hmq1725_ctx();
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->set_target = (void*)&scrypt_set_target;
gate->scanhash = (void*)&scanhash_hmq1725;
gate->hash = (void*)&hmq1725hash;
return true;
};
*/
@@ -48,9 +48,10 @@ void quark_4way_hash( void *state, const void *input )
__m256i* vhA = (__m256i*)vhashA;
__m256i* vhB = (__m256i*)vhashB;
__m256i vh_mask;
__m256i bit3_mask; bit3_mask = _mm256_set1_epi64x( 8 );
int i;
quark_4way_ctx_holder ctx;
const __m256i bit3_mask = _mm256_set1_epi64x( 8 );
const uint32_t mask = 8;

memcpy( &ctx, &quark_4way_ctx, sizeof(quark_4way_ctx) );

blake512_4way( &ctx.blake, input, 80 );
@@ -62,27 +63,44 @@ void quark_4way_hash( void *state, const void *input )
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ),
m256_zero );

mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

if ( hash0[0] & mask )
{
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(char*)hash0, 512 );
}
if ( hash1[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash1,
(char*)hash1, 512 );
}
if ( hash2[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash2,
(char*)hash2, 512 );
}
if ( hash3[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(char*)hash3, 512 );
mm256_interleave_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
}

mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );

if ( mm256_anybits0( vh_mask ) )
{
skein512_4way( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhashB );
}

for ( i = 0; i < 8; i++ )
vh[i] = _mm256_blendv_epi8( vhA[i], vhB[i], vh_mask );
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );

mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
reinit_groestl( &ctx.groestl );
@@ -91,7 +109,8 @@ void quark_4way_hash( void *state, const void *input )
update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhash );
@@ -99,16 +118,21 @@ void quark_4way_hash( void *state, const void *input )
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ),
m256_zero );

if ( mm256_anybits1( vh_mask ) )
{
blake512_4way_init( &ctx.blake );
blake512_4way( &ctx.blake, vhash, 64 );
blake512_4way_close( &ctx.blake, vhashA );
}

if ( mm256_anybits0( vh_mask ) )
{
bmw512_4way_init( &ctx.bmw );
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhashB );
}

for ( i = 0; i < 8; i++ )
vh[i] = _mm256_blendv_epi8( vhA[i], vhB[i], vh_mask );
mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask );

keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
@@ -120,63 +144,65 @@ void quark_4way_hash( void *state, const void *input )
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ),
m256_zero );

if ( mm256_anybits1( vh_mask ) )
{
keccak512_4way_init( &ctx.keccak );
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhashA );
}

if ( mm256_anybits0( vh_mask ) )
{
jh512_4way_init( &ctx.jh );
jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhashB );
}

for ( i = 0; i < 8; i++ )
vh[i] = _mm256_blendv_epi8( vhA[i], vhB[i], vh_mask );

mm256_deinterleave_4x64( state, state+32, state+64, state+96, vhash, 256 );
// Final blend, directly to state, only need 32 bytes.
casti_m256i( state, 0 ) = _mm256_blendv_epi8( vhA[0], vhB[0], vh_mask );
casti_m256i( state, 1 ) = _mm256_blendv_epi8( vhA[1], vhB[1], vh_mask );
casti_m256i( state, 2 ) = _mm256_blendv_epi8( vhA[2], vhB[2], vh_mask );
casti_m256i( state, 3 ) = _mm256_blendv_epi8( vhA[3], vhB[3], vh_mask );
}

int scanhash_quark_4way( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done)
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*8] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[25]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
uint32_t *nonces = work->nonces;
int num_found = 0;
uint32_t *noncep = vdata + 73; // 9*8 + 1

swab32_array( endiandata, pdata, 20 );

uint64_t *edata = (uint64_t*)endiandata;
mm256_interleave_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
__m256i *noncev = (__m256i*)vdata + 9; // aligned
/* int */ thr_id = mythr->id; // thr_id arg is deprecated

mm256_bswap_intrlv80_4x64( vdata, pdata );
do
{
be32enc( noncep, n );
be32enc( noncep+2, n+1 );
be32enc( noncep+4, n+2 );
be32enc( noncep+6, n+3 );
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );

quark_4way_hash( hash, vdata );
pdata[19] = n;

for ( int i = 0; i < 4; i++ )
if ( ( ( (hash+(i<<3))[7] & 0xFFFFFF00 ) == 0 )
&& fulltest( hash+(i<<3), ptarget ) )
if ( ( hash7[ i<<1 ] & 0xFFFFFF00 ) == 0 )
{
pdata[19] = n+i;
nonces[ num_found++ ] = n+i;
work_set_target_ratio( work, hash+(i<<3) );
mm256_extract_lane_4x64( lane_hash, hash, i, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;
submit_solution( work, lane_hash, mythr, i );
}
}
n += 4;
} while ( ( num_found == 0 ) && ( n < max_nonce )
&& !work_restart[thr_id].restart );
} while ( ( n < max_nonce ) && !work_restart[thr_id].restart );

*hashes_done = n - first_nonce + 1;
return num_found;
return 0;
}

#endif

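With the final blend above leaving the 32-byte results in 4x64 interleaved form, the scanhash loop checks hash7[ i<<1 ] and only de-interleaves a single lane with mm256_extract_lane_4x64() when it looks promising. A scalar sketch of that extraction (illustrative only; extract_lane_4x64_256 is a hypothetical name, and the layout is the same word interleaving assumed elsewhere in this commit):

#include <stdint.h>

// 64-bit word w of lane l sits at index w*4 + l in 4x64 interleaved data;
// copy the four words (256 bits) that belong to one lane.
static inline void extract_lane_4x64_256( uint64_t *out, const uint64_t *v,
                                          int lane )
{
    for ( int w = 0; w < 4; w++ )
        out[w] = v[ w*4 + lane ];
}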
@@ -13,19 +13,15 @@ bool register_quark_algo( algo_gate_t* gate );
#if defined(QUARK_4WAY)

void quark_4way_hash( void *state, const void *input );

int scanhash_quark_4way( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done );

uint64_t *hashes_done, struct thr_info *mythr );
void init_quark_4way_ctx();

#endif

void quark_hash( void *state, const void *input );

int scanhash_quark( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done );

uint64_t *hashes_done, struct thr_info *mythr );
void init_quark_ctx();

#endif

@@ -173,16 +173,17 @@ void quark_hash(void *state, const void *input)
}

int scanhash_quark( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done)
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t hash64[8] __attribute__((aligned(32)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t hash64[8] __attribute__((aligned(32)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19] - 1;
const uint32_t first_nonce = pdata[19];
/* int */ thr_id = mythr->id; // thr_id arg is deprecated

swab32_array( endiandata, pdata, 20 );
swab32_array( endiandata, pdata, 20 );

do {
pdata[19] = ++n;
