Jay D Dee
2023-10-25 20:36:20 -04:00
parent 31c4dedf59
commit 160608cce5
180 changed files with 10318 additions and 13097 deletions


@@ -1,6 +1,5 @@
#include "lyra2-gate.h"
#include <memory.h>
#include <mm_malloc.h>
#include "algo/blake/blake256-hash.h"
#include "algo/keccak/keccak-hash-4way.h"
#include "algo/skein/skein-hash-4way.h"
@@ -10,6 +9,19 @@
#if defined(__VAES__)
#include "algo/groestl/groestl256-hash-4way.h"
#endif
#include "algo/keccak/sph_keccak.h"
#include "algo/skein/sph_skein.h"
#if !( defined(__AES__) || defined(__ARM_FEATURE_AES) )
#include "algo/groestl/sph_groestl.h"
#endif
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define ALLIUM_16WAY 1
#elif defined(__AVX2__)
#define ALLIUM_8WAY 1
#elif defined(__SSE2__) || defined(__ARM_NEON)
#define ALLIUM_4WAY 1
#endif
#if defined (ALLIUM_16WAY)
@@ -443,4 +455,297 @@ int scanhash_allium_8way( struct work *work, uint32_t max_nonce,
return 0;
}
#elif defined(__SSE2__) || defined(__ARM_NEON)
///////////////////
//
// 4 way
typedef union
{
keccak256_2x64_context keccak;
cubehashParam cube;
#if defined(__x86_64__)
skein256_2x64_context skein;
#else
sph_skein512_context skein;
#endif
#if defined(__AES__) || defined(__ARM_FEATURE_AES)
hashState_groestl256 groestl;
#else
sph_groestl256_context groestl;
#endif
} allium_4way_ctx_holder;
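// The component hash contexts are used strictly one at a time, so a union
// lets them share a single block of storage rather than a larger struct.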
static void allium_4way_hash( void *hash, const void *midstate_vars,
const void *midhash, const void *block )
{
uint64_t vhashA[4*4] __attribute__ ((aligned (64)));
uint64_t *hash0 = (uint64_t*)hash;
uint64_t *hash1 = (uint64_t*)hash+ 4;
uint64_t *hash2 = (uint64_t*)hash+ 8;
uint64_t *hash3 = (uint64_t*)hash+12;
allium_4way_ctx_holder ctx __attribute__ ((aligned (64)));
blake256_4way_final_rounds_le( vhashA, midstate_vars, midhash, block, 14 );
dintrlv_4x32( hash0, hash1, hash2, hash3, vhashA, 256 );
intrlv_2x64( vhashA, hash0, hash1, 256 );
keccak256_2x64_init( &ctx.keccak );
keccak256_2x64_update( &ctx.keccak, vhashA, 32 );
keccak256_2x64_close( &ctx.keccak, vhashA );
dintrlv_2x64( hash0, hash1, vhashA, 256 );
intrlv_2x64( vhashA, hash2, hash3, 256 );
keccak256_2x64_init( &ctx.keccak );
keccak256_2x64_update( &ctx.keccak, vhashA, 32 );
keccak256_2x64_close( &ctx.keccak, vhashA );
dintrlv_2x64( hash2, hash3, vhashA, 256 );
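// LYRA2RE( out, outLen, pwd, pwdLen, salt, saltLen, timeCost, nRows, nCols )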
LYRA2RE( hash0, 32, hash0, 32, hash0, 32, 1, 8, 8 );
LYRA2RE( hash1, 32, hash1, 32, hash1, 32, 1, 8, 8 );
LYRA2RE( hash2, 32, hash2, 32, hash2, 32, 1, 8, 8 );
LYRA2RE( hash3, 32, hash3, 32, hash3, 32, 1, 8, 8 );
cubehash_full( &ctx.cube, hash0, 256, hash0, 32 );
cubehash_full( &ctx.cube, hash1, 256, hash1, 32 );
cubehash_full( &ctx.cube, hash2, 256, hash2, 32 );
cubehash_full( &ctx.cube, hash3, 256, hash3, 32 );
LYRA2RE( hash0, 32, hash0, 32, hash0, 32, 1, 8, 8 );
LYRA2RE( hash1, 32, hash1, 32, hash1, 32, 1, 8, 8 );
LYRA2RE( hash2, 32, hash2, 32, hash2, 32, 1, 8, 8 );
LYRA2RE( hash3, 32, hash3, 32, hash3, 32, 1, 8, 8 );
#if defined(__x86_64__)
intrlv_2x64( vhashA, hash0, hash1, 256 );
skein256_2x64_init( &ctx.skein );
skein256_2x64_update( &ctx.skein, vhashA, 32 );
skein256_2x64_close( &ctx.skein, vhashA );
dintrlv_2x64( hash0, hash1, vhashA, 256 );
intrlv_2x64( vhashA, hash2, hash3, 256 );
skein256_2x64_init( &ctx.skein );
skein256_2x64_update( &ctx.skein, vhashA, 32 );
skein256_2x64_close( &ctx.skein, vhashA );
dintrlv_2x64( hash2, hash3, vhashA, 256 );
#else
sph_skein256_init( &ctx.skein );
sph_skein256( &ctx.skein, hash0, 32 );
sph_skein256_close( &ctx.skein, hash0 );
sph_skein256_init( &ctx.skein );
sph_skein256( &ctx.skein, hash1, 32 );
sph_skein256_close( &ctx.skein, hash1 );
sph_skein256_init( &ctx.skein );
sph_skein256( &ctx.skein, hash2, 32 );
sph_skein256_close( &ctx.skein, hash2 );
sph_skein256_init( &ctx.skein );
sph_skein256( &ctx.skein, hash3, 32 );
sph_skein256_close( &ctx.skein, hash3 );
#endif
#if defined(__AES__) || defined(__ARM_FEATURE_AES)
groestl256_full( &ctx.groestl, hash0, hash0, 256 );
groestl256_full( &ctx.groestl, hash1, hash1, 256 );
groestl256_full( &ctx.groestl, hash2, hash2, 256 );
groestl256_full( &ctx.groestl, hash3, hash3, 256 );
#else
sph_groestl256_init( &ctx.groestl );
sph_groestl256( &ctx.groestl, hash0, 32 );
sph_groestl256_close( &ctx.groestl, hash0 );
sph_groestl256_init( &ctx.groestl );
sph_groestl256( &ctx.groestl, hash1, 32 );
sph_groestl256_close( &ctx.groestl, hash1 );
sph_groestl256_init( &ctx.groestl );
sph_groestl256( &ctx.groestl, hash2, 32 );
sph_groestl256_close( &ctx.groestl, hash2 );
sph_groestl256_init( &ctx.groestl );
sph_groestl256( &ctx.groestl, hash3, 32 );
sph_groestl256_close( &ctx.groestl, hash3 );
#endif
}
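// For reference, a plain C sketch of what the 2x64 lane interleave above is
// assumed to do: even 64 bit words carry lane 0, odd words carry lane 1. A
// simplified stand-in for the repo's optimized intrlv_2x64 / dintrlv_2x64.
static inline void intrlv_2x64_ref( uint64_t *dst, const uint64_t *lane0,
                                    const uint64_t *lane1, const int bit_len )
{
   for ( int i = 0; i < bit_len/64; i++ )
   {
      dst[ 2*i   ] = lane0[i];   // even words: lane 0
      dst[ 2*i+1 ] = lane1[i];   // odd words:  lane 1
   }
}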
int scanhash_allium_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint64_t hash[4*4] __attribute__ ((aligned (64)));
uint32_t midstate_vars[16*4] __attribute__ ((aligned (64)));
v128_t block0_hash[8] __attribute__ ((aligned (64)));
v128_t block_buf[16] __attribute__ ((aligned (64)));
uint32_t phash[8] __attribute__ ((aligned (32))) =
{
0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};
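// phash is seeded with the BLAKE-256 initial values ( the same constants as
// the SHA-256 IV ).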
uint32_t *pdata = work->data;
uint64_t *ptarget = (uint64_t*)work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
const v128u32_t four = v128_32(4);
// Prehash first block
blake256_transform_le( phash, pdata, 512, 0, 14 );
block0_hash[0] = v128_32( phash[0] );
block0_hash[1] = v128_32( phash[1] );
block0_hash[2] = v128_32( phash[2] );
block0_hash[3] = v128_32( phash[3] );
block0_hash[4] = v128_32( phash[4] );
block0_hash[5] = v128_32( phash[5] );
block0_hash[6] = v128_32( phash[6] );
block0_hash[7] = v128_32( phash[7] );
// Build the vectorized second block; interleave the last 16 bytes of data
// using unique per-lane nonces.
block_buf[ 0] = v128_32( pdata[16] );
block_buf[ 1] = v128_32( pdata[17] );
block_buf[ 2] = v128_32( pdata[18] );
block_buf[ 3] = v128_set32( n+3, n+2, n+1, n );
block_buf[ 4] = v128_32( 0x80000000 );
block_buf[13] = v128_32( 1 );
block_buf[15] = v128_32( 640 );
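// BLAKE-256 padding for an 80 byte ( 640 bit ) header: 0x80000000 starts the
// padding, the set bit in word 13 is the final '1' marker preceding the
// length, and word 15 holds the 640 bit message length.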
// Partially prehash the second block without touching the nonces
blake256_4way_round0_prehash_le( midstate_vars, block0_hash, block_buf );
do {
allium_4way_hash( hash, midstate_vars, block0_hash, block_buf );
for ( int lane = 0; lane < 4; lane++ )
{
const uint64_t *lane_hash = hash + (lane<<2);
if ( unlikely( valid_hash( lane_hash, ptarget ) && !bench ) )
{
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
}
}
n += 4;
block_buf[3] = v128_add32( block_buf[3], four );
} while ( likely( (n <= last_nonce) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#endif
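// The vector paths above use BLAKE-256 midstate caching: the first 64 byte
// block of the 80 byte header is nonce independent, so it is compressed once
// per job ( blake256_transform_le ), the nonce free part of the second block
// is prehashed once ( blake256_4way_round0_prehash_le ), and only the final
// rounds over the nonce bearing words are redone each iteration
// ( blake256_4way_final_rounds_le ).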
////////////
//
// 1 way
typedef struct
{
blake256_context blake;
sph_keccak256_context keccak;
cubehashParam cube;
sph_skein256_context skein;
#if defined (__AES__) || defined(__ARM_FEATURE_AES)
hashState_groestl256 groestl;
#else
sph_groestl256_context groestl;
#endif
} allium_ctx_holder;
static __thread allium_ctx_holder allium_ctx;
bool init_allium_ctx()
{
sph_keccak256_init( &allium_ctx.keccak );
cubehashInit( &allium_ctx.cube, 256, 16, 32 );
sph_skein256_init( &allium_ctx.skein );
#if defined (__AES__) || defined(__ARM_FEATURE_AES)
init_groestl256( &allium_ctx.groestl, 32 );
#else
sph_groestl256_init( &allium_ctx.groestl );
#endif
return true;
}
void allium_hash(void *state, const void *input)
{
uint32_t hash[8] __attribute__ ((aligned (64)));
allium_ctx_holder ctx __attribute__ ((aligned (32)));
memcpy( &ctx, &allium_ctx, sizeof(allium_ctx) );
blake256_update( &ctx.blake, input + 64, 16 );
blake256_close( &ctx.blake, hash );
sph_keccak256( &ctx.keccak, hash, 32 );
sph_keccak256_close( &ctx.keccak, hash );
LYRA2RE( hash, 32, hash, 32, hash, 32, 1, 8, 8 );
cubehashUpdateDigest( &ctx.cube, (byte*)hash, (const byte*)hash, 32 );
LYRA2RE( hash, 32, hash, 32, hash, 32, 1, 8, 8 );
sph_skein256( &ctx.skein, hash, 32 );
sph_skein256_close( &ctx.skein, hash );
#if defined (__AES__) || defined(__ARM_FEATURE_AES)
update_and_final_groestl256( &ctx.groestl, hash, hash, 256 );
#else
sph_groestl256( &ctx.groestl, hash, 32 );
sph_groestl256_close( &ctx.groestl, hash );
#endif
memcpy(state, hash, 32);
}
int scanhash_allium( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(128) hash[8];
uint32_t _ALIGN(128) edata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t nonce = first_nonce;
const int thr_id = mythr->id;
if ( opt_benchmark )
ptarget[7] = 0x3ffff;
for ( int i = 0; i < 19; i++ )
edata[i] = bswap_32( pdata[i] );
blake256_init( &allium_ctx.blake );
blake256_update( &allium_ctx.blake, edata, 64 );
do {
edata[19] = nonce;
allium_hash( hash, edata );
if ( valid_hash( hash, ptarget ) && !opt_benchmark )
{
pdata[19] = bswap_32( nonce );
submit_solution( work, hash, mythr );
}
nonce++;
} while ( nonce < max_nonce && !work_restart[thr_id].restart );
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce;
return 0;
}
bool register_allium_algo( algo_gate_t* gate )
{
#if defined (ALLIUM_16WAY)
gate->scanhash = (void*)&scanhash_allium_16way;
#elif defined (ALLIUM_8WAY)
gate->scanhash = (void*)&scanhash_allium_8way;
#elif defined (ALLIUM_4WAY)
gate->scanhash = (void*)&scanhash_allium_4way;
#else
gate->miner_thread_init = (void*)&init_allium_ctx;
gate->scanhash = (void*)&scanhash_allium;
gate->hash = (void*)&allium_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT
| VAES_OPT | NEON_OPT;
opt_target_factor = 256.0;
return true;
};
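// The gate is plain function pointer dispatch: the miner threads only ever
// call gate->scanhash(), so the widest SIMD path enabled at compile time is
// chosen once here at registration.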


@@ -1,110 +0,0 @@
#include "lyra2-gate.h"
#if !( defined(ALLIUM_16WAY) || defined(ALLIUM_8WAY) || defined(ALLIUM_4WAY) )
#include <memory.h>
#include "algo/blake/sph_blake.h"
#include "algo/keccak/sph_keccak.h"
#include "algo/skein/sph_skein.h"
#include "algo/cubehash/cubehash_sse2.h"
#if defined(__AES__)
#include "algo/groestl/aes_ni/hash-groestl256.h"
#else
#include "algo/groestl/sph_groestl.h"
#endif
#include "lyra2.h"
typedef struct {
sph_blake256_context blake;
sph_keccak256_context keccak;
cubehashParam cube;
sph_skein256_context skein;
#if defined (__AES__)
hashState_groestl256 groestl;
#else
sph_groestl256_context groestl;
#endif
} allium_ctx_holder;
static __thread allium_ctx_holder allium_ctx;
bool init_allium_ctx()
{
sph_keccak256_init( &allium_ctx.keccak );
cubehashInit( &allium_ctx.cube, 256, 16, 32 );
sph_skein256_init( &allium_ctx.skein );
#if defined (__AES__)
init_groestl256( &allium_ctx.groestl, 32 );
#else
sph_groestl256_init( &allium_ctx.groestl );
#endif
return true;
}
void allium_hash(void *state, const void *input)
{
uint32_t hash[8] __attribute__ ((aligned (64)));
allium_ctx_holder ctx __attribute__ ((aligned (32)));
memcpy( &ctx, &allium_ctx, sizeof(allium_ctx) );
sph_blake256( &ctx.blake, input + 64, 16 );
sph_blake256_close( &ctx.blake, hash );
sph_keccak256( &ctx.keccak, hash, 32 );
sph_keccak256_close( &ctx.keccak, hash );
LYRA2RE( hash, 32, hash, 32, hash, 32, 1, 8, 8 );
cubehashUpdateDigest( &ctx.cube, (byte*)hash, (const byte*)hash, 32 );
LYRA2RE( hash, 32, hash, 32, hash, 32, 1, 8, 8 );
sph_skein256( &ctx.skein, hash, 32 );
sph_skein256_close( &ctx.skein, hash );
#if defined (__AES__)
update_and_final_groestl256( &ctx.groestl, hash, hash, 256 );
#else
sph_groestl256( &ctx.groestl, hash, 32 );
sph_groestl256_close( &ctx.groestl, hash );
#endif
memcpy(state, hash, 32);
}
int scanhash_allium( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(128) hash[8];
uint32_t _ALIGN(128) edata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t nonce = first_nonce;
const int thr_id = mythr->id;
if ( opt_benchmark )
ptarget[7] = 0x3ffff;
for ( int i = 0; i < 19; i++ )
edata[i] = bswap_32( pdata[i] );
sph_blake256_init( &allium_ctx.blake );
sph_blake256( &allium_ctx.blake, edata, 64 );
do {
edata[19] = nonce;
allium_hash( hash, edata );
if ( valid_hash( hash, ptarget ) && !opt_benchmark )
{
pdata[19] = bswap_32( nonce );
submit_solution( work, hash, mythr );
}
nonce++;
} while ( nonce < max_nonce && !work_restart[thr_id].restart );
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce;
return 0;
}
#endif


@@ -1,5 +1,5 @@
#include "lyra2-gate.h"
#include <mm_malloc.h>
//#include <mm_malloc.h>
// huge pages
//
@@ -48,10 +48,10 @@ bool lyra2rev3_thread_init()
#if defined(LYRA2REV3_16WAY)
// l2v3_wholeMatrix = _mm_malloc( 2*size, 128 );
l2v3_wholeMatrix = _mm_malloc( 2*size, 64 );
l2v3_wholeMatrix = mm_malloc( 2*size, 64 );
init_lyra2rev3_16way_ctx();
#else
l2v3_wholeMatrix = _mm_malloc( size, 64 );
l2v3_wholeMatrix = mm_malloc( size, 64 );
#if defined (LYRA2REV3_8WAY)
init_lyra2rev3_8way_ctx();
#elif defined (LYRA2REV3_4WAY)
@@ -95,13 +95,13 @@ bool lyra2rev2_thread_init()
int size = (int64_t)ROW_LEN_BYTES * 4; // nRows;
#if defined (LYRA2REV2_16WAY)
l2v2_wholeMatrix = _mm_malloc( 2 * size, 64 ); // 2 way
l2v2_wholeMatrix = mm_malloc( 2 * size, 64 ); // 2 way
init_lyra2rev2_16way_ctx();
#elif defined (LYRA2REV2_8WAY)
l2v2_wholeMatrix = _mm_malloc( size, 64 );
l2v2_wholeMatrix = mm_malloc( size, 64 );
init_lyra2rev2_8way_ctx();
#else
l2v2_wholeMatrix = _mm_malloc( size, 64 );
l2v2_wholeMatrix = mm_malloc( size, 64 );
init_lyra2rev2_ctx();
#endif
return l2v2_wholeMatrix;
@@ -125,6 +125,7 @@ bool register_lyra2rev2_algo( algo_gate_t* gate )
return true;
};
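// mm_malloc() / mm_free() are assumed to be portable aligned allocation
// wrappers replacing the x86 only _mm_malloc() / _mm_free() ( note the
// removed <mm_malloc.h> include above ). A minimal sketch of such a wrapper;
// the real definition ships elsewhere in this commit:
#include <stdlib.h>
static inline void *mm_malloc_ref( size_t size, size_t align )
{
   void *p = NULL;
   // align must be a power of two multiple of sizeof(void*)
   return posix_memalign( &p, align, size ) ? NULL : p;
}
static inline void mm_free_ref( void *p ) { free( p ); }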
/*
/////////////////////////////
bool register_lyra2z_algo( algo_gate_t* gate )
@@ -146,11 +147,11 @@ bool register_lyra2z_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_lyra2z;
gate->hash = (void*)&lyra2z_hash;
#endif
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT | NEON_OPT;
opt_target_factor = 256.0;
return true;
};
*/
////////////////////////
@@ -171,7 +172,7 @@ bool register_lyra2h_algo( algo_gate_t* gate )
};
/////////////////////////////////
/*
bool register_allium_algo( algo_gate_t* gate )
{
#if defined (ALLIUM_16WAY)
@@ -184,11 +185,11 @@ bool register_allium_algo( algo_gate_t* gate )
gate->hash = (void*)&allium_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT
| VAES_OPT;
| VAES_OPT | NEON_OPT;
opt_target_factor = 256.0;
return true;
};
*/
/////////////////////////////////////////
bool phi2_has_roots = false;


@@ -5,7 +5,6 @@
#include <stdint.h>
#include "lyra2.h"
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define LYRA2REV3_16WAY 1
#elif defined(__AVX2__)
@@ -74,7 +73,6 @@ int scanhash_lyra2rev2_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool init_lyra2rev2_8way_ctx();
#else
void lyra2rev2_hash( void *state, const void *input );
@@ -84,49 +82,6 @@ bool init_lyra2rev2_ctx();
#endif
/////////////////////////
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define LYRA2Z_16WAY 1
#elif defined(__AVX2__)
#define LYRA2Z_8WAY 1
#elif defined(__SSE2__)
#define LYRA2Z_4WAY 1
#endif
#define LYRA2Z_MATRIX_SIZE BLOCK_LEN_INT64 * 8 * 8 * 8
#if defined(LYRA2Z_16WAY)
//void lyra2z_16way_hash( void *state, const void *input );
int scanhash_lyra2z_16way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool lyra2z_16way_thread_init();
#elif defined(LYRA2Z_8WAY)
//void lyra2z_8way_hash( void *state, const void *input );
int scanhash_lyra2z_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool lyra2z_8way_thread_init();
#elif defined(LYRA2Z_4WAY)
void lyra2z_4way_hash( void *state, const void *input );
int scanhash_lyra2z_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool lyra2z_4way_thread_init();
#else
void lyra2z_hash( void *state, const void *input );
int scanhash_lyra2z( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool lyra2z_thread_init();
#endif
////////////////////
#if defined(__AVX2__)
@@ -151,35 +106,6 @@ bool lyra2h_thread_init();
#endif
//////////////////////////////////
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define ALLIUM_16WAY 1
#elif defined(__AVX2__) && defined(__AES__)
#define ALLIUM_8WAY 1
#endif
bool register_allium_algo( algo_gate_t* gate );
#if defined(ALLIUM_16WAY)
int scanhash_allium_16way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined(ALLIUM_8WAY)
int scanhash_allium_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#else
void allium_hash( void *state, const void *input );
int scanhash_allium( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool init_allium_ctx();
#endif
/////////////////////////////////////////
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)


@@ -21,8 +21,9 @@
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <mm_malloc.h>
//#include <mm_malloc.h>
#include "compat.h"
#include "miner.h"
#include "lyra2.h"
#include "sponge.h"
@@ -468,7 +469,7 @@ int LYRA2RE_2WAY( void *K, uint64_t kLen, const void *pwd,
: BLOCK_LEN_BLAKE2_SAFE_BYTES;
i = (int64_t)ROW_LEN_BYTES * nRows;
uint64_t *wholeMatrix = _mm_malloc( 2*i, 64 );
uint64_t *wholeMatrix = mm_malloc( 2*i, 64 );
if (wholeMatrix == NULL)
return -1;
@@ -570,7 +571,7 @@ int LYRA2RE_2WAY( void *K, uint64_t kLen, const void *pwd,
squeeze_2way( state, K, (unsigned int) kLen );
//================== Freeing the memory =============================//
_mm_free(wholeMatrix);
mm_free(wholeMatrix);
return 0;
}
@@ -602,7 +603,7 @@ int LYRA2X_2WAY( void *K, uint64_t kLen, const void *pwd,
: BLOCK_LEN_BLAKE2_SAFE_BYTES;
i = (int64_t)ROW_LEN_BYTES * nRows;
uint64_t *wholeMatrix = _mm_malloc( 2*i, 64 );
uint64_t *wholeMatrix = mm_malloc( 2*i, 64 );
if (wholeMatrix == NULL)
return -1;
@@ -704,7 +705,7 @@ int LYRA2X_2WAY( void *K, uint64_t kLen, const void *pwd,
squeeze_2way( state, K, (unsigned int) kLen );
//================== Freeing the memory =============================//
_mm_free(wholeMatrix);
mm_free(wholeMatrix);
return 0;
}


@@ -21,7 +21,8 @@
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <mm_malloc.h>
//#include <mm_malloc.h>
#include "miner.h"
#include "compat.h"
#include "lyra2.h"
#include "sponge.h"
@@ -463,7 +464,7 @@ int LYRA2Z( uint64_t* wholeMatrix, void *K, uint64_t kLen, const void *pwd,
//=================== Initializing the Sponge State ====================//
//Sponge state: 16 uint64_t, BLOCK_LEN_INT64 words of them for the bitrate (b) and the remainder for the capacity (c)
// uint64_t *state = _mm_malloc(16 * sizeof(uint64_t), 32);
// uint64_t *state = mm_malloc(16 * sizeof(uint64_t), 32);
// if (state == NULL) {
// return -1;
// }
@@ -572,7 +573,7 @@ int LYRA2RE( void *K, uint64_t kLen, const void *pwd, const uint64_t pwdlen,
: BLOCK_LEN_BLAKE2_SAFE_BYTES;
i = (int64_t)ROW_LEN_BYTES * nRows;
uint64_t *wholeMatrix = _mm_malloc( i, 64 );
uint64_t *wholeMatrix = mm_malloc( i, 64 );
if (wholeMatrix == NULL)
return -1;
@@ -720,7 +721,7 @@ int LYRA2RE( void *K, uint64_t kLen, const void *pwd, const uint64_t pwdlen,
squeeze(state, K, (unsigned int) kLen);
//================== Freeing the memory =============================//
_mm_free(wholeMatrix);
mm_free(wholeMatrix);
return 0;
}


@@ -37,8 +37,8 @@ typedef unsigned char byte;
#define BLOCK_LEN_BYTES (BLOCK_LEN_INT64 * 8) //Block length, in bytes
#endif
#define BLOCK_LEN_M256I (BLOCK_LEN_INT64 / 4 )
#define BLOCK_LEN_M128I (BLOCK_LEN_INT64 / 2 )
#define BLOCK_LEN_256 (BLOCK_LEN_INT64 / 4 )
#define BLOCK_LEN_128 (BLOCK_LEN_INT64 / 2 )
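// Width based names ( 256 / 128 bit blocks ) replace the x86 specific
// M256I / M128I type names, consistent with this commit's ARM/NEON
// portability changes.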
int LYRA2RE( void *K, uint64_t kLen, const void *pwd,
uint64_t pwdlen, const void *salt, uint64_t saltlen,


@@ -3,7 +3,7 @@
#ifdef LYRA2H_4WAY
#include <memory.h>
#include <mm_malloc.h>
//#include <mm_malloc.h>
#include "lyra2.h"
#include "algo/blake/blake256-hash.h"
@@ -11,7 +11,7 @@ __thread uint64_t* lyra2h_4way_matrix;
bool lyra2h_4way_thread_init()
{
return ( lyra2h_4way_matrix = _mm_malloc( LYRA2H_MATRIX_SIZE, 64 ) );
return ( lyra2h_4way_matrix = mm_malloc( LYRA2H_MATRIX_SIZE, 64 ) );
}
static __thread blake256_4way_context l2h_4way_blake_mid;


@@ -3,7 +3,7 @@
#if !( defined(LYRA2H_8WAY) || defined(LYRA2H_4WAY) )
#include <memory.h>
#include <mm_malloc.h>
//#include <mm_malloc.h>
#include "lyra2.h"
#include "algo/blake/sph_blake.h"
@@ -11,7 +11,7 @@ __thread uint64_t* lyra2h_matrix;
bool lyra2h_thread_init()
{
lyra2h_matrix = _mm_malloc( LYRA2H_MATRIX_SIZE, 64 );
lyra2h_matrix = mm_malloc( LYRA2H_MATRIX_SIZE, 64 );
return lyra2h_matrix;
}


@@ -1,16 +1,27 @@
#include "lyra2-gate.h"
#include <memory.h>
#include <mm_malloc.h>
#include "lyra2.h"
#include "algo/blake/blake256-hash.h"
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define LYRA2Z_16WAY 1
#elif defined(__AVX2__)
#define LYRA2Z_8WAY 1
#elif defined(__SSE2__)
#define LYRA2Z_4WAY 1
//#else
// NEON 1 way SIMD
#endif
#define LYRA2Z_MATRIX_SIZE BLOCK_LEN_INT64 * 8 * 8 * 8
#if defined(LYRA2Z_16WAY)
__thread uint64_t* lyra2z_16way_matrix;
bool lyra2z_16way_thread_init()
{
return ( lyra2z_16way_matrix = _mm_malloc( 2*LYRA2Z_MATRIX_SIZE, 64 ) );
return ( lyra2z_16way_matrix = mm_malloc( 2*LYRA2Z_MATRIX_SIZE, 64 ) );
}
static void lyra2z_16way_hash( void *state, const void *midstate_vars,
@@ -153,7 +164,7 @@ __thread uint64_t* lyra2z_8way_matrix;
bool lyra2z_8way_thread_init()
{
return ( lyra2z_8way_matrix = _mm_malloc( LYRA2Z_MATRIX_SIZE, 64 ) );
return ( lyra2z_8way_matrix = mm_malloc( LYRA2Z_MATRIX_SIZE, 64 ) );
}
static void lyra2z_8way_hash( void *state, const void *midstate_vars,
@@ -259,12 +270,13 @@ int scanhash_lyra2z_8way( struct work *work, uint32_t max_nonce,
#elif defined(LYRA2Z_4WAY)
// SSE2 or NEON
__thread uint64_t* lyra2z_4way_matrix;
bool lyra2z_4way_thread_init()
{
return ( lyra2z_4way_matrix = _mm_malloc( LYRA2Z_MATRIX_SIZE, 64 ) );
return ( lyra2z_4way_matrix = mm_malloc( LYRA2Z_MATRIX_SIZE, 64 ) );
}
static __thread blake256_4way_context l2z_4way_blake_mid;
@@ -275,59 +287,90 @@ void lyra2z_4way_midstate( const void* input )
blake256_4way_update( &l2z_4way_blake_mid, input, 64 );
}
void lyra2z_4way_hash( void *state, const void *input )
void lyra2z_4way_hash( void *hash, const void *midstate_vars,
const void *midhash, const void *block )
{
uint32_t hash0[8] __attribute__ ((aligned (64)));
uint32_t hash1[8] __attribute__ ((aligned (64)));
uint32_t hash2[8] __attribute__ ((aligned (64)));
uint32_t hash3[8] __attribute__ ((aligned (64)));
uint32_t vhash[8*4] __attribute__ ((aligned (64)));
blake256_4way_context ctx_blake __attribute__ ((aligned (64)));
// blake256_4way_context ctx_blake __attribute__ ((aligned (64)));
blake256_4way_final_rounds_le( vhash, midstate_vars, midhash, block, 14 );
/*
memcpy( &ctx_blake, &l2z_4way_blake_mid, sizeof l2z_4way_blake_mid );
blake256_4way_update( &ctx_blake, input + (64*4), 16 );
blake256_4way_close( &ctx_blake, vhash );
*/
dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 256 );
LYRA2Z( lyra2z_4way_matrix, state , 32, hash0, 32, hash0, 32, 8, 8, 8 );
LYRA2Z( lyra2z_4way_matrix, state+32, 32, hash1, 32, hash1, 32, 8, 8, 8 );
LYRA2Z( lyra2z_4way_matrix, state+64, 32, hash2, 32, hash2, 32, 8, 8, 8 );
LYRA2Z( lyra2z_4way_matrix, state+96, 32, hash3, 32, hash3, 32, 8, 8, 8 );
LYRA2Z( lyra2z_4way_matrix, hash , 32, hash0, 32, hash0, 32, 8, 8, 8 );
LYRA2Z( lyra2z_4way_matrix, hash+32, 32, hash1, 32, hash1, 32, 8, 8, 8 );
LYRA2Z( lyra2z_4way_matrix, hash+64, 32, hash2, 32, hash2, 32, 8, 8, 8 );
LYRA2Z( lyra2z_4way_matrix, hash+96, 32, hash3, 32, hash3, 32, 8, 8, 8 );
}
int scanhash_lyra2z_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint64_t hash[4*4] __attribute__ ((aligned (64)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t midstate_vars[16*4] __attribute__ ((aligned (64)));
v128_t block0_hash[8] __attribute__ ((aligned (64)));
v128_t block_buf[16] __attribute__ ((aligned (64)));
uint32_t phash[8] __attribute__ ((aligned (32))) =
{
0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint64_t *ptarget = (uint64_t*)work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
uint32_t n = first_nonce;
__m128i *noncev = (__m128i*)vdata + 19; // aligned
const int thr_id = mythr->id;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
const v128u32_t four = v128_32(4);
if ( bench ) ptarget[7] = 0x0000ff;
// Prehash first block
blake256_transform_le( phash, pdata, 512, 0, 14 );
v128_bswap32_intrlv80_4x32( vdata, pdata );
*noncev = _mm_set_epi32( n+3, n+2, n+1, n );
lyra2z_4way_midstate( vdata );
block0_hash[0] = v128_32( phash[0] );
block0_hash[1] = v128_32( phash[1] );
block0_hash[2] = v128_32( phash[2] );
block0_hash[3] = v128_32( phash[3] );
block0_hash[4] = v128_32( phash[4] );
block0_hash[5] = v128_32( phash[5] );
block0_hash[6] = v128_32( phash[6] );
block0_hash[7] = v128_32( phash[7] );
// Build the vectorized second block; interleave the last 16 bytes of data
// using unique per-lane nonces.
block_buf[ 0] = v128_32( pdata[16] );
block_buf[ 1] = v128_32( pdata[17] );
block_buf[ 2] = v128_32( pdata[18] );
block_buf[ 3] = v128_set32( n+3, n+2, n+1, n );
block_buf[ 4] = v128_32( 0x80000000 );
block_buf[13] = v128_32( 1 );
block_buf[15] = v128_32( 640 );
// Partially prehash the second block without touching the nonces
blake256_4way_round0_prehash_le( midstate_vars, block0_hash, block_buf );
do {
lyra2z_4way_hash( hash, vdata );
lyra2z_4way_hash( hash, midstate_vars, block0_hash, block_buf );
for ( int lane = 0; lane < 4; lane++ )
{
const uint64_t *lane_hash = hash + (lane<<2);
if ( unlikely( valid_hash( lane_hash, ptarget ) && !bench ) )
{
pdata[19] = bswap_32( n + lane );
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm_add_epi32( *noncev, _mm_set1_epi32( 4 ) );
block_buf[ 3] = v128_add32( block_buf[ 3], four );
n += 4;
} while ( likely( (n < last_nonce) && !work_restart[thr_id].restart ) );
@@ -336,5 +379,97 @@ int scanhash_lyra2z_4way( struct work *work, uint32_t max_nonce,
return 0;
}
#else
// not used
__thread uint64_t* lyra2z_matrix;
bool lyra2z_thread_init()
{
const int i = BLOCK_LEN_INT64 * 8 * 8 * 8;
lyra2z_matrix = mm_malloc( i, 64 );
return lyra2z_matrix;
}
static __thread blake256_context lyra2z_blake_mid;
void lyra2z_midstate( const void* input )
{
blake256_init( &lyra2z_blake_mid );
blake256_update( &lyra2z_blake_mid, input, 64 );
}
void lyra2z_hash( void *state, const void *input )
{
uint32_t _ALIGN(32) hash[16];
blake256_context ctx_blake __attribute__ ((aligned (64)));
memcpy( &ctx_blake, &lyra2z_blake_mid, sizeof (blake256_context) );
blake256_update( &ctx_blake, input + 64, 16 );
blake256_close( &ctx_blake, hash );
LYRA2Z( lyra2z_matrix, hash, 32, hash, 32, hash, 32, 8, 8, 8 );
memcpy( state, hash, 32 );
}
int scanhash_lyra2z( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(64) hash[8];
uint32_t _ALIGN(64) endiandata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t nonce = first_nonce;
int thr_id = mythr->id;
if (opt_benchmark) ptarget[7] = 0x0000ff;
// for ( int i = 0; i < 20; i++ ) endiandata[i] = bswap_32( pdata[i] );
v128_bswap32_80( endiandata, pdata );
lyra2z_midstate( endiandata );
do {
endiandata[19] = bswap_32( nonce );
lyra2z_hash( hash, endiandata );
if ( valid_hash( hash, ptarget ) && !opt_benchmark )
{
pdata[19] = nonce;
submit_solution( work, hash, mythr );
}
nonce++;
} while ( nonce < max_nonce && !work_restart[thr_id].restart );
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
return 0;
}
#endif
bool register_lyra2z_algo( algo_gate_t* gate )
{
#if defined(LYRA2Z_16WAY)
gate->miner_thread_init = (void*)&lyra2z_16way_thread_init;
gate->scanhash = (void*)&scanhash_lyra2z_16way;
// gate->hash = (void*)&lyra2z_16way_hash;
#elif defined(LYRA2Z_8WAY)
gate->miner_thread_init = (void*)&lyra2z_8way_thread_init;
gate->scanhash = (void*)&scanhash_lyra2z_8way;
// gate->hash = (void*)&lyra2z_8way_hash;
#elif defined(LYRA2Z_4WAY)
gate->miner_thread_init = (void*)&lyra2z_4way_thread_init;
gate->scanhash = (void*)&scanhash_lyra2z_4way;
gate->hash = (void*)&lyra2z_4way_hash;
#else
gate->miner_thread_init = (void*)&lyra2z_thread_init;
gate->scanhash = (void*)&scanhash_lyra2z;
gate->hash = (void*)&lyra2z_hash;
#endif
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT | NEON_OPT;
opt_target_factor = 256.0;
return true;
};


@@ -1,84 +0,0 @@
#include <memory.h>
#include <mm_malloc.h>
#include "lyra2-gate.h"
#if !( defined(LYRA2Z_16WAY) || defined(LYRA2Z_8WAY) || defined(LYRA2Z_4WAY) )
#include "lyra2.h"
#include "algo/blake/sph_blake.h"
#include "simd-utils.h"
__thread uint64_t* lyra2z_matrix;
bool lyra2z_thread_init()
{
// const int64_t ROW_LEN_INT64 = BLOCK_LEN_INT64 * 8; // nCols
// const int64_t ROW_LEN_BYTES = ROW_LEN_INT64 * 8;
// int i = (int64_t)ROW_LEN_BYTES * 8; // nRows;
const int i = BLOCK_LEN_INT64 * 8 * 8 * 8;
lyra2z_matrix = _mm_malloc( i, 64 );
return lyra2z_matrix;
}
static __thread sph_blake256_context lyra2z_blake_mid;
void lyra2z_midstate( const void* input )
{
sph_blake256_init( &lyra2z_blake_mid );
sph_blake256( &lyra2z_blake_mid, input, 64 );
}
// Block 2050: new algo, blake plus new lyra params. The new input
// is a power of 2 so normal lyra can be used.
//void zcoin_hash(void *state, const void *input, uint32_t height)
void lyra2z_hash( void *state, const void *input )
{
uint32_t _ALIGN(64) hash[16];
sph_blake256_context ctx_blake __attribute__ ((aligned (64)));
memcpy( &ctx_blake, &lyra2z_blake_mid, sizeof lyra2z_blake_mid );
sph_blake256( &ctx_blake, input + 64, 16 );
sph_blake256_close( &ctx_blake, hash );
LYRA2Z( lyra2z_matrix, hash, 32, hash, 32, hash, 32, 8, 8, 8);
memcpy(state, hash, 32);
}
int scanhash_lyra2z( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(64) hash[8];
uint32_t _ALIGN(64) endiandata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t nonce = first_nonce;
int thr_id = mythr->id;
if (opt_benchmark)
ptarget[7] = 0x0000ff;
for (int i=0; i < 19; i++) {
be32enc(&endiandata[i], pdata[i]);
}
lyra2z_midstate( endiandata );
do {
be32enc(&endiandata[19], nonce);
lyra2z_hash( hash, endiandata );
if ( valid_hash( hash, ptarget ) && !opt_benchmark )
{
pdata[19] = nonce;
submit_solution( work, hash, mythr );
}
nonce++;
} while ( nonce < max_nonce && !work_restart[thr_id].restart );
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
return 0;
}
#endif


@@ -2,7 +2,6 @@
#include "algo-gate-api.h"
#include "lyra2.h"
#include "simd-utils.h"
#include <mm_malloc.h>
static __thread uint64_t* lyra2z330_wholeMatrix;
@@ -62,14 +61,14 @@ bool lyra2z330_thread_init()
const int64_t ROW_LEN_BYTES = ROW_LEN_INT64 * 8;
int i = (int64_t)ROW_LEN_BYTES * 330; // nRows;
lyra2z330_wholeMatrix = _mm_malloc( i, 64 );
lyra2z330_wholeMatrix = mm_malloc( i, 64 );
return lyra2z330_wholeMatrix;
}
bool register_lyra2z330_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | AVX2_OPT;
gate->optimizations = SSE2_OPT | AVX2_OPT | NEON_OPT;
gate->miner_thread_init = (void*)&lyra2z330_thread_init;
gate->scanhash = (void*)&scanhash_lyra2z330;
gate->hash = (void*)&lyra2z330_hash;


@@ -32,7 +32,7 @@
inline void squeeze_2way( uint64_t *State, byte *Out, unsigned int len )
{
const int len_m256i = len / 32;
const int fullBlocks = len_m256i / BLOCK_LEN_M256I;
const int fullBlocks = len_m256i / BLOCK_LEN_256;
__m512i* state = (__m512i*)State;
__m512i* out = (__m512i*)Out;
int i;
@@ -40,12 +40,12 @@ inline void squeeze_2way( uint64_t *State, byte *Out, unsigned int len )
//Squeezes full blocks
for ( i = 0; i < fullBlocks; i++ )
{
memcpy_512( out, state, BLOCK_LEN_M256I );
memcpy_512( out, state, BLOCK_LEN_256 );
LYRA_ROUND_2WAY_AVX512( state[0], state[1], state[2], state[3] );
out += BLOCK_LEN_M256I;
out += BLOCK_LEN_256;
}
//Squeezes remaining bytes
memcpy_512( out, state, len_m256i % BLOCK_LEN_M256I );
memcpy_512( out, state, len_m256i % BLOCK_LEN_256 );
}
inline void absorbBlock_2way( uint64_t *State, const uint64_t *In0,
@@ -116,7 +116,7 @@ inline void reducedSqueezeRow0_2way( uint64_t* State, uint64_t* rowOut,
register __m512i state0, state1, state2, state3;
__m512i* out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_M256I );
__m512i* out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_256 );
state0 = _mm512_load_si512( (__m512i*)State );
state1 = _mm512_load_si512( (__m512i*)State + 1 );
@@ -139,7 +139,7 @@ inline void reducedSqueezeRow0_2way( uint64_t* State, uint64_t* rowOut,
out[2] = state2;
//Goes to next block (column) that will receive the squeezed data
out -= BLOCK_LEN_M256I;
out -= BLOCK_LEN_256;
LYRA_ROUND_2WAY_AVX512( state0, state1, state2, state3 );
}
@@ -157,7 +157,7 @@ inline void reducedDuplexRow1_2way( uint64_t *State, uint64_t *rowIn,
int i;
register __m512i state0, state1, state2, state3;
__m512i *in = (__m512i*)rowIn;
__m512i *out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_M256I );
__m512i *out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_256 );
state0 = _mm512_load_si512( (__m512i*)State );
state1 = _mm512_load_si512( (__m512i*)State + 1 );
@@ -177,9 +177,9 @@ inline void reducedDuplexRow1_2way( uint64_t *State, uint64_t *rowIn,
out[2] = _mm512_xor_si512( state2, in[2] );
//Input: next column (i.e., next block in sequence)
in += BLOCK_LEN_M256I;
in += BLOCK_LEN_256;
//Output: goes to previous column
out -= BLOCK_LEN_M256I;
out -= BLOCK_LEN_256;
}
_mm512_store_si512( (__m512i*)State, state0 );
@@ -195,7 +195,7 @@ inline void reducedDuplexRowSetup_2way( uint64_t *State, uint64_t *rowIn,
register __m512i state0, state1, state2, state3;
__m512i* in = (__m512i*)rowIn;
__m512i* inout = (__m512i*)rowInOut;
__m512i* out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_M256I );
__m512i* out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_256 );
state0 = _mm512_load_si512( (__m512i*)State );
state1 = _mm512_load_si512( (__m512i*)State + 1 );
@@ -234,10 +234,10 @@ inline void reducedDuplexRowSetup_2way( uint64_t *State, uint64_t *rowIn,
}
//Inputs: next column (i.e., next block in sequence)
in += BLOCK_LEN_M256I;
inout += BLOCK_LEN_M256I;
in += BLOCK_LEN_256;
inout += BLOCK_LEN_256;
//Output: goes to previous column
out -= BLOCK_LEN_M256I;
out -= BLOCK_LEN_256;
}
_mm512_store_si512( (__m512i*)State, state0 );
@@ -336,10 +336,10 @@ static inline void reducedDuplexRow_2way_normal( uint64_t *State,
_mm512_mask_store_epi64( inout1 +2, 0xf0, io2 );
//Goes to next block
in += BLOCK_LEN_M256I;
inout0 += BLOCK_LEN_M256I;
inout1 += BLOCK_LEN_M256I;
out += BLOCK_LEN_M256I;
in += BLOCK_LEN_256;
inout0 += BLOCK_LEN_256;
inout1 += BLOCK_LEN_256;
out += BLOCK_LEN_256;
}
_mm512_store_si512( (__m512i*)State, state0 );
@@ -458,10 +458,10 @@ static inline void reducedDuplexRow_2way_overlap( uint64_t *State,
_mm512_mask_store_epi64( inout1 +2, 0xf0, io.v512[2] );
*/
//Goes to next block
in += BLOCK_LEN_M256I;
inout0 += BLOCK_LEN_M256I;
inout1 += BLOCK_LEN_M256I;
out += BLOCK_LEN_M256I;
in += BLOCK_LEN_256;
inout0 += BLOCK_LEN_256;
inout1 += BLOCK_LEN_256;
out += BLOCK_LEN_256;
}
_mm512_store_si512( (__m512i*)State, state0 );
@@ -550,10 +550,10 @@ static inline void reducedDuplexRow_2way_overlap_X( uint64_t *State,
inout1[5] = inout.v256[5];
//Goes to next block
in += BLOCK_LEN_M256I;
inout0 += BLOCK_LEN_M256I * 2;
inout1 += BLOCK_LEN_M256I * 2;
out += BLOCK_LEN_M256I;
in += BLOCK_LEN_256;
inout0 += BLOCK_LEN_256 * 2;
inout1 += BLOCK_LEN_256 * 2;
out += BLOCK_LEN_256;
}
_mm512_store_si512( (__m512i*)State, state0 );
@@ -610,9 +610,9 @@ static inline void reducedDuplexRow_2way_unified( uint64_t *State,
}
//Goes to next block
in += BLOCK_LEN_M256I;
inout += BLOCK_LEN_M256I;
out += BLOCK_LEN_M256I;
in += BLOCK_LEN_256;
inout += BLOCK_LEN_256;
out += BLOCK_LEN_256;
}
_mm512_store_si512( (__m512i*)State, state0 );

File diff suppressed because it is too large


@@ -97,11 +97,11 @@ static const uint64_t blake2b_IV[8] =
#define G_4X64(a,b,c,d) \
a = _mm256_add_epi64( a, b ); \
d = mm256_swap64_32( _mm256_xor_si256( d, a ) ); \
d = mm256_ror_64( _mm256_xor_si256( d, a ), 32 ); \
c = _mm256_add_epi64( c, d ); \
b = mm256_shuflr64_24( _mm256_xor_si256( b, c ) ); \
b = mm256_ror_64( _mm256_xor_si256( b, c ), 24 ); \
a = _mm256_add_epi64( a, b ); \
d = mm256_shuflr64_16( _mm256_xor_si256( d, a ) ); \
d = mm256_ror_64( _mm256_xor_si256( d, a ), 16 ); \
c = _mm256_add_epi64( c, d ); \
b = mm256_ror_64( _mm256_xor_si256( b, c ), 63 );
@@ -144,38 +144,38 @@ static const uint64_t blake2b_IV[8] =
#endif
#if defined(__SSE2__)
#if defined(__SSE2__) || defined(__ARM_NEON)
// process 2 columns in parallel
// returns void, all args updated
#define G_2X64(a,b,c,d) \
a = _mm_add_epi64( a, b ); \
d = mm128_swap64_32( _mm_xor_si128( d, a) ); \
c = _mm_add_epi64( c, d ); \
b = mm128_shuflr64_24( _mm_xor_si128( b, c ) ); \
a = _mm_add_epi64( a, b ); \
d = mm128_shuflr64_16( _mm_xor_si128( d, a ) ); \
c = _mm_add_epi64( c, d ); \
b = mm128_ror_64( _mm_xor_si128( b, c ), 63 );
a = v128_add64( a, b ); \
d = v128_ror64( v128_xor( d, a), 32 ); \
c = v128_add64( c, d ); \
b = v128_ror64( v128_xor( b, c ), 24 ); \
a = v128_add64( a, b ); \
d = v128_ror64( v128_xor( d, a ), 16 ); \
c = v128_add64( c, d ); \
b = v128_ror64( v128_xor( b, c ), 63 );
#define LYRA_ROUND_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \
{ \
__m128i t; \
v128u64_t t; \
G_2X64( s0, s2, s4, s6 ); \
G_2X64( s1, s3, s5, s7 ); \
t = mm128_alignr_64( s7, s6, 1 ); \
s6 = mm128_alignr_64( s6, s7, 1 ); \
t = v128_alignr64( s7, s6, 1 ); \
s6 = v128_alignr64( s6, s7, 1 ); \
s7 = t; \
t = mm128_alignr_64( s2, s3, 1 ); \
s2 = mm128_alignr_64( s3, s2, 1 ); \
t = v128_alignr64( s2, s3, 1 ); \
s2 = v128_alignr64( s3, s2, 1 ); \
s3 = t; \
G_2X64( s0, s2, s5, s6 ); \
G_2X64( s1, s3, s4, s7 ); \
t = mm128_alignr_64( s6, s7, 1 ); \
s6 = mm128_alignr_64( s7, s6, 1 ); \
t = v128_alignr64( s6, s7, 1 ); \
s6 = v128_alignr64( s7, s6, 1 ); \
s7 = t; \
t = mm128_alignr_64( s3, s2, 1 ); \
s2 = mm128_alignr_64( s2, s3, 1 ); \
t = v128_alignr64( s3, s2, 1 ); \
s2 = v128_alignr64( s2, s3, 1 ); \
s3 = t; \
}
@@ -195,34 +195,31 @@ static const uint64_t blake2b_IV[8] =
#endif // AVX2 else SSE2
// Scalar, not used.
static inline uint64_t rotr64( const uint64_t w, const unsigned c ){
return ( w >> c ) | ( w << ( 64 - c ) );
}
#define G(r,i,a,b,c,d) \
do { \
#define G( r, i, a, b, c, d ) \
{ \
a = a + b; \
d = rotr64(d ^ a, 32); \
d = ror64( (d) ^ (a), 32 ); \
c = c + d; \
b = rotr64(b ^ c, 24); \
b = ror64( (b) ^ (c), 24 ); \
a = a + b; \
d = rotr64(d ^ a, 16); \
d = ror64( (d) ^ (a), 16 ); \
c = c + d; \
b = rotr64(b ^ c, 63); \
} while(0)
b = ror64( (b) ^ (c), 63 ); \
}
#define ROUND_LYRA(r) \
G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
G(r,2,v[ 2],v[ 6],v[10],v[14]); \
G(r,3,v[ 3],v[ 7],v[11],v[15]); \
G(r,4,v[ 0],v[ 5],v[10],v[15]); \
G(r,5,v[ 1],v[ 6],v[11],v[12]); \
G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
G(r,7,v[ 3],v[ 4],v[ 9],v[14]);
G( r, 0, v[ 0], v[ 4], v[ 8], v[12] ); \
G( r, 1, v[ 1], v[ 5], v[ 9], v[13] ); \
G( r, 2, v[ 2], v[ 6], v[10], v[14] ); \
G( r, 3, v[ 3], v[ 7], v[11], v[15] ); \
G( r, 4, v[ 0], v[ 5], v[10], v[15] ); \
G( r, 5, v[ 1], v[ 6], v[11], v[12] ); \
G( r, 6, v[ 2], v[ 7], v[ 8], v[13] ); \
G( r, 7, v[ 3], v[ 4], v[ 9], v[14] );
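// ror64() is assumed to be the repo's generic 64 bit rotate, equivalent to
// the rotr64() helper above: ror64( w, c ) == ( w >> c ) | ( w << (64-c) ).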
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)