This commit is contained in:
Jay D Dee
2019-12-17 00:57:35 -05:00
parent a17ff6f189
commit d741f1c9a9
51 changed files with 5473 additions and 911 deletions

View File

@@ -44,8 +44,13 @@ bool lyra2rev3_thread_init()
{
const int64_t ROW_LEN_INT64 = BLOCK_LEN_INT64 * 4; // nCols
const int64_t ROW_LEN_BYTES = ROW_LEN_INT64 * 8;
int size = ROW_LEN_BYTES * 4; // nRows;
int size = (int64_t)ROW_LEN_BYTES * 4; // nRows;
#if defined(LYRA2REV3_16WAY)
// l2v3_wholeMatrix = _mm_malloc( 2*size, 128 );
l2v3_wholeMatrix = _mm_malloc( 2*size, 64 );
init_lyra2rev3_16way_ctx();;
#else
l2v3_wholeMatrix = _mm_malloc( size, 64 );
#if defined (LYRA2REV3_8WAY)
init_lyra2rev3_8way_ctx();;
@@ -53,13 +58,17 @@ bool lyra2rev3_thread_init()
init_lyra2rev3_4way_ctx();;
#else
init_lyra2rev3_ctx();
#endif
#endif
return l2v3_wholeMatrix;
}
bool register_lyra2rev3_algo( algo_gate_t* gate )
{
#if defined (LYRA2REV3_8WAY)
#if defined(LYRA2REV3_16WAY)
gate->scanhash = (void*)&scanhash_lyra2rev3_16way;
gate->hash = (void*)&lyra2rev3_16way_hash;
#elif defined (LYRA2REV3_8WAY)
gate->scanhash = (void*)&scanhash_lyra2rev3_8way;
gate->hash = (void*)&lyra2rev3_8way_hash;
#elif defined (LYRA2REV3_4WAY)
@@ -69,6 +78,7 @@ bool register_lyra2rev3_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_lyra2rev3;
gate->hash = (void*)&lyra2rev3_hash;
#endif
// gate->optimizations = SSE2_OPT | SSE42_OPT | AVX2_OPT | AVX512_OPT;
gate->optimizations = SSE2_OPT | SSE42_OPT | AVX2_OPT;
gate->miner_thread_init = (void*)&lyra2rev3_thread_init;
opt_target_factor = 256.0;

View File

@@ -5,18 +5,29 @@
#include <stdint.h>
#include "lyra2.h"
/*
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define LYRA2REV3_16WAY 1
#elif defined(__AVX2__)
*/
#if defined(__AVX2__)
#define LYRA2REV3_8WAY
#endif
#if defined(__SSE2__)
#define LYRA2REV3_4WAY
#define LYRA2REV3_8WAY 1
#elif defined(__SSE2__)
#define LYRA2REV3_4WAY 1
#endif
extern __thread uint64_t* l2v3_wholeMatrix;
bool register_lyra2rev3_algo( algo_gate_t* gate );
#if defined(LYRA2REV3_8WAY)
#if defined(LYRA2REV3_16WAY)
void lyra2rev3_16way_hash( void *state, const void *input );
int scanhash_lyra2rev3_16way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool init_lyra2rev3_16way_ctx();
#elif defined(LYRA2REV3_8WAY)
void lyra2rev3_8way_hash( void *state, const void *input );
int scanhash_lyra2rev3_8way( struct work *work, uint32_t max_nonce,

View File

@@ -46,6 +46,7 @@
* @return 0 if the key is generated correctly; -1 if there is an error (usually due to lack of memory for allocation)
*/
#if 0
int LYRA2REV2( uint64_t* wholeMatrix, void *K, uint64_t kLen, const void *pwd,
const uint64_t pwdlen, const void *salt, const uint64_t saltlen,
const uint64_t timeCost, const uint64_t nRows,
@@ -216,29 +217,55 @@ int LYRA2REV2( uint64_t* wholeMatrix, void *K, uint64_t kLen, const void *pwd,
return 0;
}
#endif
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// This version is currently only used by REv3 and has some hard coding
// specific to v3 such as input data size of 32 bytes.
//
// Similarly with REv2. The difference with REv3 isn't clear and maybe
// they can be merged.
//
// RE is used by RE, allium. The main difference between RE and REv2
// is the matrix size.
//
// Z also needs to support 80 byte input as well as 32 byte, and odd
// matrix sizes like 330 rows. It is used by lyra2z330, lyra2z, lyra2h.
/////////////////////////////////////////////////
// 2 way 256
// drop salt, salt len arguments, hard code some others.
// Data is interleaved 2x256.
//int LYRA2REV3_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
// const void *pwd, uint64_t pwdlen, uint64_t timeCost,
// uint64_t nRows, uint64_t nCols )
// hard coded for 32 byte input as well as matrix size.
// Other required versions include 80 byte input and different block
// sizes.
int LYRA2REV3_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
const void *pwd, const uint64_t pwdlen, const void *salt,
const uint64_t saltlen, const uint64_t timeCost, const uint64_t nRows,
const uint64_t nCols )
{
//====================== Basic variables ============================//
uint64_t _ALIGN(256) state[16];
int64_t row = 2; //index of row to be processed
int64_t prev = 1; //index of prev (last row ever computed/modified)
int64_t rowa = 0; //index of row* (a previous row, deterministically picked during Setup and randomly picked while Wandering)
int64_t tau; //Time Loop iterator
int64_t step = 1; //Visitation step (used during Setup and Wandering phases)
int64_t window = 2; //Visitation window (used to define which rows can be revisited during Setup)
int64_t gap = 1; //Modifier to the step, assuming the values 1 or -1
uint64_t _ALIGN(256) state[32];
int64_t row = 2;
int64_t prev = 1;
int64_t rowa0 = 0;
int64_t rowa1 = 0;
int64_t tau;
int64_t step = 1;
int64_t window = 2;
int64_t gap = 1;
// int64_t i; //auxiliary iteration counter
int64_t v64; // 64bit var for memcpy
uint64_t instance0 = 0; // Separate instance for each lane
// int64_t v64; // 64bit var for memcpy
uint64_t instance0 = 0;
uint64_t instance1 = 0;
//====================================================================/
@@ -248,7 +275,9 @@ int LYRA2REV3_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
uint64_t *ptrWord = wholeMatrix;
// 2 way 256 rewrite. Salt always == password, and data is interleaved,
// need to build in parallel:
// need to build in parallel as pw is already interleaved.
// { password, (64 or 80 bytes)
// salt, (64 or 80 bytes) = same as password
// Klen, (u64) = 32 bytes
@@ -262,16 +291,45 @@ int LYRA2REV3_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
// 1 (byte)
// }
// memset( wholeMatrix, 0, ROW_LEN_BYTES * nRows );
// It's all u64 so don't use byte
// input is usually 32 maybe 64, both are aligned to 256 bit vector.
// 80 byte input is not aligned, complicating matters for lyra2z.
int64_t nBlocksInput = ( ( saltlen + pwdlen + 6 * sizeof(uint64_t) )
/ BLOCK_LEN_BLAKE2_SAFE_BYTES ) + 1;
uint64_t *ptr = wholeMatrix;
uint64_t *pw = (uint64_t*)pwd;
byte *ptrByte = (byte*) wholeMatrix;
memcpy( ptr, pw, 2*pwdlen ); // password
ptr += pwdlen>>2;
memcpy( ptr, pw, 2*pwdlen ); // password lane 1
ptr += pwdlen>>2;
// now build the rest interleaving on the fly.
//Prepends the password
memcpy(ptrByte, pwd, pwdlen);
ptrByte += pwdlen;
ptr[0] = ptr[ 4] = kLen;
ptr[1] = ptr[ 5] = pwdlen;
ptr[2] = ptr[ 6] = pwdlen; // saltlen
ptr[3] = ptr[ 7] = timeCost;
ptr[8] = ptr[12] = nRows;
ptr[9] = ptr[13] = nCols;
ptr[10] = ptr[14] = 0x80;
ptr[11] = ptr[15] = 0x0100000000000000;
ptr = wholeMatrix;
/*
// do it the old way to compare.
uint64_t pb[512];
byte* ptrByte = (byte*)pb;
//Prepends the password (use salt for testing)
memcpy( ptrByte, salt, saltlen );
ptrByte += saltlen;
//Concatenates the salt
memcpy(ptrByte, salt, saltlen);
@@ -280,55 +338,259 @@ int LYRA2REV3_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
memset( ptrByte, 0, nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES
- (saltlen + pwdlen) );
//Concatenates the basil: every integer passed as parameter, in the order they are provided by the interface
memcpy(ptrByte, &kLen, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = pwdlen;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = saltlen;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = timeCost;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = nRows;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = nCols;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
memcpy(ptrByte, &kLen, 8);
ptrByte += 8;
memcpy(ptrByte, &pwdlen, 8);
ptrByte += 8;
memcpy(ptrByte, &saltlen, 8);
ptrByte += 8;
memcpy(ptrByte, &timeCost, 8);
ptrByte += 8;
memcpy(ptrByte, &nRows, 8);
ptrByte += 8;
memcpy(ptrByte, &nCols, 8);
ptrByte += 8;
//Now comes the padding
*ptrByte = 0x80; //first byte of padding: right after the password
ptrByte = (byte*) wholeMatrix; //resets the pointer to the start of the memory matrix
ptrByte = (byte*) pb; //resets the pointer to the start of the memory matrix
ptrByte += nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES - 1; //sets the pointer to the correct position: end of incomplete block
*ptrByte ^= 0x01; //last byte of padding: at the end of the last incomplete block
*/
// display the data
printf("LYRA2REV3 data, blocks= %d\n", nBlocksInput);
/*
uint64_t* m = (uint64_t*)wholeMatrix;
printf("Lyra2v3 1: blocklensafe %d\n", BLOCK_LEN_BLAKE2_SAFE_BYTES);
printf("pb: %016lx %016lx %016lx %016lx\n",pb[0],pb[1],pb[2],pb[3]);
printf("pb: %016lx %016lx %016lx %016lx\n",pb[4],pb[5],pb[6],pb[7]);
printf("pb: %016lx %016lx %016lx %016lx\n",pb[8],pb[8],pb[10],pb[11]);
printf("pb: %016lx %016lx %016lx %016lx\n",pb[12],pb[13],pb[14],pb[15]);
printf("data V: %016lx %016lx %016lx %016lx\n",m[0],m[1],m[2],m[3]);
printf("data V: %016lx %016lx %016lx %016lx\n",m[4],m[5],m[6],m[7]);
printf("data V: %016lx %016lx %016lx %016lx\n",m[8],m[8],m[10],m[11]);
printf("data V: %016lx %016lx %016lx %016lx\n",m[12],m[13],m[14],m[15]);
printf("data V: %016lx %016lx %016lx %016lx\n",m[16],m[17],m[18],m[19]);
printf("data V: %016lx %016lx %016lx %016lx\n",m[20],m[21],m[22],m[23]);
printf("data V: %016lx %016lx %016lx %016lx\n",m[24],m[25],m[26],m[27]);
printf("data V: %016lx %016lx %016lx %016lx\n",m[28],m[29],m[30],m[31]);
*/
// from here on it's all SIMD access to state and matrix
// define vector pointers and adjust sizes and pointer offsets
uint64_t _ALIGN(256) st[16];
ptrWord = wholeMatrix;
absorbBlockBlake2Safe( state, ptrWord, nBlocksInput, BLOCK_LEN );
reducedSqueezeRow0( state, &wholeMatrix[0], nCols );
absorbBlockBlake2Safe_2way( state, ptrWord, nBlocksInput, BLOCK_LEN );
reducedDuplexRow1( state, &wholeMatrix[0], &wholeMatrix[ROW_LEN_INT64],
uint64_t *p = wholeMatrix;
printf("wholematrix[0]\n");
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
p = &wholeMatrix[2*ROW_LEN_INT64];
printf("wholematrix[1]\n");
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
p = &wholeMatrix[4*ROW_LEN_INT64];
printf("wholematrix[2]\n");
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
p = &wholeMatrix[6*ROW_LEN_INT64];
printf("wholematrix[3]\n");
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV1 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
//printf("SV1: %016lx %016lx %016lx %016lx\n",state[0],state[1],state[2],state[3]);
/*
absorbBlockBlake2Safe( st, pb, nBlocksInput, BLOCK_LEN );
printf("SV: %016lx %016lx %016lx %016lx\n",state[0],state[1],state[2],state[3]);
printf("SS: %016lx %016lx %016lx %016lx\n",st[0],st[1],st[2],st[3]);
*/
reducedSqueezeRow0_2way( state, &wholeMatrix[0], nCols );
// At this point the entire matrix should be filled but only col 0 is.
// The others are unchanged or the display offsets are wrong.
p = wholeMatrix;
printf("wholematrix[0] %x\n",wholeMatrix);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[32],p[33],p[34],p[35]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[36],p[37],p[38],p[39]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[40],p[41],p[42],p[43]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[44],p[45],p[46],p[47]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[48],p[49],p[50],p[51]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[52],p[53],p[54],p[55]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[56],p[57],p[58],p[59]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[60],p[61],p[62],p[63]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[64],p[65],p[66],p[67]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[68],p[69],p[70],p[71]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[72],p[73],p[74],p[75]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[76],p[77],p[78],p[79]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[80],p[81],p[82],p[83]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[84],p[85],p[86],p[87]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[88],p[89],p[90],p[91]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[92],p[93],p[94],p[95]);
p = &wholeMatrix[2*ROW_LEN_INT64];
printf("wholematrix[1] %x\n", &wholeMatrix[2*ROW_LEN_INT64]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
p = &wholeMatrix[4*ROW_LEN_INT64];
printf("wholematrix[2] %x\n",&wholeMatrix[4*ROW_LEN_INT64]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
p = &wholeMatrix[6*ROW_LEN_INT64];
printf("wholematrix[3] %x\n",&wholeMatrix[6*ROW_LEN_INT64]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV2 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
//printf("SV2 %016lx %016lx %016lx %016lx\n",state[0],state[1],state[2],state[3]);
/*
printf("SV2 %016lx %016lx %016lx %016lx\n",state[0],state[1],state[2],state[3]);
printf("SV2 %016lx %016lx %016lx %016lx\n",state[4],state[5],state[6],state[7]);
printf("SV2 %016lx %016lx %016lx %016lx\n",state[8],state[9],state[10],state[11]);
printf("SV2 %016lx %016lx %016lx %016lx\n",state[12],state[13],state[14],state[15]);
printf("SV2 %016lx %016lx %016lx %016lx\n",state[16],state[17],state[18],state[19]);
printf("SV2 %016lx %016lx %016lx %016lx\n",state[20],state[21],state[22],state[23]);
printf("SV2 %016lx %016lx %016lx %016lx\n",state[24],state[25],state[26],state[27]);
printf("SV2 %016lx %016lx %016lx %016lx\n",state[28],state[29],state[30],state[31]);
*/
reducedDuplexRow1_2way( state, &wholeMatrix[0], &wholeMatrix[2*ROW_LEN_INT64],
nCols);
//printf("SV3 %016lx %016lx %016lx %016lx\n",state[0],state[1],state[2],state[3]);
/*
printf("SV3 %016lx %016lx %016lx %016lx\n",state[0],state[1],state[2],state[3]);
printf("SV3 %016lx %016lx %016lx %016lx\n",state[4],state[5],state[6],state[7]);
printf("SV3 %016lx %016lx %016lx %016lx\n",state[8],state[9],state[10],state[11]);
printf("SV3 %016lx %016lx %016lx %016lx\n",state[12],state[13],state[14],state[15]);
printf("SV3 %016lx %016lx %016lx %016lx\n",state[16],state[17],state[18],state[19]);
printf("SV3 %016lx %016lx %016lx %016lx\n",state[20],state[21],state[22],state[23]);
printf("SV3 %016lx %016lx %016lx %016lx\n",state[24],state[25],state[26],state[27]);
printf("SV3 %016lx %016lx %016lx %016lx\n",state[28],state[29],state[30],state[31]);
*/
p = wholeMatrix;
printf("wholematrix[0]\n");
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
p = &wholeMatrix[2*ROW_LEN_INT64];
printf("wholematrix[1]\n");
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
p = &wholeMatrix[4*ROW_LEN_INT64];
printf("wholematrix[2]\n");
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
p = &wholeMatrix[6*ROW_LEN_INT64];
printf("wholematrix[3]\n");
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV3 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
do
{
reducedDuplexRowSetup( state, &wholeMatrix[prev*ROW_LEN_INT64],
&wholeMatrix[rowa*ROW_LEN_INT64],
&wholeMatrix[row*ROW_LEN_INT64], nCols );
reducedDuplexRowSetup_2way( state, &wholeMatrix[2*prev*ROW_LEN_INT64],
&wholeMatrix[2*rowa0*ROW_LEN_INT64],
&wholeMatrix[2*row*ROW_LEN_INT64], nCols );
rowa = (rowa + step) & (window - 1);
rowa0 = (rowa0 + step) & (window - 1);
prev = row;
row++;
if (rowa == 0)
if (rowa0 == 0)
{
step = window + gap; //changes the step: approximately doubles its value
window *= 2; //doubles the size of the re-visitation window
@@ -337,6 +599,80 @@ int LYRA2REV3_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
} while (row < nRows);
p = wholeMatrix;
printf("wholematrix[0]\n");
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
p = &wholeMatrix[2*ROW_LEN_INT64];
printf("wholematrix[1]\n");
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
p = &wholeMatrix[4*ROW_LEN_INT64];
printf("wholematrix[2]\n");
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
p = &wholeMatrix[6*ROW_LEN_INT64];
printf("wholematrix[3]\n");
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
//printf("SV5 prev= %d\n",prev);
/*
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV4 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV4 S %016lx %016lx %016lx %016lx\n",state[0],state[1],state[2],state[3]);
printf("SV4 S %016lx %016lx %016lx %016lx\n",state[4],state[5],state[6],state[7]);
printf("SV4 S %016lx %016lx %016lx %016lx\n",state[8],state[9],state[10],state[11]);
printf("SV4 S %016lx %016lx %016lx %016lx\n",state[12],state[13],state[14],state[15]);
printf("SV4 S %016lx %016lx %016lx %016lx\n",state[16],state[17],state[18],state[19]);
printf("SV4 S %016lx %016lx %016lx %016lx\n",state[20],state[21],state[22],state[23]);
printf("SV4 S %016lx %016lx %016lx %016lx\n",state[24],state[25],state[26],state[27]);
printf("SV4 S %016lx %016lx %016lx %016lx\n",state[28],state[29],state[30],state[31]);
*/
//printf("Lyra2v3 4\n");
uint64_t *ptr0 = wholeMatrix; // base address for each lane
uint64_t *ptr1 = wholeMatrix + 4;
// convert a simple offset to an index into interleaved data.
// good for state and 4 row matrix.
// index = ( int( off / 4 ) * 2 ) + ( off mod 4 )
#define offset_to_index( o ) \
( ( ( (uint64_t)( (o) & 0xf) / 4 ) * 8 ) + ( (o) % 4 ) )
row = 0;
for (tau = 1; tau <= timeCost; tau++)
{
@@ -344,24 +680,79 @@ int LYRA2REV3_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
do
{
// This part is not parallel, rowa will be different for each lane.
// state (u64[16]) is interleaved 2x256, need to extract separately.
// state (u64[16]) is interleaved 2x256, need to extract separately
// and figure out where the data is when interleaved.
// &state[0] (or matrix) is the start of lane 0, while &state[4]
// is the start of lane 1. From there there are 4 consecutive elements
// followed by 4 elements from the other lane that must be skipped.
// index = 2 * instance / 4 * 4 + instance % 4
uint64_t index0 = ( ( (instance0 & 0xf) >> 3 ) << 2 )
+ ( instance0 & 0x3 )
uint64_t index1 = ( ( (instance1 & 0xf) >> 3 ) << 2 )
+ ( instance1 & 0x3 )
povly ptr;
ptr.u64 = wholeMatrix;
instance0 = state[ index0 ] & 0xf;
instance1 = (state+4)[ index1 ] & 0xf;
/*
printf("SV4a %016lx %016lx %016lx %016lx\n",state[0],state[1],state[2],state[3]);
printf("SV4a %016lx %016lx %016lx %016lx\n",state[4],state[5],state[6],state[7]);
printf("SV4a %016lx %016lx %016lx %016lx\n",state[8],state[9],state[10],state[11]);
printf("SV4a %016lx %016lx %016lx %016lx\n",state[12],state[13],state[14],state[15]);
printf("SV4a %016lx %016lx %016lx %016lx\n",state[16],state[17],state[18],state[19]);
printf("SV4a %016lx %016lx %016lx %016lx\n",state[20],state[21],state[22],state[23]);
printf("SV4a %016lx %016lx %016lx %016lx\n",state[24],state[25],state[26],state[27]);
printf("SV4a %016lx %016lx %016lx %016lx\n",state[28],state[29],state[30],state[31]);
*
//printf("SV4a o to i %016lx = %016lx\n", instance0, offset_to_index( instance0 ) );
*/
instance0 = state[ offset_to_index( instance0 ) ];
instance1 = (&state[4])[ offset_to_index( instance1 ) ];
rowa0 = state[ instance0 ];
rowa1 = (state+4)[ instance1 ];
printf("SV4b o to i %016lx = %016lx, state0 %016lx\n", instance0, offset_to_index( instance0 ), state[offset_to_index( instance0 )] );
printf("SV4b o to i %016lx = %016lx, state1 %016lx\n", instance1, offset_to_index( instance1 ), (state+4)[offset_to_index( instance1 )] );
//printf("SV4b lane 1 instance1 = %d, rowa1= %d\n",instance1,rowa1);
reducedDuplexRow_2way( state, &wholeMatrix[prev*ROW_LEN_INT64],
&wholeMatrix[rowa0*ROW_LEN_INT64],
&wholeMatrix[rowa1*ROW_LEN_INT64],
&wholeMatrix[row*ROW_LEN_INT64], nCols );
rowa0 = state[ offset_to_index( instance0 ) ]
& (unsigned int)(nRows-1);
rowa1 = (state+4)[ offset_to_index( instance1 ) ]
& (unsigned int)(nRows-1);
// matrix[prev] ie row 0, is messed up after rdr for row 1. ok after rdr 0
//printf("SV5 lane 1 instance1= %016lx, rowa1= %d\n",instance1,rowa1);
printf("SV5 row= %d, step= %d\n",row,step);
printf("SV5 instance0 %016lx, rowa0 %d, p0 %016lx\n",instance0,rowa0,ptr0[ 2* rowa0 * ROW_LEN_INT64 ]);
printf("SV5 instance1 %016lx, rowa1 %d, p1 %016lx\n",instance1,rowa1,ptr1[ 2* rowa1 * ROW_LEN_INT64 ]);
uint64_t *p = &wholeMatrix[2*rowa1*ROW_LEN_INT64];
printf("SV5 prev= %d\n",prev);
/*
printf("SV5 M %016lx %016lx %016lx %016lx\n",p[0],p[1],p[2],p[3]);
printf("SV5 M %016lx %016lx %016lx %016lx\n",p[4],p[5],p[6],p[7]);
printf("SV5 M %016lx %016lx %016lx %016lx\n",p[8],p[9],p[10],p[11]);
printf("SV5 M %016lx %016lx %016lx %016lx\n",p[12],p[13],p[14],p[15]);
printf("SV5 M %016lx %016lx %016lx %016lx\n",p[16],p[17],p[18],p[19]);
printf("SV5 M %016lx %016lx %016lx %016lx\n",p[20],p[21],p[22],p[23]);
printf("SV5 M %016lx %016lx %016lx %016lx\n",p[24],p[25],p[26],p[27]);
printf("SV5 M %016lx %016lx %016lx %016lx\n",p[28],p[29],p[30],p[31]);
*/
reducedDuplexRow_2way( state, ptr, prev, rowa0, rowa1, row, nCols );
/*
reducedDuplexRow_2way( state, &wholeMatrix[ 2* prev * ROW_LEN_INT64 ],
&ptr0[ 2* rowa0 * ROW_LEN_INT64 ],
&ptr1[ 2* rowa1 * ROW_LEN_INT64 ],
&wholeMatrix[ 2* row*ROW_LEN_INT64], nCols );
*/
/*
printf("SV6 %016lx %016lx %016lx %016lx\n",state[0],state[1],state[2],state[3]);
printf("SV6 %016lx %016lx %016lx %016lx\n",state[4],state[5],state[6],state[7]);
printf("SV6 %016lx %016lx %016lx %016lx\n",state[8],state[9],state[10],state[11]);
printf("SV6 %016lx %016lx %016lx %016lx\n",state[12],state[13],state[14],state[15]);
printf("SV6 %016lx %016lx %016lx %016lx\n",state[16],state[17],state[18],state[19]);
printf("SV6 %016lx %016lx %016lx %016lx\n",state[20],state[21],state[22],state[23]);
printf("SV6 %016lx %016lx %016lx %016lx\n",state[24],state[25],state[26],state[27]);
printf("SV6 %016lx %016lx %016lx %016lx\n",state[28],state[29],state[30],state[31]);
*/
/*
instance = state[instance & 0xF];
rowa = state[instance & 0xF] & (unsigned int)(nRows-1);
@@ -378,13 +769,22 @@ int LYRA2REV3_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
} while ( row != 0 );
}
absorbBlock( state, &wholeMatrix[rowa*ROW_LEN_INT64] );
squeeze( state, K, (unsigned int) kLen );
printf("SV7 %016lx %016lx %016lx %016lx\n",state[0],state[1],state[2],state[3]);
// rowa mismatches here so need to do a split read
absorbBlock_2way( state, &wholeMatrix[2*rowa0*ROW_LEN_INT64] );
squeeze_2way( state, K, (unsigned int) kLen );
return 0;
}
#undef offset_to_index
#endif // AVX512
#if 0
//////////////////////////////////////////////////
int LYRA2Z( uint64_t* wholeMatrix, void *K, uint64_t kLen, const void *pwd,
@@ -713,3 +1113,4 @@ int LYRA2RE( void *K, uint64_t kLen, const void *pwd, const uint64_t pwdlen,
return 0;
}
#endif

View File

@@ -60,4 +60,15 @@ int LYRA2Z( uint64_t*, void *K, uint64_t kLen, const void *pwd,
int LYRA2(void *K, int64_t kLen, const void *pwd, int32_t pwdlen, const void *salt, int32_t saltlen, int64_t timeCost, const int16_t nRows, const int16_t nCols);
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
int LYRA2REV3_2WAY( uint64_t*, void *K, uint64_t kLen, const void *pwd,
uint64_t pwdlen, const void *salt, uint64_t saltlen,
uint64_t timeCost, uint64_t nRows, uint64_t nCols );
//int LYRA2REV3_2WAY( uint64_t*, void *K, uint64_t kLen, const void *pwd,
// uint64_t pwdlen, uint64_t timeCost, uint64_t nRows, uint64_t nCols );
#endif
#endif /* LYRA2_H_ */

View File

@@ -4,8 +4,212 @@
#include "algo/blake/blake-hash-4way.h"
#include "algo/bmw/bmw-hash-4way.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/cubehash/cube-hash-2way.h"
#if defined (LYRA2REV3_8WAY)
#if defined (LYRA2REV3_16WAY)
typedef struct {
blake256_16way_context blake;
cube_4way_context cube;
bmw256_16way_context bmw;
} lyra2v3_16way_ctx_holder;
static __thread lyra2v3_16way_ctx_holder l2v3_16way_ctx;
bool init_lyra2rev3_16way_ctx()
{
blake256_16way_init( &l2v3_16way_ctx.blake );
cube_4way_init( &l2v3_16way_ctx.cube, 256, 16, 32 );
bmw256_16way_init( &l2v3_16way_ctx.bmw );
return true;
}
// 16-way lyra2rev3 hash: blake256 -> lyra2v3 -> cubehash -> lyra2v3 -> bmw256.
// NOTE(review): work in progress — lanes 2..15 of the first Lyra2 pass and the
// entire second Lyra2 pass are commented out, and per-hash debug printfs
// remain, so as written this does NOT yet produce a valid lyra2rev3 hash.
void lyra2rev3_16way_hash( void *state, const void *input )
{
uint32_t vhash[16*8] __attribute__ ((aligned (128)));
uint32_t hash0[8] __attribute__ ((aligned (64)));
uint32_t hash1[8] __attribute__ ((aligned (32)));
uint32_t hash2[8] __attribute__ ((aligned (32)));
uint32_t hash3[8] __attribute__ ((aligned (32)));
uint32_t hash4[8] __attribute__ ((aligned (32)));
uint32_t hash5[8] __attribute__ ((aligned (32)));
uint32_t hash6[8] __attribute__ ((aligned (32)));
uint32_t hash7[8] __attribute__ ((aligned (32)));
uint32_t hash8[8] __attribute__ ((aligned (64)));
uint32_t hash9[8] __attribute__ ((aligned (32)));
uint32_t hash10[8] __attribute__ ((aligned (32)));
uint32_t hash11[8] __attribute__ ((aligned (32)));
uint32_t hash12[8] __attribute__ ((aligned (32)));
uint32_t hash13[8] __attribute__ ((aligned (32)));
uint32_t hash14[8] __attribute__ ((aligned (32)));
uint32_t hash15[8] __attribute__ ((aligned (32)));
// Work on a copy so the thread-local pristine contexts survive this call.
lyra2v3_16way_ctx_holder ctx __attribute__ ((aligned (64)));
memcpy( &ctx, &l2v3_16way_ctx, sizeof(l2v3_16way_ctx) );
// Absorb only the final 16 bytes of each 64-byte lane; assumes the first 64
// bytes were pre-absorbed by the caller (see the commented midstate update
// in scanhash) — TODO confirm, otherwise the blake input is incomplete.
blake256_16way_update( &ctx.blake, input + (64*16), 16 );
blake256_16way_close( &ctx.blake, vhash );
// De-interleave 16 lanes of 256-bit blake output into per-lane buffers.
dintrlv_16x32( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
hash8, hash9, hash10, hash11 ,hash12, hash13, hash14, hash15,
vhash, 256 );
//printf("Lyra1 lane 0\n");
// First Lyra2 pass, lanes 0-1 only (2-way AVX-512 path under test).
intrlv_2x256( vhash, hash0, hash1, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash0, 32, 1, 4, 4 );
// Debug cross-check: scalar LYRA2REV3 of lane 1 vs the 2-way result.
uint32_t h[8];
LYRA2REV3( l2v3_wholeMatrix, h, 32, hash1, 32, hash1, 32, 1, 4, 4 );
printf("S: %08x %08x %08x %08x %08x %08x %08x %08x\n",hash0[0],hash0[1],hash0[2],hash0[3],hash0[4],hash0[5],hash0[6],hash0[7]);
printf("V: %08x %08x %08x %08x %08x %08x %08x %08x\n",h[0],h[1],h[2],h[3],h[4],h[5],h[6],h[7]);
printf("\n");
//printf("Lyra1 lane 2\n");
dintrlv_2x256( hash0, hash1, vhash, 256 );
// NOTE(review): lanes 2-15 of the first Lyra2 pass are disabled below.
/*
intrlv_2x256( vhash, hash2, hash3, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash2, 32, 1, 4, 4 );
dintrlv_2x256( hash2, hash3, vhash, 256 );
intrlv_2x256( vhash, hash4, hash5, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash4, 32, 1, 4, 4 );
dintrlv_2x256( hash4, hash5, vhash, 256 );
intrlv_2x256( vhash, hash6, hash7, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash6, 32, 1, 4, 4 );
dintrlv_2x256( hash6, hash7, vhash, 256 );
intrlv_2x256( vhash, hash8, hash9, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash8, 32, 1, 4, 4 );
dintrlv_2x256( hash8, hash9, vhash, 256 );
intrlv_2x256( vhash, hash10, hash11, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash10, 32, 1, 4, 4 );
dintrlv_2x256( hash10, hash11, vhash, 256 );
intrlv_2x256( vhash, hash12, hash13, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash12, 32, 1, 4, 4 );
dintrlv_2x256( hash12, hash13, vhash, 256 );
intrlv_2x256( vhash, hash14, hash15, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash14, 32, 1, 4, 4 );
dintrlv_2x256( hash14, hash15, vhash, 256 );
*/
//printf("cube\n");
// CubeHash stage: 4 lanes at a time; the first group reuses the pristine
// context from the memcpy, subsequent groups re-init it.
intrlv_4x128( vhash, hash0, hash1, hash2, hash3, 256 );
cube_4way_update_close( &ctx.cube, vhash, vhash, 32 );
dintrlv_4x128( hash0, hash1, hash2, hash3, vhash, 256 );
intrlv_4x128( vhash, hash4, hash5, hash6, hash7, 256 );
cube_4way_init( &ctx.cube, 256, 16, 32 );
cube_4way_update_close( &ctx.cube, vhash, vhash, 32 );
dintrlv_4x128( hash4, hash5, hash6, hash7, vhash, 256 );
intrlv_4x128( vhash, hash8, hash9, hash10, hash11, 256 );
cube_4way_init( &ctx.cube, 256, 16, 32 );
cube_4way_update_close( &ctx.cube, vhash, vhash, 32 );
dintrlv_4x128( hash8, hash9, hash10, hash11, vhash, 256 );
intrlv_4x128( vhash, hash12, hash13, hash14, hash15, 256 );
cube_4way_init( &ctx.cube, 256, 16, 32 );
cube_4way_update_close( &ctx.cube, vhash, vhash, 32 );
dintrlv_4x128( hash12, hash13, hash14, hash15, vhash, 256 );
//printf("Lyra2...\n");
// NOTE(review): the second Lyra2 pass is disabled entirely below.
/*
intrlv_2x256( vhash, hash0, hash1, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash0, 32, 1, 4, 4 );
dintrlv_2x256( hash0, hash1, vhash, 256 );
intrlv_2x256( vhash, hash2, hash3, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash2, 32, 1, 4, 4 );
dintrlv_2x256( hash2, hash3, vhash, 256 );
intrlv_2x256( vhash, hash4, hash5, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash4, 32, 1, 4, 4 );
dintrlv_2x256( hash4, hash5, vhash, 256 );
intrlv_2x256( vhash, hash6, hash7, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash6, 32, 1, 4, 4 );
dintrlv_2x256( hash6, hash7, vhash, 256 );
intrlv_2x256( vhash, hash8, hash9, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash8, 32, 1, 4, 4 );
dintrlv_2x256( hash8, hash9, vhash, 256 );
intrlv_2x256( vhash, hash10, hash11, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash10, 32, 1, 4, 4 );
dintrlv_2x256( hash10, hash11, vhash, 256 );
intrlv_2x256( vhash, hash12, hash13, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash12, 32, 1, 4, 4 );
dintrlv_2x256( hash12, hash13, vhash, 256 );
intrlv_2x256( vhash, hash14, hash15, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, hash14, 32, 1, 4, 4 );
dintrlv_2x256( hash14, hash15, vhash, 256 );
*/
// Re-interleave all 16 lanes for the final BMW-256 stage.
intrlv_16x32( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, hash8, hash9, hash10, hash11, hash12, hash13, hash14,
hash15, 256 );
//printf("bmw\n");
bmw256_16way_update( &ctx.bmw, vhash, 32 );
bmw256_16way_close( &ctx.bmw, state );
//printf("done\n");
}
// 16-way nonce scan loop for lyra2rev3.  Byte-swaps and interleaves the
// 80-byte block header into 16 lanes, hashes 16 nonces per iteration, and
// submits any lane that passes the full target test.  Returns 0; the count
// of nonces tried is reported through *hashes_done.
int scanhash_lyra2rev3_16way( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*16] __attribute__ ((aligned (128)));
uint32_t vdata[20*16] __attribute__ ((aligned (64)));
// hash7 points at the interleaved 8th word of each lane (the high word
// compared against the target for a cheap first-pass filter).
uint32_t *hash7 = &hash[7<<3];
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
const uint32_t Htarg = ptarget[7];
__m512i *noncev = (__m512i*)vdata + 19; // aligned
const int thr_id = mythr->id;
// Benchmark mode: lower the target so shares are "found" regularly.
// (Casts away const on ptarget — existing project idiom.)
if ( opt_benchmark ) ( (uint32_t*)ptarget )[7] = 0x0000ff;
mm512_bswap32_intrlv80_16x32( vdata, pdata );
blake256_16way_init( &l2v3_16way_ctx.blake );
// NOTE(review): the blake midstate precompute below is commented out while
// lyra2rev3_16way_hash only absorbs the final 16 header bytes — as written
// the first 64 bytes are never absorbed.  Confirm/restore before use.
// blake256_16way_update( &l2v3_16way_ctx.blake, vdata, 64 );
do
{
// Load 16 consecutive nonces (big-endian) into lane slot 19.
*noncev = mm512_bswap_32( _mm512_set_epi32( n+15, n+14, n+13, n+12,
n+11, n+10, n+ 9, n+ 8,
n+ 7, n+ 6, n+ 5, n+ 4,
n+ 3, n+ 2, n+ 1, n ) );
lyra2rev3_16way_hash( hash, vdata );
pdata[19] = n;
for ( int lane = 0; lane < 16; lane++ )
if ( unlikely( hash7[lane] <= Htarg ) )
{
// Cheap filter passed: extract the full lane hash and verify.
extr_lane_16x32( lane_hash, hash, lane, 256 );
if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 16;
} while ( likely( (n < max_nonce-16) && !work_restart[thr_id].restart ) );
*hashes_done = n - first_nonce;
return 0;
}
#elif defined (LYRA2REV3_8WAY)
typedef struct {
blake256_8way_context blake;

View File

@@ -19,7 +19,7 @@
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "algo-gate.h"
//#include "algo-gate.h"
#include <string.h>
#include <stdio.h>
#include <time.h>
@@ -31,21 +31,31 @@
inline void squeeze_2way( uint64_t *State, byte *Out, unsigned int len )
{
const int len_m256i = len / 32;
const int fullBlocks = len_m256i / BLOCK_LEN_M256I;
const int fullBlocks = len / 32;
__m512i* state = (__m512i*)State;
__m512i* out = (__m512i*)Out;
int i;
//printf("squeeze 1, len= %d, full %d\n", len,fullBlocks);
//Squeezes full blocks
for ( i = 0; i < fullBlocks; i++ )
{
//printf("squeeze 1, %d\n",i);
memcpy_512( out, state, BLOCK_LEN_M256I*2 );
LYRA_ROUND_2WAY_AVX2( state[0], state[1], state[2], state[3] );
out += BLOCK_LEN_M256I*2;
//printf("squeeze 2\n");
LYRA_ROUND_2WAY_AVX512( state[0], state[1], state[2], state[3] );
//printf("squeeze 2\n");
out += BLOCK_LEN_M256I;
}
//Squeezes remaining bytes
memcpy_512( out, state, ( (len_m256i % BLOCK_LEN_M256I) * 2 ) );
// memcpy_512( out, state, ( (len * 2 ) );
}
inline void absorbBlock_2way( uint64_t *State, const uint64_t *In )
@@ -90,7 +100,7 @@ inline void absorbBlockBlake2Safe_2way( uint64_t *State, const uint64_t *In,
state1 = _mm512_xor_si512( state1, in[1] );
LYRA_12_ROUNDS_2WAY_AVX512( state0, state1, state2, state3 );
In += block_len * 2;
In += block_len*2;
}
_mm512_store_si512( (__m512i*)State, state0 );
@@ -109,7 +119,7 @@ inline void reducedSqueezeRow0_2way( uint64_t* State, uint64_t* rowOut,
register __m512i state0, state1, state2, state3;
__m512i* out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_M256I * 2 );
__m512i* out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_M256I );
state0 = _mm512_load_si512( (__m512i*)State );
state1 = _mm512_load_si512( (__m512i*)State + 1 );
@@ -132,7 +142,7 @@ inline void reducedSqueezeRow0_2way( uint64_t* State, uint64_t* rowOut,
out[2] = state2;
//Goes to next block (column) that will receive the squeezed data
out -= BLOCK_LEN_M256I * 2;
out -= BLOCK_LEN_M256I;
LYRA_ROUND_2WAY_AVX512( state0, state1, state2, state3 );
}
@@ -143,15 +153,14 @@ inline void reducedSqueezeRow0_2way( uint64_t* State, uint64_t* rowOut,
_mm512_store_si512( (__m512i*)State + 3, state3 );
}
// This function has to deal with gathering 2 256 bit rowin vectors from
// non-contiguous memory. Extra work and performance penalty.
inline void reducedDuplexRow1_2way( uint64_t *State, uint64_t *rowIn,
uint64_t *rowOut, uint64_t nCols )
{
int i;
register __m512i state0, state1, state2, state3;
__m512i *in = (__m256i*)rowIn;
__m512i *in = (__m512i*)rowIn;
__m512i *out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_M256I );
state0 = _mm512_load_si512( (__m512i*)State );
state1 = _mm512_load_si512( (__m512i*)State + 1 );
@@ -171,17 +180,15 @@ inline void reducedDuplexRow1_2way( uint64_t *State, uint64_t *rowIn,
out[2] = _mm512_xor_si512( state2, in[2] );
//Input: next column (i.e., next block in sequence)
in0 += BLOCK_LEN_M256I;
in1 += BLOCK_LEN_M256I;
in += BLOCK_LEN_M256I;
//Output: goes to previous column
out -= BLOCK_LEN_M256I * 2;
out -= BLOCK_LEN_M256I;
}
_mm512_store_si256( (__m512i*)State, state0 );
_mm512_store_si256( (__m512i*)State + 1, state1 );
_mm512_store_si256( (__m512i*)State + 2, state2 );
_mm512_store_si256( (__m512i*)State + 3, state3 );
}
_mm512_store_si512( (__m512i*)State, state0 );
_mm512_store_si512( (__m512i*)State + 1, state1 );
_mm512_store_si512( (__m512i*)State + 2, state2 );
_mm512_store_si512( (__m512i*)State + 3, state3 );
}
inline void reducedDuplexRowSetup_2way( uint64_t *State, uint64_t *rowIn,
@@ -192,7 +199,7 @@ inline void reducedDuplexRowSetup_2way( uint64_t *State, uint64_t *rowIn,
register __m512i state0, state1, state2, state3;
__m512i* in = (__m512i*)rowIn;
__m512i* inout = (__m512i*)rowInOut;
__m512i* out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_M256I * 2 );
__m512i* out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_M256I );
__m512i t0, t1, t2;
state0 = _mm512_load_si512( (__m512i*)State );
@@ -209,7 +216,7 @@ inline void reducedDuplexRowSetup_2way( uint64_t *State, uint64_t *rowIn,
state2 = _mm512_xor_si512( state2,
_mm512_add_epi64( in[2], inout[2] ) );
LYRA_ROUND_2WAY AVX512( state0, state1, state2, state3 );
LYRA_ROUND_2WAY_AVX512( state0, state1, state2, state3 );
out[0] = _mm512_xor_si512( state0, in[0] );
out[1] = _mm512_xor_si512( state1, in[1] );
@@ -221,17 +228,17 @@ inline void reducedDuplexRowSetup_2way( uint64_t *State, uint64_t *rowIn,
t2 = _mm512_permutex_epi64( state2, 0x93 );
inout[0] = _mm512_xor_si512( inout[0],
_mm512_mask_blend_epi32( t0, t2, 0x03 ) );
_mm512_mask_blend_epi32( 0x03, t0, t2 ) );
inout[1] = _mm512_xor_si512( inout[1],
_mm512_mask_blend_epi32( t1, t0, 0x03 ) );
_mm512_mask_blend_epi32( 0x03, t1, t0 ) );
inout[2] = _mm512_xor_si512( inout[2],
_mm512_mask_blend_epi32( t2, t1, 0x03 ) );
_mm512_mask_blend_epi32( 0x03, t2, t1 ) );
//Inputs: next column (i.e., next block in sequence)
in += BLOCK_LEN_M256I * 2;
inout += BLOCK_LEN_M256I * 2;
in += BLOCK_LEN_M256I;
inout += BLOCK_LEN_M256I;
//Output: goes to previous column
out -= BLOCK_LEN_M256I * 2;
out -= BLOCK_LEN_M256I;
}
_mm512_store_si512( (__m512i*)State, state0 );
@@ -240,53 +247,99 @@ inline void reducedDuplexRowSetup_2way( uint64_t *State, uint64_t *rowIn,
_mm512_store_si512( (__m512i*)State + 3, state3 );
}
inline void reducedDuplexRow_2way( uint64_t *State, uint64_t *rowIn1,
uint64_t *rowIn0, uint64_t *rowInOut, uint64_t *rowOut,
uint64_t nCols )
// Big ugly workaround for pointer aliasing: use a union of pointers.
// Access the matrix as __m512i for in and out, __m256i for inout.
inline void reducedDuplexRow_2way( uint64_t *State, povly matrix,
uint64_t rowIn,
uint64_t rowInOut0, uint64_t rowInOut1,
uint64_t rowOut, uint64_t nCols )
{
int i;
register __m512i state0, state1, state2, state3;
__m256i *in0 = (__m256i*)rowIn0;
__m256i *in0 = (__m256i*)rowIn0;
__m2512* in = (__m512i*)rowIn;
__m2512* inout = (__m512i*)rowInOut;
__m512i* out = (__m512i*)rowOut;
__m512i t0, t1, t2;
const uint64_t ROW_LEN_M256I = BLOCK_LEN_INT64 * nCols / 4;
__m512i state0, state1, state2, state3;
// register __m512i state0, state1, state2, state3;
__m512i *in = &matrix.v512[ rowIn * ROW_LEN_M256I ];
__m256i *inout0 = &matrix.v256[ 2 * rowInOut0 * ROW_LEN_M256I ];
__m256i *inout1 = &matrix.v256[ 2 * rowInOut1 * ROW_LEN_M256I ];
__m512i *out = &matrix.v512[ rowOut * ROW_LEN_M256I ];
__m512i io[3];
povly inout;
inout.v512 = &io[0];
__m512i t0, t1, t2;
_mm_prefetch( in0, _MM_HINT_T0 );
_mm_prefetch( in1, _MM_HINT_T0 );
_mm_prefetch( in0 + 2, _MM_HINT_T0 );
_mm_prefetch( in1 + 2, _MM_HINT_T0 );
_mm_prefetch( in0 + 4, _MM_HINT_T0 );
_mm_prefetch( in1 + 4, _MM_HINT_T0 );
_mm_prefetch( in0 + 6, _MM_HINT_T0 );
_mm_prefetch( in1 + 6, _MM_HINT_T0 );
state0 = _mm512_load_si512( (__m512i*)State );
state1 = _mm512_load_si512( (__m512i*)State + 1 );
state2 = _mm512_load_si512( (__m512i*)State + 2 );
state3 = _mm512_load_si512( (__m512i*)State + 3 );
_mm_prefetch( in, _MM_HINT_T0 );
_mm_prefetch( inout0, _MM_HINT_T0 );
_mm_prefetch( inout1, _MM_HINT_T0 );
_mm_prefetch( in + 2, _MM_HINT_T0 );
_mm_prefetch( inout0 + 2, _MM_HINT_T0 );
_mm_prefetch( inout1 + 2, _MM_HINT_T0 );
_mm_prefetch( in + 4, _MM_HINT_T0 );
_mm_prefetch( inout0 + 4, _MM_HINT_T0 );
_mm_prefetch( inout1 + 4, _MM_HINT_T0 );
_mm_prefetch( in + 6, _MM_HINT_T0 );
_mm_prefetch( inout0 + 6, _MM_HINT_T0 );
_mm_prefetch( inout1 + 6, _MM_HINT_T0 );
//uint64_t *ii = (uint64_t*)in0;
//printf("RDRV0 IO %016lx %016lx %016lx %016lx\n",ii[0],ii[1],ii[2],ii[3]);
for ( i = 0; i < nCols; i++ )
{
/*
//printf("RDR: loop %d\n",i);
uint64_t *io1 = (uint64_t*)inout1;
printf("RDRV0 col= %d\n", i);
printf("RDRV0 IO1 %016lx %016lx %016lx %016lx\n",io1[0],io1[1],io1[2],io1[3]);
printf("RDRV0 IO1 %016lx %016lx %016lx %016lx\n",io1[4],io1[5],io1[6],io1[7]);
printf("RDRV0 IO1 %016lx %016lx %016lx %016lx\n",io1[8],io1[9],io1[10],io1[11]);
printf("RDRV0 IO1 %016lx %016lx %016lx %016lx\n",io1[12],io1[13],io1[14],io1[153]);
*/
//Absorbing "M[prev] [+] M[row*]"
inout.v256[0] = inout0[0];
inout.v256[1] = inout1[1];
inout.v256[2] = inout0[2];
inout.v256[3] = inout1[3];
inout.v256[4] = inout0[4];
inout.v256[5] = inout1[5];
/*
uint64_t *io = (uint64_t*)inout.u64;
uint64_t *ii = (uint64_t*)in;
printf("RDRV1 col= %d\n", i);
printf("RDRV1 IO %016lx %016lx %016lx %016lx\n",io[0],io[1],io[2],io[3]);
printf("RDRV1 IO %016lx %016lx %016lx %016lx\n",io[4],io[5],io[6],io[7]);
printf("RDRV1 IO %016lx %016lx %016lx %016lx\n",io[8],io[9],io[10],io[11]);
printf("RDRV1 IO %016lx %016lx %016lx %016lx\n",io[12],io[13],io[14],io[15]);
printf("RDRV1 IN %016lx %016lx %016lx %016lx\n",ii[0],ii[1],ii[2],ii[3]);
printf("RDRV1 IN %016lx %016lx %016lx %016lx\n",ii[4],ii[5],ii[6],ii[7]);
printf("RDRV1 IN %016lx %016lx %016lx %016lx\n",ii[8],ii[9],ii[10],ii[11]);
printf("RDRV1 IN %016lx %016lx %016lx %016lx\n",ii[12],ii[13],ii[14],ii[15]);
*/
// state0 = _mm512_xor_si512( state0, mm512_concat_256( in1[0], in0[0] );
// state1 = _mm512_xor_si512( state1, mm512_concat_256( in1[1], in0[1] );
// state2 = _mm512_xor_si512( state2, mm512_concat_256( in1[2], in0[2] );
t0 = mm512_concat_256( in1[0], in0[0] );
t1 = mm512_concat_256( in1[1], in0[1] );
t2 = mm512_concat_256( in1[2], in0[2] );
state0 = _mm512_xor_si512( state0,
_mm512_add_epi64( t0, inout[0] ) );
_mm512_add_epi64( in[0], inout.v512[0] ) );
state1 = _mm512_xor_si512( state1,
_mm512_add_epi64( t1, inout[1] ) );
_mm512_add_epi64( in[1], inout.v512[1] ) );
state2 = _mm512_xor_si512( state2,
_mm512_add_epi64( t2, inout[2] ) );
_mm512_add_epi64( in[2], inout.v512[2] ) );
//printf("RDR: round\n");
//Applies the reduced-round transformation f to the sponge's state
LYRA_ROUND_2WAY_AVX512( state0, state1, state2, state3 );
//printf("RDR 3\n");
//M[rowOut][col] = M[rowOut][col] XOR rand
out[0] = _mm512_xor_si512( out[0], state0 );
out[1] = _mm512_xor_si512( out[1], state1 );
@@ -296,18 +349,76 @@ inline void reducedDuplexRow_2way( uint64_t *State, uint64_t *rowIn1,
t0 = _mm512_permutex_epi64( state0, 0x93 );
t1 = _mm512_permutex_epi64( state1, 0x93 );
t2 = _mm512_permutex_epi64( state2, 0x93 );
/*
uint64_t *st = (uint64_t*)&state0;
printf("RDRV2 %016lx %016lx %016lx %016lx\n",st[0],st[1],st[2],st[3]);
printf("RDRv2 %016lx %016lx %016lx %016lx\n",st[4],st[5],st[6],st[7]);
st = (uint64_t*)&state1;
printf("RDRV2 %016lx %016lx %016lx %016lx\n",st[0],st[1],st[2],st[3]);
printf("RDRv2 %016lx %016lx %016lx %016lx\n",st[4],st[5],st[6],st[7]);
st = (uint64_t*)&state2;
printf("RDRV2 %016lx %016lx %016lx %016lx\n",st[0],st[1],st[2],st[3]);
printf("RDRv2 %016lx %016lx %016lx %016lx\n",st[4],st[5],st[6],st[7]);
inout[0] = _mm512_xor_si512( inout[0],
_mm512_mask_blend_epi32( t0, t2, 0x03 ) );
inout[1] = _mm512_xor_si512( inout[1],
_mm512_mask_blend_epi32( t1, t0, 0x03 ) );
inout[2] = _mm512_xor_si512( inout[2],
_mm512_mask_blend_epi32( t2, t1, 0x03 ) );
st = (uint64_t*)&t0;
printf("RDRV2 t0 %016lx %016lx %016lx %016lx\n",st[0],st[1],st[2],st[3]);
printf("RDRv2 t0 %016lx %016lx %016lx %016lx\n",st[4],st[5],st[6],st[7]);
st = (uint64_t*)&t1;
printf("RDRV2 t1 %016lx %016lx %016lx %016lx\n",st[0],st[1],st[2],st[3]);
printf("RDRv2 t1 %016lx %016lx %016lx %016lx\n",st[4],st[5],st[6],st[7]);
st = (uint64_t*)&t2;
printf("RDRV2 t2 %016lx %016lx %016lx %016lx\n",st[0],st[1],st[2],st[3]);
printf("RDRv2 t2 %016lx %016lx %016lx %016lx\n",st[4],st[5],st[6],st[7]);
*/
/*
printf("RDRV2 %016lx %016lx %016lx %016lx\n",st[8],st[9],st[10],st[11]);
printf("RDRV2 %016lx %016lx %016lx %016lx\n",st[12],st[13],st[14],st[15]);
printf("RDRV2 %016lx %016lx %016lx %016lx\n",st[16],st[17],st[18],st[19]);
printf("RDRV2 %016lx %016lx %016lx %016lx\n",st[20],st[21],st[22],st[23]);
printf("RDRV2 %016lx %016lx %016lx %016lx\n",st[24],st[25],st[26],st[271]);
printf("RDRV2 %016lx %016lx %016lx %016lx\n",st[28],st[29],st[30],st[31]);
*/
//printf("RDR 4\n");
/*
//uint64_t *io = (uint64_t*)&inout;
printf("RDRV1 col= %d\n", i);
printf("RDRV1 IO %016lx %016lx %016lx %016lx\n",io[0],io[1],io[2],io[3]);
printf("RDRV1 IO %016lx %016lx %016lx %016lx\n",io[4],io[5],io[6],io[7]);
printf("RDRV1 IO %016lx %016lx %016lx %016lx\n",io[8],io[9],io[10],io[11]);
printf("RDRV1 IO %016lx %016lx %016lx %016lx\n",io[12],io[13],io[14],io[15]);
*/
// need to split inout for write
inout.v512[0] = _mm512_xor_si512( inout.v512[0],
_mm512_mask_blend_epi32( 0x03, t0, t2 ) );
inout.v512[1] = _mm512_xor_si512( inout.v512[1],
_mm512_mask_blend_epi32( 0x03, t1, t0 ) );
inout.v512[2] = _mm512_xor_si512( inout.v512[2],
_mm512_mask_blend_epi32( 0x03, t2, t1 ) );
/*
printf("RDRV3 IO %016lx %016lx %016lx %016lx\n",io[0],io[1],io[2],io[3]);
printf("RDRV3 IO %016lx %016lx %016lx %016lx\n",io[4],io[5],io[6],io[7]);
printf("RDRV3 IO %016lx %016lx %016lx %016lx\n",io[8],io[9],io[10],io[11]);
printf("RDRV3 IO %016lx %016lx %016lx %016lx\n",io[12],io[13],io[14],io[153]);
*/
inout0[0] = inout.v256[0];
inout1[1] = inout.v256[1];
inout0[2] = inout.v256[2];
inout1[3] = inout.v256[3];
inout0[4] = inout.v256[4];
inout1[5] = inout.v256[5];
//printf("RDR 5\n");
//Goes to next block
in += BLOCK_LEN_M256I * 2;
out += BLOCK_LEN_M256I * 2;
inout += BLOCK_LEN_M256I * 2;
in += BLOCK_LEN_M256I;
inout0 += BLOCK_LEN_M256I * 2;
inout1 += BLOCK_LEN_M256I * 2;
out += BLOCK_LEN_M256I;
}
_mm512_store_si512( (__m512i*)State, state0 );

View File

@@ -65,14 +65,14 @@ static inline uint64_t rotr64( const uint64_t w, const unsigned c ){
b = mm512_ror_64( _mm512_xor_si512( b, c ), 63 );
#define LYRA_ROUND_2WAY_AVX512( s0, s1, s2, s3 ) \
G_4X64( s0, s1, s2, s3 ); \
s1 = mm512_ror_1x64( s1); \
s2 = mm512_swap128_256( s2 ); \
s3 = mm512_rol1x64_256( s3 ); \
G_4X64( s0, s1, s2, s3 ); \
s1 = mm512_rol1x64_256( s1 ); \
s2 = mm512_swap128_256( s2 ); \
s3 = mm512_ror1x64_256( s3 );
G2W_4X64( s0, s1, s2, s3 ); \
s1 = mm512_ror256_64( s1); \
s2 = mm512_swap256_128( s2 ); \
s3 = mm512_rol256_64( s3 ); \
G2W_4X64( s0, s1, s2, s3 ); \
s1 = mm512_rol256_64( s1 ); \
s2 = mm512_swap256_128( s2 ); \
s3 = mm512_ror256_64( s3 );
#define LYRA_12_ROUNDS_2WAY_AVX512( s0, s1, s2, s3 ) \
LYRA_ROUND_2WAY_AVX512( s0, s1, s2, s3 ) \
@@ -148,14 +148,14 @@ static inline uint64_t rotr64( const uint64_t w, const unsigned c ){
#define LYRA_ROUND_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \
G_2X64( s0, s2, s4, s6 ); \
G_2X64( s1, s3, s5, s7 ); \
mm128_ror1x64_256( s2, s3 ); \
mm128_swap128_256( s4, s5 ); \
mm128_rol1x64_256( s6, s7 ); \
mm128_ror256_64( s2, s3 ); \
mm128_swap256_128( s4, s5 ); \
mm128_rol256_64( s6, s7 ); \
G_2X64( s0, s2, s4, s6 ); \
G_2X64( s1, s3, s5, s7 ); \
mm128_rol1x64_256( s2, s3 ); \
mm128_swap128_256( s4, s5 ); \
mm128_ror1x64_256( s6, s7 );
mm128_rol256_64( s2, s3 ); \
mm128_swap256_128( s4, s5 ); \
mm128_ror256_64( s6, s7 );
#define LYRA_12_ROUNDS_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \
LYRA_ROUND_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \
@@ -220,7 +220,23 @@ void reducedDuplexRow1_2way( uint64_t *state, uint64_t *rowIn,
uint64_t *rowOut, uint64_t nCols);
void reducedDuplexRowSetup_2way( uint64_t *state, uint64_t *rowIn,
uint64_t *rowInOut, uint64_t *rowOut, uint64_t nCols );
void reducedDuplexRow_2way(uint64_t *state, uint64_t *rowIn1, uint64_t *rowIn0, uint64_t *rowInOut, uint64_t *rowOut, uint64_t nCols);
/*
void reducedDuplexRow_2way( uint64_t *state, uint64_t *rowIn,
uint64_t *rowInOut0, uint64_t *rowInOut1,
uint64_t *rowOut, uint64_t nCols);
*/
// Union of pointer views over the Lyra2 matrix, used to access the same
// memory as 512-bit lanes (in/out rows), 256-bit half-lanes (the split
// inout rows) or raw 64-bit words without pointer-cast aliasing.
union _povly
{
__m512i *v512;
__m256i *v256;
uint64_t *u64;
};
typedef union _povly povly;
// 2-way reduced duplex row.  Rows are passed as indices into 'matrix'
// rather than pointers because the two inout rows (rowInOut0/rowInOut1)
// may differ per lane and must be gathered as 256-bit halves.
void reducedDuplexRow_2way( uint64_t *state, povly matrix, uint64_t rowIn,
uint64_t rowInOut0, uint64_t rowInOut1,
uint64_t rowOut, uint64_t nCols);
#endif