Jay D Dee
2019-12-21 13:19:29 -05:00
parent a17ff6f189
commit c65b0ff7a6
72 changed files with 9090 additions and 1336 deletions

View File

@@ -44,8 +44,13 @@ bool lyra2rev3_thread_init()
{
const int64_t ROW_LEN_INT64 = BLOCK_LEN_INT64 * 4; // nCols
const int64_t ROW_LEN_BYTES = ROW_LEN_INT64 * 8;
int size = ROW_LEN_BYTES * 4; // nRows;
int size = (int64_t)ROW_LEN_BYTES * 4; // nRows;
#if defined(LYRA2REV3_16WAY)
// l2v3_wholeMatrix = _mm_malloc( 2*size, 128 );
l2v3_wholeMatrix = _mm_malloc( 2*size, 64 );
init_lyra2rev3_16way_ctx();;
#else
l2v3_wholeMatrix = _mm_malloc( size, 64 );
#if defined (LYRA2REV3_8WAY)
init_lyra2rev3_8way_ctx();;
@@ -53,13 +58,17 @@ bool lyra2rev3_thread_init()
init_lyra2rev3_4way_ctx();;
#else
init_lyra2rev3_ctx();
#endif
#endif
return l2v3_wholeMatrix;
}
bool register_lyra2rev3_algo( algo_gate_t* gate )
{
#if defined (LYRA2REV3_8WAY)
#if defined(LYRA2REV3_16WAY)
gate->scanhash = (void*)&scanhash_lyra2rev3_16way;
gate->hash = (void*)&lyra2rev3_16way_hash;
#elif defined (LYRA2REV3_8WAY)
gate->scanhash = (void*)&scanhash_lyra2rev3_8way;
gate->hash = (void*)&lyra2rev3_8way_hash;
#elif defined (LYRA2REV3_4WAY)
@@ -69,7 +78,7 @@ bool register_lyra2rev3_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_lyra2rev3;
gate->hash = (void*)&lyra2rev3_hash;
#endif
gate->optimizations = SSE2_OPT | SSE42_OPT | AVX2_OPT;
gate->optimizations = SSE2_OPT | SSE42_OPT | AVX2_OPT | AVX512_OPT;
gate->miner_thread_init = (void*)&lyra2rev3_thread_init;
opt_target_factor = 256.0;
return true;
@@ -85,10 +94,14 @@ bool lyra2rev2_thread_init()
const int64_t ROW_LEN_BYTES = ROW_LEN_INT64 * 8;
int size = (int64_t)ROW_LEN_BYTES * 4; // nRows;
#if defined (LYRA2REV2_8WAY)
l2v2_wholeMatrix = _mm_malloc( 2 * size, 64 ); // 2 way
init_lyra2rev2_8way_ctx();;
#elif defined (LYRA2REV2_4WAY)
l2v2_wholeMatrix = _mm_malloc( size, 64 );
#if defined (LYRA2REV2_4WAY)
init_lyra2rev2_4way_ctx();;
#else
l2v2_wholeMatrix = _mm_malloc( size, 64 );
init_lyra2rev2_ctx();
#endif
return l2v2_wholeMatrix;
@@ -96,14 +109,17 @@ bool lyra2rev2_thread_init()
bool register_lyra2rev2_algo( algo_gate_t* gate )
{
#if defined (LYRA2REV2_4WAY)
#if defined (LYRA2REV2_8WAY)
gate->scanhash = (void*)&scanhash_lyra2rev2_8way;
gate->hash = (void*)&lyra2rev2_8way_hash;
#elif defined (LYRA2REV2_4WAY)
gate->scanhash = (void*)&scanhash_lyra2rev2_4way;
gate->hash = (void*)&lyra2rev2_4way_hash;
#else
gate->scanhash = (void*)&scanhash_lyra2rev2;
gate->hash = (void*)&lyra2rev2_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | SSE42_OPT | AVX2_OPT;
gate->optimizations = SSE2_OPT | AES_OPT | SSE42_OPT | AVX2_OPT | AVX512_OPT;
gate->miner_thread_init = (void*)&lyra2rev2_thread_init;
opt_target_factor = 256.0;
return true;

View File

@@ -5,18 +5,27 @@
#include <stdint.h>
#include "lyra2.h"
#if defined(__AVX2__)
#define LYRA2REV3_8WAY
#endif
#if defined(__SSE2__)
#define LYRA2REV3_4WAY
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define LYRA2REV3_16WAY 1
#elif defined(__AVX2__)
#define LYRA2REV3_8WAY 1
#elif defined(__SSE2__)
#define LYRA2REV3_4WAY 1
#endif
extern __thread uint64_t* l2v3_wholeMatrix;
bool register_lyra2rev3_algo( algo_gate_t* gate );
#if defined(LYRA2REV3_8WAY)
#if defined(LYRA2REV3_16WAY)
void lyra2rev3_16way_hash( void *state, const void *input );
int scanhash_lyra2rev3_16way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool init_lyra2rev3_16way_ctx();
#elif defined(LYRA2REV3_8WAY)
void lyra2rev3_8way_hash( void *state, const void *input );
int scanhash_lyra2rev3_8way( struct work *work, uint32_t max_nonce,
@@ -41,15 +50,24 @@ bool init_lyra2rev3_ctx();
//////////////////////////////////
#if defined(__AVX2__)
#define LYRA2REV2_4WAY
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define LYRA2REV2_8WAY 1
#elif defined(__AVX2__)
#define LYRA2REV2_4WAY 1
#endif
extern __thread uint64_t* l2v2_wholeMatrix;
bool register_lyra2rev2_algo( algo_gate_t* gate );
#if defined(LYRA2REV2_4WAY)
#if defined(LYRA2REV2_8WAY)
void lyra2rev2_8way_hash( void *state, const void *input );
int scanhash_lyra2rev2_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
bool init_lyra2rev2_8way_ctx();
#elif defined(LYRA2REV2_4WAY)
void lyra2rev2_4way_hash( void *state, const void *input );
int scanhash_lyra2rev2_4way( struct work *work, uint32_t max_nonce,

View File

@@ -26,6 +26,19 @@
#include "lyra2.h"
#include "sponge.h"
// LYRA2RE 8 cols 8 rows used by lyra2re, allium, phi2, x22i, x25x.
//
// LYRA2REV2 4 cols 4 rows used by lyra2rev2.
//
// LYRA2REV3 4 cols 4 rows with an extra twist in calculating
// rowa in the wandering phase. Used by lyra2rev3.
//
// LYRA2Z various cols & rows and supports 80 byte input. Used by lyra2z,
// lyra2z330, lyra2h.
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
/**
* Executes Lyra2 based on the G function from Blake2b. This version supports salts and passwords
* whose combined length is smaller than the size of the memory matrix, (i.e., (nRows x nCols x b) bits,
@@ -46,176 +59,137 @@
* @return 0 if the key is generated correctly; -1 if there is an error (usually due to lack of memory for allocation)
*/
int LYRA2REV2( uint64_t* wholeMatrix, void *K, uint64_t kLen, const void *pwd,
const uint64_t pwdlen, const void *salt, const uint64_t saltlen,
const uint64_t timeCost, const uint64_t nRows,
const uint64_t nCols )
// For lyra2rev3.
// convert a simple offset to an index into interleaved data.
// good for state and 4 row matrix.
// index = ( int( off / 4 ) * 8 ) + ( off mod 4 )
#define offset_to_index( o ) \
( ( ( (uint64_t)( (o) & 0xf) / 4 ) * 8 ) + ( (o) % 4 ) )
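For illustration only (not part of this commit): a minimal sketch of what offset_to_index does, assuming 2x256-bit interleaving with 4 uint64_t per lane per block. Lane 0's logical word offsets 0..3 stay put, 4..7 land at 8..11, and so on, because lane 1 occupies the 4-word gap in between.

#include <stdint.h>
#include <stdio.h>

#define offset_to_index( o ) \
   ( ( ( (uint64_t)( (o) & 0xf) / 4 ) * 8 ) + ( (o) % 4 ) )

int main()
{
   // Print the lane-0 logical offset -> interleaved index mapping.
   for ( int o = 0; o < 16; o++ )
      printf( "off %2d -> index %2lu\n", o,
              (unsigned long)offset_to_index( o ) );
   return 0;
}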
int LYRA2REV2_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
const void *pwd, const uint64_t pwdlen, const uint64_t timeCost,
const uint64_t nRows, const uint64_t nCols )
{
//====================== Basic variables ============================//
uint64_t _ALIGN(256) state[16];
int64_t row = 2; //index of row to be processed
int64_t prev = 1; //index of prev (last row ever computed/modified)
int64_t rowa = 0; //index of row* (a previous row, deterministically picked during Setup and randomly picked while Wandering)
int64_t tau; //Time Loop iterator
int64_t step = 1; //Visitation step (used during Setup and Wandering phases)
int64_t window = 2; //Visitation window (used to define which rows can be revisited during Setup)
int64_t gap = 1; //Modifier to the step, assuming the values 1 or -1
// int64_t i; //auxiliary iteration counter
int64_t v64; // 64bit var for memcpy
uint64_t _ALIGN(256) state[32];
int64_t row = 2;
int64_t prev = 1;
int64_t rowa0 = 0;
int64_t rowa1 = 0;
int64_t tau;
int64_t step = 1;
int64_t window = 2;
int64_t gap = 1;
//====================================================================/
//=== Initializing the Memory Matrix and pointers to it =============//
//Tries to allocate enough space for the whole memory matrix
const int64_t ROW_LEN_INT64 = BLOCK_LEN_INT64 * nCols;
// const int64_t ROW_LEN_BYTES = ROW_LEN_INT64 * 8;
// for Lyra2REv2, nCols = 4, v1 was using 8
const int64_t BLOCK_LEN = (nCols == 4) ? BLOCK_LEN_BLAKE2_SAFE_INT64
: BLOCK_LEN_BLAKE2_SAFE_BYTES;
uint64_t *ptrWord = wholeMatrix;
// memset( wholeMatrix, 0, ROW_LEN_BYTES * nRows );
//=== Getting the password + salt + basil padded with 10*1 ==========//
//OBS.:The memory matrix will temporarily hold the password: not for saving memory,
//but this ensures that the password copied locally will be overwritten as soon as possible
//First, we clean enough blocks for the password, salt, basil and padding
int64_t nBlocksInput = ( ( saltlen + pwdlen + 6 * sizeof(uint64_t) )
int64_t nBlocksInput = ( ( pwdlen + pwdlen + 6 * sizeof(uint64_t) )
/ BLOCK_LEN_BLAKE2_SAFE_BYTES ) + 1;
byte *ptrByte = (byte*) wholeMatrix;
uint64_t *ptr = wholeMatrix;
uint64_t *pw = (uint64_t*)pwd;
//Prepends the password
memcpy(ptrByte, pwd, pwdlen);
ptrByte += pwdlen;
memcpy( ptr, pw, 2*pwdlen ); // password
ptr += pwdlen>>2;
memcpy( ptr, pw, 2*pwdlen ); // password lane 1
ptr += pwdlen>>2;
//Concatenates the salt
memcpy(ptrByte, salt, saltlen);
ptrByte += saltlen;
// now build the rest interleaving on the fly.
memset( ptrByte, 0, nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES
- (saltlen + pwdlen) );
ptr[0] = ptr[ 4] = kLen;
ptr[1] = ptr[ 5] = pwdlen;
ptr[2] = ptr[ 6] = pwdlen; // saltlen
ptr[3] = ptr[ 7] = timeCost;
ptr[8] = ptr[12] = nRows;
ptr[9] = ptr[13] = nCols;
ptr[10] = ptr[14] = 0x80;
ptr[11] = ptr[15] = 0x0100000000000000;
//Concatenates the basil: every integer passed as parameter, in the order they are provided by the interface
memcpy(ptrByte, &kLen, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = pwdlen;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = saltlen;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = timeCost;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = nRows;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = nCols;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
//Now comes the padding
*ptrByte = 0x80; //first byte of padding: right after the password
ptrByte = (byte*) wholeMatrix; //resets the pointer to the start of the memory matrix
ptrByte += nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES - 1; //sets the pointer to the correct position: end of incomplete block
*ptrByte ^= 0x01; //last byte of padding: at the end of the last incomplete block
// from here on it's all SIMD access to state and matrix
// define vector pointers and adjust sizes and pointer offsets
//================= Initializing the Sponge State ====================//
//Sponge state: 16 uint64_t, BLOCK_LEN_INT64 words of them for the bitrate (b) and the remainder for the capacity (c)
// initState( state );
//========================= Setup Phase =============================//
//Absorbing salt, password and basil: this is the only place in which the block length is hard-coded to 512 bits
ptrWord = wholeMatrix;
absorbBlockBlake2Safe( state, ptrWord, nBlocksInput, BLOCK_LEN );
/*
for (i = 0; i < nBlocksInput; i++)
{
absorbBlockBlake2Safe( state, ptrWord ); //absorbs each block of pad(pwd || salt || basil)
ptrWord += BLOCK_LEN; //goes to next block of pad(pwd || salt || basil)
}
*/
absorbBlockBlake2Safe_2way( state, ptrWord, nBlocksInput, BLOCK_LEN );
//Initializes M[0] and M[1]
reducedSqueezeRow0( state, &wholeMatrix[0], nCols ); //The locally copied password is most likely overwritten here
reducedSqueezeRow0_2way( state, &wholeMatrix[0], nCols );
reducedDuplexRow1( state, &wholeMatrix[0], &wholeMatrix[ROW_LEN_INT64],
nCols);
reducedDuplexRow1_2way( state, &wholeMatrix[0],
&wholeMatrix[ 2 * ROW_LEN_INT64 ], nCols );
do
{
//M[row] = rand; //M[row*] = M[row*] XOR rotW(rand)
//M[row] = rand; //M[row*] = M[row*] XOR rotW(rand)
reducedDuplexRowSetup( state, &wholeMatrix[prev*ROW_LEN_INT64],
&wholeMatrix[rowa*ROW_LEN_INT64],
&wholeMatrix[row*ROW_LEN_INT64], nCols );
reducedDuplexRowSetup_2way( state, &wholeMatrix[ 2* prev * ROW_LEN_INT64],
&wholeMatrix[ 2* rowa0 * ROW_LEN_INT64],
&wholeMatrix[ 2* row*ROW_LEN_INT64],
nCols );
//updates the value of row* (deterministically picked during Setup))
rowa = (rowa + step) & (window - 1);
//update prev: it now points to the last row ever computed
rowa0 = (rowa0 + step) & (window - 1);
prev = row;
//updates row: goes to the next row to be computed
row++;
prev = row;
row++;
//Checks if all rows in the window were visited.
if (rowa == 0)
{
step = window + gap; //changes the step: approximately doubles its value
window *= 2; //doubles the size of the re-visitation window
gap = -gap; //inverts the modifier to the step
}
} while (row < nRows);
if ( rowa0 == 0 )
{
step = window + gap;
window *= 2;
gap = -gap;
}
} while ( row < nRows );
//===================== Wandering Phase =============================//
row = 0; //Resets the visitation to the first row of the memory matrix
for (tau = 1; tau <= timeCost; tau++)
row = 0;
for ( tau = 1; tau <= timeCost; tau++ )
{
//Step is approximately half the number of all rows of the memory matrix for an odd tau; otherwise, it is -1
step = (tau % 2 == 0) ? -1 : nRows / 2 - 1;
do
{
//Selects a pseudorandom index row*
//-----------------------------------------------
rowa = state[0] & (unsigned int)(nRows-1); //(USE THIS IF nRows IS A POWER OF 2)
step = ( (tau & 1) == 0 ) ? -1 : ( nRows >> 1 ) - 1;
do
{
rowa0 = state[ 0 ] & (unsigned int)(nRows-1);
rowa1 = state[ 4 ] & (unsigned int)(nRows-1);
//rowa = state[0] % nRows; //(USE THIS FOR THE "GENERIC" CASE)
//-------------------------------------------
reducedDuplexRow_2way( state, &wholeMatrix[ 2* prev * ROW_LEN_INT64 ],
&wholeMatrix[ 2* rowa0 * ROW_LEN_INT64 ],
&wholeMatrix[ 2* rowa1 * ROW_LEN_INT64 ],
&wholeMatrix[ 2* row *ROW_LEN_INT64 ],
nCols );
prev = row;
//Performs a reduced-round duplexing operation over M[row*] XOR M[prev], updating both M[row*] and M[row]
reducedDuplexRow( state, &wholeMatrix[prev*ROW_LEN_INT64],
&wholeMatrix[rowa*ROW_LEN_INT64],
&wholeMatrix[row*ROW_LEN_INT64], nCols );
//update prev: it now points to the last row ever computed
prev = row;
row = (row + step) & (unsigned int)(nRows-1); //(USE THIS IF nRows IS A POWER OF 2)
//updates row: goes to the next row to be computed
//----------------------------------------------------
row = (row + step) & (unsigned int)(nRows-1); //(USE THIS IF nRows IS A POWER OF 2)
//row = (row + step) % nRows; //(USE THIS FOR THE "GENERIC" CASE)
//----------------------------------------------------
} while (row != 0);
} while (row != 0);
}
//===================== Wrap-up Phase ===============================//
//Absorbs the last block of the memory matrix
absorbBlock(state, &wholeMatrix[rowa*ROW_LEN_INT64]);
absorbBlock_2way( state, &wholeMatrix[ 2 * rowa0 *ROW_LEN_INT64 ],
&wholeMatrix[ 2 * rowa1 *ROW_LEN_INT64 ] );
//Squeezes the key
squeeze(state, K, (unsigned int) kLen);
squeeze_2way( state, K, (unsigned int) kLen );
return 0;
}
// This version is currently only used by REv3 and has some hard coding
// specific to v3 such as input data size of 32 bytes.
//
// Similarly with REv2. The difference with REv3 isn't clear and maybe
// they can be merged.
//
// RE is used by lyra2re, allium. The main difference between RE and REv2
// is in the matrix size.
//
// Z also needs to support 80 byte input as well as 32 byte, and odd
// matrix sizes like 330 rows. It is used by lyra2z330, lyra2z, lyra2h.
/////////////////////////////////////////////////
// 2 way 256
@@ -223,22 +197,29 @@ int LYRA2REV2( uint64_t* wholeMatrix, void *K, uint64_t kLen, const void *pwd,
// Data is interleaved 2x256.
int LYRA2REV3_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
const void *pwd, const uint64_t pwdlen, const void *salt,
const uint64_t saltlen, const uint64_t timeCost, const uint64_t nRows,
const uint64_t nCols )
const void *pwd, uint64_t pwdlen, uint64_t timeCost,
uint64_t nRows, uint64_t nCols )
// hard coded for 32 byte input as well as matrix size.
// Other required versions include 80 byte input and different block
// sizes
//int LYRA2REV3_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
// const void *pwd, const uint64_t pwdlen, const void *salt,
// const uint64_t saltlen, const uint64_t timeCost, const uint64_t nRows,
// const uint64_t nCols )
{
//====================== Basic variables ============================//
uint64_t _ALIGN(256) state[16];
int64_t row = 2; //index of row to be processed
int64_t prev = 1; //index of prev (last row ever computed/modified)
int64_t rowa = 0; //index of row* (a previous row, deterministically picked during Setup and randomly picked while Wandering)
int64_t tau; //Time Loop iterator
int64_t step = 1; //Visitation step (used during Setup and Wandering phases)
int64_t window = 2; //Visitation window (used to define which rows can be revisited during Setup)
int64_t gap = 1; //Modifier to the step, assuming the values 1 or -1
// int64_t i; //auxiliary iteration counter
int64_t v64; // 64bit var for memcpy
uint64_t instance0 = 0; // Separate instance for each lane
uint64_t _ALIGN(256) state[32];
int64_t row = 2;
int64_t prev = 1;
int64_t rowa0 = 0;
int64_t rowa1 = 0;
int64_t tau;
int64_t step = 1;
int64_t window = 2;
int64_t gap = 1;
uint64_t instance0 = 0;
uint64_t instance1 = 0;
//====================================================================/
@@ -248,7 +229,9 @@ int LYRA2REV3_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
uint64_t *ptrWord = wholeMatrix;
// 2 way 256 rewrite. Salt always == password, and data is interleaved,
// need to build in parallel:
// need to build in parallel as pw is already interleaved.
// { password, (64 or 80 bytes)
// salt, (64 or 80 bytes) = same as password
// Klen, (u64) = 32 bytes
@@ -262,73 +245,54 @@ int LYRA2REV3_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
// 1 (byte)
// }
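As a rough sketch only (assumed from the layout above and the interleaved stores that follow, not code from this commit), one lane's padded prefix block for a 32 byte password with salt == password would look like this before interleaving; two such lanes are interleaved 4 words at a time in wholeMatrix.

#include <stdint.h>
#include <string.h>

// Hypothetical illustration of one lane's 2-block (128 byte) input:
// password, salt (== password), the basil parameters, then padding.
static void build_lane( uint64_t lane[16], const uint64_t pwd[4],
                        uint64_t kLen, uint64_t timeCost,
                        uint64_t nRows, uint64_t nCols )
{
   memcpy( lane,     pwd, 32 );      // password
   memcpy( lane + 4, pwd, 32 );      // salt == password
   lane[ 8] = kLen;                  // output key length
   lane[ 9] = 32;                    // pwdlen
   lane[10] = 32;                    // saltlen
   lane[11] = timeCost;
   lane[12] = nRows;
   lane[13] = nCols;
   lane[14] = 0x80;                  // first padding byte
   lane[15] = 0x0100000000000000;    // 0x01 in the block's last byte
}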
// memset( wholeMatrix, 0, ROW_LEN_BYTES * nRows );
// input is usually 32 maybe 64, both are aligned to 256 bit vector.
// 80 byte input is not aligned, complicating matters for lyra2z.
int64_t nBlocksInput = ( ( saltlen + pwdlen + 6 * sizeof(uint64_t) )
int64_t nBlocksInput = ( ( pwdlen + pwdlen + 6 * sizeof(uint64_t) )
/ BLOCK_LEN_BLAKE2_SAFE_BYTES ) + 1;
uint64_t *ptr = wholeMatrix;
uint64_t *pw = (uint64_t*)pwd;
byte *ptrByte = (byte*) wholeMatrix;
memcpy( ptr, pw, 2*pwdlen ); // password
ptr += pwdlen>>2;
memcpy( ptr, pw, 2*pwdlen ); // password lane 1
ptr += pwdlen>>2;
// now build the rest interleaving on the fly.
//Prepends the password
memcpy(ptrByte, pwd, pwdlen);
ptrByte += pwdlen;
//Concatenates the salt
memcpy(ptrByte, salt, saltlen);
ptrByte += saltlen;
memset( ptrByte, 0, nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES
- (saltlen + pwdlen) );
//Concatenates the basil: every integer passed as parameter, in the order they are provided by the interface
memcpy(ptrByte, &kLen, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = pwdlen;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = saltlen;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = timeCost;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = nRows;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
v64 = nCols;
memcpy(ptrByte, &v64, sizeof(int64_t));
ptrByte += sizeof(uint64_t);
//Now comes the padding
*ptrByte = 0x80; //first byte of padding: right after the password
ptrByte = (byte*) wholeMatrix; //resets the pointer to the start of the memory matrix
ptrByte += nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES - 1; //sets the pointer to the correct position: end of incomplete block
*ptrByte ^= 0x01; //last byte of padding: at the end of the last incomplete block
// from here on it's all SIMD access to state and matrix
// define vector pointers and adjust sizes and pointer offsets
ptr[0] = ptr[ 4] = kLen;
ptr[1] = ptr[ 5] = pwdlen;
ptr[2] = ptr[ 6] = pwdlen; // saltlen
ptr[3] = ptr[ 7] = timeCost;
ptr[8] = ptr[12] = nRows;
ptr[9] = ptr[13] = nCols;
ptr[10] = ptr[14] = 0x80;
ptr[11] = ptr[15] = 0x0100000000000000;
ptrWord = wholeMatrix;
absorbBlockBlake2Safe( state, ptrWord, nBlocksInput, BLOCK_LEN );
reducedSqueezeRow0( state, &wholeMatrix[0], nCols );
absorbBlockBlake2Safe_2way( state, ptrWord, nBlocksInput, BLOCK_LEN );
reducedDuplexRow1( state, &wholeMatrix[0], &wholeMatrix[ROW_LEN_INT64],
nCols);
reducedSqueezeRow0_2way( state, &wholeMatrix[0], nCols );
reducedDuplexRow1_2way( state, &wholeMatrix[0],
&wholeMatrix[2*ROW_LEN_INT64], nCols );
do
{
reducedDuplexRowSetup( state, &wholeMatrix[prev*ROW_LEN_INT64],
&wholeMatrix[rowa*ROW_LEN_INT64],
&wholeMatrix[row*ROW_LEN_INT64], nCols );
reducedDuplexRowSetup_2way( state, &wholeMatrix[ 2* prev*ROW_LEN_INT64 ],
&wholeMatrix[ 2* rowa0*ROW_LEN_INT64 ],
&wholeMatrix[ 2* row*ROW_LEN_INT64 ],
nCols );
rowa = (rowa + step) & (window - 1);
rowa0 = (rowa0 + step) & (window - 1);
prev = row;
row++;
if (rowa == 0)
if (rowa0 == 0)
{
step = window + gap; //changes the step: approximately doubles its value
window *= 2; //doubles the size of the re-visitation window
@@ -340,37 +304,22 @@ int LYRA2REV3_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
row = 0;
for (tau = 1; tau <= timeCost; tau++)
{
step = ((tau & 1) == 0) ? -1 : (nRows >> 1) - 1;
step = ( (tau & 1) == 0 ) ? -1 : ( nRows >> 1 ) - 1;
do
{
// This part is not parallel, rowa will be different for each lane.
// state (u64[16]) is interleaved 2x256, need to extract separately.
instance0 = state[ offset_to_index( instance0 ) ];
instance1 = (&state[4])[ offset_to_index( instance1 ) ];
// index = 2 * instance / 4 * 4 + instance % 4
uint64_t index0 = ( ( (instance0 & 0xf) >> 3 ) << 2 )
+ ( instance0 & 0x3 )
uint64_t index1 = ( ( (instance1 & 0xf) >> 3 ) << 2 )
+ ( instance1 & 0x3 )
rowa0 = state[ offset_to_index( instance0 ) ]
& (unsigned int)(nRows-1);
rowa1 = (state+4)[ offset_to_index( instance1 ) ]
& (unsigned int)(nRows-1);
instance0 = state[ index0 ] & 0xf;
instance1 = (state+4)[ index1 ] & 0xf;
rowa0 = state[ instance0 ];
rowa1 = (state+4)[ instance1 ];
reducedDuplexRow_2way( state, &wholeMatrix[prev*ROW_LEN_INT64],
&wholeMatrix[rowa0*ROW_LEN_INT64],
&wholeMatrix[rowa1*ROW_LEN_INT64],
&wholeMatrix[row*ROW_LEN_INT64], nCols );
/*
instance = state[instance & 0xF];
rowa = state[instance & 0xF] & (unsigned int)(nRows-1);
reducedDuplexRow( state, &wholeMatrix[prev*ROW_LEN_INT64],
&wholeMatrix[rowa*ROW_LEN_INT64],
&wholeMatrix[row*ROW_LEN_INT64], nCols );
*/
// End of divergence.
reducedDuplexRow_2way( state, &wholeMatrix[ 2* prev * ROW_LEN_INT64 ],
&wholeMatrix[ 2* rowa0 * ROW_LEN_INT64 ],
&wholeMatrix[ 2* rowa1 * ROW_LEN_INT64 ],
&wholeMatrix[ 2* row*ROW_LEN_INT64 ],
nCols );
prev = row;
row = (row + step) & (unsigned int)(nRows-1);
@@ -378,13 +327,17 @@ int LYRA2REV3_2WAY( uint64_t* wholeMatrix, void *K, uint64_t kLen,
} while ( row != 0 );
}
absorbBlock( state, &wholeMatrix[rowa*ROW_LEN_INT64] );
squeeze( state, K, (unsigned int) kLen );
absorbBlock_2way( state, &wholeMatrix[2*rowa0*ROW_LEN_INT64],
&wholeMatrix[2*rowa1*ROW_LEN_INT64] );
squeeze_2way( state, K, (unsigned int) kLen );
return 0;
}
#endif // AVX512
#if 0
//////////////////////////////////////////////////
int LYRA2Z( uint64_t* wholeMatrix, void *K, uint64_t kLen, const void *pwd,
@@ -532,22 +485,26 @@ int LYRA2Z( uint64_t* wholeMatrix, void *K, uint64_t kLen, const void *pwd,
return 0;
}
#endif
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// Lyra2RE doesn't like the new wholeMatrix implementation
int LYRA2RE( void *K, uint64_t kLen, const void *pwd, const uint64_t pwdlen,
const void *salt, const uint64_t saltlen, const uint64_t timeCost,
const uint64_t nRows, const uint64_t nCols )
int LYRA2RE_2WAY( void *K, uint64_t kLen, const void *pwd,
const uint64_t pwdlen, const uint64_t timeCost,
const uint64_t nRows, const uint64_t nCols )
{
//====================== Basic variables ============================//
uint64_t _ALIGN(256) state[16];
int64_t row = 2; //index of row to be processed
int64_t prev = 1; //index of prev (last row ever computed/modified)
int64_t rowa = 0; //index of row* (a previous row, deterministically picked during Setup and randomly picked while Wandering)
int64_t rowa0 = 0;
int64_t rowa1 = 0;
int64_t tau; //Time Loop iterator
int64_t step = 1; //Visitation step (used during Setup and Wandering phases)
int64_t window = 2; //Visitation window (used to define which rows can be revisited during Setup)
int64_t gap = 1; //Modifier to the step, assuming the values 1 or -1
int64_t i; //auxiliary iteration counter
int64_t v64; // 64bit var for memcpy
//====================================================================/
//=== Initializing the Memory Matrix and pointers to it =============//
@@ -573,15 +530,36 @@ int LYRA2RE( void *K, uint64_t kLen, const void *pwd, const uint64_t pwdlen,
#endif
uint64_t *ptrWord = wholeMatrix;
uint64_t *pw = (uint64_t*)pwd;
//=== Getting the password + salt + basil padded with 10*1 ==========//
//OBS.:The memory matrix will temporarily hold the password: not for saving memory,
//but this ensures that the password copied locally will be overwritten as soon as possible
//First, we clean enough blocks for the password, salt, basil and padding
int64_t nBlocksInput = ( ( saltlen + pwdlen + 6 * sizeof(uint64_t) )
int64_t nBlocksInput = ( ( pwdlen + pwdlen + 6 * sizeof(uint64_t) )
/ BLOCK_LEN_BLAKE2_SAFE_BYTES ) + 1;
uint64_t *ptr = wholeMatrix;
memcpy( ptr, pw, 2*pwdlen ); // password
ptr += pwdlen>>2;
memcpy( ptr, pw, 2*pwdlen ); // password lane 1
ptr += pwdlen>>2;
// now build the rest interleaving on the fly.
ptr[0] = ptr[ 4] = kLen;
ptr[1] = ptr[ 5] = pwdlen;
ptr[2] = ptr[ 6] = pwdlen; // saltlen
ptr[3] = ptr[ 7] = timeCost;
ptr[8] = ptr[12] = nRows;
ptr[9] = ptr[13] = nCols;
ptr[10] = ptr[14] = 0x80;
ptr[11] = ptr[15] = 0x0100000000000000;
/*
byte *ptrByte = (byte*) wholeMatrix;
//Prepends the password
@@ -630,7 +608,9 @@ int LYRA2RE( void *K, uint64_t kLen, const void *pwd, const uint64_t pwdlen,
ptrWord = wholeMatrix;
absorbBlockBlake2Safe( state, ptrWord, nBlocksInput, BLOCK_LEN );
*/
absorbBlockBlake2Safe_2way( state, ptrWord, nBlocksInput, BLOCK_LEN );
/*
for (i = 0; i < nBlocksInput; i++)
{
@@ -639,21 +619,22 @@ int LYRA2RE( void *K, uint64_t kLen, const void *pwd, const uint64_t pwdlen,
}
*/
//Initializes M[0] and M[1]
reducedSqueezeRow0( state, &wholeMatrix[0], nCols ); //The locally copied password is most likely overwritten here
reducedSqueezeRow0_2way( state, &wholeMatrix[0], nCols ); //The locally copied password is most likely overwritten here
reducedDuplexRow1( state, &wholeMatrix[0], &wholeMatrix[ROW_LEN_INT64],
nCols);
reducedDuplexRow1_2way( state, &wholeMatrix[0],
&wholeMatrix[ 2 * ROW_LEN_INT64], nCols );
do
{
//M[row] = rand; //M[row*] = M[row*] XOR rotW(rand)
reducedDuplexRowSetup( state, &wholeMatrix[prev*ROW_LEN_INT64],
&wholeMatrix[rowa*ROW_LEN_INT64],
&wholeMatrix[row*ROW_LEN_INT64], nCols );
reducedDuplexRowSetup_2way( state, &wholeMatrix[ 2* prev*ROW_LEN_INT64 ],
&wholeMatrix[ 2* rowa0*ROW_LEN_INT64 ],
&wholeMatrix[ 2* row*ROW_LEN_INT64 ],
nCols );
//updates the value of row* (deterministically picked during Setup))
rowa = (rowa + step) & (window - 1);
rowa0 = (rowa0 + step) & (window - 1);
//update prev: it now points to the last row ever computed
prev = row;
@@ -661,7 +642,7 @@ int LYRA2RE( void *K, uint64_t kLen, const void *pwd, const uint64_t pwdlen,
row++;
//Checks if all rows in the window were visited.
if (rowa == 0)
if (rowa0 == 0)
{
step = window + gap; //changes the step: approximately doubles its value
window *= 2; //doubles the size of the re-visitation window
@@ -674,21 +655,18 @@ int LYRA2RE( void *K, uint64_t kLen, const void *pwd, const uint64_t pwdlen,
row = 0; //Resets the visitation to the first row of the memory matrix
for (tau = 1; tau <= timeCost; tau++)
{
//Step is approximately half the number of all rows of the memory matrix for an odd tau; otherwise, it is -1
step = (tau % 2 == 0) ? -1 : nRows / 2 - 1;
do
{
//Selects a pseudorandom index row*
//-----------------------------------------------
rowa = state[0] & (unsigned int)(nRows-1); //(USE THIS IF nRows IS A POWER OF 2)
step = ((tau & 1) == 0) ? -1 : (nRows >> 1) - 1;
do
{
rowa0 = state[ 0 ] & (unsigned int)(nRows-1);
rowa1 = state[ 4 ] & (unsigned int)(nRows-1);
//rowa = state[0] % nRows; //(USE THIS FOR THE "GENERIC" CASE)
//-------------------------------------------
reducedDuplexRow_2way( state, &wholeMatrix[ 2* prev * ROW_LEN_INT64 ],
&wholeMatrix[ 2* rowa0 * ROW_LEN_INT64 ],
&wholeMatrix[ 2* rowa1 * ROW_LEN_INT64 ],
&wholeMatrix[ 2* row *ROW_LEN_INT64 ],
nCols );
//Performs a reduced-round duplexing operation over M[row*] XOR M[prev], updating both M[row*] and M[row]
reducedDuplexRow( state, &wholeMatrix[prev*ROW_LEN_INT64],
&wholeMatrix[rowa*ROW_LEN_INT64],
&wholeMatrix[row*ROW_LEN_INT64], nCols );
//update prev: it now points to the last row ever computed
prev = row;
@@ -703,9 +681,10 @@ int LYRA2RE( void *K, uint64_t kLen, const void *pwd, const uint64_t pwdlen,
//===================== Wrap-up Phase ===============================//
//Absorbs the last block of the memory matrix
absorbBlock(state, &wholeMatrix[rowa*ROW_LEN_INT64]);
absorbBlock_2way( state, &wholeMatrix[ 2 * rowa0 *ROW_LEN_INT64],
&wholeMatrix[ 2 * rowa1 *ROW_LEN_INT64] );
//Squeezes the key
squeeze(state, K, (unsigned int) kLen);
squeeze_2way( state, K, (unsigned int) kLen );
//================== Freeing the memory =============================//
_mm_free(wholeMatrix);
@@ -713,3 +692,4 @@ int LYRA2RE( void *K, uint64_t kLen, const void *pwd, const uint64_t pwdlen,
return 0;
}
#endif

View File

@@ -327,7 +327,6 @@ int LYRA2REV3( uint64_t* wholeMatrix, void *K, uint64_t kLen, const void *pwd,
reducedDuplexRow1( state, &wholeMatrix[0], &wholeMatrix[ROW_LEN_INT64],
nCols);
do
{
//M[row] = rand; //M[row*] = M[row*] XOR rotW(rand)

View File

@@ -60,4 +60,15 @@ int LYRA2Z( uint64_t*, void *K, uint64_t kLen, const void *pwd,
int LYRA2(void *K, int64_t kLen, const void *pwd, int32_t pwdlen, const void *salt, int32_t saltlen, int64_t timeCost, const int16_t nRows, const int16_t nCols);
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
int LYRA2REV2_2WAY( uint64_t*, void *K, uint64_t kLen, const void *pwd,
uint64_t pwdlen, uint64_t timeCost, uint64_t nRows, uint64_t nCols );
int LYRA2REV3_2WAY( uint64_t*, void *K, uint64_t kLen, const void *pwd,
uint64_t pwdlen, uint64_t timeCost, uint64_t nRows, uint64_t nCols );
#endif
#endif /* LYRA2_H_ */

View File

@@ -1,13 +1,150 @@
#include "lyra2-gate.h"
#include <memory.h>
#if defined (LYRA2REV2_4WAY)
#include "algo/blake/blake-hash-4way.h"
#include "algo/keccak/keccak-hash-4way.h"
#include "algo/skein/skein-hash-4way.h"
#include "algo/bmw/bmw-hash-4way.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/cubehash/cube-hash-2way.h"
#if defined (LYRA2REV2_8WAY)
typedef struct {
blake256_8way_context blake;
keccak256_8way_context keccak;
cube_4way_context cube;
skein256_8way_context skein;
bmw256_8way_context bmw;
} lyra2v2_8way_ctx_holder __attribute__ ((aligned (64)));
static lyra2v2_8way_ctx_holder l2v2_8way_ctx;
bool init_lyra2rev2_8way_ctx()
{
keccak256_8way_init( &l2v2_8way_ctx.keccak );
cube_4way_init( &l2v2_8way_ctx.cube, 256, 16, 32 );
skein256_8way_init( &l2v2_8way_ctx.skein );
bmw256_8way_init( &l2v2_8way_ctx.bmw );
return true;
}
void lyra2rev2_8way_hash( void *state, const void *input )
{
uint32_t vhash[8*8] __attribute__ ((aligned (128)));
uint32_t vhashA[8*8] __attribute__ ((aligned (64)));
uint32_t vhashB[8*8] __attribute__ ((aligned (64)));
uint32_t hash0[8] __attribute__ ((aligned (64)));
uint32_t hash1[8] __attribute__ ((aligned (64)));
uint32_t hash2[8] __attribute__ ((aligned (64)));
uint32_t hash3[8] __attribute__ ((aligned (64)));
uint32_t hash4[8] __attribute__ ((aligned (64)));
uint32_t hash5[8] __attribute__ ((aligned (64)));
uint32_t hash6[8] __attribute__ ((aligned (64)));
uint32_t hash7[8] __attribute__ ((aligned (64)));
lyra2v2_8way_ctx_holder ctx __attribute__ ((aligned (64)));
memcpy( &ctx, &l2v2_8way_ctx, sizeof(l2v2_8way_ctx) );
blake256_8way( &ctx.blake, input + (64<<3), 16 );
blake256_8way_close( &ctx.blake, vhash );
rintrlv_8x32_8x64( vhashA, vhash, 256 );
keccak256_8way_update( &ctx.keccak, vhashA, 32 );
keccak256_8way_close( &ctx.keccak, vhash );
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 256 );
cube_4way_update_close( &ctx.cube, vhashA, vhashA, 32 );
cube_4way_init( &ctx.cube, 256, 16, 32 );
cube_4way_update_close( &ctx.cube, vhashB, vhashB, 32 );
dintrlv_4x128( hash0, hash1, hash2, hash3, vhashA, 256 );
dintrlv_4x128( hash4, hash5, hash6, hash7, vhashB, 256 );
intrlv_2x256( vhash, hash0, hash1, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash0, hash1, vhash, 256 );
intrlv_2x256( vhash, hash2, hash3, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash2, hash3, vhash, 256 );
intrlv_2x256( vhash, hash4, hash5, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash4, hash5, vhash, 256 );
intrlv_2x256( vhash, hash6, hash7, 256 );
LYRA2REV2_2WAY( l2v2_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash6, hash7, vhash, 256 );
intrlv_8x64( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, 256 );
skein256_8way_update( &ctx.skein, vhash, 32 );
skein256_8way_close( &ctx.skein, vhash );
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 256 );
cube_4way_init( &ctx.cube, 256, 16, 32 );
cube_4way_update_close( &ctx.cube, vhashA, vhashA, 32 );
cube_4way_init( &ctx.cube, 256, 16, 32 );
cube_4way_update_close( &ctx.cube, vhashB, vhashB, 32 );
dintrlv_4x128( hash0, hash1, hash2, hash3, vhashA, 256 );
dintrlv_4x128( hash4, hash5, hash6, hash7, vhashB, 256 );
intrlv_8x32( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, 256 );
bmw256_8way_update( &ctx.bmw, vhash, 32 );
bmw256_8way_close( &ctx.bmw, state );
}
int scanhash_lyra2rev2_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[7<<3]);
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
uint32_t n = first_nonce;
const uint32_t Htarg = ptarget[7];
__m256i *noncev = (__m256i*)vdata + 19; // aligned
int thr_id = mythr->id;
if ( opt_benchmark )
( (uint32_t*)ptarget )[7] = 0x0000ff;
mm256_bswap32_intrlv80_8x32( vdata, pdata );
blake256_8way_init( &l2v2_8way_ctx.blake );
blake256_8way_update( &l2v2_8way_ctx.blake, vdata, 64 );
do
{
*noncev = mm256_bswap_32( _mm256_set_epi32( n+7, n+6, n+5, n+4,
n+3, n+2, n+1, n ) );
lyra2rev2_8way_hash( hash, vdata );
pdata[19] = n;
for ( int lane = 0; lane < 8; lane++ ) if ( hash7[lane] <= Htarg )
{
extr_lane_8x32( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 8;
} while ( (n < last_nonce) && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce;
return 0;
}
#elif defined (LYRA2REV2_4WAY)
typedef struct {
blake256_4way_context blake;

View File

@@ -4,8 +4,180 @@
#include "algo/blake/blake-hash-4way.h"
#include "algo/bmw/bmw-hash-4way.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/cubehash/cube-hash-2way.h"
#if defined (LYRA2REV3_8WAY)
#if defined (LYRA2REV3_16WAY)
typedef struct {
blake256_16way_context blake;
cube_4way_context cube;
bmw256_16way_context bmw;
} lyra2v3_16way_ctx_holder;
static __thread lyra2v3_16way_ctx_holder l2v3_16way_ctx;
bool init_lyra2rev3_16way_ctx()
{
blake256_16way_init( &l2v3_16way_ctx.blake );
cube_4way_init( &l2v3_16way_ctx.cube, 256, 16, 32 );
bmw256_16way_init( &l2v3_16way_ctx.bmw );
return true;
}
void lyra2rev3_16way_hash( void *state, const void *input )
{
uint32_t vhash[16*8] __attribute__ ((aligned (128)));
uint32_t hash0[8] __attribute__ ((aligned (64)));
uint32_t hash1[8] __attribute__ ((aligned (64)));
uint32_t hash2[8] __attribute__ ((aligned (64)));
uint32_t hash3[8] __attribute__ ((aligned (64)));
uint32_t hash4[8] __attribute__ ((aligned (64)));
uint32_t hash5[8] __attribute__ ((aligned (64)));
uint32_t hash6[8] __attribute__ ((aligned (64)));
uint32_t hash7[8] __attribute__ ((aligned (64)));
uint32_t hash8[8] __attribute__ ((aligned (64)));
uint32_t hash9[8] __attribute__ ((aligned (64)));
uint32_t hash10[8] __attribute__ ((aligned (64)));
uint32_t hash11[8] __attribute__ ((aligned (64)));
uint32_t hash12[8] __attribute__ ((aligned (64)));
uint32_t hash13[8] __attribute__ ((aligned (64)));
uint32_t hash14[8] __attribute__ ((aligned (64)));
uint32_t hash15[8] __attribute__ ((aligned (64)));
lyra2v3_16way_ctx_holder ctx __attribute__ ((aligned (64)));
memcpy( &ctx, &l2v3_16way_ctx, sizeof(l2v3_16way_ctx) );
blake256_16way_update( &ctx.blake, input + (64*16), 16 );
blake256_16way_close( &ctx.blake, vhash );
dintrlv_16x32( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
hash8, hash9, hash10, hash11 ,hash12, hash13, hash14, hash15,
vhash, 256 );
intrlv_2x256( vhash, hash0, hash1, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash0, hash1, vhash, 256 );
intrlv_2x256( vhash, hash2, hash3, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash2, hash3, vhash, 256 );
intrlv_2x256( vhash, hash4, hash5, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash4, hash5, vhash, 256 );
intrlv_2x256( vhash, hash6, hash7, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash6, hash7, vhash, 256 );
intrlv_2x256( vhash, hash8, hash9, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash8, hash9, vhash, 256 );
intrlv_2x256( vhash, hash10, hash11, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash10, hash11, vhash, 256 );
intrlv_2x256( vhash, hash12, hash13, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash12, hash13, vhash, 256 );
intrlv_2x256( vhash, hash14, hash15, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash14, hash15, vhash, 256 );
intrlv_4x128( vhash, hash0, hash1, hash2, hash3, 256 );
cube_4way_update_close( &ctx.cube, vhash, vhash, 32 );
dintrlv_4x128( hash0, hash1, hash2, hash3, vhash, 256 );
intrlv_4x128( vhash, hash4, hash5, hash6, hash7, 256 );
cube_4way_init( &ctx.cube, 256, 16, 32 );
cube_4way_update_close( &ctx.cube, vhash, vhash, 32 );
dintrlv_4x128( hash4, hash5, hash6, hash7, vhash, 256 );
intrlv_4x128( vhash, hash8, hash9, hash10, hash11, 256 );
cube_4way_init( &ctx.cube, 256, 16, 32 );
cube_4way_update_close( &ctx.cube, vhash, vhash, 32 );
dintrlv_4x128( hash8, hash9, hash10, hash11, vhash, 256 );
intrlv_4x128( vhash, hash12, hash13, hash14, hash15, 256 );
cube_4way_init( &ctx.cube, 256, 16, 32 );
cube_4way_update_close( &ctx.cube, vhash, vhash, 32 );
dintrlv_4x128( hash12, hash13, hash14, hash15, vhash, 256 );
intrlv_2x256( vhash, hash0, hash1, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash0, hash1, vhash, 256 );
intrlv_2x256( vhash, hash2, hash3, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash2, hash3, vhash, 256 );
intrlv_2x256( vhash, hash4, hash5, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash4, hash5, vhash, 256 );
intrlv_2x256( vhash, hash6, hash7, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash6, hash7, vhash, 256 );
intrlv_2x256( vhash, hash8, hash9, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash8, hash9, vhash, 256 );
intrlv_2x256( vhash, hash10, hash11, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash10, hash11, vhash, 256 );
intrlv_2x256( vhash, hash12, hash13, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash12, hash13, vhash, 256 );
intrlv_2x256( vhash, hash14, hash15, 256 );
LYRA2REV3_2WAY( l2v3_wholeMatrix, vhash, 32, vhash, 32, 1, 4, 4 );
dintrlv_2x256( hash14, hash15, vhash, 256 );
intrlv_16x32( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, hash8, hash9, hash10, hash11, hash12, hash13, hash14,
hash15, 256 );
bmw256_16way_update( &ctx.bmw, vhash, 32 );
bmw256_16way_close( &ctx.bmw, state );
}
int scanhash_lyra2rev3_16way( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*16] __attribute__ ((aligned (128)));
uint32_t vdata[20*16] __attribute__ ((aligned (64)));
uint32_t *hash7 = &hash[7<<4];
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
const uint32_t last_nonce = max_nonce - 16;
const uint32_t Htarg = ptarget[7];
__m512i *noncev = (__m512i*)vdata + 19; // aligned
const int thr_id = mythr->id;
if ( opt_benchmark ) ( (uint32_t*)ptarget )[7] = 0x0000ff;
mm512_bswap32_intrlv80_16x32( vdata, pdata );
blake256_16way_init( &l2v3_16way_ctx.blake );
blake256_16way_update( &l2v3_16way_ctx.blake, vdata, 64 );
do
{
*noncev = mm512_bswap_32( _mm512_set_epi32( n+15, n+14, n+13, n+12,
n+11, n+10, n+ 9, n+ 8,
n+ 7, n+ 6, n+ 5, n+ 4,
n+ 3, n+ 2, n+ 1, n ) );
lyra2rev3_16way_hash( hash, vdata );
pdata[19] = n;
for ( int lane = 0; lane < 16; lane++ )
if ( unlikely( hash7[lane] <= Htarg ) )
{
extr_lane_16x32( lane_hash, hash, lane, 256 );
if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 16;
} while ( likely( (n < last_nonce) && !work_restart[thr_id].restart ) );
*hashes_done = n - first_nonce;
return 0;
}
#elif defined (LYRA2REV3_8WAY)
typedef struct {
blake256_8way_context blake;

View File

@@ -19,7 +19,7 @@
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "algo-gate.h"
//#include "algo-gate.h"
#include <string.h>
#include <stdio.h>
#include <time.h>
@@ -40,19 +40,26 @@ inline void squeeze_2way( uint64_t *State, byte *Out, unsigned int len )
//Squeezes full blocks
for ( i = 0; i < fullBlocks; i++ )
{
memcpy_512( out, state, BLOCK_LEN_M256I*2 );
LYRA_ROUND_2WAY_AVX2( state[0], state[1], state[2], state[3] );
out += BLOCK_LEN_M256I*2;
memcpy_512( out, state, BLOCK_LEN_M256I );
LYRA_ROUND_2WAY_AVX512( state[0], state[1], state[2], state[3] );
out += BLOCK_LEN_M256I;
}
//Squeezes remaining bytes
memcpy_512( out, state, ( (len_m256i % BLOCK_LEN_M256I) * 2 ) );
memcpy_512( out, state, len_m256i % BLOCK_LEN_M256I );
}
inline void absorbBlock_2way( uint64_t *State, const uint64_t *In )
inline void absorbBlock_2way( uint64_t *State, const uint64_t *In0,
const uint64_t *In1 )
{
register __m512i state0, state1, state2, state3;
__m512i *in = (__m512i*)In;
__m512i in[3];
casti_m256i( in, 0 ) = casti_m256i( In0, 0 );
casti_m256i( in, 1 ) = casti_m256i( In1, 1 );
casti_m256i( in, 2 ) = casti_m256i( In0, 2 );
casti_m256i( in, 3 ) = casti_m256i( In1, 3 );
casti_m256i( in, 4 ) = casti_m256i( In0, 4 );
casti_m256i( in, 5 ) = casti_m256i( In1, 5 );
state0 = _mm512_load_si512( (__m512i*)State );
state1 = _mm512_load_si512( (__m512i*)State + 1 );
state2 = _mm512_load_si512( (__m512i*)State + 2 );
@@ -90,7 +97,7 @@ inline void absorbBlockBlake2Safe_2way( uint64_t *State, const uint64_t *In,
state1 = _mm512_xor_si512( state1, in[1] );
LYRA_12_ROUNDS_2WAY_AVX512( state0, state1, state2, state3 );
In += block_len * 2;
In += block_len*2;
}
_mm512_store_si512( (__m512i*)State, state0 );
@@ -109,7 +116,7 @@ inline void reducedSqueezeRow0_2way( uint64_t* State, uint64_t* rowOut,
register __m512i state0, state1, state2, state3;
__m512i* out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_M256I * 2 );
__m512i* out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_M256I );
state0 = _mm512_load_si512( (__m512i*)State );
state1 = _mm512_load_si512( (__m512i*)State + 1 );
@@ -126,13 +133,13 @@ inline void reducedSqueezeRow0_2way( uint64_t* State, uint64_t* rowOut,
{
_mm_prefetch( out - 9, _MM_HINT_T0 );
_mm_prefetch( out - 11, _MM_HINT_T0 );
out[0] = state0;
out[1] = state1;
out[2] = state2;
//Goes to next block (column) that will receive the squeezed data
out -= BLOCK_LEN_M256I * 2;
out -= BLOCK_LEN_M256I;
LYRA_ROUND_2WAY_AVX512( state0, state1, state2, state3 );
}
@@ -143,15 +150,14 @@ inline void reducedSqueezeRow0_2way( uint64_t* State, uint64_t* rowOut,
_mm512_store_si512( (__m512i*)State + 3, state3 );
}
// This function has to deal with gathering 2 256 bit rowin vectors from
// non-contiguous memory. Extra work and performance penalty.
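For reference, a hedged sketch (assuming plain AVX-512F intrinsics; the commit itself uses the project's casti_m256i macros for this) of gathering one 256-bit block from each lane's row into a single 512-bit vector, lane 0 in the low half and lane 1 in the high half:

#include <immintrin.h>

// Hypothetical helper, not part of this commit. Requires AVX-512F
// and 32-byte aligned rows.
static inline __m512i gather_2x256( const void *row0, const void *row1 )
{
   __m256i lo = _mm256_load_si256( (const __m256i*)row0 );
   __m256i hi = _mm256_load_si256( (const __m256i*)row1 );
   return _mm512_inserti64x4( _mm512_castsi256_si512( lo ), hi, 1 );
}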
inline void reducedDuplexRow1_2way( uint64_t *State, uint64_t *rowIn,
uint64_t *rowOut, uint64_t nCols )
{
int i;
register __m512i state0, state1, state2, state3;
__m512i *in = (__m256i*)rowIn;
__m512i *in = (__m512i*)rowIn;
__m512i *out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_M256I );
state0 = _mm512_load_si512( (__m512i*)State );
state1 = _mm512_load_si512( (__m512i*)State + 1 );
@@ -171,28 +177,25 @@ inline void reducedDuplexRow1_2way( uint64_t *State, uint64_t *rowIn,
out[2] = _mm512_xor_si512( state2, in[2] );
//Input: next column (i.e., next block in sequence)
in0 += BLOCK_LEN_M256I;
in1 += BLOCK_LEN_M256I;
in += BLOCK_LEN_M256I;
//Output: goes to previous column
out -= BLOCK_LEN_M256I * 2;
out -= BLOCK_LEN_M256I;
}
_mm512_store_si256( (__m512i*)State, state0 );
_mm512_store_si256( (__m512i*)State + 1, state1 );
_mm512_store_si256( (__m512i*)State + 2, state2 );
_mm512_store_si256( (__m512i*)State + 3, state3 );
}
_mm512_store_si512( (__m512i*)State, state0 );
_mm512_store_si512( (__m512i*)State + 1, state1 );
_mm512_store_si512( (__m512i*)State + 2, state2 );
_mm512_store_si512( (__m512i*)State + 3, state3 );
}
inline void reducedDuplexRowSetup_2way( uint64_t *State, uint64_t *rowIn,
uint64_t *rowInOut, uint64_t *rowOut, uint64_t nCols )
{
int i;
register __m512i state0, state1, state2, state3;
__m512i* in = (__m512i*)rowIn;
__m512i* inout = (__m512i*)rowInOut;
__m512i* out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_M256I * 2 );
__m512i* out = (__m512i*)rowOut + ( (nCols-1) * BLOCK_LEN_M256I );
__m512i t0, t1, t2;
state0 = _mm512_load_si512( (__m512i*)State );
@@ -209,7 +212,7 @@ inline void reducedDuplexRowSetup_2way( uint64_t *State, uint64_t *rowIn,
state2 = _mm512_xor_si512( state2,
_mm512_add_epi64( in[2], inout[2] ) );
LYRA_ROUND_2WAY AVX512( state0, state1, state2, state3 );
LYRA_ROUND_2WAY_AVX512( state0, state1, state2, state3 );
out[0] = _mm512_xor_si512( state0, in[0] );
out[1] = _mm512_xor_si512( state1, in[1] );
@@ -221,17 +224,18 @@ inline void reducedDuplexRowSetup_2way( uint64_t *State, uint64_t *rowIn,
t2 = _mm512_permutex_epi64( state2, 0x93 );
inout[0] = _mm512_xor_si512( inout[0],
_mm512_mask_blend_epi32( t0, t2, 0x03 ) );
_mm512_mask_blend_epi32( 0x0303, t0, t2 ) );
inout[1] = _mm512_xor_si512( inout[1],
_mm512_mask_blend_epi32( t1, t0, 0x03 ) );
_mm512_mask_blend_epi32( 0x0303, t1, t0 ) );
inout[2] = _mm512_xor_si512( inout[2],
_mm512_mask_blend_epi32( t2, t1, 0x03 ) );
_mm512_mask_blend_epi32( 0x0303, t2, t1 ) );
//Inputs: next column (i.e., next block in sequence)
in += BLOCK_LEN_M256I * 2;
inout += BLOCK_LEN_M256I * 2;
in += BLOCK_LEN_M256I;
inout += BLOCK_LEN_M256I;
//Output: goes to previous column
out -= BLOCK_LEN_M256I * 2;
out -= BLOCK_LEN_M256I;
}
_mm512_store_si512( (__m512i*)State, state0 );
@@ -240,49 +244,61 @@ inline void reducedDuplexRowSetup_2way( uint64_t *State, uint64_t *rowIn,
_mm512_store_si512( (__m512i*)State + 3, state3 );
}
inline void reducedDuplexRow_2way( uint64_t *State, uint64_t *rowIn1,
uint64_t *rowIn0, uint64_t *rowInOut, uint64_t *rowOut,
uint64_t nCols )
// big ugly workaround for pointer aliasing, use a union of pointers.
// Access matrix using m512i for in and out, m256i for inout
inline void reducedDuplexRow_2way( uint64_t *State, uint64_t *rowIn,
uint64_t *rowInOut0, uint64_t *rowInOut1,
uint64_t *rowOut, uint64_t nCols)
{
int i;
register __m512i state0, state1, state2, state3;
__m256i *in0 = (__m256i*)rowIn0;
__m256i *in0 = (__m256i*)rowIn0;
__m2512* in = (__m512i*)rowIn;
__m2512* inout = (__m512i*)rowInOut;
__m512i* out = (__m512i*)rowOut;
__m512i t0, t1, t2;
__m512i *in = (__m512i*)rowIn;
__m256i *inout0 = (__m256i*)rowInOut0;
__m256i *inout1 = (__m256i*)rowInOut1;
__m512i *out = (__m512i*)rowOut;
__m512i io[3];
povly inout;
inout.v512 = &io[0];
__m512i t0, t1, t2;
_mm_prefetch( in0, _MM_HINT_T0 );
_mm_prefetch( in1, _MM_HINT_T0 );
_mm_prefetch( in0 + 2, _MM_HINT_T0 );
_mm_prefetch( in1 + 2, _MM_HINT_T0 );
_mm_prefetch( in0 + 4, _MM_HINT_T0 );
_mm_prefetch( in1 + 4, _MM_HINT_T0 );
_mm_prefetch( in0 + 6, _MM_HINT_T0 );
_mm_prefetch( in1 + 6, _MM_HINT_T0 );
state0 = _mm512_load_si512( (__m512i*)State );
state1 = _mm512_load_si512( (__m512i*)State + 1 );
state2 = _mm512_load_si512( (__m512i*)State + 2 );
state3 = _mm512_load_si512( (__m512i*)State + 3 );
_mm_prefetch( in, _MM_HINT_T0 );
_mm_prefetch( inout0, _MM_HINT_T0 );
_mm_prefetch( inout1, _MM_HINT_T0 );
_mm_prefetch( in + 2, _MM_HINT_T0 );
_mm_prefetch( inout0 + 2, _MM_HINT_T0 );
_mm_prefetch( inout1 + 2, _MM_HINT_T0 );
_mm_prefetch( in + 4, _MM_HINT_T0 );
_mm_prefetch( inout0 + 4, _MM_HINT_T0 );
_mm_prefetch( inout1 + 4, _MM_HINT_T0 );
_mm_prefetch( in + 6, _MM_HINT_T0 );
_mm_prefetch( inout0 + 6, _MM_HINT_T0 );
_mm_prefetch( inout1 + 6, _MM_HINT_T0 );
for ( i = 0; i < nCols; i++ )
{
//Absorbing "M[prev] [+] M[row*]"
inout.v256[0] = inout0[0];
inout.v256[1] = inout1[1];
inout.v256[2] = inout0[2];
inout.v256[3] = inout1[3];
inout.v256[4] = inout0[4];
inout.v256[5] = inout1[5];
// state0 = _mm512_xor_si512( state0, mm512_concat_256( in1[0], in0[0] );
// state1 = _mm512_xor_si512( state1, mm512_concat_256( in1[1], in0[1] );
// state2 = _mm512_xor_si512( state2, mm512_concat_256( in1[2], in0[2] );
t0 = mm512_concat_256( in1[0], in0[0] );
t1 = mm512_concat_256( in1[1], in0[1] );
t2 = mm512_concat_256( in1[2], in0[2] );
state0 = _mm512_xor_si512( state0,
_mm512_add_epi64( t0, inout[0] ) );
_mm512_add_epi64( in[0], inout.v512[0] ) );
state1 = _mm512_xor_si512( state1,
_mm512_add_epi64( t1, inout[1] ) );
_mm512_add_epi64( in[1], inout.v512[1] ) );
state2 = _mm512_xor_si512( state2,
_mm512_add_epi64( t2, inout[2] ) );
_mm512_add_epi64( in[2], inout.v512[2] ) );
//Applies the reduced-round transformation f to the sponge's state
LYRA_ROUND_2WAY_AVX512( state0, state1, state2, state3 );
@@ -292,22 +308,44 @@ inline void reducedDuplexRow_2way( uint64_t *State, uint64_t *rowIn1,
out[1] = _mm512_xor_si512( out[1], state1 );
out[2] = _mm512_xor_si512( out[2], state2 );
// if inout is the same row as out it was just overwritten, reload.
if ( rowOut == rowInOut0 )
{
inout.v256[0] = inout0[0];
inout.v256[2] = inout0[2];
inout.v256[4] = inout0[4];
}
if ( rowOut == rowInOut1 )
{
inout.v256[1] = inout1[1];
inout.v256[3] = inout1[3];
inout.v256[5] = inout1[5];
}
//M[rowInOut][col] = M[rowInOut][col] XOR rotW(rand)
t0 = _mm512_permutex_epi64( state0, 0x93 );
t1 = _mm512_permutex_epi64( state1, 0x93 );
t2 = _mm512_permutex_epi64( state2, 0x93 );
inout[0] = _mm512_xor_si512( inout[0],
_mm512_mask_blend_epi32( t0, t2, 0x03 ) );
inout[1] = _mm512_xor_si512( inout[1],
_mm512_mask_blend_epi32( t1, t0, 0x03 ) );
inout[2] = _mm512_xor_si512( inout[2],
_mm512_mask_blend_epi32( t2, t1, 0x03 ) );
inout.v512[0] = _mm512_xor_si512( inout.v512[0],
_mm512_mask_blend_epi32( 0x0303, t0, t2 ) );
inout.v512[1] = _mm512_xor_si512( inout.v512[1],
_mm512_mask_blend_epi32( 0x0303, t1, t0 ) );
inout.v512[2] = _mm512_xor_si512( inout.v512[2],
_mm512_mask_blend_epi32( 0x0303, t2, t1 ) );
inout0[0] = inout.v256[0];
inout1[1] = inout.v256[1];
inout0[2] = inout.v256[2];
inout1[3] = inout.v256[3];
inout0[4] = inout.v256[4];
inout1[5] = inout.v256[5];
//Goes to next block
in += BLOCK_LEN_M256I * 2;
out += BLOCK_LEN_M256I * 2;
inout += BLOCK_LEN_M256I * 2;
in += BLOCK_LEN_M256I;
inout0 += BLOCK_LEN_M256I * 2;
inout1 += BLOCK_LEN_M256I * 2;
out += BLOCK_LEN_M256I;
}
_mm512_store_si512( (__m512i*)State, state0 );

View File

@@ -375,7 +375,10 @@ inline void reducedSqueezeRow0( uint64_t* State, uint64_t* rowOut,
{
_mm_prefetch( out - 9, _MM_HINT_T0 );
_mm_prefetch( out - 11, _MM_HINT_T0 );
//printf("S RSR0 col= %d, out= %x\n",i,out);
out[0] = state0;
out[1] = state1;
out[2] = state2;
@@ -706,11 +709,34 @@ inline void reducedDuplexRowSetup( uint64_t *State, uint64_t *rowIn,
out[1] = _mm256_xor_si256( state1, in[1] );
out[2] = _mm256_xor_si256( state2, in[2] );
/*
printf("s duplexsetup col= %d\n",i);
uint64_t * o = (uint64_t*)out;
printf("S out %016lx %016lx %016lx %016lx\n",o[0],o[1],o[2],o[3]);
printf("S out %016lx %016lx %016lx %016lx\n",o[4],o[5],o[6],o[7]);
printf("S out %016lx %016lx %016lx %016lx\n",o[8],o[9],o[10],o[11]);
printf("S out %016lx %016lx %016lx %016lx\n",o[12],o[13],o[14],o[15]);
printf("S out %016lx %016lx %016lx %016lx\n",o[16],o[17],o[18],o[19]);
printf("S out %016lx %016lx %016lx %016lx\n",o[20],o[21],o[22],o[23]);
*/
//M[row*][col] = M[row*][col] XOR rotW(rand)
t0 = _mm256_permute4x64_epi64( state0, 0x93 );
t1 = _mm256_permute4x64_epi64( state1, 0x93 );
t2 = _mm256_permute4x64_epi64( state2, 0x93 );
/*
uint64_t *t = (uint64_t*)&t0;
printf("S t0 %016lx %016lx %016lx %016lx\n",t[0],t[1],t[2],t[3]);
o = (uint64_t*)inout;
printf("S inout0 %016lx %016lx %016lx %016lx\n",o[0],o[1],o[2],o[3]);
printf("S inout0 %016lx %016lx %016lx %016lx\n",o[4],o[5],o[6],o[7]);
printf("S inout0 %016lx %016lx %016lx %016lx\n",o[8],o[9],o[10],o[11]);
printf("S inout0 %016lx %016lx %016lx %016lx\n",o[12],o[13],o[14],o[15]);
printf("S inout0 %016lx %016lx %016lx %016lx\n",o[16],o[17],o[18],o[19]);
printf("S inout0 %016lx %016lx %016lx %016lx\n",o[20],o[21],o[22],o[23]);
*/
inout[0] = _mm256_xor_si256( inout[0],
_mm256_blend_epi32( t0, t2, 0x03 ) );
inout[1] = _mm256_xor_si256( inout[1],
@@ -718,7 +744,17 @@ inline void reducedDuplexRowSetup( uint64_t *State, uint64_t *rowIn,
inout[2] = _mm256_xor_si256( inout[2],
_mm256_blend_epi32( t2, t1, 0x03 ) );
//Inputs: next column (i.e., next block in sequence)
/*
o = (uint64_t*)inout;
printf("S inout1 %016lx %016lx %016lx %016lx\n",o[0],o[1],o[2],o[3]);
printf("S inout1 %016lx %016lx %016lx %016lx\n",o[4],o[5],o[6],o[7]);
printf("S inout1 %016lx %016lx %016lx %016lx\n",o[8],o[9],o[10],o[11]);
printf("S inout1 %016lx %016lx %016lx %016lx\n",o[12],o[13],o[14],o[15]);
printf("S inout1 %016lx %016lx %016lx %016lx\n",o[16],o[17],o[18],o[19]);
printf("S inout1 %016lx %016lx %016lx %016lx\n",o[20],o[21],o[22],o[23]);
*/
//Inputs: next column (i.e., next block in sequence)
in += BLOCK_LEN_M256I;
inout += BLOCK_LEN_M256I;
//Output: goes to previous column
@@ -949,6 +985,22 @@ inline void reducedDuplexRow( uint64_t *State, uint64_t *rowIn,
_mm_prefetch( inout + 9, _MM_HINT_T0 );
_mm_prefetch( inout + 11, _MM_HINT_T0 );
/*
uint64_t *io = (uint64_t*)inout;
uint64_t *ii = (uint64_t*)in;
printf("RDRS1 col= %d\n", i);
printf("RDRS1 IO %016lx %016lx %016lx %016lx\n",io[0],io[1],io[2],io[3]);
printf("RDRS1 IO %016lx %016lx %016lx %016lx\n",io[4],io[5],io[6],io[7]);
printf("RDRS1 IO %016lx %016lx %016lx %016lx\n",io[8],io[9],io[10],io[11]);
printf("RDRS1 IO %016lx %016lx %016lx %016lx\n",io[12],io[13],io[14],io[15]);
printf("RDRS1 IN %016lx %016lx %016lx %016lx\n",ii[0],ii[1],ii[2],ii[3]);
printf("RDRS1 IN %016lx %016lx %016lx %016lx\n",ii[4],ii[5],ii[6],ii[7]);
printf("RDRS1 IN %016lx %016lx %016lx %016lx\n",ii[8],ii[9],ii[10],ii[11]);
printf("RDRS1 IN %016lx %016lx %016lx %016lx\n",ii[12],ii[13],ii[14],ii[15]);
*/
//Absorbing "M[prev] [+] M[row*]"
state0 = _mm256_xor_si256( state0,
_mm256_add_epi64( in[0], inout[0] ) );

View File

@@ -65,14 +65,14 @@ static inline uint64_t rotr64( const uint64_t w, const unsigned c ){
b = mm512_ror_64( _mm512_xor_si512( b, c ), 63 );
#define LYRA_ROUND_2WAY_AVX512( s0, s1, s2, s3 ) \
G_4X64( s0, s1, s2, s3 ); \
s1 = mm512_ror_1x64( s1); \
s2 = mm512_swap128_256( s2 ); \
s3 = mm512_rol1x64_256( s3 ); \
G_4X64( s0, s1, s2, s3 ); \
s1 = mm512_rol1x64_256( s1 ); \
s2 = mm512_swap128_256( s2 ); \
s3 = mm512_ror1x64_256( s3 );
G2W_4X64( s0, s1, s2, s3 ); \
s1 = mm512_ror256_64( s1); \
s2 = mm512_swap256_128( s2 ); \
s3 = mm512_rol256_64( s3 ); \
G2W_4X64( s0, s1, s2, s3 ); \
s1 = mm512_rol256_64( s1 ); \
s2 = mm512_swap256_128( s2 ); \
s3 = mm512_ror256_64( s3 );
#define LYRA_12_ROUNDS_2WAY_AVX512( s0, s1, s2, s3 ) \
LYRA_ROUND_2WAY_AVX512( s0, s1, s2, s3 ) \
@@ -148,14 +148,14 @@ static inline uint64_t rotr64( const uint64_t w, const unsigned c ){
#define LYRA_ROUND_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \
G_2X64( s0, s2, s4, s6 ); \
G_2X64( s1, s3, s5, s7 ); \
mm128_ror1x64_256( s2, s3 ); \
mm128_swap128_256( s4, s5 ); \
mm128_rol1x64_256( s6, s7 ); \
mm128_ror256_64( s2, s3 ); \
mm128_swap256_128( s4, s5 ); \
mm128_rol256_64( s6, s7 ); \
G_2X64( s0, s2, s4, s6 ); \
G_2X64( s1, s3, s5, s7 ); \
mm128_rol1x64_256( s2, s3 ); \
mm128_swap128_256( s4, s5 ); \
mm128_ror1x64_256( s6, s7 );
mm128_rol256_64( s2, s3 ); \
mm128_swap256_128( s4, s5 ); \
mm128_ror256_64( s6, s7 );
#define LYRA_12_ROUNDS_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \
LYRA_ROUND_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \
@@ -203,24 +203,36 @@ static inline uint64_t rotr64( const uint64_t w, const unsigned c ){
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
union _povly
{
__m512i *v512;
__m256i *v256;
uint64_t *u64;
};
typedef union _povly povly;
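A minimal usage sketch (an assumption, not code from this commit) of how the union of pointers lets the same interleaved buffer be read through either vector width without casting at every access:

#include <immintrin.h>
#include <stdint.h>

// Hypothetical demo, requires AVX-512F and a 64-byte aligned buffer
// of at least 24 uint64_t (one 3 x 512-bit block).
static void povly_demo( uint64_t *block )
{
   povly p;
   p.u64 = block;

   __m512i both  = _mm512_load_si512( p.v512 );      // both lanes at once
   __m256i lane0 = _mm256_load_si256( p.v256 );      // low 256 bits
   __m256i lane1 = _mm256_load_si256( p.v256 + 1 );  // high 256 bits

   (void)both; (void)lane0; (void)lane1;
}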
//---- Housekeeping
void initState_2way( uint64_t state[/*16*/] );
void initState_2way( uint64_t State[/*16*/] );
//---- Squeezes
void squeeze_2way( uint64_t *state, unsigned char *out, unsigned int len );
void squeeze_2way( uint64_t *State, unsigned char *out, unsigned int len );
void reducedSqueezeRow0_2way( uint64_t* state, uint64_t* row, uint64_t nCols );
//---- Absorbs
void absorbBlock_2way( uint64_t *state, const uint64_t *in );
void absorbBlockBlake2Safe_2way( uint64_t *state, const uint64_t *in,
void absorbBlock_2way( uint64_t *State, const uint64_t *In0,
const uint64_t *In1 );
void absorbBlockBlake2Safe_2way( uint64_t *State, const uint64_t *In,
const uint64_t nBlocks, const uint64_t block_len );
//---- Duplexes
void reducedDuplexRow1_2way( uint64_t *state, uint64_t *rowIn,
void reducedDuplexRow1_2way( uint64_t *State, uint64_t *rowIn,
uint64_t *rowOut, uint64_t nCols);
void reducedDuplexRowSetup_2way( uint64_t *state, uint64_t *rowIn,
void reducedDuplexRowSetup_2way( uint64_t *State, uint64_t *rowIn,
uint64_t *rowInOut, uint64_t *rowOut, uint64_t nCols );
void reducedDuplexRow_2way(uint64_t *state, uint64_t *rowIn1, uint64_t *rowIn0, uint64_t *rowInOut, uint64_t *rowOut, uint64_t nCols);
void reducedDuplexRow_2way( uint64_t *State, uint64_t *rowIn,
uint64_t *rowInOut0, uint64_t *rowInOut1,
uint64_t *rowOut, uint64_t nCols);
#endif