mirror of https://github.com/JayDDee/cpuminer-opt.git

v3.15.2
@@ -15,7 +15,9 @@
#include "miner.h"
#include "simd-utils.h"

#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX2__) && defined(__VAES__)

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)


int groestl256_4way_init( groestl256_4way_context* ctx, uint64_t hashlen )

@@ -43,10 +45,10 @@ int groestl256_4way_init( groestl256_4way_context* ctx, uint64_t hashlen )
}

int groestl256_4way_full( groestl256_4way_context* ctx, void* output,
                          const void* input, uint64_t databitlen )
                          const void* input, uint64_t datalen )
{
   const int len = (int)databitlen / 128;
   const int hashlen_m128i = 32 / 16;   // bytes to __m128i
   const int len = (int)datalen >> 4;
   const int hashlen_m128i = 32 >> 4;   // bytes to __m128i
   const int hash_offset = SIZE256 - hashlen_m128i;
   int rem = ctx->rem_ptr;
   int blocks = len / SIZE256;

@@ -172,5 +174,161 @@ int groestl256_4way_update_close( groestl256_4way_context* ctx, void* output,
   return 0;
}

#endif // VAES
#endif // AVX512

// AVX2 + VAES

int groestl256_2way_init( groestl256_2way_context* ctx, uint64_t hashlen )
{
   int i;

   ctx->hashlen = hashlen;

   if (ctx->chaining == NULL || ctx->buffer == NULL)
      return 1;

   for ( i = 0; i < SIZE256; i++ )
   {
      ctx->chaining[i] = m256_zero;
      ctx->buffer[i] = m256_zero;
   }

   // The only non-zero in the IV is len. It can be hard coded.
   ctx->chaining[ 3 ] = m256_const2_64( 0, 0x0100000000000000 );

   ctx->buf_ptr = 0;
   ctx->rem_ptr = 0;

   return 0;
}
int groestl256_2way_full( groestl256_2way_context* ctx, void* output,
                          const void* input, uint64_t datalen )
{
   const int len = (int)datalen >> 4;
   const int hashlen_m128i = 32 >> 4;   // bytes to __m128i
   const int hash_offset = SIZE256 - hashlen_m128i;
   int rem = ctx->rem_ptr;
   int blocks = len / SIZE256;
   __m256i* in = (__m256i*)input;
   int i;

   if (ctx->chaining == NULL || ctx->buffer == NULL)
      return 1;

   for ( i = 0; i < SIZE256; i++ )
   {
      ctx->chaining[i] = m256_zero;
      ctx->buffer[i] = m256_zero;
   }

   // The only non-zero in the IV is len. It can be hard coded.
   ctx->chaining[ 3 ] = m256_const2_64( 0, 0x0100000000000000 );
   ctx->buf_ptr = 0;
   ctx->rem_ptr = 0;

   // --- update ---

   // digest any full blocks, process directly from input
   for ( i = 0; i < blocks; i++ )
      TF512_2way( ctx->chaining, &in[ i * SIZE256 ] );
   ctx->buf_ptr = blocks * SIZE256;

   // copy any remaining data to buffer, it may already contain data
   // from a previous update for a midstate precalc
   for ( i = 0; i < len % SIZE256; i++ )
      ctx->buffer[ rem + i ] = in[ ctx->buf_ptr + i ];
   i += rem;   // use i as rem_ptr in final

   //--- final ---

   blocks++;   // adjust for final block

   if ( i == SIZE256 - 1 )
   {
      // only 1 vector left in buffer, all padding at once
      ctx->buffer[i] = m256_const2_64( (uint64_t)blocks << 56, 0x80 );
   }
   else
   {
      // add first padding
      ctx->buffer[i] = m256_const2_64( 0, 0x80 );
      // add zero padding
      for ( i += 1; i < SIZE256 - 1; i++ )
         ctx->buffer[i] = m256_zero;

      // add length padding, second last byte is zero unless blocks > 255
      ctx->buffer[i] = m256_const2_64( (uint64_t)blocks << 56, 0 );
   }

   // digest final padding block and do output transform
   TF512_2way( ctx->chaining, ctx->buffer );

   OF512_2way( ctx->chaining );

   // store hash result in output
   for ( i = 0; i < hashlen_m128i; i++ )
      casti_m256i( output, i ) = ctx->chaining[ hash_offset + i ];

   return 0;
}
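For reference, the final-block layout built by the two padding branches above can be modeled in scalar C. This is an illustrative sketch (the helper name and the uint16_t count are assumptions, not part of the source): Groestl appends 0x80 right after the data, zero-fills, and stores the total block count big-endian in the last bytes of the block.

#include <stdint.h>
#include <string.h>

// Scalar model of the single-vector case above: m256_const2_64(
// (uint64_t)blocks << 56, 0x80 ) puts 0x80 at byte 0 of each 128-bit lane
// and the low 8 bits of the block count at byte 15; the _mm_set_epi8
// variant used by update_close also keeps the high count byte at byte 14.
static void groestl_pad_sketch( uint8_t last16[16], uint16_t blocks )
{
   memset( last16, 0, 16 );
   last16[0]  = 0x80;                       // first padding byte
   last16[14] = (uint8_t)( blocks >> 8 );   // block count, big endian
   last16[15] = (uint8_t)( blocks );
}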
int groestl256_2way_update_close( groestl256_2way_context* ctx, void* output,
                                  const void* input, uint64_t databitlen )
{
   const int len = (int)databitlen / 128;
   const int hashlen_m128i = ctx->hashlen / 16;   // bytes to __m128i
   const int hash_offset = SIZE256 - hashlen_m128i;
   int rem = ctx->rem_ptr;
   int blocks = len / SIZE256;
   __m256i* in = (__m256i*)input;
   int i;

   // --- update ---

   // digest any full blocks, process directly from input
   for ( i = 0; i < blocks; i++ )
      TF512_2way( ctx->chaining, &in[ i * SIZE256 ] );
   ctx->buf_ptr = blocks * SIZE256;

   // copy any remaining data to buffer, it may already contain data
   // from a previous update for a midstate precalc
   for ( i = 0; i < len % SIZE256; i++ )
      ctx->buffer[ rem + i ] = in[ ctx->buf_ptr + i ];
   i += rem;   // use i as rem_ptr in final

   //--- final ---

   blocks++;   // adjust for final block

   if ( i == SIZE256 - 1 )
   {
      // only 1 vector left in buffer, all padding at once
      ctx->buffer[i] = m256_const1_128( _mm_set_epi8(
                blocks, blocks>>8,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x80 ) );
   }
   else
   {
      // add first padding
      ctx->buffer[i] = m256_const2_64( 0, 0x80 );
      // add zero padding
      for ( i += 1; i < SIZE256 - 1; i++ )
         ctx->buffer[i] = m256_zero;

      // add length padding, second last byte is zero unless blocks > 255
      ctx->buffer[i] = m256_const1_128( _mm_set_epi8(
                blocks, blocks>>8, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 ) );
   }

   // digest final padding block and do output transform
   TF512_2way( ctx->chaining, ctx->buffer );

   OF512_2way( ctx->chaining );

   // store hash result in output
   for ( i = 0; i < hashlen_m128i; i++ )
      casti_m256i( output, i ) = ctx->chaining[ hash_offset + i ];

   return 0;
}

#endif // VAES

@@ -18,8 +18,8 @@
#endif
#include <stdlib.h>

#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

#if defined(__AVX2__) && defined(__VAES__)

#define LENGTH (256)

//#include "brg_endian.h"

@@ -48,6 +48,8 @@

#define SIZE256 (SIZE_512/16)

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

typedef struct {
   __attribute__ ((aligned (128))) __m512i chaining[SIZE256];
   __attribute__ ((aligned (64))) __m512i buffer[SIZE256];
@@ -55,7 +57,7 @@ typedef struct {
   int blk_count;    // SIZE_m128i
   int buf_ptr;      // __m128i offset
   int rem_ptr;
   int databitlen;   // bits
// int databitlen;   // bits
} groestl256_4way_context;


@@ -74,5 +76,25 @@ int groestl256_4way_update_close( groestl256_4way_context*, void*,
int groestl256_4way_full( groestl256_4way_context*, void*,
                          const void*, uint64_t );

#endif
#endif
#endif // AVX512

typedef struct {
   __attribute__ ((aligned (128))) __m256i chaining[SIZE256];
   __attribute__ ((aligned (64))) __m256i buffer[SIZE256];
   int hashlen;      // byte
   int blk_count;    // SIZE_m128i
   int buf_ptr;      // __m128i offset
   int rem_ptr;
// int databitlen;   // bits
} groestl256_2way_context;

int groestl256_2way_init( groestl256_2way_context*, uint64_t );

int groestl256_2way_update_close( groestl256_2way_context*, void*,
                                  const void*, uint64_t );

int groestl256_2way_full( groestl256_2way_context*, void*,
                          const void*, uint64_t );

#endif // VAES
#endif // GROESTL256_HASH_4WAY_H__

@@ -12,7 +12,7 @@

#include "groestl256-hash-4way.h"

#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX2__) && defined(__VAES__)

static const __m128i round_const_l0[] __attribute__ ((aligned (64))) =
{
@@ -42,6 +42,8 @@ static const __m128i round_const_l7[] __attribute__ ((aligned (64))) =
   { 0x0000000000000000, 0x8696a6b6c6d6e6f6 }
};

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

static const __m512i TRANSP_MASK = { 0x0d0509010c040800, 0x0f070b030e060a02,
                                     0x1d1519111c141810, 0x1f171b131e161a12,
                                     0x2d2529212c242820, 0x2f272b232e262a22,
@@ -499,5 +501,398 @@ void OF512_4way( __m512i* chaining )
   chaining[3] = xmm11;
}

#endif // AVX512

static const __m256i TRANSP_MASK_2WAY =
                  { 0x0d0509010c040800, 0x0f070b030e060a02,
                    0x1d1519111c141810, 0x1f171b131e161a12 };

static const __m256i SUBSH_MASK0_2WAY =
                  { 0x0c0f0104070b0e00, 0x03060a0d08020509,
                    0x1c1f1114171b1e10, 0x13161a1d18121519 };

static const __m256i SUBSH_MASK1_2WAY =
                  { 0x0e090205000d0801, 0x04070c0f0a03060b,
                    0x1e191215101d1801, 0x14171c1f1a13161b };

static const __m256i SUBSH_MASK2_2WAY =
                  { 0x080b0306010f0a02, 0x05000e090c04070d,
                    0x181b1316111f1a12, 0x15101e191c14171d };

static const __m256i SUBSH_MASK3_2WAY =
                  { 0x0a0d040702090c03, 0x0601080b0e05000f,
                    0x1a1d141712191c13, 0x1611181b1e15101f };

static const __m256i SUBSH_MASK4_2WAY =
                  { 0x0b0e0500030a0d04, 0x0702090c0f060108,
                    0x1b1e1510131a1d14, 0x1712191c1f161118 };

static const __m256i SUBSH_MASK5_2WAY =
                  { 0x0d080601040c0f05, 0x00030b0e0907020a,
                    0x1d181611141c1f15, 0x10131b1e1917121a };

static const __m256i SUBSH_MASK6_2WAY =
                  { 0x0f0a0702050e0906, 0x01040d080b00030c,
                    0x1f1a1712151e1916, 0x11141d181b10131c };

static const __m256i SUBSH_MASK7_2WAY =
                  { 0x090c000306080b07, 0x02050f0a0d01040e,
                    0x191c101316181b17, 0x12151f1a1d11141e };

#define tos(a) #a
#define tostr(a) tos(a)

/* xmm[i] will be multiplied by 2
 * xmm[j] will be lost
 * xmm[k] has to be all 0x1b */
#define MUL2_2WAY(i, j, k){\
  j = _mm256_xor_si256(j, j);\
  j = _mm256_cmpgt_epi8(j, i );\
  i = _mm256_add_epi8(i, i);\
  j = _mm256_and_si256(j, k);\
  i = _mm256_xor_si256(i, j);\
}
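MUL2_2WAY above is the branchless GF(2^8) doubling (xtime): cmpgt against zero broadcasts each byte's sign bit into a mask, which selects the 0x1b reduction after the shift. A scalar equivalent for reference (the helper name is an assumption, not from the source):

#include <stdint.h>

// Double a byte in GF(2^8) with the AES/Groestl reduction polynomial
// x^8 + x^4 + x^3 + x + 1 (0x11b); the conditional plays the role of the
// sign-mask-and-0x1b sequence in the SIMD macro.
static uint8_t gf256_mul2( uint8_t x )
{
   return (uint8_t)( ( x << 1 ) ^ ( ( x & 0x80 ) ? 0x1b : 0x00 ) );
}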

#define MixBytes_2way(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
  /* t_i = a_i + a_{i+1} */\
  b6 = a0;\
  b7 = a1;\
  a0 = _mm256_xor_si256(a0, a1);\
  b0 = a2;\
  a1 = _mm256_xor_si256(a1, a2);\
  b1 = a3;\
  a2 = _mm256_xor_si256(a2, a3);\
  b2 = a4;\
  a3 = _mm256_xor_si256(a3, a4);\
  b3 = a5;\
  a4 = _mm256_xor_si256(a4, a5);\
  b4 = a6;\
  a5 = _mm256_xor_si256(a5, a6);\
  b5 = a7;\
  a6 = _mm256_xor_si256(a6, a7);\
  a7 = _mm256_xor_si256(a7, b6);\
  \
  /* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
  b0 = _mm256_xor_si256(b0, a4);\
  b6 = _mm256_xor_si256(b6, a4);\
  b1 = _mm256_xor_si256(b1, a5);\
  b7 = _mm256_xor_si256(b7, a5);\
  b2 = _mm256_xor_si256(b2, a6);\
  b0 = _mm256_xor_si256(b0, a6);\
  /* spill values y_4, y_5 to memory */\
  TEMP0 = b0;\
  b3 = _mm256_xor_si256(b3, a7);\
  b1 = _mm256_xor_si256(b1, a7);\
  TEMP1 = b1;\
  b4 = _mm256_xor_si256(b4, a0);\
  b2 = _mm256_xor_si256(b2, a0);\
  /* save values t0, t1, t2 to xmm8, xmm9 and memory */\
  b0 = a0;\
  b5 = _mm256_xor_si256(b5, a1);\
  b3 = _mm256_xor_si256(b3, a1);\
  b1 = a1;\
  b6 = _mm256_xor_si256(b6, a2);\
  b4 = _mm256_xor_si256(b4, a2);\
  TEMP2 = a2;\
  b7 = _mm256_xor_si256(b7, a3);\
  b5 = _mm256_xor_si256(b5, a3);\
  \
  /* compute x_i = t_i + t_{i+3} */\
  a0 = _mm256_xor_si256(a0, a3);\
  a1 = _mm256_xor_si256(a1, a4);\
  a2 = _mm256_xor_si256(a2, a5);\
  a3 = _mm256_xor_si256(a3, a6);\
  a4 = _mm256_xor_si256(a4, a7);\
  a5 = _mm256_xor_si256(a5, b0);\
  a6 = _mm256_xor_si256(a6, b1);\
  a7 = _mm256_xor_si256(a7, TEMP2);\
  \
  /* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
  /* compute w_i : add y_{i+4} */\
  b1 = m256_const1_64( 0x1b1b1b1b1b1b1b1b );\
  MUL2_2WAY(a0, b0, b1);\
  a0 = _mm256_xor_si256(a0, TEMP0);\
  MUL2_2WAY(a1, b0, b1);\
  a1 = _mm256_xor_si256(a1, TEMP1);\
  MUL2_2WAY(a2, b0, b1);\
  a2 = _mm256_xor_si256(a2, b2);\
  MUL2_2WAY(a3, b0, b1);\
  a3 = _mm256_xor_si256(a3, b3);\
  MUL2_2WAY(a4, b0, b1);\
  a4 = _mm256_xor_si256(a4, b4);\
  MUL2_2WAY(a5, b0, b1);\
  a5 = _mm256_xor_si256(a5, b5);\
  MUL2_2WAY(a6, b0, b1);\
  a6 = _mm256_xor_si256(a6, b6);\
  MUL2_2WAY(a7, b0, b1);\
  a7 = _mm256_xor_si256(a7, b7);\
  \
  /* compute v_i : double w_i */\
  /* add to y_4 y_5 .. v3, v4, ... */\
  MUL2_2WAY(a0, b0, b1);\
  b5 = _mm256_xor_si256(b5, a0);\
  MUL2_2WAY(a1, b0, b1);\
  b6 = _mm256_xor_si256(b6, a1);\
  MUL2_2WAY(a2, b0, b1);\
  b7 = _mm256_xor_si256(b7, a2);\
  MUL2_2WAY(a5, b0, b1);\
  b2 = _mm256_xor_si256(b2, a5);\
  MUL2_2WAY(a6, b0, b1);\
  b3 = _mm256_xor_si256(b3, a6);\
  MUL2_2WAY(a7, b0, b1);\
  b4 = _mm256_xor_si256(b4, a7);\
  MUL2_2WAY(a3, b0, b1);\
  MUL2_2WAY(a4, b0, b1);\
  b0 = TEMP0;\
  b1 = TEMP1;\
  b0 = _mm256_xor_si256(b0, a3);\
  b1 = _mm256_xor_si256(b1, a4);\
}/*MixBytes*/
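The macro above evaluates Groestl's MixBytes as a shared-subexpression dataflow (the t_i, x_i, z_i, w_i of its comments) using only XORs and doublings. Per the Groestl specification, the operation multiplies each 8-byte state column by the circulant matrix B = circ(02, 02, 03, 04, 05, 03, 05, 07) over GF(2^8). A straightforward scalar reference, illustrative only (helper names are assumptions, not the hand-optimized dataflow):

#include <stdint.h>

static uint8_t gf256_mul2_ref( uint8_t x )
{
   return (uint8_t)( ( x << 1 ) ^ ( ( x & 0x80 ) ? 0x1b : 0x00 ) );
}

// Multiply by a small constant via double-and-add in GF(2^8).
static uint8_t gf256_mul_small( uint8_t x, uint8_t c )
{
   uint8_t r = 0;
   for ( ; c; c >>= 1, x = gf256_mul2_ref( x ) )
      if ( c & 1 ) r ^= x;
   return r;
}

// One column: out[i] = sum_j B[i][j] * in[j], with B[i][j] = b[(j-i) mod 8].
static void mixbytes_column_ref( const uint8_t in[8], uint8_t out[8] )
{
   static const uint8_t b[8] = { 2, 2, 3, 4, 5, 3, 5, 7 };
   for ( int i = 0; i < 8; i++ )
   {
      uint8_t acc = 0;
      for ( int j = 0; j < 8; j++ )
         acc ^= gf256_mul_small( in[j], b[ ( j - i + 8 ) & 7 ] );
      out[i] = acc;
   }
}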

#define ROUND_2WAY(i, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
  /* AddRoundConstant */\
  b1 = m256_const2_64( 0xffffffffffffffff, 0 ); \
  a0 = _mm256_xor_si256( a0, m256_const1_128( round_const_l0[i] ) );\
  a1 = _mm256_xor_si256( a1, b1 );\
  a2 = _mm256_xor_si256( a2, b1 );\
  a3 = _mm256_xor_si256( a3, b1 );\
  a4 = _mm256_xor_si256( a4, b1 );\
  a5 = _mm256_xor_si256( a5, b1 );\
  a6 = _mm256_xor_si256( a6, b1 );\
  a7 = _mm256_xor_si256( a7, m256_const1_128( round_const_l7[i] ) );\
  \
  /* ShiftBytes + SubBytes (interleaved) */\
  b0 = _mm256_xor_si256( b0, b0 );\
  a0 = _mm256_shuffle_epi8( a0, SUBSH_MASK0_2WAY );\
  a0 = _mm256_aesenclast_epi128( a0, b0 );\
  a1 = _mm256_shuffle_epi8( a1, SUBSH_MASK1_2WAY );\
  a1 = _mm256_aesenclast_epi128( a1, b0 );\
  a2 = _mm256_shuffle_epi8( a2, SUBSH_MASK2_2WAY );\
  a2 = _mm256_aesenclast_epi128( a2, b0 );\
  a3 = _mm256_shuffle_epi8( a3, SUBSH_MASK3_2WAY );\
  a3 = _mm256_aesenclast_epi128( a3, b0 );\
  a4 = _mm256_shuffle_epi8( a4, SUBSH_MASK4_2WAY );\
  a4 = _mm256_aesenclast_epi128( a4, b0 );\
  a5 = _mm256_shuffle_epi8( a5, SUBSH_MASK5_2WAY );\
  a5 = _mm256_aesenclast_epi128( a5, b0 );\
  a6 = _mm256_shuffle_epi8( a6, SUBSH_MASK6_2WAY );\
  a6 = _mm256_aesenclast_epi128( a6, b0 );\
  a7 = _mm256_shuffle_epi8( a7, SUBSH_MASK7_2WAY );\
  a7 = _mm256_aesenclast_epi128( a7, b0 );\
  \
  /* MixBytes */\
  MixBytes_2way(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7);\
  \
}
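The zero vector XORed into b0 above becomes the AES round key: AESENCLAST computes AddRoundKey( SubBytes( ShiftRows( x ) ), key ), so with an all-zero key it reduces to SubBytes composed with ShiftRows. The SUBSH_MASK*_2WAY shuffles are chosen so that the fixed AES ShiftRows, combined with the preceding pshufb, yields exactly Groestl's ShiftBytes, leaving the instruction to contribute only the per-byte S-box. A single-lane sketch of that identity (hypothetical helper name):

#include <immintrin.h>

// With a zero round key, AESENCLAST is just ShiftRows followed by SubBytes;
// SubBytes is bytewise, so the two steps commute.
static __m128i subbytes_shiftrows( __m128i x )
{
   return _mm_aesenclast_si128( x, _mm_setzero_si128() );
}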

/* 10 rounds, P and Q in parallel */
#define ROUNDS_P_Q_2WAY(){\
  ROUND_2WAY(0, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
  ROUND_2WAY(1, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
  ROUND_2WAY(2, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
  ROUND_2WAY(3, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
  ROUND_2WAY(4, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
  ROUND_2WAY(5, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
  ROUND_2WAY(6, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
  ROUND_2WAY(7, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
  ROUND_2WAY(8, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
  ROUND_2WAY(9, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
}

#define Matrix_Transpose_A_2way(i0, i1, i2, i3, o1, o2, o3, t0){\
  t0 = TRANSP_MASK_2WAY;\
  \
  i0 = _mm256_shuffle_epi8( i0, t0 );\
  i1 = _mm256_shuffle_epi8( i1, t0 );\
  i2 = _mm256_shuffle_epi8( i2, t0 );\
  i3 = _mm256_shuffle_epi8( i3, t0 );\
  \
  o1 = i0;\
  t0 = i2;\
  \
  i0 = _mm256_unpacklo_epi16( i0, i1 );\
  o1 = _mm256_unpackhi_epi16( o1, i1 );\
  i2 = _mm256_unpacklo_epi16( i2, i3 );\
  t0 = _mm256_unpackhi_epi16( t0, i3 );\
  \
  i0 = _mm256_shuffle_epi32( i0, 216 );\
  o1 = _mm256_shuffle_epi32( o1, 216 );\
  i2 = _mm256_shuffle_epi32( i2, 216 );\
  t0 = _mm256_shuffle_epi32( t0, 216 );\
  \
  o2 = i0;\
  o3 = o1;\
  \
  i0 = _mm256_unpacklo_epi32( i0, i2 );\
  o1 = _mm256_unpacklo_epi32( o1, t0 );\
  o2 = _mm256_unpackhi_epi32( o2, i2 );\
  o3 = _mm256_unpackhi_epi32( o3, t0 );\
}/**/

#define Matrix_Transpose_B_2way(i0, i1, i2, i3, i4, i5, i6, i7, o1, o2, o3, o4, o5, o6, o7){\
  o1 = i0;\
  o2 = i1;\
  i0 = _mm256_unpacklo_epi64( i0, i4 );\
  o1 = _mm256_unpackhi_epi64( o1, i4 );\
  o3 = i1;\
  o4 = i2;\
  o2 = _mm256_unpacklo_epi64( o2, i5 );\
  o3 = _mm256_unpackhi_epi64( o3, i5 );\
  o5 = i2;\
  o6 = i3;\
  o4 = _mm256_unpacklo_epi64( o4, i6 );\
  o5 = _mm256_unpackhi_epi64( o5, i6 );\
  o7 = i3;\
  o6 = _mm256_unpacklo_epi64( o6, i7 );\
  o7 = _mm256_unpackhi_epi64( o7, i7 );\
}/**/

#define Matrix_Transpose_B_INV_2way(i0, i1, i2, i3, i4, i5, i6, i7, o0, o1, o2, o3){\
  o0 = i0;\
  i0 = _mm256_unpacklo_epi64( i0, i1 );\
  o0 = _mm256_unpackhi_epi64( o0, i1 );\
  o1 = i2;\
  i2 = _mm256_unpacklo_epi64( i2, i3 );\
  o1 = _mm256_unpackhi_epi64( o1, i3 );\
  o2 = i4;\
  i4 = _mm256_unpacklo_epi64( i4, i5 );\
  o2 = _mm256_unpackhi_epi64( o2, i5 );\
  o3 = i6;\
  i6 = _mm256_unpacklo_epi64( i6, i7 );\
  o3 = _mm256_unpackhi_epi64( o3, i7 );\
}/**/

#define Matrix_Transpose_O_B_2way(i0, i1, i2, i3, i4, i5, i6, i7, t0){\
  t0 = _mm256_xor_si256( t0, t0 );\
  i1 = i0;\
  i3 = i2;\
  i5 = i4;\
  i7 = i6;\
  i0 = _mm256_unpacklo_epi64( i0, t0 );\
  i1 = _mm256_unpackhi_epi64( i1, t0 );\
  i2 = _mm256_unpacklo_epi64( i2, t0 );\
  i3 = _mm256_unpackhi_epi64( i3, t0 );\
  i4 = _mm256_unpacklo_epi64( i4, t0 );\
  i5 = _mm256_unpackhi_epi64( i5, t0 );\
  i6 = _mm256_unpacklo_epi64( i6, t0 );\
  i7 = _mm256_unpackhi_epi64( i7, t0 );\
}/**/

#define Matrix_Transpose_O_B_INV_2way(i0, i1, i2, i3, i4, i5, i6, i7){\
  i0 = _mm256_unpacklo_epi64( i0, i1 );\
  i2 = _mm256_unpacklo_epi64( i2, i3 );\
  i4 = _mm256_unpacklo_epi64( i4, i5 );\
  i6 = _mm256_unpacklo_epi64( i6, i7 );\
}/**/

void TF512_2way( __m256i* chaining, __m256i* message )
{
   static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
   static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
   static __m256i TEMP0;
   static __m256i TEMP1;
   static __m256i TEMP2;

   /* load message into registers xmm12 - xmm15 */
   xmm12 = message[0];
   xmm13 = message[1];
   xmm14 = message[2];
   xmm15 = message[3];

   /* transform message M from column ordering into row ordering */
   /* we first put two rows (64 bit) of the message into one 128-bit xmm register */
   Matrix_Transpose_A_2way(xmm12, xmm13, xmm14, xmm15, xmm2, xmm6, xmm7, xmm0);

   /* load previous chaining value */
   /* we first put two rows (64 bit) of the CV into one 128-bit xmm register */
   xmm8 = chaining[0];
   xmm0 = chaining[1];
   xmm4 = chaining[2];
   xmm5 = chaining[3];

   /* xor message to CV get input of P */
   /* result: CV+M in xmm8, xmm0, xmm4, xmm5 */
   xmm8 = _mm256_xor_si256( xmm8, xmm12 );
   xmm0 = _mm256_xor_si256( xmm0, xmm2 );
   xmm4 = _mm256_xor_si256( xmm4, xmm6 );
   xmm5 = _mm256_xor_si256( xmm5, xmm7 );

   /* there are now 2 rows of the Groestl state (P and Q) in each xmm register */
   /* unpack to get 1 row of P (64 bit) and Q (64 bit) into one xmm register */
   /* result: the 8 rows of P and Q in xmm8 - xmm12 */
   Matrix_Transpose_B_2way(xmm8, xmm0, xmm4, xmm5, xmm12, xmm2, xmm6, xmm7, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);

   /* compute the two permutations P and Q in parallel */
   ROUNDS_P_Q_2WAY();

   /* unpack again to get two rows of P or two rows of Q in one xmm register */
   Matrix_Transpose_B_INV_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3);

   /* xor output of P and Q */
   /* result: P(CV+M)+Q(M) in xmm0...xmm3 */
   xmm0 = _mm256_xor_si256( xmm0, xmm8 );
   xmm1 = _mm256_xor_si256( xmm1, xmm10 );
   xmm2 = _mm256_xor_si256( xmm2, xmm12 );
   xmm3 = _mm256_xor_si256( xmm3, xmm14 );

   /* xor CV (feed-forward) */
   /* result: P(CV+M)+Q(M)+CV in xmm0...xmm3 */
   xmm0 = _mm256_xor_si256( xmm0, (chaining[0]) );
   xmm1 = _mm256_xor_si256( xmm1, (chaining[1]) );
   xmm2 = _mm256_xor_si256( xmm2, (chaining[2]) );
   xmm3 = _mm256_xor_si256( xmm3, (chaining[3]) );

   /* store CV */
   chaining[0] = xmm0;
   chaining[1] = xmm1;
   chaining[2] = xmm2;
   chaining[3] = xmm3;

   return;
}
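TF512_2way runs two independent Groestl-256 compressions at once: the low 128-bit lane of every __m256i carries one stream and the high lane carries the other, so callers must supply message and chaining data in this interleaved form. A minimal packing sketch under that assumption (the helper is illustrative, not from the source):

#include <immintrin.h>

// Pack one 128-bit state vector from each of two streams into a __m256i:
// lane 0 = stream A, lane 1 = stream B.
static __m256i pack_2way( __m128i a, __m128i b )
{
   return _mm256_inserti128_si256( _mm256_castsi128_si256( a ), b, 1 );
}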

void OF512_2way( __m256i* chaining )
{
   static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
   static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
   static __m256i TEMP0;
   static __m256i TEMP1;
   static __m256i TEMP2;

   /* load CV into registers xmm8, xmm10, xmm12, xmm14 */
   xmm8 = chaining[0];
   xmm10 = chaining[1];
   xmm12 = chaining[2];
   xmm14 = chaining[3];

   /* there are now 2 rows of the CV in one xmm register */
   /* unpack to get 1 row of P (64 bit) into one half of an xmm register */
   /* result: the 8 input rows of P in xmm8 - xmm15 */
   Matrix_Transpose_O_B_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0);

   /* compute the permutation P */
   /* result: the output of P(CV) in xmm8 - xmm15 */
   ROUNDS_P_Q_2WAY();

   /* unpack again to get two rows of P in one xmm register */
   /* result: P(CV) in xmm8, xmm10, xmm12, xmm14 */
   Matrix_Transpose_O_B_INV_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);

   /* xor CV to P output (feed-forward) */
   /* result: P(CV)+CV in xmm8, xmm10, xmm12, xmm14 */
   xmm8 = _mm256_xor_si256( xmm8, (chaining[0]) );
   xmm10 = _mm256_xor_si256( xmm10, (chaining[1]) );
   xmm12 = _mm256_xor_si256( xmm12, (chaining[2]) );
   xmm14 = _mm256_xor_si256( xmm14, (chaining[3]) );

   /* transform state back from row ordering into column ordering */
   /* result: final hash value in xmm9, xmm11 */
   Matrix_Transpose_A_2way(xmm8, xmm10, xmm12, xmm14, xmm4, xmm9, xmm11, xmm0);

   /* we only need to return the truncated half of the state */
   chaining[2] = xmm9;
   chaining[3] = xmm11;
}

#endif // VAES
#endif // GROESTL512_INTR_4WAY_H__
#endif // GROESTL256_INTR_4WAY_H__

@@ -15,7 +15,9 @@
#include "miner.h"
#include "simd-utils.h"

#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX2__) && defined(__VAES__)

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

int groestl512_4way_init( groestl512_4way_context* ctx, uint64_t hashlen )
{
@@ -137,5 +139,130 @@ int groestl512_4way_full( groestl512_4way_context* ctx, void* output,
   return 0;
}

#endif // AVX512


// AVX2 + VAES

int groestl512_2way_init( groestl512_2way_context* ctx, uint64_t hashlen )
{
   if (ctx->chaining == NULL || ctx->buffer == NULL)
      return 1;

   memset_zero_256( ctx->chaining, SIZE512 );
   memset_zero_256( ctx->buffer, SIZE512 );

   // The only non-zero in the IV is len. It can be hard coded.
   ctx->chaining[ 6 ] = m256_const2_64( 0x0200000000000000, 0 );

   ctx->buf_ptr = 0;
   ctx->rem_ptr = 0;

   return 0;
}

int groestl512_2way_update_close( groestl512_2way_context* ctx, void* output,
                                  const void* input, uint64_t databitlen )
{
   const int len = (int)databitlen / 128;
   const int hashlen_m128i = 64 / 16;   // bytes to __m128i
   const int hash_offset = SIZE512 - hashlen_m128i;
   int rem = ctx->rem_ptr;
   int blocks = len / SIZE512;
   __m256i* in = (__m256i*)input;
   int i;

   // --- update ---

   for ( i = 0; i < blocks; i++ )
      TF1024_2way( ctx->chaining, &in[ i * SIZE512 ] );
   ctx->buf_ptr = blocks * SIZE512;

   for ( i = 0; i < len % SIZE512; i++ )
      ctx->buffer[ rem + i ] = in[ ctx->buf_ptr + i ];
   i += rem;

   //--- final ---

   blocks++;   // adjust for final block

   if ( i == SIZE512 - 1 )
   {
      // only 1 vector left in buffer, all padding at once
      ctx->buffer[i] = m256_const1_128( _mm_set_epi8(
                blocks, blocks>>8,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x80 ) );
   }
   else
   {
      ctx->buffer[i] = m256_const2_64( 0, 0x80 );
      for ( i += 1; i < SIZE512 - 1; i++ )
         ctx->buffer[i] = m256_zero;
      ctx->buffer[i] = m256_const1_128( _mm_set_epi8(
                blocks, blocks>>8, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 ) );
   }

   TF1024_2way( ctx->chaining, ctx->buffer );
   OF1024_2way( ctx->chaining );

   for ( i = 0; i < hashlen_m128i; i++ )
      casti_m256i( output, i ) = ctx->chaining[ hash_offset + i ];

   return 0;
}

int groestl512_2way_full( groestl512_2way_context* ctx, void* output,
                          const void* input, uint64_t datalen )
{
   const int len = (int)datalen >> 4;
   const int hashlen_m128i = 64 >> 4;   // bytes to __m128i
   const int hash_offset = SIZE512 - hashlen_m128i;
   uint64_t blocks = len / SIZE512;
   __m256i* in = (__m256i*)input;
   int i;

   // --- init ---

   memset_zero_256( ctx->chaining, SIZE512 );
   memset_zero_256( ctx->buffer, SIZE512 );
   ctx->chaining[ 6 ] = m256_const2_64( 0x0200000000000000, 0 );
   ctx->buf_ptr = 0;
   ctx->rem_ptr = 0;

   // --- update ---

   for ( i = 0; i < blocks; i++ )
      TF1024_2way( ctx->chaining, &in[ i * SIZE512 ] );
   ctx->buf_ptr = blocks * SIZE512;

   for ( i = 0; i < len % SIZE512; i++ )
      ctx->buffer[ ctx->rem_ptr + i ] = in[ ctx->buf_ptr + i ];
   i += ctx->rem_ptr;

   // --- close ---

   blocks++;

   if ( i == SIZE512 - 1 )
   {
      // only 1 vector left in buffer, all padding at once
      ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 );
   }
   else
   {
      ctx->buffer[i] = m256_const2_64( 0, 0x80 );
      for ( i += 1; i < SIZE512 - 1; i++ )
         ctx->buffer[i] = m256_zero;
      ctx->buffer[i] = m256_const2_64( blocks << 56, 0 );
   }

   TF1024_2way( ctx->chaining, ctx->buffer );
   OF1024_2way( ctx->chaining );

   for ( i = 0; i < hashlen_m128i; i++ )
      casti_m256i( output, i ) = ctx->chaining[ hash_offset + i ];

   return 0;
}
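Note the unit difference between the two entry points above: groestl512_2way_full takes datalen in bytes (len = datalen >> 4 counts 16-byte lanes), while groestl512_2way_update_close takes databitlen in bits (len = databitlen / 128). A hedged usage sketch for the byte-count variant; the buffer sizes assume two interleaved 80-byte inputs and two 64-byte digests, and the byte count appears to be per stream:

#include <immintrin.h>

void groestl512_2way_example( void )
{
   static groestl512_2way_context ctx;   // members carry their own alignment
   __m256i in[5];    // five 16-byte chunks per stream, 2-way interleaved
   __m256i out[4];   // one 64-byte digest per stream, 2-way interleaved
   // ... fill in[] with the two messages, lane 0 = stream A, lane 1 = B ...
   groestl512_2way_full( &ctx, out, in, 80 );   // 80 = bytes per stream
}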

#endif // VAES


@@ -10,7 +10,7 @@
#endif
#include <stdlib.h>

#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX2__) && defined(__VAES__)

#define LENGTH (512)

@@ -36,20 +36,19 @@

#define SIZE512 (SIZE_1024/16)

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

typedef struct {
   __attribute__ ((aligned (128))) __m512i chaining[SIZE512];
   __attribute__ ((aligned (64))) __m512i buffer[SIZE512];
   int blk_count;    // SIZE_m128i
   int buf_ptr;      // __m128i offset
   int rem_ptr;
   int databitlen;   // bits
} groestl512_4way_context;


int groestl512_4way_init( groestl512_4way_context*, uint64_t );

//int reinit_groestl( hashState_groestl* );

int groestl512_4way_update( groestl512_4way_context*, const void*,
                            uint64_t );
int groestl512_4way_close( groestl512_4way_context*, void* );
@@ -58,5 +57,29 @@ int groestl512_4way_update_close( groestl512_4way_context*, void*,
int groestl512_4way_full( groestl512_4way_context*, void*,
                          const void*, uint64_t );

#endif // AVX512

// AVX2 + VAES

typedef struct {
   __attribute__ ((aligned (128))) __m256i chaining[SIZE512];
   __attribute__ ((aligned (64))) __m256i buffer[SIZE512];
   int blk_count;    // SIZE_m128i
   int buf_ptr;      // __m128i offset
   int rem_ptr;
} groestl512_2way_context;


int groestl512_2way_init( groestl512_2way_context*, uint64_t );

int groestl512_2way_update( groestl512_2way_context*, const void*,
                            uint64_t );
int groestl512_2way_close( groestl512_2way_context*, void* );
int groestl512_2way_update_close( groestl512_2way_context*, void*,
                                  const void*, uint64_t );
int groestl512_2way_full( groestl512_2way_context*, void*,
                          const void*, uint64_t );


#endif // VAES
#endif // GROESTL512_HASH_4WAY_H__

@@ -12,7 +12,7 @@

#include "groestl512-hash-4way.h"

#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX2__) && defined(__VAES__)

static const __m128i round_const_p[] __attribute__ ((aligned (64))) =
{
@@ -50,6 +50,8 @@ static const __m128i round_const_q[] __attribute__ ((aligned (64))) =
   { 0x8292a2b2c2d2e2f2, 0x0212223242526272 }
};

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

static const __m512i TRANSP_MASK = { 0x0d0509010c040800, 0x0f070b030e060a02,
                                     0x1d1519111c141810, 0x1f171b131e161a12,
                                     0x2d2529212c242820, 0x2f272b232e262a22,
@@ -660,5 +662,578 @@ void OF1024_4way( __m512i* chaining )
   return;
}

#endif // AVX512

// AVX2 + VAES

static const __m256i TRANSP_MASK_2WAY =
                  { 0x0d0509010c040800, 0x0f070b030e060a02,
                    0x1d1519111c141810, 0x1f171b131e161a12 };

static const __m256i SUBSH_MASK0_2WAY =
                  { 0x0b0e0104070a0d00, 0x0306090c0f020508,
                    0x1b1e1114171a1d10, 0x1316191c1f121518 };

static const __m256i SUBSH_MASK1_2WAY =
                  { 0x0c0f0205080b0e01, 0x04070a0d00030609,
                    0x1c1f1215181b1e11, 0x14171a1d10131619 };

static const __m256i SUBSH_MASK2_2WAY =
                  { 0x0d000306090c0f02, 0x05080b0e0104070a,
                    0x1d101316191c1f12, 0x15181b1e1114171a };

static const __m256i SUBSH_MASK3_2WAY =
                  { 0x0e0104070a0d0003, 0x06090c0f0205080b,
                    0x1e1114171a1d1013, 0x16191c1f1215181b };

static const __m256i SUBSH_MASK4_2WAY =
                  { 0x0f0205080b0e0104, 0x070a0d000306090c,
                    0x1f1215181b1e1114, 0x171a1d101316191c };

static const __m256i SUBSH_MASK5_2WAY =
                  { 0x000306090c0f0205, 0x080b0e0104070a0d,
                    0x101316191c1f1215, 0x181b1e1114171a1d };

static const __m256i SUBSH_MASK6_2WAY =
                  { 0x0104070a0d000306, 0x090c0f0205080b0e,
                    0x1114171a1d101316, 0x191c1f1215181b1e };

static const __m256i SUBSH_MASK7_2WAY =
                  { 0x06090c0f0205080b, 0x0e0104070a0d0003,
                    0x16191c1f1215181b, 0x1e1114171a1d1013 };

#define tos(a) #a
#define tostr(a) tos(a)

/* xmm[i] will be multiplied by 2
 * xmm[j] will be lost
 * xmm[k] has to be all 0x1b */
#define MUL2_2WAY(i, j, k){\
  j = _mm256_xor_si256(j, j);\
  j = _mm256_cmpgt_epi8(j, i );\
  i = _mm256_add_epi8(i, i);\
  j = _mm256_and_si256(j, k);\
  i = _mm256_xor_si256(i, j);\
}

#define MixBytes_2way(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
  /* t_i = a_i + a_{i+1} */\
  b6 = a0;\
  b7 = a1;\
  a0 = _mm256_xor_si256(a0, a1);\
  b0 = a2;\
  a1 = _mm256_xor_si256(a1, a2);\
  b1 = a3;\
  a2 = _mm256_xor_si256(a2, a3);\
  b2 = a4;\
  a3 = _mm256_xor_si256(a3, a4);\
  b3 = a5;\
  a4 = _mm256_xor_si256(a4, a5);\
  b4 = a6;\
  a5 = _mm256_xor_si256(a5, a6);\
  b5 = a7;\
  a6 = _mm256_xor_si256(a6, a7);\
  a7 = _mm256_xor_si256(a7, b6);\
  \
  /* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
  b0 = _mm256_xor_si256(b0, a4);\
  b6 = _mm256_xor_si256(b6, a4);\
  b1 = _mm256_xor_si256(b1, a5);\
  b7 = _mm256_xor_si256(b7, a5);\
  b2 = _mm256_xor_si256(b2, a6);\
  b0 = _mm256_xor_si256(b0, a6);\
  /* spill values y_4, y_5 to memory */\
  TEMP0 = b0;\
  b3 = _mm256_xor_si256(b3, a7);\
  b1 = _mm256_xor_si256(b1, a7);\
  TEMP1 = b1;\
  b4 = _mm256_xor_si256(b4, a0);\
  b2 = _mm256_xor_si256(b2, a0);\
  /* save values t0, t1, t2 to xmm8, xmm9 and memory */\
  b0 = a0;\
  b5 = _mm256_xor_si256(b5, a1);\
  b3 = _mm256_xor_si256(b3, a1);\
  b1 = a1;\
  b6 = _mm256_xor_si256(b6, a2);\
  b4 = _mm256_xor_si256(b4, a2);\
  TEMP2 = a2;\
  b7 = _mm256_xor_si256(b7, a3);\
  b5 = _mm256_xor_si256(b5, a3);\
  \
  /* compute x_i = t_i + t_{i+3} */\
  a0 = _mm256_xor_si256(a0, a3);\
  a1 = _mm256_xor_si256(a1, a4);\
  a2 = _mm256_xor_si256(a2, a5);\
  a3 = _mm256_xor_si256(a3, a6);\
  a4 = _mm256_xor_si256(a4, a7);\
  a5 = _mm256_xor_si256(a5, b0);\
  a6 = _mm256_xor_si256(a6, b1);\
  a7 = _mm256_xor_si256(a7, TEMP2);\
  \
  /* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
  /* compute w_i : add y_{i+4} */\
  b1 = m256_const1_64( 0x1b1b1b1b1b1b1b1b );\
  MUL2_2WAY(a0, b0, b1);\
  a0 = _mm256_xor_si256(a0, TEMP0);\
  MUL2_2WAY(a1, b0, b1);\
  a1 = _mm256_xor_si256(a1, TEMP1);\
  MUL2_2WAY(a2, b0, b1);\
  a2 = _mm256_xor_si256(a2, b2);\
  MUL2_2WAY(a3, b0, b1);\
  a3 = _mm256_xor_si256(a3, b3);\
  MUL2_2WAY(a4, b0, b1);\
  a4 = _mm256_xor_si256(a4, b4);\
  MUL2_2WAY(a5, b0, b1);\
  a5 = _mm256_xor_si256(a5, b5);\
  MUL2_2WAY(a6, b0, b1);\
  a6 = _mm256_xor_si256(a6, b6);\
  MUL2_2WAY(a7, b0, b1);\
  a7 = _mm256_xor_si256(a7, b7);\
  \
  /* compute v_i : double w_i */\
  /* add to y_4 y_5 .. v3, v4, ... */\
  MUL2_2WAY(a0, b0, b1);\
  b5 = _mm256_xor_si256(b5, a0);\
  MUL2_2WAY(a1, b0, b1);\
  b6 = _mm256_xor_si256(b6, a1);\
  MUL2_2WAY(a2, b0, b1);\
  b7 = _mm256_xor_si256(b7, a2);\
  MUL2_2WAY(a5, b0, b1);\
  b2 = _mm256_xor_si256(b2, a5);\
  MUL2_2WAY(a6, b0, b1);\
  b3 = _mm256_xor_si256(b3, a6);\
  MUL2_2WAY(a7, b0, b1);\
  b4 = _mm256_xor_si256(b4, a7);\
  MUL2_2WAY(a3, b0, b1);\
  MUL2_2WAY(a4, b0, b1);\
  b0 = TEMP0;\
  b1 = TEMP1;\
  b0 = _mm256_xor_si256(b0, a3);\
  b1 = _mm256_xor_si256(b1, a4);\
}/*MixBytes*/

/* one round
 * a0-a7 = input rows
 * b0-b7 = output rows
 */
#define SUBMIX_2WAY(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
  /* SubBytes */\
  b0 = _mm256_xor_si256( b0, b0 );\
  a0 = _mm256_aesenclast_epi128( a0, b0 );\
  a1 = _mm256_aesenclast_epi128( a1, b0 );\
  a2 = _mm256_aesenclast_epi128( a2, b0 );\
  a3 = _mm256_aesenclast_epi128( a3, b0 );\
  a4 = _mm256_aesenclast_epi128( a4, b0 );\
  a5 = _mm256_aesenclast_epi128( a5, b0 );\
  a6 = _mm256_aesenclast_epi128( a6, b0 );\
  a7 = _mm256_aesenclast_epi128( a7, b0 );\
  /* MixBytes */\
  MixBytes_2way(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7);\
}

#define ROUNDS_P_2WAY(){\
  uint8_t round_counter = 0;\
  for ( round_counter = 0; round_counter < 14; round_counter += 2 ) \
  { \
    /* AddRoundConstant P1024 */\
    xmm8 = _mm256_xor_si256( xmm8, m256_const1_128( \
               casti_m128i( round_const_p, round_counter ) ) ); \
    /* ShiftBytes P1024 + pre-AESENCLAST */\
    xmm8 = _mm256_shuffle_epi8( xmm8, SUBSH_MASK0_2WAY ); \
    xmm9 = _mm256_shuffle_epi8( xmm9, SUBSH_MASK1_2WAY );\
    xmm10 = _mm256_shuffle_epi8( xmm10, SUBSH_MASK2_2WAY );\
    xmm11 = _mm256_shuffle_epi8( xmm11, SUBSH_MASK3_2WAY );\
    xmm12 = _mm256_shuffle_epi8( xmm12, SUBSH_MASK4_2WAY );\
    xmm13 = _mm256_shuffle_epi8( xmm13, SUBSH_MASK5_2WAY );\
    xmm14 = _mm256_shuffle_epi8( xmm14, SUBSH_MASK6_2WAY );\
    xmm15 = _mm256_shuffle_epi8( xmm15, SUBSH_MASK7_2WAY );\
    /* SubBytes + MixBytes */\
    SUBMIX_2WAY(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
    \
    /* AddRoundConstant P1024 */\
    xmm0 = _mm256_xor_si256( xmm0, m256_const1_128( \
               casti_m128i( round_const_p, round_counter+1 ) ) ); \
    /* ShiftBytes P1024 + pre-AESENCLAST */\
    xmm0 = _mm256_shuffle_epi8( xmm0, SUBSH_MASK0_2WAY );\
    xmm1 = _mm256_shuffle_epi8( xmm1, SUBSH_MASK1_2WAY );\
    xmm2 = _mm256_shuffle_epi8( xmm2, SUBSH_MASK2_2WAY );\
    xmm3 = _mm256_shuffle_epi8( xmm3, SUBSH_MASK3_2WAY );\
    xmm4 = _mm256_shuffle_epi8( xmm4, SUBSH_MASK4_2WAY );\
    xmm5 = _mm256_shuffle_epi8( xmm5, SUBSH_MASK5_2WAY );\
    xmm6 = _mm256_shuffle_epi8( xmm6, SUBSH_MASK6_2WAY );\
    xmm7 = _mm256_shuffle_epi8( xmm7, SUBSH_MASK7_2WAY );\
    /* SubBytes + MixBytes */\
    SUBMIX_2WAY(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
  }\
}

#define ROUNDS_Q_2WAY(){\
  uint8_t round_counter = 0;\
  for ( round_counter = 0; round_counter < 14; round_counter += 2) \
  { \
    /* AddRoundConstant Q1024 */\
    xmm1 = m256_neg1;\
    xmm8 = _mm256_xor_si256( xmm8, xmm1 );\
    xmm9 = _mm256_xor_si256( xmm9, xmm1 );\
    xmm10 = _mm256_xor_si256( xmm10, xmm1 );\
    xmm11 = _mm256_xor_si256( xmm11, xmm1 );\
    xmm12 = _mm256_xor_si256( xmm12, xmm1 );\
    xmm13 = _mm256_xor_si256( xmm13, xmm1 );\
    xmm14 = _mm256_xor_si256( xmm14, xmm1 );\
    xmm15 = _mm256_xor_si256( xmm15, m256_const1_128( \
                casti_m128i( round_const_q, round_counter ) ) ); \
    /* ShiftBytes Q1024 + pre-AESENCLAST */\
    xmm8 = _mm256_shuffle_epi8( xmm8, SUBSH_MASK1_2WAY );\
    xmm9 = _mm256_shuffle_epi8( xmm9, SUBSH_MASK3_2WAY );\
    xmm10 = _mm256_shuffle_epi8( xmm10, SUBSH_MASK5_2WAY );\
    xmm11 = _mm256_shuffle_epi8( xmm11, SUBSH_MASK7_2WAY );\
    xmm12 = _mm256_shuffle_epi8( xmm12, SUBSH_MASK0_2WAY );\
    xmm13 = _mm256_shuffle_epi8( xmm13, SUBSH_MASK2_2WAY );\
    xmm14 = _mm256_shuffle_epi8( xmm14, SUBSH_MASK4_2WAY );\
    xmm15 = _mm256_shuffle_epi8( xmm15, SUBSH_MASK6_2WAY );\
    /* SubBytes + MixBytes */\
    SUBMIX_2WAY(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
    \
    /* AddRoundConstant Q1024 */\
    xmm9 = m256_neg1;\
    xmm0 = _mm256_xor_si256( xmm0, xmm9 );\
    xmm1 = _mm256_xor_si256( xmm1, xmm9 );\
    xmm2 = _mm256_xor_si256( xmm2, xmm9 );\
    xmm3 = _mm256_xor_si256( xmm3, xmm9 );\
    xmm4 = _mm256_xor_si256( xmm4, xmm9 );\
    xmm5 = _mm256_xor_si256( xmm5, xmm9 );\
    xmm6 = _mm256_xor_si256( xmm6, xmm9 );\
    xmm7 = _mm256_xor_si256( xmm7, m256_const1_128( \
                casti_m128i( round_const_q, round_counter+1 ) ) ); \
    /* ShiftBytes Q1024 + pre-AESENCLAST */\
    xmm0 = _mm256_shuffle_epi8( xmm0, SUBSH_MASK1_2WAY );\
    xmm1 = _mm256_shuffle_epi8( xmm1, SUBSH_MASK3_2WAY );\
    xmm2 = _mm256_shuffle_epi8( xmm2, SUBSH_MASK5_2WAY );\
    xmm3 = _mm256_shuffle_epi8( xmm3, SUBSH_MASK7_2WAY );\
    xmm4 = _mm256_shuffle_epi8( xmm4, SUBSH_MASK0_2WAY );\
    xmm5 = _mm256_shuffle_epi8( xmm5, SUBSH_MASK2_2WAY );\
    xmm6 = _mm256_shuffle_epi8( xmm6, SUBSH_MASK4_2WAY );\
    xmm7 = _mm256_shuffle_epi8( xmm7, SUBSH_MASK6_2WAY );\
    /* SubBytes + MixBytes */\
    SUBMIX_2WAY(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
  }\
}

#define Matrix_Transpose_2way(i0, i1, i2, i3, i4, i5, i6, i7, t0, t1, t2, t3, t4, t5, t6, t7){\
  t0 = TRANSP_MASK_2WAY;\
  \
  i6 = _mm256_shuffle_epi8(i6, t0);\
  i0 = _mm256_shuffle_epi8(i0, t0);\
  i1 = _mm256_shuffle_epi8(i1, t0);\
  i2 = _mm256_shuffle_epi8(i2, t0);\
  i3 = _mm256_shuffle_epi8(i3, t0);\
  t1 = i2;\
  i4 = _mm256_shuffle_epi8(i4, t0);\
  i5 = _mm256_shuffle_epi8(i5, t0);\
  t2 = i4;\
  t3 = i6;\
  i7 = _mm256_shuffle_epi8(i7, t0);\
  \
  /* continue with unpack using 4 temp registers */\
  t0 = i0;\
  t2 = _mm256_unpackhi_epi16(t2, i5);\
  i4 = _mm256_unpacklo_epi16(i4, i5);\
  t3 = _mm256_unpackhi_epi16(t3, i7);\
  i6 = _mm256_unpacklo_epi16(i6, i7);\
  t0 = _mm256_unpackhi_epi16(t0, i1);\
  t1 = _mm256_unpackhi_epi16(t1, i3);\
  i2 = _mm256_unpacklo_epi16(i2, i3);\
  i0 = _mm256_unpacklo_epi16(i0, i1);\
  \
  /* shuffle with immediate */\
  t0 = _mm256_shuffle_epi32(t0, 216);\
  t1 = _mm256_shuffle_epi32(t1, 216);\
  t2 = _mm256_shuffle_epi32(t2, 216);\
  t3 = _mm256_shuffle_epi32(t3, 216);\
  i0 = _mm256_shuffle_epi32(i0, 216);\
  i2 = _mm256_shuffle_epi32(i2, 216);\
  i4 = _mm256_shuffle_epi32(i4, 216);\
  i6 = _mm256_shuffle_epi32(i6, 216);\
  \
  /* continue with unpack */\
  t4 = i0;\
  i0 = _mm256_unpacklo_epi32(i0, i2);\
  t4 = _mm256_unpackhi_epi32(t4, i2);\
  t5 = t0;\
  t0 = _mm256_unpacklo_epi32(t0, t1);\
  t5 = _mm256_unpackhi_epi32(t5, t1);\
  t6 = i4;\
  i4 = _mm256_unpacklo_epi32(i4, i6);\
  t7 = t2;\
  t6 = _mm256_unpackhi_epi32(t6, i6);\
  i2 = t0;\
  t2 = _mm256_unpacklo_epi32(t2, t3);\
  i3 = t0;\
  t7 = _mm256_unpackhi_epi32(t7, t3);\
  \
  /* there are now 2 rows in each xmm */\
  /* unpack to get 1 row of CV in each xmm */\
  i1 = i0;\
  i1 = _mm256_unpackhi_epi64(i1, i4);\
  i0 = _mm256_unpacklo_epi64(i0, i4);\
  i4 = t4;\
  i3 = _mm256_unpackhi_epi64(i3, t2);\
  i5 = t4;\
  i2 = _mm256_unpacklo_epi64(i2, t2);\
  i6 = t5;\
  i5 = _mm256_unpackhi_epi64(i5, t6);\
  i7 = t5;\
  i4 = _mm256_unpacklo_epi64(i4, t6);\
  i7 = _mm256_unpackhi_epi64(i7, t7);\
  i6 = _mm256_unpacklo_epi64(i6, t7);\
  /* transpose done */\
}/**/

#define Matrix_Transpose_INV_2way(i0, i1, i2, i3, i4, i5, i6, i7, o0, o1, o2, t0, t1, t2, t3, t4){\
  /* transpose matrix to get output format */\
  o1 = i0;\
  i0 = _mm256_unpacklo_epi64(i0, i1);\
  o1 = _mm256_unpackhi_epi64(o1, i1);\
  t0 = i2;\
  i2 = _mm256_unpacklo_epi64(i2, i3);\
  t0 = _mm256_unpackhi_epi64(t0, i3);\
  t1 = i4;\
  i4 = _mm256_unpacklo_epi64(i4, i5);\
  t1 = _mm256_unpackhi_epi64(t1, i5);\
  t2 = i6;\
  o0 = TRANSP_MASK_2WAY;\
  i6 = _mm256_unpacklo_epi64(i6, i7);\
  t2 = _mm256_unpackhi_epi64(t2, i7);\
  /* load transpose mask into a register, because it will be used 8 times */\
  i0 = _mm256_shuffle_epi8(i0, o0);\
  i2 = _mm256_shuffle_epi8(i2, o0);\
  i4 = _mm256_shuffle_epi8(i4, o0);\
  i6 = _mm256_shuffle_epi8(i6, o0);\
  o1 = _mm256_shuffle_epi8(o1, o0);\
  t0 = _mm256_shuffle_epi8(t0, o0);\
  t1 = _mm256_shuffle_epi8(t1, o0);\
  t2 = _mm256_shuffle_epi8(t2, o0);\
  /* continue with unpack using 4 temp registers */\
  t3 = i4;\
  o2 = o1;\
  o0 = i0;\
  t4 = t1;\
  \
  t3 = _mm256_unpackhi_epi16(t3, i6);\
  i4 = _mm256_unpacklo_epi16(i4, i6);\
  o0 = _mm256_unpackhi_epi16(o0, i2);\
  i0 = _mm256_unpacklo_epi16(i0, i2);\
  o2 = _mm256_unpackhi_epi16(o2, t0);\
  o1 = _mm256_unpacklo_epi16(o1, t0);\
  t4 = _mm256_unpackhi_epi16(t4, t2);\
  t1 = _mm256_unpacklo_epi16(t1, t2);\
  /* shuffle with immediate */\
  i4 = _mm256_shuffle_epi32(i4, 216);\
  t3 = _mm256_shuffle_epi32(t3, 216);\
  o1 = _mm256_shuffle_epi32(o1, 216);\
  o2 = _mm256_shuffle_epi32(o2, 216);\
  i0 = _mm256_shuffle_epi32(i0, 216);\
  o0 = _mm256_shuffle_epi32(o0, 216);\
  t1 = _mm256_shuffle_epi32(t1, 216);\
  t4 = _mm256_shuffle_epi32(t4, 216);\
  /* continue with unpack */\
  i1 = i0;\
  i3 = o0;\
  i5 = o1;\
  i7 = o2;\
  i0 = _mm256_unpacklo_epi32(i0, i4);\
  i1 = _mm256_unpackhi_epi32(i1, i4);\
  o0 = _mm256_unpacklo_epi32(o0, t3);\
  i3 = _mm256_unpackhi_epi32(i3, t3);\
  o1 = _mm256_unpacklo_epi32(o1, t1);\
  i5 = _mm256_unpackhi_epi32(i5, t1);\
  o2 = _mm256_unpacklo_epi32(o2, t4);\
  i7 = _mm256_unpackhi_epi32(i7, t4);\
  /* transpose done */\
}/**/

void INIT_2way( __m256i *chaining )
{
   static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
   static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;

   /* load IV into registers xmm8 - xmm15 */
   xmm8 = chaining[0];
   xmm9 = chaining[1];
   xmm10 = chaining[2];
   xmm11 = chaining[3];
   xmm12 = chaining[4];
   xmm13 = chaining[5];
   xmm14 = chaining[6];
   xmm15 = chaining[7];

   /* transform chaining value from column ordering into row ordering */
   Matrix_Transpose_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);

   /* store transposed IV */
   chaining[0] = xmm8;
   chaining[1] = xmm9;
   chaining[2] = xmm10;
   chaining[3] = xmm11;
   chaining[4] = xmm12;
   chaining[5] = xmm13;
   chaining[6] = xmm14;
   chaining[7] = xmm15;
}

void TF1024_2way( __m256i *chaining, const __m256i *message )
{
   static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
   static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
   static __m256i QTEMP[8];
   static __m256i TEMP0;
   static __m256i TEMP1;
   static __m256i TEMP2;

   /* load message into registers xmm8 - xmm15 (Q = message) */
   xmm8 = message[0];
   xmm9 = message[1];
   xmm10 = message[2];
   xmm11 = message[3];
   xmm12 = message[4];
   xmm13 = message[5];
   xmm14 = message[6];
   xmm15 = message[7];

   /* transform message M from column ordering into row ordering */
   Matrix_Transpose_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);

   /* store message M (Q input) for later */
   QTEMP[0] = xmm8;
   QTEMP[1] = xmm9;
   QTEMP[2] = xmm10;
   QTEMP[3] = xmm11;
   QTEMP[4] = xmm12;
   QTEMP[5] = xmm13;
   QTEMP[6] = xmm14;
   QTEMP[7] = xmm15;

   /* xor CV to message to get P input */
   /* result: CV+M in xmm8...xmm15 */
   xmm8 = _mm256_xor_si256( xmm8, (chaining[0]) );
   xmm9 = _mm256_xor_si256( xmm9, (chaining[1]) );
   xmm10 = _mm256_xor_si256( xmm10, (chaining[2]) );
   xmm11 = _mm256_xor_si256( xmm11, (chaining[3]) );
   xmm12 = _mm256_xor_si256( xmm12, (chaining[4]) );
   xmm13 = _mm256_xor_si256( xmm13, (chaining[5]) );
   xmm14 = _mm256_xor_si256( xmm14, (chaining[6]) );
   xmm15 = _mm256_xor_si256( xmm15, (chaining[7]) );

   /* compute permutation P */
   /* result: P(CV+M) in xmm8...xmm15 */
   ROUNDS_P_2WAY();

   /* xor CV to P output (feed-forward) */
   /* result: P(CV+M)+CV in xmm8...xmm15 */
   xmm8 = _mm256_xor_si256( xmm8, (chaining[0]) );
   xmm9 = _mm256_xor_si256( xmm9, (chaining[1]) );
   xmm10 = _mm256_xor_si256( xmm10, (chaining[2]) );
   xmm11 = _mm256_xor_si256( xmm11, (chaining[3]) );
   xmm12 = _mm256_xor_si256( xmm12, (chaining[4]) );
   xmm13 = _mm256_xor_si256( xmm13, (chaining[5]) );
   xmm14 = _mm256_xor_si256( xmm14, (chaining[6]) );
   xmm15 = _mm256_xor_si256( xmm15, (chaining[7]) );

   /* store P(CV+M)+CV */
   chaining[0] = xmm8;
   chaining[1] = xmm9;
   chaining[2] = xmm10;
   chaining[3] = xmm11;
   chaining[4] = xmm12;
   chaining[5] = xmm13;
   chaining[6] = xmm14;
   chaining[7] = xmm15;

   /* load message M (Q input) into xmm8-15 */
   xmm8 = QTEMP[0];
   xmm9 = QTEMP[1];
   xmm10 = QTEMP[2];
   xmm11 = QTEMP[3];
   xmm12 = QTEMP[4];
   xmm13 = QTEMP[5];
   xmm14 = QTEMP[6];
   xmm15 = QTEMP[7];

   /* compute permutation Q */
   /* result: Q(M) in xmm8...xmm15 */
   ROUNDS_Q_2WAY();

   /* xor Q output */
   /* result: P(CV+M)+CV+Q(M) in xmm8...xmm15 */
   xmm8 = _mm256_xor_si256( xmm8, (chaining[0]) );
   xmm9 = _mm256_xor_si256( xmm9, (chaining[1]) );
   xmm10 = _mm256_xor_si256( xmm10, (chaining[2]) );
   xmm11 = _mm256_xor_si256( xmm11, (chaining[3]) );
   xmm12 = _mm256_xor_si256( xmm12, (chaining[4]) );
   xmm13 = _mm256_xor_si256( xmm13, (chaining[5]) );
   xmm14 = _mm256_xor_si256( xmm14, (chaining[6]) );
   xmm15 = _mm256_xor_si256( xmm15, (chaining[7]) );

   /* store CV */
   chaining[0] = xmm8;
   chaining[1] = xmm9;
   chaining[2] = xmm10;
   chaining[3] = xmm11;
   chaining[4] = xmm12;
   chaining[5] = xmm13;
   chaining[6] = xmm14;
   chaining[7] = xmm15;

   return;
}

void OF1024_2way( __m256i* chaining )
{
   static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
   static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
   static __m256i TEMP0;
   static __m256i TEMP1;
   static __m256i TEMP2;

   /* load CV into registers xmm8 - xmm15 */
   xmm8 = chaining[0];
   xmm9 = chaining[1];
   xmm10 = chaining[2];
   xmm11 = chaining[3];
   xmm12 = chaining[4];
   xmm13 = chaining[5];
   xmm14 = chaining[6];
   xmm15 = chaining[7];

   /* compute permutation P */
   /* result: P(CV) in xmm8...xmm15 */
   ROUNDS_P_2WAY();

   /* xor CV to P output (feed-forward) */
   /* result: P(CV)+CV in xmm8...xmm15 */
   xmm8 = _mm256_xor_si256( xmm8, (chaining[0]) );
   xmm9 = _mm256_xor_si256( xmm9, (chaining[1]) );
   xmm10 = _mm256_xor_si256( xmm10, (chaining[2]) );
   xmm11 = _mm256_xor_si256( xmm11, (chaining[3]) );
   xmm12 = _mm256_xor_si256( xmm12, (chaining[4]) );
   xmm13 = _mm256_xor_si256( xmm13, (chaining[5]) );
   xmm14 = _mm256_xor_si256( xmm14, (chaining[6]) );
   xmm15 = _mm256_xor_si256( xmm15, (chaining[7]) );

   /* transform state back from row ordering into column ordering */
   /* result: final hash value in xmm0, xmm6, xmm13, xmm15 */
   Matrix_Transpose_INV_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm4, xmm0, xmm6, xmm1, xmm2, xmm3, xmm5, xmm7);

   /* we only need to return the truncated half of the state */
   chaining[4] = xmm0;
   chaining[5] = xmm6;
   chaining[6] = xmm13;
   chaining[7] = xmm15;

   return;
}


#endif // VAES
#endif // GROESTL512_INTR_4WAY_H__