mirror of https://github.com/JayDDee/cpuminer-opt.git (synced 2025-09-17 23:44:27 +00:00)

commit: v23.5
@@ -10,11 +10,9 @@
* This code is placed in the public domain
*/

#include <smmintrin.h>
#include <wmmintrin.h>
#include "hash-groestl.h"

static const __m128i round_const_p[] __attribute__ ((aligned (64))) =
static const v128u64_t round_const_p[] __attribute__ ((aligned (64))) =
|
||||
{
|
||||
{ 0x7060504030201000, 0xf0e0d0c0b0a09080 },
|
||||
{ 0x7161514131211101, 0xf1e1d1c1b1a19181 },
|
||||
@@ -32,7 +30,7 @@ static const __m128i round_const_p[] __attribute__ ((aligned (64))) =
|
||||
{ 0x7d6d5d4d3d2d1d0d, 0xfdedddcdbdad9d8d }
|
||||
};
|
||||
|
||||
static const __m128i round_const_q[] __attribute__ ((aligned (64))) =
|
||||
static const v128u64_t round_const_q[] __attribute__ ((aligned (64))) =
|
||||
{
|
||||
{ 0x8f9fafbfcfdfefff, 0x0f1f2f3f4f5f6f7f },
|
||||
{ 0x8e9eaebecedeeefe, 0x0e1e2e3e4e5e6e7e },
|
||||
@@ -50,15 +48,29 @@ static const __m128i round_const_q[] __attribute__ ((aligned (64))) =
|
||||
{ 0x8292a2b2c2d2e2f2, 0x0212223242526272 }
|
||||
};
|
||||
|
||||
static const __m128i TRANSP_MASK = { 0x0d0509010c040800, 0x0f070b030e060a02 };
|
||||
static const __m128i SUBSH_MASK0 = { 0x0b0e0104070a0d00, 0x0306090c0f020508 };
|
||||
static const __m128i SUBSH_MASK1 = { 0x0c0f0205080b0e01, 0x04070a0d00030609 };
|
||||
static const __m128i SUBSH_MASK2 = { 0x0d000306090c0f02, 0x05080b0e0104070a };
|
||||
static const __m128i SUBSH_MASK3 = { 0x0e0104070a0d0003, 0x06090c0f0205080b };
|
||||
static const __m128i SUBSH_MASK4 = { 0x0f0205080b0e0104, 0x070a0d000306090c };
|
||||
static const __m128i SUBSH_MASK5 = { 0x000306090c0f0205, 0x080b0e0104070a0d };
|
||||
static const __m128i SUBSH_MASK6 = { 0x0104070a0d000306, 0x090c0f0205080b0e };
|
||||
static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
|
||||
static const v128u64_t TRANSP_MASK = { 0x0d0509010c040800, 0x0f070b030e060a02 };
|
||||
static const v128u64_t SUBSH_MASK0 = { 0x0b0e0104070a0d00, 0x0306090c0f020508 };
|
||||
static const v128u64_t SUBSH_MASK1 = { 0x0c0f0205080b0e01, 0x04070a0d00030609 };
|
||||
static const v128u64_t SUBSH_MASK2 = { 0x0d000306090c0f02, 0x05080b0e0104070a };
|
||||
static const v128u64_t SUBSH_MASK3 = { 0x0e0104070a0d0003, 0x06090c0f0205080b };
|
||||
static const v128u64_t SUBSH_MASK4 = { 0x0f0205080b0e0104, 0x070a0d000306090c };
|
||||
static const v128u64_t SUBSH_MASK5 = { 0x000306090c0f0205, 0x080b0e0104070a0d };
|
||||
static const v128u64_t SUBSH_MASK6 = { 0x0104070a0d000306, 0x090c0f0205080b0e };
|
||||
static const v128u64_t SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
|
||||
|
||||
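The TRANSP_MASK and SUBSH_MASK* constants above are byte-permutation masks consumed by
v128_shuffle8 (pshufb on x86, typically a tbl lookup on NEON) and are used here to realise
Groestl's ShiftBytes together with the AESENCLAST trick further below. A minimal
self-contained sketch of the x86 semantics these masks rely on (illustrative, not part of
this file):

#include <tmmintrin.h>   /* SSSE3: _mm_shuffle_epi8 */

/* Byte i of the result is src[ mask[i] & 0x0f ], or 0 when the mask byte's
   high bit is set. */
static inline __m128i byte_permute( __m128i src, __m128i mask )
{
    return _mm_shuffle_epi8( src, mask );
}
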
#if defined(__ARM_NEON)

// No fast shuffle on NEON
static const uint32x4_t vmask_d8 = { 3, 1, 2, 0 };

#define gr_shuffle32( v ) v128_shufflev32( v, vmask_d8 )

#else

#define gr_shuffle32( v ) _mm_shuffle_epi32( v, 0xd8 )

#endif
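Both branches are intended to produce the same lane order; on x86 the immediate 0xd8
(binary 11 01 10 00) selects 32-bit lanes 0, 2, 1, 3, i.e. it swaps the two middle lanes,
while the NEON mask follows the convention of the v128_shufflev32 wrapper. A scalar sketch
of the x86 permutation (illustrative only):

#include <stdint.h>

static inline void gr_shuffle32_scalar( uint32_t d[4], const uint32_t s[4] )
{
    d[0] = s[0];   /* imm8 bits 1:0 = 00 */
    d[1] = s[2];   /* imm8 bits 3:2 = 10 */
    d[2] = s[1];   /* imm8 bits 5:4 = 01 */
    d[3] = s[3];   /* imm8 bits 7:6 = 11 */
}
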
#define tos(a) #a
|
||||
#define tostr(a) tos(a)
|
||||
@@ -67,9 +79,9 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
* xmm[j] will be lost
* xmm[k] has to be all 0x1b */
#define MUL2(i, j, k){\
j = _mm_cmpgt_epi8( m128_zero, i);\
i = _mm_add_epi8(i, i);\
i = mm128_xorand(i, j, k );\
j = v128_cmpgt8( v128_zero, i);\
i = v128_add8(i, i);\
i = v128_xorand(i, j, k );\
}
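MUL2 doubles every byte in the GF(2^8) field used by AES/Groestl (reduction polynomial
0x11b): after the shift, bytes whose top bit was set are reduced by XORing 0x1b. A
per-byte scalar sketch of the same operation (illustrative only):

#include <stdint.h>

static inline uint8_t gf256_double( uint8_t x )
{
    uint8_t carry = ( x & 0x80 ) ? 0xff : 0x00;          /* j = cmpgt8( 0, x ) on signed bytes */
    return (uint8_t)( ( x << 1 ) ^ ( carry & 0x1b ) );   /* i = (i + i) ^ (j & k), k = 0x1b */
}
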
|
||||
/**/
|
||||
@@ -98,85 +110,85 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
|
||||
/* t_i = a_i + a_{i+1} */\
|
||||
b6 = a0;\
|
||||
b7 = a1;\
|
||||
a0 = _mm_xor_si128(a0, a1);\
|
||||
a0 = v128_xor(a0, a1);\
|
||||
b0 = a2;\
|
||||
a1 = _mm_xor_si128(a1, a2);\
|
||||
a1 = v128_xor(a1, a2);\
|
||||
b1 = a3;\
|
||||
TEMP2 = _mm_xor_si128(a2, a3);\
|
||||
TEMP2 = v128_xor(a2, a3);\
|
||||
b2 = a4;\
|
||||
a3 = _mm_xor_si128(a3, a4);\
|
||||
a3 = v128_xor(a3, a4);\
|
||||
b3 = a5;\
|
||||
a4 = _mm_xor_si128(a4, a5);\
|
||||
a4 = v128_xor(a4, a5);\
|
||||
b4 = a6;\
|
||||
a5 = _mm_xor_si128(a5, a6);\
|
||||
a5 = v128_xor(a5, a6);\
|
||||
b5 = a7;\
|
||||
a6 = _mm_xor_si128(a6, a7);\
|
||||
a7 = _mm_xor_si128(a7, b6);\
|
||||
a6 = v128_xor(a6, a7);\
|
||||
a7 = v128_xor(a7, b6);\
|
||||
\
|
||||
/* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
|
||||
TEMP0 = mm128_xor3( b0, a4, a6 ); \
|
||||
TEMP0 = v128_xor3( b0, a4, a6 ); \
|
||||
/* spill values y_4, y_5 to memory */\
|
||||
TEMP1 = mm128_xor3( b1, a5, a7 );\
|
||||
b2 = mm128_xor3( b2, a6, a0 ); \
|
||||
TEMP1 = v128_xor3( b1, a5, a7 );\
|
||||
b2 = v128_xor3( b2, a6, a0 ); \
|
||||
/* save values t0, t1, t2 to xmm8, xmm9 and memory */\
|
||||
b0 = a0;\
|
||||
b3 = mm128_xor3( b3, a7, a1 ); \
|
||||
b3 = v128_xor3( b3, a7, a1 ); \
|
||||
b1 = a1;\
|
||||
b6 = mm128_xor3( b6, a4, TEMP2 ); \
|
||||
b4 = mm128_xor3( b4, a0, TEMP2 ); \
|
||||
b7 = mm128_xor3( b7, a5, a3 ); \
|
||||
b5 = mm128_xor3( b5, a1, a3 ); \
|
||||
b6 = v128_xor3( b6, a4, TEMP2 ); \
|
||||
b4 = v128_xor3( b4, a0, TEMP2 ); \
|
||||
b7 = v128_xor3( b7, a5, a3 ); \
|
||||
b5 = v128_xor3( b5, a1, a3 ); \
|
||||
\
|
||||
/* compute x_i = t_i + t_{i+3} */\
|
||||
a0 = _mm_xor_si128(a0, a3);\
|
||||
a1 = _mm_xor_si128(a1, a4);\
|
||||
a2 = _mm_xor_si128(TEMP2, a5);\
|
||||
a3 = _mm_xor_si128(a3, a6);\
|
||||
a4 = _mm_xor_si128(a4, a7);\
|
||||
a5 = _mm_xor_si128(a5, b0);\
|
||||
a6 = _mm_xor_si128(a6, b1);\
|
||||
a7 = _mm_xor_si128(a7, TEMP2);\
|
||||
a0 = v128_xor(a0, a3);\
|
||||
a1 = v128_xor(a1, a4);\
|
||||
a2 = v128_xor(TEMP2, a5);\
|
||||
a3 = v128_xor(a3, a6);\
|
||||
a4 = v128_xor(a4, a7);\
|
||||
a5 = v128_xor(a5, b0);\
|
||||
a6 = v128_xor(a6, b1);\
|
||||
a7 = v128_xor(a7, TEMP2);\
|
||||
\
|
||||
/* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
|
||||
/* compute w_i : add y_{i+4} */\
|
||||
b1 = _mm_set1_epi64x( 0x1b1b1b1b1b1b1b1b );\
|
||||
b1 = v128_64( 0x1b1b1b1b1b1b1b1b );\
|
||||
MUL2(a0, b0, b1);\
|
||||
a0 = _mm_xor_si128(a0, TEMP0);\
|
||||
a0 = v128_xor(a0, TEMP0);\
|
||||
MUL2(a1, b0, b1);\
|
||||
a1 = _mm_xor_si128(a1, TEMP1);\
|
||||
a1 = v128_xor(a1, TEMP1);\
|
||||
MUL2(a2, b0, b1);\
|
||||
a2 = _mm_xor_si128(a2, b2);\
|
||||
a2 = v128_xor(a2, b2);\
|
||||
MUL2(a3, b0, b1);\
|
||||
a3 = _mm_xor_si128(a3, b3);\
|
||||
a3 = v128_xor(a3, b3);\
|
||||
MUL2(a4, b0, b1);\
|
||||
a4 = _mm_xor_si128(a4, b4);\
|
||||
a4 = v128_xor(a4, b4);\
|
||||
MUL2(a5, b0, b1);\
|
||||
a5 = _mm_xor_si128(a5, b5);\
|
||||
a5 = v128_xor(a5, b5);\
|
||||
MUL2(a6, b0, b1);\
|
||||
a6 = _mm_xor_si128(a6, b6);\
|
||||
a6 = v128_xor(a6, b6);\
|
||||
MUL2(a7, b0, b1);\
|
||||
a7 = _mm_xor_si128(a7, b7);\
|
||||
a7 = v128_xor(a7, b7);\
|
||||
\
|
||||
/* compute v_i : double w_i */\
|
||||
/* add to y_4 y_5 .. v3, v4, ... */\
|
||||
MUL2(a0, b0, b1);\
|
||||
b5 = _mm_xor_si128(b5, a0);\
|
||||
b5 = v128_xor(b5, a0);\
|
||||
MUL2(a1, b0, b1);\
|
||||
b6 = _mm_xor_si128(b6, a1);\
|
||||
b6 = v128_xor(b6, a1);\
|
||||
MUL2(a2, b0, b1);\
|
||||
b7 = _mm_xor_si128(b7, a2);\
|
||||
b7 = v128_xor(b7, a2);\
|
||||
MUL2(a5, b0, b1);\
|
||||
b2 = _mm_xor_si128(b2, a5);\
|
||||
b2 = v128_xor(b2, a5);\
|
||||
MUL2(a6, b0, b1);\
|
||||
b3 = _mm_xor_si128(b3, a6);\
|
||||
b3 = v128_xor(b3, a6);\
|
||||
MUL2(a7, b0, b1);\
|
||||
b4 = _mm_xor_si128(b4, a7);\
|
||||
b4 = v128_xor(b4, a7);\
|
||||
MUL2(a3, b0, b1);\
|
||||
MUL2(a4, b0, b1);\
|
||||
b0 = TEMP0;\
|
||||
b1 = TEMP1;\
|
||||
b0 = _mm_xor_si128(b0, a3);\
|
||||
b1 = _mm_xor_si128(b1, a4);\
|
||||
b0 = v128_xor(b0, a3);\
|
||||
b1 = v128_xor(b1, a4);\
|
||||
}/*MixBytes*/
|
||||
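For orientation: MixBytes multiplies each state column by the circulant matrix
B = circ(02, 02, 03, 04, 05, 03, 05, 07) over GF(2^8), so (up to indexing convention)
each output byte is b_i = sum_j B[i][j] * a_j. The t/x/y/z/w/v temporaries above follow
the factorisation given in the "Byte slicing Groestl" paper, which reduces the whole
matrix product to XORs plus the MUL2 doublings.
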
|
||||
#else
|
||||
@@ -185,96 +197,96 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
|
||||
/* t_i = a_i + a_{i+1} */\
|
||||
b6 = a0;\
|
||||
b7 = a1;\
|
||||
a0 = _mm_xor_si128(a0, a1);\
|
||||
a0 = v128_xor(a0, a1);\
|
||||
b0 = a2;\
|
||||
a1 = _mm_xor_si128(a1, a2);\
|
||||
a1 = v128_xor(a1, a2);\
|
||||
b1 = a3;\
|
||||
a2 = _mm_xor_si128(a2, a3);\
|
||||
a2 = v128_xor(a2, a3);\
|
||||
b2 = a4;\
|
||||
a3 = _mm_xor_si128(a3, a4);\
|
||||
a3 = v128_xor(a3, a4);\
|
||||
b3 = a5;\
|
||||
a4 = _mm_xor_si128(a4, a5);\
|
||||
a4 = v128_xor(a4, a5);\
|
||||
b4 = a6;\
|
||||
a5 = _mm_xor_si128(a5, a6);\
|
||||
a5 = v128_xor(a5, a6);\
|
||||
b5 = a7;\
|
||||
a6 = _mm_xor_si128(a6, a7);\
|
||||
a7 = _mm_xor_si128(a7, b6);\
|
||||
a6 = v128_xor(a6, a7);\
|
||||
a7 = v128_xor(a7, b6);\
|
||||
\
|
||||
/* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
|
||||
b0 = _mm_xor_si128(b0, a4);\
|
||||
b6 = _mm_xor_si128(b6, a4);\
|
||||
b1 = _mm_xor_si128(b1, a5);\
|
||||
b7 = _mm_xor_si128(b7, a5);\
|
||||
b2 = _mm_xor_si128(b2, a6);\
|
||||
b0 = _mm_xor_si128(b0, a6);\
|
||||
b0 = v128_xor(b0, a4);\
|
||||
b6 = v128_xor(b6, a4);\
|
||||
b1 = v128_xor(b1, a5);\
|
||||
b7 = v128_xor(b7, a5);\
|
||||
b2 = v128_xor(b2, a6);\
|
||||
b0 = v128_xor(b0, a6);\
|
||||
/* spill values y_4, y_5 to memory */\
|
||||
TEMP0 = b0;\
|
||||
b3 = _mm_xor_si128(b3, a7);\
|
||||
b1 = _mm_xor_si128(b1, a7);\
|
||||
b3 = v128_xor(b3, a7);\
|
||||
b1 = v128_xor(b1, a7);\
|
||||
TEMP1 = b1;\
|
||||
b4 = _mm_xor_si128(b4, a0);\
|
||||
b2 = _mm_xor_si128(b2, a0);\
|
||||
b4 = v128_xor(b4, a0);\
|
||||
b2 = v128_xor(b2, a0);\
|
||||
/* save values t0, t1, t2 to xmm8, xmm9 and memory */\
|
||||
b0 = a0;\
|
||||
b5 = _mm_xor_si128(b5, a1);\
|
||||
b3 = _mm_xor_si128(b3, a1);\
|
||||
b5 = v128_xor(b5, a1);\
|
||||
b3 = v128_xor(b3, a1);\
|
||||
b1 = a1;\
|
||||
b6 = _mm_xor_si128(b6, a2);\
|
||||
b4 = _mm_xor_si128(b4, a2);\
|
||||
b6 = v128_xor(b6, a2);\
|
||||
b4 = v128_xor(b4, a2);\
|
||||
TEMP2 = a2;\
|
||||
b7 = _mm_xor_si128(b7, a3);\
|
||||
b5 = _mm_xor_si128(b5, a3);\
|
||||
b7 = v128_xor(b7, a3);\
|
||||
b5 = v128_xor(b5, a3);\
|
||||
\
|
||||
/* compute x_i = t_i + t_{i+3} */\
|
||||
a0 = _mm_xor_si128(a0, a3);\
|
||||
a1 = _mm_xor_si128(a1, a4);\
|
||||
a2 = _mm_xor_si128(a2, a5);\
|
||||
a3 = _mm_xor_si128(a3, a6);\
|
||||
a4 = _mm_xor_si128(a4, a7);\
|
||||
a5 = _mm_xor_si128(a5, b0);\
|
||||
a6 = _mm_xor_si128(a6, b1);\
|
||||
a7 = _mm_xor_si128(a7, TEMP2);\
|
||||
a0 = v128_xor(a0, a3);\
|
||||
a1 = v128_xor(a1, a4);\
|
||||
a2 = v128_xor(a2, a5);\
|
||||
a3 = v128_xor(a3, a6);\
|
||||
a4 = v128_xor(a4, a7);\
|
||||
a5 = v128_xor(a5, b0);\
|
||||
a6 = v128_xor(a6, b1);\
|
||||
a7 = v128_xor(a7, TEMP2);\
|
||||
\
|
||||
/* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
|
||||
/* compute w_i : add y_{i+4} */\
|
||||
b1 = _mm_set1_epi64x( 0x1b1b1b1b1b1b1b1b );\
|
||||
b1 = v128_64( 0x1b1b1b1b1b1b1b1b );\
|
||||
MUL2(a0, b0, b1);\
|
||||
a0 = _mm_xor_si128(a0, TEMP0);\
|
||||
a0 = v128_xor(a0, TEMP0);\
|
||||
MUL2(a1, b0, b1);\
|
||||
a1 = _mm_xor_si128(a1, TEMP1);\
|
||||
a1 = v128_xor(a1, TEMP1);\
|
||||
MUL2(a2, b0, b1);\
|
||||
a2 = _mm_xor_si128(a2, b2);\
|
||||
a2 = v128_xor(a2, b2);\
|
||||
MUL2(a3, b0, b1);\
|
||||
a3 = _mm_xor_si128(a3, b3);\
|
||||
a3 = v128_xor(a3, b3);\
|
||||
MUL2(a4, b0, b1);\
|
||||
a4 = _mm_xor_si128(a4, b4);\
|
||||
a4 = v128_xor(a4, b4);\
|
||||
MUL2(a5, b0, b1);\
|
||||
a5 = _mm_xor_si128(a5, b5);\
|
||||
a5 = v128_xor(a5, b5);\
|
||||
MUL2(a6, b0, b1);\
|
||||
a6 = _mm_xor_si128(a6, b6);\
|
||||
a6 = v128_xor(a6, b6);\
|
||||
MUL2(a7, b0, b1);\
|
||||
a7 = _mm_xor_si128(a7, b7);\
|
||||
a7 = v128_xor(a7, b7);\
|
||||
\
|
||||
/* compute v_i : double w_i */\
|
||||
/* add to y_4 y_5 .. v3, v4, ... */\
|
||||
MUL2(a0, b0, b1);\
|
||||
b5 = _mm_xor_si128(b5, a0);\
|
||||
b5 = v128_xor(b5, a0);\
|
||||
MUL2(a1, b0, b1);\
|
||||
b6 = _mm_xor_si128(b6, a1);\
|
||||
b6 = v128_xor(b6, a1);\
|
||||
MUL2(a2, b0, b1);\
|
||||
b7 = _mm_xor_si128(b7, a2);\
|
||||
b7 = v128_xor(b7, a2);\
|
||||
MUL2(a5, b0, b1);\
|
||||
b2 = _mm_xor_si128(b2, a5);\
|
||||
b2 = v128_xor(b2, a5);\
|
||||
MUL2(a6, b0, b1);\
|
||||
b3 = _mm_xor_si128(b3, a6);\
|
||||
b3 = v128_xor(b3, a6);\
|
||||
MUL2(a7, b0, b1);\
|
||||
b4 = _mm_xor_si128(b4, a7);\
|
||||
b4 = v128_xor(b4, a7);\
|
||||
MUL2(a3, b0, b1);\
|
||||
MUL2(a4, b0, b1);\
|
||||
b0 = TEMP0;\
|
||||
b1 = TEMP1;\
|
||||
b0 = _mm_xor_si128(b0, a3);\
|
||||
b1 = _mm_xor_si128(b1, a4);\
|
||||
b0 = v128_xor(b0, a3);\
|
||||
b1 = v128_xor(b1, a4);\
|
||||
}/*MixBytes*/
|
||||
|
||||
#endif
|
||||
@@ -286,15 +298,15 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
|
||||
*/
|
||||
#define SUBMIX(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
|
||||
/* SubBytes */\
|
||||
b0 = _mm_xor_si128(b0, b0);\
|
||||
a0 = _mm_aesenclast_si128(a0, b0);\
|
||||
a1 = _mm_aesenclast_si128(a1, b0);\
|
||||
a2 = _mm_aesenclast_si128(a2, b0);\
|
||||
a3 = _mm_aesenclast_si128(a3, b0);\
|
||||
a4 = _mm_aesenclast_si128(a4, b0);\
|
||||
a5 = _mm_aesenclast_si128(a5, b0);\
|
||||
a6 = _mm_aesenclast_si128(a6, b0);\
|
||||
a7 = _mm_aesenclast_si128(a7, b0);\
|
||||
b0 = v128_xor(b0, b0);\
|
||||
a0 = v128_aesenclast(a0, b0);\
|
||||
a1 = v128_aesenclast(a1, b0);\
|
||||
a2 = v128_aesenclast(a2, b0);\
|
||||
a3 = v128_aesenclast(a3, b0);\
|
||||
a4 = v128_aesenclast(a4, b0);\
|
||||
a5 = v128_aesenclast(a5, b0);\
|
||||
a6 = v128_aesenclast(a6, b0);\
|
||||
a7 = v128_aesenclast(a7, b0);\
|
||||
/* MixBytes */\
|
||||
MixBytes(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7);\
|
||||
}
|
||||
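SUBMIX zeroes b0 and uses it as the AES round key: with a zero key, AESENCLAST reduces to
SubBytes(ShiftRows(x)), which is how Groestl's SubBytes is obtained from the AES
instruction (the SUBSH byte shuffles applied beforehand account for the extra ShiftRows).
A self-contained x86 sketch of that identity (illustrative only):

#include <wmmintrin.h>   /* AES-NI */

static inline __m128i sub_bytes_shift_rows( __m128i x )
{
    /* AESENCLAST computes AddRoundKey( SubBytes( ShiftRows( x ) ), key );
       with key = 0 the AddRoundKey step drops out. */
    return _mm_aesenclast_si128( x, _mm_setzero_si128() );
}
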
@@ -303,32 +315,32 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
|
||||
u8 round_counter = 0;\
|
||||
for(round_counter = 0; round_counter < 14; round_counter+=2) {\
|
||||
/* AddRoundConstant P1024 */\
|
||||
xmm8 = _mm_xor_si128( xmm8, \
|
||||
casti_m128i( round_const_p, round_counter ) ); \
|
||||
xmm8 = v128_xor( xmm8, \
|
||||
casti_v128( round_const_p, round_counter ) ); \
|
||||
/* ShiftBytes P1024 + pre-AESENCLAST */\
|
||||
xmm8 = _mm_shuffle_epi8( xmm8, SUBSH_MASK0 ); \
|
||||
xmm9 = _mm_shuffle_epi8( xmm9, SUBSH_MASK1 ); \
|
||||
xmm10 = _mm_shuffle_epi8( xmm10, SUBSH_MASK2 ); \
|
||||
xmm11 = _mm_shuffle_epi8( xmm11, SUBSH_MASK3 ); \
|
||||
xmm12 = _mm_shuffle_epi8( xmm12, SUBSH_MASK4 ); \
|
||||
xmm13 = _mm_shuffle_epi8( xmm13, SUBSH_MASK5 ); \
|
||||
xmm14 = _mm_shuffle_epi8( xmm14, SUBSH_MASK6 ); \
|
||||
xmm15 = _mm_shuffle_epi8( xmm15, SUBSH_MASK7 ); \
|
||||
xmm8 = v128_shuffle8( xmm8, SUBSH_MASK0 ); \
|
||||
xmm9 = v128_shuffle8( xmm9, SUBSH_MASK1 ); \
|
||||
xmm10 = v128_shuffle8( xmm10, SUBSH_MASK2 ); \
|
||||
xmm11 = v128_shuffle8( xmm11, SUBSH_MASK3 ); \
|
||||
xmm12 = v128_shuffle8( xmm12, SUBSH_MASK4 ); \
|
||||
xmm13 = v128_shuffle8( xmm13, SUBSH_MASK5 ); \
|
||||
xmm14 = v128_shuffle8( xmm14, SUBSH_MASK6 ); \
|
||||
xmm15 = v128_shuffle8( xmm15, SUBSH_MASK7 ); \
|
||||
/* SubBytes + MixBytes */\
|
||||
SUBMIX( xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, \
|
||||
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 ); \
|
||||
\
|
||||
/* AddRoundConstant P1024 */\
|
||||
xmm0 = _mm_xor_si128( xmm0, \
|
||||
casti_m128i( round_const_p, round_counter+1 ) ); \
|
||||
xmm0 = _mm_shuffle_epi8( xmm0, SUBSH_MASK0 ); \
|
||||
xmm1 = _mm_shuffle_epi8( xmm1, SUBSH_MASK1 ); \
|
||||
xmm2 = _mm_shuffle_epi8( xmm2, SUBSH_MASK2 ); \
|
||||
xmm3 = _mm_shuffle_epi8( xmm3, SUBSH_MASK3 ); \
|
||||
xmm4 = _mm_shuffle_epi8( xmm4, SUBSH_MASK4 ); \
|
||||
xmm5 = _mm_shuffle_epi8( xmm5, SUBSH_MASK5 ); \
|
||||
xmm6 = _mm_shuffle_epi8( xmm6, SUBSH_MASK6 ); \
|
||||
xmm7 = _mm_shuffle_epi8( xmm7, SUBSH_MASK7 ); \
|
||||
xmm0 = v128_xor( xmm0, \
|
||||
casti_v128( round_const_p, round_counter+1 ) ); \
|
||||
xmm0 = v128_shuffle8( xmm0, SUBSH_MASK0 ); \
|
||||
xmm1 = v128_shuffle8( xmm1, SUBSH_MASK1 ); \
|
||||
xmm2 = v128_shuffle8( xmm2, SUBSH_MASK2 ); \
|
||||
xmm3 = v128_shuffle8( xmm3, SUBSH_MASK3 ); \
|
||||
xmm4 = v128_shuffle8( xmm4, SUBSH_MASK4 ); \
|
||||
xmm5 = v128_shuffle8( xmm5, SUBSH_MASK5 ); \
|
||||
xmm6 = v128_shuffle8( xmm6, SUBSH_MASK6 ); \
|
||||
xmm7 = v128_shuffle8( xmm7, SUBSH_MASK7 ); \
|
||||
SUBMIX( xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, \
|
||||
xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15 ); \
|
||||
}\
|
||||
@@ -338,49 +350,49 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
|
||||
u8 round_counter = 0;\
|
||||
for(round_counter = 0; round_counter < 14; round_counter+=2) {\
|
||||
/* AddRoundConstant Q1024 */\
|
||||
xmm1 = m128_neg1;\
|
||||
xmm8 = _mm_xor_si128( xmm8, xmm1 ); \
|
||||
xmm9 = _mm_xor_si128( xmm9, xmm1 ); \
|
||||
xmm10 = _mm_xor_si128( xmm10, xmm1 ); \
|
||||
xmm11 = _mm_xor_si128( xmm11, xmm1 ); \
|
||||
xmm12 = _mm_xor_si128( xmm12, xmm1 ); \
|
||||
xmm13 = _mm_xor_si128( xmm13, xmm1 ); \
|
||||
xmm14 = _mm_xor_si128( xmm14, xmm1 ); \
|
||||
xmm15 = _mm_xor_si128( xmm15, \
|
||||
casti_m128i( round_const_q, round_counter ) ); \
|
||||
xmm1 = v128_neg1;\
|
||||
xmm8 = v128_xor( xmm8, xmm1 ); \
|
||||
xmm9 = v128_xor( xmm9, xmm1 ); \
|
||||
xmm10 = v128_xor( xmm10, xmm1 ); \
|
||||
xmm11 = v128_xor( xmm11, xmm1 ); \
|
||||
xmm12 = v128_xor( xmm12, xmm1 ); \
|
||||
xmm13 = v128_xor( xmm13, xmm1 ); \
|
||||
xmm14 = v128_xor( xmm14, xmm1 ); \
|
||||
xmm15 = v128_xor( xmm15, \
|
||||
casti_v128( round_const_q, round_counter ) ); \
|
||||
/* ShiftBytes Q1024 + pre-AESENCLAST */\
|
||||
xmm8 = _mm_shuffle_epi8( xmm8, SUBSH_MASK1 ); \
|
||||
xmm9 = _mm_shuffle_epi8( xmm9, SUBSH_MASK3 ); \
|
||||
xmm10 = _mm_shuffle_epi8( xmm10, SUBSH_MASK5 ); \
|
||||
xmm11 = _mm_shuffle_epi8( xmm11, SUBSH_MASK7 ); \
|
||||
xmm12 = _mm_shuffle_epi8( xmm12, SUBSH_MASK0 ); \
|
||||
xmm13 = _mm_shuffle_epi8( xmm13, SUBSH_MASK2 ); \
|
||||
xmm14 = _mm_shuffle_epi8( xmm14, SUBSH_MASK4 ); \
|
||||
xmm15 = _mm_shuffle_epi8( xmm15, SUBSH_MASK6 ); \
|
||||
xmm8 = v128_shuffle8( xmm8, SUBSH_MASK1 ); \
|
||||
xmm9 = v128_shuffle8( xmm9, SUBSH_MASK3 ); \
|
||||
xmm10 = v128_shuffle8( xmm10, SUBSH_MASK5 ); \
|
||||
xmm11 = v128_shuffle8( xmm11, SUBSH_MASK7 ); \
|
||||
xmm12 = v128_shuffle8( xmm12, SUBSH_MASK0 ); \
|
||||
xmm13 = v128_shuffle8( xmm13, SUBSH_MASK2 ); \
|
||||
xmm14 = v128_shuffle8( xmm14, SUBSH_MASK4 ); \
|
||||
xmm15 = v128_shuffle8( xmm15, SUBSH_MASK6 ); \
|
||||
/* SubBytes + MixBytes */\
|
||||
SUBMIX( xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, \
|
||||
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6 , xmm7 ); \
|
||||
\
|
||||
/* AddRoundConstant Q1024 */\
|
||||
xmm9 = m128_neg1;\
|
||||
xmm0 = _mm_xor_si128( xmm0, xmm9 ); \
|
||||
xmm1 = _mm_xor_si128( xmm1, xmm9 ); \
|
||||
xmm2 = _mm_xor_si128( xmm2, xmm9 ); \
|
||||
xmm3 = _mm_xor_si128( xmm3, xmm9 ); \
|
||||
xmm4 = _mm_xor_si128( xmm4, xmm9 ); \
|
||||
xmm5 = _mm_xor_si128( xmm5, xmm9 ); \
|
||||
xmm6 = _mm_xor_si128( xmm6, xmm9 ); \
|
||||
xmm7 = _mm_xor_si128( xmm7, \
|
||||
casti_m128i( round_const_q, round_counter+1 ) ); \
|
||||
xmm9 = v128_neg1;\
|
||||
xmm0 = v128_xor( xmm0, xmm9 ); \
|
||||
xmm1 = v128_xor( xmm1, xmm9 ); \
|
||||
xmm2 = v128_xor( xmm2, xmm9 ); \
|
||||
xmm3 = v128_xor( xmm3, xmm9 ); \
|
||||
xmm4 = v128_xor( xmm4, xmm9 ); \
|
||||
xmm5 = v128_xor( xmm5, xmm9 ); \
|
||||
xmm6 = v128_xor( xmm6, xmm9 ); \
|
||||
xmm7 = v128_xor( xmm7, \
|
||||
casti_v128( round_const_q, round_counter+1 ) ); \
|
||||
/* ShiftBytes Q1024 + pre-AESENCLAST */\
|
||||
xmm0 = _mm_shuffle_epi8( xmm0, SUBSH_MASK1 ); \
|
||||
xmm1 = _mm_shuffle_epi8( xmm1, SUBSH_MASK3 ); \
|
||||
xmm2 = _mm_shuffle_epi8( xmm2, SUBSH_MASK5 ); \
|
||||
xmm3 = _mm_shuffle_epi8( xmm3, SUBSH_MASK7 ); \
|
||||
xmm4 = _mm_shuffle_epi8( xmm4, SUBSH_MASK0 ); \
|
||||
xmm5 = _mm_shuffle_epi8( xmm5, SUBSH_MASK2 ); \
|
||||
xmm6 = _mm_shuffle_epi8( xmm6, SUBSH_MASK4 ); \
|
||||
xmm7 = _mm_shuffle_epi8( xmm7, SUBSH_MASK6 ); \
|
||||
xmm0 = v128_shuffle8( xmm0, SUBSH_MASK1 ); \
|
||||
xmm1 = v128_shuffle8( xmm1, SUBSH_MASK3 ); \
|
||||
xmm2 = v128_shuffle8( xmm2, SUBSH_MASK5 ); \
|
||||
xmm3 = v128_shuffle8( xmm3, SUBSH_MASK7 ); \
|
||||
xmm4 = v128_shuffle8( xmm4, SUBSH_MASK0 ); \
|
||||
xmm5 = v128_shuffle8( xmm5, SUBSH_MASK2 ); \
|
||||
xmm6 = v128_shuffle8( xmm6, SUBSH_MASK4 ); \
|
||||
xmm7 = v128_shuffle8( xmm7, SUBSH_MASK6 ); \
|
||||
/* SubBytes + MixBytes */\
|
||||
SUBMIX( xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, \
|
||||
xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15 ); \
|
||||
@@ -397,70 +409,70 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
|
||||
#define Matrix_Transpose(i0, i1, i2, i3, i4, i5, i6, i7, t0, t1, t2, t3, t4, t5, t6, t7){\
|
||||
t0 = TRANSP_MASK; \
|
||||
\
|
||||
i6 = _mm_shuffle_epi8(i6, t0);\
|
||||
i0 = _mm_shuffle_epi8(i0, t0);\
|
||||
i1 = _mm_shuffle_epi8(i1, t0);\
|
||||
i2 = _mm_shuffle_epi8(i2, t0);\
|
||||
i3 = _mm_shuffle_epi8(i3, t0);\
|
||||
i6 = v128_shuffle8(i6, t0);\
|
||||
i0 = v128_shuffle8(i0, t0);\
|
||||
i1 = v128_shuffle8(i1, t0);\
|
||||
i2 = v128_shuffle8(i2, t0);\
|
||||
i3 = v128_shuffle8(i3, t0);\
|
||||
t1 = i2;\
|
||||
i4 = _mm_shuffle_epi8(i4, t0);\
|
||||
i5 = _mm_shuffle_epi8(i5, t0);\
|
||||
i4 = v128_shuffle8(i4, t0);\
|
||||
i5 = v128_shuffle8(i5, t0);\
|
||||
t2 = i4;\
|
||||
t3 = i6;\
|
||||
i7 = _mm_shuffle_epi8(i7, t0);\
|
||||
i7 = v128_shuffle8(i7, t0);\
|
||||
\
|
||||
/* continue with unpack using 4 temp registers */\
|
||||
t0 = i0;\
|
||||
t2 = _mm_unpackhi_epi16(t2, i5);\
|
||||
i4 = _mm_unpacklo_epi16(i4, i5);\
|
||||
t3 = _mm_unpackhi_epi16(t3, i7);\
|
||||
i6 = _mm_unpacklo_epi16(i6, i7);\
|
||||
t0 = _mm_unpackhi_epi16(t0, i1);\
|
||||
t1 = _mm_unpackhi_epi16(t1, i3);\
|
||||
i2 = _mm_unpacklo_epi16(i2, i3);\
|
||||
i0 = _mm_unpacklo_epi16(i0, i1);\
|
||||
t2 = v128_unpackhi16(t2, i5);\
|
||||
i4 = v128_unpacklo16(i4, i5);\
|
||||
t3 = v128_unpackhi16(t3, i7);\
|
||||
i6 = v128_unpacklo16(i6, i7);\
|
||||
t0 = v128_unpackhi16(t0, i1);\
|
||||
t1 = v128_unpackhi16(t1, i3);\
|
||||
i2 = v128_unpacklo16(i2, i3);\
|
||||
i0 = v128_unpacklo16(i0, i1);\
|
||||
\
|
||||
/* shuffle with immediate */\
|
||||
t0 = _mm_shuffle_epi32(t0, 216);\
|
||||
t1 = _mm_shuffle_epi32(t1, 216);\
|
||||
t2 = _mm_shuffle_epi32(t2, 216);\
|
||||
t3 = _mm_shuffle_epi32(t3, 216);\
|
||||
i0 = _mm_shuffle_epi32(i0, 216);\
|
||||
i2 = _mm_shuffle_epi32(i2, 216);\
|
||||
i4 = _mm_shuffle_epi32(i4, 216);\
|
||||
i6 = _mm_shuffle_epi32(i6, 216);\
|
||||
t0 = gr_shuffle32( t0 ); \
|
||||
t1 = gr_shuffle32( t1 ); \
|
||||
t2 = gr_shuffle32( t2 ); \
|
||||
t3 = gr_shuffle32( t3 ); \
|
||||
i0 = gr_shuffle32( i0 ); \
|
||||
i2 = gr_shuffle32( i2 ); \
|
||||
i4 = gr_shuffle32( i4 ); \
|
||||
i6 = gr_shuffle32( i6 ); \
|
||||
\
|
||||
/* continue with unpack */\
|
||||
t4 = i0;\
|
||||
i0 = _mm_unpacklo_epi32(i0, i2);\
|
||||
t4 = _mm_unpackhi_epi32(t4, i2);\
|
||||
i0 = v128_unpacklo32(i0, i2);\
|
||||
t4 = v128_unpackhi32(t4, i2);\
|
||||
t5 = t0;\
|
||||
t0 = _mm_unpacklo_epi32(t0, t1);\
|
||||
t5 = _mm_unpackhi_epi32(t5, t1);\
|
||||
t0 = v128_unpacklo32(t0, t1);\
|
||||
t5 = v128_unpackhi32(t5, t1);\
|
||||
t6 = i4;\
|
||||
i4 = _mm_unpacklo_epi32(i4, i6);\
|
||||
i4 = v128_unpacklo32(i4, i6);\
|
||||
t7 = t2;\
|
||||
t6 = _mm_unpackhi_epi32(t6, i6);\
|
||||
t6 = v128_unpackhi32(t6, i6);\
|
||||
i2 = t0;\
|
||||
t2 = _mm_unpacklo_epi32(t2, t3);\
|
||||
t2 = v128_unpacklo32(t2, t3);\
|
||||
i3 = t0;\
|
||||
t7 = _mm_unpackhi_epi32(t7, t3);\
|
||||
t7 = v128_unpackhi32(t7, t3);\
|
||||
\
|
||||
/* there are now 2 rows in each xmm */\
|
||||
/* unpack to get 1 row of CV in each xmm */\
|
||||
i1 = i0;\
|
||||
i1 = _mm_unpackhi_epi64(i1, i4);\
|
||||
i0 = _mm_unpacklo_epi64(i0, i4);\
|
||||
i1 = v128_unpackhi64(i1, i4);\
|
||||
i0 = v128_unpacklo64(i0, i4);\
|
||||
i4 = t4;\
|
||||
i3 = _mm_unpackhi_epi64(i3, t2);\
|
||||
i3 = v128_unpackhi64(i3, t2);\
|
||||
i5 = t4;\
|
||||
i2 = _mm_unpacklo_epi64(i2, t2);\
|
||||
i2 = v128_unpacklo64(i2, t2);\
|
||||
i6 = t5;\
|
||||
i5 = _mm_unpackhi_epi64(i5, t6);\
|
||||
i5 = v128_unpackhi64(i5, t6);\
|
||||
i7 = t5;\
|
||||
i4 = _mm_unpacklo_epi64(i4, t6);\
|
||||
i7 = _mm_unpackhi_epi64(i7, t7);\
|
||||
i6 = _mm_unpacklo_epi64(i6, t7);\
|
||||
i4 = v128_unpacklo64(i4, t6);\
|
||||
i7 = v128_unpackhi64(i7, t7);\
|
||||
i6 = v128_unpacklo64(i6, t7);\
|
||||
/* transpose done */\
|
||||
}/**/
|
||||
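Matrix_Transpose builds the transpose out of a byte shuffle followed by 16/32/64-bit
unpack steps. A minimal sketch of the final unpack step on its own, transposing a 2x2
block of 64-bit lanes (illustrative only):

#include <emmintrin.h>   /* SSE2 */

static inline void transpose_2x2_epi64( __m128i *r0, __m128i *r1 )
{
    __m128i t = *r0;
    *r0 = _mm_unpacklo_epi64( t, *r1 );   /* { r0[0], r1[0] } */
    *r1 = _mm_unpackhi_epi64( t, *r1 );   /* { r0[1], r1[1] } */
}
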
|
||||
@@ -471,74 +483,76 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
|
||||
* outputs: (i0, o0, i1, i3, o1, o2, i5, i7)
|
||||
* clobbers: t0-t4
|
||||
*/
|
||||
#define Matrix_Transpose_INV(i0, i1, i2, i3, i4, i5, i6, i7, o0, o1, o2, t0, t1, t2, t3, t4){\
|
||||
#define Matrix_Transpose_INV( i0, i1, i2, i3, i4, i5, i6, i7, \
|
||||
o0, o1, o2, t0, t1, t2, t3, t4 ) \
|
||||
{ \
|
||||
/* transpose matrix to get output format */\
|
||||
o1 = i0;\
|
||||
i0 = _mm_unpacklo_epi64(i0, i1);\
|
||||
o1 = _mm_unpackhi_epi64(o1, i1);\
|
||||
t0 = i2;\
|
||||
i2 = _mm_unpacklo_epi64(i2, i3);\
|
||||
t0 = _mm_unpackhi_epi64(t0, i3);\
|
||||
t1 = i4;\
|
||||
i4 = _mm_unpacklo_epi64(i4, i5);\
|
||||
t1 = _mm_unpackhi_epi64(t1, i5);\
|
||||
t2 = i6;\
|
||||
o1 = i0; \
|
||||
i0 = v128_unpacklo64( i0, i1 ); \
|
||||
o1 = v128_unpackhi64( o1, i1 ); \
|
||||
t0 = i2; \
|
||||
i2 = v128_unpacklo64( i2, i3 ); \
|
||||
t0 = v128_unpackhi64( t0, i3 ); \
|
||||
t1 = i4; \
|
||||
i4 = v128_unpacklo64( i4, i5 ); \
|
||||
t1 = v128_unpackhi64( t1, i5 ); \
|
||||
t2 = i6; \
|
||||
o0 = TRANSP_MASK; \
|
||||
i6 = _mm_unpacklo_epi64(i6, i7);\
|
||||
t2 = _mm_unpackhi_epi64(t2, i7);\
|
||||
i6 = v128_unpacklo64( i6, i7 ); \
|
||||
t2 = v128_unpackhi64( t2, i7 ); \
|
||||
/* load transpose mask into a register, because it will be used 8 times */\
|
||||
i0 = _mm_shuffle_epi8(i0, o0);\
|
||||
i2 = _mm_shuffle_epi8(i2, o0);\
|
||||
i4 = _mm_shuffle_epi8(i4, o0);\
|
||||
i6 = _mm_shuffle_epi8(i6, o0);\
|
||||
o1 = _mm_shuffle_epi8(o1, o0);\
|
||||
t0 = _mm_shuffle_epi8(t0, o0);\
|
||||
t1 = _mm_shuffle_epi8(t1, o0);\
|
||||
t2 = _mm_shuffle_epi8(t2, o0);\
|
||||
i0 = v128_shuffle8( i0, o0 ); \
|
||||
i2 = v128_shuffle8( i2, o0 ); \
|
||||
i4 = v128_shuffle8( i4, o0 ); \
|
||||
i6 = v128_shuffle8( i6, o0 ); \
|
||||
o1 = v128_shuffle8( o1, o0 ); \
|
||||
t0 = v128_shuffle8( t0, o0 ); \
|
||||
t1 = v128_shuffle8( t1, o0 ); \
|
||||
t2 = v128_shuffle8( t2, o0 ); \
|
||||
/* continue with unpack using 4 temp registers */\
|
||||
t3 = i4;\
|
||||
o2 = o1;\
|
||||
o0 = i0;\
|
||||
t4 = t1;\
|
||||
t3 = i4; \
|
||||
o2 = o1; \
|
||||
o0 = i0; \
|
||||
t4 = t1; \
|
||||
\
|
||||
t3 = _mm_unpackhi_epi16(t3, i6);\
|
||||
i4 = _mm_unpacklo_epi16(i4, i6);\
|
||||
o0 = _mm_unpackhi_epi16(o0, i2);\
|
||||
i0 = _mm_unpacklo_epi16(i0, i2);\
|
||||
o2 = _mm_unpackhi_epi16(o2, t0);\
|
||||
o1 = _mm_unpacklo_epi16(o1, t0);\
|
||||
t4 = _mm_unpackhi_epi16(t4, t2);\
|
||||
t1 = _mm_unpacklo_epi16(t1, t2);\
|
||||
t3 = v128_unpackhi16( t3, i6 ); \
|
||||
i4 = v128_unpacklo16( i4, i6 ); \
|
||||
o0 = v128_unpackhi16( o0, i2 ); \
|
||||
i0 = v128_unpacklo16( i0, i2 ); \
|
||||
o2 = v128_unpackhi16( o2, t0 ); \
|
||||
o1 = v128_unpacklo16( o1, t0 ); \
|
||||
t4 = v128_unpackhi16( t4, t2 ); \
|
||||
t1 = v128_unpacklo16( t1, t2 ); \
|
||||
/* shuffle with immediate */\
|
||||
i4 = _mm_shuffle_epi32(i4, 216);\
|
||||
t3 = _mm_shuffle_epi32(t3, 216);\
|
||||
o1 = _mm_shuffle_epi32(o1, 216);\
|
||||
o2 = _mm_shuffle_epi32(o2, 216);\
|
||||
i0 = _mm_shuffle_epi32(i0, 216);\
|
||||
o0 = _mm_shuffle_epi32(o0, 216);\
|
||||
t1 = _mm_shuffle_epi32(t1, 216);\
|
||||
t4 = _mm_shuffle_epi32(t4, 216);\
|
||||
i4 = gr_shuffle32( i4 ); \
|
||||
t3 = gr_shuffle32( t3 ); \
|
||||
o1 = gr_shuffle32( o1 ); \
|
||||
o2 = gr_shuffle32( o2 ); \
|
||||
i0 = gr_shuffle32( i0 ); \
|
||||
o0 = gr_shuffle32( o0 ); \
|
||||
t1 = gr_shuffle32( t1 ); \
|
||||
t4 = gr_shuffle32( t4 ); \
|
||||
/* continue with unpack */\
|
||||
i1 = i0;\
|
||||
i3 = o0;\
|
||||
i5 = o1;\
|
||||
i7 = o2;\
|
||||
i0 = _mm_unpacklo_epi32(i0, i4);\
|
||||
i1 = _mm_unpackhi_epi32(i1, i4);\
|
||||
o0 = _mm_unpacklo_epi32(o0, t3);\
|
||||
i3 = _mm_unpackhi_epi32(i3, t3);\
|
||||
o1 = _mm_unpacklo_epi32(o1, t1);\
|
||||
i5 = _mm_unpackhi_epi32(i5, t1);\
|
||||
o2 = _mm_unpacklo_epi32(o2, t4);\
|
||||
i7 = _mm_unpackhi_epi32(i7, t4);\
|
||||
i1 = i0; \
|
||||
i3 = o0; \
|
||||
i5 = o1; \
|
||||
i7 = o2; \
|
||||
i0 = v128_unpacklo32( i0, i4 ); \
|
||||
i1 = v128_unpackhi32( i1, i4 ); \
|
||||
o0 = v128_unpacklo32( o0, t3 ); \
|
||||
i3 = v128_unpackhi32( i3, t3 ); \
|
||||
o1 = v128_unpacklo32( o1, t1 ); \
|
||||
i5 = v128_unpackhi32( i5, t1 ); \
|
||||
o2 = v128_unpacklo32( o2, t4 ); \
|
||||
i7 = v128_unpackhi32( i7, t4 ); \
|
||||
/* transpose done */\
|
||||
}/**/
|
||||
|
||||
|
||||
void INIT( __m128i* chaining )
|
||||
void INIT( v128_t* chaining )
|
||||
{
|
||||
static __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
|
||||
static __m128i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
|
||||
static v128_t xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
|
||||
static v128_t xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
|
||||
|
||||
/* load IV into registers xmm8 - xmm15 */
|
||||
xmm8 = chaining[0];
|
||||
@@ -564,14 +578,14 @@ void INIT( __m128i* chaining )
|
||||
chaining[7] = xmm15;
|
||||
}
|
||||
|
||||
void TF1024( __m128i* chaining, const __m128i* message )
|
||||
void TF1024( v128_t* chaining, const v128_t* message )
|
||||
{
|
||||
static __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
|
||||
static __m128i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
|
||||
static __m128i QTEMP[8];
|
||||
static __m128i TEMP0;
|
||||
static __m128i TEMP1;
|
||||
static __m128i TEMP2;
|
||||
static v128_t xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
|
||||
static v128_t xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
|
||||
static v128_t QTEMP[8];
|
||||
static v128_t TEMP0;
|
||||
static v128_t TEMP1;
|
||||
static v128_t TEMP2;
|
||||
|
||||
#ifdef IACA_TRACE
|
||||
IACA_START;
|
||||
@@ -602,14 +616,14 @@ void TF1024( __m128i* chaining, const __m128i* message )
|
||||
|
||||
/* xor CV to message to get P input */
|
||||
/* result: CV+M in xmm8...xmm15 */
|
||||
xmm8 = _mm_xor_si128(xmm8, (chaining[0]));
|
||||
xmm9 = _mm_xor_si128(xmm9, (chaining[1]));
|
||||
xmm10 = _mm_xor_si128(xmm10, (chaining[2]));
|
||||
xmm11 = _mm_xor_si128(xmm11, (chaining[3]));
|
||||
xmm12 = _mm_xor_si128(xmm12, (chaining[4]));
|
||||
xmm13 = _mm_xor_si128(xmm13, (chaining[5]));
|
||||
xmm14 = _mm_xor_si128(xmm14, (chaining[6]));
|
||||
xmm15 = _mm_xor_si128(xmm15, (chaining[7]));
|
||||
xmm8 = v128_xor(xmm8, (chaining[0]));
|
||||
xmm9 = v128_xor(xmm9, (chaining[1]));
|
||||
xmm10 = v128_xor(xmm10, (chaining[2]));
|
||||
xmm11 = v128_xor(xmm11, (chaining[3]));
|
||||
xmm12 = v128_xor(xmm12, (chaining[4]));
|
||||
xmm13 = v128_xor(xmm13, (chaining[5]));
|
||||
xmm14 = v128_xor(xmm14, (chaining[6]));
|
||||
xmm15 = v128_xor(xmm15, (chaining[7]));
|
||||
|
||||
/* compute permutation P */
|
||||
/* result: P(CV+M) in xmm8...xmm15 */
|
||||
@@ -617,14 +631,14 @@ void TF1024( __m128i* chaining, const __m128i* message )
|
||||
|
||||
/* xor CV to P output (feed-forward) */
|
||||
/* result: P(CV+M)+CV in xmm8...xmm15 */
|
||||
xmm8 = _mm_xor_si128(xmm8, (chaining[0]));
|
||||
xmm9 = _mm_xor_si128(xmm9, (chaining[1]));
|
||||
xmm10 = _mm_xor_si128(xmm10, (chaining[2]));
|
||||
xmm11 = _mm_xor_si128(xmm11, (chaining[3]));
|
||||
xmm12 = _mm_xor_si128(xmm12, (chaining[4]));
|
||||
xmm13 = _mm_xor_si128(xmm13, (chaining[5]));
|
||||
xmm14 = _mm_xor_si128(xmm14, (chaining[6]));
|
||||
xmm15 = _mm_xor_si128(xmm15, (chaining[7]));
|
||||
xmm8 = v128_xor(xmm8, (chaining[0]));
|
||||
xmm9 = v128_xor(xmm9, (chaining[1]));
|
||||
xmm10 = v128_xor(xmm10, (chaining[2]));
|
||||
xmm11 = v128_xor(xmm11, (chaining[3]));
|
||||
xmm12 = v128_xor(xmm12, (chaining[4]));
|
||||
xmm13 = v128_xor(xmm13, (chaining[5]));
|
||||
xmm14 = v128_xor(xmm14, (chaining[6]));
|
||||
xmm15 = v128_xor(xmm15, (chaining[7]));
|
||||
|
||||
/* store P(CV+M)+CV */
|
||||
chaining[0] = xmm8;
|
||||
@@ -652,14 +666,14 @@ void TF1024( __m128i* chaining, const __m128i* message )
|
||||
|
||||
/* xor Q output */
|
||||
/* result: P(CV+M)+CV+Q(M) in xmm8...xmm15 */
|
||||
xmm8 = _mm_xor_si128(xmm8, (chaining[0]));
|
||||
xmm9 = _mm_xor_si128(xmm9, (chaining[1]));
|
||||
xmm10 = _mm_xor_si128(xmm10, (chaining[2]));
|
||||
xmm11 = _mm_xor_si128(xmm11, (chaining[3]));
|
||||
xmm12 = _mm_xor_si128(xmm12, (chaining[4]));
|
||||
xmm13 = _mm_xor_si128(xmm13, (chaining[5]));
|
||||
xmm14 = _mm_xor_si128(xmm14, (chaining[6]));
|
||||
xmm15 = _mm_xor_si128(xmm15, (chaining[7]));
|
||||
xmm8 = v128_xor(xmm8, (chaining[0]));
|
||||
xmm9 = v128_xor(xmm9, (chaining[1]));
|
||||
xmm10 = v128_xor(xmm10, (chaining[2]));
|
||||
xmm11 = v128_xor(xmm11, (chaining[3]));
|
||||
xmm12 = v128_xor(xmm12, (chaining[4]));
|
||||
xmm13 = v128_xor(xmm13, (chaining[5]));
|
||||
xmm14 = v128_xor(xmm14, (chaining[6]));
|
||||
xmm15 = v128_xor(xmm15, (chaining[7]));
|
||||
|
||||
/* store CV */
|
||||
chaining[0] = xmm8;
|
||||
@@ -678,13 +692,13 @@ void TF1024( __m128i* chaining, const __m128i* message )
|
||||
return;
|
||||
}
|
||||
|
||||
void OF1024( __m128i* chaining )
|
||||
void OF1024( v128_t* chaining )
|
||||
{
|
||||
static __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
|
||||
static __m128i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
|
||||
static __m128i TEMP0;
|
||||
static __m128i TEMP1;
|
||||
static __m128i TEMP2;
|
||||
static v128_t xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
|
||||
static v128_t xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
|
||||
static v128_t TEMP0;
|
||||
static v128_t TEMP1;
|
||||
static v128_t TEMP2;
|
||||
|
||||
/* load CV into registers xmm8 - xmm15 */
|
||||
xmm8 = chaining[0];
|
||||
@@ -702,14 +716,14 @@ void OF1024( __m128i* chaining )
|
||||
|
||||
/* xor CV to P output (feed-forward) */
|
||||
/* result: P(CV)+CV in xmm8...xmm15 */
|
||||
xmm8 = _mm_xor_si128(xmm8, (chaining[0]));
|
||||
xmm9 = _mm_xor_si128(xmm9, (chaining[1]));
|
||||
xmm10 = _mm_xor_si128(xmm10, (chaining[2]));
|
||||
xmm11 = _mm_xor_si128(xmm11, (chaining[3]));
|
||||
xmm12 = _mm_xor_si128(xmm12, (chaining[4]));
|
||||
xmm13 = _mm_xor_si128(xmm13, (chaining[5]));
|
||||
xmm14 = _mm_xor_si128(xmm14, (chaining[6]));
|
||||
xmm15 = _mm_xor_si128(xmm15, (chaining[7]));
|
||||
xmm8 = v128_xor(xmm8, (chaining[0]));
|
||||
xmm9 = v128_xor(xmm9, (chaining[1]));
|
||||
xmm10 = v128_xor(xmm10, (chaining[2]));
|
||||
xmm11 = v128_xor(xmm11, (chaining[3]));
|
||||
xmm12 = v128_xor(xmm12, (chaining[4]));
|
||||
xmm13 = v128_xor(xmm13, (chaining[5]));
|
||||
xmm14 = v128_xor(xmm14, (chaining[6]));
|
||||
xmm15 = v128_xor(xmm15, (chaining[7]));
|
||||
|
||||
/* transpose CV back from row ordering to column ordering */
|
||||
/* result: final hash value in xmm0, xmm6, xmm13, xmm15 */
|
||||
|
||||
@@ -7,11 +7,9 @@
* This code is placed in the public domain
*/

#include <smmintrin.h>
#include <wmmintrin.h>
#include "hash-groestl256.h"

static const __m128i round_const_l0[] __attribute__ ((aligned (64))) =
static const v128u64_t round_const_l0[] __attribute__ ((aligned (64))) =
{
|
||||
{ 0x7060504030201000, 0xffffffffffffffff },
|
||||
{ 0x7161514131211101, 0xffffffffffffffff },
|
||||
@@ -25,7 +23,7 @@ static const __m128i round_const_l0[] __attribute__ ((aligned (64))) =
|
||||
{ 0x7969594939291909, 0xffffffffffffffff }
|
||||
};
|
||||
|
||||
static const __m128i round_const_l7[] __attribute__ ((aligned (64))) =
|
||||
static const v128u64_t round_const_l7[] __attribute__ ((aligned (64))) =
|
||||
{
|
||||
{ 0x0000000000000000, 0x8f9fafbfcfdfefff },
|
||||
{ 0x0000000000000000, 0x8e9eaebecedeeefe },
|
||||
@@ -39,16 +37,30 @@ static const __m128i round_const_l7[] __attribute__ ((aligned (64))) =
|
||||
{ 0x0000000000000000, 0x8696a6b6c6d6e6f6 }
|
||||
};
|
||||
|
||||
static const __m128i TRANSP_MASK = { 0x0d0509010c040800, 0x0f070b030e060a02 };
|
||||
static const v128u64_t TRANSP_MASK = { 0x0d0509010c040800, 0x0f070b030e060a02 };
|
||||
|
||||
static const v128u64_t SUBSH_MASK0 = { 0x0c0f0104070b0e00, 0x03060a0d08020509 };
|
||||
static const v128u64_t SUBSH_MASK1 = { 0x0e090205000d0801, 0x04070c0f0a03060b };
|
||||
static const v128u64_t SUBSH_MASK2 = { 0x080b0306010f0a02, 0x05000e090c04070d };
|
||||
static const v128u64_t SUBSH_MASK3 = { 0x0a0d040702090c03, 0x0601080b0e05000f };
|
||||
static const v128u64_t SUBSH_MASK4 = { 0x0b0e0500030a0d04, 0x0702090c0f060108 };
|
||||
static const v128u64_t SUBSH_MASK5 = { 0x0d080601040c0f05, 0x00030b0e0907020a };
|
||||
static const v128u64_t SUBSH_MASK6 = { 0x0f0a0702050e0906, 0x01040d080b00030c };
|
||||
static const v128u64_t SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e };
|
||||
|
||||
#if defined(__ARM_NEON)
|
||||
|
||||
// No fast shuffle on NEON
|
||||
static const uint32x4_t vmask_d8 = { 3, 1, 2, 0 };
|
||||
|
||||
#define gr_shuffle32( v ) v128_shufflev32( v, vmask_d8 )
|
||||
|
||||
#else
|
||||
|
||||
#define gr_shuffle32( v ) _mm_shuffle_epi32( v, 0xd8 )
|
||||
|
||||
#endif
|
||||
|
||||
static const __m128i SUBSH_MASK0 = { 0x0c0f0104070b0e00, 0x03060a0d08020509 };
|
||||
static const __m128i SUBSH_MASK1 = { 0x0e090205000d0801, 0x04070c0f0a03060b };
|
||||
static const __m128i SUBSH_MASK2 = { 0x080b0306010f0a02, 0x05000e090c04070d };
|
||||
static const __m128i SUBSH_MASK3 = { 0x0a0d040702090c03, 0x0601080b0e05000f };
|
||||
static const __m128i SUBSH_MASK4 = { 0x0b0e0500030a0d04, 0x0702090c0f060108 };
|
||||
static const __m128i SUBSH_MASK5 = { 0x0d080601040c0f05, 0x00030b0e0907020a };
|
||||
static const __m128i SUBSH_MASK6 = { 0x0f0a0702050e0906, 0x01040d080b00030c };
|
||||
static const __m128i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e };
|
||||
|
||||
#define tos(a) #a
|
||||
#define tostr(a) tos(a)
|
||||
@@ -57,11 +69,11 @@ static const __m128i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e };
/* xmm[i] will be multiplied by 2
* xmm[j] will be lost
* xmm[k] has to be all 0x1b */
#define MUL2(i, j, k){\
j = _mm_cmpgt_epi8( m128_zero, i);\
i = _mm_add_epi8(i, i);\
i = mm128_xorand(i, j, k );\
}
#define MUL2( i, j, k ) \
j = v128_cmpgt8( v128_zero, i ); \
i = v128_add8( i, i ); \
i = v128_xorand( i, j, k );
|
||||
|
||||
/* Yet another implementation of MixBytes.
|
||||
This time we use the formulae (3) from the paper "Byte Slicing Groestl".
|
||||
@@ -87,85 +99,85 @@ static const __m128i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e };
|
||||
/* t_i = a_i + a_{i+1} */\
|
||||
b6 = a0;\
|
||||
b7 = a1;\
|
||||
a0 = _mm_xor_si128(a0, a1);\
|
||||
a0 = v128_xor(a0, a1);\
|
||||
b0 = a2;\
|
||||
a1 = _mm_xor_si128(a1, a2);\
|
||||
a1 = v128_xor(a1, a2);\
|
||||
b1 = a3;\
|
||||
TEMP2 = _mm_xor_si128(a2, a3);\
|
||||
TEMP2 = v128_xor(a2, a3);\
|
||||
b2 = a4;\
|
||||
a3 = _mm_xor_si128(a3, a4);\
|
||||
a3 = v128_xor(a3, a4);\
|
||||
b3 = a5;\
|
||||
a4 = _mm_xor_si128(a4, a5);\
|
||||
a4 = v128_xor(a4, a5);\
|
||||
b4 = a6;\
|
||||
a5 = _mm_xor_si128(a5, a6);\
|
||||
a5 = v128_xor(a5, a6);\
|
||||
b5 = a7;\
|
||||
a6 = _mm_xor_si128(a6, a7);\
|
||||
a7 = _mm_xor_si128(a7, b6);\
|
||||
a6 = v128_xor(a6, a7);\
|
||||
a7 = v128_xor(a7, b6);\
|
||||
\
|
||||
/* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
|
||||
TEMP0 = mm128_xor3( b0, a4, a6 ); \
|
||||
TEMP0 = v128_xor3( b0, a4, a6 ); \
|
||||
/* spill values y_4, y_5 to memory */\
|
||||
TEMP1 = mm128_xor3( b1, a5, a7 );\
|
||||
b2 = mm128_xor3( b2, a6, a0 ); \
|
||||
TEMP1 = v128_xor3( b1, a5, a7 );\
|
||||
b2 = v128_xor3( b2, a6, a0 ); \
|
||||
/* save values t0, t1, t2 to xmm8, xmm9 and memory */\
|
||||
b0 = a0;\
|
||||
b3 = mm128_xor3( b3, a7, a1 ); \
|
||||
b3 = v128_xor3( b3, a7, a1 ); \
|
||||
b1 = a1;\
|
||||
b6 = mm128_xor3( b6, a4, TEMP2 ); \
|
||||
b4 = mm128_xor3( b4, a0, TEMP2 ); \
|
||||
b7 = mm128_xor3( b7, a5, a3 ); \
|
||||
b5 = mm128_xor3( b5, a1, a3 ); \
|
||||
b6 = v128_xor3( b6, a4, TEMP2 ); \
|
||||
b4 = v128_xor3( b4, a0, TEMP2 ); \
|
||||
b7 = v128_xor3( b7, a5, a3 ); \
|
||||
b5 = v128_xor3( b5, a1, a3 ); \
|
||||
\
|
||||
/* compute x_i = t_i + t_{i+3} */\
|
||||
a0 = _mm_xor_si128(a0, a3);\
|
||||
a1 = _mm_xor_si128(a1, a4);\
|
||||
a2 = _mm_xor_si128(TEMP2, a5);\
|
||||
a3 = _mm_xor_si128(a3, a6);\
|
||||
a4 = _mm_xor_si128(a4, a7);\
|
||||
a5 = _mm_xor_si128(a5, b0);\
|
||||
a6 = _mm_xor_si128(a6, b1);\
|
||||
a7 = _mm_xor_si128(a7, TEMP2);\
|
||||
a0 = v128_xor(a0, a3);\
|
||||
a1 = v128_xor(a1, a4);\
|
||||
a2 = v128_xor(TEMP2, a5);\
|
||||
a3 = v128_xor(a3, a6);\
|
||||
a4 = v128_xor(a4, a7);\
|
||||
a5 = v128_xor(a5, b0);\
|
||||
a6 = v128_xor(a6, b1);\
|
||||
a7 = v128_xor(a7, TEMP2);\
|
||||
\
|
||||
/* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
|
||||
/* compute w_i : add y_{i+4} */\
|
||||
b1 = _mm_set1_epi64x( 0x1b1b1b1b1b1b1b1b );\
|
||||
b1 = v128_64( 0x1b1b1b1b1b1b1b1b );\
|
||||
MUL2(a0, b0, b1);\
|
||||
a0 = _mm_xor_si128(a0, TEMP0);\
|
||||
a0 = v128_xor(a0, TEMP0);\
|
||||
MUL2(a1, b0, b1);\
|
||||
a1 = _mm_xor_si128(a1, TEMP1);\
|
||||
a1 = v128_xor(a1, TEMP1);\
|
||||
MUL2(a2, b0, b1);\
|
||||
a2 = _mm_xor_si128(a2, b2);\
|
||||
a2 = v128_xor(a2, b2);\
|
||||
MUL2(a3, b0, b1);\
|
||||
a3 = _mm_xor_si128(a3, b3);\
|
||||
a3 = v128_xor(a3, b3);\
|
||||
MUL2(a4, b0, b1);\
|
||||
a4 = _mm_xor_si128(a4, b4);\
|
||||
a4 = v128_xor(a4, b4);\
|
||||
MUL2(a5, b0, b1);\
|
||||
a5 = _mm_xor_si128(a5, b5);\
|
||||
a5 = v128_xor(a5, b5);\
|
||||
MUL2(a6, b0, b1);\
|
||||
a6 = _mm_xor_si128(a6, b6);\
|
||||
a6 = v128_xor(a6, b6);\
|
||||
MUL2(a7, b0, b1);\
|
||||
a7 = _mm_xor_si128(a7, b7);\
|
||||
a7 = v128_xor(a7, b7);\
|
||||
\
|
||||
/* compute v_i : double w_i */\
|
||||
/* add to y_4 y_5 .. v3, v4, ... */\
|
||||
MUL2(a0, b0, b1);\
|
||||
b5 = _mm_xor_si128(b5, a0);\
|
||||
b5 = v128_xor(b5, a0);\
|
||||
MUL2(a1, b0, b1);\
|
||||
b6 = _mm_xor_si128(b6, a1);\
|
||||
b6 = v128_xor(b6, a1);\
|
||||
MUL2(a2, b0, b1);\
|
||||
b7 = _mm_xor_si128(b7, a2);\
|
||||
b7 = v128_xor(b7, a2);\
|
||||
MUL2(a5, b0, b1);\
|
||||
b2 = _mm_xor_si128(b2, a5);\
|
||||
b2 = v128_xor(b2, a5);\
|
||||
MUL2(a6, b0, b1);\
|
||||
b3 = _mm_xor_si128(b3, a6);\
|
||||
b3 = v128_xor(b3, a6);\
|
||||
MUL2(a7, b0, b1);\
|
||||
b4 = _mm_xor_si128(b4, a7);\
|
||||
b4 = v128_xor(b4, a7);\
|
||||
MUL2(a3, b0, b1);\
|
||||
MUL2(a4, b0, b1);\
|
||||
b0 = TEMP0;\
|
||||
b1 = TEMP1;\
|
||||
b0 = _mm_xor_si128(b0, a3);\
|
||||
b1 = _mm_xor_si128(b1, a4);\
|
||||
b0 = v128_xor(b0, a3);\
|
||||
b1 = v128_xor(b1, a4);\
|
||||
}/*MixBytes*/
|
||||
|
||||
#else
|
||||
@@ -174,96 +186,96 @@ static const __m128i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e };
|
||||
/* t_i = a_i + a_{i+1} */\
|
||||
b6 = a0;\
|
||||
b7 = a1;\
|
||||
a0 = _mm_xor_si128(a0, a1);\
|
||||
a0 = v128_xor(a0, a1);\
|
||||
b0 = a2;\
|
||||
a1 = _mm_xor_si128(a1, a2);\
|
||||
a1 = v128_xor(a1, a2);\
|
||||
b1 = a3;\
|
||||
a2 = _mm_xor_si128(a2, a3);\
|
||||
a2 = v128_xor(a2, a3);\
|
||||
b2 = a4;\
|
||||
a3 = _mm_xor_si128(a3, a4);\
|
||||
a3 = v128_xor(a3, a4);\
|
||||
b3 = a5;\
|
||||
a4 = _mm_xor_si128(a4, a5);\
|
||||
a4 = v128_xor(a4, a5);\
|
||||
b4 = a6;\
|
||||
a5 = _mm_xor_si128(a5, a6);\
|
||||
a5 = v128_xor(a5, a6);\
|
||||
b5 = a7;\
|
||||
a6 = _mm_xor_si128(a6, a7);\
|
||||
a7 = _mm_xor_si128(a7, b6);\
|
||||
a6 = v128_xor(a6, a7);\
|
||||
a7 = v128_xor(a7, b6);\
|
||||
\
|
||||
/* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
|
||||
b0 = _mm_xor_si128(b0, a4);\
|
||||
b6 = _mm_xor_si128(b6, a4);\
|
||||
b1 = _mm_xor_si128(b1, a5);\
|
||||
b7 = _mm_xor_si128(b7, a5);\
|
||||
b2 = _mm_xor_si128(b2, a6);\
|
||||
b0 = _mm_xor_si128(b0, a6);\
|
||||
b0 = v128_xor(b0, a4);\
|
||||
b6 = v128_xor(b6, a4);\
|
||||
b1 = v128_xor(b1, a5);\
|
||||
b7 = v128_xor(b7, a5);\
|
||||
b2 = v128_xor(b2, a6);\
|
||||
b0 = v128_xor(b0, a6);\
|
||||
/* spill values y_4, y_5 to memory */\
|
||||
TEMP0 = b0;\
|
||||
b3 = _mm_xor_si128(b3, a7);\
|
||||
b1 = _mm_xor_si128(b1, a7);\
|
||||
b3 = v128_xor(b3, a7);\
|
||||
b1 = v128_xor(b1, a7);\
|
||||
TEMP1 = b1;\
|
||||
b4 = _mm_xor_si128(b4, a0);\
|
||||
b2 = _mm_xor_si128(b2, a0);\
|
||||
b4 = v128_xor(b4, a0);\
|
||||
b2 = v128_xor(b2, a0);\
|
||||
/* save values t0, t1, t2 to xmm8, xmm9 and memory */\
|
||||
b0 = a0;\
|
||||
b5 = _mm_xor_si128(b5, a1);\
|
||||
b3 = _mm_xor_si128(b3, a1);\
|
||||
b5 = v128_xor(b5, a1);\
|
||||
b3 = v128_xor(b3, a1);\
|
||||
b1 = a1;\
|
||||
b6 = _mm_xor_si128(b6, a2);\
|
||||
b4 = _mm_xor_si128(b4, a2);\
|
||||
b6 = v128_xor(b6, a2);\
|
||||
b4 = v128_xor(b4, a2);\
|
||||
TEMP2 = a2;\
|
||||
b7 = _mm_xor_si128(b7, a3);\
|
||||
b5 = _mm_xor_si128(b5, a3);\
|
||||
b7 = v128_xor(b7, a3);\
|
||||
b5 = v128_xor(b5, a3);\
|
||||
\
|
||||
/* compute x_i = t_i + t_{i+3} */\
|
||||
a0 = _mm_xor_si128(a0, a3);\
|
||||
a1 = _mm_xor_si128(a1, a4);\
|
||||
a2 = _mm_xor_si128(a2, a5);\
|
||||
a3 = _mm_xor_si128(a3, a6);\
|
||||
a4 = _mm_xor_si128(a4, a7);\
|
||||
a5 = _mm_xor_si128(a5, b0);\
|
||||
a6 = _mm_xor_si128(a6, b1);\
|
||||
a7 = _mm_xor_si128(a7, TEMP2);\
|
||||
a0 = v128_xor(a0, a3);\
|
||||
a1 = v128_xor(a1, a4);\
|
||||
a2 = v128_xor(a2, a5);\
|
||||
a3 = v128_xor(a3, a6);\
|
||||
a4 = v128_xor(a4, a7);\
|
||||
a5 = v128_xor(a5, b0);\
|
||||
a6 = v128_xor(a6, b1);\
|
||||
a7 = v128_xor(a7, TEMP2);\
|
||||
\
|
||||
/* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
|
||||
/* compute w_i : add y_{i+4} */\
|
||||
b1 = _mm_set1_epi64x( 0x1b1b1b1b1b1b1b1b );\
|
||||
b1 = v128_64( 0x1b1b1b1b1b1b1b1b );\
|
||||
MUL2(a0, b0, b1);\
|
||||
a0 = _mm_xor_si128(a0, TEMP0);\
|
||||
a0 = v128_xor(a0, TEMP0);\
|
||||
MUL2(a1, b0, b1);\
|
||||
a1 = _mm_xor_si128(a1, TEMP1);\
|
||||
a1 = v128_xor(a1, TEMP1);\
|
||||
MUL2(a2, b0, b1);\
|
||||
a2 = _mm_xor_si128(a2, b2);\
|
||||
a2 = v128_xor(a2, b2);\
|
||||
MUL2(a3, b0, b1);\
|
||||
a3 = _mm_xor_si128(a3, b3);\
|
||||
a3 = v128_xor(a3, b3);\
|
||||
MUL2(a4, b0, b1);\
|
||||
a4 = _mm_xor_si128(a4, b4);\
|
||||
a4 = v128_xor(a4, b4);\
|
||||
MUL2(a5, b0, b1);\
|
||||
a5 = _mm_xor_si128(a5, b5);\
|
||||
a5 = v128_xor(a5, b5);\
|
||||
MUL2(a6, b0, b1);\
|
||||
a6 = _mm_xor_si128(a6, b6);\
|
||||
a6 = v128_xor(a6, b6);\
|
||||
MUL2(a7, b0, b1);\
|
||||
a7 = _mm_xor_si128(a7, b7);\
|
||||
a7 = v128_xor(a7, b7);\
|
||||
\
|
||||
/* compute v_i : double w_i */\
|
||||
/* add to y_4 y_5 .. v3, v4, ... */\
|
||||
MUL2(a0, b0, b1);\
|
||||
b5 = _mm_xor_si128(b5, a0);\
|
||||
b5 = v128_xor(b5, a0);\
|
||||
MUL2(a1, b0, b1);\
|
||||
b6 = _mm_xor_si128(b6, a1);\
|
||||
b6 = v128_xor(b6, a1);\
|
||||
MUL2(a2, b0, b1);\
|
||||
b7 = _mm_xor_si128(b7, a2);\
|
||||
b7 = v128_xor(b7, a2);\
|
||||
MUL2(a5, b0, b1);\
|
||||
b2 = _mm_xor_si128(b2, a5);\
|
||||
b2 = v128_xor(b2, a5);\
|
||||
MUL2(a6, b0, b1);\
|
||||
b3 = _mm_xor_si128(b3, a6);\
|
||||
b3 = v128_xor(b3, a6);\
|
||||
MUL2(a7, b0, b1);\
|
||||
b4 = _mm_xor_si128(b4, a7);\
|
||||
b4 = v128_xor(b4, a7);\
|
||||
MUL2(a3, b0, b1);\
|
||||
MUL2(a4, b0, b1);\
|
||||
b0 = TEMP0;\
|
||||
b1 = TEMP1;\
|
||||
b0 = _mm_xor_si128(b0, a3);\
|
||||
b1 = _mm_xor_si128(b1, a4);\
|
||||
b0 = v128_xor(b0, a3);\
|
||||
b1 = v128_xor(b1, a4);\
|
||||
}/*MixBytes*/
|
||||
|
||||
#endif
|
||||
@@ -275,34 +287,34 @@ static const __m128i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e };
|
||||
*/
|
||||
#define ROUND(i, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
|
||||
/* AddRoundConstant */\
|
||||
b1 = _mm_set_epi64x( 0xffffffffffffffff, 0 ); \
|
||||
a0 = _mm_xor_si128( a0, casti_m128i( round_const_l0, i ) ); \
|
||||
a1 = _mm_xor_si128( a1, b1 ); \
|
||||
a2 = _mm_xor_si128( a2, b1 ); \
|
||||
a3 = _mm_xor_si128( a3, b1 ); \
|
||||
a4 = _mm_xor_si128( a4, b1 ); \
|
||||
a5 = _mm_xor_si128( a5, b1 ); \
|
||||
a6 = _mm_xor_si128( a6, b1 ); \
|
||||
a7 = _mm_xor_si128( a7, casti_m128i( round_const_l7, i ) ); \
|
||||
b1 = v128_set64( 0xffffffffffffffff, 0 ); \
|
||||
a0 = v128_xor( a0, casti_v128( round_const_l0, i ) ); \
|
||||
a1 = v128_xor( a1, b1 ); \
|
||||
a2 = v128_xor( a2, b1 ); \
|
||||
a3 = v128_xor( a3, b1 ); \
|
||||
a4 = v128_xor( a4, b1 ); \
|
||||
a5 = v128_xor( a5, b1 ); \
|
||||
a6 = v128_xor( a6, b1 ); \
|
||||
a7 = v128_xor( a7, casti_v128( round_const_l7, i ) ); \
|
||||
\
|
||||
/* ShiftBytes + SubBytes (interleaved) */\
|
||||
b0 = _mm_xor_si128(b0, b0);\
|
||||
a0 = _mm_shuffle_epi8( a0, SUBSH_MASK0 ); \
|
||||
a0 = _mm_aesenclast_si128( a0, b0 );\
|
||||
a1 = _mm_shuffle_epi8( a1, SUBSH_MASK1 ); \
|
||||
a1 = _mm_aesenclast_si128( a1, b0 );\
|
||||
a2 = _mm_shuffle_epi8( a2, SUBSH_MASK2 ); \
|
||||
a2 = _mm_aesenclast_si128( a2, b0 );\
|
||||
a3 = _mm_shuffle_epi8( a3, SUBSH_MASK3 ); \
|
||||
a3 = _mm_aesenclast_si128( a3, b0 );\
|
||||
a4 = _mm_shuffle_epi8( a4, SUBSH_MASK4 ); \
|
||||
a4 = _mm_aesenclast_si128( a4, b0 );\
|
||||
a5 = _mm_shuffle_epi8( a5, SUBSH_MASK5 ); \
|
||||
a5 = _mm_aesenclast_si128( a5, b0 );\
|
||||
a6 = _mm_shuffle_epi8( a6, SUBSH_MASK6 ); \
|
||||
a6 = _mm_aesenclast_si128( a6, b0 );\
|
||||
a7 = _mm_shuffle_epi8( a7, SUBSH_MASK7 ); \
|
||||
a7 = _mm_aesenclast_si128( a7, b0 );\
|
||||
b0 = v128_xor(b0, b0);\
|
||||
a0 = v128_shuffle8( a0, SUBSH_MASK0 ); \
|
||||
a0 = v128_aesenclast( a0, b0 );\
|
||||
a1 = v128_shuffle8( a1, SUBSH_MASK1 ); \
|
||||
a1 = v128_aesenclast( a1, b0 );\
|
||||
a2 = v128_shuffle8( a2, SUBSH_MASK2 ); \
|
||||
a2 = v128_aesenclast( a2, b0 );\
|
||||
a3 = v128_shuffle8( a3, SUBSH_MASK3 ); \
|
||||
a3 = v128_aesenclast( a3, b0 );\
|
||||
a4 = v128_shuffle8( a4, SUBSH_MASK4 ); \
|
||||
a4 = v128_aesenclast( a4, b0 );\
|
||||
a5 = v128_shuffle8( a5, SUBSH_MASK5 ); \
|
||||
a5 = v128_aesenclast( a5, b0 );\
|
||||
a6 = v128_shuffle8( a6, SUBSH_MASK6 ); \
|
||||
a6 = v128_aesenclast( a6, b0 );\
|
||||
a7 = v128_shuffle8( a7, SUBSH_MASK7 ); \
|
||||
a7 = v128_aesenclast( a7, b0 );\
|
||||
\
|
||||
/* MixBytes */\
|
||||
MixBytes(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7);\
|
||||
@@ -334,31 +346,31 @@ static const __m128i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e };
|
||||
#define Matrix_Transpose_A(i0, i1, i2, i3, o1, o2, o3, t0){\
|
||||
t0 = TRANSP_MASK; \
|
||||
\
|
||||
i0 = _mm_shuffle_epi8(i0, t0);\
|
||||
i1 = _mm_shuffle_epi8(i1, t0);\
|
||||
i2 = _mm_shuffle_epi8(i2, t0);\
|
||||
i3 = _mm_shuffle_epi8(i3, t0);\
|
||||
i0 = v128_shuffle8(i0, t0);\
|
||||
i1 = v128_shuffle8(i1, t0);\
|
||||
i2 = v128_shuffle8(i2, t0);\
|
||||
i3 = v128_shuffle8(i3, t0);\
|
||||
\
|
||||
o1 = i0;\
|
||||
t0 = i2;\
|
||||
\
|
||||
i0 = _mm_unpacklo_epi16(i0, i1);\
|
||||
o1 = _mm_unpackhi_epi16(o1, i1);\
|
||||
i2 = _mm_unpacklo_epi16(i2, i3);\
|
||||
t0 = _mm_unpackhi_epi16(t0, i3);\
|
||||
i0 = v128_unpacklo16(i0, i1);\
|
||||
o1 = v128_unpackhi16(o1, i1);\
|
||||
i2 = v128_unpacklo16(i2, i3);\
|
||||
t0 = v128_unpackhi16(t0, i3);\
|
||||
\
|
||||
i0 = _mm_shuffle_epi32(i0, 216);\
|
||||
o1 = _mm_shuffle_epi32(o1, 216);\
|
||||
i2 = _mm_shuffle_epi32(i2, 216);\
|
||||
t0 = _mm_shuffle_epi32(t0, 216);\
|
||||
i0 = gr_shuffle32( i0 ); \
|
||||
o1 = gr_shuffle32( o1 ); \
|
||||
i2 = gr_shuffle32( i2 ); \
|
||||
t0 = gr_shuffle32( t0 ); \
|
||||
\
|
||||
o2 = i0;\
|
||||
o3 = o1;\
|
||||
\
|
||||
i0 = _mm_unpacklo_epi32(i0, i2);\
|
||||
o1 = _mm_unpacklo_epi32(o1, t0);\
|
||||
o2 = _mm_unpackhi_epi32(o2, i2);\
|
||||
o3 = _mm_unpackhi_epi32(o3, t0);\
|
||||
i0 = v128_unpacklo32(i0, i2);\
|
||||
o1 = v128_unpacklo32(o1, t0);\
|
||||
o2 = v128_unpackhi32(o2, i2);\
|
||||
o3 = v128_unpackhi32(o3, t0);\
|
||||
}/**/
|
||||
|
||||
/* Matrix Transpose Step 2
|
||||
@@ -376,19 +388,19 @@ static const __m128i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e };
|
||||
#define Matrix_Transpose_B(i0, i1, i2, i3, i4, i5, i6, i7, o1, o2, o3, o4, o5, o6, o7){\
|
||||
o1 = i0;\
|
||||
o2 = i1;\
|
||||
i0 = _mm_unpacklo_epi64(i0, i4);\
|
||||
o1 = _mm_unpackhi_epi64(o1, i4);\
|
||||
i0 = v128_unpacklo64(i0, i4);\
|
||||
o1 = v128_unpackhi64(o1, i4);\
|
||||
o3 = i1;\
|
||||
o4 = i2;\
|
||||
o2 = _mm_unpacklo_epi64(o2, i5);\
|
||||
o3 = _mm_unpackhi_epi64(o3, i5);\
|
||||
o2 = v128_unpacklo64(o2, i5);\
|
||||
o3 = v128_unpackhi64(o3, i5);\
|
||||
o5 = i2;\
|
||||
o6 = i3;\
|
||||
o4 = _mm_unpacklo_epi64(o4, i6);\
|
||||
o5 = _mm_unpackhi_epi64(o5, i6);\
|
||||
o4 = v128_unpacklo64(o4, i6);\
|
||||
o5 = v128_unpackhi64(o5, i6);\
|
||||
o7 = i3;\
|
||||
o6 = _mm_unpacklo_epi64(o6, i7);\
|
||||
o7 = _mm_unpackhi_epi64(o7, i7);\
|
||||
o6 = v128_unpacklo64(o6, i7);\
|
||||
o7 = v128_unpackhi64(o7, i7);\
|
||||
}/**/
|
||||
|
||||
/* Matrix Transpose Inverse Step 2
|
||||
@@ -399,17 +411,17 @@ static const __m128i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e };
|
||||
*/
|
||||
#define Matrix_Transpose_B_INV(i0, i1, i2, i3, i4, i5, i6, i7, o0, o1, o2, o3){\
|
||||
o0 = i0;\
|
||||
i0 = _mm_unpacklo_epi64(i0, i1);\
|
||||
o0 = _mm_unpackhi_epi64(o0, i1);\
|
||||
i0 = v128_unpacklo64(i0, i1);\
|
||||
o0 = v128_unpackhi64(o0, i1);\
|
||||
o1 = i2;\
|
||||
i2 = _mm_unpacklo_epi64(i2, i3);\
|
||||
o1 = _mm_unpackhi_epi64(o1, i3);\
|
||||
i2 = v128_unpacklo64(i2, i3);\
|
||||
o1 = v128_unpackhi64(o1, i3);\
|
||||
o2 = i4;\
|
||||
i4 = _mm_unpacklo_epi64(i4, i5);\
|
||||
o2 = _mm_unpackhi_epi64(o2, i5);\
i4 = v128_unpacklo64(i4, i5);\
o2 = v128_unpackhi64(o2, i5);\
o3 = i6;\
i6 = _mm_unpacklo_epi64(i6, i7);\
o3 = _mm_unpackhi_epi64(o3, i7);\
i6 = v128_unpacklo64(i6, i7);\
o3 = v128_unpackhi64(o3, i7);\
}/**/

/* Matrix Transpose Output Step 2
@@ -419,19 +431,19 @@ static const __m128i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e };
 * outputs: (i0-7) = (0|S)
 */
#define Matrix_Transpose_O_B(i0, i1, i2, i3, i4, i5, i6, i7, t0){\
t0 = _mm_xor_si128(t0, t0);\
t0 = v128_xor(t0, t0);\
i1 = i0;\
i3 = i2;\
i5 = i4;\
i7 = i6;\
i0 = _mm_unpacklo_epi64(i0, t0);\
i1 = _mm_unpackhi_epi64(i1, t0);\
i2 = _mm_unpacklo_epi64(i2, t0);\
i3 = _mm_unpackhi_epi64(i3, t0);\
i4 = _mm_unpacklo_epi64(i4, t0);\
i5 = _mm_unpackhi_epi64(i5, t0);\
i6 = _mm_unpacklo_epi64(i6, t0);\
i7 = _mm_unpackhi_epi64(i7, t0);\
i0 = v128_unpacklo64(i0, t0);\
i1 = v128_unpackhi64(i1, t0);\
i2 = v128_unpacklo64(i2, t0);\
i3 = v128_unpackhi64(i3, t0);\
i4 = v128_unpacklo64(i4, t0);\
i5 = v128_unpackhi64(i5, t0);\
i6 = v128_unpacklo64(i6, t0);\
i7 = v128_unpackhi64(i7, t0);\
}/**/

/* Matrix Transpose Output Inverse Step 2
@@ -441,17 +453,17 @@ static const __m128i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e };
 * outputs: (i0, i2, i4, i6) = S
 */
#define Matrix_Transpose_O_B_INV(i0, i1, i2, i3, i4, i5, i6, i7){\
i0 = _mm_unpacklo_epi64(i0, i1);\
i2 = _mm_unpacklo_epi64(i2, i3);\
i4 = _mm_unpacklo_epi64(i4, i5);\
i6 = _mm_unpacklo_epi64(i6, i7);\
i0 = v128_unpacklo64(i0, i1);\
i2 = v128_unpacklo64(i2, i3);\
i4 = v128_unpacklo64(i4, i5);\
i6 = v128_unpacklo64(i6, i7);\
}/**/


void INIT256( __m128i* chaining )
void INIT256( v128_t* chaining )
{
static __m128i xmm0, /*xmm1,*/ xmm2, /*xmm3, xmm4, xmm5,*/ xmm6, xmm7;
static __m128i /*xmm8, xmm9, xmm10, xmm11,*/ xmm12, xmm13, xmm14, xmm15;
static v128_t xmm0, /*xmm1,*/ xmm2, /*xmm3, xmm4, xmm5,*/ xmm6, xmm7;
static v128_t /*xmm8, xmm9, xmm10, xmm11,*/ xmm12, xmm13, xmm14, xmm15;

/* load IV into registers xmm12 - xmm15 */
xmm12 = chaining[0];
@@ -470,13 +482,13 @@ void INIT256( __m128i* chaining )
chaining[3] = xmm7;
}

void TF512( __m128i* chaining, __m128i* message )
void TF512( v128_t* chaining, v128_t* message )
{
static __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static __m128i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
static __m128i TEMP0;
static __m128i TEMP1;
static __m128i TEMP2;
static v128_t xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static v128_t xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
static v128_t TEMP0;
static v128_t TEMP1;
static v128_t TEMP2;

#ifdef IACA_TRACE
IACA_START;
@@ -501,10 +513,10 @@ void TF512( __m128i* chaining, __m128i* message )

/* xor message to CV get input of P */
/* result: CV+M in xmm8, xmm0, xmm4, xmm5 */
xmm8 = _mm_xor_si128(xmm8, xmm12);
xmm0 = _mm_xor_si128(xmm0, xmm2);
xmm4 = _mm_xor_si128(xmm4, xmm6);
xmm5 = _mm_xor_si128(xmm5, xmm7);
xmm8 = v128_xor(xmm8, xmm12);
xmm0 = v128_xor(xmm0, xmm2);
xmm4 = v128_xor(xmm4, xmm6);
xmm5 = v128_xor(xmm5, xmm7);

/* there are now 2 rows of the Groestl state (P and Q) in each xmm register */
/* unpack to get 1 row of P (64 bit) and Q (64 bit) into one xmm register */
@@ -519,17 +531,17 @@ void TF512( __m128i* chaining, __m128i* message )

/* xor output of P and Q */
/* result: P(CV+M)+Q(M) in xmm0...xmm3 */
xmm0 = _mm_xor_si128(xmm0, xmm8);
xmm1 = _mm_xor_si128(xmm1, xmm10);
xmm2 = _mm_xor_si128(xmm2, xmm12);
xmm3 = _mm_xor_si128(xmm3, xmm14);
xmm0 = v128_xor(xmm0, xmm8);
xmm1 = v128_xor(xmm1, xmm10);
xmm2 = v128_xor(xmm2, xmm12);
xmm3 = v128_xor(xmm3, xmm14);

/* xor CV (feed-forward) */
/* result: P(CV+M)+Q(M)+CV in xmm0...xmm3 */
xmm0 = _mm_xor_si128(xmm0, (chaining[0]));
xmm1 = _mm_xor_si128(xmm1, (chaining[1]));
xmm2 = _mm_xor_si128(xmm2, (chaining[2]));
xmm3 = _mm_xor_si128(xmm3, (chaining[3]));
xmm0 = v128_xor(xmm0, (chaining[0]));
xmm1 = v128_xor(xmm1, (chaining[1]));
xmm2 = v128_xor(xmm2, (chaining[2]));
xmm3 = v128_xor(xmm3, (chaining[3]));

/* store CV */
chaining[0] = xmm0;
@@ -543,13 +555,13 @@ void TF512( __m128i* chaining, __m128i* message )
return;
}

void OF512( __m128i* chaining )
void OF512( v128_t* chaining )
{
static __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static __m128i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
static __m128i TEMP0;
static __m128i TEMP1;
static __m128i TEMP2;
static v128_t xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static v128_t xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
static v128_t TEMP0;
static v128_t TEMP1;
static v128_t TEMP2;

/* load CV into registers xmm8, xmm10, xmm12, xmm14 */
xmm8 = chaining[0];
@@ -572,10 +584,10 @@ void OF512( __m128i* chaining )

/* xor CV to P output (feed-forward) */
/* result: P(CV)+CV in xmm8, xmm10, xmm12, xmm14 */
xmm8 = _mm_xor_si128(xmm8, (chaining[0]));
xmm10 = _mm_xor_si128(xmm10, (chaining[1]));
xmm12 = _mm_xor_si128(xmm12, (chaining[2]));
xmm14 = _mm_xor_si128(xmm14, (chaining[3]));
xmm8 = v128_xor(xmm8, (chaining[0]));
xmm10 = v128_xor(xmm10, (chaining[1]));
xmm12 = v128_xor(xmm12, (chaining[2]));
xmm14 = v128_xor(xmm14, (chaining[3]));

/* transform state back from row ordering into column ordering */
/* result: final hash value in xmm9, xmm11 */

@@ -6,7 +6,7 @@
 * This code is placed in the public domain
 */

// Optimized for hash and data lengths that are integral multiples of __m128i
// Optimized for hash and data lengths that are integral multiples of v128_t


#include <memory.h>
@@ -14,11 +14,11 @@
#include "miner.h"
#include "simd-utils.h"

#ifdef __AES__
#if defined(__AES__) || defined(__ARM_FEATURE_AES)

#include "groestl-intr-aes.h"

HashReturn_gr init_groestl( hashState_groestl* ctx, int hashlen )
int init_groestl( hashState_groestl* ctx, int hashlen )
{
int i;

@@ -26,52 +26,40 @@ HashReturn_gr init_groestl( hashState_groestl* ctx, int hashlen )

for ( i = 0; i < SIZE512; i++ )
{
ctx->chaining[i] = _mm_setzero_si128();
ctx->buffer[i] = _mm_setzero_si128();
ctx->chaining[i] = v128_zero;
ctx->buffer[i] = v128_zero;
}

// The only non-zero in the IV is len. It can be hard coded.
ctx->chaining[ 6 ] = _mm_set_epi64x( 0x0200000000000000, 0 );
ctx->chaining[ 6 ] = v128_set64( 0x0200000000000000, 0 );

ctx->buf_ptr = 0;
ctx->rem_ptr = 0;

return SUCCESS_GR;
return 0;
}

HashReturn_gr reinit_groestl( hashState_groestl* ctx )
int reinit_groestl( hashState_groestl* ctx )
{
int i;

for ( i = 0; i < SIZE512; i++ )
{
ctx->chaining[i] = _mm_setzero_si128();
ctx->buffer[i] = _mm_setzero_si128();
ctx->chaining[i] = v128_zero;
ctx->buffer[i] = v128_zero;
}
ctx->chaining[ 6 ] = _mm_set_epi64x( 0x0200000000000000, 0 );
ctx->chaining[ 6 ] = v128_set64( 0x0200000000000000, 0 );
ctx->buf_ptr = 0;
ctx->rem_ptr = 0;

return SUCCESS_GR;
return 0;
}
//// midstate is broken
// To use midstate:
// 1. midstate must process all full blocks.
// 2. tail must be less than a full block and may not straddle a
// block boundary.
// 3. midstate and tail each must be multiples of 128 bits.
// 4. For best performance midstate length is a multiple of block size.
// 5. Midstate gives a reduced benefit compared to a full hash if the total
// (midstate + tail) is less than 1 block.
// This, unfortunately, is the case with all current users.
// 6. The more full blocks, the bigger the gain.
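// Usage sketch for midstate precalc (example only; header, hash and the
// 64/16 byte split are illustrative, chosen to satisfy rules 1-3 above):
//
//    hashState_groestl mid;
//    init_groestl( &mid, 64 );                     // 64 byte (512 bit) output
//    update_groestl( &mid, header, 64*8 );         // lengths are in bits
//    // ... then, per nonce:
//    hashState_groestl ctx = mid;                  // copy the midstate
//    update_and_final_groestl( &ctx, hash, header + 64, 16*8 );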

// use only for midstate precalc
HashReturn_gr update_groestl( hashState_groestl* ctx, const void* input,
DataLength_gr databitlen )
int update_groestl( hashState_groestl* ctx, const void* input,
int databitlen )
{
__m128i* in = (__m128i*)input;
const int len = (int)databitlen / 128; // bits to __m128i
v128_t* in = (v128_t*)input;
const int len = (int)databitlen / 128; // bits to v128_t
const int blocks = len / SIZE512; // __M128i to blocks
int rem = ctx->rem_ptr;
int i;
@@ -92,16 +80,16 @@ HashReturn_gr update_groestl( hashState_groestl* ctx, const void* input,
// adjust rem_ptr for possible new data
ctx->rem_ptr += i;

return SUCCESS_GR;
return 0;
}

// deprecated do not use
HashReturn_gr final_groestl( hashState_groestl* ctx, void* output )
int final_groestl( hashState_groestl* ctx, void* output )
{
const int len = (int)ctx->databitlen / 128; // bits to __m128i
const int len = (int)ctx->databitlen / 128; // bits to v128_t
const uint64_t blocks = ctx->blk_count + 1; // adjust for final block
const int rem_ptr = ctx->rem_ptr; // end of data start of padding
const int hashlen_m128i = ctx->hashlen / 16; // bytes to __m128i
const int hashlen_m128i = ctx->hashlen / 16; // bytes to v128_t
const int hash_offset = SIZE512 - hashlen_m128i; // where in buffer
int i;

@@ -111,18 +99,18 @@ HashReturn_gr final_groestl( hashState_groestl* ctx, void* output )
if ( rem_ptr == len - 1 )
{
// only 128 bits left in buffer, all padding at once
ctx->buffer[rem_ptr] = _mm_set_epi64x( blocks << 56, 0x80 );
ctx->buffer[rem_ptr] = v128_set64( blocks << 56, 0x80 );
}
else
{
// add first padding
ctx->buffer[rem_ptr] = _mm_set_epi64x( 0, 0x80 );
ctx->buffer[rem_ptr] = v128_set64( 0, 0x80 );
// add zero padding
for ( i = rem_ptr + 1; i < SIZE512 - 1; i++ )
ctx->buffer[i] = _mm_setzero_si128();
ctx->buffer[i] = v128_zero;

// add length padding, second last byte is zero unless blocks > 255
ctx->buffer[i] = _mm_set_epi64x( blocks << 56, 0 );
ctx->buffer[i] = v128_set64( blocks << 56, 0 );
}
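// Note on the layout above: Groestl padding is a single 0x80 byte, zeros,
// then the 64 bit big endian block count in the last 8 bytes of the final
// block. With little endian lanes, v128_set64( blocks << 56, 0 ) places the
// low byte of the count in the very last byte of the buffer, which is why
// this shortcut only covers block counts below 256.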

// digest final padding block and do output transform
@@ -131,13 +119,13 @@ HashReturn_gr final_groestl( hashState_groestl* ctx, void* output )

// store hash result in output
for ( i = 0; i < hashlen_m128i; i++ )
casti_m128i( output, i ) = ctx->chaining[ hash_offset + i];
casti_v128( output, i ) = ctx->chaining[ hash_offset + i];

return SUCCESS_GR;
return 0;
}

int groestl512_full( hashState_groestl* ctx, void* output,
const void* input, uint64_t databitlen )
int groestl512( hashState_groestl* ctx, void* output, const void* input,
uint64_t databitlen )
{

int i;
@@ -145,19 +133,19 @@ int groestl512_full( hashState_groestl* ctx, void* output,

for ( i = 0; i < SIZE512; i++ )
{
ctx->chaining[i] = _mm_setzero_si128();
ctx->buffer[i] = _mm_setzero_si128();
ctx->chaining[i] = v128_zero;
ctx->buffer[i] = v128_zero;
}
ctx->chaining[ 6 ] = _mm_set_epi64x( 0x0200000000000000, 0 );
ctx->chaining[ 6 ] = v128_set64( 0x0200000000000000, 0 );
ctx->buf_ptr = 0;

// --- update ---

const int len = (int)databitlen / 128;
const int hashlen_m128i = ctx->hashlen / 16; // bytes to __m128i
const int hashlen_m128i = ctx->hashlen / 16; // bytes to v128_t
const int hash_offset = SIZE512 - hashlen_m128i;
uint64_t blocks = len / SIZE512;
__m128i* in = (__m128i*)input;
v128_t* in = (v128_t*)input;

// digest any full blocks, process directly from input
for ( i = 0; i < blocks; i++ )
@@ -177,18 +165,18 @@ int groestl512_full( hashState_groestl* ctx, void* output,
if ( i == len -1 )
{
// only 128 bits left in buffer, all padding at once
ctx->buffer[i] = _mm_set_epi64x( blocks << 56, 0x80 );
ctx->buffer[i] = v128_set64( blocks << 56, 0x80 );
}
else
{
// add first padding
ctx->buffer[i] = _mm_set_epi64x( 0, 0x80 );
ctx->buffer[i] = v128_set64( 0, 0x80 );
// add zero padding
for ( i += 1; i < SIZE512 - 1; i++ )
ctx->buffer[i] = _mm_setzero_si128();
ctx->buffer[i] = v128_zero;

// add length padding, second last byte is zero unless blocks > 255
ctx->buffer[i] = _mm_set_epi64x( blocks << 56, 0 );
ctx->buffer[i] = v128_set64( blocks << 56, 0 );
}

// digest final padding block and do output transform
@@ -197,21 +185,21 @@ int groestl512_full( hashState_groestl* ctx, void* output,

// store hash result in output
for ( i = 0; i < hashlen_m128i; i++ )
casti_m128i( output, i ) = ctx->chaining[ hash_offset + i ];
casti_v128( output, i ) = ctx->chaining[ hash_offset + i ];

return 0;
}
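// One-shot usage sketch (example only; hash and header are illustrative and
// the caller is assumed to have set ctx.hashlen, in bytes, beforehand):
//
//    hashState_groestl ctx;
//    ctx.hashlen = 64;                          // 512 bit digest
//    groestl512( &ctx, hash, header, 80*8 );    // 80 byte header, length in bits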


HashReturn_gr update_and_final_groestl( hashState_groestl* ctx, void* output,
const void* input, DataLength_gr databitlen )
int update_and_final_groestl( hashState_groestl* ctx, void* output,
const void* input, int databitlen )
{
const int len = (int)databitlen / 128;
const int hashlen_m128i = ctx->hashlen / 16; // bytes to __m128i
const int hashlen_m128i = ctx->hashlen / 16; // bytes to v128_t
const int hash_offset = SIZE512 - hashlen_m128i;
int rem = ctx->rem_ptr;
uint64_t blocks = len / SIZE512;
__m128i* in = (__m128i*)input;
v128_t* in = (v128_t*)input;
int i;

// --- update ---
@@ -234,18 +222,18 @@ HashReturn_gr update_and_final_groestl( hashState_groestl* ctx, void* output,
if ( i == len -1 )
{
// only 128 bits left in buffer, all padding at once
ctx->buffer[i] = _mm_set_epi64x( blocks << 56, 0x80 );
ctx->buffer[i] = v128_set64( blocks << 56, 0x80 );
}
else
{
// add first padding
ctx->buffer[i] = _mm_set_epi64x( 0, 0x80 );
ctx->buffer[i] = v128_set64( 0, 0x80 );
// add zero padding
for ( i += 1; i < SIZE512 - 1; i++ )
ctx->buffer[i] = _mm_setzero_si128();
ctx->buffer[i] = v128_zero;

// add length padding, second last byte is zero unless blocks > 255
ctx->buffer[i] = _mm_set_epi64x( blocks << 56, 0 );
ctx->buffer[i] = v128_set64( blocks << 56, 0 );
}

// digest final padding block and do output transform
@@ -254,17 +242,16 @@ HashReturn_gr update_and_final_groestl( hashState_groestl* ctx, void* output,

// store hash result in output
for ( i = 0; i < hashlen_m128i; i++ )
casti_m128i( output, i ) = ctx->chaining[ hash_offset + i ];
casti_v128( output, i ) = ctx->chaining[ hash_offset + i ];

return SUCCESS_GR;
return 0;
}

/* hash bit sequence */
HashReturn_gr hash_groestl(int hashbitlen,
const BitSequence_gr* data,
DataLength_gr databitlen,
BitSequence_gr* hashval) {
HashReturn_gr ret;
int hash_groestl( int hashbitlen, const BitSequence_gr* data, int databitlen,
uint8_t* hashval )
{
int ret;
hashState_groestl context;

/* initialise */
@@ -290,4 +277,5 @@ int crypto_hash(unsigned char *out, const unsigned char *in, unsigned long long
}
#endif

#endif

#endif /// SSSE3 or NEON

@@ -16,8 +16,6 @@
#include <stdlib.h>
#include "simd-utils.h"

#define LENGTH (512)

#include "brg_endian.h"
//#define NEED_UINT_64T
#include "compat/brg_types.h"
@@ -32,6 +30,8 @@
//#define ROUNDS512 (10)
#define ROUNDS1024 (14)

#define LENGTH 512

//#if LENGTH<=256
//#define COLS (COLS512)
//#define SIZE (SIZE512)
@@ -76,17 +76,17 @@ typedef struct {
} hashState_groestl;


HashReturn_gr init_groestl( hashState_groestl*, int );
int init_groestl( hashState_groestl*, int );

HashReturn_gr reinit_groestl( hashState_groestl* );
int reinit_groestl( hashState_groestl* );

HashReturn_gr update_groestl( hashState_groestl*, const void*,
DataLength_gr );
int update_groestl( hashState_groestl*, const void*, int );

HashReturn_gr final_groestl( hashState_groestl*, void* );
int final_groestl( hashState_groestl*, void* );

int update_and_final_groestl( hashState_groestl*, void*, const void*, int );
int groestl512( hashState_groestl*, void*, const void*, uint64_t );
#define groestl512_full groestl512

HashReturn_gr update_and_final_groestl( hashState_groestl*, void*,
const void*, DataLength_gr );
int groestl512_full( hashState_groestl*, void*, const void*, uint64_t );

#endif /* __hash_h */

@@ -11,12 +11,12 @@
#include "miner.h"
#include "simd-utils.h"

#ifdef __AES__
#if defined(__AES__) || defined(__ARM_FEATURE_AES)

#include "groestl256-intr-aes.h"

/* initialise context */
HashReturn_gr init_groestl256( hashState_groestl256* ctx, int hashlen )
int init_groestl256( hashState_groestl256* ctx, int hashlen )
{
int i;

@@ -24,42 +24,42 @@ HashReturn_gr init_groestl256( hashState_groestl256* ctx, int hashlen )

for ( i = 0; i < SIZE256; i++ )
{
ctx->chaining[i] = _mm_setzero_si128();
ctx->buffer[i] = _mm_setzero_si128();
ctx->chaining[i] = v128_zero;
ctx->buffer[i] = v128_zero;
}
((u64*)ctx->chaining)[COLS-1] = U64BIG((u64)LENGTH);
INIT256( ctx->chaining );
ctx->buf_ptr = 0;
ctx->rem_ptr = 0;

return SUCCESS_GR;
return 0;
}


HashReturn_gr reinit_groestl256(hashState_groestl256* ctx)
int reinit_groestl256(hashState_groestl256* ctx)
{
int i;

for ( i = 0; i < SIZE256; i++ )
{
ctx->chaining[i] = _mm_setzero_si128();
ctx->buffer[i] = _mm_setzero_si128();
ctx->chaining[i] = v128_zero;
ctx->buffer[i] = v128_zero;
}

ctx->chaining[ 3 ] = _mm_set_epi64x( 0, 0x0100000000000000 );
ctx->chaining[ 3 ] = v128_set64( 0, 0x0100000000000000 );

ctx->buf_ptr = 0;
ctx->rem_ptr = 0;

return SUCCESS_GR;
return 0;
}

// Use this only for midstate and never for cryptonight
HashReturn_gr update_groestl256( hashState_groestl256* ctx, const void* input,
DataLength_gr databitlen )
int update_groestl256( hashState_groestl256* ctx, const void* input,
int databitlen )
{
__m128i* in = (__m128i*)input;
const int len = (int)databitlen / 128; // bits to __m128i
v128_t* in = (v128_t*)input;
const int len = (int)databitlen / 128; // bits to v128_t
const int blocks = len / SIZE256; // __M128i to blocks
int rem = ctx->rem_ptr;
int i;
@@ -79,16 +79,16 @@ HashReturn_gr update_groestl256( hashState_groestl256* ctx, const void* input,
// adjust rem_ptr for new data
ctx->rem_ptr += i;

return SUCCESS_GR;
return 0;
}

// don't use this at all
HashReturn_gr final_groestl256( hashState_groestl256* ctx, void* output )
int final_groestl256( hashState_groestl256* ctx, void* output )
{
const int len = (int)ctx->databitlen / 128; // bits to __m128i
const int len = (int)ctx->databitlen / 128; // bits to v128_t
const int blocks = ctx->blk_count + 1; // adjust for final block
const int rem_ptr = ctx->rem_ptr; // end of data start of padding
const int hashlen_m128i = ctx->hashlen / 16; // bytes to __m128i
const int hashlen_m128i = ctx->hashlen / 16; // bytes to v128_t
const int hash_offset = SIZE256 - hashlen_m128i; // where in buffer
int i;

@@ -98,21 +98,20 @@ HashReturn_gr final_groestl256( hashState_groestl256* ctx, void* output )
if ( rem_ptr == len - 1 )
{
// all padding at once
ctx->buffer[rem_ptr] = _mm_set_epi8( blocks,0,0,0, 0,0,0,0,
0,0,0,0, 0,0,0,0x80 );
ctx->buffer[rem_ptr] = v128_set8( blocks,0,0,0, 0,0,0,0,
0,0,0,0, 0,0,0,0x80 );
}
else
{
// add first padding
ctx->buffer[rem_ptr] = _mm_set_epi8( 0,0,0,0, 0,0,0,0,
0,0,0,0, 0,0,0,0x80 );
ctx->buffer[rem_ptr] = v128_set8( 0,0,0,0, 0,0,0,0,
0,0,0,0, 0,0,0,0x80 );
// add zero padding
for ( i = rem_ptr + 1; i < SIZE256 - 1; i++ )
ctx->buffer[i] = _mm_setzero_si128();
ctx->buffer[i] = v128_zero;
// add length padding
// cheat since we know the block count is trivial, good if block < 256
ctx->buffer[i] = _mm_set_epi8( blocks,0,0,0, 0,0,0,0,
0,0,0,0, 0,0,0,0 );
ctx->buffer[i] = v128_set8( blocks,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 );
}

// digest final padding block and do output transform
@@ -121,20 +120,20 @@ HashReturn_gr final_groestl256( hashState_groestl256* ctx, void* output )

// store hash result in output
for ( i = 0; i < hashlen_m128i; i++ )
casti_m128i( output, i ) = ctx->chaining[ hash_offset + i];
casti_v128( output, i ) = ctx->chaining[ hash_offset + i];

return SUCCESS_GR;
return 0;
}

HashReturn_gr update_and_final_groestl256( hashState_groestl256* ctx,
void* output, const void* input, DataLength_gr databitlen )
int update_and_final_groestl256( hashState_groestl256* ctx,
void* output, const void* input, int databitlen )
{
const int len = (int)databitlen / 128;
const int hashlen_m128i = ctx->hashlen / 16; // bytes to __m128i
const int hashlen_m128i = ctx->hashlen / 16; // bytes to v128_t
const int hash_offset = SIZE256 - hashlen_m128i;
int rem = ctx->rem_ptr;
int blocks = len / SIZE256;
__m128i* in = (__m128i*)input;
v128_t* in = (v128_t*)input;
int i;

// --- update ---
@@ -144,7 +143,7 @@ HashReturn_gr update_and_final_groestl256( hashState_groestl256* ctx,
TF512( ctx->chaining, &in[ i * SIZE256 ] );
ctx->buf_ptr = blocks * SIZE256;

// cryptonight has 200 byte input, an odd number of __m128i
// cryptonight has 200 byte input, an odd number of v128_t
// remainder is only 8 bytes, ie u64.
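// (200 bytes = 12 full 16 byte vectors + one trailing 8 byte word)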
if ( databitlen % 128 !=0 )
{
@@ -168,8 +167,8 @@ HashReturn_gr update_and_final_groestl256( hashState_groestl256* ctx,
if ( i == len - 1 )
{
// all padding at once
ctx->buffer[i] = _mm_set_epi8( blocks,blocks>>8,0,0, 0,0,0,0,
0, 0,0,0, 0,0,0,0x80 );
ctx->buffer[i] = v128_set8( blocks,blocks>>8,0,0, 0,0,0,0,
0, 0,0,0, 0,0,0,0x80 );
}
else
{
@@ -183,16 +182,16 @@ HashReturn_gr update_and_final_groestl256( hashState_groestl256* ctx,
else
{
// add first padding
ctx->buffer[i] = _mm_set_epi8( 0,0,0,0, 0,0,0,0,
0,0,0,0, 0,0,0,0x80 );
ctx->buffer[i] = v128_set8( 0,0,0,0, 0,0,0,0,
0,0,0,0, 0,0,0,0x80 );
}
// add zero padding
for ( i += 1; i < SIZE256 - 1; i++ )
ctx->buffer[i] = _mm_setzero_si128();
ctx->buffer[i] = v128_zero;
// add length padding
// cheat since we know the block count is trivial, good if block < 256
ctx->buffer[i] = _mm_set_epi8( blocks,blocks>>8,0,0, 0,0,0,0,
0, 0,0,0, 0,0,0,0 );
ctx->buffer[i] = v128_set8( blocks, blocks>>8,0,0, 0,0,0,0,
0, 0,0,0, 0,0,0,0 );
}

// digest final padding block and do output transform
@@ -201,30 +200,30 @@ HashReturn_gr update_and_final_groestl256( hashState_groestl256* ctx,

// store hash result in output
for ( i = 0; i < hashlen_m128i; i++ )
casti_m128i( output, i ) = ctx->chaining[ hash_offset + i ];
casti_v128( output, i ) = ctx->chaining[ hash_offset + i ];

return SUCCESS_GR;
return 0;
}

int groestl256_full( hashState_groestl256* ctx,
void* output, const void* input, DataLength_gr databitlen )
void* output, const void* input, int databitlen )
{
int i;
ctx->hashlen = 32;
for ( i = 0; i < SIZE256; i++ )
{
ctx->chaining[i] = _mm_setzero_si128();
ctx->buffer[i] = _mm_setzero_si128();
ctx->chaining[i] = v128_zero;
ctx->buffer[i] = v128_zero;
}
((u64*)ctx->chaining)[COLS-1] = U64BIG((u64)LENGTH);
INIT256( ctx->chaining );
ctx->buf_ptr = 0;

const int len = (int)databitlen / 128;
const int hashlen_m128i = ctx->hashlen / 16; // bytes to __m128i
const int hashlen_m128i = ctx->hashlen / 16; // bytes to v128_t
const int hash_offset = SIZE256 - hashlen_m128i;
int blocks = len / SIZE256;
__m128i* in = (__m128i*)input;
v128_t* in = (v128_t*)input;

// --- update ---

@@ -233,7 +232,7 @@ int groestl256_full( hashState_groestl256* ctx,
TF512( ctx->chaining, &in[ i * SIZE256 ] );
ctx->buf_ptr = blocks * SIZE256;

// cryptonight has 200 byte input, an odd number of __m128i
// cryptonight has 200 byte input, an odd number of v128_t
// remainder is only 8 bytes, ie u64.
if ( databitlen % 128 != 0 )
{
@@ -257,8 +256,8 @@ int groestl256_full( hashState_groestl256* ctx,
if ( i == len - 1 )
{
// all padding at once
ctx->buffer[i] = _mm_set_epi8( blocks,blocks>>8,0,0, 0,0,0,0,
0, 0,0,0, 0,0,0,0x80 );
ctx->buffer[i] = v128_set8( blocks,blocks>>8,0,0, 0,0,0,0,
0, 0,0,0, 0,0,0,0x80 );
}
else
{
@@ -272,16 +271,16 @@ int groestl256_full( hashState_groestl256* ctx,
else
{
// add first padding
ctx->buffer[i] = _mm_set_epi8( 0,0,0,0, 0,0,0,0,
0,0,0,0, 0,0,0,0x80 );
ctx->buffer[i] = v128_set8( 0,0,0,0, 0,0,0,0,
0,0,0,0, 0,0,0,0x80 );
}
// add zero padding
for ( i += 1; i < SIZE256 - 1; i++ )
ctx->buffer[i] = _mm_setzero_si128();
ctx->buffer[i] = v128_zero;
// add length padding
// cheat since we know the block count is trivial, good if block < 256
ctx->buffer[i] = _mm_set_epi8( blocks,blocks>>8,0,0, 0,0,0,0,
0, 0,0,0, 0,0,0,0 );
ctx->buffer[i] = v128_set8( blocks,blocks>>8,0,0, 0,0,0,0,
0, 0,0,0, 0,0,0,0 );
}

// digest final padding block and do output transform
@@ -290,18 +289,17 @@ int groestl256_full( hashState_groestl256* ctx,

// store hash result in output
for ( i = 0; i < hashlen_m128i; i++ )
casti_m128i( output, i ) = ctx->chaining[ hash_offset + i ];
casti_v128( output, i ) = ctx->chaining[ hash_offset + i ];

return SUCCESS_GR;
return 0;
}


/* hash bit sequence */
HashReturn_gr hash_groestl256(int hashbitlen,
const BitSequence_gr* data,
DataLength_gr databitlen,
BitSequence_gr* hashval) {
HashReturn_gr ret;
int hash_groestl256(int hashbitlen, const void* data, int databitlen,
uint8_t* hashval)
{
int ret;
hashState_groestl256 context;

/* initialise */
@@ -327,4 +325,4 @@ HashReturn_gr hash_groestl256(int hashbitlen,
//}
//#endif

#endif
#endif // SSSE3 or NEON

@@ -100,22 +100,20 @@ typedef struct {
int databitlen;
} hashState_groestl256;

HashReturn_gr init_groestl256( hashState_groestl256*, int );
int init_groestl256( hashState_groestl256*, int );

HashReturn_gr reinit_groestl256( hashState_groestl256* );
int reinit_groestl256( hashState_groestl256* );

HashReturn_gr update_groestl256( hashState_groestl256*, const void*,
DataLength_gr );
int update_groestl256( hashState_groestl256*, const void*, int );

HashReturn_gr final_groestl256( hashState_groestl256*, void* );
int final_groestl256( hashState_groestl256*, void* );

HashReturn_gr hash_groestli256( int, const BitSequence_gr*, DataLength_gr,
BitSequence_gr* );
int hash_groestl256( int, const void*, int, uint8_t* );

HashReturn_gr update_and_final_groestl256( hashState_groestl256*, void*,
const void*, DataLength_gr );
int update_and_final_groestl256( hashState_groestl256*, void*,
const void*, int );

int groestl256_full( hashState_groestl256* ctx,
void* output, const void* input, DataLength_gr databitlen );
void* output, const void* input, int databitlen );

#endif /* __hash_h */

@@ -11,8 +11,6 @@

#if defined(__AVX2__) && defined(__VAES__)

#define LENGTH (512)

/* some sizes (number of bytes) */
#define ROWS (8)
#define LENGTHFIELDLEN (ROWS)