Mirror of https://github.com/JayDDee/cpuminer-opt.git (synced 2025-09-17 23:44:27 +00:00)
Commit v23.5
@@ -10,11 +10,9 @@
* This code is placed in the public domain
*/

#include <smmintrin.h>
#include <wmmintrin.h>
#include "hash-groestl.h"

static const __m128i round_const_p[] __attribute__ ((aligned (64))) =
static const v128u64_t round_const_p[] __attribute__ ((aligned (64))) =
{
{ 0x7060504030201000, 0xf0e0d0c0b0a09080 },
{ 0x7161514131211101, 0xf1e1d1c1b1a19181 },
@@ -32,7 +30,7 @@ static const __m128i round_const_p[] __attribute__ ((aligned (64))) =
{ 0x7d6d5d4d3d2d1d0d, 0xfdedddcdbdad9d8d }
};

static const __m128i round_const_q[] __attribute__ ((aligned (64))) =
static const v128u64_t round_const_q[] __attribute__ ((aligned (64))) =
{
{ 0x8f9fafbfcfdfefff, 0x0f1f2f3f4f5f6f7f },
{ 0x8e9eaebecedeeefe, 0x0e1e2e3e4e5e6e7e },
@@ -50,15 +48,29 @@ static const __m128i round_const_q[] __attribute__ ((aligned (64))) =
{ 0x8292a2b2c2d2e2f2, 0x0212223242526272 }
};

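The two constant tables above follow a simple pattern that the elided rows also obey: byte j of the P constant for round r is (j << 4) ^ r, and the Q constant is its bitwise complement, with the bytes stored little-endian in the 64-bit literals. A small stand-alone generator (illustrative only, not part of the patch) that reproduces the entries visible above:

#include <stdint.h>
#include <stdio.h>

/* Illustrative generator for the visible table rows: byte j of the P
 * constant for round r is (j << 4) ^ r; the Q constant is its complement. */
int main( void )
{
   for ( int r = 0; r < 2; r++ )
   {
      uint8_t p[16], q[16];
      for ( int j = 0; j < 16; j++ )
      {
         p[j] = (uint8_t)( ( j << 4 ) ^ r );
         q[j] = (uint8_t)~p[j];
      }
      printf( "P round %d:", r );
      for ( int j = 0; j < 16; j++ )  printf( " %02x", p[j] );
      printf( "\nQ round %d:", r );
      for ( int j = 0; j < 16; j++ )  printf( " %02x", q[j] );
      printf( "\n" );
   }
   return 0;
}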
static const __m128i TRANSP_MASK = { 0x0d0509010c040800, 0x0f070b030e060a02 };
static const __m128i SUBSH_MASK0 = { 0x0b0e0104070a0d00, 0x0306090c0f020508 };
static const __m128i SUBSH_MASK1 = { 0x0c0f0205080b0e01, 0x04070a0d00030609 };
static const __m128i SUBSH_MASK2 = { 0x0d000306090c0f02, 0x05080b0e0104070a };
static const __m128i SUBSH_MASK3 = { 0x0e0104070a0d0003, 0x06090c0f0205080b };
static const __m128i SUBSH_MASK4 = { 0x0f0205080b0e0104, 0x070a0d000306090c };
static const __m128i SUBSH_MASK5 = { 0x000306090c0f0205, 0x080b0e0104070a0d };
static const __m128i SUBSH_MASK6 = { 0x0104070a0d000306, 0x090c0f0205080b0e };
static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
static const v128u64_t TRANSP_MASK = { 0x0d0509010c040800, 0x0f070b030e060a02 };
static const v128u64_t SUBSH_MASK0 = { 0x0b0e0104070a0d00, 0x0306090c0f020508 };
static const v128u64_t SUBSH_MASK1 = { 0x0c0f0205080b0e01, 0x04070a0d00030609 };
static const v128u64_t SUBSH_MASK2 = { 0x0d000306090c0f02, 0x05080b0e0104070a };
static const v128u64_t SUBSH_MASK3 = { 0x0e0104070a0d0003, 0x06090c0f0205080b };
static const v128u64_t SUBSH_MASK4 = { 0x0f0205080b0e0104, 0x070a0d000306090c };
static const v128u64_t SUBSH_MASK5 = { 0x000306090c0f0205, 0x080b0e0104070a0d };
static const v128u64_t SUBSH_MASK6 = { 0x0104070a0d000306, 0x090c0f0205080b0e };
static const v128u64_t SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };

#if defined(__ARM_NEON)

// No fast shuffle on NEON
static const uint32x4_t vmask_d8 = { 3, 1, 2, 0 };

#define gr_shuffle32( v ) v128_shufflev32( v, vmask_d8 )

#else

#define gr_shuffle32( v ) _mm_shuffle_epi32( v, 0xd8 )

#endif


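The new gr_shuffle32 wrapper papers over the lack of an immediate-controlled 32-bit shuffle on NEON (hence the comment above): on the SSE side it is _mm_shuffle_epi32 with control 0xd8, i.e. _MM_SHUFFLE(3,1,2,0), which keeps lanes 0 and 3 in place and swaps the two middle 32-bit lanes, while the NEON side has to emulate the same permutation through a vector index (vmask_d8). A minimal sketch of the SSE behaviour together with a scalar reference (illustrative only):

#include <stdint.h>
#include <emmintrin.h>

/* gr_shuffle32 on the SSE path: lane order (0, 2, 1, 3), i.e. the two
 * middle 32-bit lanes are swapped.  0xd8 == _MM_SHUFFLE(3, 1, 2, 0). */
static inline __m128i gr_shuffle32_sse( __m128i v )
{
   return _mm_shuffle_epi32( v, 0xd8 );
}

/* Scalar equivalent for reference. */
static inline void gr_shuffle32_ref( uint32_t d[4], const uint32_t s[4] )
{
   d[0] = s[0];  d[1] = s[2];  d[2] = s[1];  d[3] = s[3];
}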
#define tos(a) #a
#define tostr(a) tos(a)
@@ -67,9 +79,9 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
* xmm[j] will be lost
* xmm[k] has to be all 0x1b */
#define MUL2(i, j, k){\
j = _mm_cmpgt_epi8( m128_zero, i);\
i = _mm_add_epi8(i, i);\
i = mm128_xorand(i, j, k );\
j = v128_cmpgt8( v128_zero, i);\
i = v128_add8(i, i);\
i = v128_xorand(i, j, k );\
}

/**/
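MUL2 doubles every byte of a vector in GF(2^8): the signed byte compare builds a mask of lanes whose top bit is set, the add shifts each byte left by one, and the xor-and folds in the reduction constant 0x1b only where the mask is set, which is why k has to hold 0x1b in every byte. A scalar sketch of the same operation (illustrative only):

#include <stdint.h>

/* Scalar equivalent of MUL2: multiply a byte by 2 in GF(2^8) modulo the
 * AES polynomial x^8 + x^4 + x^3 + x + 1.  The vector macro does the same
 * thing lane-wise: the compare produces 0xff where the byte is >= 0x80,
 * the add doubles it, and the reduction is XORed in only under the mask. */
static inline uint8_t gf256_mul2( uint8_t x )
{
   uint8_t mask = ( x & 0x80 ) ? 0xff : 0x00;   /* j = cmpgt( 0, i )     */
   uint8_t dbl  = (uint8_t)( x + x );           /* i = add8( i, i )      */
   return (uint8_t)( dbl ^ ( mask & 0x1b ) );   /* i = xorand( i, j, k ) */
}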
@@ -98,85 +110,85 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
/* t_i = a_i + a_{i+1} */\
b6 = a0;\
b7 = a1;\
a0 = _mm_xor_si128(a0, a1);\
a0 = v128_xor(a0, a1);\
b0 = a2;\
a1 = _mm_xor_si128(a1, a2);\
a1 = v128_xor(a1, a2);\
b1 = a3;\
TEMP2 = _mm_xor_si128(a2, a3);\
TEMP2 = v128_xor(a2, a3);\
b2 = a4;\
a3 = _mm_xor_si128(a3, a4);\
a3 = v128_xor(a3, a4);\
b3 = a5;\
a4 = _mm_xor_si128(a4, a5);\
a4 = v128_xor(a4, a5);\
b4 = a6;\
a5 = _mm_xor_si128(a5, a6);\
a5 = v128_xor(a5, a6);\
b5 = a7;\
a6 = _mm_xor_si128(a6, a7);\
a7 = _mm_xor_si128(a7, b6);\
a6 = v128_xor(a6, a7);\
a7 = v128_xor(a7, b6);\
\
/* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
TEMP0 = mm128_xor3( b0, a4, a6 ); \
TEMP0 = v128_xor3( b0, a4, a6 ); \
/* spill values y_4, y_5 to memory */\
TEMP1 = mm128_xor3( b1, a5, a7 );\
b2 = mm128_xor3( b2, a6, a0 ); \
TEMP1 = v128_xor3( b1, a5, a7 );\
b2 = v128_xor3( b2, a6, a0 ); \
/* save values t0, t1, t2 to xmm8, xmm9 and memory */\
b0 = a0;\
b3 = mm128_xor3( b3, a7, a1 ); \
b3 = v128_xor3( b3, a7, a1 ); \
b1 = a1;\
b6 = mm128_xor3( b6, a4, TEMP2 ); \
b4 = mm128_xor3( b4, a0, TEMP2 ); \
b7 = mm128_xor3( b7, a5, a3 ); \
b5 = mm128_xor3( b5, a1, a3 ); \
b6 = v128_xor3( b6, a4, TEMP2 ); \
b4 = v128_xor3( b4, a0, TEMP2 ); \
b7 = v128_xor3( b7, a5, a3 ); \
b5 = v128_xor3( b5, a1, a3 ); \
\
/* compute x_i = t_i + t_{i+3} */\
a0 = _mm_xor_si128(a0, a3);\
a1 = _mm_xor_si128(a1, a4);\
a2 = _mm_xor_si128(TEMP2, a5);\
a3 = _mm_xor_si128(a3, a6);\
a4 = _mm_xor_si128(a4, a7);\
a5 = _mm_xor_si128(a5, b0);\
a6 = _mm_xor_si128(a6, b1);\
a7 = _mm_xor_si128(a7, TEMP2);\
a0 = v128_xor(a0, a3);\
a1 = v128_xor(a1, a4);\
a2 = v128_xor(TEMP2, a5);\
a3 = v128_xor(a3, a6);\
a4 = v128_xor(a4, a7);\
a5 = v128_xor(a5, b0);\
a6 = v128_xor(a6, b1);\
a7 = v128_xor(a7, TEMP2);\
\
/* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
/* compute w_i : add y_{i+4} */\
b1 = _mm_set1_epi64x( 0x1b1b1b1b1b1b1b1b );\
b1 = v128_64( 0x1b1b1b1b1b1b1b1b );\
MUL2(a0, b0, b1);\
a0 = _mm_xor_si128(a0, TEMP0);\
a0 = v128_xor(a0, TEMP0);\
MUL2(a1, b0, b1);\
a1 = _mm_xor_si128(a1, TEMP1);\
a1 = v128_xor(a1, TEMP1);\
MUL2(a2, b0, b1);\
a2 = _mm_xor_si128(a2, b2);\
a2 = v128_xor(a2, b2);\
MUL2(a3, b0, b1);\
a3 = _mm_xor_si128(a3, b3);\
a3 = v128_xor(a3, b3);\
MUL2(a4, b0, b1);\
a4 = _mm_xor_si128(a4, b4);\
a4 = v128_xor(a4, b4);\
MUL2(a5, b0, b1);\
a5 = _mm_xor_si128(a5, b5);\
a5 = v128_xor(a5, b5);\
MUL2(a6, b0, b1);\
a6 = _mm_xor_si128(a6, b6);\
a6 = v128_xor(a6, b6);\
MUL2(a7, b0, b1);\
a7 = _mm_xor_si128(a7, b7);\
a7 = v128_xor(a7, b7);\
\
/* compute v_i : double w_i */\
/* add to y_4 y_5 .. v3, v4, ... */\
MUL2(a0, b0, b1);\
b5 = _mm_xor_si128(b5, a0);\
b5 = v128_xor(b5, a0);\
MUL2(a1, b0, b1);\
b6 = _mm_xor_si128(b6, a1);\
b6 = v128_xor(b6, a1);\
MUL2(a2, b0, b1);\
b7 = _mm_xor_si128(b7, a2);\
b7 = v128_xor(b7, a2);\
MUL2(a5, b0, b1);\
b2 = _mm_xor_si128(b2, a5);\
b2 = v128_xor(b2, a5);\
MUL2(a6, b0, b1);\
b3 = _mm_xor_si128(b3, a6);\
b3 = v128_xor(b3, a6);\
MUL2(a7, b0, b1);\
b4 = _mm_xor_si128(b4, a7);\
b4 = v128_xor(b4, a7);\
MUL2(a3, b0, b1);\
MUL2(a4, b0, b1);\
b0 = TEMP0;\
b1 = TEMP1;\
b0 = _mm_xor_si128(b0, a3);\
b1 = _mm_xor_si128(b1, a4);\
b0 = v128_xor(b0, a3);\
b1 = v128_xor(b1, a4);\
}/*MixBytes*/

#else

@@ -185,96 +197,96 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
/* t_i = a_i + a_{i+1} */\
b6 = a0;\
b7 = a1;\
a0 = _mm_xor_si128(a0, a1);\
a0 = v128_xor(a0, a1);\
b0 = a2;\
a1 = _mm_xor_si128(a1, a2);\
a1 = v128_xor(a1, a2);\
b1 = a3;\
a2 = _mm_xor_si128(a2, a3);\
a2 = v128_xor(a2, a3);\
b2 = a4;\
a3 = _mm_xor_si128(a3, a4);\
a3 = v128_xor(a3, a4);\
b3 = a5;\
a4 = _mm_xor_si128(a4, a5);\
a4 = v128_xor(a4, a5);\
b4 = a6;\
a5 = _mm_xor_si128(a5, a6);\
a5 = v128_xor(a5, a6);\
b5 = a7;\
a6 = _mm_xor_si128(a6, a7);\
a7 = _mm_xor_si128(a7, b6);\
a6 = v128_xor(a6, a7);\
a7 = v128_xor(a7, b6);\
\
/* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
b0 = _mm_xor_si128(b0, a4);\
b6 = _mm_xor_si128(b6, a4);\
b1 = _mm_xor_si128(b1, a5);\
b7 = _mm_xor_si128(b7, a5);\
b2 = _mm_xor_si128(b2, a6);\
b0 = _mm_xor_si128(b0, a6);\
b0 = v128_xor(b0, a4);\
b6 = v128_xor(b6, a4);\
b1 = v128_xor(b1, a5);\
b7 = v128_xor(b7, a5);\
b2 = v128_xor(b2, a6);\
b0 = v128_xor(b0, a6);\
/* spill values y_4, y_5 to memory */\
TEMP0 = b0;\
b3 = _mm_xor_si128(b3, a7);\
b1 = _mm_xor_si128(b1, a7);\
b3 = v128_xor(b3, a7);\
b1 = v128_xor(b1, a7);\
TEMP1 = b1;\
b4 = _mm_xor_si128(b4, a0);\
b2 = _mm_xor_si128(b2, a0);\
b4 = v128_xor(b4, a0);\
b2 = v128_xor(b2, a0);\
/* save values t0, t1, t2 to xmm8, xmm9 and memory */\
b0 = a0;\
b5 = _mm_xor_si128(b5, a1);\
b3 = _mm_xor_si128(b3, a1);\
b5 = v128_xor(b5, a1);\
b3 = v128_xor(b3, a1);\
b1 = a1;\
b6 = _mm_xor_si128(b6, a2);\
b4 = _mm_xor_si128(b4, a2);\
b6 = v128_xor(b6, a2);\
b4 = v128_xor(b4, a2);\
TEMP2 = a2;\
b7 = _mm_xor_si128(b7, a3);\
b5 = _mm_xor_si128(b5, a3);\
b7 = v128_xor(b7, a3);\
b5 = v128_xor(b5, a3);\
\
/* compute x_i = t_i + t_{i+3} */\
a0 = _mm_xor_si128(a0, a3);\
a1 = _mm_xor_si128(a1, a4);\
a2 = _mm_xor_si128(a2, a5);\
a3 = _mm_xor_si128(a3, a6);\
a4 = _mm_xor_si128(a4, a7);\
a5 = _mm_xor_si128(a5, b0);\
a6 = _mm_xor_si128(a6, b1);\
a7 = _mm_xor_si128(a7, TEMP2);\
a0 = v128_xor(a0, a3);\
a1 = v128_xor(a1, a4);\
a2 = v128_xor(a2, a5);\
a3 = v128_xor(a3, a6);\
a4 = v128_xor(a4, a7);\
a5 = v128_xor(a5, b0);\
a6 = v128_xor(a6, b1);\
a7 = v128_xor(a7, TEMP2);\
\
/* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
/* compute w_i : add y_{i+4} */\
b1 = _mm_set1_epi64x( 0x1b1b1b1b1b1b1b1b );\
b1 = v128_64( 0x1b1b1b1b1b1b1b1b );\
MUL2(a0, b0, b1);\
a0 = _mm_xor_si128(a0, TEMP0);\
a0 = v128_xor(a0, TEMP0);\
MUL2(a1, b0, b1);\
a1 = _mm_xor_si128(a1, TEMP1);\
a1 = v128_xor(a1, TEMP1);\
MUL2(a2, b0, b1);\
a2 = _mm_xor_si128(a2, b2);\
a2 = v128_xor(a2, b2);\
MUL2(a3, b0, b1);\
a3 = _mm_xor_si128(a3, b3);\
a3 = v128_xor(a3, b3);\
MUL2(a4, b0, b1);\
a4 = _mm_xor_si128(a4, b4);\
a4 = v128_xor(a4, b4);\
MUL2(a5, b0, b1);\
a5 = _mm_xor_si128(a5, b5);\
a5 = v128_xor(a5, b5);\
MUL2(a6, b0, b1);\
a6 = _mm_xor_si128(a6, b6);\
a6 = v128_xor(a6, b6);\
MUL2(a7, b0, b1);\
a7 = _mm_xor_si128(a7, b7);\
a7 = v128_xor(a7, b7);\
\
/* compute v_i : double w_i */\
/* add to y_4 y_5 .. v3, v4, ... */\
MUL2(a0, b0, b1);\
b5 = _mm_xor_si128(b5, a0);\
b5 = v128_xor(b5, a0);\
MUL2(a1, b0, b1);\
b6 = _mm_xor_si128(b6, a1);\
b6 = v128_xor(b6, a1);\
MUL2(a2, b0, b1);\
b7 = _mm_xor_si128(b7, a2);\
b7 = v128_xor(b7, a2);\
MUL2(a5, b0, b1);\
b2 = _mm_xor_si128(b2, a5);\
b2 = v128_xor(b2, a5);\
MUL2(a6, b0, b1);\
b3 = _mm_xor_si128(b3, a6);\
b3 = v128_xor(b3, a6);\
MUL2(a7, b0, b1);\
b4 = _mm_xor_si128(b4, a7);\
b4 = v128_xor(b4, a7);\
MUL2(a3, b0, b1);\
MUL2(a4, b0, b1);\
b0 = TEMP0;\
b1 = TEMP1;\
b0 = _mm_xor_si128(b0, a3);\
b1 = _mm_xor_si128(b1, a4);\
b0 = v128_xor(b0, a3);\
b1 = v128_xor(b1, a4);\
}/*MixBytes*/

#endif
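Both MixBytes variants above compute the same Grøstl MixBytes step and differ only in whether a fused three-way XOR (mm128_xor3 / v128_xor3) is available; mathematically each 8-byte column of the state is multiplied by the circulant matrix circ(02, 02, 03, 04, 05, 03, 05, 07) over GF(2^8). A plain scalar reference for one column (illustrative only, not part of the patch; the SIMD code processes all 16 columns at once, one state row per 128-bit register):

#include <stdint.h>

static inline uint8_t xt( uint8_t x )            /* multiply by 2 in GF(2^8) */
{
   return (uint8_t)( ( x << 1 ) ^ ( ( x & 0x80 ) ? 0x1b : 0 ) );
}

static inline uint8_t gmul( uint8_t x, uint8_t c )    /* c in {2,3,4,5,7} */
{
   uint8_t r = 0;
   while ( c )
   {
      if ( c & 1 ) r ^= x;
      x = xt( x );
      c >>= 1;
   }
   return r;
}

/* MixBytes on a single 8-byte column: out = circ(02,02,03,04,05,03,05,07) * in. */
static void mixbytes_column( uint8_t out[8], const uint8_t in[8] )
{
   static const uint8_t B[8] = { 0x02, 0x02, 0x03, 0x04, 0x05, 0x03, 0x05, 0x07 };
   for ( int r = 0; r < 8; r++ )
   {
      uint8_t acc = 0;
      for ( int c = 0; c < 8; c++ )
         acc ^= gmul( in[c], B[ ( c - r + 8 ) & 7 ] );
      out[r] = acc;
   }
}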
@@ -286,15 +298,15 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
*/
#define SUBMIX(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
/* SubBytes */\
b0 = _mm_xor_si128(b0, b0);\
a0 = _mm_aesenclast_si128(a0, b0);\
a1 = _mm_aesenclast_si128(a1, b0);\
a2 = _mm_aesenclast_si128(a2, b0);\
a3 = _mm_aesenclast_si128(a3, b0);\
a4 = _mm_aesenclast_si128(a4, b0);\
a5 = _mm_aesenclast_si128(a5, b0);\
a6 = _mm_aesenclast_si128(a6, b0);\
a7 = _mm_aesenclast_si128(a7, b0);\
b0 = v128_xor(b0, b0);\
a0 = v128_aesenclast(a0, b0);\
a1 = v128_aesenclast(a1, b0);\
a2 = v128_aesenclast(a2, b0);\
a3 = v128_aesenclast(a3, b0);\
a4 = v128_aesenclast(a4, b0);\
a5 = v128_aesenclast(a5, b0);\
a6 = v128_aesenclast(a6, b0);\
a7 = v128_aesenclast(a7, b0);\
/* MixBytes */\
MixBytes(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7);\
}
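SUBMIX first zeroes b0 and then feeds it to AESENCLAST as the round key: with an all-zero key the instruction reduces to AES ShiftRows followed by SubBytes, and the SUBSH_MASK shuffles in the round macros perform Grøstl's ShiftBytes while pre-compensating for that built-in ShiftRows (the "pre-AESENCLAST" in the comments), so the net effect is the AES S-box applied to every byte of the state. The v128_aesenclast wrapper abstracts this over AES-NI and the equivalent ARM extensions. A minimal SSE spelling of the zero-key idiom (illustrative only):

#include <wmmintrin.h>   /* AES-NI intrinsics */

/* Zero-key AESENCLAST == SubBytes( ShiftRows( state ) ): the last AES round
 * has no MixColumns, and XOR with an all-zero round key is a no-op. */
static inline __m128i sbox_layer( __m128i state )
{
   return _mm_aesenclast_si128( state, _mm_setzero_si128() );
}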
@@ -303,32 +315,32 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
u8 round_counter = 0;\
for(round_counter = 0; round_counter < 14; round_counter+=2) {\
/* AddRoundConstant P1024 */\
xmm8 = _mm_xor_si128( xmm8, \
casti_m128i( round_const_p, round_counter ) ); \
xmm8 = v128_xor( xmm8, \
casti_v128( round_const_p, round_counter ) ); \
/* ShiftBytes P1024 + pre-AESENCLAST */\
xmm8 = _mm_shuffle_epi8( xmm8, SUBSH_MASK0 ); \
xmm9 = _mm_shuffle_epi8( xmm9, SUBSH_MASK1 ); \
xmm10 = _mm_shuffle_epi8( xmm10, SUBSH_MASK2 ); \
xmm11 = _mm_shuffle_epi8( xmm11, SUBSH_MASK3 ); \
xmm12 = _mm_shuffle_epi8( xmm12, SUBSH_MASK4 ); \
xmm13 = _mm_shuffle_epi8( xmm13, SUBSH_MASK5 ); \
xmm14 = _mm_shuffle_epi8( xmm14, SUBSH_MASK6 ); \
xmm15 = _mm_shuffle_epi8( xmm15, SUBSH_MASK7 ); \
xmm8 = v128_shuffle8( xmm8, SUBSH_MASK0 ); \
xmm9 = v128_shuffle8( xmm9, SUBSH_MASK1 ); \
xmm10 = v128_shuffle8( xmm10, SUBSH_MASK2 ); \
xmm11 = v128_shuffle8( xmm11, SUBSH_MASK3 ); \
xmm12 = v128_shuffle8( xmm12, SUBSH_MASK4 ); \
xmm13 = v128_shuffle8( xmm13, SUBSH_MASK5 ); \
xmm14 = v128_shuffle8( xmm14, SUBSH_MASK6 ); \
xmm15 = v128_shuffle8( xmm15, SUBSH_MASK7 ); \
/* SubBytes + MixBytes */\
SUBMIX( xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, \
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 ); \
\
/* AddRoundConstant P1024 */\
xmm0 = _mm_xor_si128( xmm0, \
casti_m128i( round_const_p, round_counter+1 ) ); \
xmm0 = _mm_shuffle_epi8( xmm0, SUBSH_MASK0 ); \
xmm1 = _mm_shuffle_epi8( xmm1, SUBSH_MASK1 ); \
xmm2 = _mm_shuffle_epi8( xmm2, SUBSH_MASK2 ); \
xmm3 = _mm_shuffle_epi8( xmm3, SUBSH_MASK3 ); \
xmm4 = _mm_shuffle_epi8( xmm4, SUBSH_MASK4 ); \
xmm5 = _mm_shuffle_epi8( xmm5, SUBSH_MASK5 ); \
xmm6 = _mm_shuffle_epi8( xmm6, SUBSH_MASK6 ); \
xmm7 = _mm_shuffle_epi8( xmm7, SUBSH_MASK7 ); \
xmm0 = v128_xor( xmm0, \
casti_v128( round_const_p, round_counter+1 ) ); \
xmm0 = v128_shuffle8( xmm0, SUBSH_MASK0 ); \
xmm1 = v128_shuffle8( xmm1, SUBSH_MASK1 ); \
xmm2 = v128_shuffle8( xmm2, SUBSH_MASK2 ); \
xmm3 = v128_shuffle8( xmm3, SUBSH_MASK3 ); \
xmm4 = v128_shuffle8( xmm4, SUBSH_MASK4 ); \
xmm5 = v128_shuffle8( xmm5, SUBSH_MASK5 ); \
xmm6 = v128_shuffle8( xmm6, SUBSH_MASK6 ); \
xmm7 = v128_shuffle8( xmm7, SUBSH_MASK7 ); \
SUBMIX( xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, \
xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15 ); \
}\
@@ -338,49 +350,49 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
u8 round_counter = 0;\
for(round_counter = 0; round_counter < 14; round_counter+=2) {\
/* AddRoundConstant Q1024 */\
xmm1 = m128_neg1;\
xmm8 = _mm_xor_si128( xmm8, xmm1 ); \
xmm9 = _mm_xor_si128( xmm9, xmm1 ); \
xmm10 = _mm_xor_si128( xmm10, xmm1 ); \
xmm11 = _mm_xor_si128( xmm11, xmm1 ); \
xmm12 = _mm_xor_si128( xmm12, xmm1 ); \
xmm13 = _mm_xor_si128( xmm13, xmm1 ); \
xmm14 = _mm_xor_si128( xmm14, xmm1 ); \
xmm15 = _mm_xor_si128( xmm15, \
casti_m128i( round_const_q, round_counter ) ); \
xmm1 = v128_neg1;\
xmm8 = v128_xor( xmm8, xmm1 ); \
xmm9 = v128_xor( xmm9, xmm1 ); \
xmm10 = v128_xor( xmm10, xmm1 ); \
xmm11 = v128_xor( xmm11, xmm1 ); \
xmm12 = v128_xor( xmm12, xmm1 ); \
xmm13 = v128_xor( xmm13, xmm1 ); \
xmm14 = v128_xor( xmm14, xmm1 ); \
xmm15 = v128_xor( xmm15, \
casti_v128( round_const_q, round_counter ) ); \
/* ShiftBytes Q1024 + pre-AESENCLAST */\
xmm8 = _mm_shuffle_epi8( xmm8, SUBSH_MASK1 ); \
xmm9 = _mm_shuffle_epi8( xmm9, SUBSH_MASK3 ); \
xmm10 = _mm_shuffle_epi8( xmm10, SUBSH_MASK5 ); \
xmm11 = _mm_shuffle_epi8( xmm11, SUBSH_MASK7 ); \
xmm12 = _mm_shuffle_epi8( xmm12, SUBSH_MASK0 ); \
xmm13 = _mm_shuffle_epi8( xmm13, SUBSH_MASK2 ); \
xmm14 = _mm_shuffle_epi8( xmm14, SUBSH_MASK4 ); \
xmm15 = _mm_shuffle_epi8( xmm15, SUBSH_MASK6 ); \
xmm8 = v128_shuffle8( xmm8, SUBSH_MASK1 ); \
xmm9 = v128_shuffle8( xmm9, SUBSH_MASK3 ); \
xmm10 = v128_shuffle8( xmm10, SUBSH_MASK5 ); \
xmm11 = v128_shuffle8( xmm11, SUBSH_MASK7 ); \
xmm12 = v128_shuffle8( xmm12, SUBSH_MASK0 ); \
xmm13 = v128_shuffle8( xmm13, SUBSH_MASK2 ); \
xmm14 = v128_shuffle8( xmm14, SUBSH_MASK4 ); \
xmm15 = v128_shuffle8( xmm15, SUBSH_MASK6 ); \
/* SubBytes + MixBytes */\
SUBMIX( xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, \
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6 , xmm7 ); \
\
/* AddRoundConstant Q1024 */\
xmm9 = m128_neg1;\
xmm0 = _mm_xor_si128( xmm0, xmm9 ); \
xmm1 = _mm_xor_si128( xmm1, xmm9 ); \
xmm2 = _mm_xor_si128( xmm2, xmm9 ); \
xmm3 = _mm_xor_si128( xmm3, xmm9 ); \
xmm4 = _mm_xor_si128( xmm4, xmm9 ); \
xmm5 = _mm_xor_si128( xmm5, xmm9 ); \
xmm6 = _mm_xor_si128( xmm6, xmm9 ); \
xmm7 = _mm_xor_si128( xmm7, \
casti_m128i( round_const_q, round_counter+1 ) ); \
xmm9 = v128_neg1;\
xmm0 = v128_xor( xmm0, xmm9 ); \
xmm1 = v128_xor( xmm1, xmm9 ); \
xmm2 = v128_xor( xmm2, xmm9 ); \
xmm3 = v128_xor( xmm3, xmm9 ); \
xmm4 = v128_xor( xmm4, xmm9 ); \
xmm5 = v128_xor( xmm5, xmm9 ); \
xmm6 = v128_xor( xmm6, xmm9 ); \
xmm7 = v128_xor( xmm7, \
casti_v128( round_const_q, round_counter+1 ) ); \
/* ShiftBytes Q1024 + pre-AESENCLAST */\
xmm0 = _mm_shuffle_epi8( xmm0, SUBSH_MASK1 ); \
xmm1 = _mm_shuffle_epi8( xmm1, SUBSH_MASK3 ); \
xmm2 = _mm_shuffle_epi8( xmm2, SUBSH_MASK5 ); \
xmm3 = _mm_shuffle_epi8( xmm3, SUBSH_MASK7 ); \
xmm4 = _mm_shuffle_epi8( xmm4, SUBSH_MASK0 ); \
xmm5 = _mm_shuffle_epi8( xmm5, SUBSH_MASK2 ); \
xmm6 = _mm_shuffle_epi8( xmm6, SUBSH_MASK4 ); \
xmm7 = _mm_shuffle_epi8( xmm7, SUBSH_MASK6 ); \
xmm0 = v128_shuffle8( xmm0, SUBSH_MASK1 ); \
xmm1 = v128_shuffle8( xmm1, SUBSH_MASK3 ); \
xmm2 = v128_shuffle8( xmm2, SUBSH_MASK5 ); \
xmm3 = v128_shuffle8( xmm3, SUBSH_MASK7 ); \
xmm4 = v128_shuffle8( xmm4, SUBSH_MASK0 ); \
xmm5 = v128_shuffle8( xmm5, SUBSH_MASK2 ); \
xmm6 = v128_shuffle8( xmm6, SUBSH_MASK4 ); \
xmm7 = v128_shuffle8( xmm7, SUBSH_MASK6 ); \
/* SubBytes + MixBytes */\
SUBMIX( xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, \
xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15 ); \
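The two loops above run the 14 rounds of the P and Q permutations, two rounds per iteration. The asymmetry sits in AddRoundConstant: P XORs one table entry into the first row only, while Q complements every row (v128_neg1) and XORs its table entry into the last row, and the two loops also apply the SUBSH_MASK constants in a different order because ShiftBytes rotates the rows differently in P and Q. A scalar sketch of the AddRoundConstant halves, consistent with the tables shown earlier (illustrative only; state[r][c] is row r, column c of the 8x16 state):

#include <stdint.h>

/* Illustrative scalar AddRoundConstant for the P and Q permutations,
 * mirroring what the loops do with round_const_p, round_const_q and
 * v128_neg1. */
static void add_round_constant( uint8_t state[8][16], int round, int is_q )
{
   if ( !is_q )
   {
      /* P: XOR (c<<4)^round into row 0, leave the other rows untouched. */
      for ( int c = 0; c < 16; c++ )
         state[0][c] ^= (uint8_t)( ( c << 4 ) ^ round );
   }
   else
   {
      /* Q: complement every byte, then XOR (c<<4)^round into row 7.
       * Combined, row 7 receives ~((c<<4)^round), which is exactly the
       * round_const_q entry, while rows 0-6 receive 0xff. */
      for ( int r = 0; r < 8; r++ )
         for ( int c = 0; c < 16; c++ )
            state[r][c] ^= 0xff;
      for ( int c = 0; c < 16; c++ )
         state[7][c] ^= (uint8_t)( ( c << 4 ) ^ round );
   }
}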
@@ -397,70 +409,70 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
#define Matrix_Transpose(i0, i1, i2, i3, i4, i5, i6, i7, t0, t1, t2, t3, t4, t5, t6, t7){\
t0 = TRANSP_MASK; \
\
i6 = _mm_shuffle_epi8(i6, t0);\
i0 = _mm_shuffle_epi8(i0, t0);\
i1 = _mm_shuffle_epi8(i1, t0);\
i2 = _mm_shuffle_epi8(i2, t0);\
i3 = _mm_shuffle_epi8(i3, t0);\
i6 = v128_shuffle8(i6, t0);\
i0 = v128_shuffle8(i0, t0);\
i1 = v128_shuffle8(i1, t0);\
i2 = v128_shuffle8(i2, t0);\
i3 = v128_shuffle8(i3, t0);\
t1 = i2;\
i4 = _mm_shuffle_epi8(i4, t0);\
i5 = _mm_shuffle_epi8(i5, t0);\
i4 = v128_shuffle8(i4, t0);\
i5 = v128_shuffle8(i5, t0);\
t2 = i4;\
t3 = i6;\
i7 = _mm_shuffle_epi8(i7, t0);\
i7 = v128_shuffle8(i7, t0);\
\
/* continue with unpack using 4 temp registers */\
t0 = i0;\
t2 = _mm_unpackhi_epi16(t2, i5);\
i4 = _mm_unpacklo_epi16(i4, i5);\
t3 = _mm_unpackhi_epi16(t3, i7);\
i6 = _mm_unpacklo_epi16(i6, i7);\
t0 = _mm_unpackhi_epi16(t0, i1);\
t1 = _mm_unpackhi_epi16(t1, i3);\
i2 = _mm_unpacklo_epi16(i2, i3);\
i0 = _mm_unpacklo_epi16(i0, i1);\
t2 = v128_unpackhi16(t2, i5);\
i4 = v128_unpacklo16(i4, i5);\
t3 = v128_unpackhi16(t3, i7);\
i6 = v128_unpacklo16(i6, i7);\
t0 = v128_unpackhi16(t0, i1);\
t1 = v128_unpackhi16(t1, i3);\
i2 = v128_unpacklo16(i2, i3);\
i0 = v128_unpacklo16(i0, i1);\
\
/* shuffle with immediate */\
t0 = _mm_shuffle_epi32(t0, 216);\
t1 = _mm_shuffle_epi32(t1, 216);\
t2 = _mm_shuffle_epi32(t2, 216);\
t3 = _mm_shuffle_epi32(t3, 216);\
i0 = _mm_shuffle_epi32(i0, 216);\
i2 = _mm_shuffle_epi32(i2, 216);\
i4 = _mm_shuffle_epi32(i4, 216);\
i6 = _mm_shuffle_epi32(i6, 216);\
t0 = gr_shuffle32( t0 ); \
t1 = gr_shuffle32( t1 ); \
t2 = gr_shuffle32( t2 ); \
t3 = gr_shuffle32( t3 ); \
i0 = gr_shuffle32( i0 ); \
i2 = gr_shuffle32( i2 ); \
i4 = gr_shuffle32( i4 ); \
i6 = gr_shuffle32( i6 ); \
\
/* continue with unpack */\
t4 = i0;\
i0 = _mm_unpacklo_epi32(i0, i2);\
t4 = _mm_unpackhi_epi32(t4, i2);\
i0 = v128_unpacklo32(i0, i2);\
t4 = v128_unpackhi32(t4, i2);\
t5 = t0;\
t0 = _mm_unpacklo_epi32(t0, t1);\
t5 = _mm_unpackhi_epi32(t5, t1);\
t0 = v128_unpacklo32(t0, t1);\
t5 = v128_unpackhi32(t5, t1);\
t6 = i4;\
i4 = _mm_unpacklo_epi32(i4, i6);\
i4 = v128_unpacklo32(i4, i6);\
t7 = t2;\
t6 = _mm_unpackhi_epi32(t6, i6);\
t6 = v128_unpackhi32(t6, i6);\
i2 = t0;\
t2 = _mm_unpacklo_epi32(t2, t3);\
t2 = v128_unpacklo32(t2, t3);\
i3 = t0;\
t7 = _mm_unpackhi_epi32(t7, t3);\
t7 = v128_unpackhi32(t7, t3);\
\
/* there are now 2 rows in each xmm */\
/* unpack to get 1 row of CV in each xmm */\
i1 = i0;\
i1 = _mm_unpackhi_epi64(i1, i4);\
i0 = _mm_unpacklo_epi64(i0, i4);\
i1 = v128_unpackhi64(i1, i4);\
i0 = v128_unpacklo64(i0, i4);\
i4 = t4;\
i3 = _mm_unpackhi_epi64(i3, t2);\
i3 = v128_unpackhi64(i3, t2);\
i5 = t4;\
i2 = _mm_unpacklo_epi64(i2, t2);\
i2 = v128_unpacklo64(i2, t2);\
i6 = t5;\
i5 = _mm_unpackhi_epi64(i5, t6);\
i5 = v128_unpackhi64(i5, t6);\
i7 = t5;\
i4 = _mm_unpacklo_epi64(i4, t6);\
i7 = _mm_unpackhi_epi64(i7, t7);\
i6 = _mm_unpacklo_epi64(i6, t7);\
i4 = v128_unpacklo64(i4, t6);\
i7 = v128_unpackhi64(i7, t7);\
i6 = v128_unpacklo64(i6, t7);\
/* transpose done */\
}/**/

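Matrix_Transpose converts the 1024-bit block from the byte order it is loaded in into the layout the round macros expect, ending with, as the comment says, one row of the CV per register; Matrix_Transpose_INV below undoes it for output. Conceptually the data movement is a column-to-row reordering of an 8x16 byte matrix; a scalar view of that reordering (illustrative only, layout assumption noted in the comment):

#include <stdint.h>

/* Assumed layout: the 128 input bytes arrive as 16 consecutive 8-byte
 * columns; the output is 8 rows of 16 bytes, one row per 128-bit register
 * in the SIMD code.  The real macro does this with byte shuffles
 * (TRANSP_MASK), widening unpacks and gr_shuffle32. */
static void columns_to_rows( uint8_t rows[8][16], const uint8_t in[128] )
{
   for ( int c = 0; c < 16; c++ )
      for ( int r = 0; r < 8; r++ )
         rows[r][c] = in[ 8*c + r ];
}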
@@ -471,74 +483,76 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
* outputs: (i0, o0, i1, i3, o1, o2, i5, i7)
* clobbers: t0-t4
*/
#define Matrix_Transpose_INV(i0, i1, i2, i3, i4, i5, i6, i7, o0, o1, o2, t0, t1, t2, t3, t4){\
#define Matrix_Transpose_INV( i0, i1, i2, i3, i4, i5, i6, i7, \
o0, o1, o2, t0, t1, t2, t3, t4 ) \
{ \
/* transpose matrix to get output format */\
o1 = i0;\
i0 = _mm_unpacklo_epi64(i0, i1);\
o1 = _mm_unpackhi_epi64(o1, i1);\
t0 = i2;\
i2 = _mm_unpacklo_epi64(i2, i3);\
t0 = _mm_unpackhi_epi64(t0, i3);\
t1 = i4;\
i4 = _mm_unpacklo_epi64(i4, i5);\
t1 = _mm_unpackhi_epi64(t1, i5);\
t2 = i6;\
o1 = i0; \
i0 = v128_unpacklo64( i0, i1 ); \
o1 = v128_unpackhi64( o1, i1 ); \
t0 = i2; \
i2 = v128_unpacklo64( i2, i3 ); \
t0 = v128_unpackhi64( t0, i3 ); \
t1 = i4; \
i4 = v128_unpacklo64( i4, i5 ); \
t1 = v128_unpackhi64( t1, i5 ); \
t2 = i6; \
o0 = TRANSP_MASK; \
i6 = _mm_unpacklo_epi64(i6, i7);\
t2 = _mm_unpackhi_epi64(t2, i7);\
i6 = v128_unpacklo64( i6, i7 ); \
t2 = v128_unpackhi64( t2, i7 ); \
/* load transpose mask into a register, because it will be used 8 times */\
i0 = _mm_shuffle_epi8(i0, o0);\
i2 = _mm_shuffle_epi8(i2, o0);\
i4 = _mm_shuffle_epi8(i4, o0);\
i6 = _mm_shuffle_epi8(i6, o0);\
o1 = _mm_shuffle_epi8(o1, o0);\
t0 = _mm_shuffle_epi8(t0, o0);\
t1 = _mm_shuffle_epi8(t1, o0);\
t2 = _mm_shuffle_epi8(t2, o0);\
i0 = v128_shuffle8( i0, o0 ); \
i2 = v128_shuffle8( i2, o0 ); \
i4 = v128_shuffle8( i4, o0 ); \
i6 = v128_shuffle8( i6, o0 ); \
o1 = v128_shuffle8( o1, o0 ); \
t0 = v128_shuffle8( t0, o0 ); \
t1 = v128_shuffle8( t1, o0 ); \
t2 = v128_shuffle8( t2, o0 ); \
/* continue with unpack using 4 temp registers */\
t3 = i4;\
o2 = o1;\
o0 = i0;\
t4 = t1;\
t3 = i4; \
o2 = o1; \
o0 = i0; \
t4 = t1; \
\
t3 = _mm_unpackhi_epi16(t3, i6);\
i4 = _mm_unpacklo_epi16(i4, i6);\
o0 = _mm_unpackhi_epi16(o0, i2);\
i0 = _mm_unpacklo_epi16(i0, i2);\
o2 = _mm_unpackhi_epi16(o2, t0);\
o1 = _mm_unpacklo_epi16(o1, t0);\
t4 = _mm_unpackhi_epi16(t4, t2);\
t1 = _mm_unpacklo_epi16(t1, t2);\
t3 = v128_unpackhi16( t3, i6 ); \
i4 = v128_unpacklo16( i4, i6 ); \
o0 = v128_unpackhi16( o0, i2 ); \
i0 = v128_unpacklo16( i0, i2 ); \
o2 = v128_unpackhi16( o2, t0 ); \
o1 = v128_unpacklo16( o1, t0 ); \
t4 = v128_unpackhi16( t4, t2 ); \
t1 = v128_unpacklo16( t1, t2 ); \
/* shuffle with immediate */\
i4 = _mm_shuffle_epi32(i4, 216);\
t3 = _mm_shuffle_epi32(t3, 216);\
o1 = _mm_shuffle_epi32(o1, 216);\
o2 = _mm_shuffle_epi32(o2, 216);\
i0 = _mm_shuffle_epi32(i0, 216);\
o0 = _mm_shuffle_epi32(o0, 216);\
t1 = _mm_shuffle_epi32(t1, 216);\
t4 = _mm_shuffle_epi32(t4, 216);\
i4 = gr_shuffle32( i4 ); \
t3 = gr_shuffle32( t3 ); \
o1 = gr_shuffle32( o1 ); \
o2 = gr_shuffle32( o2 ); \
i0 = gr_shuffle32( i0 ); \
o0 = gr_shuffle32( o0 ); \
t1 = gr_shuffle32( t1 ); \
t4 = gr_shuffle32( t4 ); \
/* continue with unpack */\
i1 = i0;\
i3 = o0;\
i5 = o1;\
i7 = o2;\
i0 = _mm_unpacklo_epi32(i0, i4);\
i1 = _mm_unpackhi_epi32(i1, i4);\
o0 = _mm_unpacklo_epi32(o0, t3);\
i3 = _mm_unpackhi_epi32(i3, t3);\
o1 = _mm_unpacklo_epi32(o1, t1);\
i5 = _mm_unpackhi_epi32(i5, t1);\
o2 = _mm_unpacklo_epi32(o2, t4);\
i7 = _mm_unpackhi_epi32(i7, t4);\
i1 = i0; \
i3 = o0; \
i5 = o1; \
i7 = o2; \
i0 = v128_unpacklo32( i0, i4 ); \
i1 = v128_unpackhi32( i1, i4 ); \
o0 = v128_unpacklo32( o0, t3 ); \
i3 = v128_unpackhi32( i3, t3 ); \
o1 = v128_unpacklo32( o1, t1 ); \
i5 = v128_unpackhi32( i5, t1 ); \
o2 = v128_unpacklo32( o2, t4 ); \
i7 = v128_unpackhi32( i7, t4 ); \
/* transpose done */\
}/**/


void INIT( __m128i* chaining )
void INIT( v128_t* chaining )
{
static __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static __m128i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
static v128_t xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static v128_t xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;

/* load IV into registers xmm8 - xmm15 */
xmm8 = chaining[0];
@@ -564,14 +578,14 @@ void INIT( __m128i* chaining )
chaining[7] = xmm15;
}

void TF1024( __m128i* chaining, const __m128i* message )
void TF1024( v128_t* chaining, const v128_t* message )
{
static __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static __m128i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
static __m128i QTEMP[8];
static __m128i TEMP0;
static __m128i TEMP1;
static __m128i TEMP2;
static v128_t xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static v128_t xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
static v128_t QTEMP[8];
static v128_t TEMP0;
static v128_t TEMP1;
static v128_t TEMP2;

#ifdef IACA_TRACE
IACA_START;
@@ -602,14 +616,14 @@ void TF1024( __m128i* chaining, const __m128i* message )

/* xor CV to message to get P input */
/* result: CV+M in xmm8...xmm15 */
xmm8 = _mm_xor_si128(xmm8, (chaining[0]));
xmm9 = _mm_xor_si128(xmm9, (chaining[1]));
xmm10 = _mm_xor_si128(xmm10, (chaining[2]));
xmm11 = _mm_xor_si128(xmm11, (chaining[3]));
xmm12 = _mm_xor_si128(xmm12, (chaining[4]));
xmm13 = _mm_xor_si128(xmm13, (chaining[5]));
xmm14 = _mm_xor_si128(xmm14, (chaining[6]));
xmm15 = _mm_xor_si128(xmm15, (chaining[7]));
xmm8 = v128_xor(xmm8, (chaining[0]));
xmm9 = v128_xor(xmm9, (chaining[1]));
xmm10 = v128_xor(xmm10, (chaining[2]));
xmm11 = v128_xor(xmm11, (chaining[3]));
xmm12 = v128_xor(xmm12, (chaining[4]));
xmm13 = v128_xor(xmm13, (chaining[5]));
xmm14 = v128_xor(xmm14, (chaining[6]));
xmm15 = v128_xor(xmm15, (chaining[7]));

/* compute permutation P */
/* result: P(CV+M) in xmm8...xmm15 */
@@ -617,14 +631,14 @@ void TF1024( __m128i* chaining, const __m128i* message )

/* xor CV to P output (feed-forward) */
/* result: P(CV+M)+CV in xmm8...xmm15 */
xmm8 = _mm_xor_si128(xmm8, (chaining[0]));
xmm9 = _mm_xor_si128(xmm9, (chaining[1]));
xmm10 = _mm_xor_si128(xmm10, (chaining[2]));
xmm11 = _mm_xor_si128(xmm11, (chaining[3]));
xmm12 = _mm_xor_si128(xmm12, (chaining[4]));
xmm13 = _mm_xor_si128(xmm13, (chaining[5]));
xmm14 = _mm_xor_si128(xmm14, (chaining[6]));
xmm15 = _mm_xor_si128(xmm15, (chaining[7]));
xmm8 = v128_xor(xmm8, (chaining[0]));
xmm9 = v128_xor(xmm9, (chaining[1]));
xmm10 = v128_xor(xmm10, (chaining[2]));
xmm11 = v128_xor(xmm11, (chaining[3]));
xmm12 = v128_xor(xmm12, (chaining[4]));
xmm13 = v128_xor(xmm13, (chaining[5]));
xmm14 = v128_xor(xmm14, (chaining[6]));
xmm15 = v128_xor(xmm15, (chaining[7]));

/* store P(CV+M)+CV */
chaining[0] = xmm8;
@@ -652,14 +666,14 @@ void TF1024( __m128i* chaining, const __m128i* message )

/* xor Q output */
/* result: P(CV+M)+CV+Q(M) in xmm8...xmm15 */
xmm8 = _mm_xor_si128(xmm8, (chaining[0]));
xmm9 = _mm_xor_si128(xmm9, (chaining[1]));
xmm10 = _mm_xor_si128(xmm10, (chaining[2]));
xmm11 = _mm_xor_si128(xmm11, (chaining[3]));
xmm12 = _mm_xor_si128(xmm12, (chaining[4]));
xmm13 = _mm_xor_si128(xmm13, (chaining[5]));
xmm14 = _mm_xor_si128(xmm14, (chaining[6]));
xmm15 = _mm_xor_si128(xmm15, (chaining[7]));
xmm8 = v128_xor(xmm8, (chaining[0]));
xmm9 = v128_xor(xmm9, (chaining[1]));
xmm10 = v128_xor(xmm10, (chaining[2]));
xmm11 = v128_xor(xmm11, (chaining[3]));
xmm12 = v128_xor(xmm12, (chaining[4]));
xmm13 = v128_xor(xmm13, (chaining[5]));
xmm14 = v128_xor(xmm14, (chaining[6]));
xmm15 = v128_xor(xmm15, (chaining[7]));

/* store CV */
chaining[0] = xmm8;
@@ -678,13 +692,13 @@ void TF1024( __m128i* chaining, const __m128i* message )
return;
}

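The comments in TF1024 spell out the Grøstl compression function: the new chaining value is P(CV ⊕ M) ⊕ Q(M) ⊕ CV, and OF1024 below is the output transformation P(CV) ⊕ CV applied before the final truncation. A compact reference of that dataflow (illustrative only; the permutation arguments are hypothetical stand-ins for what the macros above implement directly in registers):

#include <stdint.h>
#include <string.h>

typedef void (*permutation_fn)( uint8_t state[128] );   /* hypothetical */

/* Compression as described by the TF1024 comments:
 * CV' = P( CV ^ M ) ^ Q( M ) ^ CV.  The real code keeps the state in
 * 128-bit registers and interleaves the transposes; this shows only the
 * high-level dataflow. */
static void compress1024( uint8_t cv[128], const uint8_t msg[128],
                          permutation_fn P, permutation_fn Q )
{
   uint8_t p_in[128], q_in[128];
   for ( int i = 0; i < 128; i++ )  p_in[i] = cv[i] ^ msg[i];
   memcpy( q_in, msg, 128 );
   P( p_in );
   Q( q_in );
   for ( int i = 0; i < 128; i++ )  cv[i] ^= p_in[i] ^ q_in[i];
}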
void OF1024( __m128i* chaining )
void OF1024( v128_t* chaining )
{
static __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static __m128i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
static __m128i TEMP0;
static __m128i TEMP1;
static __m128i TEMP2;
static v128_t xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static v128_t xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
static v128_t TEMP0;
static v128_t TEMP1;
static v128_t TEMP2;

/* load CV into registers xmm8 - xmm15 */
xmm8 = chaining[0];
@@ -702,14 +716,14 @@ void OF1024( __m128i* chaining )

/* xor CV to P output (feed-forward) */
/* result: P(CV)+CV in xmm8...xmm15 */
xmm8 = _mm_xor_si128(xmm8, (chaining[0]));
xmm9 = _mm_xor_si128(xmm9, (chaining[1]));
xmm10 = _mm_xor_si128(xmm10, (chaining[2]));
xmm11 = _mm_xor_si128(xmm11, (chaining[3]));
xmm12 = _mm_xor_si128(xmm12, (chaining[4]));
xmm13 = _mm_xor_si128(xmm13, (chaining[5]));
xmm14 = _mm_xor_si128(xmm14, (chaining[6]));
xmm15 = _mm_xor_si128(xmm15, (chaining[7]));
xmm8 = v128_xor(xmm8, (chaining[0]));
xmm9 = v128_xor(xmm9, (chaining[1]));
xmm10 = v128_xor(xmm10, (chaining[2]));
xmm11 = v128_xor(xmm11, (chaining[3]));
xmm12 = v128_xor(xmm12, (chaining[4]));
xmm13 = v128_xor(xmm13, (chaining[5]));
xmm14 = v128_xor(xmm14, (chaining[6]));
xmm15 = v128_xor(xmm15, (chaining[7]));

/* transpose CV back from row ordering to column ordering */
/* result: final hash value in xmm0, xmm6, xmm13, xmm15 */