mirror of https://github.com/JayDDee/cpuminer-opt.git
v3.8.4.1
@@ -55,23 +55,23 @@ static inline uint64_t rotr64( const uint64_t w, const unsigned c ){
// returns void, updates all args
#define G_4X64(a,b,c,d) \
   a = _mm256_add_epi64( a, b ); \
   d = mm256_rotr_64( _mm256_xor_si256( d, a), 32 ); \
   d = mm256_ror_64( _mm256_xor_si256( d, a), 32 ); \
   c = _mm256_add_epi64( c, d ); \
   b = mm256_rotr_64( _mm256_xor_si256( b, c ), 24 ); \
   b = mm256_ror_64( _mm256_xor_si256( b, c ), 24 ); \
   a = _mm256_add_epi64( a, b ); \
   d = mm256_rotr_64( _mm256_xor_si256( d, a ), 16 ); \
   d = mm256_ror_64( _mm256_xor_si256( d, a ), 16 ); \
   c = _mm256_add_epi64( c, d ); \
   b = mm256_rotr_64( _mm256_xor_si256( b, c ), 63 );
   b = mm256_ror_64( _mm256_xor_si256( b, c ), 63 );

#define LYRA_ROUND_AVX2( s0, s1, s2, s3 ) \
   G_4X64( s0, s1, s2, s3 ); \
   s1 = mm256_rotr256_1x64( s1); \
   s1 = mm256_ror256_1x64( s1); \
   s2 = mm256_swap_128( s2 ); \
   s3 = mm256_rotl256_1x64( s3 ); \
   s3 = mm256_rol256_1x64( s3 ); \
   G_4X64( s0, s1, s2, s3 ); \
   s1 = mm256_rotl256_1x64( s1 ); \
   s1 = mm256_rol256_1x64( s1 ); \
   s2 = mm256_swap_128( s2 ); \
   s3 = mm256_rotr256_1x64( s3 );
   s3 = mm256_ror256_1x64( s3 );

#define LYRA_12_ROUNDS_AVX2( s0, s1, s2, s3 ) \
   LYRA_ROUND_AVX2( s0, s1, s2, s3 ) \
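The hunk above is a pure rename: the BLAKE2b-style G function keeps its rotation constants (32, 24, 16, 63), and only mm256_rotr_64 / mm256_rotl256_1x64 become mm256_ror_64 / mm256_rol256_1x64. As a rough illustration of what such a helper does (an assumed sketch, not the repository's avxdefs.h definition), a lane-wise 64-bit right rotation on AVX2 can be composed from two shifts and an OR:

// Assumed sketch of a 64-bit lane rotation; not the project's real macro.
#define mm256_ror_64_sketch( v, c ) \
   _mm256_or_si256( _mm256_srli_epi64( v, c ), \
                    _mm256_slli_epi64( v, 64 - (c) ) )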
@@ -94,25 +94,25 @@ static inline uint64_t rotr64( const uint64_t w, const unsigned c ){
// returns void, all args updated
#define G_2X64(a,b,c,d) \
   a = _mm_add_epi64( a, b ); \
   d = mm_rotr_64( _mm_xor_si128( d, a), 32 ); \
   d = mm_ror_64( _mm_xor_si128( d, a), 32 ); \
   c = _mm_add_epi64( c, d ); \
   b = mm_rotr_64( _mm_xor_si128( b, c ), 24 ); \
   b = mm_ror_64( _mm_xor_si128( b, c ), 24 ); \
   a = _mm_add_epi64( a, b ); \
   d = mm_rotr_64( _mm_xor_si128( d, a ), 16 ); \
   d = mm_ror_64( _mm_xor_si128( d, a ), 16 ); \
   c = _mm_add_epi64( c, d ); \
   b = mm_rotr_64( _mm_xor_si128( b, c ), 63 );
   b = mm_ror_64( _mm_xor_si128( b, c ), 63 );

#define LYRA_ROUND_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \
   G_2X64( s0, s2, s4, s6 ); \
   G_2X64( s1, s3, s5, s7 ); \
   mm_rotr256_1x64( s2, s3 ); \
   mm_ror256_1x64( s2, s3 ); \
   mm_swap_128( s4, s5 ); \
   mm_rotl256_1x64( s6, s7 ); \
   mm_rol256_1x64( s6, s7 ); \
   G_2X64( s0, s2, s4, s6 ); \
   G_2X64( s1, s3, s5, s7 ); \
   mm_rotl256_1x64( s2, s3 ); \
   mm_rol256_1x64( s2, s3 ); \
   mm_swap_128( s4, s5 ); \
   mm_rotr256_1x64( s6, s7 );
   mm_ror256_1x64( s6, s7 );

#define LYRA_12_ROUNDS_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \
   LYRA_ROUND_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \

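The 128-bit path mirrors the AVX2 path, except that the cross-lane helpers (mm_ror256_1x64, mm_rol256_1x64, mm_swap_128) here act on a register pair and update both arguments in place, which is why they appear as statements rather than assignments. A minimal sketch of such a pair rotation, assuming SSSE3 and a hypothetical argument order (not the repository's definition):

// Hypothetical sketch: rotate the 256-bit value held in (hi:lo) right by
// one 64-bit lane, updating both halves in place.
#define mm_ror256_1x64_sketch( lo, hi ) \
do { \
   __m128i t = _mm_alignr_epi8( hi, lo, 8 );  /* low half of (hi:lo) >> 64 */ \
   hi = _mm_alignr_epi8( lo, hi, 8 );         /* high half of the rotation */ \
   lo = t; \
} while (0)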
@@ -155,7 +155,7 @@ bool register_sha256t_algo( algo_gate_t* gate )
   gate->optimizations = SSE2_OPT | AVX_OPT | AVX2_OPT | SHA_OPT;
   gate->scanhash = (void*)&scanhash_sha256t;
   gate->hash = (void*)&sha256t_hash;
   gate->set_target = (void*)&sha256t_set_target;
// gate->set_target = (void*)&sha256t_set_target;
   gate->get_max64 = (void*)&get_max64_0x3ffff;
   return true;
}

@@ -52,21 +52,6 @@ extern "C"{

#define C32 SPH_C32

/*
 * As of round 2 of the SHA-3 competition, the published reference
 * implementation and test vectors are wrong, because they use
 * big-endian AES tables while the internal decoding uses little-endian.
 * The code below follows the specification. To turn it into a code
 * which follows the reference implementation (the one called "BugFix"
 * on the SHAvite-3 web site, published on Nov 23rd, 2009), comment out
 * the code below (from the '#define AES_BIG_ENDIAN...' to the definition
 * of the AES_ROUND_NOKEY macro) and replace it with the version which
 * is commented out afterwards.
 */

#define AES_BIG_ENDIAN 0
#include "algo/sha/aes_helper.c"

static const sph_u32 IV512[] = {
   C32(0x72FCCDD8), C32(0x79CA4727), C32(0x128A077B), C32(0x40D55AEC),
   C32(0xD1901A06), C32(0x430AE307), C32(0xB29F5CD1), C32(0xDF07FBFC),
@@ -74,210 +59,19 @@ static const sph_u32 IV512[] = {
   C32(0xE275EADE), C32(0x502D9FCD), C32(0xB9357178), C32(0x022A4B9A)
};

// Return hi 128 bits with elements shifted one lane with vacated lane filled
// with data rotated from lo.
// Partially rotate elements in two 128 bit vectors as one 256 bit vector
// and return the rotated high 128 bits.
// Similar to mm_rotr256_1x32 but only a partial rotation as lo is not
// completed. It's faster than a full rotation.
#if defined(__SSSE3__)

static inline __m128i mm_rotr256hi_1x32( __m128i hi, __m128i lo )
{ return _mm_or_si128( _mm_srli_si128( hi, 4 ),
                       _mm_slli_si128( lo, 12 ) );
}
#define mm_rotr256hi_1x32( hi, lo ) _mm_alignr_epi8( lo, hi, 4 )

#define AES_ROUND_NOKEY(x0, x1, x2, x3) do { \
   sph_u32 t0 = (x0); \
   sph_u32 t1 = (x1); \
   sph_u32 t2 = (x2); \
   sph_u32 t3 = (x3); \
   AES_ROUND_NOKEY_LE(t0, t1, t2, t3, x0, x1, x2, x3); \
} while (0)
#else // SSE2

#define KEY_EXPAND_ELT(k0, k1, k2, k3) do { \
   sph_u32 kt; \
   AES_ROUND_NOKEY(k1, k2, k3, k0); \
   kt = (k0); \
   (k0) = (k1); \
   (k1) = (k2); \
   (k2) = (k3); \
   (k3) = kt; \
} while (0)
#define mm_rotr256hi_1x32( hi, lo ) \
   _mm_or_si128( _mm_srli_si128( hi, 4 ), \
                 _mm_slli_si128( lo, 12 ) )

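For reference, the two definitions of mm_rotr256hi_1x32 above are interchangeable: _mm_alignr_epi8( lo, hi, 4 ) concatenates lo:hi, shifts the pair right by four bytes and keeps the low 128 bits, which is exactly what the SSE2 _mm_srli_si128 / _mm_slli_si128 / _mm_or_si128 combination computes. A small stand-alone check (a hypothetical test harness, not part of the source tree):

#include <stdio.h>
#include <string.h>
#include <tmmintrin.h>   // SSSE3

int main(void)
{
   __m128i hi = _mm_set_epi32( 7, 6, 5, 4 );
   __m128i lo = _mm_set_epi32( 3, 2, 1, 0 );
   __m128i a  = _mm_alignr_epi8( lo, hi, 4 );
   __m128i b  = _mm_or_si128( _mm_srli_si128( hi, 4 ),
                              _mm_slli_si128( lo, 12 ) );
   // Both expressions implement the same partial rotation by one 32-bit
   // lane, so this prints "equal".
   printf( "%s\n", memcmp( &a, &b, 16 ) == 0 ? "equal" : "differ" );
   return 0;
}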
#if SPH_SMALL_FOOTPRINT_SHAVITE

/*
 * This function assumes that "msg" is aligned for 32-bit access.
 */
static void
c512(sph_shavite_big_context *sc, const void *msg)
{
   sph_u32 p0, p1, p2, p3, p4, p5, p6, p7;
   sph_u32 p8, p9, pA, pB, pC, pD, pE, pF;
   sph_u32 rk[448];
   size_t u;
   int r, s;

#if SPH_LITTLE_ENDIAN
   memcpy(rk, msg, 128);
#else
   for (u = 0; u < 32; u += 4) {
      rk[u + 0] = sph_dec32le_aligned(
         (const unsigned char *)msg + (u << 2) + 0);
      rk[u + 1] = sph_dec32le_aligned(
         (const unsigned char *)msg + (u << 2) + 4);
      rk[u + 2] = sph_dec32le_aligned(
         (const unsigned char *)msg + (u << 2) + 8);
      rk[u + 3] = sph_dec32le_aligned(
         (const unsigned char *)msg + (u << 2) + 12);
   }
#endif
   u = 32;
   for (;;) {
      for (s = 0; s < 4; s ++) {
         sph_u32 x0, x1, x2, x3;

         x0 = rk[u - 31];
         x1 = rk[u - 30];
         x2 = rk[u - 29];
         x3 = rk[u - 32];
         AES_ROUND_NOKEY(x0, x1, x2, x3);
         rk[u + 0] = x0 ^ rk[u - 4];
         rk[u + 1] = x1 ^ rk[u - 3];
         rk[u + 2] = x2 ^ rk[u - 2];
         rk[u + 3] = x3 ^ rk[u - 1];
         if (u == 32) {
            rk[ 32] ^= sc->count0;
            rk[ 33] ^= sc->count1;
            rk[ 34] ^= sc->count2;
            rk[ 35] ^= SPH_T32(~sc->count3);
         } else if (u == 440) {
            rk[440] ^= sc->count1;
            rk[441] ^= sc->count0;
            rk[442] ^= sc->count3;
            rk[443] ^= SPH_T32(~sc->count2);
         }
         u += 4;

         x0 = rk[u - 31];
         x1 = rk[u - 30];
         x2 = rk[u - 29];
         x3 = rk[u - 32];
         AES_ROUND_NOKEY(x0, x1, x2, x3);
         rk[u + 0] = x0 ^ rk[u - 4];
         rk[u + 1] = x1 ^ rk[u - 3];
         rk[u + 2] = x2 ^ rk[u - 2];
         rk[u + 3] = x3 ^ rk[u - 1];
         if (u == 164) {
            rk[164] ^= sc->count3;
            rk[165] ^= sc->count2;
            rk[166] ^= sc->count1;
            rk[167] ^= SPH_T32(~sc->count0);
         } else if (u == 316) {
            rk[316] ^= sc->count2;
            rk[317] ^= sc->count3;
            rk[318] ^= sc->count0;
            rk[319] ^= SPH_T32(~sc->count1);
         }
         u += 4;
      }
      if (u == 448)
         break;
      for (s = 0; s < 8; s ++) {
         rk[u + 0] = rk[u - 32] ^ rk[u - 7];
         rk[u + 1] = rk[u - 31] ^ rk[u - 6];
         rk[u + 2] = rk[u - 30] ^ rk[u - 5];
         rk[u + 3] = rk[u - 29] ^ rk[u - 4];
         u += 4;
      }
   }

   p0 = sc->h[0x0];
   p1 = sc->h[0x1];
   p2 = sc->h[0x2];
   p3 = sc->h[0x3];
   p4 = sc->h[0x4];
   p5 = sc->h[0x5];
   p6 = sc->h[0x6];
   p7 = sc->h[0x7];
   p8 = sc->h[0x8];
   p9 = sc->h[0x9];
   pA = sc->h[0xA];
   pB = sc->h[0xB];
   pC = sc->h[0xC];
   pD = sc->h[0xD];
   pE = sc->h[0xE];
   pF = sc->h[0xF];
   u = 0;
   for (r = 0; r < 14; r ++) {
#define C512_ELT(l0, l1, l2, l3, r0, r1, r2, r3) do { \
      sph_u32 x0, x1, x2, x3; \
      x0 = r0 ^ rk[u ++]; \
      x1 = r1 ^ rk[u ++]; \
      x2 = r2 ^ rk[u ++]; \
      x3 = r3 ^ rk[u ++]; \
      AES_ROUND_NOKEY(x0, x1, x2, x3); \
      x0 ^= rk[u ++]; \
      x1 ^= rk[u ++]; \
      x2 ^= rk[u ++]; \
      x3 ^= rk[u ++]; \
      AES_ROUND_NOKEY(x0, x1, x2, x3); \
      x0 ^= rk[u ++]; \
      x1 ^= rk[u ++]; \
      x2 ^= rk[u ++]; \
      x3 ^= rk[u ++]; \
      AES_ROUND_NOKEY(x0, x1, x2, x3); \
      x0 ^= rk[u ++]; \
      x1 ^= rk[u ++]; \
      x2 ^= rk[u ++]; \
      x3 ^= rk[u ++]; \
      AES_ROUND_NOKEY(x0, x1, x2, x3); \
      l0 ^= x0; \
      l1 ^= x1; \
      l2 ^= x2; \
      l3 ^= x3; \
   } while (0)

#define WROT(a, b, c, d) do { \
      sph_u32 t = d; \
      d = c; \
      c = b; \
      b = a; \
      a = t; \
   } while (0)

      C512_ELT(p0, p1, p2, p3, p4, p5, p6, p7);
      C512_ELT(p8, p9, pA, pB, pC, pD, pE, pF);

      WROT(p0, p4, p8, pC);
      WROT(p1, p5, p9, pD);
      WROT(p2, p6, pA, pE);
      WROT(p3, p7, pB, pF);

#undef C512_ELT
#undef WROT
   }
   sc->h[0x0] ^= p0;
   sc->h[0x1] ^= p1;
   sc->h[0x2] ^= p2;
   sc->h[0x3] ^= p3;
   sc->h[0x4] ^= p4;
   sc->h[0x5] ^= p5;
   sc->h[0x6] ^= p6;
   sc->h[0x7] ^= p7;
   sc->h[0x8] ^= p8;
   sc->h[0x9] ^= p9;
   sc->h[0xA] ^= pA;
   sc->h[0xB] ^= pB;
   sc->h[0xC] ^= pC;
   sc->h[0xD] ^= pD;
   sc->h[0xE] ^= pE;
   sc->h[0xF] ^= pF;
}

#else

static void
c512( sph_shavite_big_context *sc, const void *msg )
@@ -331,7 +125,7 @@ c512( sph_shavite_big_context *sc, const void *msg )
   for ( r = 0; r < 3; r ++ )
   {
      // round 1, 5, 9
      k00 = mm_rotr_1x32( _mm_aesenc_si128( k00, m128_zero ) );
      k00 = mm_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) );
      k00 = _mm_xor_si128( k00, k13 );

      if ( r == 0 )
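Again only the helper is renamed (mm_rotr_1x32 to mm_ror_1x32); the key-schedule step itself is unchanged: each 128-bit round key is passed through one AES round with a zero round key, rotated by one 32-bit lane, and XORed with the preceding round key. A plausible one-instruction sketch of that lane rotation (an assumption, not the repository's definition):

// Assumed sketch: rotate the four 32-bit lanes right by one position.
#define mm_ror_1x32_sketch( v )  _mm_shuffle_epi32( v, 0x39 )

With immediate 0x39 the lanes { a3, a2, a1, a0 } become { a0, a3, a2, a1 }, i.e. the vector rotated right by 32 bits.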
@@ -340,7 +134,7 @@ c512( sph_shavite_big_context *sc, const void *msg )

      x = _mm_xor_si128( p0, k00 );
      x = _mm_aesenc_si128( x, m128_zero );
      k01 = mm_rotr_1x32( _mm_aesenc_si128( k01, m128_zero ) );
      k01 = mm_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) );
      k01 = _mm_xor_si128( k01, k00 );

      if ( r == 1 )
@@ -349,33 +143,33 @@ c512( sph_shavite_big_context *sc, const void *msg )

      x = _mm_xor_si128( x, k01 );
      x = _mm_aesenc_si128( x, m128_zero );
      k02 = mm_rotr_1x32( _mm_aesenc_si128( k02, m128_zero ) );
      k02 = mm_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) );
      k02 = _mm_xor_si128( k02, k01 );

      x = _mm_xor_si128( x, k02 );
      x = _mm_aesenc_si128( x, m128_zero );
      k03 = mm_rotr_1x32( _mm_aesenc_si128( k03, m128_zero ) );
      k03 = mm_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) );
      k03 = _mm_xor_si128( k03, k02 );

      x = _mm_xor_si128( x, k03 );
      x = _mm_aesenc_si128( x, m128_zero );
      p3 = _mm_xor_si128( p3, x );
      k10 = mm_rotr_1x32( _mm_aesenc_si128( k10, m128_zero ) );
      k10 = mm_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) );
      k10 = _mm_xor_si128( k10, k03 );

      x = _mm_xor_si128( p2, k10 );
      x = _mm_aesenc_si128( x, m128_zero );
      k11 = mm_rotr_1x32( _mm_aesenc_si128( k11, m128_zero ) );
      k11 = mm_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) );
      k11 = _mm_xor_si128( k11, k10 );

      x = _mm_xor_si128( x, k11 );
      x = _mm_aesenc_si128( x, m128_zero );
      k12 = mm_rotr_1x32( _mm_aesenc_si128( k12, m128_zero ) );
      k12 = mm_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) );
      k12 = _mm_xor_si128( k12, k11 );

      x = _mm_xor_si128( x, k12 );
      x = _mm_aesenc_si128( x, m128_zero );
      k13 = mm_rotr_1x32( _mm_aesenc_si128( k13, m128_zero ) );
      k13 = mm_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) );
      k13 = _mm_xor_si128( k13, k12 );

      if ( r == 2 )
@@ -424,44 +218,44 @@ c512( sph_shavite_big_context *sc, const void *msg )

      // round 3, 7, 11

      k00 = mm_rotr_1x32( _mm_aesenc_si128( k00, m128_zero ) );
      k00 = mm_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) );
      k00 = _mm_xor_si128( k00, k13 );

      x = _mm_xor_si128( p2, k00 );
      x = _mm_aesenc_si128( x, m128_zero );

      k01 = mm_rotr_1x32( _mm_aesenc_si128( k01, m128_zero ) );
      k01 = mm_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) );
      k01 = _mm_xor_si128( k01, k00 );

      x = _mm_xor_si128( x, k01 );
      x = _mm_aesenc_si128( x, m128_zero );
      k02 = mm_rotr_1x32( _mm_aesenc_si128( k02, m128_zero ) );
      k02 = mm_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) );
      k02 = _mm_xor_si128( k02, k01 );

      x = _mm_xor_si128( x, k02 );
      x = _mm_aesenc_si128( x, m128_zero );
      k03 = mm_rotr_1x32( _mm_aesenc_si128( k03, m128_zero ) );
      k03 = mm_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) );
      k03 = _mm_xor_si128( k03, k02 );

      x = _mm_xor_si128( x, k03 );
      x = _mm_aesenc_si128( x, m128_zero );
      p1 = _mm_xor_si128( p1, x );
      k10 = mm_rotr_1x32( _mm_aesenc_si128( k10, m128_zero ) );
      k10 = mm_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) );
      k10 = _mm_xor_si128( k10, k03 );

      x = _mm_xor_si128( p0, k10 );
      x = _mm_aesenc_si128( x, m128_zero );
      k11 = mm_rotr_1x32( _mm_aesenc_si128( k11, m128_zero ) );
      k11 = mm_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) );
      k11 = _mm_xor_si128( k11, k10 );

      x = _mm_xor_si128( x, k11 );
      x = _mm_aesenc_si128( x, m128_zero );
      k12 = mm_rotr_1x32( _mm_aesenc_si128( k12, m128_zero ) );
      k12 = mm_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) );
      k12 = _mm_xor_si128( k12, k11 );

      x = _mm_xor_si128( x, k12 );
      x = _mm_aesenc_si128( x, m128_zero );
      k13 = mm_rotr_1x32( _mm_aesenc_si128( k13, m128_zero ) );
      k13 = mm_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) );
      k13 = _mm_xor_si128( k13, k12 );

      x = _mm_xor_si128( x, k13 );
@@ -508,44 +302,44 @@ c512( sph_shavite_big_context *sc, const void *msg )

   // round 13

   k00 = mm_rotr_1x32( _mm_aesenc_si128( k00, m128_zero ) );
   k00 = mm_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) );
   k00 = _mm_xor_si128( k00, k13 );

   x = _mm_xor_si128( p0, k00 );
   x = _mm_aesenc_si128( x, m128_zero );
   k01 = mm_rotr_1x32( _mm_aesenc_si128( k01, m128_zero ) );
   k01 = mm_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) );
   k01 = _mm_xor_si128( k01, k00 );

   x = _mm_xor_si128( x, k01 );
   x = _mm_aesenc_si128( x, m128_zero );
   k02 = mm_rotr_1x32( _mm_aesenc_si128( k02, m128_zero ) );
   k02 = mm_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) );
   k02 = _mm_xor_si128( k02, k01 );

   x = _mm_xor_si128( x, k02 );
   x = _mm_aesenc_si128( x, m128_zero );
   k03 = mm_rotr_1x32( _mm_aesenc_si128( k03, m128_zero ) );
   k03 = mm_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) );
   k03 = _mm_xor_si128( k03, k02 );

   x = _mm_xor_si128( x, k03 );
   x = _mm_aesenc_si128( x, m128_zero );
   p3 = _mm_xor_si128( p3, x );
   k10 = mm_rotr_1x32( _mm_aesenc_si128( k10, m128_zero ) );
   k10 = mm_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) );
   k10 = _mm_xor_si128( k10, k03 );

   x = _mm_xor_si128( p2, k10 );
   x = _mm_aesenc_si128( x, m128_zero );
   k11 = mm_rotr_1x32( _mm_aesenc_si128( k11, m128_zero ) );
   k11 = mm_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) );
   k11 = _mm_xor_si128( k11, k10 );

   x = _mm_xor_si128( x, k11 );
   x = _mm_aesenc_si128( x, m128_zero );
   k12 = mm_rotr_1x32( _mm_aesenc_si128( k12, m128_zero ) );
   k12 = mm_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) );
   k12 = _mm_xor_si128( k12, _mm_xor_si128( k11, _mm_set_epi32(
                        ~sc->count2, sc->count3, sc->count0, sc->count1 ) ) );

   x = _mm_xor_si128( x, k12 );
   x = _mm_aesenc_si128( x, m128_zero );
   k13 = mm_rotr_1x32( _mm_aesenc_si128( k13, m128_zero ) );
   k13 = mm_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) );
   k13 = _mm_xor_si128( k13, k12 );

   x = _mm_xor_si128( x, k13 );
@@ -558,7 +352,6 @@ c512( sph_shavite_big_context *sc, const void *msg )
   h[3] = _mm_xor_si128( h[3], p1 );
}

#endif

static void
shavite_big_aesni_init( sph_shavite_big_context *sc, const sph_u32 *iv )