Jay D Dee
2019-05-19 13:39:45 -04:00
parent bfd1c002f9
commit e1aead3c76
139 changed files with 10907 additions and 4218 deletions


@@ -64,11 +64,11 @@ static const sph_u32 IV512[] = {
// a[3:0] = { b[0], a[3], a[2], a[1] }
#if defined(__SSSE3__)
-#define mm_ror256hi_1x32( a, b ) _mm_alignr_epi8( b, a, 4 )
+#define mm128_ror256hi_1x32( a, b ) _mm_alignr_epi8( b, a, 4 )
#else // SSE2
-#define mm_ror256hi_1x32( a, b ) \
+#define mm128_ror256hi_1x32( a, b ) \
_mm_or_si128( _mm_srli_si128( a, 4 ), \
_mm_slli_si128( b, 12 ) )
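
Editor's note: the two macro definitions above compute the same permutation, { b[0], a[3], a[2], a[1] }. _mm_alignr_epi8( b, a, 4 ) shifts the 32-byte concatenation b:a right by 4 bytes and keeps the low 16 bytes, which is exactly what the SSE2 OR of the two byte shifts builds by hand. A minimal standalone check, not part of the commit; the test values are invented for illustration (compile with -mssse3):

#include <stdio.h>
#include <stdint.h>
#include <tmmintrin.h>   // SSSE3 _mm_alignr_epi8; pulls in SSE2 intrinsics too

int main(void)
{
   __m128i a = _mm_set_epi32( 0xa3, 0xa2, 0xa1, 0xa0 );
   __m128i b = _mm_set_epi32( 0xb3, 0xb2, 0xb1, 0xb0 );
   // SSSE3 form of mm128_ror256hi_1x32
   __m128i v_ssse3 = _mm_alignr_epi8( b, a, 4 );
   // SSE2 fallback form
   __m128i v_sse2  = _mm_or_si128( _mm_srli_si128( a, 4 ),
                                   _mm_slli_si128( b, 12 ) );
   uint32_t r[4], s[4];
   _mm_storeu_si128( (__m128i*)r, v_ssse3 );
   _mm_storeu_si128( (__m128i*)s, v_sse2 );
   // Both should print b0 a3 a2 a1 (lanes high to low).
   printf( "ssse3: %x %x %x %x\n", (unsigned)r[3], (unsigned)r[2],
                                   (unsigned)r[1], (unsigned)r[0] );
   printf( "sse2:  %x %x %x %x\n", (unsigned)s[3], (unsigned)s[2],
                                   (unsigned)s[1], (unsigned)s[0] );
   return 0;
}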
@@ -136,7 +136,7 @@ c512( sph_shavite_big_context *sc, const void *msg )
for ( r = 0; r < 3; r ++ )
{
// round 1, 5, 9
-k00 = mm_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) );
+k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) );
k00 = _mm_xor_si128( k00, k13 );
if ( r == 0 )
@@ -145,7 +145,7 @@ c512( sph_shavite_big_context *sc, const void *msg )
x = _mm_xor_si128( p0, k00 );
x = _mm_aesenc_si128( x, m128_zero );
-k01 = mm_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) );
+k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) );
k01 = _mm_xor_si128( k01, k00 );
if ( r == 1 )
@@ -154,33 +154,33 @@ c512( sph_shavite_big_context *sc, const void *msg )
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, m128_zero );
-k02 = mm_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) );
+k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) );
k02 = _mm_xor_si128( k02, k01 );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, m128_zero );
-k03 = mm_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) );
+k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) );
k03 = _mm_xor_si128( k03, k02 );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, m128_zero );
p3 = _mm_xor_si128( p3, x );
-k10 = mm_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) );
+k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) );
k10 = _mm_xor_si128( k10, k03 );
x = _mm_xor_si128( p2, k10 );
x = _mm_aesenc_si128( x, m128_zero );
-k11 = mm_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) );
+k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) );
k11 = _mm_xor_si128( k11, k10 );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, m128_zero );
-k12 = mm_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) );
+k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) );
k12 = _mm_xor_si128( k12, k11 );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, m128_zero );
-k13 = mm_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) );
+k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) );
k13 = _mm_xor_si128( k13, k12 );
if ( r == 2 )
@@ -193,80 +193,80 @@ c512( sph_shavite_big_context *sc, const void *msg )
// round 2, 6, 10
-k00 = _mm_xor_si128( k00, mm_ror256hi_1x32( k12, k13 ) );
+k00 = _mm_xor_si128( k00, mm128_ror256hi_1x32( k12, k13 ) );
x = _mm_xor_si128( p3, k00 );
x = _mm_aesenc_si128( x, m128_zero );
-k01 = _mm_xor_si128( k01, mm_ror256hi_1x32( k13, k00 ) );
+k01 = _mm_xor_si128( k01, mm128_ror256hi_1x32( k13, k00 ) );
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, m128_zero );
-k02 = _mm_xor_si128( k02, mm_ror256hi_1x32( k00, k01 ) );
+k02 = _mm_xor_si128( k02, mm128_ror256hi_1x32( k00, k01 ) );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, m128_zero );
-k03 = _mm_xor_si128( k03, mm_ror256hi_1x32( k01, k02 ) );
+k03 = _mm_xor_si128( k03, mm128_ror256hi_1x32( k01, k02 ) );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, m128_zero );
p2 = _mm_xor_si128( p2, x );
-k10 = _mm_xor_si128( k10, mm_ror256hi_1x32( k02, k03 ) );
+k10 = _mm_xor_si128( k10, mm128_ror256hi_1x32( k02, k03 ) );
x = _mm_xor_si128( p1, k10 );
x = _mm_aesenc_si128( x, m128_zero );
-k11 = _mm_xor_si128( k11, mm_ror256hi_1x32( k03, k10 ) );
+k11 = _mm_xor_si128( k11, mm128_ror256hi_1x32( k03, k10 ) );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, m128_zero );
-k12 = _mm_xor_si128( k12, mm_ror256hi_1x32( k10, k11 ) );
+k12 = _mm_xor_si128( k12, mm128_ror256hi_1x32( k10, k11 ) );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, m128_zero );
-k13 = _mm_xor_si128( k13, mm_ror256hi_1x32( k11, k12 ) );
+k13 = _mm_xor_si128( k13, mm128_ror256hi_1x32( k11, k12 ) );
x = _mm_xor_si128( x, k13 );
x = _mm_aesenc_si128( x, m128_zero );
p0 = _mm_xor_si128( p0, x );
// round 3, 7, 11
-k00 = mm_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) );
+k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) );
k00 = _mm_xor_si128( k00, k13 );
x = _mm_xor_si128( p2, k00 );
x = _mm_aesenc_si128( x, m128_zero );
-k01 = mm_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) );
+k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) );
k01 = _mm_xor_si128( k01, k00 );
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, m128_zero );
-k02 = mm_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) );
+k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) );
k02 = _mm_xor_si128( k02, k01 );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, m128_zero );
-k03 = mm_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) );
+k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) );
k03 = _mm_xor_si128( k03, k02 );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, m128_zero );
p1 = _mm_xor_si128( p1, x );
-k10 = mm_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) );
+k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) );
k10 = _mm_xor_si128( k10, k03 );
x = _mm_xor_si128( p0, k10 );
x = _mm_aesenc_si128( x, m128_zero );
-k11 = mm_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) );
+k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) );
k11 = _mm_xor_si128( k11, k10 );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, m128_zero );
-k12 = mm_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) );
+k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) );
k12 = _mm_xor_si128( k12, k11 );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, m128_zero );
-k13 = mm_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) );
+k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) );
k13 = _mm_xor_si128( k13, k12 );
x = _mm_xor_si128( x, k13 );
@@ -275,36 +275,36 @@ c512( sph_shavite_big_context *sc, const void *msg )
// round 4, 8, 12
-k00 = _mm_xor_si128( k00, mm_ror256hi_1x32( k12, k13 ) );
+k00 = _mm_xor_si128( k00, mm128_ror256hi_1x32( k12, k13 ) );
x = _mm_xor_si128( p1, k00 );
x = _mm_aesenc_si128( x, m128_zero );
-k01 = _mm_xor_si128( k01, mm_ror256hi_1x32( k13, k00 ) );
+k01 = _mm_xor_si128( k01, mm128_ror256hi_1x32( k13, k00 ) );
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, m128_zero );
-k02 = _mm_xor_si128( k02, mm_ror256hi_1x32( k00, k01 ) );
+k02 = _mm_xor_si128( k02, mm128_ror256hi_1x32( k00, k01 ) );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, m128_zero );
-k03 = _mm_xor_si128( k03, mm_ror256hi_1x32( k01, k02 ) );
+k03 = _mm_xor_si128( k03, mm128_ror256hi_1x32( k01, k02 ) );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, m128_zero );
p0 = _mm_xor_si128( p0, x );
-k10 = _mm_xor_si128( k10, mm_ror256hi_1x32( k02, k03 ) );
+k10 = _mm_xor_si128( k10, mm128_ror256hi_1x32( k02, k03 ) );
x = _mm_xor_si128( p3, k10 );
x = _mm_aesenc_si128( x, m128_zero );
-k11 = _mm_xor_si128( k11, mm_ror256hi_1x32( k03, k10 ) );
+k11 = _mm_xor_si128( k11, mm128_ror256hi_1x32( k03, k10 ) );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, m128_zero );
-k12 = _mm_xor_si128( k12, mm_ror256hi_1x32( k10, k11 ) );
+k12 = _mm_xor_si128( k12, mm128_ror256hi_1x32( k10, k11 ) );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, m128_zero );
-k13 = _mm_xor_si128( k13, mm_ror256hi_1x32( k11, k12 ) );
+k13 = _mm_xor_si128( k13, mm128_ror256hi_1x32( k11, k12 ) );
x = _mm_xor_si128( x, k13 );
x = _mm_aesenc_si128( x, m128_zero );
@@ -313,44 +313,44 @@ c512( sph_shavite_big_context *sc, const void *msg )
// round 13
-k00 = mm_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) );
+k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) );
k00 = _mm_xor_si128( k00, k13 );
x = _mm_xor_si128( p0, k00 );
x = _mm_aesenc_si128( x, m128_zero );
-k01 = mm_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) );
+k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) );
k01 = _mm_xor_si128( k01, k00 );
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, m128_zero );
-k02 = mm_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) );
+k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) );
k02 = _mm_xor_si128( k02, k01 );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, m128_zero );
-k03 = mm_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) );
+k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) );
k03 = _mm_xor_si128( k03, k02 );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, m128_zero );
p3 = _mm_xor_si128( p3, x );
-k10 = mm_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) );
+k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) );
k10 = _mm_xor_si128( k10, k03 );
x = _mm_xor_si128( p2, k10 );
x = _mm_aesenc_si128( x, m128_zero );
-k11 = mm_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) );
+k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) );
k11 = _mm_xor_si128( k11, k10 );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, m128_zero );
-k12 = mm_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) );
+k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) );
k12 = _mm_xor_si128( k12, _mm_xor_si128( k11, _mm_set_epi32(
~sc->count2, sc->count3, sc->count0, sc->count1 ) ) );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, m128_zero );
-k13 = mm_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) );
+k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) );
k13 = _mm_xor_si128( k13, k12 );
x = _mm_xor_si128( x, k13 );
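
Editor's note: round 13 above is the only round that folds the 128-bit message bit counter into the key schedule, XORing it into k12 with count2 complemented and the word pairs swapped. A minimal sketch of just that tweak, assuming the four count words as used in sph_shavite_big_context; the struct and helper names here are hypothetical, introduced only for illustration:

#include <emmintrin.h>
#include <stdint.h>

// Hypothetical stand-in for the count0..count3 fields used above.
typedef struct { uint32_t count0, count1, count2, count3; } shavite_count_t;

// Builds the round-13 counter word exactly as the code above does:
// lanes (high..low) = ~count2, count3, count0, count1.
static inline __m128i shavite_round13_tweak( const shavite_count_t *c )
{
   return _mm_set_epi32( ~c->count2, c->count3, c->count0, c->count1 );
}

// Usage, mirroring the k12 update in round 13:
//   k12 = _mm_xor_si128( k12,
//            _mm_xor_si128( k11, shavite_round13_tweak( &count ) ) );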