Jay D Dee
2023-10-06 22:18:09 -04:00
parent bc5a5c6df8
commit 31c4dedf59
144 changed files with 5931 additions and 3746 deletions

algo/shavite/shavite-hash.h Normal file

@@ -0,0 +1,315 @@
/* $Id: sph_shavite.h 208 2010-06-02 20:33:00Z tp $ */
/**
* SHAvite-3 interface. This code implements SHAvite-3 with the
* recommended parameters for SHA-3, with outputs of 224, 256, 384 and
* 512 bits. In the following, we call the function "SHAvite" (without
* the "-3" suffix), thus "SHAvite-224" is "SHAvite-3 with a 224-bit
* output".
*
* ==========================(LICENSE BEGIN)============================
*
* Copyright (c) 2007-2010 Projet RNRT SAPHIR
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* ===========================(LICENSE END)=============================
*
* @file sph_shavite.h
* @author Thomas Pornin <thomas.pornin@cryptolog.com>
*/
#ifndef SPH_SHAVITE_H__
#define SPH_SHAVITE_H__
#include <stddef.h>
#include "compat/sph_types.h"
#ifdef __cplusplus
extern "C"{
#endif
/**
* Output size (in bits) for SHAvite-224.
*/
#define SPH_SIZE_shavite224 224
/**
* Output size (in bits) for SHAvite-256.
*/
#define SPH_SIZE_shavite256 256
/**
* Output size (in bits) for SHAvite-384.
*/
#define SPH_SIZE_shavite384 384
/**
* Output size (in bits) for SHAvite-512.
*/
#define SPH_SIZE_shavite512 512
/**
* This structure is a context for SHAvite-224 and SHAvite-256 computations:
* it contains the intermediate values and some data from the last
* entered block. Once a SHAvite computation has been performed, the
* context can be reused for another computation.
*
* The contents of this structure are private. A running SHAvite
* computation can be cloned by copying the context (e.g. with a simple
* <code>memcpy()</code>).
*/
typedef struct {
#ifndef DOXYGEN_IGNORE
unsigned char buf[64] __attribute__ ((aligned (64)));
sph_u32 h[8] __attribute__ ((aligned (32)));
size_t ptr;
sph_u32 count0, count1;
#endif
} sph_shavite_small_context;
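/* Illustrative example (not part of the original header): because the
   context holds no internal pointers, a running computation is forked
   with a plain structure copy, as the comment above notes:
       sph_shavite256_context fork;
       memcpy( &fork, &cc, sizeof fork );   // needs <string.h>
   Both contexts can then continue or finalize independently. */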
/**
* This structure is a context for SHAvite-224 computations. It is
* identical to the common <code>sph_shavite_small_context</code>.
*/
typedef sph_shavite_small_context sph_shavite224_context;
/**
* This structure is a context for SHAvite-256 computations. It is
* identical to the common <code>sph_shavite_small_context</code>.
*/
typedef sph_shavite_small_context sph_shavite256_context;
/**
* This structure is a context for SHAvite-384 and SHAvite-512 computations:
* it contains the intermediate values and some data from the last
* entered block. Once a SHAvite computation has been performed, the
* context can be reused for another computation.
*
* The contents of this structure are private. A running SHAvite
* computation can be cloned by copying the context (e.g. with a simple
* <code>memcpy()</code>).
*/
typedef struct {
#ifndef DOXYGEN_IGNORE
unsigned char buf[128] __attribute__ ((aligned (64)));
sph_u32 h[16] __attribute__ ((aligned (32)));
size_t ptr;
sph_u32 count0, count1, count2, count3;
#endif
} sph_shavite_big_context;
/**
* This structure is a context for SHAvite-384 computations. It is
* identical to the common <code>sph_shavite_big_context</code>.
*/
typedef sph_shavite_big_context sph_shavite384_context;
/**
* This structure is a context for SHAvite-512 computations. It is
* identical to the common <code>sph_shavite_big_context</code>.
*/
typedef sph_shavite_big_context sph_shavite512_context;
/**
* Initialize a SHAvite-224 context. This process performs no memory allocation.
*
* @param cc the SHAvite-224 context (pointer to a
* <code>sph_shavite224_context</code>)
*/
void sph_shavite224_init(void *cc);
/**
* Process some data bytes. It is acceptable that <code>len</code> is zero
* (in which case this function does nothing).
*
* @param cc the SHAvite-224 context
* @param data the input data
* @param len the input data length (in bytes)
*/
void sph_shavite224(void *cc, const void *data, size_t len);
/**
* Terminate the current SHAvite-224 computation and output the result into
* the provided buffer. The destination buffer must be wide enough to
* accommodate the result (28 bytes). The context is automatically
* reinitialized.
*
* @param cc the SHAvite-224 context
* @param dst the destination buffer
*/
void sph_shavite224_close(void *cc, void *dst);
/**
* Add a few additional bits (0 to 7) to the current computation, then
* terminate it and output the result in the provided buffer, which must
* be wide enough to accommodate the result (28 bytes). If bit number i
* in <code>ub</code> has value 2^i, then the extra bits are those
* numbered 7 down to 8-n (this is the big-endian convention at the byte
* level). The context is automatically reinitialized.
*
* @param cc the SHAvite-224 context
* @param ub the extra bits
* @param n the number of extra bits (0 to 7)
* @param dst the destination buffer
*/
void sph_shavite224_addbits_and_close(
void *cc, unsigned ub, unsigned n, void *dst);
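/* Illustrative example (not part of the original header): to append the
   three extra bits 1,0,1 in stream order, left-align them in ub per the
   big-endian convention above and pass n = 3:
       sph_shavite224_addbits_and_close( &cc, 0xA0, 3, digest );
   0xA0 is binary 10100000, so bits 7 down to 5 carry the appended bits. */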
/**
* Initialize a SHAvite-256 context. This process performs no memory allocation.
*
* @param cc the SHAvite-256 context (pointer to a
* <code>sph_shavite256_context</code>)
*/
void sph_shavite256_init(void *cc);
/**
* Process some data bytes. It is acceptable that <code>len</code> is zero
* (in which case this function does nothing).
*
* @param cc the SHAvite-256 context
* @param data the input data
* @param len the input data length (in bytes)
*/
void sph_shavite256(void *cc, const void *data, size_t len);
/**
* Terminate the current SHAvite-256 computation and output the result into
* the provided buffer. The destination buffer must be wide enough to
* accommodate the result (32 bytes). The context is automatically
* reinitialized.
*
* @param cc the SHAvite-256 context
* @param dst the destination buffer
*/
void sph_shavite256_close(void *cc, void *dst);
/**
* Add a few additional bits (0 to 7) to the current computation, then
* terminate it and output the result in the provided buffer, which must
* be wide enough to accommodate the result (32 bytes). If bit number i
* in <code>ub</code> has value 2^i, then the extra bits are those
* numbered 7 down to 8-n (this is the big-endian convention at the byte
* level). The context is automatically reinitialized.
*
* @param cc the SHAvite-256 context
* @param ub the extra bits
* @param n the number of extra bits (0 to 7)
* @param dst the destination buffer
*/
void sph_shavite256_addbits_and_close(
void *cc, unsigned ub, unsigned n, void *dst);
/**
* Initialize a SHAvite-384 context. This process performs no memory allocation.
*
* @param cc the SHAvite-384 context (pointer to a
* <code>sph_shavite384_context</code>)
*/
void sph_shavite384_init(void *cc);
/**
* Process some data bytes. It is acceptable that <code>len</code> is zero
* (in which case this function does nothing).
*
* @param cc the SHAvite-384 context
* @param data the input data
* @param len the input data length (in bytes)
*/
void sph_shavite384(void *cc, const void *data, size_t len);
/**
* Terminate the current SHAvite-384 computation and output the result into
* the provided buffer. The destination buffer must be wide enough to
* accommodate the result (48 bytes). The context is automatically
* reinitialized.
*
* @param cc the SHAvite-384 context
* @param dst the destination buffer
*/
void sph_shavite384_close(void *cc, void *dst);
/**
* Add a few additional bits (0 to 7) to the current computation, then
* terminate it and output the result in the provided buffer, which must
* be wide enough to accommodate the result (48 bytes). If bit number i
* in <code>ub</code> has value 2^i, then the extra bits are those
* numbered 7 down to 8-n (this is the big-endian convention at the byte
* level). The context is automatically reinitialized.
*
* @param cc the SHAvite-384 context
* @param ub the extra bits
* @param n the number of extra bits (0 to 7)
* @param dst the destination buffer
*/
void sph_shavite384_addbits_and_close(
void *cc, unsigned ub, unsigned n, void *dst);
// Don't call these directly from application code; use the macros below.
#if ( defined(__AES__) && defined(__SSSE3__) ) || ( defined(__ARM_NEON) && defined(__ARM_FEATURE_AES) )
void sph_shavite512_aesni_init(void *cc);
void sph_shavite512_aesni(void *cc, const void *data, size_t len);
void sph_shavite512_aesni_close(void *cc, void *dst);
void sph_shavite512_aesni_addbits_and_close(
void *cc, unsigned ub, unsigned n, void *dst);
#define sph_shavite512_init sph_shavite512_aesni_init
#define sph_shavite512 sph_shavite512_aesni
#define sph_shavite512_close sph_shavite512_aesni_close
#define sph_shavite512_addbits_and_close \
sph_shavite512_aesni_addbits_and_close
#else
void sph_shavite512_sw_init(void *cc);
void sph_shavite512_sw(void *cc, const void *data, size_t len);
void sph_shavite512_sw_close(void *cc, void *dst);
void sph_shavite512_sw_addbits_and_close(
void *cc, unsigned ub, unsigned n, void *dst);
#define sph_shavite512_init sph_shavite512_sw_init
#define sph_shavite512 sph_shavite512_sw
#define sph_shavite512_close sph_shavite512_sw_close
#define sph_shavite512_addbits_and_close \
sph_shavite512_sw_addbits_and_close
#endif
// Use these macros from application code.
#define shavite512_context sph_shavite512_context
#define shavite512_init sph_shavite512_init
#define shavite512_update sph_shavite512
#define shavite512_close sph_shavite512_close
#define shavite512_full( cc, dst, data, len ) \
do{ \
shavite512_init( cc ); \
shavite512_update( cc, data, len ); \
shavite512_close( cc, dst ); \
}while(0)
#ifdef __cplusplus
}
#endif
#endif
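
A minimal usage sketch for the macro API above. The context type, digest size, and call sequence come from this header; the include path and the surrounding program are illustrative assumptions:

#include <stdio.h>
#include "shavite-hash.h"

int main(void)
{
   unsigned char digest[64];   // SPH_SIZE_shavite512 / 8 bytes
   shavite512_context ctx;

   // One-shot convenience wrapper: init + update + close.
   shavite512_full( &ctx, digest, "abc", 3 );

   for ( int i = 0; i < 64; i++ )
      printf( "%02x", digest[i] );
   printf( "\n" );
   return 0;
}

The incremental shavite512_init / shavite512_update / shavite512_close sequence is equivalent and supports streaming input.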


@@ -33,7 +33,9 @@
#include <stddef.h>
#include <string.h>
#if defined(__AES__)
#if ( defined(__AES__) && defined(__SSSE3__) ) || ( defined(__ARM_NEON) && defined(__ARM_FEATURE_AES) )
#pragma message "AES for shavite"
#include "sph_shavite.h"
#include "simd-utils.h"
@@ -50,24 +52,21 @@ extern "C"{
#pragma warning (disable: 4146)
#endif
#define C32 SPH_C32
static const sph_u32 IV512[] = {
C32(0x72FCCDD8), C32(0x79CA4727), C32(0x128A077B), C32(0x40D55AEC),
C32(0xD1901A06), C32(0x430AE307), C32(0xB29F5CD1), C32(0xDF07FBFC),
C32(0x8E45D73D), C32(0x681AB538), C32(0xBDE86578), C32(0xDD577E47),
C32(0xE275EADE), C32(0x502D9FCD), C32(0xB9357178), C32(0x022A4B9A)
0x72FCCDD8, 0x79CA4727, 0x128A077B, 0x40D55AEC,
0xD1901A06, 0x430AE307, 0xB29F5CD1, 0xDF07FBFC,
0x8E45D73D, 0x681AB538, 0xBDE86578, 0xDD577E47,
0xE275EADE, 0x502D9FCD, 0xB9357178, 0x022A4B9A
};
static void
c512( sph_shavite_big_context *sc, const void *msg )
{
const __m128i zero = _mm_setzero_si128();
__m128i p0, p1, p2, p3, x;
__m128i k00, k01, k02, k03, k10, k11, k12, k13;
__m128i *m = (__m128i*)msg;
__m128i *h = (__m128i*)sc->h;
const v128_t zero = v128_zero;
v128_t p0, p1, p2, p3, x;
v128_t k00, k01, k02, k03, k10, k11, k12, k13;
v128_t *m = (v128_t*)msg;
v128_t *h = (v128_t*)sc->h;
int r;
p0 = h[0];
@@ -78,242 +77,242 @@ c512( sph_shavite_big_context *sc, const void *msg )
// round
k00 = m[0];
x = _mm_xor_si128( p1, k00 );
x = _mm_aesenc_si128( x, zero );
x = v128_xor( p1, k00 );
x = v128_aesenc( x, zero );
k01 = m[1];
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, zero );
x = v128_xor( x, k01 );
x = v128_aesenc( x, zero );
k02 = m[2];
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, zero );
x = v128_xor( x, k02 );
x = v128_aesenc( x, zero );
k03 = m[3];
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, zero );
x = v128_xor( x, k03 );
x = v128_aesenc( x, zero );
p0 = _mm_xor_si128( p0, x );
p0 = v128_xor( p0, x );
k10 = m[4];
x = _mm_xor_si128( p3, k10 );
x = _mm_aesenc_si128( x, zero );
x = v128_xor( p3, k10 );
x = v128_aesenc( x, zero );
k11 = m[5];
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, zero );
x = v128_xor( x, k11 );
x = v128_aesenc( x, zero );
k12 = m[6];
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, zero );
x = v128_xor( x, k12 );
x = v128_aesenc( x, zero );
k13 = m[7];
x = _mm_xor_si128( x, k13 );
x = _mm_aesenc_si128( x, zero );
x = v128_xor( x, k13 );
x = v128_aesenc( x, zero );
p2 = _mm_xor_si128( p2, x );
p2 = v128_xor( p2, x );
for ( r = 0; r < 3; r ++ )
{
// round 1, 5, 9
k00 = mm128_shuflr_32( _mm_aesenc_si128( k00, zero ) );
k00 = _mm_xor_si128( k00, k13 );
k00 = v128_shuflr32( v128_aesenc( k00, zero ) );
k00 = v128_xor( k00, k13 );
if ( r == 0 )
k00 = _mm_xor_si128( k00, _mm_set_epi32(
k00 = v128_xor( k00, v128_set32(
~sc->count3, sc->count2, sc->count1, sc->count0 ) );
x = _mm_xor_si128( p0, k00 );
x = _mm_aesenc_si128( x, zero );
k01 = mm128_shuflr_32( _mm_aesenc_si128( k01, zero ) );
k01 = _mm_xor_si128( k01, k00 );
x = v128_xor( p0, k00 );
x = v128_aesenc( x, zero );
k01 = v128_shuflr32( v128_aesenc( k01, zero ) );
k01 = v128_xor( k01, k00 );
if ( r == 1 )
k01 = _mm_xor_si128( k01, _mm_set_epi32(
k01 = v128_xor( k01, v128_set32(
~sc->count0, sc->count1, sc->count2, sc->count3 ) );
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, zero );
k02 = mm128_shuflr_32( _mm_aesenc_si128( k02, zero ) );
k02 = _mm_xor_si128( k02, k01 );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, zero );
k03 = mm128_shuflr_32( _mm_aesenc_si128( k03, zero ) );
k03 = _mm_xor_si128( k03, k02 );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, zero );
x = v128_xor( x, k01 );
x = v128_aesenc( x, zero );
k02 = v128_shuflr32( v128_aesenc( k02, zero ) );
k02 = v128_xor( k02, k01 );
x = v128_xor( x, k02 );
x = v128_aesenc( x, zero );
k03 = v128_shuflr32( v128_aesenc( k03, zero ) );
k03 = v128_xor( k03, k02 );
x = v128_xor( x, k03 );
x = v128_aesenc( x, zero );
p3 = _mm_xor_si128( p3, x );
p3 = v128_xor( p3, x );
k10 = mm128_shuflr_32( _mm_aesenc_si128( k10, zero ) );
k10 = _mm_xor_si128( k10, k03 );
k10 = v128_shuflr32( v128_aesenc( k10, zero ) );
k10 = v128_xor( k10, k03 );
x = _mm_xor_si128( p2, k10 );
x = _mm_aesenc_si128( x, zero );
k11 = mm128_shuflr_32( _mm_aesenc_si128( k11, zero ) );
k11 = _mm_xor_si128( k11, k10 );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, zero );
k12 = mm128_shuflr_32( _mm_aesenc_si128( k12, zero ) );
k12 = _mm_xor_si128( k12, k11 );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, zero );
k13 = mm128_shuflr_32( _mm_aesenc_si128( k13, zero ) );
k13 = _mm_xor_si128( k13, k12 );
x = v128_xor( p2, k10 );
x = v128_aesenc( x, zero );
k11 = v128_shuflr32( v128_aesenc( k11, zero ) );
k11 = v128_xor( k11, k10 );
x = v128_xor( x, k11 );
x = v128_aesenc( x, zero );
k12 = v128_shuflr32( v128_aesenc( k12, zero ) );
k12 = v128_xor( k12, k11 );
x = v128_xor( x, k12 );
x = v128_aesenc( x, zero );
k13 = v128_shuflr32( v128_aesenc( k13, zero ) );
k13 = v128_xor( k13, k12 );
if ( r == 2 )
k13 = _mm_xor_si128( k13, _mm_set_epi32(
k13 = v128_xor( k13, v128_set32(
~sc->count1, sc->count0, sc->count3, sc->count2 ) );
x = _mm_xor_si128( x, k13 );
x = _mm_aesenc_si128( x, zero );
p1 = _mm_xor_si128( p1, x );
x = v128_xor( x, k13 );
x = v128_aesenc( x, zero );
p1 = v128_xor( p1, x );
// round 2, 6, 10
k00 = _mm_xor_si128( k00, _mm_alignr_epi8( k13, k12, 4 ) );
x = _mm_xor_si128( p3, k00 );
x = _mm_aesenc_si128( x, zero );
k01 = _mm_xor_si128( k01, _mm_alignr_epi8( k00, k13, 4 ) );
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, zero );
k02 = _mm_xor_si128( k02, _mm_alignr_epi8( k01, k00, 4 ) );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, zero );
k03 = _mm_xor_si128( k03, _mm_alignr_epi8( k02, k01, 4 ) );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, zero );
k00 = v128_xor( k00, v128_alignr8( k13, k12, 4 ) );
x = v128_xor( p3, k00 );
x = v128_aesenc( x, zero );
k01 = v128_xor( k01, v128_alignr8( k00, k13, 4 ) );
x = v128_xor( x, k01 );
x = v128_aesenc( x, zero );
k02 = v128_xor( k02, v128_alignr8( k01, k00, 4 ) );
x = v128_xor( x, k02 );
x = v128_aesenc( x, zero );
k03 = v128_xor( k03, v128_alignr8( k02, k01, 4 ) );
x = v128_xor( x, k03 );
x = v128_aesenc( x, zero );
p2 = _mm_xor_si128( p2, x );
p2 = v128_xor( p2, x );
k10 = _mm_xor_si128( k10, _mm_alignr_epi8( k03, k02, 4 ) );
x = _mm_xor_si128( p1, k10 );
x = _mm_aesenc_si128( x, zero );
k11 = _mm_xor_si128( k11, _mm_alignr_epi8( k10, k03, 4 ) );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, zero );
k12 = _mm_xor_si128( k12, _mm_alignr_epi8( k11, k10, 4 ) );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, zero );
k13 = _mm_xor_si128( k13, _mm_alignr_epi8( k12, k11, 4 ) );
x = _mm_xor_si128( x, k13 );
x = _mm_aesenc_si128( x, zero );
k10 = v128_xor( k10, v128_alignr8( k03, k02, 4 ) );
x = v128_xor( p1, k10 );
x = v128_aesenc( x, zero );
k11 = v128_xor( k11, v128_alignr8( k10, k03, 4 ) );
x = v128_xor( x, k11 );
x = v128_aesenc( x, zero );
k12 = v128_xor( k12, v128_alignr8( k11, k10, 4 ) );
x = v128_xor( x, k12 );
x = v128_aesenc( x, zero );
k13 = v128_xor( k13, v128_alignr8( k12, k11, 4 ) );
x = v128_xor( x, k13 );
x = v128_aesenc( x, zero );
p0 = _mm_xor_si128( p0, x );
p0 = v128_xor( p0, x );
// round 3, 7, 11
k00 = mm128_shuflr_32( _mm_aesenc_si128( k00, zero ) );
k00 = _mm_xor_si128( k00, k13 );
x = _mm_xor_si128( p2, k00 );
x = _mm_aesenc_si128( x, zero );
k01 = mm128_shuflr_32( _mm_aesenc_si128( k01, zero ) );
k01 = _mm_xor_si128( k01, k00 );
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, zero );
k02 = mm128_shuflr_32( _mm_aesenc_si128( k02, zero ) );
k02 = _mm_xor_si128( k02, k01 );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, zero );
k03 = mm128_shuflr_32( _mm_aesenc_si128( k03, zero ) );
k03 = _mm_xor_si128( k03, k02 );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, zero );
k00 = v128_shuflr32( v128_aesenc( k00, zero ) );
k00 = v128_xor( k00, k13 );
x = v128_xor( p2, k00 );
x = v128_aesenc( x, zero );
k01 = v128_shuflr32( v128_aesenc( k01, zero ) );
k01 = v128_xor( k01, k00 );
x = v128_xor( x, k01 );
x = v128_aesenc( x, zero );
k02 = v128_shuflr32( v128_aesenc( k02, zero ) );
k02 = v128_xor( k02, k01 );
x = v128_xor( x, k02 );
x = v128_aesenc( x, zero );
k03 = v128_shuflr32( v128_aesenc( k03, zero ) );
k03 = v128_xor( k03, k02 );
x = v128_xor( x, k03 );
x = v128_aesenc( x, zero );
p1 = _mm_xor_si128( p1, x );
p1 = v128_xor( p1, x );
k10 = mm128_shuflr_32( _mm_aesenc_si128( k10, zero ) );
k10 = _mm_xor_si128( k10, k03 );
x = _mm_xor_si128( p0, k10 );
x = _mm_aesenc_si128( x, zero );
k11 = mm128_shuflr_32( _mm_aesenc_si128( k11, zero ) );
k11 = _mm_xor_si128( k11, k10 );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, zero );
k12 = mm128_shuflr_32( _mm_aesenc_si128( k12, zero ) );
k12 = _mm_xor_si128( k12, k11 );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, zero );
k13 = mm128_shuflr_32( _mm_aesenc_si128( k13, zero ) );
k13 = _mm_xor_si128( k13, k12 );
x = _mm_xor_si128( x, k13 );
x = _mm_aesenc_si128( x, zero );
k10 = v128_shuflr32( v128_aesenc( k10, zero ) );
k10 = v128_xor( k10, k03 );
x = v128_xor( p0, k10 );
x = v128_aesenc( x, zero );
k11 = v128_shuflr32( v128_aesenc( k11, zero ) );
k11 = v128_xor( k11, k10 );
x = v128_xor( x, k11 );
x = v128_aesenc( x, zero );
k12 = v128_shuflr32( v128_aesenc( k12, zero ) );
k12 = v128_xor( k12, k11 );
x = v128_xor( x, k12 );
x = v128_aesenc( x, zero );
k13 = v128_shuflr32( v128_aesenc( k13, zero ) );
k13 = v128_xor( k13, k12 );
x = v128_xor( x, k13 );
x = v128_aesenc( x, zero );
p3 = _mm_xor_si128( p3, x );
p3 = v128_xor( p3, x );
// round 4, 8, 12
k00 = _mm_xor_si128( k00, _mm_alignr_epi8( k13, k12, 4 ) );
x = _mm_xor_si128( p1, k00 );
x = _mm_aesenc_si128( x, zero );
k01 = _mm_xor_si128( k01, _mm_alignr_epi8( k00, k13, 4 ) );
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, zero );
k02 = _mm_xor_si128( k02, _mm_alignr_epi8( k01, k00, 4 ) );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, zero );
k03 = _mm_xor_si128( k03, _mm_alignr_epi8( k02, k01, 4 ) );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, zero );
k00 = v128_xor( k00, v128_alignr8( k13, k12, 4 ) );
x = v128_xor( p1, k00 );
x = v128_aesenc( x, zero );
k01 = v128_xor( k01, v128_alignr8( k00, k13, 4 ) );
x = v128_xor( x, k01 );
x = v128_aesenc( x, zero );
k02 = v128_xor( k02, v128_alignr8( k01, k00, 4 ) );
x = v128_xor( x, k02 );
x = v128_aesenc( x, zero );
k03 = v128_xor( k03, v128_alignr8( k02, k01, 4 ) );
x = v128_xor( x, k03 );
x = v128_aesenc( x, zero );
p0 = _mm_xor_si128( p0, x );
p0 = v128_xor( p0, x );
k10 = _mm_xor_si128( k10, _mm_alignr_epi8( k03, k02, 4 ) );
x = _mm_xor_si128( p3, k10 );
x = _mm_aesenc_si128( x, zero );
k11 = _mm_xor_si128( k11, _mm_alignr_epi8( k10, k03, 4 ) );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, zero );
k12 = _mm_xor_si128( k12, _mm_alignr_epi8( k11, k10, 4 ) );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, zero );
k13 = _mm_xor_si128( k13, _mm_alignr_epi8( k12, k11, 4 ) );
x = _mm_xor_si128( x, k13 );
x = _mm_aesenc_si128( x, zero );
k10 = v128_xor( k10, v128_alignr8( k03, k02, 4 ) );
x = v128_xor( p3, k10 );
x = v128_aesenc( x, zero );
k11 = v128_xor( k11, v128_alignr8( k10, k03, 4 ) );
x = v128_xor( x, k11 );
x = v128_aesenc( x, zero );
k12 = v128_xor( k12, v128_alignr8( k11, k10, 4 ) );
x = v128_xor( x, k12 );
x = v128_aesenc( x, zero );
k13 = v128_xor( k13, v128_alignr8( k12, k11, 4 ) );
x = v128_xor( x, k13 );
x = v128_aesenc( x, zero );
p2 = _mm_xor_si128( p2, x );
p2 = v128_xor( p2, x );
}
// round 13
k00 = mm128_shuflr_32( _mm_aesenc_si128( k00, zero ) );
k00 = _mm_xor_si128( k00, k13 );
x = _mm_xor_si128( p0, k00 );
x = _mm_aesenc_si128( x, zero );
k01 = mm128_shuflr_32( _mm_aesenc_si128( k01, zero ) );
k01 = _mm_xor_si128( k01, k00 );
x = _mm_xor_si128( x, k01 );
x = _mm_aesenc_si128( x, zero );
k02 = mm128_shuflr_32( _mm_aesenc_si128( k02, zero ) );
k02 = _mm_xor_si128( k02, k01 );
x = _mm_xor_si128( x, k02 );
x = _mm_aesenc_si128( x, zero );
k03 = mm128_shuflr_32( _mm_aesenc_si128( k03, zero ) );
k03 = _mm_xor_si128( k03, k02 );
x = _mm_xor_si128( x, k03 );
x = _mm_aesenc_si128( x, zero );
k00 = v128_shuflr32( v128_aesenc( k00, zero ) );
k00 = v128_xor( k00, k13 );
x = v128_xor( p0, k00 );
x = v128_aesenc( x, zero );
k01 = v128_shuflr32( v128_aesenc( k01, zero ) );
k01 = v128_xor( k01, k00 );
x = v128_xor( x, k01 );
x = v128_aesenc( x, zero );
k02 = v128_shuflr32( v128_aesenc( k02, zero ) );
k02 = v128_xor( k02, k01 );
x = v128_xor( x, k02 );
x = v128_aesenc( x, zero );
k03 = v128_shuflr32( v128_aesenc( k03, zero ) );
k03 = v128_xor( k03, k02 );
x = v128_xor( x, k03 );
x = v128_aesenc( x, zero );
p3 = _mm_xor_si128( p3, x );
p3 = v128_xor( p3, x );
k10 = mm128_shuflr_32( _mm_aesenc_si128( k10, zero ) );
k10 = _mm_xor_si128( k10, k03 );
x = _mm_xor_si128( p2, k10 );
x = _mm_aesenc_si128( x, zero );
k11 = mm128_shuflr_32( _mm_aesenc_si128( k11, zero ) );
k11 = _mm_xor_si128( k11, k10 );
x = _mm_xor_si128( x, k11 );
x = _mm_aesenc_si128( x, zero );
k12 = mm128_shuflr_32( _mm_aesenc_si128( k12, zero ) );
k12 = _mm_xor_si128( k12, _mm_xor_si128( k11, _mm_set_epi32(
k10 = v128_shuflr32( v128_aesenc( k10, zero ) );
k10 = v128_xor( k10, k03 );
x = v128_xor( p2, k10 );
x = v128_aesenc( x, zero );
k11 = v128_shuflr32( v128_aesenc( k11, zero ) );
k11 = v128_xor( k11, k10 );
x = v128_xor( x, k11 );
x = v128_aesenc( x, zero );
k12 = v128_shuflr32( v128_aesenc( k12, zero ) );
k12 = v128_xor( k12, v128_xor( k11, v128_set32(
~sc->count2, sc->count3, sc->count0, sc->count1 ) ) );
x = _mm_xor_si128( x, k12 );
x = _mm_aesenc_si128( x, zero );
k13 = mm128_shuflr_32( _mm_aesenc_si128( k13, zero ) );
k13 = _mm_xor_si128( k13, k12 );
x = _mm_xor_si128( x, k13 );
x = _mm_aesenc_si128( x, zero );
x = v128_xor( x, k12 );
x = v128_aesenc( x, zero );
k13 = v128_shuflr32( v128_aesenc( k13, zero ) );
k13 = v128_xor( k13, k12 );
x = v128_xor( x, k13 );
x = v128_aesenc( x, zero );
p1 = _mm_xor_si128( p1, x );
p1 = v128_xor( p1, x );
h[0] = _mm_xor_si128( h[0], p2 );
h[1] = _mm_xor_si128( h[1], p3 );
h[2] = _mm_xor_si128( h[2], p0 );
h[3] = _mm_xor_si128( h[3], p1 );
h[0] = v128_xor( h[0], p2 );
h[1] = v128_xor( h[1], p3 );
h[2] = v128_xor( h[2], p0 );
h[3] = v128_xor( h[3], p1 );
}
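
The rewrite above replaces x86-only _mm_* intrinsics with portable v128_* wrappers so the same round function compiles for SSE/AES-NI and for NEON with the ARM crypto extension. A minimal sketch of how such wrappers could be defined, assuming the names used in the diff map one-to-one onto native intrinsics (the real definitions live in simd-utils.h and may differ):

#if defined(__SSE2__)
#include <immintrin.h>
typedef __m128i v128_t;
#define v128_zero                 _mm_setzero_si128()
#define v128_xor( a, b )          _mm_xor_si128( a, b )
#define v128_set32( a, b, c, d )  _mm_set_epi32( a, b, c, d )
#define v128_alignr8( hi, lo, c ) _mm_alignr_epi8( hi, lo, c )     // SSSE3
#define v128_shuflr32( v )        _mm_shuffle_epi32( v, 0x39 )     // rotate lanes right one
#define v128_aesenc( v, k )       _mm_aesenc_si128( v, k )         // AES-NI
#elif defined(__ARM_NEON) && defined(__ARM_FEATURE_AES)
#include <arm_neon.h>
typedef uint32x4_t v128_t;
#define v128_zero                 vdupq_n_u32( 0 )
#define v128_xor( a, b )          veorq_u32( a, b )
#define v128_set32( a, b, c, d )  ( (uint32x4_t){ (d), (c), (b), (a) } )
#define v128_alignr8( hi, lo, c ) \
   vreinterpretq_u32_u8( vextq_u8( vreinterpretq_u8_u32( lo ), \
                                   vreinterpretq_u8_u32( hi ), (c) ) )
#define v128_shuflr32( v )        vextq_u32( v, v, 1 )
/* NEON's AESE xors the round key before SubBytes/ShiftRows, while x86
   AESENC xors it after MixColumns, so a zero key is fed to AESE and the
   real key is xored afterwards to match AESENC semantics. */
#define v128_aesenc( v, k ) \
   veorq_u32( vreinterpretq_u32_u8( vaesmcq_u8( vaeseq_u8( \
      vreinterpretq_u8_u32( v ), vdupq_n_u8( 0 ) ) ) ), (k) )
#endif

With definitions along these lines, v128_aesenc( x, zero ) in the round code above compiles to a single AESENC on x86 and an AESE/AESMC pair on AArch64, which is why the c512 body needs no per-target branches.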


@@ -263,7 +263,7 @@ void sph_shavite384_addbits_and_close(
void *cc, unsigned ub, unsigned n, void *dst);
// Don't call these directly from application code; use the macros below.
#if defined(__AES__) && defined(__SSSE3__)
#if ( defined(__AES__) && defined(__SSSE3__) ) || ( defined(__ARM_NEON) && defined(__ARM_FEATURE_AES) )
void sph_shavite512_aesni_init(void *cc);
void sph_shavite512_aesni(void *cc, const void *data, size_t len);