Jay D Dee
2025-11-15 10:44:32 -05:00
parent 12480a3ea5
commit 8f2f9ec3e9
26 changed files with 474 additions and 452 deletions

View File

@@ -66,82 +66,60 @@ static BLAKE2_INLINE v128_t fBlaMka(v128_t x, v128_t y)
#if defined(__SSSE3__) || defined(__ARM_NEON)
#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
v128_t t0 = v128_alignr8(B1, B0, 8); \
v128_t t1 = v128_alignr8(B0, B1, 8); \
B0 = t0; \
B1 = t1; \
\
t0 = C0; \
C0 = C1; \
C1 = t0; \
\
t0 = v128_alignr8(D1, D0, 8); \
t1 = v128_alignr8(D0, D1, 8); \
D0 = t1; \
D1 = t0; \
} while ((void)0, 0)
#define DIAGONALIZE( A0, B0, C0, D0, A1, B1, C1, D1 ) \
{ \
v128_t t = v128_alignr8( B1, B0, 8 ); \
B1 = v128_alignr8( B0, B1, 8 ); \
B0 = t; \
t = v128_alignr8( D1, D0, 8 ); \
D0 = v128_alignr8( D0, D1, 8 ); \
D1 = t; \
}
#define UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
v128_t t0 = v128_alignr8(B0, B1, 8); \
v128_t t1 = v128_alignr8(B1, B0, 8); \
B0 = t0; \
B1 = t1; \
\
t0 = C0; \
C0 = C1; \
C1 = t0; \
\
t0 = v128_alignr8(D0, D1, 8); \
t1 = v128_alignr8(D1, D0, 8); \
D0 = t1; \
D1 = t0; \
} while ((void)0, 0)
#define UNDIAGONALIZE( A0, B0, C0, D0, A1, B1, C1, D1 ) \
{ \
v128_t t = v128_alignr8( B0, B1, 8 ); \
B1 = v128_alignr8( B1, B0, 8 ); \
B0 = t; \
t = v128_alignr8( D0, D1, 8 ); \
D0 = v128_alignr8( D1, D0, 8 ); \
D1 = t; \
}
#else /* SSE2 */
#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
v128_t t0 = D0; \
v128_t t1 = B0; \
D0 = C0; \
C0 = C1; \
C1 = D0; \
D0 = v128_unpackhi64(D1, v128_unpacklo64(t0, t0)); \
D1 = v128_unpackhi64(t0, v128_unpacklo64(D1, D1)); \
B0 = v128_unpackhi64(B0, v128_unpacklo64(B1, B1)); \
B1 = v128_unpackhi64(B1, v128_unpacklo64(t1, t1)); \
} while ((void)0, 0)
#define DIAGONALIZE( A0, B0, C0, D0, A1, B1, C1, D1 ) \
{ \
v128_t t = D0; \
D0 = v128_unpackhi64( D1, v128_unpacklo64( D0, D0 ) ); \
D1 = v128_unpackhi64( t, v128_unpacklo64( D1, D1 ) ); \
t = B0; \
B0 = v128_unpackhi64( B0, v128_unpacklo64( B1, B1 ) ); \
B1 = v128_unpackhi64( B1, v128_unpacklo64( t, t ) ); \
}
#define UNDIAGONALIZE( A0, B0, C0, D0, A1, B1, C1, D1 ) \
{ \
v128_t t = B0; \
B0 = v128_unpackhi64( B1, v128_unpacklo64( B0, B0 ) ); \
B1 = v128_unpackhi64( t, v128_unpacklo64( B1, B1 ) ); \
t = D0; \
D0 = v128_unpackhi64( D0, v128_unpacklo64( D1, D1 ) ); \
D1 = v128_unpackhi64( D1, v128_unpacklo64( t, t ) ); \
}
#define UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
v128_t t0, t1; \
t0 = C0; \
C0 = C1; \
C1 = t0; \
t0 = B0; \
t1 = D0; \
B0 = v128_unpackhi64(B1, v128_unpacklo64(B0, B0)); \
B1 = v128_unpackhi64(t0, v128_unpacklo64(B1, B1)); \
D0 = v128_unpackhi64(D0, v128_unpacklo64(D1, D1)); \
D1 = v128_unpackhi64(D1, v128_unpacklo64(t1, t1)); \
} while ((void)0, 0)
#endif
#define BLAKE2_ROUND(A0, A1, B0, B1, C0, C1, D0, D1) \
do { \
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
\
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
} while ((void)0, 0)
#define BLAKE2_ROUND( A0, A1, B0, B1, C0, C1, D0, D1 ) \
{ \
G1( A0, B0, C0, D0, A1, B1, C1, D1 ); \
G2( A0, B0, C0, D0, A1, B1, C1, D1 ); \
DIAGONALIZE( A0, B0, C0, D0, A1, B1, C1, D1 ); \
G1( A0, B0, C1, D0, A1, B1, C0, D1 ); \
G2( A0, B0, C1, D0, A1, B1, C0, D1 ); \
UNDIAGONALIZE( A0, B0, C0, D0, A1, B1, C1, D1 ); \
}
#else /* __AVX2__ */
#include <immintrin.h>
@@ -211,7 +189,6 @@ static BLAKE2_INLINE v128_t fBlaMka(v128_t x, v128_t y)
B0 = _mm256_permute4x64_epi64(B0, _MM_SHUFFLE(0, 3, 2, 1)); \
C0 = _mm256_permute4x64_epi64(C0, _MM_SHUFFLE(1, 0, 3, 2)); \
D0 = _mm256_permute4x64_epi64(D0, _MM_SHUFFLE(2, 1, 0, 3)); \
\
B1 = _mm256_permute4x64_epi64(B1, _MM_SHUFFLE(0, 3, 2, 1)); \
C1 = _mm256_permute4x64_epi64(C1, _MM_SHUFFLE(1, 0, 3, 2)); \
D1 = _mm256_permute4x64_epi64(D1, _MM_SHUFFLE(2, 1, 0, 3)); \
@@ -219,17 +196,14 @@ static BLAKE2_INLINE v128_t fBlaMka(v128_t x, v128_t y)
#define DIAGONALIZE_2(A0, A1, B0, B1, C0, C1, D0, D1) \
do { \
__m256i tmp1 = _mm256_blend_epi32(B0, B1, 0xCC); \
__m256i tmp2 = _mm256_blend_epi32(B0, B1, 0x33); \
B1 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
tmp1 = C0; \
B0 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
C0 = C1; \
tmp2 = _mm256_blend_epi32(D0, D1, 0x33); \
C1 = tmp1; \
__m256i tmp1 = _mm256_blend_epi32(B0, B1, 0x33); \
__m256i tmp2 = _mm256_blend_epi32(B0, B1, 0xCC); \
B0 = _mm256_shuffle_epi32( tmp1, 0x4e ); \
B1 = _mm256_shuffle_epi32( tmp2, 0x4e ); \
tmp1 = _mm256_blend_epi32(D0, D1, 0xCC); \
D1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
D0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
tmp2 = _mm256_blend_epi32(D0, D1, 0x33); \
D0 = _mm256_shuffle_epi32( tmp1, 0x4e ); \
D1 = _mm256_shuffle_epi32( tmp2, 0x4e ); \
} while(0);
#define UNDIAGONALIZE_1(A0, B0, C0, D0, A1, B1, C1, D1) \
@@ -237,7 +211,6 @@ static BLAKE2_INLINE v128_t fBlaMka(v128_t x, v128_t y)
B0 = _mm256_permute4x64_epi64(B0, _MM_SHUFFLE(2, 1, 0, 3)); \
C0 = _mm256_permute4x64_epi64(C0, _MM_SHUFFLE(1, 0, 3, 2)); \
D0 = _mm256_permute4x64_epi64(D0, _MM_SHUFFLE(0, 3, 2, 1)); \
\
B1 = _mm256_permute4x64_epi64(B1, _MM_SHUFFLE(2, 1, 0, 3)); \
C1 = _mm256_permute4x64_epi64(C1, _MM_SHUFFLE(1, 0, 3, 2)); \
D1 = _mm256_permute4x64_epi64(D1, _MM_SHUFFLE(0, 3, 2, 1)); \
@@ -247,27 +220,21 @@ static BLAKE2_INLINE v128_t fBlaMka(v128_t x, v128_t y)
do { \
__m256i tmp1 = _mm256_blend_epi32(B0, B1, 0xCC); \
__m256i tmp2 = _mm256_blend_epi32(B0, B1, 0x33); \
B0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
tmp1 = C0; \
B1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
C0 = C1; \
B0 = _mm256_shuffle_epi32( tmp1, 0x4e ); \
B1 = _mm256_shuffle_epi32( tmp2, 0x4e ); \
tmp2 = _mm256_blend_epi32(D0, D1, 0xCC); \
C1 = tmp1; \
tmp1 = _mm256_blend_epi32(D0, D1, 0x33); \
D1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
D0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
D1 = _mm256_shuffle_epi32( tmp2, 0x4e ); \
D0 = _mm256_shuffle_epi32( tmp1, 0x4e ); \
} while((void)0, 0);
#define BLAKE2_ROUND_1(A0, A1, B0, B1, C0, C1, D0, D1) \
do{ \
G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
\
DIAGONALIZE_1(A0, B0, C0, D0, A1, B1, C1, D1) \
\
G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
\
UNDIAGONALIZE_1(A0, B0, C0, D0, A1, B1, C1, D1) \
} while((void)0, 0);
@@ -275,12 +242,9 @@ static BLAKE2_INLINE v128_t fBlaMka(v128_t x, v128_t y)
do{ \
G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
\
DIAGONALIZE_2(A0, A1, B0, B1, C0, C1, D0, D1) \
\
G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
\
G1_AVX2(A0, A1, B0, B1, C1, C0, D0, D1) \
G2_AVX2(A0, A1, B0, B1, C1, C0, D0, D1) \
UNDIAGONALIZE_2(A0, A1, B0, B1, C0, C1, D0, D1) \
} while((void)0, 0);
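A note on the DIAGONALIZE_2 / UNDIAGONALIZE_2 rewrite above (my reading of the diff, not commit text): _mm256_shuffle_epi32 with control 0x4e swaps the two 64-bit halves of each 128-bit lane, which is the same result _mm256_permute4x64_epi64 with _MM_SHUFFLE(2,3,0,1) produced, but as an in-lane shuffle rather than a cross-lane permute. The C0/C1 swap the old macros performed is handled instead by swapping the C arguments in the second pair of G calls inside BLAKE2_ROUND_2. A standalone check of the shuffle equivalence, assuming an AVX2 build and CPU:

#include <assert.h>
#include <string.h>
#include <immintrin.h>

int main( void )
{
    const __m256i v = _mm256_set_epi64x( 4, 3, 2, 1 );
    __m256i a = _mm256_shuffle_epi32( v, 0x4e );                      /* in-lane 32-bit shuffle */
    __m256i b = _mm256_permute4x64_epi64( v, _MM_SHUFFLE(2,3,0,1) );  /* cross-lane 64-bit permute */
    assert( memcmp( &a, &b, sizeof a ) == 0 );                        /* identical results */
    return 0;
}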
@@ -290,12 +254,73 @@ static BLAKE2_INLINE v128_t fBlaMka(v128_t x, v128_t y)
#include <immintrin.h>
/*
static inline __m512i muladd(__m512i x, __m512i y)
{
__m512i z = _mm512_mul_epu32(x, y);
return _mm512_add_epi64(_mm512_add_epi64(x, y), _mm512_add_epi64(z, z));
}
*/
#define G1( A0, B0, C0, D0, A1, B1, C1, D1 ) \
{ \
__m512i z0, z1; \
z0 = _mm512_mul_epu32( A0, B0 ); \
z1 = _mm512_mul_epu32( A1, B1 ); \
A0 = _mm512_add_epi64( A0, B0 ); \
A1 = _mm512_add_epi64( A1, B1 ); \
z0 = _mm512_add_epi64( z0, z0 ); \
z1 = _mm512_add_epi64( z1, z1 ); \
A0 = _mm512_add_epi64( A0, z0 ); \
A1 = _mm512_add_epi64( A1, z1 ); \
D0 = _mm512_xor_si512(D0, A0); \
D1 = _mm512_xor_si512(D1, A1); \
D0 = _mm512_ror_epi64(D0, 32); \
D1 = _mm512_ror_epi64(D1, 32); \
z0 = _mm512_mul_epu32( C0, D0 ); \
z1 = _mm512_mul_epu32( C1, D1 ); \
C0 = _mm512_add_epi64( C0, D0 ); \
C1 = _mm512_add_epi64( C1, D1 ); \
z0 = _mm512_add_epi64( z0, z0 ); \
z1 = _mm512_add_epi64( z1, z1 ); \
C0 = _mm512_add_epi64( C0, z0 ); \
C1 = _mm512_add_epi64( C1, z1 ); \
B0 = _mm512_xor_si512(B0, C0); \
B1 = _mm512_xor_si512(B1, C1); \
B0 = _mm512_ror_epi64(B0, 24); \
B1 = _mm512_ror_epi64(B1, 24); \
}
#define G2( A0, B0, C0, D0, A1, B1, C1, D1 ) \
{ \
__m512i z0, z1; \
z0 = _mm512_mul_epu32( A0, B0 ); \
z1 = _mm512_mul_epu32( A1, B1 ); \
A0 = _mm512_add_epi64( A0, B0 ); \
A1 = _mm512_add_epi64( A1, B1 ); \
z0 = _mm512_add_epi64( z0, z0 ); \
z1 = _mm512_add_epi64( z1, z1 ); \
A0 = _mm512_add_epi64( A0, z0 ); \
A1 = _mm512_add_epi64( A1, z1 ); \
D0 = _mm512_xor_si512(D0, A0); \
D1 = _mm512_xor_si512(D1, A1); \
D0 = _mm512_ror_epi64(D0, 16); \
D1 = _mm512_ror_epi64(D1, 16); \
z0 = _mm512_mul_epu32( C0, D0 ); \
z1 = _mm512_mul_epu32( C1, D1 ); \
C0 = _mm512_add_epi64( C0, D0 ); \
C1 = _mm512_add_epi64( C1, D1 ); \
z0 = _mm512_add_epi64( z0, z0 ); \
z1 = _mm512_add_epi64( z1, z1 ); \
C0 = _mm512_add_epi64( C0, z0 ); \
C1 = _mm512_add_epi64( C1, z1 ); \
B0 = _mm512_xor_si512(B0, C0); \
B1 = _mm512_xor_si512(B1, C1); \
B0 = _mm512_ror_epi64(B0, 63); \
B1 = _mm512_ror_epi64(B1, 63); \
}
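For context on the new G1/G2 bodies above: they inline Argon2's BlaMka mix, x + y + 2 * (lo32(x) * lo32(y)), per 64-bit lane, with _mm512_mul_epu32 supplying the 32x32->64 low-half product (the commented-out muladd below is the same operation). A minimal scalar sketch of one lane, my own illustration rather than code from the repo:

#include <assert.h>
#include <stdint.h>

/* one lane of the BlaMka mix the vector macros compute */
static inline uint64_t blamka( uint64_t x, uint64_t y )
{
    uint64_t m = (uint64_t)(uint32_t)x * (uint32_t)y;   /* low 32 bits of each operand, multiplied */
    return x + y + 2 * m;
}

int main( void )
{
    assert( blamka( 3, 5 ) == 3 + 5 + 2 * 15 );
    return 0;
}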
/*
#define G1(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = muladd(A0, B0); \
@@ -316,7 +341,8 @@ static inline __m512i muladd(__m512i x, __m512i y)
B0 = _mm512_ror_epi64(B0, 24); \
B1 = _mm512_ror_epi64(B1, 24); \
} while ((void)0, 0)
*/
/*
#define G2(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = muladd(A0, B0); \
@@ -337,15 +363,14 @@ static inline __m512i muladd(__m512i x, __m512i y)
B0 = _mm512_ror_epi64(B0, 63); \
B1 = _mm512_ror_epi64(B1, 63); \
} while ((void)0, 0)
*/
#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
B0 = _mm512_permutex_epi64(B0, _MM_SHUFFLE(0, 3, 2, 1)); \
B1 = _mm512_permutex_epi64(B1, _MM_SHUFFLE(0, 3, 2, 1)); \
\
C0 = _mm512_permutex_epi64(C0, _MM_SHUFFLE(1, 0, 3, 2)); \
C1 = _mm512_permutex_epi64(C1, _MM_SHUFFLE(1, 0, 3, 2)); \
\
D0 = _mm512_permutex_epi64(D0, _MM_SHUFFLE(2, 1, 0, 3)); \
D1 = _mm512_permutex_epi64(D1, _MM_SHUFFLE(2, 1, 0, 3)); \
} while ((void)0, 0)
@@ -354,10 +379,8 @@ static inline __m512i muladd(__m512i x, __m512i y)
do { \
B0 = _mm512_permutex_epi64(B0, _MM_SHUFFLE(2, 1, 0, 3)); \
B1 = _mm512_permutex_epi64(B1, _MM_SHUFFLE(2, 1, 0, 3)); \
\
C0 = _mm512_permutex_epi64(C0, _MM_SHUFFLE(1, 0, 3, 2)); \
C1 = _mm512_permutex_epi64(C1, _MM_SHUFFLE(1, 0, 3, 2)); \
\
D0 = _mm512_permutex_epi64(D0, _MM_SHUFFLE(0, 3, 2, 1)); \
D1 = _mm512_permutex_epi64(D1, _MM_SHUFFLE(0, 3, 2, 1)); \
} while ((void)0, 0)
@@ -366,15 +389,17 @@ static inline __m512i muladd(__m512i x, __m512i y)
do { \
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
\
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
} while ((void)0, 0)
static const __m512i swap_q0 = { 0,1, 8,9, 2,3, 10,11 };
static const __m512i swap_q1 = { 4,5, 12,13, 6,7, 14,15 };
static const __m512i uswap_q0 = { 0,1, 4,5, 8,9, 12,13 };
static const __m512i uswap_q1 = { 2,3, 6,7, 10,11, 14,15 };
#define SWAP_HALVES(A0, A1) \
do { \
__m512i t; \
@@ -383,19 +408,36 @@ static inline __m512i muladd(__m512i x, __m512i y)
A0 = t; \
} while((void)0, 0)
#define SWAP_QUARTERS(A0, A1) \
{ \
__m512i t = _mm512_permutex2var_epi64( A0, swap_q0, A1 ); \
A1 = _mm512_permutex2var_epi64( A0, swap_q1, A1 ); \
A0 = t; \
}
#define UNSWAP_QUARTERS(A0, A1) \
{ \
__m512i t = _mm512_permutex2var_epi64( A0, uswap_q0, A1 ); \
A1 = _mm512_permutex2var_epi64( A0, uswap_q1, A1 ); \
A0 = t; \
}
/*
#define SWAP_QUARTERS(A0, A1) \
do { \
SWAP_HALVES(A0, A1); \
A0 = _mm512_shuffle_i64x2( A0, A0, 0xd8 ); \
A1 = _mm512_shuffle_i64x2( A1, A1, 0xd8 ); \
} while((void)0, 0)
*/
/*
#define UNSWAP_QUARTERS(A0, A1) \
do { \
A0 = _mm512_shuffle_i64x2( A0, A0, 0xd8 ); \
A1 = _mm512_shuffle_i64x2( A1, A1, 0xd8 ); \
SWAP_HALVES(A0, A1); \
} while((void)0, 0)
*/
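For readers unfamiliar with the two-source permute behind the new SWAP_QUARTERS / UNSWAP_QUARTERS: _mm512_permutex2var_epi64 selects 64-bit lanes by index, 0-7 from the first operand and 8-15 from the second, so swap_q0 = { 0,1, 8,9, 2,3, 10,11 } interleaves the 128-bit quarters of A0 and A1 in one instruction, with swap_q1 covering the upper quarters. A scalar model of the selection rule (an illustrative sketch, not code from the repo):

#include <assert.h>
#include <stdint.h>

/* scalar model of _mm512_permutex2var_epi64: index bit 3 picks the source register */
static void permutex2var64( const uint64_t a[8], const uint64_t idx[8],
                            const uint64_t b[8], uint64_t r[8] )
{
    for ( int i = 0; i < 8; i++ )
        r[i] = ( idx[i] & 8 ) ? b[ idx[i] & 7 ] : a[ idx[i] & 7 ];
}

int main( void )
{
    const uint64_t swap_q0[8] = { 0,1, 8,9, 2,3, 10,11 };
    uint64_t a[8] = { 0,1,2,3,4,5,6,7 };
    uint64_t b[8] = { 10,11,12,13,14,15,16,17 };
    uint64_t r[8];
    permutex2var64( a, swap_q0, b, r );
    assert( r[2] == 10 && r[3] == 11 );   /* second 128-bit quarter comes from b */
    return 0;
}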
#define BLAKE2_ROUND_1(A0, C0, B0, D0, A1, C1, B1, D1) \
do { \

View File

@@ -683,8 +683,9 @@ void compress_big( const __m256i *M, const __m256i H[16], __m256i dH[16] )
mj[14] = mm256_rol_64( M[14], 15 );
mj[15] = mm256_rol_64( M[15], 16 );
__m256i K = _mm256_set1_epi64x( 16 * 0x0555555555555555ULL );
const __m256i Kincr = _mm256_set1_epi64x( 0x0555555555555555ULL );
__m256i K = _mm256_set1_epi64x( 0x5555555555555550ULL );
static const __m256i Kincr = { 0x0555555555555555ULL, 0x0555555555555555ULL,
0x0555555555555555ULL, 0x0555555555555555ULL };
qt[16] = add_elt_b( mj[ 0], mj[ 3], mj[10], H[ 7], K );
K = _mm256_add_epi64( K, Kincr );
@@ -1094,7 +1095,7 @@ void compress_big_8way( const __m512i *M, const __m512i H[16],
__m512i dH[16] )
{
__m512i qt[32], xl, xh;
__m512i mh[16];
__m512i mh[16], mj[16];
int i;
for ( i = 0; i < 16; i++ )
@@ -1117,8 +1118,6 @@ void compress_big_8way( const __m512i *M, const __m512i H[16],
qt[14] = _mm512_add_epi64( s8b4( W8b14), H[15] );
qt[15] = _mm512_add_epi64( s8b0( W8b15), H[ 0] );
__m512i mj[16];
mj[ 0] = mm512_rol_64( M[ 0], 1 );
mj[ 1] = mm512_rol_64( M[ 1], 2 );
mj[ 2] = mm512_rol_64( M[ 2], 3 );
@@ -1136,8 +1135,11 @@ void compress_big_8way( const __m512i *M, const __m512i H[16],
mj[14] = mm512_rol_64( M[14], 15 );
mj[15] = mm512_rol_64( M[15], 16 );
__m512i K = _mm512_set1_epi64( 16 * 0x0555555555555555ULL );
const __m512i Kincr = _mm512_set1_epi64( 0x0555555555555555ULL );
__m512i K = _mm512_set1_epi64( 0x5555555555555550ULL );
static const __m512i Kincr = { 0x0555555555555555ULL, 0x0555555555555555ULL,
0x0555555555555555ULL, 0x0555555555555555ULL,
0x0555555555555555ULL, 0x0555555555555555ULL,
0x0555555555555555ULL, 0x0555555555555555ULL };
qt[16] = add_elt_b8( mj[ 0], mj[ 3], mj[10], H[ 7], K );
K = _mm512_add_epi64( K, Kincr );
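The constant change in this file is numerically a no-op: multiplying by 16 is a left shift by four, so 16 * 0x0555555555555555 equals 0x5555555555555550, and Kincr becomes a compile-time vector instead of a run-time set1. Quick arithmetic check (standalone, not from the commit):

#include <assert.h>
#include <stdint.h>

int main( void )
{
    assert( 16 * 0x0555555555555555ULL == 0x5555555555555550ULL );
    return 0;
}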

View File

@@ -503,32 +503,28 @@ do { \
SBOX8( s2, s6, sA, sE ); /* ( m1, c3, m5, c7 ) */ \
SBOX8( s3, s7, sB, sF ); /* ( c1, m3, c5, m7 ) */ \
s4 = mm512_swap64_32( s4 ); \
s5 = mm512_swap64_32( s5 ); \
t0 = _mm512_mask_shuffle_epi32( s4, 0xaaaa, s5, 0xb1 ); \
sD = mm512_swap64_32( sD ); \
sE = mm512_swap64_32( sE ); \
t0 = _mm512_mask_blend_epi32( 0xaaaa, s4, s5 ); \
t1 = _mm512_mask_blend_epi32( 0xaaaa, sD, sE ); \
t1 = _mm512_mask_shuffle_epi32( sD, 0xaaaa, sE, 0xb1 ); \
L8( s0, t0, s9, t1 ); \
s6 = mm512_swap64_32( s6 ); \
sF = mm512_swap64_32( sF ); \
t2 = _mm512_mask_blend_epi32( 0xaaaa, s5, s6 ); \
t3 = _mm512_mask_blend_epi32( 0xaaaa, sE, sF ); \
t2 = _mm512_mask_shuffle_epi32( s6, 0x5555, s5, 0xb1 ); \
t3 = _mm512_mask_shuffle_epi32( sF, 0x5555, sE, 0xb1 ); \
L8( s1, t2, sA, t3 ); \
s5 = _mm512_mask_blend_epi32( 0x5555, t0, t2 ); \
sE = _mm512_mask_blend_epi32( 0x5555, t1, t3 ); \
\
s7 = mm512_swap64_32( s7 ); \
sC = mm512_swap64_32( sC ); \
t4 = _mm512_mask_blend_epi32( 0xaaaa, s6, s7 ); \
t5 = _mm512_mask_blend_epi32( 0xaaaa, sF, sC ); \
t4 = _mm512_mask_shuffle_epi32( s6, 0xaaaa, s7, 0xb1 ); \
t5 = _mm512_mask_shuffle_epi32( sF, 0xaaaa, sC, 0xb1 ); \
L8( s2, t4, sB, t5 ); \
s6 = _mm512_mask_blend_epi32( 0x5555, t2, t4 ); \
sF = _mm512_mask_blend_epi32( 0x5555, t3, t5 ); \
s6 = mm512_swap64_32( s6 ); \
sF = mm512_swap64_32( sF ); \
\
t2 = _mm512_mask_blend_epi32( 0xaaaa, s7, s4 ); \
t3 = _mm512_mask_blend_epi32( 0xaaaa, sC, sD ); \
t2 = _mm512_mask_shuffle_epi32( s4, 0x5555, s7, 0xb1 ); \
t3 = _mm512_mask_shuffle_epi32( sD, 0x5555, sC, 0xb1 ); \
L8( s3, t2, s8, t3 ); \
s7 = _mm512_mask_blend_epi32( 0x5555, t4, t2 ); \
s4 = _mm512_mask_blend_epi32( 0xaaaa, t0, t2 ); \
@@ -537,21 +533,20 @@ do { \
s7 = mm512_swap64_32( s7 ); \
sC = mm512_swap64_32( sC ); \
\
t0 = _mm512_mask_blend_epi32( 0xaaaa, s0, mm512_swap64_32( s8 ) ); \
t0 = _mm512_mask_shuffle_epi32( s0, 0xaaaa, s8, 0xb1 ); \
t1 = _mm512_mask_blend_epi32( 0xaaaa, s1, s9 ); \
t2 = _mm512_mask_blend_epi32( 0xaaaa, mm512_swap64_32( s2 ), sA ); \
t2 = _mm512_mask_shuffle_epi32( sA, 0x5555, s2, 0xb1 ); \
t3 = _mm512_mask_blend_epi32( 0x5555, s3, sB ); \
t3 = mm512_swap64_32( t3 ); \
L8( t0, t1, t2, t3 ); \
t3 = mm512_swap64_32( t3 ); \
s0 = _mm512_mask_blend_epi32( 0x5555, s0, t0 ); \
s8 = _mm512_mask_blend_epi32( 0x5555, s8, mm512_swap64_32( t0 ) ); \
s8 = _mm512_mask_shuffle_epi32( s8, 0x5555, t0, 0xb1 ); \
s1 = _mm512_mask_blend_epi32( 0x5555, s1, t1 ); \
s9 = _mm512_mask_blend_epi32( 0xaaaa, s9, t1 ); \
s2 = _mm512_mask_blend_epi32( 0xaaaa, s2, mm512_swap64_32( t2 ) ); \
s2 = _mm512_mask_shuffle_epi32( s2, 0xaaaa, t2, 0xb1 ); \
sA = _mm512_mask_blend_epi32( 0xaaaa, sA, t2 ); \
s3 = _mm512_mask_blend_epi32( 0xaaaa, s3, t3 ); \
sB = _mm512_mask_blend_epi32( 0x5555, sB, t3 ); \
s3 = _mm512_mask_shuffle_epi32( s3, 0xaaaa, t3, 0xb1 ); \
sB = _mm512_mask_shuffle_epi32( sB, 0x5555, t3, 0xb1 ); \
\
t0 = _mm512_mask_blend_epi32( 0xaaaa, s4, sC ); \
t1 = _mm512_mask_blend_epi32( 0xaaaa, s5, sD ); \
@@ -1268,7 +1263,7 @@ do { \
} while (0)
#endif
// v3 no ternary logic, 15 instructions, 9 TL equivalent instructions
// v3, 15 instructions
#define SBOX( a, b, c, d ) \
{ \
__m256i tb, td; \
@@ -1286,7 +1281,7 @@ do { \
#endif
/*
/ v2, 16 instructions, 10 TL equivalent instructions
/ v2, 16 instructions
#define SBOX( a, b, c, d ) \
{ \
__m256i t = mm256_xorand( d, a, c ); \
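On the substitutions in the first hunk above (my reading): shuffle control 0xb1, i.e. _MM_SHUFFLE(2,3,0,1), picks elements 1,0,3,2 of each 128-bit lane, so it swaps the 32-bit halves of every 64-bit word; _mm512_mask_shuffle_epi32( s4, 0xaaaa, s5, 0xb1 ) therefore merges that swap of s5 into s4 under the 0xaaaa write mask, replacing a swap64_32 plus a mask_blend with one instruction. A per-lane scalar sketch of the merge rule (illustrative, names mine):

#include <assert.h>
#include <stdint.h>

/* scalar model of a merge-masked 32-bit shuffle over one 128-bit lane:
   control 0xb1 selects elements 1,0,3,2, swapping the halves of each 64-bit word */
static void mask_shuffle_0xb1( const uint32_t src[4], uint8_t mask,
                               const uint32_t a[4], uint32_t dst[4] )
{
    static const int sel[4] = { 1, 0, 3, 2 };
    for ( int i = 0; i < 4; i++ )
        dst[i] = ( mask >> i ) & 1 ? a[ sel[i] ] : src[i];
}

int main( void )
{
    uint32_t s4[4] = { 0, 1, 2, 3 };       /* merge source */
    uint32_t s5[4] = { 10, 11, 12, 13 };   /* shuffled operand */
    uint32_t t0[4];
    mask_shuffle_0xb1( s4, 0xa, s5, t0 );  /* 0xa = odd elements, like 0xaaaa per lane */
    assert( t0[0] == 0 && t0[1] == 10 && t0[2] == 2 && t0[3] == 12 );
    return 0;
}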

View File

@@ -80,14 +80,14 @@ static const uint32_t CNS_INIT[128] __attribute((aligned(64))) = {
__m512i t = a0; \
a0 = mm512_xoror( a3, a0, a1 ); \
a2 = _mm512_xor_si512( a2, a3 ); \
a1 = _mm512_ternarylogic_epi64( a1, a3, t, 0x87 ); /* a1 xnor (a3 & t) */ \
a1 = _mm512_ternarylogic_epi64( a1, a3, t, 0x87 ); /* a1 nxor (a3 & t) */ \
a3 = mm512_xorand( a2, a3, t ); \
a2 = mm512_xorand( a1, a2, a0); \
a1 = _mm512_or_si512( a1, a3 ); \
a3 = _mm512_xor_si512( a3, a2 ); \
t = _mm512_xor_si512( t, a1 ); \
a2 = _mm512_and_si512( a2, a1 ); \
a1 = mm512_xnor( a1, a0 ); \
a1 = mm512_nxor( a1, a0 ); \
a0 = t; \
}
@@ -527,14 +527,14 @@ int luffa_4way_update_close( luffa_4way_context *state,
__m256i t = a0; \
a0 = mm256_xoror( a3, a0, a1 ); \
a2 = _mm256_xor_si256( a2, a3 ); \
a1 = _mm256_ternarylogic_epi64( a1, a3, t, 0x87 ); /* a1 xnor (a3 & t) */ \
a1 = _mm256_ternarylogic_epi64( a1, a3, t, 0x87 ); /* a1 nxor (a3 & t) */ \
a3 = mm256_xorand( a2, a3, t ); \
a2 = mm256_xorand( a1, a2, a0); \
a1 = _mm256_or_si256( a1, a3 ); \
a3 = _mm256_xor_si256( a3, a2 ); \
t = _mm256_xor_si256( t, a1 ); \
a2 = _mm256_and_si256( a2, a1 ); \
a1 = mm256_xnor( a1, a0 ); \
a1 = mm256_nxor( a1, a0 ); \
a0 = t; \
}
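The 0x87 immediate in SUBCRUMB encodes ~(a ^ (b & c)): ternary-logic immediates are the truth table of the expression evaluated on the patterns a = 0xF0, b = 0xCC, c = 0xAA, and ~(0xF0 ^ (0xCC & 0xAA)) = ~0x78 = 0x87, which is the "a1 nxor (a3 & t)" the updated comments describe. A standalone derivation check (not from the commit):

#include <assert.h>
#include <stdint.h>

int main( void )
{
    /* truth-table patterns used to derive _mm*_ternarylogic_epi64 immediates */
    const uint8_t a = 0xF0, b = 0xCC, c = 0xAA;
    assert( (uint8_t)~( a ^ ( b & c ) ) == 0x87 );
    return 0;
}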

View File

@@ -69,18 +69,18 @@
v128_t t = a0; \
a0 = v128_xoror( a3, a0, a1 ); \
a2 = v128_xor( a2, a3 ); \
a1 = _mm_ternarylogic_epi64( a1, a3, t, 0x87 ); /* a1 xnor (a3 & t) */ \
a1 = _mm_ternarylogic_epi64( a1, a3, t, 0x87 ); /* ~a1 ^ (a3 & t) */ \
a3 = v128_xorand( a2, a3, t ); \
a2 = v128_xorand( a1, a2, a0 ); \
a1 = v128_or( a1, a3 ); \
a3 = v128_xor( a3, a2 ); \
t = v128_xor( t, a1 ); \
a2 = v128_and( a2, a1 ); \
a1 = v128_xnor( a1, a0 ); \
a1 = v128_nxor( a1, a0 ); \
a0 = t; \
}
#else
#elif defined(__ARM_NEON) || defined(__SSE2__)
#define SUBCRUMB( a0, a1, a2, a3 ) \
{ \

View File

@@ -67,7 +67,7 @@ static const uint64_t K512[80] =
0x4CC5D4BECB3E42B6, 0x597F299CFC657E2A, 0x5FCB6FAB3AD6FAEC, 0x6C44198C4A475817
};
#if defined(__AVX2__) && defined(__SHA512__)
#if defined(__AVX__) && defined(__SHA512__)
// SHA-512 implemented using SHA512 CPU extension.

View File

@@ -5,7 +5,7 @@
#include "simd-utils.h"
#include "sph_sha2.h"
#if defined(__SHA512__) && defined(__AVX2__)
#if defined(__SHA512__) && defined(__AVX__)
// Experimental, untested
// Need to substitute for sph_sha512

View File

@@ -305,7 +305,7 @@ do { \
xb0 = mm512_rol_32( xb0, 1 ); \
xa0 = mm512_xor3( xm, xb1, \
mm512_xorandnot( v512_mult_x3( xa0 ), xb3, xb2 ) ); \
xb0 = mm512_xnor( xa0, xb0 ); \
xb0 = mm512_nxor( xa0, xb0 ); \
} while (0)
#define PERM_STEP_0_16 do { \
@@ -898,7 +898,7 @@ do { \
xb0 = mm256_rol_32( xb0, 1 ); \
xa0 = mm256_xor3( xm, xb1, \
mm256_xorandnot( v256_mult_x3( xa0 ), xb3, xb2 ) ); \
xb0 = mm256_xnor( xa0, xb0 ); \
xb0 = mm256_nxor( xa0, xb0 ); \
} while (0)
#define PERM_STEP_0_8 do { \

View File

@@ -171,6 +171,53 @@ static const m128_v16 FFT256_twiddle[] __attribute__((aligned(64))) =
{{ -30, 55, -58, -65, -95, -40, -98, 94 }},
};
#if defined(__AVX2__)
static const __m256i V256_00FF = { 0x00ff00ff00ff00ff, 0x00ff00ff00ff00ff,
0x00ff00ff00ff00ff, 0x00ff00ff00ff00ff };
#define V128_00FF _mm256_castsi256_si128( V256_00FF )
#elif defined(__SSE2__) || defined(__ARM_NEON )
static const v128u64_t V128_00FF = { 0x00ff00ff00ff00ff, 0x00ff00ff00ff00ff };
#endif
#if defined(SIMD512)
static const __m512i V512_0101 = { 0x0101010101010101, 0x0101010101010101,
0x0101010101010101, 0x0101010101010101,
0x0101010101010101, 0x0101010101010101,
0x0101010101010101, 0x0101010101010101 };
#define V256_0101 _mm512_castsi512_si256( V512_0101 )
#define V128_0101 _mm512_castsi512_si128( V512_0101 )
static const __m512i V512_0080 = { 0x0080008000800080, 0x0080008000800080,
0x0080008000800080, 0x0080008000800080,
0x0080008000800080, 0x0080008000800080,
0x0080008000800080, 0x0080008000800080 };
#define V256_0080 _mm512_castsi512_si256( V512_0080 )
#define V128_0080 _mm512_castsi512_si128( V512_0080 )
#elif defined(__AVX2__)
static const __m256i V256_0101 = { 0x0101010101010101, 0x0101010101010101,
0x0101010101010101, 0x0101010101010101 };
#define V128_0101 _mm256_castsi256_si128( V256_0101 )
static const __m256i V256_0080 = { 0x0080008000800080, 0x0080008000800080,
0x0080008000800080, 0x0080008000800080 };
#define V128_0080 _mm256_castsi256_si128( V256_0080 )
#elif defined(__SSE2__) || defined(__ARM_NEON )
static const v128u64_t V128_0101 = { 0x0101010101010101, 0x0101010101010101 };
static const v128u64_t V128_0080 = { 0x0080008000800080, 0x0080008000800080 };
#endif
#if defined(__x86_64__)
#define SHUFXOR_1(x) _mm_shuffle_epi32(x,0xb1)
@@ -190,13 +237,10 @@ static const m128_v16 FFT256_twiddle[] __attribute__((aligned(64))) =
#define shufxor(x,s) XCAT(SHUFXOR_,s)(x)
#define REDUCE(x) \
v128_sub16( v128_and( x, v128_64( \
0x00ff00ff00ff00ff ) ), v128_sra16( x, 8 ) )
v128_sub16( v128_and( x, V128_00FF ), v128_sra16( x, 8 ) )
#define EXTRA_REDUCE_S(x)\
v128_sub16( x, v128_and( \
v128_64( 0x0101010101010101 ), \
v128_cmpgt16( x, v128_64( 0x0080008000800080 ) ) ) )
v128_sub16( x, v128_and( V128_0101, v128_cmpgt16( x, V128_0080 ) ) )
#define REDUCE_FULL_S( x ) EXTRA_REDUCE_S( REDUCE (x ) )
@@ -293,10 +337,9 @@ do { \
// This will make the full FFT_64 in order.
#define INTERLEAVE(i,j) \
do { \
v128u16_t t1= X(i); \
v128u16_t t2= X(j); \
X(i) = v128_unpacklo16( t1, t2 ); \
X(j) = v128_unpackhi16( t1, t2 ); \
v128u16_t t = X(i); \
X(i) = v128_unpacklo16( t, X(j) ); \
X(j) = v128_unpackhi16( t, X(j) ); \
} while(0)
INTERLEAVE( 1, 0 );
@@ -803,23 +846,12 @@ static const m256_v16 FFT256_Twiddle[] =
#define shufxor2w(x,s) XCAT(SHUFXOR_,s)(x)
#if defined(VL256)
#define REDUCE(x) \
_mm256_sub_epi16( _mm256_maskz_mov_epi8( 0x55555555, x ), \
_mm256_srai_epi16( x, 8 ) )
#else
#define REDUCE(x) \
_mm256_sub_epi16( _mm256_and_si256( x, _mm256_set1_epi64x( \
0x00ff00ff00ff00ff ) ), _mm256_srai_epi16( x, 8 ) )
#endif
_mm256_sub_epi16( _mm256_and_si256( x, V256_00FF ), _mm256_srai_epi16( x, 8 ) )
#define EXTRA_REDUCE_S(x)\
_mm256_sub_epi16( x, _mm256_and_si256( \
_mm256_set1_epi64x( 0x0101010101010101 ), \
_mm256_cmpgt_epi16( x, _mm256_set1_epi64x( 0x0080008000800080 ) ) ) )
_mm256_sub_epi16( x, _mm256_and_si256( V256_0101, \
_mm256_cmpgt_epi16( x, V256_0080 ) ) )
#define REDUCE_FULL_S( x ) EXTRA_REDUCE_S( REDUCE (x ) )
@@ -917,10 +949,9 @@ do { \
// This will make the full FFT_64 in order.
#define INTERLEAVE(i,j) \
do { \
__m256i t1= X(i); \
__m256i t2= X(j); \
X(i) = _mm256_unpacklo_epi16( t1, t2 ); \
X(j) = _mm256_unpackhi_epi16( t1, t2 ); \
__m256i t = X(i); \
X(i) = _mm256_unpacklo_epi16( t, X(j) ); \
X(j) = _mm256_unpackhi_epi16( t, X(j) ); \
} while(0)
INTERLEAVE( 1, 0 );
@@ -1658,10 +1689,8 @@ static const m512_v16 FFT256_Twiddle4w[] =
_mm512_srai_epi16( x, 8 ) )
#define EXTRA_REDUCE_S4w(x) \
_mm512_sub_epi16( x, _mm512_and_si512( \
_mm512_set1_epi64( 0x0101010101010101 ), \
_mm512_movm_epi16( _mm512_cmpgt_epi16_mask( \
x, _mm512_set1_epi64( 0x0080008000800080 ) ) ) ) )
_mm512_sub_epi16( x, _mm512_and_si512( V512_0101, \
_mm512_movm_epi16( _mm512_cmpgt_epi16_mask( x, V512_0080 ) ) ) )
// generic, except it calls targetted macros
#define REDUCE_FULL_S4w( x ) EXTRA_REDUCE_S4w( REDUCE4w (x ) )
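For context on the REDUCE / EXTRA_REDUCE_S family these hunks touch: the SIMD hash works modulo 257, and since 256 is congruent to -1 (mod 257) a 16-bit value partially reduces as (x & 0xff) - (x >> 8); EXTRA_REDUCE_S then subtracts 257 (0x0101 per 16-bit lane) whenever the value exceeds 128 (0x0080), which is exactly what the new V*_00FF, V*_0101 and V*_0080 constants feed. A per-lane scalar sketch (my own, assuming signed 16-bit lanes):

#include <assert.h>
#include <stdint.h>

/* partial reduction mod 257: 256 == -1 (mod 257) */
static inline int16_t reduce257( int16_t x )
{
    return (int16_t)( ( x & 0xff ) - ( x >> 8 ) );   /* arithmetic shift, as in v128_sra16 */
}

static inline int16_t extra_reduce257( int16_t x )
{
    return x > 128 ? (int16_t)( x - 257 ) : x;       /* 0x0101 = 257, 0x0080 = 128 */
}

int main( void )
{
    assert( reduce257( 257 ) == 0 );                 /* 0x0101: 1 - 1 */
    assert( extra_reduce257( 200 ) == -57 );         /* 200 - 257 */
    return 0;
}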

View File

@@ -640,24 +640,25 @@ void FFT( const unsigned char input[EIGHTH_N], swift_int32_t *output )
#if defined(__AVX2__)
__m256i F0, F1, F2, F3, F4, F5, F6, F7;
__m256i tbl = *(__m256i*)&( fftTable[ input[0] << 3 ] );
__m256i *table = (__m256i*)fftTable;
__m256i tbl = table[ input[0] ];
__m256i *mul = (__m256i*)multipliers;
__m256i *out = (__m256i*)output;
F0 = _mm256_mullo_epi32( mul[0], tbl );
tbl = *(__m256i*)&( fftTable[ input[1] << 3 ] );
tbl = table[ input[1] ];
F1 = _mm256_mullo_epi32( mul[1], tbl );
tbl = *(__m256i*)&( fftTable[ input[2] << 3 ] );
tbl = table[ input[2] ];
F2 = _mm256_mullo_epi32( mul[2], tbl );
tbl = *(__m256i*)&( fftTable[ input[3] << 3 ] );
tbl = table[ input[3] ];
F3 = _mm256_mullo_epi32( mul[3], tbl );
tbl = *(__m256i*)&( fftTable[ input[4] << 3 ] );
tbl = table[ input[4] ];
F4 = _mm256_mullo_epi32( mul[4], tbl );
tbl = *(__m256i*)&( fftTable[ input[5] << 3 ] );
tbl = table[ input[5] ];
F5 = _mm256_mullo_epi32( mul[5], tbl );
tbl = *(__m256i*)&( fftTable[ input[6] << 3 ] );
tbl = table[ input[6] ];
F6 = _mm256_mullo_epi32( mul[6], tbl );
tbl = *(__m256i*)&( fftTable[ input[7] << 3 ] );
tbl = table[ input[7] ];
F7 = _mm256_mullo_epi32( mul[7], tbl );
#define ADD_SUB( a, b ) \
@@ -677,9 +678,9 @@ void FFT( const unsigned char input[EIGHTH_N], swift_int32_t *output )
ADD_SUB( F1, F3 );
ADD_SUB( F4, F6 );
ADD_SUB( F5, F7 );
F5 = _mm256_slli_epi32( F5, 2 );
F6 = _mm256_slli_epi32( F6, 4 );
F7 = _mm256_slli_epi32( F7, 6 );
F5 = _mm256_slli_epi32( F5, 2 );
ADD_SUB( F0, F4 );
ADD_SUB( F1, F5 );
ADD_SUB( F2, F6 );
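Two mechanical notes on this last file (my reading): each table row is eight 32-bit entries, i.e. one __m256i, so fftTable[ input[i] << 3 ] and table[ input[i] ] with table typed as __m256i* address the same 32 bytes; and the reordering of the F5/F6/F7 shifts only changes instruction scheduling, not results. A small check of the addressing equivalence, using a hypothetical stand-in table and assuming an x86 toolchain with 8 entries per input byte as the << 3 stride implies:

#include <assert.h>
#include <stdint.h>
#include <immintrin.h>

typedef int32_t swift_int32_t;

/* hypothetical stand-in: 8 table entries per possible input byte value */
static swift_int32_t fftTable[256 * 8] __attribute__((aligned(32)));

int main( void )
{
    unsigned char b = 5;
    const void *old_style = &fftTable[ b << 3 ];
    const void *new_style = (const __m256i*)fftTable + b;
    assert( old_style == new_style );
    return 0;
}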