Compare commits

3 Commits

Author     SHA1        Message  Date
Jay D Dee  73430b13b1  v3.10.1  2019-12-05 19:09:23 -05:00
Jay D Dee  40039386a0  v3.10.0  2019-12-03 12:26:11 -05:00
Jay D Dee  91ec6f1771  v3.9.11  2019-11-26 09:22:03 -05:00
111 changed files with 15198 additions and 2537 deletions

View File

@@ -24,18 +24,10 @@ be installed manually. There may be others, read the error messages they
will give a clue as to the missing package.
The following command should install everything you need on Debian based
distributions such as Ubuntu:
distributions such as Ubuntu. Fedora and other distributions may have similar
but different package names.
sudo apt-get install build-essential libssl-dev libcurl4-openssl-dev libjansson-dev libgmp-dev automake zlib1g-dev
build-essential (Development Tools package group on Fedora)
automake
libjansson-dev
libgmp-dev
libcurl4-openssl-dev
libssl-dev
lib-thread
zlib1g-dev
sudo apt-get install build-essential libssl-dev libcurl4-openssl-dev libjansson-dev libgmp-dev zlib1g-dev
SHA support on AMD Ryzen CPUs requires gcc version 5 or higher and
openssl 1.1.0e or higher. Add one of the following, depending on the

View File

@@ -22,14 +22,13 @@ Step by step...
Refer to Linux compile instructions and install required packages.
Additionally, install mingw-64.
Additionally, install mingw-w64.
sudo apt-get install mingw-w64
2. Create a local library directory for packages to be compiled in the next
step. Recommended location is $HOME/usr/lib/
step. Suggested location is $HOME/usr/lib/
3. Download and build other packages for mingw that don't have a mingw64
version available in the repositories.

View File

@@ -117,6 +117,7 @@ cpuminer_SOURCES = \
algo/keccak/keccak-4way.c\
algo/keccak/keccak-gate.c \
algo/keccak/sse2/keccak.c \
algo/lanehash/lane.c \
algo/luffa/sph_luffa.c \
algo/luffa/luffa.c \
algo/luffa/luffa_for_sse2.c \
@@ -173,7 +174,6 @@ cpuminer_SOURCES = \
algo/sha/sph_sha2big.c \
algo/sha/sha256-hash-4way.c \
algo/sha/sha512-hash-4way.c \
algo/sha/sha256_hash_11way.c \
algo/sha/sha2.c \
algo/sha/sha256t-gate.c \
algo/sha/sha256t-4way.c \
@@ -197,9 +197,9 @@ cpuminer_SOURCES = \
algo/skein/skein-gate.c \
algo/skein/skein2.c \
algo/skein/skein2-4way.c \
algo/skein/skein2-gate.c \
algo/sm3/sm3.c \
algo/sm3/sm3-hash-4way.c \
algo/swifftx/swifftx.c \
algo/tiger/sph_tiger.c \
algo/whirlpool/sph_whirlpool.c \
algo/whirlpool/whirlpool-hash-4way.c \
@@ -279,6 +279,11 @@ cpuminer_SOURCES = \
algo/x17/sonoa-4way.c \
algo/x17/sonoa.c \
algo/x20/x20r.c \
algo/x22/x22i-4way.c \
algo/x22/x22i.c \
algo/x22/x22i-gate.c \
algo/x22/x25x.c \
algo/x22/x25x-4way.c \
algo/yescrypt/yescrypt.c \
algo/yescrypt/sha256_Y.c \
algo/yescrypt/yescrypt-best.c \

View File

@@ -129,6 +129,8 @@ Supported Algorithms
x16s Pigeoncoin (PGN)
x17
x21s
x22i
x25x
xevan Bitsend (BSD)
yescrypt Globalboost-Y (BSTY)
yescryptr8 BitZeny (ZNY)
@@ -142,6 +144,9 @@ Supported Algorithms
Errata
------
Old algorithms that are no longer frequently used will not have the latest
optimizations.
Cryptonight and variants are no longer supported; use another miner.
Neoscrypt crashes on Windows; use the legacy version.

View File

@@ -15,8 +15,8 @@ the features listed at cpuminer startup to ensure you are mining at
optimum speed using the best available features.
Architecture names and compile options used are only provided for Intel
Core series. Even the newest Pentium and Celeron CPUs are often missing
features.
Core series. Budget CPUs like Pentium and Celeron are often missing the
latest features.
AMD CPUs older than Piledriver, including Athlon x2 and Phenom II x4, are not
supported by cpuminer-opt due to an incompatible implementation of SSE2 on
@@ -28,7 +28,8 @@ Exe name Compile flags Arch name
cpuminer-sse2.exe "-msse2" Core2, Nehalem
cpuminer-aes-sse42.exe "-march=westmere" Westmere
cpuminer-avx.exe "-march=corei7-avx" Sandy-Ivybridge
cpuminer-avx2.exe "-march=core-avx2" Haswell, Sky-Kaby-Coffeelake
cpuminer-avx2.exe "-march=core-avx2 -maes" Haswell, Sky-Kaby-Coffeelake
cpuminer-avx512.exe "-march=skylake-avx512" Skylake-X, Cascadelake-X
cpuminer-zen "-march=znver1" AMD Ryzen, Threadripper
If you like this software feel free to donate:

View File

@@ -31,6 +31,42 @@ FreeBSD YMMV.
Change Log
----------
v3.10.1
AVX512 for blake2b, nist5, quark, tribus.
More broken lane fixes.
Fixed buffer overflow in skein AVX512.
Only the highest ranking feature in a class is listed at startup; lower
ranking features are available but no longer listed.
v3.10.0
AVX512 is now supported on selected algos, Windows binary is now available.
AVX512 optimizations are available for argon2d, blake2s, keccak, keccakc,
skein & skein2.
Fixed CPU temperature for some CPU models (Linux only).
Fixed a bug that caused some lanes not to submit shares.
Fixed some previously undetected buffer overflows.
Lyra2rev2 3% faster SSE2 and AVX2.
Added "-fno-asynchronous-unwind-tables" to AVX512 build script for Windows
to fix known mingw issue.
Changed AVX2 build script to explicitly add AES to address change in
behaviour in GCC 9.
v3.9.11
Added x22i & x25x algos.
Blake2s 2% faster AVX2 with Intel CPUs, slower with Ryzen v1; v2 unknown.
v3.9.10
Faster X* algos with AVX2.

View File

@@ -238,6 +238,8 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
case ALGO_X16S: register_x16s_algo ( gate ); break;
case ALGO_X17: register_x17_algo ( gate ); break;
case ALGO_X21S: register_x21s_algo ( gate ); break;
case ALGO_X22I: register_x22i_algo ( gate ); break;
case ALGO_X25X: register_x25x_algo ( gate ); break;
case ALGO_XEVAN: register_xevan_algo ( gate ); break;
/* case ALGO_YESCRYPT: register_yescrypt_05_algo ( gate ); break;
case ALGO_YESCRYPTR8: register_yescryptr8_05_algo ( gate ); break;

View File

@@ -21,7 +21,7 @@
#include "argon2.h"
#include "core.h"
#include "simd-utils.h"
#include "../blake2/blake2.h"
#include "../blake2/blamka-round-opt.h"
@@ -37,23 +37,27 @@
#if defined(__AVX512F__)
static void fill_block(__m512i *state, const block *ref_block,
block *next_block, int with_xor) {
static void fill_block( __m512i *state, const block *ref_block,
block *next_block, int with_xor )
{
__m512i block_XY[ARGON2_512BIT_WORDS_IN_BLOCK];
unsigned int i;
if (with_xor) {
for (i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++) {
state[i] = _mm512_xor_si512(
state[i], _mm512_loadu_si512((const __m512i *)ref_block->v + i));
block_XY[i] = _mm512_xor_si512(
state[i], _mm512_loadu_si512((const __m512i *)next_block->v + i));
if ( with_xor )
{
for ( i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++ )
{
state[i] = _mm512_xor_si512( state[i],
_mm512_load_si512( (const __m512i*)ref_block->v + i ) );
block_XY[i] = _mm512_xor_si512( state[i],
_mm512_load_si512( (const __m512i*)next_block->v + i ) );
}
} else {
for (i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++) {
block_XY[i] = state[i] = _mm512_xor_si512(
state[i], _mm512_loadu_si512((const __m512i *)ref_block->v + i));
}
else
{
for ( i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++ )
block_XY[i] = state[i] = _mm512_xor_si512( state[i],
_mm512_load_si512( (const __m512i*)ref_block->v + i ) );
}
BLAKE2_ROUND_1( state[ 0], state[ 1], state[ 2], state[ 3],
@@ -66,23 +70,10 @@ static void fill_block(__m512i *state, const block *ref_block,
BLAKE2_ROUND_2( state[ 1], state[ 3], state[ 5], state[ 7],
state[ 9], state[11], state[13], state[15] );
/*
for (i = 0; i < 2; ++i) {
BLAKE2_ROUND_1(
state[8 * i + 0], state[8 * i + 1], state[8 * i + 2], state[8 * i + 3],
state[8 * i + 4], state[8 * i + 5], state[8 * i + 6], state[8 * i + 7]);
}
for (i = 0; i < 2; ++i) {
BLAKE2_ROUND_2(
state[2 * 0 + i], state[2 * 1 + i], state[2 * 2 + i], state[2 * 3 + i],
state[2 * 4 + i], state[2 * 5 + i], state[2 * 6 + i], state[2 * 7 + i]);
}
*/
for (i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++) {
state[i] = _mm512_xor_si512(state[i], block_XY[i]);
_mm512_storeu_si512((__m512i *)next_block->v + i, state[i]);
for ( i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++ )
{
state[i] = _mm512_xor_si512( state[i], block_XY[i] );
_mm512_store_si512( (__m512i*)next_block->v + i, state[i] );
}
}
@@ -125,18 +116,6 @@ static void fill_block(__m256i *state, const block *ref_block,
BLAKE2_ROUND_2( state[ 3], state[ 7], state[11], state[15],
state[19], state[23], state[27], state[31] );
/*
for (i = 0; i < 4; ++i) {
BLAKE2_ROUND_1(state[8 * i + 0], state[8 * i + 4], state[8 * i + 1], state[8 * i + 5],
state[8 * i + 2], state[8 * i + 6], state[8 * i + 3], state[8 * i + 7]);
}
for (i = 0; i < 4; ++i) {
BLAKE2_ROUND_2(state[ 0 + i], state[ 4 + i], state[ 8 + i], state[12 + i],
state[16 + i], state[20 + i], state[24 + i], state[28 + i]);
}
*/
for (i = 0; i < ARGON2_HWORDS_IN_BLOCK; i++) {
state[i] = _mm256_xor_si256(state[i], block_XY[i]);
_mm256_store_si256((__m256i *)next_block->v + i, state[i]);
@@ -153,14 +132,14 @@ static void fill_block(__m128i *state, const block *ref_block,
if (with_xor) {
for (i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++) {
state[i] = _mm_xor_si128(
state[i], _mm_loadu_si128((const __m128i *)ref_block->v + i));
state[i], _mm_load_si128((const __m128i *)ref_block->v + i));
block_XY[i] = _mm_xor_si128(
state[i], _mm_loadu_si128((const __m128i *)next_block->v + i));
state[i], _mm_load_si128((const __m128i *)next_block->v + i));
}
} else {
for (i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++) {
block_XY[i] = state[i] = _mm_xor_si128(
state[i], _mm_loadu_si128((const __m128i *)ref_block->v + i));
state[i], _mm_load_si128((const __m128i *)ref_block->v + i));
}
}
@@ -198,22 +177,9 @@ static void fill_block(__m128i *state, const block *ref_block,
BLAKE2_ROUND( state[ 7], state[15], state[23], state[31],
state[39], state[47], state[55], state[63] );
/*
for (i = 0; i < 8; ++i) {
BLAKE2_ROUND(state[8 * i + 0], state[8 * i + 1], state[8 * i + 2],
state[8 * i + 3], state[8 * i + 4], state[8 * i + 5],
state[8 * i + 6], state[8 * i + 7]);
}
for (i = 0; i < 8; ++i) {
BLAKE2_ROUND(state[8 * 0 + i], state[8 * 1 + i], state[8 * 2 + i],
state[8 * 3 + i], state[8 * 4 + i], state[8 * 5 + i],
state[8 * 6 + i], state[8 * 7 + i]);
}
*/
for (i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++) {
state[i] = _mm_xor_si128(state[i], block_XY[i]);
_mm_storeu_si128((__m128i *)next_block->v + i, state[i]);
_mm_store_si128((__m128i *)next_block->v + i, state[i]);
}
}

View File

@@ -427,14 +427,14 @@ static __m512i muladd(__m512i x, __m512i y)
#define SWAP_QUARTERS(A0, A1) \
do { \
SWAP_HALVES(A0, A1); \
A0 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A0); \
A1 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A1); \
A0 = _mm512_shuffle_i64x2( A0, A0, 0xd8 ); \
A1 = _mm512_shuffle_i64x2( A1, A1, 0xd8 ); \
} while((void)0, 0)
#define UNSWAP_QUARTERS(A0, A1) \
do { \
A0 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A0); \
A1 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A1); \
A0 = _mm512_shuffle_i64x2( A0, A0, 0xd8 ); \
A1 = _mm512_shuffle_i64x2( A1, A1, 0xd8 ); \
SWAP_HALVES(A0, A1); \
} while((void)0, 0)
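// Note: _mm512_shuffle_i64x2( A, A, 0xd8 ) reorders the four 128-bit lanes
// as 0,2,1,3, the same 64-bit element permutation as the replaced
// _mm512_permutexvar_epi64 with index (0,1,4,5,2,3,6,7), but without
// needing a vector index constant.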

View File

@@ -59,7 +59,6 @@ extern "C"{
typedef struct {
unsigned char buf[64<<2];
uint32_t H[8<<2];
uint32_t S[4<<2];
// __m128i buf[16] __attribute__ ((aligned (64)));
// __m128i H[8];
// __m128i S[4];
@@ -93,7 +92,6 @@ void blake256r8_4way_close(void *cc, void *dst);
typedef struct {
__m256i buf[16] __attribute__ ((aligned (64)));
__m256i H[8];
__m256i S[4];
size_t ptr;
sph_u32 T0, T1;
int rounds; // 14 for blake, 8 for blakecoin & vanilla
@@ -120,20 +118,42 @@ void blake256r8_8way_close(void *cc, void *dst);
// Blake-512 4 way
typedef struct {
__m256i buf[16] __attribute__ ((aligned (64)));
__m256i buf[16];
__m256i H[8];
__m256i S[4];
size_t ptr;
sph_u64 T0, T1;
} blake_4way_big_context;
} blake_4way_big_context __attribute__ ((aligned (128)));
typedef blake_4way_big_context blake512_4way_context;
void blake512_4way_init(void *cc);
void blake512_4way(void *cc, const void *data, size_t len);
void blake512_4way_close(void *cc, void *dst);
void blake512_4way_addbits_and_close(
void *cc, unsigned ub, unsigned n, void *dst);
void blake512_4way_init( void *cc );
void blake512_4way_update( void *cc, const void *data, size_t len );
#define blake512_4way blake512_4way_update
void blake512_4way_close( void *cc, void *dst );
void blake512_4way_addbits_and_close( void *cc, unsigned ub, unsigned n,
void *dst );
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
typedef struct {
__m512i buf[16];
__m512i H[8];
__m512i S[4];
size_t ptr;
sph_u64 T0, T1;
} blake_8way_big_context __attribute__ ((aligned (128)));
typedef blake_8way_big_context blake512_8way_context;
void blake512_8way_init( void *cc );
void blake512_8way_update( void *cc, const void *data, size_t len );
void blake512_8way_close( void *cc, void *dst );
void blake512_8way_addbits_and_close( void *cc, unsigned ub, unsigned n,
void *dst );
#endif // AVX512
#endif // AVX2

View File

@@ -304,16 +304,17 @@ static const sph_u32 CS[16] = {
#endif
// Blake-256 4 way
#define GS_4WAY( m0, m1, c0, c1, a, b, c, d ) \
do { \
a = _mm_add_epi32( _mm_add_epi32( _mm_xor_si128( \
_mm_set1_epi32( c1 ), m0 ), b ), a ); \
a = _mm_add_epi32( _mm_add_epi32( a, b ), \
_mm_xor_si128( _mm_set1_epi32( c1 ), m0 ) ); \
d = mm128_ror_32( _mm_xor_si128( d, a ), 16 ); \
c = _mm_add_epi32( c, d ); \
b = mm128_ror_32( _mm_xor_si128( b, c ), 12 ); \
a = _mm_add_epi32( _mm_add_epi32( _mm_xor_si128( \
_mm_set1_epi32( c0 ), m1 ), b ), a ); \
a = _mm_add_epi32( _mm_add_epi32( a, b ), \
_mm_xor_si128( _mm_set1_epi32( c0 ), m1 ) ); \
d = mm128_ror_32( _mm_xor_si128( d, a ), 8 ); \
c = _mm_add_epi32( c, d ); \
b = mm128_ror_32( _mm_xor_si128( b, c ), 7 ); \
@@ -321,7 +322,8 @@ do { \
#if SPH_COMPACT_BLAKE_32
// Blake-256 4 way
// Not used
#if 0
#define ROUND_S_4WAY(r) do { \
GS_4WAY(M[sigma[r][0x0]], M[sigma[r][0x1]], \
@@ -342,6 +344,8 @@ do { \
CS[sigma[r][0xE]], CS[sigma[r][0xF]], V3, V4, V9, VE); \
} while (0)
#endif
#else
#define ROUND_S_4WAY(r) do { \
@@ -359,7 +363,6 @@ do { \
#define DECL_STATE32_4WAY \
__m128i H0, H1, H2, H3, H4, H5, H6, H7; \
__m128i S0, S1, S2, S3; \
uint32_t T0, T1;
#define READ_STATE32_4WAY(state) do { \
@@ -371,10 +374,6 @@ do { \
H5 = casti_m128i( state->H, 5 ); \
H6 = casti_m128i( state->H, 6 ); \
H7 = casti_m128i( state->H, 7 ); \
S0 = casti_m128i( state->S, 0 ); \
S1 = casti_m128i( state->S, 1 ); \
S2 = casti_m128i( state->S, 2 ); \
S3 = casti_m128i( state->S, 3 ); \
T0 = (state)->T0; \
T1 = (state)->T1; \
} while (0)
@@ -388,17 +387,13 @@ do { \
casti_m128i( state->H, 5 ) = H5; \
casti_m128i( state->H, 6 ) = H6; \
casti_m128i( state->H, 7 ) = H7; \
casti_m128i( state->S, 0 ) = S0; \
casti_m128i( state->S, 1 ) = S1; \
casti_m128i( state->S, 2 ) = S2; \
casti_m128i( state->S, 3 ) = S3; \
(state)->T0 = T0; \
(state)->T1 = T1; \
} while (0)
#if SPH_COMPACT_BLAKE_32
// not used
#if 0
#define COMPRESS32_4WAY( rounds ) do { \
__m128i M[16]; \
__m128i V0, V1, V2, V3, V4, V5, V6, V7; \
@@ -441,6 +436,7 @@ do { \
H7 = _mm_xor_si128( _mm_xor_si128( \
_mm_xor_si128( S3, V7 ), VF ), H7 ); \
} while (0)
#endif
#else
@@ -508,10 +504,10 @@ do { \
V5 = H5; \
V6 = H6; \
V7 = H7; \
V8 = _mm_xor_si128( S0, m128_const1_64( 0x243F6A88243F6A88 ) ); \
V9 = _mm_xor_si128( S1, m128_const1_64( 0x85A308D385A308D3 ) ); \
VA = _mm_xor_si128( S2, m128_const1_64( 0x13198A2E13198A2E ) ); \
VB = _mm_xor_si128( S3, m128_const1_64( 0x0370734403707344 ) ); \
V8 = m128_const1_64( 0x243F6A88243F6A88 ); \
V9 = m128_const1_64( 0x85A308D385A308D3 ); \
VA = m128_const1_64( 0x13198A2E13198A2E ); \
VB = m128_const1_64( 0x0370734403707344 ); \
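/* Salt words S0..S3 are always zero in this miner, so XORing them into \
   the IV constants above is no longer needed. */ \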
VC = _mm_xor_si128( _mm_set1_epi32( T0 ), \
m128_const1_64( 0xA4093822A4093822 ) ); \
VD = _mm_xor_si128( _mm_set1_epi32( T0 ), \
@@ -538,14 +534,14 @@ do { \
ROUND_S_4WAY(2); \
ROUND_S_4WAY(3); \
} \
H0 = mm128_xor4( V8, V0, S0, H0 ); \
H1 = mm128_xor4( V9, V1, S1, H1 ); \
H2 = mm128_xor4( VA, V2, S2, H2 ); \
H3 = mm128_xor4( VB, V3, S3, H3 ); \
H4 = mm128_xor4( VC, V4, S0, H4 ); \
H5 = mm128_xor4( VD, V5, S1, H5 ); \
H6 = mm128_xor4( VE, V6, S2, H6 ); \
H7 = mm128_xor4( VF, V7, S3, H7 ); \
H0 = _mm_xor_si128( _mm_xor_si128( V8, V0 ), H0 ); \
H1 = _mm_xor_si128( _mm_xor_si128( V9, V1 ), H1 ); \
H2 = _mm_xor_si128( _mm_xor_si128( VA, V2 ), H2 ); \
H3 = _mm_xor_si128( _mm_xor_si128( VB, V3 ), H3 ); \
H4 = _mm_xor_si128( _mm_xor_si128( VC, V4 ), H4 ); \
H5 = _mm_xor_si128( _mm_xor_si128( VD, V5 ), H5 ); \
H6 = _mm_xor_si128( _mm_xor_si128( VE, V6 ), H6 ); \
H7 = _mm_xor_si128( _mm_xor_si128( VF, V7 ), H7 ); \
} while (0)
#endif
@@ -556,13 +552,13 @@ do { \
#define GS_8WAY( m0, m1, c0, c1, a, b, c, d ) \
do { \
a = _mm256_add_epi32( _mm256_add_epi32( _mm256_xor_si256( \
_mm256_set1_epi32( c1 ), m0 ), b ), a ); \
a = _mm256_add_epi32( _mm256_add_epi32( a, b ), \
_mm256_xor_si256( _mm256_set1_epi32( c1 ), m0 ) ); \
d = mm256_ror_32( _mm256_xor_si256( d, a ), 16 ); \
c = _mm256_add_epi32( c, d ); \
b = mm256_ror_32( _mm256_xor_si256( b, c ), 12 ); \
a = _mm256_add_epi32( _mm256_add_epi32( _mm256_xor_si256( \
_mm256_set1_epi32( c0 ), m1 ), b ), a ); \
a = _mm256_add_epi32( _mm256_add_epi32( a, b ), \
_mm256_xor_si256( _mm256_set1_epi32( c0 ), m1 ) ); \
d = mm256_ror_32( _mm256_xor_si256( d, a ), 8 ); \
c = _mm256_add_epi32( c, d ); \
b = mm256_ror_32( _mm256_xor_si256( b, c ), 7 ); \
@@ -581,7 +577,6 @@ do { \
#define DECL_STATE32_8WAY \
__m256i H0, H1, H2, H3, H4, H5, H6, H7; \
__m256i S0, S1, S2, S3; \
sph_u32 T0, T1;
#define READ_STATE32_8WAY(state) \
@@ -594,10 +589,6 @@ do { \
H5 = (state)->H[5]; \
H6 = (state)->H[6]; \
H7 = (state)->H[7]; \
S0 = (state)->S[0]; \
S1 = (state)->S[1]; \
S2 = (state)->S[2]; \
S3 = (state)->S[3]; \
T0 = (state)->T0; \
T1 = (state)->T1; \
} while (0)
@@ -612,10 +603,6 @@ do { \
(state)->H[5] = H5; \
(state)->H[6] = H6; \
(state)->H[7] = H7; \
(state)->S[0] = S0; \
(state)->S[1] = S1; \
(state)->S[2] = S2; \
(state)->S[3] = S3; \
(state)->T0 = T0; \
(state)->T1 = T1; \
} while (0)
@@ -635,10 +622,10 @@ do { \
V5 = H5; \
V6 = H6; \
V7 = H7; \
V8 = _mm256_xor_si256( S0, m256_const1_64( 0x243F6A88243F6A88 ) ); \
V9 = _mm256_xor_si256( S1, m256_const1_64( 0x85A308D385A308D3 ) ); \
VA = _mm256_xor_si256( S2, m256_const1_64( 0x13198A2E13198A2E ) ); \
VB = _mm256_xor_si256( S3, m256_const1_64( 0x0370734403707344 ) ); \
V8 = m256_const1_64( 0x243F6A88243F6A88 ); \
V9 = m256_const1_64( 0x85A308D385A308D3 ); \
VA = m256_const1_64( 0x13198A2E13198A2E ); \
VB = m256_const1_64( 0x0370734403707344 ); \
VC = _mm256_xor_si256( _mm256_set1_epi32( T0 ),\
m256_const1_64( 0xA4093822A4093822 ) ); \
VD = _mm256_xor_si256( _mm256_set1_epi32( T0 ),\
@@ -682,14 +669,14 @@ do { \
ROUND_S_8WAY(2); \
ROUND_S_8WAY(3); \
} \
H0 = mm256_xor4( V8, V0, S0, H0 ); \
H1 = mm256_xor4( V9, V1, S1, H1 ); \
H2 = mm256_xor4( VA, V2, S2, H2 ); \
H3 = mm256_xor4( VB, V3, S3, H3 ); \
H4 = mm256_xor4( VC, V4, S0, H4 ); \
H5 = mm256_xor4( VD, V5, S1, H5 ); \
H6 = mm256_xor4( VE, V6, S2, H6 ); \
H7 = mm256_xor4( VF, V7, S3, H7 ); \
H0 = _mm256_xor_si256( _mm256_xor_si256( V8, V0 ), H0 ); \
H1 = _mm256_xor_si256( _mm256_xor_si256( V9, V1 ), H1 ); \
H2 = _mm256_xor_si256( _mm256_xor_si256( VA, V2 ), H2 ); \
H3 = _mm256_xor_si256( _mm256_xor_si256( VB, V3 ), H3 ); \
H4 = _mm256_xor_si256( _mm256_xor_si256( VC, V4 ), H4 ); \
H5 = _mm256_xor_si256( _mm256_xor_si256( VD, V5 ), H5 ); \
H6 = _mm256_xor_si256( _mm256_xor_si256( VE, V6 ), H6 ); \
H7 = _mm256_xor_si256( _mm256_xor_si256( VF, V7 ), H7 ); \
} while (0)
@@ -703,7 +690,6 @@ static void
blake32_4way_init( blake_4way_small_context *ctx, const uint32_t *iv,
const uint32_t *salt, int rounds )
{
__m128i zero = m128_zero;
casti_m128i( ctx->H, 0 ) = m128_const1_64( 0x6A09E6676A09E667 );
casti_m128i( ctx->H, 1 ) = m128_const1_64( 0xBB67AE85BB67AE85 );
casti_m128i( ctx->H, 2 ) = m128_const1_64( 0x3C6EF3723C6EF372 );
@@ -712,11 +698,6 @@ blake32_4way_init( blake_4way_small_context *ctx, const uint32_t *iv,
casti_m128i( ctx->H, 5 ) = m128_const1_64( 0x9B05688C9B05688C );
casti_m128i( ctx->H, 6 ) = m128_const1_64( 0x1F83D9AB1F83D9AB );
casti_m128i( ctx->H, 7 ) = m128_const1_64( 0x5BE0CD195BE0CD19 );
casti_m128i( ctx->S, 0 ) = zero;
casti_m128i( ctx->S, 1 ) = zero;
casti_m128i( ctx->S, 2 ) = zero;
casti_m128i( ctx->S, 3 ) = zero;
ctx->T0 = ctx->T1 = 0;
ctx->ptr = 0;
ctx->rounds = rounds;
@@ -824,7 +805,6 @@ static void
blake32_8way_init( blake_8way_small_context *sc, const sph_u32 *iv,
const sph_u32 *salt, int rounds )
{
__m256i zero = m256_zero;
casti_m256i( sc->H, 0 ) = m256_const1_64( 0x6A09E6676A09E667 );
casti_m256i( sc->H, 1 ) = m256_const1_64( 0xBB67AE85BB67AE85 );
casti_m256i( sc->H, 2 ) = m256_const1_64( 0x3C6EF3723C6EF372 );
@@ -833,10 +813,6 @@ blake32_8way_init( blake_8way_small_context *sc, const sph_u32 *iv,
casti_m256i( sc->H, 5 ) = m256_const1_64( 0x9B05688C9B05688C );
casti_m256i( sc->H, 6 ) = m256_const1_64( 0x1F83D9AB1F83D9AB );
casti_m256i( sc->H, 7 ) = m256_const1_64( 0x5BE0CD195BE0CD19 );
casti_m256i( sc->S, 0 ) = zero;
casti_m256i( sc->S, 1 ) = zero;
casti_m256i( sc->S, 2 ) = zero;
casti_m256i( sc->S, 3 ) = zero;
sc->T0 = sc->T1 = 0;
sc->ptr = 0;
sc->rounds = rounds;

View File

@@ -4,13 +4,59 @@
*/
#include "blake2b-gate.h"
#if defined(BLAKE2B_4WAY)
#include <string.h>
#include <stdint.h>
#include "blake2b-hash-4way.h"
#if defined(BLAKE2B_8WAY)
int scanhash_blake2b_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
blake2b_8way_ctx ctx __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[49]); // 3*16+1
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id;
__m512i *noncev = (__m512i*)vdata + 9; // aligned
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
do {
*noncev = mm512_intrlv_blend_32( mm512_bswap_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ) ), *noncev );
blake2b_8way_init( &ctx );
blake2b_8way_update( &ctx, vdata, 80 );
blake2b_8way_final( &ctx, hash );
for ( int lane = 0; lane < 8; lane++ )
if ( hash7[ lane<<1 ] < Htarg )
{
extr_lane_8x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 8;
} while ( (n < max_nonce-8) && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
return 0;
}
#elif defined(BLAKE2B_4WAY)
// Function not used, code inlined.
void blake2b_4way_hash(void *output, const void *input)
{

View File

@@ -1,15 +1,19 @@
#include "blake2b-gate.h"
bool register_blake2b_algo( algo_gate_t* gate )
{
#if defined(BLAKE2B_4WAY)
#if defined(BLAKE2B_8WAY)
gate->scanhash = (void*)&scanhash_blake2b_8way;
// gate->hash = (void*)&blake2b_8way_hash;
#elif defined(BLAKE2B_4WAY)
gate->scanhash = (void*)&scanhash_blake2b_4way;
gate->hash = (void*)&blake2b_4way_hash;
#else
gate->scanhash = (void*)&scanhash_blake2b;
gate->hash = (void*)&blake2b_hash;
#endif
gate->optimizations = AVX2_OPT;
gate->optimizations = AVX2_OPT | AVX512_OPT;
return true;
};

View File

@@ -4,13 +4,21 @@
#include <stdint.h>
#include "algo-gate-api.h"
#if defined(__AVX2__)
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define BLAKE2B_8WAY
#elif defined(__AVX2__)
#define BLAKE2B_4WAY
#endif
bool register_blake2b_algo( algo_gate_t* gate );
#if defined(BLAKE2B_4WAY)
#if defined(BLAKE2B_8WAY)
//void blake2b_8way_hash( void *state, const void *input );
int scanhash_blake2b_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined(BLAKE2B_4WAY)
void blake2b_4way_hash( void *state, const void *input );
int scanhash_blake2b_4way( struct work *work, uint32_t max_nonce,

View File

@@ -33,6 +33,178 @@
#include "blake2b-hash-4way.h"
static const uint8_t sigma[12][16] =
{
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
};
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define B2B8W_G(a, b, c, d, x, y) \
{ \
v[a] = _mm512_add_epi64( _mm512_add_epi64( v[a], v[b] ), x ); \
v[d] = mm512_ror_64( _mm512_xor_si512( v[d], v[a] ), 32 ); \
v[c] = _mm512_add_epi64( v[c], v[d] ); \
v[b] = mm512_ror_64( _mm512_xor_si512( v[b], v[c] ), 24 ); \
v[a] = _mm512_add_epi64( _mm512_add_epi64( v[a], v[b] ), y ); \
v[d] = mm512_ror_64( _mm512_xor_si512( v[d], v[a] ), 16 ); \
v[c] = _mm512_add_epi64( v[c], v[d] ); \
v[b] = mm512_ror_64( _mm512_xor_si512( v[b], v[c] ), 63 ); \
}
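// Standard BLAKE2b G mixing (RFC 7693) across eight 64-bit lanes per
// vector, with the usual rotation constants 32, 24, 16 and 63.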
static void blake2b_8way_compress( blake2b_8way_ctx *ctx, int last )
{
__m512i v[16], m[16];
v[ 0] = ctx->h[0];
v[ 1] = ctx->h[1];
v[ 2] = ctx->h[2];
v[ 3] = ctx->h[3];
v[ 4] = ctx->h[4];
v[ 5] = ctx->h[5];
v[ 6] = ctx->h[6];
v[ 7] = ctx->h[7];
v[ 8] = m512_const1_64( 0x6A09E667F3BCC908 );
v[ 9] = m512_const1_64( 0xBB67AE8584CAA73B );
v[10] = m512_const1_64( 0x3C6EF372FE94F82B );
v[11] = m512_const1_64( 0xA54FF53A5F1D36F1 );
v[12] = m512_const1_64( 0x510E527FADE682D1 );
v[13] = m512_const1_64( 0x9B05688C2B3E6C1F );
v[14] = m512_const1_64( 0x1F83D9ABFB41BD6B );
v[15] = m512_const1_64( 0x5BE0CD19137E2179 );
v[12] = _mm512_xor_si512( v[12], _mm512_set1_epi64( ctx->t[0] ) );
v[13] = _mm512_xor_si512( v[13], _mm512_set1_epi64( ctx->t[1] ) );
if ( last )
v[14] = mm512_not( v[14] );
m[ 0] = ctx->b[ 0];
m[ 1] = ctx->b[ 1];
m[ 2] = ctx->b[ 2];
m[ 3] = ctx->b[ 3];
m[ 4] = ctx->b[ 4];
m[ 5] = ctx->b[ 5];
m[ 6] = ctx->b[ 6];
m[ 7] = ctx->b[ 7];
m[ 8] = ctx->b[ 8];
m[ 9] = ctx->b[ 9];
m[10] = ctx->b[10];
m[11] = ctx->b[11];
m[12] = ctx->b[12];
m[13] = ctx->b[13];
m[14] = ctx->b[14];
m[15] = ctx->b[15];
for ( int i = 0; i < 12; i++ )
{
B2B8W_G( 0, 4, 8, 12, m[ sigma[i][ 0] ], m[ sigma[i][ 1] ] );
B2B8W_G( 1, 5, 9, 13, m[ sigma[i][ 2] ], m[ sigma[i][ 3] ] );
B2B8W_G( 2, 6, 10, 14, m[ sigma[i][ 4] ], m[ sigma[i][ 5] ] );
B2B8W_G( 3, 7, 11, 15, m[ sigma[i][ 6] ], m[ sigma[i][ 7] ] );
B2B8W_G( 0, 5, 10, 15, m[ sigma[i][ 8] ], m[ sigma[i][ 9] ] );
B2B8W_G( 1, 6, 11, 12, m[ sigma[i][10] ], m[ sigma[i][11] ] );
B2B8W_G( 2, 7, 8, 13, m[ sigma[i][12] ], m[ sigma[i][13] ] );
B2B8W_G( 3, 4, 9, 14, m[ sigma[i][14] ], m[ sigma[i][15] ] );
}
ctx->h[0] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[0], v[0] ), v[ 8] );
ctx->h[1] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[1], v[1] ), v[ 9] );
ctx->h[2] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[2], v[2] ), v[10] );
ctx->h[3] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[3], v[3] ), v[11] );
ctx->h[4] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[4], v[4] ), v[12] );
ctx->h[5] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[5], v[5] ), v[13] );
ctx->h[6] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[6], v[6] ), v[14] );
ctx->h[7] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[7], v[7] ), v[15] );
}
int blake2b_8way_init( blake2b_8way_ctx *ctx )
{
size_t i;
ctx->h[0] = m512_const1_64( 0x6A09E667F3BCC908 );
ctx->h[1] = m512_const1_64( 0xBB67AE8584CAA73B );
ctx->h[2] = m512_const1_64( 0x3C6EF372FE94F82B );
ctx->h[3] = m512_const1_64( 0xA54FF53A5F1D36F1 );
ctx->h[4] = m512_const1_64( 0x510E527FADE682D1 );
ctx->h[5] = m512_const1_64( 0x9B05688C2B3E6C1F );
ctx->h[6] = m512_const1_64( 0x1F83D9ABFB41BD6B );
ctx->h[7] = m512_const1_64( 0x5BE0CD19137E2179 );
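// XOR in parameter block word 0: 0x01010020 = depth 1, fanout 1,
// key length 0, digest length 0x20 (32 bytes).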
ctx->h[0] = _mm512_xor_si512( ctx->h[0], m512_const1_64( 0x01010020 ) );
ctx->t[0] = 0;
ctx->t[1] = 0;
ctx->c = 0;
ctx->outlen = 32;
for ( i = 0; i < 16; i++ )
ctx->b[i] = m512_zero;
return 0;
}
void blake2b_8way_update( blake2b_8way_ctx *ctx, const void *input,
size_t inlen )
{
__m512i* in =(__m512i*)input;
size_t i, c;
c = ctx->c >> 3;
for ( i = 0; i < (inlen >> 3); i++ )
{
if ( ctx->c == 128 )
{
ctx->t[0] += ctx->c;
if ( ctx->t[0] < ctx->c )
ctx->t[1]++;
blake2b_8way_compress( ctx, 0 );
ctx->c = 0;
}
ctx->b[ c++ ] = in[i];
ctx->c += 8;
}
}
void blake2b_8way_final( blake2b_8way_ctx *ctx, void *out )
{
size_t c;
c = ctx->c >> 3;
ctx->t[0] += ctx->c;
if ( ctx->t[0] < ctx->c )
ctx->t[1]++;
while ( ctx->c < 128 )
{
ctx->b[c++] = m512_zero;
ctx->c += 8;
}
blake2b_8way_compress( ctx, 1 ); // final block flag = 1
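// Fixed 256-bit digest: only h[0..3] are copied out (outlen is 32 bytes).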
casti_m512i( out, 0 ) = ctx->h[0];
casti_m512i( out, 1 ) = ctx->h[1];
casti_m512i( out, 2 ) = ctx->h[2];
casti_m512i( out, 3 ) = ctx->h[3];
}
#endif
#if defined(__AVX2__)
// G Mixing function.
@@ -61,21 +233,6 @@ static const uint64_t blake2b_iv[8] = {
static void blake2b_4way_compress( blake2b_4way_ctx *ctx, int last )
{
const uint8_t sigma[12][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
};
int i;
__m256i v[16], m[16];
v[ 0] = ctx->h[0];
@@ -118,7 +275,7 @@ static void blake2b_4way_compress( blake2b_4way_ctx *ctx, int last )
m[14] = ctx->b[14];
m[15] = ctx->b[15];
for ( i = 0; i < 12; i++ )
for ( int i = 0; i < 12; i++ )
{
B2B_G( 0, 4, 8, 12, m[ sigma[i][ 0] ], m[ sigma[i][ 1] ] );
B2B_G( 1, 5, 9, 13, m[ sigma[i][ 2] ], m[ sigma[i][ 3] ] );

View File

@@ -2,8 +2,6 @@
#ifndef __BLAKE2B_HASH_4WAY_H__
#define __BLAKE2B_HASH_4WAY_H__
#if defined(__AVX2__)
#include "simd-utils.h"
#include <stddef.h>
#include <stdint.h>
@@ -16,14 +14,34 @@
#define ALIGN(x) __attribute__((aligned(x)))
#endif
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
ALIGN(128) typedef struct {
__m512i b[16]; // input buffer
__m512i h[8]; // chained state
uint64_t t[2]; // total number of bytes
size_t c; // pointer for b[]
size_t outlen; // digest size
} blake2b_8way_ctx;
int blake2b_8way_init( blake2b_8way_ctx *ctx );
void blake2b_8way_update( blake2b_8way_ctx *ctx, const void *input,
size_t inlen );
void blake2b_8way_final( blake2b_8way_ctx *ctx, void *out );
#endif
#if defined(__AVX2__)
// state context
ALIGN(64) typedef struct {
ALIGN(128) typedef struct {
__m256i b[16]; // input buffer
__m256i h[8]; // chained state
uint64_t t[2]; // total number of bytes
size_t c; // pointer for b[]
size_t outlen; // digest size
} blake2b_4way_ctx __attribute__((aligned(64)));
} blake2b_4way_ctx;
int blake2b_4way_init( blake2b_4way_ctx *ctx );
void blake2b_4way_update( blake2b_4way_ctx *ctx, const void *input,

View File

@@ -3,22 +3,72 @@
#include <string.h>
#include <stdint.h>
#if defined(BLAKE2S_8WAY)
#if defined(BLAKE2S_16WAY)
static __thread blake2s_16way_state blake2s_16w_ctx;
void blake2s_16way_hash( void *output, const void *input )
{
blake2s_16way_state ctx;
memcpy( &ctx, &blake2s_16w_ctx, sizeof ctx );
blake2s_16way_update( &ctx, input + (64<<4), 16 );
blake2s_16way_final( &ctx, output, BLAKE2S_OUTBYTES );
}
int scanhash_blake2s_16way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[20*16] __attribute__ ((aligned (128)));
uint32_t hash[8*16] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[7<<4]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
__m512i *noncev = (__m512i*)vdata + 19; // aligned
uint32_t n = first_nonce;
int thr_id = mythr->id;
mm512_bswap32_intrlv80_16x32( vdata, pdata );
blake2s_16way_init( &blake2s_16w_ctx, BLAKE2S_OUTBYTES );
blake2s_16way_update( &blake2s_16w_ctx, vdata, 64 );
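// Midstate optimization: the first 64 byte block is hashed once here;
// each loop iteration below only hashes the 16 byte tail with the nonce.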
do {
*noncev = mm512_bswap_32( _mm512_set_epi32(
n+15, n+14, n+13, n+12, n+11, n+10, n+ 9, n+ 8,
n+ 7, n+ 6, n+ 5, n+ 4, n+ 3, n+ 2, n+ 1, n ) );
pdata[19] = n;
blake2s_16way_hash( hash, vdata );
for ( int lane = 0; lane < 16; lane++ )
if ( unlikely( hash7[lane] <= Htarg ) )
{
extr_lane_16x32( lane_hash, hash, lane, 256 );
if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 16;
} while ( (n < max_nonce-16) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;
return 0;
}
#elif defined(BLAKE2S_8WAY)
static __thread blake2s_8way_state blake2s_8w_ctx;
void blake2s_8way_hash( void *output, const void *input )
{
uint32_t vhash[8*8] __attribute__ ((aligned (64)));
blake2s_8way_state ctx;
memcpy( &ctx, &blake2s_8w_ctx, sizeof ctx );
blake2s_8way_update( &ctx, input + (64<<3), 16 );
blake2s_8way_final( &ctx, vhash, BLAKE2S_OUTBYTES );
dintrlv_8x32( output, output+ 32, output+ 64, output+ 96,
output+128, output+160, output+192, output+224,
vhash, 256 );
blake2s_8way_final( &ctx, output, BLAKE2S_OUTBYTES );
}
int scanhash_blake2s_8way( struct work *work, uint32_t max_nonce,
@@ -26,13 +76,15 @@ int scanhash_blake2s_8way( struct work *work, uint32_t max_nonce,
{
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t hash[8*8] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[7<<3]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 19; // aligned
uint32_t n = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
int thr_id = mythr->id;
mm256_bswap32_intrlv80_8x32( vdata, pdata );
blake2s_8way_init( &blake2s_8w_ctx, BLAKE2S_OUTBYTES );
@@ -45,16 +97,17 @@ int scanhash_blake2s_8way( struct work *work, uint32_t max_nonce,
blake2s_8way_hash( hash, vdata );
for ( int i = 0; i < 8; i++ )
if ( (hash+(i<<3))[7] <= Htarg )
if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( hash7[lane] <= Htarg ) )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
extr_lane_8x32( lane_hash, hash, lane, 256 );
if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 8;
} while ( (n < max_nonce) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;
@@ -67,15 +120,10 @@ static __thread blake2s_4way_state blake2s_4w_ctx;
void blake2s_4way_hash( void *output, const void *input )
{
uint32_t vhash[8*4] __attribute__ ((aligned (64)));
blake2s_4way_state ctx;
memcpy( &ctx, &blake2s_4w_ctx, sizeof ctx );
blake2s_4way_update( &ctx, input + (64<<2), 16 );
blake2s_4way_final( &ctx, vhash, BLAKE2S_OUTBYTES );
dintrlv_4x32( output, output+32, output+64, output+96,
vhash, 256 );
blake2s_4way_final( &ctx, output, BLAKE2S_OUTBYTES );
}
int scanhash_blake2s_4way( struct work *work, uint32_t max_nonce,
@@ -83,13 +131,15 @@ int scanhash_blake2s_4way( struct work *work, uint32_t max_nonce,
{
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t hash[8*4] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[7<<2]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
__m128i *noncev = (__m128i*)vdata + 19; // aligned
uint32_t n = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
int thr_id = mythr->id;
mm128_bswap32_intrlv80_4x32( vdata, pdata );
blake2s_4way_init( &blake2s_4w_ctx, BLAKE2S_OUTBYTES );
@@ -101,15 +151,16 @@ int scanhash_blake2s_4way( struct work *work, uint32_t max_nonce,
blake2s_4way_hash( hash, vdata );
for ( int i = 0; i < 4; i++ )
if ( (hash+(i<<3))[7] <= Htarg )
if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
for ( int lane = 0; lane < 4; lane++ ) if ( hash7[lane] <= Htarg )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
extr_lane_4x32( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 4;
} while ( (n < max_nonce) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;

View File

@@ -2,7 +2,11 @@
bool register_blake2s_algo( algo_gate_t* gate )
{
#if defined(BLAKE2S_8WAY)
#if defined(BLAKE2S_16WAY)
gate->scanhash = (void*)&scanhash_blake2s_16way;
gate->hash = (void*)&blake2s_16way_hash;
#elif defined(BLAKE2S_8WAY)
//#if defined(BLAKE2S_8WAY)
gate->scanhash = (void*)&scanhash_blake2s_8way;
gate->hash = (void*)&blake2s_8way_hash;
#elif defined(BLAKE2S_4WAY)
@@ -12,7 +16,7 @@ bool register_blake2s_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_blake2s;
gate->hash = (void*)&blake2s_hash;
#endif
gate->optimizations = SSE2_OPT | AVX2_OPT;
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
return true;
};

View File

@@ -8,13 +8,26 @@
#if defined(__SSE2__)
#define BLAKE2S_4WAY
#endif
#if defined(__AVX2__)
#define BLAKE2S_8WAY
#endif
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define BLAKE2S_16WAY
#endif
bool register_blake2s_algo( algo_gate_t* gate );
#if defined(BLAKE2S_8WAY)
#if defined(BLAKE2S_16WAY)
void blake2s_16way_hash( void *state, const void *input );
int scanhash_blake2s_16way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined (BLAKE2S_8WAY)
//#if defined(BLAKE2S_8WAY)
void blake2s_8way_hash( void *state, const void *input );
int scanhash_blake2s_8way( struct work *work, uint32_t max_nonce,

View File

@@ -20,12 +20,13 @@
//#if defined(__SSE4_2__)
#if defined(__SSE2__)
/*
static const uint32_t blake2s_IV[8] =
{
0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL,
0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL
};
*/
static const uint8_t blake2s_sigma[10][16] =
{
@@ -41,6 +42,7 @@ static const uint8_t blake2s_sigma[10][16] =
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 } ,
};
// define a constant for initial param.
int blake2s_4way_init( blake2s_4way_state *S, const uint8_t outlen )
@@ -88,41 +90,45 @@ int blake2s_4way_compress( blake2s_4way_state *S, const __m128i* block )
memcpy_128( m, block, 16 );
memcpy_128( v, S->h, 8 );
v[ 8] = _mm_set1_epi32( blake2s_IV[0] );
v[ 9] = _mm_set1_epi32( blake2s_IV[1] );
v[10] = _mm_set1_epi32( blake2s_IV[2] );
v[11] = _mm_set1_epi32( blake2s_IV[3] );
v[ 8] = m128_const1_64( 0x6A09E6676A09E667ULL );
v[ 9] = m128_const1_64( 0xBB67AE85BB67AE85ULL );
v[10] = m128_const1_64( 0x3C6EF3723C6EF372ULL );
v[11] = m128_const1_64( 0xA54FF53AA54FF53AULL );
v[12] = _mm_xor_si128( _mm_set1_epi32( S->t[0] ),
_mm_set1_epi32( blake2s_IV[4] ) );
m128_const1_64( 0x510E527F510E527FULL ) );
v[13] = _mm_xor_si128( _mm_set1_epi32( S->t[1] ),
_mm_set1_epi32( blake2s_IV[5] ) );
m128_const1_64( 0x9B05688C9B05688CULL ) );
v[14] = _mm_xor_si128( _mm_set1_epi32( S->f[0] ),
_mm_set1_epi32( blake2s_IV[6] ) );
m128_const1_64( 0x1F83D9AB1F83D9ABULL ) );
v[15] = _mm_xor_si128( _mm_set1_epi32( S->f[1] ),
_mm_set1_epi32( blake2s_IV[7] ) );
m128_const1_64( 0x5BE0CD195BE0CD19ULL ) );
#define G4W(r,i,a,b,c,d) \
#define G4W( sigma0, sigma1, a, b, c, d ) \
do { \
a = _mm_add_epi32( _mm_add_epi32( a, b ), m[ blake2s_sigma[r][2*i+0] ] ); \
uint8_t s0 = sigma0; \
uint8_t s1 = sigma1; \
a = _mm_add_epi32( _mm_add_epi32( a, b ), m[ s0 ] ); \
d = mm128_ror_32( _mm_xor_si128( d, a ), 16 ); \
c = _mm_add_epi32( c, d ); \
b = mm128_ror_32( _mm_xor_si128( b, c ), 12 ); \
a = _mm_add_epi32( _mm_add_epi32( a, b ), m[ blake2s_sigma[r][2*i+1] ] ); \
a = _mm_add_epi32( _mm_add_epi32( a, b ), m[ s1 ] ); \
d = mm128_ror_32( _mm_xor_si128( d, a ), 8 ); \
c = _mm_add_epi32( c, d ); \
b = mm128_ror_32( _mm_xor_si128( b, c ), 7 ); \
} while(0)
#define ROUND4W(r) \
do { \
G4W( r, 0, v[ 0], v[ 4], v[ 8], v[12] ); \
G4W( r, 1, v[ 1], v[ 5], v[ 9], v[13] ); \
G4W( r, 2, v[ 2], v[ 6], v[10], v[14] ); \
G4W( r, 3, v[ 3], v[ 7], v[11], v[15] ); \
G4W( r, 4, v[ 0], v[ 5], v[10], v[15] ); \
G4W( r, 5, v[ 1], v[ 6], v[11], v[12] ); \
G4W( r, 6, v[ 2], v[ 7], v[ 8], v[13] ); \
G4W( r, 7, v[ 3], v[ 4], v[ 9], v[14] ); \
uint8_t *sigma = (uint8_t*)&blake2s_sigma[r]; \
G4W( sigma[ 0], sigma[ 1], v[ 0], v[ 4], v[ 8], v[12] ); \
G4W( sigma[ 2], sigma[ 3], v[ 1], v[ 5], v[ 9], v[13] ); \
G4W( sigma[ 4], sigma[ 5], v[ 2], v[ 6], v[10], v[14] ); \
G4W( sigma[ 6], sigma[ 7], v[ 3], v[ 7], v[11], v[15] ); \
G4W( sigma[ 8], sigma[ 9], v[ 0], v[ 5], v[10], v[15] ); \
G4W( sigma[10], sigma[11], v[ 1], v[ 6], v[11], v[12] ); \
G4W( sigma[12], sigma[13], v[ 2], v[ 7], v[ 8], v[13] ); \
G4W( sigma[14], sigma[15], v[ 3], v[ 4], v[ 9], v[14] ); \
} while(0)
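// ROUND4W now loads the round's sigma row once and passes the byte pairs
// straight to G4W, instead of indexing blake2s_sigma[r][2*i+n] inside the
// mixing macro.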
ROUND4W( 0 );
@@ -144,26 +150,47 @@ do { \
return 0;
}
// There is a problem that can't be resolved internally.
// If the last block is a full 64 bytes it should not be compressed in
// update but left for final. However, when streaming, it isn't known
// which block is last. There may be a subsequent call to update to add
// more data.
//
// The reference code handled this by juggling 2 blocks at a time at
// a significant performance penalty.
//
// Instead a new function is introduced called full_blocks which combines
// update and final and is to be used in non-streaming mode where the data
// is a multiple of 64 bytes.
//
// Supported:
// 64 + 16 bytes (blake2s with midstate optimization)
// 80 bytes (blake2s without midstate optimization)
// Any multiple of 64 bytes in one shot (x25x)
//
// Unsupported:
// Stream of full 64 byte blocks one at a time.
// use only when streaming more data or final block not full.
int blake2s_4way_update( blake2s_4way_state *S, const void *in,
uint64_t inlen )
{
__m128i *input = (__m128i*)in;
__m128i *buf = (__m128i*)S->buf;
const int bsize = BLAKE2S_BLOCKBYTES;
while( inlen > 0 )
{
size_t left = S->buflen;
if( inlen >= bsize - left )
if( inlen >= BLAKE2S_BLOCKBYTES - left )
{
memcpy_128( buf + (left>>2), input, (bsize - left) >> 2 );
S->buflen += bsize - left;
memcpy_128( buf + (left>>2), input, (BLAKE2S_BLOCKBYTES - left) >> 2 );
S->buflen += BLAKE2S_BLOCKBYTES - left;
S->t[0] += BLAKE2S_BLOCKBYTES;
S->t[1] += ( S->t[0] < BLAKE2S_BLOCKBYTES );
blake2s_4way_compress( S, buf );
S->buflen = 0;
input += ( bsize >> 2 );
inlen -= bsize;
input += ( BLAKE2S_BLOCKBYTES >> 2 );
inlen -= BLAKE2S_BLOCKBYTES;
}
else
{
@@ -195,8 +222,45 @@ int blake2s_4way_final( blake2s_4way_state *S, void *out, uint8_t outlen )
return 0;
}
// Update and final when inlen is a multiple of 64 bytes
int blake2s_4way_full_blocks( blake2s_4way_state *S, void *out,
const void *input, uint64_t inlen )
{
__m128i *in = (__m128i*)input;
__m128i *buf = (__m128i*)S->buf;
while( inlen > BLAKE2S_BLOCKBYTES )
{
memcpy_128( buf, in, BLAKE2S_BLOCKBYTES >> 2 );
S->buflen = BLAKE2S_BLOCKBYTES;
inlen -= BLAKE2S_BLOCKBYTES;
S->t[0] += BLAKE2S_BLOCKBYTES;
S->t[1] += ( S->t[0] < BLAKE2S_BLOCKBYTES );
blake2s_4way_compress( S, buf );
S->buflen = 0;
in += ( BLAKE2S_BLOCKBYTES >> 2 );
}
// last block
memcpy_128( buf, in, BLAKE2S_BLOCKBYTES >> 2 );
S->buflen = BLAKE2S_BLOCKBYTES;
S->t[0] += S->buflen;
S->t[1] += ( S->t[0] < S->buflen );
if ( S->last_node ) S->f[1] = ~0U;
S->f[0] = ~0U;
blake2s_4way_compress( S, buf );
for ( int i = 0; i < 8; ++i )
casti_m128i( out, i ) = S->h[ i ];
return 0;
}
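/* Minimal usage sketch (comment only, mirroring blake2s-4way.c above):
   hash an 80 byte 4-way interleaved header with the midstate trick.

     blake2s_4way_state mid, ctx;
     blake2s_4way_init( &mid, BLAKE2S_OUTBYTES );
     blake2s_4way_update( &mid, vdata, 64 );        // first 64 byte block
     // ...then for each nonce:
     ctx = mid;                                     // restore midstate
     blake2s_4way_update( &ctx, vdata + (64<<2), 16 );   // 16 byte tail
     blake2s_4way_final( &ctx, hash, BLAKE2S_OUTBYTES );

   vdata is 4-way interleaved, so the second block starts 64*4 bytes in.
   For a one-shot hash of any multiple of 64 bytes (the x25x case), call
   blake2s_4way_full_blocks() after init instead. */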
#if defined(__AVX2__)
// The commented code below is slower on Intel but faster on
// Zen1 AVX2. It's also faster than Zen1 AVX.
// Ryzen gen2 is unknown at this time.
int blake2s_8way_compress( blake2s_8way_state *S, const __m256i *block )
{
__m256i m[16];
@@ -205,6 +269,23 @@ int blake2s_8way_compress( blake2s_8way_state *S, const __m256i *block )
memcpy_256( m, block, 16 );
memcpy_256( v, S->h, 8 );
v[ 8] = m256_const1_64( 0x6A09E6676A09E667ULL );
v[ 9] = m256_const1_64( 0xBB67AE85BB67AE85ULL );
v[10] = m256_const1_64( 0x3C6EF3723C6EF372ULL );
v[11] = m256_const1_64( 0xA54FF53AA54FF53AULL );
v[12] = _mm256_xor_si256( _mm256_set1_epi32( S->t[0] ),
m256_const1_64( 0x510E527F510E527FULL ) );
v[13] = _mm256_xor_si256( _mm256_set1_epi32( S->t[1] ),
m256_const1_64( 0x9B05688C9B05688CULL ) );
v[14] = _mm256_xor_si256( _mm256_set1_epi32( S->f[0] ),
m256_const1_64( 0x1F83D9AB1F83D9ABULL ) );
v[15] = _mm256_xor_si256( _mm256_set1_epi32( S->f[1] ),
m256_const1_64( 0x5BE0CD195BE0CD19ULL ) );
/*
v[ 8] = _mm256_set1_epi32( blake2s_IV[0] );
v[ 9] = _mm256_set1_epi32( blake2s_IV[1] );
v[10] = _mm256_set1_epi32( blake2s_IV[2] );
@@ -218,6 +299,7 @@ int blake2s_8way_compress( blake2s_8way_state *S, const __m256i *block )
v[15] = _mm256_xor_si256( _mm256_set1_epi32( S->f[1] ),
_mm256_set1_epi32( blake2s_IV[7] ) );
#define G8W(r,i,a,b,c,d) \
do { \
a = _mm256_add_epi32( _mm256_add_epi32( a, b ), \
@@ -231,7 +313,36 @@ do { \
c = _mm256_add_epi32( c, d ); \
b = mm256_ror_32( _mm256_xor_si256( b, c ), 7 ); \
} while(0)
*/
#define G8W( sigma0, sigma1, a, b, c, d) \
do { \
uint8_t s0 = sigma0; \
uint8_t s1 = sigma1; \
a = _mm256_add_epi32( _mm256_add_epi32( a, b ), m[ s0 ] ); \
d = mm256_ror_32( _mm256_xor_si256( d, a ), 16 ); \
c = _mm256_add_epi32( c, d ); \
b = mm256_ror_32( _mm256_xor_si256( b, c ), 12 ); \
a = _mm256_add_epi32( _mm256_add_epi32( a, b ), m[ s1 ] ); \
d = mm256_ror_32( _mm256_xor_si256( d, a ), 8 ); \
c = _mm256_add_epi32( c, d ); \
b = mm256_ror_32( _mm256_xor_si256( b, c ), 7 ); \
} while(0)
#define ROUND8W(r) \
do { \
uint8_t *sigma = (uint8_t*)&blake2s_sigma[r]; \
G8W( sigma[ 0], sigma[ 1], v[ 0], v[ 4], v[ 8], v[12] ); \
G8W( sigma[ 2], sigma[ 3], v[ 1], v[ 5], v[ 9], v[13] ); \
G8W( sigma[ 4], sigma[ 5], v[ 2], v[ 6], v[10], v[14] ); \
G8W( sigma[ 6], sigma[ 7], v[ 3], v[ 7], v[11], v[15] ); \
G8W( sigma[ 8], sigma[ 9], v[ 0], v[ 5], v[10], v[15] ); \
G8W( sigma[10], sigma[11], v[ 1], v[ 6], v[11], v[12] ); \
G8W( sigma[12], sigma[13], v[ 2], v[ 7], v[ 8], v[13] ); \
G8W( sigma[14], sigma[15], v[ 3], v[ 4], v[ 9], v[14] ); \
} while(0)
/*
#define ROUND8W(r) \
do { \
G8W( r, 0, v[ 0], v[ 4], v[ 8], v[12] ); \
@@ -243,6 +354,7 @@ do { \
G8W( r, 6, v[ 2], v[ 7], v[ 8], v[13] ); \
G8W( r, 7, v[ 3], v[ 4], v[ 9], v[14] ); \
} while(0)
*/
ROUND8W( 0 );
ROUND8W( 1 );
@@ -354,6 +466,168 @@ int blake2s_8way_final( blake2s_8way_state *S, void *out, uint8_t outlen )
#endif // __AVX2__
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// Blake2s-256 16 way
int blake2s_16way_compress( blake2s_16way_state *S, const __m512i *block )
{
__m512i m[16];
__m512i v[16];
memcpy_512( m, block, 16 );
memcpy_512( v, S->h, 8 );
v[ 8] = m512_const1_64( 0x6A09E6676A09E667ULL );
v[ 9] = m512_const1_64( 0xBB67AE85BB67AE85ULL );
v[10] = m512_const1_64( 0x3C6EF3723C6EF372ULL );
v[11] = m512_const1_64( 0xA54FF53AA54FF53AULL );
v[12] = _mm512_xor_si512( _mm512_set1_epi32( S->t[0] ),
m512_const1_64( 0x510E527F510E527FULL ) );
v[13] = _mm512_xor_si512( _mm512_set1_epi32( S->t[1] ),
m512_const1_64( 0x9B05688C9B05688CULL ) );
v[14] = _mm512_xor_si512( _mm512_set1_epi32( S->f[0] ),
m512_const1_64( 0x1F83D9AB1F83D9ABULL ) );
v[15] = _mm512_xor_si512( _mm512_set1_epi32( S->f[1] ),
m512_const1_64( 0x5BE0CD195BE0CD19ULL ) );
#define G16W( sigma0, sigma1, a, b, c, d) \
do { \
uint8_t s0 = sigma0; \
uint8_t s1 = sigma1; \
a = _mm512_add_epi32( _mm512_add_epi32( a, b ), m[ s0 ] ); \
d = mm512_ror_32( _mm512_xor_si512( d, a ), 16 ); \
c = _mm512_add_epi32( c, d ); \
b = mm512_ror_32( _mm512_xor_si512( b, c ), 12 ); \
a = _mm512_add_epi32( _mm512_add_epi32( a, b ), m[ s1 ] ); \
d = mm512_ror_32( _mm512_xor_si512( d, a ), 8 ); \
c = _mm512_add_epi32( c, d ); \
b = mm512_ror_32( _mm512_xor_si512( b, c ), 7 ); \
} while(0)
#define ROUND16W(r) \
do { \
uint8_t *sigma = (uint8_t*)&blake2s_sigma[r]; \
G16W( sigma[ 0], sigma[ 1], v[ 0], v[ 4], v[ 8], v[12] ); \
G16W( sigma[ 2], sigma[ 3], v[ 1], v[ 5], v[ 9], v[13] ); \
G16W( sigma[ 4], sigma[ 5], v[ 2], v[ 6], v[10], v[14] ); \
G16W( sigma[ 6], sigma[ 7], v[ 3], v[ 7], v[11], v[15] ); \
G16W( sigma[ 8], sigma[ 9], v[ 0], v[ 5], v[10], v[15] ); \
G16W( sigma[10], sigma[11], v[ 1], v[ 6], v[11], v[12] ); \
G16W( sigma[12], sigma[13], v[ 2], v[ 7], v[ 8], v[13] ); \
G16W( sigma[14], sigma[15], v[ 3], v[ 4], v[ 9], v[14] ); \
} while(0)
ROUND16W( 0 );
ROUND16W( 1 );
ROUND16W( 2 );
ROUND16W( 3 );
ROUND16W( 4 );
ROUND16W( 5 );
ROUND16W( 6 );
ROUND16W( 7 );
ROUND16W( 8 );
ROUND16W( 9 );
for( size_t i = 0; i < 8; ++i )
S->h[i] = _mm512_xor_si512( _mm512_xor_si512( S->h[i], v[i] ), v[i + 8] );
#undef G16W
#undef ROUND16W
return 0;
}
int blake2s_16way_init( blake2s_16way_state *S, const uint8_t outlen )
{
blake2s_nway_param P[1];
P->digest_length = outlen;
P->key_length = 0;
P->fanout = 1;
P->depth = 1;
P->leaf_length = 0;
*((uint64_t*)(P->node_offset)) = 0;
P->node_depth = 0;
P->inner_length = 0;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
memset( S, 0, sizeof( blake2s_16way_state ) );
S->h[0] = m512_const1_64( 0x6A09E6676A09E667ULL );
S->h[1] = m512_const1_64( 0xBB67AE85BB67AE85ULL );
S->h[2] = m512_const1_64( 0x3C6EF3723C6EF372ULL );
S->h[3] = m512_const1_64( 0xA54FF53AA54FF53AULL );
S->h[4] = m512_const1_64( 0x510E527F510E527FULL );
S->h[5] = m512_const1_64( 0x9B05688C9B05688CULL );
S->h[6] = m512_const1_64( 0x1F83D9AB1F83D9ABULL );
S->h[7] = m512_const1_64( 0x5BE0CD195BE0CD19ULL );
uint32_t *p = ( uint32_t * )( P );
/* IV XOR ParamBlock */
for ( size_t i = 0; i < 8; ++i )
S->h[i] = _mm512_xor_si512( S->h[i], _mm512_set1_epi32( p[i] ) );
return 0;
}
int blake2s_16way_update( blake2s_16way_state *S, const void *in,
uint64_t inlen )
{
__m512i *input = (__m512i*)in;
__m512i *buf = (__m512i*)S->buf;
const int bsize = BLAKE2S_BLOCKBYTES;
while( inlen > 0 )
{
size_t left = S->buflen;
if( inlen >= bsize - left )
{
memcpy_512( buf + (left>>2), input, (bsize - left) >> 2 );
S->buflen += bsize - left;
S->t[0] += BLAKE2S_BLOCKBYTES;
S->t[1] += ( S->t[0] < BLAKE2S_BLOCKBYTES );
blake2s_16way_compress( S, buf );
S->buflen = 0;
input += ( bsize >> 2 );
inlen -= bsize;
}
else
{
memcpy_512( buf + ( left>>2 ), input, inlen>>2 );
S->buflen += (size_t) inlen;
input += ( inlen>>2 );
inlen -= inlen;
}
}
return 0;
}
int blake2s_16way_final( blake2s_16way_state *S, void *out, uint8_t outlen )
{
__m512i *buf = (__m512i*)S->buf;
S->t[0] += S->buflen;
S->t[1] += ( S->t[0] < S->buflen );
if ( S->last_node )
S->f[1] = ~0U;
S->f[0] = ~0U;
memset_zero_512( buf + ( S->buflen>>2 ),
( BLAKE2S_BLOCKBYTES - S->buflen ) >> 2 );
blake2s_16way_compress( S, buf );
for ( int i = 0; i < 8; ++i )
casti_m512i( out, i ) = S->h[ i ];
return 0;
}
#endif // AVX512
#if 0
int blake2s( uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen )
{

View File

@@ -75,6 +75,9 @@ int blake2s_4way_init( blake2s_4way_state *S, const uint8_t outlen );
int blake2s_4way_update( blake2s_4way_state *S, const void *in,
uint64_t inlen );
int blake2s_4way_final( blake2s_4way_state *S, void *out, uint8_t outlen );
int blake2s_4way_full_blocks( blake2s_4way_state *S, void *out,
const void *input, uint64_t inlen );
#if defined(__AVX2__)
@@ -92,6 +95,27 @@ int blake2s_8way_init( blake2s_8way_state *S, const uint8_t outlen );
int blake2s_8way_update( blake2s_8way_state *S, const void *in,
uint64_t inlen );
int blake2s_8way_final( blake2s_8way_state *S, void *out, uint8_t outlen );
//int blake2s_8way_full_blocks( blake2s_8way_state *S, void *out,
// const void *input, uint64_t inlen );
#endif
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
ALIGN( 128 ) typedef struct __blake2s_16way_state
{
__m512i h[8];
uint8_t buf[ BLAKE2S_BLOCKBYTES * 16 ];
uint32_t t[2];
uint32_t f[2];
size_t buflen;
uint8_t last_node;
} blake2s_16way_state ;
int blake2s_16way_init( blake2s_16way_state *S, const uint8_t outlen );
int blake2s_16way_update( blake2s_16way_state *S, const void *in,
uint64_t inlen );
int blake2s_16way_final( blake2s_16way_state *S, void *out, uint8_t outlen );
#endif

View File

@@ -54,7 +54,6 @@ extern "C"{
#pragma warning (disable: 4146)
#endif
// Blake-512
static const sph_u64 IV512[8] = {
@@ -64,6 +63,7 @@ static const sph_u64 IV512[8] = {
SPH_C64(0x1F83D9ABFB41BD6B), SPH_C64(0x5BE0CD19137E2179)
};
static const sph_u64 salt_zero_big[4] = { 0, 0, 0, 0 };
#if SPH_COMPACT_BLAKE_32 || SPH_COMPACT_BLAKE_64
@@ -264,8 +264,6 @@ static const unsigned sigma[16][16] = {
#define Mx_(n) Mx__(n)
#define Mx__(n) M ## n
// Blake-512 4 way
#define CBx(r, i) CBx_(Z ## r ## i)
#define CBx_(n) CBx__(n)
#define CBx__(n) CB ## n
@@ -287,6 +285,7 @@ static const unsigned sigma[16][16] = {
#define CBE SPH_C64(0x0801F2E2858EFC16)
#define CBF SPH_C64(0x636920D871574E69)
/*
#if SPH_COMPACT_BLAKE_64
// not used
static const sph_u64 CB[16] = {
@@ -301,68 +300,9 @@ static const sph_u64 CB[16] = {
};
#endif
*/
// Blake-512 4 way
#define GB_4WAY(m0, m1, c0, c1, a, b, c, d) do { \
a = _mm256_add_epi64( _mm256_add_epi64( _mm256_xor_si256( \
_mm256_set1_epi64x( c1 ), m0 ), b ), a ); \
d = mm256_ror_64( _mm256_xor_si256( d, a ), 32 ); \
c = _mm256_add_epi64( c, d ); \
b = mm256_ror_64( _mm256_xor_si256( b, c ), 25 ); \
a = _mm256_add_epi64( _mm256_add_epi64( _mm256_xor_si256( \
_mm256_set1_epi64x( c0 ), m1 ), b ), a ); \
d = mm256_ror_64( _mm256_xor_si256( d, a ), 16 ); \
c = _mm256_add_epi64( c, d ); \
b = mm256_ror_64( _mm256_xor_si256( b, c ), 11 ); \
} while (0)
#if SPH_COMPACT_BLAKE_64
// not used
#define ROUND_B_4WAY(r) do { \
GB_4WAY(M[sigma[r][0x0]], M[sigma[r][0x1]], \
CB[sigma[r][0x0]], CB[sigma[r][0x1]], V0, V4, V8, VC); \
GB_4WAY(M[sigma[r][0x2]], M[sigma[r][0x3]], \
CB[sigma[r][0x2]], CB[sigma[r][0x3]], V1, V5, V9, VD); \
GB_4WAY(M[sigma[r][0x4]], M[sigma[r][0x5]], \
CB[sigma[r][0x4]], CB[sigma[r][0x5]], V2, V6, VA, VE); \
GB_4WAY(M[sigma[r][0x6]], M[sigma[r][0x7]], \
CB[sigma[r][0x6]], CB[sigma[r][0x7]], V3, V7, VB, VF); \
GB_4WAY(M[sigma[r][0x8]], M[sigma[r][0x9]], \
CB[sigma[r][0x8]], CB[sigma[r][0x9]], V0, V5, VA, VF); \
GB_4WAY(M[sigma[r][0xA]], M[sigma[r][0xB]], \
CB[sigma[r][0xA]], CB[sigma[r][0xB]], V1, V6, VB, VC); \
GB_4WAY(M[sigma[r][0xC]], M[sigma[r][0xD]], \
CB[sigma[r][0xC]], CB[sigma[r][0xD]], V2, V7, V8, VD); \
GB_4WAY(M[sigma[r][0xE]], M[sigma[r][0xF]], \
CB[sigma[r][0xE]], CB[sigma[r][0xF]], V3, V4, V9, VE); \
} while (0)
#else
//current_impl
#define ROUND_B_4WAY(r) do { \
GB_4WAY(Mx(r, 0), Mx(r, 1), CBx(r, 0), CBx(r, 1), V0, V4, V8, VC); \
GB_4WAY(Mx(r, 2), Mx(r, 3), CBx(r, 2), CBx(r, 3), V1, V5, V9, VD); \
GB_4WAY(Mx(r, 4), Mx(r, 5), CBx(r, 4), CBx(r, 5), V2, V6, VA, VE); \
GB_4WAY(Mx(r, 6), Mx(r, 7), CBx(r, 6), CBx(r, 7), V3, V7, VB, VF); \
GB_4WAY(Mx(r, 8), Mx(r, 9), CBx(r, 8), CBx(r, 9), V0, V5, VA, VF); \
GB_4WAY(Mx(r, A), Mx(r, B), CBx(r, A), CBx(r, B), V1, V6, VB, VC); \
GB_4WAY(Mx(r, C), Mx(r, D), CBx(r, C), CBx(r, D), V2, V7, V8, VD); \
GB_4WAY(Mx(r, E), Mx(r, F), CBx(r, E), CBx(r, F), V3, V4, V9, VE); \
} while (0)
#endif
// Blake-512 4 way
#define DECL_STATE64_4WAY \
__m256i H0, H1, H2, H3, H4, H5, H6, H7; \
__m256i S0, S1, S2, S3; \
sph_u64 T0, T1;
#define READ_STATE64_4WAY(state) do { \
#define READ_STATE64(state) do { \
H0 = (state)->H[0]; \
H1 = (state)->H[1]; \
H2 = (state)->H[2]; \
@@ -379,7 +319,7 @@ static const sph_u64 CB[16] = {
T1 = (state)->T1; \
} while (0)
#define WRITE_STATE64_4WAY(state) do { \
#define WRITE_STATE64(state) do { \
(state)->H[0] = H0; \
(state)->H[1] = H1; \
(state)->H[2] = H2; \
@@ -396,16 +336,47 @@ static const sph_u64 CB[16] = {
(state)->T1 = T1; \
} while (0)
#if SPH_COMPACT_BLAKE_64
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// not used
#define COMPRESS64_4WAY do { \
__m256i M[16]; \
__m256i V0, V1, V2, V3, V4, V5, V6, V7; \
__m256i V8, V9, VA, VB, VC, VD, VE, VF; \
const __m256i shuff_bswap64 = m256_const2_64( 0x08090a0b0c0d0e0f, \
0x0001020304050607 ); \
unsigned r; \
// Blake-512 8 way
#define GB_8WAY(m0, m1, c0, c1, a, b, c, d) do { \
a = _mm512_add_epi64( _mm512_add_epi64( _mm512_xor_si512( \
_mm512_set1_epi64( c1 ), m0 ), b ), a ); \
d = mm512_ror_64( _mm512_xor_si512( d, a ), 32 ); \
c = _mm512_add_epi64( c, d ); \
b = mm512_ror_64( _mm512_xor_si512( b, c ), 25 ); \
a = _mm512_add_epi64( _mm512_add_epi64( _mm512_xor_si512( \
_mm512_set1_epi64( c0 ), m1 ), b ), a ); \
d = mm512_ror_64( _mm512_xor_si512( d, a ), 16 ); \
c = _mm512_add_epi64( c, d ); \
b = mm512_ror_64( _mm512_xor_si512( b, c ), 11 ); \
} while (0)
#define ROUND_B_8WAY(r) do { \
GB_8WAY(Mx(r, 0), Mx(r, 1), CBx(r, 0), CBx(r, 1), V0, V4, V8, VC); \
GB_8WAY(Mx(r, 2), Mx(r, 3), CBx(r, 2), CBx(r, 3), V1, V5, V9, VD); \
GB_8WAY(Mx(r, 4), Mx(r, 5), CBx(r, 4), CBx(r, 5), V2, V6, VA, VE); \
GB_8WAY(Mx(r, 6), Mx(r, 7), CBx(r, 6), CBx(r, 7), V3, V7, VB, VF); \
GB_8WAY(Mx(r, 8), Mx(r, 9), CBx(r, 8), CBx(r, 9), V0, V5, VA, VF); \
GB_8WAY(Mx(r, A), Mx(r, B), CBx(r, A), CBx(r, B), V1, V6, VB, VC); \
GB_8WAY(Mx(r, C), Mx(r, D), CBx(r, C), CBx(r, D), V2, V7, V8, VD); \
GB_8WAY(Mx(r, E), Mx(r, F), CBx(r, E), CBx(r, F), V3, V4, V9, VE); \
} while (0)
#define DECL_STATE64_8WAY \
__m512i H0, H1, H2, H3, H4, H5, H6, H7; \
__m512i S0, S1, S2, S3; \
sph_u64 T0, T1;
#define COMPRESS64_8WAY do \
{ \
__m512i M0, M1, M2, M3, M4, M5, M6, M7; \
__m512i M8, M9, MA, MB, MC, MD, ME, MF; \
__m512i V0, V1, V2, V3, V4, V5, V6, V7; \
__m512i V8, V9, VA, VB, VC, VD, VE, VF; \
__m512i shuf_bswap64; \
V0 = H0; \
V1 = H1; \
V2 = H2; \
@@ -414,57 +385,247 @@ static const sph_u64 CB[16] = {
V5 = H5; \
V6 = H6; \
V7 = H7; \
V8 = _mm256_xor_si256( S0, _mm256_set1_epi64x( CB0 ) ); \
V9 = _mm256_xor_si256( S1, _mm256_set1_epi64x( CB1 ) ); \
VA = _mm256_xor_si256( S2, _mm256_set1_epi64x( CB2 ) ); \
VB = _mm256_xor_si256( S3, _mm256_set1_epi64x( CB3 ) ); \
VC = _mm256_xor_si256( _mm256_set1_epi64x( T0 ), \
_mm256_set1_epi64x( CB4 ) ); \
VD = _mm256_xor_si256( _mm256_set1_epi64x( T0 ), \
_mm256_set1_epi64x( CB5 ) ); \
VE = _mm256_xor_si256( _mm256_set1_epi64x( T1 ), \
_mm256_set1_epi64x( CB6 ) ); \
VF = _mm256_xor_si256( _mm256_set1_epi64x( T1 ), \
_mm256_set1_epi64x( CB7 ) ); \
M[0x0] = _mm256_shuffle_epi8( *(buf+ 0), shuff_bswap64 ); \
M[0x1] = _mm256_shuffle_epi8( *(buf+ 1), shuff_bswap64 ); \
M[0x2] = _mm256_shuffle_epi8( *(buf+ 2), shuff_bswap64 ); \
M[0x3] = _mm256_shuffle_epi8( *(buf+ 3), shuff_bswap64 ); \
M[0x4] = _mm256_shuffle_epi8( *(buf+ 4), shuff_bswap64 ); \
M[0x5] = _mm256_shuffle_epi8( *(buf+ 5), shuff_bswap64 ); \
M[0x6] = _mm256_shuffle_epi8( *(buf+ 6), shuff_bswap64 ); \
M[0x7] = _mm256_shuffle_epi8( *(buf+ 7), shuff_bswap64 ); \
M[0x8] = _mm256_shuffle_epi8( *(buf+ 8), shuff_bswap64 ); \
M[0x9] = _mm256_shuffle_epi8( *(buf+ 9), shuff_bswap64 ); \
M[0xA] = _mm256_shuffle_epi8( *(buf+10), shuff_bswap64 ); \
M[0xB] = _mm256_shuffle_epi8( *(buf+11), shuff_bswap64 ); \
M[0xC] = _mm256_shuffle_epi8( *(buf+12), shuff_bswap64 ); \
M[0xD] = _mm256_shuffle_epi8( *(buf+13), shuff_bswap64 ); \
M[0xE] = _mm256_shuffle_epi8( *(buf+14), shuff_bswap64 ); \
M[0xF] = _mm256_shuffle_epi8( *(buf+15), shuff_bswap64 ); \
for (r = 0; r < 16; r ++) \
ROUND_B_4WAY(r); \
H0 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S0, V0 ), V8 ), H0 ); \
H1 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S1, V1 ), V9 ), H1 ); \
H2 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S2, V2 ), VA ), H2 ); \
H3 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S3, V3 ), VB ), H3 ); \
H4 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S0, V4 ), VC ), H4 ); \
H5 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S1, V5 ), VD ), H5 ); \
H6 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S2, V6 ), VE ), H6 ); \
H7 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S3, V7 ), VF ), H7 ); \
V8 = _mm512_xor_si512( S0, m512_const1_64( CB0 ) ); \
V9 = _mm512_xor_si512( S1, m512_const1_64( CB1 ) ); \
VA = _mm512_xor_si512( S2, m512_const1_64( CB2 ) ); \
VB = _mm512_xor_si512( S3, m512_const1_64( CB3 ) ); \
VC = _mm512_xor_si512( _mm512_set1_epi64( T0 ), \
m512_const1_64( CB4 ) ); \
VD = _mm512_xor_si512( _mm512_set1_epi64( T0 ), \
m512_const1_64( CB5 ) ); \
VE = _mm512_xor_si512( _mm512_set1_epi64( T1 ), \
m512_const1_64( CB6 ) ); \
VF = _mm512_xor_si512( _mm512_set1_epi64( T1 ), \
m512_const1_64( CB7 ) ); \
shuf_bswap64 = m512_const_64( 0x38393a3b3c3d3e3f, 0x3031323334353637, \
0x28292a2b2c2d2e2f, 0x2021222324252627, \
0x18191a1b1c1d1e1f, 0x1011121314151617, \
0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
M0 = _mm512_shuffle_epi8( *(buf+ 0), shuf_bswap64 ); \
M1 = _mm512_shuffle_epi8( *(buf+ 1), shuf_bswap64 ); \
M2 = _mm512_shuffle_epi8( *(buf+ 2), shuf_bswap64 ); \
M3 = _mm512_shuffle_epi8( *(buf+ 3), shuf_bswap64 ); \
M4 = _mm512_shuffle_epi8( *(buf+ 4), shuf_bswap64 ); \
M5 = _mm512_shuffle_epi8( *(buf+ 5), shuf_bswap64 ); \
M6 = _mm512_shuffle_epi8( *(buf+ 6), shuf_bswap64 ); \
M7 = _mm512_shuffle_epi8( *(buf+ 7), shuf_bswap64 ); \
M8 = _mm512_shuffle_epi8( *(buf+ 8), shuf_bswap64 ); \
M9 = _mm512_shuffle_epi8( *(buf+ 9), shuf_bswap64 ); \
MA = _mm512_shuffle_epi8( *(buf+10), shuf_bswap64 ); \
MB = _mm512_shuffle_epi8( *(buf+11), shuf_bswap64 ); \
MC = _mm512_shuffle_epi8( *(buf+12), shuf_bswap64 ); \
MD = _mm512_shuffle_epi8( *(buf+13), shuf_bswap64 ); \
ME = _mm512_shuffle_epi8( *(buf+14), shuf_bswap64 ); \
MF = _mm512_shuffle_epi8( *(buf+15), shuf_bswap64 ); \
ROUND_B_8WAY(0); \
ROUND_B_8WAY(1); \
ROUND_B_8WAY(2); \
ROUND_B_8WAY(3); \
ROUND_B_8WAY(4); \
ROUND_B_8WAY(5); \
ROUND_B_8WAY(6); \
ROUND_B_8WAY(7); \
ROUND_B_8WAY(8); \
ROUND_B_8WAY(9); \
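/* Blake-512 runs 16 rounds; the sigma schedule repeats after round 9. */ \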
ROUND_B_8WAY(0); \
ROUND_B_8WAY(1); \
ROUND_B_8WAY(2); \
ROUND_B_8WAY(3); \
ROUND_B_8WAY(4); \
ROUND_B_8WAY(5); \
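/* Blake feed forward: H' = H ^ S ^ V[0..7] ^ V[8..15] */ \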
H0 = mm512_xor4( V8, V0, S0, H0 ); \
H1 = mm512_xor4( V9, V1, S1, H1 ); \
H2 = mm512_xor4( VA, V2, S2, H2 ); \
H3 = mm512_xor4( VB, V3, S3, H3 ); \
H4 = mm512_xor4( VC, V4, S0, H4 ); \
H5 = mm512_xor4( VD, V5, S1, H5 ); \
H6 = mm512_xor4( VE, V6, S2, H6 ); \
H7 = mm512_xor4( VF, V7, S3, H7 ); \
} while (0)
#else
static void
blake64_8way_init( blake_8way_big_context *sc, const sph_u64 *iv,
const sph_u64 *salt )
{
__m512i zero = m512_zero;
casti_m512i( sc->H, 0 ) = m512_const1_64( 0x6A09E667F3BCC908 );
casti_m512i( sc->H, 1 ) = m512_const1_64( 0xBB67AE8584CAA73B );
casti_m512i( sc->H, 2 ) = m512_const1_64( 0x3C6EF372FE94F82B );
casti_m512i( sc->H, 3 ) = m512_const1_64( 0xA54FF53A5F1D36F1 );
casti_m512i( sc->H, 4 ) = m512_const1_64( 0x510E527FADE682D1 );
casti_m512i( sc->H, 5 ) = m512_const1_64( 0x9B05688C2B3E6C1F );
casti_m512i( sc->H, 6 ) = m512_const1_64( 0x1F83D9ABFB41BD6B );
casti_m512i( sc->H, 7 ) = m512_const1_64( 0x5BE0CD19137E2179 );
casti_m512i( sc->S, 0 ) = zero;
casti_m512i( sc->S, 1 ) = zero;
casti_m512i( sc->S, 2 ) = zero;
casti_m512i( sc->S, 3 ) = zero;
sc->T0 = sc->T1 = 0;
sc->ptr = 0;
}
static void
blake64_8way( blake_8way_big_context *sc, const void *data, size_t len )
{
__m512i *vdata = (__m512i*)data;
__m512i *buf;
size_t ptr;
DECL_STATE64_8WAY
const int buf_size = 128; // bytes per lane: sizeof(sc->buf) / 8
buf = sc->buf;
ptr = sc->ptr;
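// Buffer the input; compress only when a full 128 byte block is ready.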
if ( len < (buf_size - ptr) )
{
memcpy_512( buf + (ptr>>3), vdata, len>>3 );
ptr += len;
sc->ptr = ptr;
return;
}
READ_STATE64(sc);
while ( len > 0 )
{
size_t clen;
clen = buf_size - ptr;
if ( clen > len )
clen = len;
memcpy_512( buf + (ptr>>3), vdata, clen>>3 );
ptr += clen;
vdata = vdata + (clen>>3);
len -= clen;
if ( ptr == buf_size )
{
if ( ( T0 = SPH_T64(T0 + 1024) ) < 1024 )
T1 = SPH_T64(T1 + 1);
COMPRESS64_8WAY;
ptr = 0;
}
}
WRITE_STATE64(sc);
sc->ptr = ptr;
}
static void
blake64_8way_close( blake_8way_big_context *sc,
unsigned ub, unsigned n, void *dst, size_t out_size_w64)
{
__m512i buf[16];
size_t ptr;
unsigned bit_len;
uint64_t z, zz;
sph_u64 th, tl;
ptr = sc->ptr;
bit_len = ((unsigned)ptr << 3);
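// Build the final pad byte: keep the n high bits of ub and set a single
// 1 bit immediately after them.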
z = 0x80 >> n;
zz = ((ub & -z) | z) & 0xFF;
buf[ptr>>3] = _mm512_set1_epi64( zz );
tl = sc->T0 + bit_len;
th = sc->T1;
if (ptr == 0 )
{
sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00ULL);
sc->T1 = SPH_C64(0xFFFFFFFFFFFFFFFFULL);
}
else if ( sc->T0 == 0 )
{
sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00ULL) + bit_len;
sc->T1 = SPH_T64(sc->T1 - 1);
}
else
{
sc->T0 -= 1024 - bit_len;
}
if ( ptr <= 104 )
{
memset_zero_512( buf + (ptr>>3) + 1, (104-ptr) >> 3 );
if ( out_size_w64 == 8 )
buf[(104>>3)] = _mm512_or_si512( buf[(104>>3)],
m512_const1_64( 0x0100000000000000ULL ) );
*(buf+(112>>3)) = _mm512_set1_epi64( bswap_64( th ) );
*(buf+(120>>3)) = _mm512_set1_epi64( bswap_64( tl ) );
blake64_8way( sc, buf + (ptr>>3), 128 - ptr );
}
else
{
memset_zero_512( buf + (ptr>>3) + 1, (120 - ptr) >> 3 );
blake64_8way( sc, buf + (ptr>>3), 128 - ptr );
sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00ULL);
sc->T1 = SPH_C64(0xFFFFFFFFFFFFFFFFULL);
memset_zero_512( buf, 112>>3 );
if ( out_size_w64 == 8 )
buf[104>>3] = m512_const1_64( 0x0100000000000000ULL );
*(buf+(112>>3)) = _mm512_set1_epi64( bswap_64( th ) );
*(buf+(120>>3)) = _mm512_set1_epi64( bswap_64( tl ) );
blake64_8way( sc, buf, 128 );
}
mm512_block_bswap_64( (__m512i*)dst, sc->H );
}
void
blake512_8way_init(void *cc)
{
blake64_8way_init(cc, IV512, salt_zero_big);
}
void
blake512_8way_update(void *cc, const void *data, size_t len)
{
blake64_8way(cc, data, len);
}
void
blake512_8way_close(void *cc, void *dst)
{
blake512_8way_addbits_and_close(cc, 0, 0, dst);
}
void
blake512_8way_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
{
blake64_8way_close(cc, ub, n, dst, 8);
}
#endif // AVX512
// Blake-512 4 way
#define GB_4WAY(m0, m1, c0, c1, a, b, c, d) do { \
a = _mm256_add_epi64( _mm256_add_epi64( _mm256_xor_si256( \
_mm256_set1_epi64x( c1 ), m0 ), b ), a ); \
d = mm256_ror_64( _mm256_xor_si256( d, a ), 32 ); \
c = _mm256_add_epi64( c, d ); \
b = mm256_ror_64( _mm256_xor_si256( b, c ), 25 ); \
a = _mm256_add_epi64( _mm256_add_epi64( _mm256_xor_si256( \
_mm256_set1_epi64x( c0 ), m1 ), b ), a ); \
d = mm256_ror_64( _mm256_xor_si256( d, a ), 16 ); \
c = _mm256_add_epi64( c, d ); \
b = mm256_ror_64( _mm256_xor_si256( b, c ), 11 ); \
} while (0)
#define ROUND_B_4WAY(r) do { \
GB_4WAY(Mx(r, 0), Mx(r, 1), CBx(r, 0), CBx(r, 1), V0, V4, V8, VC); \
GB_4WAY(Mx(r, 2), Mx(r, 3), CBx(r, 2), CBx(r, 3), V1, V5, V9, VD); \
GB_4WAY(Mx(r, 4), Mx(r, 5), CBx(r, 4), CBx(r, 5), V2, V6, VA, VE); \
GB_4WAY(Mx(r, 6), Mx(r, 7), CBx(r, 6), CBx(r, 7), V3, V7, VB, VF); \
GB_4WAY(Mx(r, 8), Mx(r, 9), CBx(r, 8), CBx(r, 9), V0, V5, VA, VF); \
GB_4WAY(Mx(r, A), Mx(r, B), CBx(r, A), CBx(r, B), V1, V6, VB, VC); \
GB_4WAY(Mx(r, C), Mx(r, D), CBx(r, C), CBx(r, D), V2, V7, V8, VD); \
GB_4WAY(Mx(r, E), Mx(r, F), CBx(r, E), CBx(r, F), V3, V4, V9, VE); \
} while (0)
#define DECL_STATE64_4WAY \
__m256i H0, H1, H2, H3, H4, H5, H6, H7; \
__m256i S0, S1, S2, S3; \
sph_u64 T0, T1;
#define COMPRESS64_4WAY do \
{ \
@@ -493,7 +654,8 @@ static const sph_u64 CB[16] = {
m256_const1_64( CB6 ) ); \
VF = _mm256_xor_si256( _mm256_set1_epi64x( T1 ), \
m256_const1_64( CB7 ) ); \
shuf_bswap64 = m256_const2_64( 0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
shuf_bswap64 = m256_const_64( 0x18191a1b1c1d1e1f, 0x1011121314151617, \
0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
M0 = _mm256_shuffle_epi8( *(buf+ 0), shuf_bswap64 ); \
M1 = _mm256_shuffle_epi8( *(buf+ 1), shuf_bswap64 ); \
M2 = _mm256_shuffle_epi8( *(buf+ 2), shuf_bswap64 ); \
@@ -536,9 +698,7 @@ static const sph_u64 CB[16] = {
H7 = mm256_xor4( VF, V7, S3, H7 ); \
} while (0)
#endif
static const sph_u64 salt_zero_big[4] = { 0, 0, 0, 0 };
//static const sph_u64 salt_zero_big[4] = { 0, 0, 0, 0 };
static void
blake64_4way_init( blake_4way_big_context *sc, const sph_u64 *iv,
@@ -583,7 +743,7 @@ blake64_4way( blake_4way_big_context *sc, const void *data, size_t len)
return;
}
READ_STATE64_4WAY(sc);
READ_STATE64(sc);
while ( len > 0 )
{
size_t clen;
@@ -603,7 +763,7 @@ blake64_4way( blake_4way_big_context *sc, const void *data, size_t len)
ptr = 0;
}
}
WRITE_STATE64_4WAY(sc);
WRITE_STATE64(sc);
sc->ptr = ptr;
}
@@ -674,7 +834,7 @@ blake512_4way_init(void *cc)
}
void
blake512_4way(void *cc, const void *data, size_t len)
blake512_4way_update(void *cc, const void *data, size_t len)
{
blake64_4way(cc, data, len);
}

View File

@@ -78,7 +78,7 @@ void bmw256_4way_addbits_and_close(
// BMW-256 8 way 32
typedef struct {
__m256i buf[64];
__m256i buf[16];
__m256i H[16];
size_t ptr;
uint32_t bit_count; // assume bit_count fits in 32 bits
@@ -107,7 +107,8 @@ typedef struct {
typedef bmw_2way_big_context bmw512_2way_context;
void bmw512_2way_init( bmw512_2way_context *ctx );
void bmw512_2way( bmw512_2way_context *ctx, const void *data, size_t len );
void bmw512_2way_update( bmw512_2way_context *ctx, const void *data,
size_t len );
void bmw512_2way_close( bmw512_2way_context *ctx, void *dst );
#endif // __SSE2__
@@ -121,14 +122,15 @@ typedef struct {
__m256i H[16];
size_t ptr;
sph_u64 bit_count;
} bmw_4way_big_context;
} bmw_4way_big_context __attribute__((aligned(128)));
typedef bmw_4way_big_context bmw512_4way_context;
void bmw512_4way_init(void *cc);
void bmw512_4way(void *cc, const void *data, size_t len);
void bmw512_4way_update(void *cc, const void *data, size_t len);
#define bmw512_4way bmw512_4way_update
void bmw512_4way_close(void *cc, void *dst);
@@ -137,6 +139,22 @@ void bmw512_4way_addbits_and_close(
#endif // __AVX2__
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
typedef struct {
__m512i buf[16];
__m512i H[16];
size_t ptr;
uint64_t bit_count;
} bmw512_8way_context __attribute__((aligned(128)));
void bmw512_8way_init( bmw512_8way_context *ctx );
void bmw512_8way_update( bmw512_8way_context *ctx, const void *data,
size_t len );
void bmw512_8way_close( bmw512_8way_context *ctx, void *dst );
#endif // AVX512
#ifdef __cplusplus
}
#endif

View File

@@ -137,165 +137,151 @@ static const uint32_t IV256[] = {
ss4( qt[ (i)- 2 ] ), ss5( qt[ (i)- 1 ] ) ) ), \
add_elt_s( M, H, (i)-16 ) )
// Expressions are grouped using associativity to reduce CPU dependencies,
// resulting in some sign changes compared to the reference code.
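// For example the reference W0 = (M5^H5) - (M7^H7) + (M10^H10)
// + (M13^H13) + (M14^H14) is computed here as
// ( ((M5^H5) - (M7^H7)) + (M10^H10) ) + ( (M13^H13) + (M14^H14) ),
// letting the two halves execute in parallel.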
#define Ws0 \
_mm_add_epi32( \
_mm_add_epi32( \
_mm_add_epi32( \
_mm_sub_epi32( _mm_xor_si128( M[ 5], H[ 5] ), \
_mm_xor_si128( M[ 7], H[ 7] ) ), \
_mm_xor_si128( M[10], H[10] ) ), \
_mm_xor_si128( M[13], H[13] ) ), \
_mm_xor_si128( M[14], H[14] ) )
_mm_add_epi32( _mm_xor_si128( M[13], H[13] ), \
_mm_xor_si128( M[14], H[14] ) ) )
#define Ws1 \
_mm_sub_epi32( \
_mm_add_epi32( \
_mm_add_epi32( \
_mm_sub_epi32( _mm_xor_si128( M[ 6], H[ 6] ), \
_mm_xor_si128( M[ 8], H[ 8] ) ), \
_mm_xor_si128( M[11], H[11] ) ), \
_mm_xor_si128( M[14], H[14] ) ), \
_mm_xor_si128( M[15], H[15] ) )
_mm_sub_epi32( _mm_xor_si128( M[14], H[14] ), \
_mm_xor_si128( M[15], H[15] ) ) )
#define Ws2 \
_mm_add_epi32( \
_mm_sub_epi32( \
_mm_add_epi32( \
_mm_add_epi32( _mm_xor_si128( M[ 0], H[ 0] ), \
_mm_xor_si128( M[ 7], H[ 7] ) ), \
_mm_xor_si128( M[ 9], H[ 9] ) ), \
_mm_xor_si128( M[12], H[12] ) ), \
_mm_xor_si128( M[15], H[15] ) )
_mm_sub_epi32( _mm_xor_si128( M[12], H[12] ), \
_mm_xor_si128( M[15], H[15] ) ) )
#define Ws3 \
_mm_add_epi32( \
_mm_sub_epi32( \
_mm_add_epi32( \
_mm_sub_epi32( _mm_xor_si128( M[ 0], H[ 0] ), \
_mm_xor_si128( M[ 1], H[ 1] ) ), \
_mm_xor_si128( M[ 8], H[ 8] ) ), \
_mm_xor_si128( M[10], H[10] ) ), \
_mm_xor_si128( M[13], H[13] ) )
_mm_sub_epi32( _mm_xor_si128( M[10], H[10] ), \
_mm_xor_si128( M[13], H[13] ) ) )
#define Ws4 \
_mm_sub_epi32( \
_mm_sub_epi32( \
_mm_add_epi32( \
_mm_add_epi32( _mm_xor_si128( M[ 1], H[ 1] ), \
_mm_xor_si128( M[ 2], H[ 2] ) ), \
_mm_xor_si128( M[ 9], H[ 9] ) ), \
_mm_xor_si128( M[11], H[11] ) ), \
_mm_xor_si128( M[14], H[14] ) )
_mm_add_epi32( _mm_xor_si128( M[11], H[11] ), \
_mm_xor_si128( M[14], H[14] ) ) )
#define Ws5 \
_mm_add_epi32( \
_mm_sub_epi32( \
_mm_add_epi32( \
_mm_sub_epi32( _mm_xor_si128( M[ 3], H[ 3] ), \
_mm_xor_si128( M[ 2], H[ 2] ) ), \
_mm_xor_si128( M[10], H[10] ) ), \
_mm_xor_si128( M[12], H[12] ) ), \
_mm_xor_si128( M[15], H[15] ) )
_mm_sub_epi32( _mm_xor_si128( M[12], H[12] ), \
_mm_xor_si128( M[15], H[15] ) ) )
#define Ws6 \
_mm_add_epi32( \
_mm_sub_epi32( \
_mm_sub_epi32( \
_mm_sub_epi32( _mm_xor_si128( M[ 4], H[ 4] ), \
_mm_xor_si128( M[ 0], H[ 0] ) ), \
_mm_xor_si128( M[ 3], H[ 3] ) ), \
_mm_xor_si128( M[11], H[11] ) ), \
_mm_xor_si128( M[13], H[13] ) )
_mm_sub_epi32( _mm_xor_si128( M[11], H[11] ), \
_mm_xor_si128( M[13], H[13] ) ) )
#define Ws7 \
_mm_sub_epi32( \
_mm_sub_epi32( \
_mm_sub_epi32( \
_mm_sub_epi32( _mm_xor_si128( M[ 1], H[ 1] ), \
_mm_xor_si128( M[ 4], H[ 4] ) ), \
_mm_xor_si128( M[ 5], H[ 5] ) ), \
_mm_xor_si128( M[12], H[12] ) ), \
_mm_xor_si128( M[14], H[14] ) )
_mm_add_epi32( _mm_xor_si128( M[12], H[12] ), \
_mm_xor_si128( M[14], H[14] ) ) )
#define Ws8 \
_mm_sub_epi32( \
_mm_add_epi32( \
_mm_sub_epi32( \
_mm_sub_epi32( _mm_xor_si128( M[ 2], H[ 2] ), \
_mm_xor_si128( M[ 5], H[ 5] ) ), \
_mm_xor_si128( M[ 6], H[ 6] ) ), \
_mm_xor_si128( M[13], H[13] ) ), \
_mm_xor_si128( M[15], H[15] ) )
_mm_sub_epi32( _mm_xor_si128( M[13], H[13] ), \
_mm_xor_si128( M[15], H[15] ) ) )
#define Ws9 \
_mm_add_epi32( \
_mm_sub_epi32( \
_mm_add_epi32( \
_mm_sub_epi32( _mm_xor_si128( M[ 0], H[ 0] ), \
_mm_xor_si128( M[ 3], H[ 3] ) ), \
_mm_xor_si128( M[ 6], H[ 6] ) ), \
_mm_xor_si128( M[ 7], H[ 7] ) ), \
_mm_xor_si128( M[14], H[14] ) )
_mm_sub_epi32( _mm_xor_si128( M[ 7], H[ 7] ), \
_mm_xor_si128( M[14], H[14] ) ) )
#define Ws10 \
_mm_add_epi32( \
_mm_sub_epi32( \
_mm_sub_epi32( \
_mm_sub_epi32( _mm_xor_si128( M[ 8], H[ 8] ), \
_mm_xor_si128( M[ 1], H[ 1] ) ), \
_mm_xor_si128( M[ 4], H[ 4] ) ), \
_mm_xor_si128( M[ 7], H[ 7] ) ), \
_mm_xor_si128( M[15], H[15] ) )
_mm_sub_epi32( _mm_xor_si128( M[ 7], H[ 7] ), \
_mm_xor_si128( M[15], H[15] ) ) )
#define Ws11 \
_mm_add_epi32( \
_mm_sub_epi32( \
_mm_sub_epi32( \
_mm_sub_epi32( _mm_xor_si128( M[ 8], H[ 8] ), \
_mm_xor_si128( M[ 0], H[ 0] ) ), \
_mm_xor_si128( M[ 2], H[ 2] ) ), \
_mm_xor_si128( M[ 5], H[ 5] ) ), \
_mm_xor_si128( M[ 9], H[ 9] ) )
_mm_sub_epi32( _mm_xor_si128( M[ 5], H[ 5] ), \
_mm_xor_si128( M[ 9], H[ 9] ) ) )
#define Ws12 \
_mm_add_epi32( \
_mm_sub_epi32( \
_mm_sub_epi32( \
_mm_add_epi32( _mm_xor_si128( M[ 1], H[ 1] ), \
_mm_xor_si128( M[ 3], H[ 3] ) ), \
_mm_xor_si128( M[ 6], H[ 6] ) ), \
_mm_xor_si128( M[ 9], H[ 9] ) ), \
_mm_xor_si128( M[10], H[10] ) )
_mm_sub_epi32( _mm_xor_si128( M[ 9], H[ 9] ), \
_mm_xor_si128( M[10], H[10] ) ) )
#define Ws13 \
_mm_add_epi32( \
_mm_add_epi32( \
_mm_add_epi32( \
_mm_add_epi32( _mm_xor_si128( M[ 2], H[ 2] ), \
_mm_xor_si128( M[ 4], H[ 4] ) ), \
_mm_xor_si128( M[ 7], H[ 7] ) ), \
_mm_xor_si128( M[10], H[10] ) ), \
_mm_xor_si128( M[11], H[11] ) )
_mm_add_epi32( _mm_xor_si128( M[10], H[10] ), \
_mm_xor_si128( M[11], H[11] ) ) )
#define Ws14 \
_mm_sub_epi32( \
_mm_sub_epi32( \
_mm_add_epi32( \
_mm_sub_epi32( _mm_xor_si128( M[ 3], H[ 3] ), \
_mm_xor_si128( M[ 5], H[ 5] ) ), \
_mm_xor_si128( M[ 8], H[ 8] ) ), \
_mm_xor_si128( M[11], H[11] ) ), \
_mm_xor_si128( M[12], H[12] ) )
_mm_add_epi32( _mm_xor_si128( M[11], H[11] ), \
_mm_xor_si128( M[12], H[12] ) ) )
#define Ws15 \
_mm_add_epi32( \
_mm_sub_epi32( \
_mm_sub_epi32( \
_mm_sub_epi32( _mm_xor_si128( M[12], H[12] ), \
_mm_xor_si128( M[ 4], H[ 4] ) ), \
_mm_xor_si128( M[ 4], H[4] ) ), \
_mm_xor_si128( M[ 6], H[ 6] ) ), \
_mm_xor_si128( M[ 9], H[ 9] ) ), \
_mm_xor_si128( M[13], H[13] ) )
_mm_sub_epi32( _mm_xor_si128( M[ 9], H[ 9] ), \
_mm_xor_si128( M[13], H[13] ) ) )
void compress_small( const __m128i *M, const __m128i H[16], __m128i dH[16] )
@@ -699,164 +685,149 @@ bmw256_4way_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
#define W8s0 \
_mm256_add_epi32( \
_mm256_add_epi32( \
_mm256_add_epi32( \
_mm256_sub_epi32( _mm256_xor_si256( M[ 5], H[ 5] ), \
_mm256_xor_si256( M[ 7], H[ 7] ) ), \
_mm256_xor_si256( M[10], H[10] ) ), \
_mm256_xor_si256( M[13], H[13] ) ), \
_mm256_xor_si256( M[14], H[14] ) )
_mm256_add_epi32( _mm256_xor_si256( M[13], H[13] ), \
_mm256_xor_si256( M[14], H[14] ) ) )
#define W8s1 \
_mm256_sub_epi32( \
_mm256_add_epi32( \
_mm256_add_epi32( \
_mm256_sub_epi32( _mm256_xor_si256( M[ 6], H[ 6] ), \
_mm256_xor_si256( M[ 8], H[ 8] ) ), \
_mm256_xor_si256( M[11], H[11] ) ), \
_mm256_xor_si256( M[14], H[14] ) ), \
_mm256_xor_si256( M[15], H[15] ) )
_mm256_sub_epi32( _mm256_xor_si256( M[14], H[14] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
#define W8s2 \
_mm256_add_epi32( \
_mm256_sub_epi32( \
_mm256_add_epi32( \
_mm256_add_epi32( _mm256_xor_si256( M[ 0], H[ 0] ), \
_mm256_xor_si256( M[ 7], H[ 7] ) ), \
_mm256_xor_si256( M[ 9], H[ 9] ) ), \
_mm256_xor_si256( M[12], H[12] ) ), \
_mm256_xor_si256( M[15], H[15] ) )
_mm256_sub_epi32( _mm256_xor_si256( M[12], H[12] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
#define W8s3 \
_mm256_add_epi32( \
_mm256_sub_epi32( \
_mm256_add_epi32( \
_mm256_sub_epi32( _mm256_xor_si256( M[ 0], H[ 0] ), \
_mm256_xor_si256( M[ 1], H[ 1] ) ), \
_mm256_xor_si256( M[ 8], H[ 8] ) ), \
_mm256_xor_si256( M[10], H[10] ) ), \
_mm256_xor_si256( M[13], H[13] ) )
_mm256_sub_epi32( _mm256_xor_si256( M[10], H[10] ), \
_mm256_xor_si256( M[13], H[13] ) ) )
#define W8s4 \
_mm256_sub_epi32( \
_mm256_sub_epi32( \
_mm256_add_epi32( \
_mm256_add_epi32( _mm256_xor_si256( M[ 1], H[ 1] ), \
_mm256_xor_si256( M[ 2], H[ 2] ) ), \
_mm256_xor_si256( M[ 9], H[ 9] ) ), \
_mm256_xor_si256( M[11], H[11] ) ), \
_mm256_xor_si256( M[14], H[14] ) )
_mm256_add_epi32( _mm256_xor_si256( M[11], H[11] ), \
_mm256_xor_si256( M[14], H[14] ) ) )
#define W8s5 \
_mm256_add_epi32( \
_mm256_sub_epi32( \
_mm256_add_epi32( \
_mm256_sub_epi32( _mm256_xor_si256( M[ 3], H[ 3] ), \
_mm256_xor_si256( M[ 2], H[ 2] ) ), \
_mm256_xor_si256( M[10], H[10] ) ), \
_mm256_xor_si256( M[12], H[12] ) ), \
_mm256_xor_si256( M[15], H[15] ) )
_mm256_sub_epi32( _mm256_xor_si256( M[12], H[12] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
#define W8s6 \
_mm256_add_epi32( \
_mm256_sub_epi32( \
_mm256_sub_epi32( \
_mm256_sub_epi32( _mm256_xor_si256( M[ 4], H[ 4] ), \
_mm256_xor_si256( M[ 0], H[ 0] ) ), \
_mm256_xor_si256( M[ 3], H[ 3] ) ), \
_mm256_xor_si256( M[11], H[11] ) ), \
_mm256_xor_si256( M[13], H[13] ) )
_mm256_sub_epi32( _mm256_xor_si256( M[11], H[11] ), \
_mm256_xor_si256( M[13], H[13] ) ) )
#define W8s7 \
_mm256_sub_epi32( \
_mm256_sub_epi32( \
_mm256_sub_epi32( \
_mm256_sub_epi32( _mm256_xor_si256( M[ 1], H[ 1] ), \
_mm256_xor_si256( M[ 4], H[ 4] ) ), \
_mm256_xor_si256( M[ 5], H[ 5] ) ), \
_mm256_xor_si256( M[12], H[12] ) ), \
_mm256_xor_si256( M[14], H[14] ) )
_mm256_add_epi32( _mm256_xor_si256( M[12], H[12] ), \
_mm256_xor_si256( M[14], H[14] ) ) )
#define W8s8 \
_mm256_sub_epi32( \
_mm256_add_epi32( \
_mm256_sub_epi32( \
_mm256_sub_epi32( _mm256_xor_si256( M[ 2], H[ 2] ), \
_mm256_xor_si256( M[ 5], H[ 5] ) ), \
_mm256_xor_si256( M[ 6], H[ 6] ) ), \
_mm256_xor_si256( M[13], H[13] ) ), \
_mm256_xor_si256( M[15], H[15] ) )
_mm256_sub_epi32( _mm256_xor_si256( M[13], H[13] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
#define W8s9 \
_mm256_add_epi32( \
_mm256_sub_epi32( \
_mm256_add_epi32( \
_mm256_sub_epi32( _mm256_xor_si256( M[ 0], H[ 0] ), \
_mm256_xor_si256( M[ 3], H[ 3] ) ), \
_mm256_xor_si256( M[ 6], H[ 6] ) ), \
_mm256_xor_si256( M[ 7], H[ 7] ) ), \
_mm256_xor_si256( M[14], H[14] ) )
_mm256_sub_epi32( _mm256_xor_si256( M[ 7], H[ 7] ), \
_mm256_xor_si256( M[14], H[14] ) ) )
#define W8s10 \
_mm256_add_epi32( \
_mm256_sub_epi32( \
_mm256_sub_epi32( \
_mm256_sub_epi32( _mm256_xor_si256( M[ 8], H[ 8] ), \
_mm256_xor_si256( M[ 1], H[ 1] ) ), \
_mm256_xor_si256( M[ 4], H[ 4] ) ), \
_mm256_xor_si256( M[ 7], H[ 7] ) ), \
_mm256_xor_si256( M[15], H[15] ) )
_mm256_sub_epi32( _mm256_xor_si256( M[ 7], H[ 7] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
#define W8s11 \
_mm256_add_epi32( \
_mm256_sub_epi32( \
_mm256_sub_epi32( \
_mm256_sub_epi32( _mm256_xor_si256( M[ 8], H[ 8] ), \
_mm256_xor_si256( M[ 0], H[ 0] ) ), \
_mm256_xor_si256( M[ 2], H[ 2] ) ), \
_mm256_xor_si256( M[ 5], H[ 5] ) ), \
_mm256_xor_si256( M[ 9], H[ 9] ) )
_mm256_sub_epi32( _mm256_xor_si256( M[ 5], H[ 5] ), \
_mm256_xor_si256( M[ 9], H[ 9] ) ) )
#define W8s12 \
_mm256_add_epi32( \
_mm256_sub_epi32( \
_mm256_sub_epi32( \
_mm256_add_epi32( _mm256_xor_si256( M[ 1], H[ 1] ), \
_mm256_xor_si256( M[ 3], H[ 3] ) ), \
_mm256_xor_si256( M[ 6], H[ 6] ) ), \
_mm256_xor_si256( M[ 9], H[ 9] ) ), \
_mm256_xor_si256( M[10], H[10] ) )
_mm256_sub_epi32( _mm256_xor_si256( M[ 9], H[ 9] ), \
_mm256_xor_si256( M[10], H[10] ) ) )
#define W8s13 \
_mm256_add_epi32( \
_mm256_add_epi32( \
_mm256_add_epi32( \
_mm256_add_epi32( _mm256_xor_si256( M[ 2], H[ 2] ), \
_mm256_xor_si256( M[ 4], H[ 4] ) ), \
_mm256_xor_si256( M[ 7], H[ 7] ) ), \
_mm256_xor_si256( M[10], H[10] ) ), \
_mm256_xor_si256( M[11], H[11] ) )
_mm256_add_epi32( _mm256_xor_si256( M[10], H[10] ), \
_mm256_xor_si256( M[11], H[11] ) ) )
#define W8s14 \
_mm256_sub_epi32( \
_mm256_sub_epi32( \
_mm256_add_epi32( \
_mm256_sub_epi32( _mm256_xor_si256( M[ 3], H[ 3] ), \
_mm256_xor_si256( M[ 5], H[ 5] ) ), \
_mm256_xor_si256( M[ 8], H[ 8] ) ), \
_mm256_xor_si256( M[11], H[11] ) ), \
_mm256_xor_si256( M[12], H[12] ) )
_mm256_add_epi32( _mm256_xor_si256( M[11], H[11] ), \
_mm256_xor_si256( M[12], H[12] ) ) )
#define W8s15 \
_mm256_add_epi32( \
_mm256_sub_epi32( \
_mm256_sub_epi32( \
_mm256_sub_epi32( _mm256_xor_si256( M[12], H[12] ), \
_mm256_xor_si256( M[ 4], H[4] ) ), \
_mm256_xor_si256( M[ 6], H[ 6] ) ), \
_mm256_xor_si256( M[ 9], H[ 9] ) ), \
_mm256_xor_si256( M[13], H[13] ) )
_mm256_sub_epi32( _mm256_xor_si256( M[ 9], H[ 9] ), \
_mm256_xor_si256( M[13], H[13] ) ) )
void compress_small_8way( const __m256i *M, const __m256i H[16],
__m256i dH[16] )

View File

@@ -1,13 +1,66 @@
#include "bmw512-gate.h"
#ifdef BMW512_4WAY
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
//#include "sph_keccak.h"
#include "bmw-hash-4way.h"
#if defined(BMW512_8WAY)
void bmw512hash_8way(void *state, const void *input)
{
bmw512_8way_context ctx;
bmw512_8way_init( &ctx );
bmw512_8way_update( &ctx, input, 80 );
bmw512_8way_close( &ctx, state );
}
int scanhash_bmw512_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[24*8] __attribute__ ((aligned (64)));
uint32_t hash[16*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[49]); // 32 bit word 7 of lane 0: 3*16+1
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
__m512i *noncev = (__m512i*)vdata + 9; // aligned
// const uint32_t Htarg = ptarget[7];
int thr_id = mythr->id;
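// Interleave the 80 byte block header into 8 parallel 64 bit lanes,
// byte swapped to big endian.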
mm512_bswap32_intrlv80_8x64( vdata, pdata );
do {
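// Insert 8 consecutive nonces, byte swapped, into the interleaved data.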
*noncev = mm512_intrlv_blend_32( mm512_bswap_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0 ,
n+3, 0, n+2, 0, n+1, 0, n , 0 ) ), *noncev );
bmw512hash_8way( hash, vdata );
for ( int lane = 0; lane < 8; lane++ )
if ( ( ( hash7[ lane<<1 ] & 0xFFFFFF00 ) == 0 ) )
{
extr_lane_8x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 8;
} while ( (n < max_nonce-8) && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
return 0;
}
#elif defined(BMW512_4WAY)
//#ifdef BMW512_4WAY
void bmw512hash_4way(void *state, const void *input)
{
bmw512_4way_context ctx;

View File

@@ -2,9 +2,12 @@
bool register_bmw512_algo( algo_gate_t* gate )
{
gate->optimizations = AVX2_OPT;
gate->optimizations = AVX2_OPT | AVX512_OPT;
opt_target_factor = 256.0;
#if defined (BMW512_4WAY)
#if defined (BMW512_8WAY)
gate->scanhash = (void*)&scanhash_bmw512_8way;
gate->hash = (void*)&bmw512hash_8way;
#elif defined (BMW512_4WAY)
gate->scanhash = (void*)&scanhash_bmw512_4way;
gate->hash = (void*)&bmw512hash_4way;
#else

View File

@@ -1,23 +1,33 @@
#ifndef BMW512_GATE_H__
#define BMW512_GATE_H__
#define BMW512_GATE_H__ 1
#include "algo-gate-api.h"
#include <stdint.h>
#if defined(__AVX2__)
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define BMW512_8WAY 1
#elif defined(__AVX2__)
#define BMW512_4WAY 1
#endif
#if defined(BMW512_4WAY)
#if defined(BMW512_8WAY)
void bmw512hash_8way( void *state, const void *input );
int scanhash_bmw512_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined(BMW512_4WAY)
void bmw512hash_4way( void *state, const void *input );
int scanhash_bmw512_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
#else
void bmw512hash( void *state, const void *input );
int scanhash_bmw512( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
#endif

View File

@@ -556,18 +556,15 @@ void bmw512_2way_close( bmw_2way_big_context *ctx, void *dst )
compress_big_2way( buf, h, h2 );
memcpy_128( buf, h2, 16 );
compress_big_2way( buf, final_b2, h1 );
memcpy( (__m128i*)dst, h1+16, 8 );
memcpy( (__m128i*)dst, h1+8, 8 );
}
#endif // __SSE2__
#if defined(__AVX2__)
// BMW-512 4 way 64
#define sb0(x) \
mm256_xor4( _mm256_srli_epi64( (x), 1), _mm256_slli_epi64( (x), 3), \
mm256_rol_64( (x), 4), mm256_rol_64( (x),37) )
@@ -636,165 +633,152 @@ void bmw512_2way_close( bmw_2way_big_context *ctx, void *dst )
sb4( qt[ (i)- 2 ] ), sb5( qt[ (i)- 1 ] ) ) ), \
add_elt_b( M, H, (i)-16 ) )
#define Wb0 \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 5], H[ 5] ), \
_mm256_xor_si256( M[ 7], H[ 7] ) ), \
_mm256_xor_si256( M[10], H[10] ) ), \
_mm256_xor_si256( M[13], H[13] ) ), \
_mm256_xor_si256( M[14], H[14] ) )
_mm256_add_epi64( _mm256_xor_si256( M[13], H[13] ), \
_mm256_xor_si256( M[14], H[14] ) ) )
#define Wb1 \
_mm256_sub_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 6], H[ 6] ), \
_mm256_xor_si256( M[ 8], H[ 8] ) ), \
_mm256_xor_si256( M[11], H[11] ) ), \
_mm256_xor_si256( M[14], H[14] ) ), \
_mm256_xor_si256( M[15], H[15] ) )
_mm256_sub_epi64( _mm256_xor_si256( M[14], H[14] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
#define Wb2 \
_mm256_add_epi64( \
_mm256_sub_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( _mm256_xor_si256( M[ 0], H[ 0] ), \
_mm256_xor_si256( M[ 7], H[ 7] ) ), \
_mm256_xor_si256( M[ 9], H[ 9] ) ), \
_mm256_xor_si256( M[12], H[12] ) ), \
_mm256_xor_si256( M[15], H[15] ) )
_mm256_sub_epi64( _mm256_xor_si256( M[12], H[12] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
#define Wb3 \
_mm256_add_epi64( \
_mm256_sub_epi64( \
_mm256_add_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 0], H[ 0] ), \
_mm256_xor_si256( M[ 1], H[ 1] ) ), \
_mm256_xor_si256( M[ 8], H[ 8] ) ), \
_mm256_xor_si256( M[10], H[10] ) ), \
_mm256_xor_si256( M[13], H[13] ) )
_mm256_sub_epi64( _mm256_xor_si256( M[10], H[10] ), \
_mm256_xor_si256( M[13], H[13] ) ) )
#define Wb4 \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( _mm256_xor_si256( M[ 1], H[ 1] ), \
_mm256_xor_si256( M[ 2], H[ 2] ) ), \
_mm256_xor_si256( M[ 9], H[ 9] ) ), \
_mm256_xor_si256( M[11], H[11] ) ), \
_mm256_xor_si256( M[14], H[14] ) )
_mm256_add_epi64( _mm256_xor_si256( M[11], H[11] ), \
_mm256_xor_si256( M[14], H[14] ) ) )
#define Wb5 \
_mm256_add_epi64( \
_mm256_sub_epi64( \
_mm256_add_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 3], H[ 3] ), \
_mm256_xor_si256( M[ 2], H[ 2] ) ), \
_mm256_xor_si256( M[10], H[10] ) ), \
_mm256_xor_si256( M[12], H[12] ) ), \
_mm256_xor_si256( M[15], H[15] ) )
_mm256_sub_epi64( _mm256_xor_si256( M[12], H[12] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
#define Wb6 \
_mm256_add_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 4], H[ 4] ), \
_mm256_xor_si256( M[ 0], H[ 0] ) ), \
_mm256_xor_si256( M[ 3], H[ 3] ) ), \
_mm256_xor_si256( M[11], H[11] ) ), \
_mm256_xor_si256( M[13], H[13] ) )
_mm256_sub_epi64( _mm256_xor_si256( M[11], H[11] ), \
_mm256_xor_si256( M[13], H[13] ) ) )
#define Wb7 \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 1], H[ 1] ), \
_mm256_xor_si256( M[ 4], H[ 4] ) ), \
_mm256_xor_si256( M[ 5], H[ 5] ) ), \
_mm256_xor_si256( M[12], H[12] ) ), \
_mm256_xor_si256( M[14], H[14] ) )
_mm256_add_epi64( _mm256_xor_si256( M[12], H[12] ), \
_mm256_xor_si256( M[14], H[14] ) ) )
#define Wb8 \
_mm256_sub_epi64( \
_mm256_add_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 2], H[ 2] ), \
_mm256_xor_si256( M[ 5], H[ 5] ) ), \
_mm256_xor_si256( M[ 6], H[ 6] ) ), \
_mm256_xor_si256( M[13], H[13] ) ), \
_mm256_xor_si256( M[15], H[15] ) )
_mm256_sub_epi64( _mm256_xor_si256( M[13], H[13] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
#define Wb9 \
_mm256_add_epi64( \
_mm256_sub_epi64( \
_mm256_add_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 0], H[ 0] ), \
_mm256_xor_si256( M[ 3], H[ 3] ) ), \
_mm256_xor_si256( M[ 6], H[ 6] ) ), \
_mm256_xor_si256( M[ 7], H[ 7] ) ), \
_mm256_xor_si256( M[14], H[14] ) )
_mm256_sub_epi64( _mm256_xor_si256( M[ 7], H[ 7] ), \
_mm256_xor_si256( M[14], H[14] ) ) )
#define Wb10 \
_mm256_add_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 8], H[ 8] ), \
_mm256_xor_si256( M[ 1], H[ 1] ) ), \
_mm256_xor_si256( M[ 4], H[ 4] ) ), \
_mm256_xor_si256( M[ 7], H[ 7] ) ), \
_mm256_xor_si256( M[15], H[15] ) )
_mm256_sub_epi64( _mm256_xor_si256( M[ 7], H[ 7] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
#define Wb11 \
_mm256_add_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 8], H[ 8] ), \
_mm256_xor_si256( M[ 0], H[ 0] ) ), \
_mm256_xor_si256( M[ 2], H[ 2] ) ), \
_mm256_xor_si256( M[ 5], H[ 5] ) ), \
_mm256_xor_si256( M[ 9], H[ 9] ) )
_mm256_sub_epi64( _mm256_xor_si256( M[ 5], H[ 5] ), \
_mm256_xor_si256( M[ 9], H[ 9] ) ) )
#define Wb12 \
_mm256_add_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_add_epi64( _mm256_xor_si256( M[ 1], H[ 1] ), \
_mm256_xor_si256( M[ 3], H[ 3] ) ), \
_mm256_xor_si256( M[ 6], H[ 6] ) ), \
_mm256_xor_si256( M[ 9], H[ 9] ) ), \
_mm256_xor_si256( M[10], H[10] ) )
_mm256_sub_epi64( _mm256_xor_si256( M[ 9], H[ 9] ), \
_mm256_xor_si256( M[10], H[10] ) ) )
#define Wb13 \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( _mm256_xor_si256( M[ 2], H[ 2] ), \
_mm256_xor_si256( M[ 4], H[ 4] ) ), \
_mm256_xor_si256( M[ 7], H[ 7] ) ), \
_mm256_xor_si256( M[10], H[10] ) ), \
_mm256_xor_si256( M[11], H[11] ) )
_mm256_add_epi64( _mm256_xor_si256( M[10], H[10] ), \
_mm256_xor_si256( M[11], H[11] ) ) )
#define Wb14 \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_add_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 3], H[ 3] ), \
_mm256_xor_si256( M[ 5], H[ 5] ) ), \
_mm256_xor_si256( M[ 8], H[ 8] ) ), \
_mm256_xor_si256( M[11], H[11] ) ), \
_mm256_xor_si256( M[12], H[12] ) )
_mm256_add_epi64( _mm256_xor_si256( M[11], H[11] ), \
_mm256_xor_si256( M[12], H[12] ) ) )
#define Wb15 \
_mm256_add_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[12], H[12] ), \
_mm256_xor_si256( M[ 4], H[4] ) ), \
_mm256_xor_si256( M[ 6], H[ 6] ) ), \
_mm256_xor_si256( M[ 9], H[ 9] ) ), \
_mm256_xor_si256( M[13], H[13] ) )
_mm256_sub_epi64( _mm256_xor_si256( M[ 9], H[ 9] ), \
_mm256_xor_si256( M[13], H[13] ) ) )
void compress_big( const __m256i *M, const __m256i H[16], __m256i dH[16] )
{
@@ -1060,7 +1044,7 @@ bmw512_4way_init(void *cc)
}
void
bmw512_4way(void *cc, const void *data, size_t len)
bmw512_4way_update(void *cc, const void *data, size_t len)
{
bmw64_4way(cc, data, len);
}
@@ -1079,6 +1063,483 @@ bmw512_4way_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
#endif // __AVX2__
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// BMW-512 8 WAY
#define s8b0(x) \
mm512_xor4( _mm512_srli_epi64( (x), 1), _mm512_slli_epi64( (x), 3), \
mm512_rol_64( (x), 4), mm512_rol_64( (x),37) )
#define s8b1(x) \
mm512_xor4( _mm512_srli_epi64( (x), 1), _mm512_slli_epi64( (x), 2), \
mm512_rol_64( (x),13), mm512_rol_64( (x),43) )
#define s8b2(x) \
mm512_xor4( _mm512_srli_epi64( (x), 2), _mm512_slli_epi64( (x), 1), \
mm512_rol_64( (x),19), mm512_rol_64( (x),53) )
#define s8b3(x) \
mm512_xor4( _mm512_srli_epi64( (x), 2), _mm512_slli_epi64( (x), 2), \
mm512_rol_64( (x),28), mm512_rol_64( (x),59) )
#define s8b4(x) \
_mm512_xor_si512( (x), _mm512_srli_epi64( (x), 1 ) )
#define s8b5(x) \
_mm512_xor_si512( (x), _mm512_srli_epi64( (x), 2 ) )
#define r8b1(x) mm512_rol_64( x, 5 )
#define r8b2(x) mm512_rol_64( x, 11 )
#define r8b3(x) mm512_rol_64( x, 27 )
#define r8b4(x) mm512_rol_64( x, 32 )
#define r8b5(x) mm512_rol_64( x, 37 )
#define r8b6(x) mm512_rol_64( x, 43 )
#define r8b7(x) mm512_rol_64( x, 53 )
#define rol8w_off_64( M, j, off ) \
mm512_rol_64( M[ ( (j) + (off) ) & 0xF ] , \
( ( (j) + (off) ) & 0xF ) + 1 )
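// Rotate message word (j+off) mod 16 left by its own index plus one.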
#define add_elt_b8( M, H, j ) \
_mm512_xor_si512( \
_mm512_add_epi64( \
_mm512_sub_epi64( _mm512_add_epi64( rol8w_off_64( M, j, 0 ), \
rol8w_off_64( M, j, 3 ) ), \
rol8w_off_64( M, j, 10 ) ), \
_mm512_set1_epi64( ( (j) + 16 ) * 0x0555555555555555ULL ) ), \
H[ ( (j)+7 ) & 0xF ] )
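// add_elt_b8: ( rol(M[j]) + rol(M[j+3]) - rol(M[j+10]) + (j+16)*K ) ^ H[j+7]
// with K = 0x0555555555555555 and all indices mod 16.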
#define expand1b8( qt, M, H, i ) \
_mm512_add_epi64( mm512_add4_64( \
mm512_add4_64( s8b1( qt[ (i)-16 ] ), s8b2( qt[ (i)-15 ] ), \
s8b3( qt[ (i)-14 ] ), s8b0( qt[ (i)-13 ] )), \
mm512_add4_64( s8b1( qt[ (i)-12 ] ), s8b2( qt[ (i)-11 ] ), \
s8b3( qt[ (i)-10 ] ), s8b0( qt[ (i)- 9 ] )), \
mm512_add4_64( s8b1( qt[ (i)- 8 ] ), s8b2( qt[ (i)- 7 ] ), \
s8b3( qt[ (i)- 6 ] ), s8b0( qt[ (i)- 5 ] )), \
mm512_add4_64( s8b1( qt[ (i)- 4 ] ), s8b2( qt[ (i)- 3 ] ), \
s8b3( qt[ (i)- 2 ] ), s8b0( qt[ (i)- 1 ] ) ) ), \
add_elt_b8( M, H, (i)-16 ) )
#define expand2b8( qt, M, H, i) \
_mm512_add_epi64( mm512_add4_64( \
mm512_add4_64( qt[ (i)-16 ], r8b1( qt[ (i)-15 ] ), \
qt[ (i)-14 ], r8b2( qt[ (i)-13 ] ) ), \
mm512_add4_64( qt[ (i)-12 ], r8b3( qt[ (i)-11 ] ), \
qt[ (i)-10 ], r8b4( qt[ (i)- 9 ] ) ), \
mm512_add4_64( qt[ (i)- 8 ], r8b5( qt[ (i)- 7 ] ), \
qt[ (i)- 6 ], r8b6( qt[ (i)- 5 ] ) ), \
mm512_add4_64( qt[ (i)- 4 ], r8b7( qt[ (i)- 3 ] ), \
s8b4( qt[ (i)- 2 ] ), s8b5( qt[ (i)- 1 ] ) ) ), \
add_elt_b8( M, H, (i)-16 ) )
#define W8b0 \
_mm512_add_epi64( \
_mm512_add_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 5], H[ 5] ), \
_mm512_xor_si512( M[ 7], H[ 7] ) ), \
_mm512_xor_si512( M[10], H[10] ) ), \
_mm512_add_epi64( _mm512_xor_si512( M[13], H[13] ), \
_mm512_xor_si512( M[14], H[14] ) ) )
#define W8b1 \
_mm512_add_epi64( \
_mm512_add_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 6], H[ 6] ), \
_mm512_xor_si512( M[ 8], H[ 8] ) ), \
_mm512_xor_si512( M[11], H[11] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[14], H[14] ), \
_mm512_xor_si512( M[15], H[15] ) ) )
#define W8b2 \
_mm512_sub_epi64( \
_mm512_add_epi64( \
_mm512_add_epi64( _mm512_xor_si512( M[ 0], H[ 0] ), \
_mm512_xor_si512( M[ 7], H[ 7] ) ), \
_mm512_xor_si512( M[ 9], H[ 9] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[12], H[12] ), \
_mm512_xor_si512( M[15], H[15] ) ) )
#define W8b3 \
_mm512_sub_epi64( \
_mm512_add_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 0], H[ 0] ), \
_mm512_xor_si512( M[ 1], H[ 1] ) ), \
_mm512_xor_si512( M[ 8], H[ 8] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[10], H[10] ), \
_mm512_xor_si512( M[13], H[13] ) ) )
#define W8b4 \
_mm512_sub_epi64( \
_mm512_add_epi64( \
_mm512_add_epi64( _mm512_xor_si512( M[ 1], H[ 1] ), \
_mm512_xor_si512( M[ 2], H[ 2] ) ), \
_mm512_xor_si512( M[ 9], H[ 9] ) ), \
_mm512_add_epi64( _mm512_xor_si512( M[11], H[11] ), \
_mm512_xor_si512( M[14], H[14] ) ) )
#define W8b5 \
_mm512_sub_epi64( \
_mm512_add_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 3], H[ 3] ), \
_mm512_xor_si512( M[ 2], H[ 2] ) ), \
_mm512_xor_si512( M[10], H[10] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[12], H[12] ), \
_mm512_xor_si512( M[15], H[15] ) ) )
#define W8b6 \
_mm512_sub_epi64( \
_mm512_sub_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 4], H[ 4] ), \
_mm512_xor_si512( M[ 0], H[ 0] ) ), \
_mm512_xor_si512( M[ 3], H[ 3] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[11], H[11] ), \
_mm512_xor_si512( M[13], H[13] ) ) )
#define W8b7 \
_mm512_sub_epi64( \
_mm512_sub_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 1], H[ 1] ), \
_mm512_xor_si512( M[ 4], H[ 4] ) ), \
_mm512_xor_si512( M[ 5], H[ 5] ) ), \
_mm512_add_epi64( _mm512_xor_si512( M[12], H[12] ), \
_mm512_xor_si512( M[14], H[14] ) ) )
#define W8b8 \
_mm512_add_epi64( \
_mm512_sub_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 2], H[ 2] ), \
_mm512_xor_si512( M[ 5], H[ 5] ) ), \
_mm512_xor_si512( M[ 6], H[ 6] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[13], H[13] ), \
_mm512_xor_si512( M[15], H[15] ) ) )
#define W8b9 \
_mm512_sub_epi64( \
_mm512_add_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 0], H[ 0] ), \
_mm512_xor_si512( M[ 3], H[ 3] ) ), \
_mm512_xor_si512( M[ 6], H[ 6] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[ 7], H[ 7] ), \
_mm512_xor_si512( M[14], H[14] ) ) )
#define W8b10 \
_mm512_sub_epi64( \
_mm512_sub_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 8], H[ 8] ), \
_mm512_xor_si512( M[ 1], H[ 1] ) ), \
_mm512_xor_si512( M[ 4], H[ 4] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[ 7], H[ 7] ), \
_mm512_xor_si512( M[15], H[15] ) ) )
#define W8b11 \
_mm512_sub_epi64( \
_mm512_sub_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 8], H[ 8] ), \
_mm512_xor_si512( M[ 0], H[ 0] ) ), \
_mm512_xor_si512( M[ 2], H[ 2] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[ 5], H[ 5] ), \
_mm512_xor_si512( M[ 9], H[ 9] ) ) )
#define W8b12 \
_mm512_sub_epi64( \
_mm512_sub_epi64( \
_mm512_add_epi64( _mm512_xor_si512( M[ 1], H[ 1] ), \
_mm512_xor_si512( M[ 3], H[ 3] ) ), \
_mm512_xor_si512( M[ 6], H[ 6] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[ 9], H[ 9] ), \
_mm512_xor_si512( M[10], H[10] ) ) )
#define W8b13 \
_mm512_add_epi64( \
_mm512_add_epi64( \
_mm512_add_epi64( _mm512_xor_si512( M[ 2], H[ 2] ), \
_mm512_xor_si512( M[ 4], H[ 4] ) ), \
_mm512_xor_si512( M[ 7], H[ 7] ) ), \
_mm512_add_epi64( _mm512_xor_si512( M[10], H[10] ), \
_mm512_xor_si512( M[11], H[11] ) ) )
#define W8b14 \
_mm512_sub_epi64( \
_mm512_add_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 3], H[ 3] ), \
_mm512_xor_si512( M[ 5], H[ 5] ) ), \
_mm512_xor_si512( M[ 8], H[ 8] ) ), \
_mm512_add_epi64( _mm512_xor_si512( M[11], H[11] ), \
_mm512_xor_si512( M[12], H[12] ) ) )
#define W8b15 \
_mm512_sub_epi64( \
_mm512_sub_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[12], H[12] ), \
_mm512_xor_si512( M[ 4], H[4] ) ), \
_mm512_xor_si512( M[ 6], H[ 6] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[ 9], H[ 9] ), \
_mm512_xor_si512( M[13], H[13] ) ) )
void compress_big_8way( const __m512i *M, const __m512i H[16],
__m512i dH[16] )
{
__m512i qt[32], xl, xh;
qt[ 0] = _mm512_add_epi64( s8b0( W8b0 ), H[ 1] );
qt[ 1] = _mm512_add_epi64( s8b1( W8b1 ), H[ 2] );
qt[ 2] = _mm512_add_epi64( s8b2( W8b2 ), H[ 3] );
qt[ 3] = _mm512_add_epi64( s8b3( W8b3 ), H[ 4] );
qt[ 4] = _mm512_add_epi64( s8b4( W8b4 ), H[ 5] );
qt[ 5] = _mm512_add_epi64( s8b0( W8b5 ), H[ 6] );
qt[ 6] = _mm512_add_epi64( s8b1( W8b6 ), H[ 7] );
qt[ 7] = _mm512_add_epi64( s8b2( W8b7 ), H[ 8] );
qt[ 8] = _mm512_add_epi64( s8b3( W8b8 ), H[ 9] );
qt[ 9] = _mm512_add_epi64( s8b4( W8b9 ), H[10] );
qt[10] = _mm512_add_epi64( s8b0( W8b10), H[11] );
qt[11] = _mm512_add_epi64( s8b1( W8b11), H[12] );
qt[12] = _mm512_add_epi64( s8b2( W8b12), H[13] );
qt[13] = _mm512_add_epi64( s8b3( W8b13), H[14] );
qt[14] = _mm512_add_epi64( s8b4( W8b14), H[15] );
qt[15] = _mm512_add_epi64( s8b0( W8b15), H[ 0] );
qt[16] = expand1b8( qt, M, H, 16 );
qt[17] = expand1b8( qt, M, H, 17 );
qt[18] = expand2b8( qt, M, H, 18 );
qt[19] = expand2b8( qt, M, H, 19 );
qt[20] = expand2b8( qt, M, H, 20 );
qt[21] = expand2b8( qt, M, H, 21 );
qt[22] = expand2b8( qt, M, H, 22 );
qt[23] = expand2b8( qt, M, H, 23 );
qt[24] = expand2b8( qt, M, H, 24 );
qt[25] = expand2b8( qt, M, H, 25 );
qt[26] = expand2b8( qt, M, H, 26 );
qt[27] = expand2b8( qt, M, H, 27 );
qt[28] = expand2b8( qt, M, H, 28 );
qt[29] = expand2b8( qt, M, H, 29 );
qt[30] = expand2b8( qt, M, H, 30 );
qt[31] = expand2b8( qt, M, H, 31 );
xl = _mm512_xor_si512(
mm512_xor4( qt[16], qt[17], qt[18], qt[19] ),
mm512_xor4( qt[20], qt[21], qt[22], qt[23] ) );
xh = _mm512_xor_si512( xl, _mm512_xor_si512(
mm512_xor4( qt[24], qt[25], qt[26], qt[27] ),
mm512_xor4( qt[28], qt[29], qt[30], qt[31] ) ) );
#define DH1L( m, sl, sr, a, b, c ) \
_mm512_add_epi64( \
_mm512_xor_si512( M[m], \
_mm512_xor_si512( _mm512_slli_epi64( xh, sl ), \
_mm512_srli_epi64( qt[a], sr ) ) ), \
_mm512_xor_si512( _mm512_xor_si512( xl, qt[b] ), qt[c] ) )
#define DH1R( m, sl, sr, a, b, c ) \
_mm512_add_epi64( \
_mm512_xor_si512( M[m], \
_mm512_xor_si512( _mm512_srli_epi64( xh, sl ), \
_mm512_slli_epi64( qt[a], sr ) ) ), \
_mm512_xor_si512( _mm512_xor_si512( xl, qt[b] ), qt[c] ) )
#define DH2L( m, rl, sl, h, a, b, c ) \
_mm512_add_epi64( _mm512_add_epi64( \
mm512_rol_64( dH[h], rl ), \
_mm512_xor_si512( _mm512_xor_si512( xh, qt[a] ), M[m] )), \
_mm512_xor_si512( _mm512_slli_epi64( xl, sl ), \
_mm512_xor_si512( qt[b], qt[c] ) ) );
#define DH2R( m, rl, sr, h, a, b, c ) \
_mm512_add_epi64( _mm512_add_epi64( \
mm512_rol_64( dH[h], rl ), \
_mm512_xor_si512( _mm512_xor_si512( xh, qt[a] ), M[m] )), \
_mm512_xor_si512( _mm512_srli_epi64( xl, sr ), \
_mm512_xor_si512( qt[b], qt[c] ) ) );
dH[ 0] = DH1L( 0, 5, 5, 16, 24, 0 );
dH[ 1] = DH1R( 1, 7, 8, 17, 25, 1 );
dH[ 2] = DH1R( 2, 5, 5, 18, 26, 2 );
dH[ 3] = DH1R( 3, 1, 5, 19, 27, 3 );
dH[ 4] = DH1R( 4, 3, 0, 20, 28, 4 );
dH[ 5] = DH1L( 5, 6, 6, 21, 29, 5 );
dH[ 6] = DH1R( 6, 4, 6, 22, 30, 6 );
dH[ 7] = DH1R( 7, 11, 2, 23, 31, 7 );
dH[ 8] = DH2L( 8, 9, 8, 4, 24, 23, 8 );
dH[ 9] = DH2R( 9, 10, 6, 5, 25, 16, 9 );
dH[10] = DH2L( 10, 11, 6, 6, 26, 17, 10 );
dH[11] = DH2L( 11, 12, 4, 7, 27, 18, 11 );
dH[12] = DH2R( 12, 13, 3, 0, 28, 19, 12 );
dH[13] = DH2R( 13, 14, 4, 1, 29, 20, 13 );
dH[14] = DH2R( 14, 15, 7, 2, 30, 21, 14 );
dH[15] = DH2R( 15, 16, 2, 3, 31, 22, 15 );
#undef DH1L
#undef DH1R
#undef DH2L
#undef DH2R
}
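// Constant chaining values 0x...aaa0 through 0x...aaaf used as the H
// input of BMW's final compression, replicated across all 8 lanes.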
static const __m512i final_b8[16] =
{
{ 0xaaaaaaaaaaaaaaa0, 0xaaaaaaaaaaaaaaa0,
0xaaaaaaaaaaaaaaa0, 0xaaaaaaaaaaaaaaa0,
0xaaaaaaaaaaaaaaa0, 0xaaaaaaaaaaaaaaa0,
0xaaaaaaaaaaaaaaa0, 0xaaaaaaaaaaaaaaa0 },
{ 0xaaaaaaaaaaaaaaa1, 0xaaaaaaaaaaaaaaa1,
0xaaaaaaaaaaaaaaa1, 0xaaaaaaaaaaaaaaa1,
0xaaaaaaaaaaaaaaa1, 0xaaaaaaaaaaaaaaa1,
0xaaaaaaaaaaaaaaa1, 0xaaaaaaaaaaaaaaa1 },
{ 0xaaaaaaaaaaaaaaa2, 0xaaaaaaaaaaaaaaa2,
0xaaaaaaaaaaaaaaa2, 0xaaaaaaaaaaaaaaa2,
0xaaaaaaaaaaaaaaa2, 0xaaaaaaaaaaaaaaa2,
0xaaaaaaaaaaaaaaa2, 0xaaaaaaaaaaaaaaa2 },
{ 0xaaaaaaaaaaaaaaa3, 0xaaaaaaaaaaaaaaa3,
0xaaaaaaaaaaaaaaa3, 0xaaaaaaaaaaaaaaa3,
0xaaaaaaaaaaaaaaa3, 0xaaaaaaaaaaaaaaa3,
0xaaaaaaaaaaaaaaa3, 0xaaaaaaaaaaaaaaa3 },
{ 0xaaaaaaaaaaaaaaa4, 0xaaaaaaaaaaaaaaa4,
0xaaaaaaaaaaaaaaa4, 0xaaaaaaaaaaaaaaa4,
0xaaaaaaaaaaaaaaa4, 0xaaaaaaaaaaaaaaa4,
0xaaaaaaaaaaaaaaa4, 0xaaaaaaaaaaaaaaa4 },
{ 0xaaaaaaaaaaaaaaa5, 0xaaaaaaaaaaaaaaa5,
0xaaaaaaaaaaaaaaa5, 0xaaaaaaaaaaaaaaa5,
0xaaaaaaaaaaaaaaa5, 0xaaaaaaaaaaaaaaa5,
0xaaaaaaaaaaaaaaa5, 0xaaaaaaaaaaaaaaa5 },
{ 0xaaaaaaaaaaaaaaa6, 0xaaaaaaaaaaaaaaa6,
0xaaaaaaaaaaaaaaa6, 0xaaaaaaaaaaaaaaa6,
0xaaaaaaaaaaaaaaa6, 0xaaaaaaaaaaaaaaa6,
0xaaaaaaaaaaaaaaa6, 0xaaaaaaaaaaaaaaa6 },
{ 0xaaaaaaaaaaaaaaa7, 0xaaaaaaaaaaaaaaa7,
0xaaaaaaaaaaaaaaa7, 0xaaaaaaaaaaaaaaa7,
0xaaaaaaaaaaaaaaa7, 0xaaaaaaaaaaaaaaa7,
0xaaaaaaaaaaaaaaa7, 0xaaaaaaaaaaaaaaa7 },
{ 0xaaaaaaaaaaaaaaa8, 0xaaaaaaaaaaaaaaa8,
0xaaaaaaaaaaaaaaa8, 0xaaaaaaaaaaaaaaa8,
0xaaaaaaaaaaaaaaa8, 0xaaaaaaaaaaaaaaa8,
0xaaaaaaaaaaaaaaa8, 0xaaaaaaaaaaaaaaa8 },
{ 0xaaaaaaaaaaaaaaa9, 0xaaaaaaaaaaaaaaa9,
0xaaaaaaaaaaaaaaa9, 0xaaaaaaaaaaaaaaa9,
0xaaaaaaaaaaaaaaa9, 0xaaaaaaaaaaaaaaa9,
0xaaaaaaaaaaaaaaa9, 0xaaaaaaaaaaaaaaa9 },
{ 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa,
0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa,
0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa,
0xaaaaaaaaaaaaaaaa, 0xaaaaaaaaaaaaaaaa },
{ 0xaaaaaaaaaaaaaaab, 0xaaaaaaaaaaaaaaab,
0xaaaaaaaaaaaaaaab, 0xaaaaaaaaaaaaaaab,
0xaaaaaaaaaaaaaaab, 0xaaaaaaaaaaaaaaab,
0xaaaaaaaaaaaaaaab, 0xaaaaaaaaaaaaaaab },
{ 0xaaaaaaaaaaaaaaac, 0xaaaaaaaaaaaaaaac,
0xaaaaaaaaaaaaaaac, 0xaaaaaaaaaaaaaaac,
0xaaaaaaaaaaaaaaac, 0xaaaaaaaaaaaaaaac,
0xaaaaaaaaaaaaaaac, 0xaaaaaaaaaaaaaaac },
{ 0xaaaaaaaaaaaaaaad, 0xaaaaaaaaaaaaaaad,
0xaaaaaaaaaaaaaaad, 0xaaaaaaaaaaaaaaad,
0xaaaaaaaaaaaaaaad, 0xaaaaaaaaaaaaaaad,
0xaaaaaaaaaaaaaaad, 0xaaaaaaaaaaaaaaad },
{ 0xaaaaaaaaaaaaaaae, 0xaaaaaaaaaaaaaaae,
0xaaaaaaaaaaaaaaae, 0xaaaaaaaaaaaaaaae,
0xaaaaaaaaaaaaaaae, 0xaaaaaaaaaaaaaaae,
0xaaaaaaaaaaaaaaae, 0xaaaaaaaaaaaaaaae },
{ 0xaaaaaaaaaaaaaaaf, 0xaaaaaaaaaaaaaaaf,
0xaaaaaaaaaaaaaaaf, 0xaaaaaaaaaaaaaaaf,
0xaaaaaaaaaaaaaaaf, 0xaaaaaaaaaaaaaaaf,
0xaaaaaaaaaaaaaaaf, 0xaaaaaaaaaaaaaaaf }
};
void bmw512_8way_init( bmw512_8way_context *ctx )
//bmw64_4way_init( bmw_4way_big_context *sc, const sph_u64 *iv )
{
ctx->H[ 0] = m512_const1_64( 0x8081828384858687 );
ctx->H[ 1] = m512_const1_64( 0x88898A8B8C8D8E8F );
ctx->H[ 2] = m512_const1_64( 0x9091929394959697 );
ctx->H[ 3] = m512_const1_64( 0x98999A9B9C9D9E9F );
ctx->H[ 4] = m512_const1_64( 0xA0A1A2A3A4A5A6A7 );
ctx->H[ 5] = m512_const1_64( 0xA8A9AAABACADAEAF );
ctx->H[ 6] = m512_const1_64( 0xB0B1B2B3B4B5B6B7 );
ctx->H[ 7] = m512_const1_64( 0xB8B9BABBBCBDBEBF );
ctx->H[ 8] = m512_const1_64( 0xC0C1C2C3C4C5C6C7 );
ctx->H[ 9] = m512_const1_64( 0xC8C9CACBCCCDCECF );
ctx->H[10] = m512_const1_64( 0xD0D1D2D3D4D5D6D7 );
ctx->H[11] = m512_const1_64( 0xD8D9DADBDCDDDEDF );
ctx->H[12] = m512_const1_64( 0xE0E1E2E3E4E5E6E7 );
ctx->H[13] = m512_const1_64( 0xE8E9EAEBECEDEEEF );
ctx->H[14] = m512_const1_64( 0xF0F1F2F3F4F5F6F7 );
ctx->H[15] = m512_const1_64( 0xF8F9FAFBFCFDFEFF );
ctx->ptr = 0;
ctx->bit_count = 0;
}
void bmw512_8way_update( bmw512_8way_context *ctx, const void *data,
size_t len )
{
__m512i *vdata = (__m512i*)data;
__m512i *buf;
__m512i htmp[16];
__m512i *h1, *h2;
size_t ptr;
const int buf_size = 128; // block size in bytes of one lane, same units as len
ctx->bit_count += len << 3;
buf = ctx->buf;
ptr = ctx->ptr;
h1 = ctx->H;
h2 = htmp;
while ( len > 0 )
{
size_t clen;
clen = buf_size - ptr;
if ( clen > len )
clen = len;
memcpy_512( buf + (ptr>>3), vdata, clen >> 3 );
vdata = vdata + (clen>>3);
len -= clen;
ptr += clen;
if ( ptr == buf_size )
{
__m512i *ht;
compress_big_8way( buf, h1, h2 );
ht = h1;
h1 = h2;
h2 = ht;
ptr = 0;
}
}
ctx->ptr = ptr;
if ( h1 != ctx->H )
memcpy_512( ctx->H, h1, 16 );
}
void bmw512_8way_close( bmw512_8way_context *ctx, void *dst )
{
__m512i *buf;
__m512i h1[16], h2[16], *h;
size_t ptr, u, v;
const int buf_size = 128; // block size in bytes of one lane, same units as len
buf = ctx->buf;
ptr = ctx->ptr;
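// Finalization: append the 0x80 pad, write the bit count into the last
// word and compress, then compress that result as the message with the
// constant final_b8 block as the chaining value; the digest is the
// upper half of the output.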
buf[ ptr>>3 ] = m512_const1_64( 0x80 );
ptr += 8;
h = ctx->H;
if ( ptr > (buf_size - 8) )
{
memset_zero_512( buf + (ptr>>3), (buf_size - ptr) >> 3 );
compress_big_8way( buf, h, h1 );
ptr = 0;
h = h1;
}
memset_zero_512( buf + (ptr>>3), (buf_size - 8 - ptr) >> 3 );
buf[ (buf_size - 8) >> 3 ] = _mm512_set1_epi64( ctx->bit_count );
compress_big_8way( buf, h, h2 );
for ( u = 0; u < 16; u ++ )
buf[ u ] = h2[ u ];
compress_big_8way( buf, final_b8, h1 );
for (u = 0, v = 8; u < 8; u ++, v ++)
casti_m512i( dst, u ) = h1[ v ];
}
#endif // AVX512
#ifdef __cplusplus
}
#endif

View File

@@ -26,6 +26,180 @@ static const uint64_t IV512[] =
0xA5A70E75D65C8A2B, 0xBC796576B1C62456, 0xE7989AF11921C8F7, 0xD43E3B447795D246
};
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
static void transform_4way( cube_4way_context *sp )
{
int r;
const int rounds = sp->rounds;
__m512i x0, x1, x2, x3, x4, x5, x6, x7, y0, y1;
x0 = _mm512_load_si512( (__m512i*)sp->h );
x1 = _mm512_load_si512( (__m512i*)sp->h + 1 );
x2 = _mm512_load_si512( (__m512i*)sp->h + 2 );
x3 = _mm512_load_si512( (__m512i*)sp->h + 3 );
x4 = _mm512_load_si512( (__m512i*)sp->h + 4 );
x5 = _mm512_load_si512( (__m512i*)sp->h + 5 );
x6 = _mm512_load_si512( (__m512i*)sp->h + 6 );
x7 = _mm512_load_si512( (__m512i*)sp->h + 7 );
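// Each iteration is one CubeHash round: add the low registers into the
// high ones, rotate the low words left 7 with a pairwise register
// exchange, xor, swap the 64 bit halves of each 128 bit lane of the
// high registers; then repeat with rotate 11, a different exchange, and
// a 32 bit swap within each 64 bit word.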
for ( r = 0; r < rounds; ++r )
{
x4 = _mm512_add_epi32( x0, x4 );
x5 = _mm512_add_epi32( x1, x5 );
x6 = _mm512_add_epi32( x2, x6 );
x7 = _mm512_add_epi32( x3, x7 );
y0 = x0;
y1 = x1;
x0 = mm512_rol_32( x2, 7 );
x1 = mm512_rol_32( x3, 7 );
x2 = mm512_rol_32( y0, 7 );
x3 = mm512_rol_32( y1, 7 );
x0 = _mm512_xor_si512( x0, x4 );
x1 = _mm512_xor_si512( x1, x5 );
x2 = _mm512_xor_si512( x2, x6 );
x3 = _mm512_xor_si512( x3, x7 );
x4 = mm512_swap64_128( x4 );
x5 = mm512_swap64_128( x5 );
x6 = mm512_swap64_128( x6 );
x7 = mm512_swap64_128( x7 );
x4 = _mm512_add_epi32( x0, x4 );
x5 = _mm512_add_epi32( x1, x5 );
x6 = _mm512_add_epi32( x2, x6 );
x7 = _mm512_add_epi32( x3, x7 );
y0 = x0;
y1 = x2;
x0 = mm512_rol_32( x1, 11 );
x1 = mm512_rol_32( y0, 11 );
x2 = mm512_rol_32( x3, 11 );
x3 = mm512_rol_32( y1, 11 );
x0 = _mm512_xor_si512( x0, x4 );
x1 = _mm512_xor_si512( x1, x5 );
x2 = _mm512_xor_si512( x2, x6 );
x3 = _mm512_xor_si512( x3, x7 );
x4 = mm512_swap32_64( x4 );
x5 = mm512_swap32_64( x5 );
x6 = mm512_swap32_64( x6 );
x7 = mm512_swap32_64( x7 );
}
_mm512_store_si512( (__m512i*)sp->h, x0 );
_mm512_store_si512( (__m512i*)sp->h + 1, x1 );
_mm512_store_si512( (__m512i*)sp->h + 2, x2 );
_mm512_store_si512( (__m512i*)sp->h + 3, x3 );
_mm512_store_si512( (__m512i*)sp->h + 4, x4 );
_mm512_store_si512( (__m512i*)sp->h + 5, x5 );
_mm512_store_si512( (__m512i*)sp->h + 6, x6 );
_mm512_store_si512( (__m512i*)sp->h + 7, x7 );
}
int cube_4way_init( cube_4way_context *sp, int hashbitlen, int rounds,
int blockbytes )
{
__m512i *h = (__m512i*)sp->h;
__m128i *iv = (__m128i*)( hashbitlen == 512 ? (__m128i*)IV512
: (__m128i*)IV256 );
sp->hashlen = hashbitlen/128;
sp->blocksize = blockbytes/16;
sp->rounds = rounds;
sp->pos = 0;
h[ 0] = m512_const1_128( iv[0] );
h[ 1] = m512_const1_128( iv[1] );
h[ 2] = m512_const1_128( iv[2] );
h[ 3] = m512_const1_128( iv[3] );
h[ 4] = m512_const1_128( iv[4] );
h[ 5] = m512_const1_128( iv[5] );
h[ 6] = m512_const1_128( iv[6] );
h[ 7] = m512_const1_128( iv[7] );
return 0;
}
int cube_4way_update( cube_4way_context *sp, const void *data, size_t size )
{
const int len = size >> 4;
const __m512i *in = (__m512i*)data;
int i;
for ( i = 0; i < len; i++ )
{
sp->h[ sp->pos ] = _mm512_xor_si512( sp->h[ sp->pos ], in[i] );
sp->pos++;
if ( sp->pos == sp->blocksize )
{
transform_4way( sp );
sp->pos = 0;
}
}
return 0;
}
int cube_4way_close( cube_4way_context *sp, void *output )
{
__m512i *hash = (__m512i*)output;
int i;
// pos is zero for 64 byte data, 1 for 80 byte data.
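// Xor in the 0x80 pad, transform, flip the final bit in the last state
// word, then run ten more transforms before copying out the hash.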
sp->h[ sp->pos ] = _mm512_xor_si512( sp->h[ sp->pos ],
m512_const2_64( 0, 0x0000000000000080 ) );
transform_4way( sp );
sp->h[7] = _mm512_xor_si512( sp->h[7],
m512_const2_64( 0x0000000100000000, 0 ) );
for ( i = 0; i < 10; ++i )
transform_4way( sp );
memcpy( hash, sp->h, sp->hashlen<<6 );
return 0;
}
int cube_4way_update_close( cube_4way_context *sp, void *output,
const void *data, size_t size )
{
const int len = size >> 4;
const __m512i *in = (__m512i*)data;
__m512i *hash = (__m512i*)output;
int i;
for ( i = 0; i < len; i++ )
{
sp->h[ sp->pos ] = _mm512_xor_si512( sp->h[ sp->pos ], in[i] );
sp->pos++;
if ( sp->pos == sp->blocksize )
{
transform_4way( sp );
sp->pos = 0;
}
}
// pos is zero for 64 byte data, 1 for 80 byte data.
sp->h[ sp->pos ] = _mm512_xor_si512( sp->h[ sp->pos ],
m512_const2_64( 0, 0x0000000000000080 ) );
transform_4way( sp );
sp->h[7] = _mm512_xor_si512( sp->h[7],
m512_const2_64( 0x0000000100000000, 0 ) );
for ( i = 0; i < 10; ++i )
transform_4way( sp );
memcpy( hash, sp->h, sp->hashlen<<6);
return 0;
}
#endif // AVX512
static void transform_2way( cube_2way_context *sp )
{
@@ -91,7 +265,6 @@ static void transform_2way( cube_2way_context *sp )
_mm256_store_si256( (__m256i*)sp->h + 5, x5 );
_mm256_store_si256( (__m256i*)sp->h + 6, x6 );
_mm256_store_si256( (__m256i*)sp->h + 7, x7 );
}
int cube_2way_init( cube_2way_context *sp, int hashbitlen, int rounds,
@@ -132,9 +305,6 @@ int cube_2way_update( cube_2way_context *sp, const void *data, size_t size )
const __m256i *in = (__m256i*)data;
int i;
// It is assumed the data is aligned to 256 bits and its length is a
// multiple of 128 bits. In current usage the data is either 64 or 80 bytes.
for ( i = 0; i < len; i++ )
{
sp->h[ sp->pos ] = _mm256_xor_si256( sp->h[ sp->pos ], in[i] );

View File

@@ -0,0 +1,203 @@
#if defined(__AVX2__)
#include <stdbool.h>
#include <unistd.h>
#include <memory.h>
#include "cube-hash-2way.h"
// 2x128
// The result of hashing 10 rounds of initial data which consists of params
// zero padded.
static const uint64_t IV256[] =
{
0xCCD6F29FEA2BD4B4, 0x35481EAE63117E71, 0xE5D94E6322512D5B, 0xF4CC12BE7E624131,
0x42AF2070C2D0B696, 0x3361DA8CD0720C35, 0x8EF8AD8328CCECA4, 0x40E5FBAB4680AC00,
0x6107FBD5D89041C3, 0xF0B266796C859D41, 0x5FA2560309392549, 0x93CB628565C892FD,
0x9E4B4E602AF2B5AE, 0x85254725774ABFDD, 0x4AB6AAD615815AEB, 0xD6032C0A9CDAF8AF
};
static const uint64_t IV512[] =
{
0x50F494D42AEA2A61, 0x4167D83E2D538B8B, 0xC701CF8C3FEE2313, 0x50AC5695CC39968E,
0xA647A8B34D42C787, 0x825B453797CF0BEF, 0xF22090C4EEF864D2, 0xA23911AED0E5CD33,
0x148FE485FCD398D9, 0xB64445321B017BEF, 0x2FF5781C6A536159, 0x0DBADEA991FA7934,
0xA5A70E75D65C8A2B, 0xBC796576B1C62456, 0xE7989AF11921C8F7, 0xD43E3B447795D246
};
static void transform_2way( cube_2way_context *sp )
{
int r;
const int rounds = sp->rounds;
__m256i x0, x1, x2, x3, x4, x5, x6, x7, y0, y1;
x0 = _mm256_load_si256( (__m256i*)sp->h );
x1 = _mm256_load_si256( (__m256i*)sp->h + 1 );
x2 = _mm256_load_si256( (__m256i*)sp->h + 2 );
x3 = _mm256_load_si256( (__m256i*)sp->h + 3 );
x4 = _mm256_load_si256( (__m256i*)sp->h + 4 );
x5 = _mm256_load_si256( (__m256i*)sp->h + 5 );
x6 = _mm256_load_si256( (__m256i*)sp->h + 6 );
x7 = _mm256_load_si256( (__m256i*)sp->h + 7 );
for ( r = 0; r < rounds; ++r )
{
x4 = _mm256_add_epi32( x0, x4 );
x5 = _mm256_add_epi32( x1, x5 );
x6 = _mm256_add_epi32( x2, x6 );
x7 = _mm256_add_epi32( x3, x7 );
y0 = x0;
y1 = x1;
x0 = mm256_rol_32( x2, 7 );
x1 = mm256_rol_32( x3, 7 );
x2 = mm256_rol_32( y0, 7 );
x3 = mm256_rol_32( y1, 7 );
x0 = _mm256_xor_si256( x0, x4 );
x1 = _mm256_xor_si256( x1, x5 );
x2 = _mm256_xor_si256( x2, x6 );
x3 = _mm256_xor_si256( x3, x7 );
x4 = mm256_swap64_128( x4 );
x5 = mm256_swap64_128( x5 );
x6 = mm256_swap64_128( x6 );
x7 = mm256_swap64_128( x7 );
x4 = _mm256_add_epi32( x0, x4 );
x5 = _mm256_add_epi32( x1, x5 );
x6 = _mm256_add_epi32( x2, x6 );
x7 = _mm256_add_epi32( x3, x7 );
y0 = x0;
y1 = x2;
x0 = mm256_rol_32( x1, 11 );
x1 = mm256_rol_32( y0, 11 );
x2 = mm256_rol_32( x3, 11 );
x3 = mm256_rol_32( y1, 11 );
x0 = _mm256_xor_si256( x0, x4 );
x1 = _mm256_xor_si256( x1, x5 );
x2 = _mm256_xor_si256( x2, x6 );
x3 = _mm256_xor_si256( x3, x7 );
x4 = mm256_swap32_64( x4 );
x5 = mm256_swap32_64( x5 );
x6 = mm256_swap32_64( x6 );
x7 = mm256_swap32_64( x7 );
}
_mm256_store_si256( (__m256i*)sp->h, x0 );
_mm256_store_si256( (__m256i*)sp->h + 1, x1 );
_mm256_store_si256( (__m256i*)sp->h + 2, x2 );
_mm256_store_si256( (__m256i*)sp->h + 3, x3 );
_mm256_store_si256( (__m256i*)sp->h + 4, x4 );
_mm256_store_si256( (__m256i*)sp->h + 5, x5 );
_mm256_store_si256( (__m256i*)sp->h + 6, x6 );
_mm256_store_si256( (__m256i*)sp->h + 7, x7 );
}
int cube_2way_init( cube_2way_context *sp, int hashbitlen, int rounds,
int blockbytes )
{
__m256i *h = (__m256i*)sp->h;
__m128i *iv = (__m128i*)( hashbitlen == 512 ? (__m128i*)IV512
: (__m128i*)IV256 );
sp->hashlen = hashbitlen/128;
sp->blocksize = blockbytes/16;
sp->rounds = rounds;
sp->pos = 0;
h[ 0] = m256_const1_128( iv[0] );
h[ 1] = m256_const1_128( iv[1] );
h[ 2] = m256_const1_128( iv[2] );
h[ 3] = m256_const1_128( iv[3] );
h[ 4] = m256_const1_128( iv[4] );
h[ 5] = m256_const1_128( iv[5] );
h[ 6] = m256_const1_128( iv[6] );
h[ 7] = m256_const1_128( iv[7] );
return 0;
}
int cube_2way_update( cube_2way_context *sp, const void *data, size_t size )
{
const int len = size >> 4;
const __m256i *in = (__m256i*)data;
int i;
// It is assumed data is aligned to 256 bits and is a multiple of 128 bits.
// In current usage data is either 64 or 80 bytes.
for ( i = 0; i < len; i++ )
{
sp->h[ sp->pos ] = _mm256_xor_si256( sp->h[ sp->pos ], in[i] );
sp->pos++;
if ( sp->pos == sp->blocksize )
{
transform_2way( sp );
sp->pos = 0;
}
}
return 0;
}
int cube_2way_close( cube_2way_context *sp, void *output )
{
__m256i *hash = (__m256i*)output;
int i;
// pos is zero for 64 byte data, 1 for 80 byte data.
sp->h[ sp->pos ] = _mm256_xor_si256( sp->h[ sp->pos ],
m256_const2_64( 0, 0x0000000000000080 ) );
transform_2way( sp );
sp->h[7] = _mm256_xor_si256( sp->h[7],
m256_const2_64( 0x0000000100000000, 0 ) );
for ( i = 0; i < 10; ++i ) transform_2way( sp );
memcpy( hash, sp->h, sp->hashlen<<5 );
return 0;
}
int cube_2way_update_close( cube_2way_context *sp, void *output,
const void *data, size_t size )
{
const int len = size >> 4;
const __m256i *in = (__m256i*)data;
__m256i *hash = (__m256i*)output;
int i;
for ( i = 0; i < len; i++ )
{
sp->h[ sp->pos ] = _mm256_xor_si256( sp->h[ sp->pos ], in[i] );
sp->pos++;
if ( sp->pos == sp->blocksize )
{
transform_2way( sp );
sp->pos = 0;
}
}
// pos is zero for 64 byte data, 1 for 80 byte data.
sp->h[ sp->pos ] = _mm256_xor_si256( sp->h[ sp->pos ],
m256_const2_64( 0, 0x0000000000000080 ) );
transform_2way( sp );
sp->h[7] = _mm256_xor_si256( sp->h[7],
m256_const2_64( 0x0000000100000000, 0 ) );
for ( i = 0; i < 10; ++i ) transform_2way( sp );
memcpy( hash, sp->h, sp->hashlen<<5 );
return 0;
}
#endif

View File

@@ -1,11 +1,38 @@
#ifndef CUBE_HASH_2WAY_H__
#define CUBE_HASH_2WAY_H__
#if defined(__AVX2__)
#define CUBE_HASH_2WAY_H__ 1
#include <stdint.h>
#include "simd-utils.h"
#if defined(__AVX2__)
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
struct _cube_4way_context
{
__m512i h[8];
int hashlen;
int rounds;
int blocksize;
int pos;
} __attribute__ ((aligned (128)));
typedef struct _cube_4way_context cube_4way_context;
int cube_4way_init( cube_4way_context* sp, int hashbitlen, int rounds,
int blockbytes );
// reinitialize context with same parameters, much faster.
int cube_4way_reinit( cube_4way_context *sp );
int cube_4way_update( cube_4way_context *sp, const void *data, size_t size );
int cube_4way_close( cube_4way_context *sp, void *output );
int cube_4way_update_close( cube_4way_context *sp, void *output,
const void *data, size_t size );
#endif
// 2x128, 2 way parallel SSE2
struct _cube_2way_context
@@ -15,7 +42,7 @@ struct _cube_2way_context
int rounds;
int blocksize; // __m128i
int pos; // number of __m128i read into x from current block
} __attribute__ ((aligned (64)));
} __attribute__ ((aligned (128)));
typedef struct _cube_2way_context cube_2way_context;

View File

@@ -0,0 +1,36 @@
#ifndef CUBE_HASH_2WAY_H__
#define CUBE_HASH_2WAY_H__
#if defined(__AVX2__)
#include <stdint.h>
#include "simd-utils.h"
// 2x128, 2 way parallel SSE2
struct _cube_2way_context
{
__m256i h[8];
int hashlen; // __m128i
int rounds;
int blocksize; // __m128i
int pos; // number of __m128i read into x from current block
} __attribute__ ((aligned (64)));
typedef struct _cube_2way_context cube_2way_context;
int cube_2way_init( cube_2way_context* sp, int hashbitlen, int rounds,
int blockbytes );
// reinitialize context with same parameters, much faster.
int cube_2way_reinit( cube_2way_context *sp );
int cube_2way_update( cube_2way_context *sp, const void *data, size_t size );
int cube_2way_close( cube_2way_context *sp, void *output );
int cube_2way_update_close( cube_2way_context *sp, void *output,
const void *data, size_t size );
#endif
#endif

View File

@@ -92,6 +92,38 @@ extern "C"{
#endif
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define Sb_8W(x0, x1, x2, x3, c) \
do { \
__m512i cc = _mm512_set1_epi64( c ); \
x3 = mm512_not( x3 ); \
x0 = _mm512_xor_si512( x0, _mm512_andnot_si512( x2, cc ) ); \
tmp = _mm512_xor_si512( cc, _mm512_and_si512( x0, x1 ) ); \
x0 = _mm512_xor_si512( x0, _mm512_and_si512( x2, x3 ) ); \
x3 = _mm512_xor_si512( x3, _mm512_andnot_si512( x1, x2 ) ); \
x1 = _mm512_xor_si512( x1, _mm512_and_si512( x0, x2 ) ); \
x2 = _mm512_xor_si512( x2, _mm512_andnot_si512( x3, x0 ) ); \
x0 = _mm512_xor_si512( x0, _mm512_or_si512( x1, x3 ) ); \
x3 = _mm512_xor_si512( x3, _mm512_and_si512( x1, x2 ) ); \
x1 = _mm512_xor_si512( x1, _mm512_and_si512( tmp, x0 ) ); \
x2 = _mm512_xor_si512( x2, tmp ); \
} while (0)
#define Lb_8W(x0, x1, x2, x3, x4, x5, x6, x7) \
do { \
x4 = _mm512_xor_si512( x4, x1 ); \
x5 = _mm512_xor_si512( x5, x2 ); \
x6 = _mm512_xor_si512( x6, _mm512_xor_si512( x3, x0 ) ); \
x7 = _mm512_xor_si512( x7, x0 ); \
x0 = _mm512_xor_si512( x0, x5 ); \
x1 = _mm512_xor_si512( x1, x6 ); \
x2 = _mm512_xor_si512( x2, _mm512_xor_si512( x7, x4 ) ); \
x3 = _mm512_xor_si512( x3, x4 ); \
} while (0)
#endif
#define Sb(x0, x1, x2, x3, c) \
do { \
__m256i cc = _mm256_set1_epi64x( c ); \
@@ -226,6 +258,48 @@ static const sph_u64 C[] = {
x4 ## l, x5 ## l, x6 ## l, x7 ## l); \
} while (0)
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define S_8W(x0, x1, x2, x3, cb, r) do { \
Sb_8W(x0 ## h, x1 ## h, x2 ## h, x3 ## h, cb ## hi(r)); \
Sb_8W(x0 ## l, x1 ## l, x2 ## l, x3 ## l, cb ## lo(r)); \
} while (0)
#define L_8W(x0, x1, x2, x3, x4, x5, x6, x7) do { \
Lb_8W(x0 ## h, x1 ## h, x2 ## h, x3 ## h, \
x4 ## h, x5 ## h, x6 ## h, x7 ## h); \
Lb_8W(x0 ## l, x1 ## l, x2 ## l, x3 ## l, \
x4 ## l, x5 ## l, x6 ## l, x7 ## l); \
} while (0)
#define Wz_8W(x, c, n) \
do { \
__m512i t = _mm512_slli_epi64( _mm512_and_si512(x ## h, (c)), (n) ); \
x ## h = _mm512_or_si512( _mm512_and_si512( \
_mm512_srli_epi64(x ## h, (n)), (c)), t ); \
t = _mm512_slli_epi64( _mm512_and_si512(x ## l, (c)), (n) ); \
x ## l = _mm512_or_si512( _mm512_and_si512( \
_mm512_srli_epi64(x ## l, (n)), (c)), t ); \
} while (0)
#define W80(x) Wz_8W(x, m512_const1_64( 0x5555555555555555 ), 1 )
#define W81(x) Wz_8W(x, m512_const1_64( 0x3333333333333333 ), 2 )
#define W82(x) Wz_8W(x, m512_const1_64( 0x0F0F0F0F0F0F0F0F ), 4 )
#define W83(x) Wz_8W(x, m512_const1_64( 0x00FF00FF00FF00FF ), 8 )
#define W84(x) Wz_8W(x, m512_const1_64( 0x0000FFFF0000FFFF ), 16 )
#define W85(x) Wz_8W(x, m512_const1_64( 0x00000000FFFFFFFF ), 32 )
#define W86(x) \
do { \
__m512i t = x ## h; \
x ## h = x ## l; \
x ## l = t; \
} while (0)
#define DECL_STATE_8W \
__m512i h0h, h1h, h2h, h3h, h4h, h5h, h6h, h7h; \
__m512i h0l, h1l, h2l, h3l, h4l, h5l, h6l, h7l; \
__m512i tmp;
#endif
#define Wz(x, c, n) \
do { \
@@ -236,16 +310,6 @@ do { \
x ## l = _mm256_or_si256( _mm256_and_si256( \
_mm256_srli_epi64(x ## l, (n)), (c)), t ); \
} while (0)
/*
#define Wz(x, c, n) do { \
sph_u64 t = (x ## h & (c)) << (n); \
x ## h = ((x ## h >> (n)) & (c)) | t; \
t = (x ## l & (c)) << (n); \
x ## l = ((x ## l >> (n)) & (c)) | t; \
} while (0)
*/
#define W0(x) Wz(x, m256_const1_64( 0x5555555555555555 ), 1 )
#define W1(x) Wz(x, m256_const1_64( 0x3333333333333333 ), 2 )
#define W2(x) Wz(x, m256_const1_64( 0x0F0F0F0F0F0F0F0F ), 4 )
@@ -259,25 +323,12 @@ do { \
x ## l = t; \
} while (0)
/*
#define W0(x) Wz(x, SPH_C64(0x5555555555555555), 1)
#define W1(x) Wz(x, SPH_C64(0x3333333333333333), 2)
#define W2(x) Wz(x, SPH_C64(0x0F0F0F0F0F0F0F0F), 4)
#define W3(x) Wz(x, SPH_C64(0x00FF00FF00FF00FF), 8)
#define W4(x) Wz(x, SPH_C64(0x0000FFFF0000FFFF), 16)
#define W5(x) Wz(x, SPH_C64(0x00000000FFFFFFFF), 32)
#define W6(x) do { \
sph_u64 t = x ## h; \
x ## h = x ## l; \
x ## l = t; \
} while (0)
*/
#define DECL_STATE \
__m256i h0h, h1h, h2h, h3h, h4h, h5h, h6h, h7h; \
__m256i h0l, h1l, h2l, h3l, h4l, h5l, h6l, h7l; \
__m256i tmp;
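The commented-out scalar Wz above spells out what the vector version does: swap each pair of adjacent n-bit groups selected by mask c, so W0 exchanges even and odd bits, W1 exchanges 2-bit pairs, up through W6, which swaps the two 64-bit halves. A scalar sketch that can be checked by hand (illustrative, not part of the source):
#include <stdint.h>
#include <stdio.h>
// Scalar model of the Wz macro: swap adjacent n-bit groups selected by c.
static uint64_t wz( uint64_t x, uint64_t c, int n )
{
   uint64_t t = (x & c) << n;
   return ((x >> n) & c) | t;
}
int main()
{
   // W0 swaps even/odd bits: 0x6 (0110) becomes 0x9 (1001).
   printf( "%llx\n", (unsigned long long)
           wz( 0x6, 0x5555555555555555ULL, 1 ) );
   return 0;
}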
#define READ_STATE(state) do { \
h0h = (state)->H[ 0]; \
h0l = (state)->H[ 1]; \
@@ -316,6 +367,38 @@ do { \
(state)->H[15] = h7l; \
} while (0)
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define INPUT_BUF1_8W \
__m512i m0h = buf[0]; \
__m512i m0l = buf[1]; \
__m512i m1h = buf[2]; \
__m512i m1l = buf[3]; \
__m512i m2h = buf[4]; \
__m512i m2l = buf[5]; \
__m512i m3h = buf[6]; \
__m512i m3l = buf[7]; \
h0h = _mm512_xor_si512( h0h, m0h ); \
h0l = _mm512_xor_si512( h0l, m0l ); \
h1h = _mm512_xor_si512( h1h, m1h ); \
h1l = _mm512_xor_si512( h1l, m1l ); \
h2h = _mm512_xor_si512( h2h, m2h ); \
h2l = _mm512_xor_si512( h2l, m2l ); \
h3h = _mm512_xor_si512( h3h, m3h ); \
h3l = _mm512_xor_si512( h3l, m3l ); \
#define INPUT_BUF2_8W \
h4h = _mm512_xor_si512( h4h, m0h ); \
h4l = _mm512_xor_si512( h4l, m0l ); \
h5h = _mm512_xor_si512( h5h, m1h ); \
h5l = _mm512_xor_si512( h5l, m1l ); \
h6h = _mm512_xor_si512( h6h, m2h ); \
h6l = _mm512_xor_si512( h6l, m2l ); \
h7h = _mm512_xor_si512( h7h, m3h ); \
h7l = _mm512_xor_si512( h7l, m3l ); \
#endif
#define INPUT_BUF1 \
__m256i m0h = buf[0]; \
__m256i m0l = buf[1]; \
@@ -344,6 +427,7 @@ do { \
h7h = _mm256_xor_si256( h7h, m3h ); \
h7l = _mm256_xor_si256( h7l, m3l ); \
static const sph_u64 IV256[] = {
C64e(0xeb98a3412c20d3eb), C64e(0x92cdbe7b9cb245c1),
C64e(0x1c93519160d4c7fa), C64e(0x260082d67e508a03),
@@ -370,6 +454,22 @@ static const sph_u64 IV512[] = {
#else
#endif
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define SL_8W(ro) SLu_8W(r + ro, ro)
#define SLu_8W(r, ro) do { \
S_8W(h0, h2, h4, h6, Ceven_, r); \
S_8W(h1, h3, h5, h7, Codd_, r); \
L_8W(h0, h2, h4, h6, h1, h3, h5, h7); \
W8 ## ro(h1); \
W8 ## ro(h3); \
W8 ## ro(h5); \
W8 ## ro(h7); \
} while (0)
#endif
#define SL(ro) SLu(r + ro, ro)
@@ -393,6 +493,23 @@ static const sph_u64 IV512[] = {
* loop.
*/
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define E8_8W do { \
unsigned r; \
for (r = 0; r < 42; r += 7) { \
SL_8W(0); \
SL_8W(1); \
SL_8W(2); \
SL_8W(3); \
SL_8W(4); \
SL_8W(5); \
SL_8W(6); \
} \
} while (0)
#endif
#define E8 do { \
unsigned r; \
for (r = 0; r < 42; r += 7) { \
@@ -419,6 +536,55 @@ static const sph_u64 IV512[] = {
* On a "true 64-bit" architecture, we can unroll at will.
*/
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define E8_8W do { \
SLu_8W( 0, 0); \
SLu_8W( 1, 1); \
SLu_8W( 2, 2); \
SLu_8W( 3, 3); \
SLu_8W( 4, 4); \
SLu_8W( 5, 5); \
SLu_8W( 6, 6); \
SLu_8W( 7, 0); \
SLu_8W( 8, 1); \
SLu_8W( 9, 2); \
SLu_8W(10, 3); \
SLu_8W(11, 4); \
SLu_8W(12, 5); \
SLu_8W(13, 6); \
SLu_8W(14, 0); \
SLu_8W(15, 1); \
SLu_8W(16, 2); \
SLu_8W(17, 3); \
SLu_8W(18, 4); \
SLu_8W(19, 5); \
SLu_8W(20, 6); \
SLu_8W(21, 0); \
SLu_8W(22, 1); \
SLu_8W(23, 2); \
SLu_8W(24, 3); \
SLu_8W(25, 4); \
SLu_8W(26, 5); \
SLu_8W(27, 6); \
SLu_8W(28, 0); \
SLu_8W(29, 1); \
SLu_8W(30, 2); \
SLu_8W(31, 3); \
SLu_8W(32, 4); \
SLu_8W(33, 5); \
SLu_8W(34, 6); \
SLu_8W(35, 0); \
SLu_8W(36, 1); \
SLu_8W(37, 2); \
SLu_8W(38, 3); \
SLu_8W(39, 4); \
SLu_8W(40, 5); \
SLu_8W(41, 6); \
} while (0)
#endif // AVX512
#define E8 do { \
SLu( 0, 0); \
SLu( 1, 1); \
@@ -471,6 +637,158 @@ static const sph_u64 IV512[] = {
#endif
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
void jh256_8way_init( jh_8way_context *sc )
{
// bswapped IV256
sc->H[ 0] = m512_const1_64( 0xebd3202c41a398eb );
sc->H[ 1] = m512_const1_64( 0xc145b29c7bbecd92 );
sc->H[ 2] = m512_const1_64( 0xfac7d4609151931c );
sc->H[ 3] = m512_const1_64( 0x038a507ed6820026 );
sc->H[ 4] = m512_const1_64( 0x45b92677269e23a4 );
sc->H[ 5] = m512_const1_64( 0x77941ad4481afbe0 );
sc->H[ 6] = m512_const1_64( 0x7a176b0226abb5cd );
sc->H[ 7] = m512_const1_64( 0xa82fff0f4224f056 );
sc->H[ 8] = m512_const1_64( 0x754d2e7f8996a371 );
sc->H[ 9] = m512_const1_64( 0x62e27df70849141d );
sc->H[10] = m512_const1_64( 0x948f2476f7957627 );
sc->H[11] = m512_const1_64( 0x6c29804757b6d587 );
sc->H[12] = m512_const1_64( 0x6c0d8eac2d275e5c );
sc->H[13] = m512_const1_64( 0x0f7a0557c6508451 );
sc->H[14] = m512_const1_64( 0xea12247067d3e47b );
sc->H[15] = m512_const1_64( 0x69d71cd313abe389 );
sc->ptr = 0;
sc->block_count = 0;
}
void jh512_8way_init( jh_8way_context *sc )
{
// bswapped IV512
sc->H[ 0] = m512_const1_64( 0x17aa003e964bd16f );
sc->H[ 1] = m512_const1_64( 0x43d5157a052e6a63 );
sc->H[ 2] = m512_const1_64( 0x0bef970c8d5e228a );
sc->H[ 3] = m512_const1_64( 0x61c3b3f2591234e9 );
sc->H[ 4] = m512_const1_64( 0x1e806f53c1a01d89 );
sc->H[ 5] = m512_const1_64( 0x806d2bea6b05a92a );
sc->H[ 6] = m512_const1_64( 0xa6ba7520dbcc8e58 );
sc->H[ 7] = m512_const1_64( 0xf73bf8ba763a0fa9 );
sc->H[ 8] = m512_const1_64( 0x694ae34105e66901 );
sc->H[ 9] = m512_const1_64( 0x5ae66f2e8e8ab546 );
sc->H[10] = m512_const1_64( 0x243c84c1d0a74710 );
sc->H[11] = m512_const1_64( 0x99c15a2db1716e3b );
sc->H[12] = m512_const1_64( 0x56f8b19decf657cf );
sc->H[13] = m512_const1_64( 0x56b116577c8806a7 );
sc->H[14] = m512_const1_64( 0xfb1785e6dffcc2e3 );
sc->H[15] = m512_const1_64( 0x4bdd8ccc78465a54 );
sc->ptr = 0;
sc->block_count = 0;
}
static void
jh_8way_core( jh_8way_context *sc, const void *data, size_t len )
{
__m512i *buf;
__m512i *vdata = (__m512i*)data;
const int buf_size = 64; // bytes per lane, buffered as 8 __m512i
size_t ptr;
DECL_STATE_8W
buf = sc->buf;
ptr = sc->ptr;
if ( len < (buf_size - ptr) )
{
memcpy_512( buf + (ptr>>3), vdata, len>>3 );
ptr += len;
sc->ptr = ptr;
return;
}
READ_STATE(sc);
while ( len > 0 )
{
size_t clen;
clen = buf_size - ptr;
if ( clen > len )
clen = len;
memcpy_512( buf + (ptr>>3), vdata, clen>>3 );
ptr += clen;
vdata += (clen>>3);
len -= clen;
if ( ptr == buf_size )
{
INPUT_BUF1_8W;
E8_8W;
INPUT_BUF2_8W;
sc->block_count ++;
ptr = 0;
}
}
WRITE_STATE(sc);
sc->ptr = ptr;
}
static void
jh_8way_close( jh_8way_context *sc, unsigned ub, unsigned n, void *dst,
size_t out_size_w32, const void *iv )
{
__m512i buf[16*4];
__m512i *dst512 = (__m512i*)dst;
size_t numz, u;
sph_u64 l0, l1, l0e, l1e;
buf[0] = m512_const1_64( 0x80ULL );
if ( sc->ptr == 0 )
numz = 48;
else
numz = 112 - sc->ptr;
memset_zero_512( buf+1, (numz>>3) - 1 );
l0 = SPH_T64(sc->block_count << 9) + (sc->ptr << 3);
l1 = SPH_T64(sc->block_count >> 55);
sph_enc64be( &l0e, l0 );
sph_enc64be( &l1e, l1 );
*(buf + (numz>>3) ) = _mm512_set1_epi64( l1e );
*(buf + (numz>>3) + 1) = _mm512_set1_epi64( l0e );
jh_8way_core( sc, buf, numz + 16 );
for ( u=0; u < 8; u++ )
buf[u] = sc->H[u+8];
memcpy_512( dst512, buf, 8 );
}
void
jh256_8way_update(void *cc, const void *data, size_t len)
{
jh_8way_core(cc, data, len);
}
void
jh256_8way_close(void *cc, void *dst)
{
jh_8way_close(cc, 0, 0, dst, 8, IV256);
}
void
jh512_8way_update(void *cc, const void *data, size_t len)
{
jh_8way_core(cc, data, len);
}
void
jh512_8way_close(void *cc, void *dst)
{
jh_8way_close(cc, 0, 0, dst, 16, IV512);
}
#endif
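The close routine pads exactly like the scalar JH reference: a 0x80 byte, zeros to 16 bytes short of a block boundary, then the message bit length as a big-endian 128-bit value. For the 80-byte block header case handled by this miner the numbers work out as below (an illustrative sketch of the arithmetic only, not part of the source):
#include <stdio.h>
#include <stdint.h>
// Padding arithmetic of jh_8way_close for an 80-byte message:
// after one 64-byte block, block_count = 1 and ptr = 16 per lane.
int main()
{
   uint64_t block_count = 1;
   size_t   ptr  = 16;
   size_t   numz = ptr ? 112 - ptr : 48;           // 96 pad bytes, first 0x80
   uint64_t l0 = (block_count << 9) + (ptr << 3);  // 640 bits = 80 bytes
   uint64_t l1 = block_count >> 55;                // 0
   // 16 buffered + 96 pad + 16 length bytes = 128 = exactly two more blocks.
   printf( "numz=%zu l0=%llu l1=%llu\n", numz,
           (unsigned long long)l0, (unsigned long long)l1 );
   return 0;
}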
void jh256_4way_init( jh_4way_context *sc )
{
// bswapped IV256
@@ -595,16 +913,8 @@ jh_4way_close( jh_4way_context *sc, unsigned ub, unsigned n, void *dst,
memcpy_256( dst256, buf, 8 );
}
/*
void
jh256_4way_init(void *cc)
{
jhs_4way_init(cc, IV256);
}
*/
void
jh256_4way(void *cc, const void *data, size_t len)
jh256_4way_update(void *cc, const void *data, size_t len)
{
jh_4way_core(cc, data, len);
}
@@ -615,16 +925,8 @@ jh256_4way_close(void *cc, void *dst)
jh_4way_close(cc, 0, 0, dst, 8, IV256);
}
/*
void
jh512_4way_init(void *cc)
{
jhb_4way_init(cc, IV512);
}
*/
void
jh512_4way(void *cc, const void *data, size_t len)
jh512_4way_update(void *cc, const void *data, size_t len)
{
jh_4way_core(cc, data, len);
}
@@ -635,6 +937,7 @@ jh512_4way_close(void *cc, void *dst)
jh_4way_close(cc, 0, 0, dst, 16, IV512);
}
#ifdef __cplusplus
}
#endif

View File

@@ -60,20 +60,41 @@ extern "C"{
* can be cloned by copying the context (e.g. with a simple
* <code>memcpy()</code>).
*/
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
typedef struct {
__m256i buf[8] __attribute__ ((aligned (64)));
__m512i buf[8];
__m512i H[16];
size_t ptr;
uint64_t block_count;
} jh_8way_context __attribute__ ((aligned (128)));
typedef jh_8way_context jh256_8way_context;
typedef jh_8way_context jh512_8way_context;
void jh256_8way_init( jh_8way_context *sc);
void jh256_8way_update(void *cc, const void *data, size_t len);
void jh256_8way_close(void *cc, void *dst);
void jh512_8way_init( jh_8way_context *sc );
void jh512_8way_update(void *cc, const void *data, size_t len);
void jh512_8way_close(void *cc, void *dst);
#endif
typedef struct {
__m256i buf[8];
__m256i H[16];
size_t ptr;
uint64_t block_count;
/*
unsigned char buf[64];
size_t ptr;
union {
sph_u64 wide[16];
} H;
sph_u64 block_count;
*/
} jh_4way_context;
} jh_4way_context __attribute__ ((aligned (128)));
typedef jh_4way_context jh256_4way_context;
@@ -81,13 +102,15 @@ typedef jh_4way_context jh512_4way_context;
void jh256_4way_init( jh_4way_context *sc);
void jh256_4way(void *cc, const void *data, size_t len);
void jh256_4way_update(void *cc, const void *data, size_t len);
#define jh256_4way jh256_4way_update
void jh256_4way_close(void *cc, void *dst);
void jh512_4way_init( jh_4way_context *sc );
void jh512_4way(void *cc, const void *data, size_t len);
void jh512_4way_update(void *cc, const void *data, size_t len);
#define jh512_4way jh512_4way_update
void jh512_4way_close(void *cc, void *dst);
@@ -95,6 +118,6 @@ void jh512_4way_close(void *cc, void *dst);
}
#endif
#endif
#endif // AVX2
#endif

View File

@@ -1,18 +1,68 @@
#include "keccak-gate.h"
#ifdef KECCAK_4WAY
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "sph_keccak.h"
#include "keccak-hash-4way.h"
#if defined(KECCAK_8WAY)
void keccakhash_8way(void *state, const void *input)
{
keccak256_8way_context ctx;
keccak256_8way_init( &ctx );
keccak256_8way_update( &ctx, input, 80 );
keccak256_8way_close( &ctx, state );
}
int scanhash_keccak_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[24*8] __attribute__ ((aligned (128)));
uint32_t hash[16*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[49]); // 3*16+1
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
__m512i *noncev = (__m512i*)vdata + 9; // aligned
const uint32_t Htarg = ptarget[7];
int thr_id = mythr->id;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
do {
*noncev = mm512_intrlv_blend_32( mm512_bswap_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ) ), *noncev );
keccakhash_8way( hash, vdata );
for ( int lane = 0; lane < 8; lane++ )
if ( hash7[ lane<<1 ] < Htarg )
{
extr_lane_8x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 8;
} while ( (n < max_nonce-8) && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
return 0;
}
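The hash7 pointer relies on the 8x64 interleaved hash layout: 64-bit word w of lane l sits at 64-bit index w*8 + l, so the word holding 32-bit words 6 and 7 of lane 0 is at 32-bit index 48, and the high (word 7) half is one above it, hence hash[49] and the lane<<1 stride. A sketch of the index math (standalone and illustrative; assumes the miner's little-endian word halves):
#include <stdio.h>
// 32-bit index of hash word 7 of a lane in an 8x64 interleaved hash[16*8].
static int hash7_index( int lane )
{
   int w64  = 7 / 2;               // 64-bit word 3 holds 32-bit words 6 and 7
   int base = w64 * 8 * 2;         // 48: 32-bit index of word 6, lane 0
   return base + 1 + (lane << 1);  // +1 selects the high (word 7) half
}
int main()
{
   for ( int lane = 0; lane < 8; lane++ )
      printf( "lane %d -> hash[%d]\n", lane, hash7_index( lane ) );
   // lane 0 -> hash[49], matching hash7 = &(hash[49]) and hash7[ lane<<1 ].
   return 0;
}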
#elif defined(KECCAK_4WAY)
void keccakhash_4way(void *state, const void *input)
{
keccak256_4way_context ctx;
keccak256_4way_init( &ctx );
keccak256_4way( &ctx, input, 80 );
keccak256_4way_update( &ctx, input, 80 );
keccak256_4way_close( &ctx, state );
}
@@ -28,8 +78,8 @@ int scanhash_keccak_4way( struct work *work, uint32_t max_nonce,
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 9; // aligned
// const uint32_t Htarg = ptarget[7];
int thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t Htarg = ptarget[7];
int thr_id = mythr->id;
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do {
@@ -39,7 +89,7 @@ int scanhash_keccak_4way( struct work *work, uint32_t max_nonce,
keccakhash_4way( hash, vdata );
for ( int lane = 0; lane < 4; lane++ )
if ( ( hash7[ lane<<1 ] & 0xFFFFFF00 ) == 0 )
if ( hash7[ lane<<1 ] < Htarg )
{
extr_lane_4x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )

View File

@@ -3,10 +3,13 @@
bool register_keccak_algo( algo_gate_t* gate )
{
gate->optimizations = AVX2_OPT;
gate->optimizations = AVX2_OPT | AVX512_OPT;
gate->gen_merkle_root = (void*)&SHA256_gen_merkle_root;
opt_target_factor = 128.0;
#if defined (KECCAK_4WAY)
#if defined (KECCAK_8WAY)
gate->scanhash = (void*)&scanhash_keccak_8way;
gate->hash = (void*)&keccakhash_8way;
#elif defined (KECCAK_4WAY)
gate->scanhash = (void*)&scanhash_keccak_4way;
gate->hash = (void*)&keccakhash_4way;
#else
@@ -18,10 +21,13 @@ bool register_keccak_algo( algo_gate_t* gate )
bool register_keccakc_algo( algo_gate_t* gate )
{
gate->optimizations = AVX2_OPT;
gate->optimizations = AVX2_OPT | AVX512_OPT;
gate->gen_merkle_root = (void*)&sha256d_gen_merkle_root;
opt_target_factor = 256.0;
#if defined (KECCAK_4WAY)
#if defined (KECCAK_8WAY)
gate->scanhash = (void*)&scanhash_keccak_8way;
gate->hash = (void*)&keccakhash_8way;
#elif defined (KECCAK_4WAY)
gate->scanhash = (void*)&scanhash_keccak_4way;
gate->hash = (void*)&keccakhash_4way;
#else

View File

@@ -1,23 +1,33 @@
#ifndef KECCAK_GATE_H__
#define KECCAK_GATE_H__
#define KECCAK_GATE_H__ 1
#include "algo-gate-api.h"
#include <stdint.h>
#if defined(__AVX2__)
#define KECCAK_4WAY
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define KECCAK_8WAY 1
#elif defined(__AVX2__)
#define KECCAK_4WAY 1
#endif
#if defined(KECCAK_4WAY)
#if defined(KECCAK_8WAY)
void keccakhash_8way( void *state, const void *input );
int scanhash_keccak_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined(KECCAK_4WAY)
void keccakhash_4way( void *state, const void *input );
int scanhash_keccak_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
#else
void keccakhash( void *state, const void *input );
int scanhash_keccak( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
#endif

View File

@@ -1,23 +1,24 @@
#include <stddef.h>
#include <stdint.h>
#include "keccak-hash-4way.h"
#if defined(__AVX2__)
static const sph_u64 RC[] = {
SPH_C64(0x0000000000000001), SPH_C64(0x0000000000008082),
SPH_C64(0x800000000000808A), SPH_C64(0x8000000080008000),
SPH_C64(0x000000000000808B), SPH_C64(0x0000000080000001),
SPH_C64(0x8000000080008081), SPH_C64(0x8000000000008009),
SPH_C64(0x000000000000008A), SPH_C64(0x0000000000000088),
SPH_C64(0x0000000080008009), SPH_C64(0x000000008000000A),
SPH_C64(0x000000008000808B), SPH_C64(0x800000000000008B),
SPH_C64(0x8000000000008089), SPH_C64(0x8000000000008003),
SPH_C64(0x8000000000008002), SPH_C64(0x8000000000000080),
SPH_C64(0x000000000000800A), SPH_C64(0x800000008000000A),
SPH_C64(0x8000000080008081), SPH_C64(0x8000000000008080),
SPH_C64(0x0000000080000001), SPH_C64(0x8000000080008008)
static const uint64_t RC[] = {
0x0000000000000001, 0x0000000000008082,
0x800000000000808A, 0x8000000080008000,
0x000000000000808B, 0x0000000080000001,
0x8000000080008081, 0x8000000000008009,
0x000000000000008A, 0x0000000000000088,
0x0000000080008009, 0x000000008000000A,
0x000000008000808B, 0x800000000000008B,
0x8000000000008089, 0x8000000000008003,
0x8000000000008002, 0x8000000000000080,
0x000000000000800A, 0x800000008000000A,
0x8000000080008081, 0x8000000000008080,
0x0000000080000001, 0x8000000080008008
};
// generic macros
#define a00 (kc->w[ 0])
#define a10 (kc->w[ 1])
#define a20 (kc->w[ 2])
@@ -48,6 +49,197 @@ static const sph_u64 RC[] = {
#define READ_STATE(sc)
#define WRITE_STATE(sc)
#define MOV64(d, s) (d = s)
#define XOR64_IOTA XOR64
#define LPAR (
#define RPAR )
#define DO(x) x
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define INPUT_BUF(size) do { \
size_t j; \
for (j = 0; j < (size>>3); j++ ) \
kc->w[j ] = _mm512_xor_si512( kc->w[j], buf[j] ); \
} while (0)
// Targeted macros, keccak-macros.c is included for each target.
#define DECL64(x) __m512i x
#define XOR64(d, a, b) (d = _mm512_xor_si512(a,b))
#define AND64(d, a, b) (d = _mm512_and_si512(a,b))
#define OR64(d, a, b) (d = _mm512_or_si512(a,b))
#define NOT64(d, s) (d = _mm512_xor_si512(s,m512_neg1))
#define ROL64(d, v, n) (d = mm512_rol_64(v, n))
#include "keccak-macros.c"
#define KECCAK_F_1600 DO(KECCAK_F_1600_512)
#define KECCAK_F_1600_512 do { \
int j; \
for (j = 0; j < 24; j += 8) \
{ \
KF_ELT( 0, 1, _mm512_set1_epi64( RC[j + 0] ) ); \
KF_ELT( 1, 2, _mm512_set1_epi64( RC[j + 1] ) ); \
KF_ELT( 2, 3, _mm512_set1_epi64( RC[j + 2] ) ); \
KF_ELT( 3, 4, _mm512_set1_epi64( RC[j + 3] ) ); \
KF_ELT( 4, 5, _mm512_set1_epi64( RC[j + 4] ) ); \
KF_ELT( 5, 6, _mm512_set1_epi64( RC[j + 5] ) ); \
KF_ELT( 6, 7, _mm512_set1_epi64( RC[j + 6] ) ); \
KF_ELT( 7, 8, _mm512_set1_epi64( RC[j + 7] ) ); \
P8_TO_P0; \
} \
} while (0)
static void keccak64_8way_init( keccak64_ctx_m512i *kc, unsigned out_size )
{
__m512i zero = m512_zero;
__m512i neg1 = m512_neg1;
// Initialization for the "lane complement".
kc->w[ 0] = zero; kc->w[ 1] = neg1;
kc->w[ 2] = neg1; kc->w[ 3] = zero;
kc->w[ 4] = zero; kc->w[ 5] = zero;
kc->w[ 6] = zero; kc->w[ 7] = zero;
kc->w[ 8] = neg1; kc->w[ 9] = zero;
kc->w[10] = zero; kc->w[11] = zero;
kc->w[12] = neg1; kc->w[13] = zero;
kc->w[14] = zero; kc->w[15] = zero;
kc->w[16] = zero; kc->w[17] = neg1;
kc->w[18] = zero; kc->w[19] = zero;
kc->w[20] = neg1; kc->w[21] = zero;
kc->w[22] = zero; kc->w[23] = zero;
kc->w[24] = zero; kc->ptr = 0;
kc->lim = 200 - (out_size >> 2);
}
static void
keccak64_8way_core( keccak64_ctx_m512i *kc, const void *data, size_t len,
size_t lim )
{
__m512i *buf;
__m512i *vdata = (__m512i*)data;
size_t ptr;
DECL_STATE
buf = kc->buf;
ptr = kc->ptr;
if ( len < (lim - ptr) )
{
memcpy_512( buf + (ptr>>3), vdata, len>>3 );
kc->ptr = ptr + len;
return;
}
READ_STATE( kc );
while ( len > 0 )
{
size_t clen;
clen = (lim - ptr);
if ( clen > len )
clen = len;
memcpy_512( buf + (ptr>>3), vdata, clen>>3 );
ptr += clen;
vdata = vdata + (clen>>3);
len -= clen;
if ( ptr == lim )
{
INPUT_BUF( lim );
KECCAK_F_1600;
ptr = 0;
}
}
WRITE_STATE( kc );
kc->ptr = ptr;
}
static void keccak64_8way_close( keccak64_ctx_m512i *kc, void *dst,
size_t byte_len, size_t lim )
{
unsigned eb;
union {
__m512i tmp[lim + 1];
sph_u64 dummy; /* for alignment */
} u;
size_t j;
size_t m512_len = byte_len >> 3;
eb = 0x100 >> 8;
if ( kc->ptr == (lim - 8) )
{
const uint64_t t = eb | 0x8000000000000000;
u.tmp[0] = m512_const1_64( t );
j = 8;
}
else
{
j = lim - kc->ptr;
u.tmp[0] = m512_const1_64( eb );
memset_zero_512( u.tmp + 1, (j>>3) - 2 );
u.tmp[ (j>>3) - 1] = m512_const1_64( 0x8000000000000000 );
}
keccak64_8way_core( kc, u.tmp, j, lim );
/* Finalize the "lane complement" */
NOT64( kc->w[ 1], kc->w[ 1] );
NOT64( kc->w[ 2], kc->w[ 2] );
NOT64( kc->w[ 8], kc->w[ 8] );
NOT64( kc->w[12], kc->w[12] );
NOT64( kc->w[17], kc->w[17] );
NOT64( kc->w[20], kc->w[20] );
memcpy_512( dst, kc->w, m512_len );
}
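The close routine applies Keccak's pad10*1 with the 0x01 domain byte (eb = 0x100 >> 8; original Keccak, not the 0x06 of standardized SHA-3). For an 80-byte input absorbed at rate lim = 136, as keccak256 uses, the layout works out as below (an illustrative sketch, not part of the source):
#include <stdio.h>
// Pad layout chosen by keccak64_8way_close for an 80-byte message, lim = 136.
int main()
{
   size_t   lim = 136, ptr = 80;    // bytes already buffered
   unsigned eb  = 0x100 >> 8;       // 0x01 pad byte
   if ( ptr == lim - 8 )
      printf( "single word 0x80...%02x\n", eb );
   else
   {
      size_t j = lim - ptr;         // 56 pad bytes = 7 64-bit words
      printf( "0x%02x in word 0, %zu zero words, 0x80<<56 in word %zu\n",
              eb, (j >> 3) - 2, (j >> 3) - 1 );
   }
   return 0;
}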
void keccak256_8way_init( void *kc )
{
keccak64_8way_init( kc, 256 );
}
void
keccak256_8way_update(void *cc, const void *data, size_t len)
{
keccak64_8way_core(cc, data, len, 136);
}
void
keccak256_8way_close(void *cc, void *dst)
{
keccak64_8way_close(cc, dst, 32, 136);
}
void keccak512_8way_init( void *kc )
{
keccak64_8way_init( kc, 512 );
}
void
keccak512_8way_update(void *cc, const void *data, size_t len)
{
keccak64_8way_core(cc, data, len, 72);
}
void
keccak512_8way_close(void *cc, void *dst)
{
keccak64_8way_close(cc, dst, 64, 72);
}
#undef INPUT_BUF
#undef DECL64
#undef XOR64
#undef AND64
#undef OR64
#undef NOT64
#undef ROL64
#undef KECCAK_F_1600
#endif // AVX512
#if defined(__AVX2__)
#define INPUT_BUF(size) do { \
size_t j; \
for (j = 0; j < (size>>3); j++ ) \
@@ -55,314 +247,28 @@ static const sph_u64 RC[] = {
} while (0)
#define DECL64(x) __m256i x
#define MOV64(d, s) (d = s)
#define XOR64(d, a, b) (d = _mm256_xor_si256(a,b))
#define AND64(d, a, b) (d = _mm256_and_si256(a,b))
#define OR64(d, a, b) (d = _mm256_or_si256(a,b))
#define NOT64(d, s) (d = _mm256_xor_si256(s,m256_neg1))
#define ROL64(d, v, n) (d = mm256_rol_64(v, n))
#define XOR64_IOTA XOR64
#define TH_ELT(t, c0, c1, c2, c3, c4, d0, d1, d2, d3, d4) do { \
DECL64(tt0); \
DECL64(tt1); \
DECL64(tt2); \
DECL64(tt3); \
XOR64(tt0, d0, d1); \
XOR64(tt1, d2, d3); \
XOR64(tt0, tt0, d4); \
XOR64(tt0, tt0, tt1); \
ROL64(tt0, tt0, 1); \
XOR64(tt2, c0, c1); \
XOR64(tt3, c2, c3); \
XOR64(tt0, tt0, c4); \
XOR64(tt2, tt2, tt3); \
XOR64(t, tt0, tt2); \
} while (0)
#include "keccak-macros.c"
#define THETA(b00, b01, b02, b03, b04, b10, b11, b12, b13, b14, \
b20, b21, b22, b23, b24, b30, b31, b32, b33, b34, \
b40, b41, b42, b43, b44) \
do { \
DECL64(t0); \
DECL64(t1); \
DECL64(t2); \
DECL64(t3); \
DECL64(t4); \
TH_ELT(t0, b40, b41, b42, b43, b44, b10, b11, b12, b13, b14); \
TH_ELT(t1, b00, b01, b02, b03, b04, b20, b21, b22, b23, b24); \
TH_ELT(t2, b10, b11, b12, b13, b14, b30, b31, b32, b33, b34); \
TH_ELT(t3, b20, b21, b22, b23, b24, b40, b41, b42, b43, b44); \
TH_ELT(t4, b30, b31, b32, b33, b34, b00, b01, b02, b03, b04); \
XOR64(b00, b00, t0); \
XOR64(b01, b01, t0); \
XOR64(b02, b02, t0); \
XOR64(b03, b03, t0); \
XOR64(b04, b04, t0); \
XOR64(b10, b10, t1); \
XOR64(b11, b11, t1); \
XOR64(b12, b12, t1); \
XOR64(b13, b13, t1); \
XOR64(b14, b14, t1); \
XOR64(b20, b20, t2); \
XOR64(b21, b21, t2); \
XOR64(b22, b22, t2); \
XOR64(b23, b23, t2); \
XOR64(b24, b24, t2); \
XOR64(b30, b30, t3); \
XOR64(b31, b31, t3); \
XOR64(b32, b32, t3); \
XOR64(b33, b33, t3); \
XOR64(b34, b34, t3); \
XOR64(b40, b40, t4); \
XOR64(b41, b41, t4); \
XOR64(b42, b42, t4); \
XOR64(b43, b43, t4); \
XOR64(b44, b44, t4); \
} while (0)
#define KECCAK_F_1600 DO(KECCAK_F_1600_256)
#define RHO(b00, b01, b02, b03, b04, b10, b11, b12, b13, b14, \
b20, b21, b22, b23, b24, b30, b31, b32, b33, b34, \
b40, b41, b42, b43, b44) \
do { \
/* ROL64(b00, b00, 0); */ \
ROL64(b01, b01, 36); \
ROL64(b02, b02, 3); \
ROL64(b03, b03, 41); \
ROL64(b04, b04, 18); \
ROL64(b10, b10, 1); \
ROL64(b11, b11, 44); \
ROL64(b12, b12, 10); \
ROL64(b13, b13, 45); \
ROL64(b14, b14, 2); \
ROL64(b20, b20, 62); \
ROL64(b21, b21, 6); \
ROL64(b22, b22, 43); \
ROL64(b23, b23, 15); \
ROL64(b24, b24, 61); \
ROL64(b30, b30, 28); \
ROL64(b31, b31, 55); \
ROL64(b32, b32, 25); \
ROL64(b33, b33, 21); \
ROL64(b34, b34, 56); \
ROL64(b40, b40, 27); \
ROL64(b41, b41, 20); \
ROL64(b42, b42, 39); \
ROL64(b43, b43, 8); \
ROL64(b44, b44, 14); \
} while (0)
/*
* The KHI macro integrates the "lane complement" optimization. On input,
* some words are complemented:
* a00 a01 a02 a04 a13 a20 a21 a22 a30 a33 a34 a43
* On output, the following words are complemented:
* a04 a10 a20 a22 a23 a31
*
* The (implicit) permutation and the theta expansion will bring back
* the input mask for the next round.
*/
#define KHI_XO(d, a, b, c) do { \
DECL64(kt); \
OR64(kt, b, c); \
XOR64(d, a, kt); \
} while (0)
#define KHI_XA(d, a, b, c) do { \
DECL64(kt); \
AND64(kt, b, c); \
XOR64(d, a, kt); \
} while (0)
#define KHI(b00, b01, b02, b03, b04, b10, b11, b12, b13, b14, \
b20, b21, b22, b23, b24, b30, b31, b32, b33, b34, \
b40, b41, b42, b43, b44) \
do { \
DECL64(c0); \
DECL64(c1); \
DECL64(c2); \
DECL64(c3); \
DECL64(c4); \
DECL64(bnn); \
NOT64(bnn, b20); \
KHI_XO(c0, b00, b10, b20); \
KHI_XO(c1, b10, bnn, b30); \
KHI_XA(c2, b20, b30, b40); \
KHI_XO(c3, b30, b40, b00); \
KHI_XA(c4, b40, b00, b10); \
MOV64(b00, c0); \
MOV64(b10, c1); \
MOV64(b20, c2); \
MOV64(b30, c3); \
MOV64(b40, c4); \
NOT64(bnn, b41); \
KHI_XO(c0, b01, b11, b21); \
KHI_XA(c1, b11, b21, b31); \
KHI_XO(c2, b21, b31, bnn); \
KHI_XO(c3, b31, b41, b01); \
KHI_XA(c4, b41, b01, b11); \
MOV64(b01, c0); \
MOV64(b11, c1); \
MOV64(b21, c2); \
MOV64(b31, c3); \
MOV64(b41, c4); \
NOT64(bnn, b32); \
KHI_XO(c0, b02, b12, b22); \
KHI_XA(c1, b12, b22, b32); \
KHI_XA(c2, b22, bnn, b42); \
KHI_XO(c3, bnn, b42, b02); \
KHI_XA(c4, b42, b02, b12); \
MOV64(b02, c0); \
MOV64(b12, c1); \
MOV64(b22, c2); \
MOV64(b32, c3); \
MOV64(b42, c4); \
NOT64(bnn, b33); \
KHI_XA(c0, b03, b13, b23); \
KHI_XO(c1, b13, b23, b33); \
KHI_XO(c2, b23, bnn, b43); \
KHI_XA(c3, bnn, b43, b03); \
KHI_XO(c4, b43, b03, b13); \
MOV64(b03, c0); \
MOV64(b13, c1); \
MOV64(b23, c2); \
MOV64(b33, c3); \
MOV64(b43, c4); \
NOT64(bnn, b14); \
KHI_XA(c0, b04, bnn, b24); \
KHI_XO(c1, bnn, b24, b34); \
KHI_XA(c2, b24, b34, b44); \
KHI_XO(c3, b34, b44, b04); \
KHI_XA(c4, b44, b04, b14); \
MOV64(b04, c0); \
MOV64(b14, c1); \
MOV64(b24, c2); \
MOV64(b34, c3); \
MOV64(b44, c4); \
} while (0)
#define IOTA(r) XOR64_IOTA(a00, a00, r)
#define P0 a00, a01, a02, a03, a04, a10, a11, a12, a13, a14, a20, a21, \
a22, a23, a24, a30, a31, a32, a33, a34, a40, a41, a42, a43, a44
#define P1 a00, a30, a10, a40, a20, a11, a41, a21, a01, a31, a22, a02, \
a32, a12, a42, a33, a13, a43, a23, a03, a44, a24, a04, a34, a14
#define P2 a00, a33, a11, a44, a22, a41, a24, a02, a30, a13, a32, a10, \
a43, a21, a04, a23, a01, a34, a12, a40, a14, a42, a20, a03, a31
#define P3 a00, a23, a41, a14, a32, a24, a42, a10, a33, a01, a43, a11, \
a34, a02, a20, a12, a30, a03, a21, a44, a31, a04, a22, a40, a13
#define P4 a00, a12, a24, a31, a43, a42, a04, a11, a23, a30, a34, a41, \
a03, a10, a22, a21, a33, a40, a02, a14, a13, a20, a32, a44, a01
#define P5 a00, a21, a42, a13, a34, a04, a20, a41, a12, a33, a03, a24, \
a40, a11, a32, a02, a23, a44, a10, a31, a01, a22, a43, a14, a30
#define P6 a00, a02, a04, a01, a03, a20, a22, a24, a21, a23, a40, a42, \
a44, a41, a43, a10, a12, a14, a11, a13, a30, a32, a34, a31, a33
#define P7 a00, a10, a20, a30, a40, a22, a32, a42, a02, a12, a44, a04, \
a14, a24, a34, a11, a21, a31, a41, a01, a33, a43, a03, a13, a23
#define P8 a00, a11, a22, a33, a44, a32, a43, a04, a10, a21, a14, a20, \
a31, a42, a03, a41, a02, a13, a24, a30, a23, a34, a40, a01, a12
#define P9 a00, a41, a32, a23, a14, a43, a34, a20, a11, a02, a31, a22, \
a13, a04, a40, a24, a10, a01, a42, a33, a12, a03, a44, a30, a21
#define P10 a00, a24, a43, a12, a31, a34, a03, a22, a41, a10, a13, a32, \
a01, a20, a44, a42, a11, a30, a04, a23, a21, a40, a14, a33, a02
#define P11 a00, a42, a34, a21, a13, a03, a40, a32, a24, a11, a01, a43, \
a30, a22, a14, a04, a41, a33, a20, a12, a02, a44, a31, a23, a10
#define P12 a00, a04, a03, a02, a01, a40, a44, a43, a42, a41, a30, a34, \
a33, a32, a31, a20, a24, a23, a22, a21, a10, a14, a13, a12, a11
#define P13 a00, a20, a40, a10, a30, a44, a14, a34, a04, a24, a33, a03, \
a23, a43, a13, a22, a42, a12, a32, a02, a11, a31, a01, a21, a41
#define P14 a00, a22, a44, a11, a33, a14, a31, a03, a20, a42, a23, a40, \
a12, a34, a01, a32, a04, a21, a43, a10, a41, a13, a30, a02, a24
#define P15 a00, a32, a14, a41, a23, a31, a13, a40, a22, a04, a12, a44, \
a21, a03, a30, a43, a20, a02, a34, a11, a24, a01, a33, a10, a42
#define P16 a00, a43, a31, a24, a12, a13, a01, a44, a32, a20, a21, a14, \
a02, a40, a33, a34, a22, a10, a03, a41, a42, a30, a23, a11, a04
#define P17 a00, a34, a13, a42, a21, a01, a30, a14, a43, a22, a02, a31, \
a10, a44, a23, a03, a32, a11, a40, a24, a04, a33, a12, a41, a20
#define P18 a00, a03, a01, a04, a02, a30, a33, a31, a34, a32, a10, a13, \
a11, a14, a12, a40, a43, a41, a44, a42, a20, a23, a21, a24, a22
#define P19 a00, a40, a30, a20, a10, a33, a23, a13, a03, a43, a11, a01, \
a41, a31, a21, a44, a34, a24, a14, a04, a22, a12, a02, a42, a32
#define P20 a00, a44, a33, a22, a11, a23, a12, a01, a40, a34, a41, a30, \
a24, a13, a02, a14, a03, a42, a31, a20, a32, a21, a10, a04, a43
#define P21 a00, a14, a23, a32, a41, a12, a21, a30, a44, a03, a24, a33, \
a42, a01, a10, a31, a40, a04, a13, a22, a43, a02, a11, a20, a34
#define P22 a00, a31, a12, a43, a24, a21, a02, a33, a14, a40, a42, a23, \
a04, a30, a11, a13, a44, a20, a01, a32, a34, a10, a41, a22, a03
#define P23 a00, a13, a21, a34, a42, a02, a10, a23, a31, a44, a04, a12, \
a20, a33, a41, a01, a14, a22, a30, a43, a03, a11, a24, a32, a40
#define P8_TO_P0 do { \
DECL64(t); \
MOV64(t, a01); \
MOV64(a01, a11); \
MOV64(a11, a43); \
MOV64(a43, t); \
MOV64(t, a02); \
MOV64(a02, a22); \
MOV64(a22, a31); \
MOV64(a31, t); \
MOV64(t, a03); \
MOV64(a03, a33); \
MOV64(a33, a24); \
MOV64(a24, t); \
MOV64(t, a04); \
MOV64(a04, a44); \
MOV64(a44, a12); \
MOV64(a12, t); \
MOV64(t, a10); \
MOV64(a10, a32); \
MOV64(a32, a13); \
MOV64(a13, t); \
MOV64(t, a14); \
MOV64(a14, a21); \
MOV64(a21, a20); \
MOV64(a20, t); \
MOV64(t, a23); \
MOV64(a23, a42); \
MOV64(a42, a40); \
MOV64(a40, t); \
MOV64(t, a30); \
MOV64(a30, a41); \
MOV64(a41, a34); \
MOV64(a34, t); \
} while (0)
#define LPAR (
#define RPAR )
#define KF_ELT(r, s, k) do { \
THETA LPAR P ## r RPAR; \
RHO LPAR P ## r RPAR; \
KHI LPAR P ## s RPAR; \
IOTA(k); \
} while (0)
#define DO(x) x
#define KECCAK_F_1600 DO(KECCAK_F_1600_)
#define KECCAK_F_1600_ do { \
#define KECCAK_F_1600_256 do { \
int j; \
for (j = 0; j < 24; j += 8) \
{ \
KF_ELT( 0, 1, (_mm256_set_epi64x( RC[j + 0], RC[j + 0], \
RC[j + 0], RC[j + 0])) ); \
KF_ELT( 1, 2, (_mm256_set_epi64x( RC[j + 1], RC[j + 1], \
RC[j + 1], RC[j + 1])) ); \
KF_ELT( 2, 3, (_mm256_set_epi64x( RC[j + 2], RC[j + 2], \
RC[j + 2], RC[j + 2])) ); \
KF_ELT( 3, 4, (_mm256_set_epi64x( RC[j + 3], RC[j + 3], \
RC[j + 3], RC[j + 3])) ); \
KF_ELT( 4, 5, (_mm256_set_epi64x( RC[j + 4], RC[j + 4], \
RC[j + 4], RC[j + 4])) ); \
KF_ELT( 5, 6, (_mm256_set_epi64x( RC[j + 5], RC[j + 5], \
RC[j + 5], RC[j + 5])) ); \
KF_ELT( 6, 7, (_mm256_set_epi64x( RC[j + 6], RC[j + 6], \
RC[j + 6], RC[j + 6])) ); \
KF_ELT( 7, 8, (_mm256_set_epi64x( RC[j + 7], RC[j + 7], \
RC[j + 7], RC[j + 7])) ); \
KF_ELT( 0, 1, _mm256_set1_epi64x( RC[j + 0] ) ); \
KF_ELT( 1, 2, _mm256_set1_epi64x( RC[j + 1] ) ); \
KF_ELT( 2, 3, _mm256_set1_epi64x( RC[j + 2] ) ); \
KF_ELT( 3, 4, _mm256_set1_epi64x( RC[j + 3] ) ); \
KF_ELT( 4, 5, _mm256_set1_epi64x( RC[j + 4] ) ); \
KF_ELT( 5, 6, _mm256_set1_epi64x( RC[j + 5] ) ); \
KF_ELT( 6, 7, _mm256_set1_epi64x( RC[j + 6] ) ); \
KF_ELT( 7, 8, _mm256_set1_epi64x( RC[j + 7] ) ); \
P8_TO_P0; \
} \
} while (0)
@@ -453,7 +359,7 @@ static void keccak64_close( keccak64_ctx_m256i *kc, void *dst, size_t byte_len,
else
{
j = lim - kc->ptr;
u.tmp[0] = _mm256_set_epi64x( eb, eb, eb, eb );
u.tmp[0] = m256_const1_64( eb );
memset_zero_256( u.tmp + 1, (j>>3) - 2 );
u.tmp[ (j>>3) - 1] = m256_const1_64( 0x8000000000000000 );
}
@@ -474,7 +380,7 @@ void keccak256_4way_init( void *kc )
}
void
keccak256_4way(void *cc, const void *data, size_t len)
keccak256_4way_update(void *cc, const void *data, size_t len)
{
keccak64_core(cc, data, len, 136);
}
@@ -491,7 +397,7 @@ void keccak512_4way_init( void *kc )
}
void
keccak512_4way(void *cc, const void *data, size_t len)
keccak512_4way_update(void *cc, const void *data, size_t len)
{
keccak64_core(cc, data, len, 72);
}
@@ -502,4 +408,13 @@ keccak512_4way_close(void *cc, void *dst)
keccak64_close(cc, dst, 64, 72);
}
#endif
#undef INPUT_BUF
#undef DECL64
#undef XOR64
#undef AND64
#undef OR64
#undef NOT64
#undef ROL64
#undef KECCAK_F_1600
#endif // AVX2

View File

@@ -64,26 +64,49 @@ extern "C"{
* <code>memcpy()</code>).
*/
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
typedef struct {
__m256i buf[144*8]; /* first field, for alignment */
__m512i buf[144*8];
__m512i w[25];
size_t ptr, lim;
} keccak64_ctx_m512i __attribute__((aligned(128)));
typedef keccak64_ctx_m512i keccak256_8way_context;
typedef keccak64_ctx_m512i keccak512_8way_context;
void keccak256_8way_init(void *cc);
void keccak256_8way_update(void *cc, const void *data, size_t len);
void keccak256_8way_close(void *cc, void *dst);
void keccak512_8way_init(void *cc);
void keccak512_8way_update(void *cc, const void *data, size_t len);
void keccak512_8way_close(void *cc, void *dst);
void keccak512_8way_addbits_and_close(
void *cc, unsigned ub, unsigned n, void *dst);
#endif
typedef struct {
__m256i buf[144*8];
__m256i w[25];
size_t ptr, lim;
// sph_u64 wide[25];
} keccak64_ctx_m256i;
} keccak64_ctx_m256i __attribute__((aligned(128)));
typedef keccak64_ctx_m256i keccak256_4way_context;
typedef keccak64_ctx_m256i keccak512_4way_context;
void keccak256_4way_init(void *cc);
void keccak256_4way(void *cc, const void *data, size_t len);
void keccak256_4way_update(void *cc, const void *data, size_t len);
void keccak256_4way_close(void *cc, void *dst);
#define keccak256_4way keccak256_4way_update
void keccak512_4way_init(void *cc);
void keccak512_4way(void *cc, const void *data, size_t len);
void keccak512_4way_update(void *cc, const void *data, size_t len);
void keccak512_4way_close(void *cc, void *dst);
void keccak512_4way_addbits_and_close(
void *cc, unsigned ub, unsigned n, void *dst);
#define keccak512_4way keccak512_4way_update
#endif

324
algo/keccak/keccak-macros.c Normal file
View File

@@ -0,0 +1,324 @@
#ifdef TH_ELT
#undef TH_ELT
#endif
#define TH_ELT(t, c0, c1, c2, c3, c4, d0, d1, d2, d3, d4) do { \
DECL64(tt0); \
DECL64(tt1); \
DECL64(tt2); \
DECL64(tt3); \
XOR64(tt0, d0, d1); \
XOR64(tt1, d2, d3); \
XOR64(tt0, tt0, d4); \
XOR64(tt0, tt0, tt1); \
ROL64(tt0, tt0, 1); \
XOR64(tt2, c0, c1); \
XOR64(tt3, c2, c3); \
XOR64(tt0, tt0, c4); \
XOR64(tt2, tt2, tt3); \
XOR64(t, tt0, tt2); \
} while (0)
#ifdef THETA
#undef THETA
#endif
#define THETA(b00, b01, b02, b03, b04, b10, b11, b12, b13, b14, \
b20, b21, b22, b23, b24, b30, b31, b32, b33, b34, \
b40, b41, b42, b43, b44) \
do { \
DECL64(t0); \
DECL64(t1); \
DECL64(t2); \
DECL64(t3); \
DECL64(t4); \
TH_ELT(t0, b40, b41, b42, b43, b44, b10, b11, b12, b13, b14); \
TH_ELT(t1, b00, b01, b02, b03, b04, b20, b21, b22, b23, b24); \
TH_ELT(t2, b10, b11, b12, b13, b14, b30, b31, b32, b33, b34); \
TH_ELT(t3, b20, b21, b22, b23, b24, b40, b41, b42, b43, b44); \
TH_ELT(t4, b30, b31, b32, b33, b34, b00, b01, b02, b03, b04); \
XOR64(b00, b00, t0); \
XOR64(b01, b01, t0); \
XOR64(b02, b02, t0); \
XOR64(b03, b03, t0); \
XOR64(b04, b04, t0); \
XOR64(b10, b10, t1); \
XOR64(b11, b11, t1); \
XOR64(b12, b12, t1); \
XOR64(b13, b13, t1); \
XOR64(b14, b14, t1); \
XOR64(b20, b20, t2); \
XOR64(b21, b21, t2); \
XOR64(b22, b22, t2); \
XOR64(b23, b23, t2); \
XOR64(b24, b24, t2); \
XOR64(b30, b30, t3); \
XOR64(b31, b31, t3); \
XOR64(b32, b32, t3); \
XOR64(b33, b33, t3); \
XOR64(b34, b34, t3); \
XOR64(b40, b40, t4); \
XOR64(b41, b41, t4); \
XOR64(b42, b42, t4); \
XOR64(b43, b43, t4); \
XOR64(b44, b44, t4); \
} while (0)
#ifdef RHO
#undef RHO
#endif
#define RHO(b00, b01, b02, b03, b04, b10, b11, b12, b13, b14, \
b20, b21, b22, b23, b24, b30, b31, b32, b33, b34, \
b40, b41, b42, b43, b44) \
do { \
/* ROL64(b00, b00, 0); */ \
ROL64(b01, b01, 36); \
ROL64(b02, b02, 3); \
ROL64(b03, b03, 41); \
ROL64(b04, b04, 18); \
ROL64(b10, b10, 1); \
ROL64(b11, b11, 44); \
ROL64(b12, b12, 10); \
ROL64(b13, b13, 45); \
ROL64(b14, b14, 2); \
ROL64(b20, b20, 62); \
ROL64(b21, b21, 6); \
ROL64(b22, b22, 43); \
ROL64(b23, b23, 15); \
ROL64(b24, b24, 61); \
ROL64(b30, b30, 28); \
ROL64(b31, b31, 55); \
ROL64(b32, b32, 25); \
ROL64(b33, b33, 21); \
ROL64(b34, b34, 56); \
ROL64(b40, b40, 27); \
ROL64(b41, b41, 20); \
ROL64(b42, b42, 39); \
ROL64(b43, b43, 8); \
ROL64(b44, b44, 14); \
} while (0)
/*
* The KHI macro integrates the "lane complement" optimization. On input,
* some words are complemented:
* a00 a01 a02 a04 a13 a20 a21 a22 a30 a33 a34 a43
* On output, the following words are complemented:
* a04 a10 a20 a22 a23 a31
*
* The (implicit) permutation and the theta expansion will bring back
* the input mask for the next round.
*/
#ifdef KHI_XO
#undef KHI_XO
#endif
#define KHI_XO(d, a, b, c) do { \
DECL64(kt); \
OR64(kt, b, c); \
XOR64(d, a, kt); \
} while (0)
#ifdef KHI_XA
#undef KHI_XA
#endif
#define KHI_XA(d, a, b, c) do { \
DECL64(kt); \
AND64(kt, b, c); \
XOR64(d, a, kt); \
} while (0)
#ifdef KHI
#undef KHI
#endif
#define KHI(b00, b01, b02, b03, b04, b10, b11, b12, b13, b14, \
b20, b21, b22, b23, b24, b30, b31, b32, b33, b34, \
b40, b41, b42, b43, b44) \
do { \
DECL64(c0); \
DECL64(c1); \
DECL64(c2); \
DECL64(c3); \
DECL64(c4); \
DECL64(bnn); \
NOT64(bnn, b20); \
KHI_XO(c0, b00, b10, b20); \
KHI_XO(c1, b10, bnn, b30); \
KHI_XA(c2, b20, b30, b40); \
KHI_XO(c3, b30, b40, b00); \
KHI_XA(c4, b40, b00, b10); \
MOV64(b00, c0); \
MOV64(b10, c1); \
MOV64(b20, c2); \
MOV64(b30, c3); \
MOV64(b40, c4); \
NOT64(bnn, b41); \
KHI_XO(c0, b01, b11, b21); \
KHI_XA(c1, b11, b21, b31); \
KHI_XO(c2, b21, b31, bnn); \
KHI_XO(c3, b31, b41, b01); \
KHI_XA(c4, b41, b01, b11); \
MOV64(b01, c0); \
MOV64(b11, c1); \
MOV64(b21, c2); \
MOV64(b31, c3); \
MOV64(b41, c4); \
NOT64(bnn, b32); \
KHI_XO(c0, b02, b12, b22); \
KHI_XA(c1, b12, b22, b32); \
KHI_XA(c2, b22, bnn, b42); \
KHI_XO(c3, bnn, b42, b02); \
KHI_XA(c4, b42, b02, b12); \
MOV64(b02, c0); \
MOV64(b12, c1); \
MOV64(b22, c2); \
MOV64(b32, c3); \
MOV64(b42, c4); \
NOT64(bnn, b33); \
KHI_XA(c0, b03, b13, b23); \
KHI_XO(c1, b13, b23, b33); \
KHI_XO(c2, b23, bnn, b43); \
KHI_XA(c3, bnn, b43, b03); \
KHI_XO(c4, b43, b03, b13); \
MOV64(b03, c0); \
MOV64(b13, c1); \
MOV64(b23, c2); \
MOV64(b33, c3); \
MOV64(b43, c4); \
NOT64(bnn, b14); \
KHI_XA(c0, b04, bnn, b24); \
KHI_XO(c1, bnn, b24, b34); \
KHI_XA(c2, b24, b34, b44); \
KHI_XO(c3, b34, b44, b04); \
KHI_XA(c4, b44, b04, b14); \
MOV64(b04, c0); \
MOV64(b14, c1); \
MOV64(b24, c2); \
MOV64(b34, c3); \
MOV64(b44, c4); \
} while (0)
#ifdef IOTA
#undef IOTA
#endif
#define IOTA(r) XOR64_IOTA(a00, a00, r)
#ifdef P0
#undef P0
#undef P1
#undef P2
#undef P3
#undef P4
#undef P5
#undef P6
#undef P7
#undef P8
#undef P9
#undef P10
#undef P11
#undef P12
#undef P13
#undef P14
#undef P15
#undef P16
#undef P17
#undef P18
#undef P19
#undef P20
#undef P21
#undef P22
#undef P23
#endif
#define P0 a00, a01, a02, a03, a04, a10, a11, a12, a13, a14, a20, a21, \
a22, a23, a24, a30, a31, a32, a33, a34, a40, a41, a42, a43, a44
#define P1 a00, a30, a10, a40, a20, a11, a41, a21, a01, a31, a22, a02, \
a32, a12, a42, a33, a13, a43, a23, a03, a44, a24, a04, a34, a14
#define P2 a00, a33, a11, a44, a22, a41, a24, a02, a30, a13, a32, a10, \
a43, a21, a04, a23, a01, a34, a12, a40, a14, a42, a20, a03, a31
#define P3 a00, a23, a41, a14, a32, a24, a42, a10, a33, a01, a43, a11, \
a34, a02, a20, a12, a30, a03, a21, a44, a31, a04, a22, a40, a13
#define P4 a00, a12, a24, a31, a43, a42, a04, a11, a23, a30, a34, a41, \
a03, a10, a22, a21, a33, a40, a02, a14, a13, a20, a32, a44, a01
#define P5 a00, a21, a42, a13, a34, a04, a20, a41, a12, a33, a03, a24, \
a40, a11, a32, a02, a23, a44, a10, a31, a01, a22, a43, a14, a30
#define P6 a00, a02, a04, a01, a03, a20, a22, a24, a21, a23, a40, a42, \
a44, a41, a43, a10, a12, a14, a11, a13, a30, a32, a34, a31, a33
#define P7 a00, a10, a20, a30, a40, a22, a32, a42, a02, a12, a44, a04, \
a14, a24, a34, a11, a21, a31, a41, a01, a33, a43, a03, a13, a23
#define P8 a00, a11, a22, a33, a44, a32, a43, a04, a10, a21, a14, a20, \
a31, a42, a03, a41, a02, a13, a24, a30, a23, a34, a40, a01, a12
#define P9 a00, a41, a32, a23, a14, a43, a34, a20, a11, a02, a31, a22, \
a13, a04, a40, a24, a10, a01, a42, a33, a12, a03, a44, a30, a21
#define P10 a00, a24, a43, a12, a31, a34, a03, a22, a41, a10, a13, a32, \
a01, a20, a44, a42, a11, a30, a04, a23, a21, a40, a14, a33, a02
#define P11 a00, a42, a34, a21, a13, a03, a40, a32, a24, a11, a01, a43, \
a30, a22, a14, a04, a41, a33, a20, a12, a02, a44, a31, a23, a10
#define P12 a00, a04, a03, a02, a01, a40, a44, a43, a42, a41, a30, a34, \
a33, a32, a31, a20, a24, a23, a22, a21, a10, a14, a13, a12, a11
#define P13 a00, a20, a40, a10, a30, a44, a14, a34, a04, a24, a33, a03, \
a23, a43, a13, a22, a42, a12, a32, a02, a11, a31, a01, a21, a41
#define P14 a00, a22, a44, a11, a33, a14, a31, a03, a20, a42, a23, a40, \
a12, a34, a01, a32, a04, a21, a43, a10, a41, a13, a30, a02, a24
#define P15 a00, a32, a14, a41, a23, a31, a13, a40, a22, a04, a12, a44, \
a21, a03, a30, a43, a20, a02, a34, a11, a24, a01, a33, a10, a42
#define P16 a00, a43, a31, a24, a12, a13, a01, a44, a32, a20, a21, a14, \
a02, a40, a33, a34, a22, a10, a03, a41, a42, a30, a23, a11, a04
#define P17 a00, a34, a13, a42, a21, a01, a30, a14, a43, a22, a02, a31, \
a10, a44, a23, a03, a32, a11, a40, a24, a04, a33, a12, a41, a20
#define P18 a00, a03, a01, a04, a02, a30, a33, a31, a34, a32, a10, a13, \
a11, a14, a12, a40, a43, a41, a44, a42, a20, a23, a21, a24, a22
#define P19 a00, a40, a30, a20, a10, a33, a23, a13, a03, a43, a11, a01, \
a41, a31, a21, a44, a34, a24, a14, a04, a22, a12, a02, a42, a32
#define P20 a00, a44, a33, a22, a11, a23, a12, a01, a40, a34, a41, a30, \
a24, a13, a02, a14, a03, a42, a31, a20, a32, a21, a10, a04, a43
#define P21 a00, a14, a23, a32, a41, a12, a21, a30, a44, a03, a24, a33, \
a42, a01, a10, a31, a40, a04, a13, a22, a43, a02, a11, a20, a34
#define P22 a00, a31, a12, a43, a24, a21, a02, a33, a14, a40, a42, a23, \
a04, a30, a11, a13, a44, a20, a01, a32, a34, a10, a41, a22, a03
#define P23 a00, a13, a21, a34, a42, a02, a10, a23, a31, a44, a04, a12, \
a20, a33, a41, a01, a14, a22, a30, a43, a03, a11, a24, a32, a40
#ifdef P8_TO_P0
#undef P8_TO_P0
#endif
#define P8_TO_P0 do { \
DECL64(t); \
MOV64(t, a01); \
MOV64(a01, a11); \
MOV64(a11, a43); \
MOV64(a43, t); \
MOV64(t, a02); \
MOV64(a02, a22); \
MOV64(a22, a31); \
MOV64(a31, t); \
MOV64(t, a03); \
MOV64(a03, a33); \
MOV64(a33, a24); \
MOV64(a24, t); \
MOV64(t, a04); \
MOV64(a04, a44); \
MOV64(a44, a12); \
MOV64(a12, t); \
MOV64(t, a10); \
MOV64(a10, a32); \
MOV64(a32, a13); \
MOV64(a13, t); \
MOV64(t, a14); \
MOV64(a14, a21); \
MOV64(a21, a20); \
MOV64(a20, t); \
MOV64(t, a23); \
MOV64(a23, a42); \
MOV64(a42, a40); \
MOV64(a40, t); \
MOV64(t, a30); \
MOV64(a30, a41); \
MOV64(a41, a34); \
MOV64(a34, t); \
} while (0)
#define KF_ELT(r, s, k) do { \
THETA LPAR P ## r RPAR; \
RHO LPAR P ## r RPAR; \
KHI LPAR P ## s RPAR; \
IOTA(k); \
} while (0)
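A note on the lane-complement comment above: the plain chi column is a ^= ~b & c, but by keeping a fixed set of lanes complemented the NOT disappears into an ordinary AND64 or OR64, and the KHI sequence above picks whichever KHI_XA/KHI_XO form reproduces the complement mask for the next round. A scalar sketch of the underlying identities (values are arbitrary):
#include <stdint.h>
#include <assert.h>
int main()
{
   uint64_t a = 0x0123456789abcdefULL;
   uint64_t b = 0xfedcba9876543210ULL;
   uint64_t c = 0x0f0f0f0f0f0f0f0fULL;
   uint64_t plain = a ^ (~b & c);    // the textbook chi column
   // With b stored complemented, KHI_XA's AND-XOR gives the same result.
   uint64_t bn = ~b;
   assert( plain == (a ^ (bn & c)) );
   // With a and c stored complemented, KHI_XO's OR-XOR gives it directly:
   // a ^ ~(b | ~c) = ~a ^ (b | ~c) by De Morgan.
   uint64_t an = ~a, cn = ~c;
   assert( plain == (an ^ (b | cn)) );
   return 0;
}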

2156
algo/lanehash/lane.c Normal file

File diff suppressed because it is too large.

50
algo/lanehash/lane.h Normal file
View File

@@ -0,0 +1,50 @@
/*
* Copyright (c) 2008 Sebastiaan Indesteege
* <sebastiaan.indesteege@esat.kuleuven.be>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Optimised ANSI-C implementation of LANE
*/
#ifndef LANE_H
#define LANE_H
#include <string.h>
//#include "algo/sha/sha3-defs.h"
#include <stdint.h>
typedef unsigned char BitSequence;
typedef unsigned long long DataLength;
//typedef enum { SUCCESS = 0, FAIL = 1, BAD_HASHBITLEN = 2, BAD_DATABITLEN = 3 } HashReturn;
//typedef unsigned char u8;
//typedef unsigned int u32;
//typedef unsigned long long u64;
typedef struct {
int hashbitlen;
uint64_t ctr;
uint32_t h[16];
uint8_t buffer[128];
} hashState;
void laneInit (hashState *state, int hashbitlen);
void laneUpdate (hashState *state, const BitSequence *data, DataLength databitlen);
void laneFinal (hashState *state, BitSequence *hashval);
void laneHash (int hashbitlen, const BitSequence *data, DataLength databitlen, BitSequence *hashval);
#endif /* LANE_H */
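The API above follows the usual NIST SHA-3 candidate shape, so databitlen is a length in bits. A minimal usage sketch (the 80-byte input and 512-bit output sizes are illustrative; the function name is hypothetical):
#include <stdint.h>
#include "lane.h"
// Hash an 80-byte block header with LANE-512 via the streaming interface.
void lane512_example( const uint8_t input[80], uint8_t output[64] )
{
   hashState ctx;
   laneInit( &ctx, 512 );
   laneUpdate( &ctx, input, 80 * 8 );   // length is in bits
   laneFinal( &ctx, output );
}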

View File

@@ -6,11 +6,591 @@
#include "simd-utils.h"
/* initial values of chaining variables */
static const uint32 IV[40] __attribute((aligned(64))) = {
0xdbf78465,0x4eaa6fb4,0x44b051e0,0x6d251e69,
0xdef610bb,0xee058139,0x90152df4,0x6e292011,
0xde099fa3,0x70eee9a0,0xd9d2f256,0xc3b44b95,
0x746cd581,0xcf1ccf0e,0x8fc944b3,0x5d9b0557,
0xad659c05,0x04016ce5,0x5dba5781,0xf7efc89d,
0x8b264ae7,0x24aa230a,0x666d1836,0x0306194f,
0x204b1f67,0xe571f7d7,0x36d79cce,0x858075d5,
0x7cde72ce,0x14bcb808,0x57e9e923,0x35870c6a,
0xaffb4363,0xc825b7c7,0x5ec41e22,0x6c68e9be,
0x03e86cea,0xb07224cc,0x0fc688f1,0xf5df3999
};
/* Round Constants */
static const uint32 CNS_INIT[128] __attribute((aligned(64))) = {
0xb213afa5,0xfc20d9d2,0xb6de10ed,0x303994a6,
0xe028c9bf,0xe25e72c1,0x01685f3d,0xe0337818,
0xc84ebe95,0x34552e25,0x70f47aae,0xc0e65299,
0x44756f91,0xe623bb72,0x05a17cf4,0x441ba90d,
0x4e608a22,0x7ad8818f,0x0707a3d4,0x6cc33a12,
0x7e8fce32,0x5c58a4a4,0xbd09caca,0x7f34d442,
0x56d858fe,0x8438764a,0x1c1e8f51,0xdc56983e,
0x956548be,0x1e38e2e7,0xf4272b28,0x9389217f,
0x343b138f,0xbb6de032,0x707a3d45,0x1e00108f,
0xfe191be2,0x78e38b9d,0x144ae5cc,0xe5a8bce6,
0xd0ec4e3d,0xedb780c8,0xaeb28562,0x7800423d,
0x3cb226e5,0x27586719,0xfaa7ae2b,0x5274baf4,
0x2ceb4882,0xd9847356,0xbaca1589,0x8f5b7882,
0x5944a28e,0x36eda57f,0x2e48f1c1,0x26889ba7,
0xb3ad2208,0xa2c78434,0x40a46f3e,0x96e1db12,
0xa1c4c355,0x703aace7,0xb923c704,0x9a226e9d,
0x00000000,0x00000000,0x00000000,0xf0d2e9e3,
0x00000000,0x00000000,0x00000000,0x5090d577,
0x00000000,0x00000000,0x00000000,0xac11d7fa,
0x00000000,0x00000000,0x00000000,0x2d1925ab,
0x00000000,0x00000000,0x00000000,0x1bcb66f2,
0x00000000,0x00000000,0x00000000,0xb46496ac,
0x00000000,0x00000000,0x00000000,0x6f2d9bc9,
0x00000000,0x00000000,0x00000000,0xd1925ab0,
0x00000000,0x00000000,0x00000000,0x78602649,
0x00000000,0x00000000,0x00000000,0x29131ab6,
0x00000000,0x00000000,0x00000000,0x8edae952,
0x00000000,0x00000000,0x00000000,0x0fc053c3,
0x00000000,0x00000000,0x00000000,0x3b6ba548,
0x00000000,0x00000000,0x00000000,0x3f014f0c,
0x00000000,0x00000000,0x00000000,0xedae9520,
0x00000000,0x00000000,0x00000000,0xfc053c31
};
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define cns4w(i) m512_const1_128( ( (__m128i*)CNS_INIT)[i] )
#define ADD_CONSTANT4W(a,b,c0,c1)\
a = _mm512_xor_si512(a,c0);\
b = _mm512_xor_si512(b,c1);
#define MULT24W( a0, a1, mask ) \
do { \
__m512i b = _mm512_xor_si512( a0, \
_mm512_shuffle_epi32( _mm512_and_si512(a1,mask), 16 ) ); \
a0 = _mm512_or_si512( _mm512_bsrli_epi128(b,4), _mm512_bslli_epi128(a1,12) );\
a1 = _mm512_or_si512( _mm512_bsrli_epi128(a1,4), _mm512_bslli_epi128(b,12) );\
} while(0)
// Pointer arithmetic confirmed correct, but array indexing would be clearer.
#define STEP_PART4W(x,c0,c1,t)\
SUBCRUMB4W(*x,*(x+1),*(x+2),*(x+3),*t);\
SUBCRUMB4W(*(x+5),*(x+6),*(x+7),*(x+4),*t);\
MIXWORD4W(*x,*(x+4),*t,*(t+1));\
MIXWORD4W(*(x+1),*(x+5),*t,*(t+1));\
MIXWORD4W(*(x+2),*(x+6),*t,*(t+1));\
MIXWORD4W(*(x+3),*(x+7),*t,*(t+1));\
ADD_CONSTANT4W(*x, *(x+4), c0, c1);
#define SUBCRUMB4W(a0,a1,a2,a3,t)\
t = _mm512_load_si512(&a0);\
a0 = _mm512_or_si512(a0,a1);\
a2 = _mm512_xor_si512(a2,a3);\
a1 = _mm512_andnot_si512(a1, m512_neg1 );\
a0 = _mm512_xor_si512(a0,a3);\
a3 = _mm512_and_si512(a3,t);\
a1 = _mm512_xor_si512(a1,a3);\
a3 = _mm512_xor_si512(a3,a2);\
a2 = _mm512_and_si512(a2,a0);\
a0 = _mm512_andnot_si512(a0, m512_neg1 );\
a2 = _mm512_xor_si512(a2,a1);\
a1 = _mm512_or_si512(a1,a3);\
t = _mm512_xor_si512(t,a1);\
a3 = _mm512_xor_si512(a3,a2);\
a2 = _mm512_and_si512(a2,a1);\
a1 = _mm512_xor_si512(a1,a0);\
a0 = _mm512_load_si512(&t);
#define MIXWORD4W(a,b,t1,t2)\
b = _mm512_xor_si512(a,b);\
t1 = _mm512_slli_epi32(a,2);\
t2 = _mm512_srli_epi32(a,30);\
a = _mm512_or_si512(t1,t2);\
a = _mm512_xor_si512(a,b);\
t1 = _mm512_slli_epi32(b,14);\
t2 = _mm512_srli_epi32(b,18);\
b = _mm512_or_si512(t1,t2);\
b = _mm512_xor_si512(a,b);\
t1 = _mm512_slli_epi32(a,10);\
t2 = _mm512_srli_epi32(a,22);\
a = _mm512_or_si512(t1,t2);\
a = _mm512_xor_si512(a,b);\
t1 = _mm512_slli_epi32(b,1);\
t2 = _mm512_srli_epi32(b,31);\
b = _mm512_or_si512(t1,t2);
#define STEP_PART24W(a0,a1,t0,t1,c0,c1,tmp0,tmp1)\
a1 = _mm512_shuffle_epi32(a1,147);\
t0 = _mm512_load_si512(&a1);\
a1 = _mm512_unpacklo_epi32(a1,a0);\
t0 = _mm512_unpackhi_epi32(t0,a0);\
t1 = _mm512_shuffle_epi32(t0,78);\
a0 = _mm512_shuffle_epi32(a1,78);\
SUBCRUMB4W(t1,t0,a0,a1,tmp0);\
t0 = _mm512_unpacklo_epi32(t0,t1);\
a1 = _mm512_unpacklo_epi32(a1,a0);\
a0 = _mm512_load_si512(&a1);\
a0 = _mm512_unpackhi_epi64(a0,t0);\
a1 = _mm512_unpacklo_epi64(a1,t0);\
a1 = _mm512_shuffle_epi32(a1,57);\
MIXWORD4W(a0,a1,tmp0,tmp1);\
ADD_CONSTANT4W(a0,a1,c0,c1);
#define NMLTOM7684W(r0,r1,r2,s0,s1,s2,s3,p0,p1,p2,q0,q1,q2,q3)\
s2 = _mm512_load_si512(&r1);\
q2 = _mm512_load_si512(&p1);\
r2 = _mm512_shuffle_epi32(r2,216);\
p2 = _mm512_shuffle_epi32(p2,216);\
r1 = _mm512_unpacklo_epi32(r1,r0);\
p1 = _mm512_unpacklo_epi32(p1,p0);\
s2 = _mm512_unpackhi_epi32(s2,r0);\
q2 = _mm512_unpackhi_epi32(q2,p0);\
s0 = _mm512_load_si512(&r2);\
q0 = _mm512_load_si512(&p2);\
r2 = _mm512_unpacklo_epi64(r2,r1);\
p2 = _mm512_unpacklo_epi64(p2,p1);\
s1 = _mm512_load_si512(&s0);\
q1 = _mm512_load_si512(&q0);\
s0 = _mm512_unpackhi_epi64(s0,r1);\
q0 = _mm512_unpackhi_epi64(q0,p1);\
r2 = _mm512_shuffle_epi32(r2,225);\
p2 = _mm512_shuffle_epi32(p2,225);\
r0 = _mm512_load_si512(&s1);\
p0 = _mm512_load_si512(&q1);\
s0 = _mm512_shuffle_epi32(s0,225);\
q0 = _mm512_shuffle_epi32(q0,225);\
s1 = _mm512_unpacklo_epi64(s1,s2);\
q1 = _mm512_unpacklo_epi64(q1,q2);\
r0 = _mm512_unpackhi_epi64(r0,s2);\
p0 = _mm512_unpackhi_epi64(p0,q2);\
s2 = _mm512_load_si512(&r0);\
q2 = _mm512_load_si512(&p0);\
s3 = _mm512_load_si512(&r2);\
q3 = _mm512_load_si512(&p2);
#define MIXTON7684W(r0,r1,r2,r3,s0,s1,s2,p0,p1,p2,p3,q0,q1,q2)\
s0 = _mm512_load_si512(&r0);\
q0 = _mm512_load_si512(&p0);\
s1 = _mm512_load_si512(&r2);\
q1 = _mm512_load_si512(&p2);\
r0 = _mm512_unpackhi_epi32(r0,r1);\
p0 = _mm512_unpackhi_epi32(p0,p1);\
r2 = _mm512_unpackhi_epi32(r2,r3);\
p2 = _mm512_unpackhi_epi32(p2,p3);\
s0 = _mm512_unpacklo_epi32(s0,r1);\
q0 = _mm512_unpacklo_epi32(q0,p1);\
s1 = _mm512_unpacklo_epi32(s1,r3);\
q1 = _mm512_unpacklo_epi32(q1,p3);\
r1 = _mm512_load_si512(&r0);\
p1 = _mm512_load_si512(&p0);\
r0 = _mm512_unpackhi_epi64(r0,r2);\
p0 = _mm512_unpackhi_epi64(p0,p2);\
s0 = _mm512_unpackhi_epi64(s0,s1);\
q0 = _mm512_unpackhi_epi64(q0,q1);\
r1 = _mm512_unpacklo_epi64(r1,r2);\
p1 = _mm512_unpacklo_epi64(p1,p2);\
s2 = _mm512_load_si512(&r0);\
q2 = _mm512_load_si512(&p0);\
s1 = _mm512_load_si512(&r1);\
q1 = _mm512_load_si512(&p1);
#define NMLTOM10244W(r0,r1,r2,r3,s0,s1,s2,s3,p0,p1,p2,p3,q0,q1,q2,q3)\
s1 = _mm512_load_si512(&r3);\
q1 = _mm512_load_si512(&p3);\
s3 = _mm512_load_si512(&r3);\
q3 = _mm512_load_si512(&p3);\
s1 = _mm512_unpackhi_epi32(s1,r2);\
q1 = _mm512_unpackhi_epi32(q1,p2);\
s3 = _mm512_unpacklo_epi32(s3,r2);\
q3 = _mm512_unpacklo_epi32(q3,p2);\
s0 = _mm512_load_si512(&s1);\
q0 = _mm512_load_si512(&q1);\
s2 = _mm512_load_si512(&s3);\
q2 = _mm512_load_si512(&q3);\
r3 = _mm512_load_si512(&r1);\
p3 = _mm512_load_si512(&p1);\
r1 = _mm512_unpacklo_epi32(r1,r0);\
p1 = _mm512_unpacklo_epi32(p1,p0);\
r3 = _mm512_unpackhi_epi32(r3,r0);\
p3 = _mm512_unpackhi_epi32(p3,p0);\
s0 = _mm512_unpackhi_epi64(s0,r3);\
q0 = _mm512_unpackhi_epi64(q0,p3);\
s1 = _mm512_unpacklo_epi64(s1,r3);\
q1 = _mm512_unpacklo_epi64(q1,p3);\
s2 = _mm512_unpackhi_epi64(s2,r1);\
q2 = _mm512_unpackhi_epi64(q2,p1);\
s3 = _mm512_unpacklo_epi64(s3,r1);\
q3 = _mm512_unpacklo_epi64(q3,p1);
#define MIXTON10244W(r0,r1,r2,r3,s0,s1,s2,s3,p0,p1,p2,p3,q0,q1,q2,q3)\
NMLTOM10244W(r0,r1,r2,r3,s0,s1,s2,s3,p0,p1,p2,p3,q0,q1,q2,q3);
void rnd512_4way( luffa_4way_context *state, __m512i *msg )
{
__m512i t0, t1;
__m512i *chainv = state->chainv;
__m512i msg0, msg1;
__m512i tmp[2];
__m512i x[8];
const __m512i MASK = m512_const2_64( 0, 0x00000000ffffffff );
t0 = chainv[0];
t1 = chainv[1];
t0 = _mm512_xor_si512( t0, chainv[2] );
t1 = _mm512_xor_si512( t1, chainv[3] );
t0 = _mm512_xor_si512( t0, chainv[4] );
t1 = _mm512_xor_si512( t1, chainv[5] );
t0 = _mm512_xor_si512( t0, chainv[6] );
t1 = _mm512_xor_si512( t1, chainv[7] );
t0 = _mm512_xor_si512( t0, chainv[8] );
t1 = _mm512_xor_si512( t1, chainv[9] );
MULT24W( t0, t1, MASK );
msg0 = _mm512_shuffle_epi32( msg[0], 27 );
msg1 = _mm512_shuffle_epi32( msg[1], 27 );
chainv[0] = _mm512_xor_si512( chainv[0], t0 );
chainv[1] = _mm512_xor_si512( chainv[1], t1 );
chainv[2] = _mm512_xor_si512( chainv[2], t0 );
chainv[3] = _mm512_xor_si512( chainv[3], t1 );
chainv[4] = _mm512_xor_si512( chainv[4], t0 );
chainv[5] = _mm512_xor_si512( chainv[5], t1 );
chainv[6] = _mm512_xor_si512( chainv[6], t0 );
chainv[7] = _mm512_xor_si512( chainv[7], t1 );
chainv[8] = _mm512_xor_si512( chainv[8], t0 );
chainv[9] = _mm512_xor_si512( chainv[9], t1 );
t0 = chainv[0];
t1 = chainv[1];
MULT24W( chainv[0], chainv[1], MASK );
chainv[0] = _mm512_xor_si512( chainv[0], chainv[2] );
chainv[1] = _mm512_xor_si512( chainv[1], chainv[3] );
MULT24W( chainv[2], chainv[3], MASK );
chainv[2] = _mm512_xor_si512(chainv[2], chainv[4]);
chainv[3] = _mm512_xor_si512(chainv[3], chainv[5]);
MULT24W( chainv[4], chainv[5], MASK );
chainv[4] = _mm512_xor_si512(chainv[4], chainv[6]);
chainv[5] = _mm512_xor_si512(chainv[5], chainv[7]);
MULT24W( chainv[6], chainv[7], MASK );
chainv[6] = _mm512_xor_si512(chainv[6], chainv[8]);
chainv[7] = _mm512_xor_si512(chainv[7], chainv[9]);
MULT24W( chainv[8], chainv[9], MASK );
chainv[8] = _mm512_xor_si512( chainv[8], t0 );
chainv[9] = _mm512_xor_si512( chainv[9], t1 );
t0 = chainv[8];
t1 = chainv[9];
MULT24W( chainv[8], chainv[9], MASK );
chainv[8] = _mm512_xor_si512( chainv[8], chainv[6] );
chainv[9] = _mm512_xor_si512( chainv[9], chainv[7] );
MULT24W( chainv[6], chainv[7], MASK );
chainv[6] = _mm512_xor_si512( chainv[6], chainv[4] );
chainv[7] = _mm512_xor_si512( chainv[7], chainv[5] );
MULT24W( chainv[4], chainv[5], MASK );
chainv[4] = _mm512_xor_si512( chainv[4], chainv[2] );
chainv[5] = _mm512_xor_si512( chainv[5], chainv[3] );
MULT24W( chainv[2], chainv[3], MASK );
chainv[2] = _mm512_xor_si512( chainv[2], chainv[0] );
chainv[3] = _mm512_xor_si512( chainv[3], chainv[1] );
MULT24W( chainv[0], chainv[1], MASK );
chainv[0] = _mm512_xor_si512( _mm512_xor_si512( chainv[0], t0 ), msg0 );
chainv[1] = _mm512_xor_si512( _mm512_xor_si512( chainv[1], t1 ), msg1 );
MULT24W( msg0, msg1, MASK );
chainv[2] = _mm512_xor_si512( chainv[2], msg0 );
chainv[3] = _mm512_xor_si512( chainv[3], msg1 );
MULT24W( msg0, msg1, MASK );
chainv[4] = _mm512_xor_si512( chainv[4], msg0 );
chainv[5] = _mm512_xor_si512( chainv[5], msg1 );
MULT24W( chainv[2], chainv[3], MASK );
chainv[2] = _mm512_xor_si512( chainv[2], chainv[0] );
chainv[3] = _mm512_xor_si512( chainv[3], chainv[1] );
MULT24W( chainv[0], chainv[1], MASK );
chainv[0] = _mm512_xor_si512( _mm512_xor_si512( chainv[0], t0 ), msg0 );
chainv[1] = _mm512_xor_si512( _mm512_xor_si512( chainv[1], t1 ), msg1 );
MULT24W( msg0, msg1, MASK );
chainv[2] = _mm512_xor_si512( chainv[2], msg0 );
chainv[3] = _mm512_xor_si512( chainv[3], msg1 );
MULT24W( msg0, msg1, MASK );
chainv[4] = _mm512_xor_si512( chainv[4], msg0 );
chainv[5] = _mm512_xor_si512( chainv[5], msg1 );
MULT24W( msg0, msg1, MASK );
chainv[6] = _mm512_xor_si512( chainv[6], msg0 );
chainv[7] = _mm512_xor_si512( chainv[7], msg1 );
MULT24W( msg0, msg1, MASK );
chainv[8] = _mm512_xor_si512( chainv[8], msg0 );
chainv[9] = _mm512_xor_si512( chainv[9], msg1 );
MULT24W( msg0, msg1, MASK );
// TODO: replace the shift/or pairs below with 32-bit rotates (_mm512_rol_epi32).
chainv[3] = _mm512_or_si512( _mm512_slli_epi32( chainv[3], 1 ),
_mm512_srli_epi32( chainv[3], 31 ) );
chainv[5] = _mm512_or_si512( _mm512_slli_epi32( chainv[5], 2 ),
_mm512_srli_epi32( chainv[5], 30 ) );
chainv[7] = _mm512_or_si512( _mm512_slli_epi32( chainv[7], 3 ),
_mm512_srli_epi32( chainv[7], 29 ) );
chainv[9] = _mm512_or_si512( _mm512_slli_epi32( chainv[9], 4 ),
_mm512_srli_epi32( chainv[9], 28 ) );
NMLTOM10244W( chainv[0], chainv[2], chainv[4], chainv[6],
x[0], x[1], x[2], x[3],
chainv[1],chainv[3],chainv[5],chainv[7],
x[4], x[5], x[6], x[7] );
STEP_PART4W( &x[0], cns4w( 0), cns4w( 1), &tmp[0] );
STEP_PART4W( &x[0], cns4w( 2), cns4w( 3), &tmp[0] );
STEP_PART4W( &x[0], cns4w( 4), cns4w( 5), &tmp[0] );
STEP_PART4W( &x[0], cns4w( 6), cns4w( 7), &tmp[0] );
STEP_PART4W( &x[0], cns4w( 8), cns4w( 9), &tmp[0] );
STEP_PART4W( &x[0], cns4w(10), cns4w(11), &tmp[0] );
STEP_PART4W( &x[0], cns4w(12), cns4w(13), &tmp[0] );
STEP_PART4W( &x[0], cns4w(14), cns4w(15), &tmp[0] );
MIXTON10244W( x[0], x[1], x[2], x[3],
chainv[0], chainv[2], chainv[4],chainv[6],
x[4], x[5], x[6], x[7],
chainv[1],chainv[3],chainv[5],chainv[7]);
/* Process last 256-bit block */
STEP_PART24W( chainv[8], chainv[9], t0, t1, cns4w(16), cns4w(17),
tmp[0], tmp[1] );
STEP_PART24W( chainv[8], chainv[9], t0, t1, cns4w(18), cns4w(19),
tmp[0], tmp[1] );
STEP_PART24W( chainv[8], chainv[9], t0, t1, cns4w(20), cns4w(21),
tmp[0], tmp[1] );
STEP_PART24W( chainv[8], chainv[9], t0, t1, cns4w(22), cns4w(23),
tmp[0], tmp[1] );
STEP_PART24W( chainv[8], chainv[9], t0, t1, cns4w(24), cns4w(25),
tmp[0], tmp[1] );
STEP_PART24W( chainv[8], chainv[9], t0, t1, cns4w(26), cns4w(27),
tmp[0], tmp[1] );
STEP_PART24W( chainv[8], chainv[9], t0, t1, cns4w(28), cns4w(29),
tmp[0], tmp[1] );
STEP_PART24W( chainv[8], chainv[9], t0, t1, cns4w(30), cns4w(31),
tmp[0], tmp[1] );
}
void finalization512_4way( luffa_4way_context *state, uint32 *b )
{
uint32 hash[8*4] __attribute((aligned(128)));
__m512i* chainv = state->chainv;
__m512i t[2];
__m512i zero[2];
zero[0] = zero[1] = m512_zero;
const __m512i shuff_bswap32 = m512_const_64(
0x3c3d3e3f38393a3b, 0x3435363730313233,
0x2c2d2e2f28292a2b, 0x2425262720212223,
0x1c1d1e1f18191a1b, 0x1415161710111213,
0x0c0d0e0f08090a0b, 0x0405060700010203 );
/*---- blank round with m=0 ----*/
rnd512_4way( state, zero );
t[0] = chainv[0];
t[1] = chainv[1];
t[0] = _mm512_xor_si512( t[0], chainv[2] );
t[1] = _mm512_xor_si512( t[1], chainv[3] );
t[0] = _mm512_xor_si512( t[0], chainv[4] );
t[1] = _mm512_xor_si512( t[1], chainv[5] );
t[0] = _mm512_xor_si512( t[0], chainv[6] );
t[1] = _mm512_xor_si512( t[1], chainv[7] );
t[0] = _mm512_xor_si512( t[0], chainv[8] );
t[1] = _mm512_xor_si512( t[1], chainv[9] );
t[0] = _mm512_shuffle_epi32( t[0], 27 );
t[1] = _mm512_shuffle_epi32( t[1], 27 );
_mm512_store_si512( (__m512i*)&hash[0], t[0] );
_mm512_store_si512( (__m512i*)&hash[8], t[1] );
casti_m512i( b, 0 ) = _mm512_shuffle_epi8(
casti_m512i( hash, 0 ), shuff_bswap32 );
casti_m512i( b, 1 ) = _mm512_shuffle_epi8(
casti_m512i( hash, 1 ), shuff_bswap32 );
rnd512_4way( state, zero );
t[0] = chainv[0];
t[1] = chainv[1];
t[0] = _mm512_xor_si512( t[0], chainv[2] );
t[1] = _mm512_xor_si512( t[1], chainv[3] );
t[0] = _mm512_xor_si512( t[0], chainv[4] );
t[1] = _mm512_xor_si512( t[1], chainv[5] );
t[0] = _mm512_xor_si512( t[0], chainv[6] );
t[1] = _mm512_xor_si512( t[1], chainv[7] );
t[0] = _mm512_xor_si512( t[0], chainv[8] );
t[1] = _mm512_xor_si512( t[1], chainv[9] );
t[0] = _mm512_shuffle_epi32( t[0], 27 );
t[1] = _mm512_shuffle_epi32( t[1], 27 );
_mm512_store_si512( (__m512i*)&hash[0], t[0] );
_mm512_store_si512( (__m512i*)&hash[8], t[1] );
casti_m512i( b, 2 ) = _mm512_shuffle_epi8(
casti_m512i( hash, 0 ), shuff_bswap32 );
casti_m512i( b, 3 ) = _mm512_shuffle_epi8(
casti_m512i( hash, 1 ), shuff_bswap32 );
}
int luffa_4way_init( luffa_4way_context *state, int hashbitlen )
{
state->hashbitlen = hashbitlen;
__m128i *iv = (__m128i*)IV;
state->chainv[0] = m512_const1_128( iv[0] );
state->chainv[1] = m512_const1_128( iv[1] );
state->chainv[2] = m512_const1_128( iv[2] );
state->chainv[3] = m512_const1_128( iv[3] );
state->chainv[4] = m512_const1_128( iv[4] );
state->chainv[5] = m512_const1_128( iv[5] );
state->chainv[6] = m512_const1_128( iv[6] );
state->chainv[7] = m512_const1_128( iv[7] );
state->chainv[8] = m512_const1_128( iv[8] );
state->chainv[9] = m512_const1_128( iv[9] );
((__m512i*)state->buffer)[0] = m512_zero;
((__m512i*)state->buffer)[1] = m512_zero;
return 0;
}
// Do not call luffa_4way_update_close after having called luffa_4way_update.
// Once luffa_4way_update has been called, only call luffa_4way_update or
// luffa_4way_close.
int luffa_4way_update( luffa_4way_context *state, const void *data,
size_t len )
{
__m512i *vdata = (__m512i*)data;
__m512i *buffer = (__m512i*)state->buffer;
__m512i msg[2];
int i;
int blocks = (int)len >> 5;
const __m512i shuff_bswap32 = m512_const_64(
0x3c3d3e3f38393a3b, 0x3435363730313233,
0x2c2d2e2f28292a2b, 0x2425262720212223,
0x1c1d1e1f18191a1b, 0x1415161710111213,
0x0c0d0e0f08090a0b, 0x0405060700010203 );
state->rembytes = (int)len & 0x1F;
// full blocks
for ( i = 0; i < blocks; i++, vdata+=2 )
{
msg[0] = _mm512_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = _mm512_shuffle_epi8( vdata[ 1 ], shuff_bswap32 );
rnd512_4way( state, msg );
}
// A 16-byte partial block exists for an 80-byte length.
// Store it in the buffer; it is transformed in close so midstate works.
if ( state->rembytes )
{
// remaining data bytes
buffer[0] = _mm512_shuffle_epi8( vdata[0], shuff_bswap32 );
buffer[1] = m512_const2_64( 0, 0x0000000080000000 );
}
return 0;
}
int luffa_4way_close( luffa_4way_context *state, void *hashval )
{
__m512i *buffer = (__m512i*)state->buffer;
__m512i msg[2];
// transform pad block
if ( state->rembytes )
// not empty, data is in buffer
rnd512_4way( state, buffer );
else
{ // empty pad block, constant data
msg[0] = m512_const2_64( 0, 0x0000000080000000 );
msg[1] = m512_zero;
rnd512_4way( state, msg );
}
finalization512_4way( state, (uint32*)hashval );
if ( state->hashbitlen > 512 )
finalization512_4way( state, (uint32*)( hashval+32 ) );
return 0;
}
int luffa_4way_update_close( luffa_4way_context *state,
void *output, const void *data, size_t inlen )
{
// Optimized for input lengths that are integral multiples of 16 bytes,
// such as 64 and 80 bytes.
const __m512i *vdata = (__m512i*)data;
__m512i msg[2];
int i;
const int blocks = (int)( inlen >> 5 );
const __m512i shuff_bswap32 = m512_const_64(
0x3c3d3e3f38393a3b, 0x3435363730313233,
0x2c2d2e2f28292a2b, 0x2425262720212223,
0x1c1d1e1f18191a1b, 0x1415161710111213,
0x0c0d0e0f08090a0b, 0x0405060700010203 );
state->rembytes = inlen & 0x1F;
// full blocks
for ( i = 0; i < blocks; i++, vdata+=2 )
{
msg[0] = _mm512_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = _mm512_shuffle_epi8( vdata[ 1 ], shuff_bswap32 );
rnd512_4way( state, msg );
}
// 16 byte partial block exists for 80 byte len
if ( state->rembytes )
{
// padding of partial block
msg[0] = _mm512_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = m512_const2_64( 0, 0x0000000080000000 );
rnd512_4way( state, msg );
}
else
{
// empty pad block
msg[0] = m512_const2_64( 0, 0x0000000080000000 );
msg[1] = m512_zero;
rnd512_4way( state, msg );
}
finalization512_4way( state, (uint32*)output );
if ( state->hashbitlen > 512 )
finalization512_4way( state, (uint32*)( output+32 ) );
return 0;
}
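// Example (an illustrative sketch, not part of the upstream code): hashing an
// 80-byte block header per lane with a shared midstate, following the calling
// convention noted above. "header4x" is a hypothetical buffer of 4-way
// interleaved headers (4 * 80 = 320 bytes); assumes <string.h> for memcpy.
static inline void luffa512_4way_hash80_example( void *hash,
                                                 const void *header4x )
{
   luffa_4way_context mid, ctx;
   luffa_4way_init( &mid, 512 );
   // The first 64 bytes per lane are nonce-independent: hash them once.
   luffa_4way_update( &mid, header4x, 64 );
   // Per nonce: copy the midstate, hash the final 16 bytes, then close.
   memcpy( &ctx, &mid, sizeof mid );
   luffa_4way_update( &ctx, (const char*)header4x + 64*4, 16 );
   luffa_4way_close( &ctx, hash );
}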
#endif // AVX512
#define cns(i) m256_const1_128( ( (__m128i*)CNS_INIT)[i] )
#define ADD_CONSTANT(a,b,c0,c1)\
a = _mm256_xor_si256(a,c0);\
b = _mm256_xor_si256(b,c1);\
b = _mm256_xor_si256(b,c1);
#define MULT2( a0, a1, mask ) \
do { \
@@ -115,7 +695,7 @@ do { \
s2 = _mm256_load_si256(&r0);\
q2 = _mm256_load_si256(&p0);\
s3 = _mm256_load_si256(&r2);\
q3 = _mm256_load_si256(&p2);\
q3 = _mm256_load_si256(&p2);
#define MIXTON768(r0,r1,r2,r3,s0,s1,s2,p0,p1,p2,p3,q0,q1,q2)\
s0 = _mm256_load_si256(&r0);\
@@ -174,57 +754,6 @@ do { \
#define MIXTON1024(r0,r1,r2,r3,s0,s1,s2,s3,p0,p1,p2,p3,q0,q1,q2,q3)\
NMLTOM1024(r0,r1,r2,r3,s0,s1,s2,s3,p0,p1,p2,p3,q0,q1,q2,q3);
/* initial values of chaining variables */
static const uint32 IV[40] __attribute((aligned(32))) = {
0xdbf78465,0x4eaa6fb4,0x44b051e0,0x6d251e69,
0xdef610bb,0xee058139,0x90152df4,0x6e292011,
0xde099fa3,0x70eee9a0,0xd9d2f256,0xc3b44b95,
0x746cd581,0xcf1ccf0e,0x8fc944b3,0x5d9b0557,
0xad659c05,0x04016ce5,0x5dba5781,0xf7efc89d,
0x8b264ae7,0x24aa230a,0x666d1836,0x0306194f,
0x204b1f67,0xe571f7d7,0x36d79cce,0x858075d5,
0x7cde72ce,0x14bcb808,0x57e9e923,0x35870c6a,
0xaffb4363,0xc825b7c7,0x5ec41e22,0x6c68e9be,
0x03e86cea,0xb07224cc,0x0fc688f1,0xf5df3999
};
/* Round Constants */
static const uint32 CNS_INIT[128] __attribute((aligned(32))) = {
0xb213afa5,0xfc20d9d2,0xb6de10ed,0x303994a6,
0xe028c9bf,0xe25e72c1,0x01685f3d,0xe0337818,
0xc84ebe95,0x34552e25,0x70f47aae,0xc0e65299,
0x44756f91,0xe623bb72,0x05a17cf4,0x441ba90d,
0x4e608a22,0x7ad8818f,0x0707a3d4,0x6cc33a12,
0x7e8fce32,0x5c58a4a4,0xbd09caca,0x7f34d442,
0x56d858fe,0x8438764a,0x1c1e8f51,0xdc56983e,
0x956548be,0x1e38e2e7,0xf4272b28,0x9389217f,
0x343b138f,0xbb6de032,0x707a3d45,0x1e00108f,
0xfe191be2,0x78e38b9d,0x144ae5cc,0xe5a8bce6,
0xd0ec4e3d,0xedb780c8,0xaeb28562,0x7800423d,
0x3cb226e5,0x27586719,0xfaa7ae2b,0x5274baf4,
0x2ceb4882,0xd9847356,0xbaca1589,0x8f5b7882,
0x5944a28e,0x36eda57f,0x2e48f1c1,0x26889ba7,
0xb3ad2208,0xa2c78434,0x40a46f3e,0x96e1db12,
0xa1c4c355,0x703aace7,0xb923c704,0x9a226e9d,
0x00000000,0x00000000,0x00000000,0xf0d2e9e3,
0x00000000,0x00000000,0x00000000,0x5090d577,
0x00000000,0x00000000,0x00000000,0xac11d7fa,
0x00000000,0x00000000,0x00000000,0x2d1925ab,
0x00000000,0x00000000,0x00000000,0x1bcb66f2,
0x00000000,0x00000000,0x00000000,0xb46496ac,
0x00000000,0x00000000,0x00000000,0x6f2d9bc9,
0x00000000,0x00000000,0x00000000,0xd1925ab0,
0x00000000,0x00000000,0x00000000,0x78602649,
0x00000000,0x00000000,0x00000000,0x29131ab6,
0x00000000,0x00000000,0x00000000,0x8edae952,
0x00000000,0x00000000,0x00000000,0x0fc053c3,
0x00000000,0x00000000,0x00000000,0x3b6ba548,
0x00000000,0x00000000,0x00000000,0x3f014f0c,
0x00000000,0x00000000,0x00000000,0xedae9520,
0x00000000,0x00000000,0x00000000,0xfc053c31
};
/***************************************************/
/* Round function */
@@ -385,12 +914,14 @@ void rnd512_2way( luffa_2way_context *state, __m256i *msg )
void finalization512_2way( luffa_2way_context *state, uint32 *b )
{
uint32 hash[8] __attribute((aligned(64)));
uint32 hash[8*2] __attribute((aligned(64)));
__m256i* chainv = state->chainv;
__m256i t[2];
__m256i zero[2];
zero[0] = zero[1] = m256_zero;
const __m256i shuff_bswap32 = m256_const2_64( 0x0c0d0e0f08090a0b,
const __m256i shuff_bswap32 = m256_const_64( 0x1c1d1e1f18191a1b,
0x1415161710111213,
0x0c0d0e0f08090a0b,
0x0405060700010203 );
/*---- blank round with m=0 ----*/
rnd512_2way( state, zero );
@@ -475,7 +1006,9 @@ int luffa_2way_update( luffa_2way_context *state, const void *data,
__m256i msg[2];
int i;
int blocks = (int)len >> 5;
const __m256i shuff_bswap32 = m256_const2_64( 0x0c0d0e0f08090a0b,
const __m256i shuff_bswap32 = m256_const_64( 0x1c1d1e1f18191a1b,
0x1415161710111213,
0x0c0d0e0f08090a0b,
0x0405060700010203 );
state->rembytes = (int)len & 0x1F;
@@ -528,7 +1061,9 @@ int luffa_2way_update_close( luffa_2way_context *state,
__m256i msg[2];
int i;
const int blocks = (int)( inlen >> 5 );
const __m256i shuff_bswap32 = m256_const2_64( 0x0c0d0e0f08090a0b,
const __m256i shuff_bswap32 = m256_const_64( 0x1c1d1e1f18191a1b,
0x1415161710111213,
0x0c0d0e0f08090a0b,
0x0405060700010203 );
state->rembytes = inlen & 0x1F;

View File

@@ -0,0 +1,573 @@
#include <string.h>
#include <immintrin.h>
#include "luffa-hash-2way.h"
#if defined(__AVX2__)
#include "simd-utils.h"
#define cns(i) m256_const1_128( ( (__m128i*)CNS_INIT)[i] )
#define ADD_CONSTANT(a,b,c0,c1)\
a = _mm256_xor_si256(a,c0);\
b = _mm256_xor_si256(b,c1);
#define MULT2( a0, a1, mask ) \
do { \
__m256i b = _mm256_xor_si256( a0, \
_mm256_shuffle_epi32( _mm256_and_si256(a1,mask), 16 ) ); \
a0 = _mm256_or_si256( _mm256_srli_si256(b,4), _mm256_slli_si256(a1,12) ); \
a1 = _mm256_or_si256( _mm256_srli_si256(a1,4), _mm256_slli_si256(b,12) ); \
} while(0)
// Pointer arithmetic below is confirmed correct, but array indexing
// would be clearer.
#define STEP_PART(x,c0,c1,t)\
SUBCRUMB(*x,*(x+1),*(x+2),*(x+3),*t);\
SUBCRUMB(*(x+5),*(x+6),*(x+7),*(x+4),*t);\
MIXWORD(*x,*(x+4),*t,*(t+1));\
MIXWORD(*(x+1),*(x+5),*t,*(t+1));\
MIXWORD(*(x+2),*(x+6),*t,*(t+1));\
MIXWORD(*(x+3),*(x+7),*t,*(t+1));\
ADD_CONSTANT(*x, *(x+4), c0, c1);
#define SUBCRUMB(a0,a1,a2,a3,t)\
t = _mm256_load_si256(&a0);\
a0 = _mm256_or_si256(a0,a1);\
a2 = _mm256_xor_si256(a2,a3);\
a1 = _mm256_andnot_si256(a1, m256_neg1 );\
a0 = _mm256_xor_si256(a0,a3);\
a3 = _mm256_and_si256(a3,t);\
a1 = _mm256_xor_si256(a1,a3);\
a3 = _mm256_xor_si256(a3,a2);\
a2 = _mm256_and_si256(a2,a0);\
a0 = _mm256_andnot_si256(a0, m256_neg1 );\
a2 = _mm256_xor_si256(a2,a1);\
a1 = _mm256_or_si256(a1,a3);\
t = _mm256_xor_si256(t,a1);\
a3 = _mm256_xor_si256(a3,a2);\
a2 = _mm256_and_si256(a2,a1);\
a1 = _mm256_xor_si256(a1,a0);\
a0 = _mm256_load_si256(&t);
#define MIXWORD(a,b,t1,t2)\
b = _mm256_xor_si256(a,b);\
t1 = _mm256_slli_epi32(a,2);\
t2 = _mm256_srli_epi32(a,30);\
a = _mm256_or_si256(t1,t2);\
a = _mm256_xor_si256(a,b);\
t1 = _mm256_slli_epi32(b,14);\
t2 = _mm256_srli_epi32(b,18);\
b = _mm256_or_si256(t1,t2);\
b = _mm256_xor_si256(a,b);\
t1 = _mm256_slli_epi32(a,10);\
t2 = _mm256_srli_epi32(a,22);\
a = _mm256_or_si256(t1,t2);\
a = _mm256_xor_si256(a,b);\
t1 = _mm256_slli_epi32(b,1);\
t2 = _mm256_srli_epi32(b,31);\
b = _mm256_or_si256(t1,t2);
#define STEP_PART2(a0,a1,t0,t1,c0,c1,tmp0,tmp1)\
a1 = _mm256_shuffle_epi32(a1,147);\
t0 = _mm256_load_si256(&a1);\
a1 = _mm256_unpacklo_epi32(a1,a0);\
t0 = _mm256_unpackhi_epi32(t0,a0);\
t1 = _mm256_shuffle_epi32(t0,78);\
a0 = _mm256_shuffle_epi32(a1,78);\
SUBCRUMB(t1,t0,a0,a1,tmp0);\
t0 = _mm256_unpacklo_epi32(t0,t1);\
a1 = _mm256_unpacklo_epi32(a1,a0);\
a0 = _mm256_load_si256(&a1);\
a0 = _mm256_unpackhi_epi64(a0,t0);\
a1 = _mm256_unpacklo_epi64(a1,t0);\
a1 = _mm256_shuffle_epi32(a1,57);\
MIXWORD(a0,a1,tmp0,tmp1);\
ADD_CONSTANT(a0,a1,c0,c1);
#define NMLTOM768(r0,r1,r2,s0,s1,s2,s3,p0,p1,p2,q0,q1,q2,q3)\
s2 = _mm256_load_si256(&r1);\
q2 = _mm256_load_si256(&p1);\
r2 = _mm256_shuffle_epi32(r2,216);\
p2 = _mm256_shuffle_epi32(p2,216);\
r1 = _mm256_unpacklo_epi32(r1,r0);\
p1 = _mm256_unpacklo_epi32(p1,p0);\
s2 = _mm256_unpackhi_epi32(s2,r0);\
q2 = _mm256_unpackhi_epi32(q2,p0);\
s0 = _mm256_load_si256(&r2);\
q0 = _mm256_load_si256(&p2);\
r2 = _mm256_unpacklo_epi64(r2,r1);\
p2 = _mm256_unpacklo_epi64(p2,p1);\
s1 = _mm256_load_si256(&s0);\
q1 = _mm256_load_si256(&q0);\
s0 = _mm256_unpackhi_epi64(s0,r1);\
q0 = _mm256_unpackhi_epi64(q0,p1);\
r2 = _mm256_shuffle_epi32(r2,225);\
p2 = _mm256_shuffle_epi32(p2,225);\
r0 = _mm256_load_si256(&s1);\
p0 = _mm256_load_si256(&q1);\
s0 = _mm256_shuffle_epi32(s0,225);\
q0 = _mm256_shuffle_epi32(q0,225);\
s1 = _mm256_unpacklo_epi64(s1,s2);\
q1 = _mm256_unpacklo_epi64(q1,q2);\
r0 = _mm256_unpackhi_epi64(r0,s2);\
p0 = _mm256_unpackhi_epi64(p0,q2);\
s2 = _mm256_load_si256(&r0);\
q2 = _mm256_load_si256(&p0);\
s3 = _mm256_load_si256(&r2);\
q3 = _mm256_load_si256(&p2);
#define MIXTON768(r0,r1,r2,r3,s0,s1,s2,p0,p1,p2,p3,q0,q1,q2)\
s0 = _mm256_load_si256(&r0);\
q0 = _mm256_load_si256(&p0);\
s1 = _mm256_load_si256(&r2);\
q1 = _mm256_load_si256(&p2);\
r0 = _mm256_unpackhi_epi32(r0,r1);\
p0 = _mm256_unpackhi_epi32(p0,p1);\
r2 = _mm256_unpackhi_epi32(r2,r3);\
p2 = _mm256_unpackhi_epi32(p2,p3);\
s0 = _mm256_unpacklo_epi32(s0,r1);\
q0 = _mm256_unpacklo_epi32(q0,p1);\
s1 = _mm256_unpacklo_epi32(s1,r3);\
q1 = _mm256_unpacklo_epi32(q1,p3);\
r1 = _mm256_load_si256(&r0);\
p1 = _mm256_load_si256(&p0);\
r0 = _mm256_unpackhi_epi64(r0,r2);\
p0 = _mm256_unpackhi_epi64(p0,p2);\
s0 = _mm256_unpackhi_epi64(s0,s1);\
q0 = _mm256_unpackhi_epi64(q0,q1);\
r1 = _mm256_unpacklo_epi64(r1,r2);\
p1 = _mm256_unpacklo_epi64(p1,p2);\
s2 = _mm256_load_si256(&r0);\
q2 = _mm256_load_si256(&p0);\
s1 = _mm256_load_si256(&r1);\
q1 = _mm256_load_si256(&p1);
#define NMLTOM1024(r0,r1,r2,r3,s0,s1,s2,s3,p0,p1,p2,p3,q0,q1,q2,q3)\
s1 = _mm256_load_si256(&r3);\
q1 = _mm256_load_si256(&p3);\
s3 = _mm256_load_si256(&r3);\
q3 = _mm256_load_si256(&p3);\
s1 = _mm256_unpackhi_epi32(s1,r2);\
q1 = _mm256_unpackhi_epi32(q1,p2);\
s3 = _mm256_unpacklo_epi32(s3,r2);\
q3 = _mm256_unpacklo_epi32(q3,p2);\
s0 = _mm256_load_si256(&s1);\
q0 = _mm256_load_si256(&q1);\
s2 = _mm256_load_si256(&s3);\
q2 = _mm256_load_si256(&q3);\
r3 = _mm256_load_si256(&r1);\
p3 = _mm256_load_si256(&p1);\
r1 = _mm256_unpacklo_epi32(r1,r0);\
p1 = _mm256_unpacklo_epi32(p1,p0);\
r3 = _mm256_unpackhi_epi32(r3,r0);\
p3 = _mm256_unpackhi_epi32(p3,p0);\
s0 = _mm256_unpackhi_epi64(s0,r3);\
q0 = _mm256_unpackhi_epi64(q0,p3);\
s1 = _mm256_unpacklo_epi64(s1,r3);\
q1 = _mm256_unpacklo_epi64(q1,p3);\
s2 = _mm256_unpackhi_epi64(s2,r1);\
q2 = _mm256_unpackhi_epi64(q2,p1);\
s3 = _mm256_unpacklo_epi64(s3,r1);\
q3 = _mm256_unpacklo_epi64(q3,p1);
#define MIXTON1024(r0,r1,r2,r3,s0,s1,s2,s3,p0,p1,p2,p3,q0,q1,q2,q3)\
NMLTOM1024(r0,r1,r2,r3,s0,s1,s2,s3,p0,p1,p2,p3,q0,q1,q2,q3);
/* initial values of chaining variables */
static const uint32 IV[40] __attribute((aligned(32))) = {
0xdbf78465,0x4eaa6fb4,0x44b051e0,0x6d251e69,
0xdef610bb,0xee058139,0x90152df4,0x6e292011,
0xde099fa3,0x70eee9a0,0xd9d2f256,0xc3b44b95,
0x746cd581,0xcf1ccf0e,0x8fc944b3,0x5d9b0557,
0xad659c05,0x04016ce5,0x5dba5781,0xf7efc89d,
0x8b264ae7,0x24aa230a,0x666d1836,0x0306194f,
0x204b1f67,0xe571f7d7,0x36d79cce,0x858075d5,
0x7cde72ce,0x14bcb808,0x57e9e923,0x35870c6a,
0xaffb4363,0xc825b7c7,0x5ec41e22,0x6c68e9be,
0x03e86cea,0xb07224cc,0x0fc688f1,0xf5df3999
};
/* Round Constants */
static const uint32 CNS_INIT[128] __attribute((aligned(32))) = {
0xb213afa5,0xfc20d9d2,0xb6de10ed,0x303994a6,
0xe028c9bf,0xe25e72c1,0x01685f3d,0xe0337818,
0xc84ebe95,0x34552e25,0x70f47aae,0xc0e65299,
0x44756f91,0xe623bb72,0x05a17cf4,0x441ba90d,
0x4e608a22,0x7ad8818f,0x0707a3d4,0x6cc33a12,
0x7e8fce32,0x5c58a4a4,0xbd09caca,0x7f34d442,
0x56d858fe,0x8438764a,0x1c1e8f51,0xdc56983e,
0x956548be,0x1e38e2e7,0xf4272b28,0x9389217f,
0x343b138f,0xbb6de032,0x707a3d45,0x1e00108f,
0xfe191be2,0x78e38b9d,0x144ae5cc,0xe5a8bce6,
0xd0ec4e3d,0xedb780c8,0xaeb28562,0x7800423d,
0x3cb226e5,0x27586719,0xfaa7ae2b,0x5274baf4,
0x2ceb4882,0xd9847356,0xbaca1589,0x8f5b7882,
0x5944a28e,0x36eda57f,0x2e48f1c1,0x26889ba7,
0xb3ad2208,0xa2c78434,0x40a46f3e,0x96e1db12,
0xa1c4c355,0x703aace7,0xb923c704,0x9a226e9d,
0x00000000,0x00000000,0x00000000,0xf0d2e9e3,
0x00000000,0x00000000,0x00000000,0x5090d577,
0x00000000,0x00000000,0x00000000,0xac11d7fa,
0x00000000,0x00000000,0x00000000,0x2d1925ab,
0x00000000,0x00000000,0x00000000,0x1bcb66f2,
0x00000000,0x00000000,0x00000000,0xb46496ac,
0x00000000,0x00000000,0x00000000,0x6f2d9bc9,
0x00000000,0x00000000,0x00000000,0xd1925ab0,
0x00000000,0x00000000,0x00000000,0x78602649,
0x00000000,0x00000000,0x00000000,0x29131ab6,
0x00000000,0x00000000,0x00000000,0x8edae952,
0x00000000,0x00000000,0x00000000,0x0fc053c3,
0x00000000,0x00000000,0x00000000,0x3b6ba548,
0x00000000,0x00000000,0x00000000,0x3f014f0c,
0x00000000,0x00000000,0x00000000,0xedae9520,
0x00000000,0x00000000,0x00000000,0xfc053c31
};
/***************************************************/
/* Round function */
/* state: hash context */
void rnd512_2way( luffa_2way_context *state, __m256i *msg )
{
__m256i t0, t1;
__m256i *chainv = state->chainv;
__m256i msg0, msg1;
__m256i tmp[2];
__m256i x[8];
const __m256i MASK = m256_const2_64( 0, 0x00000000ffffffff );
t0 = chainv[0];
t1 = chainv[1];
t0 = _mm256_xor_si256( t0, chainv[2] );
t1 = _mm256_xor_si256( t1, chainv[3] );
t0 = _mm256_xor_si256( t0, chainv[4] );
t1 = _mm256_xor_si256( t1, chainv[5] );
t0 = _mm256_xor_si256( t0, chainv[6] );
t1 = _mm256_xor_si256( t1, chainv[7] );
t0 = _mm256_xor_si256( t0, chainv[8] );
t1 = _mm256_xor_si256( t1, chainv[9] );
MULT2( t0, t1, MASK );
msg0 = _mm256_shuffle_epi32( msg[0], 27 );
msg1 = _mm256_shuffle_epi32( msg[1], 27 );
chainv[0] = _mm256_xor_si256( chainv[0], t0 );
chainv[1] = _mm256_xor_si256( chainv[1], t1 );
chainv[2] = _mm256_xor_si256( chainv[2], t0 );
chainv[3] = _mm256_xor_si256( chainv[3], t1 );
chainv[4] = _mm256_xor_si256( chainv[4], t0 );
chainv[5] = _mm256_xor_si256( chainv[5], t1 );
chainv[6] = _mm256_xor_si256( chainv[6], t0 );
chainv[7] = _mm256_xor_si256( chainv[7], t1 );
chainv[8] = _mm256_xor_si256( chainv[8], t0 );
chainv[9] = _mm256_xor_si256( chainv[9], t1 );
t0 = chainv[0];
t1 = chainv[1];
MULT2( chainv[0], chainv[1], MASK );
chainv[0] = _mm256_xor_si256( chainv[0], chainv[2] );
chainv[1] = _mm256_xor_si256( chainv[1], chainv[3] );
MULT2( chainv[2], chainv[3], MASK );
chainv[2] = _mm256_xor_si256(chainv[2], chainv[4]);
chainv[3] = _mm256_xor_si256(chainv[3], chainv[5]);
MULT2( chainv[4], chainv[5], MASK );
chainv[4] = _mm256_xor_si256(chainv[4], chainv[6]);
chainv[5] = _mm256_xor_si256(chainv[5], chainv[7]);
MULT2( chainv[6], chainv[7], MASK );
chainv[6] = _mm256_xor_si256(chainv[6], chainv[8]);
chainv[7] = _mm256_xor_si256(chainv[7], chainv[9]);
MULT2( chainv[8], chainv[9], MASK );
chainv[8] = _mm256_xor_si256( chainv[8], t0 );
chainv[9] = _mm256_xor_si256( chainv[9], t1 );
t0 = chainv[8];
t1 = chainv[9];
MULT2( chainv[8], chainv[9], MASK );
chainv[8] = _mm256_xor_si256( chainv[8], chainv[6] );
chainv[9] = _mm256_xor_si256( chainv[9], chainv[7] );
MULT2( chainv[6], chainv[7], MASK );
chainv[6] = _mm256_xor_si256( chainv[6], chainv[4] );
chainv[7] = _mm256_xor_si256( chainv[7], chainv[5] );
MULT2( chainv[4], chainv[5], MASK );
chainv[4] = _mm256_xor_si256( chainv[4], chainv[2] );
chainv[5] = _mm256_xor_si256( chainv[5], chainv[3] );
MULT2( chainv[2], chainv[3], MASK );
chainv[2] = _mm256_xor_si256( chainv[2], chainv[0] );
chainv[3] = _mm256_xor_si256( chainv[3], chainv[1] );
MULT2( chainv[0], chainv[1], MASK );
chainv[0] = _mm256_xor_si256( _mm256_xor_si256( chainv[0], t0 ), msg0 );
chainv[1] = _mm256_xor_si256( _mm256_xor_si256( chainv[1], t1 ), msg1 );
MULT2( msg0, msg1, MASK );
chainv[2] = _mm256_xor_si256( chainv[2], msg0 );
chainv[3] = _mm256_xor_si256( chainv[3], msg1 );
MULT2( msg0, msg1, MASK );
chainv[4] = _mm256_xor_si256( chainv[4], msg0 );
chainv[5] = _mm256_xor_si256( chainv[5], msg1 );
MULT2( msg0, msg1, MASK );
chainv[6] = _mm256_xor_si256( chainv[6], msg0 );
chainv[7] = _mm256_xor_si256( chainv[7], msg1 );
MULT2( msg0, msg1, MASK );
chainv[8] = _mm256_xor_si256( chainv[8], msg0 );
chainv[9] = _mm256_xor_si256( chainv[9], msg1 );
MULT2( msg0, msg1, MASK );
chainv[3] = _mm256_or_si256( _mm256_slli_epi32( chainv[3], 1 ),
_mm256_srli_epi32( chainv[3], 31 ) );
chainv[5] = _mm256_or_si256( _mm256_slli_epi32( chainv[5], 2 ),
_mm256_srli_epi32( chainv[5], 30 ) );
chainv[7] = _mm256_or_si256( _mm256_slli_epi32( chainv[7], 3 ),
_mm256_srli_epi32( chainv[7], 29 ) );
chainv[9] = _mm256_or_si256( _mm256_slli_epi32( chainv[9], 4 ),
_mm256_srli_epi32( chainv[9], 28 ) );
NMLTOM1024( chainv[0], chainv[2], chainv[4], chainv[6],
x[0], x[1], x[2], x[3],
chainv[1],chainv[3],chainv[5],chainv[7],
x[4], x[5], x[6], x[7] );
STEP_PART( &x[0], cns( 0), cns( 1), &tmp[0] );
STEP_PART( &x[0], cns( 2), cns( 3), &tmp[0] );
STEP_PART( &x[0], cns( 4), cns( 5), &tmp[0] );
STEP_PART( &x[0], cns( 6), cns( 7), &tmp[0] );
STEP_PART( &x[0], cns( 8), cns( 9), &tmp[0] );
STEP_PART( &x[0], cns(10), cns(11), &tmp[0] );
STEP_PART( &x[0], cns(12), cns(13), &tmp[0] );
STEP_PART( &x[0], cns(14), cns(15), &tmp[0] );
MIXTON1024( x[0], x[1], x[2], x[3],
chainv[0], chainv[2], chainv[4],chainv[6],
x[4], x[5], x[6], x[7],
chainv[1],chainv[3],chainv[5],chainv[7]);
/* Process last 256-bit block */
STEP_PART2( chainv[8], chainv[9], t0, t1, cns(16), cns(17),
tmp[0], tmp[1] );
STEP_PART2( chainv[8], chainv[9], t0, t1, cns(18), cns(19),
tmp[0], tmp[1] );
STEP_PART2( chainv[8], chainv[9], t0, t1, cns(20), cns(21),
tmp[0], tmp[1] );
STEP_PART2( chainv[8], chainv[9], t0, t1, cns(22), cns(23),
tmp[0], tmp[1] );
STEP_PART2( chainv[8], chainv[9], t0, t1, cns(24), cns(25),
tmp[0], tmp[1] );
STEP_PART2( chainv[8], chainv[9], t0, t1, cns(26), cns(27),
tmp[0], tmp[1] );
STEP_PART2( chainv[8], chainv[9], t0, t1, cns(28), cns(29),
tmp[0], tmp[1] );
STEP_PART2( chainv[8], chainv[9], t0, t1, cns(30), cns(31),
tmp[0], tmp[1] );
}
/***************************************************/
/* Finalization function */
/* state: hash context */
/* b[8]: hash values */
void finalization512_2way( luffa_2way_context *state, uint32 *b )
{
uint32 hash[8] __attribute((aligned(64)));
__m256i* chainv = state->chainv;
__m256i t[2];
__m256i zero[2];
zero[0] = zero[1] = m256_zero;
const __m256i shuff_bswap32 = m256_const_64( 0x1c1d1e1f18191a1b,
0x1415161710111213,
0x0c0d0e0f08090a0b,
0x0405060700010203 );
/*---- blank round with m=0 ----*/
rnd512_2way( state, zero );
t[0] = chainv[0];
t[1] = chainv[1];
t[0] = _mm256_xor_si256( t[0], chainv[2] );
t[1] = _mm256_xor_si256( t[1], chainv[3] );
t[0] = _mm256_xor_si256( t[0], chainv[4] );
t[1] = _mm256_xor_si256( t[1], chainv[5] );
t[0] = _mm256_xor_si256( t[0], chainv[6] );
t[1] = _mm256_xor_si256( t[1], chainv[7] );
t[0] = _mm256_xor_si256( t[0], chainv[8] );
t[1] = _mm256_xor_si256( t[1], chainv[9] );
t[0] = _mm256_shuffle_epi32( t[0], 27 );
t[1] = _mm256_shuffle_epi32( t[1], 27 );
_mm256_store_si256( (__m256i*)&hash[0], t[0] );
_mm256_store_si256( (__m256i*)&hash[8], t[1] );
casti_m256i( b, 0 ) = _mm256_shuffle_epi8(
casti_m256i( hash, 0 ), shuff_bswap32 );
casti_m256i( b, 1 ) = _mm256_shuffle_epi8(
casti_m256i( hash, 1 ), shuff_bswap32 );
rnd512_2way( state, zero );
t[0] = chainv[0];
t[1] = chainv[1];
t[0] = _mm256_xor_si256( t[0], chainv[2] );
t[1] = _mm256_xor_si256( t[1], chainv[3] );
t[0] = _mm256_xor_si256( t[0], chainv[4] );
t[1] = _mm256_xor_si256( t[1], chainv[5] );
t[0] = _mm256_xor_si256( t[0], chainv[6] );
t[1] = _mm256_xor_si256( t[1], chainv[7] );
t[0] = _mm256_xor_si256( t[0], chainv[8] );
t[1] = _mm256_xor_si256( t[1], chainv[9] );
t[0] = _mm256_shuffle_epi32( t[0], 27 );
t[1] = _mm256_shuffle_epi32( t[1], 27 );
_mm256_store_si256( (__m256i*)&hash[0], t[0] );
_mm256_store_si256( (__m256i*)&hash[8], t[1] );
casti_m256i( b, 2 ) = _mm256_shuffle_epi8(
casti_m256i( hash, 0 ), shuff_bswap32 );
casti_m256i( b, 3 ) = _mm256_shuffle_epi8(
casti_m256i( hash, 1 ), shuff_bswap32 );
}
int luffa_2way_init( luffa_2way_context *state, int hashbitlen )
{
state->hashbitlen = hashbitlen;
__m128i *iv = (__m128i*)IV;
state->chainv[0] = m256_const1_128( iv[0] );
state->chainv[1] = m256_const1_128( iv[1] );
state->chainv[2] = m256_const1_128( iv[2] );
state->chainv[3] = m256_const1_128( iv[3] );
state->chainv[4] = m256_const1_128( iv[4] );
state->chainv[5] = m256_const1_128( iv[5] );
state->chainv[6] = m256_const1_128( iv[6] );
state->chainv[7] = m256_const1_128( iv[7] );
state->chainv[8] = m256_const1_128( iv[8] );
state->chainv[9] = m256_const1_128( iv[9] );
((__m256i*)state->buffer)[0] = m256_zero;
((__m256i*)state->buffer)[1] = m256_zero;
return 0;
}
// Do not call luffa_2way_update_close after having called luffa_2way_update.
// Once luffa_2way_update has been called, only call luffa_2way_update or
// luffa_2way_close.
int luffa_2way_update( luffa_2way_context *state, const void *data,
size_t len )
{
__m256i *vdata = (__m256i*)data;
__m256i *buffer = (__m256i*)state->buffer;
__m256i msg[2];
int i;
int blocks = (int)len >> 5;
const __m256i shuff_bswap32 = m256_const_64( 0x1c1d1e1f18191a1b,
0x1415161710111213,
0x0c0d0e0f08090a0b,
0x0405060700010203 );
state->rembytes = (int)len & 0x1F;
// full blocks
for ( i = 0; i < blocks; i++, vdata+=2 )
{
msg[0] = _mm256_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = _mm256_shuffle_epi8( vdata[ 1 ], shuff_bswap32 );
rnd512_2way( state, msg );
}
// A 16-byte partial block exists for an 80-byte length.
// Store it in the buffer; it is transformed in close so midstate works.
if ( state->rembytes )
{
// remaining data bytes
buffer[0] = _mm256_shuffle_epi8( vdata[0], shuff_bswap32 );
buffer[1] = m256_const2_64( 0, 0x0000000080000000 );
}
return 0;
}
int luffa_2way_close( luffa_2way_context *state, void *hashval )
{
__m256i *buffer = (__m256i*)state->buffer;
__m256i msg[2];
// transform pad block
if ( state->rembytes )
// not empty, data is in buffer
rnd512_2way( state, buffer );
else
{ // empty pad block, constant data
msg[0] = m256_const2_64( 0, 0x0000000080000000 );
msg[1] = m256_zero;
rnd512_2way( state, msg );
}
finalization512_2way( state, (uint32*)hashval );
if ( state->hashbitlen > 512 )
finalization512_2way( state, (uint32*)( hashval+32 ) );
return 0;
}
int luffa_2way_update_close( luffa_2way_context *state,
void *output, const void *data, size_t inlen )
{
// Optimized for input lengths that are integral multiples of 16 bytes,
// such as 64 and 80 bytes.
const __m256i *vdata = (__m256i*)data;
__m256i msg[2];
int i;
const int blocks = (int)( inlen >> 5 );
const __m256i shuff_bswap32 = m256_const_64( 0x1c1d1e1f18191a1b,
0x1415161710111213,
0x0c0d0e0f08090a0b,
0x0405060700010203 );
state->rembytes = inlen & 0x1F;
// full blocks
for ( i = 0; i < blocks; i++, vdata+=2 )
{
msg[0] = _mm256_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = _mm256_shuffle_epi8( vdata[ 1 ], shuff_bswap32 );
rnd512_2way( state, msg );
}
// 16 byte partial block exists for 80 byte len
if ( state->rembytes )
{
// padding of partial block
msg[0] = _mm256_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = m256_const2_64( 0, 0x0000000080000000 );
rnd512_2way( state, msg );
}
else
{
// empty pad block
msg[0] = m256_const2_64( 0, 0x0000000080000000 );
msg[1] = m256_zero;
rnd512_2way( state, msg );
}
finalization512_2way( state, (uint32*)output );
if ( state->hashbitlen > 512 )
finalization512_2way( state, (uint32*)( output+32 ) );
return 0;
}
#endif

View File

@@ -51,12 +51,30 @@
#define LIMIT_512 128
/*********************************/
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
typedef struct {
uint32 buffer[8*2] __attribute((aligned(64)));
__m256i chainv[10] __attribute((aligned(32))); /* Chaining values */
uint32 buffer[8*4];
__m512i chainv[10]; /* Chaining values */
int hashbitlen;
int rembytes;
} luffa_2way_context;
} luffa_4way_context __attribute((aligned(128)));
int luffa_4way_init( luffa_4way_context *state, int hashbitlen );
int luffa_4way_update( luffa_4way_context *state, const void *data,
size_t len );
int luffa_4way_close( luffa_4way_context *state, void *hashval );
int luffa_4way_update_close( luffa_4way_context *state, void *output,
const void *data, size_t inlen );
#endif
typedef struct {
uint32 buffer[8*2];
__m256i chainv[10]; /* Chaining values */
int hashbitlen;
int rembytes;
} luffa_2way_context __attribute((aligned(128)));
int luffa_2way_init( luffa_2way_context *state, int hashbitlen );
int luffa_2way_update( luffa_2way_context *state, const void *data,

View File

@@ -0,0 +1,69 @@
#if !defined(LUFFA_HASH_2WAY_H__)
#define LUFFA_HASH_2WAY_H__ 1
/*
* luffa_for_sse2.h
* Version 2.0 (Sep 15th 2009)
*
* Copyright (C) 2008-2009 Hitachi, Ltd. All rights reserved.
*
* Hitachi, Ltd. is the owner of this software and hereby grant
* the U.S. Government and any interested party the right to use
* this software for the purposes of the SHA-3 evaluation process,
* notwithstanding that this software is copyrighted.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#if defined(__AVX2__)
#include <immintrin.h>
#include "algo/sha/sha3-defs.h"
#include "simd-utils.h"
/* The length of digests*/
#define DIGEST_BIT_LEN_224 224
#define DIGEST_BIT_LEN_256 256
#define DIGEST_BIT_LEN_384 384
#define DIGEST_BIT_LEN_512 512
/*********************************/
/* The parameters of Luffa */
#define MSG_BLOCK_BIT_LEN 256 /*The bit length of a message block*/
#define MSG_BLOCK_BYTE_LEN (MSG_BLOCK_BIT_LEN >> 3) /* The byte length
* of a message block*/
/* The number of blocks in Luffa */
#define WIDTH_224 3
#define WIDTH_256 3
#define WIDTH_384 4
#define WIDTH_512 5
/* The limit of the length of message */
#define LIMIT_224 64
#define LIMIT_256 64
#define LIMIT_384 128
#define LIMIT_512 128
/*********************************/
typedef struct {
uint32 buffer[8*2] __attribute((aligned(64)));
__m256i chainv[10] __attribute((aligned(32))); /* Chaining values */
int hashbitlen;
int rembytes;
} luffa_2way_context;
int luffa_2way_init( luffa_2way_context *state, int hashbitlen );
int luffa_2way_update( luffa_2way_context *state, const void *data,
size_t len );
int luffa_2way_close( luffa_2way_context *state, void *hashval );
int luffa_2way_update_close( luffa_2way_context *state, void *output,
const void *data, size_t inlen );
#endif
#endif

View File

@@ -542,7 +542,9 @@ static void finalization512( hashState_luffa *state, uint32 *b )
__m256i* chainv = (__m256i*)state->chainv;
__m256i t;
const __m128i zero = m128_zero;
const __m256i shuff_bswap32 = m256_const2_64( 0x0c0d0e0f08090a0b,
const __m256i shuff_bswap32 = m256_const_64( 0x1c1d1e1f18191a1b,
0x1415161710111213,
0x0c0d0e0f08090a0b,
0x0405060700010203 );
rnd512( state, zero, zero );

View File

@@ -5,7 +5,6 @@
#include "algo/bmw/bmw-hash-4way.h"
#include "algo/cubehash/cubehash_sse2.h"
#if defined (LYRA2REV3_8WAY)
typedef struct {
@@ -14,7 +13,7 @@ typedef struct {
bmw256_8way_context bmw;
} lyra2v3_8way_ctx_holder;
static lyra2v3_8way_ctx_holder l2v3_8way_ctx;
static __thread lyra2v3_8way_ctx_holder l2v3_8way_ctx;
bool init_lyra2rev3_8way_ctx()
{
@@ -38,7 +37,7 @@ void lyra2rev3_8way_hash( void *state, const void *input )
lyra2v3_8way_ctx_holder ctx __attribute__ ((aligned (64)));
memcpy( &ctx, &l2v3_8way_ctx, sizeof(l2v3_8way_ctx) );
blake256_8way( &ctx.blake, input, 80 );
blake256_8way( &ctx.blake, input + (64*8), 16 );
blake256_8way_close( &ctx.blake, vhash );
dintrlv_8x32( hash0, hash1, hash2, hash3,
@@ -91,7 +90,7 @@ int scanhash_lyra2rev3_8way( struct work *work, const uint32_t max_nonce,
{
uint32_t hash[8*8] __attribute__ ((aligned (64)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[7<<3]);
uint32_t *hash7 = &hash[7<<3];
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
@@ -99,12 +98,15 @@ int scanhash_lyra2rev3_8way( struct work *work, const uint32_t max_nonce,
uint32_t n = first_nonce;
const uint32_t Htarg = ptarget[7];
__m256i *noncev = (__m256i*)vdata + 19; // aligned
const int thr_id = mythr->id; // thr_id arg is deprecated
const int thr_id = mythr->id;
if ( opt_benchmark )
( (uint32_t*)ptarget )[7] = 0x0000ff;
if ( opt_benchmark ) ( (uint32_t*)ptarget )[7] = 0x0000ff;
mm256_bswap32_intrlv80_8x32( vdata, pdata );
blake256_8way_init( &l2v3_8way_ctx.blake );
blake256_8way( &l2v3_8way_ctx.blake, vdata, 64 );
do
{
*noncev = mm256_bswap_32( _mm256_set_epi32( n+7, n+6, n+5, n+4,
@@ -133,14 +135,14 @@ int scanhash_lyra2rev3_8way( struct work *work, const uint32_t max_nonce,
#if defined (LYRA2REV3_4WAY)
typedef struct {
blake256_4way_context blake;
cubehashParam cube;
bmw256_4way_context bmw;
} lyra2v3_4way_ctx_holder;
static lyra2v3_4way_ctx_holder l2v3_4way_ctx;
//static lyra2v3_4way_ctx_holder l2v3_4way_ctx;
static __thread lyra2v3_4way_ctx_holder l2v3_4way_ctx;
bool init_lyra2rev3_4way_ctx()
{
@@ -160,7 +162,8 @@ void lyra2rev3_4way_hash( void *state, const void *input )
lyra2v3_4way_ctx_holder ctx __attribute__ ((aligned (64)));
memcpy( &ctx, &l2v3_4way_ctx, sizeof(l2v3_4way_ctx) );
blake256_4way( &ctx.blake, input, 80 );
// blake256_4way( &ctx.blake, input, 80 );
blake256_4way( &ctx.blake, input + (64*4), 16 );
blake256_4way_close( &ctx.blake, vhash );
dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 256 );
@@ -206,6 +209,10 @@ int scanhash_lyra2rev3_4way( struct work *work, const uint32_t max_nonce,
( (uint32_t*)ptarget )[7] = 0x0000ff;
mm128_bswap32_intrlv80_4x32( vdata, pdata );
blake256_4way_init( &l2v3_4way_ctx.blake );
blake256_4way( &l2v3_4way_ctx.blake, vdata, 64 );
do
{
*noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) );

View File

@@ -3,22 +3,129 @@
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#if defined(NIST5_4WAY)
#include "algo/blake/blake-hash-4way.h"
#include "algo/skein/skein-hash-4way.h"
#include "algo/jh/jh-hash-4way.h"
#include "algo/keccak/keccak-hash-4way.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
void nist5hash_4way( void *out, const void *input )
#if defined(NIST5_8WAY)
void nist5hash_8way( void *out, const void *input )
{
uint64_t vhash[8*16] __attribute__ ((aligned (128)));
uint64_t hash0[8] __attribute__ ((aligned (64)));
uint64_t hash1[8] __attribute__ ((aligned (64)));
uint64_t hash2[8] __attribute__ ((aligned (64)));
uint64_t hash3[8] __attribute__ ((aligned (64)));
uint64_t hash4[8] __attribute__ ((aligned (64)));
uint64_t hash5[8] __attribute__ ((aligned (64)));
uint64_t hash6[8] __attribute__ ((aligned (64)));
uint64_t hash7[8] __attribute__ ((aligned (64)));
blake512_8way_context ctx_blake;
hashState_groestl ctx_groestl;
jh512_8way_context ctx_jh;
skein512_8way_context ctx_skein;
keccak512_8way_context ctx_keccak;
blake512_8way_init( &ctx_blake );
blake512_8way_update( &ctx_blake, input, 80 );
blake512_8way_close( &ctx_blake, vhash );
dintrlv_8x64( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
vhash, 512 );
init_groestl( &ctx_groestl, 64 );
update_and_final_groestl( &ctx_groestl, (char*)hash0,
(const char*)hash0, 512 );
init_groestl( &ctx_groestl, 64 );
update_and_final_groestl( &ctx_groestl, (char*)hash1,
(const char*)hash1, 512 );
init_groestl( &ctx_groestl, 64 );
update_and_final_groestl( &ctx_groestl, (char*)hash2,
(const char*)hash2, 512 );
init_groestl( &ctx_groestl, 64 );
update_and_final_groestl( &ctx_groestl, (char*)hash3,
(const char*)hash3, 512 );
init_groestl( &ctx_groestl, 64 );
update_and_final_groestl( &ctx_groestl, (char*)hash4,
(const char*)hash4, 512 );
init_groestl( &ctx_groestl, 64 );
update_and_final_groestl( &ctx_groestl, (char*)hash5,
(const char*)hash5, 512 );
init_groestl( &ctx_groestl, 64 );
update_and_final_groestl( &ctx_groestl, (char*)hash6,
(const char*)hash6, 512 );
init_groestl( &ctx_groestl, 64 );
update_and_final_groestl( &ctx_groestl, (char*)hash7,
(const char*)hash7, 512 );
intrlv_8x64( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, 512 );
jh512_8way_init( &ctx_jh );
jh512_8way_update( &ctx_jh, vhash, 64 );
jh512_8way_close( &ctx_jh, vhash );
keccak512_8way_init( &ctx_keccak );
keccak512_8way_update( &ctx_keccak, vhash, 64 );
keccak512_8way_close( &ctx_keccak, vhash );
skein512_8way_init( &ctx_skein );
skein512_8way_update( &ctx_skein, vhash, 64 );
skein512_8way_close( &ctx_skein, out );
}
int scanhash_nist5_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[16*8] __attribute__ ((aligned (128)));
uint32_t vdata[24*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[49]);
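// 8x64 interleaving: 32-bit word 7 of a 256-bit lane hash is the high half
// of qword 3, at 32-bit index 3*8*2 + 1 = 49; each subsequent lane adds 2,
// hence hash7[ lane<<1 ] below.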
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
__m512i *noncev = (__m512i*)vdata + 9; // aligned
int thr_id = mythr->id;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
do {
*noncev = mm512_intrlv_blend_32( mm512_bswap_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ) ), *noncev );
nist5hash_8way( hash, vdata );
for ( int lane = 0; lane < 8; lane++ )
if ( hash7[ lane<<1 ] < Htarg )
{
extr_lane_8x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 8;
} while ( ( n < max_nonce-8 ) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce;
return 0;
}
#elif defined(NIST5_4WAY)
void nist5hash_4way( void *out, const void *input )
{
uint64_t vhash[8*4] __attribute__ ((aligned (128)));
uint64_t hash0[8] __attribute__ ((aligned (64)));
uint64_t hash1[8] __attribute__ ((aligned (64)));
uint64_t hash2[8] __attribute__ ((aligned (64)));
uint64_t hash3[8] __attribute__ ((aligned (64)));
uint64_t vhash[8*4] __attribute__ ((aligned (64)));
blake512_4way_context ctx_blake;
hashState_groestl ctx_groestl;
jh512_4way_context ctx_jh;
@@ -62,40 +169,20 @@ void nist5hash_4way( void *out, const void *input )
int scanhash_nist5_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[4*24] __attribute__ ((aligned (128)));
uint32_t hash[4*16] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[25]);
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
__m256i *noncev = (__m256i*)vdata + 9; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
uint64_t htmax[] = { 0,
0xF,
0xFF,
0xFFF,
0xFFFF,
0x10000000 };
uint32_t masks[] = { 0xFFFFFFFF,
0xFFFFFFF0,
0xFFFFFF00,
0xFFFFF000,
0xFFFF0000,
0 };
int thr_id = mythr->id;
mm256_bswap32_intrlv80_4x64( vdata, pdata );
for ( int m=0; m < 6; m++ )
{
if (Htarg <= htmax[m])
{
uint32_t mask = masks[m];
do {
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
@@ -103,7 +190,7 @@ int scanhash_nist5_4way( struct work *work, uint32_t max_nonce,
nist5hash_4way( hash, vdata );
for ( int lane = 0; lane < 4; lane++ )
if ( ( hash7[ lane ] & mask ) == 0 )
if ( hash7[ lane<<1 ] < Htarg )
{
extr_lane_4x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
@@ -113,11 +200,8 @@ int scanhash_nist5_4way( struct work *work, uint32_t max_nonce,
}
}
n += 4;
} while ( ( n < max_nonce ) && !work_restart[thr_id].restart );
break;
}
}
*hashes_done = n - first_nonce + 1;
} while ( ( n < max_nonce-4 ) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce;
return 0;
}

View File

@@ -2,8 +2,11 @@
bool register_nist5_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
#if defined (NIST5_4WAY)
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT;
#if defined (NIST5_8WAY)
gate->scanhash = (void*)&scanhash_nist5_8way;
gate->hash = (void*)&nist5hash_8way;
#elif defined (NIST5_4WAY)
gate->scanhash = (void*)&scanhash_nist5_4way;
gate->hash = (void*)&nist5hash_4way;
#else

View File

@@ -1,14 +1,23 @@
#ifndef __NIST5_GATE_H__
#define __NIST5_GATE_H__
#define __NIST5_GATE_H__ 1
#include "algo-gate-api.h"
#include <stdint.h>
#if defined(__AVX2__) && defined(__AES__)
#define NIST5_4WAY
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define NIST5_8WAY 1
#elif defined(__AVX2__) && defined(__AES__)
#define NIST5_4WAY 1
#endif
#if defined(NIST5_4WAY)
#if defined(NIST5_8WAY)
void nist5hash_8way( void *state, const void *input );
int scanhash_nist5_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined(NIST5_4WAY)
void nist5hash_4way( void *state, const void *input );

View File

@@ -1,12 +1,8 @@
#include "cpuminer-config.h"
#include "quark-gate.h"
#if defined (QUARK_4WAY)
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "algo/blake/blake-hash-4way.h"
#include "algo/bmw/bmw-hash-4way.h"
#include "algo/skein/skein-hash-4way.h"
@@ -14,6 +10,258 @@
#include "algo/keccak/keccak-hash-4way.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#if defined (QUARK_8WAY)
typedef struct {
blake512_8way_context blake;
bmw512_8way_context bmw;
hashState_groestl groestl;
jh512_8way_context jh;
skein512_8way_context skein;
keccak512_8way_context keccak;
} quark_8way_ctx_holder;
quark_8way_ctx_holder quark_8way_ctx __attribute__ ((aligned (128)));
void init_quark_8way_ctx()
{
blake512_8way_init( &quark_8way_ctx.blake );
bmw512_8way_init( &quark_8way_ctx.bmw );
init_groestl( &quark_8way_ctx.groestl, 64 );
skein512_8way_init( &quark_8way_ctx.skein );
jh512_8way_init( &quark_8way_ctx.jh );
keccak512_8way_init( &quark_8way_ctx.keccak );
}
void quark_8way_hash( void *state, const void *input )
{
uint64_t vhash[8*8] __attribute__ ((aligned (128)));
uint64_t vhashA[8*8] __attribute__ ((aligned (64)));
uint64_t vhashB[8*8] __attribute__ ((aligned (64)));
uint64_t hash0[8] __attribute__ ((aligned (64)));
uint64_t hash1[8] __attribute__ ((aligned (64)));
uint64_t hash2[8] __attribute__ ((aligned (64)));
uint64_t hash3[8] __attribute__ ((aligned (64)));
uint64_t hash4[8] __attribute__ ((aligned (64)));
uint64_t hash5[8] __attribute__ ((aligned (64)));
uint64_t hash6[8] __attribute__ ((aligned (64)));
uint64_t hash7[8] __attribute__ ((aligned (64)));
__m512i* vh = (__m512i*)vhash;
__m512i* vhA = (__m512i*)vhashA;
__m512i* vhB = (__m512i*)vhashB;
__mmask8 vh_mask;
quark_8way_ctx_holder ctx;
const uint32_t mask = 8;
const __m512i bit3_mask = m512_const1_64( mask );
const __m512i zero = _mm512_setzero_si512();
memcpy( &ctx, &quark_8way_ctx, sizeof(quark_8way_ctx) );
blake512_8way_update( &ctx.blake, input, 80 );
blake512_8way_close( &ctx.blake, vhash );
bmw512_8way_update( &ctx.bmw, vhash, 64 );
bmw512_8way_close( &ctx.bmw, vhash );
// AVX-512 cmpeq returns a bit mask instead of a vector mask, which should
// simplify things, but the logic doesn't seem to work. The problem appears
// to be the test that skips a hash when it isn't needed: skipping that test
// for all 8-way hashes fixed it. The hash-selection blending works when the
// hash is produced, but the hash wasn't always being produced when it
// should have been. Both decisions are based on the same data, the
// __mmask8; it works as a blend mask but not in a logical comparison.
// The type may be the problem; a cast to int or a movm instruction might
// be needed. It's now moot: a hash can be skipped only 1 time in 256 when
// hashing 8 ways in parallel, so the performance impact of the workaround
// should be negligible. A problem for another day.
vh_mask = _mm512_cmpeq_epi64_mask( _mm512_and_si512( vh[0], bit3_mask ),
zero );
dintrlv_8x64( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
vhash, 512 );
if ( hash0[0] & mask )
{
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(char*)hash0, 512 );
}
if ( hash1[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash1,
(char*)hash1, 512 );
}
if ( hash2[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash2,
(char*)hash2, 512 );
}
if ( hash3[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(char*)hash3, 512 );
}
if ( hash4[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash4,
(char*)hash4, 512 );
}
if ( hash5[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash5,
(char*)hash5, 512 );
}
if ( hash6[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash6,
(char*)hash6, 512 );
}
if ( hash7[0] & mask )
{
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash7,
(char*)hash7, 512 );
}
intrlv_8x64( vhashA, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, 512 );
if ( vh_mask & 0xff )
{
skein512_8way_update( &ctx.skein, vhash, 64 );
skein512_8way_close( &ctx.skein, vhashB );
}
mm512_blend_hash_8x64( vh, vhA, vhB, vh_mask );
dintrlv_8x64( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
vhash, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash4, (char*)hash4, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash5, (char*)hash5, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash6, (char*)hash6, 512 );
reinit_groestl( &ctx.groestl );
update_and_final_groestl( &ctx.groestl, (char*)hash7, (char*)hash7, 512 );
intrlv_8x64( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
512 );
jh512_8way_update( &ctx.jh, vhash, 64 );
jh512_8way_close( &ctx.jh, vhash );
vh_mask = _mm512_cmpeq_epi64_mask( _mm512_and_si512( vh[0], bit3_mask ),
zero );
if ( ( vh_mask & 0xff ) != 0xff )
{
blake512_8way_init( &ctx.blake );
blake512_8way_update( &ctx.blake, vhash, 64 );
blake512_8way_close( &ctx.blake, vhashA );
}
if ( vh_mask & 0xff )
{
bmw512_8way_init( &ctx.bmw );
bmw512_8way_update( &ctx.bmw, vhash, 64 );
bmw512_8way_close( &ctx.bmw, vhashB );
}
mm512_blend_hash_8x64( vh, vhA, vhB, vh_mask );
keccak512_8way_update( &ctx.keccak, vhash, 64 );
keccak512_8way_close( &ctx.keccak, vhash );
skein512_8way_init( &ctx.skein );
skein512_8way_update( &ctx.skein, vhash, 64 );
skein512_8way_close( &ctx.skein, vhash );
vh_mask = _mm512_cmpeq_epi64_mask( _mm512_and_si512( vh[0], bit3_mask ),
zero );
if ( ( vh_mask & 0xff ) != 0xff )
{
keccak512_8way_init( &ctx.keccak );
keccak512_8way_update( &ctx.keccak, vhash, 64 );
keccak512_8way_close( &ctx.keccak, vhashA );
}
if ( vh_mask & 0xff )
{
jh512_8way_init( &ctx.jh );
jh512_8way_update( &ctx.jh, vhash, 64 );
jh512_8way_close( &ctx.jh, vhashB );
}
// Final blend, written directly to state; quark output is 256 bits so
// only the first 4 rows (32 bytes per lane) are needed.
casti_m512i( state,0 ) = _mm512_mask_blend_epi64( vh_mask, vhA[0], vhB[0] );
casti_m512i( state,1 ) = _mm512_mask_blend_epi64( vh_mask, vhA[1], vhB[1] );
casti_m512i( state,2 ) = _mm512_mask_blend_epi64( vh_mask, vhA[2], vhB[2] );
casti_m512i( state,3 ) = _mm512_mask_blend_epi64( vh_mask, vhA[3], vhB[3] );
}
int scanhash_quark_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[24*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[49]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
__m512i *noncev = (__m512i*)vdata + 9; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
mm512_bswap32_intrlv80_8x64( vdata, pdata );
do
{
*noncev = mm512_intrlv_blend_32( mm512_bswap_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ) ), *noncev );
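// The set/bswap pair builds the big-endian nonces n..n+7 as the odd
// 32-bit elements, and mm512_intrlv_blend_32 merges them into word 19
// (the nonce) of each lane's header without touching the other words.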
quark_8way_hash( hash, vdata );
pdata[19] = n;
for ( int i = 0; i < 8; i++ )
if ( ( hash7[ i<<1 ] & 0xFFFFFF00 ) == 0 )
{
extr_lane_8x64( lane_hash, hash, i, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;
submit_lane_solution( work, lane_hash, mythr, i );
}
}
n += 8;
} while ( ( n < max_nonce-8 ) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce;
return 0;
}
#elif defined (QUARK_4WAY)
typedef struct {
blake512_4way_context blake;
bmw512_4way_context bmw;
@@ -91,7 +339,7 @@ void quark_4way_hash( void *state, const void *input )
intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );
if ( mm256_anybits0( vh_mask ) )
if ( mm256_anybits1( vh_mask ) )
{
skein512_4way( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhashB );
@@ -117,14 +365,14 @@ void quark_4way_hash( void *state, const void *input )
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero );
if ( mm256_anybits1( vh_mask ) )
if ( mm256_anybits0( vh_mask ) )
{
blake512_4way_init( &ctx.blake );
blake512_4way( &ctx.blake, vhash, 64 );
blake512_4way_close( &ctx.blake, vhashA );
}
if ( mm256_anybits0( vh_mask ) )
if ( mm256_anybits1( vh_mask ) )
{
bmw512_4way_init( &ctx.bmw );
bmw512_4way( &ctx.bmw, vhash, 64 );
@@ -142,14 +390,14 @@ void quark_4way_hash( void *state, const void *input )
vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero );
if ( mm256_anybits1( vh_mask ) )
if ( mm256_anybits0( vh_mask ) )
{
keccak512_4way_init( &ctx.keccak );
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhashA );
}
if ( mm256_anybits0( vh_mask ) )
if ( mm256_anybits1( vh_mask ) )
{
jh512_4way_init( &ctx.jh );
jh512_4way( &ctx.jh, vhash, 64 );

View File

@@ -2,7 +2,11 @@
bool register_quark_algo( algo_gate_t* gate )
{
#if defined (QUARK_4WAY)
#if defined (QUARK_8WAY)
init_quark_8way_ctx();
gate->scanhash = (void*)&scanhash_quark_8way;
gate->hash = (void*)&quark_8way_hash;
#elif defined (QUARK_4WAY)
init_quark_4way_ctx();
gate->scanhash = (void*)&scanhash_quark_4way;
gate->hash = (void*)&quark_4way_hash;
@@ -11,7 +15,7 @@ bool register_quark_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_quark;
gate->hash = (void*)&quark_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT;
return true;
};

View File

@@ -4,13 +4,22 @@
#include "algo-gate-api.h"
#include <stdint.h>
#if defined(__AVX2__) && defined(__AES__)
#define QUARK_4WAY
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define QUARK_8WAY 1
#elif defined(__AVX2__) && defined(__AES__)
#define QUARK_4WAY 1
#endif
bool register_quark_algo( algo_gate_t* gate );
#if defined(QUARK_4WAY)
#if defined(QUARK_8WAY)
void quark_8way_hash( void *state, const void *input );
int scanhash_quark_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void init_quark_8way_ctx();
#elif defined(QUARK_4WAY)
void quark_4way_hash( void *state, const void *input );
int scanhash_quark_4way( struct work *work, uint32_t max_nonce,

View File

@@ -1,7 +1,4 @@
#include "qubit-gate.h"
#if defined(QUBIT_2WAY)
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
@@ -12,6 +9,160 @@
#include "algo/shavite/sph_shavite.h"
#include "algo/echo/aes_ni/hash_api.h"
#if defined(QUBIT_4WAY)
typedef struct
{
luffa_4way_context luffa;
cubehashParam cube;
sph_shavite512_context shavite;
simd_4way_context simd;
hashState_echo echo;
} qubit_4way_ctx_holder;
qubit_4way_ctx_holder qubit_4way_ctx;
void init_qubit_4way_ctx()
{
cubehashInit(&qubit_4way_ctx.cube,512,16,32);
sph_shavite512_init(&qubit_4way_ctx.shavite);
simd_4way_init( &qubit_4way_ctx.simd, 512 );
init_echo(&qubit_4way_ctx.echo, 512);
};
void qubit_4way_hash( void *output, const void *input )
{
uint64_t vhash[8*4] __attribute__ ((aligned (128)));
uint64_t hash0[8] __attribute__ ((aligned (64)));
uint64_t hash1[8] __attribute__ ((aligned (64)));
uint64_t hash2[8] __attribute__ ((aligned (64)));
uint64_t hash3[8] __attribute__ ((aligned (64)));
qubit_4way_ctx_holder ctx;
memcpy( &ctx, &qubit_4way_ctx, sizeof(qubit_4way_ctx) );
luffa_4way_update( &ctx.luffa, input + (64<<2), 16 );
luffa_4way_close( &ctx.luffa, vhash );
dintrlv_4x128( hash0, hash1, hash2, hash3, vhash, 512 );
cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 );
memcpy( &ctx.cube, &qubit_4way_ctx.cube, sizeof(cubehashParam) );
cubehashUpdateDigest( &ctx.cube, (byte*)hash1, (const byte*) hash1, 64 );
memcpy( &ctx.cube, &qubit_4way_ctx.cube, sizeof(cubehashParam) );
cubehashUpdateDigest( &ctx.cube, (byte*)hash2, (const byte*) hash2, 64 );
memcpy( &ctx.cube, &qubit_4way_ctx.cube, sizeof(cubehashParam) );
cubehashUpdateDigest( &ctx.cube, (byte*)hash3, (const byte*) hash3, 64 );
sph_shavite512( &ctx.shavite, hash0, 64 );
sph_shavite512_close( &ctx.shavite, hash0 );
memcpy( &ctx.shavite, &qubit_4way_ctx.shavite,
sizeof(sph_shavite512_context) );
sph_shavite512( &ctx.shavite, hash1, 64 );
sph_shavite512_close( &ctx.shavite, hash1 );
memcpy( &ctx.shavite, &qubit_4way_ctx.shavite,
sizeof(sph_shavite512_context) );
sph_shavite512( &ctx.shavite, hash2, 64 );
sph_shavite512_close( &ctx.shavite, hash2 );
memcpy( &ctx.shavite, &qubit_4way_ctx.shavite,
sizeof(sph_shavite512_context) );
sph_shavite512( &ctx.shavite, hash3, 64 );
sph_shavite512_close( &ctx.shavite, hash3 );
intrlv_4x128( vhash, hash0, hash1, hash2, hash3, 512 );
simd_4way_update_close( &ctx.simd, vhash, vhash, 512 );
dintrlv_4x128( hash0, hash1, hash2, hash3, vhash, 512 );
update_final_echo( &ctx.echo, (BitSequence *)hash0,
(const BitSequence *) hash0, 512 );
memcpy( &ctx.echo, &qubit_4way_ctx.echo, sizeof(hashState_echo) );
update_final_echo( &ctx.echo, (BitSequence *)hash1,
(const BitSequence *) hash1, 512 );
memcpy( &ctx.echo, &qubit_4way_ctx.echo, sizeof(hashState_echo) );
update_final_echo( &ctx.echo, (BitSequence *)hash2,
(const BitSequence *) hash2, 512 );
memcpy( &ctx.echo, &qubit_4way_ctx.echo, sizeof(hashState_echo) );
update_final_echo( &ctx.echo, (BitSequence *)hash3,
(const BitSequence *) hash3, 512 );
memcpy( output, hash0, 32 );
memcpy( output+32, hash1, 32 );
memcpy( output+64, hash2, 32 );
memcpy( output+96, hash3, 32 );
}
int scanhash_qubit_4way( struct work *work,uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*16] __attribute__ ((aligned (128)));
uint32_t vdata[4*24] __attribute__ ((aligned (64)));
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t n = pdata[19];
const uint32_t first_nonce = pdata[19];
uint32_t *noncep = vdata + 64+3; // 4*16 + 3
int thr_id = mythr->id;
const uint32_t Htarg = ptarget[7];
uint64_t htmax[] = { 0, 0xF, 0xFF,
0xFFF, 0xFFFF, 0x10000000 };
uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
0xFFFFF000, 0xFFFF0000, 0 };
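// Htarg selects the coarsest 32-bit mask that still implies the target
// test: a lane is fully tested only when ( hash[7] & mask ) == 0, which
// cheaply rejects most candidates before the 256-bit fulltest.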
casti_m256i( endiandata, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) );
casti_m256i( endiandata, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) );
casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
uint64_t *edata = (uint64_t*)endiandata;
intrlv_4x128( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
luffa_4way_init( &qubit_4way_ctx.luffa, 512 );
luffa_4way_update( &qubit_4way_ctx.luffa, vdata, 64 );
for ( int m=0; m < 6; m++ ) if ( Htarg <= htmax[m] )
{
uint32_t mask = masks[m];
do
{
be32enc( noncep, n );
be32enc( noncep+4, n+1 );
be32enc( noncep+8, n+2 );
be32enc( noncep+12, n+3 );
qubit_4way_hash( hash, vdata );
pdata[19] = n;
if ( !( hash[7] & mask ) )
if ( fulltest( hash, ptarget) && !opt_benchmark )
{
pdata[19] = n;
submit_lane_solution( work, hash, mythr, 0 );
}
if ( !( (hash+8)[7] & mask ) )
if ( fulltest( hash+8, ptarget) && !opt_benchmark )
{
pdata[19] = n+1;
submit_lane_solution( work, hash+8, mythr, 1 );
}
if ( !( (hash+16)[7] & mask ) )
if ( fulltest( hash+16, ptarget) && !opt_benchmark )
{
pdata[19] = n+2;
submit_lane_solution( work, hash+16, mythr, 2 );
}
if ( !( (hash+24)[7] & mask ) )
if ( fulltest( hash+24, ptarget) && !opt_benchmark )
{
pdata[19] = n+3;
submit_lane_solution( work, hash+24, mythr, 3 );
}
n += 4;
} while ( ( n < max_nonce-4 ) && !work_restart[thr_id].restart );
break;
}
*hashes_done = n - first_nonce;
return 0;
}
#elif defined(QUBIT_2WAY)
typedef struct
{
luffa_2way_context luffa;

View File

@@ -2,6 +2,13 @@
bool register_qubit_algo( algo_gate_t* gate )
{
/*
#if defined (QUBIT_4WAY)
init_qubit_4way_ctx();
gate->scanhash = (void*)&scanhash_qubit_4way;
gate->hash = (void*)&qubit_4way_hash;
#elif defined (QUBIT_2WAY)
*/
#if defined (QUBIT_2WAY)
init_qubit_2way_ctx();
gate->scanhash = (void*)&scanhash_qubit_2way;

View File

@@ -4,12 +4,26 @@
#include "algo-gate-api.h"
#include <stdint.h>
/*
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define QUBIT_4WAY 1
#elif defined(__AVX2__) && defined(__AES__)
*/
#if defined(__AVX2__) && defined(__AES__)
#define QUBIT_2WAY
#define QUBIT_2WAY 1
#endif
bool register_qubit_algo( algo_gate_t* gate );
/*
#if defined(QUBIT_4WAY)
void qubit_4way_hash( void *state, const void *input );
int scanhash_qubit_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void init_qubit_4way_ctx();
#elif defined(QUBIT_2WAY)
*/
#if defined(QUBIT_2WAY)
void qubit_2way_hash( void *state, const void *input );

View File

@@ -1,538 +0,0 @@
#if 0
#include <stddef.h>
#include <string.h>
#include "sha2-hash-4way.h"
#if defined(__AVX2__)
// naming convention for variables and macros
// VARx: AVX2 8 way 32 bit
// VARy: MMX 2 way 32 bit
// VARz: scalar integer 32 bit
static const uint32_t H256[8] =
{
0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};
static const uint32_t K256[64] =
{
0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2
};
#define CHx(X, Y, Z) \
_mm256_xor_si256( _mm256_and_si256( _mm256_xor_si256( Y, Z ), X ), Z )
#define CHy(X, Y, Z) \
_mm_xor_si64( _mm_and_si64( _mm_xor_si64( Y, Z ), X ), Z )
#define CHz(X, Y, Z) ((( (Y) ^ (Z) ) & (X) ) ^ (Z) )
#define MAJx(X, Y, Z) \
_mm256_or_si256( _mm256_and_si256( X, Y ), \
_mm256_and_si256( _mm256_or_si256( X, Y ), Z ) )
#define MAJy(X, Y, Z) \
_mm_or_si64( _mm_and_si64( X, Y ), \
_mm_and_si64( _mm_or_si64( X, Y ), Z ) )
#define MAJz(X, Y, Z) ( ( (X) & (Y) ) | ( ( (X) | (Y) ) & (Z) ) )
#define BSG2_0x(x) \
_mm256_xor_si256( _mm256_xor_si256( \
mm256_ror_32(x,2), mm256_ror_32(x,13) ), _mm256_srli_epi32(x,22) )
#define BSG2_0y(x) \
_mm_xor_si64( _mm_xor_si64( \
mm64_ror_32(x,2), mm64_ror_32(x,13) ), _mm_srli_pi32(x,22) )
#define BSG2_0z(x) ( u32_ror_32(x,2) ^ u32_ror_32(x,13) ^ ((x)>>22) )
#define BSG2_1x(x) \
_mm256_xor_si256( _mm256_xor_si256( \
mm256_ror_32(x,6), mm256_ror_32(x,11) ), _mm256_srli_epi32(x,25) )
#define BSG2_1y(x) \
_mm_xor_si64( _mm_xor_si64( \
mm64_ror_32(x,6), mm64_ror_32(x,11) ), _mm_srli_pi32(x,25) )
#define BSG2_1z(x) ( u32_ror_32(x,6) ^ u32_ror_32(x,11) ^ ((x)>>25) )
#define SSG2_0x(x) \
_mm256_xor_si256( _mm256_xor_si256( \
mm256_ror_32(x,7), mm256_ror_32(x,18) ), _mm256_srli_epi32(x,3) )
#define SSG2_0y(x) \
_mm_xor_si64( _mm_xor_si64( \
mm64_ror_32(x,7), mm64_ror_32(x,18) ), _mm_srli_pi32(x,3) )
#define SSG2_0z(x) (( u32_ror_32(x,7) ^ u32_ror_32(x,18) ) ^ ((x)>>3) )
#define SSG2_1x(x) \
_mm256_xor_si256( _mm256_xor_si256( \
mm256_ror_32(x,17), mm256_ror_32(x,19) ), _mm256_srli_epi32(x,10) )
#define SSG2_1y(x) \
_mm_xor_si64( _mm_xor_si64( \
mm64_ror_32(x,17), mm64_ror_32(x,19) ), _mm_srli_pi32(x,10) )
#define SSG2_1z(x) ( u32_ror_32(x,17) ^ u32_ror_32(x,19) ^ ((x)>>10) )
#define SHA2x_MEXP( a, b, c, d ) \
_mm256_add_epi32( _mm256_add_epi32( _mm256_add_epi32( \
SSG2_1x( Wx[a] ), Wx[b] ), SSG2_0x( Wx[c] ) ), Wx[d] )
#define SHA2y_MEXP( a, b, c, d ) \
_mm_add_pi32( _mm_add_pi32( _mm_add_pi32( \
SSG2_1y( Wy[a] ), Wy[b] ), SSG2_0y( Wy[c] ) ), Wy[d] )
#define SHA2z_MEXP( a, b, c, d ) \
( SSG2_1z( Wz[a] ) + Wz[b] + SSG2_0z( Wz[c] ) + Wz[d] )
#define SHA2s_11WAY_STEP( Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx, \
Ay, By, Cy, Dy, Ey, Fy, Gy, Hy, \
Az, Bz, Cz, Dz, Ez, Fz, Gz, Hz, i, j) \
do { \
__m256i T1x, T2x; \
__m64 T1y, T2y; \
uint32_t T1z, T2z; \
T1x = _mm256_add_epi32( _mm256_add_epi32( _mm256_add_epi32( \
_mm256_add_epi32( Hx, BSG2_1x(Ex) ), CHx(Ex, Fx, Gx) ), \
_mm256_set1_epi32( K256[( (j)+(i) )] ) ), Wx[i] ); \
T1y = _mm_add_pi32( _mm_add_pi32( _mm_add_pi32( \
_mm_add_pi32( Hy, BSG2_1y(Ey) ), CHy(Ey, Fy, Gy) ), \
_mm_set1_pi32( K256[( (j)+(i) )] ) ), Wy[i] ); \
T1z = Hz + BSG2_1z( Ez ) + CHz( Ez, Fz, Gz ) + K256[ ((j)+(i)) ] + Wz[i]; \
T2x = _mm256_add_epi32( BSG2_0x(Ax), MAJx(Ax, Bx, Cx) ); \
T2y = _mm_add_pi32( BSG2_0y(Ay), MAJy(Ay, By, Cy) ); \
T2z = BSG2_0z( Az ) + MAJz( Az, Bz, Cz ); \
Dx = _mm256_add_epi32( Dx, T1x ); \
Dy = _mm_add_pi32( Dy, T1y ); \
Dz = Dz + T1z; \
Hx = _mm256_add_epi32( T1x, T2x ); \
Hy = _mm_add_pi32( T1y, T2y ); \
Hz = T1z + T2z; \
} while (0)
void sha256_11way_round( __m256i *inx, __m256i rx[8], __m64 *iny, __m64 ry[8],
uint32_t *inz, uint32_t rz[8] )
{
__m256i Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx;
__m256i Wx[16];
__m64 Ay, By, Cy, Dy, Ey, Fy, Gy, Hy;
__m64 Wy[16];
uint32_t Az, Bz, Cz, Dz, Ez, Fz, Gz, Hz;
uint32_t Wz[16];
Wx[ 0] = mm256_bswap_32( inx[ 0] );
Wy[ 0] = mm64_bswap_32( iny[ 0] );
Wz[ 0] = bswap_32( inz[ 0] );
Wx[ 1] = mm256_bswap_32( inx[ 1] );
Wy[ 1] = mm64_bswap_32( iny[ 1] );
Wz[ 1] = bswap_32( inz[ 1] );
Wx[ 2] = mm256_bswap_32( inx[ 2] );
Wy[ 2] = mm64_bswap_32( iny[ 2] );
Wz[ 2] = bswap_32( inz[ 2] );
Wx[ 3] = mm256_bswap_32( inx[ 3] );
Wy[ 3] = mm64_bswap_32( iny[ 3] );
Wz[ 3] = bswap_32( inz[ 3] );
Wx[ 4] = mm256_bswap_32( inx[ 4] );
Wy[ 4] = mm64_bswap_32( iny[ 4] );
Wz[ 4] = bswap_32( inz[ 4] );
Wx[ 5] = mm256_bswap_32( inx[ 5] );
Wy[ 5] = mm64_bswap_32( iny[ 5] );
Wz[ 5] = bswap_32( inz[ 5] );
Wx[ 6] = mm256_bswap_32( inx[ 6] );
Wy[ 6] = mm64_bswap_32( iny[ 6] );
Wz[ 6] = bswap_32( inz[ 6] );
Wx[ 7] = mm256_bswap_32( inx[ 7] );
Wy[ 7] = mm64_bswap_32( iny[ 7] );
Wz[ 7] = bswap_32( inz[ 7] );
Wx[ 8] = mm256_bswap_32( inx[ 8] );
Wy[ 8] = mm64_bswap_32( iny[ 8] );
Wz[ 8] = bswap_32( inz[ 8] );
Wx[ 9] = mm256_bswap_32( inx[ 9] );
Wy[ 9] = mm64_bswap_32( iny[ 9] );
Wz[ 9] = bswap_32( inz[ 9] );
Wx[10] = mm256_bswap_32( inx[10] );
Wy[10] = mm64_bswap_32( iny[10] );
Wz[10] = bswap_32( inz[10] );
Wx[11] = mm256_bswap_32( inx[11] );
Wy[11] = mm64_bswap_32( iny[11] );
Wz[11] = bswap_32( inz[11] );
Wx[12] = mm256_bswap_32( inx[12] );
Wy[12] = mm64_bswap_32( iny[12] );
Wz[12] = bswap_32( inz[12] );
Wx[13] = mm256_bswap_32( inx[13] );
Wy[13] = mm64_bswap_32( iny[13] );
Wz[13] = bswap_32( inz[13] );
Wx[14] = mm256_bswap_32( inx[14] );
Wy[14] = mm64_bswap_32( iny[14] );
Wz[14] = bswap_32( inz[14] );
Wx[15] = mm256_bswap_32( inx[15] );
Wy[15] = mm64_bswap_32( iny[15] );
Wz[15] = bswap_32( inz[15] );
Ax = rx[0]; Ay = ry[0]; Az = rz[0];
Bx = rx[1]; By = ry[1]; Bz = rz[1];
Cx = rx[2]; Cy = ry[2]; Cz = rz[2];
Dx = rx[3]; Dy = ry[3]; Dz = rz[3];
Ex = rx[4]; Ey = ry[4]; Ez = rz[4];
Fx = rx[5]; Fy = ry[5]; Fz = rz[5];
Gx = rx[6]; Gy = ry[6]; Gz = rz[6];
Hx = rx[7]; Hy = ry[7]; Hz = rz[7];
SHA2s_11WAY_STEP( Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx,
Ay, By, Cy, Dy, Ey, Fy, Gy, Hy,
Az, Bz, Cz, Dz, Ez, Fz, Gz, Hz, 0, 0 );
SHA2s_11WAY_STEP( Hx, Ax, Bx, Cx, Dx, Ex, Fx, Gx,
Hy, Ay, By, Cy, Dy, Ey, Fy, Gy,
Hz, Az, Bz, Cz, Dz, Ez, Fz, Gz, 1, 0 );
SHA2s_11WAY_STEP( Gx, Hx, Ax, Bx, Cx, Dx, Ex, Fx,
Gy, Hy, Ay, By, Cy, Dy, Ey, Fy,
Gz, Hz, Az, Bz, Cz, Dz, Ez, Fz, 2, 0 );
SHA2s_11WAY_STEP( Fx, Gx, Hx, Ax, Bx, Cx, Dx, Ex,
Fy, Gy, Hy, Ay, By, Cy, Dy, Ey,
Fz, Gz, Hz, Az, Bz, Cz, Dz, Ez, 3, 0 );
SHA2s_11WAY_STEP( Ex, Fx, Gx, Hx, Ax, Bx, Cx, Dx,
Ey, Fy, Gy, Hy, Ay, By, Cy, Dy,
Ez, Fz, Gz, Hz, Az, Bz, Cz, Dz, 4, 0 );
SHA2s_11WAY_STEP( Dx, Ex, Fx, Gx, Hx, Ax, Bx, Cx,
Dy, Ey, Fy, Gy, Hy, Ay, By, Cy,
Dz, Ez, Fz, Gz, Hz, Az, Bz, Cz, 5, 0 );
SHA2s_11WAY_STEP( Cx, Dx, Ex, Fx, Gx, Hx, Ax, Bx,
Cy, Dy, Ey, Fy, Gy, Hy, Ay, By,
Cz, Dz, Ez, Fz, Gz, Hz, Az, Bz, 6, 0 );
SHA2s_11WAY_STEP( Bx, Cx, Dx, Ex, Fx, Gx, Hx, Ax,
By, Cy, Dy, Ey, Fy, Gy, Hy, Ay,
Bz, Cz, Dz, Ez, Fz, Gz, Hz, Az, 7, 0 );
SHA2s_11WAY_STEP( Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx,
Ay, By, Cy, Dy, Ey, Fy, Gy, Hy,
Az, Bz, Cz, Dz, Ez, Fz, Gz, Hz, 8, 0 );
SHA2s_11WAY_STEP( Hx, Ax, Bx, Cx, Dx, Ex, Fx, Gx,
Hy, Ay, By, Cy, Dy, Ey, Fy, Gy,
Hz, Az, Bz, Cz, Dz, Ez, Fz, Gz, 9, 0 );
SHA2s_11WAY_STEP( Gx, Hx, Ax, Bx, Cx, Dx, Ex, Fx,
Gy, Hy, Ay, By, Cy, Dy, Ey, Fy,
Gz, Hz, Az, Bz, Cz, Dz, Ez, Fz, 10, 0 );
SHA2s_11WAY_STEP( Fx, Gx, Hx, Ax, Bx, Cx, Dx, Ex,
Fy, Gy, Hy, Ay, By, Cy, Dy, Ey,
Fz, Gz, Hz, Az, Bz, Cz, Dz, Ez, 11, 0 );
SHA2s_11WAY_STEP( Ex, Fx, Gx, Hx, Ax, Bx, Cx, Dx,
Ey, Fy, Gy, Hy, Ay, By, Cy, Dy,
Ez, Fz, Gz, Hz, Az, Bz, Cz, Dz, 12, 0 );
SHA2s_11WAY_STEP( Dx, Ex, Fx, Gx, Hx, Ax, Bx, Cx,
Dy, Ey, Fy, Gy, Hy, Ay, By, Cy,
Dz, Ez, Fz, Gz, Hz, Az, Bz, Cz, 13, 0 );
SHA2s_11WAY_STEP( Cx, Dx, Ex, Fx, Gx, Hx, Ax, Bx,
Cy, Dy, Ey, Fy, Gy, Hy, Ay, By,
Cz, Dz, Ez, Fz, Gz, Hz, Az, Bz, 14, 0 );
SHA2s_11WAY_STEP( Bx, Cx, Dx, Ex, Fx, Gx, Hx, Ax,
By, Cy, Dy, Ey, Fy, Gy, Hy, Ay,
Bz, Cz, Dz, Ez, Fz, Gz, Hz, Az, 15, 0 );
for ( int j = 16; j < 64; j += 16 )
{
Wx[ 0] = SHA2x_MEXP( 14, 9, 1, 0 );
Wy[ 0] = SHA2y_MEXP( 14, 9, 1, 0 );
Wz[ 0] = SHA2z_MEXP( 14, 9, 1, 0 );
Wx[ 1] = SHA2x_MEXP( 15, 10, 2, 1 );
Wy[ 1] = SHA2y_MEXP( 15, 10, 2, 1 );
Wz[ 1] = SHA2z_MEXP( 15, 10, 2, 1 );
Wx[ 2] = SHA2x_MEXP( 0, 11, 3, 2 );
Wy[ 2] = SHA2y_MEXP( 0, 11, 3, 2 );
Wz[ 2] = SHA2z_MEXP( 0, 11, 3, 2 );
Wx[ 3] = SHA2x_MEXP( 1, 12, 4, 3 );
Wy[ 3] = SHA2y_MEXP( 1, 12, 4, 3 );
Wz[ 3] = SHA2z_MEXP( 1, 12, 4, 3 );
Wx[ 4] = SHA2x_MEXP( 2, 13, 5, 4 );
Wy[ 4] = SHA2y_MEXP( 2, 13, 5, 4 );
Wz[ 4] = SHA2z_MEXP( 2, 13, 5, 4 );
Wx[ 5] = SHA2x_MEXP( 3, 14, 6, 5 );
Wy[ 5] = SHA2y_MEXP( 3, 14, 6, 5 );
Wz[ 5] = SHA2z_MEXP( 3, 14, 6, 5 );
Wx[ 6] = SHA2x_MEXP( 4, 15, 7, 6 );
Wy[ 6] = SHA2y_MEXP( 4, 15, 7, 6 );
Wz[ 6] = SHA2z_MEXP( 4, 15, 7, 6 );
Wx[ 7] = SHA2x_MEXP( 5, 0, 8, 7);
Wy[ 7] = SHA2y_MEXP( 5, 0, 8, 7);
Wz[ 7] = SHA2z_MEXP( 5, 0, 8, 7);
Wx[ 8] = SHA2x_MEXP( 6, 1, 9, 8);
Wy[ 8] = SHA2y_MEXP( 6, 1, 9, 8);
Wz[ 8] = SHA2z_MEXP( 6, 1, 9, 8);
Wx[ 9] = SHA2x_MEXP( 7, 2, 10, 9 );
Wy[ 9] = SHA2y_MEXP( 7, 2, 10, 9);
Wz[ 9] = SHA2z_MEXP( 7, 2, 10, 9);
Wx[10] = SHA2x_MEXP( 8, 3, 11, 10 );
Wy[10] = SHA2y_MEXP( 8, 3, 11, 10);
Wz[10] = SHA2z_MEXP( 8, 3, 11, 10);
Wx[11] = SHA2x_MEXP( 9, 4, 12, 11);
Wy[11] = SHA2y_MEXP( 9, 4, 12, 11);
Wz[11] = SHA2z_MEXP( 9, 4, 12, 11 );
Wx[12] = SHA2x_MEXP( 10, 5, 13, 12 );
Wy[12] = SHA2y_MEXP( 10, 5, 13, 12 );
Wz[12] = SHA2z_MEXP( 10, 5, 13, 12 );
Wx[13] = SHA2x_MEXP( 11, 6, 14, 13 );
Wy[13] = SHA2y_MEXP( 11, 6, 14, 13 );
Wz[13] = SHA2z_MEXP( 11, 6, 14, 13 );
Wx[14] = SHA2x_MEXP( 12, 7, 15, 14 );
Wy[14] = SHA2y_MEXP( 12, 7, 15, 14 );
Wz[14] = SHA2z_MEXP( 12, 7, 15, 14 );
Wx[15] = SHA2x_MEXP( 13, 8, 0, 15 );
Wy[15] = SHA2y_MEXP( 13, 8, 0, 15 );
Wz[15] = SHA2z_MEXP( 13, 8, 0, 15 );
SHA2s_11WAY_STEP( Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx,
Ay, By, Cy, Dy, Ey, Fy, Gy, Hy,
Az, Bz, Cz, Dz, Ez, Fz, Gz, Hz, 0, j );
SHA2s_11WAY_STEP( Hx, Ax, Bx, Cx, Dx, Ex, Fx, Gx,
Hy, Ay, By, Cy, Dy, Ey, Fy, Gy,
Hz, Az, Bz, Cz, Dz, Ez, Fz, Gz, 1, j );
SHA2s_11WAY_STEP( Gx, Hx, Ax, Bx, Cx, Dx, Ex, Fx,
Gy, Hy, Ay, By, Cy, Dy, Ey, Fy,
Gz, Hz, Az, Bz, Cz, Dz, Ez, Fz, 2, j );
SHA2s_11WAY_STEP( Fx, Gx, Hx, Ax, Bx, Cx, Dx, Ex,
Fy, Gy, Hy, Ay, By, Cy, Dy, Ey,
Fz, Gz, Hz, Az, Bz, Cz, Dz, Ez, 3, j );
SHA2s_11WAY_STEP( Ex, Fx, Gx, Hx, Ax, Bx, Cx, Dx,
Ey, Fy, Gy, Hy, Ay, By, Cy, Dy,
Ez, Fz, Gz, Hz, Az, Bz, Cz, Dz, 4, j );
SHA2s_11WAY_STEP( Dx, Ex, Fx, Gx, Hx, Ax, Bx, Cx,
Dy, Ey, Fy, Gy, Hy, Ay, By, Cy,
Dz, Ez, Fz, Gz, Hz, Az, Bz, Cz, 5, j );
SHA2s_11WAY_STEP( Cx, Dx, Ex, Fx, Gx, Hx, Ax, Bx,
Cy, Dy, Ey, Fy, Gy, Hy, Ay, By,
Cz, Dz, Ez, Fz, Gz, Hz, Az, Bz, 6, j );
SHA2s_11WAY_STEP( Bx, Cx, Dx, Ex, Fx, Gx, Hx, Ax,
By, Cy, Dy, Ey, Fy, Gy, Hy, Ay,
Bz, Cz, Dz, Ez, Fz, Gz, Hz, Az, 7, j );
SHA2s_11WAY_STEP( Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx,
Ay, By, Cy, Dy, Ey, Fy, Gy, Hy,
Az, Bz, Cz, Dz, Ez, Fz, Gz, Hz, 8, j );
SHA2s_11WAY_STEP( Hx, Ax, Bx, Cx, Dx, Ex, Fx, Gx,
Hy, Ay, By, Cy, Dy, Ey, Fy, Gy,
Hz, Az, Bz, Cz, Dz, Ez, Fz, Gz, 9, j );
SHA2s_11WAY_STEP( Gx, Hx, Ax, Bx, Cx, Dx, Ex, Fx,
Gy, Hy, Ay, By, Cy, Dy, Ey, Fy,
Gz, Hz, Az, Bz, Cz, Dz, Ez, Fz, 10, j );
SHA2s_11WAY_STEP( Fx, Gx, Hx, Ax, Bx, Cx, Dx, Ex,
Fy, Gy, Hy, Ay, By, Cy, Dy, Ey,
Fz, Gz, Hz, Az, Bz, Cz, Dz, Ez, 11, j );
SHA2s_11WAY_STEP( Ex, Fx, Gx, Hx, Ax, Bx, Cx, Dx,
Ey, Fy, Gy, Hy, Ay, By, Cy, Dy,
Ez, Fz, Gz, Hz, Az, Bz, Cz, Dz, 12, j );
SHA2s_11WAY_STEP( Dx, Ex, Fx, Gx, Hx, Ax, Bx, Cx,
Dy, Ey, Fy, Gy, Hy, Ay, By, Cy,
Dz, Ez, Fz, Gz, Hz, Az, Bz, Cz, 13, j );
SHA2s_11WAY_STEP( Cx, Dx, Ex, Fx, Gx, Hx, Ax, Bx,
Cy, Dy, Ey, Fy, Gy, Hy, Ay, By,
Cz, Dz, Ez, Fz, Gz, Hz, Az, Bz, 14, j );
SHA2s_11WAY_STEP( Bx, Cx, Dx, Ex, Fx, Gx, Hx, Ax,
By, Cy, Dy, Ey, Fy, Gy, Hy, Ay,
Bz, Cz, Dz, Ez, Fz, Gz, Hz, Az, 15, j );
}
rx[0] = _mm256_add_epi32( rx[0], Ax );
ry[0] = _mm_add_pi32( ry[0], Ay );
rz[0] = rz[0]+ Az;
rx[1] = _mm256_add_epi32( rx[1], Bx );
ry[1] = _mm_add_pi32( ry[1], By );
rz[1] = rz[1]+ Bz;
rx[2] = _mm256_add_epi32( rx[2], Cx );
ry[2] = _mm_add_pi32( ry[2], Cy );
rz[2] = rz[2]+ Cz;
rx[3] = _mm256_add_epi32( rx[3], Dx );
ry[3] = _mm_add_pi32( ry[3], Dy );
rz[3] = rz[3]+ Dz;
rx[4] = _mm256_add_epi32( rx[4], Ex );
ry[4] = _mm_add_pi32( ry[4], Ey );
rz[4] = rz[4]+ Ez;
rx[5] = _mm256_add_epi32( rx[5], Fx );
ry[5] = _mm_add_pi32( ry[5], Fy );
rz[5] = rz[5]+ Fz;
rx[6] = _mm256_add_epi32( rx[6], Gx );
ry[6] = _mm_add_pi32( ry[6], Gy );
rz[6] = rz[6]+ Gz;
rx[7] = _mm256_add_epi32( rx[7], Hx );
ry[7] = _mm_add_pi32( ry[7], Hy );
rz[7] = rz[7]+ Hz;
}
void sha256_11way_init( sha256_11way_context *ctx )
{
ctx->count_high = ctx->count_low = 0;
ctx->valx[0] = _mm256_set1_epi32( H256[0] );
ctx->valy[0] = _mm_set1_pi32( H256[0] );
ctx->valx[1] = _mm256_set1_epi32( H256[1] );
ctx->valy[1] = _mm_set1_pi32( H256[1] );
ctx->valx[2] = _mm256_set1_epi32( H256[2] );
ctx->valy[2] = _mm_set1_pi32( H256[2] );
ctx->valx[3] = _mm256_set1_epi32( H256[3] );
ctx->valy[3] = _mm_set1_pi32( H256[3] );
ctx->valx[4] = _mm256_set1_epi32( H256[4] );
ctx->valy[4] = _mm_set1_pi32( H256[4] );
ctx->valx[5] = _mm256_set1_epi32( H256[5] );
ctx->valy[5] = _mm_set1_pi32( H256[5] );
ctx->valx[6] = _mm256_set1_epi32( H256[6] );
ctx->valy[6] = _mm_set1_pi32( H256[6] );
ctx->valx[7] = _mm256_set1_epi32( H256[7] );
ctx->valy[7] = _mm_set1_pi32( H256[7] );
memcpy( ctx->valz, H256, 32 );
}
void sha256_11way_update( sha256_11way_context *ctx, const void *datax,
const void *datay, const void *dataz, size_t len )
{
__m256i *vdatax = (__m256i*) datax;
__m64 *vdatay = (__m64*) datay;
uint32_t *idataz = (uint32_t*)dataz;
size_t ptr;
const int buf_size = 64;
ptr = (unsigned)ctx->count_low & (buf_size - 1U);
while ( len > 0 )
{
size_t clen;
uint32_t clow, clow2;
clen = buf_size - ptr;
if ( clen > len )
clen = len;
memcpy_256( ctx->bufx + (ptr>>2), vdatax + (ptr>>2), clen>>2 );
memcpy_m64( ctx->bufy + (ptr>>2), vdatay + (ptr>>2), clen>>2 );
memcpy ( ctx->bufz + (ptr>>2), idataz + (ptr>>2), clen );
ptr += clen;
len -= clen;
if ( ptr == buf_size )
{
sha256_11way_round( ctx->bufx, ctx->valx,
ctx->bufy, ctx->valy,
ctx->bufz, ctx->valz );
ptr = 0;
}
clow = ctx->count_low;
clow2 = clow + clen;
ctx->count_low = clow2;
if ( clow2 < clow )
ctx->count_high++;
}
}
void sha256_11way_close( sha256_11way_context *ctx, void *dstx, void *dsty,
void *dstz)
{
unsigned ptr, u;
uint32_t low, high;
const int buf_size = 64;
const int pad = buf_size - 8;
ptr = (unsigned)ctx->count_low & (buf_size - 1U);
ctx->bufx[ ptr>>2 ] = _mm256_set1_epi32( 0x80 );
ctx->bufy[ ptr>>2 ] = _mm_set1_pi32( 0x80 );
ctx->bufz[ ptr>>2 ] = 0x80;
ptr += 4;
if ( ptr > pad )
{
memset_zero_256( ctx->bufx + (ptr>>2), (buf_size - ptr) >> 2 );
memset_zero_m64( ctx->bufy + (ptr>>2), (buf_size - ptr) >> 2 );
memset( ctx->bufz + (ptr>>2), 0, buf_size - ptr );
sha256_11way_round( ctx->bufx, ctx->valx,
ctx->bufy, ctx->valy,
ctx->bufz, ctx->valz );
memset_zero_256( ctx->bufx, pad >> 2 );
memset_zero_m64( ctx->bufy, pad >> 2 );
memset( ctx->bufz, 0, pad );
}
else
{
memset_zero_256( ctx->bufx + (ptr>>2), (pad - ptr) >> 2 );
memset_zero_m64( ctx->bufy + (ptr>>2), (pad - ptr) >> 2 );
memset( ctx->bufz + (ptr>>2), 0, pad - ptr );
}
low = ctx->count_low;
high = (ctx->count_high << 3) | (low >> 29);
low = low << 3;
ctx->bufx[ pad >> 2 ] =
mm256_bswap_32( _mm256_set1_epi32( high ) );
ctx->bufy[ pad >> 2 ] =
mm64_bswap_32( _mm_set1_pi32( high ) );
ctx->bufz[ pad >> 2 ] =
bswap_32( high );
ctx->bufx[ ( pad+4 ) >> 2 ] =
mm256_bswap_32( _mm256_set1_epi32( low ) );
ctx->bufy[ ( pad+4 ) >> 2 ] =
mm64_bswap_32( _mm_set1_pi32( low ) );
ctx->bufz[ ( pad+4 ) >> 2 ] =
bswap_32( low );
sha256_11way_round( ctx->bufx, ctx->valx,
ctx->bufy, ctx->valy,
ctx->bufz, ctx->valz );
for ( u = 0; u < 8; u ++ )
{
casti_m256i( dstx, u ) = mm256_bswap_32( ctx->valx[u] );
casti_m64 ( dsty, u ) = mm64_bswap_32( ctx->valy[u] );
((uint32_t*)dstz)[u] = bswap_32( ctx->valz[u] );
}
}
#endif
#endif // 0

View File

@@ -5,137 +5,6 @@
#include <stdio.h>
#include "sha-hash-4way.h"
#if defined(SHA256T_11WAY)
static __thread sha256_11way_context sha256_ctx11 __attribute__ ((aligned (64)));
void sha256t_11way_hash( void *outx, void *outy, void *outz, const void *inpx,
const void *inpy, const void*inpz )
{
uint32_t hashx[8*8] __attribute__ ((aligned (64)));
uint32_t hashy[8*2] __attribute__ ((aligned (64)));
uint32_t hashz[8] __attribute__ ((aligned (64)));
sha256_11way_context ctx;
const void *inpx64 = inpx+(64<<3);
const void *inpy64 = inpy+(64<<1);
const void *inpz64 = inpz+ 64;
memcpy( &ctx, &sha256_ctx11, sizeof ctx );
sha256_11way_update( &ctx, inpx64, inpy64, inpz64, 16 );
sha256_11way_close( &ctx, hashx, hashy, hashz );
sha256_11way_init( &ctx );
sha256_11way_update( &ctx, hashx, hashy, hashz, 32 );
sha256_11way_close( &ctx, hashx, hashy, hashz );
sha256_11way_init( &ctx );
sha256_11way_update( &ctx, hashx, hashy, hashz, 32 );
sha256_11way_close( &ctx, outx, outy, outz );
}
int scanhash_sha256t_11way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t datax[20*8] __attribute__ ((aligned (64)));
uint32_t datay[20*2] __attribute__ ((aligned (32)));
uint32_t dataz[20] __attribute__ ((aligned (32)));
uint32_t hashx[8*8] __attribute__ ((aligned (32)));
uint32_t hashy[8*2] __attribute__ ((aligned (32)));
uint32_t hashz[8] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7;
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
__m256i *noncex = (__m256i*) datax + 19;
__m64 *noncey = (__m64*) datay + 19;
uint32_t *noncez = (uint32_t*)dataz + 19;
int thr_id = mythr->id; // thr_id arg is deprecated
int i;
const uint64_t htmax[] = { 0,
0xF,
0xFF,
0xFFF,
0xFFFF,
0x10000000 };
const uint32_t masks[] = { 0xFFFFFFFF,
0xFFFFFFF0,
0xFFFFFF00,
0xFFFFF000,
0xFFFF0000,
0 };
// Use dataz (scalar) to stage bswapped data for the vectors.
casti_m256i( dataz, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) );
casti_m256i( dataz, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) );
casti_m128i( dataz, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
intrlv_8x32( datax, dataz, dataz, dataz, dataz,
dataz, dataz, dataz, dataz, 640 );
mm64_interleave_2x32( datay, dataz, dataz, 640 );
sha256_11way_init( &sha256_ctx11 );
sha256_11way_update( &sha256_ctx11, datax, datay, dataz, 64 );
for ( int m = 0; m < 6; m++ ) if ( Htarg <= htmax[m] )
{
uint32_t mask = masks[m];
do
{
*noncex = mm256_bswap_32(
_mm256_set_epi32( n+7, n+6, n+5, n+4, n+3, n+2, n+1, n ) );
*noncey = mm64_bswap_32( _mm_set_pi32( n+9, n+8 ) );
*noncez = bswap_32( n+10 );
pdata[19] = n;
sha256t_11way_hash( hashx, hashy, hashz, datax, datay, dataz );
if ( opt_benchmark ) { n += 11; continue; }
hash7 = &(hashx[7<<3]);
for ( i = 0; i < 8; i++ ) if ( !( hash7[ i ] & mask ) )
{
// deinterleave hash for lane
extr_lane_8x32( lane_hash, hashx, i, 256 );
if ( fulltest( lane_hash, ptarget ) )
{
pdata[19] = n + i;
submit_lane_solution( work, lane_hash, mythr, i );
}
}
hash7 = &(hashy[7<<1]);
for ( i = 0; i < 2; i++ ) if ( !( hash7[ i ] & mask ) )
{
mm64_extr_lane_2x32( lane_hash, hashy, i, 256 );
if ( fulltest( lane_hash, ptarget ) )
{
pdata[19] = n + 8 + i;
submit_lane_solution( work, lane_hash, mythr, i+8 );
}
}
if ( !(hashz[7] & mask ) && fulltest( hashz, ptarget ) )
{
pdata[19] = n+10;
submit_lane_solution( work, hashz, mythr, 10 );
}
n += 11;
} while ( (n < max_nonce-12) && !work_restart[thr_id].restart );
break;
}
*hashes_done = n - first_nonce + 1;
return 0;
}
#endif
#if defined(SHA256T_8WAY)
static __thread sha256_8way_context sha256_ctx8 __attribute__ ((aligned (64)));

View File

@@ -285,7 +285,9 @@ void sha512_4way_close( sha512_4way_context *sc, void *dst )
unsigned ptr;
const int buf_size = 128;
const int pad = buf_size - 16;
const __m256i shuff_bswap64 = m256_const2_64( 0x08090a0b0c0d0e0f,
const __m256i shuff_bswap64 = m256_const_64( 0x18191a1b1c1d1e1f,
0x1011121314151617,
0x08090a0b0c0d0e0f,
0x0001020304050607 );
ptr = (unsigned)sc->count & (buf_size - 1U);

View File

@@ -83,13 +83,14 @@ HashReturn init_sd(hashState_sd *state, int hashbitlen) {
char *init;
#ifndef NO_PRECOMPUTED_IV
if (hashbitlen == 224)
r=InitIV(state, hashbitlen, IV_224);
else if (hashbitlen == 256)
r=InitIV(state, hashbitlen, IV_256);
else if (hashbitlen == 384)
r=InitIV(state, hashbitlen, IV_384);
else if (hashbitlen == 512)
// if (hashbitlen == 224)
// r=InitIV(state, hashbitlen, IV_224);
// else if (hashbitlen == 256)
// r=InitIV(state, hashbitlen, IV_256);
// else if (hashbitlen == 384)
// r=InitIV(state, hashbitlen, IV_384);
// else
if (hashbitlen == 512)
r=InitIV(state, hashbitlen, IV_512);
else
#endif

File diff suppressed because it is too large

View File

@@ -7,15 +7,37 @@
#include "simd-utils.h"
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
typedef struct {
uint32_t A[ 32*2 ] __attribute__((aligned(64)));
uint8_t buffer[ 128*2 ] __attribute__((aligned(64)));
uint32_t A[ 32*4 ];
uint8_t buffer[ 128*4 ];
uint64_t count;
unsigned int hashbitlen;
unsigned int blocksize;
unsigned int n_feistels;
} simd_2way_context;
} simd_4way_context __attribute__((aligned(128)));
int simd_4way_init( simd_4way_context *state, int hashbitlen );
int simd_4way_update( simd_4way_context *state, const void *data,
int databitlen );
int simd_4way_close( simd_4way_context *state, void *hashval );
int simd_4way_update_close( simd_4way_context *state, void *hashval,
const void *data, int databitlen );
#endif
typedef struct {
uint32_t A[ 32*2 ];
uint8_t buffer[ 128*2 ];
uint64_t count;
unsigned int hashbitlen;
unsigned int blocksize;
unsigned int n_feistels;
} simd_2way_context __attribute__((aligned(128)));
int simd_2way_init( simd_2way_context *state, int hashbitlen );
int simd_2way_update( simd_2way_context *state, const void *data,

View File

@@ -2,17 +2,140 @@
#include <string.h>
#include <stdint.h>
#include "skein-hash-4way.h"
// 8 way hashing is faster than SHA extensions on Ice Lake.
// SHA extensions are faster than 4 way hashing on Ryzen.
//
#if defined(__SHA__)
#include <openssl/sha.h>
#else
#include "algo/sha/sha-hash-4way.h"
#endif
#include "algo/sha/sha-hash-4way.h"
#if defined (SKEIN_4WAY)
#if defined (SKEIN_8WAY)
void skeinhash_8way( void *state, const void *input )
{
uint64_t vhash64[8*8] __attribute__ ((aligned (128)));
skein512_8way_context ctx_skein;
//#if defined(__SHA__)
// uint32_t hash0[16] __attribute__ ((aligned (64)));
// uint32_t hash1[16] __attribute__ ((aligned (64)));
// uint32_t hash2[16] __attribute__ ((aligned (64)));
// uint32_t hash3[16] __attribute__ ((aligned (64)));
// uint32_t hash4[16] __attribute__ ((aligned (64)));
// uint32_t hash5[16] __attribute__ ((aligned (64)));
// uint32_t hash6[16] __attribute__ ((aligned (64)));
// uint32_t hash7[16] __attribute__ ((aligned (64)));
// SHA256_CTX ctx_sha256;
//#else
uint32_t vhash32[16*8] __attribute__ ((aligned (128)));
sha256_8way_context ctx_sha256;
//#endif
skein512_8way_init( &ctx_skein );
skein512_8way_update( &ctx_skein, input, 80 );
skein512_8way_close( &ctx_skein, vhash64 );
/*
#if defined(__SHA__)
dintrlv_8x64( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
vhash64, 512 );
SHA256_Init( &ctx_sha256 );
SHA256_Update( &ctx_sha256, (unsigned char*)hash0, 64 );
SHA256_Final( (unsigned char*)hash0, &ctx_sha256 );
SHA256_Init( &ctx_sha256 );
SHA256_Update( &ctx_sha256, (unsigned char*)hash1, 64 );
SHA256_Final( (unsigned char*)hash1, &ctx_sha256 );
SHA256_Init( &ctx_sha256 );
SHA256_Update( &ctx_sha256, (unsigned char*)hash2, 64 );
SHA256_Final( (unsigned char*)hash2, &ctx_sha256 );
SHA256_Init( &ctx_sha256 );
SHA256_Update( &ctx_sha256, (unsigned char*)hash3, 64 );
SHA256_Final( (unsigned char*)hash3, &ctx_sha256 );
SHA256_Init( &ctx_sha256 );
SHA256_Update( &ctx_sha256, (unsigned char*)hash4, 64 );
SHA256_Final( (unsigned char*)hash4, &ctx_sha256 );
SHA256_Init( &ctx_sha256 );
SHA256_Update( &ctx_sha256, (unsigned char*)hash5, 64 );
SHA256_Final( (unsigned char*)hash5, &ctx_sha256 );
SHA256_Init( &ctx_sha256 );
SHA256_Update( &ctx_sha256, (unsigned char*)hash6, 64 );
SHA256_Final( (unsigned char*)hash6, &ctx_sha256 );
SHA256_Init( &ctx_sha256 );
SHA256_Update( &ctx_sha256, (unsigned char*)hash7, 64 );
SHA256_Final( (unsigned char*)hash7, &ctx_sha256 );
intrlv_8x32( state, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
hash7, 256 );
#else
*/
rintrlv_8x64_8x32( vhash32, vhash64, 512 );
// dintrlv_8x64( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
// vhash64, 512 );
// intrlv_8x32( vhash32, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
// hash7, 512 );
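// rintrlv_8x64_8x32 converts the 8x64 interleaved Skein output directly
// to the 8x32 interleaving SHA-256 expects, replacing the
// deinterleave/reinterleave pair sketched above in a single pass.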
sha256_8way_init( &ctx_sha256 );
sha256_8way( &ctx_sha256, vhash32, 64 );
sha256_8way_close( &ctx_sha256, state );
//#endif
}
int scanhash_skein_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[20*8] __attribute__ ((aligned (128)));
uint32_t hash[16*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[7<<3]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
__m512i *noncev = (__m512i*)vdata + 9; // aligned
int thr_id = mythr->id;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
do
{
*noncev = mm512_intrlv_blend_32( mm512_bswap_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ) ), *noncev );
skeinhash_8way( hash, vdata );
for ( int lane = 0; lane < 8; lane++ )
if ( hash7[ lane ] <= Htarg )
{
extr_lane_8x32( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 8;
} while ( (n < max_nonce-8) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce;
return 0;
}
#elif defined (SKEIN_4WAY)
void skeinhash_4way( void *state, const void *input )
{
uint64_t vhash64[16*4] __attribute__ ((aligned (64)));
uint64_t vhash64[8*4] __attribute__ ((aligned (128)));
skein512_4way_context ctx_skein;
#if defined(__SHA__)
uint32_t hash0[16] __attribute__ ((aligned (64)));
@@ -26,7 +149,7 @@ void skeinhash_4way( void *state, const void *input )
#endif
skein512_4way_init( &ctx_skein );
skein512_4way( &ctx_skein, input, 80 );
skein512_4way_update( &ctx_skein, input, 80 );
skein512_4way_close( &ctx_skein, vhash64 );
#if defined(__SHA__)
@@ -71,7 +194,7 @@ int scanhash_skein_4way( struct work *work, uint32_t max_nonce,
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
__m256i *noncev = (__m256i*)vdata + 9; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
int thr_id = mythr->id;
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do
@@ -92,9 +215,9 @@ int scanhash_skein_4way( struct work *work, uint32_t max_nonce,
}
}
n += 4;
} while ( (n < max_nonce) && !work_restart[thr_id].restart );
} while ( (n < max_nonce-4) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;
*hashes_done = n - first_nonce;
return 0;
}

View File

@@ -4,8 +4,11 @@
bool register_skein_algo( algo_gate_t* gate )
{
gate->optimizations = AVX2_OPT | SHA_OPT;
#if defined (SKEIN_4WAY)
gate->optimizations = AVX2_OPT | AVX512_OPT | SHA_OPT;
#if defined (SKEIN_8WAY)
gate->scanhash = (void*)&scanhash_skein_8way;
gate->hash = (void*)&skeinhash_8way;
#elif defined (SKEIN_4WAY)
gate->scanhash = (void*)&scanhash_skein_4way;
gate->hash = (void*)&skeinhash_4way;
#else
@@ -15,3 +18,20 @@ bool register_skein_algo( algo_gate_t* gate )
return true;
};
bool register_skein2_algo( algo_gate_t* gate )
{
gate->optimizations = AVX2_OPT | AVX512_OPT;
#if defined (SKEIN_8WAY)
gate->scanhash = (void*)&scanhash_skein2_8way;
gate->hash = (void*)&skein2hash_8way;
#elif defined (SKEIN_4WAY)
gate->scanhash = (void*)&scanhash_skein2_4way;
gate->hash = (void*)&skein2hash_4way;
#else
gate->scanhash = (void*)&scanhash_skein2;
gate->hash = (void*)&skein2hash;
#endif
return true;
};

View File

@@ -1,23 +1,44 @@
#ifndef __SKEIN_GATE_H__
#define __SKEIN_GATE_H__
#define __SKEIN_GATE_H__ 1
#include <stdint.h>
#include "algo-gate-api.h"
#if defined(__AVX2__)
#define SKEIN_4WAY
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define SKEIN_8WAY 1
#elif defined(__AVX2__)
#define SKEIN_4WAY 1
#endif
#if defined(SKEIN_4WAY)
#if defined(SKEIN_8WAY)
void skeinhash_8way( void *output, const void *input );
int scanhash_skein_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void skein2hash_8way( void *output, const void *input );
int scanhash_skein2_8way( struct work *work, uint32_t max_nonce,
uint64_t* hashes_done, struct thr_info *mythr );
#elif defined(SKEIN_4WAY)
void skeinhash_4way( void *output, const void *input );
int scanhash_skein_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
void skein2hash_4way( void *output, const void *input );
int scanhash_skein2_4way( struct work *work, uint32_t max_nonce,
uint64_t* hashes_done, struct thr_info *mythr );
#else
void skeinhash( void *output, const void *input );
int scanhash_skein( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void skein2hash( void *output, const void *input );
int scanhash_skein2( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
#endif

View File

@@ -36,7 +36,6 @@
#include <string.h>
#include "skein-hash-4way.h"
#ifdef __cplusplus
extern "C"{
#endif
@@ -45,6 +44,22 @@ extern "C"{
#pragma warning (disable: 4146)
#endif
/*
static const sph_u64 IV256[] = {
SPH_C64(0xCCD044A12FDB3E13), SPH_C64(0xE83590301A79A9EB),
SPH_C64(0x55AEA0614F816E6F), SPH_C64(0x2A2767A4AE9B94DB),
SPH_C64(0xEC06025E74DD7683), SPH_C64(0xE7A436CDC4746251),
SPH_C64(0xC36FBAF9393AD185), SPH_C64(0x3EEDBA1833EDFC13)
};
static const sph_u64 IV512[] = {
SPH_C64(0x4903ADFF749C51CE), SPH_C64(0x0D95DE399746DF03),
SPH_C64(0x8FD1934127C79BCE), SPH_C64(0x9A255629FF352CB1),
SPH_C64(0x5DB62599DF6CA7B0), SPH_C64(0xEABE394CA9D5C3F4),
SPH_C64(0x991112C71A75B523), SPH_C64(0xAE18A40B660FCC33)
};
*/
/*
* M9_ ## s ## _ ## i evaluates to s+i mod 9 (0 <= s <= 18, 0 <= i <= 7).
*/
@@ -270,8 +285,151 @@ extern "C"{
#define SKBI(k, s, i) XCAT(k, XCAT(XCAT(XCAT(M9_, s), _), i))
#define SKBT(t, s, v) XCAT(t, XCAT(XCAT(XCAT(M3_, s), _), v))
#define READ_STATE_BIG(sc) do { \
h0 = (sc)->h0; \
h1 = (sc)->h1; \
h2 = (sc)->h2; \
h3 = (sc)->h3; \
h4 = (sc)->h4; \
h5 = (sc)->h5; \
h6 = (sc)->h6; \
h7 = (sc)->h7; \
bcount = sc->bcount; \
} while (0)
#define WRITE_STATE_BIG(sc) do { \
(sc)->h0 = h0; \
(sc)->h1 = h1; \
(sc)->h2 = h2; \
(sc)->h3 = h3; \
(sc)->h4 = h4; \
(sc)->h5 = h5; \
(sc)->h6 = h6; \
(sc)->h7 = h7; \
sc->bcount = bcount; \
} while (0)
// In the vectored code every scalar variable becomes a vector, one lane
// per nonce: 4 lanes with AVX2, 8 with AVX-512.
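// Illustrative layout (names assumed): element i of each vector holds
// the state for nonce n+i, e.g.
//   __m512i h0;   // h0 = { h0(n+7), ..., h0(n+1), h0(n) }
// so one _mm512_add_epi64 advances all 8 Skein states at once.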
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define TFBIG_KINIT_8WAY( k0, k1, k2, k3, k4, k5, k6, k7, k8, t0, t1, t2 ) \
do { \
k8 = _mm512_xor_si512( _mm512_xor_si512( \
_mm512_xor_si512( _mm512_xor_si512( k0, k1 ), \
_mm512_xor_si512( k2, k3 ) ), \
_mm512_xor_si512( _mm512_xor_si512( k4, k5 ), \
_mm512_xor_si512( k6, k7 ) ) ), \
m512_const1_64( 0x1BD11BDAA9FC1A22) ); \
t2 = t0 ^ t1; \
} while (0)
#define TFBIG_ADDKEY_8WAY(w0, w1, w2, w3, w4, w5, w6, w7, k, t, s) \
do { \
w0 = _mm512_add_epi64( w0, SKBI(k,s,0) ); \
w1 = _mm512_add_epi64( w1, SKBI(k,s,1) ); \
w2 = _mm512_add_epi64( w2, SKBI(k,s,2) ); \
w3 = _mm512_add_epi64( w3, SKBI(k,s,3) ); \
w4 = _mm512_add_epi64( w4, SKBI(k,s,4) ); \
w5 = _mm512_add_epi64( w5, _mm512_add_epi64( SKBI(k,s,5), \
m512_const1_64( SKBT(t,s,0) ) ) ); \
w6 = _mm512_add_epi64( w6, _mm512_add_epi64( SKBI(k,s,6), \
m512_const1_64( SKBT(t,s,1) ) ) ); \
w7 = _mm512_add_epi64( w7, _mm512_add_epi64( SKBI(k,s,7), \
m512_const1_64( s ) ) ); \
} while (0)
#define TFBIG_MIX_8WAY(x0, x1, rc) \
do { \
x0 = _mm512_add_epi64( x0, x1 ); \
x1 = _mm512_xor_si512( mm512_rol_64( x1, rc ), x0 ); \
} while (0)
#define TFBIG_MIX8_8WAY(w0, w1, w2, w3, w4, w5, w6, w7, rc0, rc1, rc2, rc3) do { \
TFBIG_MIX_8WAY(w0, w1, rc0); \
TFBIG_MIX_8WAY(w2, w3, rc1); \
TFBIG_MIX_8WAY(w4, w5, rc2); \
TFBIG_MIX_8WAY(w6, w7, rc3); \
} while (0)
#define TFBIG_8WAY_4e(s) do { \
TFBIG_ADDKEY_8WAY(p0, p1, p2, p3, p4, p5, p6, p7, h, t, s); \
TFBIG_MIX8_8WAY(p0, p1, p2, p3, p4, p5, p6, p7, 46, 36, 19, 37); \
TFBIG_MIX8_8WAY(p2, p1, p4, p7, p6, p5, p0, p3, 33, 27, 14, 42); \
TFBIG_MIX8_8WAY(p4, p1, p6, p3, p0, p5, p2, p7, 17, 49, 36, 39); \
TFBIG_MIX8_8WAY(p6, p1, p0, p7, p2, p5, p4, p3, 44, 9, 54, 56); \
} while (0)
#define TFBIG_8WAY_4o(s) do { \
TFBIG_ADDKEY_8WAY(p0, p1, p2, p3, p4, p5, p6, p7, h, t, s); \
TFBIG_MIX8_8WAY(p0, p1, p2, p3, p4, p5, p6, p7, 39, 30, 34, 24); \
TFBIG_MIX8_8WAY(p2, p1, p4, p7, p6, p5, p0, p3, 13, 50, 10, 17); \
TFBIG_MIX8_8WAY(p4, p1, p6, p3, p0, p5, p2, p7, 25, 29, 39, 43); \
TFBIG_MIX8_8WAY(p6, p1, p0, p7, p2, p5, p4, p3, 8, 35, 56, 22); \
} while (0)
#define UBI_BIG_8WAY(etype, extra) \
do { \
sph_u64 t0, t1, t2; \
__m512i h8; \
__m512i m0 = buf[0]; \
__m512i m1 = buf[1]; \
__m512i m2 = buf[2]; \
__m512i m3 = buf[3]; \
__m512i m4 = buf[4]; \
__m512i m5 = buf[5]; \
__m512i m6 = buf[6]; \
__m512i m7 = buf[7]; \
\
__m512i p0 = m0; \
__m512i p1 = m1; \
__m512i p2 = m2; \
__m512i p3 = m3; \
__m512i p4 = m4; \
__m512i p5 = m5; \
__m512i p6 = m6; \
__m512i p7 = m7; \
t0 = SPH_T64(bcount << 6) + (sph_u64)(extra); \
t1 = (bcount >> 58) + ((sph_u64)(etype) << 55); \
TFBIG_KINIT_8WAY(h0, h1, h2, h3, h4, h5, h6, h7, h8, t0, t1, t2); \
TFBIG_8WAY_4e(0); \
TFBIG_8WAY_4o(1); \
TFBIG_8WAY_4e(2); \
TFBIG_8WAY_4o(3); \
TFBIG_8WAY_4e(4); \
TFBIG_8WAY_4o(5); \
TFBIG_8WAY_4e(6); \
TFBIG_8WAY_4o(7); \
TFBIG_8WAY_4e(8); \
TFBIG_8WAY_4o(9); \
TFBIG_8WAY_4e(10); \
TFBIG_8WAY_4o(11); \
TFBIG_8WAY_4e(12); \
TFBIG_8WAY_4o(13); \
TFBIG_8WAY_4e(14); \
TFBIG_8WAY_4o(15); \
TFBIG_8WAY_4e(16); \
TFBIG_8WAY_4o(17); \
TFBIG_ADDKEY_8WAY(p0, p1, p2, p3, p4, p5, p6, p7, h, t, 18); \
h0 = _mm512_xor_si512( m0, p0 );\
h1 = _mm512_xor_si512( m1, p1 );\
h2 = _mm512_xor_si512( m2, p2 );\
h3 = _mm512_xor_si512( m3, p3 );\
h4 = _mm512_xor_si512( m4, p4 );\
h5 = _mm512_xor_si512( m5, p5 );\
h6 = _mm512_xor_si512( m6, p6 );\
h7 = _mm512_xor_si512( m7, p7 );\
} while (0)
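// In the tweak computed above (per the Skein spec, as understood here):
// t0 is the running byte count, and etype<<55 places the 6-bit block
// type at tweak bits 120-125, with the first flag as bit 126 ( +128 )
// and the final flag as bit 127 ( +256 ); hence 96+first for message
// blocks, 352 for the padded tail, and 510 for the output block.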
#define DECL_STATE_BIG_8WAY \
__m512i h0, h1, h2, h3, h4, h5, h6, h7; \
sph_u64 bcount;
#endif // AVX512
#define TFBIG_KINIT_4WAY( k0, k1, k2, k3, k4, k5, k6, k7, k8, t0, t1, t2 ) \
do { \
k8 = _mm256_xor_si256( _mm256_xor_si256( \
@@ -298,40 +456,35 @@ do { \
m256_const1_64( s ) ) ); \
} while (0)
#define TFBIG_MIX_4WAY(x0, x1, rc) \
do { \
x0 = _mm256_add_epi64( x0, x1 ); \
x1 = _mm256_xor_si256( mm256_rol_64( x1, rc ), x0 ); \
} while (0)
// typeless
#define TFBIG_MIX8(w0, w1, w2, w3, w4, w5, w6, w7, rc0, rc1, rc2, rc3) do { \
#define TFBIG_MIX8_4WAY(w0, w1, w2, w3, w4, w5, w6, w7, rc0, rc1, rc2, rc3) do { \
TFBIG_MIX_4WAY(w0, w1, rc0); \
TFBIG_MIX_4WAY(w2, w3, rc1); \
TFBIG_MIX_4WAY(w4, w5, rc2); \
TFBIG_MIX_4WAY(w6, w7, rc3); \
} while (0)
#define TFBIG_4e(s) do { \
#define TFBIG_4WAY_4e(s) do { \
TFBIG_ADDKEY_4WAY(p0, p1, p2, p3, p4, p5, p6, p7, h, t, s); \
TFBIG_MIX8(p0, p1, p2, p3, p4, p5, p6, p7, 46, 36, 19, 37); \
TFBIG_MIX8(p2, p1, p4, p7, p6, p5, p0, p3, 33, 27, 14, 42); \
TFBIG_MIX8(p4, p1, p6, p3, p0, p5, p2, p7, 17, 49, 36, 39); \
TFBIG_MIX8(p6, p1, p0, p7, p2, p5, p4, p3, 44, 9, 54, 56); \
TFBIG_MIX8_4WAY(p0, p1, p2, p3, p4, p5, p6, p7, 46, 36, 19, 37); \
TFBIG_MIX8_4WAY(p2, p1, p4, p7, p6, p5, p0, p3, 33, 27, 14, 42); \
TFBIG_MIX8_4WAY(p4, p1, p6, p3, p0, p5, p2, p7, 17, 49, 36, 39); \
TFBIG_MIX8_4WAY(p6, p1, p0, p7, p2, p5, p4, p3, 44, 9, 54, 56); \
} while (0)
#define TFBIG_4o(s) do { \
#define TFBIG_4WAY_4o(s) do { \
TFBIG_ADDKEY_4WAY(p0, p1, p2, p3, p4, p5, p6, p7, h, t, s); \
TFBIG_MIX8(p0, p1, p2, p3, p4, p5, p6, p7, 39, 30, 34, 24); \
TFBIG_MIX8(p2, p1, p4, p7, p6, p5, p0, p3, 13, 50, 10, 17); \
TFBIG_MIX8(p4, p1, p6, p3, p0, p5, p2, p7, 25, 29, 39, 43); \
TFBIG_MIX8(p6, p1, p0, p7, p2, p5, p4, p3, 8, 35, 56, 22); \
TFBIG_MIX8_4WAY(p0, p1, p2, p3, p4, p5, p6, p7, 39, 30, 34, 24); \
TFBIG_MIX8_4WAY(p2, p1, p4, p7, p6, p5, p0, p3, 13, 50, 10, 17); \
TFBIG_MIX8_4WAY(p4, p1, p6, p3, p0, p5, p2, p7, 25, 29, 39, 43); \
TFBIG_MIX8_4WAY(p6, p1, p0, p7, p2, p5, p4, p3, 8, 35, 56, 22); \
} while (0)
// scale buf offset by 4
#define UBI_BIG_4WAY(etype, extra) \
do { \
@@ -357,24 +510,24 @@ do { \
t0 = SPH_T64(bcount << 6) + (sph_u64)(extra); \
t1 = (bcount >> 58) + ((sph_u64)(etype) << 55); \
TFBIG_KINIT_4WAY(h0, h1, h2, h3, h4, h5, h6, h7, h8, t0, t1, t2); \
TFBIG_4e(0); \
TFBIG_4o(1); \
TFBIG_4e(2); \
TFBIG_4o(3); \
TFBIG_4e(4); \
TFBIG_4o(5); \
TFBIG_4e(6); \
TFBIG_4o(7); \
TFBIG_4e(8); \
TFBIG_4o(9); \
TFBIG_4e(10); \
TFBIG_4o(11); \
TFBIG_4e(12); \
TFBIG_4o(13); \
TFBIG_4e(14); \
TFBIG_4o(15); \
TFBIG_4e(16); \
TFBIG_4o(17); \
TFBIG_4WAY_4e(0); \
TFBIG_4WAY_4o(1); \
TFBIG_4WAY_4e(2); \
TFBIG_4WAY_4o(3); \
TFBIG_4WAY_4e(4); \
TFBIG_4WAY_4o(5); \
TFBIG_4WAY_4e(6); \
TFBIG_4WAY_4o(7); \
TFBIG_4WAY_4e(8); \
TFBIG_4WAY_4o(9); \
TFBIG_4WAY_4e(10); \
TFBIG_4WAY_4o(11); \
TFBIG_4WAY_4e(12); \
TFBIG_4WAY_4o(13); \
TFBIG_4WAY_4e(14); \
TFBIG_4WAY_4o(15); \
TFBIG_4WAY_4e(16); \
TFBIG_4WAY_4o(17); \
TFBIG_ADDKEY_4WAY(p0, p1, p2, p3, p4, p5, p6, p7, h, t, 18); \
h0 = _mm256_xor_si256( m0, p0 );\
h1 = _mm256_xor_si256( m1, p1 );\
@@ -391,45 +544,142 @@ do { \
__m256i h0, h1, h2, h3, h4, h5, h6, h7; \
sph_u64 bcount;
#define READ_STATE_BIG(sc) do { \
h0 = (sc)->h0; \
h1 = (sc)->h1; \
h2 = (sc)->h2; \
h3 = (sc)->h3; \
h4 = (sc)->h4; \
h5 = (sc)->h5; \
h6 = (sc)->h6; \
h7 = (sc)->h7; \
bcount = sc->bcount; \
} while (0)
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define WRITE_STATE_BIG(sc) do { \
(sc)->h0 = h0; \
(sc)->h1 = h1; \
(sc)->h2 = h2; \
(sc)->h3 = h3; \
(sc)->h4 = h4; \
(sc)->h5 = h5; \
(sc)->h6 = h6; \
(sc)->h7 = h7; \
sc->bcount = bcount; \
} while (0)
void skein256_8way_init( skein256_8way_context *sc )
{
sc->h0 = m512_const1_64( 0xCCD044A12FDB3E13 );
sc->h1 = m512_const1_64( 0xE83590301A79A9EB );
sc->h2 = m512_const1_64( 0x55AEA0614F816E6F );
sc->h3 = m512_const1_64( 0x2A2767A4AE9B94DB );
sc->h4 = m512_const1_64( 0xEC06025E74DD7683 );
sc->h5 = m512_const1_64( 0xE7A436CDC4746251 );
sc->h6 = m512_const1_64( 0xC36FBAF9393AD185 );
sc->h7 = m512_const1_64( 0x3EEDBA1833EDFC13 );
sc->bcount = 0;
sc->ptr = 0;
}
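// The init constants broadcast the standard Skein IVs (see the
// commented IV256/IV512 tables nearby) into all 8 lanes.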
/*
static const sph_u64 IV256[] = {
SPH_C64(0xCCD044A12FDB3E13), SPH_C64(0xE83590301A79A9EB),
SPH_C64(0x55AEA0614F816E6F), SPH_C64(0x2A2767A4AE9B94DB),
SPH_C64(0xEC06025E74DD7683), SPH_C64(0xE7A436CDC4746251),
SPH_C64(0xC36FBAF9393AD185), SPH_C64(0x3EEDBA1833EDFC13)
};
void skein512_8way_init( skein512_8way_context *sc )
{
sc->h0 = m512_const1_64( 0x4903ADFF749C51CE );
sc->h1 = m512_const1_64( 0x0D95DE399746DF03 );
sc->h2 = m512_const1_64( 0x8FD1934127C79BCE );
sc->h3 = m512_const1_64( 0x9A255629FF352CB1 );
sc->h4 = m512_const1_64( 0x5DB62599DF6CA7B0 );
sc->h5 = m512_const1_64( 0xEABE394CA9D5C3F4 );
sc->h6 = m512_const1_64( 0x991112C71A75B523 );
sc->h7 = m512_const1_64( 0xAE18A40B660FCC33 );
sc->bcount = 0;
sc->ptr = 0;
}
static void
skein_big_core_8way( skein512_8way_context *sc, const void *data,
size_t len )
{
__m512i *vdata = (__m512i*)data;
__m512i *buf;
size_t ptr;
unsigned first;
DECL_STATE_BIG_8WAY
buf = sc->buf;
ptr = sc->ptr;
const int buf_size = 64; // bytes per lane, 8 rows of __m512i
if ( len <= buf_size - ptr )
{
memcpy_512( buf + (ptr>>3), vdata, len>>3 );
sc->ptr = ptr + len;
return;
}
READ_STATE_BIG( sc );
first = ( bcount == 0 ) << 7;
do {
size_t clen;
if ( ptr == buf_size )
{
bcount ++;
UBI_BIG_8WAY( 96 + first, 0 );
first = 0;
ptr = 0;
}
clen = buf_size - ptr;
if ( clen > len )
clen = len;
memcpy_512( buf + (ptr>>3), vdata, clen>>3 );
ptr += clen;
vdata += (clen>>3);
len -= clen;
} while ( len > 0 );
WRITE_STATE_BIG( sc );
sc->ptr = ptr;
}
static void
skein_big_close_8way( skein512_8way_context *sc, unsigned ub, unsigned n,
void *dst, size_t out_len )
{
__m512i *buf;
size_t ptr;
unsigned et;
DECL_STATE_BIG_8WAY
buf = sc->buf;
ptr = sc->ptr;
const int buf_size = 64;
READ_STATE_BIG(sc);
memset_zero_512( buf + (ptr>>3), (buf_size - ptr) >> 3 );
et = 352 + ((bcount == 0) << 7);
UBI_BIG_8WAY( et, ptr );
memset_zero_512( buf, buf_size >> 3 );
bcount = 0;
UBI_BIG_8WAY( 510, 8 );
buf[0] = h0;
buf[1] = h1;
buf[2] = h2;
buf[3] = h3;
buf[4] = h4;
buf[5] = h5;
buf[6] = h6;
buf[7] = h7;
memcpy_512( dst, buf, out_len >> 3 );
}
void
skein256_8way_update(void *cc, const void *data, size_t len)
{
skein_big_core_8way(cc, data, len);
}
void
skein256_8way_close(void *cc, void *dst)
{
skein_big_close_8way(cc, 0, 0, dst, 32);
}
void
skein512_8way_update(void *cc, const void *data, size_t len)
{
skein_big_core_8way(cc, data, len);
}
void
skein512_8way_close(void *cc, void *dst)
{
skein_big_close_8way(cc, 0, 0, dst, 64);
}
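/* Usage sketch (names assumed from the callers above): hash 8
   interleaved 80-byte headers in one pass.
     skein512_8way_context c;
     skein512_8way_init( &c );
     skein512_8way_update( &c, vdata, 80 );  // vdata: 8x64 interleaved
     skein512_8way_close( &c, vhash );       // 8 interleaved 512-bit digests
*/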
#endif // AVX512
static const sph_u64 IV512[] = {
SPH_C64(0x4903ADFF749C51CE), SPH_C64(0x0D95DE399746DF03),
SPH_C64(0x8FD1934127C79BCE), SPH_C64(0x9A255629FF352CB1),
SPH_C64(0x5DB62599DF6CA7B0), SPH_C64(0xEABE394CA9D5C3F4),
SPH_C64(0x991112C71A75B523), SPH_C64(0xAE18A40B660FCC33)
};
*/
void skein256_4way_init( skein256_4way_context *sc )
{
@@ -517,19 +767,6 @@ skein_big_close_4way( skein512_4way_context *sc, unsigned ub, unsigned n,
ptr = sc->ptr;
const int buf_size = 64;
/*
* At that point, if ptr == 0, then the message was empty;
* otherwise, there is between 1 and 64 bytes (inclusive) which
* are yet to be processed. Either way, we complete the buffer
* to a full block with zeros (the Skein specification mandates
* that an empty message is padded so that there is at least
* one block to process).
*
* Once this block has been processed, we do it again, with
* a block full of zeros, for the output (that block contains
* the encoding of "0", over 8 bytes, then padded with zeros).
*/
READ_STATE_BIG(sc);
memset_zero_256( buf + (ptr>>3), (buf_size - ptr) >> 3 );
@@ -552,31 +789,8 @@ skein_big_close_4way( skein512_4way_context *sc, unsigned ub, unsigned n,
memcpy_256( dst, buf, out_len >> 3 );
}
/*
static const sph_u64 IV256[] = {
SPH_C64(0xCCD044A12FDB3E13), SPH_C64(0xE83590301A79A9EB),
SPH_C64(0x55AEA0614F816E6F), SPH_C64(0x2A2767A4AE9B94DB),
SPH_C64(0xEC06025E74DD7683), SPH_C64(0xE7A436CDC4746251),
SPH_C64(0xC36FBAF9393AD185), SPH_C64(0x3EEDBA1833EDFC13)
};
static const sph_u64 IV512[] = {
SPH_C64(0x4903ADFF749C51CE), SPH_C64(0x0D95DE399746DF03),
SPH_C64(0x8FD1934127C79BCE), SPH_C64(0x9A255629FF352CB1),
SPH_C64(0x5DB62599DF6CA7B0), SPH_C64(0xEABE394CA9D5C3F4),
SPH_C64(0x991112C71A75B523), SPH_C64(0xAE18A40B660FCC33)
};
*/
/*
void
skein256_4way_init(void *cc)
{
skein_big_init_4way(cc, IV256);
}
*/
void
skein256_4way(void *cc, const void *data, size_t len)
skein256_4way_update(void *cc, const void *data, size_t len)
{
skein_big_core_4way(cc, data, len);
}
@@ -587,16 +801,8 @@ skein256_4way_close(void *cc, void *dst)
skein_big_close_4way(cc, 0, 0, dst, 32);
}
/*
void
skein512_4way_init(void *cc)
{
skein_big_init_4way(cc, IV512);
}
*/
void
skein512_4way(void *cc, const void *data, size_t len)
skein512_4way_update(void *cc, const void *data, size_t len)
{
skein_big_core_4way(cc, data, len);
}

View File

@@ -55,29 +55,50 @@ extern "C"{
#define SPH_SIZE_skein256 256
#define SPH_SIZE_skein512 512
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
typedef struct
{
__m256i buf[8] __attribute__ ((aligned (64)));
__m512i buf[8];
__m512i h0, h1, h2, h3, h4, h5, h6, h7;
size_t ptr;
sph_u64 bcount;
} sph_skein_8way_big_context __attribute__ ((aligned (128)));
typedef sph_skein_8way_big_context skein512_8way_context;
typedef sph_skein_8way_big_context skein256_8way_context;
void skein512_8way_init( skein512_8way_context *sc );
void skein512_8way_update( void *cc, const void *data, size_t len );
void skein512_8way_close( void *cc, void *dst );
void skein256_8way_init( skein256_8way_context *sc );
void skein256_8way_update( void *cc, const void *data, size_t len );
void skein256_8way_close( void *cc, void *dst );
#endif // AVX512
typedef struct
{
__m256i buf[8];
__m256i h0, h1, h2, h3, h4, h5, h6, h7;
size_t ptr;
sph_u64 bcount;
} sph_skein_4way_big_context;
} sph_skein_4way_big_context __attribute__ ((aligned (128)));
typedef sph_skein_4way_big_context skein512_4way_context;
typedef sph_skein_4way_big_context skein256_4way_context;
void skein512_4way_init( skein512_4way_context *sc );
void skein512_4way( void *cc, const void *data, size_t len );
void skein512_4way_update( void *cc, const void *data, size_t len );
void skein512_4way_close( void *cc, void *dst );
//void sph_skein512_addbits_and_close(
// void *cc, unsigned ub, unsigned n, void *dst);
#define skein512_4way skein512_4way_update
void skein256_4way_init( skein256_4way_context *sc );
void skein256_4way( void *cc, const void *data, size_t len );
void skein256_4way_update( void *cc, const void *data, size_t len );
void skein256_4way_close( void *cc, void *dst );
//void sph_skein256_addbits_and_close(
// void *cc, unsigned ub, unsigned n, void *dst);
#define skein256_4way skein256_4way_update
#ifdef __cplusplus
}

View File

@@ -1,9 +1,66 @@
#include "skein2-gate.h"
#include "skein-gate.h"
#include <string.h>
#include <stdint.h>
#include "skein-hash-4way.h"
#if defined(SKEIN2_4WAY)
#if defined(SKEIN_8WAY)
void skein2hash_8way( void *output, const void *input )
{
skein512_8way_context ctx;
uint64_t hash[16*8] __attribute__ ((aligned (128)));
skein512_8way_init( &ctx );
skein512_8way_update( &ctx, input, 80 );
skein512_8way_close( &ctx, hash );
skein512_8way_init( &ctx );
skein512_8way_update( &ctx, hash, 64 );
skein512_8way_close( &ctx, output );
}
int scanhash_skein2_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[16*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[49]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
__m512i *noncev = (__m512i*)vdata + 9; // aligned
int thr_id = mythr->id;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
do
{
*noncev = mm512_intrlv_blend_32( mm512_bswap_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ) ), *noncev );
skein2hash_8way( hash, vdata );
for ( int lane = 0; lane < 8; lane++ )
if ( hash7[ lane<<1 ] <= Htarg )
{
extr_lane_8x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 8;
} while ( (n < max_nonce-8) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;
return 0;
}
#elif defined(SKEIN_4WAY)
void skein2hash_4way( void *output, const void *input )
{

View File

@@ -1,17 +0,0 @@
#include "skein2-gate.h"
#include <stdint.h>
#include "sph_skein.h"
bool register_skein2_algo( algo_gate_t* gate )
{
gate->optimizations = AVX2_OPT;
#if defined (SKEIN2_4WAY)
gate->scanhash = (void*)&scanhash_skein2_4way;
gate->hash = (void*)&skein2hash_4way;
#else
gate->scanhash = (void*)&scanhash_skein2;
gate->hash = (void*)&skein2hash;
#endif
return true;
};

View File

@@ -1,20 +0,0 @@
#ifndef __SKEIN2GATE_H__
#define __SKEIN2_GATE_H__
#include "algo-gate-api.h"
#include <stdint.h>
#if defined(__AVX2__)
#define SKEIN2_4WAY
#endif
#if defined(SKEIN2_4WAY)
void skein2hash_4way( void *output, const void *input );
int scanhash_skein2_4way( struct work *work, uint32_t max_nonce,
uint64_t* hashes_done, struct thr_info *mythr );
#endif
void skein2hash( void *output, const void *input );
int scanhash_skein2( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif

View File

@@ -1,4 +1,4 @@
#include "algo-gate-api.h"
#include "skein-gate.h"
#include <string.h>
#include <stdint.h>

View File

@@ -0,0 +1,369 @@
#include "Swifftx_sha3.h"
extern "C" {
#include "SWIFFTX.h"
}
#include <math.h>
#include <stdlib.h>
#include <string.h>
// The default salt value.
// This is the expansion of e (Euler's number) - the 19 digits after 2.71:
// 8281828459045235360.
// The above in base 256, from MSB to LSB:
BitSequence SWIF_saltValueChar[SWIF_HAIFA_SALT_SIZE] = {114, 238, 247, 26, 192, 28, 170, 160};
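// A quick self-check sketch (an addition, not part of the original
// submission): expanding the decimal constant quoted above into base 256,
// MSB first, must reproduce the salt bytes.
#if 0
#include <stdio.h>
int main()
{
   unsigned long long e19 = 8281828459045235360ULL;
   unsigned char b[8];
   for ( int i = 7; i >= 0; i-- ) { b[i] = e19 & 0xff; e19 >>= 8; }
   for ( int i = 0; i < 8; i++ ) printf( "%u ", b[i] );   // 114 238 247 26 192 28 170 160
   return 0;
}
#endif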
// All the IVs below were produced from the decimal digits of e's expansion.
// The code can be found in 'ProduceRandomIV.c'.
// The initial value for 224 digest size.
const BitSequence SWIF_HAIFA_IV_224[SWIFFTX_OUTPUT_BLOCK_SIZE] =
{37, 242, 132, 2, 167, 81, 158, 237, 113, 77, 162, 60, 65, 236, 108, 246,
101, 72, 190, 109, 58, 205, 99, 6, 114, 169, 104, 114, 38, 146, 121, 142,
59, 98, 233, 84, 72, 227, 22, 199, 17, 102, 198, 145, 24, 178, 37, 1,
215, 245, 66, 120, 230, 193, 113, 253, 165, 218, 66, 134, 49, 231, 124, 204,
0};
// The initial value for 256 digest size.
const BitSequence SWIF_HAIFA_IV_256[SWIFFTX_OUTPUT_BLOCK_SIZE] =
{250, 50, 42, 40, 14, 233, 53, 48, 227, 42, 237, 187, 211, 120, 209, 234,
27, 144, 4, 61, 243, 244, 29, 247, 37, 162, 70, 11, 231, 196, 53, 6,
193, 240, 94, 126, 204, 132, 104, 46, 114, 29, 3, 104, 118, 184, 201, 3,
57, 77, 91, 101, 31, 155, 84, 199, 228, 39, 198, 42, 248, 198, 201, 178,
8};
// The initial value for 384 digest size.
const BitSequence SWIF_HAIFA_IV_384[SWIFFTX_OUTPUT_BLOCK_SIZE] =
{40, 145, 193, 100, 205, 171, 47, 76, 254, 10, 196, 41, 165, 207, 200, 79,
109, 13, 75, 201, 17, 172, 64, 162, 217, 22, 88, 39, 51, 30, 220, 151,
133, 73, 216, 233, 184, 203, 77, 0, 248, 13, 28, 199, 30, 147, 232, 242,
227, 124, 169, 174, 14, 45, 27, 87, 254, 73, 68, 136, 135, 159, 83, 152,
0};
// The initial value for 512 digest size.
const BitSequence SWIF_HAIFA_IV_512[SWIFFTX_OUTPUT_BLOCK_SIZE] =
{195, 126, 197, 167, 157, 114, 99, 126, 208, 105, 200, 90, 71, 195, 144, 138,
142, 122, 123, 116, 24, 214, 168, 173, 203, 183, 194, 210, 102, 117, 138, 42,
114, 118, 132, 33, 35, 149, 143, 163, 163, 183, 243, 175, 72, 22, 201, 255,
102, 243, 22, 187, 211, 167, 239, 76, 164, 70, 80, 182, 181, 212, 9, 185,
0};
///////////////////////////////////////////////////////////////////////////////////////////////
// NIST API implementation portion.
///////////////////////////////////////////////////////////////////////////////////////////////
int Swifftx::Init(int hashbitlen)
{
switch(hashbitlen)
{
case 224:
swifftxState.hashbitlen = hashbitlen;
// Initializes h_0 in HAIFA:
memcpy(swifftxState.currOutputBlock, SWIF_HAIFA_IV_224, SWIFFTX_OUTPUT_BLOCK_SIZE);
break;
case 256:
swifftxState.hashbitlen = hashbitlen;
memcpy(swifftxState.currOutputBlock, SWIF_HAIFA_IV_256, SWIFFTX_OUTPUT_BLOCK_SIZE);
break;
case 384:
swifftxState.hashbitlen = hashbitlen;
memcpy(swifftxState.currOutputBlock, SWIF_HAIFA_IV_384, SWIFFTX_OUTPUT_BLOCK_SIZE);
break;
case 512:
swifftxState.hashbitlen = hashbitlen;
memcpy(swifftxState.currOutputBlock, SWIF_HAIFA_IV_512, SWIFFTX_OUTPUT_BLOCK_SIZE);
break;
default:
return BAD_HASHBITLEN;
}
swifftxState.wasUpdated = false;
swifftxState.remainingSize = 0;
memset(swifftxState.remaining, 0, SWIF_HAIFA_INPUT_BLOCK_SIZE);
memset(swifftxState.numOfBitsChar, 0, SWIF_HAIFA_NUM_OF_BITS_SIZE);
// Initialize the salt with the default value.
memcpy(swifftxState.salt, SWIF_saltValueChar, SWIF_HAIFA_SALT_SIZE);
InitializeSWIFFTX();
return SUCCESS;
}
int Swifftx::Update(const BitSequence *data, DataLength databitlen)
{
// The size of the input in bytes after prepending the remaining data from the previous invocation.
int sizeOfInputAfterRemaining = 0;
// The input block to compression function of SWIFFTX:
BitSequence currInputBlock[SWIFFTX_INPUT_BLOCK_SIZE] = {0};
// Whether we handled at least one full block.
bool wasSingleBlockHandled = false;
swifftxState.wasUpdated = true;
// Handle an empty message as required by NIST. Since 'Final()' is oblivious to the input
// (though it of course uses the output of the compression function from the previous round,
// called h_{i-1} in the HAIFA article), there is nothing to do here.
if (databitlen == 0)
return SUCCESS;
// If a previous input left a bit length that is not byte aligned, return an error
if (swifftxState.remainingSize % 8)
{
return INPUT_DATA_NOT_ALIGNED;
}
// Convert remaining size to bytes.
swifftxState.remainingSize /= 8;
// As long as we have enough data combined from (remaining + data) to fill an input block.
// Rounds setup:
while (((databitlen / 8) + swifftxState.remainingSize) >= SWIF_HAIFA_INPUT_BLOCK_SIZE)
{
// Fill the input block with data:
// 1. The output of the previous block:
memcpy(currInputBlock, swifftxState.currOutputBlock, SWIFFTX_OUTPUT_BLOCK_SIZE);
// 2. The input part of the block:
// 2a. The remaining data from the previous 'Update()' call:
if (swifftxState.remainingSize)
memcpy(currInputBlock + SWIFFTX_OUTPUT_BLOCK_SIZE, swifftxState.remaining,
swifftxState.remainingSize);
// 2b. The input data that we have place for after the 'remaining':
sizeOfInputAfterRemaining = SWIFFTX_INPUT_BLOCK_SIZE - SWIFFTX_OUTPUT_BLOCK_SIZE
- ((int) swifftxState.remainingSize) - SWIF_HAIFA_NUM_OF_BITS_SIZE
- SWIF_HAIFA_SALT_SIZE;
memcpy(currInputBlock + SWIFFTX_OUTPUT_BLOCK_SIZE + swifftxState.remainingSize,
data, sizeOfInputAfterRemaining);
// 3. The #bits part of the block:
memcpy(currInputBlock + SWIFFTX_OUTPUT_BLOCK_SIZE + swifftxState.remainingSize
+ sizeOfInputAfterRemaining,
swifftxState.numOfBitsChar, SWIF_HAIFA_NUM_OF_BITS_SIZE);
// 4. The salt part of the block:
memcpy(currInputBlock + SWIFFTX_OUTPUT_BLOCK_SIZE + swifftxState.remainingSize
+ sizeOfInputAfterRemaining + SWIF_HAIFA_NUM_OF_BITS_SIZE,
swifftxState.salt, SWIF_HAIFA_SALT_SIZE);
ComputeSingleSWIFFTX(currInputBlock, swifftxState.currOutputBlock, false);
// Update the #bits field with SWIF_HAIFA_INPUT_BLOCK_SIZE.
AddToCurrInBase256(swifftxState.numOfBitsChar, SWIF_HAIFA_INPUT_BLOCK_SIZE * 8);
wasSingleBlockHandled = true;
data += sizeOfInputAfterRemaining;
databitlen -= (sizeOfInputAfterRemaining * 8);
swifftxState.remainingSize = 0;
}
// Update the swifftxState.remaining and swifftxState.remainingSize.
// remainingSize will be in bits after exiting 'Update()'.
if (wasSingleBlockHandled)
{
swifftxState.remainingSize = (unsigned int) databitlen; // now remaining size is in bits.
if (swifftxState.remainingSize)
memcpy(swifftxState.remaining, data, (swifftxState.remainingSize + 7) / 8);
}
else
{
memcpy(swifftxState.remaining + swifftxState.remainingSize, data,
(size_t) (databitlen + 7) / 8);
swifftxState.remainingSize = (swifftxState.remainingSize * 8) + (unsigned short) databitlen;
}
return SUCCESS;
}
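/* Layout of one compression-function input block as assembled above
   (sizes derived from the macros in Swifftx_sha3.h):

      65 bytes   h_{i-1}          SWIFFTX_OUTPUT_BLOCK_SIZE
     175 bytes   message bytes    SWIF_HAIFA_INPUT_BLOCK_SIZE = 256 - 65 - 8 - 8
       8 bytes   #bits counter    SWIF_HAIFA_NUM_OF_BITS_SIZE
       8 bytes   salt             SWIF_HAIFA_SALT_SIZE
     ---------
     256 bytes   SWIFFTX_INPUT_BLOCK_SIZE */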
int Swifftx::Final(BitSequence *hashval)
{
int i;
// Whether to add one last block. True if the padding appended to the last block overflows
// the block size.
bool toAddFinalBlock = false;
bool toPutOneInFinalBlock = false;
unsigned short oneShift = 0;
// The size of the last input block before the zeroes padding. We add 1 to include the
// final '1' padding bit in the count, and 7 so the division rounds the length up to whole bytes.
unsigned short sizeOfLastInputBlock = (swifftxState.remainingSize + 1 + 7) / 8;
// The number of bytes of zero in the padding part.
// The padding contains:
// 1. A single 1 bit.
// 2. As many zeroes as needed.
// 3. The message length in bits. Occupies SWIF_HAIFA_NUM_OF_BITS_SIZE bytes.
// 4. The digest size. Maximum is 512, so we need 2 bytes.
// If the resulting count is negative, an additional block is added, as HAIFA specifies.
short numOfZeroBytesInPadding = (short) SWIFFTX_INPUT_BLOCK_SIZE - SWIFFTX_OUTPUT_BLOCK_SIZE
- sizeOfLastInputBlock - (2 * SWIF_HAIFA_NUM_OF_BITS_SIZE) - 2
- SWIF_HAIFA_SALT_SIZE;
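// Worked numbers (derived from the constants above): 256 - 65 - 16 - 2 - 8
// = 165, so numOfZeroBytesInPadding = 165 - sizeOfLastInputBlock, and an
// extra block is appended whenever the last input block exceeds 164 bytes.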
// The input block to compression function of SWIFFTX:
BitSequence currInputBlock[SWIFFTX_INPUT_BLOCK_SIZE] = {0};
// The message length in base 256.
BitSequence messageLengthChar[SWIF_HAIFA_NUM_OF_BITS_SIZE] = {0};
// The digest size used for padding:
unsigned char digestSizeLSB = swifftxState.hashbitlen % 256;
unsigned char digestSizeMSB = (swifftxState.hashbitlen - digestSizeLSB) / 256;
if (numOfZeroBytesInPadding < 1)
toAddFinalBlock = true;
// Fill the input block with data:
// 1. The output of the previous block:
memcpy(currInputBlock, swifftxState.currOutputBlock, SWIFFTX_OUTPUT_BLOCK_SIZE);
// 2a. The input part of the block, which is the remaining data from the previous 'Update()'
// call, if any, plus an extra '1' bit (maybe all we have is this extra 1):
// Add the last 1 in big-endian convention ...
if (swifftxState.remainingSize % 8 == 0)
{
swifftxState.remaining[sizeOfLastInputBlock - 1] = 0x80;
}
else
{
swifftxState.remaining[sizeOfLastInputBlock - 1] |= (1 << (7 - (swifftxState.remainingSize % 8)));
}
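// Worked example (illustrative): with remainingSize == 3 bits,
// sizeOfLastInputBlock == 1 and the '1' lands at bit 7 - 3 == 4, i.e.
// remaining[0] |= 0x10, leaving the three data bits in positions 7..5.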
if (sizeOfLastInputBlock)
memcpy(currInputBlock + SWIFFTX_OUTPUT_BLOCK_SIZE, swifftxState.remaining,
sizeOfLastInputBlock);
// Compute the message length in base 256:
for (i = 0; i < SWIF_HAIFA_NUM_OF_BITS_SIZE; ++i)
messageLengthChar[i] = swifftxState.numOfBitsChar[i];
if (sizeOfLastInputBlock)
AddToCurrInBase256(messageLengthChar, sizeOfLastInputBlock * 8);
if (!toAddFinalBlock)
{
// 2b. Put the zeroes:
memset(currInputBlock + SWIFFTX_OUTPUT_BLOCK_SIZE + sizeOfLastInputBlock,
0, numOfZeroBytesInPadding);
// 2c. Pad the message length:
for (i = 0; i < SWIF_HAIFA_NUM_OF_BITS_SIZE; ++i)
currInputBlock[SWIFFTX_OUTPUT_BLOCK_SIZE + sizeOfLastInputBlock
+ numOfZeroBytesInPadding + i] = messageLengthChar[i];
// 2d. Pad the digest size:
currInputBlock[SWIFFTX_OUTPUT_BLOCK_SIZE + sizeOfLastInputBlock
+ numOfZeroBytesInPadding + SWIF_HAIFA_NUM_OF_BITS_SIZE] = digestSizeMSB;
currInputBlock[SWIFFTX_OUTPUT_BLOCK_SIZE + sizeOfLastInputBlock
+ numOfZeroBytesInPadding + SWIF_HAIFA_NUM_OF_BITS_SIZE + 1] = digestSizeLSB;
}
else
{
// 2b. Put the zeroes, if any:
if ((SWIF_HAIFA_INPUT_BLOCK_SIZE - sizeOfLastInputBlock) > 0)
{
memset(currInputBlock + SWIFFTX_OUTPUT_BLOCK_SIZE + sizeOfLastInputBlock,
0, SWIF_HAIFA_INPUT_BLOCK_SIZE - sizeOfLastInputBlock);
}
}
// 3. The #bits part of the block:
memcpy(currInputBlock + SWIFFTX_OUTPUT_BLOCK_SIZE + SWIF_HAIFA_INPUT_BLOCK_SIZE,
swifftxState.numOfBitsChar, SWIF_HAIFA_NUM_OF_BITS_SIZE);
// 4. The salt part of the block:
memcpy(currInputBlock + SWIFFTX_OUTPUT_BLOCK_SIZE + SWIF_HAIFA_INPUT_BLOCK_SIZE
+ SWIF_HAIFA_NUM_OF_BITS_SIZE,
swifftxState.salt,
SWIF_HAIFA_SALT_SIZE);
ComputeSingleSWIFFTX(currInputBlock, swifftxState.currOutputBlock, !toAddFinalBlock);
// If we have to add one more block, it is now:
if (toAddFinalBlock)
{
// 1. The previous output block, as usual.
memcpy(currInputBlock, swifftxState.currOutputBlock, SWIFFTX_OUTPUT_BLOCK_SIZE);
// 2a. Instead of the input, zeroes:
memset(currInputBlock + SWIFFTX_OUTPUT_BLOCK_SIZE , 0,
SWIF_HAIFA_INPUT_BLOCK_SIZE - SWIF_HAIFA_NUM_OF_BITS_SIZE - 2);
// 2b. Instead of the input, the message length:
memcpy(currInputBlock + SWIFFTX_OUTPUT_BLOCK_SIZE + SWIF_HAIFA_INPUT_BLOCK_SIZE
- SWIF_HAIFA_NUM_OF_BITS_SIZE - 2,
messageLengthChar,
SWIF_HAIFA_NUM_OF_BITS_SIZE);
// 2c. Instead of the input, the digest size:
currInputBlock[SWIFFTX_OUTPUT_BLOCK_SIZE + SWIF_HAIFA_INPUT_BLOCK_SIZE - 2] = digestSizeMSB;
currInputBlock[SWIFFTX_OUTPUT_BLOCK_SIZE + SWIF_HAIFA_INPUT_BLOCK_SIZE - 1] = digestSizeLSB;
// 3. The #bits part of the block, which is zero in case of additional block:
memset(currInputBlock + SWIFFTX_OUTPUT_BLOCK_SIZE + SWIF_HAIFA_INPUT_BLOCK_SIZE,
0,
SWIF_HAIFA_NUM_OF_BITS_SIZE);
// 4. The salt part of the block:
memcpy(currInputBlock + SWIFFTX_OUTPUT_BLOCK_SIZE + SWIF_HAIFA_INPUT_BLOCK_SIZE
+ SWIF_HAIFA_NUM_OF_BITS_SIZE,
swifftxState.salt,
SWIF_HAIFA_SALT_SIZE);
ComputeSingleSWIFFTX(currInputBlock, swifftxState.currOutputBlock, true);
}
// Finally, copy the result into 'hashval'. In case the digest size is not 512 bits, copy
// only the first (hashbitlen / 8) bytes:
for (i = 0; i < (swifftxState.hashbitlen / 8); ++i)
hashval[i] = swifftxState.currOutputBlock[i];
return SUCCESS;
}
int Swifftx::Hash(int hashbitlen, const BitSequence *data, DataLength databitlen,
BitSequence *hashval)
{
int result;
//hashState state;
// The index of the current position in the input that is fed to the compression function.
DataLength currInputIndex = 0;
result = Swifftx::Init(hashbitlen);
if (result != SUCCESS)
return result;
for ( ; (databitlen / 8) > SWIF_HAIFA_INPUT_BLOCK_SIZE;
currInputIndex += SWIF_HAIFA_INPUT_BLOCK_SIZE, databitlen -= (SWIF_HAIFA_INPUT_BLOCK_SIZE * 8))
{
result = Swifftx::Update(data + currInputIndex, SWIF_HAIFA_INPUT_BLOCK_SIZE * 8);
if (result != SUCCESS)
return result;
}
// The length of the last block may be shorter than (SWIF_HAIFA_INPUT_BLOCK_SIZE * 8)
result = Swifftx::Update(data + currInputIndex, databitlen);
if (result != SUCCESS)
{
return result;
}
return Swifftx::Final(hashval);
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Helper function implementation portion.
///////////////////////////////////////////////////////////////////////////////////////////////
void Swifftx::AddToCurrInBase256(BitSequence value[SWIF_HAIFA_NUM_OF_BITS_SIZE],
unsigned short toAdd)
{
unsigned char remainder = 0;
short i;
BitSequence currValueInBase256[8] = {0};
unsigned short currIndex = 7;
unsigned short temp = 0;
do
{
remainder = toAdd % 256;
currValueInBase256[currIndex--] = remainder;
toAdd -= remainder;
toAdd /= 256;
}
while(toAdd != 0);
for (i = 7; i >= 0; --i)
{
temp = value[i] + currValueInBase256[i];
if (temp > 255)
{
value[i] = temp % 256;
currValueInBase256[i - 1]++;
}
else
value[i] = (unsigned char) temp;
}
}
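/* Usage sketch (an illustration mirroring the calls made from 'Update()'):
   the counter is 8 bytes, big-endian, so after absorbing two full message
   blocks of SWIF_HAIFA_INPUT_BLOCK_SIZE == 175 bytes each:

      BitSequence nbits[SWIF_HAIFA_NUM_OF_BITS_SIZE] = {0};
      AddToCurrInBase256( nbits, 175 * 8 );   // counter = 1400 = 0x0578
      AddToCurrInBase256( nbits, 175 * 8 );   // counter = 2800 = 0x0AF0

   leaving nbits == {0, 0, 0, 0, 0, 0, 0x0A, 0xF0}. */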

View File

@@ -0,0 +1,79 @@
#ifndef SWIFFTX_SHA3_H
#define SWIFFTX_SHA3_H
#include "sha3_interface.h"
#include "stdbool.h"
#include "stdint.h"
class Swifftx : public SHA3 {
#define SWIFFTX_INPUT_BLOCK_SIZE 256
#define SWIFFTX_OUTPUT_BLOCK_SIZE 65
#define SWIF_HAIFA_SALT_SIZE 8
#define SWIF_HAIFA_NUM_OF_BITS_SIZE 8
#define SWIF_HAIFA_INPUT_BLOCK_SIZE (SWIFFTX_INPUT_BLOCK_SIZE - SWIFFTX_OUTPUT_BLOCK_SIZE \
- SWIF_HAIFA_NUM_OF_BITS_SIZE - SWIF_HAIFA_SALT_SIZE)
typedef unsigned char BitSequence;
//const DataLength SWIF_SALT_VALUE;
#define SWIF_HAIFA_IV 0
/*const BitSequence SWIF_HAIFA_IV_224[SWIFFTX_OUTPUT_BLOCK_SIZE];
const BitSequence SWIF_HAIFA_IV_256[SWIFFTX_OUTPUT_BLOCK_SIZE];
const BitSequence SWIF_HAIFA_IV_384[SWIFFTX_OUTPUT_BLOCK_SIZE];
const BitSequence SWIF_HAIFA_IV_512[SWIFFTX_OUTPUT_BLOCK_SIZE];*/
typedef enum
{
SUCCESS = 0,
FAIL = 1,
BAD_HASHBITLEN = 2,
BAD_SALT_SIZE = 3,
SET_SALT_VALUE_FAILED = 4,
INPUT_DATA_NOT_ALIGNED = 5
} HashReturn;
typedef struct hashState {
unsigned short hashbitlen;
// The data remaining after the most recent call to 'Update()'.
BitSequence remaining[SWIF_HAIFA_INPUT_BLOCK_SIZE + 1];
// The size of the remaining data in bits.
// Is 0 in case there is no remaining data at all.
unsigned int remainingSize;
// The current output of the compression function. At the end will contain the final digest
// (which may be needed to be truncated, depending on hashbitlen).
BitSequence currOutputBlock[SWIFFTX_OUTPUT_BLOCK_SIZE];
// The value of '#bits hashed so far' field in HAIFA, in base 256.
BitSequence numOfBitsChar[SWIF_HAIFA_NUM_OF_BITS_SIZE];
// The salt value currently in use:
BitSequence salt[SWIF_HAIFA_SALT_SIZE];
// Indicates whether a single 'Update()' occurred.
// After a call to 'Update()' the key and the salt values cannot be changed.
bool wasUpdated;
} hashState;
private:
int swifftxNumRounds;
hashState swifftxState;
public:
int Init(int hashbitlen);
int Update(const BitSequence *data, DataLength databitlen);
int Final(BitSequence *hashval);
int Hash(int hashbitlen, const BitSequence *data, DataLength databitlen,
BitSequence *hashval);
private:
static void AddToCurrInBase256(BitSequence value[SWIF_HAIFA_NUM_OF_BITS_SIZE], unsigned short toAdd);
};
#endif

View File

@@ -0,0 +1,21 @@
#pragma once
#include <cstdint>
namespace hash {
using BitSequence = unsigned char;
using DataLength = unsigned long long;
struct hash_interface {
virtual ~hash_interface() = default;
virtual int Init(int hash_bitsize) = 0;
virtual int Update(const BitSequence *data, DataLength data_bitsize) = 0;
virtual int Final(BitSequence *hash) = 0;
virtual int
Hash(int hash_bitsize, const BitSequence *data, DataLength data_bitsize, BitSequence *hash) = 0;
};
} // namespace hash

39
algo/swifftx/inttypes.h Normal file
View File

@@ -0,0 +1,39 @@
/*
inttypes.h
Contributors:
Created by Marek Michalkiewicz <marekm@linux.org.pl>
THIS SOFTWARE IS NOT COPYRIGHTED
This source code is offered for use in the public domain. You may
use, modify or distribute it freely.
This code is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY. ALL WARRANTIES, EXPRESS OR IMPLIED ARE HEREBY
DISCLAIMED. This includes but is not limited to warranties of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef __INTTYPES_H_
#define __INTTYPES_H_
/* Use [u]intN_t if you need exactly N bits.
XXX - doesn't handle the -mint8 option. */
typedef signed char swift_int8_t;
typedef unsigned char swift_uint8_t;
typedef int swift_int16_t;
typedef unsigned int swift_uint16_t;
typedef long swift_int32_t;
typedef unsigned long swift_uint32_t;
typedef long long swift_int64_t;
typedef unsigned long long swift_uint64_t;
//typedef swift_int16_t intptr_t;
//typedef swift_uint16_t uintptr_t;
#endif

View File

@@ -0,0 +1,14 @@
#pragma once
#include <cstdint>
//#include <streams/hash/hash_interface.h>
#include "hash_interface.h"
namespace sha3 {
using BitSequence = hash::BitSequence;
using DataLength = hash::DataLength;
struct sha3_interface : hash::hash_interface {};
} // namespace sha3

47
algo/swifftx/stdbool.h Normal file
View File

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2000 Jeroen Ruigrok van der Werven <asmodai@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: src/include/stdbool.h,v 1.6 2002/08/16 07:33:14 alfred Exp $
*/
#ifndef _STDBOOL_H_
#define _STDBOOL_H_
#define __bool_true_false_are_defined 1
#ifndef __cplusplus
#define false 0
#define true 1
//#define bool _Bool
//#if __STDC_VERSION__ < 199901L && __GNUC__ < 3
//typedef int _Bool;
//#endif
typedef int bool;
#endif /* !__cplusplus */
#endif /* !_STDBOOL_H_ */

54
algo/swifftx/stdint.h Normal file
View File

@@ -0,0 +1,54 @@
#ifndef _SWIFFT_STDINT_H
#define _SWIFFT_STDINT_H
///////////////////////////////////////////////////////////////////////////////////////////////
//
// A note from SWIFFTX implementers:
//
// Although the submission was targeted at the Microsoft Visual Studio 2005 compiler, we strove
// to make the code as portable as possible. This is why we preferred to use the types defined
// here, instead of Microsoft-specific types. We compiled the code with gcc to make sure of this.
// However, we couldn't use this header as is, due to VS2005 compiler objections. This is why
// we commented out certain defines and clearly marked them.
// To compile our code on gcc you may define SYS_STDINT.
//
///////////////////////////////////////////////////////////////////////////////////////////////
#ifdef SYS_STDINT
#include <stdint.h>
#else
#include "inttypes.h"
// The following was commented out by SWIFFTX implementers:
// __BEGIN_DECLS
typedef swift_int8_t swifftx_int_least8_t;
typedef swift_int16_t swifftx_int_least16_t;
typedef swift_int32_t swifftx_int_least32_t;
typedef swift_uint8_t swifftx_uint_least8_t;
typedef swift_uint16_t swifftx_uint_least16_t;
typedef swift_uint32_t swifftx_uint_least32_t;
#ifndef __STRICT_ANSI__
typedef swift_int64_t swifftx_int_least64_t;
typedef swift_uint64_t swifftx_uint_least64_t;
#endif
/*typedef signed char int_fast8_t;
typedef signed long int int_fast16_t;
typedef signed long int int_fast32_t;
typedef signed long long int int_fast64_t;
typedef unsigned char uint_fast8_t;
typedef unsigned long int uint_fast16_t;
typedef unsigned long int uint_fast32_t;
typedef unsigned long long int uint_fast64_t;*/
// The following was commented out by SWIFFTX implementers:
// #include <endian.h>
// __END_DECLS
#endif
#endif

912
algo/swifftx/swifftx-4way.c Normal file
View File

@@ -0,0 +1,912 @@
///////////////////////////////////////////////////////////////////////////////////////////////
//
// SWIFFTX ANSI C OPTIMIZED 32BIT IMPLEMENTATION FOR NIST SHA-3 COMPETITION
//
// SWIFFTX.c
//
// October 2008
//
// This is the source file of the OPTIMIZED 32BIT implementation of SWIFFTX hash function.
// SWIFFTX is a candidate function for SHA-3 NIST competition.
// More details about SWIFFTX can be found in the accompanying submission documents.
//
///////////////////////////////////////////////////////////////////////////////////////////////
#include "swifftx.h"
// See the remarks concerning compatibility issues inside stdint.h.
#include "stdint.h"
// Remove this while using gcc:
//#include "stdbool.h"
#include <memory.h>
///////////////////////////////////////////////////////////////////////////////////////////////
// Constants and static tables portion.
///////////////////////////////////////////////////////////////////////////////////////////////
// In SWIFFTX we work over Z_257, so this is the modulus and the arithmetic is performed modulo
// this number.
#define FIELD_SIZE 257
// The size of FFT we use:
#define N 64
#define LOGN 6
#define EIGHTH_N (N / 8)
// The number of FFTs done on the input.
#define M (SWIFFTX_INPUT_BLOCK_SIZE / 8) // 32
// Omega is the 128th root of unity in Z_257.
// We choose w = 42.
#define OMEGA 42
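// Sanity-check sketch (an addition, not part of the build): OMEGA must have
// multiplicative order 128 in Z_257. Since x^64 == -1 implies the order is
// exactly 128, it suffices to check 42^64 == 256 (mod 257).
#if 0
static int omega_order_ok()
{
   int p = 1, i;
   for ( i = 0; i < 64; i++ ) p = ( p * OMEGA ) % FIELD_SIZE;
   return p == FIELD_SIZE - 1;   // 42^64 == -1 (mod 257), hence order 128
}
#endif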
// The size of the inner FFT lookup table:
#define W 8
// Calculates the sum and the difference of two numbers.
//
// Parameters:
// - A: the first operand. After the operation stores the sum of the two operands.
// - B: the second operand. After the operation stores the difference between the first and the
// second operands.
#define ADD_SUB_4WAY( A, B ) \
{ \
__m128i temp = B; \
B = _mm_sub_epi32( A, B ); \
A = _mm_add_epi32( A, temp ); \
}
//#define ADD_SUB(A, B) {register int temp = (B); B = ((A) - (B)); A = ((A) + (temp));}
// Quickly reduces an integer modulo 257.
//
// Parameters:
// - A: the input.
#define Q_REDUCE( A ) ( _mm_sub_epi32( \
                        _mm_and_si128( A, m128_const1_32( 0xff ) ), \
                        _mm_srli_epi32( A, 8 ) ) )
//#define Q_REDUCE(A) (((A) & 0xff) - ((A) >> 8))
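// Why the reduction works: write A = hi*256 + lo, with lo = A & 0xff and
// hi = A >> 8. Since 256 == -1 (mod 257), A == lo - hi (mod 257). A single
// application only narrows the range, so the result is typically reduced
// again or centered before being used as a final value in Z_257.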
// Since we need to do the setup only once, this is the indicator variable:
static bool wasSetupDone = false;
// This array stores the powers of omegas that correspond to the indices, which are the input
// values. Also known as the "outer FFT twiddle factors".
swift_int16_t multipliers[N];
// This array stores the powers of omegas, multiplied by the corresponding values.
// We store this table to save computation time.
//
// To calculate the intermediate value of the compression function (the first out of two
// stages), we multiply the k-th bit of x_i by w^[(2i + 1) * k]. {x_i} is the input to the
// compression function, i is between 0 and 31, x_i is a 64-bit value.
// One can see the formula for this (intermediate) stage in the SWIFFT FSE 2008 paper --
// formula (2), section 3, page 6.
swift_int16_t fftTable[256 * EIGHTH_N];
// The A's we use in SWIFFTX shall be random elements of Z_257.
// We generated these A's from the decimal expansion of PI as follows: we converted each
// triple of digits into a decimal number d. If d < (257 * 3) we used (d % 257) for the next A
// element, otherwise we moved to the next triple of digits in the expansion. This guarantees that
// the A's are random, provided that the digits of PI are.
const swift_int16_t As[3 * M * N] =
{141, 78, 139, 75, 238, 205, 129, 126, 22, 245, 197, 169, 142, 118, 105, 78,
50, 149, 29, 208, 114, 34, 85, 117, 67, 148, 86, 256, 25, 49, 133, 93,
95, 36, 68, 231, 211, 102, 151, 128, 224, 117, 193, 27, 102, 187, 7, 105,
45, 130, 108, 124, 171, 151, 189, 128, 218, 134, 233, 165, 14, 201, 145, 134,
52, 203, 91, 96, 197, 69, 134, 213, 136, 93, 3, 249, 141, 16, 210, 73,
6, 92, 58, 74, 174, 6, 254, 91, 201, 107, 110, 76, 103, 11, 73, 16,
34, 209, 7, 127, 146, 254, 95, 176, 57, 13, 108, 245, 77, 92, 186, 117,
124, 97, 105, 118, 34, 74, 205, 122, 235, 53, 94, 238, 210, 227, 183, 11,
129, 159, 105, 183, 142, 129, 86, 21, 137, 138, 224, 223, 190, 188, 179, 188,
256, 25, 217, 176, 36, 176, 238, 127, 160, 210, 155, 148, 132, 0, 54, 127,
145, 6, 46, 85, 243, 95, 173, 123, 178, 207, 211, 183, 224, 173, 146, 35,
71, 114, 50, 22, 175, 1, 28, 19, 112, 129, 21, 34, 161, 159, 115, 52,
4, 193, 211, 92, 115, 49, 59, 217, 218, 96, 61, 81, 24, 202, 198, 89,
45, 128, 8, 51, 253, 87, 171, 35, 4, 188, 171, 10, 3, 137, 238, 73,
19, 208, 124, 163, 103, 177, 155, 147, 46, 84, 253, 233, 171, 241, 211, 217,
159, 48, 96, 79, 237, 18, 171, 226, 99, 1, 97, 195, 216, 163, 198, 95,
0, 201, 65, 228, 21, 153, 124, 230, 44, 35, 44, 108, 85, 156, 249, 207,
26, 222, 131, 1, 60, 242, 197, 150, 181, 19, 116, 213, 75, 98, 124, 240,
123, 207, 62, 255, 60, 143, 187, 157, 139, 9, 12, 104, 89, 49, 193, 146,
104, 196, 181, 82, 198, 253, 192, 191, 255, 122, 212, 104, 47, 20, 132, 208,
46, 170, 2, 69, 234, 36, 56, 163, 28, 152, 104, 238, 162, 56, 24, 58,
38, 150, 193, 254, 253, 125, 173, 35, 73, 126, 247, 239, 216, 6, 199, 15,
90, 12, 97, 122, 9, 84, 207, 127, 219, 72, 58, 30, 29, 182, 41, 192,
235, 248, 237, 74, 72, 176, 210, 252, 45, 64, 165, 87, 202, 241, 236, 223,
151, 242, 119, 239, 52, 112, 169, 28, 13, 37, 160, 60, 158, 81, 133, 60,
16, 145, 249, 192, 173, 217, 214, 93, 141, 184, 54, 34, 161, 104, 157, 95,
38, 133, 218, 227, 211, 181, 9, 66, 137, 143, 77, 33, 248, 159, 4, 55,
228, 48, 99, 219, 222, 184, 15, 36, 254, 256, 157, 237, 87, 139, 209, 113,
232, 85, 126, 167, 197, 100, 103, 166, 64, 225, 125, 205, 117, 135, 84, 128,
231, 112, 90, 241, 28, 22, 210, 147, 186, 49, 230, 21, 108, 39, 194, 47,
123, 199, 107, 114, 30, 210, 250, 143, 59, 156, 131, 133, 221, 27, 76, 99,
208, 250, 78, 12, 211, 141, 95, 81, 195, 106, 8, 232, 150, 212, 205, 221,
11, 225, 87, 219, 126, 136, 137, 180, 198, 48, 68, 203, 239, 252, 194, 235,
142, 137, 174, 172, 190, 145, 250, 221, 182, 204, 1, 195, 130, 153, 83, 241,
161, 239, 211, 138, 11, 169, 155, 245, 174, 49, 10, 166, 16, 130, 181, 139,
222, 222, 112, 99, 124, 94, 51, 243, 133, 194, 244, 136, 35, 248, 201, 177,
178, 186, 129, 102, 89, 184, 180, 41, 149, 96, 165, 72, 225, 231, 134, 158,
199, 28, 249, 16, 225, 195, 10, 210, 164, 252, 138, 8, 35, 152, 213, 199,
82, 116, 97, 230, 63, 199, 241, 35, 79, 120, 54, 174, 67, 112, 1, 76,
69, 222, 194, 96, 82, 94, 25, 228, 196, 145, 155, 136, 228, 234, 46, 101,
246, 51, 103, 166, 246, 75, 9, 200, 161, 4, 108, 35, 129, 168, 208, 144,
50, 14, 13, 220, 41, 132, 122, 127, 194, 9, 232, 234, 107, 28, 187, 8,
51, 141, 97, 221, 225, 9, 113, 170, 166, 102, 135, 22, 231, 185, 227, 187,
110, 145, 251, 146, 76, 22, 146, 228, 7, 53, 64, 25, 62, 198, 130, 190,
221, 232, 169, 64, 188, 199, 237, 249, 173, 218, 196, 191, 48, 224, 5, 113,
100, 166, 160, 21, 191, 197, 61, 162, 149, 171, 240, 183, 129, 231, 123, 204,
192, 179, 134, 15, 47, 161, 142, 177, 239, 234, 186, 237, 231, 53, 208, 95,
146, 36, 225, 231, 89, 142, 93, 248, 137, 124, 83, 39, 69, 77, 89, 208,
182, 48, 85, 147, 244, 164, 246, 68, 38, 190, 220, 35, 202, 91, 157, 151,
201, 240, 185, 218, 4, 152, 2, 132, 177, 88, 190, 196, 229, 74, 220, 135,
137, 196, 11, 47, 5, 251, 106, 144, 163, 60, 222, 127, 52, 57, 202, 102,
64, 140, 110, 206, 23, 182, 39, 245, 1, 163, 157, 186, 163, 80, 7, 230,
44, 249, 176, 102, 164, 125, 147, 120, 18, 191, 186, 125, 64, 65, 198, 157,
164, 213, 95, 61, 13, 181, 208, 91, 242, 197, 158, 34, 98, 169, 91, 14,
17, 93, 157, 17, 65, 30, 183, 6, 139, 58, 255, 108, 100, 136, 209, 144,
164, 6, 237, 33, 210, 110, 57, 126, 197, 136, 125, 244, 165, 151, 168, 3,
143, 251, 247, 155, 136, 130, 88, 14, 74, 121, 250, 133, 21, 226, 185, 232,
118, 132, 89, 64, 204, 161, 2, 70, 224, 159, 35, 204, 123, 180, 13, 52,
231, 57, 25, 78, 66, 69, 97, 42, 198, 84, 176, 59, 8, 232, 125, 134,
193, 2, 232, 109, 216, 69, 90, 142, 32, 38, 249, 37, 75, 180, 184, 188,
19, 47, 120, 87, 146, 70, 232, 120, 191, 45, 33, 38, 19, 248, 110, 110,
44, 64, 2, 84, 244, 228, 252, 228, 170, 123, 38, 144, 213, 144, 171, 212,
243, 87, 189, 46, 128, 110, 84, 77, 65, 183, 61, 184, 101, 44, 168, 68,
14, 106, 105, 8, 227, 211, 166, 39, 152, 43, 52, 254, 197, 55, 119, 89,
168, 65, 53, 138, 177, 56, 219, 0, 58, 121, 148, 18, 44, 100, 215, 103,
145, 229, 117, 196, 91, 89, 113, 143, 172, 239, 249, 184, 154, 39, 112, 65,
204, 42, 84, 38, 155, 151, 151, 16, 100, 87, 174, 162, 145, 147, 149, 186,
237, 145, 134, 144, 198, 235, 213, 163, 48, 230, 24, 47, 57, 71, 127, 0,
150, 219, 12, 81, 197, 150, 131, 13, 169, 63, 175, 184, 48, 235, 65, 243,
149, 200, 163, 254, 202, 114, 247, 67, 143, 250, 126, 228, 80, 130, 216, 214,
36, 2, 230, 33, 119, 125, 3, 142, 237, 100, 3, 152, 197, 174, 244, 129,
232, 30, 206, 199, 39, 210, 220, 43, 237, 221, 201, 54, 179, 42, 28, 133,
246, 203, 198, 177, 0, 28, 194, 85, 223, 109, 155, 147, 221, 60, 133, 108,
157, 254, 26, 75, 157, 185, 49, 142, 31, 137, 71, 43, 63, 64, 237, 148,
237, 172, 159, 160, 155, 254, 234, 224, 140, 193, 114, 140, 62, 109, 136, 39,
255, 8, 158, 146, 128, 49, 222, 96, 57, 209, 180, 249, 202, 127, 113, 231,
78, 178, 46, 33, 228, 215, 104, 31, 207, 186, 82, 41, 42, 39, 103, 119,
123, 133, 243, 254, 238, 156, 90, 186, 37, 212, 33, 107, 252, 51, 177, 36,
237, 76, 159, 245, 93, 214, 97, 56, 190, 38, 160, 94, 105, 222, 220, 158,
49, 16, 191, 52, 120, 87, 179, 2, 27, 144, 223, 230, 184, 6, 129, 227,
69, 47, 215, 181, 162, 139, 72, 200, 45, 163, 159, 62, 2, 221, 124, 40,
159, 242, 35, 208, 179, 166, 98, 67, 178, 68, 143, 225, 178, 146, 187, 159,
57, 66, 176, 192, 236, 250, 168, 224, 122, 43, 159, 120, 133, 165, 122, 64,
87, 74, 161, 241, 9, 87, 90, 24, 255, 113, 203, 220, 57, 139, 197, 159,
31, 151, 27, 140, 77, 162, 7, 27, 84, 228, 187, 220, 53, 126, 162, 242,
84, 181, 223, 103, 86, 177, 207, 31, 140, 18, 207, 256, 201, 166, 96, 23,
233, 103, 197, 84, 161, 75, 59, 149, 138, 154, 119, 92, 16, 53, 116, 97,
220, 114, 35, 45, 77, 209, 40, 196, 71, 22, 81, 178, 110, 14, 3, 180,
110, 129, 112, 47, 18, 61, 134, 78, 73, 79, 254, 232, 125, 180, 205, 54,
220, 119, 63, 89, 181, 52, 77, 109, 151, 77, 80, 207, 144, 25, 20, 6,
208, 47, 201, 206, 192, 14, 73, 176, 256, 201, 207, 87, 216, 60, 56, 73,
92, 243, 179, 113, 49, 59, 55, 168, 121, 137, 69, 154, 95, 57, 187, 47,
129, 4, 15, 92, 6, 116, 69, 196, 48, 134, 84, 81, 111, 56, 38, 176,
239, 6, 128, 72, 242, 134, 36, 221, 59, 48, 242, 68, 130, 110, 171, 89,
13, 220, 48, 29, 5, 75, 104, 233, 91, 129, 105, 162, 44, 113, 163, 163,
85, 147, 190, 111, 197, 80, 213, 153, 81, 68, 203, 33, 161, 165, 10, 61,
120, 252, 0, 205, 28, 42, 193, 64, 39, 37, 83, 175, 5, 218, 215, 174,
128, 121, 231, 11, 150, 145, 135, 197, 136, 91, 193, 5, 107, 88, 82, 6,
4, 188, 256, 70, 40, 2, 167, 57, 169, 203, 115, 254, 215, 172, 84, 80,
188, 167, 34, 137, 43, 243, 2, 79, 178, 38, 188, 135, 233, 194, 208, 13,
11, 151, 231, 196, 12, 122, 162, 56, 17, 114, 191, 207, 90, 132, 64, 238,
187, 6, 198, 176, 240, 88, 118, 236, 15, 226, 166, 22, 193, 229, 82, 246,
213, 64, 37, 63, 31, 243, 252, 37, 156, 38, 175, 204, 138, 141, 211, 82,
106, 217, 97, 139, 153, 56, 129, 218, 158, 9, 83, 26, 87, 112, 71, 21,
250, 5, 65, 141, 68, 116, 231, 113, 10, 218, 99, 205, 201, 92, 157, 4,
97, 46, 49, 220, 72, 139, 103, 171, 149, 129, 193, 19, 69, 245, 43, 31,
58, 68, 36, 195, 159, 22, 54, 34, 233, 141, 205, 100, 226, 96, 22, 192,
41, 231, 24, 79, 234, 138, 30, 120, 117, 216, 172, 197, 172, 107, 86, 29,
181, 151, 0, 6, 146, 186, 68, 55, 54, 58, 213, 182, 60, 231, 33, 232,
77, 210, 216, 154, 80, 51, 141, 122, 68, 148, 219, 122, 254, 48, 64, 175,
41, 115, 62, 243, 141, 81, 119, 121, 5, 68, 121, 88, 239, 29, 230, 90,
135, 159, 35, 223, 168, 112, 49, 37, 146, 60, 126, 134, 42, 145, 115, 90,
73, 133, 211, 86, 120, 141, 122, 241, 127, 56, 130, 36, 174, 75, 83, 246,
112, 45, 136, 194, 201, 115, 1, 156, 114, 167, 208, 12, 176, 147, 32, 170,
251, 100, 102, 220, 122, 210, 6, 49, 75, 201, 38, 105, 132, 135, 126, 102,
13, 121, 76, 228, 202, 20, 61, 213, 246, 13, 207, 42, 148, 168, 37, 253,
34, 94, 141, 185, 18, 234, 157, 109, 104, 64, 250, 125, 49, 236, 86, 48,
196, 77, 75, 237, 156, 103, 225, 19, 110, 229, 22, 68, 177, 93, 221, 181,
152, 153, 61, 108, 101, 74, 247, 195, 127, 216, 30, 166, 168, 61, 83, 229,
120, 156, 96, 120, 201, 124, 43, 27, 253, 250, 120, 143, 89, 235, 189, 243,
150, 7, 127, 119, 149, 244, 84, 185, 134, 34, 128, 193, 236, 234, 132, 117,
137, 32, 145, 184, 44, 121, 51, 76, 11, 228, 142, 251, 39, 77, 228, 251,
41, 58, 246, 107, 125, 187, 9, 240, 35, 8, 11, 162, 242, 220, 158, 163,
2, 184, 163, 227, 242, 2, 100, 101, 2, 78, 129, 34, 89, 28, 26, 157,
79, 31, 107, 250, 194, 156, 186, 69, 212, 66, 41, 180, 139, 42, 211, 253,
256, 239, 29, 129, 104, 248, 182, 68, 1, 189, 48, 226, 36, 229, 3, 158,
41, 53, 241, 22, 115, 174, 16, 163, 224, 19, 112, 219, 177, 233, 42, 27,
250, 134, 18, 28, 145, 122, 68, 34, 134, 31, 147, 17, 39, 188, 150, 76,
45, 42, 167, 249, 12, 16, 23, 182, 13, 79, 121, 3, 70, 197, 239, 44,
86, 177, 255, 81, 64, 171, 138, 131, 73, 110, 44, 201, 254, 198, 146, 91,
48, 9, 104, 31, 29, 161, 101, 31, 138, 180, 231, 233, 79, 137, 61, 236,
140, 15, 249, 218, 234, 119, 99, 195, 110, 137, 237, 207, 8, 31, 45, 24,
90, 155, 203, 253, 192, 203, 65, 176, 210, 171, 142, 214, 220, 122, 136, 237,
189, 186, 147, 40, 80, 254, 173, 33, 191, 46, 192, 26, 108, 255, 228, 205,
61, 76, 39, 107, 225, 126, 228, 182, 140, 251, 143, 134, 252, 168, 221, 8,
185, 85, 60, 233, 147, 244, 87, 137, 8, 140, 96, 80, 53, 45, 175, 160,
124, 189, 112, 37, 144, 19, 70, 17, 170, 242, 2, 3, 28, 95, 120, 199,
212, 43, 9, 117, 86, 151, 101, 241, 200, 145, 241, 19, 178, 69, 204, 197,
227, 166, 94, 7, 193, 45, 247, 234, 19, 187, 212, 212, 236, 125, 33, 95,
198, 121, 122, 103, 77, 155, 235, 49, 25, 237, 249, 11, 162, 7, 238, 24,
16, 150, 129, 25, 152, 17, 42, 67, 247, 162, 77, 154, 31, 133, 55, 137,
79, 119, 153, 10, 86, 28, 244, 186, 41, 169, 106, 44, 10, 49, 110, 179,
32, 133, 155, 244, 61, 70, 131, 168, 170, 39, 231, 252, 32, 69, 92, 238,
239, 35, 132, 136, 236, 167, 90, 32, 123, 88, 69, 22, 20, 89, 145, 166,
30, 118, 75, 4, 49, 31, 225, 54, 11, 50, 56, 191, 246, 1, 187, 33,
119, 107, 139, 68, 19, 240, 131, 55, 94, 113, 31, 252, 12, 179, 121, 2,
120, 252, 0, 76, 41, 80, 185, 42, 62, 121, 105, 159, 121, 109, 111, 98,
7, 118, 86, 29, 210, 70, 231, 179, 223, 229, 164, 70, 62, 47, 0, 206,
204, 178, 168, 120, 224, 166, 99, 25, 103, 63, 246, 224, 117, 204, 75, 124,
140, 133, 110, 110, 222, 88, 151, 118, 46, 37, 22, 143, 158, 40, 2, 50,
153, 94, 190, 199, 13, 198, 127, 211, 180, 90, 183, 98, 0, 142, 210, 154,
100, 187, 67, 231, 202, 100, 198, 235, 252, 160, 247, 124, 247, 14, 121, 221,
57, 88, 253, 243, 185, 89, 45, 249, 221, 194, 108, 175, 193, 119, 50, 141,
223, 133, 136, 64, 176, 250, 129, 100, 124, 94, 181, 159, 99, 185, 177, 240,
135, 42, 103, 52, 202, 208, 143, 186, 193, 103, 154, 237, 102, 88, 225, 161,
50, 188, 191, 109, 12, 87, 19, 227, 247, 183, 13, 52, 205, 170, 205, 146,
89, 160, 18, 105, 192, 73, 231, 225, 184, 157, 252, 220, 61, 59, 169, 183,
221, 20, 141, 20, 158, 101, 245, 7, 245, 225, 118, 137, 84, 55, 19, 27,
164, 110, 35, 25, 202, 94, 150, 46, 91, 152, 130, 1, 7, 46, 16, 237,
171, 109, 19, 200, 65, 38, 10, 213, 70, 96, 126, 226, 185, 225, 181, 46,
10, 165, 11, 123, 53, 158, 22, 147, 64, 22, 227, 69, 182, 237, 197, 37,
39, 49, 186, 223, 139, 128, 55, 36, 166, 178, 220, 20, 98, 172, 166, 253,
45, 0, 120, 180, 189, 185, 158, 159, 196, 6, 214, 79, 141, 52, 156, 107,
5, 109, 142, 159, 33, 64, 190, 133, 95, 132, 95, 202, 160, 63, 186, 23,
231, 107, 163, 33, 234, 15, 244, 77, 108, 49, 51, 7, 164, 87, 142, 99,
240, 202, 47, 256, 118, 190, 196, 178, 217, 42, 39, 153, 21, 192, 232, 202,
14, 82, 179, 64, 233, 4, 219, 10, 133, 78, 43, 144, 146, 216, 202, 81,
71, 252, 8, 201, 68, 256, 85, 233, 164, 88, 176, 30, 5, 152, 126, 179,
249, 84, 140, 190, 159, 54, 118, 98, 2, 159, 27, 133, 74, 121, 239, 196,
71, 149, 119, 135, 102, 20, 87, 112, 44, 75, 221, 3, 151, 158, 5, 98,
152, 25, 97, 106, 63, 171, 240, 79, 234, 240, 230, 92, 76, 70, 173, 196,
36, 225, 218, 133, 64, 240, 150, 41, 146, 66, 133, 51, 134, 73, 170, 238,
140, 90, 45, 89, 46, 147, 96, 169, 174, 174, 244, 151, 90, 40, 32, 74,
38, 154, 246, 57, 31, 14, 189, 151, 83, 243, 197, 183, 220, 185, 53, 225,
51, 106, 188, 208, 222, 248, 93, 13, 93, 215, 131, 25, 142, 185, 113, 222,
131, 215, 149, 50, 159, 85, 32, 5, 205, 192, 2, 227, 42, 214, 197, 42,
126, 182, 68, 123, 109, 36, 237, 179, 170, 199, 77, 256, 5, 128, 214, 243,
137, 177, 170, 253, 179, 180, 153, 236, 100, 196, 216, 231, 198, 37, 192, 80,
121, 221, 246, 1, 16, 246, 29, 78, 64, 148, 124, 38, 96, 125, 28, 20,
48, 51, 73, 187, 139, 208, 98, 253, 221, 188, 84, 129, 1, 205, 95, 205,
117, 79, 71, 126, 134, 237, 19, 184, 137, 125, 129, 178, 223, 54, 188, 112,
30, 7, 225, 228, 205, 184, 233, 87, 117, 22, 58, 10, 8, 42, 2, 114,
254, 19, 17, 13, 150, 92, 233, 179, 63, 12, 60, 171, 127, 35, 50, 5,
195, 113, 241, 25, 249, 184, 166, 44, 221, 35, 151, 116, 8, 54, 195, 89,
218, 186, 132, 5, 41, 89, 226, 177, 11, 41, 87, 172, 5, 23, 20, 59,
228, 94, 76, 33, 137, 43, 151, 221, 61, 232, 4, 120, 93, 217, 80, 228,
228, 6, 58, 25, 62, 84, 91, 48, 209, 20, 247, 243, 55, 106, 80, 79,
235, 34, 20, 180, 146, 2, 236, 13, 236, 206, 243, 222, 204, 83, 148, 213,
214, 117, 237, 98, 0, 90, 204, 168, 32, 41, 126, 67, 191, 74, 27, 255,
26, 75, 240, 113, 185, 105, 167, 154, 112, 67, 151, 63, 161, 134, 239, 176,
42, 87, 249, 130, 45, 242, 17, 100, 107, 120, 212, 218, 237, 76, 231, 162,
175, 172, 118, 155, 92, 36, 124, 17, 121, 71, 13, 9, 82, 126, 147, 142,
218, 148, 138, 80, 163, 106, 164, 123, 140, 129, 35, 42, 186, 154, 228, 214,
75, 73, 8, 253, 42, 153, 232, 164, 95, 24, 110, 90, 231, 197, 90, 196,
57, 164, 252, 181, 31, 7, 97, 256, 35, 77, 200, 212, 99, 179, 92, 227,
17, 180, 49, 176, 9, 188, 13, 182, 93, 44, 128, 219, 134, 92, 151, 6,
23, 126, 200, 109, 66, 30, 140, 180, 146, 134, 67, 200, 7, 9, 223, 168,
186, 221, 3, 154, 150, 165, 43, 53, 138, 27, 86, 213, 235, 160, 70, 2,
240, 20, 89, 212, 84, 141, 168, 246, 183, 227, 30, 167, 138, 185, 253, 83,
52, 143, 236, 94, 59, 65, 89, 218, 194, 157, 164, 156, 111, 95, 202, 168,
245, 256, 151, 28, 222, 194, 72, 130, 217, 134, 253, 77, 246, 100, 76, 32,
254, 174, 182, 193, 14, 237, 74, 1, 74, 26, 135, 216, 152, 208, 112, 38,
181, 62, 25, 71, 61, 234, 254, 97, 191, 23, 92, 256, 190, 205, 6, 16,
134, 147, 210, 219, 148, 59, 73, 185, 24, 247, 174, 143, 116, 220, 128, 144,
111, 126, 101, 98, 130, 136, 101, 102, 69, 127, 24, 168, 146, 226, 226, 207,
176, 122, 149, 254, 134, 196, 22, 151, 197, 21, 50, 205, 116, 154, 65, 116,
177, 224, 127, 77, 177, 159, 225, 69, 176, 54, 100, 104, 140, 8, 11, 126,
11, 188, 185, 159, 107, 16, 254, 142, 80, 28, 5, 157, 104, 57, 109, 82,
102, 80, 173, 242, 238, 207, 57, 105, 237, 160, 59, 189, 189, 199, 26, 11,
190, 156, 97, 118, 20, 12, 254, 189, 165, 147, 142, 199, 5, 213, 64, 133,
108, 217, 133, 60, 94, 28, 116, 136, 47, 165, 125, 42, 183, 143, 14, 129,
223, 70, 212, 205, 181, 180, 3, 201, 182, 46, 57, 104, 239, 60, 99, 181,
220, 231, 45, 79, 156, 89, 149, 143, 190, 103, 153, 61, 235, 73, 136, 20,
89, 243, 16, 130, 247, 141, 134, 93, 80, 68, 85, 84, 8, 72, 194, 4,
242, 110, 19, 133, 199, 70, 172, 92, 132, 254, 67, 74, 36, 94, 13, 90,
154, 184, 9, 109, 118, 243, 214, 71, 36, 95, 0, 90, 201, 105, 112, 215,
69, 196, 224, 210, 236, 242, 155, 211, 37, 134, 69, 113, 157, 97, 68, 26,
230, 149, 219, 180, 20, 76, 172, 145, 154, 40, 129, 8, 93, 56, 162, 124,
207, 233, 105, 19, 3, 183, 155, 134, 8, 244, 213, 78, 139, 88, 156, 37,
51, 152, 111, 102, 112, 250, 114, 252, 201, 241, 133, 24, 136, 153, 5, 90,
210, 197, 216, 24, 131, 17, 147, 246, 13, 86, 3, 253, 179, 237, 101, 114,
243, 191, 207, 2, 220, 133, 244, 53, 87, 125, 154, 158, 197, 20, 8, 83,
32, 191, 38, 241, 204, 22, 168, 59, 217, 123, 162, 82, 21, 50, 130, 89,
239, 253, 195, 56, 253, 74, 147, 125, 234, 199, 250, 28, 65, 193, 22, 237,
193, 94, 58, 229, 139, 176, 69, 42, 179, 164, 150, 168, 246, 214, 86, 174,
59, 117, 15, 19, 76, 37, 214, 238, 153, 226, 154, 45, 109, 114, 198, 107,
45, 70, 238, 196, 142, 252, 244, 71, 123, 136, 134, 188, 99, 132, 25, 42,
240, 0, 196, 33, 26, 124, 256, 145, 27, 102, 153, 35, 28, 132, 221, 167,
138, 133, 41, 170, 95, 224, 40, 139, 239, 153, 1, 106, 255, 106, 170, 163,
127, 44, 155, 232, 194, 119, 232, 117, 239, 143, 108, 41, 3, 9, 180, 256,
144, 113, 133, 200, 79, 69, 128, 216, 31, 50, 102, 209, 249, 136, 150, 154,
182, 51, 228, 39, 127, 142, 87, 15, 94, 92, 187, 245, 31, 236, 64, 58,
114, 11, 17, 166, 189, 152, 218, 34, 123, 39, 58, 37, 153, 91, 63, 121,
31, 34, 12, 254, 106, 96, 171, 14, 155, 247, 214, 69, 24, 98, 3, 204,
202, 194, 207, 30, 253, 44, 119, 70, 14, 96, 82, 250, 63, 6, 232, 38,
89, 144, 102, 191, 82, 254, 20, 222, 96, 162, 110, 6, 159, 58, 200, 226,
98, 128, 42, 70, 84, 247, 128, 211, 136, 54, 143, 166, 60, 118, 99, 218,
27, 193, 85, 81, 219, 223, 46, 41, 23, 233, 152, 222, 36, 236, 54, 181,
56, 50, 4, 207, 129, 92, 78, 88, 197, 251, 131, 105, 31, 172, 38, 131,
19, 204, 129, 47, 227, 106, 202, 183, 23, 6, 77, 224, 102, 147, 11, 218,
131, 132, 60, 192, 208, 223, 236, 23, 103, 115, 89, 18, 185, 171, 70, 174,
139, 0, 100, 160, 221, 11, 228, 60, 12, 122, 114, 12, 157, 235, 148, 57,
83, 62, 173, 131, 169, 126, 85, 99, 93, 243, 81, 80, 29, 245, 206, 82,
236, 227, 166, 14, 230, 213, 144, 97, 27, 111, 99, 164, 105, 150, 89, 111,
252, 118, 140, 232, 120, 183, 137, 213, 232, 157, 224, 33, 134, 118, 186, 80,
159, 2, 186, 193, 54, 242, 25, 237, 232, 249, 226, 213, 90, 149, 90, 160,
118, 69, 64, 37, 10, 183, 109, 246, 30, 52, 219, 69, 189, 26, 116, 220,
50, 244, 243, 243, 139, 137, 232, 98, 38, 45, 256, 143, 171, 101, 73, 238,
123, 45, 194, 167, 250, 123, 12, 29, 136, 237, 141, 21, 89, 96, 199, 44,
8, 214, 208, 17, 113, 41, 137, 26, 166, 155, 89, 85, 54, 58, 97, 160,
50, 239, 58, 71, 21, 157, 139, 12, 37, 198, 182, 131, 149, 134, 16, 204,
164, 181, 248, 166, 52, 216, 136, 201, 37, 255, 187, 240, 5, 101, 147, 231,
14, 163, 253, 134, 146, 216, 8, 54, 224, 90, 220, 195, 75, 215, 186, 58,
71, 204, 124, 105, 239, 53, 16, 85, 69, 163, 195, 223, 33, 38, 69, 88,
88, 203, 99, 55, 176, 13, 156, 204, 236, 99, 194, 134, 75, 247, 126, 129,
160, 124, 233, 206, 139, 144, 154, 45, 233, 51, 206, 61, 60, 55, 205, 107,
84, 108, 96, 188, 203, 31, 89, 20, 115, 144, 137, 90, 237, 78, 231, 185,
120, 217, 1, 176, 169, 30, 155, 176, 100, 113, 53, 42, 193, 108, 14, 121,
176, 158, 137, 92, 178, 44, 110, 249, 108, 234, 94, 101, 128, 12, 250, 173,
72, 202, 232, 66, 139, 152, 189, 18, 32, 197, 9, 238, 246, 55, 119, 183,
196, 119, 113, 247, 191, 100, 200, 245, 46, 16, 234, 112, 136, 116, 232, 48,
176, 108, 11, 237, 14, 153, 93, 177, 124, 72, 67, 121, 135, 143, 45, 18,
97, 251, 184, 172, 136, 55, 213, 8, 103, 12, 221, 212, 13, 160, 116, 91,
237, 127, 218, 190, 103, 131, 77, 82, 36, 100, 22, 252, 79, 69, 54, 26,
65, 182, 115, 142, 247, 20, 89, 81, 188, 244, 27, 120, 240, 248, 13, 230,
67, 133, 32, 201, 129, 87, 9, 245, 66, 88, 166, 34, 46, 184, 119, 218,
144, 235, 163, 40, 138, 134, 127, 217, 64, 227, 116, 67, 55, 202, 130, 48,
199, 42, 251, 112, 124, 153, 123, 194, 243, 49, 250, 12, 78, 157, 167, 134,
210, 73, 156, 102, 21, 88, 216, 123, 45, 11, 208, 18, 47, 187, 20, 43,
3, 180, 124, 2, 136, 176, 77, 111, 138, 139, 91, 225, 126, 8, 74, 255,
88, 192, 193, 239, 138, 204, 139, 194, 166, 130, 252, 184, 140, 168, 30, 177,
121, 98, 131, 124, 69, 171, 75, 49, 184, 34, 76, 122, 202, 115, 184, 253,
120, 182, 33, 251, 1, 74, 216, 217, 243, 168, 70, 162, 119, 158, 197, 198,
61, 89, 7, 5, 54, 199, 211, 170, 23, 226, 44, 247, 165, 195, 7, 225,
91, 23, 50, 15, 51, 208, 106, 94, 12, 31, 43, 112, 146, 139, 246, 182,
113, 1, 97, 15, 66, 2, 51, 76, 164, 184, 237, 200, 218, 176, 72, 98,
33, 135, 38, 147, 140, 229, 50, 94, 81, 187, 129, 17, 238, 168, 146, 203,
181, 99, 164, 3, 104, 98, 255, 189, 114, 142, 86, 102, 229, 102, 80, 129,
64, 84, 79, 161, 81, 156, 128, 111, 164, 197, 18, 15, 55, 196, 198, 191,
28, 113, 117, 96, 207, 253, 19, 158, 231, 13, 53, 130, 252, 211, 58, 180,
212, 142, 7, 219, 38, 81, 62, 109, 167, 113, 33, 56, 97, 185, 157, 130,
186, 129, 119, 182, 196, 26, 54, 110, 65, 170, 166, 236, 30, 22, 162, 0,
106, 12, 248, 33, 48, 72, 159, 17, 76, 244, 172, 132, 89, 171, 196, 76,
254, 166, 76, 218, 226, 3, 52, 220, 238, 181, 179, 144, 225, 23, 3, 166,
158, 35, 228, 154, 204, 23, 203, 71, 134, 189, 18, 168, 236, 141, 117, 138,
2, 132, 78, 57, 154, 21, 250, 196, 184, 40, 161, 40, 10, 178, 134, 120,
132, 123, 101, 82, 205, 121, 55, 140, 231, 56, 231, 71, 206, 246, 198, 150,
146, 192, 45, 105, 242, 1, 125, 18, 176, 46, 222, 122, 19, 80, 113, 133,
131, 162, 81, 51, 98, 168, 247, 161, 139, 39, 63, 162, 22, 153, 170, 92,
91, 130, 174, 200, 45, 112, 99, 164, 132, 184, 191, 186, 200, 167, 86, 145,
167, 227, 130, 44, 12, 158, 172, 249, 204, 17, 54, 249, 16, 200, 21, 174,
67, 223, 105, 201, 50, 36, 133, 203, 244, 131, 228, 67, 29, 195, 91, 91,
55, 107, 167, 154, 170, 137, 218, 183, 169, 61, 99, 175, 128, 23, 142, 183,
66, 255, 59, 187, 66, 85, 212, 109, 168, 82, 16, 43, 67, 139, 114, 176,
216, 255, 130, 94, 152, 79, 183, 64, 100, 23, 214, 82, 34, 230, 48, 15,
242, 130, 50, 241, 81, 32, 5, 125, 183, 182, 184, 99, 248, 109, 159, 210,
226, 61, 119, 129, 39, 149, 78, 214, 107, 78, 147, 124, 228, 18, 143, 188,
84, 180, 233, 119, 64, 39, 158, 133, 177, 168, 6, 150, 80, 117, 150, 56,
49, 72, 49, 37, 30, 242, 49, 142, 33, 156, 34, 44, 44, 72, 58, 22,
249, 46, 168, 80, 25, 196, 64, 174, 97, 179, 244, 134, 213, 105, 63, 151,
21, 90, 168, 90, 245, 28, 157, 65, 250, 232, 188, 27, 99, 160, 156, 127,
68, 193, 10, 80, 205, 36, 138, 229, 12, 223, 70, 169, 251, 41, 48, 94,
41, 177, 99, 256, 158, 0, 6, 83, 231, 191, 120, 135, 157, 146, 218, 213,
160, 7, 47, 234, 98, 211, 79, 225, 179, 95, 175, 105, 185, 79, 115, 0,
104, 14, 65, 124, 15, 188, 52, 9, 253, 27, 132, 137, 13, 127, 75, 238,
185, 253, 33, 8, 52, 157, 164, 68, 232, 188, 69, 28, 209, 233, 5, 129,
216, 90, 252, 212, 33, 200, 222, 9, 112, 15, 43, 36, 226, 114, 15, 249,
217, 8, 148, 22, 147, 23, 143, 67, 222, 116, 235, 250, 212, 210, 39, 142,
108, 64, 209, 83, 73, 66, 99, 34, 17, 29, 45, 151, 244, 114, 28, 241,
144, 208, 146, 179, 132, 89, 217, 198, 252, 219, 205, 165, 75, 107, 11, 173,
76, 6, 196, 247, 152, 216, 248, 91, 209, 178, 57, 250, 174, 60, 79, 123,
18, 135, 9, 241, 230, 159, 184, 68, 156, 251, 215, 9, 113, 234, 75, 235,
103, 194, 205, 129, 230, 45, 96, 73, 157, 20, 200, 212, 212, 228, 161, 7,
231, 228, 108, 43, 198, 87, 140, 140, 4, 182, 164, 3, 53, 104, 250, 213,
85, 38, 89, 61, 52, 187, 35, 204, 86, 249, 100, 71, 248, 213, 163, 215,
66, 106, 252, 129, 40, 111, 47, 24, 186, 221, 85, 205, 199, 237, 122, 181,
32, 46, 182, 135, 33, 251, 142, 34, 208, 242, 128, 255, 4, 234, 15, 33,
167, 222, 32, 186, 191, 34, 255, 244, 98, 240, 228, 204, 30, 142, 32, 70,
69, 83, 110, 151, 10, 243, 141, 21, 223, 69, 61, 37, 59, 209, 102, 114,
223, 33, 129, 254, 255, 103, 86, 247, 235, 72, 126, 177, 102, 226, 102, 30,
149, 221, 62, 247, 251, 120, 163, 173, 57, 202, 204, 24, 39, 106, 120, 143,
202, 176, 191, 147, 37, 38, 51, 133, 47, 245, 157, 132, 154, 71, 183, 111,
30, 180, 18, 202, 82, 96, 170, 91, 157, 181, 212, 140, 256, 8, 196, 121,
149, 79, 66, 127, 113, 78, 4, 197, 84, 256, 111, 222, 102, 63, 228, 104,
136, 223, 67, 193, 93, 154, 249, 83, 204, 101, 200, 234, 84, 252, 230, 195,
43, 140, 120, 242, 89, 63, 166, 233, 209, 94, 43, 170, 126, 5, 205, 78,
112, 80, 143, 151, 146, 248, 137, 203, 45, 183, 61, 1, 155, 8, 102, 59,
68, 212, 230, 61, 254, 191, 128, 223, 176, 123, 229, 27, 146, 120, 96, 165,
213, 12, 232, 40, 186, 225, 66, 105, 200, 195, 212, 110, 237, 238, 151, 19,
12, 171, 150, 82, 7, 228, 79, 52, 15, 78, 62, 43, 21, 154, 114, 21,
12, 212, 256, 232, 125, 127, 5, 51, 37, 252, 136, 13, 47, 195, 168, 191,
231, 55, 57, 251, 214, 116, 15, 86, 210, 41, 249, 242, 119, 27, 250, 203,
107, 69, 90, 43, 206, 154, 127, 54, 100, 78, 187, 54, 244, 177, 234, 167,
202, 136, 209, 171, 69, 114, 133, 173, 26, 139, 78, 141, 128, 32, 124, 39,
45, 218, 96, 68, 90, 44, 67, 62, 83, 190, 188, 256, 103, 42, 102, 64,
249, 0, 141, 11, 61, 69, 70, 66, 233, 237, 29, 200, 251, 157, 71, 51,
64, 133, 113, 76, 35, 125, 76, 137, 217, 145, 35, 69, 226, 180, 56, 249,
156, 163, 176, 237, 81, 54, 85, 169, 115, 211, 129, 70, 248, 40, 252, 192,
194, 101, 247, 8, 181, 124, 217, 191, 194, 93, 99, 127, 117, 177, 144, 151,
228, 121, 32, 11, 89, 81, 26, 29, 183, 76, 249, 132, 179, 70, 34, 102,
20, 66, 87, 63, 124, 205, 174, 177, 87, 219, 73, 218, 91, 87, 176, 72,
15, 211, 47, 61, 251, 165, 39, 247, 146, 70, 150, 57, 1, 212, 36, 162,
39, 38, 16, 216, 3, 50, 116, 200, 32, 234, 77, 181, 155, 19, 90, 188,
36, 6, 254, 46, 46, 203, 25, 230, 181, 196, 4, 151, 225, 65, 122, 216,
168, 86, 158, 131, 136, 16, 49, 102, 233, 64, 154, 88, 228, 52, 146, 69,
93, 157, 243, 121, 70, 209, 126, 213, 88, 145, 236, 65, 70, 96, 204, 47,
10, 200, 77, 8, 103, 150, 48, 153, 5, 37, 52, 235, 209, 31, 181, 126,
83, 142, 224, 140, 6, 32, 200, 171, 160, 179, 115, 229, 75, 194, 208, 39,
59, 223, 52, 247, 38, 197, 135, 1, 6, 189, 106, 114, 168, 5, 211, 222,
44, 63, 90, 160, 116, 172, 170, 133, 125, 138, 39, 131, 23, 178, 10, 214,
36, 93, 28, 59, 68, 17, 123, 25, 255, 184, 204, 102, 194, 214, 129, 94,
159, 245, 112, 141, 62, 11, 61, 197, 124, 221, 205, 11, 79, 71, 201, 54,
58, 150, 29, 121, 87, 46, 240, 201, 68, 20, 194, 209, 47, 152, 158, 174,
193, 164, 120, 255, 216, 165, 247, 58, 85, 130, 220, 23, 122, 223, 188, 98,
21, 70, 72, 170, 150, 237, 76, 143, 112, 238, 206, 146, 215, 110, 4, 250,
68, 44, 174, 177, 30, 98, 143, 241, 180, 127, 113, 48, 0, 1, 179, 199,
59, 106, 201, 114, 29, 86, 173, 133, 217, 44, 200, 141, 107, 172, 16, 60,
82, 58, 239, 94, 141, 234, 186, 235, 109, 173, 249, 139, 141, 59, 100, 248,
84, 144, 49, 160, 51, 207, 164, 103, 74, 97, 146, 202, 193, 125, 168, 134,
236, 111, 135, 121, 59, 145, 168, 200, 181, 173, 109, 2, 255, 6, 9, 245,
90, 202, 214, 143, 121, 65, 85, 232, 132, 77, 228, 84, 26, 54, 184, 15,
161, 29, 177, 79, 43, 0, 156, 184, 163, 165, 62, 90, 179, 93, 45, 239,
1, 16, 120, 189, 127, 47, 74, 166, 20, 214, 233, 226, 89, 217, 229, 26,
156, 53, 162, 60, 21, 3, 192, 72, 111, 51, 53, 101, 181, 208, 88, 82,
179, 160, 219, 113, 240, 108, 43, 224, 162, 147, 62, 14, 95, 81, 205, 4,
160, 177, 225, 115, 29, 69, 235, 168, 148, 29, 128, 114, 124, 129, 172, 165,
215, 231, 214, 86, 160, 44, 157, 91, 248, 183, 73, 164, 56, 181, 162, 92,
141, 118, 127, 240, 196, 77, 0, 9, 244, 79, 250, 100, 195, 25, 255, 85,
94, 35, 212, 137, 107, 34, 110, 20, 200, 104, 17, 32, 231, 43, 150, 159,
231, 216, 223, 190, 226, 109, 162, 197, 87, 92, 224, 11, 111, 73, 60, 225,
238, 73, 246, 169, 19, 217, 119, 38, 121, 118, 70, 82, 99, 241, 110, 67,
31, 76, 146, 215, 124, 240, 31, 103, 139, 224, 75, 160, 31, 78, 93, 4,
64, 9, 103, 223, 6, 227, 119, 85, 116, 81, 21, 43, 46, 206, 234, 132,
85, 99, 22, 131, 135, 97, 86, 13, 234, 188, 21, 14, 89, 169, 207, 238,
219, 177, 190, 72, 157, 41, 114, 140, 92, 141, 186, 1, 63, 107, 225, 184,
118, 150, 153, 254, 241, 106, 120, 210, 104, 144, 151, 161, 88, 206, 125, 164,
15, 211, 173, 49, 146, 241, 71, 36, 58, 201, 46, 27, 33, 187, 91, 162,
117, 19, 210, 213, 187, 97, 193, 50, 190, 114, 217, 60, 61, 167, 207, 213,
213, 53, 135, 34, 156, 91, 115, 119, 46, 99, 242, 1, 90, 52, 198, 227,
201, 91, 216, 146, 210, 82, 121, 38, 73, 133, 182, 193, 132, 148, 246, 75,
109, 157, 179, 113, 176, 134, 205, 159, 148, 58, 103, 171, 132, 156, 133, 147,
161, 231, 39, 100, 175, 97, 125, 28, 183, 129, 135, 191, 202, 181, 29, 218,
43, 104, 148, 203, 189, 204, 4, 182, 169, 1, 134, 122, 141, 202, 13, 187,
177, 112, 162, 35, 231, 6, 8, 241, 99, 6, 191, 45, 113, 113, 101, 104};
// The S-Box we use for further linearity breaking.
// We created it by taking the digits of the decimal expansion of e.
// The code that created it can be found in 'ProduceRandomSBox.c'.
unsigned char SBox[256] = {
//0 1 2 3 4 5 6 7 8 9 A B C D E F
0x7d, 0xd1, 0x70, 0x0b, 0xfa, 0x39, 0x18, 0xc3, 0xf3, 0xbb, 0xa7, 0xd4, 0x84, 0x25, 0x3b, 0x3c, // 0
0x2c, 0x15, 0x69, 0x9a, 0xf9, 0x27, 0xfb, 0x02, 0x52, 0xba, 0xa8, 0x4b, 0x20, 0xb5, 0x8b, 0x3a, // 1
0x88, 0x8e, 0x26, 0xcb, 0x71, 0x5e, 0xaf, 0xad, 0x0c, 0xac, 0xa1, 0x93, 0xc6, 0x78, 0xce, 0xfc, // 2
0x2a, 0x76, 0x17, 0x1f, 0x62, 0xc2, 0x2e, 0x99, 0x11, 0x37, 0x65, 0x40, 0xfd, 0xa0, 0x03, 0xc1, // 3
0xca, 0x48, 0xe2, 0x9b, 0x81, 0xe4, 0x1c, 0x01, 0xec, 0x68, 0x7a, 0x5a, 0x50, 0xf8, 0x0e, 0xa3, // 4
0xe8, 0x61, 0x2b, 0xa2, 0xeb, 0xcf, 0x8c, 0x3d, 0xb4, 0x95, 0x13, 0x08, 0x46, 0xab, 0x91, 0x7b, // 5
0xea, 0x55, 0x67, 0x9d, 0xdd, 0x29, 0x6a, 0x8f, 0x9f, 0x22, 0x4e, 0xf2, 0x57, 0xd2, 0xa9, 0xbd, // 6
0x38, 0x16, 0x5f, 0x4c, 0xf7, 0x9e, 0x1b, 0x2f, 0x30, 0xc7, 0x41, 0x24, 0x5c, 0xbf, 0x05, 0xf6, // 7
0x0a, 0x31, 0xa5, 0x45, 0x21, 0x33, 0x6b, 0x6d, 0x6c, 0x86, 0xe1, 0xa4, 0xe6, 0x92, 0x9c, 0xdf, // 8
0xe7, 0xbe, 0x28, 0xe3, 0xfe, 0x06, 0x4d, 0x98, 0x80, 0x04, 0x96, 0x36, 0x3e, 0x14, 0x4a, 0x34, // 9
0xd3, 0xd5, 0xdb, 0x44, 0xcd, 0xf5, 0x54, 0xdc, 0x89, 0x09, 0x90, 0x42, 0x87, 0xff, 0x7e, 0x56, // A
0x5d, 0x59, 0xd7, 0x23, 0x75, 0x19, 0x97, 0x73, 0x83, 0x64, 0x53, 0xa6, 0x1e, 0xd8, 0xb0, 0x49, // B
0x3f, 0xef, 0xbc, 0x7f, 0x43, 0xf0, 0xc9, 0x72, 0x0f, 0x63, 0x79, 0x2d, 0xc0, 0xda, 0x66, 0xc8, // C
0x32, 0xde, 0x47, 0x07, 0xb8, 0xe9, 0x1d, 0xc4, 0x85, 0x74, 0x82, 0xcc, 0x60, 0x51, 0x77, 0x0d, // D
0xaa, 0x35, 0xed, 0x58, 0x7c, 0x5b, 0xb9, 0x94, 0x6e, 0x8d, 0xb1, 0xc5, 0xb7, 0xee, 0xb6, 0xae, // E
0x10, 0xe0, 0xd6, 0xd9, 0xe5, 0x4f, 0xf1, 0x12, 0x00, 0xd0, 0xf4, 0x1a, 0x6f, 0x8a, 0xb3, 0xb2 }; // F
///////////////////////////////////////////////////////////////////////////////////////////////
//
// Helper functions definition portion.
//
///////////////////////////////////////////////////////////////////////////////////////////////
// Don't vectorize, move decl to header file
// Translates an input array with values in base 257 to an output array with values in base 256.
// Returns the carry bit.
//
// Parameters:
// - input: the input array of size EIGHTH_N. Each value in the array is a number in Z_257.
// The MSB is assumed to be the last one in the array.
// - output: the input array encoded in base 256.
//
// Returns:
// - The carry bit (MSB).
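//
// Example (assuming EIGHTH_N = 8): digits {3, 2, 0, 0, 0, 0, 0, 0} encode
// 3 + 2*257 = 517 = 0x0205 in base 257, so output starts {0x05, 0x02} and
// the returned carry is 0.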
swift_int16_t TranslateToBase256(swift_int32_t input[EIGHTH_N], unsigned char output[EIGHTH_N]);
// Translates an input integer into the range (-FIELD_SIZE / 2) <= result <= (FIELD_SIZE / 2).
//
// Parameters:
// - x: the input integer.
//
// Returns:
// - The result, which equals (x MOD FIELD_SIZE), such that |result| <= (FIELD_SIZE / 2).
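//
// Example: with FIELD_SIZE = 257, Center(200) = -57 and Center(-200) = 57.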
int Center(int x);
// Calculates bit reversal permutation.
//
// Parameters:
// - input: the input to reverse.
// - numOfBits: the number of bits in the input to reverse.
//
// Returns:
// - The resulting number, which is obtained from the input by reversing its bits.
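//
// Note: numOfBits is passed as a power of two (2^bits) and doubles as a
// sentinel bit in the loop below. Example: ReverseBits(3, 8) = 6, i.e. the
// 3-bit value 011b reversed is 110b.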
int ReverseBits(int input, int numOfBits);
// Initializes the FFT fast lookup table.
// Shall be called only once.
void InitializeSWIFFTX();
// Calculates the FFT.
//
// Parameters:
// - input: the input to the FFT.
// - output: the resulting output.
void FFT(const unsigned char input[EIGHTH_N], swift_int32_t *output);
///////////////////////////////////////////////////////////////////////////////////////////////
// Helper functions implementation portion.
///////////////////////////////////////////////////////////////////////////////////////////////
// Don't vectorize, delete this copy.
swift_int16_t TranslateToBase256(swift_int32_t input[EIGHTH_N], unsigned char output[EIGHTH_N])
{
swift_int32_t pairs[EIGHTH_N / 2];
int i;
for (i = 0; i < EIGHTH_N; i += 2)
{
// input[i] + 257 * input[i + 1]
pairs[i >> 1] = input[i] + input[i + 1] + (input[i + 1] << 8);
}
for (i = (EIGHTH_N / 2) - 1; i > 0; --i)
{
int j;
for (j = i - 1; j < (EIGHTH_N / 2) - 1; ++j)
{
// pairs[j] + pairs[j + 1] * 513, because 257^2 mod 2^16 = 513.
register swift_int32_t temp = pairs[j] + pairs[j + 1] + (pairs[j + 1] << 9);
pairs[j] = temp & 0xffff;
pairs[j + 1] += (temp >> 16);
}
}
for (i = 0; i < EIGHTH_N; i += 2)
{
output[i] = (unsigned char) (pairs[i >> 1] & 0xff);
output[i + 1] = (unsigned char) ((pairs[i >> 1] >> 8) & 0xff);
}
return (pairs[EIGHTH_N/2 - 1] >> 16);
}
int Center(int x)
{
int result = x % FIELD_SIZE;
if (result > (FIELD_SIZE / 2))
result -= FIELD_SIZE;
if (result < (FIELD_SIZE / -2))
result += FIELD_SIZE;
return result;
}
int ReverseBits(int input, int numOfBits)
{
register int reversed = 0;
for (input |= numOfBits; input > 1; input >>= 1)
reversed = (reversed << 1) | (input & 1);
return reversed;
}
void InitializeSWIFFTX()
{
int i, j, k, x;
// The powers of OMEGA
int omegaPowers[2 * N];
omegaPowers[0] = 1;
if (wasSetupDone)
return;
for (i = 1; i < (2 * N); ++i)
{
omegaPowers[i] = Center(omegaPowers[i - 1] * OMEGA);
}
for (i = 0; i < (N / W); ++i)
{
for (j = 0; j < W; ++j)
{
multipliers[(i << 3) + j] = omegaPowers[ReverseBits(i, N / W) * (2 * j + 1)];
}
}
for (x = 0; x < 256; ++x)
{
for (j = 0; j < 8; ++j)
{
register int temp = 0;
for (k = 0; k < 8; ++k)
{
temp += omegaPowers[(EIGHTH_N * (2 * j + 1) * ReverseBits(k, W)) % (2 * N)]
* ((x >> k) & 1);
}
fftTable[(x << 3) + j] = Center(temp);
}
}
wasSetupDone = true;
}
// Input must be deinterleaved in contiguous memory: 4 lanes of EIGHTH_N
// bytes each.
// output and F are 4x32
// multipliers & fftTable are scalar 16
void FFT_4way( const unsigned char input[4][EIGHTH_N],
               m128_swift_int32_t *output )
{
   swift_int16_t *mult = multipliers;
   m128_swift_int32_t F[64];
   for ( int i = 0; i < EIGHTH_N; i++ )
   {
      int j = i << 3;
      // Each lane selects its own 8 entry group of fftTable with its own
      // input byte, so the table values are gathered one lane at a time,
      // then multiplied by the broadcast scalar multiplier.
      for ( int k = 0; k < 8; k++ )
      {
         __m128i table = _mm_set_epi32( fftTable[ (input[3][i] << 3) + k ],
                                        fftTable[ (input[2][i] << 3) + k ],
                                        fftTable[ (input[1][i] << 3) + k ],
                                        fftTable[ (input[0][i] << 3) + k ] );
         F[ i + (k << 3) ] = _mm_mullo_epi32( mm128_const1_32( mult[j+k] ),
                                              table );
      }
   }
for ( int i = 0; i < 8; i++ )
{
int j = i<<3;
ADD_SUB_4WAY( F[j ], F[j+1] );
ADD_SUB_4WAY( F[j+2], F[j+3] );
ADD_SUB_4WAY( F[j+4], F[j+5] );
ADD_SUB_4WAY( F[j+6], F[j+7] );
F[j+3] = _mm_slli_epi32( F[j+3], 4 );
F[j+7] = _mm_slli_epi32( F[j+7], 4 );
ADD_SUB_4WAY( F[j ], F[j+2] );
ADD_SUB_4WAY( F[j+1], F[j+3] );
ADD_SUB_4WAY( F[j+4], F[j+6] );
ADD_SUB_4WAY( F[j+5], F[j+7] );
F[j+5] = _mm_slli_epi32( F[j+5], 2 );
F[j+6] = _mm_slli_epi32( F[j+6], 4 );
F[j+7] = _mm_slli_epi32( F[j+7], 6 );
ADD_SUB_4WAY( F[j ], F[j+4] );
ADD_SUB_4WAY( F[j+1], F[j+5] );
ADD_SUB_4WAY( F[j+2], F[j+6] );
ADD_SUB_4WAY( F[j+3], F[j+7] );
output[i ] = Q_REDUCE_4WAY( F[j ] );
output[i+ 8] = Q_REDUCE_4WAY( F[j+1] );
output[i+16] = Q_REDUCE_4WAY( F[j+2] );
output[i+24] = Q_REDUCE_4WAY( F[j+3] );
output[i+32] = Q_REDUCE_4WAY( F[j+4] );
output[i+40] = Q_REDUCE_4WAY( F[j+5] );
output[i+48] = Q_REDUCE_4WAY( F[j+6] );
output[i+56] = Q_REDUCE_4WAY( F[j+7] );
}
}
// Calculates the FFT part of SWIFFT.
// We divided the SWIFFT calculation into two, because that way we could save 2 computations of
// the FFT part, since in the first stage of SWIFFTX the difference between the first 3 SWIFFTs
// is only the A's part.
//
// Parameters:
// - input: the input to FFT.
// - m: the input size divided by 8. The function performs m FFTs.
// - output: will store the result.
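//
// E.g. in the first stage of SWIFFTX m = M, so the 256-byte input block is
// processed as M = 32 FFTs of EIGHTH_N = 8 bytes each.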
void SWIFFTFFT(const unsigned char *input, int m, swift_int32_t *output)
{
int i;
for (i = 0;
i < m;
i++, input += EIGHTH_N, output += N)
{
FFT(input, output);
}
}
// Calculates the 'sum' part of SWIFFT, including the base change at the end.
// We divided the SWIFFT calculation into two, because that way we could save 2 computations of
// the FFT part, since in the first stage of SWIFFTX the difference between the first 3 SWIFFTs
// is only the A's part.
//
// Parameters:
// - input: the input. Of size 64 * m.
// - m: the input size divided by 64.
// - output: will store the result.
// - a: the coefficients in the sum. Of size 64 * m.
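//
// I.e. result[j] = sum over i of input[i*N + j] * a[i*N + j], reduced mod
// FIELD_SIZE, then translated to base 256 in groups of 8 digits with one
// carry bit per group collected into output[N].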
void SWIFFTSum(const swift_int32_t *input, int m, unsigned char *output, const swift_int16_t *a)
{
int i, j;
swift_int32_t result[N];
register swift_int16_t carry = 0;
for (j = 0; j < N; ++j)
{
register swift_int32_t sum = 0;
const register swift_int32_t *f = input + j;
const register swift_int16_t *k = a + j;
for (i = 0; i < m; i++, f += N,k += N)
{
sum += (*f) * (*k);
}
result[j] = sum;
}
for (j = 0; j < N; ++j)
{
result[j] = ((FIELD_SIZE << 22) + result[j]) % FIELD_SIZE;
}
for (j = 0; j < 8; ++j)
{
register int carryBit = TranslateToBase256(result + (j << 3), output + (j << 3));
carry |= carryBit << j;
}
output[N] = carry;
}
// On entry input is interleaved 4x64 (4 lanes of 64 bit words), so buffer
// sizes scale by 4 lanes.
// Note: the 4-way conversion of this function is incomplete. The body below
// still calls the scalar SWIFFTFFT & SWIFFTSum, so the scalar fftOut
// declaration is the one that matches.
void ComputeSingleSWIFFTX_4way( unsigned char input[SWIFFTX_INPUT_BLOCK_SIZE],
                       unsigned char output[SWIFFTX_OUTPUT_BLOCK_SIZE],
                       bool doSmooth)
{
   int i;
   // Will store the result of the FFT parts:
   swift_int32_t fftOut[N * M];
unsigned char intermediate[N * 3 + 8];
unsigned char carry0,carry1,carry2;
// Do the three SWIFFTs while remembering the three carry bytes (each carry
// byte gets overridden by the following SWIFFT):
// 1. Compute the FFT of the input - the common part for the first 3 SWIFFTs:
SWIFFTFFT(input, M, fftOut);
// 2. Compute the sums of the 3 SWIFFTs, each using a different set of coefficients:
// 2a. The first SWIFFT:
SWIFFTSum(fftOut, M, intermediate, As);
// Remember the carry byte:
carry0 = intermediate[N];
// 2b. The second one:
SWIFFTSum(fftOut, M, intermediate + N, As + (M * N));
carry1 = intermediate[2 * N];
// 2c. The third one:
SWIFFTSum(fftOut, M, intermediate + (2 * N), As + 2 * (M * N));
carry2 = intermediate[3 * N];
//2d. Put three carry bytes in their place
intermediate[3 * N] = carry0;
intermediate[(3 * N) + 1] = carry1;
intermediate[(3 * N) + 2] = carry2;
// Padding intermediate output with 5 zeroes.
memset(intermediate + (3 * N) + 3, 0, 5);
// Apply the S-Box:
for (i = 0; i < (3 * N) + 8; ++i)
{
intermediate[i] = SBox[intermediate[i]];
}
// 3. The final and last SWIFFT:
SWIFFTFFT(intermediate, 3 * (N/8) + 1, fftOut);
SWIFFTSum(fftOut, 3 * (N/8) + 1, output, As);
if (doSmooth)
{
unsigned char sum[N];
register int i, j;
memset(sum, 0, N);
for (i = 0; i < (N + 1) * 8; ++i)
{
register const swift_int16_t *AsRow;
register int AShift;
if (!(output[i >> 3] & (1 << (i & 7))))
{
continue;
}
AsRow = As + N * M + (i & ~(N - 1)) ;
AShift = i & 63;
for (j = AShift; j < N; ++j)
{
sum[j] += AsRow[j - AShift];
}
for(j = 0; j < AShift; ++j)
{
sum[j] -= AsRow[N - AShift + j];
}
}
for (i = 0; i < N; ++i)
{
output[i] = sum[i];
}
output[N] = 0;
}
}

1243
algo/swifftx/swifftx.c Normal file

File diff suppressed because it is too large

1155
algo/swifftx/swifftx.c.bak Normal file

File diff suppressed because it is too large

78
algo/swifftx/swifftx.h Normal file
View File

@@ -0,0 +1,78 @@
///////////////////////////////////////////////////////////////////////////////////////////////
//
// SWIFFTX ANSI C OPTIMIZED 32BIT IMPLEMENTATION FOR NIST SHA-3 COMPETITION
//
// SWIFFTX.h
//
// October 2008
//
// This file is the exact copy from the reference implementation.
//
///////////////////////////////////////////////////////////////////////////////////////////////
#ifndef __SWIFFTX__
#define __SWIFFTX__
#ifdef __cplusplus
extern "C"{
#endif
// See the remarks concerning compatibility issues inside stdint.h.
//#include <stdint.h>
#include <stdbool.h>
#include "stdint.h"
//#include "stdbool.h"
//#include "SHA3swift.h"
// The size of SWIFFTX input in bytes.
#define SWIFFTX_INPUT_BLOCK_SIZE 256
// The size of output block in bytes. The compression function of SWIFFT outputs a block of
// this size (i.e., this is the size of the resulting hash value).
#define SWIFFTX_OUTPUT_BLOCK_SIZE 65
// Computes the result of a single SWIFFT operation.
// This is the simple implementation, where our main concern is to show our design principles.
// It is made more efficient in the optimized version, by using FFT instead of DFT, and
// through other speed-up techniques.
//
// Parameters:
// - input: the input string. Consists of 8*m input bytes, where each octet passes the DFT
// processing.
// - m: the length of the input in bytes.
// - output: the resulting hash value of SWIFFT, of size 65 bytes (520 bit). This is the
// result of summing the dot products of the DFTS with the A's after applying the base
// change transformation
// - A: the A's coefficients to work with (since every SWIFFT in SWIFFTX uses different As).
// A single application of SWIFFT uses 64*m A's.
void ComputeSingleSWIFFT(unsigned char *input, unsigned short m,
unsigned char output[SWIFFTX_OUTPUT_BLOCK_SIZE],
const swift_int16_t *a);
// Computes the result of a single SWIFFTX operation.
// NOTE: for simplicity we use 'ComputeSingleSWIFFT()' as a subroutine. This is only to show
// the design idea. In the optimized versions we don't do this for efficiency concerns, since
// there we compute the first part (which doesn't involve the A coefficients) only once for all
// of the 3 invocations of SWIFFT. This enables us to introduce a significant speedup.
//
// Parameters:
// - input: the input block of 256 bytes (2048 bit).
// - output: the resulting hash value of SWIFFTX, of size 65 bytes (520 bit).
// - doSmooth: if true (the _smooth variant), a final smoothing stage is
//   performed and the effective output size is 512 bit.
void ComputeSingleSWIFFTX( unsigned char input[SWIFFTX_INPUT_BLOCK_SIZE],
unsigned char output[SWIFFTX_OUTPUT_BLOCK_SIZE] );
void ComputeSingleSWIFFTX_smooth( unsigned char input[SWIFFTX_INPUT_BLOCK_SIZE],
unsigned char output[SWIFFTX_OUTPUT_BLOCK_SIZE], bool doSmooth);
// Calculates the powers of OMEGA and generates the bit reversal permutation.
// You must call this function before doing SWIFFT/X, otherwise you will get zeroes everywhere.
void InitializeSWIFFTX();
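// Typical call sequence (sketch):
//    InitializeSWIFFTX();                           // once, before any hashing
//    unsigned char in[SWIFFTX_INPUT_BLOCK_SIZE];    // 256 byte message block
//    unsigned char out[SWIFFTX_OUTPUT_BLOCK_SIZE];  // 65 byte digest
//    ComputeSingleSWIFFTX( in, out );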
#ifdef __cplusplus
}
#endif
#endif // __SWIFFTX__

View File

@@ -45,12 +45,12 @@ void init_tt8_4way_ctx()
void timetravel_4way_hash(void *output, const void *input)
{
uint64_t hash0[8] __attribute__ ((aligned (64)));
uint64_t hash1[8] __attribute__ ((aligned (64)));
uint64_t hash2[8] __attribute__ ((aligned (64)));
uint64_t hash3[8] __attribute__ ((aligned (64)));
uint64_t vhashX[8*4] __attribute__ ((aligned (64)));
uint64_t vhashY[8*4] __attribute__ ((aligned (64)));
uint64_t hash0[10] __attribute__ ((aligned (64)));
uint64_t hash1[10] __attribute__ ((aligned (64)));
uint64_t hash2[10] __attribute__ ((aligned (64)));
uint64_t hash3[10] __attribute__ ((aligned (64)));
uint64_t vhashX[10*4] __attribute__ ((aligned (64)));
uint64_t vhashY[10*4] __attribute__ ((aligned (64)));
uint64_t *vhashA, *vhashB;
tt8_4way_ctx_holder ctx __attribute__ ((aligned (64)));
uint32_t dataLen = 64;

View File

@@ -51,12 +51,12 @@ void init_tt10_4way_ctx()
void timetravel10_4way_hash(void *output, const void *input)
{
uint64_t hash0[8] __attribute__ ((aligned (64)));
uint64_t hash1[8] __attribute__ ((aligned (64)));
uint64_t hash2[8] __attribute__ ((aligned (64)));
uint64_t hash3[8] __attribute__ ((aligned (64)));
uint64_t vhashX[8*4] __attribute__ ((aligned (64)));
uint64_t vhashY[8*4] __attribute__ ((aligned (64)));
uint64_t hash0[10] __attribute__ ((aligned (64)));
uint64_t hash1[10] __attribute__ ((aligned (64)));
uint64_t hash2[10] __attribute__ ((aligned (64)));
uint64_t hash3[10] __attribute__ ((aligned (64)));
uint64_t vhashX[10*4] __attribute__ ((aligned (64)));
uint64_t vhashY[10*4] __attribute__ ((aligned (64)));
uint64_t *vhashA, *vhashB;
tt10_4way_ctx_holder ctx __attribute__ ((aligned (64)));
uint32_t dataLen = 64;

View File

@@ -3,22 +3,121 @@
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#if defined(TRIBUS_4WAY)
#include "algo/jh/jh-hash-4way.h"
#include "algo/keccak/keccak-hash-4way.h"
#include "algo/echo/aes_ni/hash_api.h"
//hashState_echo tribus_4way_ctx __attribute__ ((aligned (64)));
static __thread jh512_4way_context ctx_mid;
/*
void init_tribus_4way_ctx()
#if defined(TRIBUS_8WAY)
static __thread jh512_8way_context ctx_mid;
void tribus_hash_8way( void *state, const void *input )
{
init_echo( &tribus_4way_ctx, 512 );
uint64_t vhash[8*8] __attribute__ ((aligned (128)));
uint64_t hash0[8] __attribute__ ((aligned (64)));
uint64_t hash1[8] __attribute__ ((aligned (64)));
uint64_t hash2[8] __attribute__ ((aligned (64)));
uint64_t hash3[8] __attribute__ ((aligned (64)));
uint64_t hash4[8] __attribute__ ((aligned (64)));
uint64_t hash5[8] __attribute__ ((aligned (64)));
uint64_t hash6[8] __attribute__ ((aligned (64)));
uint64_t hash7[8] __attribute__ ((aligned (64)));
jh512_8way_context ctx_jh;
keccak512_8way_context ctx_keccak;
hashState_echo ctx_echo;
memcpy( &ctx_jh, &ctx_mid, sizeof(ctx_mid) );
jh512_8way_update( &ctx_jh, input + (64<<3), 16 );
jh512_8way_close( &ctx_jh, vhash );
keccak512_8way_init( &ctx_keccak );
keccak512_8way_update( &ctx_keccak, vhash, 64 );
keccak512_8way_close( &ctx_keccak, vhash );
dintrlv_8x64( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
vhash, 512 );
// hash echo serially
init_echo( &ctx_echo, 512 );
update_final_echo( &ctx_echo, (BitSequence *) hash0,
(const BitSequence *) hash0, 512 );
init_echo( &ctx_echo, 512 );
update_final_echo( &ctx_echo, (BitSequence *) hash1,
(const BitSequence *) hash1, 512 );
init_echo( &ctx_echo, 512 );
update_final_echo( &ctx_echo, (BitSequence *) hash2,
(const BitSequence *) hash2, 512 );
init_echo( &ctx_echo, 512 );
update_final_echo( &ctx_echo, (BitSequence *) hash3,
(const BitSequence *) hash3, 512 );
init_echo( &ctx_echo, 512 );
update_final_echo( &ctx_echo, (BitSequence *) hash4,
(const BitSequence *) hash4, 512 );
init_echo( &ctx_echo, 512 );
update_final_echo( &ctx_echo, (BitSequence *) hash5,
(const BitSequence *) hash5, 512 );
init_echo( &ctx_echo, 512 );
update_final_echo( &ctx_echo, (BitSequence *) hash6,
(const BitSequence *) hash6, 512 );
init_echo( &ctx_echo, 512 );
update_final_echo( &ctx_echo, (BitSequence *) hash7,
(const BitSequence *) hash7, 512 );
memcpy( state, hash0, 32 );
memcpy( state+32, hash1, 32 );
memcpy( state+64, hash2, 32 );
memcpy( state+96, hash3, 32 );
memcpy( state+128, hash4, 32 );
memcpy( state+160, hash5, 32 );
memcpy( state+192, hash6, 32 );
memcpy( state+224, hash7, 32 );
}
*/
void tribus_hash_4way(void *state, const void *input)
int scanhash_tribus_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
uint32_t n = pdata[19];
__m512i *noncev = (__m512i*)vdata + 9; // aligned
int thr_id = mythr->id;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
jh512_8way_init( &ctx_mid );
jh512_8way_update( &ctx_mid, vdata, 64 );
do {
*noncev = mm512_intrlv_blend_32( mm512_bswap_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ) ), *noncev );
tribus_hash_8way( hash, vdata );
pdata[19] = n;
for ( int i = 0; i < 8; i++ )
if ( (hash+(i<<3))[7] < Htarg )
if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
}
n += 8;
} while ( ( n < max_nonce-8 ) && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce;
return 0;
}
#elif defined(TRIBUS_4WAY)
static __thread jh512_4way_context ctx_mid;
void tribus_hash_4way( void *state, const void *input )
{
uint64_t hash0[8] __attribute__ ((aligned (64)));
uint64_t hash1[8] __attribute__ ((aligned (64)));
@@ -30,11 +129,11 @@ void tribus_hash_4way(void *state, const void *input)
hashState_echo ctx_echo;
memcpy( &ctx_jh, &ctx_mid, sizeof(ctx_mid) );
jh512_4way( &ctx_jh, input + (64<<2), 16 );
jh512_4way_update( &ctx_jh, input + (64<<2), 16 );
jh512_4way_close( &ctx_jh, vhash );
keccak512_4way_init( &ctx_keccak );
keccak512_4way( &ctx_keccak, vhash, 64 );
keccak512_4way_update( &ctx_keccak, vhash, 64 );
keccak512_4way_close( &ctx_keccak, vhash );
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
@@ -60,7 +159,7 @@ void tribus_hash_4way(void *state, const void *input)
}
int scanhash_tribus_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr)
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*8] __attribute__ ((aligned (64)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
@@ -70,35 +169,13 @@ int scanhash_tribus_4way( struct work *work, uint32_t max_nonce,
const uint32_t Htarg = ptarget[7];
uint32_t n = pdata[19];
__m256i *noncev = (__m256i*)vdata + 9; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
uint64_t htmax[] = { 0,
0xF,
0xFF,
0xFFF,
0xFFFF,
0x10000000 };
uint32_t masks[] = { 0xFFFFFFFF,
0xFFFFFFF0,
0xFFFFFF00,
0xFFFFF000,
0xFFFF0000,
0 };
int thr_id = mythr->id;
mm256_bswap32_intrlv80_4x64( vdata, pdata );
// precalc midstate
// Doing it one way then interleaving would be faster but it's too
// complicated to interleave the context.
jh512_4way_init( &ctx_mid );
jh512_4way( &ctx_mid, vdata, 64 );
jh512_4way_update( &ctx_mid, vdata, 64 );
for ( int m = 0; m < 6; m++ )
{
if ( Htarg <= htmax[m] )
{
uint32_t mask = masks[m];
do {
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
@@ -108,19 +185,16 @@ int scanhash_tribus_4way( struct work *work, uint32_t max_nonce,
pdata[19] = n;
for ( int i = 0; i < 4; i++ )
if ( ( !( (hash+(i<<3))[7] & mask ) )
&& fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
if ( (hash+(i<<3))[7] < Htarg )
if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
}
n += 4;
} while ( ( n < max_nonce ) && !work_restart[thr_id].restart);
break;
}
}
} while ( ( n < max_nonce-4 ) && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
*hashes_done = n - first_nonce;
return 0;
}

View File

@@ -2,9 +2,11 @@
bool register_tribus_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT;
#if defined (TRIBUS_4WAY)
// init_tribus_4way_ctx();
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT;
#if defined (TRIBUS_8WAY)
gate->scanhash = (void*)&scanhash_tribus_8way;
gate->hash = (void*)&tribus_hash_8way;
#elif defined (TRIBUS_4WAY)
gate->scanhash = (void*)&scanhash_tribus_4way;
gate->hash = (void*)&tribus_hash_4way;
#else

View File

@@ -1,16 +1,23 @@
#ifndef TRIBUS_GATE_H__
#define TRIBUS_GATE_H__
#define TRIBUS_GATE_H__ 1
#include "algo-gate-api.h"
#include <stdint.h>
#if defined(__AVX2__) && defined(__AES__)
#define TRIBUS_4WAY
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define TRIBUS_8WAY 1
#elif defined(__AVX2__) && defined(__AES__)
#define TRIBUS_4WAY 1
#endif
#if defined(TRIBUS_4WAY)
#if defined(TRIBUS_8WAY)
//void init_tribus_4way_ctx();
void tribus_hash_8way( void *state, const void *input );
int scanhash_tribus_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined(TRIBUS_4WAY)
void tribus_hash_4way( void *state, const void *input );

View File

@@ -108,7 +108,7 @@ void x12_4way_hash( void *state, const void *input )
intrlv_2x128( vhash, hash2, hash3, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
intrlv_2x128( hash2, hash3, vhash, 512 );
dintrlv_2x128( hash2, hash3, vhash, 512 );
// 8 Cubehash
cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 );

311
algo/x22/x22i-4way.c Normal file
View File

@@ -0,0 +1,311 @@
#include "x22i-gate.h"
#if defined(X22I_4WAY)
#include "algo/blake/blake-hash-4way.h"
#include "algo/bmw/bmw-hash-4way.h"
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#include "algo/skein/skein-hash-4way.h"
#include "algo/jh/jh-hash-4way.h"
#include "algo/keccak/keccak-hash-4way.h"
#include "algo/luffa/luffa-hash-2way.h"
#include "algo/cubehash/cube-hash-2way.h"
#include "algo/shavite/shavite-hash-2way.h"
#include "algo/simd/simd-hash-2way.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/hamsi/hamsi-hash-4way.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/shabal/shabal-hash-4way.h"
#include "algo/whirlpool/sph_whirlpool.h"
#include "algo/sha/sha-hash-4way.h"
#include "algo/haval/haval-hash-4way.h"
#include "algo/tiger/sph_tiger.h"
#include "algo/lyra2/lyra2.h"
#include "algo/gost/sph_gost.h"
#include "algo/swifftx/swifftx.h"
union _x22i_4way_ctx_overlay
{
blake512_4way_context blake;
bmw512_4way_context bmw;
hashState_groestl groestl;
hashState_echo echo;
skein512_4way_context skein;
jh512_4way_context jh;
keccak512_4way_context keccak;
luffa_2way_context luffa;
cube_2way_context cube;
shavite512_2way_context shavite;
simd_2way_context simd;
hamsi512_4way_context hamsi;
sph_fugue512_context fugue;
shabal512_4way_context shabal;
sph_whirlpool_context whirlpool;
sha512_4way_context sha512;
haval256_5_4way_context haval;
sph_tiger_context tiger;
sph_gost512_context gost;
sha256_4way_context sha256;
};
typedef union _x22i_4way_ctx_overlay x22i_ctx_overlay;
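// The contexts overlay each other in a union: only one hash function is in
// flight at any time, so they can share storage and keep the stack footprint
// down.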
void x22i_4way_hash( void *output, const void *input )
{
uint64_t hash0[8*4] __attribute__ ((aligned (64)));
uint64_t hash1[8*4] __attribute__ ((aligned (64)));
uint64_t hash2[8*4] __attribute__ ((aligned (64)));
uint64_t hash3[8*4] __attribute__ ((aligned (64)));
uint64_t vhash[8*4] __attribute__ ((aligned (64)));
uint64_t vhashA[8*4] __attribute__ ((aligned (64)));
uint64_t vhashB[8*4] __attribute__ ((aligned (64)));
// unsigned char hash[64 * 4] __attribute__((aligned(64))) = {0};
unsigned char hashA0[64] __attribute__((aligned(64))) = {0};
unsigned char hashA1[64] __attribute__((aligned(32))) = {0};
unsigned char hashA2[64] __attribute__((aligned(32))) = {0};
unsigned char hashA3[64] __attribute__((aligned(32))) = {0};
x22i_ctx_overlay ctx;
blake512_4way_init( &ctx.blake );
blake512_4way( &ctx.blake, input, 80 );
blake512_4way_close( &ctx.blake, vhash );
bmw512_4way_init( &ctx.bmw );
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash0,
(const char*)hash0, 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash1,
(const char*)hash1, 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash2,
(const char*)hash2, 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash3,
(const char*)hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
skein512_4way_init( &ctx.skein );
skein512_4way( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhash );
jh512_4way_init( &ctx.jh );
jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhash );
keccak512_4way_init( &ctx.keccak );
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
luffa_2way_init( &ctx.luffa, 512 );
luffa_2way_update_close( &ctx.luffa, vhashB, vhashB, 64 );
cube_2way_init( &ctx.cube, 512, 16, 32 );
cube_2way_update_close( &ctx.cube, vhashA, vhashA, 64 );
cube_2way_init( &ctx.cube, 512, 16, 32 );
cube_2way_update_close( &ctx.cube, vhashB, vhashB, 64 );
shavite512_2way_init( &ctx.shavite );
shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, 64 );
shavite512_2way_init( &ctx.shavite );
shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashA, vhashA, 512 );
simd_2way_init( &ctx.simd, 512 );
simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );
dintrlv_2x128_512( hash0, hash1, vhashA );
dintrlv_2x128_512( hash2, hash3, vhashB );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash0,
(const BitSequence*)hash0, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash1,
(const BitSequence*)hash1, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash2,
(const BitSequence*)hash2, 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash3,
(const BitSequence*)hash3, 512 );
intrlv_4x64_512( vhash, hash0, hash1, hash2, hash3 );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
intrlv_4x32_512( vhash, hash0, hash1, hash2, hash3 );
shabal512_4way_init( &ctx.shabal );
shabal512_4way( &ctx.shabal, vhash, 64 );
shabal512_4way_close( &ctx.shabal, vhash );
dintrlv_4x32_512( &hash0[8], &hash1[8], &hash2[8], &hash3[8], vhash );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, &hash0[8], 64 );
sph_whirlpool_close( &ctx.whirlpool, &hash0[16] );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, &hash1[8], 64 );
sph_whirlpool_close( &ctx.whirlpool, &hash1[16] );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, &hash2[8], 64 );
sph_whirlpool_close( &ctx.whirlpool, &hash2[16] );
sph_whirlpool_init( &ctx.whirlpool );
sph_whirlpool( &ctx.whirlpool, &hash3[8], 64 );
sph_whirlpool_close( &ctx.whirlpool, &hash3[16] );
intrlv_4x64_512( vhash, &hash0[16], &hash1[16], &hash2[16], &hash3[16] );
sha512_4way_init( &ctx.sha512 );
sha512_4way( &ctx.sha512, vhash, 64 );
sha512_4way_close( &ctx.sha512, vhash );
dintrlv_4x64_512( &hash0[24], &hash1[24], &hash2[24], &hash3[24], vhash );
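// hash0..hash3 each now hold the 256-byte SWIFFTX input block: fugue at
// bytes 0..63, shabal at 64..127, whirlpool at 128..191, sha512 at 192..255.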
// InitializeSWIFFTX();
ComputeSingleSWIFFTX((unsigned char*)hash0, (unsigned char*)hashA0);
ComputeSingleSWIFFTX((unsigned char*)hash1, (unsigned char*)hashA1);
ComputeSingleSWIFFTX((unsigned char*)hash2, (unsigned char*)hashA2);
ComputeSingleSWIFFTX((unsigned char*)hash3, (unsigned char*)hashA3);
intrlv_4x32_512( vhashA, hashA0, hashA1, hashA2, hashA3 );
memset( vhash, 0, 64*4 );
haval256_5_4way_init( &ctx.haval );
haval256_5_4way( &ctx.haval, vhashA, 64 );
haval256_5_4way_close( &ctx.haval, vhash );
dintrlv_4x32_512( hash0, hash1, hash2, hash3, vhash );
memset( hashA0, 0, 64 );
memset( hashA1, 0, 64 );
memset( hashA2, 0, 64 );
memset( hashA3, 0, 64 );
sph_tiger_init(&ctx.tiger);
sph_tiger (&ctx.tiger, (const void*) hash0, 64);
sph_tiger_close(&ctx.tiger, (void*) hashA0);
sph_tiger_init(&ctx.tiger);
sph_tiger (&ctx.tiger, (const void*) hash1, 64);
sph_tiger_close(&ctx.tiger, (void*) hashA1);
sph_tiger_init(&ctx.tiger);
sph_tiger (&ctx.tiger, (const void*) hash2, 64);
sph_tiger_close(&ctx.tiger, (void*) hashA2);
sph_tiger_init(&ctx.tiger);
sph_tiger (&ctx.tiger, (const void*) hash3, 64);
sph_tiger_close(&ctx.tiger, (void*) hashA3);
memset( hash0, 0, 64 );
memset( hash1, 0, 64 );
memset( hash2, 0, 64 );
memset( hash3, 0, 64 );
LYRA2RE( (void*) hash0, 32, (const void*) hashA0, 32, (const void*) hashA0,
32, 1, 4, 4 );
LYRA2RE( (void*) hash1, 32, (const void*) hashA1, 32, (const void*) hashA1,
32, 1, 4, 4 );
LYRA2RE( (void*) hash2, 32, (const void*) hashA2, 32, (const void*) hashA2,
32, 1, 4, 4 );
LYRA2RE( (void*) hash3, 32, (const void*) hashA3, 32, (const void*) hashA3,
32, 1, 4, 4 );
sph_gost512_init(&ctx.gost);
sph_gost512 (&ctx.gost, (const void*) hash0, 64);
sph_gost512_close(&ctx.gost, (void*) hash0);
sph_gost512_init(&ctx.gost);
sph_gost512 (&ctx.gost, (const void*) hash1, 64);
sph_gost512_close(&ctx.gost, (void*) hash1);
sph_gost512_init(&ctx.gost);
sph_gost512 (&ctx.gost, (const void*) hash2, 64);
sph_gost512_close(&ctx.gost, (void*) hash2);
sph_gost512_init(&ctx.gost);
sph_gost512 (&ctx.gost, (const void*) hash3, 64);
sph_gost512_close(&ctx.gost, (void*) hash3);
intrlv_4x32_512( vhash, hash0, hash1, hash2, hash3 );
sha256_4way_init( &ctx.sha256 );
sha256_4way( &ctx.sha256, vhash, 64 );
sha256_4way_close( &ctx.sha256, output );
// memcpy(output, hash, 32);
}
int scanhash_x22i_4way( struct work* work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*16] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[7<<2]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 9; // aligned
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const uint32_t Htarg = ptarget[7];
if (opt_benchmark)
((uint32_t*)ptarget)[7] = 0x08ff;
InitializeSWIFFTX();
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
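// The four lane nonces are byte swapped and blended into the interleaved
// data in place; the blend leaves the adjacent header words untouched.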
x22i_4way_hash( hash, vdata );
for ( int lane = 0; lane < 4; lane++ )
if ( unlikely( hash7[ lane ] <= Htarg ) )
{
extr_lane_4x32( lane_hash, hash, lane, 256 );
if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
}
}
n += 4;
} while ( likely( ( n < max_nonce - 4 ) && !work_restart[thr_id].restart ) );
*hashes_done = n - first_nonce + 1;
return 0;
}
#endif // X22I_4WAY

28
algo/x22/x22i-gate.c Normal file
View File

@@ -0,0 +1,28 @@
#include "x22i-gate.h"
bool register_x22i_algo( algo_gate_t* gate )
{
#if defined (X22I_4WAY)
gate->scanhash = (void*)&scanhash_x22i_4way;
gate->hash = (void*)&x22i_4way_hash;
#else
gate->scanhash = (void*)&scanhash_x22i;
gate->hash = (void*)&x22i_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | SHA_OPT;
return true;
}
bool register_x25x_algo( algo_gate_t* gate )
{
#if defined (X22I_4WAY)
gate->scanhash = (void*)&scanhash_x25x_4way;
gate->hash = (void*)&x25x_4way_hash;
#else
gate->scanhash = (void*)&scanhash_x25x;
gate->hash = (void*)&x25x_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | SHA_OPT;
return true;
}

35
algo/x22/x22i-gate.h Normal file
View File

@@ -0,0 +1,35 @@
#ifndef X22I_GATE_H__
#define X22I_GATE_H__ 1
#include "algo-gate-api.h"
#include "simd-utils.h"
#include <stdint.h>
#include <unistd.h>
#if defined(__AVX2__) && defined(__AES__)
#define X22I_4WAY
#endif
bool register_x22i_algo( algo_gate_t* gate );
bool register_x25x_algo( algo_gate_t* gate );
#if defined(X22I_4WAY)
void x22i_4way_hash( void *state, const void *input );
int scanhash_x22i_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void x25x_4way_hash( void *state, const void *input );
int scanhash_x25x_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
void x22i_hash( void *state, const void *input );
int scanhash_x22i( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void x25x_hash( void *state, const void *input );
int scanhash_x25x( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif // X22I_GATE_H__

202
algo/x22/x22i.c Normal file
View File

@@ -0,0 +1,202 @@
#include "algo/blake/sph_blake.h"
#include "algo/bmw/sph_bmw.h"
#if defined(__AES__)
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#else
#include "algo/groestl/sph_groestl.h"
#include "algo/echo/sph_echo.h"
#endif
#include "algo/skein/sph_skein.h"
#include "algo/jh/sph_jh.h"
#include "algo/keccak/sph_keccak.h"
#include "algo/luffa/luffa_for_sse2.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/simd/nist.h"
#include "algo/hamsi/sph_hamsi.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/shabal/sph_shabal.h"
#include "algo/whirlpool/sph_whirlpool.h"
#include <openssl/sha.h>
#include "algo/haval/sph-haval.h"
#include "algo/tiger/sph_tiger.h"
#include "algo/lyra2/lyra2.h"
#include "algo/gost/sph_gost.h"
#include "algo/swifftx/swifftx.h"
#include "x22i-gate.h"
union _x22i_context_overlay
{
sph_blake512_context blake;
sph_bmw512_context bmw;
#if defined(__AES__)
hashState_groestl groestl;
hashState_echo echo;
#else
sph_groestl512_context groestl;
sph_echo512_context echo;
#endif
sph_jh512_context jh;
sph_keccak512_context keccak;
sph_skein512_context skein;
hashState_luffa luffa;
cubehashParam cube;
sph_shavite512_context shavite;
hashState_sd simd;
sph_hamsi512_context hamsi;
sph_fugue512_context fugue;
sph_shabal512_context shabal;
sph_whirlpool_context whirlpool;
SHA512_CTX sha512;
sph_haval256_5_context haval;
sph_tiger_context tiger;
sph_gost512_context gost;
SHA256_CTX sha256;
};
typedef union _x22i_context_overlay x22i_context_overlay;
void x22i_hash( void *output, const void *input )
{
unsigned char hash[64 * 4] __attribute__((aligned(64))) = {0};
unsigned char hash2[65] __attribute__((aligned(64))) = {0};
x22i_context_overlay ctx;
sph_blake512_init(&ctx.blake);
sph_blake512(&ctx.blake, input, 80);
sph_blake512_close(&ctx.blake, hash);
sph_bmw512_init(&ctx.bmw);
sph_bmw512(&ctx.bmw, (const void*) hash, 64);
sph_bmw512_close(&ctx.bmw, hash);
#if defined(__AES__)
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash,
(const char*)hash, 512 );
#else
sph_groestl512_init( &ctx.groestl );
sph_groestl512( &ctx.groestl, hash, 64 );
sph_groestl512_close( &ctx.groestl, hash );
#endif
sph_skein512_init(&ctx.skein);
sph_skein512(&ctx.skein, (const void*) hash, 64);
sph_skein512_close(&ctx.skein, hash);
sph_jh512_init(&ctx.jh);
sph_jh512(&ctx.jh, (const void*) hash, 64);
sph_jh512_close(&ctx.jh, hash);
sph_keccak512_init(&ctx.keccak);
sph_keccak512(&ctx.keccak, (const void*) hash, 64);
sph_keccak512_close(&ctx.keccak, hash);
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)hash,
(const BitSequence*)hash, 64 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) hash,
(const byte*)hash, 64 );
sph_shavite512_init(&ctx.shavite);
sph_shavite512(&ctx.shavite, (const void*) hash, 64);
sph_shavite512_close(&ctx.shavite, hash);
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence*)hash,
(const BitSequence*)hash, 512 );
#if defined(__AES__)
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)hash,
(const BitSequence*)hash, 512 );
#else
sph_echo512_init( &ctx.echo );
sph_echo512( &ctx.echo, hash, 64 );
sph_echo512_close( &ctx.echo, hash );
#endif
sph_hamsi512_init(&ctx.hamsi);
sph_hamsi512(&ctx.hamsi, (const void*) hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash);
sph_fugue512_init(&ctx.fugue);
sph_fugue512(&ctx.fugue, (const void*) hash, 64);
sph_fugue512_close(&ctx.fugue, hash);
sph_shabal512_init(&ctx.shabal);
sph_shabal512(&ctx.shabal, (const void*) hash, 64);
sph_shabal512_close(&ctx.shabal, &hash[64]);
sph_whirlpool_init(&ctx.whirlpool);
sph_whirlpool (&ctx.whirlpool, (const void*) &hash[64], 64);
sph_whirlpool_close(&ctx.whirlpool, &hash[128]);
SHA512_Init( &ctx.sha512 );
SHA512_Update( &ctx.sha512, (const void*) &hash[128], 64);
SHA512_Final( (void*) &hash[192], &ctx.sha512 );
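// hash now holds the 256-byte SWIFFTX input: fugue at bytes 0..63, shabal at
// 64..127, whirlpool at 128..191, sha512 at 192..255.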
ComputeSingleSWIFFTX((unsigned char*)hash, (unsigned char*)hash2);
memset(hash, 0, 64);
sph_haval256_5_init(&ctx.haval);
sph_haval256_5(&ctx.haval,(const void*) hash2, 64);
sph_haval256_5_close(&ctx.haval,hash);
memset(hash2, 0, 64);
sph_tiger_init(&ctx.tiger);
sph_tiger (&ctx.tiger, (const void*) hash, 64);
sph_tiger_close(&ctx.tiger, (void*) hash2);
memset(hash, 0, 64);
LYRA2RE((void*) hash, 32, (const void*) hash2, 32, (const void*) hash2, 32, 1, 4, 4);
sph_gost512_init(&ctx.gost);
sph_gost512 (&ctx.gost, (const void*) hash, 64);
sph_gost512_close(&ctx.gost, (void*) hash);
SHA256_Init( &ctx.sha256 );
SHA256_Update( &ctx.sha256, (const void*) hash, 64 );
SHA256_Final( (unsigned char*) hash, &ctx.sha256 );
memcpy(output, hash, 32);
}
int scanhash_x22i( struct work* work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t hash[8] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
uint32_t n = first_nonce;
const int thr_id = mythr->id;
if (opt_benchmark)
((uint32_t*)ptarget)[7] = 0x08ff;
for (int k=0; k < 20; k++)
be32enc(&endiandata[k], pdata[k]);
InitializeSWIFFTX();
do
{
pdata[19] = ++n;
be32enc( &endiandata[19], n );
x22i_hash( hash, endiandata );
if ( hash[7] < Htarg )
if ( fulltest( hash, ptarget ) && !opt_benchmark )
submit_solution( work, hash, mythr );
} while ( n < max_nonce && !work_restart[thr_id].restart );
*hashes_done = pdata[19] - first_nonce;
return 0;
}

402
algo/x22/x25x-4way.c Normal file
View File

@@ -0,0 +1,402 @@
#include "x22i-gate.h"
#if defined(X22I_4WAY)
#include "algo/blake/blake-hash-4way.h"
#include "algo/bmw/bmw-hash-4way.h"
#include "algo/skein/skein-hash-4way.h"
#include "algo/jh/jh-hash-4way.h"
#include "algo/keccak/keccak-hash-4way.h"
#include "algo/hamsi/hamsi-hash-4way.h"
#include "algo/shabal/shabal-hash-4way.h"
#include "algo/sha/sha-hash-4way.h"
#include "algo/haval/haval-hash-4way.h"
#include "algo/blake/blake2s-hash-4way.h"
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#include "algo/luffa/luffa_for_sse2.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/simd/nist.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/whirlpool/sph_whirlpool.h"
#include "algo/tiger/sph_tiger.h"
#include "algo/lyra2/lyra2.h"
#include "algo/gost/sph_gost.h"
#include "algo/swifftx/swifftx.h"
#include "algo/panama/sph_panama.h"
#include "algo/lanehash/lane.h"
union _x25x_4way_ctx_overlay
{
blake512_4way_context blake;
bmw512_4way_context bmw;
hashState_groestl groestl;
hashState_echo echo;
skein512_4way_context skein;
jh512_4way_context jh;
keccak512_4way_context keccak;
hashState_luffa luffa;
cubehashParam cube;
sph_shavite512_context shavite;
hashState_sd simd;
hamsi512_4way_context hamsi;
sph_fugue512_context fugue;
shabal512_4way_context shabal;
sph_whirlpool_context whirlpool;
sha512_4way_context sha512;
haval256_5_4way_context haval;
sph_tiger_context tiger;
sph_gost512_context gost;
sha256_4way_context sha256;
sph_panama_context panama;
blake2s_4way_state blake2s;
};
typedef union _x25x_4way_ctx_overlay x25x_4way_ctx_overlay;
void x25x_shuffle( void *hash )
{
// Simple shuffle algorithm, instead of just reversing
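// Each of the 12 rounds XORs every 16-bit block with a block selected by the
// value of its mirror-image block, plus a rotating round constant, mixing
// all 24 intermediate hashes together before the final blake2s pass.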
#define X25X_SHUFFLE_BLOCKS (24 * 64 / 2)
#define X25X_SHUFFLE_ROUNDS 12
static const uint16_t x25x_round_const[X25X_SHUFFLE_ROUNDS] =
{
0x142c, 0x5830, 0x678c, 0xe08c, 0x3c67, 0xd50d, 0xb1d8, 0xecb2,
0xd7ee, 0x6783, 0xfa6c, 0x4b9c
};
uint16_t* block_pointer = (uint16_t*)hash;
for ( int r = 0; r < X25X_SHUFFLE_ROUNDS; r++ )
{
for ( int i = 0; i < X25X_SHUFFLE_BLOCKS; i++ )
{
uint16_t block_value = block_pointer[ X25X_SHUFFLE_BLOCKS - i - 1 ];
block_pointer[i] ^= block_pointer[ block_value % X25X_SHUFFLE_BLOCKS ]
+ ( x25x_round_const[r] << (i % 16) );
}
}
#undef X25X_SHUFFLE_BLOCKS
#undef X25X_SHUFFLE_ROUNDS
}
void x25x_4way_hash( void *output, const void *input )
{
unsigned char hash0[25][64] __attribute__((aligned(64))) = {0};
unsigned char hash1[25][64] __attribute__((aligned(64))) = {0};
unsigned char hash2[25][64] __attribute__((aligned(64))) = {0};
unsigned char hash3[25][64] __attribute__((aligned(64))) = {0};
uint64_t vhash[8*4] __attribute__ ((aligned (64)));
unsigned char vhashA[24][64*4] __attribute__ ((aligned (64)));
x25x_4way_ctx_overlay ctx __attribute__ ((aligned (64)));
blake512_4way_init( &ctx.blake );
blake512_4way( &ctx.blake, input, 80 );
blake512_4way_close( &ctx.blake, vhash );
dintrlv_4x64_512( &hash0[0], &hash1[0], &hash2[0], &hash3[0], vhash );
bmw512_4way_init( &ctx.bmw );
bmw512_4way( &ctx.bmw, vhash, 64 );
bmw512_4way_close( &ctx.bmw, vhash );
dintrlv_4x64_512( &hash0[1], &hash1[1], &hash2[1], &hash3[1], vhash );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)&hash0[2],
(const char*)&hash0[1], 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)&hash1[2],
(const char*)&hash1[1], 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)&hash2[2],
(const char*)&hash2[1], 512 );
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)&hash3[2],
(const char*)&hash3[1], 512 );
intrlv_4x64_512( vhash, &hash0[2], &hash1[2], &hash2[2], &hash3[2] );
skein512_4way_init( &ctx.skein );
skein512_4way( &ctx.skein, vhash, 64 );
skein512_4way_close( &ctx.skein, vhash );
dintrlv_4x64_512( &hash0[3], &hash1[3], &hash2[3], &hash3[3], vhash );
jh512_4way_init( &ctx.jh );
jh512_4way( &ctx.jh, vhash, 64 );
jh512_4way_close( &ctx.jh, vhash );
dintrlv_4x64_512( &hash0[4], &hash1[4], &hash2[4], &hash3[4], vhash );
keccak512_4way_init( &ctx.keccak );
keccak512_4way( &ctx.keccak, vhash, 64 );
keccak512_4way_close( &ctx.keccak, vhash );
dintrlv_4x64_512( &hash0[5], &hash1[5], &hash2[5], &hash3[5], vhash );
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)&hash0[6],
(const BitSequence*)&hash0[5], 64 );
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)&hash1[6],
(const BitSequence*)&hash1[5], 64 );
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)&hash2[6],
(const BitSequence*)&hash2[5], 64 );
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)&hash3[6],
(const BitSequence*)&hash3[5], 64 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) &hash0[7],
(const byte*)&hash0[6], 64 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) &hash1[7],
(const byte*)&hash1[6], 64 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) &hash2[7],
(const byte*)&hash2[6], 64 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) &hash3[7],
(const byte*)&hash3[6], 64 );
sph_shavite512_init(&ctx.shavite);
sph_shavite512(&ctx.shavite, (const void*) &hash0[7], 64);
sph_shavite512_close(&ctx.shavite, &hash0[8]);
sph_shavite512_init(&ctx.shavite);
sph_shavite512(&ctx.shavite, (const void*) &hash1[7], 64);
sph_shavite512_close(&ctx.shavite, &hash1[8]);
sph_shavite512_init(&ctx.shavite);
sph_shavite512(&ctx.shavite, (const void*) &hash2[7], 64);
sph_shavite512_close(&ctx.shavite, &hash2[8]);
sph_shavite512_init(&ctx.shavite);
sph_shavite512(&ctx.shavite, (const void*) &hash3[7], 64);
sph_shavite512_close(&ctx.shavite, &hash3[8]);
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence*)&hash0[9],
(const BitSequence*)&hash0[8], 512 );
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence*)&hash1[9],
(const BitSequence*)&hash1[8], 512 );
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence*)&hash2[9],
(const BitSequence*)&hash2[8], 512 );
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence*)&hash3[9],
(const BitSequence*)&hash3[8], 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)&hash0[10],
(const BitSequence*)&hash0[9], 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)&hash1[10],
(const BitSequence*)&hash1[9], 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)&hash2[10],
(const BitSequence*)&hash2[9], 512 );
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)&hash3[10],
(const BitSequence*)&hash3[9], 512 );
intrlv_4x64_512( vhash, &hash0[10], &hash1[10], &hash2[10], &hash3[10] );
hamsi512_4way_init( &ctx.hamsi );
hamsi512_4way( &ctx.hamsi, vhash, 64 );
hamsi512_4way_close( &ctx.hamsi, vhash );
dintrlv_4x64_512( &hash0[11], &hash1[11], &hash2[11], &hash3[11], vhash );
sph_fugue512_init(&ctx.fugue);
sph_fugue512(&ctx.fugue, (const void*) &hash0[11], 64);
sph_fugue512_close(&ctx.fugue, &hash0[12]);
sph_fugue512_init(&ctx.fugue);
sph_fugue512(&ctx.fugue, (const void*) &hash1[11], 64);
sph_fugue512_close(&ctx.fugue, &hash1[12]);
sph_fugue512_init(&ctx.fugue);
sph_fugue512(&ctx.fugue, (const void*) &hash2[11], 64);
sph_fugue512_close(&ctx.fugue, &hash2[12]);
sph_fugue512_init(&ctx.fugue);
sph_fugue512(&ctx.fugue, (const void*) &hash3[11], 64);
sph_fugue512_close(&ctx.fugue, &hash3[12]);
intrlv_4x32_512( vhash, &hash0[12], &hash1[12], &hash2[12], &hash3[12] );
shabal512_4way_init( &ctx.shabal );
shabal512_4way( &ctx.shabal, vhash, 64 );
shabal512_4way_close( &ctx.shabal, vhash );
dintrlv_4x32_512( &hash0[13], &hash1[13], &hash2[13], &hash3[13], vhash );
sph_whirlpool_init(&ctx.whirlpool);
sph_whirlpool (&ctx.whirlpool, (const void*) &hash0[13], 64);
sph_whirlpool_close(&ctx.whirlpool, &hash0[14]);
sph_whirlpool_init(&ctx.whirlpool);
sph_whirlpool (&ctx.whirlpool, (const void*) &hash1[13], 64);
sph_whirlpool_close(&ctx.whirlpool, &hash1[14]);
sph_whirlpool_init(&ctx.whirlpool);
sph_whirlpool (&ctx.whirlpool, (const void*) &hash2[13], 64);
sph_whirlpool_close(&ctx.whirlpool, &hash2[14]);
sph_whirlpool_init(&ctx.whirlpool);
sph_whirlpool (&ctx.whirlpool, (const void*) &hash3[13], 64);
sph_whirlpool_close(&ctx.whirlpool, &hash3[14]);
intrlv_4x64_512( vhash, &hash0[14], &hash1[14], &hash2[14], &hash3[14] );
sha512_4way_init( &ctx.sha512 );
sha512_4way( &ctx.sha512, vhash, 64 );
sha512_4way_close( &ctx.sha512, vhash );
dintrlv_4x64_512( &hash0[15], &hash1[15], &hash2[15], &hash3[15], vhash );
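// The SWIFFTX input block is hashN[12..15]: the fugue, shabal, whirlpool and
// sha512 outputs back to back (256 bytes).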
ComputeSingleSWIFFTX((unsigned char*)&hash0[12], (unsigned char*)&hash0[16]);
ComputeSingleSWIFFTX((unsigned char*)&hash1[12], (unsigned char*)&hash1[16]);
ComputeSingleSWIFFTX((unsigned char*)&hash2[12], (unsigned char*)&hash2[16]);
ComputeSingleSWIFFTX((unsigned char*)&hash3[12], (unsigned char*)&hash3[16]);
intrlv_4x32_512( &vhashA, &hash0[16], &hash1[16], &hash2[16], &hash3[16] );
memset( vhash, 0, 64*4 );
haval256_5_4way_init( &ctx.haval );
haval256_5_4way( &ctx.haval, vhashA, 64 );
haval256_5_4way_close( &ctx.haval, vhash );
dintrlv_4x32_512( &hash0[17], &hash1[17], &hash2[17], &hash3[17], vhash );
sph_tiger_init(&ctx.tiger);
sph_tiger (&ctx.tiger, (const void*) &hash0[17], 64);
sph_tiger_close(&ctx.tiger, (void*) &hash0[18]);
sph_tiger_init(&ctx.tiger);
sph_tiger (&ctx.tiger, (const void*) &hash1[17], 64);
sph_tiger_close(&ctx.tiger, (void*) &hash1[18]);
sph_tiger_init(&ctx.tiger);
sph_tiger (&ctx.tiger, (const void*) &hash2[17], 64);
sph_tiger_close(&ctx.tiger, (void*) &hash2[18]);
sph_tiger_init(&ctx.tiger);
sph_tiger (&ctx.tiger, (const void*) &hash3[17], 64);
sph_tiger_close(&ctx.tiger, (void*) &hash3[18]);
LYRA2RE( (void*)&hash0[19], 32, (const void*)&hash0[18], 32,
(const void*)&hash0[18], 32, 1, 4, 4 );
LYRA2RE( (void*)&hash1[19], 32, (const void*)&hash1[18], 32,
(const void*)&hash1[18], 32, 1, 4, 4 );
LYRA2RE( (void*)&hash2[19], 32, (const void*)&hash2[18], 32,
(const void*)&hash2[18], 32, 1, 4, 4 );
LYRA2RE( (void*)&hash3[19], 32, (const void*)&hash3[18], 32,
(const void*)&hash3[18], 32, 1, 4, 4 );
sph_gost512_init(&ctx.gost);
sph_gost512 (&ctx.gost, (const void*) &hash0[19], 64);
sph_gost512_close(&ctx.gost, (void*) &hash0[20]);
sph_gost512_init(&ctx.gost);
sph_gost512 (&ctx.gost, (const void*) &hash1[19], 64);
sph_gost512_close(&ctx.gost, (void*) &hash1[20]);
sph_gost512_init(&ctx.gost);
sph_gost512 (&ctx.gost, (const void*) &hash2[19], 64);
sph_gost512_close(&ctx.gost, (void*) &hash2[20]);
sph_gost512_init(&ctx.gost);
sph_gost512 (&ctx.gost, (const void*) &hash3[19], 64);
sph_gost512_close(&ctx.gost, (void*) &hash3[20]);
intrlv_4x32_512( vhashA, &hash0[20], &hash1[20], &hash2[20], &hash3[20] );
memset( vhash, 0, 64*4 );
sha256_4way_init( &ctx.sha256 );
sha256_4way( &ctx.sha256, vhashA, 64 );
sha256_4way_close( &ctx.sha256, vhash );
dintrlv_4x32_512( &hash0[21], &hash1[21], &hash2[21], &hash3[21], vhash );
sph_panama_init(&ctx.panama);
sph_panama (&ctx.panama, (const void*) &hash0[21], 64 );
sph_panama_close(&ctx.panama, (void*) &hash0[22]);
sph_panama_init(&ctx.panama);
sph_panama (&ctx.panama, (const void*) &hash1[21], 64 );
sph_panama_close(&ctx.panama, (void*) &hash1[22]);
sph_panama_init(&ctx.panama);
sph_panama (&ctx.panama, (const void*) &hash2[21], 64 );
sph_panama_close(&ctx.panama, (void*) &hash2[22]);
sph_panama_init(&ctx.panama);
sph_panama (&ctx.panama, (const void*) &hash3[21], 64 );
sph_panama_close(&ctx.panama, (void*) &hash3[22]);
laneHash(512, (const BitSequence*)&hash0[22], 512, (BitSequence*)&hash0[23]);
laneHash(512, (const BitSequence*)&hash1[22], 512, (BitSequence*)&hash1[23]);
laneHash(512, (const BitSequence*)&hash2[22], 512, (BitSequence*)&hash2[23]);
laneHash(512, (const BitSequence*)&hash3[22], 512, (BitSequence*)&hash3[23]);
x25x_shuffle( hash0 );
x25x_shuffle( hash1 );
x25x_shuffle( hash2 );
x25x_shuffle( hash3 );
intrlv_4x32_512( &vhashA[ 0], &hash0[ 0], &hash1[ 0], &hash2[ 0], &hash3[ 0] );
intrlv_4x32_512( &vhashA[ 1], &hash0[ 1], &hash1[ 1], &hash2[ 1], &hash3[ 1] );
intrlv_4x32_512( &vhashA[ 2], &hash0[ 2], &hash1[ 2], &hash2[ 2], &hash3[ 2] );
intrlv_4x32_512( &vhashA[ 3], &hash0[ 3], &hash1[ 3], &hash2[ 3], &hash3[ 3] );
intrlv_4x32_512( &vhashA[ 4], &hash0[ 4], &hash1[ 4], &hash2[ 4], &hash3[ 4] );
intrlv_4x32_512( &vhashA[ 5], &hash0[ 5], &hash1[ 5], &hash2[ 5], &hash3[ 5] );
intrlv_4x32_512( &vhashA[ 6], &hash0[ 6], &hash1[ 6], &hash2[ 6], &hash3[ 6] );
intrlv_4x32_512( &vhashA[ 7], &hash0[ 7], &hash1[ 7], &hash2[ 7], &hash3[ 7] );
intrlv_4x32_512( &vhashA[ 8], &hash0[ 8], &hash1[ 8], &hash2[ 8], &hash3[ 8] );
intrlv_4x32_512( &vhashA[ 9], &hash0[ 9], &hash1[ 9], &hash2[ 9], &hash3[ 9] );
intrlv_4x32_512( &vhashA[10], &hash0[10], &hash1[10], &hash2[10], &hash3[10] );
intrlv_4x32_512( &vhashA[11], &hash0[11], &hash1[11], &hash2[11], &hash3[11] );
intrlv_4x32_512( &vhashA[12], &hash0[12], &hash1[12], &hash2[12], &hash3[12] );
intrlv_4x32_512( &vhashA[13], &hash0[13], &hash1[13], &hash2[13], &hash3[13] );
intrlv_4x32_512( &vhashA[14], &hash0[14], &hash1[14], &hash2[14], &hash3[14] );
intrlv_4x32_512( &vhashA[15], &hash0[15], &hash1[15], &hash2[15], &hash3[15] );
intrlv_4x32_512( &vhashA[16], &hash0[16], &hash1[16], &hash2[16], &hash3[16] );
intrlv_4x32_512( &vhashA[17], &hash0[17], &hash1[17], &hash2[17], &hash3[17] );
intrlv_4x32_512( &vhashA[18], &hash0[18], &hash1[18], &hash2[18], &hash3[18] );
intrlv_4x32_512( &vhashA[19], &hash0[19], &hash1[19], &hash2[19], &hash3[19] );
intrlv_4x32_512( &vhashA[20], &hash0[20], &hash1[20], &hash2[20], &hash3[20] );
intrlv_4x32_512( &vhashA[21], &hash0[21], &hash1[21], &hash2[21], &hash3[21] );
intrlv_4x32_512( &vhashA[22], &hash0[22], &hash1[22], &hash2[22], &hash3[22] );
intrlv_4x32_512( &vhashA[23], &hash0[23], &hash1[23], &hash2[23], &hash3[23] );
blake2s_4way_init( &ctx.blake2s, 32 );
blake2s_4way_full_blocks( &ctx.blake2s, vhash, vhashA, 64*24 );
dintrlv_4x32( &hash0[24], &hash1[24], &hash2[24], &hash3[24], vhash, 256 );
memcpy(output, &hash0[24], 32);
memcpy(output+32, &hash1[24], 32);
memcpy(output+64, &hash2[24], 32);
memcpy(output+96, &hash3[24], 32);
}
int scanhash_x25x_4way( struct work* work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[4*16] __attribute__ ((aligned (64)));
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 9; // aligned
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const uint32_t Htarg = ptarget[7];
if (opt_benchmark)
((uint32_t*)ptarget)[7] = 0x08ff;
InitializeSWIFFTX();
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do
{
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
x25x_4way_hash( hash, vdata );
for ( int i = 0; i < 4; i++ )
if ( unlikely( (hash+(i<<3))[7] <= Htarg ) )
if( likely( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark ) )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
}
n += 4;
} while ( likely( ( n < max_nonce - 4 ) && !work_restart[thr_id].restart ) );
*hashes_done = n - first_nonce + 1;
return 0;
}
#endif

236
algo/x22/x25x.c Normal file
View File

@@ -0,0 +1,236 @@
#include "x22i-gate.h"
#include "algo/blake/sph_blake.h"
#include "algo/bmw/sph_bmw.h"
#if defined(__AES__)
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#else
#include "algo/groestl/sph_groestl.h"
#include "algo/echo/sph_echo.h"
#endif
#include "algo/skein/sph_skein.h"
#include "algo/jh/sph_jh.h"
#include "algo/keccak/sph_keccak.h"
#include "algo/luffa/luffa_for_sse2.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/simd/nist.h"
#include "algo/hamsi/sph_hamsi.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/shabal/sph_shabal.h"
#include "algo/whirlpool/sph_whirlpool.h"
#include <openssl/sha.h>
#include "algo/haval/sph-haval.h"
#include "algo/tiger/sph_tiger.h"
#include "algo/lyra2/lyra2.h"
#include "algo/gost/sph_gost.h"
#include "algo/swifftx/swifftx.h"
#include "algo/blake/sph-blake2s.h"
#include "algo/panama/sph_panama.h"
#include "algo/lanehash/lane.h"
union _x25x_context_overlay
{
sph_blake512_context blake;
sph_bmw512_context bmw;
#if defined(__AES__)
hashState_groestl groestl;
hashState_echo echo;
#else
sph_groestl512_context groestl;
sph_echo512_context echo;
#endif
sph_jh512_context jh;
sph_keccak512_context keccak;
sph_skein512_context skein;
hashState_luffa luffa;
cubehashParam cube;
sph_shavite512_context shavite;
hashState_sd simd;
sph_hamsi512_context hamsi;
sph_fugue512_context fugue;
sph_shabal512_context shabal;
sph_whirlpool_context whirlpool;
SHA512_CTX sha512;
sph_haval256_5_context haval;
sph_tiger_context tiger;
sph_gost512_context gost;
SHA256_CTX sha256;
sph_panama_context panama;
blake2s_state blake2s;
};
typedef union _x25x_context_overlay x25x_context_overlay;
void x25x_hash( void *output, const void *input )
{
unsigned char hash[25][64] __attribute__((aligned(64))) = {0};
x25x_context_overlay ctx;
sph_blake512_init(&ctx.blake);
sph_blake512(&ctx.blake, input, 80);
sph_blake512_close(&ctx.blake, &hash[0] );
sph_bmw512_init(&ctx.bmw);
sph_bmw512(&ctx.bmw, (const void*) &hash[0], 64);
sph_bmw512_close(&ctx.bmw, &hash[1]);
#if defined(__AES__)
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)&hash[2],
(const char*)&hash[1], 512 );
#else
sph_groestl512_init( &ctx.groestl );
sph_groestl512( &ctx.groestl, &hash[1], 64 );
sph_groestl512_close( &ctx.groestl, &hash[2] );
#endif
sph_skein512_init(&ctx.skein);
sph_skein512(&ctx.skein, (const void*) &hash[2], 64);
sph_skein512_close(&ctx.skein, &hash[3]);
sph_jh512_init(&ctx.jh);
sph_jh512(&ctx.jh, (const void*) &hash[3], 64);
sph_jh512_close(&ctx.jh, &hash[4]);
sph_keccak512_init(&ctx.keccak);
sph_keccak512(&ctx.keccak, (const void*) &hash[4], 64);
sph_keccak512_close(&ctx.keccak, &hash[5]);
init_luffa( &ctx.luffa, 512 );
update_and_final_luffa( &ctx.luffa, (BitSequence*)&hash[6],
(const BitSequence*)&hash[5], 64 );
cubehashInit( &ctx.cube, 512, 16, 32 );
cubehashUpdateDigest( &ctx.cube, (byte*) &hash[7],
(const byte*)&hash[6], 64 );
sph_shavite512_init(&ctx.shavite);
sph_shavite512(&ctx.shavite, (const void*) &hash[7], 64);
sph_shavite512_close(&ctx.shavite, &hash[8]);
init_sd( &ctx.simd, 512 );
update_final_sd( &ctx.simd, (BitSequence*)&hash[9],
(const BitSequence*)&hash[8], 512 );
#if defined(__AES__)
init_echo( &ctx.echo, 512 );
update_final_echo ( &ctx.echo, (BitSequence*)&hash[10],
(const BitSequence*)&hash[9], 512 );
#else
sph_echo512_init( &ctx.echo );
sph_echo512( &ctx.echo, &hash[9], 64 );
sph_echo512_close( &ctx.echo, &hash[10] );
#endif
sph_hamsi512_init(&ctx.hamsi);
sph_hamsi512(&ctx.hamsi, (const void*) &hash[10], 64);
sph_hamsi512_close(&ctx.hamsi, &hash[11]);
sph_fugue512_init(&ctx.fugue);
sph_fugue512(&ctx.fugue, (const void*) &hash[11], 64);
sph_fugue512_close(&ctx.fugue, &hash[12]);
sph_shabal512_init(&ctx.shabal);
sph_shabal512(&ctx.shabal, (const void*) &hash[12], 64);
sph_shabal512_close(&ctx.shabal, &hash[13]);
sph_whirlpool_init(&ctx.whirlpool);
sph_whirlpool (&ctx.whirlpool, (const void*) &hash[13], 64);
sph_whirlpool_close(&ctx.whirlpool, &hash[14]);
SHA512_Init( &ctx.sha512 );
SHA512_Update( &ctx.sha512, (const void*) &hash[14], 64);
SHA512_Final( (void*) &hash[15], &ctx.sha512 );
ComputeSingleSWIFFTX((unsigned char*)&hash[12], (unsigned char*)&hash[16]);
sph_haval256_5_init(&ctx.haval);
sph_haval256_5(&ctx.haval,(const void*) &hash[16], 64);
sph_haval256_5_close(&ctx.haval,&hash[17]);
sph_tiger_init(&ctx.tiger);
sph_tiger (&ctx.tiger, (const void*) &hash[17], 64);
sph_tiger_close(&ctx.tiger, (void*) &hash[18]);
LYRA2RE( (void*)&hash[19], 32, (const void*)&hash[18], 32,
(const void*)&hash[18], 32, 1, 4, 4 );
sph_gost512_init(&ctx.gost);
sph_gost512 (&ctx.gost, (const void*) &hash[19], 64);
sph_gost512_close(&ctx.gost, (void*) &hash[20]);
SHA256_Init( &ctx.sha256 );
SHA256_Update( &ctx.sha256, (const void*) &hash[20], 64 );
SHA256_Final( (unsigned char*) &hash[21], &ctx.sha256 );
sph_panama_init(&ctx.panama);
sph_panama (&ctx.panama, (const void*) &hash[21], 64 );
sph_panama_close(&ctx.panama, (void*) &hash[22]);
laneHash(512, (const BitSequence*) &hash[22], 512, (BitSequence*) &hash[23]);
// Simple shuffle algorithm, instead of just reversing
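// Each round scans the 24*64-byte buffer as 16-bit blocks: block i is
// XORed with a block selected by the value of its mirror block from the
// opposite end, plus the round constant shifted left by (i mod 16).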
#define X25X_SHUFFLE_BLOCKS (24 * 64 / 2)
#define X25X_SHUFFLE_ROUNDS 12
static const uint16_t x25x_round_const[X25X_SHUFFLE_ROUNDS] =
{
0x142c, 0x5830, 0x678c, 0xe08c, 0x3c67, 0xd50d, 0xb1d8, 0xecb2,
0xd7ee, 0x6783, 0xfa6c, 0x4b9c
};
uint16_t* block_pointer = (uint16_t*)hash;
for ( int r = 0; r < X25X_SHUFFLE_ROUNDS; r++ )
{
for ( int i = 0; i < X25X_SHUFFLE_BLOCKS; i++ )
{
uint16_t block_value = block_pointer[ X25X_SHUFFLE_BLOCKS - i - 1 ];
block_pointer[i] ^= block_pointer[ block_value % X25X_SHUFFLE_BLOCKS ]
+ ( x25x_round_const[r] << (i % 16) );
}
}
#undef X25X_SHUFFLE_BLOCKS
#undef X25X_SHUFFLE_ROUNDS
blake2s_simple( (uint8_t*)&hash[24], (const void*)(&hash[0]), 64 * 24 );
memcpy(output, &hash[24], 32);
}
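/* A minimal usage sketch (hypothetical helper, not part of the algorithm,
   and assuming <stdint.h> is in scope via the includes above): hash one
   80-byte block header with x25x_hash(). The one-time InitializeSWIFFTX()
   setup must run first, as scanhash_x25x() does below. */
static void x25x_hash_example(void)
{
   uint32_t header[20] = {0};   /* 80-byte block header, already encoded */
   uint32_t digest[8];          /* 256-bit X25X result */
   InitializeSWIFFTX();         /* one-time SWIFFTX table setup */
   x25x_hash( digest, header );
}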
int scanhash_x25x( struct work* work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t endiandata[20] __attribute__((aligned(64)));
uint32_t hash[8] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
uint32_t n = first_nonce;
const int thr_id = mythr->id;
if (opt_benchmark)
((uint32_t*)ptarget)[7] = 0x08ff;
for (int k=0; k < 20; k++)
be32enc(&endiandata[k], pdata[k]);
InitializeSWIFFTX();
do
{
pdata[19] = ++n;
be32enc( &endiandata[19], n );
x25x_hash( hash, endiandata );
if ( hash[7] < Htarg )
if ( fulltest( hash, ptarget ) && !opt_benchmark )
submit_solution( work, hash, mythr );
} while ( n < max_nonce && !work_restart[thr_id].restart );
*hashes_done = pdata[19] - first_nonce;
return 0;
}

View File

@@ -49,6 +49,7 @@
* no slowdown from the prefixes is generally observed on AMD CPUs supporting
* XOP, some slowdown is sometimes observed on Intel CPUs with AVX.
*/
/*
#ifdef __XOP__
#warning "Note: XOP is enabled. That's great."
#elif defined(__AVX__)
@@ -60,6 +61,7 @@
#else
#warning "Note: building generic code for non-x86. That's OK."
#endif
*/
/*
* The SSE4 code version has fewer instructions than the generic SSE2 version,

View File

@@ -1,7 +1,7 @@
#!/bin/bash
#
# This script is not intended for users, it is only used for compile testing
# during develpment. Howver the information contained my provide cimpilation
# during development. However the information contained may provide compilation
# tips to users.
make distclean || echo clean
@@ -16,7 +16,8 @@ mv cpuminer cpuminer-avx512
make clean || echo clean
rm -f config.status
CFLAGS="-O3 -march=core-avx2 -Wall" ./configure --with-curl
# GCC 9 doesn't include AES with core-avx2
CFLAGS="-O3 -march=core-avx2 -maes -Wall" ./configure --with-curl
make -j 16
strip -s cpuminer.exe
mv cpuminer.exe cpuminer-avx2.exe
@@ -25,7 +26,7 @@ mv cpuminer cpuminer-avx2
make clean || echo clean
rm -f config.status
CFLAGS="-O3 -march=corei7-avx -Wall" ./configure --with-curl
CFLAGS="-O3 -march=corei7-avx -maes -Wall" ./configure --with-curl
make -j 16
strip -s cpuminer.exe
mv cpuminer.exe cpuminer-aes-avx.exe
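As an aside on the -maes change above: whether a given -march value enables AES-NI can be checked with a tiny compile-time probe. The file name and program below are entirely illustrative, not part of the build scripts.
/* aes_probe.c — compile with the CFLAGS under test, e.g.
   gcc -march=core-avx2 aes_probe.c && ./a.out */
#include <stdio.h>
int main(void)
{
#ifdef __AES__
   puts("__AES__ defined: AES-NI code paths will compile");
#else
   puts("__AES__ not defined: add -maes");
#endif
   return 0;
}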

20
configure vendored
View File

@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.10.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.10.1.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -577,8 +577,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='cpuminer-opt'
PACKAGE_TARNAME='cpuminer-opt'
PACKAGE_VERSION='3.9.10'
PACKAGE_STRING='cpuminer-opt 3.9.10'
PACKAGE_VERSION='3.10.1'
PACKAGE_STRING='cpuminer-opt 3.10.1'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''
@@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures cpuminer-opt 3.9.10 to adapt to many kinds of systems.
\`configure' configures cpuminer-opt 3.10.1 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1404,7 +1404,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of cpuminer-opt 3.9.10:";;
short | recursive ) echo "Configuration of cpuminer-opt 3.10.1:";;
esac
cat <<\_ACEOF
@@ -1509,7 +1509,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
cpuminer-opt configure 3.9.10
cpuminer-opt configure 3.10.1
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by cpuminer-opt $as_me 3.9.10, which was
It was created by cpuminer-opt $as_me 3.10.1, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2993,7 +2993,7 @@ fi
# Define the identity of the package.
PACKAGE='cpuminer-opt'
VERSION='3.9.10'
VERSION='3.10.1'
cat >>confdefs.h <<_ACEOF
@@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by cpuminer-opt $as_me 3.9.10, which was
This file was extended by cpuminer-opt $as_me 3.10.1, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -6756,7 +6756,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
cpuminer-opt config.status 3.9.10
cpuminer-opt config.status 3.10.1
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"

View File

@@ -1,4 +1,4 @@
AC_INIT([cpuminer-opt], [3.9.10])
AC_INIT([cpuminer-opt], [3.10.1])
AC_PREREQ([2.59c])
AC_CANONICAL_SYSTEM

View File

@@ -53,6 +53,8 @@
#if HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
// GCC 9 warns that sysctl.h is deprecated
#include <sys/sysctl.h>
#endif
#endif
@@ -3325,7 +3327,7 @@ static void show_credits()
{
printf("\n ********** "PACKAGE_NAME" "PACKAGE_VERSION" *********** \n");
printf(" A CPU miner with multi algo support and optimized for CPUs\n");
printf(" with AES_NI and AVX2 and SHA extensions.\n");
printf(" with AES_NI and AVX2, AVX512 and SHA extensions.\n");
printf(" BTC donation address: 12tdvfF7KmAsihBXQXynT6E6th2c2pByTT\n\n");
}
@@ -3339,12 +3341,15 @@ bool check_cpu_capability ()
bool cpu_has_avx2 = has_avx2();
bool cpu_has_sha = has_sha();
bool cpu_has_avx512 = has_avx512();
bool cpu_has_vaes = has_vaes();
bool sw_has_aes = false;
bool sw_has_sse2 = false;
bool sw_has_sse42 = false;
bool sw_has_avx = false;
bool sw_has_avx2 = false;
bool sw_has_avx512 = false;
bool sw_has_sha = false;
bool sw_has_vaes = false;
set_t algo_features = algo_gate.optimizations;
bool algo_has_sse2 = set_incl( SSE2_OPT, algo_features );
bool algo_has_aes = set_incl( AES_OPT, algo_features );
@@ -3352,17 +3357,22 @@ bool check_cpu_capability ()
bool algo_has_avx2 = set_incl( AVX2_OPT, algo_features );
bool algo_has_avx512 = set_incl( AVX512_OPT, algo_features );
bool algo_has_sha = set_incl( SHA_OPT, algo_features );
bool algo_has_vaes = set_incl( VAES_OPT, algo_features );
bool use_aes;
bool use_sse2;
bool use_sse42;
bool use_avx2;
bool use_avx512;
bool use_sha;
bool use_vaes;
bool use_none;
#ifdef __AES__
sw_has_aes = true;
#endif
#ifdef __SSE2__
sw_has_sse2 = true;
#endif
#ifdef __SSE4_2__
sw_has_sse42 = true;
#endif
@@ -3372,12 +3382,16 @@ bool check_cpu_capability ()
#ifdef __AVX2__
sw_has_avx2 = true;
#endif
#if (defined(__AVX512F__) && defined(__AVX51DQF__) && defined(__AVX51BW__) && defined(__AVX512VL__))
#if (defined(__AVX512F__) && defined(__AVX512DQ__) && defined(__AVX512BW__) && defined(__AVX512VL__))
sw_has_avx512 = true;
#endif
#ifdef __SHA__
sw_has_sha = true;
#endif
#ifdef __VAES__
sw_has_vaes = true;
#endif
// #if !((__AES__) || (__SSE2__))
// printf("Neither __AES__ nor __SSE2__ defined.\n");
@@ -3397,33 +3411,36 @@ bool check_cpu_capability ()
#endif
printf("CPU features:");
if ( cpu_has_sse2 ) printf( " SSE2" );
if ( cpu_has_aes ) printf( " AES" );
if ( cpu_has_sse42 ) printf( " SSE4.2" );
if ( cpu_has_avx ) printf( " AVX" );
if ( cpu_has_avx2 ) printf( " AVX2" );
if ( cpu_has_avx512 ) printf( " AVX512" );
if ( cpu_has_vaes ) printf( " VAES" );
if ( cpu_has_sha ) printf( " SHA" );
else if ( cpu_has_aes ) printf( " AES" );
if ( cpu_has_avx512 ) printf( " AVX512" );
else if ( cpu_has_avx2 ) printf( " AVX2" );
else if ( cpu_has_avx ) printf( " AVX" );
else if ( cpu_has_sse42 ) printf( " SSE4.2" );
else if ( cpu_has_sse2 ) printf( " SSE2" );
printf(".\nSW features: SSE2");
if ( sw_has_aes ) printf( " AES" );
if ( sw_has_sse42 ) printf( " SSE4.2" );
if ( sw_has_avx ) printf( " AVX" );
if ( sw_has_avx2 ) printf( " AVX2" );
if ( sw_has_avx512 ) printf( " AVX512" );
printf(".\nSW features:");
if ( sw_has_vaes ) printf( " VAES" );
else if ( sw_has_aes ) printf( " AES" );
if ( sw_has_sha ) printf( " SHA" );
if ( sw_has_avx512 ) printf( " AVX512" );
else if ( sw_has_avx2 ) printf( " AVX2" );
else if ( sw_has_avx ) printf( " AVX" );
else if ( sw_has_sse42 ) printf( " SSE4.2" );
else if ( sw_has_sse2 ) printf( " SSE2" );
printf(".\nAlgo features:");
if ( algo_features == EMPTY_SET ) printf( " None" );
else
{
if ( algo_has_sse2 ) printf( " SSE2" );
if ( algo_has_aes ) printf( " AES" );
if ( algo_has_sse42 ) printf( " SSE4.2" );
if ( algo_has_avx2 ) printf( " AVX2" );
if ( algo_has_avx512 ) printf( " AVX512" );
if ( algo_has_vaes ) printf( " VAES" );
else if ( algo_has_aes ) printf( " AES" );
if ( algo_has_sha ) printf( " SHA" );
if ( algo_has_avx512 ) printf( " AVX512" );
else if ( algo_has_avx2 ) printf( " AVX2" );
else if ( algo_has_sse42 ) printf( " SSE4.2" );
else if ( algo_has_sse2 ) printf( " SSE2" );
}
printf(".\n");
@@ -3461,15 +3478,17 @@ bool check_cpu_capability ()
use_avx2 = cpu_has_avx2 && sw_has_avx2 && algo_has_avx2;
use_avx512 = cpu_has_avx512 && sw_has_avx512 && algo_has_avx512;
use_sha = cpu_has_sha && sw_has_sha && algo_has_sha;
use_vaes = cpu_has_vaes && sw_has_vaes && algo_has_vaes;
use_none = !( use_sse2 || use_aes || use_sse42 || use_avx512 || use_avx2 ||
use_sha );
use_sha || use_vaes );
// Display best options
printf( "Start mining with" );
if ( use_none ) printf( " no optimizations" );
else
{
if ( use_aes ) printf( " AES" );
if ( use_vaes ) printf( " VAES" );
else if ( use_aes ) printf( " AES" );
if ( use_avx512 ) printf( " AVX512" );
else if ( use_avx2 ) printf( " AVX2" );
else if ( use_sse42 ) printf( " SSE4.2" );

Some files were not shown because too many files have changed in this diff.