mirror of
https://github.com/JayDDee/cpuminer-opt.git
synced 2025-09-17 23:44:27 +00:00
Compare commits
5 Commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
92b3733925 | ||
![]() |
19cc88d102 | ||
![]() |
a053690170 | ||
![]() |
3c5e8921b7 | ||
![]() |
f3333b0070 |
@@ -163,6 +163,8 @@ cpuminer_SOURCES = \
|
||||
algo/sha/sph_sha2big.c \
|
||||
algo/sha/sha256-hash-4way.c \
|
||||
algo/sha/sha512-hash-4way.c \
|
||||
algo/sha/sha256-hash-opt.c \
|
||||
algo/sha/sha256-hash-2way-ni.c \
|
||||
algo/sha/hmac-sha256-hash.c \
|
||||
algo/sha/hmac-sha256-hash-4way.c \
|
||||
algo/sha/sha2.c \
|
||||
@@ -196,6 +198,7 @@ cpuminer_SOURCES = \
|
||||
algo/verthash/Verthash.c \
|
||||
algo/verthash/fopen_utf8.c \
|
||||
algo/verthash/tiny_sha3/sha3.c \
|
||||
algo/verthash/tiny_sha3/sha3-4way.c \
|
||||
algo/whirlpool/sph_whirlpool.c \
|
||||
algo/whirlpool/whirlpool-hash-4way.c \
|
||||
algo/whirlpool/whirlpool-gate.c \
|
||||
|
@@ -135,7 +135,7 @@ Supported Algorithms
|
||||
x14 X14
|
||||
x15 X15
|
||||
x16r
|
||||
x16rv2 Ravencoin (RVN)
|
||||
x16rv2
|
||||
x16rt Gincoin (GIN)
|
||||
x16rt-veil Veil (VEIL)
|
||||
x16s Pigeoncoin (PGN)
|
||||
|
@@ -64,6 +64,11 @@ source code obtained from the author's official repository. The exact
|
||||
procedure is documented in the build instructions for Windows:
|
||||
https://github.com/JayDDee/cpuminer-opt/wiki/Compiling-from-source
|
||||
|
||||
Some DLL filess may already be installed on the system by Windows or third
|
||||
party packages. They often will work and may be used instead of the included
|
||||
file. Without a compelling reason to do so it's recommended to use the included
|
||||
files as they are packaged.
|
||||
|
||||
If you like this software feel free to donate:
|
||||
|
||||
BTC: 12tdvfF7KmAsihBXQXynT6E6th2c2pByTT
|
||||
|
@@ -65,6 +65,36 @@ If not what makes it happen or not happen?
|
||||
Change Log
|
||||
----------
|
||||
|
||||
v3.17.0
|
||||
|
||||
AVX512 optimized using ternary logic instructions.
|
||||
Faster sha256t on all CPU architectures: AVX512 +30%, SHA +30%, AVX2 +9%.
|
||||
Use SHA on supported CPUs to produce merkle hash.
|
||||
Fixed byte order in Extranonce2 log & replaced Block height with Job ID.
|
||||
|
||||
v3.16.5
|
||||
|
||||
#329: Fixed GBT incorrect target diff in stats, second attempt.
|
||||
Fixed formatting error in share result log when --no-color option is used.
|
||||
|
||||
v3.16.4
|
||||
|
||||
Faster sha512 and sha256 when not using SHA CPU extension.
|
||||
#329: Fixed GBT incorrect target diff in stats.
|
||||
|
||||
v3.16.3
|
||||
|
||||
#313 Fix compile error with GCC 11.
|
||||
Incremental improvements to verthash.
|
||||
|
||||
v3.16.2
|
||||
|
||||
Verthash: midstate prehash optimization for all architectures.
|
||||
Verthash: AVX2 optimization.
|
||||
GBT: added support for Bech32 addresses.
|
||||
Linux: added CPU frequency to benchmark log.
|
||||
Fixed integer overflow in time calculations.
|
||||
|
||||
v3.16.1
|
||||
|
||||
New options for verthash:
|
||||
@@ -72,16 +102,12 @@ New options for verthash:
|
||||
data file, default is "verthash.dat" in the current directory.
|
||||
--verify to perform the data file integrity check at startup, default is
|
||||
not to verify data file integrity.
|
||||
|
||||
Support for creation of default verthash data file if:
|
||||
1) --data-file option is not used,
|
||||
2) no default data file is found in the current directory, and,
|
||||
3) --verify option is used.
|
||||
|
||||
More detailed logs related to verthash data file.
|
||||
|
||||
Small verthash performance improvement.
|
||||
|
||||
Fixed detection of corrupt stats caused by networking issues.
|
||||
|
||||
v3.16.0
|
||||
@@ -107,7 +133,6 @@ RPC getmininginfo method.
|
||||
v3.15.5
|
||||
|
||||
Fix stratum jobs lost if 2 jobs received in less than one second.
|
||||
|
||||
|
||||
v3.15.4
|
||||
|
||||
|
@@ -17,7 +17,7 @@
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
|
||||
ALIGN(128) typedef struct {
|
||||
typedef struct ALIGN( 64 ) {
|
||||
__m512i b[16]; // input buffer
|
||||
__m512i h[8]; // chained state
|
||||
uint64_t t[2]; // total number of bytes
|
||||
@@ -35,7 +35,7 @@ void blake2b_8way_final( blake2b_8way_ctx *ctx, void *out );
|
||||
#if defined(__AVX2__)
|
||||
|
||||
// state context
|
||||
ALIGN(128) typedef struct {
|
||||
typedef struct ALIGN( 64 ) {
|
||||
__m256i b[16]; // input buffer
|
||||
__m256i h[8]; // chained state
|
||||
uint64_t t[2]; // total number of bytes
|
||||
|
@@ -60,7 +60,7 @@ typedef struct __blake2s_nway_param
|
||||
} blake2s_nway_param;
|
||||
#pragma pack(pop)
|
||||
|
||||
ALIGN( 64 ) typedef struct __blake2s_4way_state
|
||||
typedef struct ALIGN( 64 ) __blake2s_4way_state
|
||||
{
|
||||
__m128i h[8];
|
||||
uint8_t buf[ BLAKE2S_BLOCKBYTES * 4 ];
|
||||
@@ -80,7 +80,7 @@ int blake2s_4way_full_blocks( blake2s_4way_state *S, void *out,
|
||||
|
||||
#if defined(__AVX2__)
|
||||
|
||||
ALIGN( 64 ) typedef struct __blake2s_8way_state
|
||||
typedef struct ALIGN( 64 ) __blake2s_8way_state
|
||||
{
|
||||
__m256i h[8];
|
||||
uint8_t buf[ BLAKE2S_BLOCKBYTES * 8 ];
|
||||
@@ -101,7 +101,7 @@ int blake2s_8way_full_blocks( blake2s_8way_state *S, void *out,
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
|
||||
ALIGN( 128 ) typedef struct __blake2s_16way_state
|
||||
typedef struct ALIGN( 64 ) __blake2s_16way_state
|
||||
{
|
||||
__m512i h[8];
|
||||
uint8_t buf[ BLAKE2S_BLOCKBYTES * 16 ];
|
||||
|
@@ -323,7 +323,7 @@ int blake2s_final( blake2s_state *S, uint8_t *out, uint8_t outlen )
|
||||
|
||||
int blake2s( uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen )
|
||||
{
|
||||
blake2s_state S[1];
|
||||
blake2s_state S;
|
||||
|
||||
/* Verify parameters */
|
||||
if ( NULL == in ) return -1;
|
||||
@@ -334,15 +334,15 @@ int blake2s( uint8_t *out, const void *in, const void *key, const uint8_t outlen
|
||||
|
||||
if( keylen > 0 )
|
||||
{
|
||||
if( blake2s_init_key( S, outlen, key, keylen ) < 0 ) return -1;
|
||||
if( blake2s_init_key( &S, outlen, key, keylen ) < 0 ) return -1;
|
||||
}
|
||||
else
|
||||
{
|
||||
if( blake2s_init( S, outlen ) < 0 ) return -1;
|
||||
if( blake2s_init( &S, outlen ) < 0 ) return -1;
|
||||
}
|
||||
|
||||
blake2s_update( S, ( uint8_t * )in, inlen );
|
||||
blake2s_final( S, out, outlen );
|
||||
blake2s_update( &S, ( uint8_t * )in, inlen );
|
||||
blake2s_final( &S, out, outlen );
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -116,7 +116,7 @@ extern "C" {
|
||||
uint8_t personal[BLAKE2S_PERSONALBYTES]; // 32
|
||||
} blake2s_param;
|
||||
|
||||
ALIGN( 64 ) typedef struct __blake2s_state
|
||||
typedef struct ALIGN( 64 ) __blake2s_state
|
||||
{
|
||||
uint32_t h[8];
|
||||
uint32_t t[2];
|
||||
|
@@ -18,7 +18,7 @@
|
||||
#endif
|
||||
|
||||
// state context
|
||||
ALIGN(64) typedef struct {
|
||||
typedef ALIGN(64) struct {
|
||||
uint8_t b[128]; // input buffer
|
||||
uint64_t h[8]; // chained state
|
||||
uint64_t t[2]; // total number of bytes
|
||||
|
@@ -1293,32 +1293,26 @@ void compress_big_8way( const __m512i *M, const __m512i H[16],
|
||||
mm512_xor4( qt[28], qt[29], qt[30], qt[31] ) ) );
|
||||
|
||||
#define DH1L( m, sl, sr, a, b, c ) \
|
||||
_mm512_add_epi64( \
|
||||
_mm512_xor_si512( M[m], \
|
||||
_mm512_xor_si512( _mm512_slli_epi64( xh, sl ), \
|
||||
_mm512_srli_epi64( qt[a], sr ) ) ), \
|
||||
_mm512_xor_si512( _mm512_xor_si512( xl, qt[b] ), qt[c] ) )
|
||||
_mm512_add_epi64( mm512_xor3( M[m], _mm512_slli_epi64( xh, sl ), \
|
||||
_mm512_srli_epi64( qt[a], sr ) ), \
|
||||
mm512_xor3( xl, qt[b], qt[c] ) )
|
||||
|
||||
#define DH1R( m, sl, sr, a, b, c ) \
|
||||
_mm512_add_epi64( \
|
||||
_mm512_xor_si512( M[m], \
|
||||
_mm512_xor_si512( _mm512_srli_epi64( xh, sl ), \
|
||||
_mm512_slli_epi64( qt[a], sr ) ) ), \
|
||||
_mm512_xor_si512( _mm512_xor_si512( xl, qt[b] ), qt[c] ) )
|
||||
_mm512_add_epi64( mm512_xor3( M[m], _mm512_srli_epi64( xh, sl ), \
|
||||
_mm512_slli_epi64( qt[a], sr ) ), \
|
||||
mm512_xor3( xl, qt[b], qt[c] ) )
|
||||
|
||||
#define DH2L( m, rl, sl, h, a, b, c ) \
|
||||
_mm512_add_epi64( _mm512_add_epi64( \
|
||||
mm512_rol_64( dH[h], rl ), \
|
||||
_mm512_xor_si512( _mm512_xor_si512( xh, qt[a] ), M[m] )), \
|
||||
_mm512_xor_si512( _mm512_slli_epi64( xl, sl ), \
|
||||
_mm512_xor_si512( qt[b], qt[c] ) ) );
|
||||
|
||||
mm512_rol_64( dH[h], rl ), \
|
||||
mm512_xor3( xh, qt[a], M[m] ) ), \
|
||||
mm512_xor3( _mm512_slli_epi64( xl, sl ), qt[b], qt[c] ) )
|
||||
|
||||
#define DH2R( m, rl, sr, h, a, b, c ) \
|
||||
_mm512_add_epi64( _mm512_add_epi64( \
|
||||
mm512_rol_64( dH[h], rl ), \
|
||||
_mm512_xor_si512( _mm512_xor_si512( xh, qt[a] ), M[m] )), \
|
||||
_mm512_xor_si512( _mm512_srli_epi64( xl, sr ), \
|
||||
_mm512_xor_si512( qt[b], qt[c] ) ) );
|
||||
mm512_rol_64( dH[h], rl ), \
|
||||
mm512_xor3( xh, qt[a], M[m] ) ), \
|
||||
mm512_xor3( _mm512_srli_epi64( xl, sr ), qt[b], qt[c] ) )
|
||||
|
||||
|
||||
dH[ 0] = DH1L( 0, 5, 5, 16, 24, 0 );
|
||||
|
@@ -560,22 +560,14 @@ do { \
|
||||
__m512i dm = _mm512_and_si512( db, m512_one_64 ) ; \
|
||||
dm = mm512_negate_32( _mm512_or_si512( dm, \
|
||||
_mm512_slli_epi64( dm, 32 ) ) ); \
|
||||
m0 = _mm512_xor_si512( m0, _mm512_and_si512( dm, \
|
||||
m512_const1_64( tp[0] ) ) ); \
|
||||
m1 = _mm512_xor_si512( m1, _mm512_and_si512( dm, \
|
||||
m512_const1_64( tp[1] ) ) ); \
|
||||
m2 = _mm512_xor_si512( m2, _mm512_and_si512( dm, \
|
||||
m512_const1_64( tp[2] ) ) ); \
|
||||
m3 = _mm512_xor_si512( m3, _mm512_and_si512( dm, \
|
||||
m512_const1_64( tp[3] ) ) ); \
|
||||
m4 = _mm512_xor_si512( m4, _mm512_and_si512( dm, \
|
||||
m512_const1_64( tp[4] ) ) ); \
|
||||
m5 = _mm512_xor_si512( m5, _mm512_and_si512( dm, \
|
||||
m512_const1_64( tp[5] ) ) ); \
|
||||
m6 = _mm512_xor_si512( m6, _mm512_and_si512( dm, \
|
||||
m512_const1_64( tp[6] ) ) ); \
|
||||
m7 = _mm512_xor_si512( m7, _mm512_and_si512( dm, \
|
||||
m512_const1_64( tp[7] ) ) ); \
|
||||
m0 = mm512_xorand( m0, dm, m512_const1_64( tp[0] ) ); \
|
||||
m1 = mm512_xorand( m1, dm, m512_const1_64( tp[1] ) ); \
|
||||
m2 = mm512_xorand( m2, dm, m512_const1_64( tp[2] ) ); \
|
||||
m3 = mm512_xorand( m3, dm, m512_const1_64( tp[3] ) ); \
|
||||
m4 = mm512_xorand( m4, dm, m512_const1_64( tp[4] ) ); \
|
||||
m5 = mm512_xorand( m5, dm, m512_const1_64( tp[5] ) ); \
|
||||
m6 = mm512_xorand( m6, dm, m512_const1_64( tp[6] ) ); \
|
||||
m7 = mm512_xorand( m7, dm, m512_const1_64( tp[7] ) ); \
|
||||
tp += 8; \
|
||||
db = _mm512_srli_epi64( db, 1 ); \
|
||||
} \
|
||||
@@ -585,20 +577,13 @@ do { \
|
||||
do { \
|
||||
__m512i t; \
|
||||
t = a; \
|
||||
a = _mm512_and_si512( a, c ); \
|
||||
a = _mm512_xor_si512( a, d ); \
|
||||
c = _mm512_xor_si512( c, b ); \
|
||||
c = _mm512_xor_si512( c, a ); \
|
||||
d = _mm512_or_si512( d, t ); \
|
||||
d = _mm512_xor_si512( d, b ); \
|
||||
a = mm512_xorand( d, a, c ); \
|
||||
c = mm512_xor3( a, b, c ); \
|
||||
b = mm512_xoror( b, d, t ); \
|
||||
t = _mm512_xor_si512( t, c ); \
|
||||
b = d; \
|
||||
d = _mm512_or_si512( d, t ); \
|
||||
d = _mm512_xor_si512( d, a ); \
|
||||
a = _mm512_and_si512( a, b ); \
|
||||
t = _mm512_xor_si512( t, a ); \
|
||||
b = _mm512_xor_si512( b, d ); \
|
||||
b = _mm512_xor_si512( b, t ); \
|
||||
d = mm512_xoror( a, b, t ); \
|
||||
t = mm512_xorand( t, a, b ); \
|
||||
b = mm512_xor3( b, d, t ); \
|
||||
a = c; \
|
||||
c = b; \
|
||||
b = d; \
|
||||
@@ -609,14 +594,12 @@ do { \
|
||||
do { \
|
||||
a = mm512_rol_32( a, 13 ); \
|
||||
c = mm512_rol_32( c, 3 ); \
|
||||
b = _mm512_xor_si512( b, _mm512_xor_si512( a, c ) ); \
|
||||
d = _mm512_xor_si512( d, _mm512_xor_si512( c, \
|
||||
_mm512_slli_epi32( a, 3 ) ) ); \
|
||||
b = mm512_xor3( a, b, c ); \
|
||||
d = mm512_xor3( d, c, _mm512_slli_epi32( a, 3 ) ); \
|
||||
b = mm512_rol_32( b, 1 ); \
|
||||
d = mm512_rol_32( d, 7 ); \
|
||||
a = _mm512_xor_si512( a, _mm512_xor_si512( b, d ) ); \
|
||||
c = _mm512_xor_si512( c, _mm512_xor_si512( d, \
|
||||
_mm512_slli_epi32( b, 7 ) ) ); \
|
||||
a = mm512_xor3( a, b, d ); \
|
||||
c = mm512_xor3( c, d, _mm512_slli_epi32( b, 7 ) ); \
|
||||
a = mm512_rol_32( a, 5 ); \
|
||||
c = mm512_rol_32( c, 22 ); \
|
||||
} while (0)
|
||||
|
@@ -522,50 +522,53 @@ do { \
|
||||
|
||||
// Haval-256 8 way 32 bit avx2
|
||||
|
||||
#if defined (__AVX512VL__)
|
||||
|
||||
// ( ~( a ^ b ) ) & c
|
||||
#define mm256_andnotxor( a, b, c ) \
|
||||
_mm256_ternarylogic_epi32( a, b, c, 0x82 )
|
||||
|
||||
#else
|
||||
|
||||
#define mm256_andnotxor( a, b, c ) \
|
||||
_mm256_andnot_si256( _mm256_xor_si256( a, b ), c )
|
||||
|
||||
#endif
|
||||
|
||||
#define F1_8W(x6, x5, x4, x3, x2, x1, x0) \
|
||||
_mm256_xor_si256( x0, \
|
||||
_mm256_xor_si256( _mm256_and_si256(_mm256_xor_si256( x0, x4 ), x1 ), \
|
||||
_mm256_xor_si256( _mm256_and_si256( x2, x5 ), \
|
||||
_mm256_and_si256( x3, x6 ) ) ) ) \
|
||||
mm256_xor3( x0, mm256_andxor( x1, x0, x4 ), \
|
||||
_mm256_xor_si256( _mm256_and_si256( x2, x5 ), \
|
||||
_mm256_and_si256( x3, x6 ) ) ) \
|
||||
|
||||
#define F2_8W(x6, x5, x4, x3, x2, x1, x0) \
|
||||
_mm256_xor_si256( \
|
||||
_mm256_and_si256( x2, \
|
||||
_mm256_xor_si256( _mm256_andnot_si256( x3, x1 ), \
|
||||
_mm256_xor_si256( _mm256_and_si256( x4, x5 ), \
|
||||
_mm256_xor_si256( x6, x0 ) ) ) ), \
|
||||
_mm256_xor_si256( \
|
||||
_mm256_and_si256( x4, _mm256_xor_si256( x1, x5 ) ), \
|
||||
_mm256_xor_si256( _mm256_and_si256( x3, x5 ), x0 ) ) ) \
|
||||
mm256_xor3( mm256_andxor( x2, _mm256_andnot_si256( x3, x1 ), \
|
||||
mm256_xor3( _mm256_and_si256( x4, x5 ), x6, x0 ) ), \
|
||||
mm256_andxor( x4, x1, x5 ), \
|
||||
mm256_xorand( x0, x3, x5 ) ) \
|
||||
|
||||
#define F3_8W(x6, x5, x4, x3, x2, x1, x0) \
|
||||
_mm256_xor_si256( \
|
||||
_mm256_and_si256( x3, \
|
||||
_mm256_xor_si256( _mm256_and_si256( x1, x2 ), \
|
||||
_mm256_xor_si256( x6, x0 ) ) ), \
|
||||
_mm256_xor_si256( _mm256_xor_si256(_mm256_and_si256( x1, x4 ), \
|
||||
_mm256_and_si256( x2, x5 ) ), x0 ) )
|
||||
mm256_xor3( x0, \
|
||||
_mm256_and_si256( x3, \
|
||||
mm256_xor3( _mm256_and_si256( x1, x2 ), x6, x0 ) ), \
|
||||
_mm256_xor_si256( _mm256_and_si256( x1, x4 ), \
|
||||
_mm256_and_si256( x2, x5 ) ) )
|
||||
|
||||
#define F4_8W(x6, x5, x4, x3, x2, x1, x0) \
|
||||
_mm256_xor_si256( \
|
||||
_mm256_xor_si256( \
|
||||
_mm256_and_si256( x3, \
|
||||
_mm256_xor_si256( _mm256_xor_si256( _mm256_and_si256( x1, x2 ), \
|
||||
_mm256_or_si256( x4, x6 ) ), x5 ) ), \
|
||||
_mm256_and_si256( x4, \
|
||||
_mm256_xor_si256( _mm256_xor_si256( _mm256_and_si256( mm256_not(x2), x5 ), \
|
||||
_mm256_xor_si256( x1, x6 ) ), x0 ) ) ), \
|
||||
_mm256_xor_si256( _mm256_and_si256( x2, x6 ), x0 ) )
|
||||
|
||||
mm256_xor3( \
|
||||
mm256_andxor( x3, x5, \
|
||||
_mm256_xor_si256( _mm256_and_si256( x1, x2 ), \
|
||||
_mm256_or_si256( x4, x6 ) ) ), \
|
||||
_mm256_and_si256( x4, \
|
||||
mm256_xor3( x0, _mm256_andnot_si256( x2, x5 ), \
|
||||
_mm256_xor_si256( x1, x6 ) ) ), \
|
||||
mm256_xorand( x0, x2, x6 ) )
|
||||
|
||||
#define F5_8W(x6, x5, x4, x3, x2, x1, x0) \
|
||||
_mm256_xor_si256( \
|
||||
_mm256_and_si256( x0, \
|
||||
mm256_not( _mm256_xor_si256( \
|
||||
_mm256_and_si256( _mm256_and_si256( x1, x2 ), x3 ), x5 ) ) ), \
|
||||
_mm256_xor_si256( _mm256_xor_si256( _mm256_and_si256( x1, x4 ), \
|
||||
_mm256_and_si256( x2, x5 ) ), \
|
||||
_mm256_and_si256( x3, x6 ) ) )
|
||||
mm256_andnotxor( mm256_and3( x1, x2, x3 ), x5, x0 ), \
|
||||
mm256_xor3( _mm256_and_si256( x1, x4 ), \
|
||||
_mm256_and_si256( x2, x5 ), \
|
||||
_mm256_and_si256( x3, x6 ) ) )
|
||||
|
||||
#define FP3_1_8W(x6, x5, x4, x3, x2, x1, x0) \
|
||||
F1_8W(x1, x0, x3, x5, x6, x2, x4)
|
||||
|
@@ -51,15 +51,15 @@ extern "C"{
|
||||
do { \
|
||||
__m512i cc = _mm512_set1_epi64( c ); \
|
||||
x3 = mm512_not( x3 ); \
|
||||
x0 = _mm512_xor_si512( x0, _mm512_andnot_si512( x2, cc ) ); \
|
||||
tmp = _mm512_xor_si512( cc, _mm512_and_si512( x0, x1 ) ); \
|
||||
x0 = _mm512_xor_si512( x0, _mm512_and_si512( x2, x3 ) ); \
|
||||
x3 = _mm512_xor_si512( x3, _mm512_andnot_si512( x1, x2 ) ); \
|
||||
x1 = _mm512_xor_si512( x1, _mm512_and_si512( x0, x2 ) ); \
|
||||
x2 = _mm512_xor_si512( x2, _mm512_andnot_si512( x3, x0 ) ); \
|
||||
x0 = _mm512_xor_si512( x0, _mm512_or_si512( x1, x3 ) ); \
|
||||
x3 = _mm512_xor_si512( x3, _mm512_and_si512( x1, x2 ) ); \
|
||||
x1 = _mm512_xor_si512( x1, _mm512_and_si512( tmp, x0 ) ); \
|
||||
x0 = mm512_xorandnot( x0, x2, cc ); \
|
||||
tmp = mm512_xorand( cc, x0, x1 ); \
|
||||
x0 = mm512_xorand( x0, x2, x3 ); \
|
||||
x3 = mm512_xorandnot( x3, x1, x2 ); \
|
||||
x1 = mm512_xorand( x1, x0, x2 ); \
|
||||
x2 = mm512_xorandnot( x2, x3, x0 ); \
|
||||
x0 = mm512_xoror( x0, x1, x3 ); \
|
||||
x3 = mm512_xorand( x3, x1, x2 ); \
|
||||
x1 = mm512_xorand( x1, tmp, x0 ); \
|
||||
x2 = _mm512_xor_si512( x2, tmp ); \
|
||||
} while (0)
|
||||
|
||||
@@ -67,11 +67,11 @@ do { \
|
||||
do { \
|
||||
x4 = _mm512_xor_si512( x4, x1 ); \
|
||||
x5 = _mm512_xor_si512( x5, x2 ); \
|
||||
x6 = _mm512_xor_si512( x6, _mm512_xor_si512( x3, x0 ) ); \
|
||||
x6 = mm512_xor3( x6, x3, x0 ); \
|
||||
x7 = _mm512_xor_si512( x7, x0 ); \
|
||||
x0 = _mm512_xor_si512( x0, x5 ); \
|
||||
x1 = _mm512_xor_si512( x1, x6 ); \
|
||||
x2 = _mm512_xor_si512( x2, _mm512_xor_si512( x7, x4 ) ); \
|
||||
x2 = mm512_xor3( x2, x7, x4 ); \
|
||||
x3 = _mm512_xor_si512( x3, x4 ); \
|
||||
} while (0)
|
||||
|
||||
@@ -318,12 +318,12 @@ static const sph_u64 C[] = {
|
||||
#define Wz_8W(x, c, n) \
|
||||
do { \
|
||||
__m512i t = _mm512_slli_epi64( _mm512_and_si512(x ## h, (c)), (n) ); \
|
||||
x ## h = _mm512_or_si512( _mm512_and_si512( \
|
||||
_mm512_srli_epi64(x ## h, (n)), (c)), t ); \
|
||||
x ## h = mm512_orand( t, _mm512_srli_epi64( x ## h, (n) ), (c) ); \
|
||||
t = _mm512_slli_epi64( _mm512_and_si512(x ## l, (c)), (n) ); \
|
||||
x ## l = _mm512_or_si512( _mm512_and_si512((x ## l >> (n)), (c)), t ); \
|
||||
x ## l = mm512_orand( t, (x ## l >> (n)), (c) ); \
|
||||
} while (0)
|
||||
|
||||
|
||||
#define W80(x) Wz_8W(x, m512_const1_64( 0x5555555555555555 ), 1 )
|
||||
#define W81(x) Wz_8W(x, m512_const1_64( 0x3333333333333333 ), 2 )
|
||||
#define W82(x) Wz_8W(x, m512_const1_64( 0x0F0F0F0F0F0F0F0F ), 4 )
|
||||
|
@@ -76,6 +76,9 @@ static const uint64_t RC[] = {
|
||||
#define OR64(d, a, b) (d = _mm512_or_si512(a,b))
|
||||
#define NOT64(d, s) (d = _mm512_xor_si512(s,m512_neg1))
|
||||
#define ROL64(d, v, n) (d = mm512_rol_64(v, n))
|
||||
#define XOROR(d, a, b, c) (d = mm512_xoror(a, b, c))
|
||||
#define XORAND(d, a, b, c) (d = mm512_xorand(a, b, c))
|
||||
|
||||
|
||||
#include "keccak-macros.c"
|
||||
|
||||
@@ -238,6 +241,8 @@ keccak512_8way_close(void *cc, void *dst)
|
||||
#undef NOT64
|
||||
#undef ROL64
|
||||
#undef KECCAK_F_1600
|
||||
#undef XOROR
|
||||
#undef XORAND
|
||||
|
||||
#endif // AVX512
|
||||
|
||||
@@ -255,6 +260,8 @@ keccak512_8way_close(void *cc, void *dst)
|
||||
#define OR64(d, a, b) (d = _mm256_or_si256(a,b))
|
||||
#define NOT64(d, s) (d = _mm256_xor_si256(s,m256_neg1))
|
||||
#define ROL64(d, v, n) (d = mm256_rol_64(v, n))
|
||||
#define XOROR(d, a, b, c) (d = _mm256_xor_si256(a, _mm256_or_si256(b, c)))
|
||||
#define XORAND(d, a, b, c) (d = _mm256_xor_si256(a, _mm256_and_si256(b, c)))
|
||||
|
||||
#include "keccak-macros.c"
|
||||
|
||||
@@ -419,5 +426,7 @@ keccak512_4way_close(void *cc, void *dst)
|
||||
#undef NOT64
|
||||
#undef ROL64
|
||||
#undef KECCAK_F_1600
|
||||
#undef XOROR
|
||||
#undef XORAND
|
||||
|
||||
#endif // AVX2
|
||||
|
@@ -110,20 +110,34 @@
|
||||
#ifdef KHI_XO
|
||||
#undef KHI_XO
|
||||
#endif
|
||||
|
||||
#define KHI_XO(d, a, b, c) do { \
|
||||
XOROR(d, a, b, c); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
#define KHI_XO(d, a, b, c) do { \
|
||||
DECL64(kt); \
|
||||
OR64(kt, b, c); \
|
||||
XOR64(d, a, kt); \
|
||||
} while (0)
|
||||
*/
|
||||
|
||||
#ifdef KHI_XA
|
||||
#undef KHI_XA
|
||||
#endif
|
||||
|
||||
#define KHI_XA(d, a, b, c) do { \
|
||||
XORAND(d, a, b, c); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
#define KHI_XA(d, a, b, c) do { \
|
||||
DECL64(kt); \
|
||||
AND64(kt, b, c); \
|
||||
XOR64(d, a, kt); \
|
||||
} while (0)
|
||||
*/
|
||||
|
||||
#ifdef KHI
|
||||
#undef KHI
|
||||
|
@@ -97,6 +97,21 @@ do { \
|
||||
MIXWORD4W(*(x+3),*(x+7),*t,*(t+1));\
|
||||
ADD_CONSTANT4W(*x, *(x+4), c0, c1);
|
||||
|
||||
#define SUBCRUMB4W(a0,a1,a2,a3,t)\
|
||||
t = a0;\
|
||||
a0 = mm512_xoror( a3, a0, a1 ); \
|
||||
a2 = _mm512_xor_si512(a2,a3);\
|
||||
a1 = _mm512_ternarylogic_epi64( a1, a3, t, 0x87 ); /* a1 xnor (a3 & t) */ \
|
||||
a3 = mm512_xorand( a2, a3, t ); \
|
||||
a2 = mm512_xorand( a1, a2, a0);\
|
||||
a1 = _mm512_or_si512(a1,a3);\
|
||||
a3 = _mm512_xor_si512(a3,a2);\
|
||||
t = _mm512_xor_si512(t,a1);\
|
||||
a2 = _mm512_and_si512(a2,a1);\
|
||||
a1 = mm512_xnor(a1,a0);\
|
||||
a0 = t;
|
||||
|
||||
/*
|
||||
#define SUBCRUMB4W(a0,a1,a2,a3,t)\
|
||||
t = _mm512_load_si512(&a0);\
|
||||
a0 = _mm512_or_si512(a0,a1);\
|
||||
@@ -115,7 +130,25 @@ do { \
|
||||
a2 = _mm512_and_si512(a2,a1);\
|
||||
a1 = _mm512_xor_si512(a1,a0);\
|
||||
a0 = _mm512_load_si512(&t);
|
||||
*/
|
||||
|
||||
#define MIXWORD4W(a,b,t1,t2)\
|
||||
b = _mm512_xor_si512(a,b);\
|
||||
t1 = _mm512_slli_epi32(a,2);\
|
||||
t2 = _mm512_srli_epi32(a,30);\
|
||||
a = mm512_xoror( b, t1, t2 ); \
|
||||
t1 = _mm512_slli_epi32(b,14);\
|
||||
t2 = _mm512_srli_epi32(b,18);\
|
||||
b = _mm512_or_si512(t1,t2);\
|
||||
b = mm512_xoror( a, t1, t2 ); \
|
||||
t1 = _mm512_slli_epi32(a,10);\
|
||||
t2 = _mm512_srli_epi32(a,22);\
|
||||
a = mm512_xoror( b, t1, t2 ); \
|
||||
t1 = _mm512_slli_epi32(b,1);\
|
||||
t2 = _mm512_srli_epi32(b,31);\
|
||||
b = _mm512_or_si512(t1,t2);
|
||||
|
||||
/*
|
||||
#define MIXWORD4W(a,b,t1,t2)\
|
||||
b = _mm512_xor_si512(a,b);\
|
||||
t1 = _mm512_slli_epi32(a,2);\
|
||||
@@ -133,6 +166,7 @@ do { \
|
||||
t1 = _mm512_slli_epi32(b,1);\
|
||||
t2 = _mm512_srli_epi32(b,31);\
|
||||
b = _mm512_or_si512(t1,t2);
|
||||
*/
|
||||
|
||||
#define STEP_PART24W(a0,a1,t0,t1,c0,c1,tmp0,tmp1)\
|
||||
a1 = _mm512_shuffle_epi32(a1,147);\
|
||||
@@ -248,17 +282,10 @@ void rnd512_4way( luffa_4way_context *state, __m512i *msg )
|
||||
__m512i tmp[2];
|
||||
__m512i x[8];
|
||||
|
||||
t0 = chainv[0];
|
||||
t1 = chainv[1];
|
||||
|
||||
t0 = _mm512_xor_si512( t0, chainv[2] );
|
||||
t1 = _mm512_xor_si512( t1, chainv[3] );
|
||||
t0 = _mm512_xor_si512( t0, chainv[4] );
|
||||
t1 = _mm512_xor_si512( t1, chainv[5] );
|
||||
t0 = _mm512_xor_si512( t0, chainv[6] );
|
||||
t1 = _mm512_xor_si512( t1, chainv[7] );
|
||||
t0 = _mm512_xor_si512( t0, chainv[8] );
|
||||
t1 = _mm512_xor_si512( t1, chainv[9] );
|
||||
t0 = mm512_xor3( chainv[0], chainv[2], chainv[4] );
|
||||
t1 = mm512_xor3( chainv[1], chainv[3], chainv[5] );
|
||||
t0 = mm512_xor3( t0, chainv[6], chainv[8] );
|
||||
t1 = mm512_xor3( t1, chainv[7], chainv[9] );
|
||||
|
||||
MULT24W( t0, t1 );
|
||||
|
||||
@@ -319,8 +346,8 @@ void rnd512_4way( luffa_4way_context *state, __m512i *msg )
|
||||
chainv[3] = _mm512_xor_si512( chainv[3], chainv[1] );
|
||||
|
||||
MULT24W( chainv[0], chainv[1] );
|
||||
chainv[0] = _mm512_xor_si512( _mm512_xor_si512( chainv[0], t0 ), msg0 );
|
||||
chainv[1] = _mm512_xor_si512( _mm512_xor_si512( chainv[1], t1 ), msg1 );
|
||||
chainv[0] = mm512_xor3( chainv[0], t0, msg0 );
|
||||
chainv[1] = mm512_xor3( chainv[1], t1, msg1 );
|
||||
|
||||
MULT24W( msg0, msg1 );
|
||||
chainv[2] = _mm512_xor_si512( chainv[2], msg0 );
|
||||
@@ -398,19 +425,11 @@ void finalization512_4way( luffa_4way_context *state, uint32 *b )
|
||||
|
||||
/*---- blank round with m=0 ----*/
|
||||
rnd512_4way( state, zero );
|
||||
|
||||
t[0] = chainv[0];
|
||||
t[1] = chainv[1];
|
||||
|
||||
t[0] = _mm512_xor_si512( t[0], chainv[2] );
|
||||
t[1] = _mm512_xor_si512( t[1], chainv[3] );
|
||||
t[0] = _mm512_xor_si512( t[0], chainv[4] );
|
||||
t[1] = _mm512_xor_si512( t[1], chainv[5] );
|
||||
t[0] = _mm512_xor_si512( t[0], chainv[6] );
|
||||
t[1] = _mm512_xor_si512( t[1], chainv[7] );
|
||||
t[0] = _mm512_xor_si512( t[0], chainv[8] );
|
||||
t[1] = _mm512_xor_si512( t[1], chainv[9] );
|
||||
|
||||
|
||||
t[0] = mm512_xor3( chainv[0], chainv[2], chainv[4] );
|
||||
t[1] = mm512_xor3( chainv[1], chainv[3], chainv[5] );
|
||||
t[0] = mm512_xor3( t[0], chainv[6], chainv[8] );
|
||||
t[1] = mm512_xor3( t[1], chainv[7], chainv[9] );
|
||||
t[0] = _mm512_shuffle_epi32( t[0], 27 );
|
||||
t[1] = _mm512_shuffle_epi32( t[1], 27 );
|
||||
|
||||
@@ -676,8 +695,6 @@ do { \
|
||||
a1 = _mm256_or_si256( _mm256_srli_si256(a1,4), _mm256_slli_si256(b,12) ); \
|
||||
} while(0)
|
||||
|
||||
// confirm pointer arithmetic
|
||||
// ok but use array indexes
|
||||
#define STEP_PART(x,c0,c1,t)\
|
||||
SUBCRUMB(*x,*(x+1),*(x+2),*(x+3),*t);\
|
||||
SUBCRUMB(*(x+5),*(x+6),*(x+7),*(x+4),*t);\
|
||||
@@ -688,23 +705,23 @@ do { \
|
||||
ADD_CONSTANT(*x, *(x+4), c0, c1);
|
||||
|
||||
#define SUBCRUMB(a0,a1,a2,a3,t)\
|
||||
t = _mm256_load_si256(&a0);\
|
||||
t = a0;\
|
||||
a0 = _mm256_or_si256(a0,a1);\
|
||||
a2 = _mm256_xor_si256(a2,a3);\
|
||||
a1 = _mm256_andnot_si256(a1, m256_neg1 );\
|
||||
a1 = mm256_not( a1 );\
|
||||
a0 = _mm256_xor_si256(a0,a3);\
|
||||
a3 = _mm256_and_si256(a3,t);\
|
||||
a1 = _mm256_xor_si256(a1,a3);\
|
||||
a3 = _mm256_xor_si256(a3,a2);\
|
||||
a2 = _mm256_and_si256(a2,a0);\
|
||||
a0 = _mm256_andnot_si256(a0, m256_neg1 );\
|
||||
a0 = mm256_not( a0 );\
|
||||
a2 = _mm256_xor_si256(a2,a1);\
|
||||
a1 = _mm256_or_si256(a1,a3);\
|
||||
t = _mm256_xor_si256(t,a1);\
|
||||
a3 = _mm256_xor_si256(a3,a2);\
|
||||
a2 = _mm256_and_si256(a2,a1);\
|
||||
a1 = _mm256_xor_si256(a1,a0);\
|
||||
a0 = _mm256_load_si256(&t);\
|
||||
a0 = t;\
|
||||
|
||||
#define MIXWORD(a,b,t1,t2)\
|
||||
b = _mm256_xor_si256(a,b);\
|
||||
|
@@ -312,10 +312,26 @@ do { \
|
||||
BUPDATE1_8W( 7, 1 ); \
|
||||
} while (0)
|
||||
|
||||
#if defined(__AVX512VL__)
|
||||
|
||||
#define GAMMA_8W(n0, n1, n2, n4) \
|
||||
( g ## n0 = _mm256_ternarylogic_epi32( a ## n0, a ## n2, a ## n1, 0x4b ) )
|
||||
|
||||
#define THETA_8W(n0, n1, n2, n4) \
|
||||
( g ## n0 = mm256_xor3( a ## n0, a ## n1, a ## n4 ) )
|
||||
|
||||
#else
|
||||
|
||||
#define GAMMA_8W(n0, n1, n2, n4) \
|
||||
(g ## n0 = _mm256_xor_si256( a ## n0, \
|
||||
_mm256_or_si256( a ## n1, mm256_not( a ## n2 ) ) ) )
|
||||
|
||||
#define THETA_8W(n0, n1, n2, n4) \
|
||||
( g ## n0 = _mm256_xor_si256( a ## n0, _mm256_xor_si256( a ## n1, \
|
||||
a ## n4 ) ) )
|
||||
|
||||
#endif
|
||||
|
||||
#define PI_ALL_8W do { \
|
||||
a0 = g0; \
|
||||
a1 = mm256_rol_32( g7, 1 ); \
|
||||
@@ -336,9 +352,6 @@ do { \
|
||||
a16 = mm256_rol_32( g10, 8 ); \
|
||||
} while (0)
|
||||
|
||||
#define THETA_8W(n0, n1, n2, n4) \
|
||||
( g ## n0 = _mm256_xor_si256( a ## n0, _mm256_xor_si256( a ## n1, \
|
||||
a ## n4 ) ) )
|
||||
|
||||
#define SIGMA_ALL_8W do { \
|
||||
a0 = _mm256_xor_si256( g0, m256_one_32 ); \
|
||||
|
@@ -127,10 +127,8 @@ void quark_8way_hash( void *state, const void *input )
|
||||
|
||||
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
|
||||
|
||||
if ( ( vh_mask & 0x0f ) != 0x0f )
|
||||
groestl512_4way_full( &ctx.groestl, vhashA, vhashA, 64 );
|
||||
if ( ( vh_mask & 0xf0 ) != 0xf0 )
|
||||
groestl512_4way_full( &ctx.groestl, vhashB, vhashB, 64 );
|
||||
groestl512_4way_full( &ctx.groestl, vhashA, vhashA, 64 );
|
||||
groestl512_4way_full( &ctx.groestl, vhashB, vhashB, 64 );
|
||||
|
||||
rintrlv_4x128_8x64( vhash, vhashA, vhashB, 512 );
|
||||
|
||||
@@ -139,22 +137,14 @@ void quark_8way_hash( void *state, const void *input )
|
||||
dintrlv_8x64( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
|
||||
vhash, 512 );
|
||||
|
||||
if ( hash0[0] & 8 )
|
||||
groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
|
||||
if ( hash1[0] & 8 )
|
||||
groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
|
||||
if ( hash2[0] & 8)
|
||||
groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
|
||||
if ( hash3[0] & 8 )
|
||||
groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
|
||||
if ( hash4[0] & 8 )
|
||||
groestl512_full( &ctx.groestl, (char*)hash4, (char*)hash4, 512 );
|
||||
if ( hash5[0] & 8 )
|
||||
groestl512_full( &ctx.groestl, (char*)hash5, (char*)hash5, 512 );
|
||||
if ( hash6[0] & 8 )
|
||||
groestl512_full( &ctx.groestl, (char*)hash6, (char*)hash6, 512 );
|
||||
if ( hash7[0] & 8 )
|
||||
groestl512_full( &ctx.groestl, (char*)hash7, (char*)hash7, 512 );
|
||||
groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
|
||||
groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
|
||||
groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
|
||||
groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
|
||||
groestl512_full( &ctx.groestl, (char*)hash4, (char*)hash4, 512 );
|
||||
groestl512_full( &ctx.groestl, (char*)hash5, (char*)hash5, 512 );
|
||||
groestl512_full( &ctx.groestl, (char*)hash6, (char*)hash6, 512 );
|
||||
groestl512_full( &ctx.groestl, (char*)hash7, (char*)hash7, 512 );
|
||||
|
||||
intrlv_8x64( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
|
||||
512 );
|
||||
|
@@ -59,6 +59,8 @@ void sha256_4way_update( sha256_4way_context *sc, const void *data,
|
||||
size_t len );
|
||||
void sha256_4way_close( sha256_4way_context *sc, void *dst );
|
||||
void sha256_4way_full( void *dst, const void *data, size_t len );
|
||||
void sha256_4way_transform( __m128i *state_out, const __m128i *data,
|
||||
const __m128i *state_in );
|
||||
|
||||
#endif // SSE2
|
||||
|
||||
@@ -77,6 +79,8 @@ void sha256_8way_init( sha256_8way_context *sc );
|
||||
void sha256_8way_update( sha256_8way_context *sc, const void *data, size_t len );
|
||||
void sha256_8way_close( sha256_8way_context *sc, void *dst );
|
||||
void sha256_8way_full( void *dst, const void *data, size_t len );
|
||||
void sha256_8way_transform( __m256i *state_out, const __m256i *data,
|
||||
const __m256i *state_in );
|
||||
|
||||
#endif // AVX2
|
||||
|
||||
@@ -95,6 +99,12 @@ void sha256_16way_init( sha256_16way_context *sc );
|
||||
void sha256_16way_update( sha256_16way_context *sc, const void *data, size_t len );
|
||||
void sha256_16way_close( sha256_16way_context *sc, void *dst );
|
||||
void sha256_16way_full( void *dst, const void *data, size_t len );
|
||||
void sha256_16way_transform( __m512i *state_out, const __m512i *data,
|
||||
const __m512i *state_in );
|
||||
void sha256_16way_prehash_3rounds( __m512i *state_mid, const __m512i *W,
|
||||
const __m512i *state_in );
|
||||
void sha256_16way_final_rounds( __m512i *state_out, const __m512i *data,
|
||||
const __m512i *state_in, const __m512i *state_mid );
|
||||
|
||||
#endif // AVX512
|
||||
|
||||
|
@@ -195,8 +195,28 @@ static void sha256d_80_swap(uint32_t *hash, const uint32_t *data)
|
||||
hash[i] = swab32(hash[i]);
|
||||
}
|
||||
|
||||
extern void sha256d(unsigned char *hash, const unsigned char *data, int len)
|
||||
#if defined (__SHA__)
|
||||
|
||||
#include "algo/sha/sph_sha2.h"
|
||||
|
||||
void sha256d(unsigned char *hash, const unsigned char *data, int len)
|
||||
{
|
||||
sph_sha256_context ctx __attribute__ ((aligned (64)));
|
||||
|
||||
sph_sha256_init( &ctx );
|
||||
sph_sha256( &ctx, data, len );
|
||||
sph_sha256_close( &ctx, hash );
|
||||
|
||||
sph_sha256_init( &ctx );
|
||||
sph_sha256( &ctx, hash, 32 );
|
||||
sph_sha256_close( &ctx, hash );
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
void sha256d(unsigned char *hash, const unsigned char *data, int len)
|
||||
{
|
||||
|
||||
uint32_t S[16], T[16];
|
||||
int i, r;
|
||||
|
||||
@@ -220,6 +240,8 @@ extern void sha256d(unsigned char *hash, const unsigned char *data, int len)
|
||||
be32enc((uint32_t *)hash + i, T[i]);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
static inline void sha256d_preextend(uint32_t *W)
|
||||
{
|
||||
W[16] = s1(W[14]) + W[ 9] + s0(W[ 1]) + W[ 0];
|
||||
|
345
algo/sha/sha256-hash-2way-ni.c
Normal file
345
algo/sha/sha256-hash-2way-ni.c
Normal file
@@ -0,0 +1,345 @@
|
||||
/* Intel SHA extensions using C intrinsics */
|
||||
/* Written and place in public domain by Jeffrey Walton */
|
||||
/* Based on code from Intel, and by Sean Gulley for */
|
||||
/* the miTLS project. */
|
||||
|
||||
// A stripped down version with byte swapping removed.
|
||||
|
||||
#if defined(__SHA__)
|
||||
|
||||
#include "sha256-hash-opt.h"
|
||||
|
||||
void sha256_ni2way_transform( uint32_t *out_X, uint32_t*out_Y,
|
||||
const void *msg_X, const void *msg_Y,
|
||||
const uint32_t *in_X, const uint32_t *in_Y )
|
||||
{
|
||||
__m128i STATE0_X, STATE1_X, STATE0_Y, STATE1_Y;
|
||||
__m128i MSG_X, MSG_Y, TMP_X, TMP_Y;
|
||||
__m128i TMSG0_X, TMSG1_X, TMSG2_X, TMSG3_X;
|
||||
__m128i TMSG0_Y, TMSG1_Y, TMSG2_Y, TMSG3_Y;
|
||||
__m128i ABEF_SAVE_X, CDGH_SAVE_X,ABEF_SAVE_Y, CDGH_SAVE_Y;
|
||||
|
||||
// Load initial values
|
||||
TMP_X = _mm_load_si128((__m128i*) &in_X[0]);
|
||||
STATE1_X = _mm_load_si128((__m128i*) &in_X[4]);
|
||||
TMP_Y = _mm_load_si128((__m128i*) &in_Y[0]);
|
||||
STATE1_Y = _mm_load_si128((__m128i*) &in_Y[4]);
|
||||
|
||||
TMP_X = _mm_shuffle_epi32(TMP_X, 0xB1); // CDAB
|
||||
TMP_Y = _mm_shuffle_epi32(TMP_Y, 0xB1); // CDAB
|
||||
STATE1_X = _mm_shuffle_epi32(STATE1_X, 0x1B); // EFGH
|
||||
STATE1_Y = _mm_shuffle_epi32(STATE1_Y, 0x1B); // EFGH
|
||||
STATE0_X = _mm_alignr_epi8(TMP_X, STATE1_X, 8); // ABEF
|
||||
STATE0_Y = _mm_alignr_epi8(TMP_Y, STATE1_Y, 8); // ABEF
|
||||
STATE1_X = _mm_blend_epi16(STATE1_X, TMP_X, 0xF0); // CDGH
|
||||
STATE1_Y = _mm_blend_epi16(STATE1_Y, TMP_Y, 0xF0); // CDGH
|
||||
|
||||
// Save current hash
|
||||
ABEF_SAVE_X = STATE0_X;
|
||||
ABEF_SAVE_Y = STATE0_Y;
|
||||
CDGH_SAVE_X = STATE1_X;
|
||||
CDGH_SAVE_Y = STATE1_Y;
|
||||
|
||||
// Rounds 0-3
|
||||
TMSG0_X = _mm_load_si128((const __m128i*) (msg_X));
|
||||
TMSG0_Y = _mm_load_si128((const __m128i*) (msg_Y));
|
||||
TMP_X = _mm_set_epi64x(0xE9B5DBA5B5C0FBCFULL, 0x71374491428A2F98ULL);
|
||||
MSG_X = _mm_add_epi32( TMSG0_X, TMP_X );
|
||||
MSG_Y = _mm_add_epi32( TMSG0_Y, TMP_X );
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
|
||||
// Rounds 4-7
|
||||
TMSG1_X = _mm_load_si128((const __m128i*) (msg_X+16));
|
||||
TMSG1_Y = _mm_load_si128((const __m128i*) (msg_Y+16));
|
||||
TMP_X = _mm_set_epi64x(0xAB1C5ED5923F82A4ULL, 0x59F111F13956C25BULL);
|
||||
MSG_X = _mm_add_epi32(TMSG1_X, TMP_X );
|
||||
MSG_Y = _mm_add_epi32(TMSG1_Y, TMP_X );
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
TMSG0_X = _mm_sha256msg1_epu32(TMSG0_X, TMSG1_X);
|
||||
TMSG0_Y = _mm_sha256msg1_epu32(TMSG0_Y, TMSG1_Y);
|
||||
|
||||
// Rounds 8-11
|
||||
TMSG2_X = _mm_load_si128((const __m128i*) (msg_X+32));
|
||||
TMSG2_Y = _mm_load_si128((const __m128i*) (msg_Y+32));
|
||||
TMP_X = _mm_set_epi64x(0x550C7DC3243185BEULL, 0x12835B01D807AA98ULL);
|
||||
MSG_X = _mm_add_epi32(TMSG2_X, TMP_X );
|
||||
MSG_Y = _mm_add_epi32(TMSG2_Y, TMP_X );
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
TMSG1_X = _mm_sha256msg1_epu32(TMSG1_X, TMSG2_X);
|
||||
TMSG1_Y = _mm_sha256msg1_epu32(TMSG1_Y, TMSG2_Y);
|
||||
|
||||
// Rounds 12-15
|
||||
TMSG3_X = _mm_load_si128((const __m128i*) (msg_X+48));
|
||||
TMSG3_Y = _mm_load_si128((const __m128i*) (msg_Y+48));
|
||||
TMP_X = _mm_set_epi64x(0xC19BF1749BDC06A7ULL, 0x80DEB1FE72BE5D74ULL);
|
||||
MSG_X = _mm_add_epi32(TMSG3_X, TMP_X );
|
||||
MSG_Y = _mm_add_epi32(TMSG3_Y, TMP_X );
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
TMP_X = _mm_alignr_epi8(TMSG3_X, TMSG2_X, 4);
|
||||
TMP_Y = _mm_alignr_epi8(TMSG3_Y, TMSG2_Y, 4);
|
||||
TMSG0_X = _mm_add_epi32(TMSG0_X, TMP_X);
|
||||
TMSG0_Y = _mm_add_epi32(TMSG0_Y, TMP_Y);
|
||||
TMSG0_X = _mm_sha256msg2_epu32(TMSG0_X, TMSG3_X);
|
||||
TMSG0_Y = _mm_sha256msg2_epu32(TMSG0_Y, TMSG3_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
TMSG2_X = _mm_sha256msg1_epu32(TMSG2_X, TMSG3_X);
|
||||
TMSG2_Y = _mm_sha256msg1_epu32(TMSG2_Y, TMSG3_Y);
|
||||
|
||||
// Rounds 16-19
|
||||
TMP_X = _mm_set_epi64x(0x240CA1CC0FC19DC6ULL, 0xEFBE4786E49B69C1ULL);
|
||||
MSG_X = _mm_add_epi32(TMSG0_X, TMP_X );
|
||||
MSG_Y = _mm_add_epi32(TMSG0_Y, TMP_X );
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
TMP_X = _mm_alignr_epi8(TMSG0_X, TMSG3_X, 4);
|
||||
TMP_Y = _mm_alignr_epi8(TMSG0_Y, TMSG3_Y, 4);
|
||||
TMSG1_X = _mm_add_epi32(TMSG1_X, TMP_X);
|
||||
TMSG1_Y = _mm_add_epi32(TMSG1_Y, TMP_Y);
|
||||
TMSG1_X = _mm_sha256msg2_epu32(TMSG1_X, TMSG0_X);
|
||||
TMSG1_Y = _mm_sha256msg2_epu32(TMSG1_Y, TMSG0_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
TMSG3_X = _mm_sha256msg1_epu32(TMSG3_X, TMSG0_X);
|
||||
TMSG3_Y = _mm_sha256msg1_epu32(TMSG3_Y, TMSG0_Y);
|
||||
|
||||
// Rounds 20-23
|
||||
TMP_X = _mm_set_epi64x(0x76F988DA5CB0A9DCULL, 0x4A7484AA2DE92C6FULL);
|
||||
MSG_X = _mm_add_epi32(TMSG1_X, TMP_X );
|
||||
MSG_Y = _mm_add_epi32(TMSG1_Y, TMP_X );
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
TMP_X = _mm_alignr_epi8(TMSG1_X, TMSG0_X, 4);
|
||||
TMP_Y = _mm_alignr_epi8(TMSG1_Y, TMSG0_Y, 4);
|
||||
TMSG2_X = _mm_add_epi32(TMSG2_X, TMP_X);
|
||||
TMSG2_Y = _mm_add_epi32(TMSG2_Y, TMP_Y);
|
||||
TMSG2_X = _mm_sha256msg2_epu32(TMSG2_X, TMSG1_X);
|
||||
TMSG2_Y = _mm_sha256msg2_epu32(TMSG2_Y, TMSG1_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
TMSG0_X = _mm_sha256msg1_epu32(TMSG0_X, TMSG1_X);
|
||||
TMSG0_Y = _mm_sha256msg1_epu32(TMSG0_Y, TMSG1_Y);
|
||||
|
||||
// Rounds 24-27
|
||||
TMP_X = _mm_set_epi64x(0xBF597FC7B00327C8ULL, 0xA831C66D983E5152ULL);
|
||||
MSG_X = _mm_add_epi32(TMSG2_X, TMP_X );
|
||||
MSG_Y = _mm_add_epi32(TMSG2_Y, TMP_X );
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
TMP_X = _mm_alignr_epi8(TMSG2_X, TMSG1_X, 4);
|
||||
TMP_Y = _mm_alignr_epi8(TMSG2_Y, TMSG1_Y, 4);
|
||||
TMSG3_X = _mm_add_epi32(TMSG3_X, TMP_X);
|
||||
TMSG3_Y = _mm_add_epi32(TMSG3_Y, TMP_Y);
|
||||
TMSG3_X = _mm_sha256msg2_epu32(TMSG3_X, TMSG2_X);
|
||||
TMSG3_Y = _mm_sha256msg2_epu32(TMSG3_Y, TMSG2_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
TMSG1_X = _mm_sha256msg1_epu32(TMSG1_X, TMSG2_X);
|
||||
TMSG1_Y = _mm_sha256msg1_epu32(TMSG1_Y, TMSG2_Y);
|
||||
|
||||
// Rounds 28-31
|
||||
TMP_X = _mm_set_epi64x(0x1429296706CA6351ULL, 0xD5A79147C6E00BF3ULL);
|
||||
MSG_X = _mm_add_epi32(TMSG3_X, TMP_X );
|
||||
MSG_Y = _mm_add_epi32(TMSG3_Y, TMP_X );
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
TMP_X = _mm_alignr_epi8(TMSG3_X, TMSG2_X, 4);
|
||||
TMP_Y = _mm_alignr_epi8(TMSG3_Y, TMSG2_Y, 4);
|
||||
TMSG0_X = _mm_add_epi32(TMSG0_X, TMP_X);
|
||||
TMSG0_Y = _mm_add_epi32(TMSG0_Y, TMP_Y);
|
||||
TMSG0_X = _mm_sha256msg2_epu32(TMSG0_X, TMSG3_X);
|
||||
TMSG0_Y = _mm_sha256msg2_epu32(TMSG0_Y, TMSG3_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
TMSG2_X = _mm_sha256msg1_epu32(TMSG2_X, TMSG3_X);
|
||||
TMSG2_Y = _mm_sha256msg1_epu32(TMSG2_Y, TMSG3_Y);
|
||||
|
||||
// Rounds 32-35
|
||||
TMP_X = _mm_set_epi64x(0x53380D134D2C6DFCULL, 0x2E1B213827B70A85ULL);
|
||||
MSG_X = _mm_add_epi32(TMSG0_X, TMP_X );
|
||||
MSG_Y = _mm_add_epi32(TMSG0_Y, TMP_X );
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
TMP_X = _mm_alignr_epi8(TMSG0_X, TMSG3_X, 4);
|
||||
TMP_Y = _mm_alignr_epi8(TMSG0_Y, TMSG3_Y, 4);
|
||||
TMSG1_X = _mm_add_epi32(TMSG1_X, TMP_X);
|
||||
TMSG1_Y = _mm_add_epi32(TMSG1_Y, TMP_Y);
|
||||
TMSG1_X = _mm_sha256msg2_epu32(TMSG1_X, TMSG0_X);
|
||||
TMSG1_Y = _mm_sha256msg2_epu32(TMSG1_Y, TMSG0_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
TMSG3_X = _mm_sha256msg1_epu32(TMSG3_X, TMSG0_X);
|
||||
TMSG3_Y = _mm_sha256msg1_epu32(TMSG3_Y, TMSG0_Y);
|
||||
|
||||
// Rounds 36-39
|
||||
TMP_X = _mm_set_epi64x(0x92722C8581C2C92EULL, 0x766A0ABB650A7354ULL);
|
||||
MSG_X = _mm_add_epi32(TMSG1_X, TMP_X);
|
||||
MSG_Y = _mm_add_epi32(TMSG1_Y, TMP_X);
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
TMP_X = _mm_alignr_epi8(TMSG1_X, TMSG0_X, 4);
|
||||
TMP_Y = _mm_alignr_epi8(TMSG1_Y, TMSG0_Y, 4);
|
||||
TMSG2_X = _mm_add_epi32(TMSG2_X, TMP_X);
|
||||
TMSG2_Y = _mm_add_epi32(TMSG2_Y, TMP_Y);
|
||||
TMSG2_X = _mm_sha256msg2_epu32(TMSG2_X, TMSG1_X);
|
||||
TMSG2_Y = _mm_sha256msg2_epu32(TMSG2_Y, TMSG1_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
TMSG0_X = _mm_sha256msg1_epu32(TMSG0_X, TMSG1_X);
|
||||
TMSG0_Y = _mm_sha256msg1_epu32(TMSG0_Y, TMSG1_Y);
|
||||
|
||||
// Rounds 40-43
|
||||
TMP_X = _mm_set_epi64x(0xC76C51A3C24B8B70ULL, 0xA81A664BA2BFE8A1ULL);
|
||||
MSG_X = _mm_add_epi32(TMSG2_X, TMP_X);
|
||||
MSG_Y = _mm_add_epi32(TMSG2_Y, TMP_X);
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
TMP_X = _mm_alignr_epi8(TMSG2_X, TMSG1_X, 4);
|
||||
TMP_Y = _mm_alignr_epi8(TMSG2_Y, TMSG1_Y, 4);
|
||||
TMSG3_X = _mm_add_epi32(TMSG3_X, TMP_X);
|
||||
TMSG3_Y = _mm_add_epi32(TMSG3_Y, TMP_Y);
|
||||
TMSG3_X = _mm_sha256msg2_epu32(TMSG3_X, TMSG2_X);
|
||||
TMSG3_Y = _mm_sha256msg2_epu32(TMSG3_Y, TMSG2_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
TMSG1_X = _mm_sha256msg1_epu32(TMSG1_X, TMSG2_X);
|
||||
TMSG1_Y = _mm_sha256msg1_epu32(TMSG1_Y, TMSG2_Y);
|
||||
|
||||
// Rounds 44-47
|
||||
TMP_X = _mm_set_epi64x(0x106AA070F40E3585ULL, 0xD6990624D192E819ULL);
|
||||
MSG_X = _mm_add_epi32(TMSG3_X, TMP_X);
|
||||
MSG_Y = _mm_add_epi32(TMSG3_Y, TMP_X);
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
TMP_X = _mm_alignr_epi8(TMSG3_X, TMSG2_X, 4);
|
||||
TMP_Y = _mm_alignr_epi8(TMSG3_Y, TMSG2_Y, 4);
|
||||
TMSG0_X = _mm_add_epi32(TMSG0_X, TMP_X);
|
||||
TMSG0_Y = _mm_add_epi32(TMSG0_Y, TMP_Y);
|
||||
TMSG0_X = _mm_sha256msg2_epu32(TMSG0_X, TMSG3_X);
|
||||
TMSG0_Y = _mm_sha256msg2_epu32(TMSG0_Y, TMSG3_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
TMSG2_X = _mm_sha256msg1_epu32(TMSG2_X, TMSG3_X);
|
||||
TMSG2_Y = _mm_sha256msg1_epu32(TMSG2_Y, TMSG3_Y);
|
||||
|
||||
// Rounds 48-51
|
||||
TMP_X = _mm_set_epi64x(0x34B0BCB52748774CULL, 0x1E376C0819A4C116ULL);
|
||||
MSG_X = _mm_add_epi32(TMSG0_X, TMP_X );
|
||||
MSG_Y = _mm_add_epi32(TMSG0_Y, TMP_X );
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
TMP_X = _mm_alignr_epi8(TMSG0_X, TMSG3_X, 4);
|
||||
TMP_Y = _mm_alignr_epi8(TMSG0_Y, TMSG3_Y, 4);
|
||||
TMSG1_X = _mm_add_epi32(TMSG1_X, TMP_X);
|
||||
TMSG1_Y = _mm_add_epi32(TMSG1_Y, TMP_Y);
|
||||
TMSG1_X = _mm_sha256msg2_epu32(TMSG1_X, TMSG0_X);
|
||||
TMSG1_Y = _mm_sha256msg2_epu32(TMSG1_Y, TMSG0_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
TMSG3_X = _mm_sha256msg1_epu32(TMSG3_X, TMSG0_X);
|
||||
TMSG3_Y = _mm_sha256msg1_epu32(TMSG3_Y, TMSG0_Y);
|
||||
|
||||
// Rounds 52-55
|
||||
TMP_X = _mm_set_epi64x(0x682E6FF35B9CCA4FULL, 0x4ED8AA4A391C0CB3ULL);
|
||||
MSG_X = _mm_add_epi32(TMSG1_X, TMP_X );
|
||||
MSG_Y = _mm_add_epi32(TMSG1_Y, TMP_X );
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
TMP_X = _mm_alignr_epi8(TMSG1_X, TMSG0_X, 4);
|
||||
TMP_Y = _mm_alignr_epi8(TMSG1_Y, TMSG0_Y, 4);
|
||||
TMSG2_X = _mm_add_epi32(TMSG2_X, TMP_X);
|
||||
TMSG2_Y = _mm_add_epi32(TMSG2_Y, TMP_Y);
|
||||
TMSG2_X = _mm_sha256msg2_epu32(TMSG2_X, TMSG1_X);
|
||||
TMSG2_Y = _mm_sha256msg2_epu32(TMSG2_Y, TMSG1_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
|
||||
// Rounds 56-59
|
||||
TMP_X = _mm_set_epi64x(0x8CC7020884C87814ULL, 0x78A5636F748F82EEULL);
|
||||
MSG_X = _mm_add_epi32(TMSG2_X, TMP_X);
|
||||
MSG_Y = _mm_add_epi32(TMSG2_Y, TMP_X);
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
TMP_X = _mm_alignr_epi8(TMSG2_X, TMSG1_X, 4);
|
||||
TMP_Y = _mm_alignr_epi8(TMSG2_Y, TMSG1_Y, 4);
|
||||
TMSG3_X = _mm_add_epi32(TMSG3_X, TMP_X);
|
||||
TMSG3_Y = _mm_add_epi32(TMSG3_Y, TMP_Y);
|
||||
TMSG3_X = _mm_sha256msg2_epu32(TMSG3_X, TMSG2_X);
|
||||
TMSG3_Y = _mm_sha256msg2_epu32(TMSG3_Y, TMSG2_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
|
||||
// Rounds 60-63
|
||||
TMP_X = _mm_set_epi64x(0xC67178F2BEF9A3F7ULL, 0xA4506CEB90BEFFFAULL);
|
||||
MSG_X = _mm_add_epi32(TMSG3_X, TMP_X);
|
||||
MSG_Y = _mm_add_epi32(TMSG3_Y, TMP_X);
|
||||
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
|
||||
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
|
||||
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
|
||||
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
|
||||
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
|
||||
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
|
||||
|
||||
// Add values back to state
|
||||
STATE0_X = _mm_add_epi32(STATE0_X, ABEF_SAVE_X);
|
||||
STATE1_X = _mm_add_epi32(STATE1_X, CDGH_SAVE_X);
|
||||
STATE0_Y = _mm_add_epi32(STATE0_Y, ABEF_SAVE_Y);
|
||||
STATE1_Y = _mm_add_epi32(STATE1_Y, CDGH_SAVE_Y);
|
||||
|
||||
TMP_X = _mm_shuffle_epi32(STATE0_X, 0x1B); // FEBA
|
||||
TMP_Y = _mm_shuffle_epi32(STATE0_Y, 0x1B); // FEBA
|
||||
STATE1_X = _mm_shuffle_epi32(STATE1_X, 0xB1); // DCHG
|
||||
STATE1_Y = _mm_shuffle_epi32(STATE1_Y, 0xB1); // DCHG
|
||||
STATE0_X = _mm_blend_epi16(TMP_X, STATE1_X, 0xF0); // DCBA
|
||||
STATE0_Y = _mm_blend_epi16(TMP_Y, STATE1_Y, 0xF0); // DCBA
|
||||
STATE1_X = _mm_alignr_epi8(STATE1_X, TMP_X, 8); // ABEF
|
||||
STATE1_Y = _mm_alignr_epi8(STATE1_Y, TMP_Y, 8); // ABEF
|
||||
|
||||
// Save state
|
||||
_mm_store_si128((__m128i*) &out_X[0], STATE0_X);
|
||||
_mm_store_si128((__m128i*) &out_X[4], STATE1_X);
|
||||
_mm_store_si128((__m128i*) &out_Y[0], STATE0_Y);
|
||||
_mm_store_si128((__m128i*) &out_Y[4], STATE1_Y);
|
||||
}
|
||||
|
||||
#endif
|
@@ -74,9 +74,20 @@ static const uint32_t K256[64] =
|
||||
#define CHs(X, Y, Z) \
|
||||
_mm_xor_si128( _mm_and_si128( _mm_xor_si128( Y, Z ), X ), Z )
|
||||
|
||||
/*
|
||||
#define MAJs(X, Y, Z) \
|
||||
_mm_or_si128( _mm_and_si128( X, Y ), \
|
||||
_mm_and_si128( _mm_or_si128( X, Y ), Z ) )
|
||||
*/
|
||||
/*
|
||||
#define MAJs(X, Y, Z) \
|
||||
_mm_xor_si128( Y, _mm_and_si128( _mm_xor_si128( X, Y ), \
|
||||
_mm_xor_si128( Y, Z ) ) )
|
||||
*/
|
||||
|
||||
#define MAJs(X, Y, Z) \
|
||||
_mm_xor_si128( Y, _mm_and_si128( X_xor_Y = _mm_xor_si128( X, Y ), \
|
||||
Y_xor_Z ) )
|
||||
|
||||
#define BSG2_0(x) \
|
||||
_mm_xor_si128( _mm_xor_si128( \
|
||||
@@ -94,6 +105,7 @@ static const uint32_t K256[64] =
|
||||
_mm_xor_si128( _mm_xor_si128( \
|
||||
mm128_ror_32(x, 17), mm128_ror_32(x, 19) ), _mm_srli_epi32(x, 10) )
|
||||
|
||||
/*
|
||||
#define SHA2s_4WAY_STEP(A, B, C, D, E, F, G, H, i, j) \
|
||||
do { \
|
||||
__m128i K = _mm_set1_epi32( K256[( (j)+(i) )] ); \
|
||||
@@ -122,9 +134,9 @@ do { \
|
||||
H = _mm_add_epi32( T1, T2 ); \
|
||||
D = _mm_add_epi32( D, T1 ); \
|
||||
} while (0)
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
#define SHA2s_4WAY_STEP(A, B, C, D, E, F, G, H, i, j) \
|
||||
do { \
|
||||
__m128i T1, T2; \
|
||||
@@ -132,16 +144,98 @@ do { \
|
||||
T1 = _mm_add_epi32( H, mm128_add4_32( BSG2_1(E), CHs(E, F, G), \
|
||||
K, W[i] ) ); \
|
||||
T2 = _mm_add_epi32( BSG2_0(A), MAJs(A, B, C) ); \
|
||||
Y_xor_Z = X_xor_Y; \
|
||||
D = _mm_add_epi32( D, T1 ); \
|
||||
H = _mm_add_epi32( T1, T2 ); \
|
||||
} while (0)
|
||||
*/
|
||||
|
||||
|
||||
void sha256_4way_transform( __m128i *state_out, const __m128i *data,
|
||||
const __m128i *state_in )
|
||||
{
|
||||
__m128i A, B, C, D, E, F, G, H, X_xor_Y, Y_xor_Z;
|
||||
__m128i W[16];
|
||||
|
||||
memcpy_128( W, data, 16 );
|
||||
|
||||
A = state_in[0];
|
||||
B = state_in[1];
|
||||
C = state_in[2];
|
||||
D = state_in[3];
|
||||
E = state_in[4];
|
||||
F = state_in[5];
|
||||
G = state_in[6];
|
||||
H = state_in[7];
|
||||
Y_xor_Z = _mm_xor_si128( B, C );
|
||||
|
||||
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
|
||||
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
|
||||
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
|
||||
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 3, 0 );
|
||||
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 4, 0 );
|
||||
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 5, 0 );
|
||||
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 6, 0 );
|
||||
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 7, 0 );
|
||||
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 8, 0 );
|
||||
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 9, 0 );
|
||||
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 10, 0 );
|
||||
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 11, 0 );
|
||||
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 12, 0 );
|
||||
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 13, 0 );
|
||||
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 14, 0 );
|
||||
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 15, 0 );
|
||||
|
||||
for ( int j = 16; j < 64; j += 16 )
|
||||
{
|
||||
W[ 0] = SHA2s_MEXP( 14, 9, 1, 0 );
|
||||
W[ 1] = SHA2s_MEXP( 15, 10, 2, 1 );
|
||||
W[ 2] = SHA2s_MEXP( 0, 11, 3, 2 );
|
||||
W[ 3] = SHA2s_MEXP( 1, 12, 4, 3 );
|
||||
W[ 4] = SHA2s_MEXP( 2, 13, 5, 4 );
|
||||
W[ 5] = SHA2s_MEXP( 3, 14, 6, 5 );
|
||||
W[ 6] = SHA2s_MEXP( 4, 15, 7, 6 );
|
||||
W[ 7] = SHA2s_MEXP( 5, 0, 8, 7 );
|
||||
W[ 8] = SHA2s_MEXP( 6, 1, 9, 8 );
|
||||
W[ 9] = SHA2s_MEXP( 7, 2, 10, 9 );
|
||||
W[10] = SHA2s_MEXP( 8, 3, 11, 10 );
|
||||
W[11] = SHA2s_MEXP( 9, 4, 12, 11 );
|
||||
W[12] = SHA2s_MEXP( 10, 5, 13, 12 );
|
||||
W[13] = SHA2s_MEXP( 11, 6, 14, 13 );
|
||||
W[14] = SHA2s_MEXP( 12, 7, 15, 14 );
|
||||
W[15] = SHA2s_MEXP( 13, 8, 0, 15 );
|
||||
|
||||
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, j );
|
||||
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, j );
|
||||
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, j );
|
||||
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 3, j );
|
||||
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 4, j );
|
||||
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 5, j );
|
||||
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 6, j );
|
||||
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 7, j );
|
||||
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 8, j );
|
||||
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 9, j );
|
||||
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 10, j );
|
||||
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 11, j );
|
||||
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 12, j );
|
||||
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 13, j );
|
||||
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 14, j );
|
||||
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 15, j );
|
||||
}
|
||||
|
||||
state_out[0] = _mm_add_epi32( state_in[0], A );
|
||||
state_out[1] = _mm_add_epi32( state_in[1], B );
|
||||
state_out[2] = _mm_add_epi32( state_in[2], C );
|
||||
state_out[3] = _mm_add_epi32( state_in[3], D );
|
||||
state_out[4] = _mm_add_epi32( state_in[4], E );
|
||||
state_out[5] = _mm_add_epi32( state_in[5], F );
|
||||
state_out[6] = _mm_add_epi32( state_in[6], G );
|
||||
state_out[7] = _mm_add_epi32( state_in[7], H );
|
||||
}
|
||||
|
||||
static void
|
||||
sha256_4way_round( sha256_4way_context *ctx, __m128i *in, __m128i r[8] )
|
||||
{
|
||||
register __m128i A, B, C, D, E, F, G, H;
|
||||
register __m128i A, B, C, D, E, F, G, H, X_xor_Y, Y_xor_Z;
|
||||
__m128i W[16];
|
||||
|
||||
mm128_block_bswap_32( W, in );
|
||||
@@ -170,6 +264,8 @@ sha256_4way_round( sha256_4way_context *ctx, __m128i *in, __m128i r[8] )
|
||||
H = m128_const1_64( 0x5BE0CD195BE0CD19 );
|
||||
}
|
||||
|
||||
Y_xor_Z = _mm_xor_si128( B, C );
|
||||
|
||||
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
|
||||
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
|
||||
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
|
||||
@@ -321,10 +417,8 @@ void sha256_4way_close( sha256_4way_context *sc, void *dst )
|
||||
high = (sc->count_high << 3) | (low >> 29);
|
||||
low = low << 3;
|
||||
|
||||
sc->buf[ pad >> 2 ] =
|
||||
mm128_bswap_32( m128_const1_32( high ) );
|
||||
sc->buf[ ( pad+4 ) >> 2 ] =
|
||||
mm128_bswap_32( m128_const1_32( low ) );
|
||||
sc->buf[ pad >> 2 ] = m128_const1_32( bswap_32( high ) );
|
||||
sc->buf[( pad+4 ) >> 2 ] = m128_const1_32( bswap_32( low ) );
|
||||
sha256_4way_round( sc, sc->buf, sc->val );
|
||||
|
||||
mm128_block_bswap_32( dst, sc->val );
|
||||
@@ -342,12 +436,39 @@ void sha256_4way_full( void *dst, const void *data, size_t len )
|
||||
|
||||
// SHA-256 8 way
|
||||
|
||||
#if defined(__AVX512VL__)
|
||||
|
||||
#define CHx(X, Y, Z) \
|
||||
_mm256_ternarylogic_epi32( X, Y, Z, 0xca )
|
||||
|
||||
#define MAJx(X, Y, Z) \
|
||||
_mm256_ternarylogic_epi32( X, Y, Z, 0xe8 )
|
||||
|
||||
#define BSG2_0x(x) \
|
||||
mm256_xor3( mm256_ror_32(x, 2), mm256_ror_32(x, 13), mm256_ror_32(x, 22) )
|
||||
|
||||
#define BSG2_1x(x) \
|
||||
mm256_xor3( mm256_ror_32(x, 6), mm256_ror_32(x, 11), mm256_ror_32(x, 25) )
|
||||
|
||||
#define SSG2_0x(x) \
|
||||
mm256_xor3( mm256_ror_32(x, 7), mm256_ror_32(x, 18), _mm256_srli_epi32(x, 3) )
|
||||
|
||||
#define SSG2_1x(x) \
|
||||
mm256_xor3( mm256_ror_32(x, 17), mm256_ror_32(x, 19), _mm256_srli_epi32(x, 10) )
|
||||
|
||||
#else // AVX2
|
||||
|
||||
#define CHx(X, Y, Z) \
|
||||
_mm256_xor_si256( _mm256_and_si256( _mm256_xor_si256( Y, Z ), X ), Z )
|
||||
|
||||
#define MAJx(X, Y, Z) \
|
||||
_mm256_or_si256( _mm256_and_si256( X, Y ), \
|
||||
_mm256_and_si256( _mm256_or_si256( X, Y ), Z ) )
|
||||
_mm256_xor_si256( Y, _mm256_and_si256( _mm256_xor_si256( X, Y ), \
|
||||
_mm256_xor_si256( Y, Z ) ) )
|
||||
/*
|
||||
#define MAJx(X, Y, Z) \
|
||||
_mm256_xor_si256( Y, _mm256_and_si256( X_xor_Y = _mm256_xor_si256( X, Y ), \
|
||||
Y_xor_Z ) )
|
||||
*/
|
||||
|
||||
#define BSG2_0x(x) \
|
||||
_mm256_xor_si256( _mm256_xor_si256( \
|
||||
@@ -365,6 +486,8 @@ void sha256_4way_full( void *dst, const void *data, size_t len )
|
||||
_mm256_xor_si256( _mm256_xor_si256( \
|
||||
mm256_ror_32(x, 17), mm256_ror_32(x, 19) ), _mm256_srli_epi32(x, 10) )
|
||||
|
||||
#endif // AVX512 else AVX2
|
||||
|
||||
#define SHA2x_MEXP( a, b, c, d ) \
|
||||
mm256_add4_32( SSG2_1x( W[a] ), W[b], SSG2_0x( W[c] ), W[d] );
|
||||
|
||||
@@ -379,8 +502,89 @@ do { \
|
||||
H = _mm256_add_epi32( T1, T2 ); \
|
||||
} while (0)
|
||||
|
||||
void sha256_8way_transform( __m256i *state_out, const __m256i *data,
|
||||
const __m256i *state_in )
|
||||
{
|
||||
__m256i A, B, C, D, E, F, G, H;
|
||||
__m256i W[16];
|
||||
|
||||
memcpy_256( W, data, 16 );
|
||||
|
||||
A = state_in[0];
|
||||
B = state_in[1];
|
||||
C = state_in[2];
|
||||
D = state_in[3];
|
||||
E = state_in[4];
|
||||
F = state_in[5];
|
||||
G = state_in[6];
|
||||
H = state_in[7];
|
||||
|
||||
SHA2s_8WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
|
||||
SHA2s_8WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
|
||||
SHA2s_8WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
|
||||
SHA2s_8WAY_STEP( F, G, H, A, B, C, D, E, 3, 0 );
|
||||
SHA2s_8WAY_STEP( E, F, G, H, A, B, C, D, 4, 0 );
|
||||
SHA2s_8WAY_STEP( D, E, F, G, H, A, B, C, 5, 0 );
|
||||
SHA2s_8WAY_STEP( C, D, E, F, G, H, A, B, 6, 0 );
|
||||
SHA2s_8WAY_STEP( B, C, D, E, F, G, H, A, 7, 0 );
|
||||
SHA2s_8WAY_STEP( A, B, C, D, E, F, G, H, 8, 0 );
|
||||
SHA2s_8WAY_STEP( H, A, B, C, D, E, F, G, 9, 0 );
|
||||
SHA2s_8WAY_STEP( G, H, A, B, C, D, E, F, 10, 0 );
|
||||
SHA2s_8WAY_STEP( F, G, H, A, B, C, D, E, 11, 0 );
|
||||
SHA2s_8WAY_STEP( E, F, G, H, A, B, C, D, 12, 0 );
|
||||
SHA2s_8WAY_STEP( D, E, F, G, H, A, B, C, 13, 0 );
|
||||
SHA2s_8WAY_STEP( C, D, E, F, G, H, A, B, 14, 0 );
|
||||
SHA2s_8WAY_STEP( B, C, D, E, F, G, H, A, 15, 0 );
|
||||
|
||||
for ( int j = 16; j < 64; j += 16 )
|
||||
{
|
||||
W[ 0] = SHA2x_MEXP( 14, 9, 1, 0 );
|
||||
W[ 1] = SHA2x_MEXP( 15, 10, 2, 1 );
|
||||
W[ 2] = SHA2x_MEXP( 0, 11, 3, 2 );
|
||||
W[ 3] = SHA2x_MEXP( 1, 12, 4, 3 );
|
||||
W[ 4] = SHA2x_MEXP( 2, 13, 5, 4 );
|
||||
W[ 5] = SHA2x_MEXP( 3, 14, 6, 5 );
|
||||
W[ 6] = SHA2x_MEXP( 4, 15, 7, 6 );
|
||||
W[ 7] = SHA2x_MEXP( 5, 0, 8, 7 );
|
||||
W[ 8] = SHA2x_MEXP( 6, 1, 9, 8 );
|
||||
W[ 9] = SHA2x_MEXP( 7, 2, 10, 9 );
|
||||
W[10] = SHA2x_MEXP( 8, 3, 11, 10 );
|
||||
W[11] = SHA2x_MEXP( 9, 4, 12, 11 );
|
||||
W[12] = SHA2x_MEXP( 10, 5, 13, 12 );
|
||||
W[13] = SHA2x_MEXP( 11, 6, 14, 13 );
|
||||
W[14] = SHA2x_MEXP( 12, 7, 15, 14 );
|
||||
W[15] = SHA2x_MEXP( 13, 8, 0, 15 );
|
||||
|
||||
SHA2s_8WAY_STEP( A, B, C, D, E, F, G, H, 0, j );
|
||||
SHA2s_8WAY_STEP( H, A, B, C, D, E, F, G, 1, j );
|
||||
SHA2s_8WAY_STEP( G, H, A, B, C, D, E, F, 2, j );
|
||||
SHA2s_8WAY_STEP( F, G, H, A, B, C, D, E, 3, j );
|
||||
SHA2s_8WAY_STEP( E, F, G, H, A, B, C, D, 4, j );
|
||||
SHA2s_8WAY_STEP( D, E, F, G, H, A, B, C, 5, j );
|
||||
SHA2s_8WAY_STEP( C, D, E, F, G, H, A, B, 6, j );
|
||||
SHA2s_8WAY_STEP( B, C, D, E, F, G, H, A, 7, j );
|
||||
SHA2s_8WAY_STEP( A, B, C, D, E, F, G, H, 8, j );
|
||||
SHA2s_8WAY_STEP( H, A, B, C, D, E, F, G, 9, j );
|
||||
SHA2s_8WAY_STEP( G, H, A, B, C, D, E, F, 10, j );
|
||||
SHA2s_8WAY_STEP( F, G, H, A, B, C, D, E, 11, j );
|
||||
SHA2s_8WAY_STEP( E, F, G, H, A, B, C, D, 12, j );
|
||||
SHA2s_8WAY_STEP( D, E, F, G, H, A, B, C, 13, j );
|
||||
SHA2s_8WAY_STEP( C, D, E, F, G, H, A, B, 14, j );
|
||||
SHA2s_8WAY_STEP( B, C, D, E, F, G, H, A, 15, j );
|
||||
}
|
||||
|
||||
state_out[0] = _mm256_add_epi32( state_in[0], A );
|
||||
state_out[1] = _mm256_add_epi32( state_in[1], B );
|
||||
state_out[2] = _mm256_add_epi32( state_in[2], C );
|
||||
state_out[3] = _mm256_add_epi32( state_in[3], D );
|
||||
state_out[4] = _mm256_add_epi32( state_in[4], E );
|
||||
state_out[5] = _mm256_add_epi32( state_in[5], F );
|
||||
state_out[6] = _mm256_add_epi32( state_in[6], G );
|
||||
state_out[7] = _mm256_add_epi32( state_in[7], H );
|
||||
}
|
||||
|
||||
static void
|
||||
sha256_8way_round( sha256_8way_context *ctx, __m256i *in, __m256i r[8] )
|
||||
sha256_8way_round( sha256_8way_context *ctx, __m256i *in, __m256i r[8] )
|
||||
{
|
||||
register __m256i A, B, C, D, E, F, G, H;
|
||||
__m256i W[16];
|
||||
@@ -566,10 +770,8 @@ void sha256_8way_close( sha256_8way_context *sc, void *dst )
|
||||
high = (sc->count_high << 3) | (low >> 29);
|
||||
low = low << 3;
|
||||
|
||||
sc->buf[ pad >> 2 ] =
|
||||
mm256_bswap_32( m256_const1_32( high ) );
|
||||
sc->buf[ ( pad+4 ) >> 2 ] =
|
||||
mm256_bswap_32( m256_const1_32( low ) );
|
||||
sc->buf[ pad >> 2 ] = m256_const1_32( bswap_32( high ) );
|
||||
sc->buf[ ( pad+4 ) >> 2 ] = m256_const1_32( bswap_32( low ) );
|
||||
|
||||
sha256_8way_round( sc, sc->buf, sc->val );
|
||||
|
||||
@@ -589,27 +791,22 @@ void sha256_8way_full( void *dst, const void *data, size_t len )
|
||||
// SHA-256 16 way
|
||||
|
||||
#define CHx16(X, Y, Z) \
|
||||
_mm512_xor_si512( _mm512_and_si512( _mm512_xor_si512( Y, Z ), X ), Z )
|
||||
_mm512_ternarylogic_epi32( X, Y, Z, 0xca )
|
||||
|
||||
#define MAJx16(X, Y, Z) \
|
||||
_mm512_or_si512( _mm512_and_si512( X, Y ), \
|
||||
_mm512_and_si512( _mm512_or_si512( X, Y ), Z ) )
|
||||
_mm512_ternarylogic_epi32( X, Y, Z, 0xe8 )
|
||||
|
||||
#define BSG2_0x16(x) \
|
||||
_mm512_xor_si512( _mm512_xor_si512( \
|
||||
mm512_ror_32(x, 2), mm512_ror_32(x, 13) ), mm512_ror_32( x, 22) )
|
||||
mm512_xor3( mm512_ror_32(x, 2), mm512_ror_32(x, 13), mm512_ror_32(x, 22) )
|
||||
|
||||
#define BSG2_1x16(x) \
|
||||
_mm512_xor_si512( _mm512_xor_si512( \
|
||||
mm512_ror_32(x, 6), mm512_ror_32(x, 11) ), mm512_ror_32( x, 25) )
|
||||
mm512_xor3( mm512_ror_32(x, 6), mm512_ror_32(x, 11), mm512_ror_32(x, 25) )
|
||||
|
||||
#define SSG2_0x16(x) \
|
||||
_mm512_xor_si512( _mm512_xor_si512( \
|
||||
mm512_ror_32(x, 7), mm512_ror_32(x, 18) ), _mm512_srli_epi32(x, 3) )
|
||||
mm512_xor3( mm512_ror_32(x, 7), mm512_ror_32(x, 18), _mm512_srli_epi32(x, 3) )
|
||||
|
||||
#define SSG2_1x16(x) \
|
||||
_mm512_xor_si512( _mm512_xor_si512( \
|
||||
mm512_ror_32(x, 17), mm512_ror_32(x, 19) ), _mm512_srli_epi32(x, 10) )
|
||||
mm512_xor3( mm512_ror_32(x, 17), mm512_ror_32(x, 19), _mm512_srli_epi32(x, 10) )
|
||||
|
||||
#define SHA2x16_MEXP( a, b, c, d ) \
|
||||
mm512_add4_32( SSG2_1x16( W[a] ), W[b], SSG2_0x16( W[c] ), W[d] );
|
||||
@@ -625,10 +822,216 @@ do { \
|
||||
H = _mm512_add_epi32( T1, T2 ); \
|
||||
} while (0)
|
||||
|
||||
// Tranform one 16 lane by 64 byte message block and update state.
|
||||
// Calling function is responsible for initializing the state, setting
|
||||
// correct byte order, counting bits and padding of the final block.
|
||||
// It's faster for multiple rounds of sha256 (sha256d/t/q) by eliminating
|
||||
// redundant byte swapping.
|
||||
//
|
||||
void sha256_16way_transform( __m512i *state_out, const __m512i *data,
|
||||
const __m512i *state_in )
|
||||
{
|
||||
__m512i A, B, C, D, E, F, G, H;
|
||||
__m512i W[16];
|
||||
|
||||
memcpy_512( W, data, 16 );
|
||||
|
||||
A = state_in[0];
|
||||
B = state_in[1];
|
||||
C = state_in[2];
|
||||
D = state_in[3];
|
||||
E = state_in[4];
|
||||
F = state_in[5];
|
||||
G = state_in[6];
|
||||
H = state_in[7];
|
||||
|
||||
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
|
||||
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
|
||||
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
|
||||
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 3, 0 );
|
||||
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 4, 0 );
|
||||
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 5, 0 );
|
||||
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 6, 0 );
|
||||
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 7, 0 );
|
||||
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 8, 0 );
|
||||
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 9, 0 );
|
||||
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 10, 0 );
|
||||
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 11, 0 );
|
||||
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 12, 0 );
|
||||
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 13, 0 );
|
||||
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 14, 0 );
|
||||
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 15, 0 );
|
||||
|
||||
for ( int j = 16; j < 64; j += 16 )
|
||||
{
|
||||
W[ 0] = SHA2x16_MEXP( 14, 9, 1, 0 );
|
||||
W[ 1] = SHA2x16_MEXP( 15, 10, 2, 1 );
|
||||
W[ 2] = SHA2x16_MEXP( 0, 11, 3, 2 );
|
||||
W[ 3] = SHA2x16_MEXP( 1, 12, 4, 3 );
|
||||
W[ 4] = SHA2x16_MEXP( 2, 13, 5, 4 );
|
||||
W[ 5] = SHA2x16_MEXP( 3, 14, 6, 5 );
|
||||
W[ 6] = SHA2x16_MEXP( 4, 15, 7, 6 );
|
||||
W[ 7] = SHA2x16_MEXP( 5, 0, 8, 7 );
|
||||
W[ 8] = SHA2x16_MEXP( 6, 1, 9, 8 );
|
||||
W[ 9] = SHA2x16_MEXP( 7, 2, 10, 9 );
|
||||
W[10] = SHA2x16_MEXP( 8, 3, 11, 10 );
|
||||
W[11] = SHA2x16_MEXP( 9, 4, 12, 11 );
|
||||
W[12] = SHA2x16_MEXP( 10, 5, 13, 12 );
|
||||
W[13] = SHA2x16_MEXP( 11, 6, 14, 13 );
|
||||
W[14] = SHA2x16_MEXP( 12, 7, 15, 14 );
|
||||
W[15] = SHA2x16_MEXP( 13, 8, 0, 15 );
|
||||
|
||||
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 0, j );
|
||||
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 1, j );
|
||||
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 2, j );
|
||||
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 3, j );
|
||||
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 4, j );
|
||||
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 5, j );
|
||||
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 6, j );
|
||||
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 7, j );
|
||||
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 8, j );
|
||||
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 9, j );
|
||||
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 10, j );
|
||||
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 11, j );
|
||||
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 12, j );
|
||||
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 13, j );
|
||||
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 14, j );
|
||||
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 15, j );
|
||||
}
|
||||
|
||||
state_out[0] = _mm512_add_epi32( state_in[0], A );
|
||||
state_out[1] = _mm512_add_epi32( state_in[1], B );
|
||||
state_out[2] = _mm512_add_epi32( state_in[2], C );
|
||||
state_out[3] = _mm512_add_epi32( state_in[3], D );
|
||||
state_out[4] = _mm512_add_epi32( state_in[4], E );
|
||||
state_out[5] = _mm512_add_epi32( state_in[5], F );
|
||||
state_out[6] = _mm512_add_epi32( state_in[6], G );
|
||||
state_out[7] = _mm512_add_epi32( state_in[7], H );
|
||||
}
|
||||
|
||||
// Aggresive prehashing
|
||||
void sha256_16way_prehash_3rounds( __m512i *state_mid, const __m512i *W,
|
||||
const __m512i *state_in )
|
||||
{
|
||||
__m512i A, B, C, D, E, F, G, H;
|
||||
|
||||
A = _mm512_load_si512( state_in );
|
||||
B = _mm512_load_si512( state_in + 1 );
|
||||
C = _mm512_load_si512( state_in + 2 );
|
||||
D = _mm512_load_si512( state_in + 3 );
|
||||
E = _mm512_load_si512( state_in + 4 );
|
||||
F = _mm512_load_si512( state_in + 5 );
|
||||
G = _mm512_load_si512( state_in + 6 );
|
||||
H = _mm512_load_si512( state_in + 7 );
|
||||
|
||||
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
|
||||
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
|
||||
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
|
||||
|
||||
_mm512_store_si512( state_mid , A );
|
||||
_mm512_store_si512( state_mid + 1, B );
|
||||
_mm512_store_si512( state_mid + 2, C );
|
||||
_mm512_store_si512( state_mid + 3, D );
|
||||
_mm512_store_si512( state_mid + 4, E );
|
||||
_mm512_store_si512( state_mid + 5, F );
|
||||
_mm512_store_si512( state_mid + 6, G );
|
||||
_mm512_store_si512( state_mid + 7, H );
|
||||
}
|
||||
|
||||
void sha256_16way_final_rounds( __m512i *state_out, const __m512i *data,
|
||||
const __m512i *state_in, const __m512i *state_mid )
|
||||
{
|
||||
__m512i A, B, C, D, E, F, G, H;
|
||||
__m512i W[16];
|
||||
|
||||
memcpy_512( W, data, 16 );
|
||||
|
||||
A = _mm512_load_si512( state_mid );
|
||||
B = _mm512_load_si512( state_mid + 1 );
|
||||
C = _mm512_load_si512( state_mid + 2 );
|
||||
D = _mm512_load_si512( state_mid + 3 );
|
||||
E = _mm512_load_si512( state_mid + 4 );
|
||||
F = _mm512_load_si512( state_mid + 5 );
|
||||
G = _mm512_load_si512( state_mid + 6 );
|
||||
H = _mm512_load_si512( state_mid + 7 );
|
||||
|
||||
// SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
|
||||
// SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
|
||||
// SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
|
||||
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 3, 0 );
|
||||
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 4, 0 );
|
||||
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 5, 0 );
|
||||
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 6, 0 );
|
||||
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 7, 0 );
|
||||
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 8, 0 );
|
||||
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 9, 0 );
|
||||
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 10, 0 );
|
||||
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 11, 0 );
|
||||
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 12, 0 );
|
||||
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 13, 0 );
|
||||
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 14, 0 );
|
||||
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 15, 0 );
|
||||
|
||||
for ( int j = 16; j < 64; j += 16 )
|
||||
{
|
||||
W[ 0] = SHA2x16_MEXP( 14, 9, 1, 0 );
|
||||
W[ 1] = SHA2x16_MEXP( 15, 10, 2, 1 );
|
||||
W[ 2] = SHA2x16_MEXP( 0, 11, 3, 2 );
|
||||
W[ 3] = SHA2x16_MEXP( 1, 12, 4, 3 );
|
||||
W[ 4] = SHA2x16_MEXP( 2, 13, 5, 4 );
|
||||
W[ 5] = SHA2x16_MEXP( 3, 14, 6, 5 );
|
||||
W[ 6] = SHA2x16_MEXP( 4, 15, 7, 6 );
|
||||
W[ 7] = SHA2x16_MEXP( 5, 0, 8, 7 );
|
||||
W[ 8] = SHA2x16_MEXP( 6, 1, 9, 8 );
|
||||
W[ 9] = SHA2x16_MEXP( 7, 2, 10, 9 );
|
||||
W[10] = SHA2x16_MEXP( 8, 3, 11, 10 );
|
||||
W[11] = SHA2x16_MEXP( 9, 4, 12, 11 );
|
||||
W[12] = SHA2x16_MEXP( 10, 5, 13, 12 );
|
||||
W[13] = SHA2x16_MEXP( 11, 6, 14, 13 );
|
||||
W[14] = SHA2x16_MEXP( 12, 7, 15, 14 );
|
||||
W[15] = SHA2x16_MEXP( 13, 8, 0, 15 );
|
||||
|
||||
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 0, j );
|
||||
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 1, j );
|
||||
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 2, j );
|
||||
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 3, j );
|
||||
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 4, j );
|
||||
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 5, j );
|
||||
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 6, j );
|
||||
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 7, j );
|
||||
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 8, j );
|
||||
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 9, j );
|
||||
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 10, j );
|
||||
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 11, j );
|
||||
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 12, j );
|
||||
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 13, j );
|
||||
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 14, j );
|
||||
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 15, j );
|
||||
}
|
||||
|
||||
A = _mm512_add_epi32( A, _mm512_load_si512( state_in ) );
|
||||
B = _mm512_add_epi32( B, _mm512_load_si512( state_in + 1 ) );
|
||||
C = _mm512_add_epi32( C, _mm512_load_si512( state_in + 2 ) );
|
||||
D = _mm512_add_epi32( D, _mm512_load_si512( state_in + 3 ) );
|
||||
E = _mm512_add_epi32( E, _mm512_load_si512( state_in + 4 ) );
|
||||
F = _mm512_add_epi32( F, _mm512_load_si512( state_in + 5 ) );
|
||||
G = _mm512_add_epi32( G, _mm512_load_si512( state_in + 6 ) );
|
||||
H = _mm512_add_epi32( H, _mm512_load_si512( state_in + 7 ) );
|
||||
|
||||
_mm512_store_si512( state_out , A );
|
||||
_mm512_store_si512( state_out + 1, B );
|
||||
_mm512_store_si512( state_out + 2, C );
|
||||
_mm512_store_si512( state_out + 3, D );
|
||||
_mm512_store_si512( state_out + 4, E );
|
||||
_mm512_store_si512( state_out + 5, F );
|
||||
_mm512_store_si512( state_out + 6, G );
|
||||
_mm512_store_si512( state_out + 7, H );
|
||||
}
|
||||
|
||||
static void
|
||||
sha256_16way_round( sha256_16way_context *ctx, __m512i *in, __m512i r[8] )
|
||||
{
|
||||
register __m512i A, B, C, D, E, F, G, H;
|
||||
register __m512i A, B, C, D, E, F, G, H;
|
||||
__m512i W[16];
|
||||
|
||||
mm512_block_bswap_32( W , in );
|
||||
@@ -657,6 +1060,7 @@ sha256_16way_round( sha256_16way_context *ctx, __m512i *in, __m512i r[8] )
|
||||
H = m512_const1_64( 0x5BE0CD195BE0CD19 );
|
||||
}
|
||||
|
||||
|
||||
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
|
||||
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
|
||||
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
|
||||
@@ -800,10 +1204,8 @@ void sha256_16way_close( sha256_16way_context *sc, void *dst )
|
||||
high = (sc->count_high << 3) | (low >> 29);
|
||||
low = low << 3;
|
||||
|
||||
sc->buf[ pad >> 2 ] =
|
||||
mm512_bswap_32( m512_const1_32( high ) );
|
||||
sc->buf[ ( pad+4 ) >> 2 ] =
|
||||
mm512_bswap_32( m512_const1_32( low ) );
|
||||
sc->buf[ pad >> 2 ] = m512_const1_32( bswap_32( high ) );
|
||||
sc->buf[ ( pad+4 ) >> 2 ] = m512_const1_32( bswap_32( low ) );
|
||||
|
||||
sha256_16way_round( sc, sc->buf, sc->val );
|
||||
|
||||
|
@@ -3,23 +3,24 @@
|
||||
/* Based on code from Intel, and by Sean Gulley for */
|
||||
/* the miTLS project. */
|
||||
|
||||
// A drop in replacement for the function of the same name in sph_sha2.c.
|
||||
// A stripped down version with byte swapping removed.
|
||||
|
||||
#if defined(__SHA__)
|
||||
|
||||
#include "simd-utils.h"
|
||||
#include "sha256-hash-opt.h"
|
||||
|
||||
static void sha2_round( const uint8_t input[], uint32_t state[8] )
|
||||
void sha256_opt_transform( uint32_t *state_out, const void *input,
|
||||
const uint32_t *state_in )
|
||||
{
|
||||
__m128i STATE0, STATE1;
|
||||
__m128i MSG, TMP, MASK;
|
||||
__m128i MSG, TMP;
|
||||
__m128i TMSG0, TMSG1, TMSG2, TMSG3;
|
||||
__m128i ABEF_SAVE, CDGH_SAVE;
|
||||
|
||||
// Load initial values
|
||||
TMP = _mm_load_si128((__m128i*) &state[0]);
|
||||
STATE1 = _mm_load_si128((__m128i*) &state[4]);
|
||||
MASK = _mm_set_epi64x(0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL);
|
||||
TMP = _mm_load_si128((__m128i*) &state_in[0]);
|
||||
STATE1 = _mm_load_si128((__m128i*) &state_in[4]);
|
||||
// MASK = _mm_set_epi64x(0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL);
|
||||
|
||||
TMP = _mm_shuffle_epi32(TMP, 0xB1); // CDAB
|
||||
STATE1 = _mm_shuffle_epi32(STATE1, 0x1B); // EFGH
|
||||
@@ -31,8 +32,8 @@ static void sha2_round( const uint8_t input[], uint32_t state[8] )
|
||||
CDGH_SAVE = STATE1;
|
||||
|
||||
// Rounds 0-3
|
||||
MSG = _mm_load_si128((const __m128i*) (input+0));
|
||||
TMSG0 = _mm_shuffle_epi8(MSG, MASK);
|
||||
TMSG0 = _mm_load_si128((const __m128i*) (input+0));
|
||||
// TMSG0 = _mm_shuffle_epi8(MSG, MASK);
|
||||
MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0xE9B5DBA5B5C0FBCFULL, 0x71374491428A2F98ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
@@ -40,7 +41,7 @@ static void sha2_round( const uint8_t input[], uint32_t state[8] )
|
||||
|
||||
// Rounds 4-7
|
||||
TMSG1 = _mm_load_si128((const __m128i*) (input+16));
|
||||
TMSG1 = _mm_shuffle_epi8(TMSG1, MASK);
|
||||
// TMSG1 = _mm_shuffle_epi8(TMSG1, MASK);
|
||||
MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0xAB1C5ED5923F82A4ULL, 0x59F111F13956C25BULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
@@ -49,7 +50,7 @@ static void sha2_round( const uint8_t input[], uint32_t state[8] )
|
||||
|
||||
// Rounds 8-11
|
||||
TMSG2 = _mm_load_si128((const __m128i*) (input+32));
|
||||
TMSG2 = _mm_shuffle_epi8(TMSG2, MASK);
|
||||
// TMSG2 = _mm_shuffle_epi8(TMSG2, MASK);
|
||||
MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0x550C7DC3243185BEULL, 0x12835B01D807AA98ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
@@ -58,7 +59,7 @@ static void sha2_round( const uint8_t input[], uint32_t state[8] )
|
||||
|
||||
// Rounds 12-15
|
||||
TMSG3 = _mm_load_si128((const __m128i*) (input+48));
|
||||
TMSG3 = _mm_shuffle_epi8(TMSG3, MASK);
|
||||
// TMSG3 = _mm_shuffle_epi8(TMSG3, MASK);
|
||||
MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0xC19BF1749BDC06A7ULL, 0x80DEB1FE72BE5D74ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
|
||||
@@ -192,9 +193,8 @@ static void sha2_round( const uint8_t input[], uint32_t state[8] )
|
||||
STATE1 = _mm_alignr_epi8(STATE1, TMP, 8); // ABEF
|
||||
|
||||
// Save state
|
||||
_mm_store_si128((__m128i*) &state[0], STATE0);
|
||||
_mm_store_si128((__m128i*) &state[4], STATE1);
|
||||
_mm_store_si128((__m128i*) &state_out[0], STATE0);
|
||||
_mm_store_si128((__m128i*) &state_out[4], STATE1);
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
18
algo/sha/sha256-hash-opt.h
Normal file
18
algo/sha/sha256-hash-opt.h
Normal file
@@ -0,0 +1,18 @@
|
||||
#ifndef SHA2_HASH_OPT_H__
|
||||
#define SHA2_HASH_OPT_H__ 1
|
||||
|
||||
#include <stddef.h>
|
||||
#include "simd-utils.h"
|
||||
|
||||
#if defined(__SHA__)
|
||||
|
||||
void sha256_opt_transform( uint32_t *state_out, const void *input,
|
||||
const uint32_t *state_in );
|
||||
|
||||
// 2 way with interleaved instructions
|
||||
void sha256_ni2way_transform( uint32_t *out_X, uint32_t*out_Y,
|
||||
const void *msg_X, const void *msg_Y,
|
||||
const uint32_t *in_X, const uint32_t *in_Y );
|
||||
|
||||
#endif
|
||||
#endif
|
252
algo/sha/sha256d-4way.c
Normal file
252
algo/sha/sha256d-4way.c
Normal file
@@ -0,0 +1,252 @@
|
||||
#include "sha256t-gate.h"
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include "sha-hash-4way.h"
|
||||
|
||||
#if defined(SHA256D_16WAY)
|
||||
|
||||
int scanhash_sha256d_16way( struct work *work, const uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
__m512i block[16] __attribute__ ((aligned (64)));
|
||||
__m512i hash32[8] __attribute__ ((aligned (32)));
|
||||
__m512i initstate[8] __attribute__ ((aligned (32)));
|
||||
__m512i midstate[8] __attribute__ ((aligned (32)));
|
||||
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
|
||||
__m512i vdata[20] __attribute__ ((aligned (32)));
|
||||
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
|
||||
uint32_t *pdata = work->data;
|
||||
const uint32_t *ptarget = work->target;
|
||||
const uint32_t targ32_d7 = ptarget[7];
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t last_nonce = max_nonce - 16;
|
||||
uint32_t n = first_nonce;
|
||||
__m512i *noncev = vdata + 19;
|
||||
const int thr_id = mythr->id;
|
||||
const bool bench = opt_benchmark;
|
||||
const __m512i last_byte = m512_const1_32( 0x80000000 );
|
||||
const __m512i sixteen = m512_const1_32( 16 );
|
||||
|
||||
for ( int i = 0; i < 19; i++ )
|
||||
vdata[i] = m512_const1_32( pdata[i] );
|
||||
|
||||
*noncev = _mm512_set_epi32( n+15, n+14, n+13, n+12, n+11, n+10, n+9, n+8,
|
||||
n+ 7, n+ 6, n+ 5, n+ 4, n+ 3, n+ 2, n+1, n );
|
||||
|
||||
// initialize state
|
||||
initstate[0] = m512_const1_64( 0x6A09E6676A09E667 );
|
||||
initstate[1] = m512_const1_64( 0xBB67AE85BB67AE85 );
|
||||
initstate[2] = m512_const1_64( 0x3C6EF3723C6EF372 );
|
||||
initstate[3] = m512_const1_64( 0xA54FF53AA54FF53A );
|
||||
initstate[4] = m512_const1_64( 0x510E527F510E527F );
|
||||
initstate[5] = m512_const1_64( 0x9B05688C9B05688C );
|
||||
initstate[6] = m512_const1_64( 0x1F83D9AB1F83D9AB );
|
||||
initstate[7] = m512_const1_64( 0x5BE0CD195BE0CD19 );
|
||||
|
||||
// hash first 64 bytes of data
|
||||
sha256_16way_transform( midstate, vdata, initstate );
|
||||
|
||||
do
|
||||
{
|
||||
// 1. final 16 bytes of data, with padding
|
||||
memcpy_512( block, vdata + 16, 4 );
|
||||
block[ 4] = last_byte;
|
||||
memset_zero_512( block + 5, 10 );
|
||||
block[15] = m512_const1_32( 80*8 ); // bit count
|
||||
sha256_16way_transform( hash32, block, midstate );
|
||||
|
||||
// 2. 32 byte hash from 1.
|
||||
memcpy_512( block, hash32, 8 );
|
||||
block[ 8] = last_byte;
|
||||
memset_zero_512( block + 9, 6 );
|
||||
block[15] = m512_const1_32( 32*8 ); // bit count
|
||||
sha256_16way_transform( hash32, block, initstate );
|
||||
|
||||
// byte swap final hash for testing
|
||||
mm512_block_bswap_32( hash32, hash32 );
|
||||
|
||||
for ( int lane = 0; lane < 16; lane++ )
|
||||
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
|
||||
{
|
||||
extr_lane_16x32( lane_hash, hash32, lane, 256 );
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = n + lane;
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm512_add_epi32( *noncev, sixteen );
|
||||
n += 16;
|
||||
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
|
||||
pdata[19] = n;
|
||||
*hashes_done = n - first_nonce;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(SHA256D_8WAY)
|
||||
|
||||
int scanhash_sha256d_8way( struct work *work, const uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
__m256i block[16] __attribute__ ((aligned (64)));
|
||||
__m256i hash32[8] __attribute__ ((aligned (32)));
|
||||
__m256i initstate[8] __attribute__ ((aligned (32)));
|
||||
__m256i midstate[8] __attribute__ ((aligned (32)));
|
||||
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
|
||||
__m256i vdata[20] __attribute__ ((aligned (32)));
|
||||
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
|
||||
uint32_t *pdata = work->data;
|
||||
const uint32_t *ptarget = work->target;
|
||||
const uint32_t targ32_d7 = ptarget[7];
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t last_nonce = max_nonce - 8;
|
||||
uint32_t n = first_nonce;
|
||||
__m256i *noncev = vdata + 19;
|
||||
const int thr_id = mythr->id;
|
||||
const bool bench = opt_benchmark;
|
||||
const __m256i last_byte = m256_const1_32( 0x80000000 );
|
||||
const __m256i eight = m256_const1_32( 8 );
|
||||
|
||||
for ( int i = 0; i < 19; i++ )
|
||||
vdata[i] = m256_const1_32( pdata[i] );
|
||||
|
||||
*noncev = _mm256_set_epi32( n+ 7, n+ 6, n+ 5, n+ 4, n+ 3, n+ 2, n+1, n );
|
||||
|
||||
// initialize state
|
||||
initstate[0] = m256_const1_64( 0x6A09E6676A09E667 );
|
||||
initstate[1] = m256_const1_64( 0xBB67AE85BB67AE85 );
|
||||
initstate[2] = m256_const1_64( 0x3C6EF3723C6EF372 );
|
||||
initstate[3] = m256_const1_64( 0xA54FF53AA54FF53A );
|
||||
initstate[4] = m256_const1_64( 0x510E527F510E527F );
|
||||
initstate[5] = m256_const1_64( 0x9B05688C9B05688C );
|
||||
initstate[6] = m256_const1_64( 0x1F83D9AB1F83D9AB );
|
||||
initstate[7] = m256_const1_64( 0x5BE0CD195BE0CD19 );
|
||||
|
||||
// hash first 64 bytes of data
|
||||
sha256_8way_transform( midstate, vdata, initstate );
|
||||
|
||||
do
|
||||
{
|
||||
// 1. final 16 bytes of data, with padding
|
||||
memcpy_256( block, vdata + 16, 4 );
|
||||
block[ 4] = last_byte;
|
||||
memset_zero_256( block + 5, 10 );
|
||||
block[15] = m256_const1_32( 80*8 ); // bit count
|
||||
sha256_8way_transform( hash32, block, midstate );
|
||||
|
||||
// 2. 32 byte hash from 1.
|
||||
memcpy_256( block, hash32, 8 );
|
||||
block[ 8] = last_byte;
|
||||
memset_zero_256( block + 9, 6 );
|
||||
block[15] = m256_const1_32( 32*8 ); // bit count
|
||||
sha256_8way_transform( hash32, block, initstate );
|
||||
|
||||
// byte swap final hash for testing
|
||||
mm256_block_bswap_32( hash32, hash32 );
|
||||
|
||||
for ( int lane = 0; lane < 8; lane++ )
|
||||
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
|
||||
{
|
||||
extr_lane_8x32( lane_hash, hash32, lane, 256 );
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = n + lane;
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm256_add_epi32( *noncev, eight );
|
||||
n += 8;
|
||||
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
|
||||
pdata[19] = n;
|
||||
*hashes_done = n - first_nonce;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(SHA256D_4WAY)
|
||||
|
||||
int scanhash_sha256d_4way( struct work *work, const uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
__m128i block[16] __attribute__ ((aligned (64)));
|
||||
__m128i hash32[8] __attribute__ ((aligned (32)));
|
||||
__m128i initstate[8] __attribute__ ((aligned (32)));
|
||||
__m128i midstate[8] __attribute__ ((aligned (32)));
|
||||
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
|
||||
__m128i vdata[20] __attribute__ ((aligned (32)));
|
||||
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
|
||||
uint32_t *pdata = work->data;
|
||||
const uint32_t *ptarget = work->target;
|
||||
const uint32_t targ32_d7 = ptarget[7];
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t last_nonce = max_nonce - 4;
|
||||
uint32_t n = first_nonce;
|
||||
__m128i *noncev = vdata + 19;
|
||||
const int thr_id = mythr->id;
|
||||
const bool bench = opt_benchmark;
|
||||
const __m128i last_byte = m128_const1_32( 0x80000000 );
|
||||
const __m128i four = m128_const1_32( 4 );
|
||||
|
||||
for ( int i = 0; i < 19; i++ )
|
||||
vdata[i] = m128_const1_32( pdata[i] );
|
||||
|
||||
*noncev = _mm_set_epi32( n+ 3, n+ 2, n+1, n );
|
||||
|
||||
// initialize state
|
||||
initstate[0] = m128_const1_64( 0x6A09E6676A09E667 );
|
||||
initstate[1] = m128_const1_64( 0xBB67AE85BB67AE85 );
|
||||
initstate[2] = m128_const1_64( 0x3C6EF3723C6EF372 );
|
||||
initstate[3] = m128_const1_64( 0xA54FF53AA54FF53A );
|
||||
initstate[4] = m128_const1_64( 0x510E527F510E527F );
|
||||
initstate[5] = m128_const1_64( 0x9B05688C9B05688C );
|
||||
initstate[6] = m128_const1_64( 0x1F83D9AB1F83D9AB );
|
||||
initstate[7] = m128_const1_64( 0x5BE0CD195BE0CD19 );
|
||||
|
||||
// hash first 64 bytes of data
|
||||
sha256_4way_transform( midstate, vdata, initstate );
|
||||
|
||||
do
|
||||
{
|
||||
// 1. final 16 bytes of data, with padding
|
||||
memcpy_128( block, vdata + 16, 4 );
|
||||
block[ 4] = last_byte;
|
||||
memset_zero_128( block + 5, 10 );
|
||||
block[15] = m128_const1_32( 80*8 ); // bit count
|
||||
sha256_4way_transform( hash32, block, midstate );
|
||||
|
||||
// 2. 32 byte hash from 1.
|
||||
memcpy_128( block, hash32, 8 );
|
||||
block[ 8] = last_byte;
|
||||
memset_zero_128( block + 9, 6 );
|
||||
block[15] = m128_const1_32( 32*8 ); // bit count
|
||||
sha256_4way_transform( hash32, block, initstate );
|
||||
|
||||
// byte swap final hash for testing
|
||||
mm128_block_bswap_32( hash32, hash32 );
|
||||
|
||||
for ( int lane = 0; lane < 4; lane++ )
|
||||
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
|
||||
{
|
||||
extr_lane_4x32( lane_hash, hash32, lane, 256 );
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = n + lane;
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm_add_epi32( *noncev, four );
|
||||
n += 4;
|
||||
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
|
||||
pdata[19] = n;
|
||||
*hashes_done = n - first_nonce;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -7,133 +7,173 @@
|
||||
|
||||
#if defined(SHA256T_16WAY)
|
||||
|
||||
static __thread sha256_16way_context sha256_ctx16 __attribute__ ((aligned (64)));
|
||||
|
||||
void sha256t_16way_hash( void* output, const void* input )
|
||||
{
|
||||
uint32_t vhash[8*16] __attribute__ ((aligned (64)));
|
||||
sha256_16way_context ctx;
|
||||
memcpy( &ctx, &sha256_ctx16, sizeof ctx );
|
||||
|
||||
sha256_16way_update( &ctx, input + (64<<4), 16 );
|
||||
sha256_16way_close( &ctx, vhash );
|
||||
|
||||
sha256_16way_init( &ctx );
|
||||
sha256_16way_update( &ctx, vhash, 32 );
|
||||
sha256_16way_close( &ctx, vhash );
|
||||
|
||||
sha256_16way_init( &ctx );
|
||||
sha256_16way_update( &ctx, vhash, 32 );
|
||||
sha256_16way_close( &ctx, output );
|
||||
}
|
||||
|
||||
int scanhash_sha256t_16way( struct work *work, const uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
uint32_t vdata[20*16] __attribute__ ((aligned (64)));
|
||||
uint32_t hash32[8*16] __attribute__ ((aligned (32)));
|
||||
__m512i block[16] __attribute__ ((aligned (64)));
|
||||
__m512i hash32[8] __attribute__ ((aligned (32)));
|
||||
__m512i initstate[8] __attribute__ ((aligned (32)));
|
||||
__m512i midstate[8] __attribute__ ((aligned (32)));
|
||||
__m512i midstate2[8] __attribute__ ((aligned (32)));
|
||||
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
|
||||
uint32_t *hash32_d7 = &(hash32[7<<4]);
|
||||
__m512i vdata[20] __attribute__ ((aligned (32)));
|
||||
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
|
||||
uint32_t *pdata = work->data;
|
||||
const uint32_t *ptarget = work->target;
|
||||
const uint32_t targ32_d7 = ptarget[7];
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t last_nonce = max_nonce - 16;
|
||||
uint32_t n = first_nonce;
|
||||
__m512i *noncev = (__m512i*)vdata + 19; // aligned
|
||||
__m512i *noncev = vdata + 19;
|
||||
const int thr_id = mythr->id;
|
||||
const bool bench = opt_benchmark;
|
||||
const __m512i last_byte = m512_const1_32( 0x80000000 );
|
||||
const __m512i sixteen = m512_const1_32( 16 );
|
||||
|
||||
for ( int i = 0; i < 19; i++ )
|
||||
vdata[i] = m512_const1_32( pdata[i] );
|
||||
|
||||
mm512_bswap32_intrlv80_16x32( vdata, pdata );
|
||||
*noncev = _mm512_set_epi32( n+15, n+14, n+13, n+12, n+11, n+10, n+9, n+8,
|
||||
n+ 7, n+ 6, n+ 5, n+ 4, n+ 3, n+ 2, n+1, n );
|
||||
sha256_16way_init( &sha256_ctx16 );
|
||||
sha256_16way_update( &sha256_ctx16, vdata, 64 );
|
||||
|
||||
// initialize state
|
||||
initstate[0] = m512_const1_64( 0x6A09E6676A09E667 );
|
||||
initstate[1] = m512_const1_64( 0xBB67AE85BB67AE85 );
|
||||
initstate[2] = m512_const1_64( 0x3C6EF3723C6EF372 );
|
||||
initstate[3] = m512_const1_64( 0xA54FF53AA54FF53A );
|
||||
initstate[4] = m512_const1_64( 0x510E527F510E527F );
|
||||
initstate[5] = m512_const1_64( 0x9B05688C9B05688C );
|
||||
initstate[6] = m512_const1_64( 0x1F83D9AB1F83D9AB );
|
||||
initstate[7] = m512_const1_64( 0x5BE0CD195BE0CD19 );
|
||||
|
||||
// hash first 64 byte block of data
|
||||
sha256_16way_transform( midstate, vdata, initstate );
|
||||
|
||||
// Do 3 rounds on the first 12 bytes of the next block
|
||||
sha256_16way_prehash_3rounds( midstate2, vdata + 16, midstate );
|
||||
|
||||
do
|
||||
{
|
||||
pdata[19] = n;
|
||||
sha256t_16way_hash( hash32, vdata );
|
||||
for ( int lane = 0; lane < 16; lane++ )
|
||||
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
|
||||
{
|
||||
extr_lane_16x32( lane_hash, hash32, lane, 256 );
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n + lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm512_add_epi32( *noncev, m512_const1_32( 16 ) );
|
||||
n += 16;
|
||||
// 1. final 16 bytes of data, with padding
|
||||
memcpy_512( block, vdata + 16, 4 );
|
||||
block[ 4] = last_byte;
|
||||
memset_zero_512( block + 5, 10 );
|
||||
block[15] = m512_const1_32( 80*8 ); // bit count
|
||||
sha256_16way_final_rounds( hash32, block, midstate, midstate2 );
|
||||
// sha256_16way_transform( hash32, block, midstate );
|
||||
|
||||
// 2. 32 byte hash from 1.
|
||||
memcpy_512( block, hash32, 8 );
|
||||
block[ 8] = last_byte;
|
||||
memset_zero_512( block + 9, 6 );
|
||||
block[15] = m512_const1_32( 32*8 ); // bit count
|
||||
sha256_16way_transform( hash32, block, initstate );
|
||||
|
||||
// 3. 32 byte hash from 2.
|
||||
memcpy_512( block, hash32, 8 );
|
||||
sha256_16way_transform( hash32, block, initstate );
|
||||
|
||||
// byte swap final hash for testing
|
||||
mm512_block_bswap_32( hash32, hash32 );
|
||||
|
||||
for ( int lane = 0; lane < 16; lane++ )
|
||||
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
|
||||
{
|
||||
extr_lane_16x32( lane_hash, hash32, lane, 256 );
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = n + lane;
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm512_add_epi32( *noncev, sixteen );
|
||||
n += 16;
|
||||
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
|
||||
pdata[19] = n;
|
||||
*hashes_done = n - first_nonce;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(SHA256T_8WAY)
|
||||
|
||||
static __thread sha256_8way_context sha256_ctx8 __attribute__ ((aligned (64)));
|
||||
|
||||
void sha256t_8way_hash( void* output, const void* input )
|
||||
{
|
||||
uint32_t vhash[8*8] __attribute__ ((aligned (64)));
|
||||
sha256_8way_context ctx;
|
||||
memcpy( &ctx, &sha256_ctx8, sizeof ctx );
|
||||
|
||||
sha256_8way_update( &ctx, input + (64<<3), 16 );
|
||||
sha256_8way_close( &ctx, vhash );
|
||||
|
||||
sha256_8way_init( &ctx );
|
||||
sha256_8way_update( &ctx, vhash, 32 );
|
||||
sha256_8way_close( &ctx, vhash );
|
||||
|
||||
sha256_8way_init( &ctx );
|
||||
sha256_8way_update( &ctx, vhash, 32 );
|
||||
sha256_8way_close( &ctx, output );
|
||||
}
|
||||
|
||||
int scanhash_sha256t_8way( struct work *work, const uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
|
||||
uint32_t hash32[8*8] __attribute__ ((aligned (32)));
|
||||
__m256i block[16] __attribute__ ((aligned (64)));
|
||||
__m256i hash32[8] __attribute__ ((aligned (32)));
|
||||
__m256i initstate[8] __attribute__ ((aligned (32)));
|
||||
__m256i midstate[8] __attribute__ ((aligned (32)));
|
||||
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
|
||||
uint32_t *hash32_d7 = &(hash32[7<<3]);
|
||||
__m256i vdata[20] __attribute__ ((aligned (32)));
|
||||
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
|
||||
uint32_t *pdata = work->data;
|
||||
const uint32_t *ptarget = work->target;
|
||||
const uint32_t targ32_d7 = ptarget[7];
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t last_nonce = max_nonce - 8;
|
||||
uint32_t n = first_nonce;
|
||||
__m256i *noncev = (__m256i*)vdata + 19; // aligned
|
||||
__m256i *noncev = vdata + 19;
|
||||
const int thr_id = mythr->id;
|
||||
const bool bench = opt_benchmark;
|
||||
const __m256i last_byte = m256_const1_32( 0x80000000 );
|
||||
const __m256i eight = m256_const1_32( 8 );
|
||||
|
||||
mm256_bswap32_intrlv80_8x32( vdata, pdata );
|
||||
*noncev = _mm256_set_epi32( n+7, n+6, n+5, n+4, n+3, n+2, n+1, n );
|
||||
sha256_8way_init( &sha256_ctx8 );
|
||||
sha256_8way_update( &sha256_ctx8, vdata, 64 );
|
||||
for ( int i = 0; i < 19; i++ )
|
||||
vdata[i] = m256_const1_32( pdata[i] );
|
||||
|
||||
*noncev = _mm256_set_epi32( n+ 7, n+ 6, n+ 5, n+ 4, n+ 3, n+ 2, n+1, n );
|
||||
|
||||
// initialize state
|
||||
initstate[0] = m256_const1_64( 0x6A09E6676A09E667 );
|
||||
initstate[1] = m256_const1_64( 0xBB67AE85BB67AE85 );
|
||||
initstate[2] = m256_const1_64( 0x3C6EF3723C6EF372 );
|
||||
initstate[3] = m256_const1_64( 0xA54FF53AA54FF53A );
|
||||
initstate[4] = m256_const1_64( 0x510E527F510E527F );
|
||||
initstate[5] = m256_const1_64( 0x9B05688C9B05688C );
|
||||
initstate[6] = m256_const1_64( 0x1F83D9AB1F83D9AB );
|
||||
initstate[7] = m256_const1_64( 0x5BE0CD195BE0CD19 );
|
||||
|
||||
// hash first 64 bytes of data
|
||||
sha256_8way_transform( midstate, vdata, initstate );
|
||||
|
||||
do
|
||||
{
|
||||
pdata[19] = n;
|
||||
sha256t_8way_hash( hash32, vdata );
|
||||
for ( int lane = 0; lane < 8; lane++ )
|
||||
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
|
||||
{
|
||||
extr_lane_8x32( lane_hash, hash32, lane, 256 );
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n + lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm256_add_epi32( *noncev, m256_const1_32( 8 ) );
|
||||
n += 8;
|
||||
// 1. final 16 bytes of data, with padding
|
||||
memcpy_256( block, vdata + 16, 4 );
|
||||
block[ 4] = last_byte;
|
||||
memset_zero_256( block + 5, 10 );
|
||||
block[15] = m256_const1_32( 80*8 ); // bit count
|
||||
sha256_8way_transform( hash32, block, midstate );
|
||||
|
||||
// 2. 32 byte hash from 1.
|
||||
memcpy_256( block, hash32, 8 );
|
||||
block[ 8] = last_byte;
|
||||
memset_zero_256( block + 9, 6 );
|
||||
block[15] = m256_const1_32( 32*8 ); // bit count
|
||||
sha256_8way_transform( hash32, block, initstate );
|
||||
|
||||
// 3. 32 byte hash from 2.
|
||||
memcpy_256( block, hash32, 8 );
|
||||
sha256_8way_transform( hash32, block, initstate );
|
||||
|
||||
// byte swap final hash for testing
|
||||
mm256_block_bswap_32( hash32, hash32 );
|
||||
|
||||
for ( int lane = 0; lane < 8; lane++ )
|
||||
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
|
||||
{
|
||||
extr_lane_8x32( lane_hash, hash32, lane, 256 );
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = n + lane;
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm256_add_epi32( *noncev, eight );
|
||||
n += 8;
|
||||
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
|
||||
pdata[19] = n;
|
||||
*hashes_done = n - first_nonce;
|
||||
@@ -144,82 +184,84 @@ int scanhash_sha256t_8way( struct work *work, const uint32_t max_nonce,
|
||||
|
||||
#if defined(SHA256T_4WAY)
|
||||
|
||||
static __thread sha256_4way_context sha256_ctx4 __attribute__ ((aligned (64)));
|
||||
|
||||
void sha256t_4way_hash( void* output, const void* input )
|
||||
{
|
||||
uint32_t vhash[8*4] __attribute__ ((aligned (64)));
|
||||
sha256_4way_context ctx;
|
||||
memcpy( &ctx, &sha256_ctx4, sizeof ctx );
|
||||
|
||||
sha256_4way_update( &ctx, input + (64<<2), 16 );
|
||||
sha256_4way_close( &ctx, vhash );
|
||||
|
||||
sha256_4way_init( &ctx );
|
||||
sha256_4way_update( &ctx, vhash, 32 );
|
||||
sha256_4way_close( &ctx, vhash );
|
||||
|
||||
sha256_4way_init( &ctx );
|
||||
sha256_4way_update( &ctx, vhash, 32 );
|
||||
sha256_4way_close( &ctx, output );
|
||||
}
|
||||
|
||||
int scanhash_sha256t_4way( struct work *work, const uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
|
||||
uint32_t hash[8*4] __attribute__ ((aligned (32)));
|
||||
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
|
||||
uint32_t *hash7 = &(hash[7<<2]);
|
||||
__m128i block[16] __attribute__ ((aligned (64)));
|
||||
__m128i hash32[8] __attribute__ ((aligned (32)));
|
||||
__m128i initstate[8] __attribute__ ((aligned (32)));
|
||||
__m128i midstate[8] __attribute__ ((aligned (32)));
|
||||
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
|
||||
__m128i vdata[20] __attribute__ ((aligned (32)));
|
||||
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
|
||||
uint32_t *pdata = work->data;
|
||||
const uint32_t *ptarget = work->target;
|
||||
const uint32_t Htarg = ptarget[7];
|
||||
const uint32_t targ32_d7 = ptarget[7];
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t last_nonce = max_nonce - 4;
|
||||
uint32_t n = first_nonce;
|
||||
__m128i *noncev = (__m128i*)vdata + 19; // aligned
|
||||
__m128i *noncev = vdata + 19;
|
||||
const int thr_id = mythr->id;
|
||||
const bool bench = opt_benchmark;
|
||||
const __m128i last_byte = m128_const1_32( 0x80000000 );
|
||||
const __m128i four = m128_const1_32( 4 );
|
||||
|
||||
const uint64_t htmax[] = { 0,
|
||||
0xF,
|
||||
0xFF,
|
||||
0xFFF,
|
||||
0xFFFF,
|
||||
0x10000000 };
|
||||
const uint32_t masks[] = { 0xFFFFFFFF,
|
||||
0xFFFFFFF0,
|
||||
0xFFFFFF00,
|
||||
0xFFFFF000,
|
||||
0xFFFF0000,
|
||||
0 };
|
||||
for ( int i = 0; i < 19; i++ )
|
||||
vdata[i] = m128_const1_32( pdata[i] );
|
||||
|
||||
mm128_bswap32_intrlv80_4x32( vdata, pdata );
|
||||
sha256_4way_init( &sha256_ctx4 );
|
||||
sha256_4way_update( &sha256_ctx4, vdata, 64 );
|
||||
*noncev = _mm_set_epi32( n+ 3, n+ 2, n+1, n );
|
||||
|
||||
for ( int m = 0; m < 6; m++ ) if ( Htarg <= htmax[m] )
|
||||
// initialize state
|
||||
initstate[0] = m128_const1_64( 0x6A09E6676A09E667 );
|
||||
initstate[1] = m128_const1_64( 0xBB67AE85BB67AE85 );
|
||||
initstate[2] = m128_const1_64( 0x3C6EF3723C6EF372 );
|
||||
initstate[3] = m128_const1_64( 0xA54FF53AA54FF53A );
|
||||
initstate[4] = m128_const1_64( 0x510E527F510E527F );
|
||||
initstate[5] = m128_const1_64( 0x9B05688C9B05688C );
|
||||
initstate[6] = m128_const1_64( 0x1F83D9AB1F83D9AB );
|
||||
initstate[7] = m128_const1_64( 0x5BE0CD195BE0CD19 );
|
||||
|
||||
// hash first 64 bytes of data
|
||||
sha256_4way_transform( midstate, vdata, initstate );
|
||||
|
||||
do
|
||||
{
|
||||
const uint32_t mask = masks[m];
|
||||
do {
|
||||
*noncev = mm128_bswap_32( _mm_set_epi32( n+3,n+2,n+1,n ) );
|
||||
pdata[19] = n;
|
||||
// 1. final 16 bytes of data, with padding
|
||||
memcpy_128( block, vdata + 16, 4 );
|
||||
block[ 4] = last_byte;
|
||||
memset_zero_128( block + 5, 10 );
|
||||
block[15] = m128_const1_32( 80*8 ); // bit count
|
||||
sha256_4way_transform( hash32, block, midstate );
|
||||
|
||||
sha256t_4way_hash( hash, vdata );
|
||||
// 2. 32 byte hash from 1.
|
||||
memcpy_128( block, hash32, 8 );
|
||||
block[ 8] = last_byte;
|
||||
memset_zero_128( block + 9, 6 );
|
||||
block[15] = m128_const1_32( 32*8 ); // bit count
|
||||
sha256_4way_transform( hash32, block, initstate );
|
||||
|
||||
for ( int lane = 0; lane < 4; lane++ )
|
||||
if ( !( hash7[ lane ] & mask ) )
|
||||
// 3. 32 byte hash from 2.
|
||||
memcpy_128( block, hash32, 8 );
|
||||
sha256_4way_transform( hash32, block, initstate );
|
||||
|
||||
// byte swap final hash for testing
|
||||
mm128_block_bswap_32( hash32, hash32 );
|
||||
|
||||
for ( int lane = 0; lane < 4; lane++ )
|
||||
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
|
||||
{
|
||||
extr_lane_4x32( lane_hash, hash32, lane, 256 );
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
|
||||
{
|
||||
extr_lane_4x32( lane_hash, hash, lane, 256 );
|
||||
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
|
||||
{
|
||||
pdata[19] = n + lane;
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
n += 4;
|
||||
} while ( (n < max_nonce - 4) && !work_restart[thr_id].restart );
|
||||
break;
|
||||
}
|
||||
*hashes_done = n - first_nonce + 1;
|
||||
pdata[19] = n + lane;
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = _mm_add_epi32( *noncev, four );
|
||||
n += 4;
|
||||
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
|
||||
pdata[19] = n;
|
||||
*hashes_done = n - first_nonce;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -5,17 +5,13 @@ bool register_sha256t_algo( algo_gate_t* gate )
|
||||
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
|
||||
#if defined(SHA256T_16WAY)
|
||||
gate->scanhash = (void*)&scanhash_sha256t_16way;
|
||||
gate->hash = (void*)&sha256t_16way_hash;
|
||||
#elif defined(__SHA__)
|
||||
gate->optimizations = SHA_OPT;
|
||||
gate->scanhash = (void*)&scanhash_sha256t;
|
||||
gate->hash = (void*)&sha256t_hash;
|
||||
#elif defined(SHA256T_8WAY)
|
||||
gate->scanhash = (void*)&scanhash_sha256t_8way;
|
||||
gate->hash = (void*)&sha256t_8way_hash;
|
||||
#else
|
||||
gate->scanhash = (void*)&scanhash_sha256t_4way;
|
||||
gate->hash = (void*)&sha256t_4way_hash;
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
|
@@ -17,7 +17,6 @@ bool register_sha256q_algo( algo_gate_t* gate );
|
||||
|
||||
#if defined(SHA256T_16WAY)
|
||||
|
||||
void sha256t_16way_hash( void *output, const void *input );
|
||||
int scanhash_sha256t_16way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
void sha256q_16way_hash( void *output, const void *input );
|
||||
@@ -27,7 +26,6 @@ int scanhash_sha256q_16way( struct work *work, uint32_t max_nonce,
|
||||
|
||||
#if defined(SHA256T_8WAY)
|
||||
|
||||
void sha256t_8way_hash( void *output, const void *input );
|
||||
int scanhash_sha256t_8way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
void sha256q_8way_hash( void *output, const void *input );
|
||||
@@ -37,7 +35,6 @@ int scanhash_sha256q_8way( struct work *work, uint32_t max_nonce,
|
||||
|
||||
#if defined(SHA256T_4WAY)
|
||||
|
||||
void sha256t_4way_hash( void *output, const void *input );
|
||||
int scanhash_sha256t_4way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
void sha256q_4way_hash( void *output, const void *input );
|
||||
@@ -45,10 +42,13 @@ int scanhash_sha256q_4way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
#endif
|
||||
|
||||
#if defined(__SHA__)
|
||||
|
||||
int sha256t_hash( void *output, const void *input );
|
||||
int scanhash_sha256t( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
|
||||
#endif
|
||||
|
||||
int sha256q_hash( void *output, const void *input );
|
||||
int scanhash_sha256q( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
|
@@ -3,10 +3,14 @@
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include "algo/sha/sph_sha2.h"
|
||||
//#include "algo/sha/sph_sha2.h"
|
||||
#include "sha256-hash-opt.h"
|
||||
|
||||
#if defined(__SHA__)
|
||||
|
||||
// Only used on CPUs with SHA
|
||||
|
||||
/*
|
||||
static __thread sph_sha256_context sha256t_ctx __attribute__ ((aligned (64)));
|
||||
|
||||
void sha256t_midstate( const void* input )
|
||||
@@ -37,12 +41,21 @@ int sha256t_hash( void* output, const void* input )
|
||||
|
||||
return 1;
|
||||
}
|
||||
*/
|
||||
|
||||
/*
|
||||
int scanhash_sha256t( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
uint32_t edata[20] __attribute__((aligned(64)));
|
||||
uint32_t hash[8] __attribute__((aligned(64)));
|
||||
uint32_t block[16] __attribute__ ((aligned (64)));
|
||||
uint32_t hash32[8] __attribute__ ((aligned (32)));
|
||||
uint32_t initstate[8] __attribute__ ((aligned (32)));
|
||||
uint32_t midstate[8] __attribute__ ((aligned (32)));
|
||||
|
||||
|
||||
|
||||
// uint32_t edata[20] __attribute__((aligned(64)));
|
||||
// uint32_t hash[8] __attribute__((aligned(64)));
|
||||
uint32_t *pdata = work->data;
|
||||
uint32_t *ptarget = work->target;
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
@@ -50,24 +63,148 @@ int scanhash_sha256t( struct work *work, uint32_t max_nonce,
|
||||
uint32_t n = first_nonce;
|
||||
const int thr_id = mythr->id;
|
||||
const bool bench = opt_benchmark;
|
||||
__m128i shuf_bswap32 =
|
||||
_mm_set_epi64x( 0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL );
|
||||
|
||||
mm128_bswap32_80( edata, pdata );
|
||||
sha256t_midstate( edata );
|
||||
// mm128_bswap32_80( edata, pdata );
|
||||
// sha256t_midstate( edata );
|
||||
|
||||
// initialize state
|
||||
initstate[0] = 0x6A09E667;
|
||||
initstate[1] = 0xBB67AE85;
|
||||
initstate[2] = 0x3C6EF372;
|
||||
initstate[3] = 0xA54FF53A;
|
||||
initstate[4] = 0x510E527F;
|
||||
initstate[5] = 0x9B05688C;
|
||||
initstate[6] = 0x1F83D9AB;
|
||||
initstate[7] = 0x5BE0CD19;
|
||||
|
||||
// hash first 64 bytes of data
|
||||
sha256_opt_transform( midstate, pdata, initstate );
|
||||
|
||||
do
|
||||
{
|
||||
edata[19] = n;
|
||||
if ( likely( sha256t_hash( hash, edata ) ) )
|
||||
if ( unlikely( valid_hash( hash, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n );
|
||||
submit_solution( work, hash, mythr );
|
||||
}
|
||||
// 1. final 16 bytes of data, with padding
|
||||
memcpy( block, pdata + 16, 16 );
|
||||
block[ 4] = 0x80000000;
|
||||
memset( block + 5, 0, 40 );
|
||||
block[15] = 80*8; // bit count
|
||||
sha256_opt_transform( hash32, block, midstate );
|
||||
|
||||
// 2. 32 byte hash from 1.
|
||||
memcpy( block, hash32, 32 );
|
||||
block[ 8] = 0x80000000;
|
||||
memset( block + 9, 0, 24 );
|
||||
block[15] = 32*8; // bit count
|
||||
sha256_opt_transform( hash32, block, initstate );
|
||||
|
||||
// 3. 32 byte hash from 2.
|
||||
memcpy( block, hash32, 32 );
|
||||
sha256_opt_transform( hash32, block, initstate );
|
||||
|
||||
// byte swap final hash for testing
|
||||
casti_m128i( hash32, 0 ) =
|
||||
_mm_shuffle_epi8( casti_m128i( hash32, 0 ), shuf_bswap32 );
|
||||
casti_m128i( hash32, 1 ) =
|
||||
_mm_shuffle_epi8( casti_m128i( hash32, 1 ), shuf_bswap32 );
|
||||
|
||||
if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
|
||||
submit_solution( work, hash32, mythr );
|
||||
n++;
|
||||
} while ( n < last_nonce && !work_restart[thr_id].restart );
|
||||
pdata[19] = n;
|
||||
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
|
||||
|
||||
*hashes_done = n - first_nonce;
|
||||
return 0;
|
||||
}
|
||||
*/
|
||||
|
||||
int scanhash_sha256t( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
uint32_t block0[16] __attribute__ ((aligned (64)));
|
||||
uint32_t block1[16] __attribute__ ((aligned (64)));
|
||||
uint32_t hash0[8] __attribute__ ((aligned (32)));
|
||||
uint32_t hash1[8] __attribute__ ((aligned (32)));
|
||||
uint32_t initstate[8] __attribute__ ((aligned (32)));
|
||||
uint32_t midstate[8] __attribute__ ((aligned (32)));
|
||||
uint32_t *pdata = work->data;
|
||||
uint32_t *ptarget = work->target;
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t last_nonce = max_nonce - 1;
|
||||
uint32_t n = first_nonce;
|
||||
const int thr_id = mythr->id;
|
||||
const bool bench = opt_benchmark;
|
||||
__m128i shuf_bswap32 =
|
||||
_mm_set_epi64x( 0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL );
|
||||
|
||||
// initialize state
|
||||
initstate[0] = 0x6A09E667;
|
||||
initstate[1] = 0xBB67AE85;
|
||||
initstate[2] = 0x3C6EF372;
|
||||
initstate[3] = 0xA54FF53A;
|
||||
initstate[4] = 0x510E527F;
|
||||
initstate[5] = 0x9B05688C;
|
||||
initstate[6] = 0x1F83D9AB;
|
||||
initstate[7] = 0x5BE0CD19;
|
||||
|
||||
// hash first 64 bytes of data
|
||||
sha256_opt_transform( midstate, pdata, initstate );
|
||||
|
||||
do
|
||||
{
|
||||
// 1. final 16 bytes of data, with padding
|
||||
memcpy( block0, pdata + 16, 16 );
|
||||
memcpy( block1, pdata + 16, 16 );
|
||||
block0[ 3] = n;
|
||||
block1[ 3] = n+1;
|
||||
block0[ 4] = block1[ 4] = 0x80000000;
|
||||
memset( block0 + 5, 0, 40 );
|
||||
memset( block1 + 5, 0, 40 );
|
||||
block0[15] = block1[15] = 80*8; // bit count
|
||||
sha256_ni2way_transform( hash0, hash1, block0, block1, midstate, midstate );
|
||||
|
||||
// 2. 32 byte hash from 1.
|
||||
memcpy( block0, hash0, 32 );
|
||||
memcpy( block1, hash1, 32 );
|
||||
block0[ 8] = block1[ 8] = 0x80000000;
|
||||
memset( block0 + 9, 0, 24 );
|
||||
memset( block1 + 9, 0, 24 );
|
||||
block0[15] = block1[15] = 32*8; // bit count
|
||||
sha256_ni2way_transform( hash0, hash1, block0, block1, initstate, initstate );
|
||||
|
||||
// 3. 32 byte hash from 2.
|
||||
memcpy( block0, hash0, 32 );
|
||||
memcpy( block1, hash1, 32 );
|
||||
sha256_ni2way_transform( hash0, hash1, block0, block1, initstate, initstate );
|
||||
|
||||
// byte swap final hash for testing
|
||||
casti_m128i( hash0, 0 ) =
|
||||
_mm_shuffle_epi8( casti_m128i( hash0, 0 ), shuf_bswap32 );
|
||||
casti_m128i( hash0, 1 ) =
|
||||
_mm_shuffle_epi8( casti_m128i( hash0, 1 ), shuf_bswap32 );
|
||||
casti_m128i( hash1, 0 ) =
|
||||
_mm_shuffle_epi8( casti_m128i( hash1, 0 ), shuf_bswap32 );
|
||||
casti_m128i( hash1, 1 ) =
|
||||
_mm_shuffle_epi8( casti_m128i( hash1, 1 ), shuf_bswap32 );
|
||||
|
||||
if ( unlikely( valid_hash( hash0, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = n;
|
||||
submit_solution( work, hash0, mythr );
|
||||
}
|
||||
if ( unlikely( valid_hash( hash1, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = n+1;
|
||||
submit_solution( work, hash1, mythr );
|
||||
}
|
||||
n += 2;
|
||||
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
|
||||
|
||||
pdata[19] = n;
|
||||
*hashes_done = n - first_nonce;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
@@ -96,74 +96,22 @@ static const uint64_t K512[80] =
|
||||
// SHA-512 8 way 64 bit
|
||||
|
||||
#define CH8W(X, Y, Z) \
|
||||
_mm512_xor_si512( _mm512_and_si512( _mm512_xor_si512( Y, Z ), X ), Z )
|
||||
_mm512_ternarylogic_epi64( X, Y, Z, 0xca )
|
||||
|
||||
#define MAJ8W(X, Y, Z) \
|
||||
_mm512_or_si512( _mm512_and_si512( X, Y ), \
|
||||
_mm512_and_si512( _mm512_or_si512( X, Y ), Z ) )
|
||||
_mm512_ternarylogic_epi64( X, Y, Z, 0xe8 )
|
||||
|
||||
#define BSG8W_5_0(x) \
|
||||
_mm512_xor_si512( _mm512_xor_si512( \
|
||||
mm512_ror_64(x, 28), mm512_ror_64(x, 34) ), mm512_ror_64(x, 39) )
|
||||
mm512_xor3( mm512_ror_64(x, 28), mm512_ror_64(x, 34), mm512_ror_64(x, 39) )
|
||||
|
||||
#define BSG8W_5_1(x) \
|
||||
_mm512_xor_si512( _mm512_xor_si512( \
|
||||
mm512_ror_64(x, 14), mm512_ror_64(x, 18) ), mm512_ror_64(x, 41) )
|
||||
mm512_xor3( mm512_ror_64(x, 14), mm512_ror_64(x, 18), mm512_ror_64(x, 41) )
|
||||
|
||||
#define SSG8W_5_0(x) \
|
||||
_mm512_xor_si512( _mm512_xor_si512( \
|
||||
mm512_ror_64(x, 1), mm512_ror_64(x, 8) ), _mm512_srli_epi64(x, 7) )
|
||||
mm512_xor3( mm512_ror_64(x, 1), mm512_ror_64(x, 8), _mm512_srli_epi64(x, 7) )
|
||||
|
||||
#define SSG8W_5_1(x) \
|
||||
_mm512_xor_si512( _mm512_xor_si512( \
|
||||
mm512_ror_64(x, 19), mm512_ror_64(x, 61) ), _mm512_srli_epi64(x, 6) )
|
||||
|
||||
static inline __m512i ssg8w_512_add( __m512i w0, __m512i w1 )
|
||||
{
|
||||
__m512i w0a, w1a, w0b, w1b;
|
||||
w0a = mm512_ror_64( w0, 1 );
|
||||
w1a = mm512_ror_64( w1,19 );
|
||||
w0b = mm512_ror_64( w0, 8 );
|
||||
w1b = mm512_ror_64( w1,61 );
|
||||
w0a = _mm512_xor_si512( w0a, w0b );
|
||||
w1a = _mm512_xor_si512( w1a, w1b );
|
||||
w0b = _mm512_srli_epi64( w0, 7 );
|
||||
w1b = _mm512_srli_epi64( w1, 6 );
|
||||
w0a = _mm512_xor_si512( w0a, w0b );
|
||||
w1a = _mm512_xor_si512( w1a, w1b );
|
||||
return _mm512_add_epi64( w0a, w1a );
|
||||
}
|
||||
|
||||
|
||||
#define SSG8W_512x2_0( w0, w1, i ) do \
|
||||
{ \
|
||||
__m512i X0a, X1a, X0b, X1b; \
|
||||
X0a = mm512_ror_64( W[i-15], 1 ); \
|
||||
X1a = mm512_ror_64( W[i-14], 1 ); \
|
||||
X0b = mm512_ror_64( W[i-15], 8 ); \
|
||||
X1b = mm512_ror_64( W[i-14], 8 ); \
|
||||
X0a = _mm512_xor_si512( X0a, X0b ); \
|
||||
X1a = _mm512_xor_si512( X1a, X1b ); \
|
||||
X0b = _mm512_srli_epi64( W[i-15], 7 ); \
|
||||
X1b = _mm512_srli_epi64( W[i-14], 7 ); \
|
||||
w0 = _mm512_xor_si512( X0a, X0b ); \
|
||||
w1 = _mm512_xor_si512( X1a, X1b ); \
|
||||
} while(0)
|
||||
|
||||
#define SSG8W_512x2_1( w0, w1, i ) do \
|
||||
{ \
|
||||
__m512i X0a, X1a, X0b, X1b; \
|
||||
X0a = mm512_ror_64( W[i-2],19 ); \
|
||||
X1a = mm512_ror_64( W[i-1],19 ); \
|
||||
X0b = mm512_ror_64( W[i-2],61 ); \
|
||||
X1b = mm512_ror_64( W[i-1],61 ); \
|
||||
X0a = _mm512_xor_si512( X0a, X0b ); \
|
||||
X1a = _mm512_xor_si512( X1a, X1b ); \
|
||||
X0b = _mm512_srli_epi64( W[i-2], 6 ); \
|
||||
X1b = _mm512_srli_epi64( W[i-1], 6 ); \
|
||||
w0 = _mm512_xor_si512( X0a, X0b ); \
|
||||
w1 = _mm512_xor_si512( X1a, X1b ); \
|
||||
} while(0)
|
||||
mm512_xor3( mm512_ror_64(x, 19), mm512_ror_64(x, 61), _mm512_srli_epi64(x, 6) )
|
||||
|
||||
#define SHA3_8WAY_STEP(A, B, C, D, E, F, G, H, i) \
|
||||
do { \
|
||||
@@ -187,8 +135,8 @@ sha512_8way_round( sha512_8way_context *ctx, __m512i *in, __m512i r[8] )
|
||||
mm512_block_bswap_64( W+8, in+8 );
|
||||
|
||||
for ( i = 16; i < 80; i++ )
|
||||
W[i] = _mm512_add_epi64( ssg8w_512_add( W[i-15], W[i-2] ),
|
||||
_mm512_add_epi64( W[ i- 7 ], W[ i-16 ] ) );
|
||||
W[i] = mm512_add4_64( SSG8W_5_0( W[i-15] ), SSG8W_5_1( W[i-2] ),
|
||||
W[ i- 7 ], W[ i-16 ] );
|
||||
|
||||
if ( ctx->initialized )
|
||||
{
|
||||
@@ -319,14 +267,20 @@ void sha512_8way_close( sha512_8way_context *sc, void *dst )
|
||||
|
||||
// SHA-512 4 way 64 bit
|
||||
|
||||
/*
|
||||
|
||||
#define CH(X, Y, Z) \
|
||||
_mm256_xor_si256( _mm256_and_si256( _mm256_xor_si256( Y, Z ), X ), Z )
|
||||
|
||||
/*
|
||||
#define MAJ(X, Y, Z) \
|
||||
_mm256_or_si256( _mm256_and_si256( X, Y ), \
|
||||
_mm256_and_si256( _mm256_or_si256( X, Y ), Z ) )
|
||||
*/
|
||||
|
||||
#define MAJ(X, Y, Z) \
|
||||
_mm256_xor_si256( Y, _mm256_and_si256( X_xor_Y = _mm256_xor_si256( X, Y ), \
|
||||
Y_xor_Z ) )
|
||||
|
||||
#define BSG5_0(x) \
|
||||
mm256_ror_64( _mm256_xor_si256( mm256_ror_64( \
|
||||
_mm256_xor_si256( mm256_ror_64( x, 5 ), x ), 6 ), x ), 28 )
|
||||
@@ -334,7 +288,7 @@ void sha512_8way_close( sha512_8way_context *sc, void *dst )
|
||||
#define BSG5_1(x) \
|
||||
mm256_ror_64( _mm256_xor_si256( mm256_ror_64( \
|
||||
_mm256_xor_si256( mm256_ror_64( x, 23 ), x ), 4 ), x ), 14 )
|
||||
*/
|
||||
|
||||
/*
|
||||
#define BSG5_0(x) \
|
||||
_mm256_xor_si256( _mm256_xor_si256( \
|
||||
@@ -402,7 +356,7 @@ static inline __m256i ssg512_add( __m256i w0, __m256i w1 )
|
||||
w1 = _mm256_xor_si256( X1a, X1b ); \
|
||||
} while(0)
|
||||
*/
|
||||
|
||||
/*
|
||||
#define SHA3_4WAY_STEP(A, B, C, D, E, F, G, H, i) \
|
||||
do { \
|
||||
__m256i K = _mm256_set1_epi64x( K512[ i ] ); \
|
||||
@@ -431,7 +385,7 @@ do { \
|
||||
H = _mm256_add_epi64( T1, T2 ); \
|
||||
D = _mm256_add_epi64( D, T1 ); \
|
||||
} while (0)
|
||||
|
||||
*/
|
||||
/*
|
||||
#define SHA3_4WAY_STEP(A, B, C, D, E, F, G, H, i) \
|
||||
do { \
|
||||
@@ -445,7 +399,7 @@ do { \
|
||||
} while (0)
|
||||
*/
|
||||
|
||||
/*
|
||||
|
||||
#define SHA3_4WAY_STEP(A, B, C, D, E, F, G, H, i) \
|
||||
do { \
|
||||
__m256i T1, T2; \
|
||||
@@ -453,16 +407,17 @@ do { \
|
||||
T1 = _mm256_add_epi64( H, mm256_add4_64( BSG5_1(E), CH(E, F, G), \
|
||||
K, W[i] ) ); \
|
||||
T2 = _mm256_add_epi64( BSG5_0(A), MAJ(A, B, C) ); \
|
||||
Y_xor_Z = X_xor_Y; \
|
||||
D = _mm256_add_epi64( D, T1 ); \
|
||||
H = _mm256_add_epi64( T1, T2 ); \
|
||||
} while (0)
|
||||
*/
|
||||
|
||||
|
||||
static void
|
||||
sha512_4way_round( sha512_4way_context *ctx, __m256i *in, __m256i r[8] )
|
||||
{
|
||||
int i;
|
||||
register __m256i A, B, C, D, E, F, G, H;
|
||||
register __m256i A, B, C, D, E, F, G, H, X_xor_Y, Y_xor_Z;
|
||||
__m256i W[80];
|
||||
|
||||
mm256_block_bswap_64( W , in );
|
||||
@@ -495,6 +450,8 @@ sha512_4way_round( sha512_4way_context *ctx, __m256i *in, __m256i r[8] )
|
||||
H = m256_const1_64( 0x5BE0CD19137E2179 );
|
||||
}
|
||||
|
||||
Y_xor_Z = _mm256_xor_si256( B, C );
|
||||
|
||||
for ( i = 0; i < 80; i += 8 )
|
||||
{
|
||||
SHA3_4WAY_STEP( A, B, C, D, E, F, G, H, i + 0 );
|
||||
|
@@ -40,8 +40,8 @@
|
||||
#endif
|
||||
|
||||
#define CH(X, Y, Z) ((((Y) ^ (Z)) & (X)) ^ (Z))
|
||||
#define MAJ(X, Y, Z) (((Y) & (Z)) | (((Y) | (Z)) & (X)))
|
||||
|
||||
//#define MAJ(X, Y, Z) (((Y) & (Z)) | (((Y) | (Z)) & (X)))
|
||||
#define MAJ( X, Y, Z ) ( Y ^ ( ( X ^ Y ) & ( Y ^ Z ) ) )
|
||||
#define ROTR SPH_ROTR32
|
||||
|
||||
#define BSG2_0(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
|
||||
@@ -73,7 +73,194 @@ static const sph_u32 H256[8] = {
|
||||
|
||||
#if defined(__SHA__)
|
||||
|
||||
#include "sha256-hash-opt.c"
|
||||
#include "simd-utils.h"
|
||||
|
||||
static void sha2_round( const uint8_t input[], uint32_t state[8] )
|
||||
{
|
||||
__m128i STATE0, STATE1;
|
||||
__m128i MSG, TMP, MASK;
|
||||
__m128i TMSG0, TMSG1, TMSG2, TMSG3;
|
||||
__m128i ABEF_SAVE, CDGH_SAVE;
|
||||
|
||||
// Load initial values
|
||||
TMP = _mm_load_si128((__m128i*) &state[0]);
|
||||
STATE1 = _mm_load_si128((__m128i*) &state[4]);
|
||||
MASK = _mm_set_epi64x(0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL);
|
||||
|
||||
TMP = _mm_shuffle_epi32(TMP, 0xB1); // CDAB
|
||||
STATE1 = _mm_shuffle_epi32(STATE1, 0x1B); // EFGH
|
||||
STATE0 = _mm_alignr_epi8(TMP, STATE1, 8); // ABEF
|
||||
STATE1 = _mm_blend_epi16(STATE1, TMP, 0xF0); // CDGH
|
||||
|
||||
// Save current hash
|
||||
ABEF_SAVE = STATE0;
|
||||
CDGH_SAVE = STATE1;
|
||||
|
||||
// Rounds 0-3
|
||||
MSG = _mm_load_si128((const __m128i*) (input+0));
|
||||
TMSG0 = _mm_shuffle_epi8(MSG, MASK);
|
||||
MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0xE9B5DBA5B5C0FBCFULL, 0x71374491428A2F98ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
|
||||
// Rounds 4-7
|
||||
TMSG1 = _mm_load_si128((const __m128i*) (input+16));
|
||||
TMSG1 = _mm_shuffle_epi8(TMSG1, MASK);
|
||||
MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0xAB1C5ED5923F82A4ULL, 0x59F111F13956C25BULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
|
||||
|
||||
// Rounds 8-11
|
||||
TMSG2 = _mm_load_si128((const __m128i*) (input+32));
|
||||
TMSG2 = _mm_shuffle_epi8(TMSG2, MASK);
|
||||
MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0x550C7DC3243185BEULL, 0x12835B01D807AA98ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
|
||||
|
||||
// Rounds 12-15
|
||||
TMSG3 = _mm_load_si128((const __m128i*) (input+48));
|
||||
TMSG3 = _mm_shuffle_epi8(TMSG3, MASK);
|
||||
MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0xC19BF1749BDC06A7ULL, 0x80DEB1FE72BE5D74ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
|
||||
TMSG0 = _mm_add_epi32(TMSG0, TMP);
|
||||
TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
|
||||
|
||||
// Rounds 16-19
|
||||
MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x240CA1CC0FC19DC6ULL, 0xEFBE4786E49B69C1ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
|
||||
TMSG1 = _mm_add_epi32(TMSG1, TMP);
|
||||
TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
|
||||
|
||||
// Rounds 20-23
|
||||
MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x76F988DA5CB0A9DCULL, 0x4A7484AA2DE92C6FULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
|
||||
TMSG2 = _mm_add_epi32(TMSG2, TMP);
|
||||
TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
|
||||
|
||||
// Rounds 24-27
|
||||
MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0xBF597FC7B00327C8ULL, 0xA831C66D983E5152ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
|
||||
TMSG3 = _mm_add_epi32(TMSG3, TMP);
|
||||
TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
|
||||
|
||||
// Rounds 28-31
|
||||
MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0x1429296706CA6351ULL, 0xD5A79147C6E00BF3ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
|
||||
TMSG0 = _mm_add_epi32(TMSG0, TMP);
|
||||
TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
|
||||
|
||||
// Rounds 32-35
|
||||
MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x53380D134D2C6DFCULL, 0x2E1B213827B70A85ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
|
||||
TMSG1 = _mm_add_epi32(TMSG1, TMP);
|
||||
TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
|
||||
|
||||
// Rounds 36-39
|
||||
MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x92722C8581C2C92EULL, 0x766A0ABB650A7354ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
|
||||
TMSG2 = _mm_add_epi32(TMSG2, TMP);
|
||||
TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
|
||||
|
||||
// Rounds 40-43
|
||||
MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0xC76C51A3C24B8B70ULL, 0xA81A664BA2BFE8A1ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
|
||||
TMSG3 = _mm_add_epi32(TMSG3, TMP);
|
||||
TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
|
||||
|
||||
// Rounds 44-47
|
||||
MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0x106AA070F40E3585ULL, 0xD6990624D192E819ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
|
||||
TMSG0 = _mm_add_epi32(TMSG0, TMP);
|
||||
TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
|
||||
|
||||
// Rounds 48-51
|
||||
MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x34B0BCB52748774CULL, 0x1E376C0819A4C116ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
|
||||
TMSG1 = _mm_add_epi32(TMSG1, TMP);
|
||||
TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
|
||||
|
||||
// Rounds 52-55
|
||||
MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x682E6FF35B9CCA4FULL, 0x4ED8AA4A391C0CB3ULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
|
||||
TMSG2 = _mm_add_epi32(TMSG2, TMP);
|
||||
TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
|
||||
// Rounds 56-59
|
||||
MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0x8CC7020884C87814ULL, 0x78A5636F748F82EEULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
|
||||
TMSG3 = _mm_add_epi32(TMSG3, TMP);
|
||||
TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
|
||||
// Rounds 60-63
|
||||
MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0xC67178F2BEF9A3F7ULL, 0xA4506CEB90BEFFFAULL));
|
||||
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
|
||||
MSG = _mm_shuffle_epi32(MSG, 0x0E);
|
||||
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
|
||||
|
||||
// Add values back to state
|
||||
STATE0 = _mm_add_epi32(STATE0, ABEF_SAVE);
|
||||
STATE1 = _mm_add_epi32(STATE1, CDGH_SAVE);
|
||||
|
||||
TMP = _mm_shuffle_epi32(STATE0, 0x1B); // FEBA
|
||||
STATE1 = _mm_shuffle_epi32(STATE1, 0xB1); // DCHG
|
||||
STATE0 = _mm_blend_epi16(TMP, STATE1, 0xF0); // DCBA
|
||||
STATE1 = _mm_alignr_epi8(STATE1, TMP, 8); // ABEF
|
||||
|
||||
// Save state
|
||||
_mm_store_si128((__m128i*) &state[0], STATE0);
|
||||
_mm_store_si128((__m128i*) &state[4], STATE1);
|
||||
}
|
||||
|
||||
#else // no SHA
|
||||
|
||||
|
@@ -38,7 +38,8 @@
|
||||
#if SPH_64
|
||||
|
||||
#define CH(X, Y, Z) ((((Y) ^ (Z)) & (X)) ^ (Z))
|
||||
#define MAJ(X, Y, Z) (((X) & (Y)) | (((X) | (Y)) & (Z)))
|
||||
//#define MAJ(X, Y, Z) (((X) & (Y)) | (((X) | (Y)) & (Z)))
|
||||
#define MAJ( X, Y, Z ) ( Y ^ ( ( X ^ Y ) & ( Y ^ Z ) ) )
|
||||
|
||||
#define ROTR64 SPH_ROTR64
|
||||
|
||||
|
@@ -310,12 +310,13 @@ do { \
|
||||
|
||||
#define PERM_ELT8(xa0, xa1, xb0, xb1, xb2, xb3, xc, xm) \
|
||||
do { \
|
||||
xa0 = _mm256_xor_si256( xm, _mm256_xor_si256( xb1, _mm256_xor_si256( \
|
||||
xa0 = mm256_xor3( xm, xb1, _mm256_xor_si256( \
|
||||
_mm256_andnot_si256( xb3, xb2 ), \
|
||||
_mm256_mullo_epi32( _mm256_xor_si256( xa0, _mm256_xor_si256( xc, \
|
||||
_mm256_mullo_epi32( mm256_rol_32( xa1, 15 ), _mm256_set1_epi32(5UL) ) \
|
||||
) ), _mm256_set1_epi32(3UL) ) ) ) ); \
|
||||
xb0 = mm256_not( _mm256_xor_si256( xa0, mm256_rol_32( xb0, 1 ) ) ); \
|
||||
_mm256_mullo_epi32( mm256_xor3( xa0, xc, \
|
||||
_mm256_mullo_epi32( mm256_rol_32( xa1, 15 ), \
|
||||
_mm256_set1_epi32(5UL) ) ), \
|
||||
_mm256_set1_epi32(3UL) ) ) ); \
|
||||
xb0 = mm256_xnor( xa0, mm256_rol_32( xb0, 1 ) ); \
|
||||
} while (0)
|
||||
|
||||
#define PERM_STEP_0_8 do { \
|
||||
|
@@ -309,22 +309,16 @@ static const uint64_t IV512[] = {
|
||||
sc->bcount = bcount; \
|
||||
} while (0)
|
||||
|
||||
// AVX2 all scalar vars are now vectors representing 4 nonces in parallel
|
||||
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
|
||||
#define TFBIG_KINIT_8WAY( k0, k1, k2, k3, k4, k5, k6, k7, k8, t0, t1, t2 ) \
|
||||
do { \
|
||||
k8 = _mm512_xor_si512( _mm512_xor_si512( \
|
||||
_mm512_xor_si512( _mm512_xor_si512( k0, k1 ), \
|
||||
_mm512_xor_si512( k2, k3 ) ), \
|
||||
_mm512_xor_si512( _mm512_xor_si512( k4, k5 ), \
|
||||
_mm512_xor_si512( k6, k7 ) ) ), \
|
||||
m512_const1_64( 0x1BD11BDAA9FC1A22) ); \
|
||||
k8 = mm512_xor3( mm512_xor3( k0, k1, k2 ), mm512_xor3( k3, k4, k5 ), \
|
||||
mm512_xor3( k6, k7, m512_const1_64( 0x1BD11BDAA9FC1A22) ));\
|
||||
t2 = t0 ^ t1; \
|
||||
} while (0)
|
||||
|
||||
|
||||
#define TFBIG_ADDKEY_8WAY(w0, w1, w2, w3, w4, w5, w6, w7, k, t, s) \
|
||||
do { \
|
||||
w0 = _mm512_add_epi64( w0, SKBI(k,s,0) ); \
|
||||
@@ -340,7 +334,6 @@ do { \
|
||||
m512_const1_64( s ) ) ); \
|
||||
} while (0)
|
||||
|
||||
|
||||
#define TFBIG_MIX_8WAY(x0, x1, rc) \
|
||||
do { \
|
||||
x0 = _mm512_add_epi64( x0, x1 ); \
|
||||
|
@@ -44,8 +44,8 @@ int verthash_info_init(verthash_info_t* info, const char* file_name)
|
||||
if ( opt_data_file || !opt_verify )
|
||||
{
|
||||
if ( opt_data_file )
|
||||
applog( LOG_ERR,
|
||||
"Verthash data file not found or invalid: %s", info->fileName );
|
||||
applog( LOG_ERR, "Verthash data file not found or invalid: %s",
|
||||
info->fileName );
|
||||
else
|
||||
{
|
||||
applog( LOG_ERR,
|
||||
@@ -134,87 +134,133 @@ static inline uint32_t fnv1a(const uint32_t a, const uint32_t b)
|
||||
return (a ^ b) * 0x1000193;
|
||||
}
|
||||
|
||||
void verthash_hash(const unsigned char* blob_bytes,
|
||||
const size_t blob_size,
|
||||
const unsigned char(*input)[VH_HEADER_SIZE],
|
||||
unsigned char(*output)[VH_HASH_OUT_SIZE])
|
||||
#if 0
|
||||
static void rotate_indexes( uint32_t *p )
|
||||
{
|
||||
unsigned char p1[VH_HASH_OUT_SIZE] __attribute__ ((aligned (64)));
|
||||
sha3(&input[0], VH_HEADER_SIZE, &p1[0], VH_HASH_OUT_SIZE);
|
||||
|
||||
unsigned char p0[VH_N_SUBSET];
|
||||
|
||||
unsigned char input_header[VH_HEADER_SIZE] __attribute__ ((aligned (64)));
|
||||
memcpy(input_header, input, VH_HEADER_SIZE);
|
||||
|
||||
for (size_t i = 0; i < VH_N_ITER; ++i)
|
||||
{
|
||||
input_header[0] += 1;
|
||||
sha3(&input_header[0], VH_HEADER_SIZE, p0 + i * VH_P0_SIZE, VH_P0_SIZE);
|
||||
}
|
||||
|
||||
uint32_t* p0_index = (uint32_t*)p0;
|
||||
uint32_t seek_indexes[VH_N_INDEXES] __attribute__ ((aligned (64)));
|
||||
|
||||
for ( size_t x = 0; x < VH_N_ROT; ++x )
|
||||
{
|
||||
memcpy( seek_indexes + x * (VH_N_SUBSET / sizeof(uint32_t)),
|
||||
p0, VH_N_SUBSET);
|
||||
|
||||
//#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
// 512 bit vector processing is actually slower because it reduces the CPU
|
||||
// clock significantly, which also slows mem access. The AVX512 rol instruction
|
||||
// is still available for smaller vectors.
|
||||
|
||||
// for ( size_t y = 0; y < VH_N_SUBSET / sizeof(uint32_t); y += 16 )
|
||||
// {
|
||||
// __m512i *p0_v = (__m512i*)( p0_index + y );
|
||||
// *p0_v = mm512_rol_32( *p0_v, 1 );
|
||||
// }
|
||||
|
||||
#if defined(__AVX2__)
|
||||
|
||||
for ( size_t y = 0; y < VH_N_SUBSET / sizeof(uint32_t); y += 8 )
|
||||
{
|
||||
__m256i *p0_v = (__m256i*)( p0_index + y );
|
||||
*p0_v = mm256_rol_32( *p0_v, 1 );
|
||||
}
|
||||
for ( size_t x = 0; x < VH_N_SUBSET / sizeof(__m256i); x += 8 )
|
||||
{
|
||||
__m256i *px = (__m256i*)p + x;
|
||||
|
||||
px[0] = mm256_rol_32( px[0], 1 );
|
||||
px[1] = mm256_rol_32( px[1], 1 );
|
||||
px[2] = mm256_rol_32( px[2], 1 );
|
||||
px[3] = mm256_rol_32( px[3], 1 );
|
||||
px[4] = mm256_rol_32( px[4], 1 );
|
||||
px[5] = mm256_rol_32( px[5], 1 );
|
||||
px[6] = mm256_rol_32( px[6], 1 );
|
||||
px[7] = mm256_rol_32( px[7], 1 );
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
for ( size_t y = 0; y < VH_N_SUBSET / sizeof(uint32_t); y += 4 )
|
||||
{
|
||||
__m128i *p0_v = (__m128i*)( p0_index + y );
|
||||
*p0_v = mm128_rol_32( *p0_v, 1 );
|
||||
}
|
||||
for ( size_t x = 0; x < VH_N_SUBSET / sizeof(__m128i); x += 8 )
|
||||
{
|
||||
__m128i *px = (__m128i*)p0_index + x;
|
||||
|
||||
px[0] = mm128_rol_32( px[0], 1 );
|
||||
px[1] = mm128_rol_32( px[1], 1 );
|
||||
px[2] = mm128_rol_32( px[2], 1 );
|
||||
px[3] = mm128_rol_32( px[3], 1 );
|
||||
px[4] = mm128_rol_32( px[4], 1 );
|
||||
px[5] = mm128_rol_32( px[5], 1 );
|
||||
px[6] = mm128_rol_32( px[6], 1 );
|
||||
px[7] = mm128_rol_32( px[7], 1 );
|
||||
}
|
||||
|
||||
#endif
|
||||
/*
|
||||
for ( size_t x = 0; x < VH_N_SUBSET / sizeof(uint32_t); ++x )
|
||||
p[x] = ( p[x] << 1 ) | ( p[x] >> 31 );
|
||||
*/
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline uint32_t rotl32( uint32_t a, size_t r )
|
||||
{
|
||||
return ( a << r ) | ( a >> (32-r) );
|
||||
}
|
||||
|
||||
// Vectorized and targetted version of fnv1a
|
||||
#if defined (__AVX2__)
|
||||
|
||||
#define MULXOR \
|
||||
*(__m256i*)hash = _mm256_mullo_epi32( _mm256_xor_si256( \
|
||||
*(__m256i*)hash, *(__m256i*)blob_off ), k );
|
||||
|
||||
#elif defined(__SSE41__)
|
||||
|
||||
#define MULXOR \
|
||||
casti_m128i( hash, 0 ) = _mm_mullo_epi32( _mm_xor_si128( \
|
||||
casti_m128i( hash, 0 ), casti_m128i( blob_off, 0 ) ), k ); \
|
||||
casti_m128i( hash, 1 ) = _mm_mullo_epi32( _mm_xor_si128( \
|
||||
casti_m128i( hash, 1 ), casti_m128i( blob_off, 1 ) ), k );
|
||||
|
||||
#else
|
||||
|
||||
#define MULXOR \
|
||||
for ( size_t j = 0; j < VH_HASH_OUT_SIZE / sizeof(uint32_t); j++ ) \
|
||||
hash[j] = fnv1a( hash[j], blob_off[j] ); \
|
||||
|
||||
#endif
|
||||
|
||||
// for (size_t y = 0; y < VH_N_SUBSET / sizeof(uint32_t); ++y)
|
||||
// {
|
||||
// *(p0_index + y) = ( *(p0_index + y) << 1 )
|
||||
// | ( 1 & (*(p0_index + y) >> 31) );
|
||||
// }
|
||||
}
|
||||
#define UPDATE_ACCUMULATOR \
|
||||
accumulator = fnv1a( accumulator, blob_off[0] ); \
|
||||
accumulator = fnv1a( accumulator, blob_off[1] ); \
|
||||
accumulator = fnv1a( accumulator, blob_off[2] ); \
|
||||
accumulator = fnv1a( accumulator, blob_off[3] ); \
|
||||
accumulator = fnv1a( accumulator, blob_off[4] ); \
|
||||
accumulator = fnv1a( accumulator, blob_off[5] ); \
|
||||
accumulator = fnv1a( accumulator, blob_off[6] ); \
|
||||
accumulator = fnv1a( accumulator, blob_off[7] )
|
||||
|
||||
uint32_t* p1_32 = (uint32_t*)p1;
|
||||
uint32_t* blob_bytes_32 = (uint32_t*)blob_bytes;
|
||||
uint32_t value_accumulator = 0x811c9dc5;
|
||||
const uint32_t mdiv = ((blob_size - VH_HASH_OUT_SIZE) / VH_BYTE_ALIGNMENT) + 1;
|
||||
for (size_t i = 0; i < VH_N_INDEXES; i++)
|
||||
{
|
||||
const uint32_t offset = (fnv1a(seek_indexes[i], value_accumulator) % mdiv) * VH_BYTE_ALIGNMENT / sizeof(uint32_t);
|
||||
const uint32_t *blob_off = blob_bytes_32 + offset;
|
||||
for (size_t i2 = 0; i2 < VH_HASH_OUT_SIZE / sizeof(uint32_t); i2++)
|
||||
{
|
||||
const uint32_t value = *( blob_off + i2 );
|
||||
uint32_t* p1_ptr = p1_32 + i2;
|
||||
*p1_ptr = fnv1a( *p1_ptr, value );
|
||||
value_accumulator = fnv1a( value_accumulator, value );
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(output, p1, VH_HASH_OUT_SIZE);
|
||||
// first pass no rotate
|
||||
#define ROUND_0 \
|
||||
for ( size_t i = 0; i < VH_N_SUBSET / sizeof(uint32_t); i++ ) \
|
||||
{ \
|
||||
const uint32_t *blob_off = blob + \
|
||||
( ( fnv1a( subset[i], accumulator ) % mdiv ) \
|
||||
* ( VH_BYTE_ALIGNMENT / sizeof(uint32_t) ) ); \
|
||||
UPDATE_ACCUMULATOR; \
|
||||
MULXOR; \
|
||||
}
|
||||
|
||||
// subsequent passes rotate by r on demand, no need for mass rotate
|
||||
#define ROUND_r( r ) \
|
||||
for ( size_t i = 0; i < VH_N_SUBSET / sizeof(uint32_t); i++ ) \
|
||||
{ \
|
||||
const uint32_t *blob_off = blob + \
|
||||
( ( fnv1a( rotl32( subset[i], r ), accumulator ) % mdiv ) \
|
||||
* ( VH_BYTE_ALIGNMENT / sizeof(uint32_t) ) ); \
|
||||
UPDATE_ACCUMULATOR; \
|
||||
MULXOR; \
|
||||
}
|
||||
|
||||
void verthash_hash( const void *blob_bytes, const size_t blob_size,
|
||||
const void *input, void *output )
|
||||
{
|
||||
uint32_t hash[ VH_HASH_OUT_SIZE / 4 ] __attribute__ ((aligned (64)));
|
||||
uint32_t subset[ VH_N_SUBSET / 4 ] __attribute__ ((aligned (64)));
|
||||
const uint32_t *blob = (const uint32_t*)blob_bytes;
|
||||
uint32_t accumulator = 0x811c9dc5;
|
||||
const uint32_t mdiv = ( ( blob_size - VH_HASH_OUT_SIZE )
|
||||
/ VH_BYTE_ALIGNMENT ) + 1;
|
||||
#if defined (__AVX2__)
|
||||
const __m256i k = _mm256_set1_epi32( 0x1000193 );
|
||||
#elif defined(__SSE41__)
|
||||
const __m128i k = _mm_set1_epi32( 0x1000193 );
|
||||
#endif
|
||||
|
||||
sha3( input, VH_HEADER_SIZE, hash, VH_HASH_OUT_SIZE );
|
||||
verthash_sha3_512_final_8( subset, ( (uint64_t*)input )[ 9 ] );
|
||||
|
||||
ROUND_0;
|
||||
for ( size_t r = 1; r < VH_N_ROT; ++r )
|
||||
ROUND_r( r );
|
||||
|
||||
memcpy( output, hash, VH_HASH_OUT_SIZE );
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
@@ -47,11 +47,11 @@ void verthash_info_free(verthash_info_t* info);
|
||||
//! Generate verthash data file and save it to specified location.
|
||||
int verthash_generate_data_file(const char* output_file_name);
|
||||
|
||||
void verthash_hash(const unsigned char* blob_bytes,
|
||||
const size_t blob_size,
|
||||
const unsigned char(*input)[VH_HEADER_SIZE],
|
||||
unsigned char(*output)[VH_HASH_OUT_SIZE]);
|
||||
void verthash_hash( const void *blob_bytes, const size_t blob_size,
|
||||
const void *input, void *output );
|
||||
|
||||
void verthash_sha3_512_prehash_72( const void *input );
|
||||
void verthash_sha3_512_final_8( void *hash, const uint64_t nonce );
|
||||
|
||||
#endif // !Verthash_INCLUDE_ONCE
|
||||
|
||||
|
301
algo/verthash/tiny_sha3/sha3-4way.c
Normal file
301
algo/verthash/tiny_sha3/sha3-4way.c
Normal file
@@ -0,0 +1,301 @@
|
||||
#if defined(__AVX2__)
|
||||
|
||||
// sha3-4way.c
|
||||
// 19-Nov-11 Markku-Juhani O. Saarinen <mjos@iki.fi>
|
||||
// vectorization by JayDDee 2021-03-27
|
||||
//
|
||||
// Revised 07-Aug-15 to match with official release of FIPS PUB 202 "SHA3"
|
||||
// Revised 03-Sep-15 for portability + OpenSSL - style API
|
||||
|
||||
#include "sha3-4way.h"
|
||||
|
||||
// constants
|
||||
static const uint64_t keccakf_rndc[24] = {
|
||||
0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
|
||||
0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
|
||||
0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
|
||||
0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
|
||||
0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
|
||||
0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
|
||||
0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
|
||||
0x8000000000008080, 0x0000000080000001, 0x8000000080008008
|
||||
};
|
||||
|
||||
void sha3_4way_keccakf( __m256i st[25] )
|
||||
{
|
||||
int i, j, r;
|
||||
__m256i t, bc[5];
|
||||
|
||||
for ( r = 0; r < KECCAKF_ROUNDS; r++ )
|
||||
{
|
||||
// Theta
|
||||
bc[0] = _mm256_xor_si256( st[0],
|
||||
mm256_xor4( st[5], st[10], st[15], st[20] ) );
|
||||
bc[1] = _mm256_xor_si256( st[1],
|
||||
mm256_xor4( st[6], st[11], st[16], st[21] ) );
|
||||
bc[2] = _mm256_xor_si256( st[2],
|
||||
mm256_xor4( st[7], st[12], st[17], st[22] ) );
|
||||
bc[3] = _mm256_xor_si256( st[3],
|
||||
mm256_xor4( st[8], st[13], st[18], st[23] ) );
|
||||
bc[4] = _mm256_xor_si256( st[4],
|
||||
mm256_xor4( st[9], st[14], st[19], st[24] ) );
|
||||
|
||||
for ( i = 0; i < 5; i++ )
|
||||
{
|
||||
t = _mm256_xor_si256( bc[ (i+4) % 5 ],
|
||||
mm256_rol_64( bc[ (i+1) % 5 ], 1 ) );
|
||||
st[ i ] = _mm256_xor_si256( st[ i ], t );
|
||||
st[ i+5 ] = _mm256_xor_si256( st[ i+ 5 ], t );
|
||||
st[ i+10 ] = _mm256_xor_si256( st[ i+10 ], t );
|
||||
st[ i+15 ] = _mm256_xor_si256( st[ i+15 ], t );
|
||||
st[ i+20 ] = _mm256_xor_si256( st[ i+20 ], t );
|
||||
}
|
||||
|
||||
// Rho Pi
|
||||
#define RHO_PI( i, c ) \
|
||||
bc[0] = st[ i ]; \
|
||||
st[ i ] = mm256_rol_64( t, c ); \
|
||||
t = bc[0]
|
||||
|
||||
t = st[1];
|
||||
|
||||
RHO_PI( 10, 1 );
|
||||
RHO_PI( 7, 3 );
|
||||
RHO_PI( 11, 6 );
|
||||
RHO_PI( 17, 10 );
|
||||
RHO_PI( 18, 15 );
|
||||
RHO_PI( 3, 21 );
|
||||
RHO_PI( 5, 28 );
|
||||
RHO_PI( 16, 36 );
|
||||
RHO_PI( 8, 45 );
|
||||
RHO_PI( 21, 55 );
|
||||
RHO_PI( 24, 2 );
|
||||
RHO_PI( 4, 14 );
|
||||
RHO_PI( 15, 27 );
|
||||
RHO_PI( 23, 41 );
|
||||
RHO_PI( 19, 56 );
|
||||
RHO_PI( 13, 8 );
|
||||
RHO_PI( 12, 25 );
|
||||
RHO_PI( 2, 43 );
|
||||
RHO_PI( 20, 62 );
|
||||
RHO_PI( 14, 18 );
|
||||
RHO_PI( 22, 39 );
|
||||
RHO_PI( 9, 61 );
|
||||
RHO_PI( 6, 20 );
|
||||
RHO_PI( 1, 44 );
|
||||
|
||||
#undef RHO_PI
|
||||
|
||||
// Chi
|
||||
for ( j = 0; j < 25; j += 5 )
|
||||
{
|
||||
memcpy( bc, &st[ j ], 5*32 );
|
||||
st[ j ] = _mm256_xor_si256( st[ j ],
|
||||
_mm256_andnot_si256( bc[1], bc[2] ) );
|
||||
st[ j+1 ] = _mm256_xor_si256( st[ j+1 ],
|
||||
_mm256_andnot_si256( bc[2], bc[3] ) );
|
||||
st[ j+2 ] = _mm256_xor_si256( st[ j+2 ],
|
||||
_mm256_andnot_si256( bc[3], bc[4] ) );
|
||||
st[ j+3 ] = _mm256_xor_si256( st[ j+3 ],
|
||||
_mm256_andnot_si256( bc[4], bc[0] ) );
|
||||
st[ j+4 ] = _mm256_xor_si256( st[ j+4 ],
|
||||
_mm256_andnot_si256( bc[0], bc[1] ) );
|
||||
}
|
||||
|
||||
// Iota
|
||||
st[0] = _mm256_xor_si256( st[0],
|
||||
_mm256_set1_epi64x( keccakf_rndc[ r ] ) );
|
||||
}
|
||||
}
|
||||
|
||||
int sha3_4way_init( sha3_4way_ctx_t *c, int mdlen )
|
||||
{
|
||||
for ( int i = 0; i < 25; i++ ) c->st[ i ] = m256_zero;
|
||||
c->mdlen = mdlen;
|
||||
c->rsiz = 200 - 2 * mdlen;
|
||||
c->pt = 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
int sha3_4way_update( sha3_4way_ctx_t *c, const void *data, size_t len )
|
||||
{
|
||||
size_t i;
|
||||
int j = c->pt;
|
||||
const int rsiz = c->rsiz / 8;
|
||||
const int l = len / 8;
|
||||
|
||||
for ( i = 0; i < l; i++ )
|
||||
{
|
||||
c->st[ j ] = _mm256_xor_si256( c->st[ j ],
|
||||
( (const __m256i*)data )[i] );
|
||||
j++;
|
||||
if ( j >= rsiz )
|
||||
{
|
||||
sha3_4way_keccakf( c->st );
|
||||
j = 0;
|
||||
}
|
||||
}
|
||||
c->pt = j;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int sha3_4way_final( void *md, sha3_4way_ctx_t *c )
|
||||
{
|
||||
c->st[ c->pt ] = _mm256_xor_si256( c->st[ c->pt ],
|
||||
m256_const1_64( 6 ) );
|
||||
c->st[ c->rsiz / 8 - 1 ] =
|
||||
_mm256_xor_si256( c->st[ c->rsiz / 8 - 1 ],
|
||||
m256_const1_64( 0x8000000000000000 ) );
|
||||
sha3_4way_keccakf( c->st );
|
||||
memcpy( md, c->st, c->mdlen * 4 );
|
||||
return 1;
|
||||
}
|
||||
|
||||
void *sha3_4way( const void *in, size_t inlen, void *md, int mdlen )
|
||||
{
|
||||
sha3_4way_ctx_t ctx;
|
||||
sha3_4way_init( &ctx, mdlen);
|
||||
sha3_4way_update( &ctx, in, inlen );
|
||||
sha3_4way_final( md, &ctx );
|
||||
return md;
|
||||
}
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
|
||||
void sha3_8way_keccakf( __m512i st[25] )
|
||||
{
|
||||
int i, j, r;
|
||||
__m512i t, bc[5];
|
||||
|
||||
// actual iteration
|
||||
for ( r = 0; r < KECCAKF_ROUNDS; r++ )
|
||||
{
|
||||
|
||||
// Theta
|
||||
for ( i = 0; i < 5; i++ )
|
||||
bc[i] = _mm512_xor_si512( st[i],
|
||||
mm512_xor4( st[ i+5 ], st[ i+10 ], st[ i+15 ], st[i+20 ] ) );
|
||||
|
||||
for ( i = 0; i < 5; i++ )
|
||||
{
|
||||
t = _mm512_xor_si512( bc[(i + 4) % 5],
|
||||
_mm512_rol_epi64( bc[(i + 1) % 5], 1 ) );
|
||||
for ( j = 0; j < 25; j += 5 )
|
||||
st[j + i] = _mm512_xor_si512( st[j + i], t );
|
||||
}
|
||||
|
||||
// Rho Pi
|
||||
#define RHO_PI( i, c ) \
|
||||
bc[0] = st[ i ]; \
|
||||
st[ i ] = _mm512_rol_epi64( t, c ); \
|
||||
t = bc[0]
|
||||
|
||||
t = st[1];
|
||||
|
||||
RHO_PI( 10, 1 );
|
||||
RHO_PI( 7, 3 );
|
||||
RHO_PI( 11, 6 );
|
||||
RHO_PI( 17, 10 );
|
||||
RHO_PI( 18, 15 );
|
||||
RHO_PI( 3, 21 );
|
||||
RHO_PI( 5, 28 );
|
||||
RHO_PI( 16, 36 );
|
||||
RHO_PI( 8, 45 );
|
||||
RHO_PI( 21, 55 );
|
||||
RHO_PI( 24, 2 );
|
||||
RHO_PI( 4, 14 );
|
||||
RHO_PI( 15, 27 );
|
||||
RHO_PI( 23, 41 );
|
||||
RHO_PI( 19, 56 );
|
||||
RHO_PI( 13, 8 );
|
||||
RHO_PI( 12, 25 );
|
||||
RHO_PI( 2, 43 );
|
||||
RHO_PI( 20, 62 );
|
||||
RHO_PI( 14, 18 );
|
||||
RHO_PI( 22, 39 );
|
||||
RHO_PI( 9, 61 );
|
||||
RHO_PI( 6, 20 );
|
||||
RHO_PI( 1, 44 );
|
||||
|
||||
#undef RHO_PI
|
||||
|
||||
// Chi
|
||||
for ( j = 0; j < 25; j += 5 )
|
||||
{
|
||||
for ( i = 0; i < 5; i++ )
|
||||
bc[i] = st[j + i];
|
||||
for ( i = 0; i < 5; i++ )
|
||||
st[ j+i ] = _mm512_xor_si512( st[ j+i ], _mm512_andnot_si512(
|
||||
bc[ (i+1) % 5 ], bc[ (i+2) % 5 ] ) );
|
||||
}
|
||||
|
||||
// Iota
|
||||
st[0] = _mm512_xor_si512( st[0], _mm512_set1_epi64( keccakf_rndc[r] ) );
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize the context for SHA3
|
||||
|
||||
int sha3_8way_init( sha3_8way_ctx_t *c, int mdlen )
|
||||
{
|
||||
for ( int i = 0; i < 25; i++ ) c->st[ i ] = m512_zero;
|
||||
c->mdlen = mdlen;
|
||||
c->rsiz = 200 - 2 * mdlen;
|
||||
c->pt = 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
// update state with more data
|
||||
|
||||
int sha3_8way_update( sha3_8way_ctx_t *c, const void *data, size_t len )
|
||||
{
|
||||
size_t i;
|
||||
int j = c->pt;
|
||||
const int rsiz = c->rsiz / 8;
|
||||
const int l = len / 8;
|
||||
|
||||
for ( i = 0; i < l; i++ )
|
||||
{
|
||||
c->st[ j ] = _mm512_xor_si512( c->st[ j ],
|
||||
( (const __m512i*)data )[i] );
|
||||
j++;
|
||||
if ( j >= rsiz )
|
||||
{
|
||||
sha3_8way_keccakf( c->st );
|
||||
j = 0;
|
||||
}
|
||||
}
|
||||
c->pt = j;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
// finalize and output a hash
|
||||
|
||||
int sha3_8way_final( void *md, sha3_8way_ctx_t *c )
|
||||
{
|
||||
c->st[ c->pt ] =
|
||||
_mm512_xor_si512( c->st[ c->pt ],
|
||||
m512_const1_64( 6 ) );
|
||||
c->st[ c->rsiz / 8 - 1 ] =
|
||||
_mm512_xor_si512( c->st[ c->rsiz / 8 - 1 ],
|
||||
m512_const1_64( 0x8000000000000000 ) );
|
||||
sha3_8way_keccakf( c->st );
|
||||
memcpy( md, c->st, c->mdlen * 8 );
|
||||
return 1;
|
||||
}
|
||||
|
||||
// compute a SHA-3 hash (md) of given byte length from "in"
|
||||
|
||||
void *sha3_8way( const void *in, size_t inlen, void *md, int mdlen )
|
||||
{
|
||||
sha3_8way_ctx_t sha3;
|
||||
sha3_8way_init( &sha3, mdlen);
|
||||
sha3_8way_update( &sha3, in, inlen );
|
||||
sha3_8way_final( md, &sha3 );
|
||||
return md;
|
||||
}
|
||||
|
||||
#endif // AVX512
|
||||
#endif // AVX2
|
67
algo/verthash/tiny_sha3/sha3-4way.h
Normal file
67
algo/verthash/tiny_sha3/sha3-4way.h
Normal file
@@ -0,0 +1,67 @@
|
||||
// sha3.h
|
||||
// 19-Nov-11 Markku-Juhani O. Saarinen <mjos@iki.fi>
|
||||
// 2021-03-27 JayDDee
|
||||
//
|
||||
#ifndef SHA3_4WAY_H
|
||||
#define SHA3_4WAY_H
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
#include "simd-utils.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef KECCAKF_ROUNDS
|
||||
#define KECCAKF_ROUNDS 24
|
||||
#endif
|
||||
|
||||
#if defined(__AVX2__)
|
||||
|
||||
typedef struct
|
||||
{
|
||||
__m256i st[25]; // 64-bit words * 4 lanes
|
||||
int pt, rsiz, mdlen; // these don't overflow
|
||||
} sha3_4way_ctx_t __attribute__ ((aligned (64)));;
|
||||
|
||||
// Compression function.
|
||||
void sha3_4way_keccakf( __m256i st[25] );
|
||||
|
||||
// OpenSSL - like interfece
|
||||
int sha3_4way_init( sha3_4way_ctx_t *c, int mdlen ); // mdlen = hash output in bytes
|
||||
int sha3_4way_update( sha3_4way_ctx_t *c, const void *data, size_t len );
|
||||
int sha3_4way_final( void *md, sha3_4way_ctx_t *c ); // digest goes to md
|
||||
|
||||
// compute a sha3 hash (md) of given byte length from "in"
|
||||
void *sha3_4way( const void *in, size_t inlen, void *md, int mdlen );
|
||||
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
|
||||
// state context
|
||||
typedef struct
|
||||
{
|
||||
__m512i st[25]; // 64-bit words * 8 lanes
|
||||
int pt, rsiz, mdlen; // these don't overflow
|
||||
} sha3_8way_ctx_t __attribute__ ((aligned (64)));;
|
||||
|
||||
// Compression function.
|
||||
void sha3_8way_keccakf( __m512i st[25] );
|
||||
|
||||
// OpenSSL - like interfece
|
||||
int sha3_8way_init( sha3_8way_ctx_t *c, int mdlen ); // mdlen = hash output in bytes
|
||||
int sha3_8way_update( sha3_8way_ctx_t *c, const void *data, size_t len );
|
||||
int sha3_8way_final( void *md, sha3_8way_ctx_t *c ); // digest goes to md
|
||||
|
||||
// compute a sha3 hash (md) of given byte length from "in"
|
||||
void *sha3_8way( const void *in, size_t inlen, void *md, int mdlen );
|
||||
|
||||
#endif // AVX512
|
||||
#endif // AVX2
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
@@ -5,6 +5,7 @@
|
||||
// Revised 03-Sep-15 for portability + OpenSSL - style API
|
||||
|
||||
#include "sha3.h"
|
||||
#include <string.h>
|
||||
|
||||
// update the state with given number of rounds
|
||||
|
||||
@@ -21,6 +22,7 @@ void sha3_keccakf(uint64_t st[25])
|
||||
0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
|
||||
0x8000000000008080, 0x0000000080000001, 0x8000000080008008
|
||||
};
|
||||
/*
|
||||
const int keccakf_rotc[24] = {
|
||||
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
|
||||
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
|
||||
@@ -29,6 +31,7 @@ void sha3_keccakf(uint64_t st[25])
|
||||
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
|
||||
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
|
||||
};
|
||||
*/
|
||||
|
||||
// variables
|
||||
int i, j, r;
|
||||
@@ -60,14 +63,50 @@ void sha3_keccakf(uint64_t st[25])
|
||||
st[j + i] ^= t;
|
||||
}
|
||||
|
||||
|
||||
// Rho Pi
|
||||
#define RHO_PI( i, c ) \
|
||||
bc[0] = st[ i ]; \
|
||||
st[ i ] = ROTL64( t, c ); \
|
||||
t = bc[0]
|
||||
|
||||
t = st[1];
|
||||
|
||||
RHO_PI( 10, 1 );
|
||||
RHO_PI( 7, 3 );
|
||||
RHO_PI( 11, 6 );
|
||||
RHO_PI( 17, 10 );
|
||||
RHO_PI( 18, 15 );
|
||||
RHO_PI( 3, 21 );
|
||||
RHO_PI( 5, 28 );
|
||||
RHO_PI( 16, 36 );
|
||||
RHO_PI( 8, 45 );
|
||||
RHO_PI( 21, 55 );
|
||||
RHO_PI( 24, 2 );
|
||||
RHO_PI( 4, 14 );
|
||||
RHO_PI( 15, 27 );
|
||||
RHO_PI( 23, 41 );
|
||||
RHO_PI( 19, 56 );
|
||||
RHO_PI( 13, 8 );
|
||||
RHO_PI( 12, 25 );
|
||||
RHO_PI( 2, 43 );
|
||||
RHO_PI( 20, 62 );
|
||||
RHO_PI( 14, 18 );
|
||||
RHO_PI( 22, 39 );
|
||||
RHO_PI( 9, 61 );
|
||||
RHO_PI( 6, 20 );
|
||||
RHO_PI( 1, 44 );
|
||||
|
||||
#undef RHO_PI
|
||||
|
||||
/*
|
||||
for (i = 0; i < 24; i++) {
|
||||
j = keccakf_piln[i];
|
||||
bc[0] = st[j];
|
||||
st[j] = ROTL64(t, keccakf_rotc[i]);
|
||||
t = bc[0];
|
||||
}
|
||||
*/
|
||||
|
||||
// Chi
|
||||
for (j = 0; j < 25; j += 5) {
|
||||
@@ -118,17 +157,20 @@ int sha3_init(sha3_ctx_t *c, int mdlen)
|
||||
int sha3_update(sha3_ctx_t *c, const void *data, size_t len)
|
||||
{
|
||||
size_t i;
|
||||
int j;
|
||||
int j = c->pt / 8;
|
||||
const int rsiz = c->rsiz / 8;
|
||||
const int l = len / 8;
|
||||
|
||||
j = c->pt;
|
||||
for (i = 0; i < len; i++) {
|
||||
c->st.b[j++] ^= ((const uint8_t *) data)[i];
|
||||
if (j >= c->rsiz) {
|
||||
sha3_keccakf(c->st.q);
|
||||
for ( i = 0; i < l; i++ )
|
||||
{
|
||||
c->st.q[ j++ ] ^= ( ((const uint64_t *) data) [i] );
|
||||
if ( j >= rsiz )
|
||||
{
|
||||
sha3_keccakf( c->st.q );
|
||||
j = 0;
|
||||
}
|
||||
}
|
||||
c->pt = j;
|
||||
c->pt = j*8;
|
||||
|
||||
return 1;
|
||||
}
|
||||
@@ -137,16 +179,10 @@ int sha3_update(sha3_ctx_t *c, const void *data, size_t len)
|
||||
|
||||
int sha3_final(void *md, sha3_ctx_t *c)
|
||||
{
|
||||
int i;
|
||||
|
||||
c->st.b[c->pt] ^= 0x06;
|
||||
c->st.b[c->rsiz - 1] ^= 0x80;
|
||||
c->st.q[ c->pt / 8 ] ^= 6;
|
||||
c->st.q[ c->rsiz / 8 - 1 ] ^= 0x8000000000000000;
|
||||
sha3_keccakf(c->st.q);
|
||||
|
||||
for (i = 0; i < c->mdlen; i++) {
|
||||
((uint8_t *) md)[i] = c->st.b[i];
|
||||
}
|
||||
|
||||
memcpy( md, c->st.q, c->mdlen );
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -155,7 +191,6 @@ int sha3_final(void *md, sha3_ctx_t *c)
|
||||
void *sha3(const void *in, size_t inlen, void *md, int mdlen)
|
||||
{
|
||||
sha3_ctx_t sha3;
|
||||
|
||||
sha3_init(&sha3, mdlen);
|
||||
sha3_update(&sha3, in, inlen);
|
||||
sha3_final(md, &sha3);
|
||||
|
@@ -1,6 +1,7 @@
|
||||
#include "algo-gate-api.h"
|
||||
#include "algo/sha/sph_sha2.h"
|
||||
#include "Verthash.h"
|
||||
#include "tiny_sha3/sha3-4way.h"
|
||||
|
||||
static verthash_info_t verthashInfo;
|
||||
|
||||
@@ -12,13 +13,88 @@ static const uint8_t verthashDatFileHash_bytes[32] =
|
||||
0x29, 0xec, 0xf8, 0x8f, 0x8a, 0xd4, 0x76, 0x39,
|
||||
0xb6, 0xed, 0xed, 0xaf, 0xd7, 0x21, 0xaa, 0x48 };
|
||||
|
||||
#if defined(__AVX2__)
|
||||
|
||||
static __thread sha3_4way_ctx_t sha3_mid_ctxA;
|
||||
static __thread sha3_4way_ctx_t sha3_mid_ctxB;
|
||||
|
||||
#else
|
||||
|
||||
static __thread sha3_ctx_t sha3_mid_ctx[8];
|
||||
|
||||
#endif
|
||||
|
||||
void verthash_sha3_512_prehash_72( const void *input )
|
||||
{
|
||||
#if defined(__AVX2__)
|
||||
|
||||
__m256i vin[10];
|
||||
mm256_intrlv80_4x64( vin, input );
|
||||
|
||||
sha3_4way_init( &sha3_mid_ctxA, 64 );
|
||||
sha3_4way_init( &sha3_mid_ctxB, 64 );
|
||||
|
||||
vin[0] = _mm256_add_epi8( vin[0], _mm256_set_epi64x( 4,3,2,1 ) );
|
||||
sha3_4way_update( &sha3_mid_ctxA, vin, 72 );
|
||||
|
||||
vin[0] = _mm256_add_epi8( vin[0], _mm256_set1_epi64x( 4 ) );
|
||||
sha3_4way_update( &sha3_mid_ctxB, vin, 72 );
|
||||
|
||||
#else
|
||||
|
||||
char in[80] __attribute__ ((aligned (64)));
|
||||
memcpy( in, input, 80 );
|
||||
for ( int i = 0; i < 8; i++ )
|
||||
{
|
||||
in[0] += 1;
|
||||
sha3_init( &sha3_mid_ctx[i], 64 );
|
||||
sha3_update( &sha3_mid_ctx[i], in, 72 );
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
void verthash_sha3_512_final_8( void *hash, const uint64_t nonce )
|
||||
{
|
||||
#if defined(__AVX2__)
|
||||
|
||||
__m256i vhashA[ 10 ] __attribute__ ((aligned (64)));
|
||||
__m256i vhashB[ 10 ] __attribute__ ((aligned (64)));
|
||||
|
||||
sha3_4way_ctx_t ctx;
|
||||
const __m256i vnonce = _mm256_set1_epi64x( nonce );
|
||||
|
||||
memcpy( &ctx, &sha3_mid_ctxA, sizeof ctx );
|
||||
sha3_4way_update( &ctx, &vnonce, 8 );
|
||||
sha3_4way_final( vhashA, &ctx );
|
||||
|
||||
memcpy( &ctx, &sha3_mid_ctxB, sizeof ctx );
|
||||
sha3_4way_update( &ctx, &vnonce, 8 );
|
||||
sha3_4way_final( vhashB, &ctx );
|
||||
|
||||
dintrlv_4x64( hash, hash+64, hash+128, hash+192, vhashA, 512 );
|
||||
dintrlv_4x64( hash+256, hash+320, hash+384, hash+448, vhashB, 512 );
|
||||
|
||||
#else
|
||||
|
||||
for ( int i = 0; i < 8; i++ )
|
||||
{
|
||||
sha3_ctx_t ctx;
|
||||
memcpy( &ctx, &sha3_mid_ctx[i], sizeof ctx );
|
||||
sha3_update( &ctx, &nonce, 8 );
|
||||
sha3_final( hash + i*64, &ctx );
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
int scanhash_verthash( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
uint32_t edata[20] __attribute__((aligned(64)));
|
||||
uint32_t hash[8] __attribute__((aligned(64)));
|
||||
uint32_t *pdata = work->data;
|
||||
uint32_t *ptarget = work->target;
|
||||
const uint32_t *ptarget = work->target;
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t last_nonce = max_nonce - 1;
|
||||
uint32_t n = first_nonce;
|
||||
@@ -26,12 +102,13 @@ int scanhash_verthash( struct work *work, uint32_t max_nonce,
|
||||
const bool bench = opt_benchmark;
|
||||
|
||||
mm128_bswap32_80( edata, pdata );
|
||||
verthash_sha3_512_prehash_72( edata );
|
||||
|
||||
do
|
||||
{
|
||||
edata[19] = n;
|
||||
verthash_hash( verthashInfo.data, verthashInfo.dataSize,
|
||||
(const unsigned char (*)[80]) edata,
|
||||
(unsigned char (*)[32]) hash );
|
||||
edata, hash );
|
||||
if ( valid_hash( hash, ptarget ) && !bench )
|
||||
{
|
||||
pdata[19] = bswap_32( n );
|
||||
@@ -44,22 +121,20 @@ int scanhash_verthash( struct work *work, uint32_t max_nonce,
|
||||
return 0;
|
||||
}
|
||||
|
||||
const char *default_verthash_data_file = "verthash.dat";
|
||||
static const char *default_verthash_data_file = "verthash.dat";
|
||||
|
||||
bool register_verthash_algo( algo_gate_t* gate )
|
||||
{
|
||||
|
||||
opt_target_factor = 256.0;
|
||||
gate->scanhash = (void*)&scanhash_verthash;
|
||||
gate->optimizations = AVX2_OPT;
|
||||
|
||||
// verthash data file
|
||||
char *verthash_data_file = opt_data_file ? opt_data_file
|
||||
: default_verthash_data_file;
|
||||
const char *verthash_data_file = opt_data_file ? opt_data_file
|
||||
: default_verthash_data_file;
|
||||
|
||||
int vhLoadResult = verthash_info_init( &verthashInfo, verthash_data_file );
|
||||
if (vhLoadResult == 0) // No Error
|
||||
{
|
||||
// and verify data file(if it was enabled)
|
||||
if ( opt_verify )
|
||||
{
|
||||
uint8_t vhDataFileHash[32] = { 0 };
|
||||
@@ -78,12 +153,12 @@ bool register_verthash_algo( algo_gate_t* gate )
|
||||
}
|
||||
}
|
||||
else
|
||||
|
||||
{
|
||||
// Handle Verthash error codes
|
||||
if ( vhLoadResult == 1 )
|
||||
{
|
||||
applog( LOG_ERR, "Verthash data file not found: %s", verthash_data_file );
|
||||
applog( LOG_ERR, "Verthash data file not found: %s",
|
||||
verthash_data_file );
|
||||
if ( !opt_data_file )
|
||||
applog( LOG_NOTICE, "Add '--verify' to create verthash.dat");
|
||||
}
|
||||
|
20
configure
vendored
20
configure
vendored
@@ -1,6 +1,6 @@
|
||||
#! /bin/sh
|
||||
# Guess values for system-dependent variables and create Makefiles.
|
||||
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.16.1.
|
||||
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.17.0.
|
||||
#
|
||||
#
|
||||
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
|
||||
@@ -577,8 +577,8 @@ MAKEFLAGS=
|
||||
# Identity of this package.
|
||||
PACKAGE_NAME='cpuminer-opt'
|
||||
PACKAGE_TARNAME='cpuminer-opt'
|
||||
PACKAGE_VERSION='3.16.1'
|
||||
PACKAGE_STRING='cpuminer-opt 3.16.1'
|
||||
PACKAGE_VERSION='3.17.0'
|
||||
PACKAGE_STRING='cpuminer-opt 3.17.0'
|
||||
PACKAGE_BUGREPORT=''
|
||||
PACKAGE_URL=''
|
||||
|
||||
@@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then
|
||||
# Omit some internal or obsolete options to make the list less imposing.
|
||||
# This message is too long to be a string in the A/UX 3.1 sh.
|
||||
cat <<_ACEOF
|
||||
\`configure' configures cpuminer-opt 3.16.1 to adapt to many kinds of systems.
|
||||
\`configure' configures cpuminer-opt 3.17.0 to adapt to many kinds of systems.
|
||||
|
||||
Usage: $0 [OPTION]... [VAR=VALUE]...
|
||||
|
||||
@@ -1404,7 +1404,7 @@ fi
|
||||
|
||||
if test -n "$ac_init_help"; then
|
||||
case $ac_init_help in
|
||||
short | recursive ) echo "Configuration of cpuminer-opt 3.16.1:";;
|
||||
short | recursive ) echo "Configuration of cpuminer-opt 3.17.0:";;
|
||||
esac
|
||||
cat <<\_ACEOF
|
||||
|
||||
@@ -1509,7 +1509,7 @@ fi
|
||||
test -n "$ac_init_help" && exit $ac_status
|
||||
if $ac_init_version; then
|
||||
cat <<\_ACEOF
|
||||
cpuminer-opt configure 3.16.1
|
||||
cpuminer-opt configure 3.17.0
|
||||
generated by GNU Autoconf 2.69
|
||||
|
||||
Copyright (C) 2012 Free Software Foundation, Inc.
|
||||
@@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF
|
||||
This file contains any messages produced by compilers while
|
||||
running configure, to aid debugging if configure makes a mistake.
|
||||
|
||||
It was created by cpuminer-opt $as_me 3.16.1, which was
|
||||
It was created by cpuminer-opt $as_me 3.17.0, which was
|
||||
generated by GNU Autoconf 2.69. Invocation command line was
|
||||
|
||||
$ $0 $@
|
||||
@@ -2993,7 +2993,7 @@ fi
|
||||
|
||||
# Define the identity of the package.
|
||||
PACKAGE='cpuminer-opt'
|
||||
VERSION='3.16.1'
|
||||
VERSION='3.17.0'
|
||||
|
||||
|
||||
cat >>confdefs.h <<_ACEOF
|
||||
@@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
|
||||
# report actual input values of CONFIG_FILES etc. instead of their
|
||||
# values after options handling.
|
||||
ac_log="
|
||||
This file was extended by cpuminer-opt $as_me 3.16.1, which was
|
||||
This file was extended by cpuminer-opt $as_me 3.17.0, which was
|
||||
generated by GNU Autoconf 2.69. Invocation command line was
|
||||
|
||||
CONFIG_FILES = $CONFIG_FILES
|
||||
@@ -6756,7 +6756,7 @@ _ACEOF
|
||||
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
|
||||
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
|
||||
ac_cs_version="\\
|
||||
cpuminer-opt config.status 3.16.1
|
||||
cpuminer-opt config.status 3.17.0
|
||||
configured by $0, generated by GNU Autoconf 2.69,
|
||||
with options \\"\$ac_cs_config\\"
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
AC_INIT([cpuminer-opt], [3.16.1])
|
||||
AC_INIT([cpuminer-opt], [3.17.0])
|
||||
|
||||
AC_PREREQ([2.59c])
|
||||
AC_CANONICAL_SYSTEM
|
||||
|
104
cpu-miner.c
104
cpu-miner.c
@@ -447,8 +447,10 @@ static bool work_decode( const json_t *val, struct work *work )
|
||||
|
||||
if ( !allow_mininginfo )
|
||||
net_diff = algo_gate.calc_network_diff( work );
|
||||
else
|
||||
net_diff = hash_to_diff( work->target );
|
||||
|
||||
work->targetdiff = hash_to_diff( work->target );
|
||||
work->targetdiff = net_diff;
|
||||
stratum_diff = last_targetdiff = work->targetdiff;
|
||||
work->sharediff = 0;
|
||||
algo_gate.decode_extra_data( work, &net_blocks );
|
||||
@@ -482,13 +484,17 @@ static bool get_mininginfo( CURL *curl, struct work *work )
|
||||
// "networkhashps": 56475980
|
||||
if ( res )
|
||||
{
|
||||
// net_diff is a global that is set from the work hash target by
|
||||
// both getwork and GBT. Don't overwrite it, define a local to override
|
||||
// the global.
|
||||
double net_diff = 0.;
|
||||
json_t *key = json_object_get( res, "difficulty" );
|
||||
if ( key )
|
||||
{
|
||||
if ( json_is_object( key ) )
|
||||
key = json_object_get( key, "proof-of-work" );
|
||||
if ( json_is_real( key ) )
|
||||
net_diff = work->targetdiff = json_real_value( key );
|
||||
net_diff = json_real_value( key );
|
||||
}
|
||||
|
||||
key = json_object_get( res, "networkhashps" );
|
||||
@@ -555,7 +561,11 @@ static bool gbt_work_decode( const json_t *val, struct work *work )
|
||||
if ( !s )
|
||||
continue;
|
||||
if ( !strcmp( s, "segwit" ) || !strcmp( s, "!segwit" ) )
|
||||
{
|
||||
segwit = true;
|
||||
if ( opt_debug )
|
||||
applog( LOG_INFO, "GBT: SegWit is enabled" );
|
||||
}
|
||||
}
|
||||
}
|
||||
// Segwit END
|
||||
@@ -904,7 +914,8 @@ static bool gbt_work_decode( const json_t *val, struct work *work )
|
||||
}
|
||||
for ( i = 0; i < ARRAY_SIZE( work->target ); i++ )
|
||||
work->target[7 - i] = be32dec( target + i );
|
||||
|
||||
net_diff = work->targetdiff = hash_to_diff( work->target );
|
||||
|
||||
tmp = json_object_get( val, "workid" );
|
||||
if ( tmp )
|
||||
{
|
||||
@@ -954,25 +965,25 @@ void scale_hash_for_display ( double* hashrate, char* prefix )
|
||||
else { *prefix = 'Y'; *hashrate /= 1e24; }
|
||||
}
|
||||
|
||||
static inline void sprintf_et( char *str, int seconds )
|
||||
static inline void sprintf_et( char *str, long unsigned int seconds )
|
||||
{
|
||||
// sprintf doesn't like uint64_t, Linux thinks it's long, Windows long long.
|
||||
unsigned int min = seconds / 60;
|
||||
unsigned int sec = seconds % 60;
|
||||
unsigned int hrs = min / 60;
|
||||
long unsigned int min = seconds / 60;
|
||||
long unsigned int sec = seconds % 60;
|
||||
long unsigned int hrs = min / 60;
|
||||
|
||||
if ( unlikely( hrs ) )
|
||||
{
|
||||
unsigned int years = hrs / (24*365);
|
||||
unsigned int days = hrs / 24;
|
||||
if ( years )
|
||||
sprintf( str, "%uy%ud", years, years % 365 );
|
||||
else if ( days ) //0d00h
|
||||
sprintf( str, "%ud%02uh", days, hrs % 24 );
|
||||
long unsigned int days = hrs / 24;
|
||||
long unsigned int years = days / 365;
|
||||
if ( years ) // 0y000d
|
||||
sprintf( str, "%luy%lud", years, years % 365 );
|
||||
else if ( days ) // 0d00h
|
||||
sprintf( str, "%lud%02luh", days, hrs % 24 );
|
||||
else // 0h00m
|
||||
sprintf( str, "%uh%02um", hrs, min % 60 );
|
||||
sprintf( str, "%luh%02lum", hrs, min % 60 );
|
||||
}
|
||||
else // 0m00s
|
||||
sprintf( str, "%um%02us", min, sec );
|
||||
sprintf( str, "%lum%02lus", min, sec );
|
||||
}
|
||||
|
||||
const long double exp32 = EXP32; // 2**32
|
||||
@@ -1071,7 +1082,8 @@ void report_summary_log( bool force )
|
||||
|
||||
double share_time = (double)et.tv_sec + (double)et.tv_usec / 1e6;
|
||||
double ghrate = global_hashrate;
|
||||
double shrate = safe_div( exp32 * last_targetdiff * (double)(accepts),
|
||||
double target_diff = exp32 * last_targetdiff;
|
||||
double shrate = safe_div( target_diff * (double)(accepts),
|
||||
share_time, 0. );
|
||||
double sess_hrate = safe_div( exp32 * norm_diff_sum,
|
||||
(double)uptime.tv_sec, 0. );
|
||||
@@ -1099,12 +1111,12 @@ void report_summary_log( bool force )
|
||||
|
||||
if ( accepted_share_count < submitted_share_count )
|
||||
{
|
||||
double ltd = exp32 * last_targetdiff;
|
||||
double lost_ghrate = uptime.tv_sec == 0 ? 0.
|
||||
: ltd * (double)(submitted_share_count - accepted_share_count )
|
||||
: target_diff
|
||||
* (double)(submitted_share_count - accepted_share_count )
|
||||
/ (double)uptime.tv_sec;
|
||||
double lost_shrate = share_time == 0. ? 0.
|
||||
: ltd * (double)(submits - accepts ) / share_time;
|
||||
: target_diff * (double)(submits - accepts ) / share_time;
|
||||
char lshr_units[4] = {0};
|
||||
char lghr_units[4] = {0};
|
||||
scale_hash_for_display( &lost_shrate, lshr_units );
|
||||
@@ -1140,7 +1152,7 @@ void report_summary_log( bool force )
|
||||
if ( mismatch )
|
||||
{
|
||||
if ( mismatch != 1 )
|
||||
applog(LOG_WARNING,"Share count mismatch: %d, stats may be incorrect", mismatch );
|
||||
applog(LOG_WARNING,"Share count mismatch: %d, stats may be inaccurate", mismatch );
|
||||
else
|
||||
applog(LOG_INFO,"Share count mismatch, submitted share may still be pending" );
|
||||
}
|
||||
@@ -1160,7 +1172,8 @@ static int share_result( int result, struct work *work,
|
||||
char bres[48];
|
||||
bool solved = false;
|
||||
bool stale = false;
|
||||
char *acol = NULL, *bcol = NULL, *scol = NULL, *rcol = NULL;
|
||||
char *acol, *bcol, *scol, *rcol;
|
||||
acol = bcol = scol = rcol = "\0";
|
||||
|
||||
pthread_mutex_lock( &stats_lock );
|
||||
|
||||
@@ -1202,7 +1215,7 @@ static int share_result( int result, struct work *work,
|
||||
sprintf( sres, "S%d", stale_share_count );
|
||||
sprintf( rres, "R%d", rejected_share_count );
|
||||
if unlikely( ( my_stats.net_diff > 0. )
|
||||
&& ( my_stats.share_diff >= net_diff ) )
|
||||
&& ( my_stats.share_diff >= my_stats.net_diff ) )
|
||||
{
|
||||
solved = true;
|
||||
solved_block_count++;
|
||||
@@ -2080,10 +2093,10 @@ static void stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
|
||||
sctx->block_height, net_diff, g_work->job_id );
|
||||
else if ( !opt_quiet )
|
||||
{
|
||||
unsigned char *xnonce2str = abin2hex( g_work->xnonce2,
|
||||
g_work->xnonce2_len );
|
||||
applog( LOG_INFO, "Extranonce2 %s, Block %d, Net Diff %.5g",
|
||||
xnonce2str, sctx->block_height, net_diff );
|
||||
unsigned char *xnonce2str = bebin2hex( g_work->xnonce2,
|
||||
g_work->xnonce2_len );
|
||||
applog( LOG_INFO, "Extranonce2 %s, Block %d, Job %s",
|
||||
xnonce2str, sctx->block_height, g_work->job_id );
|
||||
free( xnonce2str );
|
||||
}
|
||||
|
||||
@@ -2166,11 +2179,11 @@ static void *miner_thread( void *userdata )
|
||||
/* Set worker threads to nice 19 and then preferentially to SCHED_IDLE
|
||||
* and if that fails, then SCHED_BATCH. No need for this to be an
|
||||
* error if it fails */
|
||||
if (!opt_benchmark && opt_priority == 0)
|
||||
if ( !opt_priority )
|
||||
{
|
||||
setpriority(PRIO_PROCESS, 0, 19);
|
||||
if ( !thr_id && !opt_quiet )
|
||||
applog(LOG_INFO, "Miner thread priority %d (nice 19)", opt_priority );
|
||||
if ( !thr_id && opt_debug )
|
||||
applog(LOG_INFO, "Default miner thread priority %d (nice 19)", opt_priority );
|
||||
drop_policy();
|
||||
}
|
||||
else
|
||||
@@ -2187,9 +2200,12 @@ static void *miner_thread( void *userdata )
|
||||
case 4: prio = -10; break;
|
||||
case 5: prio = -15;
|
||||
}
|
||||
if ( !( thr_id || opt_quiet ) )
|
||||
applog( LOG_INFO, "Miner thread priority %d (nice %d)",
|
||||
if ( !thr_id )
|
||||
{
|
||||
applog( LOG_INFO, "User set miner thread priority %d (nice %d)",
|
||||
opt_priority, prio );
|
||||
applog( LOG_WARNING, "High priority mining threads may cause system instability");
|
||||
}
|
||||
#endif
|
||||
setpriority(PRIO_PROCESS, 0, prio);
|
||||
if ( opt_priority == 0 )
|
||||
@@ -2434,13 +2450,17 @@ static void *miner_thread( void *userdata )
|
||||
char hr_units[2] = {0,0};
|
||||
scale_hash_for_display( &hashrate, hr_units );
|
||||
sprintf( hr, "%.2f", hashrate );
|
||||
#if ((defined(_WIN64) || defined(__WINDOWS__)) || defined(_WIN32))
|
||||
#if (defined(_WIN64) || defined(__WINDOWS__) || defined(_WIN32))
|
||||
applog( LOG_NOTICE, "Total: %s %sH/s", hr, hr_units );
|
||||
#else
|
||||
applog( LOG_NOTICE, "Total: %s %sH/s, CPU temp: %dC",
|
||||
hr, hr_units, (uint32_t)cpu_temp(0) );
|
||||
float lo_freq = 0., hi_freq = 0.;
|
||||
linux_cpu_hilo_freq( &lo_freq, &hi_freq );
|
||||
applog( LOG_NOTICE,
|
||||
"Total: %s %sH/s, Temp: %dC, Freq: %.3f/%.3f GHz",
|
||||
hr, hr_units, (uint32_t)cpu_temp(0), lo_freq / 1e6,
|
||||
hi_freq / 1e6 );
|
||||
#endif
|
||||
}
|
||||
}
|
||||
} // benchmark
|
||||
|
||||
// conditional mining
|
||||
@@ -2730,10 +2750,10 @@ static void *stratum_thread(void *userdata )
|
||||
stratum.url = strdup( rpc_url );
|
||||
applog(LOG_BLUE, "Connection changed to %s", short_url);
|
||||
}
|
||||
else // if ( !opt_quiet )
|
||||
else
|
||||
applog(LOG_WARNING, "Stratum connection reset");
|
||||
// reset stats queue as well
|
||||
s_get_ptr = s_put_ptr = 0;
|
||||
if ( s_get_ptr != s_put_ptr ) s_get_ptr = s_put_ptr = 0;
|
||||
}
|
||||
|
||||
while ( !stratum.curl )
|
||||
@@ -2780,13 +2800,15 @@ static void *stratum_thread(void *userdata )
|
||||
else
|
||||
{
|
||||
applog(LOG_WARNING, "Stratum connection interrupted");
|
||||
stratum_disconnect( &stratum );
|
||||
// stratum_disconnect( &stratum );
|
||||
stratum_need_reset = true;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
applog(LOG_ERR, "Stratum connection timeout");
|
||||
stratum_disconnect( &stratum );
|
||||
stratum_need_reset = true;
|
||||
// stratum_disconnect( &stratum );
|
||||
}
|
||||
|
||||
} // loop
|
||||
@@ -3385,8 +3407,6 @@ void parse_arg(int key, char *arg )
|
||||
v = atoi(arg);
|
||||
if (v < 0 || v > 5) /* sanity check */
|
||||
show_usage_and_exit(1);
|
||||
// option is deprecated, show warning
|
||||
applog( LOG_WARNING, "High priority mining threads may cause system instability");
|
||||
opt_priority = v;
|
||||
break;
|
||||
case 'N': // N parameter for various scrypt algos
|
||||
|
3
miner.h
3
miner.h
@@ -307,6 +307,7 @@ extern json_t *json_rpc_call( CURL *curl, const char *url, const char *userpass,
|
||||
extern void cbin2hex(char *out, const char *in, size_t len);
|
||||
void bin2hex( char *s, const unsigned char *p, size_t len );
|
||||
char *abin2hex( const unsigned char *p, size_t len );
|
||||
char *bebin2hex( const unsigned char *p, size_t len );
|
||||
bool hex2bin( unsigned char *p, const char *hexstr, size_t len );
|
||||
bool jobj_binary( const json_t *obj, const char *key, void *buf,
|
||||
size_t buflen );
|
||||
@@ -900,7 +901,7 @@ Options:\n\
|
||||
--benchmark run in offline benchmark mode\n\
|
||||
--cpu-affinity set process affinity to cpu core(s), mask 0x3 for cores 0 and 1\n\
|
||||
--cpu-priority set process priority (default: 0 idle, 2 normal to 5 highest)\n\
|
||||
-b, --api-bind IP/Port for the miner API (default: 127.0.0.1:4048)\n\
|
||||
-b, --api-bind=address[:port] IP address for the miner API, default port is 4048)\n\
|
||||
--api-remote Allow remote control\n\
|
||||
--max-temp=N Only mine if cpu temp is less than specified value (linux)\n\
|
||||
--max-rate=N[KMG] Only mine if net hashrate is less than specified value\n\
|
||||
|
@@ -1225,37 +1225,6 @@ static inline void intrlv_4x64( void *dst, const void *src0,
|
||||
d[31] = _mm_unpackhi_epi64( s2[7], s3[7] );
|
||||
}
|
||||
|
||||
/*
|
||||
static inline void intrlv_4x64( void *dst, void *src0,
|
||||
void *src1, void *src2, void *src3, int bit_len )
|
||||
{
|
||||
uint64_t *d = (uint64_t*)dst;
|
||||
uint64_t *s0 = (uint64_t*)src0;
|
||||
uint64_t *s1 = (uint64_t*)src1;
|
||||
uint64_t *s2 = (uint64_t*)src2;
|
||||
uint64_t *s3 = (uint64_t*)src3;
|
||||
d[ 0] = s0[ 0]; d[ 1] = s1[ 0]; d[ 2] = s2[ 0]; d[ 3] = s3[ 0];
|
||||
d[ 4] = s0[ 1]; d[ 5] = s1[ 1]; d[ 6] = s2[ 1]; d[ 7] = s3[ 1];
|
||||
d[ 8] = s0[ 2]; d[ 9] = s1[ 2]; d[ 10] = s2[ 2]; d[ 11] = s3[ 2];
|
||||
d[ 12] = s0[ 3]; d[ 13] = s1[ 3]; d[ 14] = s2[ 3]; d[ 15] = s3[ 3];
|
||||
if ( bit_len <= 256 ) return;
|
||||
d[ 16] = s0[ 4]; d[ 17] = s1[ 4]; d[ 18] = s2[ 4]; d[ 19] = s3[ 4];
|
||||
d[ 20] = s0[ 5]; d[ 21] = s1[ 5]; d[ 22] = s2[ 5]; d[ 23] = s3[ 5];
|
||||
d[ 24] = s0[ 6]; d[ 25] = s1[ 6]; d[ 26] = s2[ 6]; d[ 27] = s3[ 6];
|
||||
d[ 28] = s0[ 7]; d[ 29] = s1[ 7]; d[ 30] = s2[ 7]; d[ 31] = s3[ 7];
|
||||
if ( bit_len <= 512 ) return;
|
||||
d[ 32] = s0[ 8]; d[ 33] = s1[ 8]; d[ 34] = s2[ 8]; d[ 35] = s3[ 8];
|
||||
d[ 36] = s0[ 9]; d[ 37] = s1[ 9]; d[ 38] = s2[ 9]; d[ 39] = s3[ 9];
|
||||
if ( bit_len <= 640 ) return;
|
||||
d[ 40] = s0[10]; d[ 41] = s1[10]; d[ 42] = s2[10]; d[ 43] = s3[10];
|
||||
d[ 44] = s0[11]; d[ 45] = s1[11]; d[ 46] = s2[11]; d[ 47] = s3[11];
|
||||
d[ 48] = s0[12]; d[ 49] = s1[12]; d[ 50] = s2[12]; d[ 51] = s3[12];
|
||||
d[ 52] = s0[13]; d[ 53] = s1[13]; d[ 54] = s2[13]; d[ 55] = s3[13];
|
||||
d[ 56] = s0[14]; d[ 57] = s1[14]; d[ 58] = s2[14]; d[ 59] = s3[14];
|
||||
d[ 60] = s0[15]; d[ 61] = s1[15]; d[ 62] = s2[15]; d[ 63] = s3[15];
|
||||
}
|
||||
*/
|
||||
|
||||
static inline void intrlv_4x64_512( void *dst, const void *src0,
|
||||
const void *src1, const void *src2, const void *src3 )
|
||||
{
|
||||
@@ -1282,26 +1251,6 @@ static inline void intrlv_4x64_512( void *dst, const void *src0,
|
||||
d[15] = _mm_unpackhi_epi64( s2[3], s3[3] );
|
||||
}
|
||||
|
||||
/*
|
||||
static inline void intrlv_4x64_512( void *dst, const void *src0,
|
||||
const void *src1, const void *src2, const void *src3 )
|
||||
{
|
||||
uint64_t *d = (uint64_t*)dst;
|
||||
const uint64_t *s0 = (const uint64_t*)src0;
|
||||
const uint64_t *s1 = (const uint64_t*)src1;
|
||||
const uint64_t *s2 = (const uint64_t*)src2;
|
||||
const uint64_t *s3 = (const uint64_t*)src3;
|
||||
d[ 0] = s0[ 0]; d[ 1] = s1[ 0]; d[ 2] = s2[ 0]; d[ 3] = s3[ 0];
|
||||
d[ 4] = s0[ 1]; d[ 5] = s1[ 1]; d[ 6] = s2[ 1]; d[ 7] = s3[ 1];
|
||||
d[ 8] = s0[ 2]; d[ 9] = s1[ 2]; d[ 10] = s2[ 2]; d[ 11] = s3[ 2];
|
||||
d[ 12] = s0[ 3]; d[ 13] = s1[ 3]; d[ 14] = s2[ 3]; d[ 15] = s3[ 3];
|
||||
d[ 16] = s0[ 4]; d[ 17] = s1[ 4]; d[ 18] = s2[ 4]; d[ 19] = s3[ 4];
|
||||
d[ 20] = s0[ 5]; d[ 21] = s1[ 5]; d[ 22] = s2[ 5]; d[ 23] = s3[ 5];
|
||||
d[ 24] = s0[ 6]; d[ 25] = s1[ 6]; d[ 26] = s2[ 6]; d[ 27] = s3[ 6];
|
||||
d[ 28] = s0[ 7]; d[ 29] = s1[ 7]; d[ 30] = s2[ 7]; d[ 31] = s3[ 7];
|
||||
}
|
||||
*/
|
||||
|
||||
static inline void dintrlv_4x64( void *dst0, void *dst1, void *dst2,
|
||||
void *dst3, const void *src, const int bit_len )
|
||||
{
|
||||
@@ -1347,38 +1296,6 @@ static inline void dintrlv_4x64( void *dst0, void *dst1, void *dst2,
|
||||
d3[7] = _mm_unpackhi_epi64( s[29], s[31] );
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
static inline void dintrlv_4x64( void *dst0, void *dst1, void *dst2,
|
||||
void *dst3, const void *src, int bit_len )
|
||||
{
|
||||
uint64_t *d0 = (uint64_t*)dst0;
|
||||
uint64_t *d1 = (uint64_t*)dst1;
|
||||
uint64_t *d2 = (uint64_t*)dst2;
|
||||
uint64_t *d3 = (uint64_t*)dst3;
|
||||
const uint64_t *s = (const uint64_t*)src;
|
||||
d0[ 0] = s[ 0]; d1[ 0] = s[ 1]; d2[ 0] = s[ 2]; d3[ 0] = s[ 3];
|
||||
d0[ 1] = s[ 4]; d1[ 1] = s[ 5]; d2[ 1] = s[ 6]; d3[ 1] = s[ 7];
|
||||
d0[ 2] = s[ 8]; d1[ 2] = s[ 9]; d2[ 2] = s[10]; d3[ 2] = s[11];
|
||||
d0[ 3] = s[12]; d1[ 3] = s[13]; d2[ 3] = s[14]; d3[ 3] = s[15];
|
||||
if ( bit_len <= 256 ) return;
|
||||
d0[ 4] = s[16]; d1[ 4] = s[17]; d2[ 4] = s[18]; d3[ 4] = s[19];
|
||||
d0[ 5] = s[20]; d1[ 5] = s[21]; d2[ 5] = s[22]; d3[ 5] = s[23];
|
||||
d0[ 6] = s[24]; d1[ 6] = s[25]; d2[ 6] = s[26]; d3[ 6] = s[27];
|
||||
d0[ 7] = s[28]; d1[ 7] = s[29]; d2[ 7] = s[30]; d3[ 7] = s[31];
|
||||
if ( bit_len <= 512 ) return;
|
||||
d0[ 8] = s[32]; d1[ 8] = s[33]; d2[ 8] = s[34]; d3[ 8] = s[35];
|
||||
d0[ 9] = s[36]; d1[ 9] = s[37]; d2[ 9] = s[38]; d3[ 9] = s[39];
|
||||
if ( bit_len <= 640 ) return;
|
||||
d0[10] = s[40]; d1[10] = s[41]; d2[10] = s[42]; d3[10] = s[43];
|
||||
d0[11] = s[44]; d1[11] = s[45]; d2[11] = s[46]; d3[11] = s[47];
|
||||
d0[12] = s[48]; d1[12] = s[49]; d2[12] = s[50]; d3[12] = s[51];
|
||||
d0[13] = s[52]; d1[13] = s[53]; d2[13] = s[54]; d3[13] = s[55];
|
||||
d0[14] = s[56]; d1[14] = s[57]; d2[14] = s[58]; d3[14] = s[59];
|
||||
d0[15] = s[60]; d1[15] = s[61]; d2[15] = s[62]; d3[15] = s[63];
|
||||
}
|
||||
*/
|
||||
|
||||
static inline void dintrlv_4x64_512( void *dst0, void *dst1, void *dst2,
|
||||
void *dst3, const void *src )
|
||||
{
|
||||
@@ -1405,26 +1322,6 @@ static inline void dintrlv_4x64_512( void *dst0, void *dst1, void *dst2,
|
||||
d3[3] = _mm_unpackhi_epi64( s[13], s[15] );
|
||||
}
|
||||
|
||||
/*
|
||||
static inline void dintrlv_4x64_512( void *dst0, void *dst1, void *dst2,
|
||||
void *dst3, const void *src )
|
||||
{
|
||||
uint64_t *d0 = (uint64_t*)dst0;
|
||||
uint64_t *d1 = (uint64_t*)dst1;
|
||||
uint64_t *d2 = (uint64_t*)dst2;
|
||||
uint64_t *d3 = (uint64_t*)dst3;
|
||||
const uint64_t *s = (const uint64_t*)src;
|
||||
d0[ 0] = s[ 0]; d1[ 0] = s[ 1]; d2[ 0] = s[ 2]; d3[ 0] = s[ 3];
|
||||
d0[ 1] = s[ 4]; d1[ 1] = s[ 5]; d2[ 1] = s[ 6]; d3[ 1] = s[ 7];
|
||||
d0[ 2] = s[ 8]; d1[ 2] = s[ 9]; d2[ 2] = s[10]; d3[ 2] = s[11];
|
||||
d0[ 3] = s[12]; d1[ 3] = s[13]; d2[ 3] = s[14]; d3[ 3] = s[15];
|
||||
d0[ 4] = s[16]; d1[ 4] = s[17]; d2[ 4] = s[18]; d3[ 4] = s[19];
|
||||
d0[ 5] = s[20]; d1[ 5] = s[21]; d2[ 5] = s[22]; d3[ 5] = s[23];
|
||||
d0[ 6] = s[24]; d1[ 6] = s[25]; d2[ 6] = s[26]; d3[ 6] = s[27];
|
||||
d0[ 7] = s[28]; d1[ 7] = s[29]; d2[ 7] = s[30]; d3[ 7] = s[31];
|
||||
}
|
||||
*/
|
||||
|
||||
static inline void extr_lane_4x64( void *d, const void *s,
|
||||
const int lane, const int bit_len )
|
||||
{
|
||||
@@ -1440,9 +1337,41 @@ static inline void extr_lane_4x64( void *d, const void *s,
|
||||
}
|
||||
|
||||
#if defined(__AVX2__)
|
||||
// Doesn't really need AVX2, just SSSE3, but is only used with AVX2 code.
|
||||
|
||||
// There a alignment problems with the source buffer on Wwindows,
|
||||
// can't use 256 bit bswap.
|
||||
static inline void mm256_intrlv80_4x64( void *d, const void *src )
|
||||
{
|
||||
__m128i s0 = casti_m128i( src,0 );
|
||||
__m128i s1 = casti_m128i( src,1 );
|
||||
__m128i s2 = casti_m128i( src,2 );
|
||||
__m128i s3 = casti_m128i( src,3 );
|
||||
__m128i s4 = casti_m128i( src,4 );
|
||||
|
||||
casti_m128i( d, 0 ) =
|
||||
casti_m128i( d, 1 ) = _mm_shuffle_epi32( s0, 0x44 );
|
||||
casti_m128i( d, 2 ) =
|
||||
casti_m128i( d, 3 ) = _mm_shuffle_epi32( s0, 0xee );
|
||||
|
||||
casti_m128i( d, 4 ) =
|
||||
casti_m128i( d, 5 ) = _mm_shuffle_epi32( s1, 0x44 );
|
||||
casti_m128i( d, 6 ) =
|
||||
casti_m128i( d, 7 ) = _mm_shuffle_epi32( s1, 0xee );
|
||||
|
||||
casti_m128i( d, 8 ) =
|
||||
casti_m128i( d, 9 ) = _mm_shuffle_epi32( s2, 0x44 );
|
||||
casti_m128i( d, 10 ) =
|
||||
casti_m128i( d, 11 ) = _mm_shuffle_epi32( s2, 0xee );
|
||||
|
||||
casti_m128i( d, 12 ) =
|
||||
casti_m128i( d, 13 ) = _mm_shuffle_epi32( s3, 0x44 );
|
||||
casti_m128i( d, 14 ) =
|
||||
casti_m128i( d, 15 ) = _mm_shuffle_epi32( s3, 0xee );
|
||||
|
||||
casti_m128i( d, 16 ) =
|
||||
casti_m128i( d, 17 ) = _mm_shuffle_epi32( s4, 0x44 );
|
||||
casti_m128i( d, 18 ) =
|
||||
casti_m128i( d, 19 ) = _mm_shuffle_epi32( s4, 0xee );
|
||||
}
|
||||
|
||||
static inline void mm256_bswap32_intrlv80_4x64( void *d, const void *src )
|
||||
{
|
||||
@@ -1636,40 +1565,6 @@ static inline void intrlv_8x64_512( void *dst, const void *src0,
|
||||
d[31] = _mm_unpackhi_epi64( s6[3], s7[3] );
|
||||
}
|
||||
|
||||
/*
|
||||
#define ILEAVE_8x64( i ) do \
|
||||
{ \
|
||||
uint64_t *d = (uint64_t*)(dst) + ( (i) << 3 ); \
|
||||
d[0] = *( (const uint64_t*)(s0) +(i) ); \
|
||||
d[1] = *( (const uint64_t*)(s1) +(i) ); \
|
||||
d[2] = *( (const uint64_t*)(s2) +(i) ); \
|
||||
d[3] = *( (const uint64_t*)(s3) +(i) ); \
|
||||
d[4] = *( (const uint64_t*)(s4) +(i) ); \
|
||||
d[5] = *( (const uint64_t*)(s5) +(i) ); \
|
||||
d[6] = *( (const uint64_t*)(s6) +(i) ); \
|
||||
d[7] = *( (const uint64_t*)(s7) +(i) ); \
|
||||
} while(0)
|
||||
|
||||
static inline void intrlv_8x64( void *dst, const void *s0,
|
||||
const void *s1, const void *s2, const void *s3, const void *s4,
|
||||
const void *s5, const void *s6, const void *s7, int bit_len )
|
||||
{
|
||||
ILEAVE_8x64( 0 ); ILEAVE_8x64( 1 );
|
||||
ILEAVE_8x64( 2 ); ILEAVE_8x64( 3 );
|
||||
if ( bit_len <= 256 ) return;
|
||||
ILEAVE_8x64( 4 ); ILEAVE_8x64( 5 );
|
||||
ILEAVE_8x64( 6 ); ILEAVE_8x64( 7 );
|
||||
if ( bit_len <= 512 ) return;
|
||||
ILEAVE_8x64( 8 ); ILEAVE_8x64( 9 );
|
||||
if ( bit_len <= 640 ) return;
|
||||
ILEAVE_8x64( 10 ); ILEAVE_8x64( 11 );
|
||||
ILEAVE_8x64( 12 ); ILEAVE_8x64( 13 );
|
||||
ILEAVE_8x64( 14 ); ILEAVE_8x64( 15 );
|
||||
}
|
||||
|
||||
#undef ILEAVE_8x64
|
||||
*/
|
||||
|
||||
|
||||
static inline void dintrlv_8x64( void *dst0, void *dst1, void *dst2,
|
||||
void *dst3, void *dst4, void *dst5, void *dst6, void *dst7,
|
||||
@@ -1815,39 +1710,6 @@ static inline void dintrlv_8x64_512( void *dst0, void *dst1, void *dst2,
|
||||
d7[3] = _mm_unpackhi_epi64( s[27], s[31] );
|
||||
}
|
||||
|
||||
/*
|
||||
#define DLEAVE_8x64( i ) do \
|
||||
{ \
|
||||
const uint64_t *s = (const uint64_t*)(src) + ( (i) << 3 ); \
|
||||
*( (uint64_t*)(d0) +(i) ) = s[0]; \
|
||||
*( (uint64_t*)(d1) +(i) ) = s[1]; \
|
||||
*( (uint64_t*)(d2) +(i) ) = s[2]; \
|
||||
*( (uint64_t*)(d3) +(i) ) = s[3]; \
|
||||
*( (uint64_t*)(d4) +(i) ) = s[4]; \
|
||||
*( (uint64_t*)(d5) +(i) ) = s[5]; \
|
||||
*( (uint64_t*)(d6) +(i) ) = s[6]; \
|
||||
*( (uint64_t*)(d7) +(i) ) = s[7]; \
|
||||
} while(0)
|
||||
|
||||
static inline void dintrlv_8x64( void *d0, void *d1, void *d2, void *d3,
|
||||
void *d4, void *d5, void *d6, void *d7, const void *src, int bit_len )
|
||||
{
|
||||
DLEAVE_8x64( 0 ); DLEAVE_8x64( 1 );
|
||||
DLEAVE_8x64( 2 ); DLEAVE_8x64( 3 );
|
||||
if ( bit_len <= 256 ) return;
|
||||
DLEAVE_8x64( 4 ); DLEAVE_8x64( 5 );
|
||||
DLEAVE_8x64( 6 ); DLEAVE_8x64( 7 );
|
||||
if ( bit_len <= 512 ) return;
|
||||
DLEAVE_8x64( 8 ); DLEAVE_8x64( 9 );
|
||||
if ( bit_len <= 640 ) return;
|
||||
DLEAVE_8x64( 10 ); DLEAVE_8x64( 11 );
|
||||
DLEAVE_8x64( 12 ); DLEAVE_8x64( 13 );
|
||||
DLEAVE_8x64( 14 ); DLEAVE_8x64( 15 );
|
||||
}
|
||||
|
||||
#undef DLEAVE_8x64
|
||||
*/
|
||||
|
||||
static inline void extr_lane_8x64( void *d, const void *s,
|
||||
const int lane, const int bit_len )
|
||||
{
|
||||
|
@@ -178,7 +178,7 @@ static inline __m128i mm128_mask_32( const __m128i v, const int m )
|
||||
// Basic operations without equivalent SIMD intrinsic
|
||||
|
||||
// Bitwise not (~v)
|
||||
#define mm128_not( v ) _mm_xor_si128( (v), m128_neg1 )
|
||||
#define mm128_not( v ) _mm_xor_si128( v, m128_neg1 )
|
||||
|
||||
// Unary negation of elements (-v)
|
||||
#define mm128_negate_64( v ) _mm_sub_epi64( m128_zero, v )
|
||||
@@ -263,7 +263,8 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
|
||||
_mm_or_si128( _mm_slli_epi32( v, c ), _mm_srli_epi32( v, 32-(c) ) )
|
||||
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
#if defined(__AVX512VL__)
|
||||
//#if defined(__AVX512F__) && defined(__AVX512VL__)
|
||||
|
||||
#define mm128_ror_64 _mm_ror_epi64
|
||||
#define mm128_rol_64 _mm_rol_epi64
|
||||
@@ -291,16 +292,13 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
|
||||
#define mm128_swap_64( v ) _mm_shuffle_epi32( v, 0x4e )
|
||||
#define mm128_ror_1x32( v ) _mm_shuffle_epi32( v, 0x39 )
|
||||
#define mm128_rol_1x32( v ) _mm_shuffle_epi32( v, 0x93 )
|
||||
//#define mm128_swap_64( v ) _mm_alignr_epi8( v, v, 8 )
|
||||
//#define mm128_ror_1x32( v ) _mm_alignr_epi8( v, v, 4 )
|
||||
//#define mm128_rol_1x32( v ) _mm_alignr_epi8( v, v, 12 )
|
||||
|
||||
// Swap 32 bit elements in 64 bit lanes
|
||||
#define mm128_swap64_32( v ) _mm_shuffle_epi32( v, 0xb1 )
|
||||
|
||||
#if defined(__SSSE3__)
|
||||
|
||||
// Rotate right by c bytes
|
||||
// Rotate right by c bytes, no SSE2 equivalent.
|
||||
static inline __m128i mm128_ror_x8( const __m128i v, const int c )
|
||||
{ return _mm_alignr_epi8( v, v, c ); }
|
||||
|
||||
|
@@ -18,7 +18,7 @@
|
||||
#define mm256_mov64_256( i ) _mm256_castsi128_si256( mm128_mov64_128( i ) )
|
||||
#define mm256_mov32_256( i ) _mm256_castsi128_si256( mm128_mov32_128( i ) )
|
||||
|
||||
// Mo0ve low element of vector to integer.
|
||||
// Move low element of vector to integer.
|
||||
#define mm256_mov256_64( v ) mm128_mov128_64( _mm256_castsi256_si128( v ) )
|
||||
#define mm256_mov256_32( v ) mm128_mov128_32( _mm256_castsi256_si128( v ) )
|
||||
|
||||
@@ -42,7 +42,7 @@ static inline __m256i m256_const_64( const uint64_t i3, const uint64_t i2,
|
||||
// 128 bit vector argument
|
||||
#define m256_const1_128( v ) \
|
||||
_mm256_permute4x64_epi64( _mm256_castsi128_si256( v ), 0x44 )
|
||||
// 64 bit integer argument
|
||||
// 64 bit integer argument zero extended to 128 bits.
|
||||
#define m256_const1_i128( i ) m256_const1_128( mm128_mov64_128( i ) )
|
||||
#define m256_const1_64( i ) _mm256_broadcastq_epi64( mm128_mov64_128( i ) )
|
||||
#define m256_const1_32( i ) _mm256_broadcastd_epi32( mm128_mov32_128( i ) )
|
||||
@@ -136,9 +136,84 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
|
||||
#define mm256_add4_8( a, b, c, d ) \
|
||||
_mm256_add_epi8( _mm256_add_epi8( a, b ), _mm256_add_epi8( c, d ) )
|
||||
|
||||
#if defined(__AVX512VL__)
|
||||
|
||||
// AVX512 has ternary logic that supports any 3 input boolean expression.
|
||||
|
||||
// a ^ b ^ c
|
||||
#define mm256_xor3( a, b, c ) \
|
||||
_mm256_ternarylogic_epi64( a, b, c, 0x96 )
|
||||
|
||||
// legacy convenience only
|
||||
#define mm256_xor4( a, b, c, d ) \
|
||||
_mm256_xor_si256( a, mm256_xor3( b, c, d ) )
|
||||
|
||||
// a & b & c
|
||||
#define mm256_and3( a, b, c ) \
|
||||
_mm256_ternarylogic_epi64( a, b, c, 0x80 )
|
||||
|
||||
// a | b | c
|
||||
#define mm256_or3( a, b, c ) \
|
||||
_mm256_ternarylogic_epi64( a, b, c, 0xfe )
|
||||
|
||||
// a ^ ( b & c )
|
||||
#define mm256_xorand( a, b, c ) \
|
||||
_mm256_ternarylogic_epi64( a, b, c, 0x78 )
|
||||
|
||||
// a & ( b ^ c )
|
||||
#define mm256_andxor( a, b, c ) \
|
||||
_mm256_ternarylogic_epi64( a, b, c, 0x60 )
|
||||
|
||||
// a ^ ( b | c )
|
||||
#define mm256_xoror( a, b, c ) \
|
||||
_mm256_ternarylogic_epi64( a, b, c, 0x1e )
|
||||
|
||||
// a ^ ( ~b & c )
|
||||
#define mm256_xorandnot( a, b, c ) \
|
||||
_mm256_ternarylogic_epi64( a, b, c, 0xd2 )
|
||||
|
||||
// a | ( b & c )
|
||||
#define mm256_orand( a, b, c ) \
|
||||
_mm256_ternarylogic_epi64( a, b, c, 0xf8 )
|
||||
|
||||
// ~( a ^ b ), same as (~a) ^ b
|
||||
#define mm256_xnor( a, b ) \
|
||||
_mm256_ternarylogic_epi64( a, b, b, 0x81 )
|
||||
|
||||
#else
|
||||
|
||||
#define mm256_xor3( a, b, c ) \
|
||||
_mm256_xor_si256( a, _mm256_xor_si256( b, c ) )
|
||||
|
||||
#define mm256_xor4( a, b, c, d ) \
|
||||
_mm256_xor_si256( _mm256_xor_si256( a, b ), _mm256_xor_si256( c, d ) )
|
||||
|
||||
#define mm256_and3( a, b, c ) \
|
||||
_mm256_and_si256( a, _mm256_and_si256( b, c ) )
|
||||
|
||||
#define mm256_or3( a, b, c ) \
|
||||
_mm256_or_si256( a, _mm256_or_si256( b, c ) )
|
||||
|
||||
#define mm256_xorand( a, b, c ) \
|
||||
_mm256_xor_si256( a, _mm256_and_si256( b, c ) )
|
||||
|
||||
#define mm256_andxor( a, b, c ) \
|
||||
_mm256_and_si256( a, _mm256_xor_si256( b, c ))
|
||||
|
||||
#define mm256_xoror( a, b, c ) \
|
||||
_mm256_xor_si256( a, _mm256_or_si256( b, c ) )
|
||||
|
||||
#define mm256_xorandnot( a, b, c ) \
|
||||
_mm256_xor_si256( a, _mm256_andnot_si256( b, c ) )
|
||||
|
||||
#define mm256_orand( a, b, c ) \
|
||||
_mm256_or_si256( a, _mm256_and_si256( b, c ) )
|
||||
|
||||
#define mm256_xnor( a, b ) \
|
||||
mm256_not( _mm256_xor_si256( a, b ) )
|
||||
|
||||
#endif
|
||||
|
||||
//
|
||||
// Bit rotations.
|
||||
//
|
||||
@@ -168,7 +243,10 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
|
||||
_mm256_srli_epi32( v, 32-(c) ) )
|
||||
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
// The spec says both F & VL are required, but just in case AMD
|
||||
// decides to implement ROL/R without AVX512F.
|
||||
#if defined(__AVX512VL__)
|
||||
//#if defined(__AVX512F__) && defined(__AVX512VL__)
|
||||
|
||||
// AVX512, control must be 8 bit immediate.
|
||||
|
||||
@@ -198,21 +276,14 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
|
||||
//
|
||||
// Rotate elements across all lanes.
|
||||
//
|
||||
// AVX2 has no full vector permute for elements less than 32 bits.
|
||||
// AVX512 has finer granularity full vector permutes.
|
||||
// AVX512 has full vector alignr which might be faster, especially for 32 bit
|
||||
// Swap 128 bit elements in 256 bit vector.
|
||||
#define mm256_swap_128( v ) _mm256_permute4x64_epi64( v, 0x4e )
|
||||
|
||||
// Rotate 256 bit vector by one 64 bit element
|
||||
#define mm256_ror_1x64( v ) _mm256_permute4x64_epi64( v, 0x39 )
|
||||
#define mm256_rol_1x64( v ) _mm256_permute4x64_epi64( v, 0x93 )
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
|
||||
static inline __m256i mm256_swap_128( const __m256i v )
|
||||
{ return _mm256_alignr_epi64( v, v, 2 ); }
|
||||
|
||||
static inline __m256i mm256_ror_1x64( const __m256i v )
|
||||
{ return _mm256_alignr_epi64( v, v, 1 ); }
|
||||
|
||||
static inline __m256i mm256_rol_1x64( const __m256i v )
|
||||
{ return _mm256_alignr_epi64( v, v, 3 ); }
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__)
|
||||
|
||||
static inline __m256i mm256_ror_1x32( const __m256i v )
|
||||
{ return _mm256_alignr_epi32( v, v, 1 ); }
|
||||
@@ -220,21 +291,8 @@ static inline __m256i mm256_ror_1x32( const __m256i v )
|
||||
static inline __m256i mm256_rol_1x32( const __m256i v )
|
||||
{ return _mm256_alignr_epi32( v, v, 7 ); }
|
||||
|
||||
static inline __m256i mm256_ror_3x32( const __m256i v )
|
||||
{ return _mm256_alignr_epi32( v, v, 3 ); }
|
||||
|
||||
static inline __m256i mm256_rol_3x32( const __m256i v )
|
||||
{ return _mm256_alignr_epi32( v, v, 5 ); }
|
||||
|
||||
#else // AVX2
|
||||
|
||||
// Swap 128 bit elements in 256 bit vector.
|
||||
#define mm256_swap_128( v ) _mm256_permute4x64_epi64( v, 0x4e )
|
||||
|
||||
// Rotate 256 bit vector by one 64 bit element
|
||||
#define mm256_ror_1x64( v ) _mm256_permute4x64_epi64( v, 0x39 )
|
||||
#define mm256_rol_1x64( v ) _mm256_permute4x64_epi64( v, 0x93 )
|
||||
|
||||
// Rotate 256 bit vector by one 32 bit element.
|
||||
#define mm256_ror_1x32( v ) \
|
||||
_mm256_permutevar8x32_epi32( v, \
|
||||
@@ -246,17 +304,6 @@ static inline __m256i mm256_rol_3x32( const __m256i v )
|
||||
m256_const_64( 0x0000000600000005, 0x0000000400000003, \
|
||||
0x0000000200000001, 0x0000000000000007 )
|
||||
|
||||
// Rotate 256 bit vector by three 32 bit elements (96 bits).
|
||||
#define mm256_ror_3x32( v ) \
|
||||
_mm256_permutevar8x32_epi32( v, \
|
||||
m256_const_64( 0x0000000200000001, 0x0000000000000007, \
|
||||
0x0000000600000005, 0x0000000400000003 )
|
||||
|
||||
#define mm256_rol_3x32( v ) \
|
||||
_mm256_permutevar8x32_epi32( v, \
|
||||
m256_const_64( 0x0000000400000003, 0x0000000200000001, \
|
||||
0x0000000000000007, 0x0000000600000005 )
|
||||
|
||||
#endif // AVX512 else AVX2
|
||||
|
||||
//
|
||||
|
@@ -61,7 +61,7 @@
|
||||
//
|
||||
// Additionally, permutations using smaller vectors can be more efficient
|
||||
// if the permutation doesn't cross lane boundaries, typically 128 bits,
|
||||
// and the smnaller vector can use an imm comtrol.
|
||||
// and the smaller vector can use an immediate control.
|
||||
//
|
||||
// If the permutation doesn't cross lane boundaries a shuffle instructions
|
||||
// can be used with imm control instead of permute.
|
||||
@@ -107,7 +107,7 @@ static inline __m512i m512_const_64( const uint64_t i7, const uint64_t i6,
|
||||
return v.m512i;
|
||||
}
|
||||
|
||||
// Equivalent of set1, broadcast lo element all elements.
|
||||
// Equivalent of set1, broadcast lo element to all elements.
|
||||
static inline __m512i m512_const1_256( const __m256i v )
|
||||
{ return _mm512_inserti64x4( _mm512_castsi256_si512( v ), v, 1 ); }
|
||||
|
||||
@@ -166,7 +166,9 @@ static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2,
|
||||
// Basic operations without SIMD equivalent
|
||||
|
||||
// ~x
|
||||
#define mm512_not( x ) _mm512_xor_si512( x, m512_neg1 )
|
||||
// #define mm512_not( x ) _mm512_xor_si512( x, m512_neg1 )
|
||||
static inline __m512i mm512_not( const __m512i x )
|
||||
{ return _mm512_ternarylogic_epi64( x, x, x, 1 ); }
|
||||
|
||||
// -x
|
||||
#define mm512_negate_64( x ) _mm512_sub_epi64( m512_zero, x )
|
||||
@@ -221,11 +223,61 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
|
||||
#define mm512_add4_8( a, b, c, d ) \
|
||||
_mm512_add_epi8( _mm512_add_epi8( a, b ), _mm512_add_epi8( c, d ) )
|
||||
|
||||
#define mm512_xor4( a, b, c, d ) \
|
||||
_mm512_xor_si512( _mm512_xor_si512( a, b ), _mm512_xor_si512( c, d ) )
|
||||
|
||||
|
||||
//
|
||||
// Ternary logic uses 8 bit truth table to define any 3 input logical
|
||||
// operation using any number or combination of AND, OR, XOR, NOT.
|
||||
|
||||
// a ^ b ^ c
|
||||
#define mm512_xor3( a, b, c ) \
|
||||
_mm512_ternarylogic_epi64( a, b, c, 0x96 )
|
||||
|
||||
// legacy convenience only
|
||||
#define mm512_xor4( a, b, c, d ) \
|
||||
_mm512_xor_si512( a, mm512_xor3( b, c, d ) )
|
||||
|
||||
// a & b & c
|
||||
#define mm512_and3( a, b, c ) \
|
||||
_mm512_ternarylogic_epi64( a, b, c, 0x80 )
|
||||
|
||||
// a | b | c
|
||||
#define mm512_or3( a, b, c ) \
|
||||
_mm512_ternarylogic_epi64( a, b, c, 0xfe )
|
||||
|
||||
// a ^ ( b & c )
|
||||
#define mm512_xorand( a, b, c ) \
|
||||
_mm512_ternarylogic_epi64( a, b, c, 0x78 )
|
||||
|
||||
// a & ( b ^ c )
|
||||
#define mm512_andxor( a, b, c ) \
|
||||
_mm512_ternarylogic_epi64( a, b, c, 0x60 )
|
||||
|
||||
// a ^ ( b | c )   -- imm 0x1e is XOR-OR, not XOR-AND
|
||||
#define mm512_xoror( a, b, c ) \
|
||||
_mm512_ternarylogic_epi64( a, b, c, 0x1e )
|
||||
|
||||
// a ^ ( ~b & c ) [ xor( a, andnot( b, c ) ]
|
||||
#define mm512_xorandnot( a, b, c ) \
|
||||
_mm512_ternarylogic_epi64( a, b, c, 0xd2 )
|
||||
|
||||
// a | ( b & c )
|
||||
#define mm512_orand( a, b, c ) \
|
||||
_mm512_ternarylogic_epi64( a, b, c, 0xf8 )
|
||||
|
||||
// Some 2 input operations that don't have their own instruction mnemonic.
|
||||
|
||||
// ~( a | b )
|
||||
#define mm512_nor( a, b ) \
|
||||
_mm512_ternarylogic_epi64( a, b, b, 0x01 )
|
||||
|
||||
// ~( a ^ b ), same as (~a) ^ b
|
||||
#define mm512_xnor( a, b ) \
|
||||
_mm512_ternarylogic_epi64( a, b, b, 0x81 )
|
||||
|
||||
// ~( a & b )
// vpternlog truth-table index is (a<<2)|(b<<1)|c.  With c duplicated from b,
// NAND requires entries idx 0,3,4 -> 1 and idx 7 -> 0.  Imm 0x3f is
// NAND(a,b) for any c.  (The previous imm 0xef gave idx4=0 / idx7=1,
// i.e. ~a | b — implication, not NAND.)
#define mm512_nand( a, b ) \
   _mm512_ternarylogic_epi64( a, b, b, 0x3f )
|
||||
|
||||
|
||||
// Bit rotations.
|
||||
|
||||
// AVX512F has built-in fixed and variable bit rotation for 64 & 32 bit
|
||||
|
165
util.c
165
util.c
@@ -795,6 +795,15 @@ char *abin2hex(const unsigned char *p, size_t len)
|
||||
return s;
|
||||
}
|
||||
|
||||
/* Render a byte buffer as a hex string in reversed (big-endian) byte order.
 * Returns a heap-allocated, NUL-terminated string of 2*len hex digits,
 * or NULL on allocation failure.  The caller must free() the result. */
char *bebin2hex(const unsigned char *p, size_t len)
{
	char *s = (char*) malloc((len * 2) + 1);
	if (!s) return NULL;
	/* When len == 0 the loop body (and its terminating sprintf) never
	 * runs, so terminate explicitly to avoid returning an
	 * uninitialized buffer. */
	s[0] = '\0';
	for ( size_t i = 0, j = len - 1; i < len; i++, j-- )
		sprintf( s + ( i*2 ), "%02x", (unsigned int) p[ j ] );
	return s;
}
|
||||
|
||||
bool hex2bin(unsigned char *p, const char *hexstr, size_t len)
|
||||
{
|
||||
char hex_byte[3];
|
||||
@@ -943,6 +952,140 @@ bool jobj_binary(const json_t *obj, const char *key, void *buf, size_t buflen)
|
||||
return true;
|
||||
}
|
||||
|
||||
/* One step of the Bech32 (BIP-173) checksum polymod: shift the 30-bit
 * accumulator left by 5 and XOR in the generator terms selected by the
 * bits shifted out of the top. */
static uint32_t bech32_polymod_step(uint32_t pre) {
    static const uint32_t gen[5] = {
        0x3b6a57b2UL, 0x26508e6dUL, 0x1ea119faUL,
        0x3d4233ddUL, 0x2a1462b3UL
    };
    uint32_t top = pre >> 25;                    /* bits leaving the accumulator */
    uint32_t chk = (pre & 0x1FFFFFFUL) << 5;
    for (int i = 0; i < 5; i++) {
        if ((top >> i) & 1)
            chk ^= gen[i];
    }
    return chk;
}
|
||||
|
||||
/* Reverse lookup: ASCII code -> 5-bit Bech32 value, or -1 for characters
 * outside the charset "qpzry9x8gf2tvdw0s3jn54khce6mua7l".
 * Upper- and lower-case letters map to the same values; '1', 'b', 'i',
 * and 'o' are deliberately invalid. */
static const int8_t bech32_charset_rev[128] = {
    /* 0x00-0x2f: control characters and punctuation */
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    /* '0'..'9' and remaining punctuation */
    15, -1, 10, 17, 21, 20, 26, 30,  7,  5, -1, -1, -1, -1, -1, -1,
    /* '@', 'A'..'O' */
    -1, 29, -1, 24, 13, 25,  9,  8, 23, -1, 18, 22, 31, 27, 19, -1,
    /* 'P'..'Z' and punctuation */
     1,  0,  3, 16, 11, 28, 12, 14,  6,  4,  2, -1, -1, -1, -1, -1,
    /* '`', 'a'..'o' (same values as upper case) */
    -1, 29, -1, 24, 13, 25,  9,  8, 23, -1, 18, 22, 31, 27, 19, -1,
    /* 'p'..'z' and punctuation */
     1,  0,  3, 16, 11, 28, 12, 14,  6,  4,  2, -1, -1, -1, -1, -1
};
|
||||
|
||||
/* Decode a Bech32 string (BIP-173) into its human-readable part and 5-bit
 * data values.
 *
 * hrp      - out: lower-cased human-readable part, NUL-terminated
 *            (caller's buffer must hold up to 84 bytes).
 * data     - out: 5-bit data values, checksum excluded.
 * data_len - out: number of values written to data.
 * input    - the address string to decode.
 *
 * Returns true only if the string is well-formed, not mixed-case, and its
 * 6-character checksum verifies (polymod == 1). */
static bool bech32_decode(char *hrp, uint8_t *data, size_t *data_len, const char *input) {
    uint32_t chk = 1;
    size_t i;
    size_t input_len = strlen(input);
    size_t hrp_len;
    int have_lower = 0, have_upper = 0;
    /* BIP-173 limits the overall string to 8..90 characters. */
    if (input_len < 8 || input_len > 90) {
        return false;
    }
    /* Scan backwards for the '1' separator; *data_len counts the
     * characters after it. */
    *data_len = 0;
    while (*data_len < input_len && input[(input_len - 1) - *data_len] != '1') {
        ++(*data_len);
    }
    hrp_len = input_len - (1 + *data_len);
    /* Separator must exist, and the data part must at least cover the
     * 6-character checksum. */
    if (1 + *data_len >= input_len || *data_len < 6) {
        return false;
    }
    /* Exclude the checksum from the reported data length. */
    *(data_len) -= 6;
    /* Feed the high bits of the (lower-cased) HRP into the checksum. */
    for (i = 0; i < hrp_len; ++i) {
        int ch = input[i];
        if (ch < 33 || ch > 126) {
            return false;
        }
        if (ch >= 'a' && ch <= 'z') {
            have_lower = 1;
        } else if (ch >= 'A' && ch <= 'Z') {
            have_upper = 1;
            ch = (ch - 'A') + 'a';
        }
        hrp[i] = ch;
        chk = bech32_polymod_step(chk) ^ (ch >> 5);
    }
    hrp[i] = 0;
    chk = bech32_polymod_step(chk);
    /* Then the low 5 bits of each HRP character (case-insensitive, since
     * upper/lower case differ only in bit 5). */
    for (i = 0; i < hrp_len; ++i) {
        chk = bech32_polymod_step(chk) ^ (input[i] & 0x1f);
    }
    ++i;   /* skip the '1' separator */
    while (i < input_len) {
        /* Mask bit 7 first so non-ASCII bytes can't index past the table. */
        int v = (input[i] & 0x80) ? -1 : bech32_charset_rev[(int)input[i]];
        if (input[i] >= 'a' && input[i] <= 'z') have_lower = 1;
        if (input[i] >= 'A' && input[i] <= 'Z') have_upper = 1;
        if (v == -1) {
            return false;
        }
        chk = bech32_polymod_step(chk) ^ v;
        /* Store everything except the final 6 checksum characters. */
        if (i + 6 < input_len) {
            data[i - (1 + hrp_len)] = v;
        }
        ++i;
    }
    /* Mixed case is invalid per BIP-173. */
    if (have_lower && have_upper) {
        return false;
    }
    return chk == 1;
}
|
||||
|
||||
/* Repack a bit stream between group sizes (e.g. 5-bit Bech32 values to
 * 8-bit bytes).  Appends to out, advancing *outlen (caller initializes it).
 * With pad != 0 any残 leftover bits are zero-padded into a final group;
 * with pad == 0 leftover bits must be zero and fewer than inbits, else
 * the conversion fails.  Returns true on success. */
static bool convert_bits(uint8_t *out, size_t *outlen, int outbits, const uint8_t *in, size_t inlen, int inbits, int pad) {
    uint32_t acc = 0;                                   /* bit accumulator */
    int nbits = 0;                                      /* bits held in acc */
    const uint32_t mask = (((uint32_t)1) << outbits) - 1;
    for (size_t i = 0; i < inlen; i++) {
        acc = (acc << inbits) | in[i];
        nbits += inbits;
        /* Emit full output groups from the top of the accumulator. */
        while (nbits >= outbits) {
            nbits -= outbits;
            out[(*outlen)++] = (acc >> nbits) & mask;
        }
    }
    if (pad) {
        if (nbits)
            out[(*outlen)++] = (acc << (outbits - nbits)) & mask;
    } else if ((nbits >= inbits) || (((acc << (outbits - nbits)) & mask) != 0)) {
        return false;
    }
    return true;
}
|
||||
|
||||
/* Decode a segwit (Bech32) address into witness version and program.
 * witver      - out: witness version 0..16.
 * witdata     - out: witness program bytes (2..40 of them).
 * witdata_len - out: program length.
 * Returns true when the address decodes and passes the BIP-173 segwit
 * constraints (v0 programs must be exactly 20 or 32 bytes). */
static bool segwit_addr_decode(int *witver, uint8_t *witdata, size_t *witdata_len, const char *addr) {
    uint8_t raw[84];
    char hrp[84];
    size_t raw_len;

    if (!bech32_decode(hrp, raw, &raw_len, addr))
        return false;
    if (raw_len == 0 || raw_len > 65)
        return false;
    if (raw[0] > 16)                   /* first value is the witness version */
        return false;

    *witdata_len = 0;
    /* Repack the remaining 5-bit groups into bytes; no padding allowed. */
    if (!convert_bits(witdata, witdata_len, 8, raw + 1, raw_len - 1, 5, 0))
        return false;
    if (*witdata_len < 2 || *witdata_len > 40)
        return false;
    if (raw[0] == 0 && *witdata_len != 20 && *witdata_len != 32)
        return false;

    *witver = raw[0];
    return true;
}
|
||||
|
||||
static size_t bech32_to_script(uint8_t *out, size_t outsz, const char *addr) {
|
||||
uint8_t witprog[40];
|
||||
size_t witprog_len;
|
||||
int witver;
|
||||
|
||||
if (!segwit_addr_decode(&witver, witprog, &witprog_len, addr))
|
||||
return 0;
|
||||
if (outsz < witprog_len + 2)
|
||||
return 0;
|
||||
out[0] = witver ? (0x50 + witver) : 0;
|
||||
out[1] = witprog_len;
|
||||
memcpy(out + 2, witprog, witprog_len);
|
||||
|
||||
if ( opt_debug )
|
||||
applog( LOG_INFO, "Coinbase address uses Bech32 coding");
|
||||
|
||||
return witprog_len + 2;
|
||||
}
|
||||
|
||||
size_t address_to_script( unsigned char *out, size_t outsz, const char *addr )
|
||||
{
|
||||
unsigned char addrbin[ pk_buffer_size_max ];
|
||||
@@ -950,12 +1093,15 @@ size_t address_to_script( unsigned char *out, size_t outsz, const char *addr )
|
||||
size_t rv;
|
||||
|
||||
if ( !b58dec( addrbin, outsz, addr ) )
|
||||
return 0;
|
||||
return bech32_to_script( out, outsz, addr );
|
||||
|
||||
addrver = b58check( addrbin, outsz, addr );
|
||||
if ( addrver < 0 )
|
||||
return 0;
|
||||
|
||||
if ( opt_debug )
|
||||
applog( LOG_INFO, "Coinbase address uses B58 coding");
|
||||
|
||||
switch ( addrver )
|
||||
{
|
||||
case 5: /* Bitcoin script hash */
|
||||
@@ -1486,9 +1632,6 @@ static bool stratum_parse_extranonce(struct stratum_ctx *sctx, json_t *params, i
|
||||
if ( !opt_quiet ) /* pool dynamic change */
|
||||
applog( LOG_INFO, "Stratum extranonce1= %s, extranonce2 size= %d",
|
||||
xnonce1, xn2_size);
|
||||
// if (pndx == 0 && opt_debug)
|
||||
// applog(LOG_DEBUG, "Stratum set nonce %s with extranonce2 size=%d",
|
||||
// xnonce1, xn2_size);
|
||||
|
||||
return true;
|
||||
out:
|
||||
@@ -1638,8 +1781,6 @@ bool stratum_authorize(struct stratum_ctx *sctx, const char *user, const char *p
|
||||
opt_extranonce = false;
|
||||
goto out;
|
||||
}
|
||||
if ( !opt_quiet )
|
||||
applog( LOG_INFO, "Extranonce subscription enabled" );
|
||||
|
||||
sret = stratum_recv_line( sctx );
|
||||
if ( sret )
|
||||
@@ -1657,10 +1798,14 @@ bool stratum_authorize(struct stratum_ctx *sctx, const char *user, const char *p
|
||||
if ( !stratum_handle_method( sctx, sret ) )
|
||||
applog( LOG_WARNING, "Stratum answer id is not correct!" );
|
||||
}
|
||||
res_val = json_object_get( extra, "result" );
|
||||
// if (opt_debug && (!res_val || json_is_false(res_val)))
|
||||
// applog(LOG_DEBUG, "extranonce subscribe not supported");
|
||||
json_decref( extra );
|
||||
else
|
||||
{
|
||||
res_val = json_object_get( extra, "result" );
|
||||
if ( opt_debug && ( !res_val || json_is_false( res_val ) ) )
|
||||
applog( LOG_DEBUG,
|
||||
"Method extranonce.subscribe is not supported" );
|
||||
}
|
||||
json_decref( extra );
|
||||
}
|
||||
free(sret);
|
||||
}
|
||||
|
Reference in New Issue
Block a user