mirror of https://github.com/JayDDee/cpuminer-opt.git
synced 2025-09-17 23:44:27 +00:00

Compare commits (3 commits): 26b9429589, e043698442, 46dca7a493

Makefile.am (20 lines changed)
@@ -79,11 +79,6 @@ cpuminer_SOURCES = \
  algo/hamsi/hamsi-hash-4way.c \
  algo/haval/haval.c \
  algo/haval/haval-hash-4way.c \
  algo/hodl/aes.c \
  algo/hodl/hodl-gate.c \
  algo/hodl/hodl-wolf.c \
  algo/hodl/sha512_avx.c \
  algo/hodl/sha512_avx2.c \
  algo/jh/sph_jh.c \
  algo/jh/jh-hash-4way.c \
  algo/jh/jha-gate.c \
@@ -148,6 +143,8 @@ cpuminer_SOURCES = \
  algo/scrypt/scrypt.c \
  algo/scrypt/scrypt-core-4way.c \
  algo/scrypt/neoscrypt.c \
  algo/sha/sha1.c \
  algo/sha/sha1-hash.c \
  algo/sha/sha256-hash.c \
  algo/sha/sph_sha2.c \
  algo/sha/sph_sha2big.c \
@@ -156,7 +153,6 @@ cpuminer_SOURCES = \
  algo/sha/hmac-sha256-hash.c \
  algo/sha/hmac-sha256-hash-4way.c \
  algo/sha/sha256d.c \
  algo/sha/sha2.c \
  algo/sha/sha256d-4way.c \
  algo/sha/sha256t-gate.c \
  algo/sha/sha256t-4way.c \
@@ -279,20 +275,10 @@ cpuminer_SOURCES = \
  algo/yespower/yespower-ref.c \
  algo/yespower/yespower-blake2b-ref.c

disable_flags =

if USE_ASM
  cpuminer_SOURCES += asm/neoscrypt_asm.S
if ARCH_x86
  cpuminer_SOURCES += asm/sha2-x86.S asm/scrypt-x86.S
endif
if ARCH_x86_64
  cpuminer_SOURCES += asm/sha2-x64.S asm/scrypt-x64.S
endif
if ARCH_ARM
  cpuminer_SOURCES += asm/sha2-arm.S asm/scrypt-arm.S
endif
else
  disable_flags += -DNOASM
endif
@@ -302,7 +288,7 @@ if HAVE_WINDOWS
endif

cpuminer_LDFLAGS = @LDFLAGS@
cpuminer_LDADD = @LIBCURL@ @JANSSON_LIBS@ @PTHREAD_LIBS@ @WS2_LIBS@ -lssl -lcrypto -lgmp
cpuminer_LDADD = @LIBCURL@ @JANSSON_LIBS@ @PTHREAD_LIBS@ @WS2_LIBS@ -lgmp
cpuminer_CPPFLAGS = @LIBCURL_CPPFLAGS@ $(ALL_INCLUDES)
cpuminer_CFLAGS = -Wno-pointer-sign -Wno-pointer-to-int-cast $(disable_flags)
@@ -73,6 +73,29 @@ If not what makes it happen or not happen?

Change Log
----------

v23.8

Cpuminer-opt is no longer dependent on OpenSSL.
Removed Hodl algo.
Removed legacy Sha256 & Scrypt ASM code.
ARM: Echo AES is working and enabled for x17.

v23.7

Fixed blake2s, broken in v3.23.4.
ARM: SHA2 extension tested and working.
ARM: sha512256d fully optimized.
ARM: X17 more optimizations.
ARM: AES extension working for Shavite.
ARM errata: CPU features AES & SHA256 are not reported when available.

v23.6

ARM: Sha256dt, Sha256t, Sha256d 4-way now working and fully optimized for NEON; SHA also enabled but untested.
x86: Sha256dt, Sha256t, Sha256d faster SSE2 4-way.
ARM: Scrypt, Scryptn2 fully optimized for NEON; SHA also enabled but untested.
Linux: added a log when the miner is started as root to discourage doing so.

v23.5

New version numbering drops the leading 3; the major version is now the calendar year, and the minor version identifies planned releases during the year.
@@ -310,7 +310,6 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
     case ALGO_GROESTL:      rc = register_groestl_algo      ( gate ); break;
     case ALGO_HEX:          rc = register_hex_algo          ( gate ); break;
     case ALGO_HMQ1725:      rc = register_hmq1725_algo      ( gate ); break;
     case ALGO_HODL:         rc = register_hodl_algo         ( gate ); break;
     case ALGO_JHA:          rc = register_jha_algo          ( gate ); break;
     case ALGO_KECCAK:       rc = register_keccak_algo       ( gate ); break;
     case ALGO_KECCAKC:      rc = register_keccakc_algo      ( gate ); break;
@@ -136,10 +136,10 @@ static void fill_block( __m256i *state, const block *ref_block,

#else   // SSE2

static void fill_block( v128_t *state, const block *ref_block,
static void fill_block( v128u64_t *state, const block *ref_block,
                        block *next_block, int with_xor )
{
    v128_t block_XY[ARGON2_OWORDS_IN_BLOCK];
    v128u64_t block_XY[ARGON2_OWORDS_IN_BLOCK];
    unsigned int i;

    if ( with_xor )
@@ -242,7 +242,7 @@ void fill_segment(const argon2_instance_t *instance,
#elif defined(__AVX2__)
    __m256i state[ARGON2_HWORDS_IN_BLOCK];
#else
    v128_t state[ARGON2_OWORDS_IN_BLOCK];
    v128u64_t state[ARGON2_OWORDS_IN_BLOCK];
#endif
//  int data_independent_addressing;
@@ -23,56 +23,46 @@
|
||||
|
||||
#if !defined(__AVX512F__)
|
||||
|
||||
|
||||
#if !defined(__AVX2__)
|
||||
|
||||
|
||||
static BLAKE2_INLINE v128_t fBlaMka(v128_t x, v128_t y) {
|
||||
const v128_t z = v128_mulw32(x, y);
|
||||
return v128_add64(v128_add64(x, y), v128_add64(z, z));
|
||||
static BLAKE2_INLINE v128_t fBlaMka(v128_t x, v128_t y)
|
||||
{
|
||||
const v128u64_t z = v128_mulw32( x, y );
|
||||
return (v128u32_t)v128_add64( v128_add64( (v128u64_t)x, (v128u64_t)y ),
|
||||
v128_add64( z, z ) );
|
||||
}
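
For reference (not part of the patch), the fBlaMka above is a two-lane vector form of Argon2's BlaMka primitive: a modular addition augmented with twice the product of the low 32-bit halves. A minimal scalar C sketch, written here only as an illustration:

#include <stdint.h>

/* BlaMka: x + y + 2 * lo32(x) * lo32(y), all modulo 2^64.
   The vector code computes the widening 32x32->64 product with
   v128_mulw32 and the doubling with v128_add64( z, z ). */
static inline uint64_t blamka_scalar( uint64_t x, uint64_t y )
{
    const uint64_t z = (uint64_t)(uint32_t)x * (uint64_t)(uint32_t)y;
    return x + y + 2 * z;
}
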
|
||||
|
||||
#define G1(A0, B0, C0, D0, A1, B1, C1, D1) \
|
||||
do { \
|
||||
A0 = fBlaMka(A0, B0); \
|
||||
A1 = fBlaMka(A1, B1); \
|
||||
\
|
||||
D0 = v128_xor(D0, A0); \
|
||||
D1 = v128_xor(D1, A1); \
|
||||
\
|
||||
D0 = v128_ror64(D0, 32); \
|
||||
D1 = v128_ror64(D1, 32); \
|
||||
\
|
||||
C0 = fBlaMka(C0, D0); \
|
||||
C1 = fBlaMka(C1, D1); \
|
||||
\
|
||||
B0 = v128_xor(B0, C0); \
|
||||
B1 = v128_xor(B1, C1); \
|
||||
\
|
||||
B0 = v128_ror64(B0, 24); \
|
||||
B1 = v128_ror64(B1, 24); \
|
||||
} while ((void)0, 0)
|
||||
#define G1( A0, B0, C0, D0, A1, B1, C1, D1 ) \
|
||||
{ \
|
||||
A0 = fBlaMka( A0, B0 ); \
|
||||
A1 = fBlaMka( A1, B1 ); \
|
||||
D0 = v128_xor( D0, A0 ); \
|
||||
D1 = v128_xor( D1, A1 ); \
|
||||
D0 = v128_ror64( D0, 32 ); \
|
||||
D1 = v128_ror64( D1, 32 ); \
|
||||
C0 = fBlaMka( C0, D0 ); \
|
||||
C1 = fBlaMka( C1, D1 ); \
|
||||
B0 = v128_xor( B0, C0 ); \
|
||||
B1 = v128_xor( B1, C1 ); \
|
||||
B0 = v128_ror64( B0, 24 ); \
|
||||
B1 = v128_ror64( B1, 24 ); \
|
||||
}
|
||||
|
||||
#define G2(A0, B0, C0, D0, A1, B1, C1, D1) \
|
||||
do { \
|
||||
A0 = fBlaMka(A0, B0); \
|
||||
A1 = fBlaMka(A1, B1); \
|
||||
\
|
||||
D0 = v128_xor(D0, A0); \
|
||||
D1 = v128_xor(D1, A1); \
|
||||
\
|
||||
D0 = v128_ror64(D0, 16); \
|
||||
D1 = v128_ror64(D1, 16); \
|
||||
\
|
||||
C0 = fBlaMka(C0, D0); \
|
||||
C1 = fBlaMka(C1, D1); \
|
||||
\
|
||||
B0 = v128_xor(B0, C0); \
|
||||
B1 = v128_xor(B1, C1); \
|
||||
\
|
||||
B0 = v128_ror64(B0, 63); \
|
||||
B1 = v128_ror64(B1, 63); \
|
||||
} while ((void)0, 0)
|
||||
#define G2( A0, B0, C0, D0, A1, B1, C1, D1 ) \
|
||||
{ \
|
||||
A0 = fBlaMka( A0, B0 ); \
|
||||
A1 = fBlaMka( A1, B1 ); \
|
||||
D0 = v128_xor( D0, A0 ); \
|
||||
D1 = v128_xor( D1, A1 ); \
|
||||
D0 = v128_ror64( D0, 16 ); \
|
||||
D1 = v128_ror64( D1, 16 ); \
|
||||
C0 = fBlaMka( C0, D0 ); \
|
||||
C1 = fBlaMka( C1, D1 ); \
|
||||
B0 = v128_xor( B0, C0 ); \
|
||||
B1 = v128_xor( B1, C1 ); \
|
||||
B0 = v128_ror64( B0, 63 ); \
|
||||
B1 = v128_ror64( B1, 63 ); \
|
||||
}
|
||||
|
||||
#if defined(__SSSE3__) || defined(__ARM_NEON)
|
||||
|
||||
|
@@ -465,6 +465,7 @@ void blake512_update(blake512_context *sc, const void *data, size_t len)
|
||||
{
|
||||
if ( ( sc->T0 = sc->T0 + 1024 ) < 1024 )
|
||||
sc->T1 += 1;
|
||||
|
||||
blake512_transform( sc->H, (uint64_t*)sc->buf, sc->T0, sc->T1 );
|
||||
sc->ptr = 0;
|
||||
}
|
||||
@@ -474,7 +475,7 @@ void blake512_update(blake512_context *sc, const void *data, size_t len)
|
||||
void blake512_close( blake512_context *sc, void *dst )
|
||||
{
|
||||
unsigned char buf[128] __attribute__((aligned(32)));
|
||||
size_t ptr, k;
|
||||
size_t ptr;
|
||||
unsigned bit_len;
|
||||
uint64_t th, tl;
|
||||
|
||||
@@ -517,11 +518,8 @@ void blake512_close( blake512_context *sc, void *dst )
|
||||
*(uint64_t*)(buf + 120) = bswap_64( tl );
|
||||
blake512_update( sc, buf, 128 );
|
||||
}
|
||||
|
||||
//TODO vectored bswap
|
||||
|
||||
for ( k = 0; k < 8; k ++ )
|
||||
((uint64_t*)dst)[k] = bswap_64( sc->H[k] );
|
||||
v128_block_bswap64_512( dst, sc->H );
|
||||
}
|
||||
|
||||
void blake512_full( blake512_context *sc, void *dst, const void *data,
|
||||
@@ -1779,13 +1777,11 @@ blake64_4way_close( blake_4x64_big_context *sc, void *dst )
|
||||
v256_64( 0x0100000000000000ULL ) );
|
||||
buf[112>>3] = v256_64( bswap_64( th ) );
|
||||
buf[120>>3] = v256_64( bswap_64( tl ) );
|
||||
|
||||
blake64_4way( sc, buf + (ptr>>3), 128 - ptr );
|
||||
}
|
||||
else
|
||||
{
|
||||
memset_zero_256( buf + (ptr>>3) + 1, (120 - ptr) >> 3 );
|
||||
|
||||
blake64_4way( sc, buf + (ptr>>3), 128 - ptr );
|
||||
sc->T0 = 0xFFFFFFFFFFFFFC00ULL;
|
||||
sc->T1 = 0xFFFFFFFFFFFFFFFFULL;
|
||||
@@ -1793,9 +1789,9 @@ blake64_4way_close( blake_4x64_big_context *sc, void *dst )
|
||||
buf[104>>3] = v256_64( 0x0100000000000000ULL );
|
||||
buf[112>>3] = v256_64( bswap_64( th ) );
|
||||
buf[120>>3] = v256_64( bswap_64( tl ) );
|
||||
|
||||
blake64_4way( sc, buf, 128 );
|
||||
}
|
||||
|
||||
mm256_block_bswap_64( (__m256i*)dst, sc->H );
|
||||
}
|
||||
|
||||
@@ -1960,21 +1956,21 @@ void blake512_2x64_compress( blake_2x64_big_context *sc )
|
||||
#else // SSE2 & NEON
|
||||
|
||||
M0 = v128_bswap64( sc->buf[ 0] );
|
||||
M1 = v128_bswap64( sc->buf[ 0] );
|
||||
M2 = v128_bswap64( sc->buf[ 0] );
|
||||
M3 = v128_bswap64( sc->buf[ 0] );
|
||||
M4 = v128_bswap64( sc->buf[ 0] );
|
||||
M5 = v128_bswap64( sc->buf[ 0] );
|
||||
M6 = v128_bswap64( sc->buf[ 0] );
|
||||
M7 = v128_bswap64( sc->buf[ 0] );
|
||||
M8 = v128_bswap64( sc->buf[ 0] );
|
||||
M9 = v128_bswap64( sc->buf[ 0] );
|
||||
MA = v128_bswap64( sc->buf[ 0] );
|
||||
MB = v128_bswap64( sc->buf[ 0] );
|
||||
MC = v128_bswap64( sc->buf[ 0] );
|
||||
MD = v128_bswap64( sc->buf[ 0] );
|
||||
ME = v128_bswap64( sc->buf[ 0] );
|
||||
MF = v128_bswap64( sc->buf[ 0] );
|
||||
M1 = v128_bswap64( sc->buf[ 1] );
|
||||
M2 = v128_bswap64( sc->buf[ 2] );
|
||||
M3 = v128_bswap64( sc->buf[ 3] );
|
||||
M4 = v128_bswap64( sc->buf[ 4] );
|
||||
M5 = v128_bswap64( sc->buf[ 5] );
|
||||
M6 = v128_bswap64( sc->buf[ 6] );
|
||||
M7 = v128_bswap64( sc->buf[ 7] );
|
||||
M8 = v128_bswap64( sc->buf[ 8] );
|
||||
M9 = v128_bswap64( sc->buf[ 9] );
|
||||
MA = v128_bswap64( sc->buf[10] );
|
||||
MB = v128_bswap64( sc->buf[11] );
|
||||
MC = v128_bswap64( sc->buf[12] );
|
||||
MD = v128_bswap64( sc->buf[13] );
|
||||
ME = v128_bswap64( sc->buf[14] );
|
||||
MF = v128_bswap64( sc->buf[15] );
|
||||
|
||||
#endif
|
||||
|
||||
@@ -2235,7 +2231,6 @@ blake64_2x64( blake_2x64_big_context *sc, const void *data, size_t len)
|
||||
v128u64_t *buf;
|
||||
size_t ptr;
|
||||
const int buf_size = 128; // sizeof/8
|
||||
DECL_STATE_2X64
|
||||
|
||||
buf = sc->buf;
|
||||
ptr = sc->ptr;
|
||||
@@ -2247,7 +2242,6 @@ blake64_2x64( blake_2x64_big_context *sc, const void *data, size_t len)
|
||||
return;
|
||||
}
|
||||
|
||||
READ_STATE64(sc);
|
||||
while ( len > 0 )
|
||||
{
|
||||
size_t clen;
|
||||
@@ -2260,13 +2254,12 @@ blake64_2x64( blake_2x64_big_context *sc, const void *data, size_t len)
|
||||
len -= clen;
|
||||
if ( ptr == buf_size )
|
||||
{
|
||||
if ( (T0 = T0 + 1024 ) < 1024 )
|
||||
T1 = T1 + 1;
|
||||
if ( (sc->T0 = sc->T0 + 1024 ) < 1024 )
|
||||
sc->T1 = sc->T1 + 1;
|
||||
blake512_2x64_compress( sc );
|
||||
ptr = 0;
|
||||
}
|
||||
}
|
||||
WRITE_STATE64(sc);
|
||||
sc->ptr = ptr;
|
||||
}
|
||||
|
||||
@@ -2280,37 +2273,35 @@ blake64_2x64_close( blake_2x64_big_context *sc, void *dst )
|
||||
|
||||
ptr = sc->ptr;
|
||||
bit_len = ((unsigned)ptr << 3);
|
||||
buf[ptr>>3] = v128_64( 0x80 );
|
||||
sc->buf[ptr>>3] = v128_64( 0x80 );
|
||||
tl = sc->T0 + bit_len;
|
||||
th = sc->T1;
|
||||
if (ptr == 0 )
|
||||
{
|
||||
sc->T0 = 0xFFFFFFFFFFFFFC00ULL;
|
||||
sc->T1 = 0xFFFFFFFFFFFFFFFFULL;
|
||||
sc->T0 = 0xFFFFFFFFFFFFFC00ULL;
|
||||
sc->T1 = 0xFFFFFFFFFFFFFFFFULL;
|
||||
}
|
||||
else if ( sc->T0 == 0 )
|
||||
{
|
||||
sc->T0 = 0xFFFFFFFFFFFFFC00ULL + bit_len;
|
||||
sc->T1 = sc->T1 - 1;
|
||||
sc->T0 = 0xFFFFFFFFFFFFFC00ULL + bit_len;
|
||||
sc->T1 = sc->T1 - 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
sc->T0 -= 1024 - bit_len;
|
||||
}
|
||||
|
||||
sc->T0 -= 1024 - bit_len;
|
||||
|
||||
if ( ptr <= 104 )
|
||||
{
|
||||
v128_memset_zero( buf + (ptr>>3) + 1, (104-ptr) >> 3 );
|
||||
buf[104>>3] = v128_or( buf[104>>3], v128_64( 0x0100000000000000ULL ) );
|
||||
buf[112>>3] = v128_64( bswap_64( th ) );
|
||||
buf[120>>3] = v128_64( bswap_64( tl ) );
|
||||
|
||||
blake64_2x64( sc, buf + (ptr>>3), 128 - ptr );
|
||||
v128_memset_zero( sc->buf + (ptr>>3) + 1, (104-ptr) >> 3 );
|
||||
sc->buf[104>>3] = v128_or( sc->buf[104>>3],
|
||||
v128_64( 0x0100000000000000ULL ) );
|
||||
sc->buf[112>>3] = v128_64( bswap_64( th ) );
|
||||
sc->buf[120>>3] = v128_64( bswap_64( tl ) );
|
||||
blake64_2x64( sc, sc->buf + (ptr>>3), 128 - ptr );
|
||||
}
|
||||
else
|
||||
{
|
||||
v128_memset_zero( buf + (ptr>>3) + 1, (120 - ptr) >> 3 );
|
||||
blake64_2x64( sc, buf + (ptr>>3), 128 - ptr );
|
||||
v128_memset_zero( sc->buf + (ptr>>3) + 1, (120 - ptr) >> 3 );
|
||||
blake64_2x64( sc, sc->buf + (ptr>>3), 128 - ptr );
|
||||
sc->T0 = 0xFFFFFFFFFFFFFC00ULL;
|
||||
sc->T1 = 0xFFFFFFFFFFFFFFFFULL;
|
||||
v128_memset_zero( buf, 112>>3 );
|
||||
@@ -2319,6 +2310,7 @@ blake64_2x64_close( blake_2x64_big_context *sc, void *dst )
|
||||
buf[120>>3] = v128_64( bswap_64( tl ) );
|
||||
blake64_2x64( sc, buf, 128 );
|
||||
}
|
||||
|
||||
v128_block_bswap64( (v128u64_t*)dst, sc->H );
|
||||
}
|
||||
|
||||
@@ -2326,7 +2318,6 @@ blake64_2x64_close( blake_2x64_big_context *sc, void *dst )
|
||||
void blake512_2x64_full( blake_2x64_big_context *sc, void * dst,
|
||||
const void *data, size_t len )
|
||||
{
|
||||
|
||||
// init
|
||||
|
||||
casti_v128u64( sc->H, 0 ) = v128_64( 0x6A09E667F3BCC908 );
|
||||
|
@@ -21,112 +21,92 @@
|
||||
#include "hash_api.h"
|
||||
#include "simd-utils.h"
|
||||
|
||||
MYALIGN const unsigned int _k_s0F[] = {0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F};
|
||||
MYALIGN const unsigned int _k_ipt[] = {0x5A2A7000, 0xC2B2E898, 0x52227808, 0xCABAE090, 0x317C4D00, 0x4C01307D, 0xB0FDCC81, 0xCD80B1FC};
|
||||
MYALIGN const unsigned int _k_opt[] = {0xD6B66000, 0xFF9F4929, 0xDEBE6808, 0xF7974121, 0x50BCEC00, 0x01EDBD51, 0xB05C0CE0, 0xE10D5DB1};
|
||||
MYALIGN const unsigned int _k_inv[] = {0x0D080180, 0x0E05060F, 0x0A0B0C02, 0x04070309, 0x0F0B0780, 0x01040A06, 0x02050809, 0x030D0E0C};
|
||||
MYALIGN const unsigned int _k_sb1[] = {0xCB503E00, 0xB19BE18F, 0x142AF544, 0xA5DF7A6E, 0xFAE22300, 0x3618D415, 0x0D2ED9EF, 0x3BF7CCC1};
|
||||
MYALIGN const unsigned int _k_sb2[] = {0x0B712400, 0xE27A93C6, 0xBC982FCD, 0x5EB7E955, 0x0AE12900, 0x69EB8840, 0xAB82234A, 0xC2A163C8};
|
||||
MYALIGN const unsigned int _k_sb3[] = {0xC0211A00, 0x53E17249, 0xA8B2DA89, 0xFB68933B, 0xF0030A00, 0x5FF35C55, 0xA6ACFAA5, 0xF956AF09};
|
||||
MYALIGN const unsigned int _k_sb4[] = {0x3FD64100, 0xE1E937A0, 0x49087E9F, 0xA876DE97, 0xC393EA00, 0x3D50AED7, 0x876D2914, 0xBA44FE79};
|
||||
MYALIGN const unsigned int _k_sb5[] = {0xF4867F00, 0x5072D62F, 0x5D228BDB, 0x0DA9A4F9, 0x3971C900, 0x0B487AC2, 0x8A43F0FB, 0x81B332B8};
|
||||
MYALIGN const unsigned int _k_sb7[] = {0xFFF75B00, 0xB20845E9, 0xE1BAA416, 0x531E4DAC, 0x3390E000, 0x62A3F282, 0x21C1D3B1, 0x43125170};
|
||||
MYALIGN const unsigned int _k_sbo[] = {0x6FBDC700, 0xD0D26D17, 0xC502A878, 0x15AABF7A, 0x5FBB6A00, 0xCFE474A5, 0x412B35FA, 0x8E1E90D1};
|
||||
MYALIGN const unsigned int _k_h63[] = {0x63636363, 0x63636363, 0x63636363, 0x63636363};
|
||||
MYALIGN const unsigned int _k_hc6[] = {0xc6c6c6c6, 0xc6c6c6c6, 0xc6c6c6c6, 0xc6c6c6c6};
|
||||
MYALIGN const unsigned int _k_h5b[] = {0x5b5b5b5b, 0x5b5b5b5b, 0x5b5b5b5b, 0x5b5b5b5b};
|
||||
MYALIGN const unsigned int _k_h4e[] = {0x4e4e4e4e, 0x4e4e4e4e, 0x4e4e4e4e, 0x4e4e4e4e};
|
||||
MYALIGN const unsigned int _k_h0e[] = {0x0e0e0e0e, 0x0e0e0e0e, 0x0e0e0e0e, 0x0e0e0e0e};
|
||||
MYALIGN const unsigned int _k_h15[] = {0x15151515, 0x15151515, 0x15151515, 0x15151515};
|
||||
MYALIGN const unsigned int _k_aesmix1[] = {0x0f0a0500, 0x030e0904, 0x07020d08, 0x0b06010c};
|
||||
MYALIGN const unsigned int _k_aesmix2[] = {0x000f0a05, 0x04030e09, 0x0807020d, 0x0c0b0601};
|
||||
MYALIGN const unsigned int _k_aesmix3[] = {0x05000f0a, 0x0904030e, 0x0d080702, 0x010c0b06};
|
||||
MYALIGN const unsigned int _k_aesmix4[] = {0x0a05000f, 0x0e090403, 0x020d0807, 0x06010c0b};
|
||||
const uint32_t const1[] __attribute__ ((aligned (32))) =
|
||||
{ 0x00000001, 0x00000000, 0x00000000, 0x00000000 };
|
||||
const uint32_t mul2mask[] __attribute__ ((aligned (16))) =
|
||||
{ 0x00001b00, 0x00000000, 0x00000000, 0x00000000 };
|
||||
const uint32_t lsbmask[] __attribute__ ((aligned (16))) =
|
||||
{ 0x01010101, 0x01010101, 0x01010101, 0x01010101 };
|
||||
const uint32_t invshiftrows[] __attribute__ ((aligned (16))) =
|
||||
{ 0x070a0d00, 0x0b0e0104, 0x0f020508, 0x0306090c };
|
||||
|
||||
#define ECHO_SUBBYTES4( state, j ) \
|
||||
state[0][j] = v128_aesenc( state[0][j], k1 ); \
|
||||
k1 = v128_add32( k1, cast_v128(const1) ); \
|
||||
state[1][j] = v128_aesenc( state[1][j], k1 ); \
|
||||
k1 = v128_add32( k1, cast_v128(const1) ); \
|
||||
state[2][j] = v128_aesenc( state[2][j], k1 ); \
|
||||
k1 = v128_add32( k1, cast_v128(const1) ); \
|
||||
state[3][j] = v128_aesenc( state[3][j], k1 ); \
|
||||
k1 = v128_add32( k1, cast_v128(const1) ); \
|
||||
state[0][j] = v128_aesenc_nokey( state[0][j] ); \
|
||||
state[1][j] = v128_aesenc_nokey( state[1][j] ); \
|
||||
state[2][j] = v128_aesenc_nokey( state[2][j] ); \
|
||||
state[3][j] = v128_aesenc_nokey( state[3][j] )
|
||||
|
||||
MYALIGN const unsigned int const1[] = {0x00000001, 0x00000000, 0x00000000, 0x00000000};
|
||||
MYALIGN const unsigned int mul2mask[] = {0x00001b00, 0x00000000, 0x00000000, 0x00000000};
|
||||
MYALIGN const unsigned int lsbmask[] = {0x01010101, 0x01010101, 0x01010101, 0x01010101};
|
||||
MYALIGN const unsigned int invshiftrows[] = {0x070a0d00, 0x0b0e0104, 0x0f020508, 0x0306090c};
|
||||
MYALIGN const unsigned int zero[] = {0x00000000, 0x00000000, 0x00000000, 0x00000000};
|
||||
MYALIGN const unsigned int mul2ipt[] = {0x728efc00, 0x6894e61a, 0x3fc3b14d, 0x25d9ab57, 0xfd5ba600, 0x2a8c71d7, 0x1eb845e3, 0xc96f9234};
|
||||
#define ECHO_SUBBYTES( state, i, j ) \
|
||||
state[i][j] = v128_aesenc( state[i][j], k1 ); \
|
||||
k1 = v128_add32( k1, cast_v128(const1) ); \
|
||||
state[i][j] = v128_aesenc_nokey( state[i][j] )
|
||||
|
||||
|
||||
#define ECHO_SUBBYTES4(state, j) \
|
||||
state[0][j] = v128_aesenc(state[0][j], k1);\
|
||||
k1 = v128_add32(k1, cast_v128(const1));\
|
||||
state[1][j] = v128_aesenc(state[1][j], k1);\
|
||||
k1 = v128_add32(k1, cast_v128(const1));\
|
||||
state[2][j] = v128_aesenc(state[2][j], k1);\
|
||||
k1 = v128_add32(k1, cast_v128(const1));\
|
||||
state[3][j] = v128_aesenc(state[3][j], k1);\
|
||||
k1 = v128_add32(k1, cast_v128(const1));\
|
||||
state[0][j] = v128_aesenc(state[0][j], v128_zero ); \
|
||||
state[1][j] = v128_aesenc(state[1][j], v128_zero ); \
|
||||
state[2][j] = v128_aesenc(state[2][j], v128_zero ); \
|
||||
state[3][j] = v128_aesenc(state[3][j], v128_zero )
|
||||
|
||||
#define ECHO_SUBBYTES(state, i, j) \
|
||||
state[i][j] = v128_aesenc(state[i][j], k1);\
|
||||
k1 = v128_add32(k1, cast_v128(const1));\
|
||||
state[i][j] = v128_aesenc(state[i][j], cast_v128(zero))
|
||||
|
||||
#define ECHO_MIXBYTES(state1, state2, j, t1, t2, s2) \
|
||||
s2 = v128_add8(state1[0][j], state1[0][j]);\
|
||||
t1 = v128_sr16(state1[0][j], 7);\
|
||||
t1 = v128_and(t1, cast_v128(lsbmask));\
|
||||
t2 = v128_shuffle8(cast_v128(mul2mask), t1);\
|
||||
s2 = v128_xor(s2, t2);\
|
||||
state2[0][j] = s2;\
|
||||
state2[1][j] = state1[0][j];\
|
||||
state2[2][j] = state1[0][j];\
|
||||
state2[3][j] = v128_xor(s2, state1[0][j]);\
|
||||
s2 = v128_add8(state1[1][(j + 1) & 3], state1[1][(j + 1) & 3]);\
|
||||
t1 = v128_sr16(state1[1][(j + 1) & 3], 7);\
|
||||
t1 = v128_and(t1, cast_v128(lsbmask));\
|
||||
t2 = v128_shuffle8(cast_v128(mul2mask), t1);\
|
||||
s2 = v128_xor(s2, t2);\
|
||||
state2[0][j] = v128_xor3(state2[0][j], s2, state1[1][(j + 1) & 3] );\
|
||||
state2[1][j] = v128_xor(state2[1][j], s2);\
|
||||
state2[2][j] = v128_xor(state2[2][j], state1[1][(j + 1) & 3]);\
|
||||
state2[3][j] = v128_xor(state2[3][j], state1[1][(j + 1) & 3]);\
|
||||
s2 = v128_add8(state1[2][(j + 2) & 3], state1[2][(j + 2) & 3]);\
|
||||
t1 = v128_sr16(state1[2][(j + 2) & 3], 7);\
|
||||
t1 = v128_and(t1, cast_v128(lsbmask));\
|
||||
t2 = v128_shuffle8(cast_v128(mul2mask), t1);\
|
||||
s2 = v128_xor(s2, t2);\
|
||||
state2[0][j] = v128_xor(state2[0][j], state1[2][(j + 2) & 3]);\
|
||||
state2[1][j] = v128_xor3(state2[1][j], s2, state1[2][(j + 2) & 3] );\
|
||||
state2[2][j] = v128_xor(state2[2][j], s2);\
|
||||
state2[3][j] = v128_xor(state2[3][j], state1[2][(j + 2) & 3]);\
|
||||
s2 = v128_add8(state1[3][(j + 3) & 3], state1[3][(j + 3) & 3]);\
|
||||
t1 = v128_sr16(state1[3][(j + 3) & 3], 7);\
|
||||
t1 = v128_and(t1, cast_v128(lsbmask));\
|
||||
t2 = v128_shuffle8(cast_v128(mul2mask), t1);\
|
||||
s2 = v128_xor(s2, t2);\
|
||||
state2[0][j] = v128_xor(state2[0][j], state1[3][(j + 3) & 3]);\
|
||||
state2[1][j] = v128_xor(state2[1][j], state1[3][(j + 3) & 3]);\
|
||||
state2[2][j] = v128_xor3(state2[2][j], s2, state1[3][(j + 3) & 3] );\
|
||||
state2[3][j] = v128_xor(state2[3][j], s2)
|
||||
#define ECHO_MIXBYTES( state1, state2, j, t1, t2, s2 ) \
|
||||
s2 = v128_add8( state1[0][j], state1[0][j] ); \
|
||||
t1 = v128_sr16( state1[0][j], 7 ); \
|
||||
t1 = v128_and( t1, cast_v128(lsbmask) ); \
|
||||
t2 = v128_shuffle8( cast_v128(mul2mask), t1 ); \
|
||||
s2 = v128_xor( s2, t2 ); \
|
||||
state2[0][j] = s2; \
|
||||
state2[1][j] = state1[0][j]; \
|
||||
state2[2][j] = state1[0][j]; \
|
||||
state2[3][j] = v128_xor(s2, state1[0][j] ); \
|
||||
s2 = v128_add8( state1[1][(j + 1) & 3], state1[1][(j + 1) & 3] ); \
|
||||
t1 = v128_sr16( state1[1][(j + 1) & 3], 7 ); \
|
||||
t1 = v128_and( t1, cast_v128(lsbmask) ); \
|
||||
t2 = v128_shuffle8( cast_v128(mul2mask), t1 ); \
|
||||
s2 = v128_xor( s2, t2 ); \
|
||||
state2[0][j] = v128_xor3( state2[0][j], s2, state1[1][(j + 1) & 3] );\
|
||||
state2[1][j] = v128_xor( state2[1][j], s2 ); \
|
||||
state2[2][j] = v128_xor( state2[2][j], state1[1][(j + 1) & 3] ); \
|
||||
state2[3][j] = v128_xor( state2[3][j], state1[1][(j + 1) & 3] ); \
|
||||
s2 = v128_add8( state1[2][(j + 2) & 3], state1[2][(j + 2) & 3] ); \
|
||||
t1 = v128_sr16( state1[2][(j + 2) & 3], 7 ); \
|
||||
t1 = v128_and( t1, cast_v128(lsbmask) ); \
|
||||
t2 = v128_shuffle8( cast_v128(mul2mask), t1 ); \
|
||||
s2 = v128_xor( s2, t2 ); \
|
||||
state2[0][j] = v128_xor( state2[0][j], state1[2][(j + 2) & 3] ); \
|
||||
state2[1][j] = v128_xor3( state2[1][j], s2, state1[2][(j + 2) & 3] ); \
|
||||
state2[2][j] = v128_xor( state2[2][j], s2 ); \
|
||||
state2[3][j] = v128_xor( state2[3][j], state1[2][(j + 2) & 3] ); \
|
||||
s2 = v128_add8( state1[3][(j + 3) & 3], state1[3][(j + 3) & 3] ); \
|
||||
t1 = v128_sr16( state1[3][(j + 3) & 3], 7 ); \
|
||||
t1 = v128_and( t1, cast_v128(lsbmask) ); \
|
||||
t2 = v128_shuffle8( cast_v128(mul2mask), t1 ); \
|
||||
s2 = v128_xor( s2, t2 ); \
|
||||
state2[0][j] = v128_xor( state2[0][j], state1[3][(j + 3) & 3] ); \
|
||||
state2[1][j] = v128_xor( state2[1][j], state1[3][(j + 3) & 3] ); \
|
||||
state2[2][j] = v128_xor3( state2[2][j], s2, state1[3][(j + 3) & 3] ); \
|
||||
state2[3][j] = v128_xor( state2[3][j], s2 )
|
||||
|
||||
|
||||
#define ECHO_ROUND_UNROLL2 \
|
||||
ECHO_SUBBYTES4(_state, 0);\
|
||||
ECHO_SUBBYTES4(_state, 1);\
|
||||
ECHO_SUBBYTES4(_state, 2);\
|
||||
ECHO_SUBBYTES4(_state, 3);\
|
||||
ECHO_MIXBYTES(_state, _state2, 0, t1, t2, s2);\
|
||||
ECHO_MIXBYTES(_state, _state2, 1, t1, t2, s2);\
|
||||
ECHO_MIXBYTES(_state, _state2, 2, t1, t2, s2);\
|
||||
ECHO_MIXBYTES(_state, _state2, 3, t1, t2, s2);\
|
||||
ECHO_SUBBYTES4(_state2, 0);\
|
||||
ECHO_SUBBYTES4(_state2, 1);\
|
||||
ECHO_SUBBYTES4(_state2, 2);\
|
||||
ECHO_SUBBYTES4(_state2, 3);\
|
||||
ECHO_MIXBYTES(_state2, _state, 0, t1, t2, s2);\
|
||||
ECHO_MIXBYTES(_state2, _state, 1, t1, t2, s2);\
|
||||
ECHO_MIXBYTES(_state2, _state, 2, t1, t2, s2);\
|
||||
ECHO_MIXBYTES(_state2, _state, 3, t1, t2, s2)
|
||||
{ \
|
||||
ECHO_SUBBYTES4( _state, 0 ); \
|
||||
ECHO_SUBBYTES4( _state, 1 ); \
|
||||
ECHO_SUBBYTES4( _state, 2 ); \
|
||||
ECHO_SUBBYTES4( _state, 3 ); \
|
||||
ECHO_MIXBYTES( _state, _state2, 0, t1, t2, s2 ); \
|
||||
ECHO_MIXBYTES( _state, _state2, 1, t1, t2, s2 ); \
|
||||
ECHO_MIXBYTES( _state, _state2, 2, t1, t2, s2 ); \
|
||||
ECHO_MIXBYTES( _state, _state2, 3, t1, t2, s2 ); \
|
||||
ECHO_SUBBYTES4( _state2, 0 ); \
|
||||
ECHO_SUBBYTES4( _state2, 1 ); \
|
||||
ECHO_SUBBYTES4( _state2, 2 ); \
|
||||
ECHO_SUBBYTES4( _state2, 3 ); \
|
||||
ECHO_MIXBYTES( _state2, _state, 0, t1, t2, s2 ); \
|
||||
ECHO_MIXBYTES( _state2, _state, 1, t1, t2, s2 ); \
|
||||
ECHO_MIXBYTES( _state2, _state, 2, t1, t2, s2 ); \
|
||||
ECHO_MIXBYTES( _state2, _state, 3, t1, t2, s2 ); \
|
||||
}
|
||||
|
||||
/*
|
||||
#define ECHO_ROUND_UNROLL2 \
|
||||
|
@@ -61,9 +61,12 @@ static const v128u64_t SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };

#if defined(__ARM_NEON)

// No fast shuffle on NEON
static const uint32x4_t vmask_d8 = { 3, 1, 2, 0 };
//static const uint32x4_t vmask_d8 = { 3, 1, 2, 0 };
static const v128u32_t BLEND_MASK = { 0xffffffff, 0, 0, 0xffffffff };

#define gr_shuffle32( v )   v128_shufflev32( v, vmask_d8 )
#define gr_shuffle32( v )   v128_blendv( v128_qrev32( v ), v, BLEND_MASK )

//#define gr_shuffle32( v ) v128_shufflev32( v, vmask_d8 )

#else
@@ -35,7 +35,7 @@
|
||||
#include <stdio.h>
|
||||
#include "hamsi-hash-4way.h"
|
||||
|
||||
static const uint32_t HAMSI_IV512[] =
|
||||
static const uint32_t HAMSI_IV512[] __attribute__ ((aligned (32))) =
|
||||
{
|
||||
0x73746565, 0x6c706172, 0x6b204172, 0x656e6265,
|
||||
0x72672031, 0x302c2062, 0x75732032, 0x3434362c,
|
||||
@@ -43,7 +43,8 @@ static const uint32_t HAMSI_IV512[] =
|
||||
0x65766572, 0x6c65652c, 0x2042656c, 0x6769756d
|
||||
};
|
||||
|
||||
static const uint32_t alpha_n[] = {
|
||||
static const uint32_t alpha_n[] __attribute__ ((aligned (32))) =
|
||||
{
|
||||
0xff00f0f0, 0xccccaaaa, 0xf0f0cccc, 0xff00aaaa,
|
||||
0xccccaaaa, 0xf0f0ff00, 0xaaaacccc, 0xf0f0ff00,
|
||||
0xf0f0cccc, 0xaaaaff00, 0xccccff00, 0xaaaaf0f0,
|
||||
@@ -54,7 +55,8 @@ static const uint32_t alpha_n[] = {
|
||||
0xff00cccc, 0xaaaaf0f0, 0xff00aaaa, 0xccccf0f0
|
||||
};
|
||||
|
||||
static const uint32_t alpha_f[] = {
|
||||
static const uint32_t alpha_f[] __attribute__ ((aligned (32))) =
|
||||
{
|
||||
0xcaf9639c, 0x0ff0f9c0, 0x639c0ff0, 0xcaf9f9c0,
|
||||
0x0ff0f9c0, 0x639ccaf9, 0xf9c00ff0, 0x639ccaf9,
|
||||
0x639c0ff0, 0xf9c0caf9, 0x0ff0caf9, 0xf9c0639c,
|
||||
@@ -69,7 +71,8 @@ static const uint32_t alpha_f[] = {
|
||||
|
||||
/* Note: this table lists bits within each byte from least
|
||||
siginificant to most significant. */
|
||||
static const uint32_t T512[64][16] = {
|
||||
static const uint32_t T512[64][16] __attribute__ ((aligned (32))) =
|
||||
{
|
||||
{ 0xef0b0270, 0x3afd0000, 0x5dae0000, 0x69490000,
|
||||
0x9b0f3c06, 0x4405b5f9, 0x66140a51, 0x924f5d0a,
|
||||
0xc96b0030, 0xe7250000, 0x2f840000, 0x264f0000,
|
||||
@@ -1936,7 +1939,7 @@ void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst )
|
||||
#if defined(__SSE4_2__) || defined(__ARM_NEON)
|
||||
|
||||
#define DECL_STATE_2x64 \
|
||||
v128_t c0, c1, c2, c3, c4, c5, c6, c7; \
|
||||
v128u64_t c0, c1, c2, c3, c4, c5, c6, c7; \
|
||||
|
||||
#define READ_STATE_2x64(sc) \
|
||||
c0 = sc->h[0]; \
|
||||
@@ -1960,13 +1963,13 @@ void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst )
|
||||
|
||||
#define INPUT_2x64 \
|
||||
{ \
|
||||
v128_t db = *buf; \
|
||||
const v128_t zero = v128_zero; \
|
||||
v128u64_t db = *buf; \
|
||||
const v128u64_t zero = v128_64( 0ull ); \
|
||||
const uint64_t *tp = (const uint64_t*)T512; \
|
||||
m0 = m1 = m2 = m3 = m4 = m5 = m6 = m7 = zero; \
|
||||
for ( int i = 63; i >= 0; i-- ) \
|
||||
{ \
|
||||
v128_t dm = v128_cmpgt64( zero, v128_sl64( db, i ) ); \
|
||||
v128u64_t dm = v128_cmpgt64( zero, v128_sl64( db, i ) ); \
|
||||
m0 = v128_xor( m0, v128_and( dm, v128_64( tp[0] ) ) ); \
|
||||
m1 = v128_xor( m1, v128_and( dm, v128_64( tp[1] ) ) ); \
|
||||
m2 = v128_xor( m2, v128_and( dm, v128_64( tp[2] ) ) ); \
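
The INPUT_2x64 loop above expands one 64-bit message word bit by bit, XOR-ing a row of the T512 table into the m0..m7 accumulators for every set bit. The diff context shows only the first three accumulators; a scalar sketch of the same expansion (assumed equivalent, illustration only, assuming the upstream macro continues through m7 and advances tp by eight 64-bit words per bit):

#include <stdint.h>

/* Hamsi input expansion, scalar form (illustrative sketch).
   db is one 64-bit message word; tp points at T512 viewed as uint64_t. */
static void hamsi_input_scalar( uint64_t m[8], uint64_t db, const uint64_t *tp )
{
    for ( int j = 0; j < 8; j++ ) m[j] = 0;
    for ( int i = 63; i >= 0; i-- )
    {
        /* all-ones mask when bit (63-i) of db is set, matching
           v128_cmpgt64( zero, v128_sl64( db, i ) ) */
        const uint64_t dm = (uint64_t)0 - ( ( db << i ) >> 63 );
        for ( int j = 0; j < 8; j++ )
            m[j] ^= dm & tp[j];
        tp += 8;   /* next T512 row */
    }
}
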
|
||||
@@ -1982,7 +1985,7 @@ void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst )
|
||||
// v3 no ternary logic, 15 instructions, 9 TL equivalent instructions
|
||||
#define SBOX_2x64( a, b, c, d ) \
|
||||
{ \
|
||||
v128_t tb, td; \
|
||||
v128u64_t tb, td; \
|
||||
td = v128_xorand( d, a, c ); \
|
||||
tb = v128_xoror( b, d, a ); \
|
||||
c = v128_xor3( c, td, b ); \
|
||||
@@ -2010,7 +2013,7 @@ void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst )
|
||||
|
||||
#define ROUND_2x64( alpha ) \
|
||||
{ \
|
||||
v128_t t0, t1, t2, t3, t4, t5; \
|
||||
v128u64_t t0, t1, t2, t3, t4, t5; \
|
||||
const v128_t mask = v128_64( 0x00000000ffffffff ); \
|
||||
s0 = v128_xor( s0, alpha[ 0] ); \
|
||||
s1 = v128_xor( s1, alpha[ 1] ); \
|
||||
@@ -2107,7 +2110,7 @@ void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst )
|
||||
|
||||
#define P_2x64 \
|
||||
{ \
|
||||
v128_t alpha[16]; \
|
||||
v128u64_t alpha[16]; \
|
||||
const uint64_t A0 = ( (uint64_t*)alpha_n )[0]; \
|
||||
for( int i = 0; i < 16; i++ ) \
|
||||
alpha[i] = v128_64( ( (uint64_t*)alpha_n )[i] ); \
|
||||
@@ -2126,7 +2129,7 @@ void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst )
|
||||
|
||||
#define PF_2x64 \
|
||||
{ \
|
||||
v128_t alpha[16]; \
|
||||
v128u64_t alpha[16]; \
|
||||
const uint64_t A0 = ( (uint64_t*)alpha_f )[0]; \
|
||||
for( int i = 0; i < 16; i++ ) \
|
||||
alpha[i] = v128_64( ( (uint64_t*)alpha_f )[i] ); \
|
||||
@@ -2193,7 +2196,7 @@ void hamsi64_big( hamsi_2x64_context *sc, v128_t *buf, size_t num )
|
||||
|
||||
void hamsi64_big_final( hamsi_2x64_context *sc, v128_t *buf )
|
||||
{
|
||||
v128_t m0, m1, m2, m3, m4, m5, m6, m7;
|
||||
v128u64_t m0, m1, m2, m3, m4, m5, m6, m7;
|
||||
DECL_STATE_2x64;
|
||||
READ_STATE_2x64( sc );
|
||||
INPUT_2x64;
|
||||
@@ -2231,15 +2234,15 @@ void hamsi512_2x64_update( hamsi_2x64_context *sc, const void *data,
|
||||
|
||||
void hamsi512_2x64_close( hamsi_2x64_context *sc, void *dst )
|
||||
{
|
||||
v128_t pad[1];
|
||||
v128u32_t pad;
|
||||
uint32_t ch, cl;
|
||||
|
||||
ch = bswap_32( sc->count_high );
|
||||
cl = bswap_32( sc->count_low + ( sc->partial_len << 3 ) );
|
||||
pad[0] = v128_64( ((uint64_t)cl << 32 ) | (uint64_t)ch );
|
||||
pad = v128_64( ((uint64_t)cl << 32 ) | (uint64_t)ch );
|
||||
sc->buf[0] = v128_64( 0x80 );
|
||||
hamsi64_big( sc, sc->buf, 1 );
|
||||
hamsi64_big_final( sc, pad );
|
||||
hamsi64_big_final( sc, &pad );
|
||||
|
||||
v128_block_bswap32( (v128_t*)dst, sc->h );
|
||||
}
|
||||
@@ -2260,4 +2263,4 @@ void hamsi512_2x64( void *dst, const void *data, size_t len )
|
||||
hamsi512_2x64_close( &sc, dst );
|
||||
}
|
||||
|
||||
#endif // SSE4.1 or NEON
|
||||
#endif // SSE4.2 or NEON
|
||||
|
183
algo/hodl/aes.c
183
algo/hodl/aes.c
@@ -1,183 +0,0 @@
|
||||
#include <stdint.h>
|
||||
#include "miner.h"
|
||||
|
||||
#if defined(__AES__)
|
||||
|
||||
#include <x86intrin.h>
|
||||
#include "wolf-aes.h"
|
||||
|
||||
static inline void ExpandAESKey256_sub1(__m128i *tmp1, __m128i *tmp2)
|
||||
{
|
||||
__m128i tmp4;
|
||||
*tmp2 = _mm_shuffle_epi32(*tmp2, 0xFF);
|
||||
tmp4 = _mm_slli_si128(*tmp1, 0x04);
|
||||
*tmp1 = _mm_xor_si128(*tmp1, tmp4);
|
||||
tmp4 = _mm_slli_si128(tmp4, 0x04);
|
||||
*tmp1 = _mm_xor_si128(*tmp1, tmp4);
|
||||
tmp4 = _mm_slli_si128(tmp4, 0x04);
|
||||
*tmp1 = _mm_xor_si128(*tmp1, tmp4);
|
||||
*tmp1 = _mm_xor_si128(*tmp1, *tmp2);
|
||||
}
|
||||
|
||||
static inline void ExpandAESKey256_sub2(__m128i *tmp1, __m128i *tmp3)
|
||||
{
|
||||
__m128i tmp2, tmp4;
|
||||
|
||||
tmp4 = _mm_aeskeygenassist_si128(*tmp1, 0x00);
|
||||
tmp2 = _mm_shuffle_epi32(tmp4, 0xAA);
|
||||
tmp4 = _mm_slli_si128(*tmp3, 0x04);
|
||||
*tmp3 = _mm_xor_si128(*tmp3, tmp4);
|
||||
tmp4 = _mm_slli_si128(tmp4, 0x04);
|
||||
*tmp3 = _mm_xor_si128(*tmp3, tmp4);
|
||||
tmp4 = _mm_slli_si128(tmp4, 0x04);
|
||||
*tmp3 = _mm_xor_si128(*tmp3, tmp4);
|
||||
*tmp3 = _mm_xor_si128(*tmp3, tmp2);
|
||||
}
|
||||
|
||||
// Special thanks to Intel for helping me
|
||||
// with ExpandAESKey256() and its subroutines
|
||||
void ExpandAESKey256(__m128i *keys, const __m128i *KeyBuf)
|
||||
{
|
||||
__m128i tmp1, tmp2, tmp3;
|
||||
|
||||
tmp1 = keys[0] = KeyBuf[0];
|
||||
tmp3 = keys[1] = KeyBuf[1];
|
||||
|
||||
tmp2 = _mm_aeskeygenassist_si128(tmp3, 0x01);
|
||||
ExpandAESKey256_sub1(&tmp1, &tmp2);
|
||||
keys[2] = tmp1;
|
||||
ExpandAESKey256_sub2(&tmp1, &tmp3);
|
||||
keys[3] = tmp3;
|
||||
|
||||
tmp2 = _mm_aeskeygenassist_si128(tmp3, 0x02);
|
||||
ExpandAESKey256_sub1(&tmp1, &tmp2);
|
||||
keys[4] = tmp1;
|
||||
ExpandAESKey256_sub2(&tmp1, &tmp3);
|
||||
keys[5] = tmp3;
|
||||
|
||||
tmp2 = _mm_aeskeygenassist_si128(tmp3, 0x04);
|
||||
ExpandAESKey256_sub1(&tmp1, &tmp2);
|
||||
keys[6] = tmp1;
|
||||
ExpandAESKey256_sub2(&tmp1, &tmp3);
|
||||
keys[7] = tmp3;
|
||||
|
||||
tmp2 = _mm_aeskeygenassist_si128(tmp3, 0x08);
|
||||
ExpandAESKey256_sub1(&tmp1, &tmp2);
|
||||
keys[8] = tmp1;
|
||||
ExpandAESKey256_sub2(&tmp1, &tmp3);
|
||||
keys[9] = tmp3;
|
||||
|
||||
tmp2 = _mm_aeskeygenassist_si128(tmp3, 0x10);
|
||||
ExpandAESKey256_sub1(&tmp1, &tmp2);
|
||||
keys[10] = tmp1;
|
||||
ExpandAESKey256_sub2(&tmp1, &tmp3);
|
||||
keys[11] = tmp3;
|
||||
|
||||
tmp2 = _mm_aeskeygenassist_si128(tmp3, 0x20);
|
||||
ExpandAESKey256_sub1(&tmp1, &tmp2);
|
||||
keys[12] = tmp1;
|
||||
ExpandAESKey256_sub2(&tmp1, &tmp3);
|
||||
keys[13] = tmp3;
|
||||
|
||||
tmp2 = _mm_aeskeygenassist_si128(tmp3, 0x40);
|
||||
ExpandAESKey256_sub1(&tmp1, &tmp2);
|
||||
keys[14] = tmp1;
|
||||
}
|
||||
|
||||
#if defined(__SSE4_2__)
|
||||
//#ifdef __AVX__
|
||||
|
||||
#define AESENC(i,j) \
|
||||
State[j] = _mm_aesenc_si128(State[j], ExpandedKey[j][i]);
|
||||
|
||||
#define AESENC_N(i) \
|
||||
AESENC(i,0) \
|
||||
AESENC(i,1) \
|
||||
AESENC(i,2) \
|
||||
AESENC(i,3) \
|
||||
AESENC(i,4) \
|
||||
AESENC(i,5) \
|
||||
AESENC(i,6) \
|
||||
AESENC(i,7) \
|
||||
|
||||
|
||||
static inline void AES256Core(__m128i* State, __m128i ExpandedKey[][16])
|
||||
{
|
||||
const uint32_t N = AES_PARALLEL_N;
|
||||
|
||||
for(int j=0; j<N; ++j) {
|
||||
State[j] = _mm_xor_si128(State[j], ExpandedKey[j][0]);
|
||||
}
|
||||
|
||||
AESENC_N(1)
|
||||
AESENC_N(2)
|
||||
AESENC_N(3)
|
||||
AESENC_N(4)
|
||||
AESENC_N(5)
|
||||
AESENC_N(6)
|
||||
AESENC_N(7)
|
||||
AESENC_N(8)
|
||||
AESENC_N(9)
|
||||
AESENC_N(10)
|
||||
AESENC_N(11)
|
||||
AESENC_N(12)
|
||||
AESENC_N(13)
|
||||
|
||||
for(int j=0; j<N; ++j) {
|
||||
State[j] = _mm_aesenclast_si128(State[j], ExpandedKey[j][14]);
|
||||
}
|
||||
}
|
||||
|
||||
void AES256CBC(__m128i** data, const __m128i** next, __m128i ExpandedKey[][16], __m128i* IV)
|
||||
{
|
||||
const uint32_t N = AES_PARALLEL_N;
|
||||
__m128i State[N];
|
||||
for(int j=0; j<N; ++j) {
|
||||
State[j] = _mm_xor_si128( _mm_xor_si128(data[j][0], next[j][0]), IV[j]);
|
||||
}
|
||||
|
||||
AES256Core(State, ExpandedKey);
|
||||
for(int j=0; j<N; ++j) {
|
||||
data[j][0] = State[j];
|
||||
}
|
||||
|
||||
for(int i = 1; i < BLOCK_COUNT; ++i) {
|
||||
for(int j=0; j<N; ++j) {
|
||||
State[j] = _mm_xor_si128( _mm_xor_si128(data[j][i], next[j][i]), data[j][i - 1]);
|
||||
}
|
||||
AES256Core(State, ExpandedKey);
|
||||
for(int j=0; j<N; ++j) {
|
||||
data[j][i] = State[j];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#else // NO AVX
|
||||
|
||||
static inline __m128i AES256Core(__m128i State, const __m128i *ExpandedKey)
|
||||
{
|
||||
State = _mm_xor_si128(State, ExpandedKey[0]);
|
||||
|
||||
for(int i = 1; i < 14; ++i) State = _mm_aesenc_si128(State, ExpandedKey[i]);
|
||||
|
||||
return(_mm_aesenclast_si128(State, ExpandedKey[14]));
|
||||
}
|
||||
|
||||
void AES256CBC(__m128i *Ciphertext, const __m128i *Plaintext, const __m128i *ExpandedKey, __m128i IV, uint32_t BlockCount)
|
||||
{
|
||||
__m128i State = _mm_xor_si128(Plaintext[0], IV);
|
||||
State = AES256Core(State, ExpandedKey);
|
||||
Ciphertext[0] = State;
|
||||
|
||||
for(int i = 1; i < BlockCount; ++i)
|
||||
{
|
||||
State = _mm_xor_si128(Plaintext[i], Ciphertext[i - 1]);
|
||||
State = AES256Core(State, ExpandedKey);
|
||||
Ciphertext[i] = State;
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
@@ -1,75 +0,0 @@
|
||||
#ifndef HODL_BYTESWAP_H
|
||||
#define HODL_BYTESWAP_H 1
|
||||
|
||||
#define __bswap_constant_16(x) \
|
||||
((unsigned short int) ((((x) >> 8) & 0xff) | (((x) & 0xff) << 8)))
|
||||
|
||||
static __inline unsigned short int
|
||||
__bswap_16 (unsigned short int __bsx)
|
||||
{
|
||||
return __bswap_constant_16 (__bsx);
|
||||
}
|
||||
|
||||
// LE
|
||||
# define htobe16(x) __bswap_16 (x)
|
||||
# define htole16(x) (x)
|
||||
# define be16toh(x) __bswap_16 (x)
|
||||
# define le16toh(x) (x)
|
||||
|
||||
// BE
|
||||
//# define htole16(x) __bswap_16 (x)
|
||||
//# define htobe16(x) (x)
|
||||
//# define le16toh(x) __bswap_16 (x)
|
||||
//# define be16toh(x) (x)
|
||||
|
||||
#define __bswap_constant_32(x) \
|
||||
((((x) & 0xff000000) >> 24) | (((x) & 0x00ff0000) >> 8) | \
|
||||
(((x) & 0x0000ff00) << 8) | (((x) & 0x000000ff) << 24))
|
||||
|
||||
static __inline unsigned int
|
||||
__bswap_32 (unsigned int __bsx)
|
||||
{
|
||||
return __builtin_bswap32 (__bsx);
|
||||
}
|
||||
|
||||
// LE
|
||||
# define htobe32(x) __bswap_32 (x)
|
||||
# define htole32(x) (x)
|
||||
# define be32toh(x) __bswap_32 (x)
|
||||
# define le32toh(x) (x)
|
||||
|
||||
// BE
|
||||
//# define htole32(x) __bswap_32 (x)
|
||||
//# define htobe32(x) (x)
|
||||
//# define le32toh(x) __bswap_32 (x)
|
||||
//# define be32toh(x) (x)
|
||||
|
||||
# define __bswap_constant_64(x) \
|
||||
((((x) & 0xff00000000000000ull) >> 56) \
|
||||
| (((x) & 0x00ff000000000000ull) >> 40) \
|
||||
| (((x) & 0x0000ff0000000000ull) >> 24) \
|
||||
| (((x) & 0x000000ff00000000ull) >> 8) \
|
||||
| (((x) & 0x00000000ff000000ull) << 8) \
|
||||
| (((x) & 0x0000000000ff0000ull) << 24) \
|
||||
| (((x) & 0x000000000000ff00ull) << 40) \
|
||||
| (((x) & 0x00000000000000ffull) << 56))
|
||||
|
||||
static __inline uint64_t
|
||||
__bswap_64 (uint64_t __bsx)
|
||||
{
|
||||
return __bswap_constant_64 (__bsx);
|
||||
}
|
||||
|
||||
// LE
|
||||
# define htobe64(x) __bswap_64 (x)
|
||||
# define htole64(x) (x)
|
||||
# define be64toh(x) __bswap_64 (x)
|
||||
# define le64toh(x) (x)
|
||||
|
||||
// BE
|
||||
//# define htole64(x) __bswap_64 (x)
|
||||
//# define htobe64(x) (x)
|
||||
//# define le64toh(x) __bswap_64 (x)
|
||||
//# define be64toh(x) (x)
|
||||
|
||||
#endif
|
@@ -1,185 +0,0 @@
|
||||
#include <memory.h>
|
||||
//#include <mm_malloc.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "hodl-gate.h"
|
||||
#include "hodl-wolf.h"
|
||||
|
||||
#define HODL_NSTARTLOC_INDEX 20
|
||||
#define HODL_NFINALCALC_INDEX 21
|
||||
|
||||
static struct work hodl_work;
|
||||
|
||||
pthread_barrier_t hodl_barrier;
|
||||
|
||||
// All references to this buffer are local to this file, so no args
|
||||
// need to be passed.
|
||||
unsigned char *hodl_scratchbuf = NULL;
|
||||
|
||||
void hodl_le_build_stratum_request( char* req, struct work* work,
|
||||
struct stratum_ctx *sctx )
|
||||
{
|
||||
uint32_t ntime, nonce, nstartloc, nfinalcalc;
|
||||
char ntimestr[9], noncestr[9], nstartlocstr[9], nfinalcalcstr[9];
|
||||
unsigned char *xnonce2str;
|
||||
|
||||
le32enc( &ntime, work->data[ algo_gate.ntime_index ] );
|
||||
le32enc( &nonce, work->data[ algo_gate.nonce_index ] );
|
||||
bin2hex( ntimestr, (char*)(&ntime), sizeof(uint32_t) );
|
||||
bin2hex( noncestr, (char*)(&nonce), sizeof(uint32_t) );
|
||||
xnonce2str = abin2hex(work->xnonce2, work->xnonce2_len );
|
||||
le32enc( &nstartloc, work->data[ HODL_NSTARTLOC_INDEX ] );
|
||||
le32enc( &nfinalcalc, work->data[ HODL_NFINALCALC_INDEX ] );
|
||||
bin2hex( nstartlocstr, (char*)(&nstartloc), sizeof(uint32_t) );
|
||||
bin2hex( nfinalcalcstr, (char*)(&nfinalcalc), sizeof(uint32_t) );
|
||||
sprintf( req, "{\"method\": \"mining.submit\", \"params\": [\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\"], \"id\":4}",
|
||||
rpc_user, work->job_id, xnonce2str, ntimestr, noncestr,
|
||||
nstartlocstr, nfinalcalcstr );
|
||||
free( xnonce2str );
|
||||
}
|
||||
|
||||
char* hodl_malloc_txs_request( struct work *work )
|
||||
{
|
||||
char* req;
|
||||
json_t *val;
|
||||
char data_str[2 * sizeof(work->data) + 1];
|
||||
int i;
|
||||
|
||||
for ( i = 0; i < ARRAY_SIZE(work->data); i++ )
|
||||
be32enc( work->data + i, work->data[i] );
|
||||
|
||||
bin2hex( data_str, (unsigned char *)work->data, 88 );
|
||||
if ( work->workid )
|
||||
{
|
||||
char *params;
|
||||
val = json_object();
|
||||
json_object_set_new( val, "workid", json_string( work->workid ) );
|
||||
params = json_dumps( val, 0 );
|
||||
json_decref( val );
|
||||
req = malloc( 128 + 2*88 + strlen( work->txs ) + strlen( params ) );
|
||||
sprintf( req,
|
||||
"{\"method\": \"submitblock\", \"params\": [\"%s%s\", %s], \"id\":1}\r\n",
|
||||
data_str, work->txs, params);
|
||||
free( params );
|
||||
}
|
||||
else
|
||||
{
|
||||
req = malloc( 128 + 2*88 + strlen(work->txs));
|
||||
sprintf( req,
|
||||
"{\"method\": \"submitblock\", \"params\": [\"%s%s\"], \"id\":1}\r\n",
|
||||
data_str, work->txs);
|
||||
}
|
||||
return req;
|
||||
}
|
||||
|
||||
void hodl_build_block_header( struct work* g_work, uint32_t version,
|
||||
uint32_t *prevhash, uint32_t *merkle_tree,
|
||||
uint32_t ntime, uint32_t nbits )
|
||||
{
|
||||
int i;
|
||||
|
||||
memset( g_work->data, 0, sizeof(g_work->data) );
|
||||
g_work->data[0] = version;
|
||||
|
||||
if ( have_stratum )
|
||||
for ( i = 0; i < 8; i++ )
|
||||
g_work->data[ 1+i ] = le32dec( prevhash + i );
|
||||
else
|
||||
for (i = 0; i < 8; i++)
|
||||
g_work->data[ 8-i ] = le32dec( prevhash + i );
|
||||
|
||||
for ( i = 0; i < 8; i++ )
|
||||
g_work->data[ 9+i ] = be32dec( merkle_tree + i );
|
||||
|
||||
g_work->data[ algo_gate.ntime_index ] = ntime;
|
||||
g_work->data[ algo_gate.nbits_index ] = nbits;
|
||||
g_work->data[22] = 0x80000000;
|
||||
g_work->data[31] = 0x00000280;
|
||||
}
|
||||
|
||||
// called only by thread 0, saves a backup of g_work
|
||||
void hodl_get_new_work( struct work* work, struct work* g_work)
|
||||
{
|
||||
// pthread_rwlock_rdlock( &g_work_lock );
|
||||
|
||||
work_free( &hodl_work );
|
||||
work_copy( &hodl_work, g_work );
|
||||
hodl_work.data[ algo_gate.nonce_index ] = ( clock() + rand() ) % 9999;
|
||||
|
||||
// pthread_rwlock_unlock( &g_work_lock );
|
||||
}
|
||||
|
||||
json_t *hodl_longpoll_rpc_call( CURL *curl, int *err, char* lp_url )
|
||||
{
|
||||
json_t *val;
|
||||
char *req = NULL;
|
||||
|
||||
if ( have_gbt )
|
||||
{
|
||||
req = malloc( strlen( gbt_lp_req ) + strlen( lp_id ) + 1 );
|
||||
sprintf( req, gbt_lp_req, lp_id );
|
||||
}
|
||||
val = json_rpc_call( curl, lp_url, rpc_userpass,
|
||||
req ? req : getwork_req, err, JSON_RPC_LONGPOLL );
|
||||
free( req );
|
||||
return val;
|
||||
}
|
||||
|
||||
// called by every thread, copies the backup to each thread's work.
|
||||
void hodl_resync_threads( int thr_id, struct work* work )
|
||||
{
|
||||
int nonce_index = algo_gate.nonce_index;
|
||||
pthread_barrier_wait( &hodl_barrier );
|
||||
if ( memcmp( work->data, hodl_work.data, algo_gate.work_cmp_size ) )
|
||||
{
|
||||
work_free( work );
|
||||
work_copy( work, &hodl_work );
|
||||
}
|
||||
work->data[ nonce_index ] = swab32( hodl_work.data[ nonce_index ] );
|
||||
work_restart[thr_id].restart = 0;
|
||||
}
|
||||
|
||||
bool hodl_do_this_thread( int thr_id )
|
||||
{
|
||||
return ( thr_id == 0 );
|
||||
}
|
||||
|
||||
int hodl_scanhash( struct work* work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
#if defined(__AES__)
|
||||
GenRandomGarbage( (CacheEntry*)hodl_scratchbuf, work->data, mythr->id );
|
||||
pthread_barrier_wait( &hodl_barrier );
|
||||
return scanhash_hodl_wolf( work, max_nonce, hashes_done, mythr );
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
bool register_hodl_algo( algo_gate_t* gate )
|
||||
{
|
||||
#if !defined(__AES__)
|
||||
applog( LOG_ERR, "Only CPUs with AES are supported, use legacy version.");
|
||||
return false;
|
||||
#endif
|
||||
|
||||
if ( GARBAGE_SIZE % opt_n_threads )
|
||||
applog( LOG_WARNING,"WARNING: Thread count must be power of 2. Miner may crash or produce invalid hash!" );
|
||||
|
||||
pthread_barrier_init( &hodl_barrier, NULL, opt_n_threads );
|
||||
gate->optimizations = SSE42_OPT | AES_OPT | AVX2_OPT;
|
||||
gate->scanhash = (void*)&hodl_scanhash;
|
||||
gate->get_new_work = (void*)&hodl_get_new_work;
|
||||
gate->longpoll_rpc_call = (void*)&hodl_longpoll_rpc_call;
|
||||
gate->build_stratum_request = (void*)&hodl_le_build_stratum_request;
|
||||
gate->malloc_txs_request = (void*)&hodl_malloc_txs_request;
|
||||
gate->build_block_header = (void*)&hodl_build_block_header;
|
||||
gate->resync_threads = (void*)&hodl_resync_threads;
|
||||
gate->do_this_thread = (void*)&hodl_do_this_thread;
|
||||
gate->work_cmp_size = 76;
|
||||
hodl_scratchbuf = (unsigned char*)mm_malloc( 1 << 30, 64 );
|
||||
allow_getwork = false;
|
||||
opt_target_factor = 8388608.0;
|
||||
return ( hodl_scratchbuf != NULL );
|
||||
}
|
||||
|
||||
|
@@ -1,6 +0,0 @@
|
||||
#include "algo-gate-api.h"
|
||||
|
||||
extern unsigned char *hodl_scratchbuf;
|
||||
|
||||
bool register_hodl_algo ( algo_gate_t* gate );
|
||||
|
@@ -1,225 +0,0 @@
|
||||
#include <string.h>
|
||||
#include <openssl/evp.h>
|
||||
#include <openssl/sha.h>
|
||||
#include "simd-utils.h"
|
||||
#include "sha512-avx.h"
|
||||
#include "wolf-aes.h"
|
||||
#include "hodl-gate.h"
|
||||
#include "hodl-wolf.h"
|
||||
#include "miner.h"
|
||||
#include "algo/sha/sha256d.h"
|
||||
|
||||
#if defined(__AES__)
|
||||
|
||||
void GenerateGarbageCore( CacheEntry *Garbage, int ThreadID, int ThreadCount,
|
||||
void *MidHash )
|
||||
{
|
||||
const int Chunk = TOTAL_CHUNKS / ThreadCount;
|
||||
const uint32_t StartChunk = ThreadID * Chunk;
|
||||
const uint32_t EndChunk = StartChunk + Chunk;
|
||||
|
||||
#if defined(__SSE4_2__)
|
||||
//#ifdef __AVX__
|
||||
uint64_t* TempBufs[ SHA512_PARALLEL_N ] ;
|
||||
uint64_t* desination[ SHA512_PARALLEL_N ];
|
||||
|
||||
for ( int i=0; i < SHA512_PARALLEL_N; ++i )
|
||||
{
|
||||
TempBufs[i] = (uint64_t*)malloc( 32 );
|
||||
memcpy( TempBufs[i], MidHash, 32 );
|
||||
}
|
||||
|
||||
for ( uint32_t i = StartChunk; i < EndChunk; i += SHA512_PARALLEL_N )
|
||||
{
|
||||
for ( int j = 0; j < SHA512_PARALLEL_N; ++j )
|
||||
{
|
||||
( (uint32_t*)TempBufs[j] )[0] = i + j;
|
||||
desination[j] = (uint64_t*)( (uint8_t *)Garbage + ( (i+j)
|
||||
* GARBAGE_CHUNK_SIZE ) );
|
||||
}
|
||||
sha512Compute32b_parallel( TempBufs, desination );
|
||||
}
|
||||
|
||||
for ( int i = 0; i < SHA512_PARALLEL_N; ++i )
|
||||
free( TempBufs[i] );
|
||||
#else
|
||||
uint32_t TempBuf[8];
|
||||
memcpy( TempBuf, MidHash, 32 );
|
||||
|
||||
for ( uint32_t i = StartChunk; i < EndChunk; ++i )
|
||||
{
|
||||
TempBuf[0] = i;
|
||||
SHA512( ( uint8_t *)TempBuf, 32,
|
||||
( (uint8_t *)Garbage ) + ( i * GARBAGE_CHUNK_SIZE ) );
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
void Rev256(uint32_t *Dest, const uint32_t *Src)
|
||||
{
|
||||
for(int i = 0; i < 8; ++i) Dest[i] = swab32(Src[i]);
|
||||
}
|
||||
*/
|
||||
|
||||
int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
#if defined(__SSE4_2__)
|
||||
//#ifdef __AVX__
|
||||
uint32_t *pdata = work->data;
|
||||
uint32_t *ptarget = work->target;
|
||||
int threadNumber = mythr->id;
|
||||
CacheEntry *Garbage = (CacheEntry*)hodl_scratchbuf;
|
||||
CacheEntry Cache[AES_PARALLEL_N] __attribute__ ((aligned (64)));
|
||||
__m128i* data[AES_PARALLEL_N];
|
||||
const __m128i* next[AES_PARALLEL_N];
|
||||
uint32_t CollisionCount = 0;
|
||||
|
||||
for ( int n=0; n<AES_PARALLEL_N; ++n )
|
||||
{
|
||||
data[n] = Cache[n].dqwords;
|
||||
}
|
||||
|
||||
// Search for pattern in psuedorandom data
|
||||
int searchNumber = COMPARE_SIZE / opt_n_threads;
|
||||
int startLoc = threadNumber * searchNumber;
|
||||
|
||||
for ( int32_t k = startLoc; k < startLoc + searchNumber && !work_restart[threadNumber].restart; k += AES_PARALLEL_N )
|
||||
{
|
||||
// copy data to first l2 cache
|
||||
for ( int n=0; n<AES_PARALLEL_N; ++n )
|
||||
{
|
||||
memcpy(Cache[n].dwords, Garbage + k + n, GARBAGE_SLICE_SIZE);
|
||||
}
|
||||
|
||||
for(int j = 0; j < AES_ITERATIONS; ++j)
|
||||
{
|
||||
__m128i ExpKey[AES_PARALLEL_N][16];
|
||||
__m128i ivs[AES_PARALLEL_N];
|
||||
|
||||
// use last 4 bytes of first cache as next location
|
||||
for(int n=0; n<AES_PARALLEL_N; ++n) {
|
||||
uint32_t nextLocation = Cache[n].dwords[(GARBAGE_SLICE_SIZE >> 2) - 1] & (COMPARE_SIZE - 1); //% COMPARE_SIZE;
|
||||
next[n] = Garbage[nextLocation].dqwords;
|
||||
|
||||
__m128i last[2];
|
||||
last[0] = _mm_xor_si128(Cache[n].dqwords[254], next[n][254]);
|
||||
last[1] = _mm_xor_si128(Cache[n].dqwords[255], next[n][255]);
|
||||
|
||||
// Key is last 32b of Cache
|
||||
// IV is last 16b of Cache
|
||||
ExpandAESKey256(ExpKey[n], last);
|
||||
ivs[n] = last[1];
|
||||
}
|
||||
AES256CBC(data, next, ExpKey, ivs);
|
||||
}
|
||||
|
||||
for(int n=0; n<AES_PARALLEL_N; ++n)
|
||||
if((Cache[n].dwords[(GARBAGE_SLICE_SIZE >> 2) - 1] & (COMPARE_SIZE - 1)) < 1000)
|
||||
{
|
||||
uint32_t BlockHdr[22], FinalPoW[8];
|
||||
|
||||
swab32_array( BlockHdr, pdata, 20 );
|
||||
|
||||
BlockHdr[20] = k + n;
|
||||
BlockHdr[21] = Cache[n].dwords[(GARBAGE_SLICE_SIZE >> 2) - 2];
|
||||
|
||||
sha256d( (uint8_t *)FinalPoW, (uint8_t *)BlockHdr, 88 );
|
||||
CollisionCount++;
|
||||
if( FinalPoW[7] <= ptarget[7] )
|
||||
{
|
||||
pdata[20] = swab32( BlockHdr[20] );
|
||||
pdata[21] = swab32( BlockHdr[21] );
|
||||
*hashes_done = CollisionCount;
|
||||
submit_solution( work, FinalPoW, mythr );
|
||||
return(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
*hashes_done = CollisionCount;
|
||||
return(0);
|
||||
|
||||
|
||||
#else // no AVX
|
||||
|
||||
uint32_t *pdata = work->data;
|
||||
uint32_t *ptarget = work->target;
|
||||
uint32_t BlockHdr[22], FinalPoW[8];
|
||||
CacheEntry *Garbage = (CacheEntry*)hodl_scratchbuf;
|
||||
CacheEntry Cache;
|
||||
uint32_t CollisionCount = 0;
|
||||
int threadNumber = mythr->id;
|
||||
|
||||
swab32_array( BlockHdr, pdata, 20 );
|
||||
// Search for pattern in psuedorandom data
|
||||
int searchNumber = COMPARE_SIZE / opt_n_threads;
|
||||
int startLoc = threadNumber * searchNumber;
|
||||
|
||||
if ( opt_debug )
|
||||
applog( LOG_DEBUG,"Hash target= %08lx", ptarget[7] );
|
||||
|
||||
for(int32_t k = startLoc; k < startLoc + searchNumber && !work_restart[threadNumber].restart; k++)
|
||||
{
|
||||
// copy data to first l2 cache
|
||||
memcpy(Cache.dwords, Garbage + k, GARBAGE_SLICE_SIZE);
|
||||
for(int j = 0; j < AES_ITERATIONS; j++)
|
||||
{
|
||||
CacheEntry TmpXOR;
|
||||
__m128i ExpKey[16];
|
||||
|
||||
// use last 4 bytes of first cache as next location
|
||||
uint32_t nextLocation = Cache.dwords[(GARBAGE_SLICE_SIZE >> 2)
|
||||
- 1] & (COMPARE_SIZE - 1); //% COMPARE_SIZE;
|
||||
|
||||
// Copy data from indicated location to second l2 cache -
|
||||
memcpy(&TmpXOR, Garbage + nextLocation, GARBAGE_SLICE_SIZE);
|
||||
//XOR location data into second cache
|
||||
for( int i = 0; i < (GARBAGE_SLICE_SIZE >> 4); ++i )
|
||||
TmpXOR.dqwords[i] = _mm_xor_si128( Cache.dqwords[i],
|
||||
TmpXOR.dqwords[i] );
|
||||
// Key is last 32b of TmpXOR
|
||||
// IV is last 16b of TmpXOR
|
||||
|
||||
ExpandAESKey256( ExpKey, TmpXOR.dqwords +
|
||||
(GARBAGE_SLICE_SIZE / sizeof(__m128i)) - 2 );
|
||||
AES256CBC( Cache.dqwords, TmpXOR.dqwords, ExpKey,
|
||||
TmpXOR.dqwords[ (GARBAGE_SLICE_SIZE / sizeof(__m128i))
|
||||
- 1 ], 256 ); }
|
||||
// use last X bits as solution
|
||||
if( ( Cache.dwords[ (GARBAGE_SLICE_SIZE >> 2) - 1 ]
|
||||
& (COMPARE_SIZE - 1) ) < 1000 )
|
||||
{
|
||||
BlockHdr[20] = k;
|
||||
BlockHdr[21] = Cache.dwords[ (GARBAGE_SLICE_SIZE >> 2) - 2 ];
|
||||
sha256d( (uint8_t *)FinalPoW, (uint8_t *)BlockHdr, 88 );
|
||||
CollisionCount++;
|
||||
if( FinalPoW[7] <= ptarget[7] )
|
||||
{
|
||||
pdata[20] = swab32( BlockHdr[20] );
|
||||
pdata[21] = swab32( BlockHdr[21] );
|
||||
*hashes_done = CollisionCount;
|
||||
submit_solution( work, FinalPoW, mythr );
|
||||
return(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
*hashes_done = CollisionCount;
|
||||
return(0);
|
||||
|
||||
#endif // AVX else
|
||||
|
||||
}
|
||||
|
||||
void GenRandomGarbage(CacheEntry *Garbage, uint32_t *pdata, int thr_id)
|
||||
{
|
||||
uint32_t BlockHdr[20], MidHash[8];
|
||||
swab32_array( BlockHdr, pdata, 20 );
|
||||
sha256d((uint8_t *)MidHash, (uint8_t *)BlockHdr, 80);
|
||||
GenerateGarbageCore(Garbage, thr_id, opt_n_threads, MidHash);
|
||||
}
|
||||
|
||||
#endif // AES
|
||||
|
@@ -1,27 +0,0 @@
|
||||
#ifndef __HODL_H
|
||||
#define __HODL_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include "simd-utils.h"
|
||||
#include "miner.h"
|
||||
|
||||
#define AES_ITERATIONS 15
|
||||
|
||||
#define GARBAGE_SIZE (1 << 30)
|
||||
#define GARBAGE_CHUNK_SIZE (1 << 6)
|
||||
#define GARBAGE_SLICE_SIZE (1 << 12)
|
||||
#define TOTAL_CHUNKS (1 << 24) // GARBAGE_SIZE / GARBAGE_CHUNK_SIZE
|
||||
#define COMPARE_SIZE (1 << 18) // GARBAGE_SIZE / GARBAGE_SLICE_SIZE
|
||||
|
||||
typedef union _CacheEntry
|
||||
{
|
||||
uint32_t dwords[GARBAGE_SLICE_SIZE >> 2] __attribute__((aligned(16)));
|
||||
v128_t dqwords[GARBAGE_SLICE_SIZE >> 4] __attribute__((aligned(16)));
|
||||
} CacheEntry;
|
||||
|
||||
int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
|
||||
void GenRandomGarbage( CacheEntry *Garbage, uint32_t *pdata, int thr_id);
|
||||
|
||||
#endif // __HODL_H
|
@@ -1,208 +0,0 @@
.TH MINERD 1 "March 2016" "cpuminer 2.4.3"
.SH NAME
hodlminer \- CPU miner for Hodlcoin
.SH SYNOPSIS
.B hodlminer
[\fIOPTION\fR]...
.SH DESCRIPTION
.B hodlminer
is a multi-threaded CPU miner for Hodlcoin.
It supports the getwork and getblocktemplate (BIP 22) methods,
as well as the Stratum mining protocol.
.PP
In its normal mode of operation, \fBhodlminer\fR connects to a mining server
(specified with the \fB\-o\fR option), receives work from it and starts hashing.
As soon as a solution is found, it is submitted to the same mining server,
which can accept or reject it.
When using getwork or getblocktemplate,
\fBhodlminer\fR can take advantage of long polling, if the server supports it;
in any case, fresh work is fetched as needed.
When using the Stratum protocol this is not possible,
and the server is responsible for sending fresh work at least every minute;
if it fails to do so,
\fBhodlminer\fR may drop the connection and try reconnecting again.
.PP
By default, \fBhodlminer\fR writes all its messages to standard error.
On systems that have a syslog, the \fB\-\-syslog\fR option can be used
to write to it instead.
.PP
On start, the nice value of all miner threads is set to 19.
On Linux, the scheduling policy is also changed to SCHED_IDLE,
or to SCHED_BATCH if that fails.
On multiprocessor systems, \fBhodlminer\fR
automatically sets the CPU affinity of miner threads
if the number of threads is a multiple of the number of processors.
.SH EXAMPLES
To connect to the Hodlcoin mining pool that provides a Stratum server
at hodl.blockquarry.com on port 8332, authenticating as worker "user.worker" with password "x":
.PP
.nf
.RS
hodlminer \-o stratum+tcp://hodl.blockquarry.com:8332 \-u user.worker -p x -q
.RE
.fi
.PP
To mine to a local Hodlcoin instance running on port 18332,
authenticating with username "rpcuser" and password "rpcpass":
.PP
.nf
.RS
hodlminer \-a hodl \-o http://localhost:18332 \-O rpcuser:rpcpass \\
\-\-coinbase\-addr=mpXwg4jMtRhuSpVq4xS3HFHmCmWp9NyGKt
.RE
.fi
.PP
.SH OPTIONS
.TP
\fB\-a\fR, \fB\-\-algo\fR=\fIALGORITHM\fR
Set the hashing algorithm to use.
Default is hodl.
Possible values are:
.RS 11
.TP 10
.B hodl
.TP
\fB\-\-benchmark\fR
Run in offline benchmark mode.
.TP
\fB\-B\fR, \fB\-\-background\fR
Run in the background as a daemon.
.TP
\fB\-\-cert\fR=\fIFILE\fR
Set an SSL certificate to use with the mining server.
Only supported when using the HTTPS protocol.
.TP
\fB\-\-coinbase\-addr\fR=\fIADDRESS\fR
Set a payout address for solo mining.
This is only used in getblocktemplate mode,
and only if the server does not provide a coinbase transaction.
.TP
\fB\-\-coinbase\-sig\fR=\fITEXT\fR
Set a string to be included in the coinbase (if allowed by the server).
This is only used in getblocktemplate mode.
.TP
\fB\-c\fR, \fB\-\-config\fR=\fIFILE\fR
Load options from a configuration file.
\fIFILE\fR must contain a JSON object
mapping long options to their arguments (as strings),
or to \fBtrue\fR if no argument is required.
Sample configuration file:

.nf
{
"url": "stratum+tcp://hodl.blockquarry.com:8332",
"userpass": "foo:bar",
"retry-pause": "10",
"quiet": true
}
.fi
.TP
\fB\-D\fR, \fB\-\-debug\fR
Enable debug output.
.TP
\fB\-h\fR, \fB\-\-help\fR
Print a help message and exit.
.TP
\fB\-\-no\-gbt\fR
Do not use the getblocktemplate RPC method.
.TP
\fB\-\-no\-getwork\fR
Do not use the getwork RPC method.
.TP
\fB\-\-no\-longpoll\fR
Do not use long polling.
.TP
\fB\-\-no\-redirect\fR
Ignore requests from the server to switch to a different URL.
.TP
\fB\-\-no\-stratum\fR
Do not switch to Stratum, even if the server advertises support for it.
.TP
\fB\-o\fR, \fB\-\-url\fR=[\fISCHEME\fR://][\fIUSERNAME\fR[:\fIPASSWORD\fR]@]\fIHOST\fR:\fIPORT\fR[/\fIPATH\fR]
Set the URL of the mining server to connect to.
Supported schemes are \fBhttp\fR, \fBhttps\fR, \fBstratum+tcp\fR
and \fBstratum+tcps\fR.
If no scheme is specified, http is assumed.
Specifying a \fIPATH\fR is only supported for HTTP and HTTPS.
Specifying credentials has the same effect as using the \fB\-O\fR option.

By default, on HTTP and HTTPS,
the miner tries to use the getblocktemplate RPC method,
and falls back to using getwork if getblocktemplate is unavailable.
This behavior can be modified by using the \fB\-\-no\-gbt\fR
and \fB\-\-no\-getwork\fR options.
.TP
\fB\-O\fR, \fB\-\-userpass\fR=\fIUSERNAME\fR:\fIPASSWORD\fR
Set the credentials to use for connecting to the mining server.
Any value previously set with \fB\-u\fR or \fB\-p\fR is discarded.
.TP
\fB\-p\fR, \fB\-\-pass\fR=\fIPASSWORD\fR
Set the password to use for connecting to the mining server.
Any password previously set with \fB\-O\fR is discarded.
.TP
\fB\-P\fR, \fB\-\-protocol\-dump\fR
Enable output of all protocol-level activities.
.TP
\fB\-q\fR, \fB\-\-quiet\fR
Disable per-thread hashmeter output.
.TP
\fB\-r\fR, \fB\-\-retries\fR=\fIN\fR
Set the maximum number of times to retry if a network call fails.
If not specified, the miner will retry indefinitely.
.TP
\fB\-R\fR, \fB\-\-retry\-pause\fR=\fISECONDS\fR
Set how long to wait between retries. Default is 30 seconds.
.TP
\fB\-s\fR, \fB\-\-scantime\fR=\fISECONDS\fR
Set an upper bound on the time the miner can go without fetching fresh work.
This setting has no effect in Stratum mode or when long polling is activated.
Default is 5 seconds.
.TP
\fB\-S\fR, \fB\-\-syslog\fR
Log to the syslog facility instead of standard error.
.TP
\fB\-t\fR, \fB\-\-threads\fR=\fIN\fR
Set the number of miner threads.
If not specified, the miner will try to detect the number of available processors
and use that.
.TP
\fB\-T\fR, \fB\-\-timeout\fR=\fISECONDS\fR
Set a timeout for long polling.
.TP
\fB\-u\fR, \fB\-\-user\fR=\fIUSERNAME\fR
Set the username to use for connecting to the mining server.
Any username previously set with \fB\-O\fR is discarded.
.TP
\fB\-V\fR, \fB\-\-version\fR
Display version information and quit.
.TP
\fB\-x\fR, \fB\-\-proxy\fR=[\fISCHEME\fR://][\fIUSERNAME\fR:\fIPASSWORD\fR@]\fIHOST\fR:\fIPORT\fR
Connect to the mining server through a proxy.
Supported schemes are: \fBhttp\fR, \fBsocks4\fR, \fBsocks5\fR.
Since libcurl 7.18.0, the following are also supported:
\fBsocks4a\fR, \fBsocks5h\fR (SOCKS5 with remote name resolving).
If no scheme is specified, the proxy is treated as an HTTP proxy.
.SH ENVIRONMENT
The following environment variables can be specified in lower case or upper case;
the lower-case version has precedence. \fBhttp_proxy\fR is an exception
as it is only available in lower case.
.PP
.RS
.TP
\fBhttp_proxy\fR [\fISCHEME\fR://]\fIHOST\fR:\fIPORT\fR
Sets the proxy server to use for HTTP.
.TP
\fBHTTPS_PROXY\fR [\fISCHEME\fR://]\fIHOST\fR:\fIPORT\fR
Sets the proxy server to use for HTTPS.
.TP
\fBALL_PROXY\fR [\fISCHEME\fR://]\fIHOST\fR:\fIPORT\fR
Sets the proxy server to use if no protocol-specific proxy is set.
.RE
.PP
Using an environment variable to set the proxy has the same effect as
using the \fB\-x\fR option.
.SH AUTHOR
Most of the code in the current version of minerd was written by
Pooler <pooler@litecoinpool.org> with contributions from others.

The original minerd was written by Jeff Garzik <jeff@garzik.org>.
@@ -1,50 +0,0 @@
#ifndef _SHA512_H
#define _SHA512_H

#include <stdint.h>
#include "simd-utils.h"

//SHA-512 block size
#define SHA512_BLOCK_SIZE 128
//SHA-512 digest size
#define SHA512_DIGEST_SIZE 64

/*
#ifndef __AVX2__
#ifndef __AVX__
#error "Either AVX or AVX2 supported needed"
#endif // __AVX__
#endif // __AVX2__
*/

typedef struct
{
#ifdef __AVX2__
    __m256i h[8];
    __m256i w[80];
#elif defined(__SSE4_2__)
//#elif defined(__AVX__)
    v128_t h[8];
    v128_t w[80];
#else
    int dummy;
#endif
} Sha512Context;

#ifdef __AVX2__
#define SHA512_PARALLEL_N 8
#elif defined(__SSE4_2__)
//#elif defined(__AVX__)
#define SHA512_PARALLEL_N 4
#else
#define SHA512_PARALLEL_N 1 // dummy value
#endif

//SHA-512 related functions
void sha512Compute32b_parallel(
    uint64_t *data[SHA512_PARALLEL_N],
    uint64_t *digest[SHA512_PARALLEL_N]);

void sha512ProcessBlock(Sha512Context contexti[2] );

#endif
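A rough caller-side sketch of the interface declared in the removed header (assuming an AVX2 build, so SHA512_PARALLEL_N is 8): each lane is an independent 32-byte message and each digest buffer receives a full 64-byte SHA-512 result. The wrapper name is hypothetical.

/* Illustrative usage of the removed sha512Compute32b_parallel() interface.
   Assumes AVX2, i.e. SHA512_PARALLEL_N == 8; hash_8_lanes is a hypothetical
   caller, not code from this repository. */
#include <stdint.h>

#define SHA512_PARALLEL_N 8

void sha512Compute32b_parallel( uint64_t *data[SHA512_PARALLEL_N],
                                uint64_t *digest[SHA512_PARALLEL_N] );

static void hash_8_lanes( uint64_t msgs[8][4], uint64_t out[8][8] )
{
   uint64_t *data[SHA512_PARALLEL_N];
   uint64_t *digest[SHA512_PARALLEL_N];
   for ( int i = 0; i < SHA512_PARALLEL_N; i++ )
   {
      data[i]   = msgs[i];   // 4 x 64-bit words: one 32-byte input per lane
      digest[i] = out[i];    // 8 x 64-bit words: 64-byte SHA-512 digest per lane
   }
   sha512Compute32b_parallel( data, digest );
}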
@@ -1,235 +0,0 @@
|
||||
#ifndef __AVX2__
|
||||
|
||||
#if defined(__SSE4_2__)
|
||||
//#ifdef __AVX__
|
||||
|
||||
//Dependencies
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#ifdef __FreeBSD__
|
||||
#include <sys/endian.h>
|
||||
#endif
|
||||
|
||||
#if defined(__CYGWIN__)
|
||||
#include <endian.h>
|
||||
#endif
|
||||
|
||||
#include "tmmintrin.h"
|
||||
#include "smmintrin.h"
|
||||
|
||||
#include "sha512-avx.h"
|
||||
#if ((defined(_WIN64) || defined(__WINDOWS__)))
|
||||
#include "hodl-endian.h"
|
||||
#endif
|
||||
|
||||
//SHA-512 auxiliary functions
|
||||
#define Ch(x, y, z) (((x) & (y)) | (~(x) & (z)))
|
||||
#define Maj(x, y, z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
|
||||
#define SIGMA1(x) (ROR64(x, 28) ^ ROR64(x, 34) ^ ROR64(x, 39))
|
||||
#define SIGMA2(x) (ROR64(x, 14) ^ ROR64(x, 18) ^ ROR64(x, 41))
|
||||
#define SIGMA3(x) (ROR64(x, 1) ^ ROR64(x, 8) ^ SHR64(x, 7))
|
||||
#define SIGMA4(x) (ROR64(x, 19) ^ ROR64(x, 61) ^ SHR64(x, 6))
|
||||
|
||||
//Rotate right operation
|
||||
#define ROR64(a, n) _mm_or_si128(_mm_srli_epi64(a, n), _mm_slli_epi64(a, 64 - n))
|
||||
|
||||
//Shift right operation
|
||||
#define SHR64(a, n) _mm_srli_epi64(a, n)
|
||||
|
||||
__m128i mm_htobe_epi64(__m128i a) {
|
||||
__m128i mask = _mm_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7);
|
||||
return _mm_shuffle_epi8(a, mask);
|
||||
}
|
||||
|
||||
__m128i mm_betoh_epi64(__m128i a) {
|
||||
return mm_htobe_epi64(a);
|
||||
}
|
||||
|
||||
//SHA-512 padding
|
||||
static const uint8_t padding[128] =
|
||||
{
|
||||
0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
|
||||
};
|
||||
|
||||
//SHA-512 constants
|
||||
static const uint64_t k[80] =
|
||||
{
|
||||
0x428A2F98D728AE22, 0x7137449123EF65CD, 0xB5C0FBCFEC4D3B2F, 0xE9B5DBA58189DBBC,
|
||||
0x3956C25BF348B538, 0x59F111F1B605D019, 0x923F82A4AF194F9B, 0xAB1C5ED5DA6D8118,
|
||||
0xD807AA98A3030242, 0x12835B0145706FBE, 0x243185BE4EE4B28C, 0x550C7DC3D5FFB4E2,
|
||||
0x72BE5D74F27B896F, 0x80DEB1FE3B1696B1, 0x9BDC06A725C71235, 0xC19BF174CF692694,
|
||||
0xE49B69C19EF14AD2, 0xEFBE4786384F25E3, 0x0FC19DC68B8CD5B5, 0x240CA1CC77AC9C65,
|
||||
0x2DE92C6F592B0275, 0x4A7484AA6EA6E483, 0x5CB0A9DCBD41FBD4, 0x76F988DA831153B5,
|
||||
0x983E5152EE66DFAB, 0xA831C66D2DB43210, 0xB00327C898FB213F, 0xBF597FC7BEEF0EE4,
|
||||
0xC6E00BF33DA88FC2, 0xD5A79147930AA725, 0x06CA6351E003826F, 0x142929670A0E6E70,
|
||||
0x27B70A8546D22FFC, 0x2E1B21385C26C926, 0x4D2C6DFC5AC42AED, 0x53380D139D95B3DF,
|
||||
0x650A73548BAF63DE, 0x766A0ABB3C77B2A8, 0x81C2C92E47EDAEE6, 0x92722C851482353B,
|
||||
0xA2BFE8A14CF10364, 0xA81A664BBC423001, 0xC24B8B70D0F89791, 0xC76C51A30654BE30,
|
||||
0xD192E819D6EF5218, 0xD69906245565A910, 0xF40E35855771202A, 0x106AA07032BBD1B8,
|
||||
0x19A4C116B8D2D0C8, 0x1E376C085141AB53, 0x2748774CDF8EEB99, 0x34B0BCB5E19B48A8,
|
||||
0x391C0CB3C5C95A63, 0x4ED8AA4AE3418ACB, 0x5B9CCA4F7763E373, 0x682E6FF3D6B2B8A3,
|
||||
0x748F82EE5DEFB2FC, 0x78A5636F43172F60, 0x84C87814A1F0AB72, 0x8CC702081A6439EC,
|
||||
0x90BEFFFA23631E28, 0xA4506CEBDE82BDE9, 0xBEF9A3F7B2C67915, 0xC67178F2E372532B,
|
||||
0xCA273ECEEA26619C, 0xD186B8C721C0C207, 0xEADA7DD6CDE0EB1E, 0xF57D4F7FEE6ED178,
|
||||
0x06F067AA72176FBA, 0x0A637DC5A2C898A6, 0x113F9804BEF90DAE, 0x1B710B35131C471B,
|
||||
0x28DB77F523047D84, 0x32CAAB7B40C72493, 0x3C9EBE0A15C9BEBC, 0x431D67C49C100D4C,
|
||||
0x4CC5D4BECB3E42B6, 0x597F299CFC657E2A, 0x5FCB6FAB3AD6FAEC, 0x6C44198C4A475817
|
||||
};
|
||||
|
||||
|
||||
void sha512Compute32b_parallel(uint64_t *data[SHA512_PARALLEL_N], uint64_t *digest[SHA512_PARALLEL_N]) {
|
||||
Sha512Context context[2];
|
||||
context[0].h[0] = _mm_set1_epi64x(0x6A09E667F3BCC908);
|
||||
context[0].h[1] = _mm_set1_epi64x(0xBB67AE8584CAA73B);
|
||||
context[0].h[2] = _mm_set1_epi64x(0x3C6EF372FE94F82B);
|
||||
context[0].h[3] = _mm_set1_epi64x(0xA54FF53A5F1D36F1);
|
||||
context[0].h[4] = _mm_set1_epi64x(0x510E527FADE682D1);
|
||||
context[0].h[5] = _mm_set1_epi64x(0x9B05688C2B3E6C1F);
|
||||
context[0].h[6] = _mm_set1_epi64x(0x1F83D9ABFB41BD6B);
|
||||
context[0].h[7] = _mm_set1_epi64x(0x5BE0CD19137E2179);
|
||||
|
||||
context[1].h[0] = _mm_set1_epi64x(0x6A09E667F3BCC908);
|
||||
context[1].h[1] = _mm_set1_epi64x(0xBB67AE8584CAA73B);
|
||||
context[1].h[2] = _mm_set1_epi64x(0x3C6EF372FE94F82B);
|
||||
context[1].h[3] = _mm_set1_epi64x(0xA54FF53A5F1D36F1);
|
||||
context[1].h[4] = _mm_set1_epi64x(0x510E527FADE682D1);
|
||||
context[1].h[5] = _mm_set1_epi64x(0x9B05688C2B3E6C1F);
|
||||
context[1].h[6] = _mm_set1_epi64x(0x1F83D9ABFB41BD6B);
|
||||
context[1].h[7] = _mm_set1_epi64x(0x5BE0CD19137E2179);
|
||||
|
||||
for(int i=0; i<4; ++i) {
|
||||
context[0].w[i] = _mm_set_epi64x ( data[1][i], data[0][i] );
|
||||
context[1].w[i] = _mm_set_epi64x ( data[3][i], data[2][i] );
|
||||
}
|
||||
for(int i=0; i<10; ++i) {
|
||||
context[0].w[i+4] = _mm_set1_epi64x( ((uint64_t*)padding)[i] );
|
||||
context[1].w[i+4] = _mm_set1_epi64x( ((uint64_t*)padding)[i] );
|
||||
}
|
||||
|
||||
//Length of the original message (before padding)
|
||||
uint64_t totalSize = 32 * 8;
|
||||
|
||||
//Append the length of the original message
|
||||
context[0].w[14] = _mm_set1_epi64x(0);
|
||||
context[0].w[15] = _mm_set1_epi64x(htobe64(totalSize));
|
||||
|
||||
context[1].w[14] = _mm_set1_epi64x(0);
|
||||
context[1].w[15] = _mm_set1_epi64x(htobe64(totalSize));
|
||||
|
||||
//Calculate the message digest
|
||||
sha512ProcessBlock(context);
|
||||
|
||||
//Convert from host byte order to big-endian byte order
|
||||
for (int i = 0; i < 8; i++) {
|
||||
context[0].h[i] = mm_htobe_epi64(context[0].h[i]);
|
||||
context[1].h[i] = mm_htobe_epi64(context[1].h[i]);
|
||||
}
|
||||
|
||||
//Copy the resulting digest
|
||||
for(int i=0; i<8; ++i) {
|
||||
digest[0][i] = _mm_extract_epi64(context[0].h[i], 0);
|
||||
digest[1][i] = _mm_extract_epi64(context[0].h[i], 1);
|
||||
digest[2][i] = _mm_extract_epi64(context[1].h[i], 0);
|
||||
digest[3][i] = _mm_extract_epi64(context[1].h[i], 1);
|
||||
}
|
||||
}
|
||||
|
||||
#define blk0(n, i) (block[n][i] = mm_betoh_epi64(block[n][i]))
|
||||
#define blk(n, i) (block[n][i] = block[n][i - 16] + SIGMA3(block[n][i - 15]) + \
|
||||
SIGMA4(block[n][i - 2]) + block[n][i - 7])
|
||||
|
||||
#define ROUND512(a,b,c,d,e,f,g,h) \
|
||||
T0 += (h[0]) + SIGMA2(e[0]) + Ch((e[0]), (f[0]), (g[0])) + k[i]; \
|
||||
T1 += (h[1]) + SIGMA2(e[1]) + Ch((e[1]), (f[1]), (g[1])) + k[i]; \
|
||||
(d[0]) += T0; \
|
||||
(d[1]) += T1; \
|
||||
(h[0]) = T0 + SIGMA1(a[0]) + Maj((a[0]), (b[0]), (c[0])); \
|
||||
(h[1]) = T1 + SIGMA1(a[1]) + Maj((a[1]), (b[1]), (c[1])); \
|
||||
i++
|
||||
|
||||
#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
|
||||
T0 = blk0(0, i); \
|
||||
T1 = blk0(1, i); \
|
||||
ROUND512(a,b,c,d,e,f,g,h)
|
||||
|
||||
#define ROUND512_16_TO_80(a,b,c,d,e,f,g,h) \
|
||||
T0 = blk(0, i); \
|
||||
T1 = blk(1, i); \
|
||||
ROUND512(a,b,c,d,e,f,g,h)
|
||||
|
||||
#define R512_0 \
|
||||
ROUND512_0_TO_15(a, b, c, d, e, f, g, h); \
|
||||
ROUND512_0_TO_15(h, a, b, c, d, e, f, g); \
|
||||
ROUND512_0_TO_15(g, h, a, b, c, d, e, f); \
|
||||
ROUND512_0_TO_15(f, g, h, a, b, c, d, e); \
|
||||
ROUND512_0_TO_15(e, f, g, h, a, b, c, d); \
|
||||
ROUND512_0_TO_15(d, e, f, g, h, a, b, c); \
|
||||
ROUND512_0_TO_15(c, d, e, f, g, h, a, b); \
|
||||
ROUND512_0_TO_15(b, c, d, e, f, g, h, a)
|
||||
|
||||
#define R512_16 \
|
||||
ROUND512_16_TO_80(a, b, c, d, e, f, g, h); \
|
||||
ROUND512_16_TO_80(h, a, b, c, d, e, f, g); \
|
||||
ROUND512_16_TO_80(g, h, a, b, c, d, e, f); \
|
||||
ROUND512_16_TO_80(f, g, h, a, b, c, d, e); \
|
||||
ROUND512_16_TO_80(e, f, g, h, a, b, c, d); \
|
||||
ROUND512_16_TO_80(d, e, f, g, h, a, b, c); \
|
||||
ROUND512_16_TO_80(c, d, e, f, g, h, a, b); \
|
||||
ROUND512_16_TO_80(b, c, d, e, f, g, h, a)
|
||||
|
||||
#define INIT(x,n) \
|
||||
x[0] = context[0].h[n]; \
|
||||
x[1] = context[1].h[n]; \
|
||||
|
||||
void sha512ProcessBlock(Sha512Context context[2])
|
||||
{
|
||||
__m128i* block[2];
|
||||
block[0] = context[0].w;
|
||||
block[1] = context[1].w;
|
||||
|
||||
__m128i T0, T1;
|
||||
__m128i a[2], b[2], c[2], d[2], e[2], f[2], g[2], h[2];
|
||||
INIT(a, 0)
|
||||
INIT(b, 1)
|
||||
INIT(c, 2)
|
||||
INIT(d, 3)
|
||||
INIT(e, 4)
|
||||
INIT(f, 5)
|
||||
INIT(g, 6)
|
||||
INIT(h, 7)
|
||||
|
||||
int i = 0;
|
||||
R512_0; R512_0;
|
||||
for(int j=0; j<8; ++j) {
|
||||
R512_16;
|
||||
}
|
||||
|
||||
context[0].h[0] += a[0];
|
||||
context[0].h[1] += b[0];
|
||||
context[0].h[2] += c[0];
|
||||
context[0].h[3] += d[0];
|
||||
context[0].h[4] += e[0];
|
||||
context[0].h[5] += f[0];
|
||||
context[0].h[6] += g[0];
|
||||
context[0].h[7] += h[0];
|
||||
|
||||
context[1].h[0] += a[1];
|
||||
context[1].h[1] += b[1];
|
||||
context[1].h[2] += c[1];
|
||||
context[1].h[3] += d[1];
|
||||
context[1].h[4] += e[1];
|
||||
context[1].h[5] += f[1];
|
||||
context[1].h[6] += g[1];
|
||||
context[1].h[7] += h[1];
|
||||
}
|
||||
|
||||
#endif // __AVX__
|
||||
#endif // __AVX2__
|
@@ -1,241 +0,0 @@
|
||||
#ifdef __AVX2__
|
||||
|
||||
//Dependencies
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#ifdef __FreeBSD__
|
||||
#include <sys/endian.h>
|
||||
#endif
|
||||
|
||||
#if defined(__CYGWIN__)
|
||||
#include <endian.h>
|
||||
#endif
|
||||
|
||||
#include "tmmintrin.h"
|
||||
#include "smmintrin.h"
|
||||
#include "immintrin.h"
|
||||
|
||||
#include "sha512-avx.h"
|
||||
#if ((defined(_WIN64) || defined(__WINDOWS__)))
|
||||
#include "hodl-endian.h"
|
||||
#endif
|
||||
|
||||
//SHA-512 auxiliary functions
|
||||
#define Ch(x, y, z) (((x) & (y)) | (~(x) & (z)))
|
||||
#define Maj(x, y, z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
|
||||
#define SIGMA1(x) (ROR64(x, 28) ^ ROR64(x, 34) ^ ROR64(x, 39))
|
||||
#define SIGMA2(x) (ROR64(x, 14) ^ ROR64(x, 18) ^ ROR64(x, 41))
|
||||
#define SIGMA3(x) (ROR64(x, 1) ^ ROR64(x, 8) ^ SHR64(x, 7))
|
||||
#define SIGMA4(x) (ROR64(x, 19) ^ ROR64(x, 61) ^ SHR64(x, 6))
|
||||
|
||||
//Rotate right operation
|
||||
#define ROR64(a, n) _mm256_or_si256(_mm256_srli_epi64(a, n), _mm256_slli_epi64(a, 64 - n))
|
||||
|
||||
//Shift right operation
|
||||
#define SHR64(a, n) _mm256_srli_epi64(a, n)
|
||||
|
||||
__m256i mm256_htobe_epi64(__m256i a) {
|
||||
__m256i mask = _mm256_set_epi8(
|
||||
24,25,26,27,28,29,30,31,
|
||||
16,17,18,19,20,21,22,23,
|
||||
8, 9, 10, 11, 12, 13, 14, 15,
|
||||
0, 1, 2, 3, 4, 5, 6, 7);
|
||||
return _mm256_shuffle_epi8(a, mask);
|
||||
}
|
||||
|
||||
__m256i mm256_betoh_epi64(__m256i a) {
|
||||
return mm256_htobe_epi64(a);
|
||||
}
|
||||
|
||||
//SHA-512 padding
|
||||
static const uint8_t padding[128] =
|
||||
{
|
||||
0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
|
||||
};
|
||||
|
||||
//SHA-512 constants
|
||||
static const uint64_t k[80] =
|
||||
{
|
||||
0x428A2F98D728AE22, 0x7137449123EF65CD, 0xB5C0FBCFEC4D3B2F, 0xE9B5DBA58189DBBC,
|
||||
0x3956C25BF348B538, 0x59F111F1B605D019, 0x923F82A4AF194F9B, 0xAB1C5ED5DA6D8118,
|
||||
0xD807AA98A3030242, 0x12835B0145706FBE, 0x243185BE4EE4B28C, 0x550C7DC3D5FFB4E2,
|
||||
0x72BE5D74F27B896F, 0x80DEB1FE3B1696B1, 0x9BDC06A725C71235, 0xC19BF174CF692694,
|
||||
0xE49B69C19EF14AD2, 0xEFBE4786384F25E3, 0x0FC19DC68B8CD5B5, 0x240CA1CC77AC9C65,
|
||||
0x2DE92C6F592B0275, 0x4A7484AA6EA6E483, 0x5CB0A9DCBD41FBD4, 0x76F988DA831153B5,
|
||||
0x983E5152EE66DFAB, 0xA831C66D2DB43210, 0xB00327C898FB213F, 0xBF597FC7BEEF0EE4,
|
||||
0xC6E00BF33DA88FC2, 0xD5A79147930AA725, 0x06CA6351E003826F, 0x142929670A0E6E70,
|
||||
0x27B70A8546D22FFC, 0x2E1B21385C26C926, 0x4D2C6DFC5AC42AED, 0x53380D139D95B3DF,
|
||||
0x650A73548BAF63DE, 0x766A0ABB3C77B2A8, 0x81C2C92E47EDAEE6, 0x92722C851482353B,
|
||||
0xA2BFE8A14CF10364, 0xA81A664BBC423001, 0xC24B8B70D0F89791, 0xC76C51A30654BE30,
|
||||
0xD192E819D6EF5218, 0xD69906245565A910, 0xF40E35855771202A, 0x106AA07032BBD1B8,
|
||||
0x19A4C116B8D2D0C8, 0x1E376C085141AB53, 0x2748774CDF8EEB99, 0x34B0BCB5E19B48A8,
|
||||
0x391C0CB3C5C95A63, 0x4ED8AA4AE3418ACB, 0x5B9CCA4F7763E373, 0x682E6FF3D6B2B8A3,
|
||||
0x748F82EE5DEFB2FC, 0x78A5636F43172F60, 0x84C87814A1F0AB72, 0x8CC702081A6439EC,
|
||||
0x90BEFFFA23631E28, 0xA4506CEBDE82BDE9, 0xBEF9A3F7B2C67915, 0xC67178F2E372532B,
|
||||
0xCA273ECEEA26619C, 0xD186B8C721C0C207, 0xEADA7DD6CDE0EB1E, 0xF57D4F7FEE6ED178,
|
||||
0x06F067AA72176FBA, 0x0A637DC5A2C898A6, 0x113F9804BEF90DAE, 0x1B710B35131C471B,
|
||||
0x28DB77F523047D84, 0x32CAAB7B40C72493, 0x3C9EBE0A15C9BEBC, 0x431D67C49C100D4C,
|
||||
0x4CC5D4BECB3E42B6, 0x597F299CFC657E2A, 0x5FCB6FAB3AD6FAEC, 0x6C44198C4A475817
|
||||
};
|
||||
|
||||
|
||||
void sha512Compute32b_parallel(uint64_t *data[SHA512_PARALLEL_N], uint64_t *digest[SHA512_PARALLEL_N]) {
|
||||
Sha512Context context[2];
|
||||
context[0].h[0] = _mm256_set1_epi64x(0x6A09E667F3BCC908);
|
||||
context[0].h[1] = _mm256_set1_epi64x(0xBB67AE8584CAA73B);
|
||||
context[0].h[2] = _mm256_set1_epi64x(0x3C6EF372FE94F82B);
|
||||
context[0].h[3] = _mm256_set1_epi64x(0xA54FF53A5F1D36F1);
|
||||
context[0].h[4] = _mm256_set1_epi64x(0x510E527FADE682D1);
|
||||
context[0].h[5] = _mm256_set1_epi64x(0x9B05688C2B3E6C1F);
|
||||
context[0].h[6] = _mm256_set1_epi64x(0x1F83D9ABFB41BD6B);
|
||||
context[0].h[7] = _mm256_set1_epi64x(0x5BE0CD19137E2179);
|
||||
|
||||
context[1].h[0] = _mm256_set1_epi64x(0x6A09E667F3BCC908);
|
||||
context[1].h[1] = _mm256_set1_epi64x(0xBB67AE8584CAA73B);
|
||||
context[1].h[2] = _mm256_set1_epi64x(0x3C6EF372FE94F82B);
|
||||
context[1].h[3] = _mm256_set1_epi64x(0xA54FF53A5F1D36F1);
|
||||
context[1].h[4] = _mm256_set1_epi64x(0x510E527FADE682D1);
|
||||
context[1].h[5] = _mm256_set1_epi64x(0x9B05688C2B3E6C1F);
|
||||
context[1].h[6] = _mm256_set1_epi64x(0x1F83D9ABFB41BD6B);
|
||||
context[1].h[7] = _mm256_set1_epi64x(0x5BE0CD19137E2179);
|
||||
|
||||
for(int i=0; i<4; ++i) {
|
||||
context[0].w[i] = _mm256_set_epi64x ( data[3][i], data[2][i], data[1][i], data[0][i] );
|
||||
context[1].w[i] = _mm256_set_epi64x ( data[7][i], data[6][i], data[5][i], data[4][i] );
|
||||
}
|
||||
for(int i=0; i<10; ++i) {
|
||||
context[0].w[i+4] = _mm256_set1_epi64x( ((uint64_t*)padding)[i] );
|
||||
context[1].w[i+4] = _mm256_set1_epi64x( ((uint64_t*)padding)[i] );
|
||||
}
|
||||
|
||||
//Length of the original message (before padding)
|
||||
uint64_t totalSize = 32 * 8;
|
||||
|
||||
//Append the length of the original message
|
||||
context[0].w[14] = _mm256_set1_epi64x(0);
|
||||
context[0].w[15] = _mm256_set1_epi64x(htobe64(totalSize));
|
||||
|
||||
context[1].w[14] = _mm256_set1_epi64x(0);
|
||||
context[1].w[15] = _mm256_set1_epi64x(htobe64(totalSize));
|
||||
|
||||
//Calculate the message digest
|
||||
sha512ProcessBlock(context);
|
||||
|
||||
//Convert from host byte order to big-endian byte order
|
||||
for (int i = 0; i < 8; i++) {
|
||||
context[0].h[i] = mm256_htobe_epi64(context[0].h[i]);
|
||||
context[1].h[i] = mm256_htobe_epi64(context[1].h[i]);
|
||||
}
|
||||
|
||||
//Copy the resulting digest
|
||||
for(int i=0; i<8; ++i) {
|
||||
digest[0][i] = _mm256_extract_epi64(context[0].h[i], 0);
|
||||
digest[1][i] = _mm256_extract_epi64(context[0].h[i], 1);
|
||||
digest[2][i] = _mm256_extract_epi64(context[0].h[i], 2);
|
||||
digest[3][i] = _mm256_extract_epi64(context[0].h[i], 3);
|
||||
|
||||
digest[4][i] = _mm256_extract_epi64(context[1].h[i], 0);
|
||||
digest[5][i] = _mm256_extract_epi64(context[1].h[i], 1);
|
||||
digest[6][i] = _mm256_extract_epi64(context[1].h[i], 2);
|
||||
digest[7][i] = _mm256_extract_epi64(context[1].h[i], 3);
|
||||
}
|
||||
}
|
||||
|
||||
#define blk0(n, i) (block[n][i] = mm256_betoh_epi64(block[n][i]))
|
||||
#define blk(n, i) (block[n][i] = block[n][i - 16] + SIGMA3(block[n][i - 15]) + \
|
||||
SIGMA4(block[n][i - 2]) + block[n][i - 7])
|
||||
|
||||
#define ROUND512(a,b,c,d,e,f,g,h) \
|
||||
T0 += (h[0]) + SIGMA2(e[0]) + Ch((e[0]), (f[0]), (g[0])) + k[i]; \
|
||||
T1 += (h[1]) + SIGMA2(e[1]) + Ch((e[1]), (f[1]), (g[1])) + k[i]; \
|
||||
(d[0]) += T0; \
|
||||
(d[1]) += T1; \
|
||||
(h[0]) = T0 + SIGMA1(a[0]) + Maj((a[0]), (b[0]), (c[0])); \
|
||||
(h[1]) = T1 + SIGMA1(a[1]) + Maj((a[1]), (b[1]), (c[1])); \
|
||||
i++
|
||||
|
||||
#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
|
||||
T0 = blk0(0, i); \
|
||||
T1 = blk0(1, i); \
|
||||
ROUND512(a,b,c,d,e,f,g,h)
|
||||
|
||||
#define ROUND512_16_TO_80(a,b,c,d,e,f,g,h) \
|
||||
T0 = blk(0, i); \
|
||||
T1 = blk(1, i); \
|
||||
ROUND512(a,b,c,d,e,f,g,h)
|
||||
|
||||
#define R512_0 \
|
||||
ROUND512_0_TO_15(a, b, c, d, e, f, g, h); \
|
||||
ROUND512_0_TO_15(h, a, b, c, d, e, f, g); \
|
||||
ROUND512_0_TO_15(g, h, a, b, c, d, e, f); \
|
||||
ROUND512_0_TO_15(f, g, h, a, b, c, d, e); \
|
||||
ROUND512_0_TO_15(e, f, g, h, a, b, c, d); \
|
||||
ROUND512_0_TO_15(d, e, f, g, h, a, b, c); \
|
||||
ROUND512_0_TO_15(c, d, e, f, g, h, a, b); \
|
||||
ROUND512_0_TO_15(b, c, d, e, f, g, h, a)
|
||||
|
||||
#define R512_16 \
|
||||
ROUND512_16_TO_80(a, b, c, d, e, f, g, h); \
|
||||
ROUND512_16_TO_80(h, a, b, c, d, e, f, g); \
|
||||
ROUND512_16_TO_80(g, h, a, b, c, d, e, f); \
|
||||
ROUND512_16_TO_80(f, g, h, a, b, c, d, e); \
|
||||
ROUND512_16_TO_80(e, f, g, h, a, b, c, d); \
|
||||
ROUND512_16_TO_80(d, e, f, g, h, a, b, c); \
|
||||
ROUND512_16_TO_80(c, d, e, f, g, h, a, b); \
|
||||
ROUND512_16_TO_80(b, c, d, e, f, g, h, a)
|
||||
|
||||
#define INIT(x,n) \
|
||||
x[0] = context[0].h[n]; \
|
||||
x[1] = context[1].h[n]; \
|
||||
|
||||
void sha512ProcessBlock(Sha512Context context[2])
|
||||
{
|
||||
__m256i* block[2];
|
||||
block[0] = context[0].w;
|
||||
block[1] = context[1].w;
|
||||
|
||||
__m256i T0, T1;
|
||||
__m256i a[2], b[2], c[2], d[2], e[2], f[2], g[2], h[2];
|
||||
INIT(a, 0)
|
||||
INIT(b, 1)
|
||||
INIT(c, 2)
|
||||
INIT(d, 3)
|
||||
INIT(e, 4)
|
||||
INIT(f, 5)
|
||||
INIT(g, 6)
|
||||
INIT(h, 7)
|
||||
|
||||
int i = 0;
|
||||
R512_0; R512_0;
|
||||
for(int j=0; j<8; ++j) {
|
||||
R512_16;
|
||||
}
|
||||
|
||||
context[0].h[0] += a[0];
|
||||
context[0].h[1] += b[0];
|
||||
context[0].h[2] += c[0];
|
||||
context[0].h[3] += d[0];
|
||||
context[0].h[4] += e[0];
|
||||
context[0].h[5] += f[0];
|
||||
context[0].h[6] += g[0];
|
||||
context[0].h[7] += h[0];
|
||||
|
||||
context[1].h[0] += a[1];
|
||||
context[1].h[1] += b[1];
|
||||
context[1].h[2] += c[1];
|
||||
context[1].h[3] += d[1];
|
||||
context[1].h[4] += e[1];
|
||||
context[1].h[5] += f[1];
|
||||
context[1].h[6] += g[1];
|
||||
context[1].h[7] += h[1];
|
||||
}
|
||||
|
||||
#endif // __AVX2__
|
@@ -1,25 +0,0 @@
#ifndef __WOLF_AES_H
#define __WOLF_AES_H

#include <stdint.h>
#include "simd-utils.h"

void ExpandAESKey256(v128_t *keys, const v128_t *KeyBuf);

#if defined(__SSE4_2__)
//#ifdef __AVX__

#define AES_PARALLEL_N 8
#define BLOCK_COUNT 256

void AES256CBC( v128_t** data, const v128_t** next, v128_t ExpandedKey[][16],
                v128_t* IV );

#else

void AES256CBC( v128_t *Ciphertext, const v128_t *Plaintext,
                const v128_t *ExpandedKey, v128_t IV, uint32_t BlockCount );

#endif

#endif // __WOLF_AES_H
@@ -852,48 +852,10 @@ void jh512_4x64_ctx( jh_4x64_context *cc, void *dst, const void *data, size_t le
|
||||
|
||||
// SSE2 & NEON
|
||||
|
||||
#if defined(__AVX512VL__)
|
||||
//TODO enable for AVX10_256, not used with AVX512VL
|
||||
|
||||
#define v128_notxorandnot( a, b, c ) \
|
||||
_mm_ternarylogic_epi64( a, b, c, 0x2d )
|
||||
|
||||
#else
|
||||
|
||||
#define v128_notxorandnot( a, b, c ) \
|
||||
v128_xor( v128_not( a ), v128_andnot( b, c ) )
|
||||
|
||||
#endif
|
||||
|
||||
#define Sb(x0, x1, x2, x3, c) \
|
||||
{ \
|
||||
v128u64_t cc = v128_64( c ); \
|
||||
x3 = v128_not( x3 ); \
|
||||
x0 = v128_xor( x0, v128_andnot( x2, cc ) ); \
|
||||
tmp = v128_xor( cc, v128_and( x0, x1 ) ); \
|
||||
x0 = v128_xor( x0, v128_and( x2, x3 ) ); \
|
||||
x3 = v128_xor( x3, v128_andnot( x1, x2 ) ); \
|
||||
x1 = v128_xor( x1, v128_and( x0, x2 ) ); \
|
||||
x2 = v128_xor( x2, v128_andnot( x3, x0 ) ); \
|
||||
x0 = v128_xor( x0, v128_or( x1, x3 ) ); \
|
||||
x3 = v128_xor( x3, v128_and( x1, x2 ) ); \
|
||||
x1 = v128_xor( x1, v128_and( tmp, x0 ) ); \
|
||||
x2 = v128_xor( x2, tmp ); \
|
||||
}
|
||||
|
||||
#define Lb(x0, x1, x2, x3, x4, x5, x6, x7) \
|
||||
{ \
|
||||
x4 = v128_xor( x4, x1 ); \
|
||||
x5 = v128_xor( x5, x2 ); \
|
||||
x6 = v128_xor( x6, v128_xor( x3, x0 ) ); \
|
||||
x7 = v128_xor( x7, x0 ); \
|
||||
x0 = v128_xor( x0, x5 ); \
|
||||
x1 = v128_xor( x1, x6 ); \
|
||||
x2 = v128_xor( x2, v128_xor( x7, x4 ) ); \
|
||||
x3 = v128_xor( x3, x4 ); \
|
||||
}
|
||||
|
||||
/*
|
||||
#define Sb(x0, x1, x2, x3, c) \
|
||||
{ \
|
||||
const v128u64_t cc = v128_64( c ); \
|
||||
@@ -920,7 +882,6 @@ void jh512_4x64_ctx( jh_4x64_context *cc, void *dst, const void *data, size_t le
|
||||
x2 = v128_xor3( x2, x7, x4 ); \
|
||||
x3 = v128_xor( x3, x4 ); \
|
||||
}
|
||||
*/
|
||||
|
||||
#undef Wz
|
||||
#define Wz(x, c, n) \
|
||||
|
@@ -563,7 +563,7 @@ static void keccak64x2_close( keccak64_ctx_v128 *kc, void *dst,
{
   unsigned eb;
   union {
      v128_t tmp[lim + 1];
      v128_t tmp[140];
      uint64_t dummy; /* for alignment */
   } u;
   size_t j;
@@ -33,43 +33,39 @@
|
||||
|
||||
#define MULT2( a0, a1 ) \
|
||||
{ \
|
||||
v128_t b = v128_xor( a0, _mm_maskz_shuffle_epi32( 0xb, a1, 0x10 ) ); \
|
||||
v128_t b = v128_xor( a0, _mm_maskz_shuffle_epi32( 0xb, a1, 0 ) ); \
|
||||
a0 = _mm_alignr_epi8( a1, b, 4 ); \
|
||||
a1 = _mm_alignr_epi8( b, a1, 4 ); \
|
||||
}
|
||||
|
||||
#elif defined(__SSE4_1__)
|
||||
|
||||
#define MULT2( a0, a1 ) do \
|
||||
#define MULT2( a0, a1 ) \
|
||||
{ \
|
||||
v128_t b = v128_xor( a0, \
|
||||
_mm_shuffle_epi32( mm128_mask_32( a1, 0xe ), 0x10 ) ); \
|
||||
v128_t b = _mm_shuffle_epi32( a1, 0 ); \
|
||||
b = v128_xor( a0, v128_mask32( b, 0x4 ) ); \
|
||||
a0 = _mm_alignr_epi8( a1, b, 4 ); \
|
||||
a1 = _mm_alignr_epi8( b, a1, 4 ); \
|
||||
} while(0)
|
||||
}
|
||||
|
||||
#elif defined(__ARM_NEON)
|
||||
|
||||
const uint32x4_t mask = { 0xffffffff, 0, 0xffffffff, 0xffffffff };
|
||||
|
||||
// { a1_0, 0, a1_0, a1_0 }
|
||||
#define MULT2( a0, a1 ) \
|
||||
{ \
|
||||
v128_t b = v128_xor( a0, \
|
||||
v128_and( v128_32( vgetq_lane_u32( a1, 0 ) ), mask ) ); \
|
||||
v128_t b = v128_xor( a0, v128_and( vdupq_laneq_u32( a1, 0 ), MASK ) ); \
|
||||
a0 = v128_alignr32( a1, b, 1 ); \
|
||||
a1 = v128_alignr32( b, a1, 1 ); \
|
||||
}
|
||||
|
||||
#else // assume SSE2
|
||||
|
||||
#define MULT2( a0, a1 ) do \
|
||||
#define MULT2( a0, a1 ) \
|
||||
{ \
|
||||
v128_t b = v128_xor( a0, \
|
||||
_mm_shuffle_epi32( v128_and( a1, MASK ), 0x10 ) ); \
|
||||
v128_t b = v128_xor( a0, v128_and( _mm_shuffle_epi32( a1, 0 ), MASK ) ); \
|
||||
a0 = v128_or( _mm_srli_si128( b, 4 ), _mm_slli_si128( a1, 12 ) ); \
|
||||
a1 = v128_or( _mm_srli_si128( a1, 4 ), _mm_slli_si128( b, 12 ) ); \
|
||||
} while(0)
|
||||
}
|
||||
|
||||
#endif
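For comparison with the vector variants above, the multiply-by-2 step used in Luffa's message injection can also be written scalar over eight 32-bit words. The sketch below follows the ordering commonly used in generic (non-SIMD) Luffa implementations, with feedback taps from the polynomial x^8 + x^4 + x^3 + x + 1; the word layout inside the two 128-bit halves handled by the macros above may differ, so this is illustrative only:

/* Illustrative scalar reference for Luffa's multiply-by-2 step, as commonly
   written in generic (non-SIMD) Luffa implementations. Word ordering may not
   match the layout used by the vector macros above. */
#include <stdint.h>

static void luffa_mult2_scalar( uint32_t a[8] )
{
   uint32_t t = a[7];
   a[7] = a[6];
   a[6] = a[5];
   a[5] = a[4];
   a[4] = a[3] ^ t;
   a[3] = a[2] ^ t;
   a[2] = a[1];
   a[1] = a[0] ^ t;
   a[0] = t;
}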
|
||||
|
||||
@@ -137,8 +133,8 @@ const uint32x4_t mask = { 0xffffffff, 0, 0xffffffff, 0xffffffff };
|
||||
t0 = v128_shufll32( a1 ); \
|
||||
a1 = v128_unpacklo32( t0, a0 ); \
|
||||
t0 = v128_unpackhi32( t0, a0 ); \
|
||||
t1 = v128_swap64( t0 ); \
|
||||
a0 = v128_swap64( a1 ); \
|
||||
t1 = v128_rev64( t0 ); \
|
||||
a0 = v128_rev64( a1 ); \
|
||||
SUBCRUMB( t1, t0, a0, a1 ); \
|
||||
t0 = v128_unpacklo32( t0, t1 ); \
|
||||
a1 = v128_unpacklo32( a1, a0 ); \
|
||||
@@ -224,9 +220,10 @@ static const uint32_t CNS_INIT[128] __attribute((aligned(16))) = {
|
||||
};
|
||||
|
||||
|
||||
v128_t CNS128[32];
|
||||
static v128_t CNS128[32];
|
||||
|
||||
#if !defined(__SSE4_1__)
|
||||
v128_t MASK;
|
||||
static v128_t MASK;
|
||||
#endif
|
||||
|
||||
int init_luffa(hashState_luffa *state, int hashbitlen)
|
||||
@@ -235,13 +232,13 @@ int init_luffa(hashState_luffa *state, int hashbitlen)
|
||||
state->hashbitlen = hashbitlen;
|
||||
#if !defined(__SSE4_1__)
|
||||
/* set the lower 32 bits to '1' */
|
||||
MASK = v128_set32(0x00000000, 0x00000000, 0x00000000, 0xffffffff);
|
||||
MASK = v128_set32( 0xffffffff, 0, 0xffffffff, 0xffffffff );
|
||||
#endif
|
||||
/* set the 32-bit round constant values to the 128-bit data field */
|
||||
for ( i=0; i<32; i++ )
|
||||
CNS128[i] = v128_load( (v128_t*)&CNS_INIT[i*4] );
|
||||
for ( i=0; i<10; i++ )
|
||||
state->chainv[i] = v128_load( (v128_t*)&IV[i*4] );
|
||||
state->chainv[i] = v128_load( (v128_t*)&IV[i*4] );
|
||||
memset(state->buffer, 0, sizeof state->buffer );
|
||||
return 0;
|
||||
}
|
||||
@@ -268,7 +265,7 @@ int update_luffa( hashState_luffa *state, const void *data,
|
||||
// remaining data bytes
|
||||
casti_v128( state->buffer, 0 ) = v128_bswap32( cast_v128( data ) );
|
||||
// padding of partial block
|
||||
casti_v128( state->buffer, 1 ) = v128_set32( 0, 0, 0, 0x80000000 );
|
||||
casti_v128( state->buffer, 1 ) = v128_set32( 0, 0, 0, 0x80000000 );
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -327,7 +324,6 @@ int update_and_final_luffa( hashState_luffa *state, void* output,
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int luffa_full( hashState_luffa *state, void* output, int hashbitlen,
|
||||
const void* data, size_t inlen )
|
||||
{
|
||||
@@ -336,13 +332,13 @@ int luffa_full( hashState_luffa *state, void* output, int hashbitlen,
|
||||
state->hashbitlen = hashbitlen;
|
||||
#if !defined(__SSE4_1__)
|
||||
/* set the lower 32 bits to '1' */
|
||||
MASK= v128_set64( 0, 0x00000000ffffffff );
|
||||
MASK= v128_set32( 0xffffffff, 0, 0xffffffff, 0xffffffff );
|
||||
#endif
|
||||
/* set the 32-bit round constant values to the 128-bit data field */
|
||||
for ( i=0; i<32; i++ )
|
||||
CNS128[i] = v128_load( (v128_t*)&CNS_INIT[i*4] );
|
||||
CNS128[i] = casti_v128( CNS_INIT, i );
|
||||
for ( i=0; i<10; i++ )
|
||||
state->chainv[i] = v128_load( (v128_t*)&IV[i*4] );
|
||||
state->chainv[i] = casti_v128( IV, i );
|
||||
memset(state->buffer, 0, sizeof state->buffer );
|
||||
|
||||
// update
|
||||
@@ -376,16 +372,15 @@ int luffa_full( hashState_luffa *state, void* output, int hashbitlen,
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/***************************************************/
|
||||
/* Round function */
|
||||
/* state: hash context */
|
||||
|
||||
static void rnd512( hashState_luffa *state, v128_t msg1, v128_t msg0 )
|
||||
{
|
||||
v128_t t0, t1;
|
||||
v128_t *chainv = state->chainv;
|
||||
v128_t x0, x1, x2, x3, x4, x5, x6, x7;
|
||||
v128u32_t t0, t1;
|
||||
v128u32_t *chainv = state->chainv;
|
||||
v128u32_t x0, x1, x2, x3, x4, x5, x6, x7;
|
||||
|
||||
t0 = v128_xor3( chainv[0], chainv[2], chainv[4] );
|
||||
t1 = v128_xor3( chainv[1], chainv[3], chainv[5] );
|
||||
@@ -472,7 +467,7 @@ static void rnd512( hashState_luffa *state, v128_t msg1, v128_t msg0 )
|
||||
chainv[5] = v128_rol32( chainv[5], 2 );
|
||||
chainv[7] = v128_rol32( chainv[7], 3 );
|
||||
chainv[9] = v128_rol32( chainv[9], 4 );
|
||||
|
||||
|
||||
NMLTOM1024( chainv[0], chainv[2], chainv[4], chainv[6], x0, x1, x2, x3,
|
||||
chainv[1], chainv[3], chainv[5], chainv[7], x4, x5, x6, x7 );
|
||||
|
||||
|
@@ -11,7 +11,7 @@
|
||||
#endif
|
||||
#include "algo/keccak/sph_keccak.h"
|
||||
#include "algo/skein/sph_skein.h"
|
||||
#if !( defined(__AES__) || defined(__ARM_FEATURE_AES) )
|
||||
#if !defined(__AES__) // && !defined(__ARM_FEATURE_AES) )
|
||||
#include "algo/groestl/sph_groestl.h"
|
||||
#endif
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
#define ALLIUM_16WAY 1
|
||||
#elif defined(__AVX2__)
|
||||
#define ALLIUM_8WAY 1
|
||||
#elif #defined(__SSE2__) || defined(__ARM_NEON)
|
||||
#elif defined(__SSE2__) || defined(__ARM_NEON)
|
||||
#define ALLIUM_4WAY 1
|
||||
#endif
|
||||
|
||||
@@ -30,7 +30,7 @@ typedef union {
|
||||
cube_4way_2buf_context cube;
|
||||
skein256_8way_context skein;
|
||||
#if defined(__VAES__)
|
||||
groestl256_4way_context groestl;
|
||||
groestl256_4way_context groestl;
|
||||
#else
|
||||
hashState_groestl256 groestl;
|
||||
#endif
|
||||
@@ -465,12 +465,12 @@ typedef union
|
||||
{
|
||||
keccak256_2x64_context keccak;
|
||||
cubehashParam cube;
|
||||
#if defined(__x86_64__)
|
||||
//#if defined(__x86_64__)
|
||||
skein256_2x64_context skein;
|
||||
#else
|
||||
sph_skein512_context skein;
|
||||
#endif
|
||||
#if defined(__AES__) || defined(__ARM_FEATURE_AES)
|
||||
//#else
|
||||
// sph_skein512_context skein;
|
||||
//#endif
|
||||
#if defined(__AES__) // || defined(__ARM_FEATURE_AES)
|
||||
hashState_groestl256 groestl;
|
||||
#else
|
||||
sph_groestl256_context groestl;
|
||||
@@ -516,7 +516,7 @@ static void allium_4way_hash( void *hash, const void *midstate_vars,
|
||||
LYRA2RE( hash2, 32, hash2, 32, hash2, 32, 1, 8, 8 );
|
||||
LYRA2RE( hash3, 32, hash3, 32, hash3, 32, 1, 8, 8 );
|
||||
|
||||
#if defined(__x86_64__)
|
||||
//#if defined(__x86_64__)
|
||||
intrlv_2x64( vhashA, hash0, hash1, 256 );
|
||||
skein256_2x64_init( &ctx.skein );
|
||||
skein256_2x64_update( &ctx.skein, vhashA, 32 );
|
||||
@@ -527,6 +527,7 @@ static void allium_4way_hash( void *hash, const void *midstate_vars,
|
||||
skein256_2x64_update( &ctx.skein, vhashA, 32 );
|
||||
skein256_2x64_close( &ctx.skein, vhashA );
|
||||
dintrlv_2x64( hash2, hash3, vhashA, 256 );
|
||||
/*
|
||||
#else
|
||||
sph_skein256_init( &ctx.skein );
|
||||
sph_skein256( &ctx.skein, hash0, 32 );
|
||||
@@ -541,8 +542,8 @@ static void allium_4way_hash( void *hash, const void *midstate_vars,
|
||||
sph_skein256( &ctx.skein, hash3, 32 );
|
||||
sph_skein256_close( &ctx.skein, hash3 );
|
||||
#endif
|
||||
|
||||
#if defined(__AES__) || defined(__ARM_FEATURE_AES)
|
||||
*/
|
||||
#if defined(__AES__) // || defined(__ARM_FEATURE_AES)
|
||||
groestl256_full( &ctx.groestl, hash0, hash0, 256 );
|
||||
groestl256_full( &ctx.groestl, hash1, hash1, 256 );
|
||||
groestl256_full( &ctx.groestl, hash2, hash2, 256 );
|
||||
|
@@ -1,6 +1,8 @@
|
||||
#include "cpuminer-config.h"
|
||||
#include "algo-gate-api.h"
|
||||
|
||||
#if !defined(__APPLE__)
|
||||
|
||||
#include <gmp.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdlib.h>
|
||||
@@ -296,8 +298,14 @@ int scanhash_m7m_hash( struct work* work, uint64_t max_nonce,
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif // not apple
|
||||
|
||||
bool register_m7m_algo( algo_gate_t *gate )
|
||||
{
|
||||
#if defined(__APPLE__)
|
||||
applog( LOG_ERR, "M7M algo is not supported on MacOS");
|
||||
return false;
|
||||
#else
|
||||
gate->optimizations = SHA_OPT;
|
||||
init_m7m_ctx();
|
||||
gate->scanhash = (void*)&scanhash_m7m_hash;
|
||||
@@ -307,6 +315,6 @@ bool register_m7m_algo( algo_gate_t *gate )
|
||||
gate->set_work_data_endian = (void*)&set_work_data_big_endian;
|
||||
opt_target_factor = 65536.0;
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
@@ -2303,9 +2303,8 @@ static inline void salsa_simd128_shuffle_2buf( uint32_t *xa, uint32_t *xb )
|
||||
XB[2] = _mm_blend_epi16( t0, t2, 0x0f );
|
||||
XB[3] = _mm_blend_epi16( t1, t3, 0xc3 );
|
||||
|
||||
#elif defined(__SSE2__) || defined(__ARM_NEON)
|
||||
#else // SSE2 or NEON
|
||||
|
||||
/*
|
||||
const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
|
||||
const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
|
||||
const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
|
||||
@@ -2326,9 +2325,10 @@ static inline void salsa_simd128_shuffle_2buf( uint32_t *xa, uint32_t *xb )
|
||||
XB[1] = v128_blendv( t1, t3, mask_3c );
|
||||
XB[2] = v128_blendv( t2, t0, mask_f0 );
|
||||
XB[3] = v128_blendv( t3, t1, mask_3c );
|
||||
*/
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
v128_t YA0, YA1, YA2, YA3, YB0, YB1, YB2, YB3;
|
||||
|
||||
YA0 = v128_set32( xa[15], xa[10], xa[ 5], xa[ 0] );
|
||||
@@ -2348,8 +2348,7 @@ static inline void salsa_simd128_shuffle_2buf( uint32_t *xa, uint32_t *xb )
|
||||
XB[2] = YB2;
|
||||
XA[3] = YA3;
|
||||
XB[3] = YB3;
|
||||
|
||||
#endif
|
||||
*/
|
||||
}
|
||||
|
||||
static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
|
||||
@@ -2357,8 +2356,8 @@ static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
|
||||
|
||||
v128_t *XA = (v128_t*)xa;
|
||||
v128_t *XB = (v128_t*)xb;
|
||||
|
||||
#if defined(__SSE4_1__)
|
||||
|
||||
#if defined(__SSE4_1__)
|
||||
|
||||
v128_t t0 = _mm_blend_epi16( XA[0], XA[2], 0xf0 );
|
||||
v128_t t1 = _mm_blend_epi16( XA[0], XA[2], 0x0f );
|
||||
@@ -2377,9 +2376,8 @@ static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
|
||||
XB[2] = _mm_blend_epi16( t1, t3, 0xcc );
|
||||
XB[3] = _mm_blend_epi16( t1, t3, 0x33 );
|
||||
|
||||
#elif defined(__SSE2__) || defined(__ARM_NEON)
|
||||
#else // SSE2 or NEON
|
||||
|
||||
/*
|
||||
const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
|
||||
const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
|
||||
const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
|
||||
@@ -2389,19 +2387,21 @@ static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
|
||||
v128_t t2 = v128_blendv( XA[1], XA[3], mask_3c );
|
||||
v128_t t3 = v128_blendv( XA[3], XA[1], mask_3c );
|
||||
XA[0] = v128_blendv( t0, t2, mask_cc );
|
||||
XA[1] = v128_blendv( t1, t3, mask_cc );
|
||||
XA[2] = v128_blendv( t2, t0, mask_cc );
|
||||
XA[1] = v128_blendv( t2, t0, mask_cc );
|
||||
XA[2] = v128_blendv( t1, t3, mask_cc );
|
||||
XA[3] = v128_blendv( t3, t1, mask_cc );
|
||||
t0 = v128_blendv( XB[0], XB[2], mask_f0 );
|
||||
t1 = v128_blendv( XB[1], XB[3], mask_3c );
|
||||
t2 = v128_blendv( XB[2], XB[0], mask_f0 );
|
||||
t1 = v128_blendv( XB[2], XB[0], mask_f0 );
|
||||
t2 = v128_blendv( XB[1], XB[3], mask_3c );
|
||||
t3 = v128_blendv( XB[3], XB[1], mask_3c );
|
||||
XB[0] = v128_blendv( t0, t2, mask_cc );
|
||||
XB[1] = v128_blendv( t1, t3, mask_cc );
|
||||
XB[2] = v128_blendv( t2, t0, mask_cc );
|
||||
XB[1] = v128_blendv( t2, t0, mask_cc );
|
||||
XB[2] = v128_blendv( t1, t3, mask_cc );
|
||||
XB[3] = v128_blendv( t3, t1, mask_cc );
|
||||
*/
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
v128_ovly ya[4], za[4], yb[4], zb[4];
|
||||
|
||||
ya[0].m128 = XA[0];
|
||||
@@ -2457,9 +2457,7 @@ static inline void salsa_simd128_unshuffle_2buf( uint32_t* xa, uint32_t* xb )
|
||||
XB[2] = zb[2].m128;
|
||||
XA[3] = za[3].m128;
|
||||
XB[3] = zb[3].m128;
|
||||
|
||||
|
||||
#endif
|
||||
*/
|
||||
}
|
||||
|
||||
static void salsa8_simd128_2buf( uint32_t * const ba, uint32_t * const bb,
|
||||
@@ -2611,7 +2609,7 @@ static inline void salsa_simd128_shuffle_3buf( uint32_t *xa, uint32_t *xb,
|
||||
v128_t *XB = (v128_t*)xb;
|
||||
v128_t *XC = (v128_t*)xc;
|
||||
|
||||
#if defined(__SSE4_1__)
|
||||
#if defined(__SSE4_1__)
|
||||
|
||||
v128_t t0 = _mm_blend_epi16( XA[0], XA[1], 0xcc );
|
||||
v128_t t1 = _mm_blend_epi16( XA[0], XA[1], 0x33 );
|
||||
@@ -2638,9 +2636,8 @@ static inline void salsa_simd128_shuffle_3buf( uint32_t *xa, uint32_t *xb,
|
||||
XC[2] = _mm_blend_epi16( t0, t2, 0x0f );
|
||||
XC[3] = _mm_blend_epi16( t1, t3, 0xc3 );
|
||||
|
||||
#elif defined(__SSE2__) || defined(__ARM_NEON)
|
||||
#else // SSE2 or NEON
|
||||
|
||||
/*
|
||||
const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
|
||||
const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
|
||||
const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
|
||||
@@ -2650,28 +2647,29 @@ static inline void salsa_simd128_shuffle_3buf( uint32_t *xa, uint32_t *xb,
|
||||
v128_t t2 = v128_blendv( XA[2], XA[3], mask_cc );
|
||||
v128_t t3 = v128_blendv( XA[3], XA[2], mask_cc );
|
||||
XA[0] = v128_blendv( t0, t2, mask_f0 );
|
||||
XA[1] = v128_blendv( t1, t3, mask_3c );
|
||||
XA[2] = v128_blendv( t2, t0, mask_f0 );
|
||||
XA[1] = v128_blendv( t2, t0, mask_f0 );
|
||||
XA[2] = v128_blendv( t1, t3, mask_3c );
|
||||
XA[3] = v128_blendv( t3, t1, mask_3c );
|
||||
t0 = v128_blendv( XB[0], XB[1], mask_cc );
|
||||
t1 = v128_blendv( XB[1], XB[0], mask_cc );
|
||||
t2 = v128_blendv( XB[2], XB[3], mask_cc );
|
||||
t3 = v128_blendv( XB[3], XB[2], mask_cc );
|
||||
XB[0] = v128_blendv( t0, t2, mask_f0 );
|
||||
XB[1] = v128_blendv( t1, t3, mask_3c );
|
||||
XB[2] = v128_blendv( t2, t0, mask_f0 );
|
||||
XB[1] = v128_blendv( t2, t0, mask_f0 );
|
||||
XB[2] = v128_blendv( t1, t3, mask_3c );
|
||||
XB[3] = v128_blendv( t3, t1, mask_3c );
|
||||
t0 = v128_blendv( XC[0], XC[1], mask_cc );
|
||||
t1 = v128_blendv( XC[1], XC[0], mask_cc );
|
||||
t2 = v128_blendv( XC[2], XC[3], mask_cc );
|
||||
t3 = v128_blendv( XC[3], XC[2], mask_cc );
|
||||
XC[0] = v128_blendv( t0, t2, mask_f0 );
|
||||
XC[1] = v128_blendv( t1, t3, mask_3c );
|
||||
XC[2] = v128_blendv( t2, t0, mask_f0 );
|
||||
XC[1] = v128_blendv( t2, t0, mask_f0 );
|
||||
XC[2] = v128_blendv( t1, t3, mask_3c );
|
||||
XC[3] = v128_blendv( t3, t1, mask_3c );
|
||||
*/
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
v128_t YA0, YA1, YA2, YA3, YB0, YB1, YB2, YB3, YC0, YC1, YC2, YC3;
|
||||
|
||||
YA0 = v128_set32( xa[15], xa[10], xa[ 5], xa[ 0] );
|
||||
@@ -2699,9 +2697,7 @@ static inline void salsa_simd128_shuffle_3buf( uint32_t *xa, uint32_t *xb,
|
||||
XA[3] = YA3;
|
||||
XB[3] = YB3;
|
||||
XC[3] = YC3;
|
||||
|
||||
|
||||
#endif
|
||||
*/
|
||||
}
|
||||
|
||||
static inline void salsa_simd128_unshuffle_3buf( uint32_t* xa, uint32_t* xb,
|
||||
@@ -2738,9 +2734,8 @@ static inline void salsa_simd128_unshuffle_3buf( uint32_t* xa, uint32_t* xb,
|
||||
XC[2] = _mm_blend_epi16( t1, t3, 0xcc );
|
||||
XC[3] = _mm_blend_epi16( t1, t3, 0x33 );
|
||||
|
||||
#elif defined(__SSE2__) || defined(__ARM_NEON)
|
||||
#else // SSE2 or NEON
|
||||
|
||||
/*
|
||||
const v128u64_t mask_cc = v128_set64(0xffffffff00000000, 0xffffffff00000000);
|
||||
const v128u64_t mask_f0 = v128_set64(0xffffffffffffffff, 0);
|
||||
const v128u64_t mask_3c = v128_set64(0x00000000ffffffff, 0xffffffff00000000);
|
||||
@@ -2750,27 +2745,29 @@ static inline void salsa_simd128_unshuffle_3buf( uint32_t* xa, uint32_t* xb,
|
||||
v128_t t2 = v128_blendv( XA[1], XA[3], mask_3c );
|
||||
v128_t t3 = v128_blendv( XA[3], XA[1], mask_3c );
|
||||
XA[0] = v128_blendv( t0, t2, mask_cc );
|
||||
XA[1] = v128_blendv( t1, t3, mask_cc );
|
||||
XA[2] = v128_blendv( t2, t0, mask_cc );
|
||||
XA[1] = v128_blendv( t2, t0, mask_cc );
|
||||
XA[2] = v128_blendv( t1, t3, mask_cc );
|
||||
XA[3] = v128_blendv( t3, t1, mask_cc );
|
||||
t0 = v128_blendv( XB[0], XB[2], mask_f0 );
|
||||
t1 = v128_blendv( XB[1], XB[3], mask_3c );
|
||||
t2 = v128_blendv( XB[2], XB[0], mask_f0 );
|
||||
t1 = v128_blendv( XB[2], XB[0], mask_f0 );
|
||||
t2 = v128_blendv( XB[1], XB[3], mask_3c );
|
||||
t3 = v128_blendv( XB[3], XB[1], mask_3c );
|
||||
XB[0] = v128_blendv( t0, t2, mask_cc );
|
||||
XB[1] = v128_blendv( t1, t3, mask_cc );
|
||||
XB[2] = v128_blendv( t2, t0, mask_cc );
|
||||
XB[1] = v128_blendv( t2, t0, mask_cc );
|
||||
XB[2] = v128_blendv( t1, t3, mask_cc );
|
||||
XB[3] = v128_blendv( t3, t1, mask_cc );
|
||||
t0 = v128_blendv( XC[0], XC[2], mask_f0 );
|
||||
t1 = v128_blendv( XC[1], XC[3], mask_3c );
|
||||
t2 = v128_blendv( XC[2], XC[0], mask_f0 );
|
||||
t1 = v128_blendv( XC[2], XC[0], mask_f0 );
|
||||
t2 = v128_blendv( XC[1], XC[3], mask_3c );
|
||||
t3 = v128_blendv( XC[3], XC[1], mask_3c );
|
||||
XC[0] = v128_blendv( t0, t2, mask_cc );
|
||||
XC[1] = v128_blendv( t1, t3, mask_cc );
|
||||
XC[2] = v128_blendv( t2, t0, mask_cc );
|
||||
XC[1] = v128_blendv( t2, t0, mask_cc );
|
||||
XC[2] = v128_blendv( t1, t3, mask_cc );
|
||||
XC[3] = v128_blendv( t3, t1, mask_cc );
|
||||
*/
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
v128_ovly ya[4], za[4], yb[4], zb[4], yc[4], zc[4];
|
||||
|
||||
ya[0].m128 = XA[0];
|
||||
@@ -2850,9 +2847,7 @@ static inline void salsa_simd128_unshuffle_3buf( uint32_t* xa, uint32_t* xb,
|
||||
XA[3] = za[3].m128;
|
||||
XB[3] = zb[3].m128;
|
||||
XC[3] = zc[3].m128;
|
||||
|
||||
|
||||
#endif
|
||||
*/
|
||||
}
|
||||
|
||||
// Triple buffered, 3x memory usage
|
||||
|
@@ -35,41 +35,47 @@
|
||||
//#include <mm_malloc.h>
|
||||
#include "malloc-huge.h"
|
||||
|
||||
static const uint32_t keypad[12] = {
|
||||
0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x00000280
|
||||
};
|
||||
static const uint32_t innerpad[11] = {
|
||||
0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x000004a0
|
||||
};
|
||||
static const uint32_t outerpad[8] = {
|
||||
0x80000000, 0, 0, 0, 0, 0, 0, 0x00000300
|
||||
};
|
||||
static const uint32_t finalblk[16] = {
|
||||
0x00000001, 0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x00000620
|
||||
};
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
#define SCRYPT_THROUGHPUT 16
|
||||
#elif defined(__SHA__) || defined(__ARM_FEATURE_SHA2)
|
||||
#define SCRYPT_THROUGHPUT 2
|
||||
#elif defined(__AVX2__)
|
||||
#define SCRYPT_THROUGHPUT 8
|
||||
#elif defined(__SSE2__) || defined(__ARM_NEON)
|
||||
#define SCRYPT_THROUGHPUT 4
|
||||
#else
|
||||
#define SCRYPT_THROUGHPUT 1
|
||||
#endif
|
||||
|
||||
static const uint32_t sha256_initial_state[8] =
|
||||
static const uint32_t sha256_initial_state[8] __attribute((aligned(32))) =
|
||||
{
|
||||
0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
|
||||
0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
|
||||
};
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
#define SCRYPT_THROUGHPUT 16
|
||||
#elif defined(__AVX2__)
|
||||
#define SCRYPT_THROUGHPUT 8
|
||||
#elif defined(__SHA__) // NEON?
|
||||
#define SCRYPT_THROUGHPUT 2
|
||||
#else
|
||||
#define SCRYPT_THROUGHPUT 4
|
||||
#endif
|
||||
|
||||
// static int scrypt_throughput = 0;
|
||||
|
||||
static int scratchbuf_size = 0;
|
||||
|
||||
static __thread uint32_t *scratchbuf = NULL;
|
||||
|
||||
#if (SCRYPT_THROUGHPUT == 1) || defined(__SHA__) || defined(__ARM_FEATURE_SHA2)
|
||||
|
||||
static const uint32_t keypad[12] __attribute((aligned(16))) =
|
||||
{
|
||||
0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x00000280
|
||||
};
|
||||
static const uint32_t innerpad[11] __attribute((aligned(16))) =
|
||||
{
|
||||
0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x000004a0
|
||||
};
|
||||
static const uint32_t outerpad[8] __attribute((aligned(16))) =
|
||||
{
|
||||
0x80000000, 0, 0, 0, 0, 0, 0, 0x00000300
|
||||
};
|
||||
static const uint32_t finalblk[16] __attribute((aligned(16))) =
|
||||
{
|
||||
0x00000001, 0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x00000620
|
||||
};
|
||||
|
||||
|
||||
// change this to a constant to be used directly as input state arg
|
||||
// vectors still need an init function.
|
||||
static inline void sha256_init_state( uint32_t *state )
|
||||
@@ -155,7 +161,9 @@ static inline void PBKDF2_SHA256_128_32(uint32_t *tstate, uint32_t *ostate,
|
||||
output[i] = bswap_32( ostate[i] );
|
||||
}
|
||||
|
||||
#if defined(__SHA__)
|
||||
#endif // throughput 1
|
||||
//
|
||||
#if defined(__SHA__) || defined(__ARM_FEATURE_SHA2)
|
||||
|
||||
static inline void HMAC_SHA256_80_init_SHA_2BUF( const uint32_t *key0,
|
||||
const uint32_t *key1, uint32_t *tstate0, uint32_t *tstate1,
|
||||
@@ -266,7 +274,11 @@ static inline void PBKDF2_SHA256_128_32_SHA_2BUF( uint32_t *tstate0,
|
||||
|
||||
#endif // SHA
|
||||
|
||||
static const uint32_t keypad_4way[4 * 12] = {
|
||||
|
||||
|
||||
|
||||
static const uint32_t keypad_4way[ 4*12 ] __attribute((aligned(32))) =
|
||||
{
|
||||
0x80000000, 0x80000000, 0x80000000, 0x80000000,
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
||||
@@ -280,7 +292,8 @@ static const uint32_t keypad_4way[4 * 12] = {
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
||||
0x00000280, 0x00000280, 0x00000280, 0x00000280
|
||||
};
|
||||
static const uint32_t innerpad_4way[4 * 11] = {
|
||||
static const uint32_t innerpad_4way[ 4*11 ] __attribute((aligned(32))) =
|
||||
{
|
||||
0x80000000, 0x80000000, 0x80000000, 0x80000000,
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
||||
@@ -293,7 +306,8 @@ static const uint32_t innerpad_4way[4 * 11] = {
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
||||
0x000004a0, 0x000004a0, 0x000004a0, 0x000004a0
|
||||
};
|
||||
static const uint32_t outerpad_4way[4 * 8] = {
|
||||
static const uint32_t outerpad_4way[ 4*8 ] __attribute((aligned(32))) =
|
||||
{
|
||||
0x80000000, 0x80000000, 0x80000000, 0x80000000,
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
||||
@@ -1221,10 +1235,10 @@ static int scrypt_N_1_1_256_16way( const uint32_t *input, uint32_t *output,
|
||||
|
||||
#endif // AVX512
|
||||
|
||||
#if ( SCRYPT_THROUGHPUT == 2 ) && defined(__SHA__)
|
||||
#if ( SCRYPT_THROUGHPUT == 2 ) && ( defined(__SHA__) || defined(__ARM_FEATURE_SHA2) )
|
||||
|
||||
static int scrypt_N_1_1_256_sha_2buf( const uint32_t *input, uint32_t *output,
|
||||
uint32_t *midstate, int N, int thrid )
|
||||
static int scrypt_N_1_1_256_sha_2buf( const uint32_t *input,
|
||||
uint32_t *output, uint32_t *midstate, int N, int thrid )
|
||||
{
|
||||
uint32_t _ALIGN(128) tstate[ 2*8 ];
|
||||
uint32_t _ALIGN(128) ostate[ 2*8 ];
|
||||
@@ -1241,13 +1255,13 @@ static int scrypt_N_1_1_256_sha_2buf( const uint32_t *input, uint32_t *output,
|
||||
scrypt_core_simd128_2buf( W, scratchbuf, N );
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
PBKDF2_SHA256_128_32_SHA_2BUF( tstate, tstate+8, ostate, ostate+8, W, W+32,
|
||||
output, output+8 );
|
||||
PBKDF2_SHA256_128_32_SHA_2BUF( tstate, tstate+8, ostate,
|
||||
ostate+8, W, W+32, output, output+8 );
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif // THROUGHPUT = 2 && SHA
|
||||
|
||||
#if ( SCRYPT_THROUGHPUT == 4 )
|
||||
|
||||
@@ -1267,13 +1281,10 @@ static int scrypt_N_1_1_256_4way_sha( const uint32_t *input, uint32_t *output,
|
||||
|
||||
HMAC_SHA256_80_init( input, tstate, ostate );
|
||||
PBKDF2_SHA256_80_128( tstate, ostate, input, W );
|
||||
|
||||
HMAC_SHA256_80_init( input +20, tstate+ 8, ostate+ 8 );
|
||||
PBKDF2_SHA256_80_128( tstate+ 8, ostate+ 8, input +20, W+32 );
|
||||
|
||||
HMAC_SHA256_80_init( input +40, tstate+16, ostate+16 );
|
||||
PBKDF2_SHA256_80_128( tstate+16, ostate+16, input +40, W+64 );
|
||||
|
||||
HMAC_SHA256_80_init( input +60, tstate+24, ostate+24 );
|
||||
PBKDF2_SHA256_80_128( tstate+24, ostate+24, input +60, W+96 );
|
||||
|
||||
@@ -1303,11 +1314,8 @@ static int scrypt_N_1_1_256_4way_sha( const uint32_t *input, uint32_t *output,
|
||||
if ( work_restart[thrid].restart ) return 0;
|
||||
|
||||
PBKDF2_SHA256_128_32( tstate, ostate, W, output );
|
||||
|
||||
PBKDF2_SHA256_128_32( tstate+ 8, ostate+ 8, W+32, output+ 8 );
|
||||
|
||||
PBKDF2_SHA256_128_32( tstate+16, ostate+16, W+64, output+16 );
|
||||
|
||||
PBKDF2_SHA256_128_32( tstate+24, ostate+24, W+96, output+24 );
|
||||
|
||||
return 1;
|
||||
@@ -1418,14 +1426,14 @@ extern int scanhash_scrypt( struct work *work, uint32_t max_nonce,
|
||||
rc = scrypt_N_1_1_256_8way( data, hash, midstate, opt_param_n,
|
||||
thr_id );
|
||||
#elif ( SCRYPT_THROUGHPUT == 4 )
|
||||
#if defined(__SHA__)
|
||||
#if defined(__SHA__) || defined(__ARM_FEATURE_SHA2)
|
||||
rc = scrypt_N_1_1_256_4way_sha( data, hash, midstate, opt_param_n,
|
||||
thr_id );
|
||||
#else
|
||||
rc = scrypt_N_1_1_256_4way( data, hash, midstate, opt_param_n,
|
||||
thr_id );
|
||||
#endif
|
||||
#elif ( SCRYPT_THROUGHPUT == 2 ) && defined(__SHA__)
|
||||
#elif ( SCRYPT_THROUGHPUT == 2 ) && ( defined(__SHA__) || defined(__ARM_FEATURE_SHA2) )
|
||||
rc = scrypt_N_1_1_256_sha_2buf( data, hash, midstate, opt_param_n,
|
||||
thr_id );
|
||||
#else
|
||||
@@ -1472,10 +1480,10 @@ bool scrypt_miner_thread_init( int thr_id )
|
||||
|
||||
bool register_scrypt_algo( algo_gate_t* gate )
|
||||
{
|
||||
#if defined(__SHA__)
|
||||
gate->optimizations = SSE2_OPT | SHA_OPT;
|
||||
#if defined(__SHA__) || defined(__ARM_FEATURE_SHA2)
|
||||
gate->optimizations = SSE2_OPT | SHA_OPT | NEON_OPT;
|
||||
#else
|
||||
gate->optimizations = SSE2_OPT | SSE42_OPT | AVX_OPT | AVX2_OPT | AVX512_OPT;
|
||||
gate->optimizations = SSE2_OPT | SSE42_OPT | AVX_OPT | AVX2_OPT | AVX512_OPT | NEON_OPT;
|
||||
#endif
|
||||
gate->miner_thread_init =(void*)&scrypt_miner_thread_init;
|
||||
gate->scanhash = (void*)&scanhash_scrypt;
|
||||
@@ -1492,15 +1500,15 @@ bool register_scrypt_algo( algo_gate_t* gate )
|
||||
scratchbuf_size = opt_param_n * 3 * 128; // 3 buf
|
||||
else
|
||||
scratchbuf_size = opt_param_n * 4 * 128; // 4 way
|
||||
#elif defined(__SHA__) || defined(__ARM_FEATURE_SHA2)
|
||||
// scrypt_throughput = 2;
|
||||
scratchbuf_size = opt_param_n * 2 * 128; // 2 buf
|
||||
#elif defined(__AVX2__)
|
||||
// scrypt_throughput = 8;
|
||||
if ( opt_param_n > 0x4000 )
|
||||
scratchbuf_size = opt_param_n * 3 * 128; // 3 buf
|
||||
else
|
||||
scratchbuf_size = opt_param_n * 2 * 128; // 2 way
|
||||
#elif defined(__SHA__)
|
||||
// scrypt_throughput = 4;
|
||||
scratchbuf_size = opt_param_n * 2 * 128; // 2 buf
|
||||
#else
|
||||
// scrypt_throughput = 4;
|
||||
if ( opt_param_n > 0x4000 )
|
||||
|
390
algo/sha/sha1-hash.c
Normal file
@@ -0,0 +1,390 @@
|
||||
#include "simd-utils.h"
|
||||
#include <stdint.h>
|
||||
#include "sha1-hash.h"
|
||||
|
||||
#if defined(__x86_64__) && defined(__SHA__)
|
||||
|
||||
#define sha1_opt_rounds( state_out, data, state_in ) \
|
||||
{ \
|
||||
__m128i ABCD, ABCD_SAVE, E0, E0_SAVE, E1; \
|
||||
__m128i MSG0, MSG1, MSG2, MSG3; \
|
||||
\
|
||||
ABCD = _mm_load_si128( (const __m128i*) state_in ); \
|
||||
E0 = _mm_set_epi32( state_in[4], 0, 0, 0 ); \
|
||||
ABCD = _mm_shuffle_epi32( ABCD, 0x1B ); \
|
||||
\
|
||||
ABCD_SAVE = ABCD; \
|
||||
E0_SAVE = E0; \
|
||||
\
|
||||
/* Rounds 0-3 */ \
|
||||
MSG0 = load_msg( data, 0 ); \
|
||||
E0 = _mm_add_epi32( E0, MSG0 ); \
|
||||
E1 = ABCD; \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E0, 0 ); \
|
||||
\
|
||||
/* Rounds 4-7 */ \
|
||||
MSG1 = load_msg( data, 1 ); \
|
||||
E1 = _mm_sha1nexte_epu32( E1, MSG1 ); \
|
||||
E0 = ABCD; \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E1, 0 ); \
|
||||
MSG0 = _mm_sha1msg1_epu32( MSG0, MSG1 ); \
|
||||
\
|
||||
/* Rounds 8-11 */ \
|
||||
MSG2 = load_msg( data, 2 ); \
|
||||
E0 = _mm_sha1nexte_epu32( E0, MSG2 ); \
|
||||
E1 = ABCD; \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E0, 0 ); \
|
||||
MSG1 = _mm_sha1msg1_epu32( MSG1, MSG2 ); \
|
||||
MSG0 = _mm_xor_si128( MSG0, MSG2 ); \
|
||||
\
|
||||
/* Rounds 12-15 */ \
|
||||
MSG3 = load_msg( data, 3 ); \
|
||||
E1 = _mm_sha1nexte_epu32( E1, MSG3 ); \
|
||||
E0 = ABCD; \
|
||||
MSG0 = _mm_sha1msg2_epu32( MSG0, MSG3 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E1, 0 ); \
|
||||
MSG2 = _mm_sha1msg1_epu32( MSG2, MSG3 ); \
|
||||
MSG1 = _mm_xor_si128( MSG1, MSG3 ); \
|
||||
\
|
||||
/* Rounds 16-19 */ \
|
||||
E0 = _mm_sha1nexte_epu32( E0, MSG0 ); \
|
||||
E1 = ABCD; \
|
||||
MSG1 = _mm_sha1msg2_epu32( MSG1, MSG0 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E0, 0 ); \
|
||||
MSG3 = _mm_sha1msg1_epu32( MSG3, MSG0 ); \
|
||||
MSG2 = _mm_xor_si128( MSG2, MSG0 ); \
|
||||
\
|
||||
/* Rounds 20-23 */ \
|
||||
E1 = _mm_sha1nexte_epu32( E1, MSG1 ); \
|
||||
E0 = ABCD; \
|
||||
MSG2 = _mm_sha1msg2_epu32( MSG2, MSG1 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E1, 1 ); \
|
||||
MSG0 = _mm_sha1msg1_epu32( MSG0, MSG1 ); \
|
||||
MSG3 = _mm_xor_si128( MSG3, MSG1 ); \
|
||||
\
|
||||
/* Rounds 24-27 */ \
|
||||
E0 = _mm_sha1nexte_epu32( E0, MSG2 ); \
|
||||
E1 = ABCD; \
|
||||
MSG3 = _mm_sha1msg2_epu32( MSG3, MSG2 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E0, 1 ); \
|
||||
MSG1 = _mm_sha1msg1_epu32( MSG1, MSG2 ); \
|
||||
MSG0 = _mm_xor_si128( MSG0, MSG2 ); \
|
||||
\
|
||||
/* Rounds 28-31 */ \
|
||||
E1 = _mm_sha1nexte_epu32( E1, MSG3 ); \
|
||||
E0 = ABCD; \
|
||||
MSG0 = _mm_sha1msg2_epu32( MSG0, MSG3 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E1, 1 ); \
|
||||
MSG2 = _mm_sha1msg1_epu32( MSG2, MSG3 ); \
|
||||
MSG1 = _mm_xor_si128( MSG1, MSG3 ); \
|
||||
\
|
||||
/* Rounds 32-35 */ \
|
||||
E0 = _mm_sha1nexte_epu32( E0, MSG0 ); \
|
||||
E1 = ABCD; \
|
||||
MSG1 = _mm_sha1msg2_epu32( MSG1, MSG0 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E0, 1 ); \
|
||||
MSG3 = _mm_sha1msg1_epu32( MSG3, MSG0 ); \
|
||||
MSG2 = _mm_xor_si128( MSG2, MSG0 ); \
|
||||
\
|
||||
/* Rounds 36-39 */ \
|
||||
E1 = _mm_sha1nexte_epu32( E1, MSG1 ); \
|
||||
E0 = ABCD; \
|
||||
MSG2 = _mm_sha1msg2_epu32( MSG2, MSG1 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E1, 1 ); \
|
||||
MSG0 = _mm_sha1msg1_epu32( MSG0, MSG1 ); \
|
||||
MSG3 = _mm_xor_si128( MSG3, MSG1 ); \
|
||||
\
|
||||
/* Rounds 40-43 */ \
|
||||
E0 = _mm_sha1nexte_epu32( E0, MSG2 ); \
|
||||
E1 = ABCD; \
|
||||
MSG3 = _mm_sha1msg2_epu32( MSG3, MSG2 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E0, 2 ); \
|
||||
MSG1 = _mm_sha1msg1_epu32( MSG1, MSG2 ); \
|
||||
MSG0 = _mm_xor_si128( MSG0, MSG2 ); \
|
||||
\
|
||||
/* Rounds 44-47 */ \
|
||||
E1 = _mm_sha1nexte_epu32( E1, MSG3 ); \
|
||||
E0 = ABCD; \
|
||||
MSG0 = _mm_sha1msg2_epu32( MSG0, MSG3 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E1, 2 ); \
|
||||
MSG2 = _mm_sha1msg1_epu32( MSG2, MSG3 ); \
|
||||
MSG1 = _mm_xor_si128( MSG1, MSG3 ); \
|
||||
\
|
||||
/* Rounds 48-51 */ \
|
||||
E0 = _mm_sha1nexte_epu32( E0, MSG0 ); \
|
||||
E1 = ABCD; \
|
||||
MSG1 = _mm_sha1msg2_epu32( MSG1, MSG0 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E0, 2 ); \
|
||||
MSG3 = _mm_sha1msg1_epu32( MSG3, MSG0 ); \
|
||||
MSG2 = _mm_xor_si128( MSG2, MSG0 ); \
|
||||
\
|
||||
/* Rounds 52-55 */ \
|
||||
E1 = _mm_sha1nexte_epu32( E1, MSG1 ); \
|
||||
E0 = ABCD; \
|
||||
MSG2 = _mm_sha1msg2_epu32( MSG2, MSG1 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E1, 2 ); \
|
||||
MSG0 = _mm_sha1msg1_epu32( MSG0, MSG1 ); \
|
||||
MSG3 = _mm_xor_si128( MSG3, MSG1 ); \
|
||||
\
|
||||
/* Rounds 56-59 */ \
|
||||
E0 = _mm_sha1nexte_epu32( E0, MSG2 ); \
|
||||
E1 = ABCD; \
|
||||
MSG3 = _mm_sha1msg2_epu32( MSG3, MSG2 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E0, 2 ); \
|
||||
MSG1 = _mm_sha1msg1_epu32( MSG1, MSG2 ); \
|
||||
MSG0 = _mm_xor_si128( MSG0, MSG2 ); \
|
||||
\
|
||||
/* Rounds 60-63 */ \
|
||||
E1 = _mm_sha1nexte_epu32( E1, MSG3 ); \
|
||||
E0 = ABCD; \
|
||||
MSG0 = _mm_sha1msg2_epu32( MSG0, MSG3 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E1, 3 ); \
|
||||
MSG2 = _mm_sha1msg1_epu32( MSG2, MSG3 ); \
|
||||
MSG1 = _mm_xor_si128( MSG1, MSG3 ); \
|
||||
\
|
||||
/* Rounds 64-67 */ \
|
||||
E0 = _mm_sha1nexte_epu32( E0, MSG0 ); \
|
||||
E1 = ABCD; \
|
||||
MSG1 = _mm_sha1msg2_epu32( MSG1, MSG0 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E0, 3 ); \
|
||||
MSG3 = _mm_sha1msg1_epu32( MSG3, MSG0 ); \
|
||||
MSG2 = _mm_xor_si128( MSG2, MSG0 ); \
|
||||
\
|
||||
/* Rounds 68-71 */ \
|
||||
E1 = _mm_sha1nexte_epu32( E1, MSG1 ); \
|
||||
E0 = ABCD; \
|
||||
MSG2 = _mm_sha1msg2_epu32( MSG2, MSG1 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E1, 3 ); \
|
||||
MSG3 = _mm_xor_si128( MSG3, MSG1 ); \
|
||||
\
|
||||
/* Rounds 72-75 */ \
|
||||
E0 = _mm_sha1nexte_epu32( E0, MSG2 ); \
|
||||
E1 = ABCD; \
|
||||
MSG3 = _mm_sha1msg2_epu32( MSG3, MSG2 ); \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E0, 3 ); \
|
||||
\
|
||||
/* Rounds 76-79 */ \
|
||||
E1 = _mm_sha1nexte_epu32( E1, MSG3 ); \
|
||||
E0 = ABCD; \
|
||||
ABCD = _mm_sha1rnds4_epu32( ABCD, E1, 3 ); \
|
||||
\
|
||||
/* Combine state */ \
|
||||
E0 = _mm_sha1nexte_epu32( E0, E0_SAVE ); \
|
||||
ABCD = _mm_add_epi32( ABCD, ABCD_SAVE ); \
|
||||
\
|
||||
/* Save state */ \
|
||||
ABCD = _mm_shuffle_epi32( ABCD, 0x1B ); \
|
||||
_mm_store_si128( (__m128i*) state_out, ABCD ); \
|
||||
state_out[4] = _mm_extract_epi32( E0, 3 ); \
|
||||
}
|
||||
|
||||
|
||||
void sha1_x86_sha_transform_le( uint32_t *state_out, const void *input,
|
||||
const uint32_t *state_in )
|
||||
{
|
||||
#define load_msg( m, i ) casti_v128( m, i )
|
||||
sha1_opt_rounds( state_out, input, state_in );
|
||||
#undef load_msg
|
||||
}
|
||||
|
||||
void sha1_x86_sha_transform_be( uint32_t *state_out, const void *input,
|
||||
const uint32_t *state_in )
|
||||
{
|
||||
const __m128i MASK = _mm_set_epi64x( 0x0001020304050607ULL,
|
||||
0x08090a0b0c0d0e0fULL );
|
||||
#define load_msg( m, i ) _mm_shuffle_epi8( casti_v128( m, i ), MASK )
|
||||
sha1_opt_rounds( state_out, input, state_in );
|
||||
#undef load_msg
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(__aarch64__) && defined(__ARM_FEATURE_SHA2)
|
||||
|
||||
#define sha1_neon_rounds( state_out, data, state_in ) \
|
||||
{ \
|
||||
uint32x4_t ABCD, ABCD_SAVED; \
|
||||
uint32x4_t TMP0, TMP1; \
|
||||
uint32x4_t MSG0, MSG1, MSG2, MSG3; \
|
||||
uint32_t E0, E0_SAVED, E1; \
|
||||
\
|
||||
/* Load state */ \
|
||||
ABCD = vld1q_u32( &state_in[0] ); \
|
||||
E0 = state_in[4]; \
|
||||
\
|
||||
/* Save state */ \
|
||||
ABCD_SAVED = ABCD; \
|
||||
E0_SAVED = E0; \
|
||||
\
|
||||
MSG0 = load_msg( data, 0 ); \
|
||||
MSG1 = load_msg( data, 1 ); \
|
||||
MSG2 = load_msg( data, 2 ); \
|
||||
MSG3 = load_msg( data, 3 ); \
|
||||
\
|
||||
TMP0 = vaddq_u32( MSG0, vdupq_n_u32( 0x5A827999 ) ); \
|
||||
TMP1 = vaddq_u32( MSG1, vdupq_n_u32( 0x5A827999 ) ); \
|
||||
\
|
||||
/* Rounds 0-3 */ \
|
||||
E1 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1cq_u32( ABCD, E0, TMP0 ); \
|
||||
TMP0 = vaddq_u32( MSG2, vdupq_n_u32( 0x5A827999 ) ); \
|
||||
MSG0 = vsha1su0q_u32( MSG0, MSG1, MSG2 ); \
|
||||
\
|
||||
/* Rounds 4-7 */ \
|
||||
E0 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1cq_u32(ABCD, E1, TMP1); \
|
||||
TMP1 = vaddq_u32( MSG3, vdupq_n_u32( 0x5A827999 ) ); \
|
||||
MSG0 = vsha1su1q_u32( MSG0, MSG3 ); \
|
||||
MSG1 = vsha1su0q_u32( MSG1, MSG2, MSG3 ); \
|
||||
\
|
||||
/* Rounds 8-11 */ \
|
||||
E1 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1cq_u32( ABCD, E0, TMP0 ); \
|
||||
TMP0 = vaddq_u32( MSG0, vdupq_n_u32( 0x5A827999 ) ); \
|
||||
MSG1 = vsha1su1q_u32( MSG1, MSG0 ); \
|
||||
MSG2 = vsha1su0q_u32( MSG2, MSG3, MSG0 ); \
|
||||
\
|
||||
/* Rounds 12-15 */ \
|
||||
E0 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1cq_u32( ABCD, E1, TMP1 ); \
|
||||
TMP1 = vaddq_u32( MSG1, vdupq_n_u32( 0x6ED9EBA1 ) ); \
|
||||
MSG2 = vsha1su1q_u32( MSG2, MSG1 ); \
|
||||
MSG3 = vsha1su0q_u32( MSG3, MSG0, MSG1 ); \
|
||||
\
|
||||
/* Rounds 16-19 */\
|
||||
E1 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1cq_u32( ABCD, E0, TMP0 ); \
|
||||
TMP0 = vaddq_u32( MSG2, vdupq_n_u32( 0x6ED9EBA1 ) ); \
|
||||
MSG3 = vsha1su1q_u32( MSG3, MSG2 ); \
|
||||
MSG0 = vsha1su0q_u32( MSG0, MSG1, MSG2 ); \
|
||||
\
|
||||
/* Rounds 20-23 */ \
|
||||
E0 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1pq_u32( ABCD, E1, TMP1 ); \
|
||||
TMP1 = vaddq_u32( MSG3, vdupq_n_u32( 0x6ED9EBA1 ) ); \
|
||||
MSG0 = vsha1su1q_u32( MSG0, MSG3 ); \
|
||||
MSG1 = vsha1su0q_u32( MSG1, MSG2, MSG3 ); \
|
||||
\
|
||||
/* Rounds 24-27 */ \
|
||||
E1 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1pq_u32( ABCD, E0, TMP0 ); \
|
||||
TMP0 = vaddq_u32( MSG0, vdupq_n_u32( 0x6ED9EBA1 ) ); \
|
||||
MSG1 = vsha1su1q_u32( MSG1, MSG0 ); \
|
||||
MSG2 = vsha1su0q_u32( MSG2, MSG3, MSG0 ); \
|
||||
\
|
||||
/* Rounds 28-31 */ \
|
||||
E0 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1pq_u32( ABCD, E1, TMP1 ); \
|
||||
TMP1 = vaddq_u32( MSG1, vdupq_n_u32( 0x6ED9EBA1 ) ); \
|
||||
MSG2 = vsha1su1q_u32( MSG2, MSG1 ); \
|
||||
MSG3 = vsha1su0q_u32( MSG3, MSG0, MSG1 ); \
|
||||
\
|
||||
/* Rounds 32-35 */ \
|
||||
E1 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1pq_u32( ABCD, E0, TMP0 ); \
|
||||
TMP0 = vaddq_u32( MSG2, vdupq_n_u32( 0x8F1BBCDC ) ); \
|
||||
MSG3 = vsha1su1q_u32( MSG3, MSG2 ); \
|
||||
MSG0 = vsha1su0q_u32( MSG0, MSG1, MSG2 ); \
|
||||
\
|
||||
/* Rounds 36-39 */ \
|
||||
E0 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1pq_u32( ABCD, E1, TMP1 ); \
|
||||
TMP1 = vaddq_u32( MSG3, vdupq_n_u32( 0x8F1BBCDC ) ); \
|
||||
MSG0 = vsha1su1q_u32( MSG0, MSG3 ); \
|
||||
MSG1 = vsha1su0q_u32( MSG1, MSG2, MSG3 ); \
|
||||
\
|
||||
/* Rounds 40-43 */ \
|
||||
E1 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1mq_u32( ABCD, E0, TMP0 ); \
|
||||
TMP0 = vaddq_u32( MSG0, vdupq_n_u32( 0x8F1BBCDC ) ); \
|
||||
MSG1 = vsha1su1q_u32( MSG1, MSG0 ); \
|
||||
MSG2 = vsha1su0q_u32( MSG2, MSG3, MSG0 ); \
|
||||
\
|
||||
/* Rounds 44-47 */ \
|
||||
E0 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1mq_u32( ABCD, E1, TMP1 ); \
|
||||
TMP1 = vaddq_u32( MSG1, vdupq_n_u32( 0x8F1BBCDC ) ); \
|
||||
MSG2 = vsha1su1q_u32( MSG2, MSG1 ); \
|
||||
MSG3 = vsha1su0q_u32( MSG3, MSG0, MSG1 ); \
|
||||
\
|
||||
/* Rounds 48-51 */ \
|
||||
E1 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1mq_u32( ABCD, E0, TMP0 ); \
|
||||
TMP0 = vaddq_u32( MSG2, vdupq_n_u32( 0x8F1BBCDC ) ); \
|
||||
MSG3 = vsha1su1q_u32( MSG3, MSG2 ); \
|
||||
MSG0 = vsha1su0q_u32( MSG0, MSG1, MSG2 ); \
|
||||
\
|
||||
/* Rounds 52-55 */ \
|
||||
E0 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1mq_u32( ABCD, E1, TMP1 ); \
|
||||
TMP1 = vaddq_u32( MSG3, vdupq_n_u32( 0xCA62C1D6 ) ); \
|
||||
MSG0 = vsha1su1q_u32( MSG0, MSG3 ); \
|
||||
MSG1 = vsha1su0q_u32( MSG1, MSG2, MSG3 ); \
|
||||
\
|
||||
/* Rounds 56-59 */ \
|
||||
E1 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1mq_u32( ABCD, E0, TMP0 ); \
|
||||
TMP0 = vaddq_u32( MSG0, vdupq_n_u32( 0xCA62C1D6 ) ); \
|
||||
MSG1 = vsha1su1q_u32( MSG1, MSG0 ); \
|
||||
MSG2 = vsha1su0q_u32( MSG2, MSG3, MSG0 ); \
|
||||
\
|
||||
/* Rounds 60-63 */ \
|
||||
E0 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1pq_u32( ABCD, E1, TMP1 ); \
|
||||
TMP1 = vaddq_u32( MSG1, vdupq_n_u32( 0xCA62C1D6 ) ); \
|
||||
MSG2 = vsha1su1q_u32( MSG2, MSG1 ); \
|
||||
MSG3 = vsha1su0q_u32( MSG3, MSG0, MSG1 ); \
|
||||
\
|
||||
/* Rounds 64-67 */ \
|
||||
E1 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1pq_u32( ABCD, E0, TMP0 ); \
|
||||
TMP0 = vaddq_u32(MSG2, vdupq_n_u32( 0xCA62C1D6 ) ); \
|
||||
MSG3 = vsha1su1q_u32( MSG3, MSG2 ); \
|
||||
MSG0 = vsha1su0q_u32( MSG0, MSG1, MSG2 ); \
|
||||
\
|
||||
/* Rounds 68-71 */ \
|
||||
E0 = vsha1h_u32( vgetq_lane_u32( ABCD, 0) ); \
|
||||
ABCD = vsha1pq_u32( ABCD, E1, TMP1 ); \
|
||||
TMP1 = vaddq_u32( MSG3, vdupq_n_u32( 0xCA62C1D6 ) ); \
|
||||
MSG0 = vsha1su1q_u32( MSG0, MSG3 ); \
|
||||
\
|
||||
/* Rounds 72-75 */ \
|
||||
E1 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1pq_u32( ABCD, E0, TMP0 ); \
|
||||
\
|
||||
/* Rounds 76-79 */ \
|
||||
E0 = vsha1h_u32( vgetq_lane_u32( ABCD, 0 ) ); \
|
||||
ABCD = vsha1pq_u32( ABCD, E1, TMP1 ); \
|
||||
\
|
||||
/* Combine state */ \
|
||||
E0 += E0_SAVED; \
|
||||
ABCD = vaddq_u32( ABCD_SAVED, ABCD ); \
|
||||
\
|
||||
/* Save state */ \
|
||||
vst1q_u32( &state_out[0], ABCD ); \
|
||||
state_out[4] = E0; \
|
||||
}
|
||||
|
||||
void sha1_neon_sha_transform_be( uint32_t *state_out, const void *input,
|
||||
const uint32_t *state_in )
|
||||
{
|
||||
#define load_msg( m, i ) v128_bswap32( casti_v128( m, i ) );
|
||||
sha1_neon_rounds( state_out, input, state_in );
|
||||
#undef load_msg
|
||||
}
|
||||
|
||||
void sha1_neon_sha_transform_le( uint32_t *state_out, const void *input,
|
||||
const uint32_t *state_in )
|
||||
{
|
||||
#define load_msg( m, i ) casti_v128( m, i );
|
||||
sha1_neon_rounds( state_out, input, state_in );
|
||||
#undef load_msg
|
||||
}
|
||||
|
||||
#endif
|
40
algo/sha/sha1-hash.h
Normal file
@@ -0,0 +1,40 @@
|
||||
#ifndef SHA1_HASH_H__
|
||||
#define SHA1_HASH_H__ 1
|
||||
|
||||
#include <stddef.h>
|
||||
#include "simd-utils.h"
|
||||
#include "cpuminer-config.h"
|
||||
#include "sph_sha1.h"
|
||||
|
||||
// SHA hooks for sha1, automatically substituted in SPH
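// Illustrative usage (editor's sketch, not part of the original header):
// code that includes this file calls the generic names defined below and
// gets the x86 SHA-NI, ARM NEON, or scalar sph backend selected at compile
// time, e.g.  sha1_transform_be( state_out, block, state_in );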
|
||||
#if defined(__x86_64__) && defined(__SHA__)
|
||||
|
||||
void sha1_x86_sha_transform_le( uint32_t *state_out, const void *input,
|
||||
const uint32_t *state_in );
|
||||
|
||||
void sha1_x86_sha_transform_be( uint32_t *state_out, const void *input,
|
||||
const uint32_t *state_in );
|
||||
|
||||
#define sha1_transform_le sha1_x86_sha_transform_le
|
||||
#define sha1_transform_be sha1_x86_sha_transform_be
|
||||
|
||||
#elif defined(__ARM_NEON) && defined(__ARM_FEATURE_SHA2)
|
||||
|
||||
void sha1_neon_sha_transform_be( uint32_t *state_out, const void *input,
|
||||
const uint32_t *state_in );
|
||||
void sha1_neon_sha_transform_le( uint32_t *state_out, const void *input,
|
||||
const uint32_t *state_in );
|
||||
|
||||
#define sha1_transform_le sha1_neon_sha_transform_le
|
||||
#define sha1_transform_be sha1_neon_sha_transform_be
|
||||
|
||||
#else
|
||||
|
||||
#define sha1_transform_le sph_sha1_transform_le
|
||||
#define sha1_transform_be sph_sha1_transform_be
|
||||
|
||||
#endif
|
||||
|
||||
#define sha1_full sph_sha1_full
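// Example (editor's illustration, assuming "data" points to an 80-byte
// buffer): one-shot SHA-1 of a message via the alias above.
//   uint32_t digest[5];
//   sha1_full( digest, data, 80 );   // expands to sph_sha1_full()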
|
||||
|
||||
#endif
|
400
algo/sha/sha1.c
Normal file
@@ -0,0 +1,400 @@
|
||||
/* $Id: sha1.c 216 2010-06-08 09:46:57Z tp $ */
|
||||
/*
|
||||
* SHA-1 implementation.
|
||||
*
|
||||
* ==========================(LICENSE BEGIN)============================
|
||||
*
|
||||
* Copyright (c) 2007-2010 Projet RNRT SAPHIR
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining
|
||||
* a copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be
|
||||
* included in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* ===========================(LICENSE END)=============================
|
||||
*
|
||||
* @author Thomas Pornin <thomas.pornin@cryptolog.com>
|
||||
*/
|
||||
|
||||
#include <stddef.h>
|
||||
#include <string.h>
|
||||
#include "simd-utils.h"
|
||||
#include "sha1-hash.h"
|
||||
|
||||
#define F(B, C, D) ((((C) ^ (D)) & (B)) ^ (D))
|
||||
#define G(B, C, D) ((B) ^ (C) ^ (D))
|
||||
#define H(B, C, D) (((D) & (C)) | (((D) | (C)) & (B)))
|
||||
#define I(B, C, D) G(B, C, D)
|
||||
|
||||
#define ROTL rol32
|
||||
//#define ROTL SPH_ROTL32
|
||||
|
||||
#define K1 SPH_C32(0x5A827999)
|
||||
#define K2 SPH_C32(0x6ED9EBA1)
|
||||
#define K3 SPH_C32(0x8F1BBCDC)
|
||||
#define K4 SPH_C32(0xCA62C1D6)
|
||||
|
||||
static const sph_u32 IV[5] = {
|
||||
SPH_C32(0x67452301), SPH_C32(0xEFCDAB89),
|
||||
SPH_C32(0x98BADCFE), SPH_C32(0x10325476),
|
||||
SPH_C32(0xC3D2E1F0)
|
||||
};
|
||||
|
||||
/*
|
||||
* This macro defines the body for a SHA-1 compression function
|
||||
* implementation. The "in" parameter should evaluate, when applied to a
|
||||
* numerical input parameter from 0 to 15, to an expression which yields
|
||||
* the corresponding input block. The "r" parameter should evaluate to
|
||||
* an array or pointer expression designating the array of 5 words which
|
||||
* contains the input and output of the compression function.
|
||||
*/
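/*
 * Illustrative instantiation (editor's sketch): the scalar sha1_round()
 * later in this file supplies "in" as a big-endian 32-bit fetch from the
 * 64-byte data block and "r" as the 5-word chaining state:
 *
 *   #define SHA1_IN(x) sph_dec32be_aligned(data + (4 * (x)))
 *   SHA1_ROUND_BODY(SHA1_IN, r);
 *   #undef SHA1_IN
 */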
|
||||
|
||||
#define SHA1_ROUND_BODY(in, r) do { \
|
||||
sph_u32 A, B, C, D, E; \
|
||||
sph_u32 W00, W01, W02, W03, W04, W05, W06, W07; \
|
||||
sph_u32 W08, W09, W10, W11, W12, W13, W14, W15; \
|
||||
\
|
||||
A = (r)[0]; \
|
||||
B = (r)[1]; \
|
||||
C = (r)[2]; \
|
||||
D = (r)[3]; \
|
||||
E = (r)[4]; \
|
||||
\
|
||||
W00 = in(0); \
|
||||
E = SPH_T32(ROTL(A, 5) + F(B, C, D) + E + W00 + K1); \
|
||||
B = ROTL(B, 30); \
|
||||
W01 = in(1); \
|
||||
D = SPH_T32(ROTL(E, 5) + F(A, B, C) + D + W01 + K1); \
|
||||
A = ROTL(A, 30); \
|
||||
W02 = in(2); \
|
||||
C = SPH_T32(ROTL(D, 5) + F(E, A, B) + C + W02 + K1); \
|
||||
E = ROTL(E, 30); \
|
||||
W03 = in(3); \
|
||||
B = SPH_T32(ROTL(C, 5) + F(D, E, A) + B + W03 + K1); \
|
||||
D = ROTL(D, 30); \
|
||||
W04 = in(4); \
|
||||
A = SPH_T32(ROTL(B, 5) + F(C, D, E) + A + W04 + K1); \
|
||||
C = ROTL(C, 30); \
|
||||
W05 = in(5); \
|
||||
E = SPH_T32(ROTL(A, 5) + F(B, C, D) + E + W05 + K1); \
|
||||
B = ROTL(B, 30); \
|
||||
W06 = in(6); \
|
||||
D = SPH_T32(ROTL(E, 5) + F(A, B, C) + D + W06 + K1); \
|
||||
A = ROTL(A, 30); \
|
||||
W07 = in(7); \
|
||||
C = SPH_T32(ROTL(D, 5) + F(E, A, B) + C + W07 + K1); \
|
||||
E = ROTL(E, 30); \
|
||||
W08 = in(8); \
|
||||
B = SPH_T32(ROTL(C, 5) + F(D, E, A) + B + W08 + K1); \
|
||||
D = ROTL(D, 30); \
|
||||
W09 = in(9); \
|
||||
A = SPH_T32(ROTL(B, 5) + F(C, D, E) + A + W09 + K1); \
|
||||
C = ROTL(C, 30); \
|
||||
W10 = in(10); \
|
||||
E = SPH_T32(ROTL(A, 5) + F(B, C, D) + E + W10 + K1); \
|
||||
B = ROTL(B, 30); \
|
||||
W11 = in(11); \
|
||||
D = SPH_T32(ROTL(E, 5) + F(A, B, C) + D + W11 + K1); \
|
||||
A = ROTL(A, 30); \
|
||||
W12 = in(12); \
|
||||
C = SPH_T32(ROTL(D, 5) + F(E, A, B) + C + W12 + K1); \
|
||||
E = ROTL(E, 30); \
|
||||
W13 = in(13); \
|
||||
B = SPH_T32(ROTL(C, 5) + F(D, E, A) + B + W13 + K1); \
|
||||
D = ROTL(D, 30); \
|
||||
W14 = in(14); \
|
||||
A = SPH_T32(ROTL(B, 5) + F(C, D, E) + A + W14 + K1); \
|
||||
C = ROTL(C, 30); \
|
||||
W15 = in(15); \
|
||||
E = SPH_T32(ROTL(A, 5) + F(B, C, D) + E + W15 + K1); \
|
||||
B = ROTL(B, 30); \
|
||||
W00 = ROTL(W13 ^ W08 ^ W02 ^ W00, 1); \
|
||||
D = SPH_T32(ROTL(E, 5) + F(A, B, C) + D + W00 + K1); \
|
||||
A = ROTL(A, 30); \
|
||||
W01 = ROTL(W14 ^ W09 ^ W03 ^ W01, 1); \
|
||||
C = SPH_T32(ROTL(D, 5) + F(E, A, B) + C + W01 + K1); \
|
||||
E = ROTL(E, 30); \
|
||||
W02 = ROTL(W15 ^ W10 ^ W04 ^ W02, 1); \
|
||||
B = SPH_T32(ROTL(C, 5) + F(D, E, A) + B + W02 + K1); \
|
||||
D = ROTL(D, 30); \
|
||||
W03 = ROTL(W00 ^ W11 ^ W05 ^ W03, 1); \
|
||||
A = SPH_T32(ROTL(B, 5) + F(C, D, E) + A + W03 + K1); \
|
||||
C = ROTL(C, 30); \
|
||||
W04 = ROTL(W01 ^ W12 ^ W06 ^ W04, 1); \
|
||||
E = SPH_T32(ROTL(A, 5) + G(B, C, D) + E + W04 + K2); \
|
||||
B = ROTL(B, 30); \
|
||||
W05 = ROTL(W02 ^ W13 ^ W07 ^ W05, 1); \
|
||||
D = SPH_T32(ROTL(E, 5) + G(A, B, C) + D + W05 + K2); \
|
||||
A = ROTL(A, 30); \
|
||||
W06 = ROTL(W03 ^ W14 ^ W08 ^ W06, 1); \
|
||||
C = SPH_T32(ROTL(D, 5) + G(E, A, B) + C + W06 + K2); \
|
||||
E = ROTL(E, 30); \
|
||||
W07 = ROTL(W04 ^ W15 ^ W09 ^ W07, 1); \
|
||||
B = SPH_T32(ROTL(C, 5) + G(D, E, A) + B + W07 + K2); \
|
||||
D = ROTL(D, 30); \
|
||||
W08 = ROTL(W05 ^ W00 ^ W10 ^ W08, 1); \
|
||||
A = SPH_T32(ROTL(B, 5) + G(C, D, E) + A + W08 + K2); \
|
||||
C = ROTL(C, 30); \
|
||||
W09 = ROTL(W06 ^ W01 ^ W11 ^ W09, 1); \
|
||||
E = SPH_T32(ROTL(A, 5) + G(B, C, D) + E + W09 + K2); \
|
||||
B = ROTL(B, 30); \
|
||||
W10 = ROTL(W07 ^ W02 ^ W12 ^ W10, 1); \
|
||||
D = SPH_T32(ROTL(E, 5) + G(A, B, C) + D + W10 + K2); \
|
||||
A = ROTL(A, 30); \
|
||||
W11 = ROTL(W08 ^ W03 ^ W13 ^ W11, 1); \
|
||||
C = SPH_T32(ROTL(D, 5) + G(E, A, B) + C + W11 + K2); \
|
||||
E = ROTL(E, 30); \
|
||||
W12 = ROTL(W09 ^ W04 ^ W14 ^ W12, 1); \
|
||||
B = SPH_T32(ROTL(C, 5) + G(D, E, A) + B + W12 + K2); \
|
||||
D = ROTL(D, 30); \
|
||||
W13 = ROTL(W10 ^ W05 ^ W15 ^ W13, 1); \
|
||||
A = SPH_T32(ROTL(B, 5) + G(C, D, E) + A + W13 + K2); \
|
||||
C = ROTL(C, 30); \
|
||||
W14 = ROTL(W11 ^ W06 ^ W00 ^ W14, 1); \
|
||||
E = SPH_T32(ROTL(A, 5) + G(B, C, D) + E + W14 + K2); \
|
||||
B = ROTL(B, 30); \
|
||||
W15 = ROTL(W12 ^ W07 ^ W01 ^ W15, 1); \
|
||||
D = SPH_T32(ROTL(E, 5) + G(A, B, C) + D + W15 + K2); \
|
||||
A = ROTL(A, 30); \
|
||||
W00 = ROTL(W13 ^ W08 ^ W02 ^ W00, 1); \
|
||||
C = SPH_T32(ROTL(D, 5) + G(E, A, B) + C + W00 + K2); \
|
||||
E = ROTL(E, 30); \
|
||||
W01 = ROTL(W14 ^ W09 ^ W03 ^ W01, 1); \
|
||||
B = SPH_T32(ROTL(C, 5) + G(D, E, A) + B + W01 + K2); \
|
||||
D = ROTL(D, 30); \
|
||||
W02 = ROTL(W15 ^ W10 ^ W04 ^ W02, 1); \
|
||||
A = SPH_T32(ROTL(B, 5) + G(C, D, E) + A + W02 + K2); \
|
||||
C = ROTL(C, 30); \
|
||||
W03 = ROTL(W00 ^ W11 ^ W05 ^ W03, 1); \
|
||||
E = SPH_T32(ROTL(A, 5) + G(B, C, D) + E + W03 + K2); \
|
||||
B = ROTL(B, 30); \
|
||||
W04 = ROTL(W01 ^ W12 ^ W06 ^ W04, 1); \
|
||||
D = SPH_T32(ROTL(E, 5) + G(A, B, C) + D + W04 + K2); \
|
||||
A = ROTL(A, 30); \
|
||||
W05 = ROTL(W02 ^ W13 ^ W07 ^ W05, 1); \
|
||||
C = SPH_T32(ROTL(D, 5) + G(E, A, B) + C + W05 + K2); \
|
||||
E = ROTL(E, 30); \
|
||||
W06 = ROTL(W03 ^ W14 ^ W08 ^ W06, 1); \
|
||||
B = SPH_T32(ROTL(C, 5) + G(D, E, A) + B + W06 + K2); \
|
||||
D = ROTL(D, 30); \
|
||||
W07 = ROTL(W04 ^ W15 ^ W09 ^ W07, 1); \
|
||||
A = SPH_T32(ROTL(B, 5) + G(C, D, E) + A + W07 + K2); \
|
||||
C = ROTL(C, 30); \
|
||||
W08 = ROTL(W05 ^ W00 ^ W10 ^ W08, 1); \
|
||||
E = SPH_T32(ROTL(A, 5) + H(B, C, D) + E + W08 + K3); \
|
||||
B = ROTL(B, 30); \
|
||||
W09 = ROTL(W06 ^ W01 ^ W11 ^ W09, 1); \
|
||||
D = SPH_T32(ROTL(E, 5) + H(A, B, C) + D + W09 + K3); \
|
||||
A = ROTL(A, 30); \
|
||||
W10 = ROTL(W07 ^ W02 ^ W12 ^ W10, 1); \
|
||||
C = SPH_T32(ROTL(D, 5) + H(E, A, B) + C + W10 + K3); \
|
||||
E = ROTL(E, 30); \
|
||||
W11 = ROTL(W08 ^ W03 ^ W13 ^ W11, 1); \
|
||||
B = SPH_T32(ROTL(C, 5) + H(D, E, A) + B + W11 + K3); \
|
||||
D = ROTL(D, 30); \
|
||||
W12 = ROTL(W09 ^ W04 ^ W14 ^ W12, 1); \
|
||||
A = SPH_T32(ROTL(B, 5) + H(C, D, E) + A + W12 + K3); \
|
||||
C = ROTL(C, 30); \
|
||||
W13 = ROTL(W10 ^ W05 ^ W15 ^ W13, 1); \
|
||||
E = SPH_T32(ROTL(A, 5) + H(B, C, D) + E + W13 + K3); \
|
||||
B = ROTL(B, 30); \
|
||||
W14 = ROTL(W11 ^ W06 ^ W00 ^ W14, 1); \
|
||||
D = SPH_T32(ROTL(E, 5) + H(A, B, C) + D + W14 + K3); \
|
||||
A = ROTL(A, 30); \
|
||||
W15 = ROTL(W12 ^ W07 ^ W01 ^ W15, 1); \
|
||||
C = SPH_T32(ROTL(D, 5) + H(E, A, B) + C + W15 + K3); \
|
||||
E = ROTL(E, 30); \
|
||||
W00 = ROTL(W13 ^ W08 ^ W02 ^ W00, 1); \
|
||||
B = SPH_T32(ROTL(C, 5) + H(D, E, A) + B + W00 + K3); \
|
||||
D = ROTL(D, 30); \
|
||||
W01 = ROTL(W14 ^ W09 ^ W03 ^ W01, 1); \
|
||||
A = SPH_T32(ROTL(B, 5) + H(C, D, E) + A + W01 + K3); \
|
||||
C = ROTL(C, 30); \
|
||||
W02 = ROTL(W15 ^ W10 ^ W04 ^ W02, 1); \
|
||||
E = SPH_T32(ROTL(A, 5) + H(B, C, D) + E + W02 + K3); \
|
||||
B = ROTL(B, 30); \
|
||||
W03 = ROTL(W00 ^ W11 ^ W05 ^ W03, 1); \
|
||||
D = SPH_T32(ROTL(E, 5) + H(A, B, C) + D + W03 + K3); \
|
||||
A = ROTL(A, 30); \
|
||||
W04 = ROTL(W01 ^ W12 ^ W06 ^ W04, 1); \
|
||||
C = SPH_T32(ROTL(D, 5) + H(E, A, B) + C + W04 + K3); \
|
||||
E = ROTL(E, 30); \
|
||||
W05 = ROTL(W02 ^ W13 ^ W07 ^ W05, 1); \
|
||||
B = SPH_T32(ROTL(C, 5) + H(D, E, A) + B + W05 + K3); \
|
||||
D = ROTL(D, 30); \
|
||||
W06 = ROTL(W03 ^ W14 ^ W08 ^ W06, 1); \
|
||||
A = SPH_T32(ROTL(B, 5) + H(C, D, E) + A + W06 + K3); \
|
||||
C = ROTL(C, 30); \
|
||||
W07 = ROTL(W04 ^ W15 ^ W09 ^ W07, 1); \
|
||||
E = SPH_T32(ROTL(A, 5) + H(B, C, D) + E + W07 + K3); \
|
||||
B = ROTL(B, 30); \
|
||||
W08 = ROTL(W05 ^ W00 ^ W10 ^ W08, 1); \
|
||||
D = SPH_T32(ROTL(E, 5) + H(A, B, C) + D + W08 + K3); \
|
||||
A = ROTL(A, 30); \
|
||||
W09 = ROTL(W06 ^ W01 ^ W11 ^ W09, 1); \
|
||||
C = SPH_T32(ROTL(D, 5) + H(E, A, B) + C + W09 + K3); \
|
||||
E = ROTL(E, 30); \
|
||||
W10 = ROTL(W07 ^ W02 ^ W12 ^ W10, 1); \
|
||||
B = SPH_T32(ROTL(C, 5) + H(D, E, A) + B + W10 + K3); \
|
||||
D = ROTL(D, 30); \
|
||||
W11 = ROTL(W08 ^ W03 ^ W13 ^ W11, 1); \
|
||||
A = SPH_T32(ROTL(B, 5) + H(C, D, E) + A + W11 + K3); \
|
||||
C = ROTL(C, 30); \
|
||||
W12 = ROTL(W09 ^ W04 ^ W14 ^ W12, 1); \
|
||||
E = SPH_T32(ROTL(A, 5) + I(B, C, D) + E + W12 + K4); \
|
||||
B = ROTL(B, 30); \
|
||||
W13 = ROTL(W10 ^ W05 ^ W15 ^ W13, 1); \
|
||||
D = SPH_T32(ROTL(E, 5) + I(A, B, C) + D + W13 + K4); \
|
||||
A = ROTL(A, 30); \
|
||||
W14 = ROTL(W11 ^ W06 ^ W00 ^ W14, 1); \
|
||||
C = SPH_T32(ROTL(D, 5) + I(E, A, B) + C + W14 + K4); \
|
||||
E = ROTL(E, 30); \
|
||||
W15 = ROTL(W12 ^ W07 ^ W01 ^ W15, 1); \
|
||||
B = SPH_T32(ROTL(C, 5) + I(D, E, A) + B + W15 + K4); \
|
||||
D = ROTL(D, 30); \
|
||||
W00 = ROTL(W13 ^ W08 ^ W02 ^ W00, 1); \
|
||||
A = SPH_T32(ROTL(B, 5) + I(C, D, E) + A + W00 + K4); \
|
||||
C = ROTL(C, 30); \
|
||||
W01 = ROTL(W14 ^ W09 ^ W03 ^ W01, 1); \
|
||||
E = SPH_T32(ROTL(A, 5) + I(B, C, D) + E + W01 + K4); \
|
||||
B = ROTL(B, 30); \
|
||||
W02 = ROTL(W15 ^ W10 ^ W04 ^ W02, 1); \
|
||||
D = SPH_T32(ROTL(E, 5) + I(A, B, C) + D + W02 + K4); \
|
||||
A = ROTL(A, 30); \
|
||||
W03 = ROTL(W00 ^ W11 ^ W05 ^ W03, 1); \
|
||||
C = SPH_T32(ROTL(D, 5) + I(E, A, B) + C + W03 + K4); \
|
||||
E = ROTL(E, 30); \
|
||||
W04 = ROTL(W01 ^ W12 ^ W06 ^ W04, 1); \
|
||||
B = SPH_T32(ROTL(C, 5) + I(D, E, A) + B + W04 + K4); \
|
||||
D = ROTL(D, 30); \
|
||||
W05 = ROTL(W02 ^ W13 ^ W07 ^ W05, 1); \
|
||||
A = SPH_T32(ROTL(B, 5) + I(C, D, E) + A + W05 + K4); \
|
||||
C = ROTL(C, 30); \
|
||||
W06 = ROTL(W03 ^ W14 ^ W08 ^ W06, 1); \
|
||||
E = SPH_T32(ROTL(A, 5) + I(B, C, D) + E + W06 + K4); \
|
||||
B = ROTL(B, 30); \
|
||||
W07 = ROTL(W04 ^ W15 ^ W09 ^ W07, 1); \
|
||||
D = SPH_T32(ROTL(E, 5) + I(A, B, C) + D + W07 + K4); \
|
||||
A = ROTL(A, 30); \
|
||||
W08 = ROTL(W05 ^ W00 ^ W10 ^ W08, 1); \
|
||||
C = SPH_T32(ROTL(D, 5) + I(E, A, B) + C + W08 + K4); \
|
||||
E = ROTL(E, 30); \
|
||||
W09 = ROTL(W06 ^ W01 ^ W11 ^ W09, 1); \
|
||||
B = SPH_T32(ROTL(C, 5) + I(D, E, A) + B + W09 + K4); \
|
||||
D = ROTL(D, 30); \
|
||||
W10 = ROTL(W07 ^ W02 ^ W12 ^ W10, 1); \
|
||||
A = SPH_T32(ROTL(B, 5) + I(C, D, E) + A + W10 + K4); \
|
||||
C = ROTL(C, 30); \
|
||||
W11 = ROTL(W08 ^ W03 ^ W13 ^ W11, 1); \
|
||||
E = SPH_T32(ROTL(A, 5) + I(B, C, D) + E + W11 + K4); \
|
||||
B = ROTL(B, 30); \
|
||||
W12 = ROTL(W09 ^ W04 ^ W14 ^ W12, 1); \
|
||||
D = SPH_T32(ROTL(E, 5) + I(A, B, C) + D + W12 + K4); \
|
||||
A = ROTL(A, 30); \
|
||||
W13 = ROTL(W10 ^ W05 ^ W15 ^ W13, 1); \
|
||||
C = SPH_T32(ROTL(D, 5) + I(E, A, B) + C + W13 + K4); \
|
||||
E = ROTL(E, 30); \
|
||||
W14 = ROTL(W11 ^ W06 ^ W00 ^ W14, 1); \
|
||||
B = SPH_T32(ROTL(C, 5) + I(D, E, A) + B + W14 + K4); \
|
||||
D = ROTL(D, 30); \
|
||||
W15 = ROTL(W12 ^ W07 ^ W01 ^ W15, 1); \
|
||||
A = SPH_T32(ROTL(B, 5) + I(C, D, E) + A + W15 + K4); \
|
||||
C = ROTL(C, 30); \
|
||||
\
|
||||
(r)[0] = SPH_T32(r[0] + A); \
|
||||
(r)[1] = SPH_T32(r[1] + B); \
|
||||
(r)[2] = SPH_T32(r[2] + C); \
|
||||
(r)[3] = SPH_T32(r[3] + D); \
|
||||
(r)[4] = SPH_T32(r[4] + E); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* One round of SHA-1. The data must be aligned for 32-bit access.
|
||||
*/
|
||||
#if ( defined(__x86_64__) && defined(__SHA__) ) || ( defined(__aarch64__) && defined(__ARM_FEATURE_SHA2) )
|
||||
|
||||
static void
|
||||
sha1_round( const unsigned char *data, sph_u32 r[5] )
|
||||
{
|
||||
sha1_transform_be( (uint32_t*)r, (uint32_t*)data, (const uint32_t*)r );
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static void
|
||||
sha1_round( const unsigned char *data, sph_u32 r[5] )
|
||||
{
|
||||
#define SHA1_IN(x) sph_dec32be_aligned(data + (4 * (x)))
|
||||
SHA1_ROUND_BODY(SHA1_IN, r);
|
||||
#undef SHA1_IN
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/* see sph_sha1.h */
|
||||
void
|
||||
sph_sha1_init(void *cc)
|
||||
{
|
||||
sph_sha1_context *sc;
|
||||
|
||||
sc = cc;
|
||||
memcpy(sc->val, IV, sizeof IV);
|
||||
#if SPH_64
|
||||
sc->count = 0;
|
||||
#else
|
||||
sc->count_high = sc->count_low = 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
#define RFUN sha1_round
|
||||
#define HASH sha1
|
||||
#define BE32 1
|
||||
#include "md_helper.c"
|
||||
|
||||
/* see sph_sha1.h */
|
||||
void
|
||||
sph_sha1_close(void *cc, void *dst)
|
||||
{
|
||||
sha1_close(cc, dst, 5);
|
||||
sph_sha1_init(cc);
|
||||
}
|
||||
|
||||
/* see sph_sha1.h */
|
||||
void
|
||||
sph_sha1_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
|
||||
{
|
||||
sha1_addbits_and_close(cc, ub, n, dst, 5);
|
||||
sph_sha1_init(cc);
|
||||
}
|
||||
|
||||
/* see sph_sha1.h */
|
||||
void
|
||||
sph_sha1_comp(const sph_u32 msg[16], sph_u32 val[5])
|
||||
{
|
||||
#define SHA1_IN(x) msg[x]
|
||||
SHA1_ROUND_BODY(SHA1_IN, val);
|
||||
#undef SHA1_IN
|
||||
}
|
||||
|
||||
|
||||
void sph_sha1_full( void *hash, const void *msg, size_t len )
|
||||
{
|
||||
sph_sha1_context cc;
|
||||
sph_sha1_init( &cc );
|
||||
sph_sha1( &cc, msg, len );
|
||||
sph_sha1_close( &cc, hash );
|
||||
}
|
681
algo/sha/sha2.c
@@ -1,681 +0,0 @@
|
||||
/*
|
||||
* Copyright 2011 ArtForz
|
||||
* Copyright 2011-2013 pooler
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version. See COPYING for more details.
|
||||
*/
|
||||
|
||||
#include "sha256d-4way.h"
|
||||
|
||||
#include <string.h>
|
||||
#include <inttypes.h>
|
||||
|
||||
#if defined(USE_ASM) && defined(__arm__) && defined(__APCS_32__)
|
||||
#define EXTERN_SHA256
|
||||
#endif
|
||||
|
||||
static const uint32_t sha256_h[8] = {
|
||||
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
|
||||
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
|
||||
};
|
||||
|
||||
static const uint32_t sha256_k[64] = {
|
||||
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
|
||||
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
|
||||
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
|
||||
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
|
||||
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
|
||||
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
|
||||
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
|
||||
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
|
||||
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
|
||||
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
|
||||
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
|
||||
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
|
||||
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
|
||||
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
|
||||
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
|
||||
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
|
||||
};
|
||||
|
||||
void sha256_init(uint32_t *state)
|
||||
{
|
||||
memcpy(state, sha256_h, 32);
|
||||
}
|
||||
|
||||
/* Elementary functions used by SHA256 */
|
||||
#define Ch(x, y, z) ((x & (y ^ z)) ^ z)
|
||||
#define Maj(x, y, z) ((x & (y | z)) | (y & z))
|
||||
#define ROTR(x, n) ((x >> n) | (x << (32 - n)))
|
||||
#define S0(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
|
||||
#define S1(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
|
||||
#define s0(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ (x >> 3))
|
||||
#define s1(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ (x >> 10))
|
||||
|
||||
/* SHA256 round function */
|
||||
#define RND(a, b, c, d, e, f, g, h, k) \
|
||||
do { \
|
||||
t0 = h + S1(e) + Ch(e, f, g) + k; \
|
||||
t1 = S0(a) + Maj(a, b, c); \
|
||||
d += t0; \
|
||||
h = t0 + t1; \
|
||||
} while (0)
|
||||
|
||||
/* Adjusted round function for rotating state */
|
||||
#define RNDr(S, W, i) \
|
||||
RND(S[(64 - i) % 8], S[(65 - i) % 8], \
|
||||
S[(66 - i) % 8], S[(67 - i) % 8], \
|
||||
S[(68 - i) % 8], S[(69 - i) % 8], \
|
||||
S[(70 - i) % 8], S[(71 - i) % 8], \
|
||||
W[i] + sha256_k[i])
|
||||
|
||||
#ifndef EXTERN_SHA256
|
||||
|
||||
/*
|
||||
* SHA256 block compression function. The 256-bit state is transformed via
|
||||
* the 512-bit input block to produce a new state.
|
||||
*/
|
||||
void sha256_transform(uint32_t *state, const uint32_t *block, int swap)
|
||||
{
|
||||
uint32_t W[64];
|
||||
uint32_t S[8];
|
||||
uint32_t t0, t1;
|
||||
int i;
|
||||
|
||||
/* 1. Prepare message schedule W. */
|
||||
if (swap) {
|
||||
for (i = 0; i < 16; i++)
|
||||
W[i] = swab32(block[i]);
|
||||
} else
|
||||
memcpy(W, block, 64);
|
||||
for (i = 16; i < 64; i += 2) {
|
||||
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
|
||||
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15];
|
||||
}
|
||||
|
||||
/* 2. Initialize working variables. */
|
||||
memcpy(S, state, 32);
|
||||
|
||||
/* 3. Mix. */
|
||||
RNDr(S, W, 0);
|
||||
RNDr(S, W, 1);
|
||||
RNDr(S, W, 2);
|
||||
RNDr(S, W, 3);
|
||||
RNDr(S, W, 4);
|
||||
RNDr(S, W, 5);
|
||||
RNDr(S, W, 6);
|
||||
RNDr(S, W, 7);
|
||||
RNDr(S, W, 8);
|
||||
RNDr(S, W, 9);
|
||||
RNDr(S, W, 10);
|
||||
RNDr(S, W, 11);
|
||||
RNDr(S, W, 12);
|
||||
RNDr(S, W, 13);
|
||||
RNDr(S, W, 14);
|
||||
RNDr(S, W, 15);
|
||||
RNDr(S, W, 16);
|
||||
RNDr(S, W, 17);
|
||||
RNDr(S, W, 18);
|
||||
RNDr(S, W, 19);
|
||||
RNDr(S, W, 20);
|
||||
RNDr(S, W, 21);
|
||||
RNDr(S, W, 22);
|
||||
RNDr(S, W, 23);
|
||||
RNDr(S, W, 24);
|
||||
RNDr(S, W, 25);
|
||||
RNDr(S, W, 26);
|
||||
RNDr(S, W, 27);
|
||||
RNDr(S, W, 28);
|
||||
RNDr(S, W, 29);
|
||||
RNDr(S, W, 30);
|
||||
RNDr(S, W, 31);
|
||||
RNDr(S, W, 32);
|
||||
RNDr(S, W, 33);
|
||||
RNDr(S, W, 34);
|
||||
RNDr(S, W, 35);
|
||||
RNDr(S, W, 36);
|
||||
RNDr(S, W, 37);
|
||||
RNDr(S, W, 38);
|
||||
RNDr(S, W, 39);
|
||||
RNDr(S, W, 40);
|
||||
RNDr(S, W, 41);
|
||||
RNDr(S, W, 42);
|
||||
RNDr(S, W, 43);
|
||||
RNDr(S, W, 44);
|
||||
RNDr(S, W, 45);
|
||||
RNDr(S, W, 46);
|
||||
RNDr(S, W, 47);
|
||||
RNDr(S, W, 48);
|
||||
RNDr(S, W, 49);
|
||||
RNDr(S, W, 50);
|
||||
RNDr(S, W, 51);
|
||||
RNDr(S, W, 52);
|
||||
RNDr(S, W, 53);
|
||||
RNDr(S, W, 54);
|
||||
RNDr(S, W, 55);
|
||||
RNDr(S, W, 56);
|
||||
RNDr(S, W, 57);
|
||||
RNDr(S, W, 58);
|
||||
RNDr(S, W, 59);
|
||||
RNDr(S, W, 60);
|
||||
RNDr(S, W, 61);
|
||||
RNDr(S, W, 62);
|
||||
RNDr(S, W, 63);
|
||||
|
||||
/* 4. Mix local working variables into global state */
|
||||
for (i = 0; i < 8; i++)
|
||||
state[i] += S[i];
|
||||
}
|
||||
|
||||
#endif /* EXTERN_SHA256 */
|
||||
|
||||
|
||||
static const uint32_t sha256d_hash1[16] = {
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000000,
|
||||
0x80000000, 0x00000000, 0x00000000, 0x00000000,
|
||||
0x00000000, 0x00000000, 0x00000000, 0x00000100
|
||||
};
|
||||
|
||||
// this performs the entire hash all over again, why?
|
||||
// because main function only does 56 rounds.
|
||||
|
||||
static void sha256d_80_swap(uint32_t *hash, const uint32_t *data)
|
||||
{
|
||||
uint32_t S[16];
|
||||
int i;
|
||||
|
||||
sha256_init(S);
|
||||
sha256_transform(S, data, 0);
|
||||
sha256_transform(S, data + 16, 0);
|
||||
memcpy(S + 8, sha256d_hash1 + 8, 32);
|
||||
sha256_init(hash);
|
||||
sha256_transform(hash, S, 0);
|
||||
for (i = 0; i < 8; i++)
|
||||
hash[i] = swab32(hash[i]);
|
||||
}
|
||||
|
||||
/*
|
||||
#if defined (__SHA__)
|
||||
|
||||
#include "algo/sha/sph_sha2.h"
|
||||
|
||||
void sha256d(unsigned char *hash, const unsigned char *data, int len)
|
||||
{
|
||||
sph_sha256_context ctx __attribute__ ((aligned (64)));
|
||||
|
||||
sph_sha256_init( &ctx );
|
||||
sph_sha256( &ctx, data, len );
|
||||
sph_sha256_close( &ctx, hash );
|
||||
|
||||
sph_sha256_init( &ctx );
|
||||
sph_sha256( &ctx, hash, 32 );
|
||||
sph_sha256_close( &ctx, hash );
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
void sha256d(unsigned char *hash, const unsigned char *data, int len)
|
||||
{
|
||||
|
||||
uint32_t S[16], T[16];
|
||||
int i, r;
|
||||
|
||||
sha256_init(S);
|
||||
for (r = len; r > -9; r -= 64) {
|
||||
if (r < 64)
|
||||
memset(T, 0, 64);
|
||||
memcpy(T, data + len - r, r > 64 ? 64 : (r < 0 ? 0 : r));
|
||||
if (r >= 0 && r < 64)
|
||||
((unsigned char *)T)[r] = 0x80;
|
||||
for (i = 0; i < 16; i++)
|
||||
T[i] = be32dec(T + i);
|
||||
if (r < 56)
|
||||
T[15] = 8 * len;
|
||||
sha256_transform(S, T, 0);
|
||||
}
|
||||
memcpy(S + 8, sha256d_hash1 + 8, 32);
|
||||
sha256_init(T);
|
||||
sha256_transform(T, S, 0);
|
||||
for (i = 0; i < 8; i++)
|
||||
be32enc((uint32_t *)hash + i, T[i]);
|
||||
}
|
||||
|
||||
#endif
|
||||
*/
|
||||
|
||||
static inline void sha256d_preextend(uint32_t *W)
|
||||
{
|
||||
W[16] = s1(W[14]) + W[ 9] + s0(W[ 1]) + W[ 0];
|
||||
W[17] = s1(W[15]) + W[10] + s0(W[ 2]) + W[ 1];
|
||||
W[18] = s1(W[16]) + W[11] + W[ 2];
|
||||
W[19] = s1(W[17]) + W[12] + s0(W[ 4]);
|
||||
W[20] = W[13] + s0(W[ 5]) + W[ 4];
|
||||
W[21] = W[14] + s0(W[ 6]) + W[ 5];
|
||||
W[22] = W[15] + s0(W[ 7]) + W[ 6];
|
||||
W[23] = W[16] + s0(W[ 8]) + W[ 7];
|
||||
W[24] = W[17] + s0(W[ 9]) + W[ 8];
|
||||
W[25] = s0(W[10]) + W[ 9];
|
||||
W[26] = s0(W[11]) + W[10];
|
||||
W[27] = s0(W[12]) + W[11];
|
||||
W[28] = s0(W[13]) + W[12];
|
||||
W[29] = s0(W[14]) + W[13];
|
||||
W[30] = s0(W[15]) + W[14];
|
||||
W[31] = s0(W[16]) + W[15];
|
||||
}
|
||||
|
||||
static inline void sha256d_prehash(uint32_t *S, const uint32_t *W)
|
||||
{
|
||||
uint32_t t0, t1;
|
||||
RNDr(S, W, 0);
|
||||
RNDr(S, W, 1);
|
||||
RNDr(S, W, 2);
|
||||
}
|
||||
|
||||
#ifdef EXTERN_SHA256
|
||||
|
||||
void sha256d_ms(uint32_t *hash, uint32_t *W,
|
||||
const uint32_t *midstate, const uint32_t *prehash);
|
||||
|
||||
#else
|
||||
|
||||
static inline void sha256d_ms(uint32_t *hash, uint32_t *W,
|
||||
const uint32_t *midstate, const uint32_t *prehash)
|
||||
{
|
||||
uint32_t S[64];
|
||||
uint32_t t0, t1;
|
||||
int i;
|
||||
|
||||
S[18] = W[18];
|
||||
S[19] = W[19];
|
||||
S[20] = W[20];
|
||||
S[22] = W[22];
|
||||
S[23] = W[23];
|
||||
S[24] = W[24];
|
||||
S[30] = W[30];
|
||||
S[31] = W[31];
|
||||
|
||||
W[18] += s0(W[3]);
|
||||
W[19] += W[3];
|
||||
W[20] += s1(W[18]);
|
||||
W[21] = s1(W[19]);
|
||||
W[22] += s1(W[20]);
|
||||
W[23] += s1(W[21]);
|
||||
W[24] += s1(W[22]);
|
||||
W[25] = s1(W[23]) + W[18];
|
||||
W[26] = s1(W[24]) + W[19];
|
||||
W[27] = s1(W[25]) + W[20];
|
||||
W[28] = s1(W[26]) + W[21];
|
||||
W[29] = s1(W[27]) + W[22];
|
||||
W[30] += s1(W[28]) + W[23];
|
||||
W[31] += s1(W[29]) + W[24];
|
||||
for (i = 32; i < 64; i += 2) {
|
||||
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
|
||||
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15];
|
||||
}
|
||||
|
||||
memcpy(S, prehash, 32);
|
||||
|
||||
RNDr(S, W, 3);
|
||||
RNDr(S, W, 4);
|
||||
RNDr(S, W, 5);
|
||||
RNDr(S, W, 6);
|
||||
RNDr(S, W, 7);
|
||||
RNDr(S, W, 8);
|
||||
RNDr(S, W, 9);
|
||||
RNDr(S, W, 10);
|
||||
RNDr(S, W, 11);
|
||||
RNDr(S, W, 12);
|
||||
RNDr(S, W, 13);
|
||||
RNDr(S, W, 14);
|
||||
RNDr(S, W, 15);
|
||||
RNDr(S, W, 16);
|
||||
RNDr(S, W, 17);
|
||||
RNDr(S, W, 18);
|
||||
RNDr(S, W, 19);
|
||||
RNDr(S, W, 20);
|
||||
RNDr(S, W, 21);
|
||||
RNDr(S, W, 22);
|
||||
RNDr(S, W, 23);
|
||||
RNDr(S, W, 24);
|
||||
RNDr(S, W, 25);
|
||||
RNDr(S, W, 26);
|
||||
RNDr(S, W, 27);
|
||||
RNDr(S, W, 28);
|
||||
RNDr(S, W, 29);
|
||||
RNDr(S, W, 30);
|
||||
RNDr(S, W, 31);
|
||||
RNDr(S, W, 32);
|
||||
RNDr(S, W, 33);
|
||||
RNDr(S, W, 34);
|
||||
RNDr(S, W, 35);
|
||||
RNDr(S, W, 36);
|
||||
RNDr(S, W, 37);
|
||||
RNDr(S, W, 38);
|
||||
RNDr(S, W, 39);
|
||||
RNDr(S, W, 40);
|
||||
RNDr(S, W, 41);
|
||||
RNDr(S, W, 42);
|
||||
RNDr(S, W, 43);
|
||||
RNDr(S, W, 44);
|
||||
RNDr(S, W, 45);
|
||||
RNDr(S, W, 46);
|
||||
RNDr(S, W, 47);
|
||||
RNDr(S, W, 48);
|
||||
RNDr(S, W, 49);
|
||||
RNDr(S, W, 50);
|
||||
RNDr(S, W, 51);
|
||||
RNDr(S, W, 52);
|
||||
RNDr(S, W, 53);
|
||||
RNDr(S, W, 54);
|
||||
RNDr(S, W, 55);
|
||||
RNDr(S, W, 56);
|
||||
RNDr(S, W, 57);
|
||||
RNDr(S, W, 58);
|
||||
RNDr(S, W, 59);
|
||||
RNDr(S, W, 60);
|
||||
RNDr(S, W, 61);
|
||||
RNDr(S, W, 62);
|
||||
RNDr(S, W, 63);
|
||||
|
||||
for (i = 0; i < 8; i++)
|
||||
S[i] += midstate[i];
|
||||
|
||||
W[18] = S[18];
|
||||
W[19] = S[19];
|
||||
W[20] = S[20];
|
||||
W[22] = S[22];
|
||||
W[23] = S[23];
|
||||
W[24] = S[24];
|
||||
W[30] = S[30];
|
||||
W[31] = S[31];
|
||||
|
||||
memcpy(S + 8, sha256d_hash1 + 8, 32);
|
||||
S[16] = s1(sha256d_hash1[14]) + sha256d_hash1[ 9] + s0(S[ 1]) + S[ 0];
|
||||
S[17] = s1(sha256d_hash1[15]) + sha256d_hash1[10] + s0(S[ 2]) + S[ 1];
|
||||
S[18] = s1(S[16]) + sha256d_hash1[11] + s0(S[ 3]) + S[ 2];
|
||||
S[19] = s1(S[17]) + sha256d_hash1[12] + s0(S[ 4]) + S[ 3];
|
||||
S[20] = s1(S[18]) + sha256d_hash1[13] + s0(S[ 5]) + S[ 4];
|
||||
S[21] = s1(S[19]) + sha256d_hash1[14] + s0(S[ 6]) + S[ 5];
|
||||
S[22] = s1(S[20]) + sha256d_hash1[15] + s0(S[ 7]) + S[ 6];
|
||||
S[23] = s1(S[21]) + S[16] + s0(sha256d_hash1[ 8]) + S[ 7];
|
||||
S[24] = s1(S[22]) + S[17] + s0(sha256d_hash1[ 9]) + sha256d_hash1[ 8];
|
||||
S[25] = s1(S[23]) + S[18] + s0(sha256d_hash1[10]) + sha256d_hash1[ 9];
|
||||
S[26] = s1(S[24]) + S[19] + s0(sha256d_hash1[11]) + sha256d_hash1[10];
|
||||
S[27] = s1(S[25]) + S[20] + s0(sha256d_hash1[12]) + sha256d_hash1[11];
|
||||
S[28] = s1(S[26]) + S[21] + s0(sha256d_hash1[13]) + sha256d_hash1[12];
|
||||
S[29] = s1(S[27]) + S[22] + s0(sha256d_hash1[14]) + sha256d_hash1[13];
|
||||
S[30] = s1(S[28]) + S[23] + s0(sha256d_hash1[15]) + sha256d_hash1[14];
|
||||
S[31] = s1(S[29]) + S[24] + s0(S[16]) + sha256d_hash1[15];
|
||||
for (i = 32; i < 60; i += 2) {
|
||||
S[i] = s1(S[i - 2]) + S[i - 7] + s0(S[i - 15]) + S[i - 16];
|
||||
S[i+1] = s1(S[i - 1]) + S[i - 6] + s0(S[i - 14]) + S[i - 15];
|
||||
}
|
||||
S[60] = s1(S[58]) + S[53] + s0(S[45]) + S[44];
|
||||
|
||||
sha256_init(hash);
|
||||
|
||||
RNDr(hash, S, 0);
|
||||
RNDr(hash, S, 1);
|
||||
RNDr(hash, S, 2);
|
||||
RNDr(hash, S, 3);
|
||||
RNDr(hash, S, 4);
|
||||
RNDr(hash, S, 5);
|
||||
RNDr(hash, S, 6);
|
||||
RNDr(hash, S, 7);
|
||||
RNDr(hash, S, 8);
|
||||
RNDr(hash, S, 9);
|
||||
RNDr(hash, S, 10);
|
||||
RNDr(hash, S, 11);
|
||||
RNDr(hash, S, 12);
|
||||
RNDr(hash, S, 13);
|
||||
RNDr(hash, S, 14);
|
||||
RNDr(hash, S, 15);
|
||||
RNDr(hash, S, 16);
|
||||
RNDr(hash, S, 17);
|
||||
RNDr(hash, S, 18);
|
||||
RNDr(hash, S, 19);
|
||||
RNDr(hash, S, 20);
|
||||
RNDr(hash, S, 21);
|
||||
RNDr(hash, S, 22);
|
||||
RNDr(hash, S, 23);
|
||||
RNDr(hash, S, 24);
|
||||
RNDr(hash, S, 25);
|
||||
RNDr(hash, S, 26);
|
||||
RNDr(hash, S, 27);
|
||||
RNDr(hash, S, 28);
|
||||
RNDr(hash, S, 29);
|
||||
RNDr(hash, S, 30);
|
||||
RNDr(hash, S, 31);
|
||||
RNDr(hash, S, 32);
|
||||
RNDr(hash, S, 33);
|
||||
RNDr(hash, S, 34);
|
||||
RNDr(hash, S, 35);
|
||||
RNDr(hash, S, 36);
|
||||
RNDr(hash, S, 37);
|
||||
RNDr(hash, S, 38);
|
||||
RNDr(hash, S, 39);
|
||||
RNDr(hash, S, 40);
|
||||
RNDr(hash, S, 41);
|
||||
RNDr(hash, S, 42);
|
||||
RNDr(hash, S, 43);
|
||||
RNDr(hash, S, 44);
|
||||
RNDr(hash, S, 45);
|
||||
RNDr(hash, S, 46);
|
||||
RNDr(hash, S, 47);
|
||||
RNDr(hash, S, 48);
|
||||
RNDr(hash, S, 49);
|
||||
RNDr(hash, S, 50);
|
||||
RNDr(hash, S, 51);
|
||||
RNDr(hash, S, 52);
|
||||
RNDr(hash, S, 53);
|
||||
RNDr(hash, S, 54);
|
||||
RNDr(hash, S, 55);
|
||||
RNDr(hash, S, 56);
|
||||
|
||||
hash[2] += hash[6] + S1(hash[3]) + Ch(hash[3], hash[4], hash[5])
|
||||
+ S[57] + sha256_k[57];
|
||||
hash[1] += hash[5] + S1(hash[2]) + Ch(hash[2], hash[3], hash[4])
|
||||
+ S[58] + sha256_k[58];
|
||||
hash[0] += hash[4] + S1(hash[1]) + Ch(hash[1], hash[2], hash[3])
|
||||
+ S[59] + sha256_k[59];
|
||||
hash[7] += hash[3] + S1(hash[0]) + Ch(hash[0], hash[1], hash[2])
|
||||
+ S[60] + sha256_k[60]
|
||||
+ sha256_h[7];
|
||||
}
|
||||
|
||||
#endif /* EXTERN_SHA256 */
|
||||
|
||||
#ifdef HAVE_SHA256_4WAY
|
||||
|
||||
void sha256d_ms_4way(uint32_t *hash, uint32_t *data,
|
||||
const uint32_t *midstate, const uint32_t *prehash);
|
||||
|
||||
static inline int scanhash_sha256d_4way_pooler( struct work *work,
|
||||
uint32_t max_nonce, uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
uint32_t *pdata = work->data;
|
||||
uint32_t *ptarget = work->target;
|
||||
|
||||
uint32_t _ALIGN(128) data[4 * 64];
|
||||
uint32_t _ALIGN(32) hash[4 * 8];
|
||||
uint32_t _ALIGN(32) midstate[4 * 8];
|
||||
uint32_t _ALIGN(32) prehash[4 * 8];
|
||||
uint32_t n = pdata[19] - 1;
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t Htarg = ptarget[7];
|
||||
int thr_id = mythr->id;
|
||||
int i, j;
|
||||
|
||||
memcpy(data, pdata + 16, 64);
|
||||
sha256d_preextend(data);
|
||||
for (i = 31; i >= 0; i--)
|
||||
for (j = 0; j < 4; j++)
|
||||
data[i * 4 + j] = data[i];
|
||||
|
||||
sha256_init(midstate);
|
||||
sha256_transform(midstate, pdata, 0);
|
||||
memcpy(prehash, midstate, 32);
|
||||
sha256d_prehash(prehash, pdata + 16);
|
||||
for (i = 7; i >= 0; i--) {
|
||||
for (j = 0; j < 4; j++) {
|
||||
midstate[i * 4 + j] = midstate[i];
|
||||
prehash[i * 4 + j] = prehash[i];
|
||||
}
|
||||
}
|
||||
|
||||
do {
|
||||
for (i = 0; i < 4; i++)
|
||||
data[4 * 3 + i] = ++n;
|
||||
|
||||
sha256d_ms_4way(hash, data, midstate, prehash);
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
if (swab32(hash[4 * 7 + i]) <= Htarg) {
|
||||
pdata[19] = data[4 * 3 + i];
|
||||
sha256d_80_swap(hash, pdata);
|
||||
if ( fulltest( hash, ptarget ) && !opt_benchmark )
|
||||
submit_solution( work, hash, mythr );
|
||||
}
|
||||
}
|
||||
} while (n < max_nonce && !work_restart[thr_id].restart);
|
||||
|
||||
*hashes_done = n - first_nonce + 1;
|
||||
pdata[19] = n;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* HAVE_SHA256_4WAY */
|
||||
|
||||
#ifdef HAVE_SHA256_8WAY
|
||||
|
||||
void sha256d_ms_8way(uint32_t *hash, uint32_t *data,
|
||||
const uint32_t *midstate, const uint32_t *prehash);
|
||||
|
||||
static inline int scanhash_sha256d_8way_pooler( struct work *work,
|
||||
uint32_t max_nonce, uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
uint32_t *pdata = work->data;
|
||||
uint32_t *ptarget = work->target;
|
||||
|
||||
uint32_t _ALIGN(128) data[8 * 64];
|
||||
uint32_t _ALIGN(32) hash[8 * 8];
|
||||
uint32_t _ALIGN(32) midstate[8 * 8];
|
||||
uint32_t _ALIGN(32) prehash[8 * 8];
|
||||
uint32_t n = pdata[19] - 1;
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t Htarg = ptarget[7];
|
||||
int thr_id = mythr->id;
|
||||
int i, j;
|
||||
|
||||
memcpy(data, pdata + 16, 64);
|
||||
sha256d_preextend(data);
|
||||
for (i = 31; i >= 0; i--)
|
||||
for (j = 0; j < 8; j++)
|
||||
data[i * 8 + j] = data[i];
|
||||
|
||||
sha256_init(midstate);
|
||||
sha256_transform(midstate, pdata, 0);
|
||||
memcpy(prehash, midstate, 32);
|
||||
sha256d_prehash(prehash, pdata + 16);
|
||||
for (i = 7; i >= 0; i--) {
|
||||
for (j = 0; j < 8; j++) {
|
||||
midstate[i * 8 + j] = midstate[i];
|
||||
prehash[i * 8 + j] = prehash[i];
|
||||
}
|
||||
}
|
||||
|
||||
do {
|
||||
for (i = 0; i < 8; i++)
|
||||
data[8 * 3 + i] = ++n;
|
||||
|
||||
sha256d_ms_8way(hash, data, midstate, prehash);
|
||||
|
||||
for (i = 0; i < 8; i++) {
|
||||
if (swab32(hash[8 * 7 + i]) <= Htarg) {
|
||||
pdata[19] = data[8 * 3 + i];
|
||||
sha256d_80_swap(hash, pdata);
|
||||
if ( fulltest( hash, ptarget ) && !opt_benchmark )
|
||||
submit_solution( work, hash, mythr );
|
||||
}
|
||||
}
|
||||
} while (n < max_nonce && !work_restart[thr_id].restart);
|
||||
|
||||
*hashes_done = n - first_nonce + 1;
|
||||
pdata[19] = n;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* HAVE_SHA256_8WAY */
|
||||
|
||||
int scanhash_sha256d_pooler( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
uint32_t *pdata = work->data;
|
||||
uint32_t *ptarget = work->target;
|
||||
uint32_t _ALIGN(128) data[64];
|
||||
uint32_t _ALIGN(32) hash[8];
|
||||
uint32_t _ALIGN(32) midstate[8];
|
||||
uint32_t _ALIGN(32) prehash[8];
|
||||
uint32_t n = pdata[19] - 1;
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t Htarg = ptarget[7];
|
||||
int thr_id = mythr->id; // thr_id arg is deprecated
|
||||
|
||||
#ifdef HAVE_SHA256_8WAY
|
||||
if ( sha256_use_8way() )
|
||||
return scanhash_sha256d_8way_pooler( work, max_nonce, hashes_done, mythr );
|
||||
#endif
|
||||
#ifdef HAVE_SHA256_4WAY
|
||||
if ( sha256_use_4way() )
|
||||
return scanhash_sha256d_4way_pooler( work, max_nonce, hashes_done, mythr );
|
||||
#endif
|
||||
|
||||
memcpy(data, pdata + 16, 64);
|
||||
sha256d_preextend(data);
|
||||
|
||||
sha256_init(midstate);
|
||||
sha256_transform(midstate, pdata, 0);
|
||||
memcpy(prehash, midstate, 32);
|
||||
sha256d_prehash(prehash, pdata + 16);
|
||||
|
||||
do {
|
||||
data[3] = ++n;
|
||||
sha256d_ms(hash, data, midstate, prehash);
|
||||
if (unlikely(swab32(hash[7]) <= Htarg))
|
||||
{
|
||||
pdata[19] = data[3];
|
||||
sha256d_80_swap(hash, pdata);
|
||||
if ( fulltest(hash, ptarget) && !opt_benchmark )
|
||||
submit_solution( work, hash, mythr );
|
||||
}
|
||||
} while (likely(n < max_nonce && !work_restart[thr_id].restart));
|
||||
*hashes_done = n - first_nonce + 1;
|
||||
pdata[19] = n;
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool register_sha256d_algo( algo_gate_t* gate )
|
||||
{
|
||||
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
|
||||
#if defined(SHA256D_16WAY)
|
||||
gate->scanhash = (void*)&scanhash_sha256d_16way;
|
||||
#elif defined(SHA256D_SHA)
|
||||
gate->optimizations = SHA_OPT;
|
||||
gate->scanhash = (void*)&scanhash_sha256d_sha;
|
||||
#elif defined(SHA256D_NEON_SHA2)
|
||||
gate->optimizations = SHA_OPT;
|
||||
gate->scanhash = (void*)&scanhash_sha256d_neon_sha2;
|
||||
//#elif defined(SHA256D_8WAY)
|
||||
// gate->scanhash = (void*)&scanhash_sha256d_8way;
|
||||
#else
|
||||
gate->scanhash = (void*)&scanhash_sha256d_pooler;
|
||||
// gate->scanhash = (void*)&scanhash_sha256d_4way;
|
||||
#endif
|
||||
// gate->hash = (void*)&sha256d;
|
||||
return true;
|
||||
};
|
||||
|
@@ -54,29 +54,29 @@ static const uint32_t K256[64] =
|
||||
v128_xor( v128_xor( \
|
||||
v128_ror32(x, 17), v128_ror32(x, 19) ), v128_sr32(x, 10) )
|
||||
|
||||
#define SHA2s_MEXP( a, b, c, d ) \
|
||||
#define SHA256_4X32_MEXP( a, b, c, d ) \
|
||||
v128_add4_32( SSG2_1( a ), b, SSG2_0( c ), d );
|
||||
|
||||
#define SHA256x4_MSG_EXPANSION( W ) \
|
||||
W[ 0] = SHA2s_MEXP( W[14], W[ 9], W[ 1], W[ 0] ); \
|
||||
W[ 1] = SHA2s_MEXP( W[15], W[10], W[ 2], W[ 1] ); \
|
||||
W[ 2] = SHA2s_MEXP( W[ 0], W[11], W[ 3], W[ 2] ); \
|
||||
W[ 3] = SHA2s_MEXP( W[ 1], W[12], W[ 4], W[ 3] ); \
|
||||
W[ 4] = SHA2s_MEXP( W[ 2], W[13], W[ 5], W[ 4] ); \
|
||||
W[ 5] = SHA2s_MEXP( W[ 3], W[14], W[ 6], W[ 5] ); \
|
||||
W[ 6] = SHA2s_MEXP( W[ 4], W[15], W[ 7], W[ 6] ); \
|
||||
W[ 7] = SHA2s_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] ); \
|
||||
W[ 8] = SHA2s_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] ); \
|
||||
W[ 9] = SHA2s_MEXP( W[ 7], W[ 2], W[10], W[ 9] ); \
|
||||
W[10] = SHA2s_MEXP( W[ 8], W[ 3], W[11], W[10] ); \
|
||||
W[11] = SHA2s_MEXP( W[ 9], W[ 4], W[12], W[11] ); \
|
||||
W[12] = SHA2s_MEXP( W[10], W[ 5], W[13], W[12] ); \
|
||||
W[13] = SHA2s_MEXP( W[11], W[ 6], W[14], W[13] ); \
|
||||
W[14] = SHA2s_MEXP( W[12], W[ 7], W[15], W[14] ); \
|
||||
W[15] = SHA2s_MEXP( W[13], W[ 8], W[ 0], W[15] );
|
||||
#define SHA256_4X32_MSG_EXPANSION( W ) \
|
||||
W[ 0] = SHA256_4X32_MEXP( W[14], W[ 9], W[ 1], W[ 0] ); \
|
||||
W[ 1] = SHA256_4X32_MEXP( W[15], W[10], W[ 2], W[ 1] ); \
|
||||
W[ 2] = SHA256_4X32_MEXP( W[ 0], W[11], W[ 3], W[ 2] ); \
|
||||
W[ 3] = SHA256_4X32_MEXP( W[ 1], W[12], W[ 4], W[ 3] ); \
|
||||
W[ 4] = SHA256_4X32_MEXP( W[ 2], W[13], W[ 5], W[ 4] ); \
|
||||
W[ 5] = SHA256_4X32_MEXP( W[ 3], W[14], W[ 6], W[ 5] ); \
|
||||
W[ 6] = SHA256_4X32_MEXP( W[ 4], W[15], W[ 7], W[ 6] ); \
|
||||
W[ 7] = SHA256_4X32_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] ); \
|
||||
W[ 8] = SHA256_4X32_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] ); \
|
||||
W[ 9] = SHA256_4X32_MEXP( W[ 7], W[ 2], W[10], W[ 9] ); \
|
||||
W[10] = SHA256_4X32_MEXP( W[ 8], W[ 3], W[11], W[10] ); \
|
||||
W[11] = SHA256_4X32_MEXP( W[ 9], W[ 4], W[12], W[11] ); \
|
||||
W[12] = SHA256_4X32_MEXP( W[10], W[ 5], W[13], W[12] ); \
|
||||
W[13] = SHA256_4X32_MEXP( W[11], W[ 6], W[14], W[13] ); \
|
||||
W[14] = SHA256_4X32_MEXP( W[12], W[ 7], W[15], W[14] ); \
|
||||
W[15] = SHA256_4X32_MEXP( W[13], W[ 8], W[ 0], W[15] );
|
||||
|
||||
#define SHA2s_4WAY_STEP(A, B, C, D, E, F, G, H, i, j) \
|
||||
do { \
|
||||
#define SHA256_4X32_ROUND(A, B, C, D, E, F, G, H, i, j) \
|
||||
{ \
|
||||
v128_t T1, T2; \
|
||||
v128_t K = v128_32( K256[( (j)+(i) )] ); \
|
||||
T1 = v128_add32( H, v128_add4_32( BSG2_1(E), CHs(E, F, G), \
|
||||
@@ -85,31 +85,41 @@ do { \
|
||||
Y_xor_Z = X_xor_Y; \
|
||||
D = v128_add32( D, T1 ); \
|
||||
H = v128_add32( T1, T2 ); \
|
||||
} while (0)
|
||||
}
|
||||
|
||||
#define SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, j ) \
|
||||
#define SHA256_4X32_ROUND_NOMSG( A, B, C, D, E, F, G, H, i, j ) \
|
||||
{ \
|
||||
v128_t T1 = v128_add4_32( H, BSG2_1(E), CHs(E, F, G), \
|
||||
v128_32( K256[(i)+(j)] ) ); \
|
||||
v128_t T2 = v128_add32( BSG2_0(A), MAJs(A, B, C) ); \
|
||||
Y_xor_Z = X_xor_Y; \
|
||||
D = v128_add32( D, T1 ); \
|
||||
H = v128_add32( T1, T2 ); \
|
||||
}
|
||||
|
||||
#define SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, j ) \
|
||||
{ \
|
||||
v128_t X_xor_Y, Y_xor_Z = v128_xor( B, C ); \
|
||||
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, j ); \
|
||||
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, j ); \
|
||||
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, j ); \
|
||||
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 3, j ); \
|
||||
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 4, j ); \
|
||||
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 5, j ); \
|
||||
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 6, j ); \
|
||||
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 7, j ); \
|
||||
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 8, j ); \
|
||||
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 9, j ); \
|
||||
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 10, j ); \
|
||||
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 11, j ); \
|
||||
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 12, j ); \
|
||||
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 13, j ); \
|
||||
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 14, j ); \
|
||||
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 15, j ); \
|
||||
SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 0, j ); \
|
||||
SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 1, j ); \
|
||||
SHA256_4X32_ROUND( G, H, A, B, C, D, E, F, 2, j ); \
|
||||
SHA256_4X32_ROUND( F, G, H, A, B, C, D, E, 3, j ); \
|
||||
SHA256_4X32_ROUND( E, F, G, H, A, B, C, D, 4, j ); \
|
||||
SHA256_4X32_ROUND( D, E, F, G, H, A, B, C, 5, j ); \
|
||||
SHA256_4X32_ROUND( C, D, E, F, G, H, A, B, 6, j ); \
|
||||
SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 7, j ); \
|
||||
SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 8, j ); \
|
||||
SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 9, j ); \
|
||||
SHA256_4X32_ROUND( G, H, A, B, C, D, E, F, 10, j ); \
|
||||
SHA256_4X32_ROUND( F, G, H, A, B, C, D, E, 11, j ); \
|
||||
SHA256_4X32_ROUND( E, F, G, H, A, B, C, D, 12, j ); \
|
||||
SHA256_4X32_ROUND( D, E, F, G, H, A, B, C, 13, j ); \
|
||||
SHA256_4X32_ROUND( C, D, E, F, G, H, A, B, 14, j ); \
|
||||
SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 15, j ); \
|
||||
}
|
||||
|
||||
// LE data, no need to byte swap
|
||||
static inline void SHA256_4WAY_TRANSFORM( v128_t *out, v128_t *W,
|
||||
static inline void SHA256_4X32_TRANSFORM( v128_t *out, v128_t *W,
|
||||
const v128_t *in )
|
||||
{
|
||||
v128_t A, B, C, D, E, F, G, H;
|
||||
@@ -123,13 +133,13 @@ static inline void SHA256_4WAY_TRANSFORM( v128_t *out, v128_t *W,
|
||||
G = in[6];
|
||||
H = in[7];
|
||||
|
||||
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 0 );
|
||||
SHA256x4_MSG_EXPANSION( W );
|
||||
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
|
||||
SHA256x4_MSG_EXPANSION( W );
|
||||
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
|
||||
SHA256x4_MSG_EXPANSION( W );
|
||||
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 48 );
|
||||
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 0 );
|
||||
SHA256_4X32_MSG_EXPANSION( W );
|
||||
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
|
||||
SHA256_4X32_MSG_EXPANSION( W );
|
||||
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
|
||||
SHA256_4X32_MSG_EXPANSION( W );
|
||||
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 48 );
|
||||
|
||||
out[0] = v128_add32( in[0], A );
|
||||
out[1] = v128_add32( in[1], B );
|
||||
@@ -142,47 +152,37 @@ static inline void SHA256_4WAY_TRANSFORM( v128_t *out, v128_t *W,
|
||||
}
|
||||
|
||||
// LE data, no need to byte swap
|
||||
void sha256_4way_transform_le( v128_t *state_out, const v128_t *data,
|
||||
void sha256_4x32_transform_le( v128_t *state_out, const v128_t *data,
|
||||
const v128_t *state_in )
|
||||
{
|
||||
v128_t W[16];
|
||||
v128_memcpy( W, data, 16 );
|
||||
SHA256_4WAY_TRANSFORM( state_out, W, state_in );
|
||||
SHA256_4X32_TRANSFORM( state_out, W, state_in );
|
||||
}
|
||||
|
||||
// BE data, need to byte swap input data
|
||||
void sha256_4way_transform_be( v128_t *state_out, const v128_t *data,
|
||||
void sha256_4x32_transform_be( v128_t *state_out, const v128_t *data,
|
||||
const v128_t *state_in )
|
||||
{
|
||||
v128_t W[16];
|
||||
v128_block_bswap32( W, data );
|
||||
v128_block_bswap32( W+8, data+8 );
|
||||
SHA256_4WAY_TRANSFORM( state_out, W, state_in );
|
||||
SHA256_4X32_TRANSFORM( state_out, W, state_in );
|
||||
}
|
||||
|
||||
// prehash_3rounds & final_rounds are not working
|
||||
void sha256_4way_prehash_3rounds( v128_t *state_mid, v128_t *X,
|
||||
const v128_t *W, const v128_t *state_in )
|
||||
void sha256_4x32_prehash_3rounds( v128_t *state_mid, v128_t *X,
|
||||
const v128_t *W, const v128_t *state_in )
|
||||
{
|
||||
v128_t A, B, C, D, E, F, G, H;
|
||||
v128_t A, B, C, D, E, F, G, H, T1;
|
||||
|
||||
// precalculate constant part msg expansion for second iteration.
|
||||
X[ 0] = SHA2s_MEXP( W[14], W[ 9], W[ 1], W[ 0] );
|
||||
X[ 1] = SHA2s_MEXP( W[15], W[10], W[ 2], W[ 1] );
|
||||
X[ 2] = v128_add32( v128_add32( SSG2_1( X[ 0] ), W[11] ), W[ 2] );
|
||||
X[ 3] = v128_add32( v128_add32( SSG2_1( X[ 1] ), W[12] ), SSG2_0( W[ 4] ) );
|
||||
X[ 4] = v128_add32( v128_add32( W[13], SSG2_0( W[ 5] ) ), W[ 4] );
|
||||
X[ 5] = v128_add32( v128_add32( W[14], SSG2_0( W[ 6] ) ), W[ 5] );
|
||||
X[ 6] = v128_add32( v128_add32( W[15], SSG2_0( W[ 7] ) ), W[ 6] );
|
||||
X[ 7] = v128_add32( v128_add32( X[ 0], SSG2_0( W[ 8] ) ), W[ 7] );
|
||||
X[ 8] = v128_add32( v128_add32( X[ 1], SSG2_0( W[ 9] ) ), W[ 8] );
|
||||
X[ 9] = v128_add32( SSG2_0( W[10] ), W[ 9] );
|
||||
X[10] = v128_add32( SSG2_0( W[11] ), W[10] );
|
||||
X[11] = v128_add32( SSG2_0( W[12] ), W[11] );
|
||||
X[12] = v128_add32( SSG2_0( W[13] ), W[12] );
|
||||
X[13] = v128_add32( SSG2_0( W[14] ), W[13] );
|
||||
X[14] = v128_add32( SSG2_0( W[15] ), W[14] );
|
||||
X[15] = v128_add32( SSG2_0( X[ 0] ), W[15] );
|
||||
X[ 0] = v128_add32( SSG2_0( W[ 1] ), W[ 0] );
|
||||
X[ 1] = v128_add32( v128_add32( SSG2_1( W[15] ), SSG2_0( W[ 2] ) ), W[ 1] );
|
||||
X[ 2] = v128_add32( SSG2_1( X[ 0] ), W[ 2] );
|
||||
X[ 3] = v128_add32( SSG2_1( X[ 1] ), SSG2_0( W[ 4] ) );
|
||||
X[ 4] = SSG2_0( W[15] );
|
||||
X[ 5] = v128_add32( SSG2_0( X[ 0] ), W[15] );
|
||||
// W[0] for round 32
|
||||
X[ 6] = v128_add32( SSG2_0( X[ 1] ), X[ 0] );
|
||||
|
||||
A = v128_load( state_in );
|
||||
B = v128_load( state_in + 1 );
|
||||
@@ -194,11 +194,16 @@ void sha256_4way_prehash_3rounds( v128_t *state_mid, v128_t *X,
|
||||
H = v128_load( state_in + 7 );
|
||||
|
||||
v128_t X_xor_Y, Y_xor_Z = v128_xor( B, C );
|
||||
|
||||
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
|
||||
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
|
||||
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
|
||||
|
||||
|
||||
SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 0, 0 );
|
||||
SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 1, 0 );
|
||||
SHA256_4X32_ROUND( G, H, A, B, C, D, E, F, 2, 0 );
|
||||
|
||||
// round 3 part 1, avoid nonces W[3]
|
||||
T1 = v128_add4_32( E, BSG2_1(B), CHs(B, C, D), v128_32( K256[3] ) );
|
||||
A = v128_add32( A, T1 );
|
||||
E = v128_add32( T1, v128_add32( BSG2_0(F), MAJs(F, G, H) ) );
|
||||
|
||||
v128_store( state_mid , A );
|
||||
v128_store( state_mid + 1, B );
|
||||
v128_store( state_mid + 2, C );
|
||||
@@ -209,7 +214,7 @@ void sha256_4way_prehash_3rounds( v128_t *state_mid, v128_t *X,
|
||||
v128_store( state_mid + 7, H );
|
||||
}
|
||||
|
||||
void sha256_4way_final_rounds( v128_t *state_out, const v128_t *data,
|
||||
void sha256_4x32_final_rounds( v128_t *state_out, const v128_t *data,
|
||||
const v128_t *state_in, const v128_t *state_mid, const v128_t *X )
|
||||
{
|
||||
v128_t A, B, C, D, E, F, G, H;
|
||||
@@ -226,45 +231,64 @@ void sha256_4way_final_rounds( v128_t *state_out, const v128_t *data,
|
||||
G = v128_load( state_mid + 6 );
|
||||
H = v128_load( state_mid + 7 );
|
||||
|
||||
v128_t X_xor_Y, Y_xor_Z = v128_xor( G, H );
|
||||
v128_t X_xor_Y, Y_xor_Z = v128_xor( F, G );
|
||||
|
||||
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 3, 0 );
|
||||
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 4, 0 );
|
||||
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 5, 0 );
|
||||
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 6, 0 );
|
||||
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 7, 0 );
|
||||
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 8, 0 );
|
||||
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 9, 0 );
|
||||
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 10, 0 );
|
||||
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 11, 0 );
|
||||
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 12, 0 );
|
||||
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 13, 0 );
|
||||
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 14, 0 );
|
||||
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 15, 0 );
|
||||
// round 3 part 2, add nonces
|
||||
A = v128_add32( A, W[3] );
|
||||
E = v128_add32( E, W[3] );
|
||||
|
||||
SHA256_4X32_ROUND( E, F, G, H, A, B, C, D, 4, 0 );
|
||||
SHA256_4X32_ROUND_NOMSG( D, E, F, G, H, A, B, C, 5, 0 );
|
||||
SHA256_4X32_ROUND_NOMSG( C, D, E, F, G, H, A, B, 6, 0 );
|
||||
SHA256_4X32_ROUND_NOMSG( B, C, D, E, F, G, H, A, 7, 0 );
|
||||
SHA256_4X32_ROUND_NOMSG( A, B, C, D, E, F, G, H, 8, 0 );
|
||||
SHA256_4X32_ROUND_NOMSG( H, A, B, C, D, E, F, G, 9, 0 );
|
||||
SHA256_4X32_ROUND_NOMSG( G, H, A, B, C, D, E, F, 10, 0 );
|
||||
SHA256_4X32_ROUND_NOMSG( F, G, H, A, B, C, D, E, 11, 0 );
|
||||
SHA256_4X32_ROUND_NOMSG( E, F, G, H, A, B, C, D, 12, 0 );
|
||||
SHA256_4X32_ROUND_NOMSG( D, E, F, G, H, A, B, C, 13, 0 );
|
||||
SHA256_4X32_ROUND_NOMSG( C, D, E, F, G, H, A, B, 14, 0 );
|
||||
SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 15, 0 );
|
||||
|
||||
// update precalculated msg expansion with new nonce: W[3].
|
||||
W[ 0] = X[ 0];
|
||||
W[ 1] = X[ 1];
|
||||
W[ 2] = v128_add32( X[ 2], SSG2_0( W[ 3] ) );
|
||||
W[ 3] = v128_add32( X[ 3], W[ 3] );
|
||||
W[ 4] = v128_add32( X[ 4], SSG2_1( W[ 2] ) );
|
||||
W[ 5] = v128_add32( X[ 5], SSG2_1( W[ 3] ) );
|
||||
W[ 6] = v128_add32( X[ 6], SSG2_1( W[ 4] ) );
|
||||
W[ 7] = v128_add32( X[ 7], SSG2_1( W[ 5] ) );
|
||||
W[ 8] = v128_add32( X[ 8], SSG2_1( W[ 6] ) );
|
||||
W[ 9] = v128_add32( X[ 9], v128_add32( SSG2_1( W[ 7] ), W[ 2] ) );
|
||||
W[10] = v128_add32( X[10], v128_add32( SSG2_1( W[ 8] ), W[ 3] ) );
|
||||
W[11] = v128_add32( X[11], v128_add32( SSG2_1( W[ 9] ), W[ 4] ) );
|
||||
W[12] = v128_add32( X[12], v128_add32( SSG2_1( W[10] ), W[ 5] ) );
|
||||
W[13] = v128_add32( X[13], v128_add32( SSG2_1( W[11] ), W[ 6] ) );
|
||||
W[14] = v128_add32( X[14], v128_add32( SSG2_1( W[12] ), W[ 7] ) );
|
||||
W[15] = v128_add32( X[15], v128_add32( SSG2_1( W[13] ), W[ 8] ) );
|
||||
W[ 4] = v128_add32( W[ 4], SSG2_1( W[ 2] ) );
|
||||
W[ 5] = SSG2_1( W[ 3] );
|
||||
W[ 6] = v128_add32( W[15], SSG2_1( W[ 4] ) );
|
||||
W[ 7] = v128_add32( X[ 0], SSG2_1( W[ 5] ) );
|
||||
W[ 8] = v128_add32( X[ 1], SSG2_1( W[ 6] ) );
|
||||
W[ 9] = v128_add32( SSG2_1( W[ 7] ), W[ 2] );
|
||||
W[10] = v128_add32( SSG2_1( W[ 8] ), W[ 3] );
|
||||
W[11] = v128_add32( SSG2_1( W[ 9] ), W[ 4] );
|
||||
W[12] = v128_add32( SSG2_1( W[10] ), W[ 5] );
|
||||
W[13] = v128_add32( SSG2_1( W[11] ), W[ 6] );
|
||||
W[14] = v128_add32( X[ 4], v128_add32( SSG2_1( W[12] ), W[ 7] ) );
|
||||
W[15] = v128_add32( X[ 5], v128_add32( SSG2_1( W[13] ), W[ 8] ) );
|
||||
|
||||
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
|
||||
SHA256x4_MSG_EXPANSION( W );
|
||||
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
|
||||
SHA256x4_MSG_EXPANSION( W );
|
||||
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 48 );
|
||||
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
|
||||
|
||||
W[ 0] = v128_add32( X[ 6], v128_add32( SSG2_1( W[14] ), W[ 9] ) );
|
||||
W[ 1] = SHA256_4X32_MEXP( W[15], W[10], W[ 2], W[ 1] );
|
||||
W[ 2] = SHA256_4X32_MEXP( W[ 0], W[11], W[ 3], W[ 2] );
|
||||
W[ 3] = SHA256_4X32_MEXP( W[ 1], W[12], W[ 4], W[ 3] );
|
||||
W[ 4] = SHA256_4X32_MEXP( W[ 2], W[13], W[ 5], W[ 4] );
|
||||
W[ 5] = SHA256_4X32_MEXP( W[ 3], W[14], W[ 6], W[ 5] );
|
||||
W[ 6] = SHA256_4X32_MEXP( W[ 4], W[15], W[ 7], W[ 6] );
|
||||
W[ 7] = SHA256_4X32_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] );
|
||||
W[ 8] = SHA256_4X32_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] );
|
||||
W[ 9] = SHA256_4X32_MEXP( W[ 7], W[ 2], W[10], W[ 9] );
|
||||
W[10] = SHA256_4X32_MEXP( W[ 8], W[ 3], W[11], W[10] );
|
||||
W[11] = SHA256_4X32_MEXP( W[ 9], W[ 4], W[12], W[11] );
|
||||
W[12] = SHA256_4X32_MEXP( W[10], W[ 5], W[13], W[12] );
|
||||
W[13] = SHA256_4X32_MEXP( W[11], W[ 6], W[14], W[13] );
|
||||
W[14] = SHA256_4X32_MEXP( W[12], W[ 7], W[15], W[14] );
|
||||
W[15] = SHA256_4X32_MEXP( W[13], W[ 8], W[ 0], W[15] );
|
||||
|
||||
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
|
||||
SHA256_4X32_MSG_EXPANSION( W );
|
||||
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 48 );
|
||||
|
||||
A = v128_add32( A, v128_load( state_in ) );
|
||||
B = v128_add32( B, v128_load( state_in + 1 ) );
|
||||
@@ -285,10 +309,11 @@ void sha256_4way_final_rounds( v128_t *state_out, const v128_t *data,
|
||||
v128_store( state_out + 7, H );
|
||||
}
|
||||
|
||||
|
||||
# if 0
|
||||
|
||||
// Working correctly but still slower
|
||||
int sha256_4way_transform_le_short( v128_t *state_out, const v128_t *data,
|
||||
int sha256_4x32_transform_le_short( v128_t *state_out, const v128_t *data,
|
||||
const v128_t *state_in, const uint32_t *target )
|
||||
{
|
||||
v128_t A, B, C, D, E, F, G, H, T0, T1, T2;
|
||||
@@ -308,38 +333,38 @@ int sha256_4way_transform_le_short( v128_t *state_out, const v128_t *data,
|
||||
const v128_t IV7 = H;
|
||||
const v128_t IV6 = G;
|
||||
|
||||
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 0 );
|
||||
SHA256x4_MSG_EXPANSION( W );
|
||||
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
|
||||
SHA256x4_MSG_EXPANSION( W );
|
||||
SHA256x4_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
|
||||
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 0 );
|
||||
SHA256_4X32_MSG_EXPANSION( W );
|
||||
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 16 );
|
||||
SHA256_4X32_MSG_EXPANSION( W );
|
||||
SHA256_4X32_16ROUNDS( A, B, C, D, E, F, G, H, 32 );
|
||||
|
||||
W[ 0] = SHA2s_MEXP( W[14], W[ 9], W[ 1], W[ 0] );
|
||||
W[ 1] = SHA2s_MEXP( W[15], W[10], W[ 2], W[ 1] );
|
||||
W[ 2] = SHA2s_MEXP( W[ 0], W[11], W[ 3], W[ 2] );
|
||||
W[ 3] = SHA2s_MEXP( W[ 1], W[12], W[ 4], W[ 3] );
|
||||
W[ 4] = SHA2s_MEXP( W[ 2], W[13], W[ 5], W[ 4] );
|
||||
W[ 5] = SHA2s_MEXP( W[ 3], W[14], W[ 6], W[ 5] );
|
||||
W[ 6] = SHA2s_MEXP( W[ 4], W[15], W[ 7], W[ 6] );
|
||||
W[ 7] = SHA2s_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] );
|
||||
W[ 8] = SHA2s_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] );
|
||||
W[ 9] = SHA2s_MEXP( W[ 7], W[ 2], W[10], W[ 9] );
|
||||
W[10] = SHA2s_MEXP( W[ 8], W[ 3], W[11], W[10] );
|
||||
W[11] = SHA2s_MEXP( W[ 9], W[ 4], W[12], W[11] );
|
||||
W[12] = SHA2s_MEXP( W[10], W[ 5], W[13], W[12] );
|
||||
W[ 0] = SHA256_4X32_MEXP( W[14], W[ 9], W[ 1], W[ 0] );
|
||||
W[ 1] = SHA256_4X32_MEXP( W[15], W[10], W[ 2], W[ 1] );
|
||||
W[ 2] = SHA256_4X32_MEXP( W[ 0], W[11], W[ 3], W[ 2] );
|
||||
W[ 3] = SHA256_4X32_MEXP( W[ 1], W[12], W[ 4], W[ 3] );
|
||||
W[ 4] = SHA256_4X32_MEXP( W[ 2], W[13], W[ 5], W[ 4] );
|
||||
W[ 5] = SHA256_4X32_MEXP( W[ 3], W[14], W[ 6], W[ 5] );
|
||||
W[ 6] = SHA256_4X32_MEXP( W[ 4], W[15], W[ 7], W[ 6] );
|
||||
W[ 7] = SHA256_4X32_MEXP( W[ 5], W[ 0], W[ 8], W[ 7] );
|
||||
W[ 8] = SHA256_4X32_MEXP( W[ 6], W[ 1], W[ 9], W[ 8] );
|
||||
W[ 9] = SHA256_4X32_MEXP( W[ 7], W[ 2], W[10], W[ 9] );
|
||||
W[10] = SHA256_4X32_MEXP( W[ 8], W[ 3], W[11], W[10] );
|
||||
W[11] = SHA256_4X32_MEXP( W[ 9], W[ 4], W[12], W[11] );
|
||||
W[12] = SHA256_4X32_MEXP( W[10], W[ 5], W[13], W[12] );
|
||||
|
||||
v128_t X_xor_Y, Y_xor_Z = v128_xor( B, C );
|
||||
|
||||
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, 48 );
|
||||
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, 48 );
|
||||
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, 48 );
|
||||
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 3, 48 );
|
||||
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 4, 48 );
|
||||
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 5, 48 );
|
||||
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 6, 48 );
|
||||
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 7, 48 );
|
||||
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 8, 48 );
|
||||
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 9, 48 );
|
||||
SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 0, 48 );
|
||||
SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 1, 48 );
|
||||
SHA256_4X32_ROUND( G, H, A, B, C, D, E, F, 2, 48 );
|
||||
SHA256_4X32_ROUND( F, G, H, A, B, C, D, E, 3, 48 );
|
||||
SHA256_4X32_ROUND( E, F, G, H, A, B, C, D, 4, 48 );
|
||||
SHA256_4X32_ROUND( D, E, F, G, H, A, B, C, 5, 48 );
|
||||
SHA256_4X32_ROUND( C, D, E, F, G, H, A, B, 6, 48 );
|
||||
SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 7, 48 );
|
||||
SHA256_4X32_ROUND( A, B, C, D, E, F, G, H, 8, 48 );
|
||||
SHA256_4X32_ROUND( H, A, B, C, D, E, F, G, 9, 48 );
|
||||
|
||||
T0 = v128_add32( v128_32( K256[58] ),
|
||||
v128_add4_32( BSG2_1( C ), CHs( C, D, E ), W[10], F ) );
|
||||
@@ -368,7 +393,7 @@ int sha256_4way_transform_le_short( v128_t *state_out, const v128_t *data,
|
||||
F = v128_add32( T0, v128_add32( BSG2_0( G ), MAJs( G, H, A ) ) );
|
||||
|
||||
// round 61 part 1
|
||||
W[13] = SHA2s_MEXP( W[11], W[ 6], W[14], W[13] );
|
||||
W[13] = SHA256_4X32_MEXP( W[11], W[ 6], W[14], W[13] );
|
||||
T0 = v128_add32( v128_32( K256[61] ),
|
||||
v128_add4_32( BSG2_1( H ), CHs( H, A, B ), W[13], C ) );
|
||||
G = v128_add32( G, T0 );
|
||||
@@ -401,11 +426,11 @@ int sha256_4way_transform_le_short( v128_t *state_out, const v128_t *data,
|
||||
C = v128_add32( T0, v128_add32( BSG2_0( D ), MAJs( D, E, F ) ) );
|
||||
|
||||
// rounds 62 & 63
|
||||
W[14] = SHA2s_MEXP( W[12], W[ 7], W[15], W[14] );
|
||||
W[15] = SHA2s_MEXP( W[13], W[ 8], W[ 0], W[15] );
|
||||
W[14] = SHA256_4X32_MEXP( W[12], W[ 7], W[15], W[14] );
|
||||
W[15] = SHA256_4X32_MEXP( W[13], W[ 8], W[ 0], W[15] );
|
||||
|
||||
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 14, 48 );
|
||||
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 15, 48 );
|
||||
SHA256_4X32_ROUND( C, D, E, F, G, H, A, B, 14, 48 );
|
||||
SHA256_4X32_ROUND( B, C, D, E, F, G, H, A, 15, 48 );
|
||||
|
||||
state_out[0] = v128_add32( state_in[0], A );
|
||||
state_out[1] = v128_add32( state_in[1], B );
|
||||
@@ -420,7 +445,7 @@ return 1;
|
||||
|
||||
#endif
|
||||
|
||||
void sha256_4way_init( sha256_4way_context *sc )
|
||||
void sha256_4x32_init( sha256_4x32_context *sc )
|
||||
{
|
||||
sc->count_high = sc->count_low = 0;
|
||||
sc->val[0] = v128_32( sha256_iv[0] );
|
||||
@@ -433,7 +458,7 @@ void sha256_4way_init( sha256_4way_context *sc )
|
||||
sc->val[7] = v128_32( sha256_iv[7] );
|
||||
}
|
||||
|
||||
void sha256_4way_update( sha256_4way_context *sc, const void *data, size_t len )
|
||||
void sha256_4x32_update( sha256_4x32_context *sc, const void *data, size_t len )
|
||||
{
|
||||
v128_t *vdata = (v128_t*)data;
|
||||
size_t ptr;
|
||||
@@ -454,7 +479,7 @@ void sha256_4way_update( sha256_4way_context *sc, const void *data, size_t len )
|
||||
len -= clen;
|
||||
if ( ptr == buf_size )
|
||||
{
|
||||
sha256_4way_transform_be( sc->val, sc->buf, sc->val );
|
||||
sha256_4x32_transform_be( sc->val, sc->buf, sc->val );
|
||||
ptr = 0;
|
||||
}
|
||||
clow = sc->count_low;
|
||||
@@ -465,7 +490,7 @@ void sha256_4way_update( sha256_4way_context *sc, const void *data, size_t len )
|
||||
}
|
||||
}
|
||||
|
||||
void sha256_4way_close( sha256_4way_context *sc, void *dst )
|
||||
void sha256_4x32_close( sha256_4x32_context *sc, void *dst )
|
||||
{
|
||||
unsigned ptr;
|
||||
uint32_t low, high;
|
||||
@@ -479,7 +504,7 @@ void sha256_4way_close( sha256_4way_context *sc, void *dst )
|
||||
if ( ptr > pad )
|
||||
{
|
||||
v128_memset_zero( sc->buf + (ptr>>2), (buf_size - ptr) >> 2 );
|
||||
sha256_4way_transform_be( sc->val, sc->buf, sc->val );
|
||||
sha256_4x32_transform_be( sc->val, sc->buf, sc->val );
|
||||
v128_memset_zero( sc->buf, pad >> 2 );
|
||||
}
|
||||
else
|
||||
@@ -491,17 +516,17 @@ void sha256_4way_close( sha256_4way_context *sc, void *dst )
|
||||
|
||||
sc->buf[ pad >> 2 ] = v128_32( bswap_32( high ) );
|
||||
sc->buf[( pad+4 ) >> 2 ] = v128_32( bswap_32( low ) );
|
||||
sha256_4way_transform_be( sc->val, sc->buf, sc->val );
|
||||
sha256_4x32_transform_be( sc->val, sc->buf, sc->val );
|
||||
|
||||
v128_block_bswap32( dst, sc->val );
|
||||
}
|
||||
|
||||
void sha256_4way_full( void *dst, const void *data, size_t len )
|
||||
void sha256_4x32_full( void *dst, const void *data, size_t len )
|
||||
{
|
||||
sha256_4way_context ctx;
|
||||
sha256_4way_init( &ctx );
|
||||
sha256_4way_update( &ctx, data, len );
|
||||
sha256_4way_close( &ctx, dst );
|
||||
sha256_4x32_context ctx;
|
||||
sha256_4x32_init( &ctx );
|
||||
sha256_4x32_update( &ctx, data, len );
|
||||
sha256_4x32_close( &ctx, dst );
|
||||
}
|
||||
|
||||
#if defined(__AVX2__)
|
||||
|
@@ -1200,7 +1200,7 @@ void sha256_neon_sha_transform_le( uint32_t *state_out, const void *input,
|
||||
MSG2_Y = vsha256su1q_u32( MSG2_Y, MSG0_Y, MSG1_Y ); \
|
||||
/* Rounds 44-47 */ \
|
||||
MSG3_X = vsha256su0q_u32( MSG3_X, MSG0_X ); \
|
||||
MSG3_Y = vsha256su0q_u32( MSG3_X, MSG0_Y ); \
|
||||
MSG3_Y = vsha256su0q_u32( MSG3_Y, MSG0_Y ); \
|
||||
TMP2_X = STATE0_X; \
|
||||
TMP2_Y = STATE0_Y; \
|
||||
TMP0_X = vaddq_u32( MSG0_X, casti_v128( K256, 12 ) ); \
|
||||
|
@@ -97,6 +97,14 @@ void sha256_neon_x2sha_final_rounds( uint32_t *state_out_X,
|
||||
#define sha256_prehash_3rounds sha256_neon_sha_prehash_3rounds
|
||||
#define sha256_2x_final_rounds sha256_neon_x2sha_final_rounds
|
||||
|
||||
// generic API
|
||||
#define sha256_transform_le sha256_neon_sha_transform_le
|
||||
#define sha256_transform_be sha256_neon_sha_transform_be
|
||||
#define sha256_2x_transform_le sha256_neon_x2sha_transform_le
|
||||
#define sha256_2x_transform_be sha256_neon_x2sha_transform_be
|
||||
#define sha256_prehash_3rounds sha256_neon_sha_prehash_3rounds
|
||||
#define sha256_2x_final_rounds sha256_neon_x2sha_final_rounds
|
||||
|
||||
#else
|
||||
// without HW acceleration...
|
||||
#include "sph_sha2.h"
|
||||
|
@@ -1,9 +1,9 @@
|
||||
#include "sha256d-4way.h"
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include "sha256-hash.h"
|
||||
#include "sha256d.h"
|
||||
|
||||
static const uint32_t sha256_iv[8] __attribute__ ((aligned (32))) =
|
||||
{
|
||||
@@ -360,15 +360,17 @@ int scanhash_sha256d_8way( struct work *work, const uint32_t max_nonce,
|
||||
|
||||
#if defined(SHA256D_4WAY)
|
||||
|
||||
int scanhash_sha256d_4way( struct work *work, const uint32_t max_nonce,
|
||||
int scanhash_sha256d_4x32( struct work *work, const uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
v128_t vdata[32] __attribute__ ((aligned (64)));
|
||||
v128_t block[16] __attribute__ ((aligned (32)));
|
||||
v128_t hash32[8] __attribute__ ((aligned (32)));
|
||||
v128_t istate[8] __attribute__ ((aligned (32)));
|
||||
v128_t mstate[8] __attribute__ ((aligned (32)));
|
||||
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
|
||||
v128_t vdata[32] __attribute__ ((aligned (64)));
|
||||
v128_t block[16] __attribute__ ((aligned (32)));
|
||||
v128_t hash32[8] __attribute__ ((aligned (32)));
|
||||
v128_t iv[8] __attribute__ ((aligned (32)));
|
||||
v128_t mhash1[8] __attribute__ ((aligned (32)));
|
||||
v128_t mhash2[8] __attribute__ ((aligned (32)));
|
||||
v128_t mexp_pre[8] __attribute__ ((aligned (32)));
|
||||
uint32_t lhash[8] __attribute__ ((aligned (32)));
|
||||
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
|
||||
uint32_t *pdata = work->data;
|
||||
const uint32_t *ptarget = work->target;
|
||||
@@ -376,17 +378,14 @@ int scanhash_sha256d_4way( struct work *work, const uint32_t max_nonce,
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t last_nonce = max_nonce - 4;
|
||||
uint32_t n = first_nonce;
|
||||
v128_t *noncev = vdata + 19;
|
||||
const int thr_id = mythr->id;
|
||||
const bool bench = opt_benchmark;
|
||||
const v128_t last_byte = v128_32( 0x80000000 );
|
||||
const v128_t four = v128_32( 4 );
|
||||
|
||||
for ( int i = 0; i < 19; i++ )
|
||||
vdata[i] = v128_32( pdata[i] );
|
||||
|
||||
*noncev = v128_set32( n+ 3, n+ 2, n+1, n );
|
||||
|
||||
vdata[i] = v128_32( pdata[i] );
|
||||
vdata[16+3] = v128_set32( n+3, n+2, n+1, n );
|
||||
vdata[16+4] = last_byte;
|
||||
v128_memset_zero( vdata+16 + 5, 10 );
|
||||
vdata[16+15] = v128_32( 80*8 );
|
||||
@@ -396,36 +395,38 @@ int scanhash_sha256d_4way( struct work *work, const uint32_t max_nonce,
|
||||
block[15] = v128_32( 32*8 );
|
||||
|
||||
// initialize state
|
||||
istate[0] = v128_32( sha256_iv[0] );
|
||||
istate[1] = v128_32( sha256_iv[1] );
|
||||
istate[2] = v128_32( sha256_iv[2] );
|
||||
istate[3] = v128_32( sha256_iv[3] );
|
||||
istate[4] = v128_32( sha256_iv[4] );
|
||||
istate[5] = v128_32( sha256_iv[5] );
|
||||
istate[6] = v128_32( sha256_iv[6] );
|
||||
istate[7] = v128_32( sha256_iv[7] );
|
||||
iv[0] = v128_32( sha256_iv[0] );
|
||||
iv[1] = v128_32( sha256_iv[1] );
|
||||
iv[2] = v128_32( sha256_iv[2] );
|
||||
iv[3] = v128_32( sha256_iv[3] );
|
||||
iv[4] = v128_32( sha256_iv[4] );
|
||||
iv[5] = v128_32( sha256_iv[5] );
|
||||
iv[6] = v128_32( sha256_iv[6] );
|
||||
iv[7] = v128_32( sha256_iv[7] );
|
||||
|
||||
// hash first 64 bytes of data
|
||||
sha256_4way_transform_le( mstate, vdata, istate );
|
||||
sha256_4x32_transform_le( mhash1, vdata, iv );
|
||||
sha256_4x32_prehash_3rounds( mhash2, mexp_pre, vdata + 16, mhash1 );
|
||||
|
||||
do
|
||||
{
|
||||
sha256_4way_transform_le( block, vdata+16, mstate );
|
||||
sha256_4way_transform_le( hash32, block, istate );
|
||||
|
||||
v128_block_bswap32( hash32, hash32 );
|
||||
sha256_4x32_final_rounds( block, vdata+16, mhash1, mhash2, mexp_pre );
|
||||
sha256_4x32_transform_le( hash32, block, iv );
|
||||
|
||||
for ( int lane = 0; lane < 4; lane++ )
|
||||
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
|
||||
{
|
||||
extr_lane_4x32( lane_hash, hash32, lane, 256 );
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
|
||||
if ( unlikely( bswap_32( hash32_d7[ lane ] ) <= targ32_d7 ) )
|
||||
{
|
||||
pdata[19] = n + lane;
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
extr_lane_4x32( lhash, hash32, lane, 256 );
|
||||
casti_v128( lhash, 0 ) = v128_bswap32( casti_v128( lhash, 0 ) );
|
||||
casti_v128( lhash, 1 ) = v128_bswap32( casti_v128( lhash, 1 ) );
|
||||
if ( likely( valid_hash( lhash, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = n + lane;
|
||||
submit_solution( work, lhash, mythr );
|
||||
}
|
||||
}
|
||||
}
|
||||
*noncev = v128_add32( *noncev, four );
|
||||
vdata[16+3] = v128_add32( vdata[16+3], four );
|
||||
n += 4;
|
||||
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
|
||||
pdata[19] = n;
|
||||
|
@@ -1,3 +1,4 @@
|
||||
#include "sha256-hash.h"
|
||||
#include "sha256d.h"
|
||||
|
||||
void sha256d( void *hash, const void *data, int len )
|
||||
@@ -5,4 +6,24 @@ void sha256d( void *hash, const void *data, int len )
|
||||
sha256_full( hash, data, len );
|
||||
sha256_full( hash, hash, 32 );
|
||||
}
|
||||
bool register_sha256d_algo( algo_gate_t* gate )
|
||||
{
|
||||
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
|
||||
#if defined(SHA256D_16WAY)
|
||||
gate->scanhash = (void*)&scanhash_sha256d_16way;
|
||||
#elif defined(SHA256D_SHA)
|
||||
gate->optimizations = SHA_OPT;
|
||||
gate->scanhash = (void*)&scanhash_sha256d_sha;
|
||||
#elif defined(SHA256D_NEON_SHA2)
|
||||
gate->optimizations = SHA_OPT;
|
||||
gate->scanhash = (void*)&scanhash_sha256d_neon_sha2;
|
||||
#elif defined(SHA256D_8WAY)
|
||||
gate->scanhash = (void*)&scanhash_sha256d_8way;
|
||||
#elif defined(SHA256D_4WAY)
|
||||
gate->scanhash = (void*)&scanhash_sha256d_4x32;
|
||||
#else
|
||||
gate->hash = (void*)&sha256d;
|
||||
#endif
|
||||
return true;
|
||||
};
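The reference path is nothing more than SHA-256 applied twice over the 80-byte block header. A minimal usage sketch of sha256d(), taking the prototype from the definition above and assuming the resulting object is linked against the miner's SHA-256 code:

#include <stdint.h>
#include <stdio.h>

// prototype as defined in algo/sha/sha256d.c
void sha256d( void *hash, const void *data, int len );

int main( void )
{
   uint8_t header[80] = {0};   // stand-in 80-byte block header
   uint8_t hash[32];

   sha256d( hash, header, 80 );   // SHA-256 applied twice

   for ( int i = 0; i < 32; i++ ) printf( "%02x", hash[i] );
   printf( "\n" );
   return 0;
}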
|
||||
|
||||
|
@@ -1,7 +1,58 @@
|
||||
#ifndef __SHA256D_4WAY_H__
|
||||
#define __SHA256D_4WAY_H__ 1
|
||||
|
||||
#include <stdint.h>
|
||||
#include "algo-gate-api.h"
|
||||
#include <string.h>
|
||||
#include <inttypes.h>
|
||||
#include "sha256-hash.h"
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
#define SHA256D_16WAY 1
|
||||
#elif defined(__SHA__)
|
||||
#define SHA256D_SHA 1
|
||||
#elif defined(__ARM_NEON) && defined(__ARM_FEATURE_SHA2)
|
||||
#define SHA256D_NEON_SHA2 1
|
||||
#elif defined(__AVX2__)
|
||||
#define SHA256D_8WAY 1
|
||||
#else
|
||||
#define SHA256D_4WAY 1
|
||||
#endif
|
||||
|
||||
bool register_sha256d_algo( algo_gate_t* gate );
|
||||
|
||||
#if defined(SHA256D_16WAY)
|
||||
|
||||
int scanhash_sha256d_16way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
#endif
|
||||
|
||||
#if defined(SHA256D_8WAY)
|
||||
|
||||
int scanhash_sha256d_8way( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
#endif
|
||||
|
||||
#if defined(SHA256D_4WAY)
|
||||
|
||||
int scanhash_sha256d_4x32( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
#endif
|
||||
|
||||
#if defined(SHA256D_SHA)
|
||||
|
||||
int scanhash_sha256d_sha( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(SHA256D_NEON_SHA2)
|
||||
|
||||
int scanhash_sha256d_neon_sha2( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr );
|
||||
|
||||
#endif
|
||||
|
||||
void sha256d( void *hash, const void *data, int len );
|
||||
|
||||
bool register_sha256d_algo( algo_gate_t* gate );
|
||||
|
||||
#endif
|
||||
|
||||
|
@@ -7,15 +7,15 @@
#include "sph_sha2.h"

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define SHA256DT_16X64 1
#define SHA256DT_16X32 1
#elif defined(__x86_64__) && defined(__SHA__)
#define SHA256DT_X86_SHA256 1
#elif defined(__ARM_NEON) && defined(__ARM_FEATURE_SHA2)
#define SHA256DT_NEON_SHA256 1
#elif defined(__AVX2__)
#define SHA256DT_8X64 1
#define SHA256DT_8X32 1
#elif defined (__SSE2__) || defined(__ARM_NEON)
#define SHA256DT_4X64 1
#define SHA256DT_4X32 1
#endif
// else ref, should never happen
|
||||
|
||||
@@ -183,9 +183,9 @@ int scanhash_sha256dt_neon_x2sha( struct work *work, uint32_t max_nonce,
|
||||
return 0;
|
||||
}
|
||||
|
||||
#elif defined(SHA256DT_16X64)
|
||||
#elif defined(SHA256DT_16X32)
|
||||
|
||||
int scanhash_sha256dt_16x64( struct work *work, const uint32_t max_nonce,
|
||||
int scanhash_sha256dt_16x32( struct work *work, const uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
__m512i block[16] __attribute__ ((aligned (128)));
|
||||
@@ -275,9 +275,9 @@ int scanhash_sha256dt_16x64( struct work *work, const uint32_t max_nonce,
|
||||
return 0;
|
||||
}
|
||||
|
||||
#elif defined(SHA256DT_8X64)
|
||||
#elif defined(SHA256DT_8X32)
|
||||
|
||||
int scanhash_sha256dt_8x64( struct work *work, const uint32_t max_nonce,
|
||||
int scanhash_sha256dt_8x32( struct work *work, const uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
__m256i vdata[32] __attribute__ ((aligned (64)));
|
||||
@@ -355,16 +355,18 @@ int scanhash_sha256dt_8x64( struct work *work, const uint32_t max_nonce,
|
||||
return 0;
|
||||
}
|
||||
|
||||
#elif defined(SHA256DT_4X64)
|
||||
#elif defined(SHA256DT_4X32)
|
||||
|
||||
int scanhash_sha256dt_4x64( struct work *work, const uint32_t max_nonce,
|
||||
int scanhash_sha256dt_4x32( struct work *work, const uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
v128_t vdata[32] __attribute__ ((aligned (64)));
|
||||
v128_t block[16] __attribute__ ((aligned (32)));
|
||||
v128_t hash32[8] __attribute__ ((aligned (32)));
|
||||
v128_t iv[8] __attribute__ ((aligned (32)));
|
||||
v128_t mhash[8] __attribute__ ((aligned (32)));
|
||||
v128_t vdata[32] __attribute__ ((aligned (64)));
|
||||
v128_t block[16] __attribute__ ((aligned (32)));
|
||||
v128_t hash32[8] __attribute__ ((aligned (32)));
|
||||
v128_t iv[8] __attribute__ ((aligned (32)));
|
||||
v128_t mhash1[8] __attribute__ ((aligned (32)));
|
||||
v128_t mhash2[8] __attribute__ ((aligned (32)));
|
||||
v128_t mexp_pre[8] __attribute__ ((aligned (32)));
|
||||
uint32_t lhash[8] __attribute__ ((aligned (32)));
|
||||
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
|
||||
uint32_t *pdata = work->data;
|
||||
@@ -373,26 +375,22 @@ int scanhash_sha256dt_4x64( struct work *work, const uint32_t max_nonce,
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t last_nonce = max_nonce - 4;
|
||||
uint32_t n = first_nonce;
|
||||
v128_t *noncev = vdata + 19;
|
||||
const int thr_id = mythr->id;
|
||||
const bool bench = opt_benchmark;
|
||||
const v128_t last_byte = v128_32( 0x80000000 );
|
||||
const v128_t four = v128_32( 4 );
|
||||
|
||||
|
||||
for ( int i = 0; i < 19; i++ )
|
||||
vdata[i] = v128_32( pdata[i] );
|
||||
|
||||
*noncev = v128_set32( n+ 3, n+ 2, n+1, n );
|
||||
|
||||
vdata[i] = v128_32( pdata[i] );
|
||||
vdata[16+3] = v128_set32( n+3, n+2, n+1, n );
|
||||
vdata[16+4] = last_byte;
|
||||
v128_memset_zero( vdata+16 + 5, 10 );
|
||||
v128_memset_zero( vdata+16 + 5, 9 );
|
||||
vdata[16+15] = v128_32( 0x480 );
|
||||
|
||||
block[ 8] = last_byte;
|
||||
v128_memset_zero( block + 9, 6 );
|
||||
v128_memset_zero( block + 9, 5 );
|
||||
block[15] = v128_32( 0x300 );
|
||||
|
||||
// initialize state
|
||||
iv[0] = v128_32( sha256dt_iv[0] );
|
||||
iv[1] = v128_32( sha256dt_iv[1] );
|
||||
iv[2] = v128_32( sha256dt_iv[2] );
|
||||
@@ -402,62 +400,15 @@ int scanhash_sha256dt_4x64( struct work *work, const uint32_t max_nonce,
|
||||
iv[6] = v128_32( sha256dt_iv[6] );
|
||||
iv[7] = v128_32( sha256dt_iv[7] );
|
||||
|
||||
// hash first 64 bytes of data
|
||||
sha256_4x32_transform_le( mhash, vdata, iv );
|
||||
|
||||
/*
|
||||
uint32_t m1 [8] __attribute__ ((aligned (32)));
|
||||
uint32_t h1 [8] __attribute__ ((aligned (32)));
|
||||
uint32_t b1 [16] __attribute__ ((aligned (32)));
|
||||
uint32_t e16 [16] __attribute__ ((aligned (32)));
|
||||
uint32_t *m4 = (uint32_t*)&midstate;
|
||||
uint32_t *h4 = (uint32_t*)hash32;
|
||||
|
||||
sha256_transform_le( m1, pdata, sha256dt_iv );
|
||||
|
||||
memcpy( e16, pdata + 16, 12 );
|
||||
e16[3] = n;
|
||||
e16[4] = 0x80000000;
|
||||
memset( &e16[5], 0, 40 );
|
||||
e16[15] = 0x480; // funky bit count
|
||||
|
||||
b1[8] = 0x80000000;
|
||||
memset( &b1[9], 0, 24 );
|
||||
b1[9] = b1[10] = b1[11] = b1[12] = b1[13] = b1[14] = 0;
|
||||
b1[15] = 0x300; // bit count
|
||||
*/
|
||||
sha256_4x32_transform_le( mhash1, vdata, iv );
|
||||
sha256_4x32_prehash_3rounds( mhash2, mexp_pre, vdata + 16, mhash1 );
|
||||
|
||||
do
|
||||
{
|
||||
sha256_4x32_transform_le( block, vdata+16, mhash );
|
||||
|
||||
//sha256_transform_le( b1, e16, m1 );
|
||||
|
||||
sha256_4x32_final_rounds( block, vdata+16, mhash1, mhash2, mexp_pre );
|
||||
// sha256_4x32_transform_le( block, vdata+16, mhash1 );
|
||||
sha256_4x32_transform_le( hash32, block, iv );
|
||||
|
||||
/*
|
||||
sha256_transform_le( h1, b1, sha256dt_iv );
|
||||
|
||||
printf("final hash1: %08x %08x %08x %08x %08x %08x %08x %08x\n",
|
||||
h1[0],h1[1],h1[2],h1[3],h1[4],h1[5],h1[6],h1[7]);
|
||||
printf("final hash4: %08x %08x %08x %08x %08x %08x %08x %08x\n",
|
||||
h4[0],h4[4],h4[8],h4[12],h4[16],h4[20],h4[24],h4[28]);
|
||||
|
||||
casti_v128( h1,0 ) = v128_bswap32( casti_v128( h1,0 ) );
|
||||
casti_v128( h1,1 ) = v128_bswap32( casti_v128( h1,1 ) );
|
||||
*/
|
||||
|
||||
// v128_block_bswap32( hash32, hash32 );
|
||||
|
||||
/*
|
||||
printf("bswap hash1: %08x %08x %08x %08x %08x %08x %08x %08x\n",
|
||||
h1[0],h1[1],h1[2],h1[3],h1[4],h1[5],h1[6],h1[7]);
|
||||
printf("bswap hash4: %08x %08x %08x %08x %08x %08x %08x %08x\n",
|
||||
h4[0],h4[4],h4[8],h4[12],h4[16],h4[20],h4[24],h4[28]);
|
||||
|
||||
exit(0);
|
||||
*/
|
||||
|
||||
for ( int lane = 0; lane < 4; lane++ )
|
||||
{
|
||||
if ( unlikely( bswap_32( hash32_d7[ lane ] ) <= targ32_d7 ) )
|
||||
@@ -472,7 +423,7 @@ exit(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
*noncev = v128_add32( *noncev, four );
|
||||
vdata[16+3] = v128_add32( vdata[16+3], four );
|
||||
n += 4;
|
||||
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
|
||||
pdata[19] = n;
|
||||
@@ -485,10 +436,10 @@ exit(0);
|
||||
int scanhash_sha256dt_ref( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
uint32_t block1a[16] __attribute__ ((aligned (32)));
|
||||
uint32_t block2a[16] __attribute__ ((aligned (32)));
|
||||
uint32_t hasha[8] __attribute__ ((aligned (32)));
|
||||
uint32_t mstate[8] __attribute__ ((aligned (32)));
|
||||
uint32_t block1[16] __attribute__ ((aligned (32)));
|
||||
uint32_t block2[16] __attribute__ ((aligned (32)));
|
||||
uint32_t hash32[8] __attribute__ ((aligned (32)));
|
||||
uint32_t mstate[8] __attribute__ ((aligned (32)));
|
||||
uint32_t *pdata = work->data;
|
||||
uint32_t *ptarget = work->target;
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
@@ -497,37 +448,40 @@ int scanhash_sha256dt_ref( struct work *work, uint32_t max_nonce,
|
||||
const int thr_id = mythr->id;
|
||||
const bool bench = opt_benchmark;
|
||||
|
||||
memset( block1, 0, 64 );
|
||||
memset( block2, 0, 64 );
|
||||
|
||||
// hash first 64 byte block of data
|
||||
sha256_transform_le( mstate, pdata, sha256dt_iv );
|
||||
|
||||
// fill & pad second bock without nonce
|
||||
memcpy( block1a, pdata + 16, 12 );
|
||||
block1a[ 3] = 0;
|
||||
block1a[ 4] = 0x80000000;
|
||||
memset( block1a + 5, 0, 40 );
|
||||
block1a[15] = 0x480; // funky bit count
|
||||
memcpy( block1, pdata + 16, 12 );
|
||||
block1[ 3] = n;
|
||||
block1[ 4] = 0x80000000;
|
||||
memset( block1 + 5, 0, 40 );
|
||||
block1[15] = 0x480; // funky bit count
|
||||
|
||||
// Pad third block
|
||||
block2a[ 8] = 0x80000000;
|
||||
memset( block2a + 9, 0, 24 );
|
||||
block2a[15] = 0x300; // bit count
|
||||
block2[ 8] = 0x80000000;
|
||||
memset( block2 + 9, 0, 24 );
|
||||
block2[15] = 0x300; // bit count
|
||||
|
||||
do
|
||||
{
|
||||
// Insert nonce for second block
|
||||
block1a[3] = n;
|
||||
sha256_transform_le( block2a, block1a, mstate );
|
||||
block1[3] = n;
|
||||
sha256_transform_le( block2, block1, mstate );
|
||||
|
||||
sha256_transform_le( hasha, block2a, sha256dt_iv );
|
||||
sha256_transform_le( hash32, block2, sha256dt_iv );
|
||||
|
||||
if ( unlikely( bswap_32( hasha[7] ) <= ptarget[7] ) )
|
||||
if ( unlikely( bswap_32( hash32[7] ) <= ptarget[7] ) )
|
||||
{
|
||||
casti_v128( hasha, 0 ) = v128_bswap32( casti_v128( hasha, 0 ) );
|
||||
casti_v128( hasha, 1 ) = v128_bswap32( casti_v128( hasha, 1 ) );
|
||||
if ( likely( valid_hash( hasha, ptarget ) && !bench ) )
|
||||
casti_v128( hash32, 0 ) = v128_bswap32( casti_v128( hash32, 0 ) );
|
||||
casti_v128( hash32, 1 ) = v128_bswap32( casti_v128( hash32, 1 ) );
|
||||
if ( likely( valid_hash( hash32, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = n;
|
||||
submit_solution( work, hasha, mythr );
|
||||
submit_solution( work, hash32, mythr );
|
||||
}
|
||||
}
|
||||
n += 1;
|
||||
@@ -543,18 +497,18 @@ int scanhash_sha256dt_ref( struct work *work, uint32_t max_nonce,
|
||||
bool register_sha256dt_algo( algo_gate_t* gate )
|
||||
{
|
||||
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT | NEON_OPT;
|
||||
#if defined(SHA256DT_16X64)
|
||||
gate->scanhash = (void*)&scanhash_sha256dt_16x64;
|
||||
#if defined(SHA256DT_16X32)
|
||||
gate->scanhash = (void*)&scanhash_sha256dt_16x32;
|
||||
#elif defined(SHA256DT_X86_SHA256)
|
||||
gate->optimizations = SHA_OPT;
|
||||
gate->scanhash = (void*)&scanhash_sha256dt_x86_x2sha;
|
||||
#elif defined(SHA256DT_NEON_SHA256)
|
||||
gate->optimizations = SHA_OPT;
|
||||
gate->scanhash = (void*)&scanhash_sha256dt_neon_x2sha;
|
||||
#elif defined(SHA256DT_8X64)
|
||||
gate->scanhash = (void*)&scanhash_sha256dt_8x64;
|
||||
#elif defined(SHA256DT_4X64)
|
||||
gate->scanhash = (void*)&scanhash_sha256dt_4x64;
|
||||
#elif defined(SHA256DT_8X32)
|
||||
gate->scanhash = (void*)&scanhash_sha256dt_8x32;
|
||||
#elif defined(SHA256DT_4X32)
|
||||
gate->scanhash = (void*)&scanhash_sha256dt_4x32;
|
||||
#else
|
||||
gate->scanhash = (void*)&scanhash_sha256dt_ref;
|
||||
#endif
|
||||
|
@@ -372,14 +372,14 @@ int scanhash_sha256t_8way( struct work *work, const uint32_t max_nonce,
|
||||
int scanhash_sha256t_4way( struct work *work, const uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
v128_t vdata[32] __attribute__ ((aligned (64)));
|
||||
v128_t block[16] __attribute__ ((aligned (32)));
|
||||
v128_t hash32[8] __attribute__ ((aligned (32)));
|
||||
v128_t istate[8] __attribute__ ((aligned (32)));
|
||||
v128_t mstate[8] __attribute__ ((aligned (32)));
|
||||
// v128_t mstate2[8] __attribute__ ((aligned (32)));
|
||||
// v128_t mexp_pre[8] __attribute__ ((aligned (32)));
|
||||
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
|
||||
v128_t vdata[32] __attribute__ ((aligned (64)));
|
||||
v128_t block[16] __attribute__ ((aligned (32)));
|
||||
v128_t hash32[8] __attribute__ ((aligned (32)));
|
||||
v128_t iv[8] __attribute__ ((aligned (32)));
|
||||
v128_t mhash1[8] __attribute__ ((aligned (32)));
|
||||
v128_t mhash2[8] __attribute__ ((aligned (32)));
|
||||
v128_t mexp_pre[8] __attribute__ ((aligned (32)));
|
||||
uint32_t lhash[8] __attribute__ ((aligned (32)));
|
||||
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
|
||||
uint32_t *pdata = work->data;
|
||||
const uint32_t *ptarget = work->target;
|
||||
@@ -387,62 +387,57 @@ int scanhash_sha256t_4way( struct work *work, const uint32_t max_nonce,
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t last_nonce = max_nonce - 4;
|
||||
uint32_t n = first_nonce;
|
||||
v128_t *noncev = vdata + 19;
|
||||
const int thr_id = mythr->id;
|
||||
const bool bench = opt_benchmark;
|
||||
const v128_t last_byte = v128_32( 0x80000000 );
|
||||
const v128_t four = v128_32( 4 );
|
||||
|
||||
for ( int i = 0; i < 19; i++ )
|
||||
vdata[i] = v128_32( pdata[i] );
|
||||
|
||||
*noncev = v128_set32( n+ 3, n+ 2, n+1, n );
|
||||
|
||||
vdata[i] = v128_32( pdata[i] );
|
||||
vdata[16+3] = v128_set32( n+3, n+2, n+1, n );
|
||||
vdata[16+4] = last_byte;
|
||||
v128_memset_zero( vdata+16 + 5, 10 );
|
||||
vdata[16+15] = v128_32( 80*8 ); // bit count
|
||||
vdata[16+15] = v128_32( 80*8 );
|
||||
|
||||
block[ 8] = last_byte;
|
||||
v128_memset_zero( block + 9, 6 );
|
||||
block[15] = v128_32( 32*8 ); // bit count
|
||||
|
||||
block[15] = v128_32( 32*8 );
|
||||
|
||||
// initialize state
|
||||
istate[0] = v128_32( sha256_iv[0] );
|
||||
istate[1] = v128_32( sha256_iv[1] );
|
||||
istate[2] = v128_32( sha256_iv[2] );
|
||||
istate[3] = v128_32( sha256_iv[3] );
|
||||
istate[4] = v128_32( sha256_iv[4] );
|
||||
istate[5] = v128_32( sha256_iv[5] );
|
||||
istate[6] = v128_32( sha256_iv[6] );
|
||||
istate[7] = v128_32( sha256_iv[7] );
|
||||
iv[0] = v128_32( sha256_iv[0] );
|
||||
iv[1] = v128_32( sha256_iv[1] );
|
||||
iv[2] = v128_32( sha256_iv[2] );
|
||||
iv[3] = v128_32( sha256_iv[3] );
|
||||
iv[4] = v128_32( sha256_iv[4] );
|
||||
iv[5] = v128_32( sha256_iv[5] );
|
||||
iv[6] = v128_32( sha256_iv[6] );
|
||||
iv[7] = v128_32( sha256_iv[7] );
|
||||
|
||||
// hash first 64 bytes of data
|
||||
sha256_4way_transform_le( mstate, vdata, istate );
|
||||
|
||||
// sha256_4way_prehash_3rounds( mstate2, mexp_pre, vdata + 16, mstate1 );
|
||||
sha256_4x32_transform_le( mhash1, vdata, iv );
|
||||
sha256_4x32_prehash_3rounds( mhash2, mexp_pre, vdata + 16, mhash1 );
|
||||
|
||||
do
|
||||
{
|
||||
// sha256_4way_final_rounds( block, vdata+16, mstate1, mstate2,
|
||||
// mexp_pre );
|
||||
|
||||
sha256_4way_transform_le( block, vdata+16, mstate );
|
||||
sha256_4way_transform_le( block, block, istate );
|
||||
sha256_4way_transform_le( hash32, block, istate );
|
||||
sha256_4x32_final_rounds( block, vdata+16, mhash1, mhash2, mexp_pre );
|
||||
sha256_4way_transform_le( block, block, iv );
|
||||
sha256_4way_transform_le( hash32, block, iv );
|
||||
|
||||
v128_block_bswap32( hash32, hash32 );
|
||||
for ( int lane = 0; lane < 4; lane++ )
|
||||
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
|
||||
for ( int lane = 0; lane < 4; lane++ )
|
||||
{
|
||||
if ( unlikely( bswap_32( hash32_d7[ lane ] ) <= targ32_d7 ) )
|
||||
{
|
||||
extr_lane_4x32( lane_hash, hash32, lane, 256 );
|
||||
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
|
||||
extr_lane_4x32( lhash, hash32, lane, 256 );
|
||||
casti_v128( lhash, 0 ) = v128_bswap32( casti_v128( lhash, 0 ) );
|
||||
casti_v128( lhash, 1 ) = v128_bswap32( casti_v128( lhash, 1 ) );
|
||||
if ( likely( valid_hash( lhash, ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = n + lane;
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
submit_solution( work, lhash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = v128_add32( *noncev, four );
|
||||
n += 4;
|
||||
}
|
||||
vdata[16+3] = v128_add32( vdata[16+3], four );
|
||||
n += 4;
|
||||
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
|
||||
pdata[19] = n;
|
||||
*hashes_done = n - first_nonce;
|
||||
|
@@ -692,7 +692,7 @@ do { \
|
||||
_mm256_xor_si256( _mm256_and_si256( _mm256_xor_si256( Y, Z ), X ), Z )
|
||||
|
||||
#define MAJ(X, Y, Z) \
|
||||
_mm256_xor_si256( Y, _mm256_and_si256( X_xor_Y = _mm256_xor_si256( X, Y ), \
|
||||
_mm256_xor_si256( Y, _mm256_and_si256( (X_xor_Y = _mm256_xor_si256( X, Y )), \
|
||||
Y_xor_Z ) )
|
||||
|
||||
#define SHA3_4WAY_STEP( A, B, C, D, E, F, G, H, i ) \
|
||||
@@ -873,26 +873,26 @@ void sha512_4x64_ctx( sha512_4x64_context *sc, void *dst, const void *data,
|
||||
// SHA512 2 way 64 SSE2 or NEON
|
||||
|
||||
#define BSG5_0_2x64( x ) v128_xor3( v128_ror64( x, 28 ), \
|
||||
v128_ror64( x, 34 ), \
|
||||
v128_ror64( x, 39 ) )
|
||||
v128_ror64( x, 34 ), \
|
||||
v128_ror64( x, 39 ) )
|
||||
|
||||
#define BSG5_1_2x64( x ) v128_xor3( v128_ror64( x, 14 ), \
|
||||
v128_ror64( x, 18 ), \
|
||||
v128_ror64( x, 41 ) )
|
||||
v128_ror64( x, 18 ), \
|
||||
v128_ror64( x, 41 ) )
|
||||
|
||||
#define SSG5_0_2x64( x ) v128_xor3( v128_ror64( x, 1 ), \
|
||||
v128_ror64( x, 8 ), \
|
||||
v128_sr64( x, 7 ) )
|
||||
v128_ror64( x, 8 ), \
|
||||
v128_sr64( x, 7 ) )
|
||||
|
||||
#define SSG5_1_2x64( x ) v128_xor3( v128_ror64( x, 19 ), \
|
||||
v128_ror64( x, 61 ), \
|
||||
v128_sr64( x, 6 ) )
|
||||
v128_ror64( x, 61 ), \
|
||||
v128_sr64( x, 6 ) )
|
||||
|
||||
#define CH_2x64(X, Y, Z) \
v128_xor( v128_and( v128_xor( Y, Z ), X ), Z )

#define MAJ_2x64(X, Y, Z) \
v128_xor( Y, v128_and( X_xor_Y = v128_xor( X, Y ), Y_xor_Z ) )
v128_xor( Y, v128_and( (X_xor_Y = v128_xor( X, Y ) ), Y_xor_Z ) )
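The parenthesised assignment above is the usual majority-function shortcut: maj(x,y,z) = y ^ ((x ^ y) & (y ^ z)), and because the working variables rotate every round, this round's X_xor_Y can be carried forward as the next round's Y_xor_Z, saving one XOR per round. A scalar illustration with a hypothetical helper, not part of this tree:

#include <stdint.h>

/* maj(x,y,z) = (x&y) ^ (x&z) ^ (y&z) rewritten as y ^ ((x^y) & (y^z));
   the caller keeps x_xor_y around and feeds it back in as the next
   round's y_xor_z, exactly what X_xor_Y / Y_xor_Z do in the macros above. */
static inline uint32_t maj_opt( uint32_t x, uint32_t y, uint32_t z,
                                uint32_t y_xor_z, uint32_t *x_xor_y )
{
   *x_xor_y = x ^ y;                       // reused next round as y_xor_z
   return y ^ ( *x_xor_y & y_xor_z );
}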
|
||||
|
||||
#define SHA3_2x64_STEP( A, B, C, D, E, F, G, H, i ) \
|
||||
do { \
|
||||
@@ -917,34 +917,20 @@ sha512_2x64_round( sha512_2x64_context *ctx, v128u64_t *in, v128u64_t r[8] )
|
||||
v128u64_t W[80];
|
||||
|
||||
v128_block_bswap64( W , in );
|
||||
v128_block_bswap64( (&W[8]), (&in[8]) );
|
||||
v128_block_bswap64( W+8, in+8 );
|
||||
|
||||
for ( i = 16; i < 80; i++ )
|
||||
W[i] = v128_add4_64( SSG5_0_2x64( W[i-15] ), SSG5_1_2x64( W[i-2] ),
|
||||
W[ i- 7 ], W[ i-16 ] );
|
||||
|
||||
if ( ctx->initialized )
|
||||
{
|
||||
A = r[0];
|
||||
B = r[1];
|
||||
C = r[2];
|
||||
D = r[3];
|
||||
E = r[4];
|
||||
F = r[5];
|
||||
G = r[6];
|
||||
H = r[7];
|
||||
}
|
||||
else
|
||||
{
|
||||
A = v128_64( 0x6A09E667F3BCC908 );
|
||||
B = v128_64( 0xBB67AE8584CAA73B );
|
||||
C = v128_64( 0x3C6EF372FE94F82B );
|
||||
D = v128_64( 0xA54FF53A5F1D36F1 );
|
||||
E = v128_64( 0x510E527FADE682D1 );
|
||||
F = v128_64( 0x9B05688C2B3E6C1F );
|
||||
G = v128_64( 0x1F83D9ABFB41BD6B );
|
||||
H = v128_64( 0x5BE0CD19137E2179 );
|
||||
}
|
||||
A = r[0];
|
||||
B = r[1];
|
||||
C = r[2];
|
||||
D = r[3];
|
||||
E = r[4];
|
||||
F = r[5];
|
||||
G = r[6];
|
||||
H = r[7];
|
||||
|
||||
Y_xor_Z = v128_xor( B, C );
|
||||
|
||||
@@ -960,35 +946,28 @@ sha512_2x64_round( sha512_2x64_context *ctx, v128u64_t *in, v128u64_t r[8] )
|
||||
SHA3_2x64_STEP( B, C, D, E, F, G, H, A, i + 7 );
|
||||
}
|
||||
|
||||
if ( ctx->initialized )
|
||||
{
|
||||
r[0] = v128_add64( r[0], A );
|
||||
r[1] = v128_add64( r[1], B );
|
||||
r[2] = v128_add64( r[2], C );
|
||||
r[3] = v128_add64( r[3], D );
|
||||
r[4] = v128_add64( r[4], E );
|
||||
r[5] = v128_add64( r[5], F );
|
||||
r[6] = v128_add64( r[6], G );
|
||||
r[7] = v128_add64( r[7], H );
|
||||
}
|
||||
else
|
||||
{
|
||||
ctx->initialized = true;
|
||||
r[0] = v128_add64( A, v128_64( 0x6A09E667F3BCC908 ) );
|
||||
r[1] = v128_add64( B, v128_64( 0xBB67AE8584CAA73B ) );
|
||||
r[2] = v128_add64( C, v128_64( 0x3C6EF372FE94F82B ) );
|
||||
r[3] = v128_add64( D, v128_64( 0xA54FF53A5F1D36F1 ) );
|
||||
r[4] = v128_add64( E, v128_64( 0x510E527FADE682D1 ) );
|
||||
r[5] = v128_add64( F, v128_64( 0x9B05688C2B3E6C1F ) );
|
||||
r[6] = v128_add64( G, v128_64( 0x1F83D9ABFB41BD6B ) );
|
||||
r[7] = v128_add64( H, v128_64( 0x5BE0CD19137E2179 ) );
|
||||
}
|
||||
r[0] = v128_add64( r[0], A );
|
||||
r[1] = v128_add64( r[1], B );
|
||||
r[2] = v128_add64( r[2], C );
|
||||
r[3] = v128_add64( r[3], D );
|
||||
r[4] = v128_add64( r[4], E );
|
||||
r[5] = v128_add64( r[5], F );
|
||||
r[6] = v128_add64( r[6], G );
|
||||
r[7] = v128_add64( r[7], H );
|
||||
}
|
||||
|
||||
void sha512_2x64_init( sha512_2x64_context *sc )
|
||||
{
|
||||
sc->initialized = false;
|
||||
sc->val[0] = v128_64( 0x6A09E667F3BCC908 );
|
||||
sc->val[1] = v128_64( 0xBB67AE8584CAA73B );
|
||||
sc->val[2] = v128_64( 0x3C6EF372FE94F82B );
|
||||
sc->val[3] = v128_64( 0xA54FF53A5F1D36F1 );
|
||||
sc->val[4] = v128_64( 0x510E527FADE682D1 );
|
||||
sc->val[5] = v128_64( 0x9B05688C2B3E6C1F );
|
||||
sc->val[6] = v128_64( 0x1F83D9ABFB41BD6B );
|
||||
sc->val[7] = v128_64( 0x5BE0CD19137E2179 );
|
||||
sc->count = 0;
|
||||
sc->initialized = true;
|
||||
}
|
||||
|
||||
void sha512_2x64_update( sha512_2x64_context *sc, const void *data, size_t len )
|
||||
@@ -1036,7 +1015,7 @@ void sha512_2x64_close( sha512_2x64_context *sc, void *dst )
|
||||
v128_memset_zero( sc->buf + (ptr>>3), (pad - ptr) >> 3 );
|
||||
|
||||
sc->buf[ pad >> 3 ] = v128_bswap64( v128_64( sc->count >> 61 ) );
|
||||
sc->buf[ ( pad+8 ) >> 3 ] = v128_bswap64( v128_64( sc->count << 3 ) );
|
||||
sc->buf[ ( pad+8 ) >> 3 ] = v128_bswap64( v128_64( sc->count << 3 ) );
|
||||
sha512_2x64_round( sc, sc->buf, sc->val );
|
||||
|
||||
v128_block_bswap64( castp_v128u64( dst ), sc->val );
|
||||
|
@@ -5,9 +5,11 @@
|
||||
#include <stdint.h>
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
#define SHA512256D_8WAY 1
|
||||
#define SHA512256D_8WAY 1
|
||||
#elif defined(__AVX2__)
|
||||
#define SHA512256D_4WAY 1
|
||||
#define SHA512256D_4WAY 1
|
||||
#elif defined(__SSE2__) || defined(__ARM_NEON)
|
||||
#define SHA512256D_2WAY 1
|
||||
#endif
|
||||
|
||||
#if defined(SHA512256D_8WAY)
|
||||
@@ -145,6 +147,74 @@ int scanhash_sha512256d_4way( struct work *work, uint32_t max_nonce,
|
||||
return 0;
|
||||
}
|
||||
|
||||
#elif defined(SHA512256D_2WAY)
|
||||
|
||||
static void sha512256d_2x64_init( sha512_2x64_context *ctx )
|
||||
{
|
||||
ctx->count = 0;
|
||||
ctx->initialized = true;
|
||||
ctx->val[0] = v128_64( 0x22312194FC2BF72C );
|
||||
ctx->val[1] = v128_64( 0x9F555FA3C84C64C2 );
|
||||
ctx->val[2] = v128_64( 0x2393B86B6F53B151 );
|
||||
ctx->val[3] = v128_64( 0x963877195940EABD );
|
||||
ctx->val[4] = v128_64( 0x96283EE2A88EFFE3 );
|
||||
ctx->val[5] = v128_64( 0xBE5E1E2553863992 );
|
||||
ctx->val[6] = v128_64( 0x2B0199FC2C85B8AA );
|
||||
ctx->val[7] = v128_64( 0x0EB72DDC81C52CA2 );
|
||||
}
|
||||
|
||||
int scanhash_sha512256d_2x64( struct work *work, uint32_t max_nonce,
|
||||
uint64_t *hashes_done, struct thr_info *mythr )
|
||||
{
|
||||
uint64_t hash[8*2] __attribute__ ((aligned (64)));
|
||||
uint32_t vdata[20*2] __attribute__ ((aligned (64)));
|
||||
sha512_2x64_context ctx;
|
||||
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
|
||||
uint64_t *hash_q3 = &(hash[3*2]);
|
||||
uint32_t *pdata = work->data;
|
||||
uint32_t *ptarget = work->target;
|
||||
const uint64_t targ_q3 = ((uint64_t*)ptarget)[3];
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t last_nonce = max_nonce - 4;
|
||||
uint32_t n = first_nonce;
|
||||
v128u64_t *noncev = (v128u64_t*)vdata + 9;
|
||||
const int thr_id = mythr->id;
|
||||
const bool bench = opt_benchmark;
|
||||
const v128u64_t two = v128_64( 0x0000000200000000 );
|
||||
|
||||
v128_bswap32_intrlv80_2x64( vdata, pdata );
|
||||
*noncev = v128_add32( v128_set32( 1, 0, 0, 0 ), *noncev );
|
||||
// *noncev = v128_intrlv_blend_32( v128_set32( n+1, 0, n, 0 ), *noncev );
|
||||
|
||||
do
|
||||
{
|
||||
sha512256d_2x64_init( &ctx );
|
||||
sha512_2x64_update( &ctx, vdata, 80 );
|
||||
sha512_2x64_close( &ctx, hash );
|
||||
|
||||
sha512256d_2x64_init( &ctx );
|
||||
sha512_2x64_update( &ctx, hash, 32 );
|
||||
sha512_2x64_close( &ctx, hash );
|
||||
|
||||
for ( int lane = 0; lane < 2; lane++ )
|
||||
if ( hash_q3[ lane ] <= targ_q3 )
|
||||
{
|
||||
extr_lane_2x64( lane_hash, hash, lane, 256 );
|
||||
if ( valid_hash( lane_hash, ptarget ) && !bench )
|
||||
{
|
||||
pdata[19] = bswap_32( n + lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
*noncev = v128_add32( *noncev, two );
|
||||
n += 2;
|
||||
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
|
||||
|
||||
pdata[19] = n;
|
||||
*hashes_done = n - first_nonce;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
#include "sph_sha2.h"
|
||||
@@ -214,6 +284,8 @@ bool register_sha512256d_algo( algo_gate_t* gate )
|
||||
gate->scanhash = (void*)&scanhash_sha512256d_8way;
|
||||
#elif defined(SHA512256D_4WAY)
|
||||
gate->scanhash = (void*)&scanhash_sha512256d_4way;
|
||||
#elif defined(SHA512256D_2WAY)
|
||||
gate->scanhash = (void*)&scanhash_sha512256d_2x64;
|
||||
#else
|
||||
gate->scanhash = (void*)&scanhash_sha512256d;
|
||||
#endif
|
||||
|
133
algo/sha/sph_sha1.h
Normal file
@@ -0,0 +1,133 @@
|
||||
/* $Id: sph_sha1.h 216 2010-06-08 09:46:57Z tp $ */
|
||||
/**
|
||||
* SHA-1 interface.
|
||||
*
|
||||
* SHA-1 is described in FIPS 180-1 (now superseded by FIPS 180-2, but the
|
||||
* description of SHA-1 is still included and has not changed). FIPS
|
||||
* standards can be found at: http://csrc.nist.gov/publications/fips/
|
||||
*
|
||||
* @warning A theoretical collision attack against SHA-1, with work
|
||||
* factor 2^63, has been published. SHA-1 should not be used in new
|
||||
* protocol designs.
|
||||
*
|
||||
* ==========================(LICENSE BEGIN)============================
|
||||
*
|
||||
* Copyright (c) 2007-2010 Projet RNRT SAPHIR
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining
|
||||
* a copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be
|
||||
* included in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* ===========================(LICENSE END)=============================
|
||||
*
|
||||
* @file sph_sha1.h
|
||||
* @author Thomas Pornin <thomas.pornin@cryptolog.com>
|
||||
*/
|
||||
|
||||
#ifndef SPH_SHA1_H__
|
||||
#define SPH_SHA1_H__
|
||||
|
||||
#include <stddef.h>
|
||||
#include "compat/sph_types.h"
|
||||
|
||||
/**
|
||||
* Output size (in bits) for SHA-1.
|
||||
*/
|
||||
#define SPH_SIZE_sha1 160
|
||||
|
||||
/**
|
||||
* This structure is a context for SHA-1 computations: it contains the
|
||||
* intermediate values and some data from the last entered block. Once
|
||||
* a SHA-1 computation has been performed, the context can be reused for
|
||||
* another computation.
|
||||
*
|
||||
* The contents of this structure are private. A running SHA-1 computation
|
||||
* can be cloned by copying the context (e.g. with a simple
|
||||
* <code>memcpy()</code>).
|
||||
*/
|
||||
typedef struct {
|
||||
#ifndef DOXYGEN_IGNORE
|
||||
unsigned char buf[64]; /* first field, for alignment */
|
||||
sph_u32 val[5];
|
||||
#if SPH_64
|
||||
sph_u64 count;
|
||||
#else
|
||||
sph_u32 count_high, count_low;
|
||||
#endif
|
||||
#endif
|
||||
} sph_sha1_context;
|
||||
|
||||
/**
|
||||
* Initialize a SHA-1 context. This process performs no memory allocation.
|
||||
*
|
||||
* @param cc the SHA-1 context (pointer to a <code>sph_sha1_context</code>)
|
||||
*/
|
||||
void sph_sha1_init(void *cc);
|
||||
|
||||
/**
|
||||
* Process some data bytes. It is acceptable that <code>len</code> is zero
|
||||
* (in which case this function does nothing).
|
||||
*
|
||||
* @param cc the SHA-1 context
|
||||
* @param data the input data
|
||||
* @param len the input data length (in bytes)
|
||||
*/
|
||||
void sph_sha1(void *cc, const void *data, size_t len);
|
||||
|
||||
/**
|
||||
* Terminate the current SHA-1 computation and output the result into the
|
||||
* provided buffer. The destination buffer must be wide enough to
|
||||
* accomodate the result (20 bytes). The context is automatically
|
||||
* reinitialized.
|
||||
*
|
||||
* @param cc the SHA-1 context
|
||||
* @param dst the destination buffer
|
||||
*/
|
||||
void sph_sha1_close(void *cc, void *dst);
|
||||
|
||||
/**
|
||||
* Add a few additional bits (0 to 7) to the current computation, then
|
||||
* terminate it and output the result in the provided buffer, which must
|
||||
* be wide enough to accomodate the result (20 bytes). If bit number i
|
||||
* in <code>ub</code> has value 2^i, then the extra bits are those
|
||||
* numbered 7 downto 8-n (this is the big-endian convention at the byte
|
||||
* level). The context is automatically reinitialized.
|
||||
*
|
||||
* @param cc the SHA-1 context
|
||||
* @param ub the extra bits
|
||||
* @param n the number of extra bits (0 to 7)
|
||||
* @param dst the destination buffer
|
||||
*/
|
||||
void sph_sha1_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst);
|
||||
|
||||
/**
|
||||
* Apply the SHA-1 compression function on the provided data. The
|
||||
* <code>msg</code> parameter contains the 16 32-bit input blocks,
|
||||
* as numerical values (hence after the big-endian decoding). The
|
||||
* <code>val</code> parameter contains the 5 32-bit input blocks for
|
||||
* the compression function; the output is written in place in this
|
||||
* array.
|
||||
*
|
||||
* @param msg the message block (16 values)
|
||||
* @param val the function 160-bit input and output
|
||||
*/
|
||||
void sph_sha1_comp(const sph_u32 msg[16], sph_u32 val[5]);
|
||||
|
||||
void sph_sha1_full( void *hash, const void *msg, size_t len );
|
||||
|
||||
#endif
|
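
For orientation, the new header exposes the standard sph streaming interface (init / update / close) plus a one-shot sph_sha1_full() wrapper. The fragment below is only an illustrative sketch of how a caller might drive it; the input string and the main() harness are invented for the example, everything else is taken from the declarations above.

#include <stdio.h>
#include <string.h>
#include "algo/sha/sph_sha1.h"

int main(void)
{
   const char *msg = "abc";            // arbitrary example input
   unsigned char d1[20], d2[20];       // SPH_SIZE_sha1 = 160 bits = 20 bytes
   sph_sha1_context ctx;

   // Streaming form: init, feed any number of byte ranges, then close.
   sph_sha1_init( &ctx );
   sph_sha1( &ctx, msg, strlen( msg ) );
   sph_sha1_close( &ctx, d1 );         // close() also reinitializes the context

   // One-shot wrapper declared at the end of the header.
   sph_sha1_full( d2, msg, strlen( msg ) );

   printf( "digests match: %s\n", memcmp( d1, d2, sizeof d1 ) == 0 ? "yes" : "no" );
   return 0;
}
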
@@ -60,7 +60,6 @@ static const sph_u32 IV512[] = {
|
||||
static void
|
||||
c512( sph_shavite_big_context *sc, const void *msg )
|
||||
{
|
||||
const v128_t zero = v128_zero;
|
||||
v128_t p0, p1, p2, p3, x;
|
||||
v128_t k00, k01, k02, k03, k10, k11, k12, k13;
|
||||
v128_t *m = (v128_t*)msg;
|
||||
@@ -76,39 +75,39 @@ c512( sph_shavite_big_context *sc, const void *msg )
|
||||
|
||||
k00 = m[0];
|
||||
x = v128_xor( p1, k00 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
|
||||
k01 = m[1];
|
||||
x = v128_xor( x, k01 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k02 = m[2];
|
||||
x = v128_xor( x, k02 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k03 = m[3];
|
||||
x = v128_xor( x, k03 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
|
||||
p0 = v128_xor( p0, x );
|
||||
|
||||
k10 = m[4];
|
||||
x = v128_xor( p3, k10 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k11 = m[5];
|
||||
x = v128_xor( x, k11 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k12 = m[6];
|
||||
x = v128_xor( x, k12 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k13 = m[7];
|
||||
x = v128_xor( x, k13 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
|
||||
p2 = v128_xor( p2, x );
|
||||
|
||||
for ( r = 0; r < 3; r ++ )
|
||||
{
|
||||
// round 1, 5, 9
|
||||
k00 = v128_shuflr32( v128_aesenc( k00, zero ) );
|
||||
k00 = v128_shuflr32( v128_aesenc_nokey( k00 ) );
|
||||
k00 = v128_xor( k00, k13 );
|
||||
|
||||
if ( r == 0 )
|
||||
@@ -116,8 +115,8 @@ c512( sph_shavite_big_context *sc, const void *msg )
|
||||
~sc->count3, sc->count2, sc->count1, sc->count0 ) );
|
||||
|
||||
x = v128_xor( p0, k00 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k01 = v128_shuflr32( v128_aesenc( k01, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k01 = v128_shuflr32( v128_aesenc_nokey( k01 ) );
|
||||
k01 = v128_xor( k01, k00 );
|
||||
|
||||
if ( r == 1 )
|
||||
@@ -125,32 +124,32 @@ c512( sph_shavite_big_context *sc, const void *msg )
|
||||
~sc->count0, sc->count1, sc->count2, sc->count3 ) );
|
||||
|
||||
x = v128_xor( x, k01 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k02 = v128_shuflr32( v128_aesenc( k02, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k02 = v128_shuflr32( v128_aesenc_nokey( k02 ) );
|
||||
k02 = v128_xor( k02, k01 );
|
||||
x = v128_xor( x, k02 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k03 = v128_shuflr32( v128_aesenc( k03, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k03 = v128_shuflr32( v128_aesenc_nokey( k03 ) );
|
||||
k03 = v128_xor( k03, k02 );
|
||||
x = v128_xor( x, k03 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
|
||||
p3 = v128_xor( p3, x );
|
||||
|
||||
k10 = v128_shuflr32( v128_aesenc( k10, zero ) );
|
||||
k10 = v128_shuflr32( v128_aesenc_nokey( k10 ) );
|
||||
k10 = v128_xor( k10, k03 );
|
||||
|
||||
x = v128_xor( p2, k10 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k11 = v128_shuflr32( v128_aesenc( k11, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k11 = v128_shuflr32( v128_aesenc_nokey( k11 ) );
|
||||
k11 = v128_xor( k11, k10 );
|
||||
x = v128_xor( x, k11 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k12 = v128_shuflr32( v128_aesenc( k12, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k12 = v128_shuflr32( v128_aesenc_nokey( k12 ) );
|
||||
k12 = v128_xor( k12, k11 );
|
||||
x = v128_xor( x, k12 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k13 = v128_shuflr32( v128_aesenc( k13, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k13 = v128_shuflr32( v128_aesenc_nokey( k13 ) );
|
||||
k13 = v128_xor( k13, k12 );
|
||||
|
||||
if ( r == 2 )
|
||||
@@ -158,78 +157,78 @@ c512( sph_shavite_big_context *sc, const void *msg )
|
||||
~sc->count1, sc->count0, sc->count3, sc->count2 ) );
|
||||
|
||||
x = v128_xor( x, k13 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
p1 = v128_xor( p1, x );
|
||||
|
||||
// round 2, 6, 10
|
||||
|
||||
k00 = v128_xor( k00, v128_alignr8( k13, k12, 4 ) );
|
||||
x = v128_xor( p3, k00 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k01 = v128_xor( k01, v128_alignr8( k00, k13, 4 ) );
|
||||
x = v128_xor( x, k01 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k02 = v128_xor( k02, v128_alignr8( k01, k00, 4 ) );
|
||||
x = v128_xor( x, k02 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k03 = v128_xor( k03, v128_alignr8( k02, k01, 4 ) );
|
||||
x = v128_xor( x, k03 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
|
||||
p2 = v128_xor( p2, x );
|
||||
|
||||
k10 = v128_xor( k10, v128_alignr8( k03, k02, 4 ) );
|
||||
x = v128_xor( p1, k10 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k11 = v128_xor( k11, v128_alignr8( k10, k03, 4 ) );
|
||||
x = v128_xor( x, k11 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k12 = v128_xor( k12, v128_alignr8( k11, k10, 4 ) );
|
||||
x = v128_xor( x, k12 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k13 = v128_xor( k13, v128_alignr8( k12, k11, 4 ) );
|
||||
x = v128_xor( x, k13 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
|
||||
p0 = v128_xor( p0, x );
|
||||
|
||||
// round 3, 7, 11
|
||||
|
||||
k00 = v128_shuflr32( v128_aesenc( k00, zero ) );
|
||||
k00 = v128_shuflr32( v128_aesenc_nokey( k00 ) );
|
||||
k00 = v128_xor( k00, k13 );
|
||||
x = v128_xor( p2, k00 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k01 = v128_shuflr32( v128_aesenc( k01, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k01 = v128_shuflr32( v128_aesenc_nokey( k01 ) );
|
||||
k01 = v128_xor( k01, k00 );
|
||||
x = v128_xor( x, k01 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k02 = v128_shuflr32( v128_aesenc( k02, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k02 = v128_shuflr32( v128_aesenc_nokey( k02 ) );
|
||||
k02 = v128_xor( k02, k01 );
|
||||
x = v128_xor( x, k02 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k03 = v128_shuflr32( v128_aesenc( k03, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k03 = v128_shuflr32( v128_aesenc_nokey( k03 ) );
|
||||
k03 = v128_xor( k03, k02 );
|
||||
x = v128_xor( x, k03 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
|
||||
p1 = v128_xor( p1, x );
|
||||
|
||||
k10 = v128_shuflr32( v128_aesenc( k10, zero ) );
|
||||
k10 = v128_shuflr32( v128_aesenc_nokey( k10 ) );
|
||||
k10 = v128_xor( k10, k03 );
|
||||
x = v128_xor( p0, k10 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k11 = v128_shuflr32( v128_aesenc( k11, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k11 = v128_shuflr32( v128_aesenc_nokey( k11 ) );
|
||||
k11 = v128_xor( k11, k10 );
|
||||
x = v128_xor( x, k11 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k12 = v128_shuflr32( v128_aesenc( k12, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k12 = v128_shuflr32( v128_aesenc_nokey( k12 ) );
|
||||
k12 = v128_xor( k12, k11 );
|
||||
x = v128_xor( x, k12 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k13 = v128_shuflr32( v128_aesenc( k13, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k13 = v128_shuflr32( v128_aesenc_nokey( k13 ) );
|
||||
k13 = v128_xor( k13, k12 );
|
||||
x = v128_xor( x, k13 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
|
||||
p3 = v128_xor( p3, x );
|
||||
|
||||
@@ -237,73 +236,73 @@ c512( sph_shavite_big_context *sc, const void *msg )
|
||||
|
||||
k00 = v128_xor( k00, v128_alignr8( k13, k12, 4 ) );
|
||||
x = v128_xor( p1, k00 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k01 = v128_xor( k01, v128_alignr8( k00, k13, 4 ) );
|
||||
x = v128_xor( x, k01 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k02 = v128_xor( k02, v128_alignr8( k01, k00, 4 ) );
|
||||
x = v128_xor( x, k02 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k03 = v128_xor( k03, v128_alignr8( k02, k01, 4 ) );
|
||||
x = v128_xor( x, k03 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
|
||||
p0 = v128_xor( p0, x );
|
||||
|
||||
k10 = v128_xor( k10, v128_alignr8( k03, k02, 4 ) );
|
||||
x = v128_xor( p3, k10 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k11 = v128_xor( k11, v128_alignr8( k10, k03, 4 ) );
|
||||
x = v128_xor( x, k11 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k12 = v128_xor( k12, v128_alignr8( k11, k10, 4 ) );
|
||||
x = v128_xor( x, k12 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k13 = v128_xor( k13, v128_alignr8( k12, k11, 4 ) );
|
||||
x = v128_xor( x, k13 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
|
||||
p2 = v128_xor( p2, x );
|
||||
}
|
||||
|
||||
// round 13
|
||||
|
||||
k00 = v128_shuflr32( v128_aesenc( k00, zero ) );
|
||||
k00 = v128_shuflr32( v128_aesenc_nokey( k00 ) );
|
||||
k00 = v128_xor( k00, k13 );
|
||||
x = v128_xor( p0, k00 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k01 = v128_shuflr32( v128_aesenc( k01, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k01 = v128_shuflr32( v128_aesenc_nokey( k01 ) );
|
||||
k01 = v128_xor( k01, k00 );
|
||||
x = v128_xor( x, k01 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k02 = v128_shuflr32( v128_aesenc( k02, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k02 = v128_shuflr32( v128_aesenc_nokey( k02 ) );
|
||||
k02 = v128_xor( k02, k01 );
|
||||
x = v128_xor( x, k02 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k03 = v128_shuflr32( v128_aesenc( k03, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k03 = v128_shuflr32( v128_aesenc_nokey( k03 ) );
|
||||
k03 = v128_xor( k03, k02 );
|
||||
x = v128_xor( x, k03 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
|
||||
p3 = v128_xor( p3, x );
|
||||
|
||||
k10 = v128_shuflr32( v128_aesenc( k10, zero ) );
|
||||
k10 = v128_shuflr32( v128_aesenc_nokey( k10 ) );
|
||||
k10 = v128_xor( k10, k03 );
|
||||
x = v128_xor( p2, k10 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k11 = v128_shuflr32( v128_aesenc( k11, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k11 = v128_shuflr32( v128_aesenc_nokey( k11 ) );
|
||||
k11 = v128_xor( k11, k10 );
|
||||
x = v128_xor( x, k11 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k12 = v128_shuflr32( v128_aesenc( k12, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k12 = v128_shuflr32( v128_aesenc_nokey( k12 ) );
|
||||
k12 = v128_xor( k12, v128_xor( k11, v128_set32(
|
||||
~sc->count2, sc->count3, sc->count0, sc->count1 ) ) );
|
||||
x = v128_xor( x, k12 );
|
||||
x = v128_aesenc( x, zero );
|
||||
k13 = v128_shuflr32( v128_aesenc( k13, zero ) );
|
||||
x = v128_aesenc_nokey( x );
|
||||
k13 = v128_shuflr32( v128_aesenc_nokey( k13 ) );
|
||||
k13 = v128_xor( k13, k12 );
|
||||
x = v128_xor( x, k13 );
|
||||
x = v128_aesenc( x, zero );
|
||||
x = v128_aesenc_nokey( x );
|
||||
|
||||
p1 = v128_xor( p1, x );
|
||||
|
||||
|
File diff suppressed because it is too large.
@@ -2,23 +2,68 @@
|
||||
#define SIMD_HASH_2WAY_H__ 1
|
||||
|
||||
#include "simd-compat.h"
|
||||
#include "simd-utils.h"
|
||||
|
||||
#if defined(__SSE2__) || defined (__ARM_NEON)
|
||||
|
||||
typedef struct
|
||||
{
|
||||
uint32_t A[32];
|
||||
uint8_t buffer[128];
|
||||
uint64_t count;
|
||||
unsigned int hashbitlen;
|
||||
unsigned int blocksize;
|
||||
unsigned int n_feistels;
|
||||
} simd512_context __attribute__((aligned(64)));
|
||||
|
||||
// datalen is bytes
|
||||
int simd512_ctx( simd512_context *ctx, void *hashval, const void *data,
|
||||
int datalen );
|
||||
|
||||
int simd512( void *hashval, const void *data, int datalen );
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(__AVX2__)
|
||||
|
||||
#include "simd-utils.h"
|
||||
typedef struct
|
||||
{
|
||||
uint32_t A[ 32*2 ];
|
||||
uint8_t buffer[ 128*2 ];
|
||||
uint64_t count;
|
||||
unsigned int hashbitlen;
|
||||
unsigned int blocksize;
|
||||
unsigned int n_feistels;
|
||||
} simd512_2way_context __attribute__((aligned(128)));
|
||||
#define simd_2way_context simd512_2way_context
|
||||
|
||||
// databitlen is bits
|
||||
int simd_2way_init( simd_2way_context *state, int hashbitlen );
|
||||
int simd_2way_update( simd_2way_context *state, const void *data,
|
||||
int databitlen );
|
||||
int simd_2way_close( simd_2way_context *state, void *hashval );
|
||||
int simd_2way_update_close( simd_2way_context *state, void *hashval,
|
||||
const void *data, int databitlen );
|
||||
int simd512_2way_ctx( simd512_2way_context *state, void *hashval,
|
||||
const void *data, int datalen );
|
||||
#define simd512_2way_full simd512_2way_ctx
|
||||
|
||||
int simd512_2way( void *hashval, const void *data, int datalen );
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
|
||||
typedef struct {
|
||||
typedef struct
|
||||
{
|
||||
uint32_t A[ 32*4 ];
|
||||
uint8_t buffer[ 128*4 ];
|
||||
uint64_t count;
|
||||
unsigned int hashbitlen;
|
||||
unsigned int blocksize;
|
||||
unsigned int n_feistels;
|
||||
|
||||
} simd_4way_context __attribute__((aligned(128)));
|
||||
} simd512_4way_context __attribute__((aligned(128)));
|
||||
#define simd_4way_context simd512_4way_context
|
||||
|
||||
int simd_4way_init( simd_4way_context *state, int hashbitlen );
|
||||
int simd_4way_update( simd_4way_context *state, const void *data,
|
||||
@@ -26,29 +71,12 @@ int simd_4way_update( simd_4way_context *state, const void *data,
|
||||
int simd_4way_close( simd_4way_context *state, void *hashval );
|
||||
int simd_4way_update_close( simd_4way_context *state, void *hashval,
|
||||
const void *data, int databitlen );
|
||||
int simd512_4way_full( simd_4way_context *state, void *hashval,
|
||||
int simd512_4way_ctx( simd_4way_context *state, void *hashval,
|
||||
const void *data, int datalen );
|
||||
#define simd512_4way_full simd512_4way_ctx
|
||||
|
||||
int simd512_4way( void *hashval, const void *data, int datalen );
|
||||
|
||||
#endif
|
||||
|
||||
typedef struct {
|
||||
uint32_t A[ 32*2 ];
|
||||
uint8_t buffer[ 128*2 ];
|
||||
uint64_t count;
|
||||
unsigned int hashbitlen;
|
||||
unsigned int blocksize;
|
||||
unsigned int n_feistels;
|
||||
|
||||
} simd_2way_context __attribute__((aligned(128)));
|
||||
|
||||
int simd_2way_init( simd_2way_context *state, int hashbitlen );
|
||||
int simd_2way_update( simd_2way_context *state, const void *data,
|
||||
int databitlen );
|
||||
int simd_2way_close( simd_2way_context *state, void *hashval );
|
||||
int simd_2way_update_close( simd_2way_context *state, void *hashval,
|
||||
const void *data, int databitlen );
|
||||
int simd512_2way_full( simd_2way_context *state, void *hashval,
|
||||
const void *data, int datalen );
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
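
The comments above note that the new 1-way simd512()/simd512_ctx() entry points take their length in bytes, while the older 2-way/4-way update calls still take bits. A minimal usage sketch, assuming the header is included as simd-hash-2way.h and that the 512-bit digest is written as 64 bytes (both inferred from the declarations, not verified here):

#include <stdint.h>
#include "simd-hash-2way.h"   // assumed path, matching the SIMD_HASH_2WAY_H__ guard

// Hash an 80-byte block header with SIMD-512; datalen is in BYTES here.
static void simd512_example( uint8_t out[64], const uint8_t header[80] )
{
   simd512( out, header, 80 );            // one-shot form
}

static void simd512_ctx_example( uint8_t out[64], const uint8_t header[80] )
{
   simd512_context ctx;                   // explicit-context form, same result
   simd512_ctx( &ctx, out, header, 80 );
}
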
@@ -14,20 +14,19 @@
|
||||
#include "algo/cubehash/cubehash_sse2.h"
|
||||
#if defined(__aarch64__)
|
||||
#include "algo/simd/sph_simd.h"
|
||||
#include "algo/luffa/sph_luffa.h"
|
||||
#endif
|
||||
#include "algo/hamsi/sph_hamsi.h"
|
||||
#include "algo/shabal/sph_shabal.h"
|
||||
#include "algo/whirlpool/sph_whirlpool.h"
|
||||
#include "algo/sha/sph_sha2.h"
|
||||
#include "algo/yespower/yespower.h"
|
||||
#if defined(__AES__) || defined(__ARM_FEATURE_AES)
|
||||
//#if defined(__AES__) || defined(__ARM_FEATURE_AES)
|
||||
#include "algo/echo/aes_ni/hash_api.h"
|
||||
#include "algo/groestl/aes_ni/hash-groestl.h"
|
||||
#else
|
||||
//#else
|
||||
#include "algo/echo/sph_echo.h"
|
||||
#include "algo/groestl/sph_groestl.h"
|
||||
#endif
|
||||
//#endif
|
||||
#if defined(__AES__)
|
||||
#include "algo/fugue/fugue-aesni.h"
|
||||
#else
|
||||
@@ -48,7 +47,7 @@ typedef struct TortureGarden TortureGarden;
|
||||
// Graph of hash algos plus SPH contexts
|
||||
struct TortureGarden
|
||||
{
|
||||
#if defined(__AES__) || defined(__ARM_FEATURE_AES)
|
||||
#if defined(__AES__) // || defined(__ARM_FEATURE_AES)
|
||||
hashState_echo echo;
|
||||
hashState_groestl groestl;
|
||||
#else
|
||||
@@ -67,11 +66,7 @@ struct TortureGarden
|
||||
sph_keccak512_context keccak;
|
||||
cubehashParam cube;
|
||||
shavite512_context shavite;
|
||||
#if defined(__aarch64__)
|
||||
sph_luffa512_context luffa;
|
||||
#else
|
||||
hashState_luffa luffa;
|
||||
#endif
|
||||
#if defined(__aarch64__)
|
||||
sph_simd512_context simd;
|
||||
#else
|
||||
@@ -112,7 +107,7 @@ static int get_hash( void *output, const void *input, TortureGarden *garden,
|
||||
cubehashUpdateDigest( &garden->cube, hash, input, 64 );
|
||||
break;
|
||||
case 3:
|
||||
#if defined(__AES__) || defined(__ARM_FEATURE_AES)
|
||||
#if defined(__AES__) // || defined(__ARM_FEATURE_AES)
|
||||
echo_full( &garden->echo, hash, 512, input, 64 );
|
||||
#else
|
||||
sph_echo512_init( &garden->echo );
|
||||
@@ -128,7 +123,7 @@ static int get_hash( void *output, const void *input, TortureGarden *garden,
|
||||
#endif
|
||||
break;
|
||||
case 5:
|
||||
#if defined(__AES__) || defined(__ARM_FEATURE_AES)
|
||||
#if defined(__AES__) // || defined(__ARM_FEATURE_AES)
|
||||
groestl512_full( &garden->groestl, hash, input, 512 );
|
||||
#else
|
||||
sph_groestl512_init( &garden->groestl) ;
|
||||
@@ -157,13 +152,7 @@ static int get_hash( void *output, const void *input, TortureGarden *garden,
|
||||
sph_keccak512_close( &garden->keccak, hash );
|
||||
break;
|
||||
case 10:
|
||||
#if defined(__aarch64__)
|
||||
sph_luffa512_init( &garden->luffa );
|
||||
sph_luffa512( &garden->luffa, input, 64 );
|
||||
sph_luffa512_close( &garden->luffa, hash );
|
||||
#else
|
||||
luffa_full( &garden->luffa, hash, 512, input, 64 );
|
||||
#endif
|
||||
break;
|
||||
case 11:
|
||||
sph_shabal512_init( &garden->shabal );
|
||||
|
@@ -929,43 +929,31 @@ int scanhash_x17_4x64( struct work *work, uint32_t max_nonce,
|
||||
#elif defined(X17_2X64)
|
||||
|
||||
// Need sph in some cases
|
||||
//#include "algo/blake/sph_blake.h"
|
||||
#include "algo/bmw/sph_bmw.h"
|
||||
#include "algo/jh/sph_jh.h"
|
||||
//#include "algo/keccak/sph_keccak.h"
|
||||
#include "algo/skein/sph_skein.h"
|
||||
#include "algo/luffa/sph_luffa.h"
|
||||
#include "algo/luffa/luffa_for_sse2.h"
|
||||
//#include "algo/cubehash/sph_cubehash.h"
|
||||
#include "algo/cubehash/cubehash_sse2.h"
|
||||
#include "algo/shavite/sph_shavite.h"
|
||||
#include "algo/simd/sph_simd.h"
|
||||
#include "algo/simd/nist.h"
|
||||
#include "algo/hamsi/sph_hamsi.h"
|
||||
#include "algo/shabal/sph_shabal.h"
|
||||
#include "algo/whirlpool/sph_whirlpool.h"
|
||||
#include "algo/haval/sph-haval.h"
|
||||
#include "algo/sha/sph_sha2.h"
|
||||
#if !( defined(__AES__) || defined(__ARM_FEATURE_AES) )
|
||||
//#if !( defined(__AES__) || defined(__ARM_FEATURE_AES) )
|
||||
#include "algo/groestl/sph_groestl.h"
|
||||
#include "algo/echo/sph_echo.h"
|
||||
#endif
|
||||
//#endif
|
||||
#include "algo/fugue/sph_fugue.h"
|
||||
|
||||
union _x17_context_overlay
|
||||
{
|
||||
// blake512_2x64_context blake;
|
||||
blake512_context blake;
|
||||
#if defined(__x86_64__)
|
||||
blake512_2x64_context blake;
|
||||
bmw512_2x64_context bmw;
|
||||
#else
|
||||
sph_bmw512_context bmw;
|
||||
#endif
|
||||
#if defined(__AES__) || defined(__ARM_FEATURE_AES)
|
||||
#if defined(__AES__) // || defined(__ARM_FEATURE_AES)
|
||||
hashState_groestl groestl;
|
||||
hashState_echo echo;
|
||||
#else
|
||||
sph_groestl512_context groestl;
|
||||
#endif
|
||||
#if defined(__AES__) || defined(__ARM_FEATURE_AES)
|
||||
hashState_echo echo;
|
||||
#else
|
||||
sph_echo512_context echo;
|
||||
#endif
|
||||
#if defined(__AES__)
|
||||
@@ -973,26 +961,14 @@ union _x17_context_overlay
|
||||
#else
|
||||
sph_fugue512_context fugue;
|
||||
#endif
|
||||
#if defined(__x86_64__)
|
||||
jh512_2x64_context jh;
|
||||
#else
|
||||
sph_jh512_context jh;
|
||||
#endif
|
||||
keccak512_2x64_context keccak;
|
||||
#if defined(__x86_64__)
|
||||
skein512_2x64_context skein;
|
||||
#else
|
||||
sph_skein512_context skein;
|
||||
#endif
|
||||
#if defined(__x86_64__)
|
||||
hashState_luffa luffa;
|
||||
#else
|
||||
sph_luffa512_context luffa;
|
||||
#endif
|
||||
cubehashParam cube;
|
||||
sph_shavite512_context shavite;
|
||||
#if defined(__x86_64__)
|
||||
hashState_sd simd;
|
||||
simd512_context simd;
|
||||
#else
|
||||
sph_simd512_context simd;
|
||||
#endif
|
||||
@@ -1003,11 +979,7 @@ union _x17_context_overlay
|
||||
#endif
|
||||
sph_shabal512_context shabal;
|
||||
sph_whirlpool_context whirlpool;
|
||||
#if defined(__x86_64__)
|
||||
sha512_2x64_context sha;
|
||||
#else
|
||||
sph_sha512_context sha;
|
||||
#endif
|
||||
sph_haval256_5_context haval;
|
||||
};
|
||||
typedef union _x17_context_overlay x17_context_overlay;
|
||||
@@ -1019,30 +991,16 @@ int x17_2x64_hash( void *output, const void *input, int thr_id )
|
||||
uint8_t hash1[64] __attribute__((aligned(64)));
|
||||
x17_context_overlay ctx;
|
||||
|
||||
// intrlv_2x64( vhash, input, input+80, 640 );
|
||||
// blake512_2x64_full( &ctx.blake, vhash, vhash, 80 );
|
||||
// dintrlv_2x64( hash0, hash1, vhash, 512 );
|
||||
|
||||
blake512_full( &ctx.blake, hash0, input, 80 );
|
||||
blake512_full( &ctx.blake, hash1, input+80, 80 );
|
||||
intrlv_2x64( vhash, input, input+80, 640 );
|
||||
|
||||
|
||||
#if defined(__x86_64__)
|
||||
intrlv_2x64( vhash, hash0, hash1, 512 );
|
||||
blake512_2x64_full( &ctx.blake, vhash, vhash, 80 );
|
||||
bmw512_2x64_init( &ctx.bmw );
|
||||
bmw512_2x64_update( &ctx.bmw, vhash, 64 );
|
||||
bmw512_2x64_close( &ctx.bmw, vhash );
|
||||
dintrlv_2x64( hash0, hash1, vhash, 512 );
|
||||
#else
|
||||
sph_bmw512_init( &ctx.bmw );
|
||||
sph_bmw512( &ctx.bmw, hash0, 64 );
|
||||
sph_bmw512_close( &ctx.bmw, hash0 );
|
||||
sph_bmw512_init( &ctx.bmw );
|
||||
sph_bmw512( &ctx.bmw, hash1, 64 );
|
||||
sph_bmw512_close( &ctx.bmw, hash1 );
|
||||
#endif
|
||||
|
||||
#if defined(__AES__) || defined(__ARM_FEATURE_AES)
|
||||
dintrlv_2x64( hash0, hash1, vhash, 512 );
|
||||
|
||||
#if defined(__AES__) // || defined(__ARM_FEATURE_AES)
|
||||
groestl512_full( &ctx.groestl, hash0, hash0, 512 );
|
||||
groestl512_full( &ctx.groestl, hash1, hash1, 512 );
|
||||
#else
|
||||
@@ -1054,47 +1012,16 @@ int x17_2x64_hash( void *output, const void *input, int thr_id )
|
||||
sph_groestl512_close( &ctx.groestl, hash1 );
|
||||
#endif
|
||||
|
||||
#if defined(__x86_64__)
|
||||
intrlv_2x64( vhash, hash0, hash1, 512 );
|
||||
|
||||
skein512_2x64_full( &ctx.skein, vhash, vhash, 64 );
|
||||
dintrlv_2x64( hash0, hash1, vhash, 512 );
|
||||
#else
|
||||
sph_skein512_init( &ctx.skein );
|
||||
sph_skein512( &ctx.skein, hash0, 64 );
|
||||
sph_skein512_close( &ctx.skein, hash0);
|
||||
sph_skein512_init( &ctx.skein );
|
||||
sph_skein512( &ctx.skein, hash1, 64 );
|
||||
sph_skein512_close( &ctx.skein, hash1 );
|
||||
#endif
|
||||
|
||||
#if defined(__x86_64__)
|
||||
intrlv_2x64( vhash, hash0, hash1, 512);
|
||||
jh512_2x64_ctx( &ctx.jh, vhash, vhash, 64 );
|
||||
dintrlv_2x64( hash0, hash1, vhash, 512 );
|
||||
#else
|
||||
sph_jh512_init( &ctx.jh );
|
||||
sph_jh512( &ctx.jh, hash0, 64 );
|
||||
sph_jh512_close( &ctx.jh, hash0 );
|
||||
sph_jh512_init( &ctx.jh);
|
||||
sph_jh512( &ctx.jh, hash1, 64 );
|
||||
sph_jh512_close( &ctx.jh, hash1 );
|
||||
#endif
|
||||
|
||||
intrlv_2x64( vhash, hash0, hash1, 512);
|
||||
keccak512_2x64_ctx( &ctx.keccak, vhash, vhash, 64 );
|
||||
|
||||
dintrlv_2x64( hash0, hash1, vhash, 512 );
|
||||
|
||||
#if defined(__x86_64__)
|
||||
luffa_full( &ctx.luffa, hash0, 512, hash0, 64 );
|
||||
luffa_full( &ctx.luffa, hash1, 512, hash1, 64 );
|
||||
#else
|
||||
sph_luffa512_init( &ctx.luffa );
|
||||
sph_luffa512( &ctx.luffa, hash0, 64 );
|
||||
sph_luffa512_close( &ctx.luffa, hash0 );
|
||||
sph_luffa512_init( &ctx.luffa );
|
||||
sph_luffa512( &ctx.luffa, hash1, 64 );
|
||||
sph_luffa512_close( &ctx.luffa, hash1 );
|
||||
#endif
|
||||
|
||||
cubehash_full( &ctx.cube, hash0, 512, hash0, 64 );
|
||||
cubehash_full( &ctx.cube, hash1, 512, hash1, 64 );
|
||||
@@ -1107,8 +1034,8 @@ int x17_2x64_hash( void *output, const void *input, int thr_id )
|
||||
sph_shavite512_close( &ctx.shavite, hash1 );
|
||||
|
||||
#if defined(__x86_64__)
|
||||
simd_full( &ctx.simd, hash0, hash0, 512 );
|
||||
simd_full( &ctx.simd, hash1, hash1, 512 );
|
||||
simd512_ctx( &ctx.simd, hash0, hash0, 64 );
|
||||
simd512_ctx( &ctx.simd, hash1, hash1, 64 );
|
||||
#else
|
||||
sph_simd512_init( &ctx.simd );
|
||||
sph_simd512( &ctx.simd, hash0, 64 );
|
||||
@@ -1130,7 +1057,7 @@ int x17_2x64_hash( void *output, const void *input, int thr_id )
|
||||
sph_echo512_close( &ctx.echo, hash1 );
|
||||
#endif
|
||||
|
||||
#if defined(__SSE4_2__) // || defined(__ARM_NEON)
|
||||
#if defined(__SSE4_2__) // || defined(__ARM_NEON)
|
||||
intrlv_2x64( vhash, hash0, hash1, 512 );
|
||||
hamsi512_2x64_ctx( &ctx.hamsi, vhash, vhash, 64 );
|
||||
dintrlv_2x64( hash0, hash1, vhash, 512 );
|
||||
@@ -1165,18 +1092,9 @@ int x17_2x64_hash( void *output, const void *input, int thr_id )
|
||||
sph_whirlpool( &ctx.whirlpool, hash1, 64 );
|
||||
sph_whirlpool_close( &ctx.whirlpool, hash1 );
|
||||
|
||||
#if defined(__x86_64__)
|
||||
intrlv_2x64( vhash, hash0, hash1, 512 );
|
||||
sha512_2x64_ctx( &ctx.sha, vhash, vhash, 64 );
|
||||
dintrlv_2x64( hash0, hash1, vhash, 512 );
|
||||
#else
|
||||
sph_sha512_init( &ctx.sha );
|
||||
sph_sha512( &ctx.sha, hash0, 64 );
|
||||
sph_sha512_close( &ctx.sha, hash0 );
|
||||
sph_sha512_init( &ctx.sha );
|
||||
sph_sha512( &ctx.sha, hash1, 64 );
|
||||
sph_sha512_close( &ctx.sha, hash1 );
|
||||
#endif
|
||||
|
||||
sph_haval256_5_init( &ctx.haval );
|
||||
sph_haval256_5( &ctx.haval, hash0, 64 );
|
||||
|
@@ -210,7 +210,7 @@ int scanhash_x22i( struct work *work, uint32_t max_nonce,
   do
   {
      edata[19] = n;
      if ( x22i_hash( hash64, edata, thr_id ) );
      if ( x22i_hash( hash64, edata, thr_id ) )
      if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
      {
         pdata[19] = bswap_32( n );

@@ -245,7 +245,7 @@ int scanhash_x25x( struct work *work, uint32_t max_nonce,
   do
   {
      edata[19] = n;
      if ( x25x_hash( hash64, edata, thr_id ) );
      if ( x25x_hash( hash64, edata, thr_id ) )
      if ( unlikely( valid_hash( hash64, ptarget ) && !bench ) )
      {
         pdata[19] = bswap_32( n );
@@ -53,7 +53,6 @@
#include <stdlib.h>
#include <string.h>
#include "algo/sha/hmac-sha256-hash.h"
#include "algo/sha/hmac-sha256-hash-4way.h"
#include "yespower.h"
#include "yespower-platform.c"
13 api.c
@@ -8,6 +8,7 @@
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#define APIVERSION "1.0"

#ifdef WIN32
@@ -27,9 +28,9 @@
#include <math.h>
#include <stdarg.h>
#include <assert.h>
#include <openssl/sha.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "algo/sha/sha1-hash.h"

#include "miner.h"
#include "sysinfos.c"
@@ -208,7 +209,7 @@ static char *remote_seturl(char *params)
	return buffer;
}

/**
/*-hash*
 * Ask the miner to quit
 */
static char *remote_quit(char *params)
@@ -336,7 +337,6 @@ static int websocket_handshake(SOCKETTYPE c, char *result, char *clientkey)
	char inpkey[128] = { 0 };
	char seckey[64];
	uchar sha1[20];
	// SHA_CTX ctx;

	if (opt_protocol)
		applog(LOG_DEBUG, "clientkey: %s", clientkey);
@@ -346,11 +346,7 @@ static int websocket_handshake(SOCKETTYPE c, char *result, char *clientkey)
	// SHA-1 test from rfc, returns in base64 "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="
	//sprintf(inpkey, "dGhlIHNhbXBsZSBub25jZQ==258EAFA5-E914-47DA-95CA-C5AB0DC85B11");

	SHA1( inpkey, strlen(inpkey), sha1 );
	// Deprecated in openssl-3
	// SHA1_Init(&ctx);
	// SHA1_Update(&ctx, inpkey, strlen(inpkey));
	// SHA1_Final(sha1, &ctx);
	sph_sha1_full( sha1, inpkey, strlen(inpkey) );

	base64_encode(sha1, 20, seckey, sizeof(seckey));
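
Read together, the replacement computes the RFC 6455 Sec-WebSocket-Accept value without OpenSSL: append the fixed GUID to the client key, SHA-1 the result with the new sph code, and Base64-encode the 20-byte digest. A rough standalone sketch of that sequence follows; the base64_encode() prototype is assumed from the call above and the helper function name is invented for the example.

#include <stdio.h>
#include <string.h>
#include "algo/sha/sph_sha1.h"

// Assumed to match the miner's own helper used in api.c above.
void base64_encode( const unsigned char *in, int len, char *out, int out_size );

// Hypothetical helper: builds the Sec-WebSocket-Accept value for a client key.
static void make_ws_accept( char *accept, int accept_size, const char *clientkey )
{
   char inpkey[128] = { 0 };
   unsigned char digest[20];

   // RFC 6455: accept = base64( SHA1( key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" ) )
   snprintf( inpkey, sizeof inpkey, "%s258EAFA5-E914-47DA-95CA-C5AB0DC85B11", clientkey );
   sph_sha1_full( digest, inpkey, strlen( inpkey ) );
   base64_encode( digest, 20, accept, accept_size );
}

// The RFC test vector quoted above: key "dGhlIHNhbXBsZSBub25jZQ=="
// should yield "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".
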
@@ -733,3 +729,4 @@ void *api_thread(void *userdata)

	return NULL;
}
@@ -40,4 +40,3 @@ rm -f config.status
CFLAGS="-O3 -march=native -Wall -flax-vector-conversions" ./configure --with-curl
make -j $nproc
strip -s cpuminer
mv cpuminer cpuminer
1186 asm/scrypt-arm.S (file diff suppressed because it is too large)
2907 asm/scrypt-x64.S (file diff suppressed because it is too large)
830 asm/scrypt-x86.S
@@ -1,830 +0,0 @@
|
||||
/*
|
||||
* Copyright 2011-2012, 2014 pooler@litecoinpool.org
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <cpuminer-config.h>
|
||||
|
||||
#if defined(__linux__) && defined(__ELF__)
|
||||
.section .note.GNU-stack,"",%progbits
|
||||
#endif
|
||||
|
||||
#if defined(USE_ASM) && defined(__i386__)
|
||||
|
||||
.macro scrypt_shuffle src, so, dest, do
|
||||
movl \so+60(\src), %eax
|
||||
movl \so+44(\src), %ebx
|
||||
movl \so+28(\src), %ecx
|
||||
movl \so+12(\src), %edx
|
||||
movl %eax, \do+12(\dest)
|
||||
movl %ebx, \do+28(\dest)
|
||||
movl %ecx, \do+44(\dest)
|
||||
movl %edx, \do+60(\dest)
|
||||
movl \so+40(\src), %eax
|
||||
movl \so+8(\src), %ebx
|
||||
movl \so+48(\src), %ecx
|
||||
movl \so+16(\src), %edx
|
||||
movl %eax, \do+8(\dest)
|
||||
movl %ebx, \do+40(\dest)
|
||||
movl %ecx, \do+16(\dest)
|
||||
movl %edx, \do+48(\dest)
|
||||
movl \so+20(\src), %eax
|
||||
movl \so+4(\src), %ebx
|
||||
movl \so+52(\src), %ecx
|
||||
movl \so+36(\src), %edx
|
||||
movl %eax, \do+4(\dest)
|
||||
movl %ebx, \do+20(\dest)
|
||||
movl %ecx, \do+36(\dest)
|
||||
movl %edx, \do+52(\dest)
|
||||
movl \so+0(\src), %eax
|
||||
movl \so+24(\src), %ebx
|
||||
movl \so+32(\src), %ecx
|
||||
movl \so+56(\src), %edx
|
||||
movl %eax, \do+0(\dest)
|
||||
movl %ebx, \do+24(\dest)
|
||||
movl %ecx, \do+32(\dest)
|
||||
movl %edx, \do+56(\dest)
|
||||
.endm
|
||||
|
||||
.macro salsa8_core_gen_quadround
|
||||
movl 52(%esp), %ecx
|
||||
movl 4(%esp), %edx
|
||||
movl 20(%esp), %ebx
|
||||
movl 8(%esp), %esi
|
||||
leal (%ecx, %edx), %edi
|
||||
roll $7, %edi
|
||||
xorl %edi, %ebx
|
||||
movl %ebx, 4(%esp)
|
||||
movl 36(%esp), %edi
|
||||
leal (%edx, %ebx), %ebp
|
||||
roll $9, %ebp
|
||||
xorl %ebp, %edi
|
||||
movl 24(%esp), %ebp
|
||||
movl %edi, 8(%esp)
|
||||
addl %edi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %ecx
|
||||
movl 40(%esp), %ebx
|
||||
movl %ecx, 20(%esp)
|
||||
addl %edi, %ecx
|
||||
roll $18, %ecx
|
||||
leal (%esi, %ebp), %edi
|
||||
roll $7, %edi
|
||||
xorl %edi, %ebx
|
||||
movl %ebx, 24(%esp)
|
||||
movl 56(%esp), %edi
|
||||
xorl %ecx, %edx
|
||||
leal (%ebp, %ebx), %ecx
|
||||
roll $9, %ecx
|
||||
xorl %ecx, %edi
|
||||
movl %edi, 36(%esp)
|
||||
movl 28(%esp), %ecx
|
||||
movl %edx, 28(%esp)
|
||||
movl 44(%esp), %edx
|
||||
addl %edi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %esi
|
||||
movl 60(%esp), %ebx
|
||||
movl %esi, 40(%esp)
|
||||
addl %edi, %esi
|
||||
roll $18, %esi
|
||||
leal (%ecx, %edx), %edi
|
||||
roll $7, %edi
|
||||
xorl %edi, %ebx
|
||||
movl %ebx, 44(%esp)
|
||||
movl 12(%esp), %edi
|
||||
xorl %esi, %ebp
|
||||
leal (%edx, %ebx), %esi
|
||||
roll $9, %esi
|
||||
xorl %esi, %edi
|
||||
movl %edi, 12(%esp)
|
||||
movl 48(%esp), %esi
|
||||
movl %ebp, 48(%esp)
|
||||
movl 64(%esp), %ebp
|
||||
addl %edi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %ecx
|
||||
movl 16(%esp), %ebx
|
||||
movl %ecx, 16(%esp)
|
||||
addl %edi, %ecx
|
||||
roll $18, %ecx
|
||||
leal (%esi, %ebp), %edi
|
||||
roll $7, %edi
|
||||
xorl %edi, %ebx
|
||||
movl 32(%esp), %edi
|
||||
xorl %ecx, %edx
|
||||
leal (%ebp, %ebx), %ecx
|
||||
roll $9, %ecx
|
||||
xorl %ecx, %edi
|
||||
movl %edi, 32(%esp)
|
||||
movl %ebx, %ecx
|
||||
movl %edx, 52(%esp)
|
||||
movl 28(%esp), %edx
|
||||
addl %edi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %esi
|
||||
movl 40(%esp), %ebx
|
||||
movl %esi, 28(%esp)
|
||||
addl %edi, %esi
|
||||
roll $18, %esi
|
||||
leal (%ecx, %edx), %edi
|
||||
roll $7, %edi
|
||||
xorl %edi, %ebx
|
||||
movl %ebx, 40(%esp)
|
||||
movl 12(%esp), %edi
|
||||
xorl %esi, %ebp
|
||||
leal (%edx, %ebx), %esi
|
||||
roll $9, %esi
|
||||
xorl %esi, %edi
|
||||
movl %edi, 12(%esp)
|
||||
movl 4(%esp), %esi
|
||||
movl %ebp, 4(%esp)
|
||||
movl 48(%esp), %ebp
|
||||
addl %edi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %ecx
|
||||
movl 16(%esp), %ebx
|
||||
movl %ecx, 16(%esp)
|
||||
addl %edi, %ecx
|
||||
roll $18, %ecx
|
||||
leal (%esi, %ebp), %edi
|
||||
roll $7, %edi
|
||||
xorl %edi, %ebx
|
||||
movl %ebx, 48(%esp)
|
||||
movl 32(%esp), %edi
|
||||
xorl %ecx, %edx
|
||||
leal (%ebp, %ebx), %ecx
|
||||
roll $9, %ecx
|
||||
xorl %ecx, %edi
|
||||
movl %edi, 32(%esp)
|
||||
movl 24(%esp), %ecx
|
||||
movl %edx, 24(%esp)
|
||||
movl 52(%esp), %edx
|
||||
addl %edi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %esi
|
||||
movl 28(%esp), %ebx
|
||||
movl %esi, 28(%esp)
|
||||
addl %edi, %esi
|
||||
roll $18, %esi
|
||||
leal (%ecx, %edx), %edi
|
||||
roll $7, %edi
|
||||
xorl %edi, %ebx
|
||||
movl %ebx, 52(%esp)
|
||||
movl 8(%esp), %edi
|
||||
xorl %esi, %ebp
|
||||
leal (%edx, %ebx), %esi
|
||||
roll $9, %esi
|
||||
xorl %esi, %edi
|
||||
movl %edi, 8(%esp)
|
||||
movl 44(%esp), %esi
|
||||
movl %ebp, 44(%esp)
|
||||
movl 4(%esp), %ebp
|
||||
addl %edi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %ecx
|
||||
movl 20(%esp), %ebx
|
||||
movl %ecx, 4(%esp)
|
||||
addl %edi, %ecx
|
||||
roll $18, %ecx
|
||||
leal (%esi, %ebp), %edi
|
||||
roll $7, %edi
|
||||
xorl %edi, %ebx
|
||||
movl 36(%esp), %edi
|
||||
xorl %ecx, %edx
|
||||
leal (%ebp, %ebx), %ecx
|
||||
roll $9, %ecx
|
||||
xorl %ecx, %edi
|
||||
movl %edi, 20(%esp)
|
||||
movl %ebx, %ecx
|
||||
movl %edx, 36(%esp)
|
||||
movl 24(%esp), %edx
|
||||
addl %edi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %esi
|
||||
movl 28(%esp), %ebx
|
||||
movl %esi, 24(%esp)
|
||||
addl %edi, %esi
|
||||
roll $18, %esi
|
||||
leal (%ecx, %edx), %edi
|
||||
roll $7, %edi
|
||||
xorl %edi, %ebx
|
||||
movl %ebx, 28(%esp)
|
||||
xorl %esi, %ebp
|
||||
movl 8(%esp), %esi
|
||||
leal (%edx, %ebx), %edi
|
||||
roll $9, %edi
|
||||
xorl %edi, %esi
|
||||
movl 40(%esp), %edi
|
||||
movl %ebp, 8(%esp)
|
||||
movl 44(%esp), %ebp
|
||||
movl %esi, 40(%esp)
|
||||
addl %esi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %ecx
|
||||
movl 4(%esp), %ebx
|
||||
movl %ecx, 44(%esp)
|
||||
addl %esi, %ecx
|
||||
roll $18, %ecx
|
||||
leal (%edi, %ebp), %esi
|
||||
roll $7, %esi
|
||||
xorl %esi, %ebx
|
||||
movl %ebx, 4(%esp)
|
||||
movl 20(%esp), %esi
|
||||
xorl %ecx, %edx
|
||||
leal (%ebp, %ebx), %ecx
|
||||
roll $9, %ecx
|
||||
xorl %ecx, %esi
|
||||
movl %esi, 56(%esp)
|
||||
movl 48(%esp), %ecx
|
||||
movl %edx, 20(%esp)
|
||||
movl 36(%esp), %edx
|
||||
addl %esi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %edi
|
||||
movl 24(%esp), %ebx
|
||||
movl %edi, 24(%esp)
|
||||
addl %esi, %edi
|
||||
roll $18, %edi
|
||||
leal (%ecx, %edx), %esi
|
||||
roll $7, %esi
|
||||
xorl %esi, %ebx
|
||||
movl %ebx, 60(%esp)
|
||||
movl 12(%esp), %esi
|
||||
xorl %edi, %ebp
|
||||
leal (%edx, %ebx), %edi
|
||||
roll $9, %edi
|
||||
xorl %edi, %esi
|
||||
movl %esi, 12(%esp)
|
||||
movl 52(%esp), %edi
|
||||
movl %ebp, 36(%esp)
|
||||
movl 8(%esp), %ebp
|
||||
addl %esi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %ecx
|
||||
movl 16(%esp), %ebx
|
||||
movl %ecx, 16(%esp)
|
||||
addl %esi, %ecx
|
||||
roll $18, %ecx
|
||||
leal (%edi, %ebp), %esi
|
||||
roll $7, %esi
|
||||
xorl %esi, %ebx
|
||||
movl 32(%esp), %esi
|
||||
xorl %ecx, %edx
|
||||
leal (%ebp, %ebx), %ecx
|
||||
roll $9, %ecx
|
||||
xorl %ecx, %esi
|
||||
movl %esi, 32(%esp)
|
||||
movl %ebx, %ecx
|
||||
movl %edx, 48(%esp)
|
||||
movl 20(%esp), %edx
|
||||
addl %esi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %edi
|
||||
movl 24(%esp), %ebx
|
||||
movl %edi, 20(%esp)
|
||||
addl %esi, %edi
|
||||
roll $18, %edi
|
||||
leal (%ecx, %edx), %esi
|
||||
roll $7, %esi
|
||||
xorl %esi, %ebx
|
||||
movl %ebx, 8(%esp)
|
||||
movl 12(%esp), %esi
|
||||
xorl %edi, %ebp
|
||||
leal (%edx, %ebx), %edi
|
||||
roll $9, %edi
|
||||
xorl %edi, %esi
|
||||
movl %esi, 12(%esp)
|
||||
movl 28(%esp), %edi
|
||||
movl %ebp, 52(%esp)
|
||||
movl 36(%esp), %ebp
|
||||
addl %esi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %ecx
|
||||
movl 16(%esp), %ebx
|
||||
movl %ecx, 16(%esp)
|
||||
addl %esi, %ecx
|
||||
roll $18, %ecx
|
||||
leal (%edi, %ebp), %esi
|
||||
roll $7, %esi
|
||||
xorl %esi, %ebx
|
||||
movl %ebx, 28(%esp)
|
||||
movl 32(%esp), %esi
|
||||
xorl %ecx, %edx
|
||||
leal (%ebp, %ebx), %ecx
|
||||
roll $9, %ecx
|
||||
xorl %ecx, %esi
|
||||
movl %esi, 32(%esp)
|
||||
movl 4(%esp), %ecx
|
||||
movl %edx, 4(%esp)
|
||||
movl 48(%esp), %edx
|
||||
addl %esi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %edi
|
||||
movl 20(%esp), %ebx
|
||||
movl %edi, 20(%esp)
|
||||
addl %esi, %edi
|
||||
roll $18, %edi
|
||||
leal (%ecx, %edx), %esi
|
||||
roll $7, %esi
|
||||
xorl %esi, %ebx
|
||||
movl %ebx, 48(%esp)
|
||||
movl 40(%esp), %esi
|
||||
xorl %edi, %ebp
|
||||
leal (%edx, %ebx), %edi
|
||||
roll $9, %edi
|
||||
xorl %edi, %esi
|
||||
movl %esi, 36(%esp)
|
||||
movl 60(%esp), %edi
|
||||
movl %ebp, 24(%esp)
|
||||
movl 52(%esp), %ebp
|
||||
addl %esi, %ebx
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %ecx
|
||||
movl 44(%esp), %ebx
|
||||
movl %ecx, 40(%esp)
|
||||
addl %esi, %ecx
|
||||
roll $18, %ecx
|
||||
leal (%edi, %ebp), %esi
|
||||
roll $7, %esi
|
||||
xorl %esi, %ebx
|
||||
movl %ebx, 52(%esp)
|
||||
movl 56(%esp), %esi
|
||||
xorl %ecx, %edx
|
||||
leal (%ebp, %ebx), %ecx
|
||||
roll $9, %ecx
|
||||
xorl %ecx, %esi
|
||||
movl %esi, 56(%esp)
|
||||
addl %esi, %ebx
|
||||
movl %edx, 44(%esp)
|
||||
roll $13, %ebx
|
||||
xorl %ebx, %edi
|
||||
movl %edi, 60(%esp)
|
||||
addl %esi, %edi
|
||||
roll $18, %edi
|
||||
xorl %edi, %ebp
|
||||
movl %ebp, 64(%esp)
|
||||
.endm
|
||||
|
||||
.text
|
||||
.p2align 5
|
||||
salsa8_core_gen:
|
||||
salsa8_core_gen_quadround
|
||||
salsa8_core_gen_quadround
|
||||
ret
|
||||
|
||||
|
||||
.text
|
||||
.p2align 5
|
||||
.globl scrypt_core
|
||||
.globl _scrypt_core
|
||||
scrypt_core:
|
||||
_scrypt_core:
|
||||
pushl %ebx
|
||||
pushl %ebp
|
||||
pushl %edi
|
||||
pushl %esi
|
||||
|
||||
/* Check for SSE2 availability */
|
||||
movl $1, %eax
|
||||
cpuid
|
||||
andl $0x04000000, %edx
|
||||
jnz scrypt_core_sse2
|
||||
|
||||
scrypt_core_gen:
|
||||
movl 20(%esp), %edi
|
||||
movl 24(%esp), %esi
|
||||
movl 28(%esp), %ecx
|
||||
subl $72, %esp
|
||||
|
||||
.macro scrypt_core_macro1a p, q
|
||||
movl \p(%edi), %eax
|
||||
movl \q(%edi), %edx
|
||||
movl %eax, \p(%esi)
|
||||
movl %edx, \q(%esi)
|
||||
xorl %edx, %eax
|
||||
movl %eax, \p(%edi)
|
||||
movl %eax, \p(%esp)
|
||||
.endm
|
||||
|
||||
.macro scrypt_core_macro1b p, q
|
||||
movl \p(%edi), %eax
|
||||
xorl \p(%esi, %edx), %eax
|
||||
movl \q(%edi), %ebx
|
||||
xorl \q(%esi, %edx), %ebx
|
||||
movl %ebx, \q(%edi)
|
||||
xorl %ebx, %eax
|
||||
movl %eax, \p(%edi)
|
||||
movl %eax, \p(%esp)
|
||||
.endm
|
||||
|
||||
.macro scrypt_core_macro2 p, q
|
||||
movl \p(%esp), %eax
|
||||
addl \p(%edi), %eax
|
||||
movl %eax, \p(%edi)
|
||||
xorl \q(%edi), %eax
|
||||
movl %eax, \q(%edi)
|
||||
movl %eax, \p(%esp)
|
||||
.endm
|
||||
|
||||
.macro scrypt_core_macro3 p, q
|
||||
movl \p(%esp), %eax
|
||||
addl \q(%edi), %eax
|
||||
movl %eax, \q(%edi)
|
||||
.endm
|
||||
|
||||
shll $7, %ecx
|
||||
addl %esi, %ecx
|
||||
scrypt_core_gen_loop1:
|
||||
movl %esi, 64(%esp)
|
||||
movl %ecx, 68(%esp)
|
||||
|
||||
scrypt_core_macro1a 0, 64
|
||||
scrypt_core_macro1a 4, 68
|
||||
scrypt_core_macro1a 8, 72
|
||||
scrypt_core_macro1a 12, 76
|
||||
scrypt_core_macro1a 16, 80
|
||||
scrypt_core_macro1a 20, 84
|
||||
scrypt_core_macro1a 24, 88
|
||||
scrypt_core_macro1a 28, 92
|
||||
scrypt_core_macro1a 32, 96
|
||||
scrypt_core_macro1a 36, 100
|
||||
scrypt_core_macro1a 40, 104
|
||||
scrypt_core_macro1a 44, 108
|
||||
scrypt_core_macro1a 48, 112
|
||||
scrypt_core_macro1a 52, 116
|
||||
scrypt_core_macro1a 56, 120
|
||||
scrypt_core_macro1a 60, 124
|
||||
|
||||
call salsa8_core_gen
|
||||
|
||||
movl 92(%esp), %edi
|
||||
scrypt_core_macro2 0, 64
|
||||
scrypt_core_macro2 4, 68
|
||||
scrypt_core_macro2 8, 72
|
||||
scrypt_core_macro2 12, 76
|
||||
scrypt_core_macro2 16, 80
|
||||
scrypt_core_macro2 20, 84
|
||||
scrypt_core_macro2 24, 88
|
||||
scrypt_core_macro2 28, 92
|
||||
scrypt_core_macro2 32, 96
|
||||
scrypt_core_macro2 36, 100
|
||||
scrypt_core_macro2 40, 104
|
||||
scrypt_core_macro2 44, 108
|
||||
scrypt_core_macro2 48, 112
|
||||
scrypt_core_macro2 52, 116
|
||||
scrypt_core_macro2 56, 120
|
||||
scrypt_core_macro2 60, 124
|
||||
|
||||
call salsa8_core_gen
|
||||
|
||||
movl 92(%esp), %edi
|
||||
scrypt_core_macro3 0, 64
|
||||
scrypt_core_macro3 4, 68
|
||||
scrypt_core_macro3 8, 72
|
||||
scrypt_core_macro3 12, 76
|
||||
scrypt_core_macro3 16, 80
|
||||
scrypt_core_macro3 20, 84
|
||||
scrypt_core_macro3 24, 88
|
||||
scrypt_core_macro3 28, 92
|
||||
scrypt_core_macro3 32, 96
|
||||
scrypt_core_macro3 36, 100
|
||||
scrypt_core_macro3 40, 104
|
||||
scrypt_core_macro3 44, 108
|
||||
scrypt_core_macro3 48, 112
|
||||
scrypt_core_macro3 52, 116
|
||||
scrypt_core_macro3 56, 120
|
||||
scrypt_core_macro3 60, 124
|
||||
|
||||
movl 64(%esp), %esi
|
||||
movl 68(%esp), %ecx
|
||||
addl $128, %esi
|
||||
cmpl %ecx, %esi
|
||||
jne scrypt_core_gen_loop1
|
||||
|
||||
movl 96(%esp), %esi
|
||||
movl 100(%esp), %ecx
|
||||
movl %ecx, %eax
|
||||
subl $1, %eax
|
||||
movl %eax, 100(%esp)
|
||||
scrypt_core_gen_loop2:
|
||||
movl %ecx, 68(%esp)
|
||||
|
||||
movl 64(%edi), %edx
|
||||
andl 100(%esp), %edx
|
||||
shll $7, %edx
|
||||
|
||||
scrypt_core_macro1b 0, 64
|
||||
scrypt_core_macro1b 4, 68
|
||||
scrypt_core_macro1b 8, 72
|
||||
scrypt_core_macro1b 12, 76
|
||||
scrypt_core_macro1b 16, 80
|
||||
scrypt_core_macro1b 20, 84
|
||||
scrypt_core_macro1b 24, 88
|
||||
scrypt_core_macro1b 28, 92
|
||||
scrypt_core_macro1b 32, 96
|
||||
scrypt_core_macro1b 36, 100
|
||||
scrypt_core_macro1b 40, 104
|
||||
scrypt_core_macro1b 44, 108
|
||||
scrypt_core_macro1b 48, 112
|
||||
scrypt_core_macro1b 52, 116
|
||||
scrypt_core_macro1b 56, 120
|
||||
scrypt_core_macro1b 60, 124
|
||||
|
||||
call salsa8_core_gen
|
||||
|
||||
movl 92(%esp), %edi
|
||||
scrypt_core_macro2 0, 64
|
||||
scrypt_core_macro2 4, 68
|
||||
scrypt_core_macro2 8, 72
|
||||
scrypt_core_macro2 12, 76
|
||||
scrypt_core_macro2 16, 80
|
||||
scrypt_core_macro2 20, 84
|
||||
scrypt_core_macro2 24, 88
|
||||
scrypt_core_macro2 28, 92
|
||||
scrypt_core_macro2 32, 96
|
||||
scrypt_core_macro2 36, 100
|
||||
scrypt_core_macro2 40, 104
|
||||
scrypt_core_macro2 44, 108
|
||||
scrypt_core_macro2 48, 112
|
||||
scrypt_core_macro2 52, 116
|
||||
scrypt_core_macro2 56, 120
|
||||
scrypt_core_macro2 60, 124
|
||||
|
||||
call salsa8_core_gen
|
||||
|
||||
movl 92(%esp), %edi
|
||||
movl 96(%esp), %esi
|
||||
scrypt_core_macro3 0, 64
|
||||
scrypt_core_macro3 4, 68
|
||||
scrypt_core_macro3 8, 72
|
||||
scrypt_core_macro3 12, 76
|
||||
scrypt_core_macro3 16, 80
|
||||
scrypt_core_macro3 20, 84
|
||||
scrypt_core_macro3 24, 88
|
||||
scrypt_core_macro3 28, 92
|
||||
scrypt_core_macro3 32, 96
|
||||
scrypt_core_macro3 36, 100
|
||||
scrypt_core_macro3 40, 104
|
||||
scrypt_core_macro3 44, 108
|
||||
scrypt_core_macro3 48, 112
|
||||
scrypt_core_macro3 52, 116
|
||||
scrypt_core_macro3 56, 120
|
||||
scrypt_core_macro3 60, 124
|
||||
|
||||
movl 68(%esp), %ecx
|
||||
subl $1, %ecx
|
||||
ja scrypt_core_gen_loop2
|
||||
|
||||
addl $72, %esp
|
||||
popl %esi
|
||||
popl %edi
|
||||
popl %ebp
|
||||
popl %ebx
|
||||
ret
|
||||
|
||||
|
||||
.macro salsa8_core_sse2_doubleround
|
||||
movdqa %xmm1, %xmm4
|
||||
paddd %xmm0, %xmm4
|
||||
movdqa %xmm4, %xmm5
|
||||
pslld $7, %xmm4
|
||||
psrld $25, %xmm5
|
||||
pxor %xmm4, %xmm3
|
||||
movdqa %xmm0, %xmm4
|
||||
pxor %xmm5, %xmm3
|
||||
|
||||
paddd %xmm3, %xmm4
|
||||
movdqa %xmm4, %xmm5
|
||||
pslld $9, %xmm4
|
||||
psrld $23, %xmm5
|
||||
pxor %xmm4, %xmm2
|
||||
movdqa %xmm3, %xmm4
|
||||
pxor %xmm5, %xmm2
|
||||
pshufd $0x93, %xmm3, %xmm3
|
||||
|
||||
paddd %xmm2, %xmm4
|
||||
movdqa %xmm4, %xmm5
|
||||
pslld $13, %xmm4
|
||||
psrld $19, %xmm5
|
||||
pxor %xmm4, %xmm1
|
||||
movdqa %xmm2, %xmm4
|
||||
pxor %xmm5, %xmm1
|
||||
pshufd $0x4e, %xmm2, %xmm2
|
||||
|
||||
paddd %xmm1, %xmm4
|
||||
movdqa %xmm4, %xmm5
|
||||
pslld $18, %xmm4
|
||||
psrld $14, %xmm5
|
||||
pxor %xmm4, %xmm0
|
||||
movdqa %xmm3, %xmm4
|
||||
pxor %xmm5, %xmm0
|
||||
pshufd $0x39, %xmm1, %xmm1
|
||||
|
||||
paddd %xmm0, %xmm4
|
||||
movdqa %xmm4, %xmm5
|
||||
pslld $7, %xmm4
|
||||
psrld $25, %xmm5
|
||||
pxor %xmm4, %xmm1
|
||||
movdqa %xmm0, %xmm4
|
||||
pxor %xmm5, %xmm1
|
||||
|
||||
paddd %xmm1, %xmm4
|
||||
movdqa %xmm4, %xmm5
|
||||
pslld $9, %xmm4
|
||||
psrld $23, %xmm5
|
||||
pxor %xmm4, %xmm2
|
||||
movdqa %xmm1, %xmm4
|
||||
pxor %xmm5, %xmm2
|
||||
pshufd $0x93, %xmm1, %xmm1
|
||||
|
||||
paddd %xmm2, %xmm4
|
||||
movdqa %xmm4, %xmm5
|
||||
pslld $13, %xmm4
|
||||
psrld $19, %xmm5
|
||||
pxor %xmm4, %xmm3
|
||||
movdqa %xmm2, %xmm4
|
||||
pxor %xmm5, %xmm3
|
||||
pshufd $0x4e, %xmm2, %xmm2
|
||||
|
||||
paddd %xmm3, %xmm4
|
||||
movdqa %xmm4, %xmm5
|
||||
pslld $18, %xmm4
|
||||
psrld $14, %xmm5
|
||||
pxor %xmm4, %xmm0
|
||||
pshufd $0x39, %xmm3, %xmm3
|
||||
pxor %xmm5, %xmm0
|
||||
.endm
|
||||
|
||||
.macro salsa8_core_sse2
|
||||
salsa8_core_sse2_doubleround
|
||||
salsa8_core_sse2_doubleround
|
||||
salsa8_core_sse2_doubleround
|
||||
salsa8_core_sse2_doubleround
|
||||
.endm
|
||||
|
||||
.p2align 5
|
||||
scrypt_core_sse2:
|
||||
movl 20(%esp), %edi
|
||||
movl 24(%esp), %esi
|
||||
movl %esp, %ebp
|
||||
subl $128, %esp
|
||||
andl $-16, %esp
|
||||
|
||||
scrypt_shuffle %edi, 0, %esp, 0
|
||||
scrypt_shuffle %edi, 64, %esp, 64
|
||||
|
||||
movdqa 96(%esp), %xmm6
|
||||
movdqa 112(%esp), %xmm7
|
||||
|
||||
movl %esi, %edx
|
||||
movl 28(%ebp), %ecx
|
||||
shll $7, %ecx
|
||||
addl %esi, %ecx
|
||||
scrypt_core_sse2_loop1:
|
||||
movdqa 0(%esp), %xmm0
|
||||
movdqa 16(%esp), %xmm1
|
||||
movdqa 32(%esp), %xmm2
|
||||
movdqa 48(%esp), %xmm3
|
||||
movdqa 64(%esp), %xmm4
|
||||
movdqa 80(%esp), %xmm5
|
||||
pxor %xmm4, %xmm0
|
||||
pxor %xmm5, %xmm1
|
||||
movdqa %xmm0, 0(%edx)
|
||||
movdqa %xmm1, 16(%edx)
|
||||
pxor %xmm6, %xmm2
|
||||
pxor %xmm7, %xmm3
|
||||
movdqa %xmm2, 32(%edx)
|
||||
movdqa %xmm3, 48(%edx)
|
||||
movdqa %xmm4, 64(%edx)
|
||||
movdqa %xmm5, 80(%edx)
|
||||
movdqa %xmm6, 96(%edx)
|
||||
movdqa %xmm7, 112(%edx)
|
||||
|
||||
salsa8_core_sse2
|
||||
paddd 0(%edx), %xmm0
|
||||
paddd 16(%edx), %xmm1
|
||||
paddd 32(%edx), %xmm2
|
||||
paddd 48(%edx), %xmm3
|
||||
movdqa %xmm0, 0(%esp)
|
||||
movdqa %xmm1, 16(%esp)
|
||||
movdqa %xmm2, 32(%esp)
|
||||
movdqa %xmm3, 48(%esp)
|
||||
|
||||
pxor 64(%esp), %xmm0
|
||||
pxor 80(%esp), %xmm1
|
||||
pxor %xmm6, %xmm2
|
||||
pxor %xmm7, %xmm3
|
||||
movdqa %xmm0, 64(%esp)
|
||||
movdqa %xmm1, 80(%esp)
|
||||
movdqa %xmm2, %xmm6
|
||||
movdqa %xmm3, %xmm7
|
||||
salsa8_core_sse2
|
||||
paddd 64(%esp), %xmm0
|
||||
paddd 80(%esp), %xmm1
|
||||
paddd %xmm2, %xmm6
|
||||
paddd %xmm3, %xmm7
|
||||
movdqa %xmm0, 64(%esp)
|
||||
movdqa %xmm1, 80(%esp)
|
||||
|
||||
addl $128, %edx
|
||||
cmpl %ecx, %edx
|
||||
jne scrypt_core_sse2_loop1
|
||||
|
||||
movdqa 64(%esp), %xmm4
|
||||
movdqa 80(%esp), %xmm5
|
||||
|
||||
movl 28(%ebp), %ecx
|
||||
movl %ecx, %eax
|
||||
subl $1, %eax
|
||||
scrypt_core_sse2_loop2:
|
||||
movd %xmm4, %edx
|
||||
movdqa 0(%esp), %xmm0
|
||||
movdqa 16(%esp), %xmm1
|
||||
movdqa 32(%esp), %xmm2
|
||||
movdqa 48(%esp), %xmm3
|
||||
andl %eax, %edx
|
||||
shll $7, %edx
|
||||
pxor 0(%esi, %edx), %xmm0
|
||||
pxor 16(%esi, %edx), %xmm1
|
||||
pxor 32(%esi, %edx), %xmm2
|
||||
pxor 48(%esi, %edx), %xmm3
|
||||
|
||||
pxor %xmm4, %xmm0
|
||||
pxor %xmm5, %xmm1
|
||||
movdqa %xmm0, 0(%esp)
|
||||
movdqa %xmm1, 16(%esp)
|
||||
pxor %xmm6, %xmm2
|
||||
pxor %xmm7, %xmm3
|
||||
movdqa %xmm2, 32(%esp)
|
||||
movdqa %xmm3, 48(%esp)
|
||||
salsa8_core_sse2
|
||||
paddd 0(%esp), %xmm0
|
||||
paddd 16(%esp), %xmm1
|
||||
paddd 32(%esp), %xmm2
|
||||
paddd 48(%esp), %xmm3
|
||||
movdqa %xmm0, 0(%esp)
|
||||
movdqa %xmm1, 16(%esp)
|
||||
movdqa %xmm2, 32(%esp)
|
||||
movdqa %xmm3, 48(%esp)
|
||||
|
||||
pxor 64(%esi, %edx), %xmm0
|
||||
pxor 80(%esi, %edx), %xmm1
|
||||
pxor 96(%esi, %edx), %xmm2
|
||||
pxor 112(%esi, %edx), %xmm3
|
||||
pxor 64(%esp), %xmm0
|
||||
pxor 80(%esp), %xmm1
|
||||
pxor %xmm6, %xmm2
|
||||
pxor %xmm7, %xmm3
|
||||
movdqa %xmm0, 64(%esp)
|
||||
movdqa %xmm1, 80(%esp)
|
||||
movdqa %xmm2, %xmm6
|
||||
movdqa %xmm3, %xmm7
|
||||
salsa8_core_sse2
|
||||
paddd 64(%esp), %xmm0
|
||||
paddd 80(%esp), %xmm1
|
||||
paddd %xmm2, %xmm6
|
||||
paddd %xmm3, %xmm7
|
||||
movdqa %xmm0, %xmm4
|
||||
movdqa %xmm1, %xmm5
|
||||
movdqa %xmm0, 64(%esp)
|
||||
movdqa %xmm1, 80(%esp)
|
||||
|
||||
subl $1, %ecx
|
||||
ja scrypt_core_sse2_loop2
|
||||
|
||||
movdqa %xmm6, 96(%esp)
|
||||
movdqa %xmm7, 112(%esp)
|
||||
|
||||
scrypt_shuffle %esp, 0, %edi, 0
|
||||
scrypt_shuffle %esp, 64, %edi, 64
|
||||
|
||||
movl %ebp, %esp
|
||||
popl %esi
|
||||
popl %edi
|
||||
popl %ebp
|
||||
popl %ebx
|
||||
ret
|
||||
|
||||
#endif
|
1583 asm/sha2-arm.S (file diff suppressed because it is too large)
3661 asm/sha2-x64.S (file diff suppressed because it is too large)
1193 asm/sha2-x86.S (file diff suppressed because it is too large)
20 configure (vendored)
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.71 for cpuminer-opt 23.5.
# Generated by GNU Autoconf 2.71 for cpuminer-opt 23.8.
#
#
# Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation,
@@ -608,8 +608,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='cpuminer-opt'
PACKAGE_TARNAME='cpuminer-opt'
PACKAGE_VERSION='23.5'
PACKAGE_STRING='cpuminer-opt 23.5'
PACKAGE_VERSION='23.8'
PACKAGE_STRING='cpuminer-opt 23.8'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''

@@ -1360,7 +1360,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures cpuminer-opt 23.5 to adapt to many kinds of systems.
\`configure' configures cpuminer-opt 23.8 to adapt to many kinds of systems.

Usage: $0 [OPTION]... [VAR=VALUE]...

@@ -1432,7 +1432,7 @@ fi

if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of cpuminer-opt 23.5:";;
short | recursive ) echo "Configuration of cpuminer-opt 23.8:";;
esac
cat <<\_ACEOF

@@ -1538,7 +1538,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
cpuminer-opt configure 23.5
cpuminer-opt configure 23.8
generated by GNU Autoconf 2.71

Copyright (C) 2021 Free Software Foundation, Inc.
@@ -1985,7 +1985,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.

It was created by cpuminer-opt $as_me 23.5, which was
It was created by cpuminer-opt $as_me 23.8, which was
generated by GNU Autoconf 2.71. Invocation command line was

$ $0$ac_configure_args_raw
@@ -3593,7 +3593,7 @@ fi

# Define the identity of the package.
PACKAGE='cpuminer-opt'
VERSION='23.5'
VERSION='23.8'


printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h
@@ -7508,7 +7508,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by cpuminer-opt $as_me 23.5, which was
This file was extended by cpuminer-opt $as_me 23.8, which was
generated by GNU Autoconf 2.71. Invocation command line was

CONFIG_FILES = $CONFIG_FILES
@@ -7576,7 +7576,7 @@ ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config='$ac_cs_config_escaped'
ac_cs_version="\\
cpuminer-opt config.status 23.5
cpuminer-opt config.status 23.8
configured by $0, generated by GNU Autoconf 2.71,
with options \\"\$ac_cs_config\\"


@@ -1,4 +1,4 @@
AC_INIT([cpuminer-opt], [23.5])
AC_INIT([cpuminer-opt], [23.8])

AC_PREREQ([2.59c])
AC_CANONICAL_SYSTEM
4357	configure~
File diff suppressed because it is too large
84	cpu-miner.c
@@ -36,7 +36,7 @@
#include <memory.h>
#include <curl/curl.h>
#include <jansson.h>
#include <openssl/sha.h>
//#include <openssl/sha.h>
//#include <mm_malloc.h>
#include "sysinfos.c"
#include "algo/sha/sha256d.h"
@@ -1967,18 +1967,6 @@ void sha256_gen_merkle_root( char* merkle_root, struct stratum_ctx* sctx )
sha256d( merkle_root, merkle_root, 64 );
}
}
/*
// OpenSSL single sha256, deprecated
void SHA256_gen_merkle_root( char* merkle_root, struct stratum_ctx* sctx )
{
SHA256( sctx->job.coinbase, (int)sctx->job.coinbase_size, merkle_root );
for ( int i = 0; i < sctx->job.merkle_count; i++ )
{
memcpy( merkle_root + 32, sctx->job.merkle[i], 32 );
sha256d( merkle_root, merkle_root, 64 );
}
}
*/

// Default is do_nothing (assumed LE)
void set_work_data_big_endian( struct work *work )
@@ -2212,8 +2200,8 @@ static void *miner_thread( void *userdata )
// int64_t max64 = 1000;
int nonce_found = 0;

if ( likely( algo_gate.do_this_thread( thr_id ) ) )
{
// if ( likely( algo_gate.do_this_thread( thr_id ) ) )
// {
if ( have_stratum )
{
while ( unlikely( stratum_down ) )
@@ -2262,8 +2250,8 @@ static void *miner_thread( void *userdata )

pthread_rwlock_unlock( &g_work_lock );

} // do_this_thread
algo_gate.resync_threads( thr_id, &work );
// } // do_this_thread
// algo_gate.resync_threads( thr_id, &work );

// conditional mining
if ( unlikely( !wanna_mine( thr_id ) ) )
@@ -2321,6 +2309,12 @@ static void *miner_thread( void *userdata )
gettimeofday( (struct timeval *) &tv_start, NULL );

// Scan for nonce
// nonce_found = scanhash_sha256dt_ref( &work, max_nonce, &hashes_done,
// mythr );
// nonce_found = scanhash_sha256dt_4x32( &work, max_nonce, &hashes_done,
// mythr );


nonce_found = algo_gate.scanhash( &work, max_nonce, &hashes_done,
mythr );

@@ -3677,58 +3671,17 @@ static int thread_create(struct thr_info *thr, void* func)

void get_defconfig_path(char *out, size_t bufsize, char *argv0);


#include "simd-utils.h"
#include "algo/echo/aes_ni/hash_api.h"
#include "compat/aes_helper.c"

int main(int argc, char *argv[])
{
struct thr_info *thr;
long flags;
int i, err;

/*
#include "simd-utils.h"

printf("bswap32: %08x, bswap64: %016lx\n", bswap_32( 0x03020100 ), bswap_64( 0x0706050403020100 ) );
printf("ror32: %08x, ror64: %016lx\n", ror32( 0x03020100, 8 ), ror64( 0x0706050403020100, 8 ) );
exit(0);

uint64x2_t a64 = v128_set64( 0x5555555555555555, 0xcccccccccccccccc ) ;
uint64x2_t c64 = v128_set64( 0xffffffffffffffff, 0x0000000000000000 ) ;
uint64x2_t mask = v128_set64( 0x0f0f0f0ff0f0f0f0, 0xf0f0f0f00f0f0f0f ) ;

uint32x4_t a32 = v128_set32( 0x0f0e0d0c, 0x0b0a0908, 0x07060504, 0x03020100 );
uint16x8_t a16 = v128_set16( 0x0f0e, 0x00d0c, 0x0b0a, 0x0908, 0x0706, 0x0504, 0x0302, 0x0100 );
uint8x16_t a8 = v128_set8( 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00 );

a64 = v128_bswap64( a32 );
a32 = v128_bswap32( a32 );
a16 = v128_bswap16( a16 );

uint64_t *b64 = (uint64_t*)&a64;
uint32_t *b32 = (uint32_t*)&a32;
uint16_t *b16 = (uint16_t*)&a16;

//a32 = v128_ror32( a32, 4 );


printf("64: %016lx, %016lx\n", b64[1], b64[0] );

printf("32: %08x %08x %08x %08x\n", b32[3], b32[2], b32[1], b32[0] );

printf("16: %04x %04x %04x %04x %04x %04x %04x %04x\n", b16[7], b16[6], b16[5], b16[4], b16[3], b16[2], b16[1], b16[0] );

//a32 = v128_ror32( a32, 28 );
//printf("32: %08x %08x %08x %08x\n", b32[3], b32[2], b32[1], b32[0] );
//a32 = v128_rol32( a32, 4 );
//printf("32: %08x %08x %08x %08x\n", b32[3], b32[2], b32[1], b32[0] );
//a32 = v128_rol32( a32, 28 );
//printf("32: %08x %08x %08x %08x\n", b32[3], b32[2], b32[1], b32[0] );

exit(0);
*/


pthread_mutex_init(&applog_lock, NULL);

show_credits();
@@ -3864,6 +3817,9 @@ exit(0);
return 1;
}

if ( is_root() )
applog( LOG_NOTICE, "Running cpuminer as Superuser is discouraged.");

#ifndef WIN32
if (opt_background)
{
@@ -4087,7 +4043,7 @@ exit(0);
applog( LOG_INFO, "%d of %d miner threads started using '%s' algorithm",
opt_n_threads, num_cpus, algo_names[opt_algo] );

/* main loop - simply wait for workio thread to exit */
/* main loop - simply wait for workio thread to exit */
pthread_join( thr_info[work_thr_id].pth, NULL );
applog( LOG_WARNING, "workio thread dead, exiting." );
return 0;

47	miner.h
@@ -1,38 +1,41 @@
#ifndef __MINER_H__
#define __MINER_H__
#ifndef MINER_H__
#define MINER_H__

#include <cpuminer-config.h>


#if defined(__x86_64__)
#define USER_AGENT_ARCH "x64"
#define USER_AGENT_ARCH "x64" // Intel, AMD x86_64
#elif defined(__aarch64__)
#define USER_AGENT_ARCH "arm"
#define USER_AGENT_ARCH "arm" // AArch64
//#elif
// #define USER_AGENT_ARCH "R5" // RISC-V
#else
#define USER_AGENT_ARCH
#endif

#if defined(__linux)
#define USER_AGENT_OS "L"
#define USER_AGENT_OS "L" // GNU Linux
#elif defined(WIN32)
#define USER_AGENT_OS "W"
#define USER_AGENT_OS "W" // MS Windows
#elif defined(__APPLE__)
#define USER_AGENT_OS "M" // Apple MacOS
// is there a generic BSD macro?
#elif defined(__unix__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
#define USER_AGENT_OS "U" // BSD unix
#else
#define USER_AGENT_OS
#endif

#define USER_AGENT PACKAGE_NAME "-" PACKAGE_VERSION "-" USER_AGENT_ARCH "-" USER_AGENT_OS

//#define MAX_CPUS 128
#define USER_AGENT PACKAGE_NAME "-" PACKAGE_VERSION "-" USER_AGENT_ARCH USER_AGENT_OS

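The only functional change to USER_AGENT is dropping the final "-" separator. As a worked illustration (assuming version 23.8 on x86_64 Linux; not part of the diff):

/* Old:  "cpuminer-opt" "-" "23.8" "-" "x64" "-" "L"  ->  "cpuminer-opt-23.8-x64-L"
   New:  "cpuminer-opt" "-" "23.8" "-" "x64" "L"      ->  "cpuminer-opt-23.8-x64L"  */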
/*
#ifdef _MSC_VER

#undef USE_ASM  /* to fix */

#undef USE_ASM
#ifdef NOASM
#undef USE_ASM
#endif

/* missing arch defines for msvc */
#if defined(_M_X64)
#define __i386__ 1
#define __x86_64__ 1
@@ -40,13 +43,13 @@
#define __i386__ 1
#endif

#endif /* _MSC_VER */

#endif
*/

#include <stdbool.h>
#include <inttypes.h>
#include <sys/time.h>

#include <unistd.h>
#include <pthread.h>
#include <jansson.h>
#include <curl/curl.h>
@@ -75,6 +78,15 @@

#endif

//TODO for windows
static inline bool is_root()
{
#if defined(WIN32)
return false;
#else
return !getuid();
#endif
}

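The //TODO above leaves the Windows branch of is_root() returning false. A possible Windows variant, offered only as a hedged sketch (none of these lines are in the commit), could test membership in the local Administrators group:

#if defined(WIN32)
#include <windows.h>
// Hypothetical Windows implementation of is_root(): true when the calling
// process token belongs to the built-in Administrators group.
static inline bool is_root()
{
   BOOL admin = FALSE;
   SID_IDENTIFIER_AUTHORITY nt_auth = SECURITY_NT_AUTHORITY;
   PSID admin_group = NULL;
   if ( AllocateAndInitializeSid( &nt_auth, 2, SECURITY_BUILTIN_DOMAIN_RID,
                                  DOMAIN_ALIAS_RID_ADMINS, 0, 0, 0, 0, 0, 0,
                                  &admin_group ) )
   {
      CheckTokenMembership( NULL, admin_group, &admin );
      FreeSid( admin_group );
   }
   return admin != FALSE;
}
#endif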
/*
#ifndef min
@@ -598,7 +610,6 @@ enum algos {
ALGO_GROESTL,
ALGO_HEX,
ALGO_HMQ1725,
ALGO_HODL,
ALGO_JHA,
ALGO_KECCAK,
ALGO_KECCAKC,
@@ -694,7 +705,6 @@ static const char* const algo_names[] = {
"groestl",
"hex",
"hmq1725",
"hodl",
"jha",
"keccak",
"keccakc",
@@ -856,7 +866,6 @@ Options:\n\
groestl       Groestl coin\n\
hex           x16r-hex\n\
hmq1725       Espers\n\
hodl          Hodlcoin\n\
jha           jackppot (Jackpotcoin)\n\
keccak        Maxcoin\n\
keccakc       Creative Coin\n\
@@ -1509,24 +1509,35 @@ static inline void v128_bswap32_intrlv80_2x64( void *d, const void *src )

#elif defined(__ARM_NEON)

casti_v128u64( d,0 ) = vdupq_laneq_u64( s0, 0 );
casti_v128u64( d,1 ) = vdupq_laneq_u64( s0, 1 );
casti_v128u64( d,0 ) = vdupq_laneq_u64( (uint64x2_t)s0, 0 );
casti_v128u64( d,1 ) = vdupq_laneq_u64( (uint64x2_t)s0, 1 );

casti_v128u64( d,2 ) = vdupq_laneq_u64( s1, 0 );
casti_v128u64( d,3 ) = vdupq_laneq_u64( s1, 1 );
casti_v128u64( d,2 ) = vdupq_laneq_u64( (uint64x2_t)s1, 0 );
casti_v128u64( d,3 ) = vdupq_laneq_u64( (uint64x2_t)s1, 1 );

casti_v128u64( d,4 ) = vdupq_laneq_u64( s2, 0 );
casti_v128u64( d,5 ) = vdupq_laneq_u64( s2, 1 );
casti_v128u64( d,4 ) = vdupq_laneq_u64( (uint64x2_t)s2, 0 );
casti_v128u64( d,5 ) = vdupq_laneq_u64( (uint64x2_t)s2, 1 );

casti_v128u64( d,6 ) = vdupq_laneq_u64( s3, 0 );
casti_v128u64( d,7 ) = vdupq_laneq_u64( s3, 1 );
casti_v128u64( d,6 ) = vdupq_laneq_u64( (uint64x2_t)s3, 0 );
casti_v128u64( d,7 ) = vdupq_laneq_u64( (uint64x2_t)s3, 1 );

casti_v128u64( d,8 ) = vdupq_laneq_u64( s4, 0 );
casti_v128u64( d,9 ) = vdupq_laneq_u64( s4, 1 );
casti_v128u64( d,8 ) = vdupq_laneq_u64( (uint64x2_t)s4, 0 );
casti_v128u64( d,9 ) = vdupq_laneq_u64( (uint64x2_t)s4, 1 );

#endif
}

static inline void extr_lane_2x64( void *dst, const void *src,
                                   const int lane, const int bit_len )
{
uint64_t *d = (uint64_t*)dst;
const uint64_t *s = (const uint64_t*)src;
d[ 0] = s[ lane ];     d[ 1] = s[ lane+ 2 ];
d[ 2] = s[ lane+ 4 ];  d[ 3] = s[ lane+ 6 ];
if ( bit_len <= 256 ) return;
d[ 4] = s[ lane+ 8 ];  d[ 5] = s[ lane+10 ];
d[ 6] = s[ lane+12 ];  d[ 7] = s[ lane+14 ];
}
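extr_lane_2x64() above walks the 2-way interleaved buffer with a stride of two 64-bit words, so lane 0 occupies the even words and lane 1 the odd words. A minimal sketch of the matching interleave step, assuming that layout (the helper name is hypothetical):

#include <stdint.h>

// Sketch: 2-way interleave of two 256-bit (4 x 64-bit) states into the
// layout that extr_lane_2x64 de-interleaves (lane = 0 or 1).
static void intrlv_2x64_256( uint64_t *dst, const uint64_t *lane0,
                             const uint64_t *lane1 )
{
   for ( int i = 0; i < 4; i++ )
   {
      dst[ 2*i     ] = lane0[ i ];
      dst[ 2*i + 1 ] = lane1[ i ];
   }
}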
// 4x64 (AVX2)
@@ -152,26 +152,23 @@
|
||||
#define v128_unpacklo8 _mm_unpacklo_epi8
|
||||
#define v128_unpackhi8 _mm_unpackhi_epi8
|
||||
|
||||
// New shorter agnostic name
|
||||
#define v128_ziplo64 _mm_unpacklo_epi64
|
||||
#define v128_ziphi64 _mm_unpackhi_epi64
|
||||
#define v128_ziplo32 _mm_unpacklo_epi32
|
||||
#define v128_ziphi32 _mm_unpackhi_epi32
|
||||
#define v128_ziplo16 _mm_unpacklo_epi16
|
||||
#define v128_ziphi16 _mm_unpackhi_epi16
|
||||
#define v128_ziplo8 _mm_unpacklo_epi8
|
||||
#define v128_ziphi8 _mm_unpackhi_epi8
|
||||
|
||||
// AES
|
||||
// Nokey means nothing on x86_64 but it saves an instruction and a register
|
||||
// on ARM.
|
||||
#define v128_aesenc _mm_aesenc_si128
|
||||
#define v128_aesenc_nokey(v) _mm_aesenc_si128( v, v128_zero )
|
||||
#define v128_aesenclast _mm_aesenclast_si128
|
||||
#define v128_aesenclast_nokey(v) _mm_aesenclast_si128( v, v128_zero )
|
||||
#define v128_aesdec _mm_aesdec_si128
|
||||
#define v128_aesdec_nokey(v) _mm_aesdec_si128( v, v128_zero )
|
||||
#define v128_aesdeclast _mm_aesdeclast_si128
|
||||
#define v128_aesdeclast_nokey(v) _mm_aesdeclast_si128( v, v128_zero )
|
||||
|
||||
// Used instead of casting.
|
||||
typedef union
|
||||
{
|
||||
__m128i m128;
|
||||
v128_t v128;
|
||||
__m128i m128;
|
||||
uint32_t u32[4];
|
||||
} __attribute__ ((aligned (16))) m128_ovly;
|
||||
#define v128_ovly m128_ovly
|
||||
@@ -218,19 +215,41 @@ static inline __m128i mm128_mov32_128( const uint32_t n )
|
||||
return a;
|
||||
}
|
||||
|
||||
// Emulate broadcast & insert instructions not available in SSE2
|
||||
// FYI only, not used anywhere
|
||||
//#define mm128_bcast_m64( v ) _mm_shuffle_epi32( v, 0x44 )
|
||||
//#define mm128_bcast_m32( v ) _mm_shuffle_epi32( v, 0x00 )
|
||||
// broadcast lane 0 to all lanes
|
||||
#define v128_bcast64(v) _mm_shuffle_epi32( v, 0x44 )
|
||||
#define v128_bcast32(v) _mm_shuffle_epi32( v, 0x00 )
|
||||
|
||||
#if defined(__AVX2__)
|
||||
|
||||
#define v128_bcast16(v) _mm_broadcastw_epi16(v)
|
||||
|
||||
#else
|
||||
|
||||
#define v128_bcast16(v) \
|
||||
v128_bcast32( v128_or( v128_sl32( v, 16 ), v ) )
|
||||
|
||||
#endif
|
||||
|
||||
// broadcast lane l to all lanes
|
||||
#define v128_replane64( v, l ) \
|
||||
( (l) == 0 ) ? _mm_shuffle_epi32( v, 0x44 ) \
|
||||
: _mm_shuffle_epi32( v, 0xee )
|
||||
|
||||
#define v128_replane32( v, l ) \
|
||||
( (l) == 0 ) ? _mm_shuffle_epi32( v, 0x00 ) \
|
||||
: ( (l) == 1 ) ? _mm_shuffle_epi32( v, 0x55 ) \
|
||||
: ( (l) == 2 ) ? _mm_shuffle_epi32( v, 0xaa ) \
|
||||
: _mm_shuffle_epi32( v, 0xff )
|
||||
|
||||
// Pseudo constants
|
||||
#define v128_zero _mm_setzero_si128()
|
||||
#define m128_zero v128_zero
|
||||
#define m128_zero _mm_setzero_si128()
|
||||
|
||||
|
||||
#if defined(__SSE4_1__)
|
||||
|
||||
// Bitwise AND, return 1 if result is all bits clear.
|
||||
#define v128_and_eq0 _mm_testz_si128
|
||||
#define v128_and_eq0 _mm_testz_si128
|
||||
|
||||
static inline int v128_cmpeq0( v128_t v )
|
||||
{ return v128_and_eq0( v, v ); }
|
||||
@@ -341,9 +360,12 @@ static inline __m128i v128_neg1_fn()
|
||||
*/
|
||||
|
||||
|
||||
#define mm128_mask_32( v, m ) mm128_xim_32( v, v, m )
|
||||
|
||||
// Zero 32 bit elements when corresponding bit in 4 bit mask is set.
|
||||
static inline __m128i mm128_mask_32( const __m128i v, const int m )
|
||||
{ return mm128_xim_32( v, v, m ); }
|
||||
//static inline __m128i mm128_mask_32( const __m128i v, const int m )
|
||||
//{ return mm128_xim_32( v, v, m ); }
|
||||
#define v128_mask32 mm128_mask_32
|
||||
|
||||
// Copy element i2 of v2 to element i1 of dest and copy remaining elements from v1.
|
||||
#define v128_movlane32( v1, l1, v0, l0 ) \
|
||||
@@ -483,77 +505,141 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
|
||||
//
|
||||
// Bit rotations
|
||||
|
||||
// Neon has fast xor-ror, useful for big blake, if it actually works.
|
||||
#define v128_xror64( v1, v0, c ) v128_ror64( v128_xor( v1, v0 ), c )
|
||||
#define v128_shuffle16( v, c ) \
|
||||
_mm_shufflehi_epi16( _mm_shufflelo_epi16( v, c ), c )
|
||||
|
||||
#define v128_qrev32(v) _mm_shuffle_epi32( v, 0xb1 )
|
||||
#define v128_swap64_32(v) _mm_shuffle_epi32( v, 0xb1 ) // grandfathered
|
||||
|
||||
// Slow bit rotation, used as last resort
|
||||
#define mm128_ror_64_sse2( v, c ) \
|
||||
#define v128_qrev16(v) v128_shuffle16( v, 0x1b )
|
||||
#define v128_lrev16(v) v128_shuffle16( v, 0xb1 )
|
||||
|
||||
// These should never be called from application code, use rol/ror.
|
||||
#define v128_ror64_sse2( v, c ) \
|
||||
_mm_or_si128( _mm_srli_epi64( v, c ), _mm_slli_epi64( v, 64-(c) ) )
|
||||
|
||||
#define mm128_rol_64_sse2( v, c ) \
|
||||
#define v128_rol64_sse2( v, c ) \
|
||||
_mm_or_si128( _mm_slli_epi64( v, c ), _mm_srli_epi64( v, 64-(c) ) )
|
||||
|
||||
#define mm128_ror_32_sse2( v, c ) \
|
||||
#define v128_ror32_sse2( v, c ) \
|
||||
_mm_or_si128( _mm_srli_epi32( v, c ), _mm_slli_epi32( v, 32-(c) ) )
|
||||
|
||||
#define mm128_rol_32_sse2( v, c ) \
|
||||
#define v128_rol32_sse2( v, c ) \
|
||||
_mm_or_si128( _mm_slli_epi32( v, c ), _mm_srli_epi32( v, 32-(c) ) )
|
||||
|
||||
#if defined(__AVX512VL__)
|
||||
|
||||
#define mm128_ror_64 _mm_ror_epi64
|
||||
#define mm128_rol_64 _mm_rol_epi64
|
||||
#define mm128_ror_32 _mm_ror_epi32
|
||||
#define mm128_rol_32 _mm_rol_epi32
|
||||
// AVX512 fastest all rotations.
|
||||
#define mm128_ror_64 _mm_ror_epi64
|
||||
#define mm128_rol_64 _mm_rol_epi64
|
||||
#define mm128_ror_32 _mm_ror_epi32
|
||||
#define mm128_rol_32 _mm_rol_epi32
|
||||
|
||||
// ror/rol will always find the fastest but these names may fit better with
|
||||
// application code performing shuffles rather than bit rotations.
|
||||
#define v128_shuflr64_8( v) _mm_ror_epi64( v, 8 )
|
||||
#define v128_shufll64_8( v) _mm_rol_epi64( v, 8 )
|
||||
#define v128_shuflr64_16(v) _mm_ror_epi64( v, 16 )
|
||||
#define v128_shufll64_16(v) _mm_rol_epi64( v, 16 )
|
||||
#define v128_shuflr64_24(v) _mm_ror_epi64( v, 24 )
|
||||
#define v128_shufll64_24(v) _mm_rol_epi64( v, 24 )
|
||||
#define v128_shuflr32_8( v) _mm_ror_epi32( v, 8 )
|
||||
#define v128_shufll32_8( v) _mm_rol_epi32( v, 8 )
|
||||
#define v128_shuflr32_16(v) _mm_ror_epi32( v, 16 )
|
||||
#define v128_shufll32_16(v) _mm_rol_epi32( v, 16 )
|
||||
|
||||
// optimized byte wise rotation
|
||||
#elif defined(__SSSE3__)
|
||||
// SSE2: fastest 32 bit, very fast 16, fast 8
|
||||
|
||||
#define v128_shuflr64_8( v ) \
|
||||
_mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x080f0e0d0c0b0a09, 0x0007060504030201 ) )
|
||||
|
||||
#define v128_shufll64_8( v ) \
|
||||
_mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x0e0d0c0b0a09080f, 0x0605040302010007 ) )
|
||||
|
||||
#define v128_shuflr64_24( v ) \
|
||||
_mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x0a09080f0e0d0c0b, 0x0201000706050403 ) )
|
||||
|
||||
#define v128_shufll64_24( v ) \
|
||||
_mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x0c0b0a09080f0e0d, 0x0403020100070605 ) )
|
||||
|
||||
#define v128_shuflr32_8( v ) \
|
||||
_mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x0c0f0e0d080b0a09, 0x0407060500030201 ) )
|
||||
|
||||
#define v128_shufll32_8( v ) \
|
||||
_mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x0e0d0c0f0a09080b, 0x0605040702010003 ) )
|
||||
|
||||
#define mm128_ror_64( v, c ) \
|
||||
( (c) == 32 ) ? _mm_shuffle_epi32( v, 0xb1 ) \
|
||||
: ( (c) == 24 ) ? _mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x0a09080f0e0d0c0b, 0x0201000706050403 ) ) \
|
||||
: ( (c) == 16 ) ? _mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x09080f0e0d0c0b0a, 0x0100070605040302 ) ) \
|
||||
: ( (c) == 8 ) ? _mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x080f0e0d0c0b0a09, 0x0007060504030201 ) ) \
|
||||
: mm128_ror_64_sse2( v, c )
|
||||
( (c) == 8 ) ? v128_shuflr64_8( v ) \
|
||||
: ( (c) == 16 ) ? v128_shuffle16( v, 0x39 ) \
|
||||
: ( (c) == 24 ) ? v128_shuflr64_24( v ) \
|
||||
: ( (c) == 32 ) ? _mm_shuffle_epi32( v, 0xb1 ) \
|
||||
: ( (c) == 40 ) ? v128_shufll64_24( v ) \
|
||||
: ( (c) == 48 ) ? v128_shuffle16( v, 0x93 ) \
|
||||
: ( (c) == 56 ) ? v128_shufll64_8( v ) \
|
||||
: v128_ror64_sse2( v, c )
|
||||
|
||||
#define mm128_rol_64( v, c ) \
|
||||
( (c) == 32 ) ? _mm_shuffle_epi32( v, 0xb1 ) \
|
||||
: ( (c) == 24 ) ? _mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x0c0b0a09080f0e0d, 0x0403020100070605 ) ) \
|
||||
: ( (c) == 16 ) ? _mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x0d0c0b0a09080f0e, 0x0504030201000706 ) ) \
|
||||
: ( (c) == 8 ) ? _mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x0e0d0c0b0a09080f, 0x0605040302010007 ) ) \
|
||||
: mm128_rol_64_sse2( v, c )
|
||||
( (c) == 8 ) ? v128_shufll64_8( v ) \
|
||||
: ( (c) == 16 ) ? v128_shuffle16( v, 0x93 ) \
|
||||
: ( (c) == 24 ) ? v128_shufll64_24( v ) \
|
||||
: ( (c) == 32 ) ? _mm_shuffle_epi32( v, 0xb1 ) \
|
||||
: ( (c) == 40 ) ? v128_shuflr64_24( v ) \
|
||||
: ( (c) == 48 ) ? v128_shuffle16( v, 0x39 ) \
|
||||
: ( (c) == 56 ) ? v128_shuflr64_8( v ) \
|
||||
: v128_rol64_sse2( v, c )
|
||||
|
||||
#define mm128_ror_32( v, c ) \
|
||||
( (c) == 16 ) ? _mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x0d0c0f0e09080b0a, 0x0504070601000302 ) ) \
|
||||
: ( (c) == 8 ) ? _mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x0c0f0e0d080b0a09, 0x0407060500030201 ) ) \
|
||||
: mm128_ror_32_sse2( v, c )
|
||||
( (c) == 8 ) ? v128_shuflr32_8( v ) \
|
||||
: ( (c) == 16 ) ? v128_lrev16( v ) \
|
||||
: ( (c) == 24 ) ? v128_shufll32_8( v ) \
|
||||
: v128_ror32_sse2( v, c )
|
||||
|
||||
#define mm128_rol_32( v, c ) \
|
||||
( (c) == 16 ) ? _mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x0d0c0f0e09080b0a, 0x0504070601000302 ) ) \
|
||||
: ( (c) == 8 ) ? _mm_shuffle_epi8( v, _mm_set_epi64x( \
|
||||
0x0e0d0c0f0a09080b, 0x0605040702010003 ) ) \
|
||||
: mm128_rol_32_sse2( v, c )
|
||||
( (c) == 8 ) ? v128_shufll32_8( v ) \
|
||||
: ( (c) == 16 ) ? v128_lrev16( v ) \
|
||||
: ( (c) == 24 ) ? v128_shuflr32_8( v ) \
|
||||
: v128_rol32_sse2( v, c )
|
||||
|
||||
#elif defined(__SSE2__)
|
||||
// SSE2: fastest 32 bit, very fast 16
|
||||
|
||||
#define mm128_ror_64( v, c ) \
|
||||
( (c) == 16 ) ? v128_shuffle16( v, 0x39 ) \
|
||||
: ( (c) == 32 ) ? _mm_shuffle_epi32( v, 0xb1 ) \
|
||||
: ( (c) == 48 ) ? v128_shuffle16( v, 0x93 ) \
|
||||
: v128_ror64_sse2( v, c )
|
||||
|
||||
#define mm128_rol_64( v, c ) \
|
||||
( (c) == 16 ) ? v128_shuffle16( v, 0x93 ) \
|
||||
: ( (c) == 32 ) ? _mm_shuffle_epi32( v, 0xb1 ) \
|
||||
: ( (c) == 48 ) ? v128_shuffle16( v, 0x39 ) \
|
||||
: v128_rol64_sse2( v, c )
|
||||
|
||||
#define mm128_ror_32( v, c ) \
|
||||
( (c) == 16 ) ? v128_lrev16( v ) \
|
||||
: v128_ror32_sse2( v, c )
|
||||
|
||||
#define mm128_rol_32( v, c ) \
|
||||
( (c) == 16 ) ? v128_lrev16( v ) \
|
||||
: v128_rol32_sse2( v, c )
|
||||
|
||||
#else
|
||||
|
||||
#define mm128_ror_64 mm128_ror_64_sse2
|
||||
#define mm128_rol_64 mm128_rol_64_sse2
|
||||
#define mm128_ror_32 mm128_ror_32_sse2
|
||||
#define mm128_rol_32 mm128_rol_32_sse2
|
||||
#define mm128_ror_64 v128_ror64_sse2
|
||||
#define mm128_rol_64 v128_rol64_sse2
|
||||
#define mm128_ror_32 v128_ror32_sse2
|
||||
#define mm128_rol_32 v128_rol32_sse2
|
||||
|
||||
#endif
|
||||
|
||||
// Architecturally agnostic naming
|
||||
// Generic names for portable code
|
||||
#define v128_ror64 mm128_ror_64
|
||||
#define v128_rol64 mm128_rol_64
|
||||
#define v128_ror32 mm128_ror_32
|
||||
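The names above always resolve to the fastest available form: byte- or word-aligned rotate counts become shuffles, anything else falls back to the shift+or macros. A quick stand-alone sanity check of the ( c == 32 ) shuffle path against a scalar rotate, offered only as a sketch (not part of the source):

#include <stdint.h>
#include <stdio.h>
#include <emmintrin.h>   // SSE2

// Rotating each 64-bit lane right by 32 is just a swap of its 32-bit halves,
// which _mm_shuffle_epi32 with control 0xb1 performs in one instruction.
static uint64_t ror64( uint64_t x, unsigned c )
{ return ( x >> c ) | ( x << ( 64 - c ) ); }

int main(void)
{
   uint64_t in[2] = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL }, out[2];
   __m128i v = _mm_loadu_si128( (const __m128i*)in );
   _mm_storeu_si128( (__m128i*)out, _mm_shuffle_epi32( v, 0xb1 ) );
   printf( "%d %d\n", out[0] == ror64( in[0], 32 ), out[1] == ror64( in[1], 32 ) );  // prints: 1 1
   return 0;
}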
@@ -645,32 +731,40 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
|
||||
|
||||
// Limited 2 input shuffle, combines shuffle with blend. The destination low
|
||||
// half is always taken from v1, and the high half from v2.
|
||||
#define mm128_shuffle2_64( v1, v2, c ) \
|
||||
#define v128_shuffle2_64( v1, v2, c ) \
|
||||
_mm_castpd_si128( _mm_shuffle_pd( _mm_castsi128_pd( v1 ), \
|
||||
_mm_castsi128_pd( v2 ), c ) );
|
||||
#define mm128_shuffle2_64 v128_shuffle2_64
|
||||
|
||||
#define mm128_shuffle2_32( v1, v2, c ) \
|
||||
#define v128_shuffle2_32( v1, v2, c ) \
|
||||
_mm_castps_si128( _mm_shuffle_ps( _mm_castsi128_ps( v1 ), \
|
||||
_mm_castsi128_ps( v2 ), c ) );
|
||||
#define mm128_shuffle2_32 v128_shuffle2_32
|
||||
|
||||
// Rotate vector elements across all lanes
|
||||
|
||||
#define mm128_swap_64( v ) _mm_shuffle_epi32( v, 0x4e )
|
||||
#define v128_swap64 mm128_swap_64
|
||||
#define mm128_shuflr_64 mm128_swap_64
|
||||
#define mm128_shufll_64 mm128_swap_64
|
||||
// reverse elements in vector
|
||||
#define v128_swap64(v) _mm_shuffle_epi32( v, 0x4e ) // grandfathered
|
||||
#define v128_rev64(v) _mm_shuffle_epi32( v, 0x4e ) // preferred
|
||||
#define v128_rev32(v) _mm_shuffle_epi32( v, 0x1b )
|
||||
#define v128_rev16(v) v128_shuffle16( v, 0x1b )
|
||||
|
||||
// Don't use as an alias for byte sized bit rotation
|
||||
#define mm128_shuflr_32( v ) _mm_shuffle_epi32( v, 0x39 )
|
||||
#define v128_shuflr32 mm128_shuflr_32
|
||||
// rotate vector elements
|
||||
#define v128_shuflr32(v) _mm_shuffle_epi32( v, 0x39 )
|
||||
#define v128_shufll32(v) _mm_shuffle_epi32( v, 0x93 )
|
||||
|
||||
#define mm128_shufll_32( v ) _mm_shuffle_epi32( v, 0x93 )
|
||||
#define v128_shufll32 mm128_shufll_32
|
||||
#define v128_shuflr16(v) v128_shuffle16( v, 0x39 )
|
||||
#define v128_shufll16(v) v128_shuffle16( v, 0x93 )
|
||||
|
||||
#define v128_swap64_32( v ) v128_ror64( v, 32 )
|
||||
|
||||
#define mm128_rev_32( v ) _mm_shuffle_epi32( v, 0x1b )
|
||||
#define v128_rev32 mm128_rev_32
|
||||
//TODO fix this
|
||||
// alias bswap
|
||||
//#define v128_qrev8(v) _mm_shuffle_epi8( v, v128_8( 0,1,2,3,4,5,6,7 ) )
|
||||
//#define v128_lrev8(v) _mm_shuffle_epi8( v, v128_8( 4,5,6,7, 0,1,2,3 ) )
|
||||
//#define v128_wrev8(v) _mm_shuffle_epi8( v, v128_8( 6,7, 4,5, 2,3, 1,0 ) )
|
||||
|
||||
// reverse bits, can it be done?
|
||||
//#define v128_bitrev8( v ) vrbitq_u8
|
||||
|
||||
/* Not used
|
||||
#if defined(__SSSE3__)
|
||||
@@ -682,7 +776,6 @@ static inline __m128i mm128_shuflr_x8( const __m128i v, const int c )
|
||||
#endif
|
||||
*/
|
||||
|
||||
//
|
||||
// Endian byte swap.
|
||||
|
||||
#if defined(__SSSE3__)
|
||||
@@ -756,6 +849,16 @@ static inline __m128i mm128_shuflr_x8( const __m128i v, const int c )
|
||||
#define mm128_block_bswap32_256 mm128_block_bswap_32
|
||||
#define v128_block_bswap32_256 mm128_block_bswap_32
|
||||
|
||||
|
||||
#define mm128_block_bswap32_128( d, s ) \
|
||||
{ \
|
||||
__m128i ctl = _mm_set_epi64x( 0x0c0d0e0f08090a0b, 0x0405060700010203 ); \
|
||||
casti_m128i( d,0 ) = _mm_shuffle_epi8( casti_m128i( s,0 ), ctl ); \
|
||||
casti_m128i( d,1 ) = _mm_shuffle_epi8( casti_m128i( s,1 ), ctl ); \
|
||||
casti_m128i( d,2 ) = _mm_shuffle_epi8( casti_m128i( s,2 ), ctl ); \
|
||||
casti_m128i( d,3 ) = _mm_shuffle_epi8( casti_m128i( s,3 ), ctl ); \
|
||||
}
|
||||
|
||||
#define v128_block_bswap32_512( d, s ) \
|
||||
{ \
|
||||
__m128i ctl = _mm_set_epi64x( 0x0c0d0e0f08090a0b, 0x0405060700010203 ); \
|
||||
@@ -798,8 +901,7 @@ static inline __m128i mm128_bswap_16( __m128i v )
|
||||
return _mm_or_si128( _mm_slli_epi16( v, 8 ), _mm_srli_epi16( v, 8 ) );
|
||||
}
|
||||
|
||||
#define mm128_bswap_128( v ) \
|
||||
mm128_swap_64( mm128_bswap_64( v ) )
|
||||
#define mm128_bswap_128( v ) v128_qrev32( v128_bswap64( v ) )
|
||||
|
||||
static inline void mm128_block_bswap_64( __m128i *d, const __m128i *s )
|
||||
{
|
||||
@@ -846,7 +948,7 @@ static inline void mm128_block_bswap_32( __m128i *d, const __m128i *s )
|
||||
d[7] = mm128_bswap_32( s[7] );
|
||||
}
|
||||
#define mm128_block_bswap32_256 mm128_block_bswap_32
|
||||
#define v128_block_bswap32_256 mm128_block_bswap_32
|
||||
#define v128_block_bswap32_256 mm128_block_bswap_32
|
||||
|
||||
static inline void mm128_block_bswap32_512( __m128i *d, const __m128i *s )
|
||||
{
|
||||
@@ -907,7 +1009,7 @@ static inline void mm128_block_bswap32_512( __m128i *d, const __m128i *s )
|
||||
#else
|
||||
|
||||
#define v128_blendv( v1, v0, mask ) \
|
||||
v128_or( v128_andnot( mask, v0 ), v128_and( mask, v1 ) )
|
||||
v128_or( v128_andnot( mask, v0 ), v128_and( mask, v1 ) )
|
||||
|
||||
#endif
|
||||
|
||||
|
@@ -218,7 +218,29 @@ static inline __m256i mm256_not( const __m256i v )
|
||||
//
|
||||
// Bit rotations.
|
||||
|
||||
// Slow version, used as last resort
|
||||
#define mm256_shuffle16( v, c ) \
|
||||
_mm256_shufflehi_epi16( _mm256_shufflelo_epi16( v, c ), c )
|
||||
|
||||
#define mm256_qrev32(v) _mm256_shuffle_epi32( v, 0xb1 )
|
||||
#define mm256_swap64_32 mm256_qrev32 // grandfathered
|
||||
|
||||
#define mm256_qrev16(v) mm256_shuffle16( v, 0x1b )
|
||||
|
||||
#define mm256_qrev8(v) \
|
||||
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
v128_64( 0x08090a0b0c0d0e0f, 0x0001020304050607 ) ) )
|
||||
|
||||
#define mm256_lrev16(v) mm256_shuffle16( v, 0xb1 )
|
||||
|
||||
#define mm256_lrev8(v) \
|
||||
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
v128_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 ) ) )
|
||||
|
||||
#define mm256_wrev8(v) \
|
||||
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
v128_64( 0x0e0f0c0d0a0b0809, 0x0607040502030001 ) ) )
|
||||
|
||||
// These should never be called directly by applications.
|
||||
#define mm256_ror_64_avx2( v, c ) \
|
||||
_mm256_or_si256( _mm256_srli_epi64( v, c ), \
|
||||
_mm256_slli_epi64( v, 64-(c) ) )
|
||||
@@ -242,40 +264,76 @@ static inline __m256i mm256_not( const __m256i v )
|
||||
#define mm256_ror_32 _mm256_ror_epi32
|
||||
#define mm256_rol_32 _mm256_rol_epi32
|
||||
|
||||
// Redundant but naming may be a better fit in some applications.
|
||||
#define mm256_shuflr64_8( v) _mm256_ror_epi64( v, 8 )
#define mm256_shufll64_8( v) _mm256_rol_epi64( v, 8 )
|
||||
#define mm256_shuflr64_16(v) _mm256_ror_epi64( v, 16 )
|
||||
#define mm256_shufll64_16(v) _mm256_rol_epi64( v, 16 )
|
||||
#define mm256_shuflr64_24(v) _mm256_ror_epi64( v, 24 )
|
||||
#define mm256_shufll64_24(v) _mm256_rol_epi64( v, 24 )
|
||||
#define mm256_shuflr32_8( v) _mm256_ror_epi32( v, 8 )
|
||||
#define mm256_shufll32_8( v) _mm256_rol_epi32( v, 8 )
|
||||
#define mm256_shuflr32_16(v) _mm256_ror_epi32( v, 16 )
|
||||
#define mm256_shufll32_16(v) _mm256_rol_epi32( v, 16 )
|
||||
|
||||
#else
|
||||
|
||||
// ROR & ROL will always find the fastest but these names may be a better fit
|
||||
// in some applications.
|
||||
#define mm256_shuflr64_8( v ) \
|
||||
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x080f0e0d0c0b0a09, 0x0007060504030201 ) ) )
|
||||
|
||||
#define mm256_shufll64_8( v ) \
|
||||
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x0e0d0c0b0a09080f, 0x0605040302010007 ) ) )
|
||||
|
||||
#define mm256_shuflr64_24( v ) \
|
||||
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x0a09080f0e0d0c0b, 0x0201000706050403 ) ) )
|
||||
|
||||
#define mm256_shufll64_24( v ) \
|
||||
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x0c0b0a09080f0e0d, 0x0403020100070605 ) ) )
|
||||
|
||||
#define mm256_shuflr32_8( v ) \
|
||||
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x0c0f0e0d080b0a09, 0x0407060500030201 ) ) )
|
||||
|
||||
#define mm256_shufll32_8( v ) \
|
||||
_mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x0e0d0c0f0a09080b, 0x0605040702010003 ) ) )
|
||||
|
||||
#define mm256_ror_64( v, c ) \
|
||||
( (c) == 32 ) ? _mm256_shuffle_epi32( v, 0xb1 ) \
|
||||
: ( (c) == 24 ) ? _mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x0a09080f0e0d0c0b, 0x0201000706050403 ) ) ) \
|
||||
: ( (c) == 16 ) ? _mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x09080f0e0d0c0b0a, 0x0100070605040302 ) ) ) \
|
||||
: ( (c) == 8 ) ? _mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x080f0e0d0c0b0a09, 0x0007060504030201 ) ) ) \
|
||||
( (c) == 8 ) ? mm256_shuflr64_8( v ) \
|
||||
: ( (c) == 16 ) ? mm256_shuffle16( v, 0x39 ) \
|
||||
: ( (c) == 24 ) ? mm256_shuflr64_24( v ) \
|
||||
: ( (c) == 32 ) ? _mm256_shuffle_epi32( v, 0xb1 ) \
|
||||
: ( (c) == 40 ) ? mm256_shufll64_24( v ) \
|
||||
: ( (c) == 48 ) ? mm256_shuffle16( v, 0x93 ) \
|
||||
: ( (c) == 56 ) ? mm256_shufll64_8( v ) \
|
||||
: mm256_ror_64_avx2( v, c )
|
||||
|
||||
#define mm256_rol_64( v, c ) \
|
||||
( (c) == 32 ) ? _mm256_shuffle_epi32( v, 0xb1 ) \
|
||||
: ( (c) == 24 ) ? _mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x0c0b0a09080f0e0d, 0x0403020100070605 ) ) ) \
|
||||
: ( (c) == 16 ) ? _mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x0d0c0b0a09080f0e, 0x0504030201000706 ) ) ) \
|
||||
: ( (c) == 8 ) ? _mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x0e0d0c0b0a09080f, 0x0605040302010007 ) ) ) \
|
||||
( (c) == 8 ) ? mm256_shufll64_8( v ) \
|
||||
: ( (c) == 16 ) ? mm256_shuffle16( v, 0x93 ) \
|
||||
: ( (c) == 24 ) ? mm256_shufll64_24( v ) \
|
||||
: ( (c) == 32 ) ? _mm256_shuffle_epi32( v, 0xb1 ) \
|
||||
: ( (c) == 40 ) ? mm256_shuflr64_24( v ) \
|
||||
: ( (c) == 48 ) ? mm256_shuffle16( v, 0x39 ) \
|
||||
: ( (c) == 56 ) ? mm256_shuflr64_8( v ) \
|
||||
: mm256_rol_64_avx2( v, c )
|
||||
|
||||
#define mm256_ror_32( v, c ) \
|
||||
( (c) == 16 ) ? _mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x0d0c0f0e09080b0a, 0x0504070601000302 ) ) )\
|
||||
: ( (c) == 8 ) ? _mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x0c0f0e0d080b0a09, 0x0407060500030201 ) ) ) \
|
||||
( (c) == 8 ) ? mm256_shuflr32_8( v ) \
|
||||
: ( (c) == 16 ) ? mm256_lrev16( v ) \
|
||||
: ( (c) == 24 ) ? mm256_shufll32_8( v ) \
|
||||
: mm256_ror_32_avx2( v, c )
|
||||
|
||||
#define mm256_rol_32( v, c ) \
|
||||
( (c) == 16 ) ? _mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x0d0c0f0e09080b0a, 0x0504070601000302 ) ) ) \
|
||||
: ( (c) == 8 ) ? _mm256_shuffle_epi8( v, mm256_bcast_m128( \
|
||||
_mm_set_epi64x( 0x0e0d0c0f0a09080b, 0x0605040702010003 ) ) ) \
|
||||
( (c) == 8 ) ? mm256_shufll32_8( v ) \
|
||||
: ( (c) == 16 ) ? mm256_lrev16( v ) \
|
||||
: ( (c) == 24 ) ? mm256_shuflr32_8( v ) \
|
||||
: mm256_rol_32_avx2( v, c )
|
||||
|
||||
#endif
|
||||
@@ -375,39 +433,44 @@ static inline __m256i mm256_not( const __m256i v )
|
||||
// Cross lane shuffles
|
||||
//
|
||||
// Rotate elements across all lanes.
|
||||
#define mm256_shuffle_16( v, c ) \
|
||||
_mm256_or_si256( _mm256_shufflehi_epi16( v, c ), \
|
||||
_mm256_shufflelo_epi16( v, c ) )
|
||||
|
||||
// Swap 128 bit elements in 256 bit vector.
|
||||
#define mm256_swap_128( v ) _mm256_permute4x64_epi64( v, 0x4e )
|
||||
#define mm256_shuflr_128 mm256_swap_128
|
||||
#define mm256_shufll_128 mm256_swap_128
|
||||
#define mm256_rev_128( v ) _mm256_permute4x64_epi64( v, 0x4e )
|
||||
|
||||
// Rotate 256 bit vector by one 64 bit element
|
||||
#define mm256_shuflr_64( v ) _mm256_permute4x64_epi64( v, 0x39 )
|
||||
#define mm256_shufll_64( v ) _mm256_permute4x64_epi64( v, 0x93 )
|
||||
|
||||
// Reverse 64 bit elements
|
||||
#define mm256_rev_64( v ) _mm256_permute4x64_epi64( v, 0x1b )
|
||||
|
||||
#define mm256_rev_32( v ) \
|
||||
_mm256_permute8x32_epi64( v, 0x0000000000000001, 0x0000000200000003, \
|
||||
0x0000000400000005, 0x0000000600000007 )
|
||||
|
||||
#define mm256_rev_16( v ) \
|
||||
_mm256_permute4x64_epi64( mm256_shuffle_16( v, 0x1b ), 0x4e )
|
||||
|
||||
/* Not used
|
||||
// Rotate 256 bit vector by one 32 bit element.
|
||||
#if defined(__AVX512VL__)
|
||||
|
||||
static inline __m256i mm256_shuflr_32( const __m256i v )
|
||||
{ return _mm256_alignr_epi32( v, v, 1 ); }
|
||||
|
||||
static inline __m256i mm256_shufll_32( const __m256i v )
|
||||
{ return _mm256_alignr_epi32( v, v, 15 ); }
|
||||
|
||||
#else
|
||||
|
||||
#define mm256_shuflr_32( v ) \
|
||||
_mm256_permutevar8x32_epi32( v, \
|
||||
_mm256_set_spi64x( 0x0000000000000007, 0x0000000600000005, \
|
||||
0x0000000400000003, 0x0000000200000001 ) )
|
||||
|
||||
#define mm256_shufll_32( v ) \
|
||||
_mm256_permutevar8x32_epi32( v, \
|
||||
_mm256_set_epi64x( 0x0000000600000005, 0x0000000400000003, \
|
||||
0x0000000200000001, 0x0000000000000007 ) )
|
||||
|
||||
#endif
|
||||
*/
|
||||
|
||||
@@ -423,21 +486,22 @@ static inline __m256i mm256_shufll_32( const __m256i v )
|
||||
_mm256_castps_si256( _mm256_shuffle_ps( _mm256_castsi256_ps( v1 ), \
|
||||
_mm256_castsi256_ps( v2 ), c ) );
|
||||
|
||||
#define mm256_swap128_64( v ) _mm256_shuffle_epi32( v, 0x4e )
|
||||
#define mm256_shuflr128_64 mm256_swap128_64
|
||||
#define mm256_shufll128_64 mm256_swap128_64
|
||||
#define mm256_swap128_64(v) _mm256_shuffle_epi32( v, 0x4e )
|
||||
#define mm256_rev128_64(v) _mm256_shuffle_epi32( v, 0x4e )
|
||||
#define mm256_rev128_32(v) _mm256_shuffle_epi32( v, 0x1b )
|
||||
#define mm256_rev128_16(v) mm256_shuffle_16( v, 0x1b )
|
||||
|
||||
#define mm256_shuflr128_32( v ) _mm256_shuffle_epi32( v, 0x39 )
|
||||
#define mm256_shufll128_32( v ) _mm256_shuffle_epi32( v, 0x93 )
|
||||
#define mm256_shuflr128_32(v) _mm256_shuffle_epi32( v, 0x39 )
|
||||
#define mm256_shufll128_32(v) _mm256_shuffle_epi32( v, 0x93 )
|
||||
|
||||
#define mm256_shuflr128_16(v) _mm256_shuffle_epi16( v, 0x39 )
|
||||
#define mm256_shufll128_16(v) _mm256_shuffle_epi16( v, 0x93 )
|
||||
|
||||
/* Not used
|
||||
static inline __m256i mm256_shuflr128_x8( const __m256i v, const int c )
|
||||
{ return _mm256_alignr_epi8( v, v, c ); }
|
||||
*/
|
||||
|
||||
// Same as bit rotation but logically used as byte/word rotation.
|
||||
#define mm256_swap64_32( v ) mm256_ror_64( v, 32 )
|
||||
|
||||
// Reverse byte order in elements, endian bswap.
|
||||
#define mm256_bswap_64( v ) \
|
||||
_mm256_shuffle_epi8( v, mm256_bcast_m128( _mm_set_epi64x( \
|
||||
|
@@ -1,6 +1,9 @@
#if !defined(SIMD_INT_H__)
#define SIMD_INT_H__ 1

//TODO compile time test for byte order
// be64 etc. using HW bswap.
//
// Endian byte swap
#if defined(__x86_64__)

@@ -9,8 +12,6 @@

#elif defined(__aarch64__)

//#pragma message "aarch64 fast bswap"

static inline uint64_t bswap_64( uint64_t a )
{
uint64_t b;
@@ -15,11 +15,11 @@
|
||||
// vxarq_u64( v1, v0, n ) ror( xor( v1, v0 ), n )
|
||||
// vraxlq_u64( v1, v0 ) xor( rol( v1, 1 ), rol( v0, 1 ) )
|
||||
// vbcaxq( v2, v1, v0 ) xor( v2, and( v1, not(v0) ) )
|
||||
// vsraq_n( v1, v0, n ) add( v1, sr( v0, n ) )
|
||||
//
|
||||
// might not work, not tried yet:
|
||||
// Doesn't work on RPi but works on OPi:
|
||||
//
|
||||
// vornq( v1, v0 ) or( v1, not( v0 ) )
|
||||
// vsraq_n( v1, v0, n ) add( v1, sr( v0, n ) )
|
||||
|
||||
#define v128_t uint32x4_t // default,
|
||||
#define v128u64_t uint64x2_t
|
||||
@@ -31,6 +31,15 @@
|
||||
#define v128_load( p ) vld1q_u32( (uint32_t*)(p) )
|
||||
#define v128_store( p, v ) vst1q_u32( (uint32_t*)(p), v )
|
||||
|
||||
#define v128u64_load( p ) vld1q_u64( (uint64_t*)(p) )
|
||||
#define v128u64_store( p, v ) vst1q_u64( (uint64_t*)(p), v )
|
||||
#define v128u32_load( p ) vld1q_u32( (uint32_t*)(p) )
|
||||
#define v128u32_store( p, v ) vst1q_u32( (uint32_t*)(p), v )
|
||||
#define v128u16_load( p ) vld1q_u16( (uint16_t*)(p) )
|
||||
#define v128u16_store( p, v ) vst1q_u16( (uint16_t*)(p), v )
|
||||
#define v128u8_load( p )       vld1q_u8( (uint8_t*)(p) )
#define v128u8_store( p, v )   vst1q_u8( (uint8_t*)(p), v )
|
||||
|
||||
// load & set1 combined
|
||||
#define v128_load1_64(p) vld1q_dup_u64( (uint64_t*)(p) )
|
||||
#define v128_load1_32(p) vld1q_dup_u32( (uint32_t*)(p) )
|
||||
@@ -72,7 +81,10 @@ static inline uint64x2_t v128_mulw32( uint32x4_t v1, uint32x4_t v0 )
|
||||
#define v128_cmpeq16 vceqq_u16
|
||||
#define v128_cmpeq8 vceqq_u8
|
||||
|
||||
#define v128_cmpeq0 vceqzq_u64
|
||||
#define v128_iszero vceqzq_u64
|
||||
|
||||
// Not yet needed
|
||||
//#define v128_cmpeq1
|
||||
|
||||
#define v128_cmpgt64 vcgtq_u64
|
||||
#define v128_cmpgt32 vcgtq_u32
|
||||
@@ -95,31 +107,55 @@ static inline uint64x2_t v128_mulw32( uint32x4_t v1, uint32x4_t v0 )
|
||||
#define v128_sr16 vshrq_n_u16
|
||||
#define v128_sr8 vshrq_n_u8
|
||||
|
||||
// Maybe signed shift will work.
|
||||
// Unit tested, working.
|
||||
#define v128_sra64 vshrq_n_s64
|
||||
#define v128_sra32 vshrq_n_s32
|
||||
#define v128_sra16 vshrq_n_s16
|
||||
|
||||
// logic
|
||||
// unary logic
|
||||
#define v128_not vmvnq_u32
|
||||
|
||||
// binary logic
|
||||
#define v128_or vorrq_u32
|
||||
#define v128_and vandq_u32
|
||||
#define v128_not vmvnq_u32
|
||||
#define v128_xor veorq_u32
|
||||
#define v128_andnot( v1, v0 ) vandq_u32( vmvnq_u32(v1), v0 )
|
||||
#define v128_xnor( a, b ) v128_not( v128_xor( a, b ) )
|
||||
#define v128_ornot vornq_u32
|
||||
|
||||
// ternary logic, veorq_u32 not defined
|
||||
// ~v1 & v0
|
||||
#define v128_andnot( v1, v0 ) vandq_u32( vmvnq_u32( v1 ), v0 )
|
||||
|
||||
// ~( a ^ b ), same as (~a) ^ b
|
||||
#define v128_xnor( v1, v0 ) v128_not( v128_xor( v1, v0 ) )
|
||||
|
||||
// ~v1 | v0, x86_64 convention, first arg is not'ed
|
||||
#define v128_ornot( v1, v0 ) vornq_u32( v0, v1 )
|
||||
|
||||
// ternary logic
|
||||
|
||||
// v2 ^ v1 ^ v0
|
||||
// veorq_u32 not defined
|
||||
//#define v128_xor3 veor3q_u32
|
||||
#define v128_xor3( v2, v1, v0 ) veorq_u32( v2, veorq_u32( v1, v0 ) )
|
||||
#define v128_nor vornq_u32
|
||||
|
||||
// v2 & v1 & v0
|
||||
#define v128_and3( v2, v1, v0 ) v128_and( v2, v128_and( v1, v0 ) )
|
||||
|
||||
// v2 | v1 | v0
|
||||
#define v128_or3( v2, v1, v0 ) v128_or( v2, v128_or( v1, v0 ) )
|
||||
|
||||
// a ^ ( ~b & c )
|
||||
#define v128_xorandnot( v2, v1, v0 ) v128_xor( v2, v128_andnot( v1, v0 ) )
|
||||
#define v128_and3( a, b, c ) v128_and( a, v128_and( b, c ) )
|
||||
#define v128_or3( a, b, c ) v128_or( a, v128_or( b, c ) )
|
||||
#define v128_xorand( a, b, c ) v128_xor( a, v128_and( b, c ) )
|
||||
#define v128_andxor( a, b, c ) v128_and( a, v128_xor( b, c ))
|
||||
#define v128_xoror( a, b, c ) v128_xor( a, v128_or( b, c ) )
|
||||
#define v128_orand( a, b, c ) v128_or( a, v128_and( b, c ) )
|
||||
|
||||
// a ^ ( b & c )
|
||||
#define v128_xorand( v2, v1, v0 ) v128_xor( v2, v128_and( v1, v0 ) )
|
||||
|
||||
// a & ( b ^ c )
|
||||
#define v128_andxor( v2, v1, v0 ) v128_and( v2, v128_xor( v1, v0 ) )
|
||||
|
||||
// a ^ ( b | c )
|
||||
#define v128_xoror( v2, v1, v0 ) v128_xor( v2, v128_or( v1, v0 ) )
|
||||
|
||||
// v2 | ( v1 & v0 )
|
||||
#define v128_orand( v2, v1, v0 ) v128_or( v2, v128_and( v1, v0 ) )
|
||||
|
||||
// shift 2 concatenated vectors right.
|
||||
#define v128_alignr64( v1, v0, c ) vextq_u64( v0, v1, c )
|
||||
@@ -127,39 +163,61 @@ static inline uint64x2_t v128_mulw32( uint32x4_t v1, uint32x4_t v0 )
|
||||
#define v128_alignr8( v1, v0, c ) vextq_u8( v0, v1, c )
|
||||
|
||||
// Interleave high or low half of 2 vectors.
|
||||
#define v128_unpacklo64( v1, v0 ) vzip1q_u64( v0, v1 )
|
||||
#define v128_unpackhi64( v1, v0 ) vzip2q_u64( v0, v1 )
|
||||
#define v128_unpacklo32( v1, v0 ) vzip1q_u32( v0, v1 )
|
||||
#define v128_unpackhi32( v1, v0 ) vzip2q_u32( v0, v1 )
|
||||
#define v128_unpacklo16( v1, v0 ) vzip1q_u16( v0, v1 )
|
||||
#define v128_unpackhi16( v1, v0 ) vzip2q_u16( v0, v1 )
|
||||
#define v128_unpacklo8( v1, v0 ) vzip1q_u8( v0, v1 )
|
||||
#define v128_unpackhi8( v1, v0 ) vzip2q_u8( v0, v1 )
|
||||
#define v128_unpacklo64( v1, v0 ) vzip1q_u64( v1, v0 )
|
||||
#define v128_unpackhi64( v1, v0 ) vzip2q_u64( v1, v0 )
|
||||
#define v128_unpacklo32( v1, v0 ) vzip1q_u32( v1, v0 )
|
||||
#define v128_unpackhi32( v1, v0 ) vzip2q_u32( v1, v0 )
|
||||
#define v128_unpacklo16( v1, v0 ) vzip1q_u16( v1, v0 )
|
||||
#define v128_unpackhi16( v1, v0 ) vzip2q_u16( v1, v0 )
|
||||
#define v128_unpacklo8( v1, v0 ) vzip1q_u8( v1, v0 )
|
||||
#define v128_unpackhi8( v1, v0 ) vzip2q_u8( v1, v0 )
|
||||
|
||||
// Shorter agnostic names for unpack using NEON-like syntax
|
||||
#define v128_ziplo64 vzip1q_u64
|
||||
#define v128_ziphi64 vzip2q_u64
|
||||
#define v128_ziplo32 vzip1q_u32
|
||||
#define v128_ziphi32 vzip2q_u32
|
||||
#define v128_ziplo16 vzip1q_u16
|
||||
#define v128_ziphi16 vzip2q_u16
|
||||
#define v128_ziplo8 vzip1q_u8
|
||||
#define v128_ziphi8 vzip2q_u8
|
||||
|
||||
// AES
|
||||
// consistent with Intel AES, break up for optimizing
|
||||
#define v128_aesenc( v, k ) vaesmcq_u8( vaeseq_u8( v, k ) )
|
||||
#define v128_aesenclast( v, k ) vaeseq_u8( v, k )
|
||||
// consistent with Intel AES intrinsics, break up for optimizing
|
||||
#define v128_aesenc( v, k ) \
|
||||
v128_xor( k, vaesmcq_u8( vaeseq_u8( v, v128_zero ) ) )
|
||||
|
||||
#define v128_aesenc_nokey( v ) \
|
||||
vaesmcq_u8( vaeseq_u8( v, v128_zero ) )
|
||||
|
||||
#define v128_aesenclast( v, k ) \
|
||||
v128_xor( k, vaeseq_u8( v, v128_zero ) )
|
||||
|
||||
#define v128_aesenclast_nokey( v, k ) \
|
||||
vaeseq_u8( v, v128_zero )
|
||||
|
||||
#define v128_aesdec( v, k ) \
|
||||
v128_xor( k, vaesimcq_u8( vaesdq_u8( v, v128_zero ) ) )
|
||||
|
||||
#define v128_aesdec_nokey( v, k ) \
|
||||
vaesimcq_u8( vaesdq_u8( v, v128_zero ) )
|
||||
|
||||
#define v128_aesdeclast( v, k ) \
|
||||
v128_xor( k, vaesdq_u8( v, v128_zero ) )
|
||||
|
||||
#define v128_aesdeclast_nokey( v, k ) \
|
||||
vaesdq_u8( v, v128_zero )
|
||||
|
||||
#define v128_aesdec( v, k ) vaesimcq_u8( vaesdq_u8( v, k ) )
|
||||
#define v128_aesdeclast( v, k ) vaesdq_u8( v, k )
|
||||
|
||||
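The NEON mapping above works because vaeseq_u8() adds the round key before SubBytes/ShiftRows, while Intel's _mm_aesenc_si128() applies MixColumns first and XORs the key last; passing a zero key to the ARM instruction and XORing the real key afterwards reproduces the Intel ordering. A minimal stand-alone sketch of one such round (assumes the ARM Crypto extension; the helper name is hypothetical):

#include <arm_neon.h>

// One Intel-style AES encrypt round on NEON:
//   vaeseq_u8( v, 0 )  = SubBytes( ShiftRows( v ) )   (zero key, so no pre-XOR)
//   vaesmcq_u8         = MixColumns
//   final veorq_u8     = AddRoundKey, matching _mm_aesenc_si128( v, k )
static inline uint8x16_t aesenc_like_x86( uint8x16_t v, uint8x16_t k )
{
   return veorq_u8( k, vaesmcq_u8( vaeseq_u8( v, vdupq_n_u8( 0 ) ) ) );
}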
typedef union
|
||||
{
|
||||
uint32x4_t v128;
|
||||
uint32x4_t m128;
|
||||
uint32_t u32[4];
|
||||
uint32_t u32[4];
|
||||
} __attribute__ ((aligned (16))) v128_ovly;
|
||||
|
||||
|
||||
// Broadcast lane 0 to all lanes, consistent with x86_64 broadcast
|
||||
#define v128_bcast64(v) vdupq_laneq_u64( v, 0 )
|
||||
#define v128_bcast32(v) vdupq_laneq_u32( v, 0 )
|
||||
#define v128_bcast16(v) vdupq_laneq_u16( v, 0 )
|
||||
|
||||
// Replicate (broadcast) lane l to all lanes
|
||||
#define v128_replane64( v, l ) vdupq_laneq_u64( v, l )
|
||||
#define v128_replane32( v, l ) vdupq_laneq_u32( v, l )
|
||||
#define v128_replane16( v, l ) vdupq_laneq_u16( v, l )
|
||||
|
||||
// pointer indexing
|
||||
#define casti_v128( p, i ) (((uint32x4_t*)(p))[i])
|
||||
#define cast_v128( p ) (*((uint32x4_t*)(p)))
|
||||
@@ -253,12 +311,13 @@ typedef union
|
||||
#define v128_negate16 vnegq_s16
|
||||
#define v128_negate8 vnegq_s8
|
||||
|
||||
// Nothing else seems to work
|
||||
static inline void v128_memset_zero( void *dst, const int n )
|
||||
{
|
||||
for( int i = 0; i < n; i++ )
|
||||
((uint32x4_t*)dst)[n] = (uint32x4_t)(uint128_t)0;
|
||||
memset( dst, 0, n*16 );
|
||||
}
|
||||
|
||||
|
||||
static inline void v128_memset( void *dst, const void *src, const int n )
|
||||
{
|
||||
for( int i = 0; i < n; i++ )
|
||||
@@ -271,51 +330,40 @@ static inline void v128_memcpy( void *dst, const void *src, const int n )
|
||||
((uint32x4_t*)dst)[i] = ((const uint32x4_t*)src)[i];
|
||||
}
|
||||
|
||||
// how to build a bitmask from vector elements?
|
||||
// how to build a bitmask from vector elements? Efficiently???
|
||||
#define v128_movmask32
|
||||
#define v128_movmask64
|
||||
|
||||
// Bit rotation
|
||||
//TODO, maybe, Optimize 64 bit rotations
|
||||
// Fall back for odd bit rotations
|
||||
static inline uint64x2_t v128_ror64( uint64x2_t v, int c )
|
||||
{ return vsriq_n_u64( vshlq_n_u64( v, 64-c ), v, c ); }
|
||||
|
||||
static inline uint64x2_t v128_rol64( uint64x2_t v, int c )
|
||||
{ return vsriq_n_u64( vshlq_n_u64( v, c ), v, 64-c ); }
|
||||
|
||||
static inline uint32x4_t v128_ror32( uint32x4_t v, int c )
|
||||
{ return vsriq_n_u32( vshlq_n_u32( v, 32-c ), v, c ); }
|
||||
|
||||
static inline uint32x4_t v128_rol32( uint32x4_t v, int c )
|
||||
{ return vsriq_n_u32( vshlq_n_u32( v, c ), v, 32-c ); }
|
||||
|
||||
static inline uint16x8_t v128_ror16( uint16x8_t v, int c )
|
||||
{ return vsriq_n_u16( vshlq_n_u16( v, 16-c ), v, c ); }
|
||||
|
||||
static inline uint16x8_t v128_rol16( uint16x8_t v, int c )
|
||||
{ return vsriq_n_u16( vshlq_n_u16( v, c ), v, 16-c ); }
|
||||
|
||||
static inline uint8x16_t v128_ror8( uint8x16_t v, int c )
|
||||
{ return vsriq_n_u8( vshlq_n_u8( v, 8-c ), v, c ); }
|
||||
|
||||
static inline uint8x16_t v128_rol8( uint16x8_t v, int c )
|
||||
{ return vsriq_n_u8( vshlq_n_u8( v, c ), v, 8-c ); }
|
||||
|
||||
/*
|
||||
// Optimized for half element rotations (swap)
|
||||
#define v128_ror64( v, c ) \
|
||||
( (c) == 32 ) ? (uint64x2_t)vrev64q_u32( v ) : v128_ror64_neon( v, c )
|
||||
( (c) == 32 ) ? (uint64x2_t)vrev64q_u32( ((uint64x2_t)v) ) \
|
||||
: vsriq_n_u64( vshlq_n_u64( ((uint64x2_t)v), 64-c ), ((uint64x2_t)v), c )
|
||||
|
||||
#define v128_rol64( v, c ) \
|
||||
( (c) == 32 ) ? (uint64x2_t)vrev64q_u32( v ) : v128_rol64_neon( v, c )
|
||||
|
||||
( (c) == 32 ) ? (uint64x2_t)vrev64q_u32( ((uint64x2_t)v) ) \
|
||||
: vsliq_n_u64( vshrq_n_u64( ((uint64x2_t)v), 64-c ), ((uint64x2_t)v), c )
|
||||
|
||||
#define v128_ror32( v, c ) \
|
||||
( (c) == 16 ) ? (uint32x4_t)vrev32q_u16( v ) : v128_ror32_neon( v, c )
|
||||
( (c) == 16 ) ? (uint32x4_t)vrev32q_u16( ((uint32x4_t)v) ) \
|
||||
: vsriq_n_u32( vshlq_n_u32( ((uint32x4_t)v), 32-c ), ((uint32x4_t)v), c )
|
||||
|
||||
#define v128_rol32( v, c ) \
|
||||
( (c) == 16 ) ? (uint32x4_t)vrev32q_u16( v ) : v128_rol32_neon( v, c )
|
||||
*/
|
||||
( (c) == 16 ) ? (uint32x4_t)vrev32q_u16( ((uint32x4_t)v) ) \
|
||||
: vsliq_n_u32( vshrq_n_u32( ((uint32x4_t)v), 32-c ), ((uint32x4_t)v), c )
|
||||
|
||||
#define v128_ror16( v, c ) \
|
||||
( (c) == 8 ) ? (uint16x8_t)vrev16q_u8( ((uint16x8_t)v) ) \
|
||||
: vsriq_n_u16( vshlq_n_u16( ((uint16x8_t)v), 16-c ), ((uint16x8_t)v), c )
|
||||
|
||||
#define v128_rol16( v, c ) \
|
||||
( (c) == 8 ) ? (uint16x8_t)vrev16q_u8( ((uint16x8_t)v) ) \
|
||||
: vsliq_n_u16( vshrq_n_u16( ((uint16x8_t)v), 16-c ), ((uint16x8_t)v), c )
|
||||
|
||||
#define v128_ror8( v, c ) \
|
||||
vsriq_n_u8( vshlq_n_u8( ((uint8x16_t)v), 8-c ), ((uint8x16_t)v), c )
|
||||
|
||||
#define v128_rol8( v, c ) \
|
||||
vsliq_n_u8( vshrq_n_u8( ((uint8x16_t)v), 8-c ), ((uint8x16_t)v), c )
|
||||
|
||||
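The macros above build a rotate from a shift left plus a shift-right-and-insert: vsriq_n_uXX( a, b, n ) shifts b right by n and inserts the result beneath the top n bits of a, so any odd rotate count costs two instructions. A small sketch of the same construction for a fixed count (assumes AArch64 NEON; the helper name is hypothetical):

#include <arm_neon.h>

// Rotate each 32-bit lane right by 7: vshlq_n_u32 supplies the wrapped-around
// high bits, vsriq_n_u32 inserts v >> 7 into the remaining low bit positions.
static inline uint32x4_t ror32x4_by7( uint32x4_t v )
{
   return vsriq_n_u32( vshlq_n_u32( v, 32 - 7 ), v, 7 );
}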
#define v128_2ror64( v1, v0, c ) \
|
||||
{ \
|
||||
@@ -343,7 +391,7 @@ static inline uint8x16_t v128_rol8( uint16x8_t v, int c )
|
||||
uint32x4_t t1 = vshrq_n_u32( v1, c ); \
|
||||
v0 = vsliq_n_u32( v0, 32-(c) ); \
|
||||
v1 = vsliq_n_u32( v1, 32-(c) ); \
|
||||
v0 = vorrq_u32( v0, t0 ); \
|
||||
v0 = vorrq_32( v0, t0 ); \
|
||||
v1 = vorrq_u32( v1, t1 ); \
|
||||
}
|
||||
|
||||
@@ -357,16 +405,6 @@ static inline uint8x16_t v128_rol8( uint16x8_t v, int c )
|
||||
v1 = vorrq_u32( v1, t1 ); \
|
||||
}
|
||||
|
||||
// vector rotation , size?
|
||||
static inline uint32x4_t v128_swap64( uint32x4_t v )
|
||||
{ return vextq_u64( v, v, 1 ); }
|
||||
|
||||
static inline uint32x4_t v128_shuflr32( uint32x4_t v )
|
||||
{ return vextq_u32( v, v, 1 ); }
|
||||
|
||||
static inline uint32x4_t v128_shufll32( uint32x4_t v )
|
||||
{ return vextq_u32( v, v, 3 ); }
|
||||
|
||||
// Cross lane shuffles, no programmable shuffle in NEON
|
||||
|
||||
// vector mask, use as last resort. prefer rev, alignr, etc
|
||||
@@ -395,29 +433,54 @@ static inline uint32x4_t v128_shufll32( uint32x4_t v )
|
||||
((uint8_t*)&v)[ ((uint8_t*)(&vmask))[ 1] ], \
|
||||
((uint8_t*)&v)[ ((uint8_t*)(&vmask))[ 0] ] )
|
||||
|
||||
#define v128_swap64_32( v ) vrev64q_u32( v )
|
||||
#define v128_v128_shuflr64_16( v ) v128_ror_64( v, 16 )
|
||||
#define v128_v128_shufll64_16( v ) v128_rol_64( v, 16 )
|
||||
// Sub-vector shuffles sometimes mirror bit rotation and the shuffle is faster,
// but bit rotation already promotes to faster widths; usage is context sensitive.
|
||||
|
||||
// Don't use as an alias for byte sized bit rotation
|
||||
#define v128_swap32_16( v ) vrev64q_u16( v )
|
||||
#define v128_v128_shuflr32_8( v ) v128_ror_32( v, 8 )
|
||||
#define v128_v128_shufll32_8( v ) v128_rol_32( v, 8 )
|
||||
// reverse elements in vector lanes
|
||||
#define v128_qrev32 vrev64q_u32
|
||||
#define v128_swap64_32 vrev64q_u32 // grandfathered
|
||||
|
||||
// reverse elements
|
||||
#define v128_rev32( v ) vrev64q_u32( v )
|
||||
#define v128_rev16( v ) vrev64q_u16( v )
|
||||
#define v128_rev8( v ) vrev64q_u8( v )
|
||||
#define v128_qrev16 vrev64q_u16
|
||||
#define v128_lrev16 vrev32q_u16
|
||||
|
||||
// reverse bits, nothing like it in x86_64
|
||||
#define v128_bitrev8( v ) vrbitq_u8
|
||||
// aka bswap
|
||||
#define v128_qrev8 vrev64q_u8
|
||||
#define v128_lrev8 vrev32q_u8
|
||||
#define v128_wrev8 vrev16q_u8
|
||||
|
||||
// full vector rotation
|
||||
|
||||
// reverse elements in vector
|
||||
static inline uint64x2_t v128_rev64( uint64x2_t v )
|
||||
{ return vextq_u64( v, v, 1 ); }
|
||||
#define v128_swap64 v128_rev64 // grandfathered
|
||||
|
||||
#define v128_rev32(v) v128_rev64( v128_qrev32( v ) )
|
||||
#define v128_rev16(v) v128_rev64( v128_qrev16( v ) )
|
||||
|
||||
// shuffle-rotate vector elements
|
||||
static inline uint32x4_t v128_shuflr32( uint32x4_t v )
|
||||
{ return vextq_u32( v, v, 1 ); }
|
||||
|
||||
static inline uint32x4_t v128_shufll32( uint32x4_t v )
|
||||
{ return vextq_u32( v, v, 3 ); }
|
||||
|
||||
static inline uint16x8_t v128_shuflr16( uint16x8_t v )
|
||||
{ return vextq_u16( v, v, 1 ); }
|
||||
|
||||
static inline uint16x8_t v128_shufll16( uint16x8_t v )
|
||||
{ return vextq_u16( v, v, 7 ); }
|
||||
|
||||
// reverse bits in bytes, nothing like it in x86_64
|
||||
#define v128_bitrev8 vrbitq_u8
|
||||
|
||||
// reverse byte order
|
||||
#define v128_bswap16 vrev16q_u8
|
||||
#define v128_bswap32 vrev32q_u8
|
||||
#define v128_bswap64 vrev64q_u8
|
||||
#define v128_bswap128(v) v128_swap64( v128_bswap64(v) )
|
||||
#define v128_bswap256(p) v128_bswap128( (p)[0], (p)[1] )
|
||||
#define v128_bswap16(v) (uint16x8_t)vrev16q_u8( (uint8x16_t)(v) )
|
||||
#define v128_bswap32(v) (uint32x4_t)vrev32q_u8( (uint8x16_t)(v) )
|
||||
#define v128_bswap64(v) (uint64x2_t)vrev64q_u8( (uint8x16_t)(v) )
|
||||
#define v128_bswap128(v) (uint32x4_t)v128_swap64( v128_bswap64(v) )
|
||||
#define v128_bswap256(p) v128_bswap128( (p)[0], (p)[1] )
|
||||
|
||||
// Useful for x86_64 but does nothing for ARM
|
||||
#define v128_block_bswap32( dst, src ) \
|
||||
|
87	sysinfos.c
@@ -15,7 +15,7 @@
|
||||
#include <string.h>
|
||||
#include "miner.h"
|
||||
|
||||
#if defined(__aarch64__)
|
||||
#if defined(__aarch64__) && !defined(__APPLE__)
|
||||
// for arm's "cpuid"
|
||||
#include <sys/auxv.h>
|
||||
#include <asm/hwcap.h>
|
||||
@@ -141,26 +141,13 @@ static inline void linux_cpu_hilo_freq( float *lo, float *hi )
|
||||
*lo = (float)lo_freq;
|
||||
}
|
||||
|
||||
|
||||
#else /* WIN32 */
|
||||
|
||||
static inline float win32_cputemp( int core )
|
||||
{
|
||||
// todo
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
|
||||
#endif /* !WIN32 */
|
||||
|
||||
|
||||
/* exports */
|
||||
|
||||
|
||||
static inline float cpu_temp( int core )
|
||||
{
|
||||
#ifdef WIN32
|
||||
return win32_cputemp( core );
|
||||
return 0.;
|
||||
#else
|
||||
return linux_cputemp( core );
|
||||
#endif
|
||||
@@ -321,7 +308,7 @@ static inline void cpuid( unsigned int leaf, unsigned int subleaf,
|
||||
#endif
|
||||
}
|
||||
|
||||
#elif defined(__aarch64__)
|
||||
#elif defined(__aarch64__) && !defined(__APPLE__)
|
||||
|
||||
static inline void cpuid( unsigned int leaf, unsigned int subleaf,
|
||||
unsigned int output[4] )
|
||||
@@ -495,11 +482,9 @@ static inline bool cpu_arch_aarch64()
|
||||
static inline bool has_sse()
|
||||
{
|
||||
#if defined(__x86_64__)
|
||||
|
||||
unsigned int cpu_info[4] = { 0 };
|
||||
cpuid( CPU_INFO, 0, cpu_info );
|
||||
return cpu_info[ EDX_Reg ] & SSE_Flag;
|
||||
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
@@ -508,11 +493,9 @@ static inline bool has_sse()
|
||||
static inline bool has_sse2()
|
||||
{
|
||||
#if defined(__x86_64__)
|
||||
|
||||
unsigned int cpu_info[4] = { 0 };
|
||||
cpuid( CPU_INFO, 0, cpu_info );
|
||||
return cpu_info[ EDX_Reg ] & SSE2_Flag;
|
||||
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
@@ -521,11 +504,9 @@ static inline bool has_sse2()
static inline bool has_ssse3()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( CPU_INFO, 0, cpu_info );
return cpu_info[ ECX_Reg ] & SSSE3_Flag;

#else
return false;
#endif
@@ -534,11 +515,9 @@ static inline bool has_ssse3()
static inline bool has_sse41()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( CPU_INFO, 0, cpu_info );
return cpu_info[ ECX_Reg ] & SSE41_Flag;

#else
return false;
#endif
@@ -547,11 +526,9 @@ static inline bool has_sse41()
static inline bool has_sse42()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( CPU_INFO, 0, cpu_info );
return cpu_info[ ECX_Reg ] & SSE42_Flag;

#else
return false;
#endif
@@ -559,7 +536,7 @@ static inline bool has_sse42()

static inline bool has_neon()
{
#if defined(__aarch64__)
#if defined(__aarch64__) && !defined(__APPLE__)
unsigned int cpu_info[4] = { 0 };
return cpu_info[0];
#else
@@ -570,7 +547,6 @@ static inline bool has_neon()
static inline bool has_aes_ni()
{
#if defined(__x86_64__)

if ( has_sse2() )
{
unsigned int cpu_info[4] = { 0 };
@@ -578,9 +554,7 @@ static inline bool has_aes_ni()
return cpu_info[ ECX_Reg ] & AES_NI_Flag;
}
return false;

#elif defined(__aarch64__)

#elif defined(__aarch64__) && !defined(__APPLE__)
if ( has_neon() )
{
unsigned int cpu_info[4] = { 0 };
@@ -588,7 +562,6 @@ static inline bool has_aes_ni()
return cpu_info[0] & HWCAP_AES;
}
return false;

#else
return false;
#endif
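On aarch64 Linux the equivalent information comes from the kernel's hwcap bits rather than a CPUID instruction, which is why the file pulls in <sys/auxv.h> and <asm/hwcap.h>. A stand-alone sketch of that idea (illustrative only, Linux/aarch64; the helper names are hypothetical):

#include <stdbool.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

// Ask the kernel which CPU features it detected at boot.
static bool arm_has_aes( void )  { return ( getauxval( AT_HWCAP ) & HWCAP_AES  ) != 0; }
static bool arm_has_sha2( void ) { return ( getauxval( AT_HWCAP ) & HWCAP_SHA2 ) != 0; }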
@@ -597,11 +570,9 @@ static inline bool has_aes_ni()
static inline bool has_avx()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( CPU_INFO, 0, cpu_info );
return ( ( cpu_info[ ECX_Reg ] & AVX_mask ) == AVX_mask );

#else
return false;
#endif
@@ -610,11 +581,9 @@ static inline bool has_avx()
static inline bool has_avx2()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( EXTENDED_FEATURES, 0, cpu_info );
return cpu_info[ EBX_Reg ] & AVX2_Flag;

#else
return false;
#endif
@@ -623,7 +592,6 @@ static inline bool has_avx2()
static inline bool has_sha()
{
#if defined(__x86_64__)

if ( has_avx() )
{
unsigned int cpu_info[4] = { 0 };
@@ -631,9 +599,7 @@ static inline bool has_sha()
return cpu_info[ EBX_Reg ] & SHA_Flag;
}
return false;

#elif defined(__aarch64__)

#elif defined(__aarch64__) && !defined(__APPLE__)
if ( has_neon() )
{
unsigned int cpu_info[4] = { 0 };
@@ -641,7 +607,6 @@ static inline bool has_sha()
return cpu_info[0] & HWCAP_SHA2;
}
return false;

#else
return false;
#endif
@@ -650,7 +615,6 @@ static inline bool has_sha()
static inline bool has_sha512()
{
#if defined(__x86_64__)

if ( has_avx2() )
{
unsigned int cpu_info[4] = { 0 };
@@ -658,9 +622,7 @@ static inline bool has_sha512()
return cpu_info[ EAX_Reg ] & SHA512_Flag;
}
return false;

#elif defined(__aarch64__)

#elif defined(__aarch64__) && !defined(__APPLE__)
if ( has_neon() )
{
unsigned int cpu_info[4] = { 0 };
@@ -668,7 +630,6 @@ static inline bool has_sha512()
return cpu_info[0] & HWCAP_SHA3;
}
return false;

#else
return false;
#endif
@@ -677,7 +638,6 @@ static inline bool has_sha512()
static inline bool has_avx512f()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( EXTENDED_FEATURES, 0, cpu_info );
return cpu_info[ EBX_Reg ] & AVX512_F_Flag;
@@ -689,7 +649,6 @@ static inline bool has_avx512f()
static inline bool has_avx512dq()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( EXTENDED_FEATURES, 0, cpu_info );
return cpu_info[ EBX_Reg ] & AVX512_DQ_Flag;
@@ -701,7 +660,6 @@ static inline bool has_avx512dq()
static inline bool has_avx512bw()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( EXTENDED_FEATURES, 0, cpu_info );
return cpu_info[ EBX_Reg ] & AVX512_BW_Flag;
@@ -713,7 +671,6 @@ static inline bool has_avx512bw()
static inline bool has_avx512vl()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( EXTENDED_FEATURES, 0, cpu_info );
return cpu_info[ EBX_Reg ] & AVX512_VL_Flag;
@@ -722,14 +679,13 @@ static inline bool has_avx512vl()
#endif
}

// baseline for useability
static inline bool has_avx512()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( EXTENDED_FEATURES, 0, cpu_info );
return ( ( cpu_info[ EBX_Reg ] & AVX512_mask ) == AVX512_mask );

#else
return false;
#endif
@@ -738,7 +694,6 @@ static inline bool has_avx512()
static inline bool has_vaes()
{
#if defined(__x86_64__)

if ( has_avx2() )
{
unsigned int cpu_info[4] = { 0 };
@@ -754,11 +709,9 @@ static inline bool has_vaes()
static inline bool has_vbmi()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( EXTENDED_FEATURES, 0, cpu_info );
return cpu_info[ ECX_Reg ] & AVX512_VBMI_Flag;

#else
return false;
#endif
@@ -767,7 +720,6 @@ static inline bool has_vbmi()
static inline bool has_vbmi2()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( EXTENDED_FEATURES, 0, cpu_info );
return cpu_info[ ECX_Reg ] & AVX512_VBMI2_Flag;
@@ -780,7 +732,6 @@ static inline bool has_vbmi2()
static inline bool has_xop()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( EXTENDED_CPU_INFO, 0, cpu_info );
return cpu_info[ ECX_Reg ] & XOP_Flag;
@@ -792,11 +743,9 @@ static inline bool has_xop()
static inline bool has_fma3()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( CPU_INFO, 0, cpu_info );
return ( ( cpu_info[ ECX_Reg ] & FMA3_mask ) == FMA3_mask );

#else
return false;
#endif
@@ -805,24 +754,21 @@ static inline bool has_fma3()
static inline bool has_apx_f()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( EXTENDED_FEATURES, 1, cpu_info );
return cpu_info[ EDX_Reg ] & APX_F_Flag;

#else
return false;
#endif
}

// Not much use on it's own
static inline bool has_avx10()
{
#if defined(__x86_64__)

unsigned int cpu_info[4] = { 0 };
cpuid( EXTENDED_FEATURES, 1, cpu_info );
return cpu_info[ EDX_Reg ] & AVX10_Flag;

#else
return false;
#endif
@@ -831,7 +777,6 @@ static inline bool has_avx10()
static inline unsigned int avx10_version()
{
#if defined(__x86_64__)

if ( has_avx10() )
{
unsigned int cpu_info[4] = { 0 };
@@ -839,7 +784,6 @@ static inline unsigned int avx10_version()
return cpu_info[ EBX_Reg ] & AVX10_VERSION_mask;
}
return 0;

#else
return 0;
#endif
@@ -849,7 +793,6 @@ static inline unsigned int avx10_version()
static inline bool has_avx10_512()
{
#if defined(__x86_64__)

if ( has_avx10() )
{
unsigned int cpu_info[4] = { 0 };
@@ -857,17 +800,15 @@ static inline bool has_avx10_512()
return cpu_info[ EBX_Reg ] & AVX10_512_Flag;
}
return false;

#else
return false;
#endif
}

// may not include 512
// Includes 128 but may not include 512
static inline bool has_avx10_256()
{
#if defined(__x86_64__)

if ( has_avx10() )
{
unsigned int cpu_info[4] = { 0 };
@@ -875,7 +816,6 @@ static inline bool has_avx10_256()
return cpu_info[ EBX_Reg ] & AVX10_256_Flag;
}
return false;

#else
return false;
#endif
@@ -885,7 +825,6 @@ static inline bool has_avx10_256()
static inline unsigned int avx10_vector_length()
{
#if defined(__x86_64__)

if ( has_avx10() )
{
unsigned int cpu_info[4] = { 0 };
@@ -894,16 +833,12 @@ static inline unsigned int avx10_vector_length()
: ( cpu_info[ EBX_Reg ] & AVX10_256_Flag ? 256 : 0 );
}
return 0;

#else
return 0;
#endif
}





static inline uint32_t cpuid_get_highest_function_number()
{
#if defined(__x86_64__)
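Taken together these helpers let the miner pick a vector width at run time: avx10_vector_length() reports 512, 256 or 0 depending on the AVX10 flags, while the older has_avx512()/has_avx2()/has_sse2() tests cover pre-AVX10 CPUs. A hypothetical caller, assuming the detection helpers above are in scope (pick_simd_width() is not in the source, just an illustration of how they compose):

static int pick_simd_width( void )
{
   unsigned int w = avx10_vector_length();     // 0 when AVX10 is absent
   if ( w >= 512 || has_avx512() ) return 512;
   if ( w >= 256 || has_avx2()   ) return 256;
   if ( has_sse2() )               return 128;
   return 64;                                  // scalar fallback
}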
@@ -922,7 +857,7 @@ static inline void cpuid_get_highest_function( char* s )
{
#if defined(__x86_64__)

uint32_t fn = cpuid_get_highest_function_number();
uint32_t fn = cpuid_get_highest_function_number();
switch (fn)
{
case 0x16:
@@ -10,12 +10,14 @@
# define some local variables

export LOCAL_LIB="$HOME/usr/lib"
export CONFIGURE_ARGS="--with-curl=$LOCAL_LIB/curl --with-crypto=$LOCAL_LIB/openssl --host=x86_64-w64-mingw32"
export CONFIGURE_ARGS="--with-curl=$LOCAL_LIB/curl --host=x86_64-w64-mingw32"
#export CONFIGURE_ARGS="--with-curl=$LOCAL_LIB/curl --with-crypto=$LOCAL_LIB/openssl --host=x86_64-w64-mingw32"
export MINGW_LIB="/usr/x86_64-w64-mingw32/lib"
# set correct gcc version
export GCC_MINGW_LIB="/usr/lib/gcc/x86_64-w64-mingw32/9.3-win32"
# used by GCC
export LDFLAGS="-L$LOCAL_LIB/curl/lib/.libs -L$LOCAL_LIB/gmp/.libs -L$LOCAL_LIB/openssl"
export LDFLAGS="-L$LOCAL_LIB/curl/lib/.libs -L$LOCAL_LIB/gmp/.libs"
#export LDFLAGS="-L$LOCAL_LIB/curl/lib/.libs -L$LOCAL_LIB/gmp/.libs -L$LOCAL_LIB/openssl"
# Support for Windows 7 CPU groups, AES sometimes not included in -march
# CPU groups disabled due to incompatibilities between Intel and AMD CPUs.
#export DEFAULT_CFLAGS="-maes -O3 -Wall -D_WIN32_WINNT=0x0601"
@@ -38,7 +40,7 @@ cp $MINGW_LIB/zlib1.dll release/
cp $MINGW_LIB/libwinpthread-1.dll release/
cp $GCC_MINGW_LIB/libstdc++-6.dll release/
cp $GCC_MINGW_LIB/libgcc_s_seh-1.dll release/
cp ./../libcrypto-1_1-x64.dll release/
#cp ./../libcrypto-1_1-x64.dll release/
cp $LOCAL_LIB/curl/lib/.libs/libcurl-4.dll release/

# Start building...