This commit is contained in:
Jay D Dee
2018-02-23 12:39:15 -05:00
parent 502ed0b1fe
commit 3c02653dbe
70 changed files with 3871 additions and 1848 deletions

View File

@@ -1,7 +1,4 @@
#include "lbry-gate.h"
#if defined(LBRY_4WAY)
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
@@ -9,6 +6,140 @@
#include "algo/sha/sha2-hash-4way.h"
#include "ripemd-hash-4way.h"
#define LBRY_INPUT_SIZE 112
#define LBRY_MIDSTATE 64
#define LBRY_TAIL (LBRY_INPUT_SIZE) - (LBRY_MIDSTATE)
#if defined(LBRY_8WAY)
static __thread sha256_8way_context sha256_8w_mid;
// LBRY proof-of-work hash, 8 lanes in parallel (AVX2, 8x32-bit interleaved):
// sha256d( 112-byte header ) -> sha512 -> ripemd160( first 32 bytes ) and
// ripemd160( second 32 bytes ) -> sha256d( both 20-byte digests ).
// input is 8-way interleaved; the sha256 midstate over the first 64 bytes
// is resumed from the thread-local sha256_8w_mid set up by the scanner.
void lbry_8way_hash( void* output, const void* input )
{
uint32_t _ALIGN(64) vhashA[16<<3];
uint32_t _ALIGN(64) vhashB[16<<3];
uint32_t _ALIGN(64) vhashC[16<<3];
uint32_t _ALIGN(32) h0[32];
uint32_t _ALIGN(32) h1[32];
uint32_t _ALIGN(32) h2[32];
uint32_t _ALIGN(32) h3[32];
uint32_t _ALIGN(32) h4[32];
uint32_t _ALIGN(32) h5[32];
uint32_t _ALIGN(32) h6[32];
uint32_t _ALIGN(32) h7[32];
sha256_8way_context ctx_sha256 __attribute__ ((aligned (64)));
sha512_4way_context ctx_sha512;
ripemd160_8way_context ctx_ripemd;
// Resume from the precomputed midstate and hash the 48-byte tail,
// then hash again for sha256d.
memcpy( &ctx_sha256, &sha256_8w_mid, sizeof(ctx_sha256) );
sha256_8way( &ctx_sha256, input + (LBRY_MIDSTATE<<3), LBRY_TAIL );
sha256_8way_close( &ctx_sha256, vhashA );
sha256_8way_init( &ctx_sha256 );
sha256_8way( &ctx_sha256, vhashA, 32 );
sha256_8way_close( &ctx_sha256, vhashA );
// reinterleave to do sha512 4-way 64 bit twice.
mm256_deinterleave_8x32( h0, h1, h2, h3, h4, h5, h6, h7, vhashA, 256 );
mm256_interleave_4x64( vhashA, h0, h1, h2, h3, 256 );
mm256_interleave_4x64( vhashB, h4, h5, h6, h7, 256 );
sha512_4way_init( &ctx_sha512 );
sha512_4way( &ctx_sha512, vhashA, 32 );
sha512_4way_close( &ctx_sha512, vhashA );
sha512_4way_init( &ctx_sha512 );
sha512_4way( &ctx_sha512, vhashB, 32 );
sha512_4way_close( &ctx_sha512, vhashB );
// back to 8-way 32 bit
mm256_deinterleave_4x64( h0, h1, h2, h3, vhashA, 512 );
mm256_deinterleave_4x64( h4, h5, h6, h7, vhashB, 512 );
mm256_interleave_8x32( vhashA, h0, h1, h2, h3, h4, h5, h6, h7, 512 );
// ripemd160 over each 32-byte half of the 64-byte sha512 output;
// vhashA+(8<<3) skips 8 words * 8 lanes to the second half.
ripemd160_8way_init( &ctx_ripemd );
ripemd160_8way( &ctx_ripemd, vhashA, 32 );
ripemd160_8way_close( &ctx_ripemd, vhashB );
ripemd160_8way_init( &ctx_ripemd );
ripemd160_8way( &ctx_ripemd, vhashA+(8<<3), 32 );
ripemd160_8way_close( &ctx_ripemd, vhashC );
// sha256d over the concatenation of the two 20-byte ripemd digests.
sha256_8way_init( &ctx_sha256 );
sha256_8way( &ctx_sha256, vhashB, 20 );
sha256_8way( &ctx_sha256, vhashC, 20 );
sha256_8way_close( &ctx_sha256, vhashA );
sha256_8way_init( &ctx_sha256 );
sha256_8way( &ctx_sha256, vhashA, 32 );
sha256_8way_close( &ctx_sha256, vhashA );
// Deinterleave the final digests into 8 contiguous 32-byte results.
mm256_deinterleave_8x32( output, output+ 32, output+ 64, output+ 96,
output+128, output+160, output+192, output+224,
vhashA, 256 );
}
// Scan nonces for LBRY with the 8-way AVX2 hash. Eight consecutive
// nonces are tried per hash call; each lane that meets the target is
// recorded in work->nonces. Returns the number of solutions found and
// reports the nonce range consumed through hashes_done.
int scanhash_lbry_8way( int thr_id, struct work *work, uint32_t max_nonce,
                        uint64_t *hashes_done )
{
   uint32_t hash[8*8]   __attribute__ ((aligned (64)));
   uint32_t vdata[32*8] __attribute__ ((aligned (64)));
   uint32_t edata[32]   __attribute__ ((aligned (64)));
   uint32_t *pdata = work->data;
   uint32_t *ptarget = work->target;
   uint32_t n = pdata[27];
   const uint32_t first_nonce = pdata[27];
   const uint32_t Htarg = ptarget[7];
   uint32_t *nonces = work->nonces;
   int num_found = 0;
   // The nonce is word 27 of each 32-word lane; in the 8-way interleaved
   // buffer lane-word 27 starts at index 27*8 = 216.
   uint32_t *noncep = vdata + 216;
   const uint64_t htmax[] = {          0,        0xF,       0xFF,
                                   0xFFF,     0xFFFF, 0x10000000 };
   const uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00,
                              0xFFFFF000, 0xFFFF0000,          0 };
   // Bug fix: the loop bound was sizeof(masks) — the array size in BYTES
   // (24), not elements (6) — so when Htarg exceeded every htmax entry
   // the scan read htmax[] out of bounds. Bound by element count.
   const int num_masks = (int)( sizeof(masks) / sizeof(masks[0]) );

   // we need bigendian data...
   swab32_array( edata, pdata, 32 );
   mm256_interleave_8x32( vdata, edata, edata, edata, edata,
                          edata, edata, edata, edata, 1024 );
   // Precompute the sha256 midstate over the constant first 64 bytes
   // once per work unit; lbry_8way_hash resumes from it for every nonce.
   sha256_8way_init( &sha256_8w_mid );
   sha256_8way( &sha256_8w_mid, vdata, LBRY_MIDSTATE );

   for ( int m = 0; m < num_masks; m++ ) if ( Htarg <= htmax[m] )
   {
      const uint32_t mask = masks[m];
      do
      {
         // Write 8 consecutive big-endian nonces, one per lane.
         be32enc( noncep,   n   );
         be32enc( noncep+1, n+1 );
         be32enc( noncep+2, n+2 );
         be32enc( noncep+3, n+3 );
         be32enc( noncep+4, n+4 );
         be32enc( noncep+5, n+5 );
         be32enc( noncep+6, n+6 );
         be32enc( noncep+7, n+7 );

         lbry_8way_hash( hash, vdata );

         // Cheap mask pretest on word 7, then the full target check.
         for ( int i = 0; i < 8; i++ )
         if ( !( (hash+(i<<3))[7] & mask ) && fulltest( hash+(i<<3), ptarget ) )
         {
            nonces[ num_found++ ] = n+i;
            work_set_target_ratio( work, hash+(i<<3) );
         }
         n += 8;
      } while ( ( num_found == 0 ) && ( n < max_nonce )
                && !work_restart[thr_id].restart );
      break;
   }
   *hashes_done = n - first_nonce;
   return num_found;
}
#elif defined(LBRY_4WAY)
static __thread sha256_4way_context sha256_mid;
void lbry_4way_hash( void* output, const void* input )
@@ -21,7 +152,7 @@ void lbry_4way_hash( void* output, const void* input )
uint32_t _ALIGN(64) vhashC[16<<2];
memcpy( &ctx_sha256, &sha256_mid, sizeof(ctx_sha256) );
sha256_4way( &ctx_sha256, input+(64<<2), 48 );
sha256_4way( &ctx_sha256, input + (LBRY_MIDSTATE<<2), LBRY_TAIL );
sha256_4way_close( &ctx_sha256, vhashA );
sha256_4way_init( &ctx_sha256 );
@@ -67,12 +198,8 @@ int scanhash_lbry_4way( int thr_id, struct work *work, uint32_t max_nonce,
const uint32_t Htarg = ptarget[7];
uint32_t edata[32] __attribute__ ((aligned (64)));
uint32_t *nonces = work->nonces;
bool *found = work->nfound;
int num_found = 0;
uint32_t *noncep0 = vdata + 108; // 27*4
uint32_t *noncep1 = vdata + 109;
uint32_t *noncep2 = vdata + 110;
uint32_t *noncep3 = vdata + 111;
uint32_t *noncep = vdata + 108; // 27*4
uint64_t htmax[] = { 0, 0xF, 0xFF,
0xFFF, 0xFFFF, 0x10000000 };
@@ -83,47 +210,25 @@ int scanhash_lbry_4way( int thr_id, struct work *work, uint32_t max_nonce,
swab32_array( edata, pdata, 32 );
mm_interleave_4x32( vdata, edata, edata, edata, edata, 1024 );
sha256_4way_init( &sha256_mid );
sha256_4way( &sha256_mid, vdata, 64 );
sha256_4way( &sha256_mid, vdata, LBRY_MIDSTATE );
for ( int m = 0; m < sizeof(masks); m++ ) if ( Htarg <= htmax[m] )
{
uint32_t mask = masks[m];
do
{
found[0] = found[1] = found[2] = found[3] = false;
be32enc( noncep0, n );
be32enc( noncep1, n+1 );
be32enc( noncep2, n+2 );
be32enc( noncep3, n+3 );
be32enc( noncep, n );
be32enc( noncep+1, n+1 );
be32enc( noncep+2, n+2 );
be32enc( noncep+3, n+3 );
lbry_4way_hash( hash, vdata );
if ( !( hash[7] & mask ) && fulltest( hash, ptarget ) )
for ( int i = 0; i < 4; i++ )
if ( !( (hash+(i<<3))[7] & mask ) && fulltest( hash+(i<<3), ptarget ) )
{
found[0] = true;
num_found++;
nonces[0] = pdata[27] = n;
work_set_target_ratio( work, hash );
}
if ( !( (hash+8)[7] & mask ) && fulltest( hash+8, ptarget ) )
{
found[1] = true;
num_found++;
nonces[1] = n+1;
work_set_target_ratio( work, hash+8 );
}
if ( !( (hash+16)[7] & mask ) && fulltest( hash+16, ptarget ) )
{
found[2] = true;
num_found++;
nonces[2] = n+2;
work_set_target_ratio( work, hash+16 );
}
if ( !( (hash+24)[7] & mask ) && fulltest( hash+24, ptarget ) )
{
found[3] = true;
num_found++;
nonces[3] = n+3;
work_set_target_ratio( work, hash+24 );
nonces[ num_found++ ] = n+i;
work_set_target_ratio( work, hash+(i<<3) );
}
n+=4;
} while ( ( num_found == 0 ) && ( n < max_nonce )

View File

@@ -73,7 +73,10 @@ int64_t lbry_get_max64() { return 0x1ffffLL; }
bool register_lbry_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | AVX_OPT | AVX2_OPT | SHA_OPT;
#if defined (LBRY_4WAY)
#if defined (LBRY_8WAY)
gate->scanhash = (void*)&scanhash_lbry_8way;
gate->hash = (void*)&lbry_8way_hash;
#elif defined (LBRY_4WAY)
gate->scanhash = (void*)&scanhash_lbry_4way;
gate->hash = (void*)&lbry_4way_hash;
#else

View File

@@ -4,8 +4,9 @@
#include "algo-gate-api.h"
#include <stdint.h>
// need sha512 2 way AVX x2 or 1 way scalar x4 to support 4way AVX.
#if defined(__AVX2__)
#define LBRY_4WAY
#define LBRY_8WAY
#endif
#define LBRY_NTIME_INDEX 25
@@ -16,15 +17,21 @@
bool register_lbry_algo( algo_gate_t* gate );
#if defined(LBRY_4WAY)
#if defined(LBRY_8WAY)
void lbry_8way_hash( void *state, const void *input );
int scanhash_lbry_8way( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done );
#elif defined(LBRY_4WAY)
void lbry_4way_hash( void *state, const void *input );
int scanhash_lbry_4way( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done );
#endif
#else
void lbry_hash( void *state, const void *input );
int scanhash_lbry( int thr_id, struct work *work, uint32_t max_nonce,
uint64_t *hashes_done );
#endif
#endif

View File

@@ -5,25 +5,6 @@
#include <stddef.h>
#include <string.h>
/*
* Round functions for RIPEMD-128 and RIPEMD-160.
*/
#define F1(x, y, z) \
_mm_xor_si128( _mm_xor_si128( x, y ), z )
#define F2(x, y, z) \
_mm_xor_si128( _mm_and_si128( _mm_xor_si128( y, z ), x ), z )
#define F3(x, y, z) \
_mm_xor_si128( _mm_or_si128( x, mm_not( y ) ), z )
#define F4(x, y, z) \
_mm_xor_si128( _mm_and_si128( _mm_xor_si128( x, y ), z ), y )
#define F5(x, y, z) \
_mm_xor_si128( x, _mm_or_si128( y, mm_not( z ) ) )
static const uint32_t IV[5] =
{ 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 };
@@ -42,6 +23,23 @@ static const uint32_t IV[5] =
#define K24 0x7A6D76E9
#define K25 0x00000000
// RIPEMD-160 4 way
#define F1(x, y, z) \
_mm_xor_si128( _mm_xor_si128( x, y ), z )
#define F2(x, y, z) \
_mm_xor_si128( _mm_and_si128( _mm_xor_si128( y, z ), x ), z )
#define F3(x, y, z) \
_mm_xor_si128( _mm_or_si128( x, mm_not( y ) ), z )
#define F4(x, y, z) \
_mm_xor_si128( _mm_and_si128( _mm_xor_si128( x, y ), z ), y )
#define F5(x, y, z) \
_mm_xor_si128( x, _mm_or_si128( y, mm_not( z ) ) )
#define RR(a, b, c, d, e, f, s, r, k) \
do{ \
a = _mm_add_epi32( mm_rotl_32( _mm_add_epi32( _mm_add_epi32( \
@@ -321,3 +319,304 @@ void ripemd160_4way_close( ripemd160_4way_context *sc, void *dst )
}
#endif
#if defined(__AVX2__)
// Ripemd-160 8 way: 8 independent lanes, one 32-bit word per lane in
// each __m256i.
// F8W_1..F8W_5 are the five RIPEMD-160 boolean selection functions
// (xor / mux / or-not / mux / or-not variants), vectorised with AVX2.
#define F8W_1(x, y, z) \
_mm256_xor_si256( _mm256_xor_si256( x, y ), z )
#define F8W_2(x, y, z) \
_mm256_xor_si256( _mm256_and_si256( _mm256_xor_si256( y, z ), x ), z )
#define F8W_3(x, y, z) \
_mm256_xor_si256( _mm256_or_si256( x, mm256_not( y ) ), z )
#define F8W_4(x, y, z) \
_mm256_xor_si256( _mm256_and_si256( _mm256_xor_si256( x, y ), z ), y )
#define F8W_5(x, y, z) \
_mm256_xor_si256( x, _mm256_or_si256( y, mm256_not( z ) ) )
// One RIPEMD-160 step: a = rotl( a + f(b,c,d) + msg + K, s ) + e,
// followed by c = rotl( c, 10 ), applied lane-wise to all 8 lanes.
#define RR_8W(a, b, c, d, e, f, s, r, k) \
do{ \
a = _mm256_add_epi32( mm256_rotl_32( _mm256_add_epi32( _mm256_add_epi32( \
_mm256_add_epi32( a, f( b ,c, d ) ), r ), \
_mm256_set1_epi32( k ) ), s ), e ); \
c = mm256_rotl_32( c, 10 );\
} while (0)
// ROUND1_8W / ROUND2_8W select the left (A1..E1, K1x constants) or
// right (A2..E2, K2x constants) parallel line of the compression.
#define ROUND1_8W(a, b, c, d, e, f, s, r, k) \
RR_8W(a ## 1, b ## 1, c ## 1, d ## 1, e ## 1, f, s, r, K1 ## k)
#define ROUND2_8W(a, b, c, d, e, f, s, r, k) \
RR_8W(a ## 2, b ## 2, c ## 2, d ## 2, e ## 2, f, s, r, K2 ## k)
// One RIPEMD-160 compression of the 64-byte block in sc->buf into
// sc->val, on 8 lanes in parallel. The left line (ROUND1_8W) and the
// right line (ROUND2_8W) each run 5 groups of 16 steps with their own
// selection functions and message-word orderings, then the two results
// are folded back into the chaining value with the standard rotated
// recombination. Step order is fixed by the RIPEMD-160 spec — do not
// reorder.
static void ripemd160_8way_round( ripemd160_8way_context *sc )
{
const __m256i *in = (__m256i*)sc->buf;
__m256i *h = (__m256i*)sc->val;
register __m256i A1, B1, C1, D1, E1;
register __m256i A2, B2, C2, D2, E2;
__m256i tmp;
// Both lines start from the current chaining value.
A1 = A2 = h[0];
B1 = B2 = h[1];
C1 = C2 = h[2];
D1 = D2 = h[3];
E1 = E2 = h[4];
// Left line, group 1 (F1).
ROUND1_8W( A, B, C, D, E, F8W_1, 11, in[ 0], 1 );
ROUND1_8W( E, A, B, C, D, F8W_1, 14, in[ 1], 1 );
ROUND1_8W( D, E, A, B, C, F8W_1, 15, in[ 2], 1 );
ROUND1_8W( C, D, E, A, B, F8W_1, 12, in[ 3], 1 );
ROUND1_8W( B, C, D, E, A, F8W_1, 5, in[ 4], 1 );
ROUND1_8W( A, B, C, D, E, F8W_1, 8, in[ 5], 1 );
ROUND1_8W( E, A, B, C, D, F8W_1, 7, in[ 6], 1 );
ROUND1_8W( D, E, A, B, C, F8W_1, 9, in[ 7], 1 );
ROUND1_8W( C, D, E, A, B, F8W_1, 11, in[ 8], 1 );
ROUND1_8W( B, C, D, E, A, F8W_1, 13, in[ 9], 1 );
ROUND1_8W( A, B, C, D, E, F8W_1, 14, in[10], 1 );
ROUND1_8W( E, A, B, C, D, F8W_1, 15, in[11], 1 );
ROUND1_8W( D, E, A, B, C, F8W_1, 6, in[12], 1 );
ROUND1_8W( C, D, E, A, B, F8W_1, 7, in[13], 1 );
ROUND1_8W( B, C, D, E, A, F8W_1, 9, in[14], 1 );
ROUND1_8W( A, B, C, D, E, F8W_1, 8, in[15], 1 );
// Left line, group 2 (F2).
ROUND1_8W( E, A, B, C, D, F8W_2, 7, in[ 7], 2 );
ROUND1_8W( D, E, A, B, C, F8W_2, 6, in[ 4], 2 );
ROUND1_8W( C, D, E, A, B, F8W_2, 8, in[13], 2 );
ROUND1_8W( B, C, D, E, A, F8W_2, 13, in[ 1], 2 );
ROUND1_8W( A, B, C, D, E, F8W_2, 11, in[10], 2 );
ROUND1_8W( E, A, B, C, D, F8W_2, 9, in[ 6], 2 );
ROUND1_8W( D, E, A, B, C, F8W_2, 7, in[15], 2 );
ROUND1_8W( C, D, E, A, B, F8W_2, 15, in[ 3], 2 );
ROUND1_8W( B, C, D, E, A, F8W_2, 7, in[12], 2 );
ROUND1_8W( A, B, C, D, E, F8W_2, 12, in[ 0], 2 );
ROUND1_8W( E, A, B, C, D, F8W_2, 15, in[ 9], 2 );
ROUND1_8W( D, E, A, B, C, F8W_2, 9, in[ 5], 2 );
ROUND1_8W( C, D, E, A, B, F8W_2, 11, in[ 2], 2 );
ROUND1_8W( B, C, D, E, A, F8W_2, 7, in[14], 2 );
ROUND1_8W( A, B, C, D, E, F8W_2, 13, in[11], 2 );
ROUND1_8W( E, A, B, C, D, F8W_2, 12, in[ 8], 2 );
// Left line, group 3 (F3).
ROUND1_8W( D, E, A, B, C, F8W_3, 11, in[ 3], 3 );
ROUND1_8W( C, D, E, A, B, F8W_3, 13, in[10], 3 );
ROUND1_8W( B, C, D, E, A, F8W_3, 6, in[14], 3 );
ROUND1_8W( A, B, C, D, E, F8W_3, 7, in[ 4], 3 );
ROUND1_8W( E, A, B, C, D, F8W_3, 14, in[ 9], 3 );
ROUND1_8W( D, E, A, B, C, F8W_3, 9, in[15], 3 );
ROUND1_8W( C, D, E, A, B, F8W_3, 13, in[ 8], 3 );
ROUND1_8W( B, C, D, E, A, F8W_3, 15, in[ 1], 3 );
ROUND1_8W( A, B, C, D, E, F8W_3, 14, in[ 2], 3 );
ROUND1_8W( E, A, B, C, D, F8W_3, 8, in[ 7], 3 );
ROUND1_8W( D, E, A, B, C, F8W_3, 13, in[ 0], 3 );
ROUND1_8W( C, D, E, A, B, F8W_3, 6, in[ 6], 3 );
ROUND1_8W( B, C, D, E, A, F8W_3, 5, in[13], 3 );
ROUND1_8W( A, B, C, D, E, F8W_3, 12, in[11], 3 );
ROUND1_8W( E, A, B, C, D, F8W_3, 7, in[ 5], 3 );
ROUND1_8W( D, E, A, B, C, F8W_3, 5, in[12], 3 );
// Left line, group 4 (F4).
ROUND1_8W( C, D, E, A, B, F8W_4, 11, in[ 1], 4 );
ROUND1_8W( B, C, D, E, A, F8W_4, 12, in[ 9], 4 );
ROUND1_8W( A, B, C, D, E, F8W_4, 14, in[11], 4 );
ROUND1_8W( E, A, B, C, D, F8W_4, 15, in[10], 4 );
ROUND1_8W( D, E, A, B, C, F8W_4, 14, in[ 0], 4 );
ROUND1_8W( C, D, E, A, B, F8W_4, 15, in[ 8], 4 );
ROUND1_8W( B, C, D, E, A, F8W_4, 9, in[12], 4 );
ROUND1_8W( A, B, C, D, E, F8W_4, 8, in[ 4], 4 );
ROUND1_8W( E, A, B, C, D, F8W_4, 9, in[13], 4 );
ROUND1_8W( D, E, A, B, C, F8W_4, 14, in[ 3], 4 );
ROUND1_8W( C, D, E, A, B, F8W_4, 5, in[ 7], 4 );
ROUND1_8W( B, C, D, E, A, F8W_4, 6, in[15], 4 );
ROUND1_8W( A, B, C, D, E, F8W_4, 8, in[14], 4 );
ROUND1_8W( E, A, B, C, D, F8W_4, 6, in[ 5], 4 );
ROUND1_8W( D, E, A, B, C, F8W_4, 5, in[ 6], 4 );
ROUND1_8W( C, D, E, A, B, F8W_4, 12, in[ 2], 4 );
// Left line, group 5 (F5).
ROUND1_8W( B, C, D, E, A, F8W_5, 9, in[ 4], 5 );
ROUND1_8W( A, B, C, D, E, F8W_5, 15, in[ 0], 5 );
ROUND1_8W( E, A, B, C, D, F8W_5, 5, in[ 5], 5 );
ROUND1_8W( D, E, A, B, C, F8W_5, 11, in[ 9], 5 );
ROUND1_8W( C, D, E, A, B, F8W_5, 6, in[ 7], 5 );
ROUND1_8W( B, C, D, E, A, F8W_5, 8, in[12], 5 );
ROUND1_8W( A, B, C, D, E, F8W_5, 13, in[ 2], 5 );
ROUND1_8W( E, A, B, C, D, F8W_5, 12, in[10], 5 );
ROUND1_8W( D, E, A, B, C, F8W_5, 5, in[14], 5 );
ROUND1_8W( C, D, E, A, B, F8W_5, 12, in[ 1], 5 );
ROUND1_8W( B, C, D, E, A, F8W_5, 13, in[ 3], 5 );
ROUND1_8W( A, B, C, D, E, F8W_5, 14, in[ 8], 5 );
ROUND1_8W( E, A, B, C, D, F8W_5, 11, in[11], 5 );
ROUND1_8W( D, E, A, B, C, F8W_5, 8, in[ 6], 5 );
ROUND1_8W( C, D, E, A, B, F8W_5, 5, in[15], 5 );
ROUND1_8W( B, C, D, E, A, F8W_5, 6, in[13], 5 );
// Right line, group 1 (F5 — selection functions run in reverse order).
ROUND2_8W( A, B, C, D, E, F8W_5, 8, in[ 5], 1 );
ROUND2_8W( E, A, B, C, D, F8W_5, 9, in[14], 1 );
ROUND2_8W( D, E, A, B, C, F8W_5, 9, in[ 7], 1 );
ROUND2_8W( C, D, E, A, B, F8W_5, 11, in[ 0], 1 );
ROUND2_8W( B, C, D, E, A, F8W_5, 13, in[ 9], 1 );
ROUND2_8W( A, B, C, D, E, F8W_5, 15, in[ 2], 1 );
ROUND2_8W( E, A, B, C, D, F8W_5, 15, in[11], 1 );
ROUND2_8W( D, E, A, B, C, F8W_5, 5, in[ 4], 1 );
ROUND2_8W( C, D, E, A, B, F8W_5, 7, in[13], 1 );
ROUND2_8W( B, C, D, E, A, F8W_5, 7, in[ 6], 1 );
ROUND2_8W( A, B, C, D, E, F8W_5, 8, in[15], 1 );
ROUND2_8W( E, A, B, C, D, F8W_5, 11, in[ 8], 1 );
ROUND2_8W( D, E, A, B, C, F8W_5, 14, in[ 1], 1 );
ROUND2_8W( C, D, E, A, B, F8W_5, 14, in[10], 1 );
ROUND2_8W( B, C, D, E, A, F8W_5, 12, in[ 3], 1 );
ROUND2_8W( A, B, C, D, E, F8W_5, 6, in[12], 1 );
// Right line, group 2 (F4).
ROUND2_8W( E, A, B, C, D, F8W_4, 9, in[ 6], 2 );
ROUND2_8W( D, E, A, B, C, F8W_4, 13, in[11], 2 );
ROUND2_8W( C, D, E, A, B, F8W_4, 15, in[ 3], 2 );
ROUND2_8W( B, C, D, E, A, F8W_4, 7, in[ 7], 2 );
ROUND2_8W( A, B, C, D, E, F8W_4, 12, in[ 0], 2 );
ROUND2_8W( E, A, B, C, D, F8W_4, 8, in[13], 2 );
ROUND2_8W( D, E, A, B, C, F8W_4, 9, in[ 5], 2 );
ROUND2_8W( C, D, E, A, B, F8W_4, 11, in[10], 2 );
ROUND2_8W( B, C, D, E, A, F8W_4, 7, in[14], 2 );
ROUND2_8W( A, B, C, D, E, F8W_4, 7, in[15], 2 );
ROUND2_8W( E, A, B, C, D, F8W_4, 12, in[ 8], 2 );
ROUND2_8W( D, E, A, B, C, F8W_4, 7, in[12], 2 );
ROUND2_8W( C, D, E, A, B, F8W_4, 6, in[ 4], 2 );
ROUND2_8W( B, C, D, E, A, F8W_4, 15, in[ 9], 2 );
ROUND2_8W( A, B, C, D, E, F8W_4, 13, in[ 1], 2 );
ROUND2_8W( E, A, B, C, D, F8W_4, 11, in[ 2], 2 );
// Right line, group 3 (F3).
ROUND2_8W( D, E, A, B, C, F8W_3, 9, in[15], 3 );
ROUND2_8W( C, D, E, A, B, F8W_3, 7, in[ 5], 3 );
ROUND2_8W( B, C, D, E, A, F8W_3, 15, in[ 1], 3 );
ROUND2_8W( A, B, C, D, E, F8W_3, 11, in[ 3], 3 );
ROUND2_8W( E, A, B, C, D, F8W_3, 8, in[ 7], 3 );
ROUND2_8W( D, E, A, B, C, F8W_3, 6, in[14], 3 );
ROUND2_8W( C, D, E, A, B, F8W_3, 6, in[ 6], 3 );
ROUND2_8W( B, C, D, E, A, F8W_3, 14, in[ 9], 3 );
ROUND2_8W( A, B, C, D, E, F8W_3, 12, in[11], 3 );
ROUND2_8W( E, A, B, C, D, F8W_3, 13, in[ 8], 3 );
ROUND2_8W( D, E, A, B, C, F8W_3, 5, in[12], 3 );
ROUND2_8W( C, D, E, A, B, F8W_3, 14, in[ 2], 3 );
ROUND2_8W( B, C, D, E, A, F8W_3, 13, in[10], 3 );
ROUND2_8W( A, B, C, D, E, F8W_3, 13, in[ 0], 3 );
ROUND2_8W( E, A, B, C, D, F8W_3, 7, in[ 4], 3 );
ROUND2_8W( D, E, A, B, C, F8W_3, 5, in[13], 3 );
// Right line, group 4 (F2).
ROUND2_8W( C, D, E, A, B, F8W_2, 15, in[ 8], 4 );
ROUND2_8W( B, C, D, E, A, F8W_2, 5, in[ 6], 4 );
ROUND2_8W( A, B, C, D, E, F8W_2, 8, in[ 4], 4 );
ROUND2_8W( E, A, B, C, D, F8W_2, 11, in[ 1], 4 );
ROUND2_8W( D, E, A, B, C, F8W_2, 14, in[ 3], 4 );
ROUND2_8W( C, D, E, A, B, F8W_2, 14, in[11], 4 );
ROUND2_8W( B, C, D, E, A, F8W_2, 6, in[15], 4 );
ROUND2_8W( A, B, C, D, E, F8W_2, 14, in[ 0], 4 );
ROUND2_8W( E, A, B, C, D, F8W_2, 6, in[ 5], 4 );
ROUND2_8W( D, E, A, B, C, F8W_2, 9, in[12], 4 );
ROUND2_8W( C, D, E, A, B, F8W_2, 12, in[ 2], 4 );
ROUND2_8W( B, C, D, E, A, F8W_2, 9, in[13], 4 );
ROUND2_8W( A, B, C, D, E, F8W_2, 12, in[ 9], 4 );
ROUND2_8W( E, A, B, C, D, F8W_2, 5, in[ 7], 4 );
ROUND2_8W( D, E, A, B, C, F8W_2, 15, in[10], 4 );
ROUND2_8W( C, D, E, A, B, F8W_2, 8, in[14], 4 );
// Right line, group 5 (F1).
ROUND2_8W( B, C, D, E, A, F8W_1, 8, in[12], 5 );
ROUND2_8W( A, B, C, D, E, F8W_1, 5, in[15], 5 );
ROUND2_8W( E, A, B, C, D, F8W_1, 12, in[10], 5 );
ROUND2_8W( D, E, A, B, C, F8W_1, 9, in[ 4], 5 );
ROUND2_8W( C, D, E, A, B, F8W_1, 12, in[ 1], 5 );
ROUND2_8W( B, C, D, E, A, F8W_1, 5, in[ 5], 5 );
ROUND2_8W( A, B, C, D, E, F8W_1, 14, in[ 8], 5 );
ROUND2_8W( E, A, B, C, D, F8W_1, 6, in[ 7], 5 );
ROUND2_8W( D, E, A, B, C, F8W_1, 8, in[ 6], 5 );
ROUND2_8W( C, D, E, A, B, F8W_1, 13, in[ 2], 5 );
ROUND2_8W( B, C, D, E, A, F8W_1, 6, in[13], 5 );
ROUND2_8W( A, B, C, D, E, F8W_1, 5, in[14], 5 );
ROUND2_8W( E, A, B, C, D, F8W_1, 15, in[ 0], 5 );
ROUND2_8W( D, E, A, B, C, F8W_1, 13, in[ 3], 5 );
ROUND2_8W( C, D, E, A, B, F8W_1, 11, in[ 9], 5 );
ROUND2_8W( B, C, D, E, A, F8W_1, 11, in[11], 5 );
// Combine both lines with the rotated chaining-value update; tmp
// holds the new h[0] until the old h values are all consumed.
tmp = _mm256_add_epi32( _mm256_add_epi32( h[1], C1 ), D2 );
h[1] = _mm256_add_epi32( _mm256_add_epi32( h[2], D1 ), E2 );
h[2] = _mm256_add_epi32( _mm256_add_epi32( h[3], E1 ), A2 );
h[3] = _mm256_add_epi32( _mm256_add_epi32( h[4], A1 ), B2 );
h[4] = _mm256_add_epi32( _mm256_add_epi32( h[0], B1 ), C2 );
h[0] = tmp;
}
// Broadcast the standard RIPEMD-160 initial value into all 8 lanes and
// reset the processed-byte counters.
void ripemd160_8way_init( ripemd160_8way_context *sc )
{
   for ( int i = 0; i < 5; i++ )
      sc->val[i] = _mm256_set1_epi32( IV[i] );
   sc->count_high = sc->count_low = 0;
}
// Absorb len bytes (per lane) of 8-way interleaved message data.
// Input is buffered into 64-byte blocks; each full block is compressed
// with ripemd160_8way_round. The running byte count is tracked in
// count_low/count_high for the final length padding.
void ripemd160_8way( ripemd160_8way_context *sc, const void *data, size_t len )
{
   __m256i *src = (__m256i*)data;
   const int block_size = 64;
   size_t pos = (unsigned)sc->count_low & (block_size - 1U);

   while ( len > 0 )
   {
      // Copy as much as fits into the current block; each __m256i
      // element carries 4 bytes of every lane's message.
      size_t take = block_size - pos;
      if ( take > len )
         take = len;
      memcpy_256( sc->buf + (pos>>2), src, take>>2 );
      src += take>>2;
      pos += take;
      len -= take;

      if ( pos == block_size )
      {
         ripemd160_8way_round( sc );
         pos = 0;
      }

      // 64-bit byte counter split across count_low / count_high.
      const uint32_t before = sc->count_low;
      sc->count_low = before + (uint32_t)take;
      if ( sc->count_low < before )
         sc->count_high++;
   }
}
// Finalise the hash: append the 0x80 terminator, pad with zeros, append
// the 64-bit bit-length, run the last compression(s), and emit the five
// 32-bit state words per lane to dst.
void ripemd160_8way_close( ripemd160_8way_context *sc, void *dst )
{
   const int block_size = 64;
   const int pad = block_size - 8;   // length field occupies last 8 bytes
   unsigned pos = (unsigned)sc->count_low & ( block_size - 1U );

   // Terminator byte 0x80 (one 32-bit word per lane).
   sc->buf[ pos>>2 ] = _mm256_set1_epi32( 0x80 );
   pos += 4;

   if ( pos > pad )
   {
      // No room for the length field: flush this block, then pad a
      // fresh one.
      memset_zero_256( sc->buf + (pos>>2), (block_size - pos) >> 2 );
      ripemd160_8way_round( sc );
      memset_zero_256( sc->buf, pad>>2 );
   }
   else
      memset_zero_256( sc->buf + (pos>>2), (pad - pos) >> 2 );

   // Message length in bits, little-endian word order.
   const uint32_t len_lo = sc->count_low << 3;
   const uint32_t len_hi = (sc->count_high << 3) | (sc->count_low >> 29);
   sc->buf[ pad>>2 ]       = _mm256_set1_epi32( len_lo );
   sc->buf[ (pad>>2) + 1 ] = _mm256_set1_epi32( len_hi );
   ripemd160_8way_round( sc );

   for ( int i = 0; i < 5; i++ )
      casti_m256i( dst, i ) = sc->val[i];
}
#endif // __AVX2__

View File

@@ -19,5 +19,20 @@ void ripemd160_4way_init( ripemd160_4way_context *sc );
void ripemd160_4way( ripemd160_4way_context *sc, const void *data, size_t len );
void ripemd160_4way_close( ripemd160_4way_context *sc, void *dst );
#endif
#endif
#if defined (__AVX2__)
// RIPEMD-160 computed on 8 interleaved 32-bit lanes in AVX2 registers.
typedef struct
{
__m256i buf[64>>2];   // one 64-byte message block; each element holds the same word of all 8 lanes
__m256i val[5];       // five 32-bit chaining words per lane
uint32_t count_high, count_low;   // per-lane byte count (64-bit, split)
} __attribute__ ((aligned (64))) ripemd160_8way_context;
// Init / absorb / finalise; len and output sizes are per lane.
void ripemd160_8way_init( ripemd160_8way_context *sc );
void ripemd160_8way( ripemd160_8way_context *sc, const void *data, size_t len );
void ripemd160_8way_close( ripemd160_8way_context *sc, void *dst );
#endif // __AVX2__
#endif // __AVX__
#endif // RIPEMD_HASH_4WAY_H__