diff --git a/RELEASE_NOTES b/RELEASE_NOTES index 2ca3180..870df90 100644 --- a/RELEASE_NOTES +++ b/RELEASE_NOTES @@ -38,6 +38,12 @@ supported. Change Log ---------- +v3.9.5.4 + +Fixed sha256q AVX2 poor performance. +Fixed skein2 buffer overflow and restored bswap-interleave optimization. +More restructuring. + v3.9.5.3 Fix crash mining hodl with aes-sse42. @@ -45,7 +51,7 @@ More restructuring and share report tweaks. v3.9.5.2 -Revert bswap-interleave optiization for causing crashes on Windows. +Revert bswap-interleave optimization for causing crashes on Windows. v3.9.5.1 diff --git a/algo/blake/blake-4way.c b/algo/blake/blake-4way.c index a075fa3..8d1372f 100644 --- a/algo/blake/blake-4way.c +++ b/algo/blake/blake-4way.c @@ -27,25 +27,19 @@ int scanhash_blake_4way( struct work *work, uint32_t max_nonce, uint32_t *ptarget = work->target; const uint32_t first_nonce = pdata[19]; uint32_t HTarget = ptarget[7]; - uint32_t _ALIGN(32) edata[20]; + __m128i *noncev = (__m128i*)vdata + 19; // aligned uint32_t n = first_nonce; int thr_id = mythr->id; // thr_id arg is deprecated if (opt_benchmark) HTarget = 0x7f; - // we need big endian data... - swab32_array( edata, pdata, 20 ); - mm128_intrlv_4x32( vdata, edata, edata, edata, edata, 640 ); + mm128_bswap32_intrlv80_4x32( vdata, pdata ); blake256r14_4way_init( &blake_4w_ctx ); blake256r14_4way( &blake_4w_ctx, vdata, 64 ); - uint32_t *noncep = vdata + 76; // 19*4 do { - be32enc( noncep, n ); - be32enc( noncep +1, n+1 ); - be32enc( noncep +2, n+2 ); - be32enc( noncep +3, n+3 ); + *noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) ); blakehash_4way( hash, vdata ); @@ -76,9 +70,9 @@ void blakehash_8way( void *state, const void *input ) memcpy( &ctx, &blake_8w_ctx, sizeof ctx ); blake256r14_8way( &ctx, input + (64<<3), 16 ); blake256r14_8way_close( &ctx, vhash ); - mm256_dintrlv_8x32( state, state+ 32, state+ 64, state+ 96, - state+128, state+160, state+192, state+224, - vhash, 256 ); + _dintrlv_8x32( state, state+ 32, state+ 64, state+ 96, + state+128, state+160, state+192, state+224, + vhash, 256 ); } int scanhash_blake_8way( struct work *work, uint32_t max_nonce, @@ -90,32 +84,21 @@ int scanhash_blake_8way( struct work *work, uint32_t max_nonce, uint32_t *ptarget = work->target; const uint32_t first_nonce = pdata[19]; uint32_t HTarget = ptarget[7]; - uint32_t _ALIGN(32) edata[20]; uint32_t n = first_nonce; + __m256i *noncev = (__m256i*)vdata + 19; // aligned int thr_id = mythr->id; // thr_id arg is deprecated if (opt_benchmark) HTarget = 0x7f; - // we need big endian data... 
- swab32_array( edata, pdata, 20 ); - - mm256_intrlv_8x32( vdata, edata, edata, edata, edata, - edata, edata, edata, edata, 640 ); + mm256_bswap32_intrlv80_8x32( vdata, pdata ); blake256r14_8way_init( &blake_8w_ctx ); blake256r14_8way( &blake_8w_ctx, vdata, 64 ); - uint32_t *noncep = vdata + 152; // 19*8 do { - be32enc( noncep, n ); - be32enc( noncep +1, n+1 ); - be32enc( noncep +2, n+2 ); - be32enc( noncep +3, n+3 ); - be32enc( noncep +4, n+4 ); - be32enc( noncep +5, n+5 ); - be32enc( noncep +6, n+6 ); - be32enc( noncep +7, n+7 ); + *noncev = mm256_bswap_32( _mm256_set_epi32( n+7, n+6, n+5, n+4, + n+3, n+2, n+1, n ) ); pdata[19] = n; blakehash_8way( hash, vdata ); diff --git a/algo/blake/blake2s-4way.c b/algo/blake/blake2s-4way.c index e26b587..9048566 100644 --- a/algo/blake/blake2s-4way.c +++ b/algo/blake/blake2s-4way.c @@ -16,9 +16,9 @@ void blake2s_8way_hash( void *output, const void *input ) blake2s_8way_update( &ctx, input + (64<<3), 16 ); blake2s_8way_final( &ctx, vhash, BLAKE2S_OUTBYTES ); - mm256_dintrlv_8x32( output, output+ 32, output+ 64, output+ 96, - output+128, output+160, output+192, output+224, - vhash, 256 ); + dintrlv_8x32( output, output+ 32, output+ 64, output+ 96, + output+128, output+160, output+192, output+224, + vhash, 256 ); } int scanhash_blake2s_8way( struct work *work, uint32_t max_nonce, @@ -28,28 +28,19 @@ int scanhash_blake2s_8way( struct work *work, uint32_t max_nonce, uint32_t hash[8*8] __attribute__ ((aligned (32))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; - uint32_t _ALIGN(64) edata[20]; const uint32_t Htarg = ptarget[7]; const uint32_t first_nonce = pdata[19]; + __m256i *noncev = (__m256i*)vdata + 19; // aligned uint32_t n = first_nonce; - uint32_t *noncep = vdata + 152; // 19*8 int thr_id = mythr->id; // thr_id arg is deprecated - swab32_array( edata, pdata, 20 ); - mm256_intrlv_8x32( vdata, edata, edata, edata, edata, - edata, edata, edata, edata, 640 ); + mm256_bswap32_intrlv80_8x32( vdata, pdata ); blake2s_8way_init( &blake2s_8w_ctx, BLAKE2S_OUTBYTES ); blake2s_8way_update( &blake2s_8w_ctx, vdata, 64 ); do { - be32enc( noncep, n ); - be32enc( noncep +1, n+1 ); - be32enc( noncep +2, n+2 ); - be32enc( noncep +3, n+3 ); - be32enc( noncep +4, n+4 ); - be32enc( noncep +5, n+5 ); - be32enc( noncep +6, n+6 ); - be32enc( noncep +7, n+7 ); + *noncev = mm256_bswap_32( _mm256_set_epi32( n+7, n+6, n+5, n+4, + n+3, n+2, n+1, n ) ); pdata[19] = n; blake2s_8way_hash( hash, vdata ); @@ -94,23 +85,18 @@ int scanhash_blake2s_4way( struct work *work, uint32_t max_nonce, uint32_t hash[8*4] __attribute__ ((aligned (32))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; - uint32_t _ALIGN(64) edata[20]; const uint32_t Htarg = ptarget[7]; const uint32_t first_nonce = pdata[19]; + __m128i *noncev = (__m128i*)vdata + 19; // aligned uint32_t n = first_nonce; - uint32_t *noncep = vdata + 76; // 19*4 int thr_id = mythr->id; // thr_id arg is deprecated - swab32_array( edata, pdata, 20 ); - mm128_intrlv_4x32( vdata, edata, edata, edata, edata, 640 ); + mm128_bswap32_intrlv80_4x32( vdata, pdata ); blake2s_4way_init( &blake2s_4w_ctx, BLAKE2S_OUTBYTES ); blake2s_4way_update( &blake2s_4w_ctx, vdata, 64 ); do { - be32enc( noncep, n ); - be32enc( noncep +1, n+1 ); - be32enc( noncep +2, n+2 ); - be32enc( noncep +3, n+3 ); + *noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) ); pdata[19] = n; blake2s_4way_hash( hash, vdata ); diff --git a/algo/blake/blakecoin-4way.c b/algo/blake/blakecoin-4way.c index 4fbd91e..898cbe3 100644 --- 
a/algo/blake/blakecoin-4way.c +++ b/algo/blake/blakecoin-4way.c @@ -29,23 +29,18 @@ int scanhash_blakecoin_4way( struct work *work, uint32_t max_nonce, uint32_t *ptarget = work->target; const uint32_t first_nonce = pdata[19]; uint32_t HTarget = ptarget[7]; - uint32_t _ALIGN(32) edata[20]; uint32_t n = first_nonce; + __m128i *noncev = (__m128i*)vdata + 19; // aligned int thr_id = mythr->id; // thr_id arg is deprecated if ( opt_benchmark ) HTarget = 0x7f; - swab32_array( edata, pdata, 20 ); - mm128_intrlv_4x32( vdata, edata, edata, edata, edata, 640 ); + mm128_bswap32_intrlv80_4x32( vdata, pdata ); blake256r8_4way_init( &blakecoin_4w_ctx ); blake256r8_4way( &blakecoin_4w_ctx, vdata, 64 ); - uint32_t *noncep = vdata + 76; // 19*4 do { - be32enc( noncep, n ); - be32enc( noncep +1, n+1 ); - be32enc( noncep +2, n+2 ); - be32enc( noncep +3, n+3 ); + *noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) ); pdata[19] = n; blakecoin_4way_hash( hash, vdata ); @@ -79,9 +74,8 @@ void blakecoin_8way_hash( void *state, const void *input ) blake256r8_8way( &ctx, input + (64<<3), 16 ); blake256r8_8way_close( &ctx, vhash ); - mm256_dintrlv_8x32( state, state+ 32, state+ 64, state+ 96, - state+128, state+160, state+192, state+224, - vhash, 256 ); + dintrlv_8x32( state, state+ 32, state+ 64, state+ 96, state+128, + state+160, state+192, state+224, vhash, 256 ); } int scanhash_blakecoin_8way( struct work *work, uint32_t max_nonce, @@ -93,29 +87,19 @@ int scanhash_blakecoin_8way( struct work *work, uint32_t max_nonce, uint32_t *ptarget = work->target; const uint32_t first_nonce = pdata[19]; uint32_t HTarget = ptarget[7]; - uint32_t _ALIGN(32) edata[20]; uint32_t n = first_nonce; - uint32_t *noncep = vdata + 152; // 19*8 + __m256i *noncev = (__m256i*)vdata + 19; // aligned int thr_id = mythr->id; // thr_id arg is deprecated if ( opt_benchmark ) HTarget = 0x7f; - // we need big endian data... 
- swab32_array( edata, pdata, 20 ); - mm256_intrlv_8x32( vdata, edata, edata, edata, edata, - edata, edata, edata, edata, 640 ); + mm256_bswap32_intrlv80_8x32( vdata, pdata ); blake256r8_8way_init( &blakecoin_8w_ctx ); blake256r8_8way( &blakecoin_8w_ctx, vdata, 64 ); do { - be32enc( noncep, n ); - be32enc( noncep +1, n+1 ); - be32enc( noncep +2, n+2 ); - be32enc( noncep +3, n+3 ); - be32enc( noncep +4, n+4 ); - be32enc( noncep +5, n+5 ); - be32enc( noncep +6, n+6 ); - be32enc( noncep +7, n+7 ); + *noncev = mm256_bswap_32( _mm256_set_epi32( n+7, n+6, n+5, n+4, + n+3, n+2, n+1, n ) ); pdata[19] = n; blakecoin_8way_hash( hash, vdata ); diff --git a/algo/blake/pentablake-4way.c b/algo/blake/pentablake-4way.c index 7033d4a..1b45afa 100644 --- a/algo/blake/pentablake-4way.c +++ b/algo/blake/pentablake-4way.c @@ -10,13 +10,8 @@ #include "blake-hash-4way.h" #include "sph_blake.h" -//#define DEBUG_ALGO - extern void pentablakehash_4way( void *output, const void *input ) { -// unsigned char _ALIGN(32) hash[128]; -// // same as uint32_t hashA[16], hashB[16]; -// #define hashB hash+64 uint64_t hash0[8] __attribute__ ((aligned (64))); uint64_t hash1[8] __attribute__ ((aligned (64))); @@ -29,22 +24,7 @@ extern void pentablakehash_4way( void *output, const void *input ) blake512_4way_init( &ctx ); blake512_4way( &ctx, input, 80 ); blake512_4way_close( &ctx, vhash ); -/* -uint64_t sin0[10], sin1[10], sin2[10], sin3[10]; -mm256_deinterleave_4x64( sin0, sin1, sin2, sin3, input, 640 ); -sph_blake512_context ctx2_blake; -sph_blake512_init(&ctx2_blake); -sph_blake512(&ctx2_blake, sin0, 80); -sph_blake512_close(&ctx2_blake, (void*) hash); -mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); -uint64_t* hash64 = (uint64_t*)hash; -for( int i = 0; i < 8; i++ ) -{ - if ( hash0[i] != hash64[i] ) - printf("hash mismatch %u\n",i); -} -*/ blake512_4way_init( &ctx ); blake512_4way( &ctx, vhash, 64 ); blake512_4way_close( &ctx, vhash ); @@ -61,42 +41,10 @@ for( int i = 0; i < 8; i++ ) blake512_4way( &ctx, vhash, 64 ); blake512_4way_close( &ctx, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); memcpy( output, hash0, 32 ); memcpy( output+32, hash1, 32 ); memcpy( output+64, hash2, 32 ); memcpy( output+96, hash3, 32 ); - -/* - uint64_t sin0[10] __attribute__ ((aligned (64))); - uint64_t sin1[10] __attribute__ ((aligned (64))); - uint64_t sin2[10] __attribute__ ((aligned (64))); - uint64_t sin3[10] __attribute__ ((aligned (64))); - - sph_blake512_context ctx_blake; - - sph_blake512_init(&ctx_blake); - sph_blake512(&ctx_blake, input, 80); - sph_blake512_close(&ctx_blake, hash); - - sph_blake512_init(&ctx_blake); - sph_blake512(&ctx_blake, hash, 64); - sph_blake512_close(&ctx_blake, hash); - - sph_blake512_init(&ctx_blake); - sph_blake512(&ctx_blake, hash, 64); - sph_blake512_close(&ctx_blake, hash); - - sph_blake512_init(&ctx_blake); - sph_blake512(&ctx_blake, hash, 64); - sph_blake512_close(&ctx_blake, hash); - - sph_blake512_init(&ctx_blake); - sph_blake512(&ctx_blake, hash, 64); - sph_blake512_close(&ctx_blake, hash); - - memcpy(output, hash, 32); -*/ } int scanhash_pentablake_4way( struct work *work, @@ -137,7 +85,7 @@ int scanhash_pentablake_4way( struct work *work, swab32_array( endiandata, pdata, 20 ); uint64_t *edata = (uint64_t*)endiandata; - mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); + intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); for ( int m=0; m < 6; m++ ) { diff --git a/algo/cryptonight/cryptolight.c b/algo/cryptonight/cryptolight.c 
index 18e2940..6295be5 100644 --- a/algo/cryptonight/cryptolight.c +++ b/algo/cryptonight/cryptolight.c @@ -242,6 +242,8 @@ void cryptolight_hash(void* output, const void* input, int len) { free(ctx); } +#if defined(__AES__) + static void cryptolight_hash_ctx_aes_ni(void* output, const void* input, int len, struct cryptonight_ctx* ctx) { @@ -312,6 +314,8 @@ static void cryptolight_hash_ctx_aes_ni(void* output, const void* input, oaes_free((OAES_CTX **) &ctx->aes_ctx); } +#endif + int scanhash_cryptolight( struct work *work, uint32_t max_nonce, uint64_t *hashes_done, struct thr_info *mythr) { diff --git a/algo/groestl/myrgr-4way.c b/algo/groestl/myrgr-4way.c index ecf6a97..b7ba0eb 100644 --- a/algo/groestl/myrgr-4way.c +++ b/algo/groestl/myrgr-4way.c @@ -47,10 +47,6 @@ void myriad_4way_hash( void *output, const void *input ) sha256_4way( &ctx.sha, vhash, 64 ); sha256_4way_close( &ctx.sha, output ); - -// sha256_4way_close( &ctx.sha, vhash ); -// mm128_dintrlv_4x32( output, output+32, output+64, output+96, -// vhash, 256 ); } int scanhash_myriad_4way( struct work *work, uint32_t max_nonce, @@ -68,18 +64,10 @@ int scanhash_myriad_4way( struct work *work, uint32_t max_nonce, __m128i *noncev = (__m128i*)vdata + 19; // aligned int thr_id = mythr->id; // thr_id arg is deprecated -/* - uint32_t *pdata = work->data; - uint32_t *ptarget = work->target; - - uint32_t _ALIGN(64) endiandata[20]; - const uint32_t first_nonce = pdata[19]; - uint32_t nonce = first_nonce; -*/ if ( opt_benchmark ) ( (uint32_t*)ptarget )[7] = 0x0000ff; - mm128_bswap_intrlv80_4x32( vdata, pdata ); + mm128_bswap32_intrlv80_4x32( vdata, pdata ); do { *noncev = mm128_bswap_32( _mm_set_epi32( n+3,n+2,n+1,n ) ); diff --git a/algo/jh/jha-4way.c b/algo/jh/jha-4way.c index 8177cc9..2c76a33 100644 --- a/algo/jh/jha-4way.c +++ b/algo/jh/jha-4way.c @@ -3,7 +3,6 @@ #include #include #include -//#include "avxdefs.h" #if defined(JHA_4WAY) @@ -13,9 +12,6 @@ #include "algo/keccak/keccak-hash-4way.h" #include "algo/groestl/aes_ni/hash-groestl.h" -//static __thread keccak512_4way_context jha_kec_mid -// __attribute__ ((aligned (64))); - void jha_hash_4way( void *out, const void *input ) { uint64_t hash0[8] __attribute__ ((aligned (64))); @@ -46,7 +42,7 @@ void jha_hash_4way( void *out, const void *input ) vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], _mm256_set1_epi64x( 1 ) ), m256_zero ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); init_groestl( &ctx_groestl, 64 ); update_and_final_groestl( &ctx_groestl, (char*)hash0, (char*)hash0, 512 ); @@ -59,7 +55,7 @@ void jha_hash_4way( void *out, const void *input ) init_groestl( &ctx_groestl, 64 ); update_and_final_groestl( &ctx_groestl, (char*)hash3, (char*)hash3, 512 ); - mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 ); skein512_4way_init( &ctx_skein ); skein512_4way( &ctx_skein, vhash, 64 ); @@ -79,8 +75,6 @@ void jha_hash_4way( void *out, const void *input ) for ( int i = 0; i < 8; i++ ) casti_m256i( out, i ) = _mm256_blendv_epi8( vhA[i], vhB[i], vh_mask ); } - -// mm256_dintrlv_4x64( out, out+32, out+64, out+96, vhash, 256 ); } int scanhash_jha_4way( struct work *work, uint32_t max_nonce, @@ -88,7 +82,6 @@ int scanhash_jha_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[8*4] __attribute__ ((aligned (64))); uint32_t vdata[20*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t *hash7 = 
&(hash[25]); uint32_t lane_hash[8] __attribute__ ((aligned (32))); uint32_t *pdata = work->data; @@ -116,11 +109,7 @@ int scanhash_jha_4way( struct work *work, uint32_t max_nonce, 0 }; - for ( int i=0; i < 19; i++ ) - be32enc( &edata[i], pdata[i] ); - - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); for ( int m = 0; m < 6; m++ ) { @@ -130,26 +119,17 @@ int scanhash_jha_4way( struct work *work, uint32_t max_nonce, do { *noncev = mm256_intrlv_blend_32( mm256_bswap_32( _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev ); -// be32enc( noncep, n ); -// be32enc( noncep+2, n+1 ); -// be32enc( noncep+4, n+2 ); -// be32enc( noncep+6, n+3 ); jha_hash_4way( hash, vdata ); pdata[19] = n; -// for ( int i = 0; i < 4; i++ ) -// if ( ( !( (hash+(i<<3))[7] & mask ) == 0 ) -// && fulltest( hash+(i<<3), ptarget ) ) for ( int i = 0; i < 4; i++ ) if ( !( (hash7[i] & mask ) == 0 ) ) { - mm256_extr_lane_4x64( lane_hash, hash, i, 256 ); + extr_lane_4x64( lane_hash, hash, i, 256 ); if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark ) { pdata[19] = n+i; submit_lane_solution( work, lane_hash, mythr, i ); -// nonces[ num_found++ ] = n+i; -// work_set_target_ratio( work, hash+(i<<3) ); } } n += 4; diff --git a/algo/keccak/keccak-4way.c b/algo/keccak/keccak-4way.c index 5ab6dda..5d2c87d 100644 --- a/algo/keccak/keccak-4way.c +++ b/algo/keccak/keccak-4way.c @@ -20,8 +20,7 @@ int scanhash_keccak_4way( struct work *work, uint32_t max_nonce, uint64_t *hashes_done, struct thr_info *mythr ) { uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t hash[8*4] __attribute__ ((aligned (32))); - uint32_t edata[20] __attribute__ ((aligned (64))); + uint32_t hash[16*4] __attribute__ ((aligned (32))); uint32_t lane_hash[8] __attribute__ ((aligned (32))); uint32_t *hash7 = &(hash[25]); // 3*8+1 uint32_t *pdata = work->data; @@ -32,9 +31,7 @@ int scanhash_keccak_4way( struct work *work, uint32_t max_nonce, // const uint32_t Htarg = ptarget[7]; int thr_id = mythr->id; // thr_id arg is deprecated - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); do { *noncev = mm256_intrlv_blend_32( mm256_bswap_32( _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev ); @@ -44,7 +41,7 @@ int scanhash_keccak_4way( struct work *work, uint32_t max_nonce, for ( int lane = 0; lane < 4; lane++ ) if ( ( ( hash7[ lane<<1 ] & 0xFFFFFF00 ) == 0 ) ) { - mm256_extr_lane_4x64( lane_hash, hash, lane, 256 ); + extr_lane_4x64( lane_hash, hash, lane, 256 ); if ( fulltest( lane_hash, ptarget ) ) { pdata[19] = n + lane; diff --git a/algo/lyra2/allium-4way.c b/algo/lyra2/allium-4way.c index 56d6216..d1471a5 100644 --- a/algo/lyra2/allium-4way.c +++ b/algo/lyra2/allium-4way.c @@ -44,11 +44,11 @@ void allium_4way_hash( void *state, const void *input ) blake256_4way( &ctx.blake, input + (64<<2), 16 ); blake256_4way_close( &ctx.blake, vhash32 ); - mm256_rintrlv_4x32_4x64( vhash64, vhash32, 256 ); + rintrlv_4x32_4x64( vhash64, vhash32, 256 ); keccak256_4way( &ctx.keccak, vhash64, 32 ); keccak256_4way_close( &ctx.keccak, vhash64 ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 ); LYRA2RE( hash0, 32, hash0, 32, hash0, 32, 1, 8, 8 ); LYRA2RE( hash1, 32, hash1, 32, hash1, 32, 1, 8, 8 ); @@ -68,12 +68,12 @@ void allium_4way_hash( 
void *state, const void *input ) LYRA2RE( hash2, 32, hash2, 32, hash2, 32, 1, 8, 8 ); LYRA2RE( hash3, 32, hash3, 32, hash3, 32, 1, 8, 8 ); - mm256_intrlv_4x64( vhash64, hash0, hash1, hash2, hash3, 256 ); + intrlv_4x64( vhash64, hash0, hash1, hash2, hash3, 256 ); skein256_4way( &ctx.skein, vhash64, 32 ); skein256_4way_close( &ctx.skein, vhash64 ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 ); update_and_final_groestl256( &ctx.groestl, state, hash0, 256 ); memcpy( &ctx.groestl, &allium_4way_ctx.groestl, @@ -103,7 +103,7 @@ int scanhash_allium_4way( struct work *work, uint32_t max_nonce, if ( opt_benchmark ) ( (uint32_t*)ptarget )[7] = 0x0000ff; - mm128_bswap_intrlv80_4x32( vdata, pdata ); + mm128_bswap32_intrlv80_4x32( vdata, pdata ); blake256_4way_init( &allium_4way_ctx.blake ); blake256_4way( &allium_4way_ctx.blake, vdata, 64 ); diff --git a/algo/lyra2/lyra2h-4way.c b/algo/lyra2/lyra2h-4way.c index 137b614..a76e68c 100644 --- a/algo/lyra2/lyra2h-4way.c +++ b/algo/lyra2/lyra2h-4way.c @@ -64,7 +64,7 @@ int scanhash_lyra2h_4way( struct work *work, uint32_t max_nonce, if ( opt_benchmark ) ptarget[7] = 0x0000ff; - mm128_bswap_intrlv80_4x32( vdata, pdata ); + mm128_bswap32_intrlv80_4x32( vdata, pdata ); lyra2h_4way_midstate( vdata ); do { diff --git a/algo/lyra2/lyra2rev2-4way.c b/algo/lyra2/lyra2rev2-4way.c index e100a06..9832fb1 100644 --- a/algo/lyra2/lyra2rev2-4way.c +++ b/algo/lyra2/lyra2rev2-4way.c @@ -42,12 +42,12 @@ void lyra2rev2_4way_hash( void *state, const void *input ) blake256_4way( &ctx.blake, input + (64<<2), 16 ); blake256_4way_close( &ctx.blake, vhash ); - mm256_rintrlv_4x32_4x64( vhash64, vhash, 256 ); + rintrlv_4x32_4x64( vhash64, vhash, 256 ); keccak256_4way( &ctx.keccak, vhash64, 32 ); keccak256_4way_close( &ctx.keccak, vhash64 ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash0, (const byte*) hash0, 32 ); cubehashInit( &ctx.cube, 256, 16, 32 ); @@ -62,12 +62,12 @@ void lyra2rev2_4way_hash( void *state, const void *input ) LYRA2REV2( l2v2_wholeMatrix, hash2, 32, hash2, 32, hash2, 32, 1, 4, 4 ); LYRA2REV2( l2v2_wholeMatrix, hash3, 32, hash3, 32, hash3, 32, 1, 4, 4 ); - mm256_intrlv_4x64( vhash64, hash0, hash1, hash2, hash3, 256 ); + intrlv_4x64( vhash64, hash0, hash1, hash2, hash3, 256 ); skein256_4way( &ctx.skein, vhash64, 32 ); skein256_4way_close( &ctx.skein, vhash64 ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 ); cubehashInit( &ctx.cube, 256, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash0, (const byte*) hash0, 32 ); @@ -102,7 +102,7 @@ int scanhash_lyra2rev2_4way( struct work *work, uint32_t max_nonce, if ( opt_benchmark ) ( (uint32_t*)ptarget )[7] = 0x0000ff; - mm128_bswap_intrlv80_4x32( vdata, pdata ); + mm128_bswap32_intrlv80_4x32( vdata, pdata ); blake256_4way_init( &l2v2_4way_ctx.blake ); blake256_4way( &l2v2_4way_ctx.blake, vdata, 64 ); diff --git a/algo/lyra2/lyra2rev3-4way.c b/algo/lyra2/lyra2rev3-4way.c index 1246288..389aebf 100644 --- a/algo/lyra2/lyra2rev3-4way.c +++ b/algo/lyra2/lyra2rev3-4way.c @@ -41,7 +41,7 @@ void lyra2rev3_8way_hash( void *state, const void *input ) blake256_8way( &ctx.blake, input, 80 ); blake256_8way_close( &ctx.blake, vhash ); - mm256_dintrlv_8x32( hash0, hash1, hash2, hash3, + dintrlv_8x32( hash0, hash1, hash2, hash3, hash4, hash5, hash6, 
hash7, vhash, 256 ); LYRA2REV3( l2v3_wholeMatrix, hash0, 32, hash0, 32, hash0, 32, 1, 4, 4 ); @@ -78,7 +78,7 @@ void lyra2rev3_8way_hash( void *state, const void *input ) LYRA2REV3( l2v3_wholeMatrix, hash6, 32, hash6, 32, hash6, 32, 1, 4, 4 ); LYRA2REV3( l2v3_wholeMatrix, hash7, 32, hash7, 32, hash7, 32, 1, 4, 4 ); - mm256_intrlv_8x32( vhash, hash0, hash1, hash2, hash3, + intrlv_8x32( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7, 256 ); bmw256_8way( &ctx.bmw, vhash, 32 ); @@ -91,7 +91,6 @@ int scanhash_lyra2rev3_8way( struct work *work, uint32_t max_nonce, { uint32_t hash[8*8] __attribute__ ((aligned (64))); uint32_t vdata[20*8] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t *hash7 = &(hash[7<<3]); uint32_t lane_hash[8] __attribute__ ((aligned (32))); uint32_t *pdata = work->data; @@ -105,10 +104,7 @@ int scanhash_lyra2rev3_8way( struct work *work, uint32_t max_nonce, if ( opt_benchmark ) ( (uint32_t*)ptarget )[7] = 0x0000ff; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_8x32( vdata, edata, edata, edata, edata, - edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_8x32( vdata, pdata ); + mm256_bswap32_intrlv80_8x32( vdata, pdata ); do { *noncev = mm256_bswap_32( _mm256_set_epi32( n+7, n+6, n+5, n+4, @@ -119,7 +115,7 @@ int scanhash_lyra2rev3_8way( struct work *work, uint32_t max_nonce, for ( int lane = 0; lane < 8; lane++ ) if ( hash7[lane] <= Htarg ) { - mm256_extr_lane_8x32( lane_hash, hash, lane, 256 ); + extr_lane_8x32( lane_hash, hash, lane, 256 ); if ( fulltest( lane_hash, ptarget ) && !opt_benchmark ) { pdata[19] = n + lane; @@ -208,7 +204,7 @@ int scanhash_lyra2rev3_4way( struct work *work, uint32_t max_nonce, if ( opt_benchmark ) ( (uint32_t*)ptarget )[7] = 0x0000ff; - mm128_bswap_intrlv80_4x32( vdata, pdata ); + mm128_bswap32_intrlv80_4x32( vdata, pdata ); do { *noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) ); diff --git a/algo/lyra2/lyra2z-4way.c b/algo/lyra2/lyra2z-4way.c index 1123363..cf8ca0c 100644 --- a/algo/lyra2/lyra2z-4way.c +++ b/algo/lyra2/lyra2z-4way.c @@ -60,7 +60,7 @@ int scanhash_lyra2z_4way( struct work *work, uint32_t max_nonce, if ( opt_benchmark ) ptarget[7] = 0x0000ff; - mm128_bswap_intrlv80_4x32( vdata, pdata ); + mm128_bswap32_intrlv80_4x32( vdata, pdata ); lyra2z_4way_midstate( vdata ); do { @@ -119,8 +119,8 @@ void lyra2z_8way_hash( void *state, const void *input ) blake256_8way( &ctx_blake, input + (64*8), 16 ); blake256_8way_close( &ctx_blake, vhash ); - mm256_dintrlv_8x32( hash0, hash1, hash2, hash3, - hash4, hash5, hash6, hash7, vhash, 256 ); + dintrlv_8x32( hash0, hash1, hash2, hash3, + hash4, hash5, hash6, hash7, vhash, 256 ); LYRA2Z( lyra2z_8way_matrix, hash0, 32, hash0, 32, hash0, 32, 8, 8, 8 ); LYRA2Z( lyra2z_8way_matrix, hash1, 32, hash1, 32, hash1, 32, 8, 8, 8 ); @@ -146,7 +146,6 @@ int scanhash_lyra2z_8way( struct work *work, uint32_t max_nonce, { uint32_t hash[8*8] __attribute__ ((aligned (64))); uint32_t vdata[20*8] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; const uint32_t Htarg = ptarget[7]; @@ -158,10 +157,7 @@ int scanhash_lyra2z_8way( struct work *work, uint32_t max_nonce, if ( opt_benchmark ) ptarget[7] = 0x0000ff; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_8x32( vdata, edata, edata, edata, edata, - edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_8x32( vdata, pdata ); + mm256_bswap32_intrlv80_8x32( vdata, pdata ); 
lyra2z_8way_midstate( vdata ); do { diff --git a/algo/lyra2/phi2-4way.c b/algo/lyra2/phi2-4way.c index ff9bff7..1c3f759 100644 --- a/algo/lyra2/phi2-4way.c +++ b/algo/lyra2/phi2-4way.c @@ -69,13 +69,13 @@ void phi2_hash_4way( void *state, const void *input ) LYRA2RE( &hashA[3][0], 32, &hashB[3][0], 32, &hashB[3][0], 32, 1, 8, 8 ); LYRA2RE( &hashA[3][8], 32, &hashB[3][8], 32, &hashB[3][8], 32, 1, 8, 8 ); - mm256_intrlv_4x64( vhash, hashA[0], hashA[1], hashA[2], hashA[3], 512 ); + intrlv_4x64( vhash, hashA[0], hashA[1], hashA[2], hashA[3], 512 ); jh512_4way_init( &ctx.jh ); jh512_4way( &ctx.jh, vhash, 64 ); jh512_4way_close( &ctx.jh, vhash ); - mm256_dintrlv_4x64( hash[0], hash[1], hash[2], hash[3], vhash, 512 ); + dintrlv_4x64( hash[0], hash[1], hash[2], hash[3], vhash, 512 ); if ( hash[0][0] & 1 ) { @@ -141,7 +141,7 @@ void phi2_hash_4way( void *state, const void *input ) (const BitSequence *)hash[3], 512 ); } - mm256_intrlv_4x64( vhash, hash[0], hash[1], hash[2], hash[3], 512 ); + intrlv_4x64( vhash, hash[0], hash[1], hash[2], hash[3], 512 ); skein512_4way_init( &ctx.skein ); skein512_4way( &ctx.skein, vhash, 64 ); @@ -217,7 +217,7 @@ int scanhash_phi2_4way( struct work *work, uint32_t max_nonce, for ( int lane = 0; lane < 4; lane++ ) if ( hash7[ lane<<1 ] < Htarg ) { - mm256_extr_lane_4x64( lane_hash, hash, lane, 256 ); + extr_lane_4x64( lane_hash, hash, lane, 256 ); if ( fulltest( lane_hash, ptarget ) && !opt_benchmark ) { pdata[19] = n + lane; diff --git a/algo/nist5/nist5-4way.c b/algo/nist5/nist5-4way.c index e3f8fce..1d09c72 100644 --- a/algo/nist5/nist5-4way.c +++ b/algo/nist5/nist5-4way.c @@ -12,9 +12,6 @@ #include "algo/keccak/keccak-hash-4way.h" #include "algo/groestl/aes_ni/hash-groestl.h" -// no improvement with midstate -//static __thread blake512_4way_context ctx_mid; - void nist5hash_4way( void *out, const void *input ) { uint64_t hash0[8] __attribute__ ((aligned (64))); @@ -28,14 +25,11 @@ void nist5hash_4way( void *out, const void *input ) skein512_4way_context ctx_skein; keccak512_4way_context ctx_keccak; -// memcpy( &ctx_blake, &ctx_mid, sizeof(ctx_mid) ); -// blake512_4way( &ctx_blake, input + (64<<2), 16 ); - blake512_4way_init( &ctx_blake ); blake512_4way( &ctx_blake, input, 80 ); blake512_4way_close( &ctx_blake, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); init_groestl( &ctx_groestl, 64 ); update_and_final_groestl( &ctx_groestl, (char*)hash0, @@ -50,7 +44,7 @@ void nist5hash_4way( void *out, const void *input ) update_and_final_groestl( &ctx_groestl, (char*)hash3, (const char*)hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); jh512_4way_init( &ctx_jh ); jh512_4way( &ctx_jh, vhash, 64 ); @@ -72,13 +66,12 @@ int scanhash_nist5_4way( struct work *work, uint32_t max_nonce, uint32_t *hash7 = &(hash[25]); uint32_t lane_hash[8] __attribute__ ((aligned (32))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t endiandata[20] __attribute__((aligned(64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; uint32_t n = pdata[19]; const uint32_t first_nonce = pdata[19]; const uint32_t Htarg = ptarget[7]; - uint32_t *noncep = vdata + 73; // 9*8 + 1 + __m256i *noncev = (__m256i*)vdata + 9; // aligned int thr_id = mythr->id; // thr_id arg is deprecated uint64_t htmax[] = { 0, @@ -95,15 +88,7 @@ int scanhash_nist5_4way( struct work *work, uint32_t max_nonce, 0xFFFF0000, 0 }; - // we need 
bigendian data... - swab32_array( endiandata, pdata, 20 ); - - uint64_t *edata = (uint64_t*)endiandata; - mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); - - // precalc midstate -// blake512_4way_init( &ctx_mid ); -// blake512_4way( &ctx_mid, vdata, 64 ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); for ( int m=0; m < 6; m++ ) { @@ -112,17 +97,15 @@ int scanhash_nist5_4way( struct work *work, uint32_t max_nonce, uint32_t mask = masks[m]; do { - be32enc( noncep, n ); - be32enc( noncep+2, n+1 ); - be32enc( noncep+4, n+2 ); - be32enc( noncep+6, n+3 ); + *noncev = mm256_intrlv_blend_32( mm256_bswap_32( + _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev ); nist5hash_4way( hash, vdata ); for ( int lane = 0; lane < 4; lane++ ) if ( ( hash7[ lane ] & mask ) == 0 ) { - mm256_extr_lane_4x64( lane_hash, hash, lane, 256 ); + extr_lane_4x64( lane_hash, hash, lane, 256 ); if ( fulltest( lane_hash, ptarget ) && !opt_benchmark ) { pdata[19] = n + lane; diff --git a/algo/quark/anime-4way.c b/algo/quark/anime-4way.c index b71398b..c5bdde3 100644 --- a/algo/quark/anime-4way.c +++ b/algo/quark/anime-4way.c @@ -62,7 +62,7 @@ void anime_4way_hash( void *state, const void *input ) vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); if ( hash0[0] & mask ) { @@ -88,7 +88,7 @@ void anime_4way_hash( void *state, const void *input ) (char*)hash3, 512 ); } - mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 ); if ( mm256_anybits0( vh_mask ) ) { @@ -98,7 +98,7 @@ void anime_4way_hash( void *state, const void *input ) mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); reinit_groestl( &ctx.groestl ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -109,7 +109,7 @@ void anime_4way_hash( void *state, const void *input ) reinit_groestl( &ctx.groestl ); update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); jh512_4way( &ctx.jh, vhash, 64 ); jh512_4way_close( &ctx.jh, vhash ); @@ -155,7 +155,7 @@ void anime_4way_hash( void *state, const void *input ) mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask ); - mm256_dintrlv_4x64( state, state+32, state+64, state+96, vhash, 256 ); + dintrlv_4x64( state, state+32, state+64, state+96, vhash, 256 ); } int scanhash_anime_4way( struct work *work, uint32_t max_nonce, @@ -163,7 +163,6 @@ int scanhash_anime_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; uint32_t n = pdata[19]; @@ -188,9 +187,7 @@ int scanhash_anime_4way( struct work *work, uint32_t max_nonce, 0 }; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); for (int m=0; m < 6; m++) if (Htarg <= htmax[m]) diff --git a/algo/quark/hmq1725-4way.c b/algo/quark/hmq1725-4way.c index 7658bf1..3645f19 100644 --- a/algo/quark/hmq1725-4way.c +++ 
b/algo/quark/hmq1725-4way.c @@ -67,7 +67,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) bmw512_4way( &ctx.bmw, input, 80 ); bmw512_4way_close( &ctx.bmw, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); sph_whirlpool_init( &ctx.whirlpool ); sph_whirlpool( &ctx.whirlpool, hash0, 64 ); @@ -84,7 +84,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) // first fork, A is groestl serial, B is skein parallel. - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], vmask ), m256_zero ); @@ -116,7 +116,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) (char*)hash3, 512 ); // } - mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 ); // B @@ -158,7 +158,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); init_luffa( &ctx.luffa, 512 ); update_and_final_luffa( &ctx.luffa, (BitSequence*)hash0, @@ -186,7 +186,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) cubehashUpdateDigest( &ctx.cube, (BitSequence *)hash3, (const BitSequence *)hash3, 64 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); // A= keccak parallel, B= jh parallel @@ -209,7 +209,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); sph_shavite512_init( &ctx.shavite ); sph_shavite512 ( &ctx.shavite, hash0, 64 ); @@ -240,7 +240,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) // A is whirlpool serial, B is haval parallel. - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], vmask ), m256_zero ); @@ -271,7 +271,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) sph_whirlpool_close( &ctx.whirlpool, hash3 ); // } - mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 ); // B @@ -285,7 +285,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -300,13 +300,13 @@ extern void hmq1725_4way_hash(void *state, const void *input) update_final_echo( &ctx.echo, (BitSequence *)hash3, (const BitSequence *)hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); blake512_4way_init( &ctx.blake ); blake512_4way( &ctx.blake, vhash, 64 ); blake512_4way_close( &ctx.blake, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // shavite & luffa, both serial, select individually. 
@@ -362,13 +362,13 @@ extern void hmq1725_4way_hash(void *state, const void *input) (const BitSequence *)hash3, 64 ); } - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); hamsi512_4way_init( &ctx.hamsi ); hamsi512_4way( &ctx.hamsi, vhash, 64 ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); sph_fugue512_init( &ctx.fugue ); sph_fugue512( &ctx.fugue, hash0, 64 ); @@ -438,13 +438,13 @@ extern void hmq1725_4way_hash(void *state, const void *input) (const BitSequence *)hash3, 512 ); } - mm128_intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 ); shabal512_4way_init( &ctx.shabal ); shabal512_4way( &ctx.shabal, vhash, 64 ); shabal512_4way_close( &ctx.shabal, vhash ); - mm128_dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 ); sph_whirlpool_init( &ctx.whirlpool ); sph_whirlpool( &ctx.whirlpool, hash0, 64 ); @@ -461,7 +461,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) // A = fugue serial, B = sha512 prarallel - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], vmask ), m256_zero ); @@ -491,7 +491,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) sph_fugue512_close( &ctx.fugue, hash3 ); // } - mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 ); // if ( mm256_any_clr_256( vh_mask ) ) // { @@ -502,7 +502,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -513,7 +513,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); sha512_4way_init( &ctx.sha512 ); sha512_4way( &ctx.sha512, vhash, 64 ); @@ -524,7 +524,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], vmask ), m256_zero ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // if ( mm256_any_set_256( vh_mask ) ) //4 // { @@ -559,7 +559,7 @@ extern void hmq1725_4way_hash(void *state, const void *input) sph_whirlpool_close( &ctx.whirlpool, hash3 ); // } - mm256_intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, 512 ); mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask ); @@ -589,7 +589,7 @@ int scanhash_hmq1725_4way( struct work *work, uint32_t max_nonce, uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00, 0xFFFFF000, 0xFFFF0000, 0 }; - mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); for ( int m = 0; m < 6; m++ ) if ( Htarg <= htmax[m] ) { uint32_t mask = masks[ m ]; diff --git a/algo/quark/quark-4way.c b/algo/quark/quark-4way.c index 5e22c67..9c0fb5d 100644 --- 
a/algo/quark/quark-4way.c +++ b/algo/quark/quark-4way.c @@ -63,7 +63,7 @@ void quark_4way_hash( void *state, const void *input ) vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); if ( hash0[0] & mask ) { @@ -89,7 +89,7 @@ void quark_4way_hash( void *state, const void *input ) (char*)hash3, 512 ); } - mm256_intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 ); if ( mm256_anybits0( vh_mask ) ) { @@ -99,7 +99,7 @@ void quark_4way_hash( void *state, const void *input ) mm256_blend_hash_4x64( vh, vhA, vhB, vh_mask ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); reinit_groestl( &ctx.groestl ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -110,7 +110,7 @@ void quark_4way_hash( void *state, const void *input ) reinit_groestl( &ctx.groestl ); update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); jh512_4way( &ctx.jh, vhash, 64 ); jh512_4way_close( &ctx.jh, vhash ); @@ -168,7 +168,6 @@ int scanhash_quark_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t lane_hash[8] __attribute__ ((aligned (64))); uint32_t *hash7 = &(hash[25]); uint32_t *pdata = work->data; @@ -178,9 +177,7 @@ int scanhash_quark_4way( struct work *work, uint32_t max_nonce, __m256i *noncev = (__m256i*)vdata + 9; // aligned int thr_id = mythr->id; // thr_id arg is deprecated - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); do { *noncev = mm256_intrlv_blend_32( mm256_bswap_32( @@ -192,7 +189,7 @@ int scanhash_quark_4way( struct work *work, uint32_t max_nonce, for ( int i = 0; i < 4; i++ ) if ( ( hash7[ i<<1 ] & 0xFFFFFF00 ) == 0 ) { - mm256_extr_lane_4x64( lane_hash, hash, i, 256 ); + extr_lane_4x64( lane_hash, hash, i, 256 ); if ( fulltest( lane_hash, ptarget ) && !opt_benchmark ) { pdata[19] = n+i; diff --git a/algo/qubit/deep-2way.c b/algo/qubit/deep-2way.c index 0060932..9ca6608 100644 --- a/algo/qubit/deep-2way.c +++ b/algo/qubit/deep-2way.c @@ -39,7 +39,7 @@ void deep_2way_hash( void *output, const void *input ) memcpy( &ctx, &deep_2way_ctx, sizeof(deep_2way_ctx) ); luffa_2way_update( &ctx.luffa, input + (64<<1), 16 ); luffa_2way_close( &ctx.luffa, vhash ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 ); @@ -86,7 +86,7 @@ int scanhash_deep_2way( struct work *work,uint32_t max_nonce, casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) ); uint64_t *edata = (uint64_t*)endiandata; - mm256_intrlv_2x128( (uint64_t*)vdata, edata, edata, 640 ); + intrlv_2x128( (uint64_t*)vdata, edata, edata, 640 ); luffa_2way_init( &deep_2way_ctx.luffa, 512 ); luffa_2way_update( &deep_2way_ctx.luffa, vdata, 64 ); diff --git a/algo/qubit/qubit-2way.c b/algo/qubit/qubit-2way.c index 038cec0..8dc04b9 100644 --- a/algo/qubit/qubit-2way.c +++ b/algo/qubit/qubit-2way.c @@ 
-41,7 +41,7 @@ void qubit_2way_hash( void *output, const void *input ) memcpy( &ctx, &qubit_2way_ctx, sizeof(qubit_2way_ctx) ); luffa_2way_update( &ctx.luffa, input + (64<<1), 16 ); luffa_2way_close( &ctx.luffa, vhash ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 ); @@ -55,9 +55,9 @@ void qubit_2way_hash( void *output, const void *input ) sph_shavite512( &ctx.shavite, hash1, 64 ); sph_shavite512_close( &ctx.shavite, hash1 ); - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, (const BitSequence *) hash0, 512 ); @@ -92,7 +92,7 @@ int scanhash_qubit_2way( struct work *work,uint32_t max_nonce, casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) ); uint64_t *edata = (uint64_t*)endiandata; - mm256_intrlv_2x128( (uint64_t*)vdata, edata, edata, 640 ); + intrlv_2x128( (uint64_t*)vdata, edata, edata, 640 ); luffa_2way_init( &qubit_2way_ctx.luffa, 512 ); luffa_2way_update( &qubit_2way_ctx.luffa, vdata, 64 ); diff --git a/algo/ripemd/lbry-4way.c b/algo/ripemd/lbry-4way.c index 3e3fa7d..9ac5f53 100644 --- a/algo/ripemd/lbry-4way.c +++ b/algo/ripemd/lbry-4way.c @@ -40,9 +40,9 @@ void lbry_8way_hash( void* output, const void* input ) sha256_8way_close( &ctx_sha256, vhashA ); // reinterleave to do sha512 4-way 64 bit twice. - mm256_dintrlv_8x32( h0, h1, h2, h3, h4, h5, h6, h7, vhashA, 256 ); - mm256_intrlv_4x64( vhashA, h0, h1, h2, h3, 256 ); - mm256_intrlv_4x64( vhashB, h4, h5, h6, h7, 256 ); + dintrlv_8x32( h0, h1, h2, h3, h4, h5, h6, h7, vhashA, 256 ); + intrlv_4x64( vhashA, h0, h1, h2, h3, 256 ); + intrlv_4x64( vhashB, h4, h5, h6, h7, 256 ); sha512_4way_init( &ctx_sha512 ); sha512_4way( &ctx_sha512, vhashA, 32 ); @@ -53,9 +53,9 @@ void lbry_8way_hash( void* output, const void* input ) sha512_4way_close( &ctx_sha512, vhashB ); // back to 8-way 32 bit - mm256_dintrlv_4x64( h0, h1, h2, h3, vhashA, 512 ); - mm256_dintrlv_4x64( h4, h5, h6, h7, vhashB, 512 ); - mm256_intrlv_8x32( vhashA, h0, h1, h2, h3, h4, h5, h6, h7, 512 ); + dintrlv_4x64( h0, h1, h2, h3, vhashA, 512 ); + dintrlv_4x64( h4, h5, h6, h7, vhashB, 512 ); + intrlv_8x32( vhashA, h0, h1, h2, h3, h4, h5, h6, h7, 512 ); ripemd160_8way_init( &ctx_ripemd ); ripemd160_8way( &ctx_ripemd, vhashA, 32 ); @@ -97,11 +97,15 @@ int scanhash_lbry_8way( struct work *work, uint32_t max_nonce, 0xFFFFF000, 0xFFFF0000, 0 }; // we need bigendian data... 
- casti_m256i( edata, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) ); - casti_m256i( edata, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) ); - casti_m256i( edata, 2 ) = mm256_bswap_32( casti_m256i( pdata, 2 ) ); - casti_m256i( edata, 3 ) = mm256_bswap_32( casti_m256i( pdata, 3 ) ); - mm256_intrlv_8x32( vdata, edata, edata, edata, edata, + casti_m128i( edata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) ); + casti_m128i( edata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) ); + casti_m128i( edata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) ); + casti_m128i( edata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) ); + casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) ); + casti_m128i( edata, 5 ) = mm128_bswap_32( casti_m128i( pdata, 5 ) ); + casti_m128i( edata, 6 ) = mm128_bswap_32( casti_m128i( pdata, 6 ) ); + casti_m128i( edata, 7 ) = mm128_bswap_32( casti_m128i( pdata, 7 ) ); + intrlv_8x32( vdata, edata, edata, edata, edata, edata, edata, edata, edata, 1024 ); sha256_8way_init( &sha256_8w_mid ); sha256_8way( &sha256_8w_mid, vdata, LBRY_MIDSTATE ); @@ -118,7 +122,7 @@ int scanhash_lbry_8way( struct work *work, uint32_t max_nonce, for ( int i = 0; i < 8; i++ ) if ( !( hash7[ i ] & mask ) ) { // deinterleave hash for lane - mm256_extr_lane_8x32( lane_hash, hash, i, 256 ); + extr_lane_8x32( lane_hash, hash, i, 256 ); if ( fulltest( lane_hash, ptarget ) && !opt_benchmark ) { pdata[27] = n + i; diff --git a/algo/sha/md-helper-4way.c b/algo/sha/md-helper-4way.c index eb5c05c..e67a7c5 100644 --- a/algo/sha/md-helper-4way.c +++ b/algo/sha/md-helper-4way.c @@ -196,9 +196,9 @@ SPH_XCAT( HASH, _addbits_and_close )(void *cc, unsigned ub, unsigned n, ptr = (unsigned)sc->count & (SPH_BLEN - 1U); #ifdef PW01 - sc->buf[ptr>>3] = _mm256_set1_epi64x( 0x100 >> 8 ); + sc->buf[ptr>>3] = m256_const1_64( 0x100 >> 8 ); #else - sc->buf[ptr>>3] = _mm256_set1_epi64x( 0x80 ); + sc->buf[ptr>>3] = m256_const1_64( 0x80 ); #endif ptr += 8; diff --git a/algo/sha/sha2-hash-4way.c b/algo/sha/sha2-hash-4way.c index d3e5150..9516543 100644 --- a/algo/sha/sha2-hash-4way.c +++ b/algo/sha/sha2-hash-4way.c @@ -660,7 +660,7 @@ void sha512_4way_close( sha512_4way_context *sc, void *dst ) const int pad = buf_size - 16; ptr = (unsigned)sc->count & (buf_size - 1U); - sc->buf[ ptr>>3 ] = _mm256_set1_epi64x( 0x80 ); + sc->buf[ ptr>>3 ] = m256_const1_64( 0x80 ); ptr += 8; if ( ptr > pad ) { diff --git a/algo/sha/sha256q-4way.c b/algo/sha/sha256q-4way.c index 689577d..cc47b1c 100644 --- a/algo/sha/sha256q-4way.c +++ b/algo/sha/sha256q-4way.c @@ -36,7 +36,6 @@ int scanhash_sha256q_8way( struct work *work, uint32_t max_nonce, { uint32_t vdata[20*8] __attribute__ ((aligned (64))); uint32_t hash[8*8] __attribute__ ((aligned (32))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t lane_hash[8] __attribute__ ((aligned (32))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; @@ -60,10 +59,7 @@ int scanhash_sha256q_8way( struct work *work, uint32_t max_nonce, 0 }; // Need big endian data - swab32_array( edata, pdata, 20 ); - mm256_intrlv_8x32( vdata, edata, edata, edata, edata, - edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_8x32( vdata, pdata ); + mm256_bswap32_intrlv80_8x32( vdata, pdata ); sha256_8way_init( &sha256_ctx8 ); sha256_8way( &sha256_ctx8, vdata, 64 ); @@ -84,7 +80,7 @@ int scanhash_sha256q_8way( struct work *work, uint32_t max_nonce, if ( !( hash7[ lane ] & mask ) ) { // deinterleave hash for lane - mm256_extr_lane_8x32( lane_hash, hash, lane, 256 ); + extr_lane_8x32( 
lane_hash, hash, lane, 256 ); if ( fulltest( lane_hash, ptarget ) && !opt_benchmark ) { @@ -133,7 +129,6 @@ int scanhash_sha256q_4way( struct work *work, uint32_t max_nonce, { uint32_t vdata[20*4] __attribute__ ((aligned (64))); uint32_t hash[8*4] __attribute__ ((aligned (32))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t *hash7 = &(hash[7<<2]); uint32_t lane_hash[8] __attribute__ ((aligned (32))); uint32_t *pdata = work->data; @@ -157,9 +152,7 @@ int scanhash_sha256q_4way( struct work *work, uint32_t max_nonce, 0xFFFF0000, 0 }; - swab32_array( edata, pdata, 20 ); - mm128_intrlv_4x32( vdata, edata, edata, edata, edata, 640 ); -// mm128_bswap_intrlv80_4x32( vdata, pdata ); + mm128_bswap32_intrlv80_4x32( vdata, pdata ); sha256_4way_init( &sha256_ctx4 ); sha256_4way( &sha256_ctx4, vdata, 64 ); diff --git a/algo/sha/sha256t-4way.c b/algo/sha/sha256t-4way.c index e6e404e..c17ea1d 100644 --- a/algo/sha/sha256t-4way.c +++ b/algo/sha/sha256t-4way.c @@ -72,7 +72,7 @@ int scanhash_sha256t_11way( struct work *work, uint32_t max_nonce, casti_m256i( dataz, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) ); casti_m128i( dataz, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) ); - mm256_intrlv_8x32( datax, dataz, dataz, dataz, dataz, + intrlv_8x32( datax, dataz, dataz, dataz, dataz, dataz, dataz, dataz, dataz, 640 ); mm64_interleave_2x32( datay, dataz, dataz, 640 ); @@ -99,7 +99,7 @@ int scanhash_sha256t_11way( struct work *work, uint32_t max_nonce, for ( i = 0; i < 8; i++ ) if ( !( hash7[ i ] & mask ) ) { // deinterleave hash for lane - mm256_extr_lane_8x32( lane_hash, hashx, i, 256 ); + extr_lane_8x32( lane_hash, hashx, i, 256 ); if ( fulltest( lane_hash, ptarget ) ) { pdata[19] = n + i; @@ -163,7 +163,6 @@ int scanhash_sha256t_8way( struct work *work, uint32_t max_nonce, { uint32_t vdata[20*8] __attribute__ ((aligned (64))); uint32_t hash[8*8] __attribute__ ((aligned (32))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t lane_hash[8] __attribute__ ((aligned (32))); uint32_t *hash7 = &(hash[7<<3]); uint32_t *pdata = work->data; @@ -187,12 +186,9 @@ int scanhash_sha256t_8way( struct work *work, uint32_t max_nonce, 0xFFFF0000, 0 }; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_8x32( vdata, edata, edata, edata, edata, - edata, edata, edata, edata, 640 ); // Need big endian data -// mm256_bswap_intrlv80_8x32( vdata, pdata ); + mm256_bswap32_intrlv80_8x32( vdata, pdata ); sha256_8way_init( &sha256_ctx8 ); sha256_8way( &sha256_ctx8, vdata, 64 ); @@ -209,7 +205,7 @@ int scanhash_sha256t_8way( struct work *work, uint32_t max_nonce, if ( !( hash7[ lane ] & mask ) ) { // deinterleave hash for lane - mm256_extr_lane_8x32( lane_hash, hash, lane, 256 ); + extr_lane_8x32( lane_hash, hash, lane, 256 ); if ( fulltest( lane_hash, ptarget ) && !opt_benchmark ) { pdata[19] = n + lane; @@ -253,7 +249,6 @@ int scanhash_sha256t_4way( struct work *work, uint32_t max_nonce, { uint32_t vdata[20*4] __attribute__ ((aligned (64))); uint32_t hash[8*4] __attribute__ ((aligned (32))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t lane_hash[8] __attribute__ ((aligned (64))); uint32_t *hash7 = &(hash[7<<2]); uint32_t *pdata = work->data; @@ -277,10 +272,7 @@ int scanhash_sha256t_4way( struct work *work, uint32_t max_nonce, 0xFFFF0000, 0 }; - swab32_array( edata, pdata, 20 ); - mm128_intrlv_4x32( vdata, edata, edata, edata, edata, 640 ); - -// mm128_bswap_intrlv80_4x32( vdata, pdata ); + mm128_bswap32_intrlv80_4x32( vdata, pdata ); sha256_4way_init( &sha256_ctx4 ); sha256_4way( 
&sha256_ctx4, vdata, 64 ); diff --git a/algo/sha/sha256t-gate.c b/algo/sha/sha256t-gate.c index 91e8e83..0271234 100644 --- a/algo/sha/sha256t-gate.c +++ b/algo/sha/sha256t-gate.c @@ -11,7 +11,7 @@ bool register_sha256t_algo( algo_gate_t* gate ) gate->scanhash = (void*)&scanhash_sha256t_4way; gate->hash = (void*)&sha256t_4way_hash; #else -gate->optimizations = SHA_OPT; + gate->optimizations = SHA_OPT; gate->scanhash = (void*)&scanhash_sha256t; gate->hash = (void*)&sha256t_hash; #endif @@ -21,7 +21,11 @@ gate->optimizations = SHA_OPT; bool register_sha256q_algo( algo_gate_t* gate ) { -#if defined(SHA256T_4WAY) +#if defined(SHA256T_8WAY) + gate->optimizations = SSE2_OPT | AVX2_OPT | SHA_OPT; + gate->scanhash = (void*)&scanhash_sha256q_8way; + gate->hash = (void*)&sha256q_8way_hash; +#elif defined(SHA256T_4WAY) gate->optimizations = SSE2_OPT | AVX2_OPT | SHA_OPT; gate->scanhash = (void*)&scanhash_sha256q_4way; gate->hash = (void*)&sha256q_4way_hash; diff --git a/algo/sha/sha256t-gate.h b/algo/sha/sha256t-gate.h index 30b2766..0d519aa 100644 --- a/algo/sha/sha256t-gate.h +++ b/algo/sha/sha256t-gate.h @@ -8,7 +8,7 @@ #if !defined(__SHA__) #if defined(__AVX2__) #define SHA256T_8WAY -#elif defined(__SSE2__) + #elif defined(__SSE2__) #define SHA256T_4WAY #endif #endif diff --git a/algo/skein/skein-4way.c b/algo/skein/skein-4way.c index 656adb5..d600e60 100644 --- a/algo/skein/skein-4way.c +++ b/algo/skein/skein-4way.c @@ -12,7 +12,7 @@ void skeinhash_4way( void *state, const void *input ) { - uint64_t vhash64[8*4] __attribute__ ((aligned (64))); + uint64_t vhash64[16*4] __attribute__ ((aligned (64))); skein512_4way_context ctx_skein; #if defined(__SHA__) uint32_t hash0[16] __attribute__ ((aligned (64))); @@ -30,7 +30,7 @@ void skeinhash_4way( void *state, const void *input ) skein512_4way_close( &ctx_skein, vhash64 ); #if defined(__SHA__) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 512 ); SHA256_Init( &ctx_sha256 ); SHA256_Update( &ctx_sha256, (unsigned char*)hash0, 64 ); @@ -50,7 +50,7 @@ void skeinhash_4way( void *state, const void *input ) intrlv_4x32( state, hash0, hash1, hash2, hash3, 256 ); #else - mm256_rintrlv_4x64_4x32( vhash32, vhash64, 512 ); + rintrlv_4x64_4x32( vhash32, vhash64, 512 ); sha256_4way_init( &ctx_sha256 ); sha256_4way( &ctx_sha256, vhash32, 64 ); @@ -62,8 +62,7 @@ int scanhash_skein_4way( struct work *work, uint32_t max_nonce, uint64_t *hashes_done, struct thr_info *mythr ) { uint32_t vdata[20*4] __attribute__ ((aligned (64))); - uint32_t hash[8*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); + uint32_t hash[16*4] __attribute__ ((aligned (64))); uint32_t lane_hash[8] __attribute__ ((aligned (32))); uint32_t *hash7 = &(hash[7<<2]); uint32_t *pdata = work->data; @@ -74,9 +73,7 @@ int scanhash_skein_4way( struct work *work, uint32_t max_nonce, __m256i *noncev = (__m256i*)vdata + 9; // aligned int thr_id = mythr->id; // thr_id arg is deprecated - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); do { *noncev = mm256_intrlv_blend_32( mm256_bswap_32( diff --git a/algo/skein/skein-hash-4way.c b/algo/skein/skein-hash-4way.c index ef8beb8..358ecd8 100644 --- a/algo/skein/skein-hash-4way.c +++ b/algo/skein/skein-hash-4way.c @@ -279,10 +279,7 @@ do { \ _mm256_xor_si256( k2, k3 ) ), \ _mm256_xor_si256( _mm256_xor_si256( k4, k5 ), \ 
_mm256_xor_si256( k6, k7 ) ) ), \ - _mm256_set_epi64x( SPH_C64(0x1BD11BDAA9FC1A22), \ - SPH_C64(0x1BD11BDAA9FC1A22), \ - SPH_C64(0x1BD11BDAA9FC1A22), \ - SPH_C64(0x1BD11BDAA9FC1A22) ) ); \ + m256_const1_64( 0x1BD11BDAA9FC1A22) ); \ t2 = t0 ^ t1; \ } while (0) @@ -294,13 +291,11 @@ do { \ w3 = _mm256_add_epi64( w3, SKBI(k,s,3) ); \ w4 = _mm256_add_epi64( w4, SKBI(k,s,4) ); \ w5 = _mm256_add_epi64( w5, _mm256_add_epi64( SKBI(k,s,5), \ - _mm256_set_epi64x( SKBT(t,s,0), SKBT(t,s,0), \ - SKBT(t,s,0), SKBT(t,s,0) ) ) ); \ + m256_const1_64( SKBT(t,s,0) ) ) ); \ w6 = _mm256_add_epi64( w6, _mm256_add_epi64( SKBI(k,s,6), \ - _mm256_set_epi64x( SKBT(t,s,1), SKBT(t,s,1), \ - SKBT(t,s,1), SKBT(t,s,1) ) ) ); \ + m256_const1_64( SKBT(t,s,1) ) ) ); \ w7 = _mm256_add_epi64( w7, _mm256_add_epi64( SKBI(k,s,7), \ - _mm256_set_epi64x( s, s, s, s ) ) ); \ + m256_const1_64( s ) ) ); \ } while (0) diff --git a/algo/skein/skein2-4way.c b/algo/skein/skein2-4way.c index 98680d4..b67fa78 100644 --- a/algo/skein/skein2-4way.c +++ b/algo/skein/skein2-4way.c @@ -8,7 +8,7 @@ void skein2hash_4way( void *output, const void *input ) { skein512_4way_context ctx; - uint64_t hash[8*4] __attribute__ ((aligned (64))); + uint64_t hash[16*4] __attribute__ ((aligned (64))); skein512_4way_init( &ctx ); skein512_4way( &ctx, input, 80 ); @@ -22,8 +22,7 @@ void skein2hash_4way( void *output, const void *input ) int scanhash_skein2_4way( struct work *work, uint32_t max_nonce, uint64_t *hashes_done, struct thr_info *mythr ) { - uint32_t hash[8*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); + uint32_t hash[16*4] __attribute__ ((aligned (64))); uint32_t vdata[20*4] __attribute__ ((aligned (64))); uint32_t lane_hash[8] __attribute__ ((aligned (64))); uint32_t *hash7 = &(hash[25]); @@ -34,20 +33,10 @@ int scanhash_skein2_4way( struct work *work, uint32_t max_nonce, uint32_t n = first_nonce; __m256i *noncev = (__m256i*)vdata + 9; // aligned int thr_id = mythr->id; // thr_id arg is deprecated -// uint32_t *noncep = vdata + 73; // 9*8 + 1 - - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); - -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); do { -// be32enc( noncep, n ); -// be32enc( noncep+2, n+1 ); -// be32enc( noncep+4, n+2 ); -// be32enc( noncep+6, n+3 ); - *noncev = mm256_intrlv_blend_32( mm256_bswap_32( _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev ); @@ -56,7 +45,7 @@ int scanhash_skein2_4way( struct work *work, uint32_t max_nonce, for ( int lane = 0; lane < 4; lane++ ) if ( hash7[ lane<<1 ] <= Htarg ) { - mm256_extr_lane_4x64( lane_hash, hash, lane, 256 ); + extr_lane_4x64( lane_hash, hash, lane, 256 ); if ( fulltest( lane_hash, ptarget ) && !opt_benchmark ) { pdata[19] = n + lane; diff --git a/algo/x11/c11-4way.c b/algo/x11/c11-4way.c index f82dec3..e2922bc 100644 --- a/algo/x11/c11-4way.c +++ b/algo/x11/c11-4way.c @@ -69,7 +69,7 @@ void c11_4way_hash( void *state, const void *input ) bmw512_4way_close( &ctx.bmw, vhash ); // Serial - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 3 Groestl update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -81,7 +81,7 @@ void c11_4way_hash( void *state, const void *input ) update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); // 4way - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, 
hash2, hash3, 512 ); // 4 JH jh512_4way( &ctx.jh, vhash, 64 ); @@ -96,16 +96,16 @@ void c11_4way_hash( void *state, const void *input ) skein512_4way_close( &ctx.skein, vhash ); // Serial - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 7 Luffa - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); - mm256_intrlv_2x128( vhashB, hash2, hash3, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhashB, hash2, hash3, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhashB, vhashB, 64 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhashB, 512 ); // 8 Cubehash cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 ); @@ -133,13 +133,13 @@ void c11_4way_hash( void *state, const void *input ) sph_shavite512_close( &ctx.shavite, hash3 ); // 10 Simd - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); - mm256_intrlv_2x128( vhashB, hash2, hash3, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhashB, hash2, hash3, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhashB, 512 ); // 11 Echo update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -165,7 +165,6 @@ int scanhash_c11_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__((aligned(64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; uint32_t n = pdata[19]; @@ -178,9 +177,7 @@ int scanhash_c11_4way( struct work *work, uint32_t max_nonce, uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00, 0xFFFFF000, 0xFFFF0000, 0 }; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); for (int m=0; m < 6; m++) if (Htarg <= htmax[m]) diff --git a/algo/x11/timetravel-4way.c b/algo/x11/timetravel-4way.c index 155ad11..ba3199c 100644 --- a/algo/x11/timetravel-4way.c +++ b/algo/x11/timetravel-4way.c @@ -87,19 +87,16 @@ void timetravel_4way_hash(void *output, const void *input) blake512_4way( &ctx.blake, vhashA, dataLen ); blake512_4way_close( &ctx.blake, vhashB ); if ( i == 7 ) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashB, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 ); break; case 1: bmw512_4way( &ctx.bmw, vhashA, dataLen ); bmw512_4way_close( &ctx.bmw, vhashB ); if ( i == 7 ) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashB, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 ); break; case 2: - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashA, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, dataLen<<3 ); reinit_groestl( &ctx.groestl ); @@ -112,47 +109,40 @@ void timetravel_4way_hash(void *output, const void *input) update_and_final_groestl( &ctx.groestl, (char*)hash3, 
(char*)hash3, dataLen<<3 ); if ( i != 7 ) - mm256_intrlv_4x64( vhashB, - hash0, hash1, hash2, hash3, dataLen<<3 ); + intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 ); break; case 3: skein512_4way( &ctx.skein, vhashA, dataLen ); skein512_4way_close( &ctx.skein, vhashB ); if ( i == 7 ) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashB, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 ); break; case 4: jh512_4way( &ctx.jh, vhashA, dataLen ); jh512_4way_close( &ctx.jh, vhashB ); if ( i == 7 ) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashB, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 ); break; case 5: keccak512_4way( &ctx.keccak, vhashA, dataLen ); keccak512_4way_close( &ctx.keccak, vhashB ); if ( i == 7 ) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashB, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 ); break; case 6: - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashA, dataLen<<3 ); - mm256_intrlv_2x128( vhashA, hash0, hash1, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 ); + intrlv_2x128( vhashA, hash0, hash1, dataLen<<3 ); luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, dataLen ); - mm256_dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 ); - mm256_intrlv_2x128( vhashA, hash2, hash3, dataLen<<3 ); + dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 ); + intrlv_2x128( vhashA, hash2, hash3, dataLen<<3 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, dataLen ); - mm256_dintrlv_2x128( hash2, hash3, vhashA, dataLen<<3 ); + dintrlv_2x128( hash2, hash3, vhashA, dataLen<<3 ); if ( i != 7 ) - mm256_intrlv_4x64( vhashB, - hash0, hash1, hash2, hash3, dataLen<<3 ); + intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 ); break; case 7: - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashA, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 ); cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*)hash0, dataLen ); memcpy( &ctx.cube, &tt8_4way_ctx.cube, sizeof(cubehashParam) ); @@ -165,8 +155,7 @@ void timetravel_4way_hash(void *output, const void *input) cubehashUpdateDigest( &ctx.cube, (byte*)hash3, (const byte*)hash3, dataLen ); if ( i != 7 ) - mm256_intrlv_4x64( vhashB, - hash0, hash1, hash2, hash3, dataLen<<3 ); + intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 ); break; default: applog(LOG_ERR,"SWERR: timetravel invalid permutation"); @@ -215,7 +204,7 @@ int scanhash_timetravel_4way( struct work *work, uint32_t max_nonce, } uint64_t *edata = (uint64_t*)endiandata; - mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); + intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); do { diff --git a/algo/x11/timetravel10-4way.c b/algo/x11/timetravel10-4way.c index 463c3a9..5dab3c8 100644 --- a/algo/x11/timetravel10-4way.c +++ b/algo/x11/timetravel10-4way.c @@ -93,19 +93,16 @@ void timetravel10_4way_hash(void *output, const void *input) blake512_4way( &ctx.blake, vhashA, dataLen ); blake512_4way_close( &ctx.blake, vhashB ); if ( i == 9 ) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashB, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 ); break; case 1: bmw512_4way( &ctx.bmw, vhashA, dataLen ); bmw512_4way_close( &ctx.bmw, vhashB ); if ( i == 9 ) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashB, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, 
dataLen<<3 ); break; case 2: - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashA, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, dataLen<<3 ); reinit_groestl( &ctx.groestl ); @@ -118,46 +115,40 @@ void timetravel10_4way_hash(void *output, const void *input) update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, dataLen<<3 ); if ( i != 9 ) - mm256_intrlv_4x64( vhashB, - hash0, hash1, hash2, hash3, dataLen<<3 ); + intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 ); break; case 3: skein512_4way( &ctx.skein, vhashA, dataLen ); skein512_4way_close( &ctx.skein, vhashB ); if ( i == 9 ) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashB, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 ); break; case 4: jh512_4way( &ctx.jh, vhashA, dataLen ); jh512_4way_close( &ctx.jh, vhashB ); if ( i == 9 ) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashB, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 ); break; case 5: keccak512_4way( &ctx.keccak, vhashA, dataLen ); keccak512_4way_close( &ctx.keccak, vhashB ); if ( i == 9 ) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashB, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashB, dataLen<<3 ); break; case 6: - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashA, dataLen<<3 ); - mm256_intrlv_2x128( vhashA, hash0, hash1, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 ); + intrlv_2x128( vhashA, hash0, hash1, dataLen<<3 ); luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, dataLen ); - mm256_dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 ); - mm256_intrlv_2x128( vhashA, hash2, hash3, dataLen<<3 ); + dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 ); + intrlv_2x128( vhashA, hash2, hash3, dataLen<<3 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, dataLen ); - mm256_dintrlv_2x128( hash2, hash3, vhashA, dataLen<<3 ); + dintrlv_2x128( hash2, hash3, vhashA, dataLen<<3 ); if ( i != 9 ) - mm256_intrlv_4x64( vhashB, - hash0, hash1, hash2, hash3, dataLen<<3 ); + intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 ); break; case 7: - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 ); cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*)hash0, dataLen ); @@ -171,12 +162,10 @@ void timetravel10_4way_hash(void *output, const void *input) cubehashUpdateDigest( &ctx.cube, (byte*)hash3, (const byte*)hash3, dataLen ); if ( i != 9 ) - mm256_intrlv_4x64( vhashB, - hash0, hash1, hash2, hash3, dataLen<<3 ); + intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 ); break; case 8: - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashA, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 ); sph_shavite512( &ctx.shavite, hash0, dataLen ); sph_shavite512_close( &ctx.shavite, hash0 ); memcpy( &ctx.shavite, &tt10_4way_ctx.shavite, sizeof ctx.shavite ); @@ -189,22 +178,19 @@ void timetravel10_4way_hash(void *output, const void *input) sph_shavite512( &ctx.shavite, hash3, dataLen ); sph_shavite512_close( &ctx.shavite, hash3 ); if ( i != 9 ) - mm256_intrlv_4x64( vhashB, - hash0, hash1, hash2, hash3, dataLen<<3 ); + intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 ); break; case 9: - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhashA, dataLen<<3 ); - 
mm256_intrlv_2x128( vhashA, hash0, hash1, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, dataLen<<3 ); + intrlv_2x128( vhashA, hash0, hash1, dataLen<<3 ); simd_2way_update_close( &ctx.simd, vhashA, vhashA, dataLen<<3 ); - mm256_dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 ); - mm256_intrlv_2x128( vhashA, hash2, hash3, dataLen<<3 ); + dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 ); + intrlv_2x128( vhashA, hash2, hash3, dataLen<<3 ); simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhashA, vhashA, dataLen<<3 ); - mm256_dintrlv_2x128( hash2, hash3, vhashA, dataLen<<3 ); + dintrlv_2x128( hash2, hash3, vhashA, dataLen<<3 ); if ( i != 9 ) - mm256_intrlv_4x64( vhashB, - hash0, hash1, hash2, hash3, dataLen<<3 ); + intrlv_4x64( vhashB, hash0, hash1, hash2, hash3, dataLen<<3 ); break; default: applog(LOG_ERR,"SWERR: timetravel invalid permutation"); @@ -253,7 +239,7 @@ int scanhash_timetravel10_4way( struct work *work, } uint64_t *edata = (uint64_t*)endiandata; - mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); + intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); do { diff --git a/algo/x11/tribus-4way.c b/algo/x11/tribus-4way.c index 2e17ad3..d7f6194 100644 --- a/algo/x11/tribus-4way.c +++ b/algo/x11/tribus-4way.c @@ -37,7 +37,7 @@ void tribus_hash_4way(void *state, const void *input) keccak512_4way( &ctx_keccak, vhash, 64 ); keccak512_4way_close( &ctx_keccak, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // hash echo serially init_echo( &ctx_echo, 512 ); @@ -64,7 +64,6 @@ int scanhash_tribus_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[20*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; const uint32_t first_nonce = pdata[19]; @@ -87,9 +86,7 @@ int scanhash_tribus_4way( struct work *work, uint32_t max_nonce, 0xFFFF0000, 0 }; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); // precalc midstate // doing it one way then then interleaving would be faster but too diff --git a/algo/x11/x11-4way.c b/algo/x11/x11-4way.c index 26fb57b..52a3c1b 100644 --- a/algo/x11/x11-4way.c +++ b/algo/x11/x11-4way.c @@ -69,7 +69,7 @@ void x11_4way_hash( void *state, const void *input ) bmw512_4way_close( &ctx.bmw, vhash ); // Serial - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 3 Groestl update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -81,7 +81,7 @@ void x11_4way_hash( void *state, const void *input ) update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); // 4way - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); // 4 Skein skein512_4way( &ctx.skein, vhash, 64 ); @@ -95,16 +95,16 @@ void x11_4way_hash( void *state, const void *input ) keccak512_4way( &ctx.keccak, vhash, 64 ); keccak512_4way_close( &ctx.keccak, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 7 Luffa parallel 2 way 128 bit - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); - mm256_intrlv_2x128( vhashB, 
hash2, hash3, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhashB, hash2, hash3, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhashB, vhashB, 64 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhashB, 512 ); // 8 Cubehash cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 ); @@ -132,13 +132,13 @@ void x11_4way_hash( void *state, const void *input ) sph_shavite512_close( &ctx.shavite, hash3 ); // 10 Simd - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); - mm256_intrlv_2x128( vhashB, hash2, hash3, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhashB, hash2, hash3, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhashB, 512 ); // 11 Echo update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -164,7 +164,6 @@ int scanhash_x11_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__((aligned(64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; uint32_t n = pdata[19]; @@ -177,9 +176,7 @@ int scanhash_x11_4way( struct work *work, uint32_t max_nonce, uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00, 0xFFFFF000, 0xFFFF0000, 0 }; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); for (int m=0; m < 6; m++) if (Htarg <= htmax[m]) diff --git a/algo/x11/x11evo-4way.c b/algo/x11/x11evo-4way.c index eebc28d..8fe1512 100644 --- a/algo/x11/x11evo-4way.c +++ b/algo/x11/x11evo-4way.c @@ -87,19 +87,16 @@ void x11evo_4way_hash( void *state, const void *input ) case 0: blake512_4way( &ctx.blake, input, 80 ); blake512_4way_close( &ctx.blake, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhash, 64<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 ); break; case 1: bmw512_4way( &ctx.bmw, vhash, 64 ); bmw512_4way_close( &ctx.bmw, vhash ); if ( i >= len-1 ) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhash, 64<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 ); break; case 2: - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhash, 64<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); reinit_groestl( &ctx.groestl ); @@ -112,47 +109,40 @@ void x11evo_4way_hash( void *state, const void *input ) update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); if ( i < len-1 ) - mm256_intrlv_4x64( vhash, - hash0, hash1, hash2, hash3, 64<<3 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 64<<3 ); break; case 3: skein512_4way( &ctx.skein, vhash, 64 ); skein512_4way_close( &ctx.skein, vhash ); if ( i >= len-1 ) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhash, 64<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 ); break; case 4: jh512_4way( &ctx.jh, vhash, 64 ); jh512_4way_close( &ctx.jh, 
vhash ); if ( i >= len-1 ) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhash, 64<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 ); break; case 5: keccak512_4way( &ctx.keccak, vhash, 64 ); keccak512_4way_close( &ctx.keccak, vhash ); if ( i >= len-1 ) - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhash, 64<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 ); break; case 6: - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhash, 64<<3 ); - mm256_intrlv_2x128( vhash, hash0, hash1, 64<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 ); + intrlv_2x128( vhash, hash0, hash1, 64<<3 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 64<<3 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 64<<3 ); + dintrlv_2x128( hash0, hash1, vhash, 64<<3 ); + intrlv_2x128( vhash, hash2, hash3, 64<<3 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 64<<3 ); + dintrlv_2x128( hash2, hash3, vhash, 64<<3 ); if ( i < len-1 ) - mm256_intrlv_4x64( vhash, - hash0, hash1, hash2, hash3, 64<<3 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 64<<3 ); break; case 7: - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhash, 64<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 ); cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 ); memcpy( &ctx.cube, &x11evo_4way_ctx.cube, sizeof(cubehashParam) ); @@ -165,12 +155,10 @@ void x11evo_4way_hash( void *state, const void *input ) cubehashUpdateDigest( &ctx.cube, (byte*)hash3, (const byte*) hash3, 64 ); if ( i < len-1 ) - mm256_intrlv_4x64( vhash, - hash0, hash1, hash2, hash3, 64<<3 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 64<<3 ); break; case 8: - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhash, 64<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 ); sph_shavite512( &ctx.shavite, hash0, 64 ); sph_shavite512_close( &ctx.shavite, hash0 ); memcpy( &ctx.shavite, &x11evo_4way_ctx.shavite, @@ -186,26 +174,22 @@ void x11evo_4way_hash( void *state, const void *input ) sph_shavite512( &ctx.shavite, hash3, 64 ); sph_shavite512_close( &ctx.shavite, hash3 ); if ( i < len-1 ) - mm256_intrlv_4x64( vhash, - hash0, hash1, hash2, hash3, 64<<3 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 64<<3 ); break; case 9: - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhash, 64<<3 ); - mm256_intrlv_2x128( vhash, hash0, hash1, 64<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 ); + intrlv_2x128( vhash, hash0, hash1, 64<<3 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 64<<3 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 64<<3 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 64<<3 ); + dintrlv_2x128( hash0, hash1, vhash, 64<<3 ); + intrlv_2x128( vhash, hash2, hash3, 64<<3 ); simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 64<<3 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 64<<3 ); + dintrlv_2x128( hash2, hash3, vhash, 64<<3 ); if ( i < len-1 ) - mm256_intrlv_4x64( vhash, - hash0, hash1, hash2, hash3, 64<<3 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 64<<3 ); break; case 10: - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, - vhash, 64<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 64<<3 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, (const BitSequence *) hash0, 512 ); memcpy( &ctx.echo, &x11evo_4way_ctx.echo, sizeof(hashState_echo) ); @@ -218,8 +202,7 @@ 
void x11evo_4way_hash( void *state, const void *input ) update_final_echo( &ctx.echo, (BitSequence *)hash3, (const BitSequence *) hash3, 512 ); if ( i < len-1 ) - mm256_intrlv_4x64( vhash, - hash0, hash1, hash2, hash3, 64<<3 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 64<<3 ); break; } } @@ -269,7 +252,7 @@ int scanhash_x11evo_4way( struct work* work, uint32_t max_nonce, } uint64_t *edata = (uint64_t*)endiandata; - mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); + intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); do { diff --git a/algo/x11/x11gost-4way.c b/algo/x11/x11gost-4way.c index 21e03a8..90b1ebd 100644 --- a/algo/x11/x11gost-4way.c +++ b/algo/x11/x11gost-4way.c @@ -70,7 +70,7 @@ void x11gost_4way_hash( void *state, const void *input ) bmw512_4way_close( &ctx.bmw, vhash ); // Serial - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); memcpy( &ctx.groestl, &x11gost_4way_ctx.groestl, @@ -84,7 +84,7 @@ void x11gost_4way_hash( void *state, const void *input ) update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); // 4way - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); skein512_4way( &ctx.skein, vhash, 64 ); skein512_4way_close( &ctx.skein, vhash ); @@ -96,7 +96,7 @@ void x11gost_4way_hash( void *state, const void *input ) keccak512_4way_close( &ctx.keccak, vhash ); // Serial - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); sph_gost512( &ctx.gost, hash0, 64 ); sph_gost512_close( &ctx.gost, hash0 ); @@ -110,13 +110,13 @@ void x11gost_4way_hash( void *state, const void *input ) sph_gost512( &ctx.gost, hash3, 64 ); sph_gost512_close( &ctx.gost, hash3 ); - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, hash2, hash3, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 ); memcpy( &ctx.cube, &x11gost_4way_ctx.cube, sizeof(cubehashParam) ); @@ -141,12 +141,12 @@ void x11gost_4way_hash( void *state, const void *input ) sph_shavite512( &ctx.shavite, hash3, 64 ); sph_shavite512_close( &ctx.shavite, hash3 ); - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, hash2, hash3, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, (const BitSequence *) hash0, 512 ); @@ -171,7 +171,6 @@ int scanhash_x11gost_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - 
uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; uint32_t n = pdata[19]; @@ -184,9 +183,7 @@ int scanhash_x11gost_4way( struct work *work, uint32_t max_nonce, uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00, 0xFFFFF000, 0xFFFF0000, 0 }; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); for (int m=0; m < 6; m++) if (Htarg <= htmax[m]) diff --git a/algo/x12/x12-4way.c b/algo/x12/x12-4way.c index 3a0cde7..80fae6d 100644 --- a/algo/x12/x12-4way.c +++ b/algo/x12/x12-4way.c @@ -33,7 +33,6 @@ typedef struct { simd_2way_context simd; hashState_echo echo; hamsi512_4way_context hamsi; -// sph_fugue512_context fugue; } x12_4way_ctx_holder; x12_4way_ctx_holder x12_4way_ctx __attribute__ ((aligned (64))); @@ -52,7 +51,6 @@ void init_x12_4way_ctx() simd_2way_init( &x12_4way_ctx.simd, 512 ); init_echo( &x12_4way_ctx.echo, 512 ); hamsi512_4way_init( &x12_4way_ctx.hamsi ); -// sph_fugue512_init( &x12_4way_ctx.fugue ); }; void x12_4way_hash( void *state, const void *input ) @@ -74,7 +72,7 @@ bmw512_4way_close( &ctx.bmw, vhash ); // Serial - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 3 Groestl update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -86,7 +84,7 @@ update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); // Parallel 4way 64 bit - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); // 4 Skein skein512_4way( &ctx.skein, vhash, 64 ); @@ -101,16 +99,16 @@ keccak512_4way_close( &ctx.keccak, vhash ); // Serial - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 7 Luffa - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, hash2, hash3, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); // 8 Cubehash cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 ); @@ -138,13 +136,13 @@ sph_shavite512_close( &ctx.shavite, hash3 ); // 10 Simd - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, hash2, hash3, 512 ); simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); // 11 Echo update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -160,11 +158,11 @@ void x12_4way_hash( void *state, const void *input ) (const BitSequence *)
hash3, 512 ); // 12 Hamsi parallel 4way 32 bit - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); hamsi512_4way( &ctx.hamsi, vhash, 64 ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( state, state+32, state+64, state+96, vhash, 256 ); + dintrlv_4x64( state, state+32, state+64, state+96, vhash, 256 ); } int scanhash_x12_4way( struct work *work, uint32_t max_nonce, @@ -189,7 +187,7 @@ int scanhash_x12_4way( struct work *work, uint32_t max_nonce, swab32_array( endiandata, pdata, 20 ); uint64_t *edata = (uint64_t*)endiandata; - mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); + intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); for ( int m=0; m < 6; m++ ) if ( Htarg <= htmax[m] ) diff --git a/algo/x13/phi1612-4way.c b/algo/x13/phi1612-4way.c index add72c8..a62b6f3 100644 --- a/algo/x13/phi1612-4way.c +++ b/algo/x13/phi1612-4way.c @@ -53,7 +53,7 @@ void phi1612_4way_hash( void *state, const void *input ) jh512_4way_close( &ctx.jh, vhash ); // Serial to the end - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // Cubehash cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 ); @@ -114,7 +114,6 @@ int scanhash_phi1612_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; const uint32_t first_nonce = pdata[19]; @@ -125,9 +124,7 @@ int scanhash_phi1612_4way( struct work *work, uint32_t max_nonce, if ( opt_benchmark ) ( (uint32_t*)ptarget )[7] = 0x0cff; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); do { *noncev = mm256_intrlv_blend_32( mm256_bswap_32( diff --git a/algo/x13/skunk-4way.c b/algo/x13/skunk-4way.c index 461d80c..165047e 100644 --- a/algo/x13/skunk-4way.c +++ b/algo/x13/skunk-4way.c @@ -33,7 +33,7 @@ void skunk_4way_hash( void *output, const void *input ) skein512_4way( &ctx.skein, input, 80 ); skein512_4way_close( &ctx.skein, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash0, (const byte*)hash0, 64 ); memcpy( &ctx.cube, &skunk_4way_ctx.cube, sizeof(cubehashParam) ); @@ -78,7 +78,6 @@ int scanhash_skunk_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; const uint32_t first_nonce = pdata[19]; @@ -91,9 +90,7 @@ int scanhash_skunk_4way( struct work *work, uint32_t max_nonce, if ( opt_benchmark ) ((uint32_t*)ptarget)[7] = 0x0cff; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); do { *noncev = mm256_intrlv_blend_32( mm256_bswap_32( diff --git a/algo/x13/x13-4way.c b/algo/x13/x13-4way.c index b8d8220..970f8e3 100644 --- a/algo/x13/x13-4way.c +++ b/algo/x13/x13-4way.c @@ -74,7 +74,7 @@ void x13_4way_hash( void *state, const void *input ) 
bmw512_4way_close( &ctx.bmw, vhash ); // Serial - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 3 Groestl update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -86,7 +86,7 @@ void x13_4way_hash( void *state, const void *input ) update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); // Parallel 4way 64 bit - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); // 4 Skein skein512_4way( &ctx.skein, vhash, 64 ); @@ -101,16 +101,16 @@ void x13_4way_hash( void *state, const void *input ) keccak512_4way_close( &ctx.keccak, vhash ); // Serial - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 7 Luffa - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, hash2, hash3, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); // 8 Cubehash cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 ); @@ -138,13 +138,13 @@ void x13_4way_hash( void *state, const void *input ) sph_shavite512_close( &ctx.shavite, hash3 ); // 10 Simd - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, hash2, hash3, 512 ); simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); // 11 Echo update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -160,10 +160,10 @@ void x13_4way_hash( void *state, const void *input ) (const BitSequence *) hash3, 512 ); // 12 Hamsi parallel 4way 32 bit - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); hamsi512_4way( &ctx.hamsi, vhash, 64 ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 13 Fugue serial sph_fugue512( &ctx.fugue, hash0, 64 ); @@ -189,7 +189,6 @@ int scanhash_x13_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; uint32_t n = pdata[19]; @@ -202,9 +201,7 @@ int scanhash_x13_4way( struct work *work, uint32_t max_nonce, uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00, 0xFFFFF000, 0xFFFF0000, 0 }; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); for ( int m=0; m < 6; m++ ) if ( Htarg <= htmax[m] ) diff --git a/algo/x13/x13sm3-4way.c b/algo/x13/x13sm3-4way.c index 3289ed1..a107627 
100644 --- a/algo/x13/x13sm3-4way.c +++ b/algo/x13/x13sm3-4way.c @@ -81,7 +81,7 @@ void x13sm3_4way_hash( void *state, const void *input ) bmw512_4way_close( &ctx.bmw, vhash ); // Serial - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // Groestl update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -93,7 +93,7 @@ void x13sm3_4way_hash( void *state, const void *input ) update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); // Parallel 4way - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); // Skein skein512_4way( &ctx.skein, vhash, 64 ); @@ -108,16 +108,16 @@ void x13sm3_4way_hash( void *state, const void *input ) keccak512_4way_close( &ctx.keccak, vhash ); // Serial to the end - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // Luffa - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, hash2, hash3, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); // Cubehash cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 ); @@ -145,13 +145,13 @@ void x13sm3_4way_hash( void *state, const void *input ) sph_shavite512_close( &ctx.shavite, hash3 ); // Simd - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, hash2, hash3, 512 ); simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); // Echo update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -185,10 +185,10 @@ void x13sm3_4way_hash( void *state, const void *input ) dintrlv_4x32( hash0, hash1, hash2, hash3, sm3_vhash, 512 ); // Hamsi parallel 4x32x2 - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); hamsi512_4way( &ctx.hamsi, vhash, 64 ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // Fugue serial sph_fugue512( &ctx.fugue, hash0, 64 ); @@ -214,7 +214,6 @@ int scanhash_x13sm3_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; uint32_t n = pdata[19]; @@ -227,9 +226,7 @@ int scanhash_x13sm3_4way( struct work *work, uint32_t max_nonce, uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00, 0xFFFFF000, 0xFFFF0000, 0 }; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + 
mm256_bswap32_intrlv80_4x64( vdata, pdata ); blake512_4way_init( &x13sm3_ctx_mid ); blake512_4way( &x13sm3_ctx_mid, vdata, 64 ); diff --git a/algo/x14/polytimos-4way.c b/algo/x14/polytimos-4way.c index 4740bfd..3e1cc69 100644 --- a/algo/x14/polytimos-4way.c +++ b/algo/x14/polytimos-4way.c @@ -39,7 +39,7 @@ void polytimos_4way_hash( void *output, const void *input ) // Need to convert from 64 bit interleaved to 32 bit interleaved. uint32_t vhash32[16*4]; - mm256_rintrlv_4x64_4x32( vhash32, vhash, 512 ); + rintrlv_4x64_4x32( vhash32, vhash, 512 ); shabal512_4way_init( &ctx.shabal ); shabal512_4way( &ctx.shabal, vhash32, 64 ); shabal512_4way_close( &ctx.shabal, vhash32 ); @@ -58,15 +58,15 @@ void polytimos_4way_hash( void *output, const void *input ) update_final_echo( &ctx.echo, (BitSequence *)hash3, (const BitSequence *) hash3, 512 ); - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, hash2, hash3, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); sph_fugue512_init( &ctx.fugue ); sph_fugue512( &ctx.fugue, hash0, 64 ); @@ -105,7 +105,6 @@ int scanhash_polytimos_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; const uint32_t first_nonce = pdata[19]; @@ -118,9 +117,7 @@ int scanhash_polytimos_4way( struct work *work, uint32_t max_nonce, if ( opt_benchmark ) ptarget[7] = 0x0cff; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); do { *noncev = mm256_intrlv_blend_32( mm256_bswap_32( _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev ); diff --git a/algo/x14/veltor-4way.c b/algo/x14/veltor-4way.c index 15842a7..4f35161 100644 --- a/algo/x14/veltor-4way.c +++ b/algo/x14/veltor-4way.c @@ -40,7 +40,7 @@ void veltor_4way_hash( void *output, const void *input ) skein512_4way( &ctx.skein, input, 80 ); skein512_4way_close( &ctx.skein, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); sph_shavite512( &ctx.shavite, hash0, 64 ); sph_shavite512_close( &ctx.shavite, hash0 ); @@ -82,7 +82,6 @@ int scanhash_veltor_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; const uint32_t Htarg = ptarget[7]; @@ -95,9 +94,7 @@ int scanhash_veltor_4way( struct work *work, uint32_t max_nonce, if ( opt_benchmark ) ptarget[7] = 0x0cff; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); do { diff --git a/algo/x14/x14-4way.c b/algo/x14/x14-4way.c index 
de66208..5267d78 100644 --- a/algo/x14/x14-4way.c +++ b/algo/x14/x14-4way.c @@ -78,7 +78,7 @@ void x14_4way_hash( void *state, const void *input ) bmw512_4way_close( &ctx.bmw, vhash ); // Serial - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 3 Groestl update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -90,7 +90,7 @@ void x14_4way_hash( void *state, const void *input ) update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); // Parallel 4way - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); // 4 Skein skein512_4way( &ctx.skein, vhash, 64 ); @@ -105,16 +105,16 @@ void x14_4way_hash( void *state, const void *input ) keccak512_4way_close( &ctx.keccak, vhash ); // Serial - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 7 Luffa - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, hash2, hash3, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); // 8 Cubehash cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 ); @@ -142,13 +142,13 @@ void x14_4way_hash( void *state, const void *input ) sph_shavite512_close( &ctx.shavite, hash3 ); // 10 Simd - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, hash2, hash3, 512 ); simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); // 11 Echo update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -164,10 +164,10 @@ void x14_4way_hash( void *state, const void *input ) (const BitSequence *) hash3, 512 ); // 12 Hamsi parallel 4way 32 bit - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); hamsi512_4way( &ctx.hamsi, vhash, 64 ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 13 Fugue serial sph_fugue512( &ctx.fugue, hash0, 64 ); @@ -193,7 +193,6 @@ int scanhash_x14_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*16] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; uint32_t n = pdata[19]; @@ -206,9 +205,7 @@ int scanhash_x14_4way( struct work *work, uint32_t max_nonce, uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00, 0xFFFFF000, 0xFFFF0000, 0 }; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata 
); for ( int m=0; m < 6; m++ ) if ( Htarg <= htmax[m] ) diff --git a/algo/x15/x15-4way.c b/algo/x15/x15-4way.c index e4b395d..87fe361 100644 --- a/algo/x15/x15-4way.c +++ b/algo/x15/x15-4way.c @@ -81,7 +81,7 @@ void x15_4way_hash( void *state, const void *input ) bmw512_4way_close( &ctx.bmw, vhash ); // Serial - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 3 Groestl update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -93,7 +93,7 @@ void x15_4way_hash( void *state, const void *input ) update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); // Parallel 4way - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); // 4 Skein skein512_4way( &ctx.skein, vhash, 64 ); @@ -108,16 +108,16 @@ void x15_4way_hash( void *state, const void *input ) keccak512_4way_close( &ctx.keccak, vhash ); // Serial to the end - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 7 Luffa - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, hash2, hash3, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); // 8 Cubehash cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 ); @@ -145,13 +145,13 @@ void x15_4way_hash( void *state, const void *input ) sph_shavite512_close( &ctx.shavite, hash3 ); // 10 Simd - mm256_intrlv_2x128( vhash, hash0, hash1, 512 ); + intrlv_2x128( vhash, hash0, hash1, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, hash2, hash3, 512 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, hash2, hash3, 512 ); simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); // 11 Echo update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -167,10 +167,10 @@ void x15_4way_hash( void *state, const void *input ) (const BitSequence *) hash3, 512 ); // 12 Hamsi parallel 4way 32 bit - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); hamsi512_4way( &ctx.hamsi, vhash, 64 ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 13 Fugue sph_fugue512( &ctx.fugue, hash0, 64 ); @@ -217,7 +217,6 @@ int scanhash_x15_4way( struct work *work, uint32_t max_nonce, uint64_t *hashes_done, struct thr_info *mythr ) { uint32_t hash[4*8] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); uint32_t *pdata = work->data; uint32_t *ptarget = work->target; @@ -231,10 +230,8 @@ int scanhash_x15_4way( struct work *work, uint32_t max_nonce, uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00, 0xFFFFF000, 0xFFFF0000, 0 }; - swab32_array( edata, pdata, 20 ); - 
mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); for ( int m=0; m < 6; m++ ) if ( Htarg <= htmax[m] ) diff --git a/algo/x16/x16r-4way.c b/algo/x16/x16r-4way.c index 85eed51..56cce06 100644 --- a/algo/x16/x16r-4way.c +++ b/algo/x16/x16r-4way.c @@ -67,7 +67,7 @@ void x16r_4way_hash( void* output, const void* input ) void *in3 = (void*) hash3; int size = 80; - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, input, 640 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, input, 640 ); if ( s_ntime == UINT32_MAX ) { @@ -96,11 +96,11 @@ void x16r_4way_hash( void* output, const void* input ) blake512_4way( &ctx.blake, input, size ); else { - mm256_intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 ); + intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 ); blake512_4way( &ctx.blake, vhash, size ); } blake512_4way_close( &ctx.blake, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); break; case BMW: bmw512_4way_init( &ctx.bmw ); @@ -108,11 +108,11 @@ void x16r_4way_hash( void* output, const void* input ) bmw512_4way( &ctx.bmw, input, size ); else { - mm256_intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 ); + intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 ); bmw512_4way( &ctx.bmw, vhash, size ); } bmw512_4way_close( &ctx.bmw, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); break; case GROESTL: init_groestl( &ctx.groestl, 64 ); @@ -134,11 +134,11 @@ void x16r_4way_hash( void* output, const void* input ) skein512_4way( &ctx.skein, input, size ); else { - mm256_intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 ); + intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 ); skein512_4way( &ctx.skein, vhash, size ); } skein512_4way_close( &ctx.skein, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); break; case JH: jh512_4way_init( &ctx.jh ); @@ -146,11 +146,11 @@ void x16r_4way_hash( void* output, const void* input ) jh512_4way( &ctx.jh, input, size ); else { - mm256_intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 ); + intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 ); jh512_4way( &ctx.jh, vhash, size ); } jh512_4way_close( &ctx.jh, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); break; case KECCAK: keccak512_4way_init( &ctx.keccak ); @@ -158,21 +158,21 @@ void x16r_4way_hash( void* output, const void* input ) keccak512_4way( &ctx.keccak, input, size ); else { - mm256_intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 ); + intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 ); keccak512_4way( &ctx.keccak, vhash, size ); } keccak512_4way_close( &ctx.keccak, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); break; case LUFFA: - mm256_intrlv_2x128( vhash, in0, in1, size<<3 ); + intrlv_2x128( vhash, in0, in1, size<<3 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, size ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, in2, in3, size<<3 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, in2, in3, size<<3 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhash, vhash, size); - mm256_dintrlv_2x128( hash2, hash3, 
vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); break; case CUBEHASH: cubehashInit( &ctx.cube, 512, 16, 32 ); @@ -203,14 +203,14 @@ void x16r_4way_hash( void* output, const void* input ) sph_shavite512_close( &ctx.shavite, hash3 ); break; case SIMD: - mm256_intrlv_2x128( vhash, in0, in1, size<<3 ); + intrlv_2x128( vhash, in0, in1, size<<3 ); simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, size<<3 ); - mm256_dintrlv_2x128( hash0, hash1, vhash, 512 ); - mm256_intrlv_2x128( vhash, in2, in3, size<<3 ); + dintrlv_2x128( hash0, hash1, vhash, 512 ); + intrlv_2x128( vhash, in2, in3, size<<3 ); simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhash, vhash, size<<3 ); - mm256_dintrlv_2x128( hash2, hash3, vhash, 512 ); + dintrlv_2x128( hash2, hash3, vhash, 512 ); break; case ECHO: init_echo( &ctx.echo, 512 ); @@ -227,11 +227,11 @@ void x16r_4way_hash( void* output, const void* input ) (const BitSequence*)in3, size<<3 ); break; case HAMSI: - mm256_intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 ); + intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 ); hamsi512_4way_init( &ctx.hamsi ); hamsi512_4way( &ctx.hamsi, vhash, size ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); break; case FUGUE: sph_fugue512_init( &ctx.fugue ); @@ -269,11 +269,11 @@ void x16r_4way_hash( void* output, const void* input ) sph_whirlpool_close( &ctx.whirlpool, hash3 ); break; case SHA_512: - mm256_intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 ); + intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 ); sha512_4way_init( &ctx.sha512 ); sha512_4way( &ctx.sha512, vhash, size ); sha512_4way_close( &ctx.sha512, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); break; } size = 64; @@ -316,7 +316,7 @@ int scanhash_x16r_4way( struct work *work, uint32_t max_nonce, ptarget[7] = 0x0cff; uint64_t *edata = (uint64_t*)endiandata; - mm256_intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); + intrlv_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); do { diff --git a/algo/x17/sonoa-4way.c b/algo/x17/sonoa-4way.c index 18c1065..bda67b4 100644 --- a/algo/x17/sonoa-4way.c +++ b/algo/x17/sonoa-4way.c @@ -69,7 +69,7 @@ void sonoa_4way_hash( void *state, const void *input ) bmw512_4way( &ctx.bmw, vhash, 64 ); bmw512_4way_close( &ctx.bmw, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -80,7 +80,7 @@ void sonoa_4way_hash( void *state, const void *input ) init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); skein512_4way_init( &ctx.skein ); skein512_4way( &ctx.skein, vhash, 64 ); @@ -94,7 +94,7 @@ void sonoa_4way_hash( void *state, const void *input ) keccak512_4way( &ctx.keccak, vhash, 64 ); keccak512_4way_close( &ctx.keccak, vhash ); - mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); + rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); @@ -116,8 +116,8 @@ void sonoa_4way_hash( void *state, const void *input ) 
simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 ); + dintrlv_2x128_512( hash0, hash1, vhashA ); + dintrlv_2x128_512( hash2, hash3, vhashB ); init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -134,13 +134,13 @@ void sonoa_4way_hash( void *state, const void *input ) // 2 - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); bmw512_4way_init( &ctx.bmw ); bmw512_4way( &ctx.bmw, vhash, 64 ); bmw512_4way_close( &ctx.bmw, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -151,7 +151,7 @@ void sonoa_4way_hash( void *state, const void *input ) init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); skein512_4way_init( &ctx.skein ); skein512_4way( &ctx.skein, vhash, 64 ); @@ -165,7 +165,7 @@ void sonoa_4way_hash( void *state, const void *input ) keccak512_4way( &ctx.keccak, vhash, 64 ); keccak512_4way_close( &ctx.keccak, vhash ); - mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); + rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); @@ -187,8 +187,8 @@ void sonoa_4way_hash( void *state, const void *input ) simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 ); + dintrlv_2x128_512( hash0, hash1, vhashA ); + dintrlv_2x128_512( hash2, hash3, vhashB ); init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -203,7 +203,7 @@ void sonoa_4way_hash( void *state, const void *input ) update_final_echo( &ctx.echo, (BitSequence *)hash3, (const BitSequence *) hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); hamsi512_4way_init( &ctx.hamsi ); hamsi512_4way( &ctx.hamsi, vhash, 64 ); @@ -215,7 +215,7 @@ void sonoa_4way_hash( void *state, const void *input ) bmw512_4way( &ctx.bmw, vhash, 64 ); bmw512_4way_close( &ctx.bmw, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -226,7 +226,7 @@ void sonoa_4way_hash( void *state, const void *input ) init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); skein512_4way_init( &ctx.skein ); skein512_4way( &ctx.skein, vhash, 64 ); @@ -240,7 +240,7 @@ void sonoa_4way_hash( void *state, const void *input ) keccak512_4way( &ctx.keccak, vhash, 64 ); keccak512_4way_close( &ctx.keccak, vhash ); - mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); + rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, 
vhashA, vhashA, 64 ); @@ -262,8 +262,8 @@ void sonoa_4way_hash( void *state, const void *input ) simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 ); + dintrlv_2x128_512( hash0, hash1, vhashA ); + dintrlv_2x128_512( hash2, hash3, vhashB ); init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -278,13 +278,13 @@ void sonoa_4way_hash( void *state, const void *input ) update_final_echo( &ctx.echo, (BitSequence *)hash3, (const BitSequence *) hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); hamsi512_4way_init( &ctx.hamsi ); hamsi512_4way( &ctx.hamsi, vhash, 64 ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); sph_fugue512_init( &ctx.fugue ); sph_fugue512( &ctx.fugue, hash0, 64 ); @@ -300,13 +300,13 @@ void sonoa_4way_hash( void *state, const void *input ) sph_fugue512_close( &ctx.fugue, hash3 ); // 4 - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); bmw512_4way_init( &ctx.bmw ); bmw512_4way( &ctx.bmw, vhash, 64 ); bmw512_4way_close( &ctx.bmw, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -317,7 +317,7 @@ void sonoa_4way_hash( void *state, const void *input ) init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); skein512_4way_init( &ctx.skein ); skein512_4way( &ctx.skein, vhash, 64 ); @@ -331,7 +331,7 @@ void sonoa_4way_hash( void *state, const void *input ) keccak512_4way( &ctx.keccak, vhash, 64 ); keccak512_4way_close( &ctx.keccak, vhash ); - mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); + rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); @@ -353,8 +353,8 @@ void sonoa_4way_hash( void *state, const void *input ) simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 ); + dintrlv_2x128_512( hash0, hash1, vhashA ); + dintrlv_2x128_512( hash2, hash3, vhashB ); init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -369,13 +369,13 @@ void sonoa_4way_hash( void *state, const void *input ) update_final_echo( &ctx.echo, (BitSequence *)hash3, (const BitSequence *) hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); hamsi512_4way_init( &ctx.hamsi ); hamsi512_4way( &ctx.hamsi, vhash, 64 ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); sph_fugue512_init( &ctx.fugue ); sph_fugue512( &ctx.fugue, hash0, 64 ); @@ -396,13 +396,13 @@ void sonoa_4way_hash( void *state, const void *input ) shabal512_4way( &ctx.shabal, vhash, 64 ); 
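These sonoa hunks also swap the length-parameterized mm256_dintrlv_2x128( ..., 512 ) calls for the fixed-size dintrlv_2x128_512 / intrlv_2x128_512 helpers. A 2x128 interleave simply alternates 128 bit chunks of the two lanes, so a 512 bit hash contributes four chunks per lane. The snippet below is a minimal scalar sketch of that layout only; ref_dintrlv_2x128_512 is a hypothetical name, not the helper from simd-utils/intrlv.h.

/* Minimal scalar sketch of the 2x128 layout assumed by the *_2x128_512
 * helpers: 128 bit (two uint64_t) chunks of lane 0 and lane 1 alternate
 * in the interleaved buffer.  Hypothetical reference code only. */
#include <stdint.h>

static void ref_dintrlv_2x128_512( uint64_t *d0, uint64_t *d1,
                                   const uint64_t *s )
{
   for ( int i = 0; i < 4; i++ )      // 4 x 128 bit chunks per 512 bit lane
   {
      d0[ 2*i ]   = s[ 4*i ];         // lane 0, chunk i
      d0[ 2*i+1 ] = s[ 4*i + 1 ];
      d1[ 2*i ]   = s[ 4*i + 2 ];     // lane 1, chunk i
      d1[ 2*i+1 ] = s[ 4*i + 3 ];
   }
}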
shabal512_4way_close( &ctx.shabal, vhash ); - mm256_rintrlv_4x32_4x64( vhashB, vhash, 512 ); + rintrlv_4x32_4x64( vhashB, vhash, 512 ); hamsi512_4way_init( &ctx.hamsi ); hamsi512_4way( &ctx.hamsi, vhashB, 64 ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -417,8 +417,8 @@ void sonoa_4way_hash( void *state, const void *input ) update_final_echo( &ctx.echo, (BitSequence *)hash3, (const BitSequence *) hash3, 512 ); - mm256_intrlv_2x128( vhashA, hash0, hash1, 512 ); - mm256_intrlv_2x128( vhashB, hash2, hash3, 512 ); + intrlv_2x128_512( vhashA, hash0, hash1 ); + intrlv_2x128_512( vhashB, hash2, hash3 ); shavite512_2way_init( &ctx.shavite ); shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, 64 ); @@ -426,13 +426,13 @@ void sonoa_4way_hash( void *state, const void *input ) shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 ); // 5 - mm256_rintrlv_2x128_4x64( vhash, vhashA, vhashB, 512 ); + rintrlv_2x128_4x64( vhash, vhashA, vhashB, 512 ); bmw512_4way_init( &ctx.bmw ); bmw512_4way( &ctx.bmw, vhash, 64 ); bmw512_4way_close( &ctx.bmw, vhash ); - mm256_rintrlv_4x64_4x32( vhashB, vhash, 512 ); + rintrlv_4x64_4x32( vhashB, vhash, 512 ); shabal512_4way_init( &ctx.shabal ); shabal512_4way( &ctx.shabal, vhashB, 64 ); @@ -449,7 +449,7 @@ void sonoa_4way_hash( void *state, const void *input ) init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); skein512_4way_init( &ctx.skein ); skein512_4way( &ctx.skein, vhash, 64 ); @@ -463,7 +463,7 @@ void sonoa_4way_hash( void *state, const void *input ) keccak512_4way( &ctx.keccak, vhash, 64 ); keccak512_4way_close( &ctx.keccak, vhash ); - mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); + rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); @@ -485,8 +485,8 @@ void sonoa_4way_hash( void *state, const void *input ) simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 ); + dintrlv_2x128_512( hash0, hash1, vhashA ); + dintrlv_2x128_512( hash2, hash3, vhashB ); init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -501,13 +501,13 @@ void sonoa_4way_hash( void *state, const void *input ) update_final_echo( &ctx.echo, (BitSequence *)hash3, (const BitSequence *) hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); hamsi512_4way_init( &ctx.hamsi ); hamsi512_4way( &ctx.hamsi, vhash, 64 ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); sph_fugue512_init( &ctx.fugue ); sph_fugue512( &ctx.fugue, hash0, 64 ); @@ -545,13 +545,13 @@ void sonoa_4way_hash( void *state, const void *input ) // 6 - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); bmw512_4way_init( &ctx.bmw ); bmw512_4way( &ctx.bmw, vhash, 64 ); bmw512_4way_close( &ctx.bmw, vhash ); - mm256_dintrlv_4x64( 
hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -562,7 +562,7 @@ void sonoa_4way_hash( void *state, const void *input ) init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); skein512_4way_init( &ctx.skein ); skein512_4way( &ctx.skein, vhash, 64 ); @@ -576,7 +576,7 @@ void sonoa_4way_hash( void *state, const void *input ) keccak512_4way( &ctx.keccak, vhash, 64 ); keccak512_4way_close( &ctx.keccak, vhash ); - mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); + rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); @@ -598,8 +598,8 @@ void sonoa_4way_hash( void *state, const void *input ) simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 ); + dintrlv_2x128_512( hash0, hash1, vhashA ); + dintrlv_2x128_512( hash2, hash3, vhashB ); init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -614,13 +614,13 @@ void sonoa_4way_hash( void *state, const void *input ) update_final_echo( &ctx.echo, (BitSequence *)hash3, (const BitSequence *) hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); hamsi512_4way_init( &ctx.hamsi ); hamsi512_4way( &ctx.hamsi, vhash, 64 ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); sph_fugue512_init( &ctx.fugue ); sph_fugue512( &ctx.fugue, hash0, 64 ); @@ -656,13 +656,13 @@ void sonoa_4way_hash( void *state, const void *input ) sph_whirlpool( &ctx.whirlpool, hash3, 64 ); sph_whirlpool_close( &ctx.whirlpool, hash3 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); sha512_4way_init( &ctx.sha512 ); sha512_4way( &ctx.sha512, vhash, 64 ); sha512_4way_close( &ctx.sha512, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); sph_whirlpool_init( &ctx.whirlpool ); sph_whirlpool( &ctx.whirlpool, hash0, 64 ); @@ -679,13 +679,13 @@ void sonoa_4way_hash( void *state, const void *input ) // 7 - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); bmw512_4way_init( &ctx.bmw ); bmw512_4way( &ctx.bmw, vhash, 64 ); bmw512_4way_close( &ctx.bmw, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); @@ -696,7 +696,7 @@ void sonoa_4way_hash( void *state, const void *input ) init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); skein512_4way_init( &ctx.skein ); skein512_4way( &ctx.skein, vhash, 64 ); @@ -710,7 +710,7 @@ void sonoa_4way_hash( 
void *state, const void *input ) keccak512_4way( &ctx.keccak, vhash, 64 ); keccak512_4way_close( &ctx.keccak, vhash ); - mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); + rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); @@ -732,8 +732,8 @@ void sonoa_4way_hash( void *state, const void *input ) simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); - mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 ); - mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 ); + dintrlv_2x128_512( hash0, hash1, vhashA ); + dintrlv_2x128_512( hash2, hash3, vhashB ); init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -748,13 +748,13 @@ void sonoa_4way_hash( void *state, const void *input ) update_final_echo( &ctx.echo, (BitSequence *)hash3, (const BitSequence *) hash3, 512 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); hamsi512_4way_init( &ctx.hamsi ); hamsi512_4way( &ctx.hamsi, vhash, 64 ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); sph_fugue512_init( &ctx.fugue ); sph_fugue512( &ctx.fugue, hash0, 64 ); @@ -790,13 +790,13 @@ void sonoa_4way_hash( void *state, const void *input ) sph_whirlpool( &ctx.whirlpool, hash3, 64 ); sph_whirlpool_close( &ctx.whirlpool, hash3 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); sha512_4way_init( &ctx.sha512 ); sha512_4way( &ctx.sha512, vhash, 64 ); sha512_4way_close( &ctx.sha512, vhash ); - mm256_rintrlv_4x64_4x32( vhashB, vhash, 512 ); + rintrlv_4x64_4x32( vhashB, vhash, 512 ); haval256_5_4way_init( &ctx.haval ); haval256_5_4way( &ctx.haval, vhashB, 64 ); @@ -808,7 +808,6 @@ int scanhash_sonoa_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t lane_hash[8] __attribute__ ((aligned (32))); uint32_t *hash7 = &(hash[7<<2]); uint32_t *pdata = work->data; @@ -824,9 +823,7 @@ int scanhash_sonoa_4way( struct work *work, uint32_t max_nonce, 0xFFFFF000, 0xFFFF0000, 0 }; // Need big endian data - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); for ( int m=0; m < 6; m++ ) if ( Htarg <= htmax[m] ) { uint32_t mask = masks[m]; diff --git a/algo/x17/x17-4way.c b/algo/x17/x17-4way.c index e0cec73..8c8ad6a 100644 --- a/algo/x17/x17-4way.c +++ b/algo/x17/x17-4way.c @@ -68,7 +68,7 @@ void x17_4way_hash( void *state, const void *input ) bmw512_4way_close( &ctx.bmw, vhash ); // Serialize - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 3 Groestl init_groestl( &ctx.groestl, 64 ); @@ -81,7 +81,7 @@ void x17_4way_hash( void *state, const void *input ) update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); // Parallellize - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); // 4 Skein parallel 4 way 64 bit skein512_4way_init( &ctx.skein ); @@ -99,7 +99,7 @@ void x17_4way_hash( void *state, const void *input ) 
keccak512_4way_close( &ctx.keccak, vhash ); // 7 Luffa parallel 2 way 128 bit - mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); + rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); @@ -124,8 +124,8 @@ void x17_4way_hash( void *state, const void *input ) simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); - mm256_dintrlv_2x128_512( hash0, hash1, vhashA ); - mm256_dintrlv_2x128_512( hash2, hash3, vhashB ); + dintrlv_2x128_512( hash0, hash1, vhashA ); + dintrlv_2x128_512( hash2, hash3, vhashB ); // 11 Echo serial init_echo( &ctx.echo, 512 ); @@ -142,13 +142,13 @@ void x17_4way_hash( void *state, const void *input ) (const BitSequence *) hash3, 512 ); // 12 Hamsi parallel 4 way 64 bit - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); hamsi512_4way_init( &ctx.hamsi ); hamsi512_4way( &ctx.hamsi, vhash, 64 ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 13 Fugue serial sph_fugue512_init( &ctx.fugue ); @@ -188,14 +188,14 @@ void x17_4way_hash( void *state, const void *input ) sph_whirlpool_close( &ctx.whirlpool, hash3 ); // 16 SHA512 parallel 64 bit - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); sha512_4way_init( &ctx.sha512 ); sha512_4way( &ctx.sha512, vhash, 64 ); sha512_4way_close( &ctx.sha512, vhash ); // 17 Haval parallel 32 bit - mm256_rintrlv_4x64_4x32( vhashB, vhash, 512 ); + rintrlv_4x64_4x32( vhashB, vhash, 512 ); haval256_5_4way_init( &ctx.haval ); haval256_5_4way( &ctx.haval, vhashB, 64 ); @@ -207,7 +207,6 @@ int scanhash_x17_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t lane_hash[8] __attribute__ ((aligned (32))); uint32_t *hash7 = &(hash[7<<2]); uint32_t *pdata = work->data; @@ -223,9 +222,7 @@ int scanhash_x17_4way( struct work *work, uint32_t max_nonce, 0xFFFFF000, 0xFFFF0000, 0 }; // Need big endian data - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); for ( int m = 0; m < 6; m++ ) if ( Htarg <= htmax[m] ) { uint32_t mask = masks[ m ]; @@ -235,7 +232,7 @@ int scanhash_x17_4way( struct work *work, uint32_t max_nonce, _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev ); x17_4way_hash( hash, vdata ); - for ( int lane = 0; lane < 4; lane++ ) + for ( int lane = 0; lane < 4; lane++ ) if ( ( hash7[ lane ] & mask ) == 0 ) { extr_lane_4x32( lane_hash, hash, lane, 256 ); diff --git a/algo/x17/xevan-4way.c b/algo/x17/xevan-4way.c index 8b5a11c..f547276 100644 --- a/algo/x17/xevan-4way.c +++ b/algo/x17/xevan-4way.c @@ -71,7 +71,7 @@ void xevan_4way_hash( void *output, const void *input ) bmw512_4way_close( &ctx.bmw, vhash ); // Serial - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 ); init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, @@ -87,7 +87,7 @@ void xevan_4way_hash( void *output, const void *input ) dataLen<<3 ); // Parallel 4way - 
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 ); skein512_4way_init( &ctx.skein ); skein512_4way( &ctx.skein, vhash, dataLen ); @@ -101,7 +101,7 @@ void xevan_4way_hash( void *output, const void *input ) keccak512_4way( &ctx.keccak, vhash, dataLen ); keccak512_4way_close( &ctx.keccak, vhash ); - mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, dataLen<<3 ); + rintrlv_4x64_2x128( vhashA, vhashB, vhash, dataLen<<3 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, dataLen ); @@ -123,8 +123,8 @@ void xevan_4way_hash( void *output, const void *input ) simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhashB, vhashB, dataLen<<3 ); - mm256_dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 ); - mm256_dintrlv_2x128( hash2, hash3, vhashB, dataLen<<3 ); + dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 ); + dintrlv_2x128( hash2, hash3, vhashB, dataLen<<3 ); init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -139,13 +139,13 @@ void xevan_4way_hash( void *output, const void *input ) update_final_echo( &ctx.echo, (BitSequence *)hash3, (const BitSequence *) hash3, dataLen<<3 ); // Parallel - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 ); hamsi512_4way_init( &ctx.hamsi ); hamsi512_4way( &ctx.hamsi, vhash, dataLen ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 ); sph_fugue512_init( &ctx.fugue ); sph_fugue512( &ctx.fugue, hash0, dataLen ); @@ -183,19 +183,19 @@ void xevan_4way_hash( void *output, const void *input ) sph_whirlpool( &ctx.whirlpool, hash3, dataLen ); sph_whirlpool_close( &ctx.whirlpool, hash3 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 ); sha512_4way_init( &ctx.sha512 ); sha512_4way( &ctx.sha512, vhash, dataLen ); sha512_4way_close( &ctx.sha512, vhash ); - mm256_rintrlv_4x64_4x32( vhashA, vhash, dataLen<<3 ); + rintrlv_4x64_4x32( vhashA, vhash, dataLen<<3 ); haval256_5_4way_init( &ctx.haval ); haval256_5_4way( &ctx.haval, vhashA, dataLen ); haval256_5_4way_close( &ctx.haval, vhashA ); - mm256_rintrlv_4x32_4x64( vhash, vhashA, dataLen<<3 ); + rintrlv_4x32_4x64( vhash, vhashA, dataLen<<3 ); memset( &vhash[ 4<<2 ], 0, (dataLen-32) << 2 ); @@ -207,7 +207,7 @@ void xevan_4way_hash( void *output, const void *input ) bmw512_4way( &ctx.bmw, vhash, dataLen ); bmw512_4way_close( &ctx.bmw, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 ); init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, @@ -222,7 +222,7 @@ void xevan_4way_hash( void *output, const void *input ) update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, dataLen<<3 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 ); skein512_4way_init( &ctx.skein ); skein512_4way( &ctx.skein, vhash, dataLen ); @@ -236,7 +236,7 @@ void xevan_4way_hash( void *output, const void *input ) keccak512_4way( &ctx.keccak, vhash, dataLen ); keccak512_4way_close( &ctx.keccak, vhash ); - mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, dataLen<<3 ); + 
rintrlv_4x64_2x128( vhashA, vhashB, vhash, dataLen<<3 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, dataLen ); @@ -258,8 +258,8 @@ void xevan_4way_hash( void *output, const void *input ) simd_2way_init( &ctx.simd, 512 ); simd_2way_update_close( &ctx.simd, vhashB, vhashB, dataLen<<3 ); - mm256_dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 ); - mm256_dintrlv_2x128( hash2, hash3, vhashB, dataLen<<3 ); + dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 ); + dintrlv_2x128( hash2, hash3, vhashB, dataLen<<3 ); init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, @@ -274,13 +274,13 @@ void xevan_4way_hash( void *output, const void *input ) update_final_echo( &ctx.echo, (BitSequence *)hash3, (const BitSequence *) hash3, dataLen<<3 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 ); hamsi512_4way_init( &ctx.hamsi ); hamsi512_4way( &ctx.hamsi, vhash, dataLen ); hamsi512_4way_close( &ctx.hamsi, vhash ); - mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 ); + dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 ); sph_fugue512_init( &ctx.fugue ); sph_fugue512( &ctx.fugue, hash0, dataLen ); @@ -316,13 +316,13 @@ void xevan_4way_hash( void *output, const void *input ) sph_whirlpool( &ctx.whirlpool, hash3, dataLen ); sph_whirlpool_close( &ctx.whirlpool, hash3 ); - mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 ); + intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 ); sha512_4way_init( &ctx.sha512 ); sha512_4way( &ctx.sha512, vhash, dataLen ); sha512_4way_close( &ctx.sha512, vhash ); - mm256_rintrlv_4x64_4x32( vhashA, vhash, dataLen<<3 ); + rintrlv_4x64_4x32( vhashA, vhash, dataLen<<3 ); haval256_5_4way_init( &ctx.haval ); haval256_5_4way( &ctx.haval, vhashA, dataLen ); @@ -334,7 +334,6 @@ int scanhash_xevan_4way( struct work *work, uint32_t max_nonce, { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t vdata[24*4] __attribute__ ((aligned (64))); - uint32_t edata[20] __attribute__ ((aligned (64))); uint32_t lane_hash[8] __attribute__ ((aligned (32))); uint32_t *hash7 = &(hash[7<<2]); uint32_t *pdata = work->data; @@ -349,9 +348,7 @@ int scanhash_xevan_4way( struct work *work, uint32_t max_nonce, if ( opt_benchmark ) ptarget[7] = 0x0cff; - swab32_array( edata, pdata, 20 ); - mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 ); -// mm256_bswap_intrlv80_4x64( vdata, pdata ); + mm256_bswap32_intrlv80_4x64( vdata, pdata ); do { *noncev = mm256_intrlv_blend_32( mm256_bswap_32( _mm256_set_epi32( n+3, 0,n+2, 0,n+1, 0, n, 0 ) ), *noncev ); diff --git a/configure b/configure index 6dae213..0c55d03 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.5.3. +# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.5.4. # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. @@ -577,8 +577,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='cpuminer-opt' PACKAGE_TARNAME='cpuminer-opt' -PACKAGE_VERSION='3.9.5.3' -PACKAGE_STRING='cpuminer-opt 3.9.5.3' +PACKAGE_VERSION='3.9.5.4' +PACKAGE_STRING='cpuminer-opt 3.9.5.4' PACKAGE_BUGREPORT='' PACKAGE_URL='' @@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. 
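The x17 and xevan scanhash hunks above keep the vectorized nonce update, *noncev = mm256_intrlv_blend_32( mm256_bswap_32( _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev ), which writes the big-endian nonces directly into the interleaved data instead of re-encoding the whole header. In the 4x64 layout, 32 bit header word 19 of lane l sits in the high half of 64 bit word 9 of that lane. A minimal scalar sketch of the same update follows; the ref_* names are hypothetical and not part of this patch.

/* Scalar equivalent of the vectorized nonce update, assuming the 4x64
 * interleaved layout: header word 19 of lane l is the odd 32 bit half of
 * 64 bit word 9, i.e. 32 bit index 9*8 + 2*l + 1.  Illustrative only. */
#include <stdint.h>

static inline uint32_t ref_bswap32( uint32_t x )
{
   return  ( x << 24 ) | ( ( x & 0x0000ff00 ) <<  8 )
         | ( ( x >>  8 ) & 0x0000ff00 ) | ( x >> 24 );
}

static void ref_set_nonces_4x64( uint32_t *vdata, uint32_t n )
{
   for ( int lane = 0; lane < 4; lane++ )
      vdata[ 9*8 + 2*lane + 1 ] = ref_bswap32( n + lane );
}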
cat <<_ACEOF -\`configure' configures cpuminer-opt 3.9.5.3 to adapt to many kinds of systems. +\`configure' configures cpuminer-opt 3.9.5.4 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1404,7 +1404,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of cpuminer-opt 3.9.5.3:";; + short | recursive ) echo "Configuration of cpuminer-opt 3.9.5.4:";; esac cat <<\_ACEOF @@ -1509,7 +1509,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -cpuminer-opt configure 3.9.5.3 +cpuminer-opt configure 3.9.5.4 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by cpuminer-opt $as_me 3.9.5.3, which was +It was created by cpuminer-opt $as_me 3.9.5.4, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -2993,7 +2993,7 @@ fi # Define the identity of the package. PACKAGE='cpuminer-opt' - VERSION='3.9.5.3' + VERSION='3.9.5.4' cat >>confdefs.h <<_ACEOF @@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by cpuminer-opt $as_me 3.9.5.3, which was +This file was extended by cpuminer-opt $as_me 3.9.5.4, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -6756,7 +6756,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -cpuminer-opt config.status 3.9.5.3 +cpuminer-opt config.status 3.9.5.4 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index c1b4825..ea6d5e0 100644 --- a/configure.ac +++ b/configure.ac @@ -1,4 +1,4 @@ -AC_INIT([cpuminer-opt], [3.9.5.3]) +AC_INIT([cpuminer-opt], [3.9.5.4]) AC_PREREQ([2.59c]) AC_CANONICAL_SYSTEM diff --git a/simd-utils.h b/simd-utils.h index 3bc0743..fb61eb9 100644 --- a/simd-utils.h +++ b/simd-utils.h @@ -175,32 +175,25 @@ // 64 bit vectors #include "simd-utils/simd-64.h" -#include "simd-utils/intrlv-mmx.h" +//#include "simd-utils/intrlv-mmx.h" #if defined(__SSE2__) // 128 bit vectors #include "simd-utils/simd-128.h" -#include "simd-utils/intrlv-sse2.h" #if defined(__AVX__) // 256 bit vector basics #include "simd-utils/simd-256.h" -#include "simd-utils/intrlv-avx.h" #if defined(__AVX2__) -// 256 bit everything else -//#include "simd-utils/simd-avx2.h" -#include "simd-utils/intrlv-avx2.h" - // Skylake-X has all these #if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) // 512 bit vectors #include "simd-utils/simd-512.h" -#include "simd-utils/intrlv-avx512.h" #endif // MMX #endif // SSE2 @@ -208,7 +201,6 @@ #endif // AVX2 #endif // AVX512 -// Picks implementation based on available CPU features. 
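The simd-utils.h hunk drops the per-ISA interleave headers (intrlv-mmx.h, intrlv-sse2.h, intrlv-avx.h, intrlv-avx2.h, intrlv-avx512.h) and the intrlv-selector.h dispatcher in favour of a single intrlv.h, which is why the algorithm files above lose their mm256_/mm128_ prefixes: intrlv_4x64, dintrlv_4x64 and friends now name a data layout rather than an instruction set. As a rough guide to that layout, a scalar 4x64 interleave/deinterleave pair might look like the sketch below; the ref_* helpers are illustrative, not the code in intrlv.h.

/* Scalar reference for the 4x64 lane layout used throughout this patch:
 * 64 bit word i of lane l is stored at d[ 4*i + l ].  Illustrative only. */
#include <stddef.h>
#include <stdint.h>

static void ref_intrlv_4x64( uint64_t *d, const uint64_t *s0,
                             const uint64_t *s1, const uint64_t *s2,
                             const uint64_t *s3, size_t bit_len )
{
   for ( size_t i = 0; i < bit_len/64; i++ )
   {
      d[ 4*i   ] = s0[i];
      d[ 4*i+1 ] = s1[i];
      d[ 4*i+2 ] = s2[i];
      d[ 4*i+3 ] = s3[i];
   }
}

static void ref_dintrlv_4x64( uint64_t *d0, uint64_t *d1, uint64_t *d2,
                              uint64_t *d3, const uint64_t *s,
                              size_t bit_len )
{
   for ( size_t i = 0; i < bit_len/64; i++ )
   {
      d0[i] = s[ 4*i   ];
      d1[i] = s[ 4*i+1 ];
      d2[i] = s[ 4*i+2 ];
      d3[i] = s[ 4*i+3 ];
   }
}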
-#include "simd-utils/intrlv-selector.h" +#include "simd-utils/intrlv.h" #endif // SIMD_UTILS_H__ diff --git a/simd-utils/intrlv-avx.h b/simd-utils/intrlv-avx.h deleted file mode 100644 index 400f060..0000000 --- a/simd-utils/intrlv-avx.h +++ /dev/null @@ -1,911 +0,0 @@ -#if !defined(INTRLV_AVX_H__) -#define INTRLV_AVX_H__ 1 - -// philosophical discussion -// -// transitions: -// -// int32 <-> int64 -// uint64_t = (uint64_t)int32_lo | ( (uint64_t)int32_hi << 32 ) -// Efficient transition and post processing, 32 bit granularity is lost. -// -// int32 <-> m64 -// More complex, 32 bit granularity maintained, limited number of mmx regs. -// int32 <-> int64 <-> m64 might be more efficient. -// -// int32 <-> m128 -// Expensive, current implementation. -// -// int32 <-> m256 -// Very expensive multi stage, current implementation. -// -// int64/m64 <-> m128 -// Efficient, agnostic to native element size. Common. -// -// m128 <-> m256 -// Expensive for a single instruction, unavoidable. Common. -// -// Multi stage options -// -// int32 <-> int64 -> m128 -// More efficient than insert32, granularity maintained. Common. -// -// int64 <-> m128 -> m256 -// Unavoidable, reasonably efficient. Common -// -// int32 <-> int64 -> m128 -> m256 -// Seems inevitable, most efficient despite number of stages. Common. -// -// Implementation plan. -// -// 1. Complete m128 <-> m256 -// 2. Implement int64 <-> m128 -// 3. Combine int64 <-> m128 <-> m256 -// 4. Implement int32 <-> int64 <-> m128 -// 5. Combine int32 <-> int64 <-> m128 <-> m256 -// - -#if defined(__AVX__) - -// Convenient short cuts for local use only - -// Extract 64 bits from the low 128 bits of 256 bit vector. -#define extr64_cast128_256( a, n ) \ - _mm_extract_epi64( _mm256_castsi256_si128( a ), n ) - -// Extract 32 bits from the low 128 bits of 256 bit vector. -#define extr32_cast128_256( a, n ) \ - _mm_extract_epi32( _mm256_castsi256_si128( a ), n ) - -/////////////////////////////////////////////////////////// -// -// AVX 256 Bit Vectors -// -// 256 bit interleaving can be done with AVX. - -#define mm256_put_64( s0, s1, s2, s3) \ - _mm256_set_epi64x( *((const uint64_t*)(s3)), *((const uint64_t*)(s2)), \ - *((const uint64_t*)(s1)), *((const uint64_t*)(s0)) ) - -#define mm256_put_32( s00, s01, s02, s03, s04, s05, s06, s07 ) \ - _mm256_set_epi32( *((const uint32_t*)(s07)), *((const uint32_t*)(s06)), \ - *((const uint32_t*)(s05)), *((const uint32_t*)(s04)), \ - *((const uint32_t*)(s03)), *((const uint32_t*)(s02)), \ - *((const uint32_t*)(s01)), *((const uint32_t*)(s00)) ) - -#define mm256_get_64( s, i0, i1, i2, i3 ) \ - _mm256_set_epi64x( ((const uint64_t*)(s))[i3], ((const uint64_t*)(s))[i2], \ - ((const uint64_t*)(s))[i1], ((const uint64_t*)(s))[i0] ) - -#define mm256_get_32( s, i0, i1, i2, i3, i4, i5, i6, i7 ) \ - _mm256_set_epi32( ((const uint32_t*)(s))[i7], ((const uint32_t*)(s))[i6], \ - ((const uint32_t*)(s))[i5], ((const uint32_t*)(s))[i4], \ - ((const uint32_t*)(s))[i3], ((const uint32_t*)(s))[i2], \ - ((const uint32_t*)(s))[i1], ((const uint32_t*)(s))[i0] ) - -/* -// Blend 2 vectors alternating hi & lo: { hi[n], lo[n-1], ... 
hi[1], lo[0] } -#define mm256_intrlv_blend_128( hi, lo ) \ - _mm256_blend_epi32( hi, lo, 0x0f ) - -#define mm256_intrlv_blend_64( hi, lo ) \ - _mm256_blend_epi32( hi, lo, 0x33 ) - -#define mm256_intrlv_blend_32( hi, lo ) \ - _mm256_blend_epi32( hi, lo, 0x55 ) -*/ - -// Interleave 8x32_256 -#define mm256_intrlv_8x32_256( d, s0, s1, s2, s3, s4, s5, s6, s7 ) \ -{ \ - __m128i s0hi = mm128_extr_hi128_256( s0 ); \ - __m128i s1hi = mm128_extr_hi128_256( s1 ); \ - __m128i s2hi = mm128_extr_hi128_256( s2 ); \ - __m128i s3hi = mm128_extr_hi128_256( s3 ); \ - __m128i s4hi = mm128_extr_hi128_256( s4 ); \ - __m128i s5hi = mm128_extr_hi128_256( s5 ); \ - __m128i s6hi = mm128_extr_hi128_256( s6 ); \ - __m128i s7hi = mm128_extr_hi128_256( s7 ); \ - casti_m256i( d,0 ) = _mm256_set_epi32( \ - extr32_cast128_256(s7,0), extr32_cast128_256(s6,0), \ - extr32_cast128_256(s5,0), extr32_cast128_256(s4,0), \ - extr32_cast128_256(s3,0), extr32_cast128_256(s2,0), \ - extr32_cast128_256(s1,0), extr32_cast128_256(s0,0) ); \ - casti_m256i( d,1 ) = _mm256_set_epi32( \ - extr32_cast128_256(s7,1), extr32_cast128_256(s6,1), \ - extr32_cast128_256(s5,1), extr32_cast128_256(s4,1), \ - extr32_cast128_256(s3,1), extr32_cast128_256(s2,1), \ - extr32_cast128_256(s1,1), extr32_cast128_256(s0,1) ); \ - casti_m256i( d,2 ) = _mm256_set_epi32( \ - extr32_cast128_256(s7,2), extr32_cast128_256(s6,2), \ - extr32_cast128_256(s5,2), extr32_cast128_256(s4,2), \ - extr32_cast128_256(s3,2), extr32_cast128_256(s2,2), \ - extr32_cast128_256(s1,2), extr32_cast128_256(s0,2) ); \ - casti_m256i( d,3 ) = _mm256_set_epi32( \ - extr32_cast128_256(s7,3), extr32_cast128_256(s6,3), \ - extr32_cast128_256(s5,3), extr32_cast128_256(s4,3), \ - extr32_cast128_256(s3,3), extr32_cast128_256(s2,3), \ - extr32_cast128_256(s1,3), extr32_cast128_256(s0,3) ); \ - casti_m256i( d,4 ) = _mm256_set_epi32( \ - mm128_extr_32(s7hi,0), mm128_extr_32(s6hi,0), \ - mm128_extr_32(s5hi,0), mm128_extr_32(s4hi,0), \ - mm128_extr_32(s3hi,0), mm128_extr_32(s2hi,0), \ - mm128_extr_32(s1hi,0), mm128_extr_32(s0hi,0) ); \ - casti_m256i( d,5 ) = _mm256_set_epi32( \ - mm128_extr_32(s7hi,1), mm128_extr_32(s6hi,1), \ - mm128_extr_32(s5hi,1), mm128_extr_32(s4hi,1), \ - mm128_extr_32(s3hi,1), mm128_extr_32(s2hi,1), \ - mm128_extr_32(s1hi,1), mm128_extr_32(s0hi,1) ); \ - casti_m256i( d,6 ) = _mm256_set_epi32( \ - mm128_extr_32(s7hi,2), mm128_extr_32(s6hi,2), \ - mm128_extr_32(s5hi,2), mm128_extr_32(s4hi,2), \ - mm128_extr_32(s3hi,2), mm128_extr_32(s2hi,2), \ - mm128_extr_32(s1hi,2), mm128_extr_32(s0hi,2) ); \ - casti_m256i( d,7 ) = _mm256_set_epi32( \ - mm128_extr_32(s7hi,3), mm128_extr_32(s6hi,3), \ - mm128_extr_32(s5hi,3), mm128_extr_32(s4hi,3), \ - mm128_extr_32(s3hi,3), mm128_extr_32(s2hi,3), \ - mm128_extr_32(s1hi,3), mm128_extr_32(s0hi,3) ); \ -} while(0) - -#define mm256_intrlv_8x32_128( d, s0, s1, s2, s3, s4, s5, s6, s7 ) \ -{ \ - casti_m256i( d,0 ) = _mm256_set_epi32( \ - mm128_extr_32(s7,0), mm128_extr_32(s6,0), \ - mm128_extr_32(s5,0), mm128_extr_32(s4,0), \ - mm128_extr_32(s3,0), mm128_extr_32(s2,0), \ - mm128_extr_32(s1,0), mm128_extr_32(s0,0) ); \ - casti_m256i( d,1 ) = _mm256_set_epi32( \ - mm128_extr_32(s7,1), mm128_extr_32(s6,1), \ - mm128_extr_32(s5,1), mm128_extr_32(s4,1), \ - mm128_extr_32(s3,1), mm128_extr_32(s2,1), \ - mm128_extr_32(s1,1), mm128_extr_32(s0,1) ); \ - casti_m256i( d,2 ) = _mm256_set_epi32( \ - mm128_extr_32(s7,2), mm128_extr_32(s6,2), \ - mm128_extr_32(s5,2), mm128_extr_32(s4,2), \ - mm128_extr_32(s3,2), mm128_extr_32(s2,2), \ - mm128_extr_32(s1,2), 
mm128_extr_32(s0,2) ); \ - casti_m256i( d,3 ) = _mm256_set_epi32( \ - mm128_extr_32(s7,3), mm128_extr_32(s6,3), \ - mm128_extr_32(s5,3), mm128_extr_32(s4,3), \ - mm128_extr_32(s3,3), mm128_extr_32(s2,3), \ - mm128_extr_32(s1,3), mm128_extr_32(s0,3) ); \ -} while(0) - -/* -#define mm256_bswap_intrlv_8x32_256( d, src ) \ -do { \ - __m256i s0 = mm256_bswap_32( src ); \ - __m128i s1 = _mm256_extracti128_si256( s0, 1 ); \ - casti_m256i( d, 0 ) = _mm256_set1_epi32( _mm_extract_epi32( \ - _mm256_castsi256_si128( s0 ), 0 ) ); \ - casti_m256i( d, 1 ) = _mm256_set1_epi32( _mm_extract_epi32( \ - _mm256_castsi256_si128( s0 ), 1 ) ); \ - casti_m256i( d, 2 ) = _mm256_set1_epi32( _mm_extract_epi32( \ - _mm256_castsi256_si128( s0 ), 2 ) ); \ - casti_m256i( d, 3 ) = _mm256_set1_epi32( _mm_extract_epi32( \ - _mm256_castsi256_si128( s0 ), 3 ) ); \ - casti_m256i( d, 4 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 0 ) ); \ - casti_m256i( d, 5 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 1 ) ); \ - casti_m256i( d, 6 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 2 ) ); \ - casti_m256i( d, 7 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 3 ) ); \ -} while(0) - - -#define mm256_bswap_intrlv_8x32_128( d, src ) \ -do { \ - __m128i ss = mm128_bswap_32( src ); \ - casti_m256i( d, 0 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 0 ) ); \ - casti_m256i( d, 1 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 1 ) ); \ - casti_m256i( d, 2 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 2 ) ); \ - casti_m256i( d, 3 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 3 ) ); \ -} while(0) -*/ - -#define mm256_dintrlv_8x32_256( d0, d1, d2, d3, d4, d5, d6, d7, s ) \ -do { \ - __m256i s0 = casti_m256i(s,0); \ - __m256i s1 = casti_m256i(s,1); \ - __m256i s2 = casti_m256i(s,2); \ - __m256i s3 = casti_m256i(s,3); \ - __m256i s4 = casti_m256i(s,4); \ - __m256i s5 = casti_m256i(s,5); \ - __m256i s6 = casti_m256i(s,6); \ - __m256i s7 = casti_m256i(s,7); \ - __m128i s0hi = _mm256_extracti128_si256( s0, 1 ); \ - __m128i s1hi = _mm256_extracti128_si256( s1, 1 ); \ - __m128i s2hi = _mm256_extracti128_si256( s2, 1 ); \ - __m128i s3hi = _mm256_extracti128_si256( s3, 1 ); \ - __m128i s4hi = _mm256_extracti128_si256( s4, 1 ); \ - __m128i s5hi = _mm256_extracti128_si256( s5, 1 ); \ - __m128i s6hi = _mm256_extracti128_si256( s6, 1 ); \ - __m128i s7hi = _mm256_extracti128_si256( s7, 1 ); \ - d0 = _mm256_set_epi32( \ - extr32_cast128_256( s7, 0 ), extr32_cast128_256( s6, 0 ), \ - extr32_cast128_256( s5, 0 ), extr32_cast128_256( s4, 0 ), \ - extr32_cast128_256( s3, 0 ), extr32_cast128_256( s2, 0 ), \ - extr32_cast128_256( s1, 0 ), extr32_cast128_256( s0, 0 ) );\ - d1 = _mm256_set_epi32( \ - extr32_cast128_256( s7, 1 ), extr32_cast128_256( s6, 1 ), \ - extr32_cast128_256( s5, 1 ), extr32_cast128_256( s4, 1 ), \ - extr32_cast128_256( s3, 1 ), extr32_cast128_256( s2, 1 ), \ - extr32_cast128_256( s1, 1 ), extr32_cast128_256( s0, 1 ) );\ - d2 = _mm256_set_epi32( \ - extr32_cast128_256( s7, 2 ), extr32_cast128_256( s6, 2 ), \ - extr32_cast128_256( s5, 2 ), extr32_cast128_256( s4, 2 ), \ - extr32_cast128_256( s3, 2 ), extr32_cast128_256( s2, 2 ), \ - extr32_cast128_256( s1, 2 ), extr32_cast128_256( s0, 2 ) );\ - d3 = _mm256_set_epi32( \ - extr32_cast128_256( s7, 3 ), extr32_cast128_256( s6, 3 ), \ - extr32_cast128_256( s5, 3 ), extr32_cast128_256( s4, 3 ), \ - extr32_cast128_256( s3, 3 ), extr32_cast128_256( s2, 3 ), \ - extr32_cast128_256( s1, 3 ), extr32_cast128_256( s0, 3 ) );\ - d4 = _mm256_set_epi32( \ - _mm_extract_epi32( s7hi, 0 ), _mm_extract_epi32( s6hi, 0 
), \ - _mm_extract_epi32( s5hi, 0 ), _mm_extract_epi32( s4hi, 0 ), \ - _mm_extract_epi32( s3hi, 0 ), _mm_extract_epi32( s2hi, 0 ), \ - _mm_extract_epi32( s1hi, 0 ), _mm_extract_epi32( s0hi, 0 ) ); \ - d5 = _mm256_set_epi32( \ - _mm_extract_epi32( s7hi, 1 ), _mm_extract_epi32( s6hi, 1 ), \ - _mm_extract_epi32( s5hi, 1 ), _mm_extract_epi32( s4hi, 1 ), \ - _mm_extract_epi32( s3hi, 1 ), _mm_extract_epi32( s2hi, 1 ), \ - _mm_extract_epi32( s1hi, 1 ), _mm_extract_epi32( s0hi, 1 ) ); \ - d6 = _mm256_set_epi32( \ - _mm_extract_epi32( s7hi, 2 ), _mm_extract_epi32( s6hi, 2 ), \ - _mm_extract_epi32( s5hi, 2 ), _mm_extract_epi32( s4hi, 2 ), \ - _mm_extract_epi32( s3hi, 2 ), _mm_extract_epi32( s2hi, 2 ), \ - _mm_extract_epi32( s1hi, 2 ), _mm_extract_epi32( s0hi, 2 ) ); \ - d7 = _mm256_set_epi32( \ - _mm_extract_epi32( s7hi, 3 ), _mm_extract_epi32( s6hi, 3 ), \ - _mm_extract_epi32( s5hi, 3 ), _mm_extract_epi32( s4hi, 3 ), \ - _mm_extract_epi32( s3hi, 3 ), _mm_extract_epi32( s2hi, 3 ), \ - _mm_extract_epi32( s1hi, 3 ), _mm_extract_epi32( s0hi, 3 ) ); \ -} while(0) - -#define mm128_dintrlv_8x32_128( d0, d1, d2, d3, d4, d5, d6, d7, s ) \ -do { \ - __m128i s0 = casti_m128i(s,0); \ - __m128i s1 = casti_m128i(s,1); \ - __m128i s2 = casti_m128i(s,2); \ - __m128i s3 = casti_m128i(s,3); \ - d0 = _mm_set_epi32( \ - _mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \ - _mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \ - d1 = _mm_set_epi32( \ - _mm_extract_epi32( s3, 1 ), _mm_extract_epi32( s2, 0 ), \ - _mm_extract_epi32( s1, 1 ), _mm_extract_epi32( s0, 0 ) ); \ - d2 = _mm_set_epi32( \ - _mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \ - _mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \ - d3 = _mm_set_epi32( \ - _mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \ - _mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \ - d4 = _mm_set_epi32( \ - _mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \ - _mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \ - d5 = _mm_set_epi32( \ - _mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \ - _mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \ - d6 = _mm_set_epi32( \ - _mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \ - _mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \ - d7 = _mm_set_epi32( \ - _mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \ - _mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \ -} while(0) - -#define mm256_intrlv_4x64_256( d, s0, s1, s2, s3 ) \ -do { \ - __m128i s0hi = _mm256_extracti128_si256( s0, 1 ); \ - __m128i s1hi = _mm256_extracti128_si256( s1, 1 ); \ - __m128i s2hi = _mm256_extracti128_si256( s2, 1 ); \ - __m128i s3hi = _mm256_extracti128_si256( s3, 1 ); \ - casti_m256i( d,0 ) = _mm256_set_epi64x( \ - extr64_cast128_256( s3, 0 ), extr64_cast128_256( s2, 0 ), \ - extr64_cast128_256( s1, 0 ), extr64_cast128_256( s0, 0 ) ); \ - casti_m256i( d,1 ) = _mm256_set_epi64x( \ - extr64_cast128_256( s3, 1 ), extr64_cast128_256( s2, 1 ), \ - extr64_cast128_256( s1, 1 ), extr64_cast128_256( s0, 1 ) ); \ - casti_m256i( d,2 ) = _mm256_set_epi64x( \ - _mm_extract_epi64( s3hi,0 ), _mm_extract_epi64( s2hi,0 ), \ - _mm_extract_epi64( s1hi,0 ), _mm_extract_epi64( s0hi,0 ) ); \ - casti_m256i( d,3 ) = _mm256_set_epi64x( \ - _mm_extract_epi64( s3hi,1 ), _mm_extract_epi64( s2hi,1 ), \ - _mm_extract_epi64( s1hi,1 ), _mm_extract_epi64( s0hi,1 ) ); \ -} while(0) - -#define mm256_intrlv_4x64_128( d, s0, s1, s2, s3 ) \ -do { \ - casti_m256i( d,0 ) = _mm256_set_epi64x( \ - 
_mm_extract_epi64( s3, 0 ), _mm_extract_epi64( s2, 0 ), \ - _mm_extract_epi64( s1, 0 ), _mm_extract_epi64( s0, 0 ) ); \ - casti_m256i( d,1 ) = _mm256_set_epi64x( \ - _mm_extract_epi64( s3, 1 ), _mm_extract_epi64( s2, 1 ), \ - _mm_extract_epi64( s1, 1 ), _mm_extract_epi64( s0, 1 ) ); \ -} while(0) - -/* -#define mm256_bswap_intrlv_4x64_256( d, src ) \ -do { \ - __m256i s0 = mm256_bswap_32( src ); \ - __m128i s1 = _mm256_extracti128_si256( s0, 1 ); \ - casti_m256i( d,0 ) = _mm256_set1_epi64x( _mm_extract_epi64( \ - _mm256_castsi256_si128( s0 ), 0 ) ); \ - casti_m256i( d,1 ) = _mm256_set1_epi64x( _mm_extract_epi64( \ - _mm256_castsi256_si128( s0 ), 1 ) ); \ - casti_m256i( d,2 ) = _mm256_set1_epi64x( _mm_extract_epi64( s1, 0 ) ); \ - casti_m256i( d,3 ) = _mm256_set1_epi64x( _mm_extract_epi64( s1, 1 ) ); \ -} while(0) - -#define mm256_bswap_intrlv_4x64_128( d, src ) \ -do { \ - __m128i ss = mm128_bswap_32( src ); \ - casti_m256i( d,0 ) = _mm256_set1_epi64x( _mm_extract_epi64( ss, 0 ) ); \ - casti_m256i( d,1 ) = _mm256_set1_epi64x( _mm_extract_epi64( ss, 1 ) ); \ -} while(0) -*/ - -// 4 lanes of 256 bits using 64 bit interleaving (standard final hash size) -static inline void mm256_dintrlv_4x64_256( void *d0, void *d1, void *d2, - void *d3, const int n, const void *src ) -{ - __m256i s0 = *( (__m256i*) src ); // s[0][1:0] - __m256i s1 = *( (__m256i*)(src+32) ); // s[1][1:0] - __m256i s2 = *( (__m256i*)(src+64) ); // s[2][1:0] - __m256i s3 = *( (__m256i*)(src+96) ); // s[3][2:0] - __m128i s0hi = _mm256_extracti128_si256( s0, 1 ); // s[0][3:2] - __m128i s1hi = _mm256_extracti128_si256( s1, 1 ); // s[1][3:2] - __m128i s2hi = _mm256_extracti128_si256( s2, 1 ); // s[2][3:2] - __m128i s3hi = _mm256_extracti128_si256( s3, 1 ); // s[3][3:2] - - casti_m256i( d0,n ) = _mm256_set_epi64x( - extr64_cast128_256( s3, 0 ), extr64_cast128_256( s2, 0 ), - extr64_cast128_256( s1, 0 ), extr64_cast128_256( s0, 0 ) ); - casti_m256i( d1,n ) = _mm256_set_epi64x( - extr64_cast128_256( s3, 1 ), extr64_cast128_256( s2, 1 ), - extr64_cast128_256( s1, 1 ), extr64_cast128_256( s0, 1 ) ); - casti_m256i( d2,n ) = _mm256_set_epi64x( - _mm_extract_epi64( s3hi, 0 ), _mm_extract_epi64( s2hi, 0 ), - _mm_extract_epi64( s1hi, 0 ), _mm_extract_epi64( s0hi, 0 ) ); - casti_m256i( d3,n ) = _mm256_set_epi64x( - _mm_extract_epi64( s3hi, 1 ), _mm_extract_epi64( s2hi, 1 ), - _mm_extract_epi64( s1hi, 1 ), _mm_extract_epi64( s0hi, 1 ) ); -} - - -// quarter avx2 block, 16 bytes * 4 lanes -// 4 lanes of 128 bits using 64 bit interleaving -// Used for last 16 bytes of 80 byte input, only used for testing. 
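An 80 byte block header is 640 bits, so its 4x64 interleave ends with a single 128 bit chunk per lane, which is what these *_128 sub-functions cover. The scanhash hunks earlier in this patch fold the byte swap into that step, replacing swab32_array plus mm256_intrlv_4x64 with mm256_bswap32_intrlv80_4x64. A minimal scalar sketch of the combined operation follows; ref_bswap32_intrlv80_4x64 is a hypothetical name, the real helper lives in simd-utils/intrlv.h.

/* Scalar sketch of the bswap-interleave of an 80 byte header: byte swap
 * the 20 x 32 bit words to big endian, then broadcast the resulting
 * 10 x 64 bit words to 4 lanes with 64 bit interleaving.  Illustrative. */
#include <string.h>
#include <stdint.h>

static void ref_bswap32_intrlv80_4x64( void *dst, const void *src )
{
   uint32_t be[20];
   uint64_t s64[10];
   uint64_t *d = (uint64_t*)dst;

   for ( int i = 0; i < 20; i++ )
   {
      uint32_t w = ( (const uint32_t*)src )[i];
      be[i] =   ( w << 24 ) | ( ( w & 0x0000ff00 ) <<  8 )
              | ( ( w >>  8 ) & 0x0000ff00 ) | ( w >> 24 );
   }
   memcpy( s64, be, 80 );                  // view as 10 x 64 bit words

   for ( int i = 0; i < 10; i++ )          // same data in every lane
      d[ 4*i ] = d[ 4*i+1 ] = d[ 4*i+2 ] = d[ 4*i+3 ] = s64[i];
}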
-static inline void mm128_dintrlv_4x64_128( void *d0, void *d1, void *d2, - void *d3, const int n, const void *src ) -{ - __m256i s0 = *( (__m256i*) src ); - __m256i s1 = *( (__m256i*)(src+32) ); - __m128i s0hi = _mm256_extracti128_si256( s0, 1 ); - __m128i s1hi = _mm256_extracti128_si256( s1, 1 ); - - casti_m128i( d0,n ) = _mm_set_epi64x( extr64_cast128_256( s1 , 0 ), - extr64_cast128_256( s0 , 0 ) ); - casti_m128i( d1,n ) = _mm_set_epi64x( extr64_cast128_256( s1 , 1 ), - extr64_cast128_256( s0 , 1 ) ); - casti_m128i( d2,n ) = _mm_set_epi64x( _mm_extract_epi64( s1hi, 0 ), - _mm_extract_epi64( s0hi, 0 ) ); - casti_m128i( d3,n ) = _mm_set_epi64x( _mm_extract_epi64( s1hi, 1 ), - _mm_extract_epi64( s0hi, 1 ) ); -} - -/* -static inline void mm256_dintrlv_2x128x256( void *d0, void *d1, - const int n, const void *s ) -{ - casti_m256i( d0,n ) = mm256_get_64( s, 0, 1, 4, 5 ); - casti_m256i( d1,n ) = mm256_get_64( s, 2, 3, 6, 7 ); -} -*/ -// - -#define mm256_intrlv_4x32_256( d, s0, s1, s2, s3 ) \ -do { \ - casti_m256i( d,0 ) = _mm256_set_epi32( \ - mm128_extr_32( s3, 1 ), mm128_extr_32( s2, 1 ), \ - mm128_extr_32( s1, 1 ), mm128_extr_32( s0, 1 ), \ - mm128_extr_32( s3, 0 ), mm128_extr_32( s2, 0 ), \ - mm128_extr_32( s1, 0 ), mm128_extr_32( s0, 0 ) ); \ - casti_m256i( d,1 ) = _mm256_set_epi32( \ - mm128_extr_32( s3, 3 ), mm128_extr_32( s2, 3 ), \ - mm128_extr_32( s1, 3 ), mm128_extr_32( s0, 3 ), \ - mm128_extr_32( s3, 2 ), mm128_extr_32( s2, 2 ), \ - mm128_extr_32( s1, 2 ), mm128_extr_32( s0, 2 ) ); \ - casti_m256i( d,2 ) = _mm256_set_epi32( \ - mm128_extr_32( s3, 5 ), mm128_extr_32( s2, 5 ), \ - mm128_extr_32( s1, 5 ), mm128_extr_32( s0, 5 ), \ - mm128_extr_32( s3, 4 ), mm128_extr_32( s2, 4 ), \ - mm128_extr_32( s1, 4 ), mm128_extr_32( s0, 4 ) ); \ - casti_m256i( d,3 ) = _mm256_set_epi32( \ - mm128_extr_32( s3, 7 ), mm128_extr_32( s2, 7 ), \ - mm128_extr_32( s1, 7 ), mm128_extr_32( s0, 7 ), \ - mm128_extr_32( s3, 6 ), mm128_extr_32( s2, 6 ), \ - mm128_extr_32( s1, 6 ), mm128_extr_32( s0, 6 ) ); \ -} while(0) - -// 256 bit versions of commmon 128 bit functions. 
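Several hunks above also re-interleave in place of a full deinterleave/interleave round trip: rintrlv_4x64_4x32 feeds the 32 bit wide shabal and haval stages and rintrlv_4x32_4x64 converts back. In scalar terms the permutation moves lane l, 32 bit word j from index (j/2)*8 + 2*l + (j&1) in the 4x64 buffer to index j*4 + l in the 4x32 buffer, roughly as sketched below; the ref_ helper is illustrative, not the intrlv.h code.

/* Scalar sketch of the 4x64 -> 4x32 re-interleave used ahead of shabal
 * and haval: lane l, 32 bit word j moves from 4x64 index
 * (j/2)*8 + 2*l + (j&1) to 4x32 index j*4 + l.  Illustrative only. */
#include <stdint.h>

static void ref_rintrlv_4x64_4x32( uint32_t *d, const uint32_t *s,
                                   int bit_len )
{
   for ( int j = 0; j < bit_len/32; j++ )       // 32 bit words per lane
      for ( int l = 0; l < 4; l++ )             // lanes
         d[ j*4 + l ] = s[ (j/2)*8 + 2*l + (j&1) ];
}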
-static inline void mm256_intrlv_4x32( void *d, const void *s0, - const void *s1, const void *s2, const void *s3, int bit_len ) -{ - mm256_intrlv_4x32_256( d ,casti_m256i(s0,0), casti_m256i(s1,0), - casti_m256i(s2,0), casti_m256i(s3,0) ); - if ( bit_len <= 256 ) return; - mm256_intrlv_4x32_256( d+128 ,casti_m256i(s0,1), casti_m256i(s1,1), - casti_m256i(s2,1), casti_m256i(s3,1) ); - if ( bit_len <= 512 ) return; - if ( bit_len <= 640 ) - { - mm128_intrlv_4x32_128( d+256, casti_m128i(s0,4), casti_m128i(s1,4), - casti_m128i(s2,4), casti_m128i(s3,4) ); - return; - } - mm256_intrlv_4x32_256( d+256 ,casti_m256i(s0,2), casti_m256i(s1,2), - casti_m256i(s2,2), casti_m256i(s3,2) ); - mm256_intrlv_4x32_256( d+384 ,casti_m256i(s0,3), casti_m256i(s1,3), - casti_m256i(s2,3), casti_m256i(s3,3) ); -} - -static inline void mm256_dintrlv_4x32_256( void *d0, void *d1, void *d2, - void *d3, const void *src ) -{ - __m256i s0 = *(__m256i*) src; - __m256i s1 = *(__m256i*)(src+32); - __m256i s2 = *(__m256i*)(src+64); - __m256i s3 = *(__m256i*)(src+96); - *(__m256i*)d0 = _mm256_set_epi32( - _mm256_extract_epi32( s3,4 ), _mm256_extract_epi32( s3,0 ), - _mm256_extract_epi32( s2,4 ), _mm256_extract_epi32( s2,0 ), - _mm256_extract_epi32( s1,4 ), _mm256_extract_epi32( s1,0 ), - _mm256_extract_epi32( s0,4 ), _mm256_extract_epi32( s0,0 ) ); - *(__m256i*)d1 = _mm256_set_epi32( - _mm256_extract_epi32( s3,5 ), _mm256_extract_epi32( s3,1 ), - _mm256_extract_epi32( s2,5 ), _mm256_extract_epi32( s2,1 ), - _mm256_extract_epi32( s1,5 ), _mm256_extract_epi32( s1,1 ), - _mm256_extract_epi32( s0,5 ), _mm256_extract_epi32( s0,1 ) ); - *(__m256i*)d2 = _mm256_set_epi32( - _mm256_extract_epi32( s3,6 ), _mm256_extract_epi32( s3,2 ), - _mm256_extract_epi32( s2,6 ), _mm256_extract_epi32( s2,2 ), - _mm256_extract_epi32( s1,6 ), _mm256_extract_epi32( s1,2 ), - _mm256_extract_epi32( s0,6 ), _mm256_extract_epi32( s0,2 ) ); - *(__m256i*)d3 = _mm256_set_epi32( - _mm256_extract_epi32( s3,7 ), _mm256_extract_epi32( s3,3 ), - _mm256_extract_epi32( s2,7 ), _mm256_extract_epi32( s2,3 ), - _mm256_extract_epi32( s1,7 ), _mm256_extract_epi32( s1,3 ), - _mm256_extract_epi32( s0,7 ), _mm256_extract_epi32( s0,3 ) ); -} - -static inline void mm256_dintrlv_4x32( void *d0, void *d1, void *d2, - void *d3, const void *s, int bit_len ) -{ - mm256_dintrlv_4x32_256( d0 , d1 , d2 , d3 , s ); - if ( bit_len <= 256 ) return; - mm256_dintrlv_4x32_256( d0+ 32, d1+ 32, d2+ 32, d3+ 32, s+128 ); - if ( bit_len <= 512 ) return; - if ( bit_len <= 640 ) - { - mm128_dintrlv_4x32_128( d0+ 64, d1+ 64, d2+ 64, d3+ 64, s+256 ); - return; - } - mm256_dintrlv_4x32_256( d0+ 64, d1+ 64, d2+ 64, d3+ 64, s+256 ); - mm256_dintrlv_4x32_256( d0+ 96, d1+ 96, d2+ 96, d3+ 96, s+384 ); -} - -static inline void mm256_extr_lane_4x32( void *d, const void *s, - const int lane, const int bit_len ) -{ - casti_m256i( d, 0 ) = mm256_get_32( s, lane , lane+ 4, lane+ 8, lane+12, - lane+16, lane+20, lane+24, lane+28 ); - if ( bit_len <= 256 ) return; - casti_m256i( d, 1 ) = mm256_get_32( s, lane+32, lane+36, lane+40, lane+44, - lane+48, lane+52, lane+56, lane+60 ); -} - -// Interleave 8 source buffers containing 32 bit data into the destination -// vector -static inline void mm256_intrlv_8x32( void *d, const void *s0, - const void *s1, const void *s2, const void *s3, const void *s4, - const void *s5, const void *s6, const void *s7, int bit_len ) -{ - mm256_intrlv_8x32_256( d , casti_m256i( s0,0 ), casti_m256i( s1,0 ), - casti_m256i( s2,0 ), casti_m256i( s3,0 ), casti_m256i( s4,0 ), - casti_m256i( s5,0 
), casti_m256i( s6,0 ), casti_m256i( s7,0 ) ); - if ( bit_len <= 256 ) return; - mm256_intrlv_8x32_256( d+256, casti_m256i( s0,1 ), casti_m256i( s1,1 ), - casti_m256i( s2,1 ), casti_m256i( s3,1 ), casti_m256i( s4,1 ), - casti_m256i( s5,1 ), casti_m256i( s6,1 ), casti_m256i( s7,1 ) ); - if ( bit_len <= 512 ) return; - if ( bit_len <= 640 ) - { - mm256_intrlv_8x32_128( d+512, casti_m128i( s0,4 ), casti_m128i( s1,4 ), - casti_m128i( s2,4 ), casti_m128i( s3,4 ), casti_m128i( s4,4 ), - casti_m128i( s5,4 ), casti_m128i( s6,4 ), casti_m128i( s7,4 ) ); - return; - } - mm256_intrlv_8x32_256( d+512, casti_m256i( s0,2 ), casti_m256i( s1,2 ), - casti_m256i( s2,2 ), casti_m256i( s3,2 ), casti_m256i( s4,2 ), - casti_m256i( s5,2 ), casti_m256i( s6,2 ), casti_m256i( s7,2 ) ); - mm256_intrlv_8x32_256( d+768, casti_m256i( s0,3 ), casti_m256i( s1,3 ), - casti_m256i( s2,3 ), casti_m256i( s3,3 ), casti_m256i( s4,3 ), - casti_m256i( s5,3 ), casti_m256i( s6,3 ), casti_m256i( s7,3 ) ); - // bit_len == 1024 -} - -// A couple of mining specifi functions. -/* -// Interleave 80 bytes of 32 bit data for 8 lanes. -static inline void mm256_bswap_intrlv80_8x32( void *d, const void *s ) -{ - mm256_bswap_intrlv_8x32_256( d , casti_m256i( s, 0 ) ); - mm256_bswap_intrlv_8x32_256( d+256, casti_m256i( s, 1 ) ); - mm256_bswap_intrlv_8x32_128( d+512, casti_m128i( s, 4 ) ); -} -*/ - -// Deinterleave 8 buffers of 32 bit data from the source buffer. -// Sub-function can be called directly for 32 byte final hash. -static inline void mm256_dintrlv_8x32( void *d0, void *d1, void *d2, - void *d3, void *d4, void *d5, void *d6, void *d7, - const void *s, int bit_len ) -{ - mm256_dintrlv_8x32_256( casti_m256i(d0,0), casti_m256i(d1,0), - casti_m256i(d2,0), casti_m256i(d3,0), casti_m256i(d4,0), - casti_m256i(d5,0), casti_m256i(d6,0), casti_m256i(d7,0), s ); - if ( bit_len <= 256 ) return; - mm256_dintrlv_8x32_256( casti_m256i(d0,1), casti_m256i(d1,1), - casti_m256i(d2,1), casti_m256i(d3,1), casti_m256i(d4,1), - casti_m256i(d5,1), casti_m256i(d6,1), casti_m256i(d7,1), s+256 ); - if ( bit_len <= 512 ) return; - // short block, final 16 bytes of input data - if ( bit_len <= 640 ) - { - mm128_dintrlv_8x32_128( casti_m128i(d0,2), casti_m128i(d1,2), - casti_m128i(d2,2), casti_m128i(d3,2), casti_m128i(d4,2), - casti_m128i(d5,2), casti_m128i(d6,2), casti_m128i(d7,2), s+512 ); - return; - } - // bitlen == 1024 - mm256_dintrlv_8x32_256( casti_m256i(d0,2), casti_m256i(d1,2), - casti_m256i(d2,2), casti_m256i(d3,2), casti_m256i(d4,2), - casti_m256i(d5,2), casti_m256i(d6,2), casti_m256i(d7,2), s+512 ); - mm256_dintrlv_8x32_256( casti_m256i(d0,3), casti_m256i(d1,3), - casti_m256i(d2,3), casti_m256i(d3,3), casti_m256i(d4,3), - casti_m256i(d5,3), casti_m256i(d6,3), casti_m256i(d7,3), s+768 ); -} - -static inline void mm256_extr_lane_8x32( void *d, const void *s, - const int lane, const int bit_len ) -{ - casti_m256i( d,0 ) = mm256_get_32(s, lane , lane+ 8, lane+ 16, lane+ 24, - lane+32, lane+ 40, lane+ 48, lane+ 56 ); - if ( bit_len <= 256 ) return; - casti_m256i( d,1 ) = mm256_get_32(s, lane+64, lane+ 72, lane+ 80, lane+ 88, - lane+96, lane+104, lane+112, lane+120 ); - // bit_len == 512 -} - -// Interleave 4 source buffers containing 64 bit data into the destination -// buffer. Only bit_len 256, 512, 640 & 1024 are supported. 
-static inline void mm256_intrlv_4x64( void *d, const void *s0, - const void *s1, const void *s2, const void *s3, int bit_len ) -{ - mm256_intrlv_4x64_256( d , casti_m256i(s0,0), casti_m256i(s1,0), - casti_m256i(s2,0), casti_m256i(s3,0) ); - if ( bit_len <= 256 ) return; - mm256_intrlv_4x64_256( d+128, casti_m256i(s0,1), casti_m256i(s1,1), - casti_m256i(s2,1), casti_m256i(s3,1) ); - if ( bit_len <= 512 ) return; - if ( bit_len <= 640 ) - { - mm256_intrlv_4x64_128( d+256, casti_m128i(s0,4), casti_m128i(s1,4), - casti_m128i(s2,4), casti_m128i(s3,4) ); - return; - } - // bit_len == 1024 - mm256_intrlv_4x64_256( d+256, casti_m256i(s0,2), casti_m256i(s1,2), - casti_m256i(s2,2), casti_m256i(s3,2) ); - mm256_intrlv_4x64_256( d+384, casti_m256i(s0,3), casti_m256i(s1,3), - casti_m256i(s2,3), casti_m256i(s3,3) ); -} -/* -// Interleave 80 bytes of 32 bit data for 8 lanes. -static inline void mm256_bswap_intrlv80_4x64( void *d, const void *s ) -{ - mm256_bswap_intrlv_4x64_256( d , casti_m256i( s, 0 ) ); - mm256_bswap_intrlv_4x64_256( d+128, casti_m256i( s, 1 ) ); - mm256_bswap_intrlv_4x64_128( d+256, casti_m128i( s, 4 ) ); -} - -// Blend 32 byte lanes of hash from 2 sources according to control mask. -// macro due to 256 bit value arg. -#define mm256_blend_hash_4x64( dst, a, b, mask ) \ -do { \ - dst[0] = _mm256_blendv_epi8( a[0], b[0], mask ); \ - dst[1] = _mm256_blendv_epi8( a[1], b[1], mask ); \ - dst[2] = _mm256_blendv_epi8( a[2], b[2], mask ); \ - dst[3] = _mm256_blendv_epi8( a[3], b[3], mask ); \ - dst[4] = _mm256_blendv_epi8( a[4], b[4], mask ); \ - dst[5] = _mm256_blendv_epi8( a[5], b[5], mask ); \ - dst[6] = _mm256_blendv_epi8( a[6], b[6], mask ); \ - dst[7] = _mm256_blendv_epi8( a[7], b[7], mask ); \ -} while(0) -*/ - -// Deinterleave 4 buffers of 64 bit data from the source buffer. -// bit_len must be 256, 512, 640 or 1024 bits. -// Requires overrun padding for 640 bit len. -static inline void mm256_dintrlv_4x64( void *d0, void *d1, void *d2, - void *d3, const void *s, int bit_len ) -{ - mm256_dintrlv_4x64_256( d0, d1, d2, d3, 0, s ); - if ( bit_len <= 256 ) return; - mm256_dintrlv_4x64_256( d0, d1, d2, d3, 1, s+128 ); - if ( bit_len <= 512 ) return; - // short block, final 16 bytes of input data - if ( bit_len <= 640 ) - { - mm128_dintrlv_4x64_128( d0, d1, d2, d3, 4, s+256 ); - return; - } - // bit_len == 1024 - mm256_dintrlv_4x64_256( d0, d1, d2, d3, 2, s+256 ); - mm256_dintrlv_4x64_256( d0, d1, d2, d3, 3, s+384 ); -} - -// extract and deinterleave specified lane. -#define mm256_extr_lane_4x64_256 \ - casti_m256i( d, 0 ) = mm256_get_64( s, lane, lane+4, lane+8, lane+12 ) -static inline void mm256_extr_lane_4x64( void *d, const void *s, - const int lane, const int bit_len ) -{ - casti_m256i( d, 0 ) = mm256_get_64( s, lane, lane+4, lane+8, lane+12 ); - if ( bit_len <= 256 ) return; - casti_m256i( d, 1 ) = mm256_get_64( s, lane+16, lane+20, lane+24, lane+28 ); - return; -} - - -// Convert from 4x32 SSE2 interleaving to 4x64 AVX2. 
-// Can't do it in place -static inline void mm256_rintrlv_4x32_4x64( void *dst, void *src, - int bit_len ) -{ - __m256i* d = (__m256i*)dst; - uint32_t *s = (uint32_t*)src; - - d[0] = _mm256_set_epi32( s[ 7],s[ 3],s[ 6],s[ 2],s[ 5],s[ 1],s[ 4],s[ 0] ); - d[1] = _mm256_set_epi32( s[15],s[11],s[14],s[10],s[13],s[ 9],s[12],s[ 8] ); - d[2] = _mm256_set_epi32( s[23],s[19],s[22],s[18],s[21],s[17],s[20],s[16] ); - d[3] = _mm256_set_epi32( s[31],s[27],s[30],s[26],s[29],s[25],s[28],s[24] ); - - if ( bit_len <= 256 ) return; - - d[4] = _mm256_set_epi32( s[39],s[35],s[38],s[34],s[37],s[33],s[36],s[32] ); - d[5] = _mm256_set_epi32( s[47],s[43],s[46],s[42],s[45],s[41],s[44],s[40] ); - d[6] = _mm256_set_epi32( s[55],s[51],s[54],s[50],s[53],s[49],s[52],s[48] ); - d[7] = _mm256_set_epi32( s[63],s[59],s[62],s[58],s[61],s[57],s[60],s[56] ); - - if ( bit_len <= 512 ) return; - - d[8] = _mm256_set_epi32( s[71],s[67],s[70],s[66],s[69],s[65],s[68],s[64] ); - d[9] = _mm256_set_epi32( s[79],s[75],s[78],s[74],s[77],s[73],s[76],s[72] ); - - if ( bit_len <= 640 ) return; - - d[10] = _mm256_set_epi32(s[87],s[83],s[86],s[82],s[85],s[81],s[84],s[80]); - d[11] = _mm256_set_epi32(s[95],s[91],s[94],s[90],s[93],s[89],s[92],s[88]); - - d[12] = _mm256_set_epi32(s[103],s[99],s[102],s[98],s[101],s[97],s[100],s[96]); - d[13] = _mm256_set_epi32(s[111],s[107],s[110],s[106],s[109],s[105],s[108],s[104]); - d[14] = _mm256_set_epi32(s[119],s[115],s[118],s[114],s[117],s[113],s[116],s[112]); - d[15] = _mm256_set_epi32(s[127],s[123],s[126],s[122],s[125],s[121],s[124],s[120]); - // bit_len == 1024 -} - -// Convert 4x64 byte (256 bit) vectors to 4x32 (128 bit) vectors for AVX -// bit_len must be multiple of 64 -static inline void mm256_rintrlv_4x64_4x32( void *dst, void *src, - int bit_len ) -{ - __m256i *d = (__m256i*)dst; - uint32_t *s = (uint32_t*)src; - - d[0] = _mm256_set_epi32( s[ 7],s[ 5],s[ 3],s[ 1],s[ 6],s[ 4],s[ 2],s[ 0] ); - d[1] = _mm256_set_epi32( s[15],s[13],s[11],s[ 9],s[14],s[12],s[10],s[ 8] ); - d[2] = _mm256_set_epi32( s[23],s[21],s[19],s[17],s[22],s[20],s[18],s[16] ); - d[3] = _mm256_set_epi32( s[31],s[29],s[27],s[25],s[30],s[28],s[26],s[24] ); - - if ( bit_len <= 256 ) return; - - d[4] = _mm256_set_epi32( s[39],s[37],s[35],s[33],s[38],s[36],s[34],s[32] ); - d[5] = _mm256_set_epi32( s[47],s[45],s[43],s[41],s[46],s[44],s[42],s[40] ); - d[6] = _mm256_set_epi32( s[55],s[53],s[51],s[49],s[54],s[52],s[50],s[48] ); - d[7] = _mm256_set_epi32( s[63],s[61],s[59],s[57],s[62],s[60],s[58],s[56] ); - - if ( bit_len <= 512 ) return; - - d[8] = _mm256_set_epi32( s[71],s[69],s[67],s[65],s[70],s[68],s[66],s[64] ); - d[9] = _mm256_set_epi32( s[79],s[77],s[75],s[73],s[78],s[76],s[74],s[72] ); - - if ( bit_len <= 640 ) return; - - d[10] = _mm256_set_epi32( s[87],s[85],s[83],s[81],s[86],s[84],s[82],s[80] ); - d[11] = _mm256_set_epi32( s[95],s[93],s[91],s[89],s[94],s[92],s[90],s[88] ); - - d[12] = _mm256_set_epi32( s[103],s[101],s[99],s[97],s[102],s[100],s[98],s[96] ); - d[13] = _mm256_set_epi32( s[111],s[109],s[107],s[105],s[110],s[108],s[106],s[104] ); - d[14] = _mm256_set_epi32( s[119],s[117],s[115],s[113],s[118],s[116],s[114],s[112] ); - d[15] = _mm256_set_epi32( s[127],s[125],s[123],s[121],s[126],s[124],s[122],s[120] ); - // bit_len == 1024 -} - -static inline void mm256_rintrlv_4x64_2x128( void *dst0, void *dst1, - const void *src, int bit_len ) -{ - __m256i* d0 = (__m256i*)dst0; - __m256i* d1 = (__m256i*)dst1; - uint64_t *s = (uint64_t*)src; - - d0[0] = _mm256_set_epi64x( s[ 5], s[ 1], s[ 4], s[ 0] ); - d1[0] = _mm256_set_epi64x( s[ 
7], s[ 3], s[ 6], s[ 2] ); - - d0[1] = _mm256_set_epi64x( s[13], s[ 9], s[12], s[ 8] ); - d1[1] = _mm256_set_epi64x( s[15], s[11], s[14], s[10] ); - - if ( bit_len <= 256 ) return; - - d0[2] = _mm256_set_epi64x( s[21], s[17], s[20], s[16] ); - d1[2] = _mm256_set_epi64x( s[23], s[19], s[22], s[18] ); - - d0[3] = _mm256_set_epi64x( s[29], s[25], s[28], s[24] ); - d1[3] = _mm256_set_epi64x( s[31], s[27], s[30], s[26] ); - - if ( bit_len <= 512 ) return; - - d0[4] = _mm256_set_epi64x( s[37], s[33], s[36], s[32] ); - d1[4] = _mm256_set_epi64x( s[39], s[35], s[38], s[34] ); - - d0[5] = _mm256_set_epi64x( s[45], s[41], s[44], s[40] ); - d1[5] = _mm256_set_epi64x( s[47], s[43], s[46], s[42] ); - - d0[6] = _mm256_set_epi64x( s[53], s[49], s[52], s[48] ); - d1[6] = _mm256_set_epi64x( s[55], s[51], s[54], s[50] ); - - d0[7] = _mm256_set_epi64x( s[61], s[57], s[60], s[56] ); - d1[7] = _mm256_set_epi64x( s[63], s[59], s[62], s[58] ); -} - -static inline void mm256_rintrlv_2x128_4x64( void *dst, const void *src0, - const void *src1, int bit_len ) -{ - __m256i* d = (__m256i*)dst; - uint64_t *s0 = (uint64_t*)src0; - uint64_t *s1 = (uint64_t*)src1; - - d[ 0] = _mm256_set_epi64x( s1[2], s1[0], s0[2], s0[0] ); - d[ 1] = _mm256_set_epi64x( s1[3], s1[1], s0[3], s0[1] ); - d[ 2] = _mm256_set_epi64x( s1[6], s1[4], s0[6], s0[4] ); - d[ 3] = _mm256_set_epi64x( s1[7], s1[5], s0[7], s0[5] ); - - if ( bit_len <= 256 ) return; - - d[ 4] = _mm256_set_epi64x( s1[10], s1[ 8], s0[10], s0[ 8] ); - d[ 5] = _mm256_set_epi64x( s1[11], s1[ 9], s0[11], s0[ 9] ); - d[ 6] = _mm256_set_epi64x( s1[14], s1[12], s0[14], s0[12] ); - d[ 7] = _mm256_set_epi64x( s1[15], s1[13], s0[15], s0[13] ); - - if ( bit_len <= 512 ) return; - - d[ 8] = _mm256_set_epi64x( s1[18], s1[16], s0[18], s0[16] ); - d[ 9] = _mm256_set_epi64x( s1[19], s1[17], s0[19], s0[17] ); - d[10] = _mm256_set_epi64x( s1[22], s1[20], s0[22], s0[20] ); - d[11] = _mm256_set_epi64x( s1[23], s1[21], s0[23], s0[21] ); - - d[12] = _mm256_set_epi64x( s1[26], s1[24], s0[26], s0[24] ); - d[13] = _mm256_set_epi64x( s1[27], s1[25], s0[27], s0[25] ); - d[14] = _mm256_set_epi64x( s1[30], s1[28], s0[30], s0[28] ); - d[15] = _mm256_set_epi64x( s1[31], s1[29], s0[31], s0[29] ); -} - - -static inline void mm256_intrlv_2x128( const void *d, const void *s0, - void *s1, const int bit_len ) -{ - __m128i s1hi = _mm256_extracti128_si256( casti_m256i( s1,0 ), 1 ); - __m128i s0hi = _mm256_extracti128_si256( casti_m256i( s0,0 ), 1 ); - casti_m256i( d,0 ) = mm256_concat_128( - _mm256_castsi256_si128( casti_m256i( s1,0 ) ), - _mm256_castsi256_si128( casti_m256i( s0,0 ) ) ); - casti_m256i( d,1 ) = mm256_concat_128( s1hi, s0hi ); - - if ( bit_len <= 256 ) return; - s0hi = _mm256_extracti128_si256( casti_m256i( s0,1 ), 1 ); - s1hi = _mm256_extracti128_si256( casti_m256i( s1,1 ), 1 ); - casti_m256i( d,2 ) = mm256_concat_128( - _mm256_castsi256_si128( casti_m256i( s1,1 ) ), - _mm256_castsi256_si128( casti_m256i( s0,1 ) ) ); - casti_m256i( d,3 ) = mm256_concat_128( s1hi, s0hi ); - - if ( bit_len <= 512 ) return; - if ( bit_len <= 640 ) - { - casti_m256i( d,4 ) = mm256_concat_128( - _mm256_castsi256_si128( casti_m256i( s1,2 ) ), - _mm256_castsi256_si128( casti_m256i( s0,2 ) ) ); - return; - } - - s0hi = _mm256_extracti128_si256( casti_m256i( s0,2 ), 1 ); - s1hi = _mm256_extracti128_si256( casti_m256i( s1,2 ), 1 ); - casti_m256i( d,4 ) = mm256_concat_128( - _mm256_castsi256_si128( casti_m256i( s1,2 ) ), - _mm256_castsi256_si128( casti_m256i( s0,2 ) ) ); - casti_m256i( d,5 ) = mm256_concat_128( s1hi, s0hi ); 
- - s0hi = _mm256_extracti128_si256( casti_m256i( s0,3 ), 1 ); - s1hi = _mm256_extracti128_si256( casti_m256i( s1,3 ), 1 ); - casti_m256i( d,6 ) = mm256_concat_128( - _mm256_castsi256_si128( casti_m256i( s1,3 ) ), - _mm256_castsi256_si128( casti_m256i( s0,3 ) ) ); - casti_m256i( d,7 ) = mm256_concat_128( s1hi, s0hi ); -} - -// 512 is the bit len used by most, eliminate the conditionals -static inline void mm256_dintrlv_2x128_512( void *dst0, void *dst1, - const void *s ) -{ - __m256i *d0 = (__m256i*)dst0; - __m256i *d1 = (__m256i*)dst1; - - __m256i s0 = casti_m256i( s, 0 ); - __m256i s1 = casti_m256i( s, 1 ); - d0[0] = _mm256_permute2x128_si256( s0, s1, 0x20 ); - d1[0] = _mm256_permute2x128_si256( s0, s1, 0x31 ); - - s0 = casti_m256i( s, 2 ); - s1 = casti_m256i( s, 3 ); - d0[1] = _mm256_permute2x128_si256( s0, s1, 0x20 ); - d1[1] = _mm256_permute2x128_si256( s0, s1, 0x31 ); -} - -// Phase out usage for all 512 bit data lengths -static inline void mm256_dintrlv_2x128( void *dst0, void *dst1, const void *s, - int bit_len ) -{ - __m256i *d0 = (__m256i*)dst0; - __m256i *d1 = (__m256i*)dst1; - - __m256i s0 = casti_m256i( s, 0 ); - __m256i s1 = casti_m256i( s, 1 ); - d0[0] = _mm256_permute2x128_si256( s0, s1, 0x20 ); - d1[0] = _mm256_permute2x128_si256( s0, s1, 0x31 ); - - if ( bit_len <= 256 ) return; - - s0 = casti_m256i( s, 2 ); - s1 = casti_m256i( s, 3 ); - d0[1] = _mm256_permute2x128_si256( s0, s1, 0x20 ); - d1[1] = _mm256_permute2x128_si256( s0, s1, 0x31 ); - - if ( bit_len <= 512 ) return; - - s0 = casti_m256i( s, 4 ); - s1 = casti_m256i( s, 5 ); - d0[2] = _mm256_permute2x128_si256( s0, s1, 0x20 ); - d1[2] = _mm256_permute2x128_si256( s0, s1, 0x31 ); - - s0 = casti_m256i( s, 6 ); - s1 = casti_m256i( s, 7 ); - d0[3] = _mm256_permute2x128_si256( s0, s1, 0x20 ); - d1[3] = _mm256_permute2x128_si256( s0, s1, 0x31 ); -} - -#undef extr64_cast128_256 -#undef extr32_cast128_256 - -#endif // AVX -#endif // INTRLV_AVX_H__ diff --git a/simd-utils/intrlv-avx2.h b/simd-utils/intrlv-avx2.h deleted file mode 100644 index d4e3f77..0000000 --- a/simd-utils/intrlv-avx2.h +++ /dev/null @@ -1,104 +0,0 @@ -#if !defined(INTRLV_AVX2_H__) -#define INTRLV_AVX2_H__ 1 - -#if defined(__AVX2__) - -/////////////////////////////////////////////////////////// -// -// AVX2 256 Bit Vectors -// -// A few functions that need AVX2 for 256 bit. - - -// Blend 2 vectors alternating hi & lo: { hi[n], lo[n-1], ... 
hi[1], lo[0] } -#define mm256_intrlv_blend_128( hi, lo ) \ - _mm256_blend_epi32( hi, lo, 0x0f ) - -#define mm256_intrlv_blend_64( hi, lo ) \ - _mm256_blend_epi32( hi, lo, 0x33 ) - -#define mm256_intrlv_blend_32( hi, lo ) \ - _mm256_blend_epi32( hi, lo, 0x55 ) - - -#define mm256_bswap_intrlv_8x32_256( d, src ) \ -do { \ - __m256i s0 = mm256_bswap_32( src ); \ - __m128i s1 = _mm256_extracti128_si256( s0, 1 ); \ - casti_m256i( d, 0 ) = _mm256_set1_epi32( _mm_extract_epi32( \ - _mm256_castsi256_si128( s0 ), 0 ) ); \ - casti_m256i( d, 1 ) = _mm256_set1_epi32( _mm_extract_epi32( \ - _mm256_castsi256_si128( s0 ), 1 ) ); \ - casti_m256i( d, 2 ) = _mm256_set1_epi32( _mm_extract_epi32( \ - _mm256_castsi256_si128( s0 ), 2 ) ); \ - casti_m256i( d, 3 ) = _mm256_set1_epi32( _mm_extract_epi32( \ - _mm256_castsi256_si128( s0 ), 3 ) ); \ - casti_m256i( d, 4 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 0 ) ); \ - casti_m256i( d, 5 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 1 ) ); \ - casti_m256i( d, 6 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 2 ) ); \ - casti_m256i( d, 7 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 3 ) ); \ -} while(0) - -#define mm256_bswap_intrlv_8x32_128( d, src ) \ -do { \ - __m128i ss = mm128_bswap_32( src ); \ - casti_m256i( d, 0 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 0 ) ); \ - casti_m256i( d, 1 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 1 ) ); \ - casti_m256i( d, 2 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 2 ) ); \ - casti_m256i( d, 3 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 3 ) ); \ -} while(0) - -#define mm256_bswap_intrlv_4x64_256( d, src ) \ -do { \ - __m256i s0 = mm256_bswap_32( src ); \ - __m128i s1 = _mm256_extracti128_si256( s0, 1 ); \ - casti_m256i( d,0 ) = _mm256_set1_epi64x( _mm_extract_epi64( \ - _mm256_castsi256_si128( s0 ), 0 ) ); \ - casti_m256i( d,1 ) = _mm256_set1_epi64x( _mm_extract_epi64( \ - _mm256_castsi256_si128( s0 ), 1 ) ); \ - casti_m256i( d,2 ) = _mm256_set1_epi64x( _mm_extract_epi64( s1, 0 ) ); \ - casti_m256i( d,3 ) = _mm256_set1_epi64x( _mm_extract_epi64( s1, 1 ) ); \ -} while(0) - -#define mm256_bswap_intrlv_4x64_128( d, src ) \ -do { \ - __m128i ss = mm128_bswap_32( src ); \ - casti_m256i( d,0 ) = _mm256_set1_epi64x( _mm_extract_epi64( ss, 0 ) ); \ - casti_m256i( d,1 ) = _mm256_set1_epi64x( _mm_extract_epi64( ss, 1 ) ); \ -} while(0) - - -// A couple of mining specifi functions. - -// Interleave 80 bytes of 32 bit data for 8 lanes. -static inline void mm256_bswap_intrlv80_8x32( void *d, const void *s ) -{ - mm256_bswap_intrlv_8x32_256( d , casti_m256i( s, 0 ) ); - mm256_bswap_intrlv_8x32_256( d+256, casti_m256i( s, 1 ) ); - mm256_bswap_intrlv_8x32_128( d+512, casti_m128i( s, 4 ) ); -} - -// Interleave 80 bytes of 32 bit data for 8 lanes. -static inline void mm256_bswap_intrlv80_4x64( void *d, const void *s ) -{ - mm256_bswap_intrlv_4x64_256( d , casti_m256i( s, 0 ) ); - mm256_bswap_intrlv_4x64_256( d+128, casti_m256i( s, 1 ) ); - mm256_bswap_intrlv_4x64_128( d+256, casti_m128i( s, 4 ) ); -} - -// Blend 32 byte lanes of hash from 2 sources according to control mask. -// macro due to 256 bit value arg. 
-#define mm256_blend_hash_4x64( dst, a, b, mask ) \ -do { \ - dst[0] = _mm256_blendv_epi8( a[0], b[0], mask ); \ - dst[1] = _mm256_blendv_epi8( a[1], b[1], mask ); \ - dst[2] = _mm256_blendv_epi8( a[2], b[2], mask ); \ - dst[3] = _mm256_blendv_epi8( a[3], b[3], mask ); \ - dst[4] = _mm256_blendv_epi8( a[4], b[4], mask ); \ - dst[5] = _mm256_blendv_epi8( a[5], b[5], mask ); \ - dst[6] = _mm256_blendv_epi8( a[6], b[6], mask ); \ - dst[7] = _mm256_blendv_epi8( a[7], b[7], mask ); \ -} while(0) - -#endif // AVX2 -#endif // INTRLV_AVX2_H__ diff --git a/simd-utils/intrlv-avx512.h b/simd-utils/intrlv-avx512.h deleted file mode 100644 index 011c6d2..0000000 --- a/simd-utils/intrlv-avx512.h +++ /dev/null @@ -1,679 +0,0 @@ -#if !defined(INTRLV_AVX512_H__) -#define INTRLV_AVX512_H__ 1 - -#if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) - -// SSE2 functions used in AVX512 interleaving - -// AVX512 block is 64 * 64 bytes - -// quarter avx512 block, 16 bytes * 16 lanes -static inline void mm128_dintrlv_16x32x128( void *d00, void *d01, - void *d02, void *d03, void *d04, void *d05, void *d06, void *d07, - void *d08, void *d09, void *d10, void *d11, void *d12, void *d13, - void *d14, void *d15, const int n, const void *s ) -{ - cast_m128i( d00 ) = mm128_get_32( s, 0, 16, 32, 48 ); - cast_m128i( d01 ) = mm128_get_32( s, 1, 17, 33, 49 ); - cast_m128i( d02 ) = mm128_get_32( s, 2, 18, 34, 50 ); - cast_m128i( d03 ) = mm128_get_32( s, 3, 19, 35, 51 ); - cast_m128i( d04 ) = mm128_get_32( s, 4, 20, 36, 52 ); - cast_m128i( d05 ) = mm128_get_32( s, 5, 21, 37, 53 ); - cast_m128i( d06 ) = mm128_get_32( s, 6, 22, 38, 54 ); - cast_m128i( d07 ) = mm128_get_32( s, 7, 23, 39, 55 ); - cast_m128i( d08 ) = mm128_get_32( s, 8, 24, 40, 56 ); - cast_m128i( d09 ) = mm128_get_32( s, 9, 25, 41, 57 ); - cast_m128i( d10 ) = mm128_get_32( s, 10, 26, 42, 58 ); - cast_m128i( d11 ) = mm128_get_32( s, 11, 27, 43, 59 ); - cast_m128i( d12 ) = mm128_get_32( s, 12, 28, 44, 60 ); - cast_m128i( d13 ) = mm128_get_32( s, 13, 29, 45, 61 ); - cast_m128i( d14 ) = mm128_get_32( s, 14, 30, 46, 62 ); - cast_m128i( d15 ) = mm128_get_32( s, 15, 31, 47, 63 ); -} - -// quarter avx512 block, 32 bytes * 8 lanes -// 8 lanes of 128 bits using 64 bit interleaving -// Used for last 16 bytes of 80 byte input, only used for testing. 
-static inline void mm128_dintrlv_8x64x128( void *d0, void *d1, void *d2, - void *d3, void *d4, void *d5, void *d6, void *d7, - const int n, const void *s ) -{ - casti_m128i( d0,n ) = mm128_get_64( s, 0, 8 ); - casti_m128i( d1,n ) = mm128_get_64( s, 1, 9 ); - casti_m128i( d2,n ) = mm128_get_64( s, 2, 10 ); - casti_m128i( d3,n ) = mm128_get_64( s, 3, 11 ); - casti_m128i( d4,n ) = mm128_get_64( s, 4, 12 ); - casti_m128i( d5,n ) = mm128_get_64( s, 5, 13 ); - casti_m128i( d6,n ) = mm128_get_64( s, 6, 14 ); - casti_m128i( d7,n ) = mm128_get_64( s, 7, 15 ); -} - -static inline void mm128_dintrlv_4x128x128( void *d0, void *d1, void *d2, - void *d3, const int n, const void *s ) -{ - casti_m128i( d0,n ) = mm128_get_64( s, 0, 1 ); - casti_m128i( d1,n ) = mm128_get_64( s, 2, 3 ); - casti_m128i( d2,n ) = mm128_get_64( s, 4, 5 ); - casti_m128i( d3,n ) = mm128_get_64( s, 5, 7 ); -} - -// AVX2 functions Used in AVX512 interleaving - -static inline void mm256_dintrlv_16x32x256( void *d00, void *d01, - void *d02, void *d03, void *d04, void *d05, - void *d06, void *d07, void *d08, void *d09, - void *d10, void *d11, void *d12, void *d13, - void *d14, void *d15, const int n, const void *s ) -{ - casti_m256i( d00,n ) = mm256_get_32( s, 0, 16, 32, 48, 64, 80, 96,112 ); - casti_m256i( d01,n ) = mm256_get_32( s, 1, 17, 33, 49, 65, 81, 97,113 ); - casti_m256i( d02,n ) = mm256_get_32( s, 2, 18, 34, 50, 66, 82, 98,114 ); - casti_m256i( d03,n ) = mm256_get_32( s, 3, 19, 35, 51, 67, 83, 99,115 ); - casti_m256i( d04,n ) = mm256_get_32( s, 4, 20, 36, 52, 68, 84,100,116 ); - casti_m256i( d05,n ) = mm256_get_32( s, 5, 21, 37, 53, 69, 85,101,117 ); - casti_m256i( d06,n ) = mm256_get_32( s, 6, 22, 38, 54, 70, 86,102,118 ); - casti_m256i( d07,n ) = mm256_get_32( s, 7, 23, 39, 55, 71, 87,103,119 ); - casti_m256i( d08,n ) = mm256_get_32( s, 8, 24, 40, 56, 72, 88,104,120 ); - casti_m256i( d09,n ) = mm256_get_32( s, 9, 25, 41, 57, 73, 89,105,121 ); - casti_m256i( d10,n ) = mm256_get_32( s, 10, 26, 42, 58, 74, 90,106,122 ); - casti_m256i( d11,n ) = mm256_get_32( s, 11, 27, 43, 59, 75, 91,107,123 ); - casti_m256i( d12,n ) = mm256_get_32( s, 12, 28, 44, 60, 76, 92,108,124 ); - casti_m256i( d13,n ) = mm256_get_32( s, 13, 29, 45, 61, 77, 93,109,125 ); - casti_m256i( d14,n ) = mm256_get_32( s, 14, 30, 46, 62, 78, 94,110,126 ); - casti_m256i( d15,n ) = mm256_get_32( s, 15, 31, 47, 63, 79, 95,111,127 ); -} - -// 8 lanes of 256 bits using 64 bit interleaving (standard final hash size) -static inline void mm256_dintrlv_8x64x256( void *d0, void *d1, void *d2, - void *d3, void *d4, void *d5, void *d6, void *d7, - const int n, const void *s ) -{ - casti_m256i( d0,n ) = mm256_get_64( s, 0, 8, 16, 24 ); - casti_m256i( d1,n ) = mm256_get_64( s, 1, 9, 17, 25 ); - casti_m256i( d2,n ) = mm256_get_64( s, 2, 10, 18, 26 ); - casti_m256i( d3,n ) = mm256_get_64( s, 3, 11, 19, 27 ); - casti_m256i( d4,n ) = mm256_get_64( s, 4, 12, 20, 28 ); - casti_m256i( d5,n ) = mm256_get_64( s, 5, 13, 21, 29 ); - casti_m256i( d6,n ) = mm256_get_64( s, 6, 14, 22, 30 ); - casti_m256i( d7,n ) = mm256_get_64( s, 7, 15, 23, 31 ); -} - -static inline void mm256_dintrlv_4x128x256( void *d0, void *d1, void *d2, - void *d3, const int n, const void *s ) -{ - casti_m256i( d0,n ) = mm256_get_64( s, 0, 1, 8, 9 ); - casti_m256i( d1,n ) = mm256_get_64( s, 2, 3, 10, 11 ); - casti_m256i( d2,n ) = mm256_get_64( s, 4, 5, 12, 13 ); - casti_m256i( d3,n ) = mm256_get_64( s, 6, 7, 14, 15 ); -} - -// AVX 512 helper functions. -// -// Macro functions returning vector. 
-// Abstracted typecasting, avoid temp pointers. -// Source arguments may be any 64 or 32 byte aligned pointer as appropriate. - -#define mm512_put_64( s0, s1, s2, s3, s4, s5, s6, s7 ) \ - _mm512_set_epi64( *((const uint64_t*)(s7)), *((const uint64_t*)(s6)), \ - *((const uint64_t*)(s5)), *((const uint64_t*)(s4)), \ - *((const uint64_t*)(s3)), *((const uint64_t*)(s2)), \ - *((const uint64_t*)(s1)), *((const uint64_t*)(s0)) ) - -#define mm512_put_32( s00, s01, s02, s03, s04, s05, s06, s07, \ - s08, s09, s10, s11, s12, s13, s14, s15 ) \ - _mm512_set_epi32( *((const uint32_t*)(s15)), *((const uint32_t*)(s14)), \ - *((const uint32_t*)(s13)), *((const uint32_t*)(s12)), \ - *((const uint32_t*)(s11)), *((const uint32_t*)(s10)), \ - *((const uint32_t*)(s09)), *((const uint32_t*)(s08)), \ - *((const uint32_t*)(s07)), *((const uint32_t*)(s06)), \ - *((const uint32_t*)(s05)), *((const uint32_t*)(s04)), \ - *((const uint32_t*)(s03)), *((const uint32_t*)(s02)), \ - *((const uint32_t*)(s01)), *((const uint32_t*)(s00)) ) - -#define mm512_get_64( s, i0, i1, i2, i3, i4, i5, i6, i7 ) \ - _mm512_set_epi64( ((const uint64_t*)(s))[i7], ((const uint64_t*)(s))[i6], \ - ((const uint64_t*)(s))[i5], ((const uint64_t*)(s))[i4], \ - ((const uint64_t*)(s))[i3], ((const uint64_t*)(s))[i2], \ - ((const uint64_t*)(s))[i1], ((const uint64_t*)(s))[i0] ) - -#define mm512_get_32( s, i00, i01, i02, i03, i04, i05, i06, i07, \ - i08, i09, i10, i11, i12, i13, i14, i15 ) \ - _mm512_set_epi32( ((const uint32_t*)(s))[i15], ((const uint32_t*)(s))[i14], \ - ((const uint32_t*)(s))[i13], ((const uint32_t*)(s))[i12], \ - ((const uint32_t*)(s))[i11], ((const uint32_t*)(s))[i10], \ - ((const uint32_t*)(s))[i09], ((const uint32_t*)(s))[i08], \ - ((const uint32_t*)(s))[i07], ((const uint32_t*)(s))[i06], \ - ((const uint32_t*)(s))[i05], ((const uint32_t*)(s))[i04], \ - ((const uint32_t*)(s))[i03], ((const uint32_t*)(s))[i02], \ - ((const uint32_t*)(s))[i01], ((const uint32_t*)(s))[i00] ) - -// AVX512 has no blend, can be done with permute2xvar but at what cost? -// Can also be done with shifting and mask-or'ing for 3 instructins with -// 1 dependency. Finally it can be done with 1 _mm512_set but with 8 64 bit -// array index calculations and 8 pointer reads. - -// Blend 2 vectors alternating hi & lo: { hi[n], lo[n-1], ... hi[1]. 
lo[0] } -#define mm512_interleave_blend_128( hi, lo ) \ - _mm256_permute2xvar_epi64( hi, lo, _mm512_set_epi64( \ - 0x7, 0x6, 0x5, 0x4, 0xb, 0xa, 0x9, 0x8 ) - -#define mm512_interleave_blend_64( hi, lo ) \ - _mm256_permute2xvar_epi64( hi, lo, _mm512_set_epi64( \ - 0x7, 0x6, 0xd, 0xc, 0x3, 0x2, 0x9, 0x8 ) - -#define mm512_interleave_blend_32( hi, lo ) \ - _mm256_permute2xvar_epi32( hi, lo, _mm512_set_epi32( \ - 0x0f, 0x1e, 0x0d, 0x1c, 0x0b, 0x1a, 0x09, 0x18, \ - 0x07, 0x16, 0x05, 0x14, 0x03, 0x12, 0x01, 0x10 ) -// - -static inline void mm512_intrlv_16x32x512( void *d, const void *s00, - const void *s01, const void *s02, const void *s03, const void *s04, - const void *s05, const void *s06, const void *s07, const void *s08, - const void *s09, const void *s10, const void *s11, const void *s12, - const void *s13, const void *s14, const void *s15 ) -{ - casti_m512i( d, 0 ) = mm512_put_32( - s00, s01, s02, s03, s04, s05, s06, s07, - s08, s09, s10, s11, s12, s13, s14, s15 ); - casti_m512i( d, 1 ) = mm512_put_32( - s00+ 4, s01+ 4, s02+ 4, s03+ 4, s04+ 4, s05+ 4, s06+ 4, s07+ 4, - s08+ 4, s09+ 4, s10+ 4, s11+ 4, s12+ 4, s13+ 4, s14+ 4, s15+ 4 ); - casti_m512i( d, 2 ) = mm512_put_32( - s00+ 8, s01+ 8, s02+ 8, s03+ 8, s04+ 8, s05+ 8, s06+ 8, s07+ 8, - s08+ 8, s09+ 8, s10+ 8, s11+ 8, s12+ 8, s13+ 8, s14+ 8, s15+ 8 ); - casti_m512i( d, 3 ) = mm512_put_32( - s00+12, s01+12, s02+12, s03+12, s04+12, s05+12, s06+12, s07+12, - s08+12, s09+12, s10+12, s11+12, s12+12, s13+12, s14+12, s15+12 ); - casti_m512i( d, 4 ) = mm512_put_32( - s00+16, s01+16, s02+16, s03+16, s04+16, s05+16, s06+16, s07+16, - s08+16, s09+16, s10+16, s11+16, s12+16, s13+16, s14+16, s15+16 ); - casti_m512i( d, 5 ) = mm512_put_32( - s00+20, s01+20, s02+20, s03+20, s04+20, s05+20, s06+20, s07+20, - s08+20, s09+20, s10+20, s11+20, s12+20, s13+20, s14+20, s15+20 ); - casti_m512i( d, 6 ) = mm512_put_32( - s00+24, s01+24, s02+24, s03+24, s04+24, s05+24, s06+24, s07+24, - s08+24, s09+24, s10+24, s11+24, s12+24, s13+24, s14+24, s15+24 ); - casti_m512i( d, 7 ) = mm512_put_32( - s00+28, s01+28, s02+28, s03+28, s04+28, s05+28, s06+28, s07+28, - s08+28, s09+28, s10+28, s11+28, s12+28, s13+28, s14+28, s15+28 ); - casti_m512i( d, 8 ) = mm512_put_32( - s00+32, s01+28, s02+28, s03+28, s04+32, s05+28, s06+28, s07+28, - s08+32, s09+28, s10+28, s11+28, s12+32, s13+28, s14+28, s15+28 ); - casti_m512i( d, 9 ) = mm512_put_32( - s00+36, s01+28, s02+28, s03+28, s04+36, s05+28, s06+28, s07+28, - s08+36, s09+28, s10+28, s11+28, s12+36, s13+28, s14+28, s15+28 ); - casti_m512i( d,10 ) = mm512_put_32( - s00+40, s01+28, s02+28, s03+28, s04+40, s05+28, s06+28, s07+28, - s08+40, s09+28, s10+28, s11+28, s12+40, s13+28, s14+28, s15+28 ); - casti_m512i( d,11 ) = mm512_put_32( - s00+44, s01+28, s02+28, s03+28, s04+44, s05+28, s06+28, s07+28, - s08+44, s09+28, s10+28, s11+28, s12+44, s13+28, s14+28, s15+28 ); - casti_m512i( d,12 ) = mm512_put_32( - s00+48, s01+28, s02+28, s03+28, s04+48, s05+28, s06+28, s07+28, - s08+48, s09+28, s10+28, s11+28, s12+48, s13+28, s14+28, s15+28 ); - casti_m512i( d,13 ) = mm512_put_32( - s00+52, s01+28, s02+28, s03+28, s04+52, s05+28, s06+28, s07+28, - s08+52, s09+28, s10+28, s11+28, s12+52, s13+28, s14+28, s15+28 ); - casti_m512i( d,14 ) = mm512_put_32( - s00+56, s01+28, s02+28, s03+28, s04+56, s05+28, s06+28, s07+28, - s08+56, s09+28, s10+28, s11+28, s12+56, s13+28, s14+28, s15+28 ); - casti_m512i( d,15 ) = mm512_put_32( - s00+60, s01+28, s02+28, s03+28, s04+60, s05+28, s06+28, s07+28, - s08+60, s09+28, s10+28, s11+28, s12+60, s13+28, s14+28, 
s15+28 ); -} - -static inline void mm512_intrlv_16x32x256( void *d, const void *s00, - const void *s01, const void *s02, const void *s03, const void *s04, - const void *s05, const void *s06, const void *s07, const void *s08, - const void *s09, const void *s10, const void *s11, const void *s12, - const void *s13, const void *s14, const void *s15 ) -{ - casti_m512i( d, 0 ) = mm512_put_32( - s00, s01, s02, s03, s04, s05, s06, s07, - s08, s09, s10, s11, s12, s13, s14, s15 ); - casti_m512i( d, 1 ) = mm512_put_32( - s00+ 4, s01+ 4, s02+ 4, s03+ 4, s04+ 4, s05+ 4, s06+ 4, s07+ 4, - s08+ 4, s09+ 4, s10+ 4, s11+ 4, s12+ 4, s13+ 4, s14+ 4, s15+ 4 ); - casti_m512i( d, 2 ) = mm512_put_32( - s00+ 8, s01+ 8, s02+ 8, s03+ 8, s04+ 8, s05+ 8, s06+ 8, s07+ 8, - s08+ 8, s09+ 8, s10+ 8, s11+ 8, s12+ 8, s13+ 8, s14+ 8, s15+ 8 ); - casti_m512i( d, 3 ) = mm512_put_32( - s00+12, s01+12, s02+12, s03+12, s04+12, s05+12, s06+12, s07+12, - s08+12, s09+12, s10+12, s11+12, s12+12, s13+12, s14+12, s15+12 ); - casti_m512i( d, 4 ) = mm512_put_32( - s00+16, s01+16, s02+16, s03+16, s04+16, s05+16, s06+16, s07+16, - s08+16, s09+16, s10+16, s11+16, s12+16, s13+16, s14+16, s15+16 ); - casti_m512i( d, 5 ) = mm512_put_32( - s00+20, s01+20, s02+20, s03+20, s04+20, s05+20, s06+20, s07+20, - s08+20, s09+20, s10+20, s11+20, s12+20, s13+20, s14+20, s15+20 ); - casti_m512i( d, 6 ) = mm512_put_32( - s00+24, s01+24, s02+24, s03+24, s04+24, s05+24, s06+24, s07+24, - s08+24, s09+24, s10+24, s11+24, s12+24, s13+24, s14+24, s15+24 ); - casti_m512i( d, 7 ) = mm512_put_32( - s00+28, s01+28, s02+28, s03+28, s04+28, s05+28, s06+28, s07+28, - s08+28, s09+28, s10+28, s11+28, s12+28, s13+28, s14+28, s15+28 ); -} - -// Last 16 bytes of input -static inline void mm512_intrlv_16x32x128( void *d, const void *s00, - const void *s01, const void *s02, const void *s03, const void *s04, - const void *s05, const void *s06, const void *s07, const void *s08, - const void *s09, const void *s10, const void *s11, const void *s12, - const void *s13, const void *s14, const void *s15 ) -{ - casti_m512i( d, 0 ) = mm512_put_32( - s00, s01, s02, s03, s04, s05, s06, s07, - s08, s09, s10, s11, s12, s13, s14, s15 ); - casti_m512i( d, 1 ) = mm512_put_32( - s00+ 4, s01+ 4, s02+ 4, s03+ 4, s04+ 4, s05+ 4, s06+ 4, s07+ 4, - s08+ 4, s09+ 4, s10+ 4, s11+ 4, s12+ 4, s13+ 4, s14+ 4, s15+ 4 ); - casti_m512i( d, 2 ) = mm512_put_32( - s00+ 8, s01+ 8, s02+ 8, s03+ 8, s04+ 8, s05+ 8, s06+ 8, s07+ 8, - s08+ 8, s09+ 8, s10+ 8, s11+ 8, s12+ 8, s13+ 8, s14+ 8, s15+ 8 ); - casti_m512i( d, 3 ) = mm512_put_32( - s00+12, s01+12, s02+12, s03+12, s04+12, s05+12, s06+12, s07+12, - s08+12, s09+12, s10+12, s11+12, s12+12, s13+12, s14+12, s15+12 ); -} - -// can be called directly for 64 byte hash. 
-static inline void mm512_dintrlv_16x32x512( void *d00, void *d01, - void *d02, void *d03, void *d04, void *d05, void *d06, - void *d07, void *d08, void *d09, void *d10, void *d11, - void *d12, void *d13, void *d14, void *d15, const int n, - const void *s ) -{ - casti_m512i(d00,n) = mm512_get_32( s, 0, 16, 32, 48, 64, 80, 96,112, - 128,144,160,176,192,208,224,240 ); - casti_m512i(d01,n) = mm512_get_32( s, 1, 17, 33, 49, 65, 81, 97,113, - 129,145,161,177,193,209,225,241 ); - casti_m512i(d02,n) = mm512_get_32( s, 2, 18, 34, 50, 66, 82, 98,114, - 130,146,162,178,194,210,226,242 ); - casti_m512i(d03,n) = mm512_get_32( s, 3, 19, 35, 51, 67, 83, 99,115, - 131,147,163,179,195,211,227,243 ); - casti_m512i(d04,n) = mm512_get_32( s, 4, 20, 36, 52, 68, 84,100,116, - 132,148,164,180,196,212,228,244 ); - casti_m512i(d05,n) = mm512_get_32( s, 5, 21, 37, 53, 69, 85,101,117, - 133,149,165,181,197,213,229,245 ); - casti_m512i(d06,n) = mm512_get_32( s, 6, 22, 38, 54, 70, 86,102,118, - 134,150,166,182,198,214,230,246 ); - casti_m512i(d07,n) = mm512_get_32( s, 7, 23, 39, 55, 71, 87,103,119, - 135,151,167,183,199,215,231,247 ); - casti_m512i(d08,n) = mm512_get_32( s, 8, 24, 40, 56, 72, 88,104,120, - 136,152,168,184,200,216,232,248 ); - casti_m512i(d09,n) = mm512_get_32( s, 9, 25, 41, 57, 73, 89,105,121, - 137,153,169,185,201,217,233,249 ); - casti_m512i(d10,n) = mm512_get_32( s, 10, 26, 42, 58, 74, 90,106,122, - 138,154,170,186,202,218,234,250 ); - casti_m512i(d11,n) = mm512_get_32( s, 11, 27, 43, 59, 75, 91,107,123, - 139,155,171,187,203,219,235,251 ); - casti_m512i(d12,n) = mm512_get_32( s, 12, 28, 44, 60, 76, 92,108,124, - 140,156,172,188,204,220,236,252 ); - casti_m512i(d13,n) = mm512_get_32( s, 13, 29, 45, 61, 77, 93,109,125, - 141,157,173,189,205,221,237,253 ); - casti_m512i(d14,n) = mm512_get_32( s, 14, 30, 46, 62, 78, 94,110,126, - 142,158,174,190,206,222,238,254 ); - casti_m512i(d15,n) = mm512_get_32( s, 15, 31, 47, 63, 79, 95,111,127, - 143,159,175,191,207,223,239,255 ); -} - -static inline void mm512_intrlv_8x64x512( void *d, const void *s0, - const void *s1, const void *s2, const void *s3, - const void *s4, const void *s5, const void *s6, - const void *s7 ) -{ - casti_m512i( d,0 ) = mm512_put_64( s0, s1, s2, s3, - s4, s5, s6, s7 ); - casti_m512i( d,1 ) = mm512_put_64( s0+ 8, s1+ 8, s2+ 8, s3+ 8, - s4+ 8, s5+ 8, s6+ 8, s7+ 8 ); - casti_m512i( d,2 ) = mm512_put_64( s0+16, s1+16, s2+16, s3+16, - s4+16, s5+16, s6+16, s7+16 ); - casti_m512i( d,3 ) = mm512_put_64( s0+24, s1+24, s2+24, s3+24, - s4+24, s5+24, s6+24, s7+24 ); - casti_m512i( d,4 ) = mm512_put_64( s0+32, s1+32, s2+32, s3+32, - s4+32, s5+32, s6+32, s7+32 ); - casti_m512i( d,5 ) = mm512_put_64( s0+40, s1+40, s2+40, s3+40, - s4+40, s5+40, s6+40, s7+40 ); - casti_m512i( d,6 ) = mm512_put_64( s0+48, s1+48, s2+48, s3+48, - s4+48, s5+48, s6+48, s7+48 ); - casti_m512i( d,7 ) = mm512_put_64( s0+56, s1+56, s2+56, s3+56, - s4+56, s5+56, s6+56, s7+56 ); -} - -static inline void mm512_intrlv_8x64x256( void *d, const void *s0, - const void *s1, const void *s2, const void *s3, - const void *s4, const void *s5, const void *s6, - const void *s7 ) -{ - casti_m512i( d,0 ) = mm512_put_64( s0, s1, s2, s3, - s4, s5, s6, s7 ); - casti_m512i( d,1 ) = mm512_put_64( s0+ 8, s1+ 8, s2+ 8, s3+ 8, - s4+ 8, s5+ 8, s6+ 8, s7+ 8 ); - casti_m512i( d,2 ) = mm512_put_64( s0+16, s1+16, s2+16, s3+16, - s4+16, s5+16, s6+16, s7+16 ); - casti_m512i( d,3 ) = mm512_put_64( s0+24, s1+24, s2+24, s3+24, - s4+24, s5+24, s6+24, s7+24 ); -} - - -// 8 lanes of 512 bits using 64 bit 
interleaving (typical intermediate hash) -static inline void mm512_dintrlv_8x64x512( void *d0, void *d1, void *d2, - void *d3, void *d4, void *d5, void *d6, void *d7, - const int n, const void *s ) -{ - casti_m512i( d0,n ) = mm512_get_64( s, 0, 8, 16, 24, 32, 40, 48, 56 ); - casti_m512i( d1,n ) = mm512_get_64( s, 1, 9, 17, 25, 33, 41, 49, 57 ); - casti_m512i( d2,n ) = mm512_get_64( s, 2, 10, 18, 26, 34, 42, 50, 58 ); - casti_m512i( d3,n ) = mm512_get_64( s, 3, 11, 19, 27, 35, 43, 51, 59 ); - casti_m512i( d4,n ) = mm512_get_64( s, 4, 12, 20, 28, 36, 44, 52, 60 ); - casti_m512i( d5,n ) = mm512_get_64( s, 5, 13, 21, 29, 37, 45, 53, 61 ); - casti_m512i( d6,n ) = mm512_get_64( s, 6, 14, 22, 30, 38, 46, 54, 62 ); - casti_m512i( d7,n ) = mm512_get_64( s, 7, 15, 23, 31, 39, 47, 55, 63 ); -} - -static inline void mm512_dintrlv_4x128x512( void *d0, void *d1, void *d2, - void *d3, const int n, const void *s ) -{ - casti_m512i( d0,n ) = mm512_get_64( s, 0, 1, 8, 9, 16, 17, 24, 25 ); - casti_m512i( d1,n ) = mm512_get_64( s, 2, 3, 10, 11, 18, 19, 16, 27 ); - casti_m512i( d2,n ) = mm512_get_64( s, 4, 5, 12, 13, 20, 21, 28, 29 ); - casti_m512i( d3,n ) = mm512_get_64( s, 6, 7, 14, 15, 22, 23, 30, 31 ); -} - -// AVX-512 user facing functions. - -static inline void mm512_intrlv_16x32( void *d, const void *s00, - const void *s01, const void *s02, const void *s03, const void *s04, - const void *s05, const void *s06, const void *s07, const void *s08, - const void *s09, const void *s10, const void *s11, const void *s12, - const void *s13, const void *s14, const void *s15, int bit_len ) -{ - if ( bit_len <= 256 ) - { - mm512_intrlv_16x32x256( d, s00, s01, s02, s03, s04, s05, s06, s07, - s08, s09, s10, s11, s12, s13, s14, s15 ); - return; - } - mm512_intrlv_16x32x512( d, s00, s01, s02, s03, s04, s05, s06, s07, - s08, s09, s10, s11, s12, s13, s14, s15 ); - if ( bit_len <= 512 ) return; - if ( bit_len <= 640 ) - { - - mm512_intrlv_16x32x128( d+1024, s00+64, s01+64, s02+64, s03+64, - s04+64, s05+64, s06+64, s07+64, s08+64, s09+64, - s10+64, s11+64, s12+64, s13+64, s14+64, s15+64 ); - return; - } - mm512_intrlv_16x32x512( d+1024, s00+64, s01+64, s02+64, s03+64, - s04+64, s05+64, s06+64, s07+64, s08+64, s09+64, - s10+64, s11+64, s12+64, s13+64, s14+64, s15+64 ); - // bit_len == 1024 -} - -// sub-functions can be called directly for 32 & 64 byte hash. -static inline void mm512_dintrlv_16x32( void *d00, void *d01, void *d02, - void *d03, void *d04, void *d05, void *d06, void *d07, void *d08, - void *d09, void *d10, void *d11, void *d12, void *d13, void *d14, - void *d15, const void *src, const int bit_len ) -{ - if ( bit_len <= 256 ) - { - mm256_dintrlv_16x32x256( d00, d01, d02, d03, d04, d05, d06, d07, - d08, d09, d10, d11, d12, d13, d14, d15, - 0,src ); - return; - } - mm512_dintrlv_16x32x512( d00, d01, d02, d03, d04, d05, d06, d07, - d08, d09, d10, d11, d12, d13, d14, d15, - 0, src ); - if ( bit_len <= 512 ) return; - if ( bit_len <= 640 ) - { - // short block, final 16 bytes of input data. 
- mm128_dintrlv_16x32x128( d00, d01, d02, d03, d04, d05, d06, d07, - d08, d09, d10, d11, d12, d13, d14, d15, - 1, src+1024 ); - return; - } - // bit_len == 1024 - mm512_dintrlv_16x32x512( d00, d01, d02, d03, d04, d05, d06, d07, - d08, d09, d10, d11, d12, d13, d14, d15, - 1, src+1024 ); -} - -static inline void mm512_extr_lane_16x32( void *dst, const void *src, - const int lane, const int bit_len ) -{ - if ( bit_len <= 256 ) - { - cast_m256i( dst ) = mm256_get_32( src, lane, lane+16, lane+32, lane+48, - lane+64, lane+80, lane+96, lane+112 ); - return; - } - cast_m512i( dst ) = mm512_get_32( src, lane, lane+ 16, lane+ 32, lane+ 48, - lane+ 64, lane+ 80, lane+ 96, lane+112, lane+128, lane+144, - lane+160, lane+176, lane+192, lane+208, lane+224, lane+248 ); -} - -// - -static inline void mm512_intrlv_8x64( void *d, const void *s0, - const void *s1, const void *s2, const void *s3, - const void *s4, const void *s5, const void *s6, - const void *s7, int bit_len ) -{ - if ( bit_len <= 256 ) - { - mm512_intrlv_8x64x256( d, s0, s1, s2, s3, s4, s5, s6, s7 ); - return; - } - mm512_intrlv_8x64x512( d, s0, s1, s2, s3, s4, s5, s6, s7 ); - if ( bit_len <= 512 ) return; - if ( bit_len <= 640 ) - { - casti_m512i( d, 8 ) = mm512_put_64( s7+64, s6+64, s5+64, s4+64, - s3+64, s2+64, s1+64, s0+64 ); - casti_m512i( d, 9 ) = mm512_put_64( s7+72, s6+72, s5+72, s4+72, - s3+72, s2+72, s1+72, s0+72 ); - return; - } - // bitlen == 1024 - mm512_intrlv_8x64x512( d+512, s0+64, s1+64, s2+64, s3+64, - s4+64, s5+64, s6+64, s7+64 ); -} - - -static inline void mm512_dintrlv_8x64( void *d0, void *d1, void *d2, - void *d3, void *d4, void *d5, void *d6, void *d7, - const void *s, const int bit_len ) -{ - if ( bit_len <= 256 ) - { - mm256_dintrlv_8x64x256( d0, d1, d2, d3, d4, d5, d6, d7, 0, s ); - return; - } - mm512_dintrlv_8x64x512( d0, d1, d2, d3, d4, d5, d6, d7, 0, s ); - if ( bit_len <= 512 ) return; - if ( bit_len <= 640 ) - { - // short block, final 16 bytes of input data. 
- mm128_dintrlv_8x64x128( d0, d1, d2, d3, d4, d5, d6, d7, 1, s+512 ); - return; - } - // bit_len == 1024 - mm512_dintrlv_8x64x512( d0, d1, d2, d3, d4, d5, d6, d7, 1, s+512 ); -} - -// Extract one lane from 64 bit interleaved data -static inline void mm512_extr_lane_8x64( void *d, const void *s, - const int lane, const int bit_len ) -{ - if ( bit_len <= 256 ) - { - cast_m256i( d ) = mm256_get_64( s, lane, lane+8, lane+16, lane+24 ); - return; - } - // else bit_len == 512 - cast_m512i( d ) = mm512_get_64( s, lane , lane+ 8, lane+16, lane+24, - lane+32, lane+40, lane+48, lane+56 ); -} - -// - -static inline void mm512_intrlv_4x128( void *d, const void *s0, - const void *s1, const void *s2, const void *s3, const int bit_len ) -{ - casti_m512i( d, 0 ) = mm512_put_64( s0, s0+8, s1, s1+8, - s2, s2+8, s3, s3+8 ); - casti_m512i( d, 1 ) = mm512_put_64( s0+16, s0+24, s1+16, s1+24, - s2+16, s2+24, s3+16, s3+24 ); - if ( bit_len <= 256 ) return; - - casti_m512i( d, 2 ) = mm512_put_64( s0+32, s0+40, s1+32, s1+40, - s2+32, s2+40, s3+32, s3+40 ); - casti_m512i( d, 3 ) = mm512_put_64( s0+48, s0+56, s1+48, s1+56, - s2+48, s2+56, s3+48, s3+56 ); - if ( bit_len <= 512 ) return; - - casti_m512i( d, 4 ) = mm512_put_64( s0+64, s0+72, s1+64, s1+72, - s2+64, s2+72, s3+64, s3+72 ); - if ( bit_len <= 640 ) return; - - casti_m512i( d, 5 ) = mm512_put_64( s0+ 80, s0+ 88, s1+ 80, s1+ 88, - s2+ 80, s2+ 88, s3+ 80, s3+ 88 ); - casti_m512i( d, 6 ) = mm512_put_64( s0+ 96, s0+104, s1+ 96, s1+104, - s2+ 96, s2+104, s3+ 96, s3+104 ); - casti_m512i( d, 7 ) = mm512_put_64( s0+112, s0+120, s1+112, s1+120, - s2+112, s2+120, s3+112, s3+120 ); - // bit_len == 1024 -} - -static inline void mm512_dintrlv_4x128( void *d0, void *d1, void *d2, - void *d3, const void *s, const int bit_len ) -{ - if ( bit_len <= 256 ) - { - mm256_dintrlv_4x128x256( d0, d1, d2, d3, 0, s ); - return; - } - mm512_dintrlv_4x128x512( d0, d1, d2, d3, 0, s ); - if ( bit_len <= 512 ) return; - if ( bit_len <= 640 ) - { - mm128_dintrlv_4x128x128( d0, d1, d2, d3, 1, s+256 ); - return; - } - // bit_len == 1024 - mm512_dintrlv_4x128x512( d0, d1, d2, d3, 1, s+256 ); -} - -// input one 8x64 buffer and return 2*4*128 -static inline void mm512_rintrlv_8x64_4x128( void *dst0, void *dst1, - const void *src, int bit_len ) -{ - __m512i* d0 = (__m512i*)dst0; - __m512i* d1 = (__m512i*)dst1; - uint64_t *s = (uint64_t*)src; - - d0[0] = _mm512_set_epi64( s[ 11], s[ 3], s[ 10], s[ 2], - s[ 9], s[ 1], s[ 8], s[ 0] ); - d0[1] = _mm512_set_epi64( s[ 27], s[ 19], s[ 26], s[ 18], - s[ 25], s[ 17], s[ 24], s[ 16] ); - d0[2] = _mm512_set_epi64( s[ 15], s[ 7], s[ 14], s[ 6], - s[ 13], s[ 5], s[ 12], s[ 4] ); - d0[3] = _mm512_set_epi64( s[ 31], s[ 23], s[ 30], s[ 22], - s[ 29], s[ 21], s[ 28], s[ 20] ); - d1[0] = _mm512_set_epi64( s[ 43], s[ 35], s[ 42], s[ 34], - s[ 41], s[ 33], s[ 40], s[ 32] ); - d1[1] = _mm512_set_epi64( s[ 59], s[ 51], s[ 58], s[ 50], - s[ 57], s[ 49], s[ 56], s[ 48] ); - d1[2] = _mm512_set_epi64( s[ 47], s[ 39], s[ 46], s[ 38], - s[ 45], s[ 37], s[ 44], s[ 36] ); - d1[3] = _mm512_set_epi64( s[ 63], s[ 55], s[ 62], s[ 54], - s[ 61], s[ 53], s[ 60], s[ 52] ); - - if ( bit_len <= 512 ) return; - - d0[4] = _mm512_set_epi64( s[ 75], s[ 67], s[ 74], s[ 66], - s[ 73], s[ 65], s[ 72], s[ 64] ); - d0[5] = _mm512_set_epi64( s[ 91], s[ 83], s[ 90], s[ 82], - s[ 89], s[ 81], s[ 88], s[ 80] ); - d0[6] = _mm512_set_epi64( s[ 79], s[ 71], s[ 78], s[ 70], - s[ 77], s[ 69], s[ 76], s[ 68] ); - d0[7] = _mm512_set_epi64( s[ 95], s[ 87], s[ 94], s[ 86], - s[ 93], s[ 85], s[ 92], s[ 84] 
); - d1[4] = _mm512_set_epi64( s[107], s[ 99], s[106], s[ 98], - s[105], s[ 97], s[104], s[ 96] ); - d1[5] = _mm512_set_epi64( s[123], s[115], s[122], s[114], - s[121], s[113], s[120], s[112] ); - d1[6] = _mm512_set_epi64( s[111], s[103], s[110], s[102], - s[109], s[101], s[108], s[100] ); - d1[7] = _mm512_set_epi64( s[127], s[119], s[126], s[118], - s[125], s[117], s[124], s[116] ); - -} - -// input 2 4x128 return 8x64 -static inline void mm512_rintrlv_4x128_8x64( void *dst, const void *src0, - const void *src1, int bit_len ) -{ - __m512i* d = (__m512i*)dst; - uint64_t *s0 = (uint64_t*)src0; - uint64_t *s1 = (uint64_t*)src1; - - d[0] = _mm512_set_epi64( s1[ 6], s1[ 4], s1[ 2], s1[ 0], - s0[ 6], s0[ 4], s0[ 2], s0[ 0] ); - d[1] = _mm512_set_epi64( s1[ 7], s1[ 5], s1[ 3], s1[ 1], - s0[ 7], s0[ 5], s0[ 3], s0[ 1] ); - d[2] = _mm512_set_epi64( s1[14], s1[12], s1[10], s1[ 8], - s0[14], s0[12], s0[10], s0[ 8] ); - d[3] = _mm512_set_epi64( s1[15], s1[13], s1[11], s1[ 9], - s0[15], s0[13], s0[11], s0[ 9] ); - d[4] = _mm512_set_epi64( s1[22], s1[20], s1[18], s1[16], - s0[22], s0[20], s0[18], s0[16] ); - d[5] = _mm512_set_epi64( s1[23], s1[21], s1[19], s1[17], - s0[24], s0[21], s0[19], s0[17] ); - d[6] = _mm512_set_epi64( s1[22], s1[28], s1[26], s1[24], - s0[22], s0[28], s0[26], s0[24] ); - d[7] = _mm512_set_epi64( s1[31], s1[29], s1[27], s1[25], - s0[31], s0[29], s0[27], s0[25] ); - - if ( bit_len <= 512 ) return; - - d[0] = _mm512_set_epi64( s1[38], s1[36], s1[34], s1[32], - s0[38], s0[36], s0[34], s0[32] ); - d[1] = _mm512_set_epi64( s1[39], s1[37], s1[35], s1[33], - s0[39], s0[37], s0[35], s0[33] ); - d[2] = _mm512_set_epi64( s1[46], s1[44], s1[42], s1[40], - s0[46], s0[44], s0[42], s0[40] ); - d[3] = _mm512_set_epi64( s1[47], s1[45], s1[43], s1[41], - s0[47], s0[45], s0[43], s0[41] ); - d[4] = _mm512_set_epi64( s1[54], s1[52], s1[50], s1[48], - s0[54], s0[52], s0[50], s0[48] ); - d[5] = _mm512_set_epi64( s1[55], s1[53], s1[51], s1[49], - s0[55], s0[53], s0[51], s0[49] ); - - d[6] = _mm512_set_epi64( s1[62], s1[60], s1[58], s1[56], - s0[62], s0[60], s0[58], s0[56] ); - d[7] = _mm512_set_epi64( s1[63], s1[61], s1[59], s1[57], - s0[63], s0[61], s0[59], s0[57] ); - -} - -static inline void mm512_extr_lane_4x128( void *d, const void *s, - const int lane, const int bit_len ) -{ - int l = lane<<1; - if ( bit_len <= 256 ) - { - cast_m256i( d ) = mm256_get_64( s, l, l+1, l+8, l+9 ); - return; - } - // else bit_len == 512 - cast_m512i( d ) = mm512_get_64( s, l , l+ 1, l+ 8, l+ 9, - l+16, l+17, l+24, l+25 ); -} - -#endif // AVX512 -#endif // INTRLV_AVX512_H__ diff --git a/simd-utils/intrlv-mmx.h b/simd-utils/intrlv-mmx.h deleted file mode 100644 index ed6dae6..0000000 --- a/simd-utils/intrlv-mmx.h +++ /dev/null @@ -1,126 +0,0 @@ -#if !defined(INTRLV_MMX_H__) -#define INTRLV_MMX_H__ 1 - -#if defined(__MMX__) - -////////////////////////////////////////////////////// -// -// MMX 64 bit vectors - -#define mm64_put_32( s0, s1 ) \ - _mm_set_pi32( *((const uint32_t*)(s1)), *((const uint32_t*)(s0)) ) - -#define mm64_get_32( s, i0, i1 ) \ - _mm_set_pi32( ((const uint32_t*)(s))[i1], ((const uint32_t*)(s))[i0] ) - -// 1 MMX block, 8 bytes * 2 lanes -static inline void mm64_intrlv_2x32( void *d, const void *s0, - const void *s1, int len ) -{ - casti_m64( d, 0 ) = mm64_put_32( s0 , s1 ); - casti_m64( d, 1 ) = mm64_put_32( s0+ 4, s1+ 4 ); - casti_m64( d, 2 ) = mm64_put_32( s0+ 8, s1+ 8 ); - casti_m64( d, 3 ) = mm64_put_32( s0+ 12, s1+ 12 ); - casti_m64( d, 4 ) = mm64_put_32( s0+ 16, s1+ 16 ); - casti_m64( d, 5 ) = 
mm64_put_32( s0+ 20, s1+ 20 ); - casti_m64( d, 6 ) = mm64_put_32( s0+ 24, s1+ 24 ); - casti_m64( d, 7 ) = mm64_put_32( s0+ 28, s1+ 28 ); - - if ( len <= 256 ) return; - - casti_m64( d, 8 ) = mm64_put_32( s0+ 32, s1+ 32 ); - casti_m64( d, 9 ) = mm64_put_32( s0+ 36, s1+ 36 ); - casti_m64( d,10 ) = mm64_put_32( s0+ 40, s1+ 40 ); - casti_m64( d,11 ) = mm64_put_32( s0+ 44, s1+ 44 ); - casti_m64( d,12 ) = mm64_put_32( s0+ 48, s1+ 48 ); - casti_m64( d,13 ) = mm64_put_32( s0+ 52, s1+ 52 ); - casti_m64( d,14 ) = mm64_put_32( s0+ 56, s1+ 56 ); - casti_m64( d,15 ) = mm64_put_32( s0+ 60, s1+ 60 ); - - if ( len <= 512 ) return; - - casti_m64( d,16 ) = mm64_put_32( s0+ 64, s1+ 64 ); - casti_m64( d,17 ) = mm64_put_32( s0+ 68, s1+ 68 ); - casti_m64( d,18 ) = mm64_put_32( s0+ 72, s1+ 72 ); - casti_m64( d,19 ) = mm64_put_32( s0+ 76, s1+ 76 ); - - if ( len <= 640 ) return; - casti_m64( d,20 ) = mm64_put_32( s0+ 80, s1+ 80 ); - casti_m64( d,21 ) = mm64_put_32( s0+ 84, s1+ 84 ); - casti_m64( d,22 ) = mm64_put_32( s0+ 88, s1+ 88 ); - casti_m64( d,23 ) = mm64_put_32( s0+ 92, s1+ 92 ); - casti_m64( d,24 ) = mm64_put_32( s0+ 96, s1+ 96 ); - casti_m64( d,25 ) = mm64_put_32( s0+100, s1+100 ); - casti_m64( d,26 ) = mm64_put_32( s0+104, s1+104 ); - casti_m64( d,27 ) = mm64_put_32( s0+108, s1+108 ); - casti_m64( d,28 ) = mm64_put_32( s0+112, s1+112 ); - casti_m64( d,29 ) = mm64_put_32( s0+116, s1+116 ); - casti_m64( d,30 ) = mm64_put_32( s0+120, s1+120 ); - casti_m64( d,31 ) = mm64_put_32( s0+124, s1+124 ); -} - -static inline void mm64_dintrlv_2x32( void *d00, void *d01, const int n, - const void *s, int len ) -{ - casti_m64( d00,0 ) = mm64_get_32( s, 0, 2 ); - casti_m64( d01,0 ) = mm64_get_32( s, 1, 3 ); - casti_m64( d00,1 ) = mm64_get_32( s, 4, 6 ); - casti_m64( d01,1 ) = mm64_get_32( s, 5, 7 ); - casti_m64( d00,2 ) = mm64_get_32( s, 8, 10 ); - casti_m64( d01,2 ) = mm64_get_32( s, 9, 11 ); - casti_m64( d00,3 ) = mm64_get_32( s, 12, 14 ); - casti_m64( d01,3 ) = mm64_get_32( s, 13, 15 ); - - if ( len <= 256 ) return; - - casti_m64( d00,4 ) = mm64_get_32( s, 16, 18 ); - casti_m64( d01,4 ) = mm64_get_32( s, 17, 19 ); - casti_m64( d00,5 ) = mm64_get_32( s, 20, 22 ); - casti_m64( d01,5 ) = mm64_get_32( s, 21, 23 ); - casti_m64( d00,6 ) = mm64_get_32( s, 24, 26 ); - casti_m64( d01,6 ) = mm64_get_32( s, 25, 27 ); - casti_m64( d00,7 ) = mm64_get_32( s, 28, 30 ); - casti_m64( d01,7 ) = mm64_get_32( s, 29, 31 ); - - if ( len <= 512 ) return; - - casti_m64( d00,8 ) = mm64_get_32( s, 32, 34 ); - casti_m64( d01,8 ) = mm64_get_32( s, 33, 35 ); - casti_m64( d00,9 ) = mm64_get_32( s, 36, 38 ); - casti_m64( d01,9 ) = mm64_get_32( s, 37, 39 ); - - if ( len <= 640 ) return; - casti_m64( d00,10 ) = mm64_get_32( s, 40, 42 ); - casti_m64( d01,10 ) = mm64_get_32( s, 41, 43 ); - casti_m64( d00,11 ) = mm64_get_32( s, 44, 46 ); - casti_m64( d01,11 ) = mm64_get_32( s, 45, 47 ); - casti_m64( d00,12 ) = mm64_get_32( s, 48, 50 ); - casti_m64( d01,12 ) = mm64_get_32( s, 49, 51 ); - casti_m64( d00,13 ) = mm64_get_32( s, 52, 54 ); - casti_m64( d01,13 ) = mm64_get_32( s, 53, 55 ); - casti_m64( d00,14 ) = mm64_get_32( s, 56, 58 ); - casti_m64( d01,14 ) = mm64_get_32( s, 57, 59 ); - casti_m64( d00,15 ) = mm64_get_32( s, 60, 62 ); - casti_m64( d01,15 ) = mm64_get_32( s, 61, 63 ); -} - -static inline void mm64_extr_lane_2x32( void *d, const void *s, - const int lane, const int bit_len ) -{ - casti_m64( d, 0 ) = mm64_get_32( s, lane , lane+ 4 ); - casti_m64( d, 1 ) = mm64_get_32( s, lane+ 8, lane+12 ); - casti_m64( d, 2 ) = mm64_get_32( s, lane+16, 
lane+20 ); - casti_m64( d, 3 ) = mm64_get_32( s, lane+24, lane+28 ); - - if ( bit_len <= 256 ) return; - casti_m64( d, 4 ) = mm64_get_32( s, lane+32, lane+36 ); - casti_m64( d, 5 ) = mm64_get_32( s, lane+40, lane+44 ); - casti_m64( d, 6 ) = mm64_get_32( s, lane+48, lane+52 ); - casti_m64( d, 7 ) = mm64_get_32( s, lane+56, lane+60 ); - // bit_len == 512 -} - - - -#endif // MMX -#endif // INTRLV_MMX_H__ diff --git a/simd-utils/intrlv-selector.h b/simd-utils/intrlv-selector.h deleted file mode 100644 index 3095b9e..0000000 --- a/simd-utils/intrlv-selector.h +++ /dev/null @@ -1,77 +0,0 @@ -#if !defined(INTRLV_SELECTOR_H__) -#define INTRLV_SELECTOR_H__ - -////////////////////////////////////////////////////////////// -// -// Generic interface for interleaving data for parallel processing. -// -// Best tech is chosen atomatically. - -/* -#if defined(__AVX512F__) - -#define intrlv_4x128 mm512_intrlv_4x128 -#define intrlv_4x128 mm512_intrlv_4x128 - -#define intrlv_8x64 mm512_intrlv_8x64 -#define dintrlv_8x64 mm512_dintrlv_8x64 -#define extr_lane_8x64 mm512_extr_lane_8x64 - -#define intrlv_16x32 mm512_intrlv_16x32 -#define dintrlv_16x32 mm512_dintrlv_16x32 -#define extr_lane_16x32 mm512_extr_lane_16x32 - -#define intrlv_2x128 mm512_intrlv_2x128 -#define dintrlv_2x128 mm512_dintrlv_2x128 - -#define intrlv_4x64 mm512_intrlv_4x64 -#define dintrlv_4x64 mm512_dintrlv_4x64 -#define extr_lane_4x64 mm512_extr_lane_4x64 - -#define intrlv_8x32 mm512_intrlv_8x32 -#define dintrlv_8x32 mm512_dintrlv_8x32 -#define extr_lane_8x32 mm512_extr_lane_8x32 - -#elif defined(__AVX__) -*/ -#if defined(__AVX__) - -#define intrlv_2x128 mm256_intrlv_2x128 -#define dintrlv_2x128 mm256_dintrlv_2x128 - -#define intrlv_4x64 mm256_intrlv_4x64 -#define dintrlv_4x64 mm256_dintrlv_4x64 -#define extr_lane_4x64 mm256_extr_lane_4x64 - -#define intrlv_8x32 mm256_intrlv_8x32 -#define dintrlv_8x32 mm256_dintrlv_8x32 -#define extr_lane_8x32 mm256_extr_lane_8x32 - -#define intrlv_4x32 mm256_intrlv_4x32 -#define dintrlv_4x32 mm256_dintrlv_4x32 -#define extr_lane_4x32 mm256_extr_lane_4x32 - -#else - -#define intrlv_2x128 mm128_intrlv_2x128 -#define dintrlv_2x128 mm128_dintrlv_2x128 - -#define intrlv_4x64 mm128_intrlv_4x64 -#define dintrlv_4x64 mm128_dintrlv_4x64 -#define extr_lane_4x64 mm128_extr_lane_4x64 - -#define intrlv_8x32 mm128_intrlv_8x32 -#define dintrlv_8x32 mm128_dintrlv_8x32 -#define extr_lane_8x32 mm128_extr_lane_8x32 - -#define intrlv_2x64 mm128_intrlv_2x64 -#define dintrlv_2x64 mm128_dintrlv_2x64 -#define extr_lane_2x64 mm128_extr_lane_2x64 - -#define intrlv_4x32 mm128_intrlv_4x32 -#define dintrlv_4x32 mm128_dintrlv_4x32 -#define extr_lane_4x32 mm128_extr_lane_4x32 - -#endif - -#endif // INTRLV_SELECTOR_H__ diff --git a/simd-utils/intrlv-sse2.h b/simd-utils/intrlv-sse2.h deleted file mode 100644 index aeb7cae..0000000 --- a/simd-utils/intrlv-sse2.h +++ /dev/null @@ -1,192 +0,0 @@ -#if !defined(INTRLV_SSE2_H__) -#define INTRLV_SSE2_H__ 1 - -// Don't call __mm_extract_epi32 directly, it needs SSE4.1. -// Use mm128_extr_32 wrapper instead, it has both SSE4.1 & SSE2 covered. 
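The mm128_extr_32 wrapper referred to in the comment above is defined elsewhere in simd-utils and is not part of this hunk. As a rough sketch of how such a dual-path wrapper can be built (the name below is illustrative, not the project's): on SSE4.1 it can map straight to _mm_extract_epi32, while on plain SSE2 the same effect is obtained by broadcasting the wanted element with _mm_shuffle_epi32 and reading lane 0 with _mm_cvtsi128_si32.

   #include <immintrin.h>

   // Illustrative dual-path 32 bit lane extract; n must be a compile time
   // constant, as required by the underlying intrinsics.
   #if defined(__SSE4_1__)
     #define extr32_sketch( v, n )  _mm_extract_epi32( v, n )
   #else
     #define extr32_sketch( v, n ) \
        _mm_cvtsi128_si32( _mm_shuffle_epi32( v, _MM_SHUFFLE( n, n, n, n ) ) )
   #endif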
- -#if defined(__SSE2__) - -/////////////////////////////////////////////////////////////// -// -// SSE2 128 bit vectors - - -// Macros to abstract typecasting - -// Interleave lanes -#define mm128_put_64( s0, s1) \ - _mm_set_epi64x( *((const uint64_t*)(s1)), *((const uint64_t*)(s0)) ) - -#define mm128_put_32( s0, s1, s2, s3 ) \ - _mm_set_epi32( *((const uint32_t*)(s3)), *((const uint32_t*)(s2)), \ - *((const uint32_t*)(s1)), *((const uint32_t*)(s0)) ) - -// Deinterleave lanes -#define mm128_get_64( s, i0, i1 ) \ - _mm_set_epi64x( ((const uint64_t*)(s))[i1], ((const uint64_t*)(s))[i0] ) - -#define mm128_get_32( s, i0, i1, i2, i3 ) \ - _mm_set_epi32( ((const uint32_t*)(s))[i3], ((const uint32_t*)(s))[i2], \ - ((const uint32_t*)(s))[i1], ((const uint32_t*)(s))[i0] ) - -// blend 2 vectors while interleaving: { hi[n], lo[n-1], ... hi[1], lo[0] } -#define mm128_intrlv_blend_64( hi, lo ) \ - _mm256_blend_epi16( hi, lo, 0x0f ) -#define mm128_intrlv_blend_32( hi, lo ) \ - _mm6_blend_epi16( hi, lo, 0x33 ) - -// 1 sse2 block, 16 x 16 bytes - -#define mm128_intrlv_4x32_128( d, s0, s1, s2, s3 )\ -do { \ - casti_m128i( d,0 ) = _mm_set_epi32( \ - mm128_extr_32( s3, 0 ), mm128_extr_32( s2, 0 ), \ - mm128_extr_32( s1, 0 ), mm128_extr_32( s0, 0 ) ); \ - casti_m128i( d,1 ) = _mm_set_epi32( \ - mm128_extr_32( s3, 1 ), mm128_extr_32( s2, 1 ), \ - mm128_extr_32( s1, 1 ), mm128_extr_32( s0, 1 ) ); \ - casti_m128i( d,2 ) = _mm_set_epi32( \ - mm128_extr_32( s3, 2 ), mm128_extr_32( s2, 2 ), \ - mm128_extr_32( s1, 2 ), mm128_extr_32( s0, 2 ) ); \ - casti_m128i( d,3 ) = _mm_set_epi32( \ - mm128_extr_32( s3, 3 ), mm128_extr_32( s2, 3 ), \ - mm128_extr_32( s1, 3 ), mm128_extr_32( s0, 3 ) ); \ -} while(0) - -static inline void mm128_dintrlv_4x32_128( void *d0, void *d1, void *d2, - void *d3, const void *src ) -{ - __m128i s0 = *(__m128i*) src; - __m128i s1 = *(__m128i*)(src+16); - __m128i s2 = *(__m128i*)(src+32); - __m128i s3 = *(__m128i*)(src+48); - - *(__m128i*)d0 = _mm_set_epi32( - mm128_extr_32( s3,0 ), mm128_extr_32( s2,0 ), - mm128_extr_32( s1,0 ), mm128_extr_32( s0,0 ) ); - *(__m128i*)d1 = _mm_set_epi32( - mm128_extr_32( s3,1 ), mm128_extr_32( s2,1 ), - mm128_extr_32( s1,1 ), mm128_extr_32( s0,1 ) ); - *(__m128i*)d2 = _mm_set_epi32( - mm128_extr_32( s3,2 ), mm128_extr_32( s2,2 ), - mm128_extr_32( s1,2 ), mm128_extr_32( s0,2 ) ); - *(__m128i*)d3 = _mm_set_epi32( - mm128_extr_32( s3,3 ), mm128_extr_32( s2,3 ), - mm128_extr_32( s1,3 ), mm128_extr_32( s0,3 ) ); -} - -static inline void mm128_intrlv_2x64x128( void *d, const void *s0, - const void *s1 ) -{ - casti_m128i( d,0 ) = mm128_put_64( s0, s1 ); - casti_m128i( d,1 ) = mm128_put_64( s0+ 8, s1+ 8 ); - casti_m128i( d,2 ) = mm128_put_64( s0+16, s1+16 ); - casti_m128i( d,3 ) = mm128_put_64( s0+24, s1+24 ); -} - -#define mm128_bswap_intrlv_4x32_128( d, src ) \ -do { \ - __m128i ss = mm128_bswap_32( src );\ - casti_m128i( d,0 ) = _mm_set1_epi32( mm128_extr_32( ss, 0 ) ); \ - casti_m128i( d,1 ) = _mm_set1_epi32( mm128_extr_32( ss, 1 ) ); \ - casti_m128i( d,2 ) = _mm_set1_epi32( mm128_extr_32( ss, 2 ) ); \ - casti_m128i( d,3 ) = _mm_set1_epi32( mm128_extr_32( ss, 3 ) ); \ -} while(0) - - -// -// User functions. - -// interleave 4 arrays of 32 bit elements for 128 bit processing -// bit_len must be 256, 512 or 640 bits. 
-static inline void mm128_intrlv_4x32( void *d, const void *s0, - const void *s1, const void *s2, const void *s3, int bit_len ) -{ - mm128_intrlv_4x32_128( d , casti_m128i(s0,0), casti_m128i(s1,0), - casti_m128i(s2,0), casti_m128i(s3,0) ); - mm128_intrlv_4x32_128( d+ 64, casti_m128i(s0,1), casti_m128i(s1,1), - casti_m128i(s2,1), casti_m128i(s3,1) ); - if ( bit_len <= 256 ) return; - mm128_intrlv_4x32_128( d+128, casti_m128i(s0,2), casti_m128i(s1,2), - casti_m128i(s2,2), casti_m128i(s3,2) ); - mm128_intrlv_4x32_128( d+192, casti_m128i(s0,3), casti_m128i(s1,3), - casti_m128i(s2,3), casti_m128i(s3,3) ); - if ( bit_len <= 512 ) return; - mm128_intrlv_4x32_128( d+256, casti_m128i(s0,4), casti_m128i(s1,4), - casti_m128i(s2,4), casti_m128i(s3,4) ); - if ( bit_len <= 640 ) return; - mm128_intrlv_4x32_128( d+320, casti_m128i(s0,5), casti_m128i(s1,5), - casti_m128i(s2,5), casti_m128i(s3,5) ); - mm128_intrlv_4x32_128( d+384, casti_m128i(s0,6), casti_m128i(s1,6), - casti_m128i(s2,6), casti_m128i(s3,6) ); - mm128_intrlv_4x32_128( d+448, casti_m128i(s0,7), casti_m128i(s1,7), - casti_m128i(s2,7), casti_m128i(s3,7) ); - // bit_len == 1024 -} - -// Still used by decred due to odd data size: 180 bytes -// bit_len must be multiple of 32 -static inline void mm128_intrlv_4x32x( void *dst, void *src0, void *src1, - void *src2, void *src3, int bit_len ) -{ - uint32_t *d = (uint32_t*)dst; - uint32_t *s0 = (uint32_t*)src0; - uint32_t *s1 = (uint32_t*)src1; - uint32_t *s2 = (uint32_t*)src2; - uint32_t *s3 = (uint32_t*)src3; - - for ( int i = 0; i < bit_len >> 5; i++, d += 4 ) - { - *d = *(s0+i); - *(d+1) = *(s1+i); - *(d+2) = *(s2+i); - *(d+3) = *(s3+i); - } -} - -static inline void mm128_dintrlv_4x32( void *d0, void *d1, void *d2, - void *d3, const void *s, int bit_len ) -{ - mm128_dintrlv_4x32_128( d0 , d1 , d2 , d3 , s ); - mm128_dintrlv_4x32_128( d0+ 16, d1+ 16, d2+ 16, d3+ 16, s+ 64 ); - if ( bit_len <= 256 ) return; - mm128_dintrlv_4x32_128( d0+ 32, d1+ 32, d2+ 32, d3+ 32, s+128 ); - mm128_dintrlv_4x32_128( d0+ 48, d1+ 48, d2+ 48, d3+ 48, s+192 ); - if ( bit_len <= 512 ) return; - mm128_dintrlv_4x32_128( d0+ 64, d1+ 64, d2+ 64, d3+ 64, s+256 ); - if ( bit_len <= 640 ) return; - mm128_dintrlv_4x32_128( d0+ 80, d1+ 80, d2+ 80, d3+ 80, s+320 ); - mm128_dintrlv_4x32_128( d0+ 96, d1+ 96, d2+ 96, d3+ 96, s+384 ); - mm128_dintrlv_4x32_128( d0+112, d1+112, d2+112, d3+112, s+448 ); - // bit_len == 1024 -} - -// extract and deinterleave specified lane. -static inline void mm128_extr_lane_4x32( void *d, const void *s, - const int lane, const int bit_len ) -{ - casti_m128i( d, 0 ) = - mm128_get_32( s, lane , lane+ 4, lane+ 8, lane+12 ); - casti_m128i( d, 1 ) = - mm128_get_32( s, lane+16, lane+20, lane+24, lane+28 ); - if ( bit_len <= 256 ) return; - casti_m128i( d, 2 ) = - mm128_get_32( s, lane+32, lane+36, lane+40, lane+44 ); - casti_m128i( d, 3 ) = - mm128_get_32( s, lane+48, lane+52, lane+56, lane+60 ); - // bit_len == 512 -} - -// Interleave 80 bytes of 32 bit data for 4 lanes. 
-static inline void mm128_bswap_intrlv80_4x32( void *d, const void *s )
-{
-   mm128_bswap_intrlv_4x32_128( d     , casti_m128i( s, 0 ) );
-   mm128_bswap_intrlv_4x32_128( d+ 64, casti_m128i( s, 1 ) );
-   mm128_bswap_intrlv_4x32_128( d+128, casti_m128i( s, 2 ) );
-   mm128_bswap_intrlv_4x32_128( d+192, casti_m128i( s, 3 ) );
-   mm128_bswap_intrlv_4x32_128( d+256, casti_m128i( s, 4 ) );
-}
-
-#endif // SSE2
-#endif // INTRLV_SSE2_H__
-
diff --git a/simd-utils/intrlv.h b/simd-utils/intrlv.h
new file mode 100644
index 0000000..9ab7fa7
--- /dev/null
+++ b/simd-utils/intrlv.h
@@ -0,0 +1,1333 @@
+#if !defined(INTERLEAVE_H__)
+#define INTERLEAVE_H__ 1
+
+// philosophical discussion
+//
+// transitions:
+//
+// int32 <-> int64
+// uint64_t = (uint64_t)int32_lo | ( (uint64_t)int32_hi << 32 )
+// Efficient transition and post processing, 32 bit granularity is lost.
+// Not practical.
+//
+// int32 <-> m64
+// More complex, 32 bit granularity maintained, limited number of mmx regs.
+// int32 <-> int64 <-> m64 might be more efficient.
+//
+// int32 <-> m128
+// Expensive, current implementation.
+//
+// int32 <-> m256
+// Very expensive multi stage, current implementation.
+//
+// int64/m64 <-> m128
+// Efficient, agnostic to native element size. Common.
+//
+// m128 <-> m256
+// Expensive for a single instruction, unavoidable. Common.
+//
+// Multi stage options
+//
+// int32 <-> int64 -> m128
+// More efficient than insert32, granularity maintained. Common.
+//
+// int64 <-> m128 -> m256
+// Unavoidable, reasonably efficient. Common.
+//
+// int32 <-> int64 -> m128 -> m256
+// Seems inevitable, most efficient despite number of stages. Common.
+//
+// It seems the best approach is to avoid transitions and use the native type
+// of the data: 64 & 32 bit use integer, 128 bit use m128i.
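As a concrete illustration of the int32 -> int64 -> m128 multi stage option favoured above, the sketch below packs one 32 bit word from each of two lanes into a 64 bit integer before touching a vector register, preserving 32 bit granularity without per-element vector inserts. This is an illustrative aside, not part of intrlv.h; the function name is invented.

   #include <stdint.h>
   #include <emmintrin.h>   // SSE2

   // Build one 128 bit word of 2x32 interleaved data from two lanes,
   // going through 64 bit integers instead of 32 bit vector inserts.
   static inline __m128i intrlv_2x32_via_int64_sketch( const uint32_t *s0,
                                                       const uint32_t *s1 )
   {
      uint64_t lo = (uint64_t)s0[0] | ( (uint64_t)s1[0] << 32 );
      uint64_t hi = (uint64_t)s0[1] | ( (uint64_t)s1[1] << 32 );
      // lanes low to high: s0[0], s1[0], s0[1], s1[1]
      return _mm_set_epi64x( (int64_t)hi, (int64_t)lo );
   }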
+//
+
+////////////////////////////////
+//
+// 32 bit data
+
+// 2x32
+
+static inline void intrlv_2x32( void *dst, const void *src0,
+                                const void *src1, int bit_len )
+{
+   uint32_t *d = (uint32_t*)dst;
+   const uint32_t *s0 = (const uint32_t*)src0;
+   const uint32_t *s1 = (const uint32_t*)src1;
+   d[ 0] = s0[ 0];   d[ 1] = s1[ 0];   d[ 2] = s0[ 1];   d[ 3] = s1[ 1];
+   d[ 4] = s0[ 2];   d[ 5] = s1[ 2];   d[ 6] = s0[ 3];   d[ 7] = s1[ 3];
+   d[ 8] = s0[ 4];   d[ 9] = s1[ 4];   d[10] = s0[ 5];   d[11] = s1[ 5];
+   d[12] = s0[ 6];   d[13] = s1[ 6];   d[14] = s0[ 7];   d[15] = s1[ 7];
+   if ( bit_len <= 256 ) return;
+   d[16] = s0[ 8];   d[17] = s1[ 8];   d[18] = s0[ 9];   d[19] = s1[ 9];
+   d[20] = s0[10];   d[21] = s1[10];   d[22] = s0[11];   d[23] = s1[11];
+   d[24] = s0[12];   d[25] = s1[12];   d[26] = s0[13];   d[27] = s1[13];
+   d[28] = s0[14];   d[29] = s1[14];   d[30] = s0[15];   d[31] = s1[15];
+   if ( bit_len <= 512 ) return;
+   d[32] = s0[16];   d[33] = s1[16];   d[34] = s0[17];   d[35] = s1[17];
+   d[36] = s0[18];   d[37] = s1[18];   d[38] = s0[19];   d[39] = s1[19];
+   if ( bit_len <= 640 ) return;
+   d[40] = s0[20];   d[41] = s1[20];   d[42] = s0[21];   d[43] = s1[21];
+   d[44] = s0[22];   d[45] = s1[22];   d[46] = s0[23];   d[47] = s1[23];
+   d[48] = s0[24];   d[49] = s1[24];   d[50] = s0[25];   d[51] = s1[25];
+   d[52] = s0[26];   d[53] = s1[26];   d[54] = s0[27];   d[55] = s1[27];
+   d[56] = s0[28];   d[57] = s1[28];   d[58] = s0[29];   d[59] = s1[29];
+   d[60] = s0[30];   d[61] = s1[30];   d[62] = s0[31];   d[63] = s1[31];
+}
+
+static inline void dintrlv_2x32( void *dst0, void *dst1,
+                                 const void *src, int bit_len )
+{
+   uint32_t *d0 = (uint32_t*)dst0;
+   uint32_t *d1 = (uint32_t*)dst1;
+   const uint32_t *s = (const uint32_t*)src;
+
+   d0[ 0] = s[ 0];   d1[ 0] = s[ 1];   d0[ 1] = s[ 2];   d1[ 1] = s[ 3];
+   d0[ 2] = s[ 4];   d1[ 2] = s[ 5];   d0[ 3] = s[ 6];   d1[ 3] = s[ 7];
+   d0[ 4] = s[ 8];   d1[ 4] = s[ 9];   d0[ 5] = s[10];   d1[ 5] = s[11];
+   d0[ 6] = s[12];   d1[ 6] = s[13];   d0[ 7] = s[14];   d1[ 7] = s[15];
+   if ( bit_len <= 256 ) return;
+   d0[ 8] = s[16];   d1[ 8] = s[17];   d0[ 9] = s[18];   d1[ 9] = s[19];
+   d0[10] = s[20];   d1[10] = s[21];   d0[11] = s[22];   d1[11] = s[23];
+   d0[12] = s[24];   d1[12] = s[25];   d0[13] = s[26];   d1[13] = s[27];
+   d0[14] = s[28];   d1[14] = s[29];   d0[15] = s[30];   d1[15] = s[31];
+   if ( bit_len <= 512 ) return;
+   d0[16] = s[32];   d1[16] = s[33];   d0[17] = s[34];   d1[17] = s[35];
+   d0[18] = s[36];   d1[18] = s[37];   d0[19] = s[38];   d1[19] = s[39];
+   if ( bit_len <= 640 ) return;
+   d0[20] = s[40];   d1[20] = s[41];   d0[21] = s[42];   d1[21] = s[43];
+   d0[22] = s[44];   d1[22] = s[45];   d0[23] = s[46];   d1[23] = s[47];
+   d0[24] = s[48];   d1[24] = s[49];   d0[25] = s[50];   d1[25] = s[51];
+   d0[26] = s[52];   d1[26] = s[53];   d0[27] = s[54];   d1[27] = s[55];
+   d0[28] = s[56];   d1[28] = s[57];   d0[29] = s[58];   d1[29] = s[59];
+   d0[30] = s[60];   d1[30] = s[61];   d0[31] = s[62];   d1[31] = s[63];
+}
+
+static inline void extr_lane_2x32( void *dst, const void *src,
+                                   const int lane, const int bit_len )
+{
+   uint32_t *d = (uint32_t*)dst;
+   const uint32_t *s = (const uint32_t*)src;
+   d[ 0] = s[ lane    ];   d[ 1] = s[ lane+ 2 ];
+   d[ 2] = s[ lane+ 4 ];   d[ 3] = s[ lane+ 6 ];
+   d[ 4] = s[ lane+ 8 ];   d[ 5] = s[ lane+10 ];
+   d[ 6] = s[ lane+12 ];   d[ 7] = s[ lane+14 ];
+   if ( bit_len <= 256 ) return;
+   d[ 8] = s[ lane+16 ];   d[ 9] = s[ lane+18 ];
+   d[10] = s[ lane+20 ];   d[11] = s[ lane+22 ];
+   d[12] = s[ lane+24 ];   d[13] = s[ lane+26 ];
+   d[14] = s[ lane+28 ];   d[15] = s[ lane+30 ];
+}
+
+// 4x32
+
+static inline void intrlv_4x32( void *dst, const void *src0, const void *src1,
+                                const void *src2, const void *src3, int bit_len )
+{
+
uint32_t *d = (uint32_t*)dst; + const uint32_t *s0 = (const uint32_t*)src0; + const uint32_t *s1 = (const uint32_t*)src1; + const uint32_t *s2 = (const uint32_t*)src2; + const uint32_t *s3 = (const uint32_t*)src3; + d[ 0] = s0[ 0]; d[ 1] = s1[ 0]; d[ 2] = s2[ 0]; d[ 3] = s3[ 0]; + d[ 4] = s0[ 1]; d[ 5] = s1[ 1]; d[ 6] = s2[ 1]; d[ 7] = s3[ 1]; + d[ 8] = s0[ 2]; d[ 9] = s1[ 2]; d[ 10] = s2[ 2]; d[ 11] = s3[ 2]; + d[ 12] = s0[ 3]; d[ 13] = s1[ 3]; d[ 14] = s2[ 3]; d[ 15] = s3[ 3]; + d[ 16] = s0[ 4]; d[ 17] = s1[ 4]; d[ 18] = s2[ 4]; d[ 19] = s3[ 4]; + d[ 20] = s0[ 5]; d[ 21] = s1[ 5]; d[ 22] = s2[ 5]; d[ 23] = s3[ 5]; + d[ 24] = s0[ 6]; d[ 25] = s1[ 6]; d[ 26] = s2[ 6]; d[ 27] = s3[ 6]; + d[ 28] = s0[ 7]; d[ 29] = s1[ 7]; d[ 30] = s2[ 7]; d[ 31] = s3[ 7]; + if ( bit_len <= 256 ) return; + d[ 32] = s0[ 8]; d[ 33] = s1[ 8]; d[ 34] = s2[ 8]; d[ 35] = s3[ 8]; + d[ 36] = s0[ 9]; d[ 37] = s1[ 9]; d[ 38] = s2[ 9]; d[ 39] = s3[ 9]; + d[ 40] = s0[10]; d[ 41] = s1[10]; d[ 42] = s2[10]; d[ 43] = s3[10]; + d[ 44] = s0[11]; d[ 45] = s1[11]; d[ 46] = s2[11]; d[ 47] = s3[11]; + d[ 48] = s0[12]; d[ 49] = s1[12]; d[ 50] = s2[12]; d[ 51] = s3[12]; + d[ 52] = s0[13]; d[ 53] = s1[13]; d[ 54] = s2[13]; d[ 55] = s3[13]; + d[ 56] = s0[14]; d[ 57] = s1[14]; d[ 58] = s2[14]; d[ 59] = s3[14]; + d[ 60] = s0[15]; d[ 61] = s1[15]; d[ 62] = s2[15]; d[ 63] = s3[15]; + if ( bit_len <= 512 ) return; + d[ 64] = s0[16]; d[ 65] = s1[16]; d[ 66] = s2[16]; d[ 67] = s3[16]; + d[ 68] = s0[17]; d[ 69] = s1[17]; d[ 70] = s2[17]; d[ 71] = s3[17]; + d[ 72] = s0[18]; d[ 73] = s1[18]; d[ 74] = s2[18]; d[ 75] = s3[18]; + d[ 76] = s0[19]; d[ 77] = s1[19]; d[ 78] = s2[19]; d[ 79] = s3[19]; + if ( bit_len <= 640 ) return; + d[ 80] = s0[20]; d[ 81] = s1[20]; d[ 82] = s2[20]; d[ 83] = s3[20]; + d[ 84] = s0[21]; d[ 85] = s1[21]; d[ 86] = s2[21]; d[ 87] = s3[21]; + d[ 88] = s0[22]; d[ 89] = s1[22]; d[ 90] = s2[22]; d[ 91] = s3[22]; + d[ 92] = s0[23]; d[ 93] = s1[23]; d[ 94] = s2[23]; d[ 95] = s3[23]; + d[ 96] = s0[24]; d[ 97] = s1[24]; d[ 98] = s2[24]; d[ 99] = s3[24]; + d[100] = s0[25]; d[101] = s1[25]; d[102] = s2[25]; d[103] = s3[25]; + d[104] = s0[26]; d[105] = s1[26]; d[106] = s2[26]; d[107] = s3[26]; + d[108] = s0[27]; d[109] = s1[27]; d[110] = s2[27]; d[111] = s3[27]; + d[112] = s0[28]; d[113] = s1[28]; d[114] = s2[28]; d[115] = s3[28]; + d[116] = s0[29]; d[117] = s1[29]; d[118] = s2[29]; d[119] = s3[29]; + d[120] = s0[30]; d[121] = s1[30]; d[122] = s2[30]; d[123] = s3[30]; + d[124] = s0[31]; d[125] = s1[31]; d[126] = s2[31]; d[127] = s3[31]; +} + +static inline void intrlv_4x32_512( void *dst, const void *src0, + const void *src1, const void *src2, const void *src3 ) +{ + uint32_t *d = (uint32_t*)dst; + const uint32_t *s0 = (const uint32_t*)src0; + const uint32_t *s1 = (const uint32_t*)src1; + const uint32_t *s2 = (const uint32_t*)src2; + const uint32_t *s3 = (const uint32_t*)src3; + d[ 0] = s0[ 0]; d[ 1] = s1[ 0]; d[ 2] = s2[ 0]; d[ 3] = s3[ 0]; + d[ 4] = s0[ 1]; d[ 5] = s1[ 1]; d[ 6] = s2[ 1]; d[ 7] = s3[ 1]; + d[ 8] = s0[ 2]; d[ 9] = s1[ 2]; d[ 10] = s2[ 2]; d[ 11] = s3[ 2]; + d[ 12] = s0[ 3]; d[ 13] = s1[ 3]; d[ 14] = s2[ 3]; d[ 15] = s3[ 3]; + d[ 16] = s0[ 4]; d[ 17] = s1[ 4]; d[ 18] = s2[ 4]; d[ 19] = s3[ 4]; + d[ 20] = s0[ 5]; d[ 21] = s1[ 5]; d[ 22] = s2[ 5]; d[ 23] = s3[ 5]; + d[ 24] = s0[ 6]; d[ 25] = s1[ 6]; d[ 26] = s2[ 6]; d[ 27] = s3[ 6]; + d[ 28] = s0[ 7]; d[ 29] = s1[ 7]; d[ 30] = s2[ 7]; d[ 31] = s3[ 7]; + d[ 32] = s0[ 8]; d[ 33] = s1[ 8]; d[ 34] = s2[ 8]; d[ 35] = s3[ 8]; + d[ 36] = s0[ 9]; d[ 37] = s1[ 9]; d[ 38] = 
s2[ 9]; d[ 39] = s3[ 9]; + d[ 40] = s0[10]; d[ 41] = s1[10]; d[ 42] = s2[10]; d[ 43] = s3[10]; + d[ 44] = s0[11]; d[ 45] = s1[11]; d[ 46] = s2[11]; d[ 47] = s3[11]; + d[ 48] = s0[12]; d[ 49] = s1[12]; d[ 50] = s2[12]; d[ 51] = s3[12]; + d[ 52] = s0[13]; d[ 53] = s1[13]; d[ 54] = s2[13]; d[ 55] = s3[13]; + d[ 56] = s0[14]; d[ 57] = s1[14]; d[ 58] = s2[14]; d[ 59] = s3[14]; + d[ 60] = s0[15]; d[ 61] = s1[15]; d[ 62] = s2[15]; d[ 63] = s3[15]; +} + +static inline void dintrlv_4x32( void *dst0, void *dst1, void *dst2, + void *dst3, const void *src, int bit_len ) +{ + uint32_t *d0 = (uint32_t*)dst0; + uint32_t *d1 = (uint32_t*)dst1; + uint32_t *d2 = (uint32_t*)dst2; + uint32_t *d3 = (uint32_t*)dst3; + const uint32_t *s = (const uint32_t*)src; + d0[ 0] = s[ 0]; d1[ 0] = s[ 1]; d2[ 0] = s[ 2]; d3[ 0] = s[ 3]; + d0[ 1] = s[ 4]; d1[ 1] = s[ 5]; d2[ 1] = s[ 6]; d3[ 1] = s[ 7]; + d0[ 2] = s[ 8]; d1[ 2] = s[ 9]; d2[ 2] = s[ 10]; d3[ 2] = s[ 11]; + d0[ 3] = s[ 12]; d1[ 3] = s[ 13]; d2[ 3] = s[ 14]; d3[ 3] = s[ 15]; + d0[ 4] = s[ 16]; d1[ 4] = s[ 17]; d2[ 4] = s[ 18]; d3[ 4] = s[ 19]; + d0[ 5] = s[ 20]; d1[ 5] = s[ 21]; d2[ 5] = s[ 22]; d3[ 5] = s[ 23]; + d0[ 6] = s[ 24]; d1[ 6] = s[ 25]; d2[ 6] = s[ 26]; d3[ 6] = s[ 27]; + d0[ 7] = s[ 28]; d1[ 7] = s[ 29]; d2[ 7] = s[ 30]; d3[ 7] = s[ 31]; + if ( bit_len <= 256 ) return; + d0[ 8] = s[ 32]; d1[ 8] = s[ 33]; d2[ 8] = s[ 34]; d3[ 8] = s[ 35]; + d0[ 9] = s[ 36]; d1[ 9] = s[ 37]; d2[ 9] = s[ 38]; d3[ 9] = s[ 39]; + d0[10] = s[ 40]; d1[10] = s[ 41]; d2[10] = s[ 42]; d3[10] = s[ 43]; + d0[11] = s[ 44]; d1[11] = s[ 45]; d2[11] = s[ 46]; d3[11] = s[ 47]; + d0[12] = s[ 48]; d1[12] = s[ 49]; d2[12] = s[ 50]; d3[12] = s[ 51]; + d0[13] = s[ 52]; d1[13] = s[ 53]; d2[13] = s[ 54]; d3[13] = s[ 55]; + d0[14] = s[ 56]; d1[14] = s[ 57]; d2[14] = s[ 58]; d3[14] = s[ 59]; + d0[15] = s[ 60]; d1[15] = s[ 61]; d2[15] = s[ 62]; d3[15] = s[ 63]; + if ( bit_len <= 512 ) return; + d0[16] = s[ 64]; d1[16] = s[ 65]; d2[16] = s[ 66]; d3[16] = s[ 67]; + d0[17] = s[ 68]; d1[17] = s[ 69]; d2[17] = s[ 70]; d3[17] = s[ 71]; + d0[18] = s[ 72]; d1[18] = s[ 73]; d2[18] = s[ 74]; d3[18] = s[ 75]; + d0[19] = s[ 76]; d1[19] = s[ 77]; d2[19] = s[ 78]; d3[19] = s[ 79]; + if ( bit_len <= 640 ) return; + d0[20] = s[ 80]; d1[20] = s[ 81]; d2[20] = s[ 82]; d3[20] = s[ 83]; + d0[21] = s[ 84]; d1[21] = s[ 85]; d2[21] = s[ 86]; d3[21] = s[ 87]; + d0[22] = s[ 88]; d1[22] = s[ 89]; d2[22] = s[ 90]; d3[22] = s[ 91]; + d0[23] = s[ 92]; d1[23] = s[ 93]; d2[23] = s[ 94]; d3[23] = s[ 95]; + d0[24] = s[ 96]; d1[24] = s[ 97]; d2[24] = s[ 98]; d3[24] = s[ 99]; + d0[25] = s[100]; d1[25] = s[101]; d2[25] = s[102]; d3[25] = s[103]; + d0[26] = s[104]; d1[26] = s[105]; d2[26] = s[106]; d3[26] = s[107]; + d0[27] = s[108]; d1[27] = s[109]; d2[27] = s[110]; d3[27] = s[111]; + d0[28] = s[112]; d1[28] = s[113]; d2[28] = s[114]; d3[28] = s[115]; + d0[29] = s[116]; d1[29] = s[117]; d2[29] = s[118]; d3[29] = s[119]; + d0[30] = s[120]; d1[30] = s[121]; d2[30] = s[122]; d3[30] = s[123]; + d0[31] = s[124]; d1[31] = s[125]; d2[31] = s[126]; d3[31] = s[127]; +} + +static inline void dintrlv_4x32_512( void *dst0, void *dst1, void *dst2, + void *dst3, const void *src ) +{ + uint32_t *d0 = (uint32_t*)dst0; + uint32_t *d1 = (uint32_t*)dst1; + uint32_t *d2 = (uint32_t*)dst2; + uint32_t *d3 = (uint32_t*)dst3; + const uint32_t *s = (const uint32_t*)src; + d0[ 0] = s[ 0]; d1[ 0] = s[ 1]; d2[ 0] = s[ 2]; d3[ 0] = s[ 3]; + d0[ 1] = s[ 4]; d1[ 1] = s[ 5]; d2[ 1] = s[ 6]; d3[ 1] = s[ 7]; + d0[ 2] = s[ 8]; d1[ 2] = s[ 9]; d2[ 2] = s[ 10]; d3[ 
2] = s[ 11]; + d0[ 3] = s[ 12]; d1[ 3] = s[ 13]; d2[ 3] = s[ 14]; d3[ 3] = s[ 15]; + d0[ 4] = s[ 16]; d1[ 4] = s[ 17]; d2[ 4] = s[ 18]; d3[ 4] = s[ 19]; + d0[ 5] = s[ 20]; d1[ 5] = s[ 21]; d2[ 5] = s[ 22]; d3[ 5] = s[ 23]; + d0[ 6] = s[ 24]; d1[ 6] = s[ 25]; d2[ 6] = s[ 26]; d3[ 6] = s[ 27]; + d0[ 7] = s[ 28]; d1[ 7] = s[ 29]; d2[ 7] = s[ 30]; d3[ 7] = s[ 31]; + d0[ 8] = s[ 32]; d1[ 8] = s[ 33]; d2[ 8] = s[ 34]; d3[ 8] = s[ 35]; + d0[ 9] = s[ 36]; d1[ 9] = s[ 37]; d2[ 9] = s[ 38]; d3[ 9] = s[ 39]; + d0[10] = s[ 40]; d1[10] = s[ 41]; d2[10] = s[ 42]; d3[10] = s[ 43]; + d0[11] = s[ 44]; d1[11] = s[ 45]; d2[11] = s[ 46]; d3[11] = s[ 47]; + d0[12] = s[ 48]; d1[12] = s[ 49]; d2[12] = s[ 50]; d3[12] = s[ 51]; + d0[13] = s[ 52]; d1[13] = s[ 53]; d2[13] = s[ 54]; d3[13] = s[ 55]; + d0[14] = s[ 56]; d1[14] = s[ 57]; d2[14] = s[ 58]; d3[14] = s[ 59]; + d0[15] = s[ 60]; d1[15] = s[ 61]; d2[15] = s[ 62]; d3[15] = s[ 63]; +} + +#undef DLEAVE_4x32 + +static inline void extr_lane_4x32( void *d, const void *s, + const int lane, const int bit_len ) +{ + ((uint32_t*)d)[ 0] = ((uint32_t*)s)[ lane ]; + ((uint32_t*)d)[ 1] = ((uint32_t*)s)[ lane+ 4 ]; + ((uint32_t*)d)[ 2] = ((uint32_t*)s)[ lane+ 8 ]; + ((uint32_t*)d)[ 3] = ((uint32_t*)s)[ lane+12 ]; + ((uint32_t*)d)[ 4] = ((uint32_t*)s)[ lane+16 ]; + ((uint32_t*)d)[ 5] = ((uint32_t*)s)[ lane+20 ]; + ((uint32_t*)d)[ 6] = ((uint32_t*)s)[ lane+24 ]; + ((uint32_t*)d)[ 7] = ((uint32_t*)s)[ lane+28 ]; + if ( bit_len <= 256 ) return; + ((uint32_t*)d)[ 8] = ((uint32_t*)s)[ lane+32 ]; + ((uint32_t*)d)[ 9] = ((uint32_t*)s)[ lane+36 ]; + ((uint32_t*)d)[10] = ((uint32_t*)s)[ lane+40 ]; + ((uint32_t*)d)[11] = ((uint32_t*)s)[ lane+44 ]; + ((uint32_t*)d)[12] = ((uint32_t*)s)[ lane+48 ]; + ((uint32_t*)d)[13] = ((uint32_t*)s)[ lane+52 ]; + ((uint32_t*)d)[14] = ((uint32_t*)s)[ lane+56 ]; + ((uint32_t*)d)[15] = ((uint32_t*)s)[ lane+60 ]; +} + + + +// Still used by decred due to odd data size: 180 bytes +// bit_len must be multiple of 32 +static inline void mm128_intrlv_4x32x( void *dst, void *src0, void *src1, + void *src2, void *src3, int bit_len ) +{ + uint32_t *d = (uint32_t*)dst; + uint32_t *s0 = (uint32_t*)src0; + uint32_t *s1 = (uint32_t*)src1; + uint32_t *s2 = (uint32_t*)src2; + uint32_t *s3 = (uint32_t*)src3; + + for ( int i = 0; i < bit_len >> 5; i++, d += 4 ) + { + *d = *(s0+i); + *(d+1) = *(s1+i); + *(d+2) = *(s2+i); + *(d+3) = *(s3+i); + } +} + +static inline void mm128_bswap32_intrlv80_4x32( void *d, void *src ) +{ + __m128i sx = mm128_bswap_32( casti_m128i( src,0 ) ); + __m128i sy = mm128_bswap_32( casti_m128i( src,1 ) ); + casti_m128i( d, 0 ) = _mm_shuffle_epi32( sx, 0x00 ); + casti_m128i( d, 1 ) = _mm_shuffle_epi32( sx, 0x55 ); + casti_m128i( d, 2 ) = _mm_shuffle_epi32( sx, 0xaa ); + casti_m128i( d, 3 ) = _mm_shuffle_epi32( sx, 0xff ); + sx = mm128_bswap_32( casti_m128i( src,2 ) ); + casti_m128i( d, 4 ) = _mm_shuffle_epi32( sy, 0x00 ); + casti_m128i( d, 5 ) = _mm_shuffle_epi32( sy, 0x55 ); + casti_m128i( d, 6 ) = _mm_shuffle_epi32( sy, 0xaa ); + casti_m128i( d, 7 ) = _mm_shuffle_epi32( sy, 0xff ); + sy = mm128_bswap_32( casti_m128i( src,3 ) ); + casti_m128i( d, 8 ) = _mm_shuffle_epi32( sx, 0x00 ); + casti_m128i( d, 9 ) = _mm_shuffle_epi32( sx, 0x55 ); + casti_m128i( d,10 ) = _mm_shuffle_epi32( sx, 0xaa ); + casti_m128i( d,11 ) = _mm_shuffle_epi32( sx, 0xff ); + sx = mm128_bswap_32( casti_m128i( src,4 ) ); + casti_m128i( d,12 ) = _mm_shuffle_epi32( sy, 0x00 ); + casti_m128i( d,13 ) = _mm_shuffle_epi32( sy, 0x55 ); + casti_m128i( d,14 ) = _mm_shuffle_epi32( sy, 0xaa 
); + casti_m128i( d,15 ) = _mm_shuffle_epi32( sy, 0xff ); + casti_m128i( d,16 ) = _mm_shuffle_epi32( sx, 0x00 ); + casti_m128i( d,17 ) = _mm_shuffle_epi32( sx, 0x55 ); + casti_m128i( d,18 ) = _mm_shuffle_epi32( sx, 0xaa ); + casti_m128i( d,19 ) = _mm_shuffle_epi32( sx, 0xff ); +} + +// 8x32 + +#define ILEAVE_8x32( i ) do \ +{ \ + uint32_t *d = (uint32_t*)(dst) + ( (i) << 3 ); \ + d[0] = *( (const uint32_t*)(s0) +(i) ); \ + d[1] = *( (const uint32_t*)(s1) +(i) ); \ + d[2] = *( (const uint32_t*)(s2) +(i) ); \ + d[3] = *( (const uint32_t*)(s3) +(i) ); \ + d[4] = *( (const uint32_t*)(s4) +(i) ); \ + d[5] = *( (const uint32_t*)(s5) +(i) ); \ + d[6] = *( (const uint32_t*)(s6) +(i) ); \ + d[7] = *( (const uint32_t*)(s7) +(i) ); \ +} while(0) + +static inline void intrlv_8x32( void *dst, const void *s0, const void *s1, + const void *s2, const void *s3, const void *s4, const void *s5, + const void *s6, const void *s7, int bit_len ) +{ + ILEAVE_8x32( 0 ); ILEAVE_8x32( 1 ); + ILEAVE_8x32( 2 ); ILEAVE_8x32( 3 ); + ILEAVE_8x32( 4 ); ILEAVE_8x32( 5 ); + ILEAVE_8x32( 6 ); ILEAVE_8x32( 7 ); + if ( bit_len <= 256 ) return; + ILEAVE_8x32( 8 ); ILEAVE_8x32( 9 ); + ILEAVE_8x32( 10 ); ILEAVE_8x32( 11 ); + ILEAVE_8x32( 12 ); ILEAVE_8x32( 13 ); + ILEAVE_8x32( 14 ); ILEAVE_8x32( 15 ); + if ( bit_len <= 512 ) return; + ILEAVE_8x32( 16 ); ILEAVE_8x32( 17 ); + ILEAVE_8x32( 18 ); ILEAVE_8x32( 19 ); + if ( bit_len <= 640 ) return; + ILEAVE_8x32( 20 ); ILEAVE_8x32( 21 ); + ILEAVE_8x32( 22 ); ILEAVE_8x32( 23 ); + ILEAVE_8x32( 24 ); ILEAVE_8x32( 25 ); + ILEAVE_8x32( 26 ); ILEAVE_8x32( 27 ); + ILEAVE_8x32( 28 ); ILEAVE_8x32( 29 ); + ILEAVE_8x32( 30 ); ILEAVE_8x32( 31 ); +} + +static inline void intrlv_8x32_512( void *dst, const void *s0, const void *s1, + const void *s2, const void *s3, const void *s4, const void *s5, + const void *s6, const void *s7 ) +{ + ILEAVE_8x32( 0 ); ILEAVE_8x32( 1 ); + ILEAVE_8x32( 2 ); ILEAVE_8x32( 3 ); + ILEAVE_8x32( 4 ); ILEAVE_8x32( 5 ); + ILEAVE_8x32( 6 ); ILEAVE_8x32( 7 ); + ILEAVE_8x32( 8 ); ILEAVE_8x32( 9 ); + ILEAVE_8x32( 10 ); ILEAVE_8x32( 11 ); + ILEAVE_8x32( 12 ); ILEAVE_8x32( 13 ); + ILEAVE_8x32( 14 ); ILEAVE_8x32( 15 ); +} + +#undef ILEAVE_8x32 + +#define DLEAVE_8x32( i ) do \ +{ \ + const uint32_t *s = (const uint32_t*)(src) + ( (i) << 3 ); \ + *( (uint32_t*)(d0) +(i) ) = s[0]; \ + *( (uint32_t*)(d1) +(i) ) = s[1]; \ + *( (uint32_t*)(d2) +(i) ) = s[2]; \ + *( (uint32_t*)(d3) +(i) ) = s[3]; \ + *( (uint32_t*)(d4) +(i) ) = s[4]; \ + *( (uint32_t*)(d5) +(i) ) = s[5]; \ + *( (uint32_t*)(d6) +(i) ) = s[6]; \ + *( (uint32_t*)(d7) +(i) ) = s[7]; \ +} while(0) + +static inline void dintrlv_8x32( void *d0, void *d1, void *d2, void *d3, + void *d4, void *d5, void *d6, void *d7, const void *src, int bit_len ) +{ + DLEAVE_8x32( 0 ); DLEAVE_8x32( 1 ); + DLEAVE_8x32( 2 ); DLEAVE_8x32( 3 ); + DLEAVE_8x32( 4 ); DLEAVE_8x32( 5 ); + DLEAVE_8x32( 6 ); DLEAVE_8x32( 7 ); + if ( bit_len <= 256 ) return; + DLEAVE_8x32( 8 ); DLEAVE_8x32( 9 ); + DLEAVE_8x32( 10 ); DLEAVE_8x32( 11 ); + DLEAVE_8x32( 12 ); DLEAVE_8x32( 13 ); + DLEAVE_8x32( 14 ); DLEAVE_8x32( 15 ); + if ( bit_len <= 512 ) return; + DLEAVE_8x32( 16 ); DLEAVE_8x32( 17 ); + DLEAVE_8x32( 18 ); DLEAVE_8x32( 19 ); + if ( bit_len <= 640 ) return; + DLEAVE_8x32( 20 ); DLEAVE_8x32( 21 ); + DLEAVE_8x32( 22 ); DLEAVE_8x32( 23 ); + DLEAVE_8x32( 24 ); DLEAVE_8x32( 25 ); + DLEAVE_8x32( 26 ); DLEAVE_8x32( 27 ); + DLEAVE_8x32( 28 ); DLEAVE_8x32( 29 ); + DLEAVE_8x32( 30 ); DLEAVE_8x32( 31 ); +} + +static inline void dintrlv_8x32_512( void *d0, void *d1, void 
*d2, void *d3, + void *d4, void *d5, void *d6, void *d7, const void *src ) +{ + DLEAVE_8x32( 0 ); DLEAVE_8x32( 1 ); + DLEAVE_8x32( 2 ); DLEAVE_8x32( 3 ); + DLEAVE_8x32( 4 ); DLEAVE_8x32( 5 ); + DLEAVE_8x32( 6 ); DLEAVE_8x32( 7 ); + DLEAVE_8x32( 8 ); DLEAVE_8x32( 9 ); + DLEAVE_8x32( 10 ); DLEAVE_8x32( 11 ); + DLEAVE_8x32( 12 ); DLEAVE_8x32( 13 ); + DLEAVE_8x32( 14 ); DLEAVE_8x32( 15 ); +} + +#undef DLEAVE_8x32 + +static inline void extr_lane_8x32( void *d, const void *s, + const int lane, const int bit_len ) +{ + ((uint32_t*)d)[ 0] = ((uint32_t*)s)[ lane ]; + ((uint32_t*)d)[ 1] = ((uint32_t*)s)[ lane+ 8 ]; + ((uint32_t*)d)[ 2] = ((uint32_t*)s)[ lane+ 16 ]; + ((uint32_t*)d)[ 3] = ((uint32_t*)s)[ lane+ 24 ]; + ((uint32_t*)d)[ 4] = ((uint32_t*)s)[ lane+ 32 ]; + ((uint32_t*)d)[ 5] = ((uint32_t*)s)[ lane+ 40 ]; + ((uint32_t*)d)[ 6] = ((uint32_t*)s)[ lane+ 48 ]; + ((uint32_t*)d)[ 7] = ((uint32_t*)s)[ lane+ 56 ]; + if ( bit_len <= 256 ) return; + ((uint32_t*)d)[ 8] = ((uint32_t*)s)[ lane+ 64 ]; + ((uint32_t*)d)[ 9] = ((uint32_t*)s)[ lane+ 72 ]; + ((uint32_t*)d)[10] = ((uint32_t*)s)[ lane+ 80 ]; + ((uint32_t*)d)[11] = ((uint32_t*)s)[ lane+ 88 ]; + ((uint32_t*)d)[12] = ((uint32_t*)s)[ lane+ 96 ]; + ((uint32_t*)d)[13] = ((uint32_t*)s)[ lane+104 ]; + ((uint32_t*)d)[14] = ((uint32_t*)s)[ lane+112 ]; + ((uint32_t*)d)[15] = ((uint32_t*)s)[ lane+120 ]; +} + +#if defined(__AVX2__) + +// There a alignment problems with the source buffer on Wwindows, +// can't use 256 bit bswap. + +static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src ) +{ + __m256i s0 = mm256_bswap_32( casti_m256i( src,0 ) ); + __m256i s1 = mm256_bswap_32( casti_m256i( src,1 ) ); + __m128i s2 = mm128_bswap_32( casti_m128i( src,4 ) ); + const __m256i zero = m256_zero; + const __m256i one = m256_one_32; + const __m256i two = _mm256_add_epi32( one, one ); + const __m256i tre = _mm256_add_epi32( two, one ); + const __m256i four = _mm256_add_epi32( two, two ); + + casti_m256i( d, 0 ) = _mm256_permutevar8x32_epi32( s0, zero ); + casti_m256i( d, 1 ) = _mm256_permutevar8x32_epi32( s0, one ); + casti_m256i( d, 2 ) = _mm256_permutevar8x32_epi32( s0, two ); + casti_m256i( d, 3 ) = _mm256_permutevar8x32_epi32( s0, tre ); + casti_m256i( d, 4 ) = _mm256_permutevar8x32_epi32( s0, four ); + casti_m256i( d, 5 ) = _mm256_permutevar8x32_epi32( s0, + _mm256_add_epi32( four, one ) ); + casti_m256i( d, 6 ) = _mm256_permutevar8x32_epi32( s0, + _mm256_add_epi32( four, two ) ); + casti_m256i( d, 7 ) = _mm256_permutevar8x32_epi32( s0, + _mm256_add_epi32( four, tre ) ); + casti_m256i( d, 8 ) = _mm256_permutevar8x32_epi32( s1, zero ); + casti_m256i( d, 9 ) = _mm256_permutevar8x32_epi32( s1, one ); + casti_m256i( d,10 ) = _mm256_permutevar8x32_epi32( s1, two ); + casti_m256i( d,11 ) = _mm256_permutevar8x32_epi32( s1, tre ); + casti_m256i( d,12 ) = _mm256_permutevar8x32_epi32( s1, four ); + casti_m256i( d,13 ) = _mm256_permutevar8x32_epi32( s1, + _mm256_add_epi32( four, one ) ); + casti_m256i( d,14 ) = _mm256_permutevar8x32_epi32( s1, + _mm256_add_epi32( four, two ) ); + casti_m256i( d,15 ) = _mm256_permutevar8x32_epi32( s1, + _mm256_add_epi32( four, tre ) ); + casti_m256i( d,16 ) = _mm256_permutevar8x32_epi32( + _mm256_castsi128_si256( s2 ), zero ); + casti_m256i( d,17 ) = _mm256_permutevar8x32_epi32( + _mm256_castsi128_si256( s2 ), one ); + casti_m256i( d,18 ) = _mm256_permutevar8x32_epi32( + _mm256_castsi128_si256( s2 ), two ); + casti_m256i( d,19 ) = _mm256_permutevar8x32_epi32( + _mm256_castsi128_si256( s2 ), tre ); +} + +#endif // AVX2 + +// 16x32 + 
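A hedged usage sketch for the 8 lane 32 bit helpers above, illustrative only: hash8way_stub stands in for whatever 8-way hash the caller runs, and the 640/256 bit lengths are simply the usual 80 byte input, 32 byte output case.

static inline void hash_8_lanes_example( uint32_t *hash32, const uint32_t in[8][20] )
{
   uint32_t vin [ 8*20 ] __attribute__ ((aligned (32)));
   uint32_t vout[ 8* 8 ] __attribute__ ((aligned (32)));
   // Pack eight 80 byte inputs so lane n occupies every 8th word.
   intrlv_8x32( vin, in[0], in[1], in[2], in[3],
                     in[4], in[5], in[6], in[7], 640 );
   // hash8way_stub( vout, vin );   // assumed 8-way hash, 256 bits per lane
   // Pull each lane's 256 bit digest back out for the target test.
   for ( int lane = 0; lane < 8; lane++ )
      extr_lane_8x32( hash32 + (lane<<3), vout, lane, 256 );
}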
+#define ILEAVE_16x32( i ) do \ +{ \ + uint32_t *d = (uint32_t*)(dst) + ( (i) << 4 ); \ + d[ 0] = *( (const uint32_t*)(s00) +(i) ); \ + d[ 1] = *( (const uint32_t*)(s01) +(i) ); \ + d[ 2] = *( (const uint32_t*)(s02) +(i) ); \ + d[ 3] = *( (const uint32_t*)(s03) +(i) ); \ + d[ 4] = *( (const uint32_t*)(s04) +(i) ); \ + d[ 5] = *( (const uint32_t*)(s05) +(i) ); \ + d[ 6] = *( (const uint32_t*)(s06) +(i) ); \ + d[ 7] = *( (const uint32_t*)(s07) +(i) ); \ + d[ 8] = *( (const uint32_t*)(s08) +(i) ); \ + d[ 9] = *( (const uint32_t*)(s09) +(i) ); \ + d[10] = *( (const uint32_t*)(s10) +(i) ); \ + d[11] = *( (const uint32_t*)(s11) +(i) ); \ + d[12] = *( (const uint32_t*)(s12) +(i) ); \ + d[13] = *( (const uint32_t*)(s13) +(i) ); \ + d[14] = *( (const uint32_t*)(s14) +(i) ); \ + d[15] = *( (const uint32_t*)(s15) +(i) ); \ +} while(0) + +static inline void intrlv_16x32( void *dst, const void *s00, + const void *s01, const void *s02, const void *s03, const void *s04, + const void *s05, const void *s06, const void *s07, const void *s08, + const void *s09, const void *s10, const void *s11, const void *s12, + const void *s13, const void *s14, const void *s15, int bit_len ) +{ + ILEAVE_16x32( 0 ); ILEAVE_16x32( 1 ); + ILEAVE_16x32( 2 ); ILEAVE_16x32( 3 ); + ILEAVE_16x32( 4 ); ILEAVE_16x32( 5 ); + ILEAVE_16x32( 6 ); ILEAVE_16x32( 7 ); + if ( bit_len <= 256 ) return; + ILEAVE_16x32( 8 ); ILEAVE_16x32( 9 ); + ILEAVE_16x32( 10 ); ILEAVE_16x32( 11 ); + ILEAVE_16x32( 12 ); ILEAVE_16x32( 13 ); + ILEAVE_16x32( 14 ); ILEAVE_16x32( 15 ); + if ( bit_len <= 512 ) return; + ILEAVE_16x32( 16 ); ILEAVE_16x32( 17 ); + ILEAVE_16x32( 18 ); ILEAVE_16x32( 19 ); + if ( bit_len <= 640 ) return; + ILEAVE_16x32( 20 ); ILEAVE_16x32( 21 ); + ILEAVE_16x32( 22 ); ILEAVE_16x32( 23 ); + ILEAVE_16x32( 24 ); ILEAVE_16x32( 25 ); + ILEAVE_16x32( 26 ); ILEAVE_16x32( 27 ); + ILEAVE_16x32( 28 ); ILEAVE_16x32( 29 ); + ILEAVE_16x32( 30 ); ILEAVE_16x32( 31 ); +} + +static inline void intrlv_16x32_512( void *dst, const void *s00, + const void *s01, const void *s02, const void *s03, const void *s04, + const void *s05, const void *s06, const void *s07, const void *s08, + const void *s09, const void *s10, const void *s11, const void *s12, + const void *s13, const void *s14, const void *s15 ) +{ + ILEAVE_16x32( 0 ); ILEAVE_16x32( 1 ); + ILEAVE_16x32( 2 ); ILEAVE_16x32( 3 ); + ILEAVE_16x32( 4 ); ILEAVE_16x32( 5 ); + ILEAVE_16x32( 6 ); ILEAVE_16x32( 7 ); + ILEAVE_16x32( 8 ); ILEAVE_16x32( 9 ); + ILEAVE_16x32( 10 ); ILEAVE_16x32( 11 ); + ILEAVE_16x32( 12 ); ILEAVE_16x32( 13 ); + ILEAVE_16x32( 14 ); ILEAVE_16x32( 15 ); +} + +#undef ILEAVE_16x32 + +#define DLEAVE_16x32( i ) do \ +{ \ + const uint32_t *s = (const uint32_t*)(src) + ( (i) << 4 ); \ + *( (uint32_t*)(d00) +(i) ) = s[ 0]; \ + *( (uint32_t*)(d01) +(i) ) = s[ 1]; \ + *( (uint32_t*)(d02) +(i) ) = s[ 2]; \ + *( (uint32_t*)(d03) +(i) ) = s[ 3]; \ + *( (uint32_t*)(d04) +(i) ) = s[ 4]; \ + *( (uint32_t*)(d05) +(i) ) = s[ 5]; \ + *( (uint32_t*)(d06) +(i) ) = s[ 6]; \ + *( (uint32_t*)(d07) +(i) ) = s[ 7]; \ + *( (uint32_t*)(d08) +(i) ) = s[ 8]; \ + *( (uint32_t*)(d09) +(i) ) = s[ 9]; \ + *( (uint32_t*)(d10) +(i) ) = s[10]; \ + *( (uint32_t*)(d11) +(i) ) = s[11]; \ + *( (uint32_t*)(d12) +(i) ) = s[12]; \ + *( (uint32_t*)(d13) +(i) ) = s[13]; \ + *( (uint32_t*)(d14) +(i) ) = s[14]; \ + *( (uint32_t*)(d15) +(i) ) = s[15]; \ +} while(0) + +static inline void dintrlv_16x32( void *d00, void *d01, void *d02, void *d03, + void *d04, void *d05, void *d06, void *d07, void *d08, void *d09, + void *d10, void *d11, 
void *d12, void *d13, void *d14, void *d15, + const void *src, int bit_len ) +{ + DLEAVE_16x32( 0 ); DLEAVE_16x32( 1 ); + DLEAVE_16x32( 2 ); DLEAVE_16x32( 3 ); + DLEAVE_16x32( 4 ); DLEAVE_16x32( 5 ); + DLEAVE_16x32( 6 ); DLEAVE_16x32( 7 ); + if ( bit_len <= 256 ) return; + DLEAVE_16x32( 8 ); DLEAVE_16x32( 9 ); + DLEAVE_16x32( 10 ); DLEAVE_16x32( 11 ); + DLEAVE_16x32( 12 ); DLEAVE_16x32( 13 ); + DLEAVE_16x32( 14 ); DLEAVE_16x32( 15 ); + if ( bit_len <= 512 ) return; + DLEAVE_16x32( 16 ); DLEAVE_16x32( 17 ); + DLEAVE_16x32( 18 ); DLEAVE_16x32( 19 ); + if ( bit_len <= 640 ) return; + DLEAVE_16x32( 20 ); DLEAVE_16x32( 21 ); + DLEAVE_16x32( 22 ); DLEAVE_16x32( 23 ); + DLEAVE_16x32( 24 ); DLEAVE_16x32( 25 ); + DLEAVE_16x32( 26 ); DLEAVE_16x32( 27 ); + DLEAVE_16x32( 28 ); DLEAVE_16x32( 29 ); + DLEAVE_16x32( 30 ); DLEAVE_16x32( 31 ); +} + +static inline void dintrlv_16x32_512( void *d00, void *d01, void *d02, + void *d03, void *d04, void *d05, void *d06, void *d07, + void *d08, void *d09, void *d10, void *d11, void *d12, + void *d13, void *d14, void *d15, const void *src ) +{ + DLEAVE_16x32( 0 ); DLEAVE_16x32( 1 ); + DLEAVE_16x32( 2 ); DLEAVE_16x32( 3 ); + DLEAVE_16x32( 4 ); DLEAVE_16x32( 5 ); + DLEAVE_16x32( 6 ); DLEAVE_16x32( 7 ); + DLEAVE_16x32( 8 ); DLEAVE_16x32( 9 ); + DLEAVE_16x32( 10 ); DLEAVE_16x32( 11 ); + DLEAVE_16x32( 12 ); DLEAVE_16x32( 13 ); + DLEAVE_16x32( 14 ); DLEAVE_16x32( 15 ); +} + +#undef DLEAVE_16x32 + +static inline void extr_lane_16x32( void *d, const void *s, + const int lane, const int bit_len ) +{ + ((uint32_t*)d)[ 0] = ((uint32_t*)s)[ lane ]; + ((uint32_t*)d)[ 1] = ((uint32_t*)s)[ lane+16 ]; + ((uint32_t*)d)[ 2] = ((uint32_t*)s)[ lane+32 ]; + ((uint32_t*)d)[ 3] = ((uint32_t*)s)[ lane+48 ]; + ((uint32_t*)d)[ 4] = ((uint32_t*)s)[ lane+64 ]; + ((uint32_t*)d)[ 5] = ((uint32_t*)s)[ lane+80 ]; + ((uint32_t*)d)[ 6] = ((uint32_t*)s)[ lane+96 ]; + ((uint32_t*)d)[ 7] = ((uint32_t*)s)[ lane+112 ]; + if ( bit_len <= 256 ) return; + ((uint32_t*)d)[ 8] = ((uint32_t*)s)[ lane+128 ]; + ((uint32_t*)d)[ 9] = ((uint32_t*)s)[ lane+144 ]; + ((uint32_t*)d)[10] = ((uint32_t*)s)[ lane+160 ]; + ((uint32_t*)d)[11] = ((uint32_t*)s)[ lane+176 ]; + ((uint32_t*)d)[12] = ((uint32_t*)s)[ lane+192 ]; + ((uint32_t*)d)[13] = ((uint32_t*)s)[ lane+208 ]; + ((uint32_t*)d)[14] = ((uint32_t*)s)[ lane+224 ]; + ((uint32_t*)d)[15] = ((uint32_t*)s)[ lane+240 ]; +} + +#if defined(__AVX512F__) && defined(__AVX512VL__) + +static inline void mm512_bswap32_intrlv80_16x32( void *d, void *src ) +{ + __m512i s0 = mm512_bswap_32( casti_m512i( src, 0 ) ); + __m128i s1 = mm128_bswap_32( casti_m128i( src, 4 ) ); + const __m512i zero = m512_zero; + const __m512i one = m512_one_32; + const __m512i two = _mm512_add_epi32( one, one ); + const __m512i tre = _mm512_add_epi32( two, one ); + const __m512i four = _mm512_add_epi32( two, two ); + const __m512i eight = _mm512_add_epi32( four, four ); + const __m512i eleven = _mm512_add_epi32( eight, tre ); + + casti_m512i( d, 0 ) = _mm512_permutexvar_epi32( s0, zero ); + casti_m512i( d, 1 ) = _mm512_permutexvar_epi32( s0, one ); + casti_m512i( d, 2 ) = _mm512_permutexvar_epi32( s0, two ); + casti_m512i( d, 3 ) = _mm512_permutexvar_epi32( s0, tre ); + casti_m512i( d, 4 ) = _mm512_permutexvar_epi32( s0, four ); + casti_m512i( d, 5 ) = _mm512_permutexvar_epi32( s0, + _mm512_add_epi32( four, one ) ); + casti_m512i( d, 6 ) = _mm512_permutexvar_epi32( s0, + _mm512_add_epi32( four, two ) ); + casti_m512i( d, 7 ) = _mm512_permutexvar_epi32( s0, + _mm512_add_epi32( four, tre ) ); + casti_m512i( 
d, 8 ) = _mm512_permutexvar_epi32( s0, eight ); + casti_m512i( d, 9 ) = _mm512_permutexvar_epi32( s0, + _mm512_add_epi32( eight, one ) ); + casti_m512i( d,10 ) = _mm512_permutexvar_epi32( s0, + _mm512_add_epi32( eight, two ) ); + casti_m512i( d,11 ) = _mm512_permutexvar_epi32( s0, eleven ); + casti_m512i( d,12 ) = _mm512_permutexvar_epi32( s0, + _mm512_add_epi32( eleven, one ) ); + casti_m512i( d,13 ) = _mm512_permutexvar_epi32( s0, + _mm512_add_epi32( eleven, two ) ); + casti_m512i( d,14 ) = _mm512_permutexvar_epi32( s0, + _mm512_add_epi32( eleven, tre ) ); + casti_m512i( d,15 ) = _mm512_permutexvar_epi32( s0, + _mm512_add_epi32( eleven, four ) ); + casti_m512i( d,16 ) = _mm512_permutexvar_epi32( + _mm512_castsi128_si512( s1 ), zero ); + casti_m512i( d,17 ) = _mm512_permutexvar_epi32( + _mm512_castsi128_si512( s1 ), one ); + casti_m512i( d,18 ) = _mm512_permutexvar_epi32( + _mm512_castsi128_si512( s1 ), two ); + casti_m512i( d,19 ) = _mm512_permutexvar_epi32( + _mm512_castsi128_si512( s1 ), tre ); +} + +#endif // AVX512 + +/////////////////////////// +// +// 64 bit data + +// 2x64 (SSE2) + +static inline void intrlv_2x64( void *dst, const void *src0, + const void *src1, int bit_len ) +{ + uint64_t *d = (uint64_t*)dst;; + const uint64_t *s0 = (const uint64_t*)src0; + const uint64_t *s1 = (const uint64_t*)src1; + d[ 0] = s0[ 0]; d[ 1] = s1[ 0]; d[ 2] = s0[ 1]; d[ 3] = s1[ 1]; + d[ 4] = s0[ 2]; d[ 5] = s1[ 2]; d[ 6] = s0[ 3]; d[ 7] = s1[ 3]; + if ( bit_len <= 256 ) return; + d[ 8] = s0[ 4]; d[ 9] = s1[ 4]; d[10] = s0[ 5]; d[11] = s1[ 5]; + d[12] = s0[ 6]; d[13] = s1[ 6]; d[14] = s0[ 7]; d[15] = s1[ 7]; + if ( bit_len <= 512 ) return; + d[16] = s0[ 8]; d[17] = s1[ 8]; d[18] = s0[ 9]; d[19] = s1[ 9]; + if ( bit_len <= 640 ) return; + d[20] = s0[10]; d[21] = s1[10]; d[22] = s0[11]; d[23] = s1[11]; + d[24] = s0[12]; d[25] = s1[12]; d[26] = s0[13]; d[27] = s1[13]; + d[28] = s0[14]; d[29] = s1[14]; d[30] = s0[15]; d[31] = s1[15]; +} + +static inline void dintrlv_2x64( void *dst0, void *dst1, + const void *src, int bit_len ) +{ + uint64_t *d0 = (uint64_t*)dst0; + uint64_t *d1 = (uint64_t*)dst1; + const uint64_t *s = (const uint64_t*)src; + + d0[ 0] = s[ 0]; d1[ 0] = s[ 1]; d0[ 1] = s[ 2]; d1[ 1] = s[ 3]; + d0[ 2] = s[ 4]; d1[ 2] = s[ 5]; d0[ 3] = s[ 6]; d1[ 3] = s[ 7]; + if ( bit_len <= 256 ) return; + d0[ 4] = s[ 8]; d1[ 4] = s[ 9]; d0[ 5] = s[10]; d1[ 5] = s[11]; + d0[ 6] = s[12]; d1[ 6] = s[13]; d0[ 7] = s[14]; d1[ 7] = s[15]; + if ( bit_len <= 512 ) return; + d0[ 8] = s[16]; d1[ 8] = s[17]; d0[ 9] = s[18]; d1[ 9] = s[19]; + if ( bit_len <= 640 ) return; + d0[10] = s[20]; d1[10] = s[21]; d0[11] = s[22]; d1[11] = s[23]; + d0[12] = s[24]; d1[12] = s[25]; d0[13] = s[26]; d1[13] = s[27]; + d0[14] = s[28]; d1[14] = s[29]; d0[15] = s[30]; d1[15] = s[31]; +} + +// 4x64 (AVX2) + +static inline void intrlv_4x64( void *dst, const void *src0, + const void *src1, const void *src2, const void *src3, int bit_len ) +{ + uint64_t *d = (uint64_t*)dst; + const uint64_t *s0 = (const uint64_t*)src0; + const uint64_t *s1 = (const uint64_t*)src1; + const uint64_t *s2 = (const uint64_t*)src2; + const uint64_t *s3 = (const uint64_t*)src3; + d[ 0] = s0[ 0]; d[ 1] = s1[ 0]; d[ 2] = s2[ 0]; d[ 3] = s3[ 0]; + d[ 4] = s0[ 1]; d[ 5] = s1[ 1]; d[ 6] = s2[ 1]; d[ 7] = s3[ 1]; + d[ 8] = s0[ 2]; d[ 9] = s1[ 2]; d[ 10] = s2[ 2]; d[ 11] = s3[ 2]; + d[ 12] = s0[ 3]; d[ 13] = s1[ 3]; d[ 14] = s2[ 3]; d[ 15] = s3[ 3]; + if ( bit_len <= 256 ) return; + d[ 16] = s0[ 4]; d[ 17] = s1[ 4]; d[ 18] = s2[ 4]; d[ 19] = s3[ 4]; + d[ 20] = s0[ 
5]; d[ 21] = s1[ 5]; d[ 22] = s2[ 5]; d[ 23] = s3[ 5]; + d[ 24] = s0[ 6]; d[ 25] = s1[ 6]; d[ 26] = s2[ 6]; d[ 27] = s3[ 6]; + d[ 28] = s0[ 7]; d[ 29] = s1[ 7]; d[ 30] = s2[ 7]; d[ 31] = s3[ 7]; + if ( bit_len <= 512 ) return; + d[ 32] = s0[ 8]; d[ 33] = s1[ 8]; d[ 34] = s2[ 8]; d[ 35] = s3[ 8]; + d[ 36] = s0[ 9]; d[ 37] = s1[ 9]; d[ 38] = s2[ 9]; d[ 39] = s3[ 9]; + if ( bit_len <= 640 ) return; + d[ 40] = s0[10]; d[ 41] = s1[10]; d[ 42] = s2[10]; d[ 43] = s3[10]; + d[ 44] = s0[11]; d[ 45] = s1[11]; d[ 46] = s2[11]; d[ 47] = s3[11]; + d[ 48] = s0[12]; d[ 49] = s1[12]; d[ 50] = s2[12]; d[ 51] = s3[12]; + d[ 52] = s0[13]; d[ 53] = s1[13]; d[ 54] = s2[13]; d[ 55] = s3[13]; + d[ 56] = s0[14]; d[ 57] = s1[14]; d[ 58] = s2[14]; d[ 59] = s3[14]; + d[ 60] = s0[15]; d[ 61] = s1[15]; d[ 62] = s2[15]; d[ 63] = s3[15]; +} + +static inline void intrlv_4x64_512( void *dst, const void *src0, + const void *src1, const void *src2, const void *src3 ) +{ + uint64_t *d = (uint64_t*)dst; + const uint64_t *s0 = (const uint64_t*)src0; + const uint64_t *s1 = (const uint64_t*)src1; + const uint64_t *s2 = (const uint64_t*)src2; + const uint64_t *s3 = (const uint64_t*)src3; + d[ 0] = s0[ 0]; d[ 1] = s1[ 0]; d[ 2] = s2[ 0]; d[ 3] = s3[ 0]; + d[ 4] = s0[ 1]; d[ 5] = s1[ 1]; d[ 6] = s2[ 1]; d[ 7] = s3[ 1]; + d[ 8] = s0[ 2]; d[ 9] = s1[ 2]; d[ 10] = s2[ 2]; d[ 11] = s3[ 2]; + d[ 12] = s0[ 3]; d[ 13] = s1[ 3]; d[ 14] = s2[ 3]; d[ 15] = s3[ 3]; + d[ 16] = s0[ 4]; d[ 17] = s1[ 4]; d[ 18] = s2[ 4]; d[ 19] = s3[ 4]; + d[ 20] = s0[ 5]; d[ 21] = s1[ 5]; d[ 22] = s2[ 5]; d[ 23] = s3[ 5]; + d[ 24] = s0[ 6]; d[ 25] = s1[ 6]; d[ 26] = s2[ 6]; d[ 27] = s3[ 6]; + d[ 28] = s0[ 7]; d[ 29] = s1[ 7]; d[ 30] = s2[ 7]; d[ 31] = s3[ 7]; +} + +static inline void dintrlv_4x64( void *dst0, void *dst1, void *dst2, + void *dst3, const void *src, int bit_len ) +{ + uint64_t *d0 = (uint64_t*)dst0; + uint64_t *d1 = (uint64_t*)dst1; + uint64_t *d2 = (uint64_t*)dst2; + uint64_t *d3 = (uint64_t*)dst3; + const uint64_t *s = (const uint64_t*)src; + d0[ 0] = s[ 0]; d1[ 0] = s[ 1]; d2[ 0] = s[ 2]; d3[ 0] = s[ 3]; + d0[ 1] = s[ 4]; d1[ 1] = s[ 5]; d2[ 1] = s[ 6]; d3[ 1] = s[ 7]; + d0[ 2] = s[ 8]; d1[ 2] = s[ 9]; d2[ 2] = s[10]; d3[ 2] = s[11]; + d0[ 3] = s[12]; d1[ 3] = s[13]; d2[ 3] = s[14]; d3[ 3] = s[15]; + if ( bit_len <= 256 ) return; + d0[ 4] = s[16]; d1[ 4] = s[17]; d2[ 4] = s[18]; d3[ 4] = s[19]; + d0[ 5] = s[20]; d1[ 5] = s[21]; d2[ 5] = s[22]; d3[ 5] = s[23]; + d0[ 6] = s[24]; d1[ 6] = s[25]; d2[ 6] = s[26]; d3[ 6] = s[27]; + d0[ 7] = s[28]; d1[ 7] = s[29]; d2[ 7] = s[30]; d3[ 7] = s[31]; + if ( bit_len <= 512 ) return; + d0[ 8] = s[32]; d1[ 8] = s[33]; d2[ 8] = s[34]; d3[ 8] = s[35]; + d0[ 9] = s[36]; d1[ 9] = s[37]; d2[ 9] = s[38]; d3[ 9] = s[39]; + if ( bit_len <= 640 ) return; + d0[10] = s[40]; d1[10] = s[41]; d2[10] = s[42]; d3[10] = s[43]; + d0[11] = s[44]; d1[11] = s[45]; d2[11] = s[46]; d3[11] = s[47]; + d0[12] = s[48]; d1[12] = s[49]; d2[12] = s[50]; d3[12] = s[51]; + d0[13] = s[52]; d1[13] = s[53]; d2[13] = s[54]; d3[13] = s[55]; + d0[14] = s[56]; d1[14] = s[57]; d2[14] = s[58]; d3[14] = s[59]; + d0[15] = s[60]; d1[15] = s[61]; d2[15] = s[62]; d3[15] = s[63]; +} + +static inline void dintrlv_4x64_512( void *dst0, void *dst1, void *dst2, + void *dst3, const void *src ) +{ + uint64_t *d0 = (uint64_t*)dst0; + uint64_t *d1 = (uint64_t*)dst1; + uint64_t *d2 = (uint64_t*)dst2; + uint64_t *d3 = (uint64_t*)dst3; + const uint64_t *s = (const uint64_t*)src; + d0[ 0] = s[ 0]; d1[ 0] = s[ 1]; d2[ 0] = s[ 2]; d3[ 0] = s[ 3]; + d0[ 1] = s[ 4]; d1[ 
1] = s[ 5]; d2[ 1] = s[ 6]; d3[ 1] = s[ 7]; + d0[ 2] = s[ 8]; d1[ 2] = s[ 9]; d2[ 2] = s[10]; d3[ 2] = s[11]; + d0[ 3] = s[12]; d1[ 3] = s[13]; d2[ 3] = s[14]; d3[ 3] = s[15]; + d0[ 4] = s[16]; d1[ 4] = s[17]; d2[ 4] = s[18]; d3[ 4] = s[19]; + d0[ 5] = s[20]; d1[ 5] = s[21]; d2[ 5] = s[22]; d3[ 5] = s[23]; + d0[ 6] = s[24]; d1[ 6] = s[25]; d2[ 6] = s[26]; d3[ 6] = s[27]; + d0[ 7] = s[28]; d1[ 7] = s[29]; d2[ 7] = s[30]; d3[ 7] = s[31]; +} + +static inline void extr_lane_4x64( void *d, const void *s, + const int lane, const int bit_len ) +{ + ((uint64_t*)d)[ 0] = ((uint64_t*)s)[ lane ]; + ((uint64_t*)d)[ 1] = ((uint64_t*)s)[ lane+ 4 ]; + ((uint64_t*)d)[ 2] = ((uint64_t*)s)[ lane+ 8 ]; + ((uint64_t*)d)[ 3] = ((uint64_t*)s)[ lane+12 ]; + ((uint64_t*)d)[ 4] = ((uint64_t*)s)[ lane+16 ]; + ((uint64_t*)d)[ 5] = ((uint64_t*)s)[ lane+20 ]; + ((uint64_t*)d)[ 6] = ((uint64_t*)s)[ lane+24 ]; + ((uint64_t*)d)[ 7] = ((uint64_t*)s)[ lane+28 ]; + if ( bit_len <= 256 ) return; + ((uint64_t*)d)[ 8] = ((uint64_t*)s)[ lane+32 ]; + ((uint64_t*)d)[ 9] = ((uint64_t*)s)[ lane+36 ]; + ((uint64_t*)d)[10] = ((uint64_t*)s)[ lane+40 ]; + ((uint64_t*)d)[11] = ((uint64_t*)s)[ lane+44 ]; + ((uint64_t*)d)[12] = ((uint64_t*)s)[ lane+48 ]; + ((uint64_t*)d)[13] = ((uint64_t*)s)[ lane+52 ]; + ((uint64_t*)d)[14] = ((uint64_t*)s)[ lane+56 ]; + ((uint64_t*)d)[15] = ((uint64_t*)s)[ lane+60 ]; +} + +#if defined(__AVX2__) + +// There a alignment problems with the source buffer on Wwindows, +// can't use 256 bit bswap. + +static inline void mm256_bswap32_intrlv80_4x64( void *d, void *src ) +{ + __m256i s0 = mm256_bswap_32( casti_m256i( src, 0 ) ); + __m256i s1 = mm256_bswap_32( casti_m256i( src, 1 ) ); + __m128i s2 = mm128_bswap_32( casti_m128i( src, 4 ) ); + + casti_m256i( d, 0 ) = _mm256_permute4x64_epi64( s0, 0x00 ); + casti_m256i( d, 1 ) = _mm256_permute4x64_epi64( s0, 0x55 ); + casti_m256i( d, 2 ) = _mm256_permute4x64_epi64( s0, 0xaa ); + casti_m256i( d, 3 ) = _mm256_permute4x64_epi64( s0, 0xff ); + casti_m256i( d, 4 ) = _mm256_permute4x64_epi64( s1, 0x00 ); + casti_m256i( d, 5 ) = _mm256_permute4x64_epi64( s1, 0x55 ); + casti_m256i( d, 6 ) = _mm256_permute4x64_epi64( s1, 0xaa ); + casti_m256i( d, 7 ) = _mm256_permute4x64_epi64( s1, 0xff ); + casti_m256i( d, 8 ) = _mm256_permute4x64_epi64( + _mm256_castsi128_si256( s2 ), 0x00 ); + casti_m256i( d, 9 ) = _mm256_permute4x64_epi64( + _mm256_castsi128_si256( s2 ), 0x55 ); +} + +#endif // AVX2 + +// 8x64 (AVX512) + +#define ILEAVE_8x64( i ) do \ +{ \ + uint64_t *d = (uint64_t*)(dst) + ( (i) << 3 ); \ + d[0] = *( (const uint64_t*)(s0) +(i) ); \ + d[1] = *( (const uint64_t*)(s1) +(i) ); \ + d[2] = *( (const uint64_t*)(s2) +(i) ); \ + d[3] = *( (const uint64_t*)(s3) +(i) ); \ + d[4] = *( (const uint64_t*)(s4) +(i) ); \ + d[5] = *( (const uint64_t*)(s5) +(i) ); \ + d[6] = *( (const uint64_t*)(s6) +(i) ); \ + d[7] = *( (const uint64_t*)(s7) +(i) ); \ +} while(0) + +static inline void intrlv_8x64( void *dst, const void *s0, + const void *s1, const void *s2, const void *s3, const void *s4, + const void *s5, const void *s6, const void *s7, int bit_len ) +{ + ILEAVE_8x64( 0 ); ILEAVE_8x64( 1 ); + ILEAVE_8x64( 2 ); ILEAVE_8x64( 3 ); + if ( bit_len <= 256 ) return; + ILEAVE_8x64( 4 ); ILEAVE_8x64( 5 ); + ILEAVE_8x64( 6 ); ILEAVE_8x64( 7 ); + if ( bit_len <= 512 ) return; + ILEAVE_8x64( 8 ); ILEAVE_8x64( 9 ); + if ( bit_len <= 640 ) return; + ILEAVE_8x64( 10 ); ILEAVE_8x64( 11 ); + ILEAVE_8x64( 12 ); ILEAVE_8x64( 13 ); + ILEAVE_8x64( 14 ); ILEAVE_8x64( 15 ); +} + +#undef ILEAVE_8x64 + +#define 
DLEAVE_8x64( i ) do \ +{ \ + const uint64_t *s = (const uint64_t*)(src) + ( (i) << 3 ); \ + *( (uint64_t*)(d0) +(i) ) = s[0]; \ + *( (uint64_t*)(d1) +(i) ) = s[1]; \ + *( (uint64_t*)(d2) +(i) ) = s[2]; \ + *( (uint64_t*)(d3) +(i) ) = s[3]; \ + *( (uint64_t*)(d4) +(i) ) = s[4]; \ + *( (uint64_t*)(d5) +(i) ) = s[5]; \ + *( (uint64_t*)(d6) +(i) ) = s[6]; \ + *( (uint64_t*)(d7) +(i) ) = s[7]; \ +} while(0) + +static inline void dintrlv_8x64( void *d0, void *d1, void *d2, void *d3, + void *d4, void *d5, void *d6, void *d7, const void *src, int bit_len ) +{ + DLEAVE_8x64( 0 ); DLEAVE_8x64( 1 ); + DLEAVE_8x64( 2 ); DLEAVE_8x64( 3 ); + if ( bit_len <= 256 ) return; + DLEAVE_8x64( 4 ); DLEAVE_8x64( 5 ); + DLEAVE_8x64( 6 ); DLEAVE_8x64( 7 ); + if ( bit_len <= 512 ) return; + DLEAVE_8x64( 8 ); DLEAVE_8x64( 9 ); + if ( bit_len <= 640 ) return; + DLEAVE_8x64( 10 ); DLEAVE_8x64( 11 ); + DLEAVE_8x64( 12 ); DLEAVE_8x64( 13 ); + DLEAVE_8x64( 14 ); DLEAVE_8x64( 15 ); +} + +#undef DLEAVE_8x64 + +static inline void extr_lane_8x64( void *d, const void *s, + const int lane, const int bit_len ) +{ + ((uint64_t*)d)[ 0] = ((uint64_t*)s)[ lane ]; + ((uint64_t*)d)[ 1] = ((uint64_t*)s)[ lane+ 8 ]; + ((uint64_t*)d)[ 2] = ((uint64_t*)s)[ lane+ 16 ]; + ((uint64_t*)d)[ 3] = ((uint64_t*)s)[ lane+ 24 ]; + ((uint64_t*)d)[ 4] = ((uint64_t*)s)[ lane+ 32 ]; + ((uint64_t*)d)[ 5] = ((uint64_t*)s)[ lane+ 40 ]; + ((uint64_t*)d)[ 6] = ((uint64_t*)s)[ lane+ 48 ]; + ((uint64_t*)d)[ 7] = ((uint64_t*)s)[ lane+ 56 ]; + if ( bit_len <= 256 ) return; + ((uint64_t*)d)[ 8] = ((uint64_t*)s)[ lane+ 64 ]; + ((uint64_t*)d)[ 9] = ((uint64_t*)s)[ lane+ 72 ]; + ((uint64_t*)d)[10] = ((uint64_t*)s)[ lane+ 80 ]; + ((uint64_t*)d)[11] = ((uint64_t*)s)[ lane+ 88 ]; + ((uint64_t*)d)[12] = ((uint64_t*)s)[ lane+ 96 ]; + ((uint64_t*)d)[13] = ((uint64_t*)s)[ lane+104 ]; + ((uint64_t*)d)[14] = ((uint64_t*)s)[ lane+112 ]; + ((uint64_t*)d)[15] = ((uint64_t*)s)[ lane+120 ]; +} + +#if defined(__AVX512F__) && defined(__AVX512VL__) + +static inline void mm512_bswap32_intrlv80_8x64( void *dst, void *src ) +{ + __m512i *d = (__m512i*)dst; + __m512i s0 = mm512_bswap_32( casti_m512i(src, 0 ) ); + __m128i s1 = mm128_bswap_32( casti_m128i(src, 4 ) ); + const __m512i zero = m512_zero; + const __m512i one = m512_one_64; + const __m512i two = _mm512_add_epi64( one, one ); + const __m512i tre = _mm512_add_epi64( two, one ); + const __m512i four = _mm512_add_epi64( two, two ); + + d[0] = _mm512_permutexvar_epi64( s0, zero ); + d[1] = _mm512_permutexvar_epi64( s0, one ); + d[2] = _mm512_permutexvar_epi64( s0, two ); + d[3] = _mm512_permutexvar_epi64( s0, tre ); + d[4] = _mm512_permutexvar_epi64( s0, four ); + d[5] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, one ) ); + d[6] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, two ) ); + d[7] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, tre ) ); + d[8] = _mm512_permutexvar_epi64( + _mm512_castsi128_si512( s1 ), zero ); + d[9] = _mm512_permutexvar_epi64( + _mm512_castsi128_si512( s1 ), one ); +} + +#endif // AVX512 + +////////////////////////// +// +// 128 bit data + +// 2x128 (AVX2) + +static inline void intrlv_2x128( void *dst, const void *src0, + const void *src1, int bit_len ) +{ + __m128i *d = (__m128i*)dst; + const __m128i *s0 = (const __m128i*)src0; + const __m128i *s1 = (const __m128i*)src1; + d[ 0] = s0[0]; d[ 1] = s1[0]; + d[ 2] = s0[1]; d[ 3] = s1[1]; + if ( bit_len <= 256 ) return; + d[ 4] = s0[2]; d[ 5] = s1[2]; + d[ 6] = s0[3]; d[ 7] = s1[3]; + if ( bit_len <= 512 ) return; + d[ 8] = s0[4]; 
d[ 9] = s1[4]; + if ( bit_len <= 640 ) return; + d[10] = s0[5]; d[11] = s1[5]; + d[12] = s0[6]; d[13] = s1[6]; + d[14] = s0[7]; d[15] = s1[7]; +} + +static inline void intrlv_2x128_512( void *dst, const void *src0, + const void *src1 ) +{ + __m128i *d = (__m128i*)dst; + const __m128i *s0 = (const __m128i*)src0; + const __m128i *s1 = (const __m128i*)src1; + d[0] = s0[0]; d[1] = s1[0]; + d[2] = s0[1]; d[3] = s1[1]; + d[4] = s0[2]; d[5] = s1[2]; + d[6] = s0[3]; d[7] = s1[3]; +} + +static inline void dintrlv_2x128( void *dst0, void *dst1, + const void *src, int bit_len ) +{ + __m128i *d0 = (__m128i*)dst0; + __m128i *d1 = (__m128i*)dst1; + const __m128i *s = (const __m128i*)src; + + d0[0] = s[ 0]; d1[0] = s[ 1]; + d0[1] = s[ 2]; d1[1] = s[ 3]; + if ( bit_len <= 256 ) return; + d0[2] = s[ 4]; d1[2] = s[ 5]; + d0[3] = s[ 6]; d1[3] = s[ 7]; + if ( bit_len <= 512 ) return; + d0[4] = s[ 8]; d1[4] = s[ 9]; + if ( bit_len <= 640 ) return; + d0[5] = s[10]; d1[5] = s[11]; + d0[6] = s[12]; d1[6] = s[13]; + d0[7] = s[14]; d1[7] = s[15]; +} + +static inline void dintrlv_2x128_512( void *dst0, void *dst1, const void *src ) +{ + __m128i *d0 = (__m128i*)dst0; + __m128i *d1 = (__m128i*)dst1; + const __m128i *s = (const __m128i*)src; + + d0[0] = s[0]; d1[0] = s[1]; + d0[1] = s[2]; d1[1] = s[3]; + d0[2] = s[4]; d1[2] = s[5]; + d0[3] = s[6]; d1[3] = s[7]; +} + +// 4x128 (AVX512) + +static inline void intrlv_4x128( void *dst, const void *src0, + const void *src1, const void *src2, const void *src3, int bit_len ) +{ + __m128i *d = (__m128i*)dst; + const __m128i *s0 = (const __m128i*)src0; + const __m128i *s1 = (const __m128i*)src1; + const __m128i *s2 = (const __m128i*)src2; + const __m128i *s3 = (const __m128i*)src3; + d[ 0] = s0[0]; d[ 1] = s1[0]; d[ 2] = s2[0]; d[ 3] = s3[0]; + d[ 4] = s0[1]; d[ 5] = s1[1]; d[ 6] = s2[1]; d[ 7] = s3[1]; + if ( bit_len <= 256 ) return; + d[ 8] = s0[2]; d[ 9] = s1[2]; d[10] = s2[2]; d[11] = s3[2]; + d[12] = s0[3]; d[13] = s1[3]; d[14] = s2[3]; d[15] = s3[3]; + if ( bit_len <= 512 ) return; + d[16] = s0[4]; d[17] = s1[4]; d[18] = s2[4]; d[19] = s3[4]; + if ( bit_len <= 640 ) return; + d[20] = s0[5]; d[21] = s1[5]; d[22] = s2[5]; d[23] = s3[5]; + d[24] = s0[6]; d[25] = s1[6]; d[26] = s2[6]; d[27] = s3[6]; + d[28] = s0[7]; d[29] = s1[7]; d[30] = s2[7]; d[31] = s3[7]; +} + +static inline void intrlv_4x128_512( void *dst, const void *src0, + const void *src1, const void *src2, const void *src3 ) +{ + __m128i *d = (__m128i*)dst; + const __m128i *s0 = (const __m128i*)src0; + const __m128i *s1 = (const __m128i*)src1; + const __m128i *s2 = (const __m128i*)src2; + const __m128i *s3 = (const __m128i*)src3; + d[ 0] = s0[0]; d[ 1] = s1[0]; d[ 2] = s2[0]; d[ 3] = s3[0]; + d[ 4] = s0[1]; d[ 5] = s1[1]; d[ 6] = s2[1]; d[ 7] = s3[1]; + d[ 8] = s0[2]; d[ 9] = s1[2]; d[10] = s2[2]; d[11] = s3[2]; + d[12] = s0[3]; d[13] = s1[3]; d[14] = s2[3]; d[15] = s3[3]; +} + +static inline void dintrlv_4x128( void *dst0, void *dst1, void *dst2, + void *dst3, const void *src, int bit_len ) +{ + __m128i *d0 = (__m128i*)dst0; + __m128i *d1 = (__m128i*)dst1; + __m128i *d2 = (__m128i*)dst2; + __m128i *d3 = (__m128i*)dst3; + const __m128i *s = (const __m128i*)src; + d0[0] = s[ 0]; d1[0] = s[ 1]; d2[0] = s[ 2]; d3[0] = s[ 3]; + d0[1] = s[ 4]; d1[1] = s[ 5]; d2[1] = s[ 6]; d3[1] = s[ 7]; + if ( bit_len <= 256 ) return; + d0[2] = s[ 8]; d1[2] = s[ 9]; d2[2] = s[10]; d3[2] = s[11]; + d0[3] = s[12]; d1[3] = s[13]; d2[3] = s[14]; d3[3] = s[15]; + if ( bit_len <= 512 ) return; + d0[4] = s[16]; d1[4] = s[17]; d2[4] = s[18]; 
d3[4] = s[19]; + if ( bit_len <= 640 ) return; + d0[5] = s[20]; d1[5] = s[21]; d2[5] = s[22]; d3[5] = s[23]; + d0[6] = s[24]; d1[6] = s[25]; d2[6] = s[26]; d3[6] = s[27]; + d0[7] = s[28]; d1[7] = s[29]; d2[7] = s[30]; d3[7] = s[31]; +} + +static inline void dintrlv_4x128_512( void *dst0, void *dst1, void *dst2, + void *dst3, const void *src ) +{ + __m128i *d0 = (__m128i*)dst0; + __m128i *d1 = (__m128i*)dst1; + __m128i *d2 = (__m128i*)dst2; + __m128i *d3 = (__m128i*)dst3; + const __m128i *s = (const __m128i*)src; + d0[0] = s[ 0]; d1[0] = s[ 1]; d2[0] = s[ 2]; d3[0] = s[ 3]; + d0[1] = s[ 4]; d1[1] = s[ 5]; d2[1] = s[ 6]; d3[1] = s[ 7]; + d0[2] = s[ 8]; d1[2] = s[ 9]; d2[2] = s[10]; d3[2] = s[11]; + d0[3] = s[12]; d1[3] = s[13]; d2[3] = s[14]; d3[3] = s[15]; +} + + +/////////////////////////// +// +// Re-intereleaving + +// 4x64 -> 4x32 + +#define RLEAVE_4x64_4x32( i ) do \ +{ \ + uint32_t *d = (uint32_t*)dst + (i); \ + const uint32_t *s = (const uint32_t*)src + (i); \ + d[0] = s[0]; d[1] = s[2]; \ + d[2] = s[4]; d[3] = s[6]; \ + d[4] = s[1]; d[5] = s[3]; \ + d[6] = s[5]; d[7] = s[7]; \ +} while(0) + + +// Convert 4x64 byte (256 bit) vectors to 4x32 (128 bit) vectors for AVX +// bit_len must be multiple of 64 +static inline void rintrlv_4x64_4x32( void *dst, void *src, + int bit_len ) +{ + RLEAVE_4x64_4x32( 0 ); RLEAVE_4x64_4x32( 8 ); + RLEAVE_4x64_4x32( 16 ); RLEAVE_4x64_4x32( 24 ); + if ( bit_len <= 256 ) return; + RLEAVE_4x64_4x32( 32 ); RLEAVE_4x64_4x32( 40 ); + RLEAVE_4x64_4x32( 48 ); RLEAVE_4x64_4x32( 56 ); + if ( bit_len <= 512 ) return; + RLEAVE_4x64_4x32( 64 ); RLEAVE_4x64_4x32( 72 ); + RLEAVE_4x64_4x32( 80 ); RLEAVE_4x64_4x32( 88 ); + RLEAVE_4x64_4x32( 96 ); RLEAVE_4x64_4x32( 104 ); + RLEAVE_4x64_4x32( 112 ); RLEAVE_4x64_4x32( 120 ); +} + +#undef RLEAVE_4x64_4x32 + + +// 4x32 -> 4x64 + +#define RLEAVE_4x32_4x64(i) do \ +{ \ + uint32_t *d = (uint32_t*)dst + (i); \ + const uint32_t *s = (const uint32_t*)src + (i); \ + d[0] = s[0]; d[1] = s[4]; \ + d[2] = s[1]; d[3] = s[5]; \ + d[4] = s[2]; d[5] = s[6]; \ + d[6] = s[3]; d[7] = s[7]; \ +} while(0) + +static inline void rintrlv_4x32_4x64( void *dst, + const void *src, int bit_len ) +{ + RLEAVE_4x32_4x64( 0 ); RLEAVE_4x32_4x64( 8 ); + RLEAVE_4x32_4x64( 16 ); RLEAVE_4x32_4x64( 24 ); + if ( bit_len <= 256 ) return; + RLEAVE_4x32_4x64( 32 ); RLEAVE_4x32_4x64( 40 ); + RLEAVE_4x32_4x64( 48 ); RLEAVE_4x32_4x64( 56 ); + if ( bit_len <= 512 ) return; + RLEAVE_4x32_4x64( 64 ); RLEAVE_4x32_4x64( 72 ); + RLEAVE_4x32_4x64( 80 ); RLEAVE_4x32_4x64( 88 ); + RLEAVE_4x32_4x64( 96 ); RLEAVE_4x32_4x64( 104 ); + RLEAVE_4x32_4x64( 112 ); RLEAVE_4x32_4x64( 120 ); +} + +#undef RLEAVE_4x32_4x64 + + +// 2x128 -> 4x64 + +#define RLEAVE_2x128_4x64( i ) do \ +{ \ + uint64_t *d = (uint64_t*)dst + ((i)<<1); \ + const uint64_t *s0 = (const uint64_t*)src0 + (i); \ + const uint64_t *s1 = (const uint64_t*)src1 + (i); \ + d[0] = s0[0]; d[1] = s0[2]; \ + d[2] = s1[0]; d[3] = s1[2]; \ + d[4] = s0[1]; d[5] = s0[3]; \ + d[6] = s1[1]; d[7] = s1[3]; \ +} while(0) + +static inline void rintrlv_2x128_4x64( void *dst, const void *src0, + const void *src1, int bit_len ) +{ + RLEAVE_2x128_4x64( 0 ); RLEAVE_2x128_4x64( 4 ); + if ( bit_len <= 256 ) return; + RLEAVE_2x128_4x64( 8 ); RLEAVE_2x128_4x64( 12 ); + if ( bit_len <= 512 ) return; + RLEAVE_2x128_4x64( 16 ); RLEAVE_2x128_4x64( 20 ); + RLEAVE_2x128_4x64( 24 ); RLEAVE_2x128_4x64( 28 ); +} + +#undef RLEAVE_2x128_4x64 + + +// 4x64 -> 2x128 + +#define RLEAVE_4x64_2x128( i ) do \ +{ \ + uint64_t *d0 = (uint64_t*)dst0 + (i); \ + 
uint64_t *d1 = (uint64_t*)dst1 + (i); \ + const uint64_t *s = (const uint64_t*)src + ((i)<<1); \ + d0[0] = s[0]; d0[1] = s[4]; \ + d0[2] = s[1]; d0[3] = s[5]; \ + d1[0] = s[2]; d1[1] = s[6]; \ + d1[2] = s[3]; d1[3] = s[7]; \ +} while(0) + +static inline void rintrlv_4x64_2x128( void *dst0, void *dst1, + const void *src, int bit_len ) +{ + RLEAVE_4x64_2x128( 0 ); RLEAVE_4x64_2x128( 4 ); + if ( bit_len <= 256 ) return; + RLEAVE_4x64_2x128( 8 ); RLEAVE_4x64_2x128( 12 ); + if ( bit_len <= 512 ) return; + RLEAVE_4x64_2x128( 16 ); RLEAVE_4x64_2x128( 20 ); + RLEAVE_4x64_2x128( 24 ); RLEAVE_4x64_2x128( 28 ); +} + + +// +// Some functions customized for mining. + +// blend 2 vectors while interleaving: { hi[n], lo[n-1], ... hi[1], lo[0] } +#if defined(__SSE4_1__) +// No SSE2 implementation. + +#define mm128_intrlv_blend_64( hi, lo ) \ + _mm_blend_epi16( hi, lo, 0x0f ) +#define mm128_intrlv_blend_32( hi, lo ) \ + _mm_blend_epi16( hi, lo, 0x33 ) + +#endif // SSE4_1 + +#if defined(__AVX2__) + +#define mm256_intrlv_blend_128( hi, lo ) \ + _mm256_blend_epi32( hi, lo, 0x0f ) + +#define mm256_intrlv_blend_64( hi, lo ) \ + _mm256_blend_epi32( hi, lo, 0x33 ) + +#define mm256_intrlv_blend_32( hi, lo ) \ + _mm256_blend_epi32( hi, lo, 0x55 ) + +// Blend 32 byte lanes of hash from 2 sources according to control mask. +// macro due to 256 bit value arg. +#define mm256_blend_hash_4x64( dst, a, b, mask ) \ +do { \ + dst[0] = _mm256_blendv_epi8( a[0], b[0], mask ); \ + dst[1] = _mm256_blendv_epi8( a[1], b[1], mask ); \ + dst[2] = _mm256_blendv_epi8( a[2], b[2], mask ); \ + dst[3] = _mm256_blendv_epi8( a[3], b[3], mask ); \ + dst[4] = _mm256_blendv_epi8( a[4], b[4], mask ); \ + dst[5] = _mm256_blendv_epi8( a[5], b[5], mask ); \ + dst[6] = _mm256_blendv_epi8( a[6], b[6], mask ); \ + dst[7] = _mm256_blendv_epi8( a[7], b[7], mask ); \ +} while(0) + +#endif // AVX2 + +#endif // INTERLEAVE_H__ diff --git a/simd-utils/simd-128.h b/simd-utils/simd-128.h index a1d3cd4..82c0dc5 100644 --- a/simd-utils/simd-128.h +++ b/simd-utils/simd-128.h @@ -44,7 +44,17 @@ // repeatedly. It may be better for the application to reimplement the // utility to better suit its usage. // - +// More tips: +// +// Conversions from integer to vector should be avoided whenever possible. +// Extract, insert and set and set1 instructions should be avoided. +// In addition to the issues with constants set is also very inefficient with +// variables. +// Converting integer data to perform a couple of vector operations +// then converting back to integer should be avoided. Converting data in +// registers should also be avoided. Conversion should be limited to buffers +// in memory where the data is loaded directly to vector registers, bypassing +// the integer to vector conversion. // // Pseudo constants. 
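To make the conversion guidance above concrete, a small editorial sketch, not part of the patch and assuming SSE2: the set form routes the values through general purpose registers, while the load form keeps them in an aligned buffer and moves them straight into a vector register.

// Discouraged: __m128i v = _mm_set_epi32( a3, a2, a1, a0 );
// Preferred (hypothetical helper):
static inline __m128i load4x32_example( const uint32_t buf[4] )
{
   return _mm_load_si128( (const __m128i*)buf );   // single aligned 16 byte load
}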
// @@ -71,7 +81,7 @@ static inline __m128i m128_one_64_fn() asm( "pxor %0, %0\n\t" "pcmpeqd %%xmm1, %%xmm1\n\t" "psubq %%xmm1, %0\n\t" - :"=x"(a) + : "=x"(a) : : "xmm1" ); return a; @@ -84,7 +94,7 @@ static inline __m128i m128_one_32_fn() asm( "pxor %0, %0\n\t" "pcmpeqd %%xmm1, %%xmm1\n\t" "psubd %%xmm1, %0\n\t" - :"=x"(a) + : "=x"(a) : : "xmm1" ); return a; @@ -97,7 +107,7 @@ static inline __m128i m128_one_16_fn() asm( "pxor %0, %0\n\t" "pcmpeqd %%xmm1, %%xmm1\n\t" "psubw %%xmm1, %0\n\t" - :"=x"(a) + : "=x"(a) : : "xmm1" ); return a; @@ -110,7 +120,7 @@ static inline __m128i m128_one_8_fn() asm( "pxor %0, %0\n\t" "pcmpeqd %%xmm1, %%xmm1\n\t" "psubb %%xmm1, %0\n\t" - :"=x"(a) + : "=x"(a) : : "xmm1" ); return a; @@ -121,7 +131,7 @@ static inline __m128i m128_neg1_fn() { __m128i a; asm( "pcmpeqd %0, %0\n\t" - :"=x"(a) ); + : "=x"(a) ); return a; } #define m128_neg1 m128_neg1_fn() @@ -133,7 +143,7 @@ static inline __m128i m128_one_128_fn() __m128i a; asm( "pinsrq $0, $1, %0\n\t" "pinsrq $1, $0, %0\n\t" - :"=x"(a) ); + : "=x"(a) ); return a; } #define m128_one_128 m128_one_128_fn() @@ -145,8 +155,8 @@ static inline __m128i m128_const_64( uint64_t hi, uint64_t lo ) __m128i a; asm( "pinsrq $0, %2, %0\n\t" "pinsrq $1, %1, %0\n\t" - :"=x"(a) - :"r"(hi),"r"(lo) ); + : "=x"(a) + : "r"(hi), "r"(lo) ); return a; } diff --git a/simd-utils/simd-512.h b/simd-utils/simd-512.h index 41c9e3f..01e30bd 100644 --- a/simd-utils/simd-512.h +++ b/simd-utils/simd-512.h @@ -41,79 +41,6 @@ // Experimental, not fully tested. -// -// Compile time vector constants and initializers. -// -// The following macro constants and functions should only be used -// for compile time initialization of constant and variable vector -// arrays. These constants use memory, use set instruction or pseudo -// constants at run time to avoid using memory. 
- -// Constant initializers -#define mm512_const_64( x7, x6, x5, x4, x3, x2, x1, x0 ) \ - {{ x7, x6, x5, x4, x3, x2, x1, x0 }} - -#define mm512_const1_64( x ) {{ x,x,x,x,x,x,x }} - -#define mm512_const_32( x15, x14, x13, x12, x11, x10, x09, x08, \ - x07, x06, x05, x04, x03, x02, x01, x00 ) \ - {{ x15, x14, x13, x12, x11, x10, x09, x08, }} \ - x07, x06, x05, x04, x03, x02, x01, x00 }} - -#define mm512_const1_32( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }} - -#define mm512_const_16( x31, x30, x29, x28, x27, x26, x25, x24, \ - x23, x22, x21, x20, x19, x18, x17, x16, \ - x15, x14, x13, x12, x11, x10, x09, x08, \ - x07, x06, x05, x04, x03, x02, x01, x00 ) \ - {{ x31, x30, x29, x28, x27, x26, x25, x24, \ - x23, x22, x21, x20, x19, x18, x17, x16, \ - x15, x14, x13, x12, x11, x10, x09, x08, \ - x07, x06, x05, x04, x03, x02, x01, x00 }} - -#define mm512_const1_16( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x, \ - x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }} - -#define mm512_const_8( x63, x62, x61, x60, x59, x58, x57, x56, \ - x55, x54, x53, x52, x51, x50, x49, x48, \ - x47, x46, x45, x44, x43, x42, x41, x40, \ - x39, x38, x37, x36, x35, x34, x33, x32, \ - x31, x30, x29, x28, x27, x26, x25, x24, \ - x23, x22, x21, x20, x19, x18, x17, x16, \ - x15, x14, x13, x12, x11, x10, x09, x08, \ - x07, x06, x05, x04, x03, x02, x01, x00 ) \ - {{ x63, x62, x61, x60, x59, x58, x57, x56, \ - x55, x54, x53, x52, x51, x50, x49, x48, \ - x47, x46, x45, x44, x43, x42, x41, x40, \ - x39, x38, x37, x36, x35, x34, x33, x32, \ - x31, x30, x29, x28, x27, x26, x25, x24, \ - x23, x22, x21, x20, x19, x18, x17, x16, \ - x15, x14, x13, x12, x11, x10, x09, x08, \ - x07, x06, x05, x04, x03, x02, x01, x00 }} - -#define mm512_const1_8( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x, \ - x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x, \ - x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x, \ - x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }} - -// Predefined compile time constant vectors. -#define c512_zero mm512_const1_64( 0ULL ) -#define c512_neg1 mm512_const1_64( 0xFFFFFFFFFFFFFFFFULL ) -#define c512_one_512 mm512_const_epi64( 0ULL, 0ULL, 0ULL, 0ULL, \ - 0ULL, 0ULL, 0ULL, 1ULL ) -#define c512_one_256 mm512_const_64( 0ULL, 0ULL, 0ULL, 1ULL, \ - 0ULL, 0ULL, 0ULL, 1ULL ) -#define c512_one_128 mm512_const_64( 0ULL, 1ULL, 0ULL, 1ULL, \ - 0ULL, 1ULL, 0ULL, 1ULL ) -#define c512_one_64 mm512_const1_64( 1ULL ) -#define c512_one_32 mm512_const1_32( 1UL ) -#define c512_one_16 mm512_const1_16( 1U ) -#define c512_one_8 mm512_const1_8( 1U ) -#define c512_neg1_64 mm512_const1_64( 0xFFFFFFFFFFFFFFFFULL ) -#define c512_neg1_32 mm512_const1_32( 0xFFFFFFFFUL ) -#define c512_neg1_16 mm512_const1_32( 0xFFFFU ) -#define c512_neg1_8 mm512_const1_32( 0xFFU ) - // // Pseudo constants. 
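The m512_one_*_fn generators added just below, like their 128 and 256 bit counterparts, rely on a register-only idiom: zero a register, compare a scratch register with itself to set every bit (minus one in every lane), then subtract, leaving plus one in every lane with no memory access. A C level equivalent of that idiom for one 128 bit case, as an editorial sketch assuming SSE2:

static inline __m128i one_per_qword_example( void )
{
   __m128i zero = _mm_setzero_si128();
   __m128i neg1 = _mm_cmpeq_epi32( zero, zero );   // all bits set, i.e. -1
   return _mm_sub_epi64( zero, neg1 );             // 0 - (-1) = 1 in each 64 bit lane
}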
@@ -127,11 +54,77 @@ 0ULL, 0ULL, 0ULL, 1ULL ) #define m512_one_256 _mm512_set4_epi64( 0ULL, 0ULL, 0ULL, 1ULL ) #define m512_one_128 _mm512_set4_epi64( 0ULL, 1ULL, 0ULL, 1ULL ) -#define m512_one_64 _mm512_set1_epi64( 1ULL ) -#define m512_one_32 _mm512_set1_epi32( 1UL ) -#define m512_one_16 _mm512_set1_epi16( 1U ) -#define m512_one_8 _mm512_set1_epi8( 1U ) -#define m512_neg1 _mm512_set1_epi64( 0xFFFFFFFFFFFFFFFFULL ) +//#define m512_one_64 _mm512_set1_epi64( 1ULL ) +//#define m512_one_32 _mm512_set1_epi32( 1UL ) +//#define m512_one_16 _mm512_set1_epi16( 1U ) +//#define m512_one_8 _mm512_set1_epi8( 1U ) +//#define m512_neg1 _mm512_set1_epi64( 0xFFFFFFFFFFFFFFFFULL ) + +#define m512_const_64( i7, i6, i5, i4, i3, i2, i1, i0 ) \ + _mm512_inserti64x4( _mm512_castsi256_si512( m256_const_64( i3,i2,i1,i0 ) ), \ + m256_const_64( i7,i6,i5,i4 ), 1 ) +#define m512_const1_64( i ) m512_const_64( i, i, i, i, i, i, i, i ) + +static inline __m512i m512_one_64_fn() +{ + __m512i a; + asm( "vpxorq %0, %0, %0\n\t" + "vpcmpeqd %%zmm1, %%zmm1, %%zmm1\n\t" + "vpsubq %%zmm1, %0, %0\n\t" + :"=x"(a) + : + : "zmm1" ); + return a; +} +#define m512_one_64 m512_one_64_fn() + +static inline __m512i m512_one_32_fn() +{ + __m512i a; + asm( "vpxord %0, %0, %0\n\t" + "vpcmpeqd %%zmm1, %%zmm1, %%zmm1\n\t" + "vpsubd %%zmm1, %0, %0\n\t" + :"=x"(a) + : + : "zmm1" ); + return a; +} +#define m512_one_32 m512_one_32_fn() + +static inline __m512i m512_one_16_fn() +{ + __m512i a; + asm( "vpxord %0, %0, %0\n\t" + "vpcmpeqd %%zmm1, %%zmm1, %%zmm1\n\t" + "vpsubw %%zmm1, %0, %0\n\t" + :"=x"(a) + : + : "zmm1" ); + return a; +} +#define m512_one_16 m512_one_16_fn() + +static inline __m512i m512_one_8_fn() +{ + __m512i a; + asm( "vpxord %0, %0, %0\n\t" + "vpcmpeqd %%zmm1, %%zmm1, %%zmm1\n\t" + "vpsubb %%zmm1, %0, %0\n\t" + :"=x"(a) + : + : "zmm1" ); + return a; +} +#define m512_one_8 m512_one_8_fn() + +static inline __m512i m512_neg1_fn() +{ + __m512i a; + asm( "vpcmpeqq %0, %0, %0\n\t" + :"=x"(a) ); + return a; +} +#define m512_neg1 m512_neg1_fn() // @@ -142,6 +135,15 @@ #define mm512_negate_32( x ) _mm512_sub_epi32( m512_zero, x ) #define mm512_negate_16( x ) _mm512_sub_epi16( m512_zero, x ) + + +#define mm256_extr_lo256_512( a ) _mm512_castsi512_si256( a ) +#define mm256_extr_hi256_512( a ) _mm512_extracti64x4_epi64( a, 1 ) + +#define mm128_extr_lo128_512( a ) _mm512_castsi512_si128( a ) + + + // // Pointer casting @@ -225,6 +227,25 @@ *((uint32_t*)(d06)) = ((uint32_t*)(s))[14]; \ *((uint32_t*)(d07)) = ((uint32_t*)(s))[15]; +// Add 4 values, fewer dependencies than sequential addition. + + +#define mm512_add4_64( a, b, c, d ) \ + _mm512_add_epi64( _mm512_add_epi64( a, b ), _mm512_add_epi64( c, d ) ) + +#define mm512_add4_32( a, b, c, d ) \ + _mm512_add_epi32( _mm512_add_epi32( a, b ), _mm512_add_epi32( c, d ) ) + +#define mm512_add4_16( a, b, c, d ) \ + _mm512_add_epi16( _mm512_add_epi16( a, b ), _mm512_add_epi16( c, d ) ) + +#define mm512_add4_8( a, b, c, d ) \ + _mm512_add_epi8( _mm512_add_epi8( a, b ), _mm512_add_epi8( c, d ) ) + +#define mm512_xor4( a, b, c, d ) \ + _mm512_xor_si512( _mm512_xor_si512( a, b ), _mm512_xor_si512( c, d ) ) + + // // Bit rotations. @@ -263,45 +284,41 @@ #define mm512_ror_x32( v, n ) _mm512_alignr_epi32( v, v, n ) -// Although documented to exist in AVX512F the _mm512_set_epi8 & -// _mm512_set_epi16 intrinsics fail to compile. Seems usefull to have -// for endian byte swapping. Workaround by using _mm512_set_epi32. -// Ugly but it works. 
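For reference, a brief usage sketch of the 512 bit constant helper defined above, illustrative only and assuming AVX512F plus the m256_const_64 helper from simd-256.h; the value is composed from two 256 bit halves in registers instead of loading a 64 byte constant from memory.

static inline __m512i qword_index_example( void )
{
   // Same lanes as _mm512_set_epi64( 7, 6, 5, 4, 3, 2, 1, 0 ).
   return m512_const_64( 7, 6, 5, 4, 3, 2, 1, 0 );
}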
 #define mm512_ror_1x16( v ) \
-   _mm512_permutexvar_epi16( v, _mm512_set_epi32( \
-                     0x0000001F, 0x001E001D, 0x001C001B, 0x001A0019, \
-                     0X00180017, 0X00160015, 0X00140013, 0X00120011, \
-                     0X0010000F, 0X000E000D, 0X000C000B, 0X000A0009, \
-                     0X00080007, 0X00060005, 0X00040003, 0X00020001 ) )
+   _mm512_permutexvar_epi16( v, m512_const_64( \
+                     0x0000001F001E001D, 0x001C001B001A0019, \
+                     0X0018001700160015, 0X0014001300120011, \
+                     0X0010000F000E000D, 0X000C000B000A0009, \
+                     0X0008000700060005, 0X0004000300020001 ) )
 
 #define mm512_rol_1x16( v ) \
-   _mm512_permutexvar_epi16( v, _mm512_set_epi32( \
-                     0x001E001D, 0x001C001B, 0x001A0019, 0x00180017, \
-                     0X00160015, 0X00140013, 0X00120011, 0x0010000F, \
-                     0X000E000D, 0X000C000B, 0X000A0009, 0X00080007, \
-                     0X00060005, 0X00040003, 0X00020001, 0x0000001F ) )
+   _mm512_permutexvar_epi16( v, m512_const_64( \
+                     0x001E001D001C001B, 0x001A001900180017, \
+                     0X0016001500140013, 0X001200110010000F, \
+                     0X000E000D000C000B, 0X000A000900080007, \
+                     0X0006000500040003, 0X000200010000001F ) )
 
 #define mm512_ror_1x8( v ) \
-   _mm512_permutexvar_epi8( v, _mm512_set_epi32( \
-                     0x003F3E3D, 0x3C3B3A39, 0x38373635, 0x34333231, \
-                     0x302F2E2D, 0x2C2B2A29, 0x28272625, 0x24232221, \
-                     0x201F1E1D, 0x1C1B1A19. 0x18171615, 0x14131211, \
-                     0x100F0E0D, 0x0C0B0A09, 0x08070605, 0x04030201 ) )
+   _mm512_permutexvar_epi8( v, m512_const_64( \
+                     0x003F3E3D3C3B3A39, 0x3837363534333231, \
+                     0x302F2E2D2C2B2A29, 0x2827262524232221, \
+                     0x201F1E1D1C1B1A19, 0x1817161514131211, \
+                     0x100F0E0D0C0B0A09, 0x0807060504030201 ) )
 
 #define mm512_rol_1x8( v ) \
-   _mm512_permutexvar_epi8( v, _mm512_set_epi32( \
-                     0x3E3D3C3B, 0x3A393837, 0x36353433, 0x3231302F. \
-                     0x2E2D2C2B, 0x2A292827, 0x26252423, 0x2221201F, \
-                     0x1E1D1C1B, 0x1A191817, 0x16151413, 0x1211100F, \
-                     0x0E0D0C0B, 0x0A090807, 0x06050403, 0x0201003F ) )
+   _mm512_permutexvar_epi8( v, m512_const_64( \
+                     0x3E3D3C3B3A393837, 0x363534333231302F,
\ + 0x2E2D2C2B2A292827, 0x262524232221201F, \ + 0x1E1D1C1B1A191817, 0x161514131211100F, \ + 0x0E0D0C0B0A090807, 0x060504030201003F ) ) // Invert vector: {3,2,1,0} -> {0,1,2,3} #define mm512_invert_128( v ) _mm512_permute4f128_epi32( a, 0x1b ) #define mm512_invert_64( v ) \ - _mm512_permutex_epi64( v, _mm512_set_epi64( 0,1,2,3,4,5,6,7 ) ) + _mm512_permutex_epi64( v, m512_const_64( 0,1,2,3,4,5,6,7 ) ) #define mm512_invert_32( v ) \ _mm512_permutexvar_epi32( v, _mm512_set_epi32( \ @@ -378,32 +395,32 @@ #define mm512_rol1x32_128( v ) _mm512_shuffle_epi32( v, 0x93 ) #define mm512_ror1x16_128( v ) \ - _mm512_permutexvar_epi16( v, _mm512_set_epi32( \ - 0x0018001F, 0x001E001D, 0x001C001B, 0x001A0019, \ - 0x00100017, 0x00160015, 0x00140013, 0x00120011, \ - 0x0008000F, 0x000E000D, 0x000C000B, 0x000A0009, \ - 0x00000007, 0x00060005, 0x00040003, 0x00020001 ) ) + _mm512_permutexvar_epi16( v, m512_const_64( \ + 0x0018001F001E001D, 0x001C001B001A0019, \ + 0x0010001700160015, 0x0014001300120011, \ + 0x0008000F000E000D, 0x000C000B000A0009, \ + 0x0000000700060005, 0x0004000300020001 ) ) #define mm512_rol1x16_128( v ) \ - _mm512_permutexvar_epi16( v, _mm512_set_epi32( \ - 0x001E001D, 0x001C001B, 0x001A0019, 0x0018001F, \ - 0x00160015, 0x00140013, 0x00120011, 0x00100017, \ - 0x000E000D, 0x000C000B, 0x000A0009, 0x0008000F, \ - 0x00060005, 0x00040003, 0x00020001, 0x00000007 ) ) + _mm512_permutexvar_epi16( v, m512_const_64( \ + 0x001E001D001C001B, 0x001A00190018001F, \ + 0x0016001500140013, 0x0012001100100017, \ + 0x000E000D000C000B, 0x000A00090008000F, \ + 0x0006000500040003, 0x0002000100000007 ) ) #define mm512_ror1x8_128( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ - 0x303F3E3D, 0x3C3B3A39, 0x38373635, 0x34333231, \ - 0x202F2E2D, 0x2C2B2A29, 0x28272625, 0x24232221, \ - 0x101F1E1D, 0x1C1B1A19, 0x18171615, 0x14131211, \ - 0x000F0E0D, 0x0C0B0A09, 0x08070605, 0x04030201 ) ) + _mm512_permutexvar_epi8( v, m512_const_64( \ + 0x303F3E3D3C3B3A39, 0x3837363534333231, \ + 0x202F2E2D2C2B2A29, 0x2827262524232221, \ + 0x101F1E1D1C1B1A19, 0x1817161514131211, \ + 0x000F0E0D0C0B0A09, 0x0807060504030201 ) ) #define mm512_rol1x8_128( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ - 0x3E3D3C3B, 0x3A393837, 0x36353433. 0x3231303F, \ - 0x2E2D2C2B, 0x2A292827, 0x26252423, 0x2221202F, \ - 0x1E1D1C1B, 0x1A191817, 0x16151413, 0x1211101F, \ - 0x0E0D0C0B, 0x0A090807, 0x06050403, 0x0201000F ) ) + _mm512_permutexvar_epi8( v, m512_const_64( \ + 0x3E3D3C3B3A393837, 0x363534333231303F, \ + 0x2E2D2C2B2A292827, 0x262524232221202F, \ + 0x1E1D1C1B1A191817, 0x161514131211101F, \ + 0x0E0D0C0B0A090807, 0x060504030201000F ) ) // Rotate 128 bit lanes by c bytes. #define mm512_bror_128( v, c ) \