Mirror of https://github.com/JayDDee/cpuminer-opt.git (synced 2025-09-17 23:44:27 +00:00)

Compare commits (4 commits):

71d6b97ee8
b2331375a3
7fec680835
1b0a5aadf6
@@ -163,6 +163,7 @@ cpuminer_SOURCES = \
   algo/sha/sph_sha2.c \
   algo/sha/sph_sha2big.c \
   algo/sha/sha2-hash-4way.c \
+  algo/sha/sha256_hash_11way.c \
   algo/sha/sha2.c \
   algo/sha/sha256t-gate.c \
   algo/sha/sha256t-4way.c \
@@ -38,7 +38,28 @@ supported.
 Change Log
 ----------
 
-v3.9.2.1
+v3.9.3.1
+
+Skipped v3.9.3 due to misidentification of v3.9.2.5 as v3.9.3.
+Fixed x16r algo 25% invalid share reject rate. The bug may have also
+affected other algos.
+
+v3.9.2.5
+
+Fixed 2 regressions: hodl AES detection, x16r invalid shares with AVX2.
+More restructuring.
+
+v3.9.2.4
+
+Yet another affinity fix. Hopefully the last one.
+
+v3.9.2.3
+
+Another cpu-affinity fix.
+Disabled test code that fails to compile on some CPUs with limited
+AVX512 capabilities.
+
+v3.9.2.2
 
 Fixed some day one cpu-affinity issues.
 
@@ -345,9 +345,9 @@ const char* const algo_alias_map[][2] =
   { NULL, NULL }
 };
 
-// if arg is a valid alias for a known algo it is updated with the proper name.
-// No validation of the algo or alias is done, it is the responsibility of the
-// calling function to validate the algo after return.
+// if arg is a valid alias for a known algo it is updated with the proper
+// name. No validation of the algo or alias is done, it is the responsibility
+// of the calling function to validate the algo after return.
 void get_algo_alias( char** algo_or_alias )
 {
   int i;
@@ -362,3 +362,22 @@ void get_algo_alias( char** algo_or_alias )
 
 #undef ALIAS
 #undef PROPER
+
+// only for parallel when there are lanes.
+bool submit_solution( struct work *work, void *hash,
+                      struct thr_info *thr, int lane )
+{
+   work_set_target_ratio( work, hash );
+   if ( submit_work( thr, work ) )
+   {
+      applog( LOG_NOTICE, "Share %d submitted by thread %d, lane %d.",
+              accepted_share_count + rejected_share_count + 1,
+              thr->id, lane );
+      return true;
+   }
+   else
+      applog( LOG_WARNING, "Failed to submit share." );
+   return false;
+}
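The helper added above is what the scanhash loops later in this compare are converted to call. A sketch of the calling pattern, lifted from the allium/lyra2 hunks below rather than invented here (hash, Htarg, ptarget, pdata, n, work and mythr are the usual scanhash locals):

```c
/* Fragment, not a standalone program: the per-lane check-and-submit step
   as it appears in the converted 4-way scanhash loops in this compare. */
for ( int lane = 0; lane < 4; lane++ )
   if ( (hash + (lane << 3))[7] <= Htarg )
   {
      if ( fulltest( hash + (lane << 3), ptarget ) && !opt_benchmark )
      {
         pdata[19] = n + lane;                                      /* record the winning nonce */
         submit_solution( work, hash + (lane << 3), mythr, lane );  /* set ratio, submit, log   */
      }
   }
```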
@@ -2,8 +2,7 @@
 #include <stdbool.h>
 #include <stdint.h>
 #include "miner.h"
-#include "avxdefs.h"
-#include "interleave.h"
+#include "simd-utils.h"
 
 /////////////////////////////
 ////
@@ -196,8 +195,9 @@ void four_way_not_tested();
 int null_scanhash();
 
 // The one and only, a callback for scanhash.
+bool submit_solution( struct work *work, void *hash,
+                      struct thr_info *thr, int lane );
+
 bool submit_work( struct thr_info *thr, const struct work *work_in );
 
 // displays warning
@@ -45,7 +45,7 @@ extern "C"{
 
 #include <stddef.h>
 #include "algo/sha/sph_types.h"
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 #define SPH_SIZE_blake256   256
 
@@ -16,7 +16,7 @@
 
 #if defined(__SSE4_2__)
 
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 #include <stddef.h>
 #include <stdint.h>
@@ -43,7 +43,7 @@ extern "C"{
 #include <stddef.h>
 
 #include "algo/sha/sph_types.h"
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 #define SPH_SIZE_bmw256   256
 
@@ -4,7 +4,7 @@
 #if defined(__AVX2__)
 
 #include <stdint.h>
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 // 2x128, 2 way parallel SSE2
 
@@ -13,7 +13,7 @@
 #include <stdbool.h>
 #include <unistd.h>
 #include <memory.h>
-#include "avxdefs.h"
+#include "simd-utils.h"
 #include <stdio.h>
 
 // The result of hashing 10 rounds of initial data which is params and
@@ -11,6 +11,8 @@ extern "C"{
 #pragma warning (disable: 4146)
 #endif
 
+#define SPH_FUGUE_NOCOPY 1
+
 static const sph_u32 IV224[] = {
    SPH_C32(0xf4c9120d), SPH_C32(0x6286f757), SPH_C32(0xee39e01c),
    SPH_C32(0xe074e3cb), SPH_C32(0xa1127c62), SPH_C32(0x9a43d215),
@@ -12,7 +12,7 @@
 #include <memory.h>
 #include "hash-groestl.h"
 #include "miner.h"
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 #ifndef NO_AES_NI
 
@@ -9,7 +9,7 @@
 #include <memory.h>
 #include "hash-groestl256.h"
 #include "miner.h"
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 #ifndef NO_AES_NI
 
@@ -40,7 +40,7 @@
 
 #if defined (__AVX2__)
 
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 #ifdef __cplusplus
 extern "C"{
@@ -69,7 +69,7 @@ extern "C"{
 
 #include <stddef.h>
 #include "algo/sha/sph_types.h"
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 #define SPH_SIZE_haval256_5   256
 
@@ -156,7 +156,7 @@ int hodl_scanhash( int thr_id, struct work* work, uint32_t max_nonce,
 
 bool register_hodl_algo( algo_gate_t* gate )
 {
-#if defined(__AES__)
+#if !defined(__AES__)
   applog( LOG_ERR, "Only CPUs with AES are supported, use legacy version.");
   return false;
 #endif
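The one-character change above is the hodl AES-detection regression fix mentioned in the changelog: the guard was rejecting exactly the CPUs that do have AES. A minimal standalone sketch of the corrected shape (names and the empty success path are illustrative, not the project's exact code):

```c
/* Sketch only: reject registration when AES-NI is absent, accept otherwise.
   The key point is the '!' in the preprocessor guard. */
#include <stdbool.h>
#include <stdio.h>

typedef struct algo_gate algo_gate_t;   /* opaque here; the real type lives in algo-gate-api.h */

bool register_hodl_algo_sketch( algo_gate_t *gate )
{
#if !defined(__AES__)
   (void)gate;
   fprintf( stderr, "Only CPUs with AES are supported, use legacy version.\n" );
   return false;            /* no AES-NI: refuse to register */
#else
   (void)gate;
   /* ... normal gate initialisation would go here ... */
   return true;
#endif
}
```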
@@ -44,7 +44,7 @@ extern "C"{
 
 #include <stddef.h>
 #include "algo/sha/sph_types.h"
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 #define SPH_SIZE_jh256   256
 
@@ -44,7 +44,7 @@ extern "C"{
 
 #include <stddef.h>
 #include "algo/sha/sph_types.h"
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 #define SPH_SIZE_keccak256   256
 
@@ -24,7 +24,7 @@
 
 #if defined(__AVX2__)
 
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 #define MASK _mm256_set_epi32( 0UL, 0UL, 0UL, 0xffffffffUL, \
                                0UL, 0UL, 0UL, 0xffffffffUL )
@@ -24,7 +24,7 @@
 
 #include <immintrin.h>
 #include "algo/sha/sha3-defs.h"
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 /* The length of digests*/
 #define DIGEST_BIT_LEN_224 224
@@ -20,7 +20,7 @@
 
 #include <string.h>
 #include <emmintrin.h>
-#include "avxdefs.h"
+#include "simd-utils.h"
 #include "luffa_for_sse2.h"
 
 #define MULT2(a0,a1) do \
@@ -44,10 +44,11 @@ void allium_4way_hash( void *state, const void *input )
   blake256_4way( &ctx.blake, input + (64<<2), 16 );
   blake256_4way_close( &ctx.blake, vhash32 );
 
-  mm256_reinterleave_4x64( vhash64, vhash32, 256 );
+  mm256_rintrlv_4x32_4x64( vhash64, vhash32, 256 );
   keccak256_4way( &ctx.keccak, vhash64, 32 );
   keccak256_4way_close( &ctx.keccak, vhash64 );
-  mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
+
+  mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
 
   LYRA2RE( hash0, 32, hash0, 32, hash0, 32, 1, 8, 8 );
   LYRA2RE( hash1, 32, hash1, 32, hash1, 32, 1, 8, 8 );
@@ -67,26 +68,23 @@ void allium_4way_hash( void *state, const void *input )
   LYRA2RE( hash2, 32, hash2, 32, hash2, 32, 1, 8, 8 );
   LYRA2RE( hash3, 32, hash3, 32, hash3, 32, 1, 8, 8 );
 
-  mm256_interleave_4x64( vhash64, hash0, hash1, hash2, hash3, 256 );
+  mm256_intrlv_4x64( vhash64, hash0, hash1, hash2, hash3, 256 );
 
   skein256_4way( &ctx.skein, vhash64, 32 );
   skein256_4way_close( &ctx.skein, vhash64 );
-  mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
 
-  update_and_final_groestl256( &ctx.groestl, hash0, hash0, 256 );
+  mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
+
+  update_and_final_groestl256( &ctx.groestl, state, hash0, 256 );
   memcpy( &ctx.groestl, &allium_4way_ctx.groestl,
           sizeof(hashState_groestl256) );
-  update_and_final_groestl256( &ctx.groestl, hash1, hash1, 256 );
+  update_and_final_groestl256( &ctx.groestl, state+32, hash1, 256 );
   memcpy( &ctx.groestl, &allium_4way_ctx.groestl,
          sizeof(hashState_groestl256) );
-  update_and_final_groestl256( &ctx.groestl, hash2, hash2, 256 );
+  update_and_final_groestl256( &ctx.groestl, state+64, hash2, 256 );
   memcpy( &ctx.groestl, &allium_4way_ctx.groestl,
          sizeof(hashState_groestl256) );
-  update_and_final_groestl256( &ctx.groestl, hash3, hash3, 256 );
-
-  memcpy( state,    hash0, 32 );
-  memcpy( state+32, hash1, 32 );
-  memcpy( state+64, hash2, 32 );
-  memcpy( state+96, hash3, 32 );
+  update_and_final_groestl256( &ctx.groestl, state+96, hash3, 256 );
 }
 
 int scanhash_allium_4way( int thr_id, struct work *work, uint32_t max_nonce,
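A note on the restructuring above: the groestl256 finalisation now writes each lane's 32-byte digest directly into the caller's output (state, state+32, state+64, state+96), which removes the four trailing memcpy calls; the per-lane context is still re-seeded from allium_4way_ctx.groestl between lanes, exactly as before.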
@@ -94,7 +92,6 @@ int scanhash_allium_4way( int thr_id, struct work *work, uint32_t max_nonce,
 {
   uint32_t hash[8*4] __attribute__ ((aligned (64)));
   uint32_t vdata[20*4] __attribute__ ((aligned (64)));
-  uint32_t _ALIGN(64) edata[20];
   uint32_t *pdata = work->data;
   uint32_t *ptarget = work->target;
   const uint32_t first_nonce = pdata[19];
@@ -106,13 +103,7 @@ int scanhash_allium_4way( int thr_id, struct work *work, uint32_t max_nonce,
   if ( opt_benchmark )
      ( (uint32_t*)ptarget )[7] = 0x0000ff;
 
-  casti_m128i( edata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) );
-  casti_m128i( edata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) );
-  casti_m128i( edata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) );
-  casti_m128i( edata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) );
-  casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
-
-  mm128_interleave_4x32( vdata, edata, edata, edata, edata, 640 );
+  mm128_bswap_intrlv80_4x32( vdata, pdata );
   blake256_4way_init( &allium_4way_ctx.blake );
   blake256_4way( &allium_4way_ctx.blake, vdata, 64 );
 
@@ -124,16 +115,10 @@ int scanhash_allium_4way( int thr_id, struct work *work, uint32_t max_nonce,
 
   for ( int lane = 0; lane < 4; lane++ ) if ( (hash+(lane<<3))[7] <= Htarg )
   {
-     if ( fulltest( hash+(lane<<3), ptarget ) )
+     if ( fulltest( hash+(lane<<3), ptarget ) && !opt_benchmark )
      {
        pdata[19] = n + lane;
-       work_set_target_ratio( work, hash+(lane<<3) );
-       if ( submit_work( mythr, work ) )
-          applog( LOG_NOTICE, "Share %d submitted by thread %d, lane %d.",
-                  accepted_share_count + rejected_share_count + 1,
-                  thr_id, lane );
-       else
-          applog( LOG_WARNING, "Failed to submit share." );
+       submit_solution( work, hash+(lane<<3), mythr, lane );
      }
   }
   n += 4;
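mm128_bswap_intrlv80_4x32() is not defined anywhere in this compare, so its body is an assumption here; conceptually it has to fold the two removed steps (byte-swapping the 80-byte block header, then interleaving it into four identical 4x32 lanes) into one call. A plain scalar sketch of that combined result, assuming the usual 4x32 layout where word i of lane l lands at vdata[4*i + l]:

```c
/* Scalar sketch (not the project's code): byte-swap the 80-byte block header
   and replicate each 32-bit word across the four interleaved 4x32 lanes. */
#include <stdint.h>

static void bswap_intrlv80_4x32_sketch( uint32_t vdata[80], const uint32_t pdata[20] )
{
   for ( int i = 0; i < 20; i++ )
   {
      const uint32_t w = __builtin_bswap32( pdata[i] );   /* big-endian word */
      vdata[4*i + 0] = w;                                  /* same word in every lane */
      vdata[4*i + 1] = w;
      vdata[4*i + 2] = w;
      vdata[4*i + 3] = w;
   }
}
```

The nonce word (index 19) is then overwritten per iteration through the noncev pointer, as the loop bodies elsewhere in this compare show.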
@@ -27,11 +27,15 @@
 // Convert algos that don't yet do so to use dynamic alllocation.
 // Alloc huge pages globally. If ok each thread will create a pointer to
 // its chunk. If fail each thread will use use _mm_alloc for itself.
+// BLOCK_LEN_BYTES is 768.
 
 #define LYRA2REV3_NROWS 4
 #define LYRA2REV3_NCOLS 4
-//#define LYRA2REV3_MATRIX_SIZE ((BLOCK_LEN_BYTES)*(LYRA2REV3_NCOLS)* \
-//                               (LYRA2REV3_NROWS)*8)
+/*
+#define LYRA2REV3_MATRIX_SIZE ((BLOCK_LEN_BYTES)*(LYRA2REV3_NCOLS)* \
+                               (LYRA2REV3_NROWS)*8)
+*/
 
 #define LYRA2REV3_MATRIX_SIZE ((BLOCK_LEN_BYTES)<<4)
 
 __thread uint64_t* l2v3_wholeMatrix;
@@ -5,7 +5,9 @@
 #include <stdint.h>
 #include "lyra2.h"
 
-#if defined(__AVX2__)
+//#if defined(__AVX2__)
+
+#if defined(__SSE2__)
   #define LYRA2REV3_4WAY
 #endif
 
@@ -236,7 +236,7 @@ int LYRA2REV3( uint64_t* wholeMatrix, void *K, uint64_t kLen, const void *pwd,
   //Tries to allocate enough space for the whole memory matrix
 
   const int64_t ROW_LEN_INT64 = BLOCK_LEN_INT64 * nCols;
-  const int64_t ROW_LEN_BYTES = ROW_LEN_INT64 * 8;
+  // const int64_t ROW_LEN_BYTES = ROW_LEN_INT64 * 8;
   const int64_t BLOCK_LEN = BLOCK_LEN_BLAKE2_SAFE_INT64;
 /*
   const int64_t ROW_LEN_INT64 = BLOCK_LEN_INT64 * nCols;
@@ -566,7 +566,7 @@ int LYRA2RE( void *K, uint64_t kLen, const void *pwd, const uint64_t pwdlen,
 
 #if defined(__AVX2__)
   memset_zero_256( (__m256i*)wholeMatrix, i>>5 );
-#elif defined(__SSE4_2__)
+#elif defined(__SSE2__)
   memset_zero_128( (__m128i*)wholeMatrix, i>>4 );
 #else
   memset( wholeMatrix, 0, i );
@@ -36,17 +36,16 @@ void lyra2h_4way_hash( void *state, const void *input )
   blake256_4way( &ctx_blake, input + (64*4), 16 );
   blake256_4way_close( &ctx_blake, vhash );
 
-  mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, 256 );
+  mm128_dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 256 );
 
-  LYRA2Z( lyra2h_4way_matrix, hash0, 32, hash0, 32, hash0, 32, 16, 16, 16 );
-  LYRA2Z( lyra2h_4way_matrix, hash1, 32, hash1, 32, hash1, 32, 16, 16, 16 );
-  LYRA2Z( lyra2h_4way_matrix, hash2, 32, hash2, 32, hash2, 32, 16, 16, 16 );
-  LYRA2Z( lyra2h_4way_matrix, hash3, 32, hash3, 32, hash3, 32, 16, 16, 16 );
-
-  memcpy( state,    hash0, 32 );
-  memcpy( state+32, hash1, 32 );
-  memcpy( state+64, hash2, 32 );
-  memcpy( state+96, hash3, 32 );
+  LYRA2Z( lyra2h_4way_matrix, state,    32, hash0, 32, hash0,
+          32, 16, 16, 16 );
+  LYRA2Z( lyra2h_4way_matrix, state+32, 32, hash1, 32, hash1,
+          32, 16, 16, 16 );
+  LYRA2Z( lyra2h_4way_matrix, state+64, 32, hash2, 32, hash2,
+          32, 16, 16, 16 );
+  LYRA2Z( lyra2h_4way_matrix, state+96, 32, hash3, 32, hash3,
+          32, 16, 16, 16 );
 }
 
 int scanhash_lyra2h_4way( int thr_id, struct work *work, uint32_t max_nonce,
@@ -54,49 +53,36 @@ int scanhash_lyra2h_4way( int thr_id, struct work *work, uint32_t max_nonce,
 {
   uint32_t hash[8*4] __attribute__ ((aligned (64)));
   uint32_t vdata[20*4] __attribute__ ((aligned (64)));
-  uint32_t _ALIGN(64) edata[20];
   uint32_t *pdata = work->data;
   uint32_t *ptarget = work->target;
   const uint32_t Htarg = ptarget[7];
   const uint32_t first_nonce = pdata[19];
   uint32_t n = first_nonce;
-  uint32_t *nonces = work->nonces;
-  int num_found = 0;
-  uint32_t *noncep= vdata + 76; // 19*4
+  __m128i *noncev = (__m128i*)vdata + 19;   // aligned
   /* int */ thr_id = mythr->id;  // thr_id arg is deprecated
 
   if ( opt_benchmark )
     ptarget[7] = 0x0000ff;
 
-  for ( int i=0; i < 20; i++ )
-    be32enc( &edata[i], pdata[i] );
-
-  mm128_interleave_4x32( vdata, edata, edata, edata, edata, 640 );
-
+  mm128_bswap_intrlv80_4x32( vdata, pdata );
   lyra2h_4way_midstate( vdata );
 
   do {
-    be32enc( noncep,   n   );
-    be32enc( noncep+1, n+1 );
-    be32enc( noncep+2, n+2 );
-    be32enc( noncep+3, n+3 );
-
-    be32enc( &edata[19], n );
+    *noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) );
     lyra2h_4way_hash( hash, vdata );
 
     for ( int i = 0; i < 4; i++ )
-    if ( (hash+(i<<3))[7] <= Htarg && fulltest( hash+(i<<3), ptarget ) )
+    if ( (hash+(i<<3))[7] <= Htarg && fulltest( hash+(i<<3), ptarget )
+         && !opt_benchmark )
     {
       pdata[19] = n+i;
-      nonces[ num_found++ ] = n+i;
-      work_set_target_ratio( work, hash+(i<<3) );
+      submit_solution( work, hash+(i<<3), mythr, i );
    }
    n += 4;
-  } while ( (num_found == 0) && (n < max_nonce-4)
-            && !work_restart[thr_id].restart);
+  } while ( (n < max_nonce-4) && !work_restart[thr_id].restart);
 
   *hashes_done = n - first_nonce + 1;
-  return num_found;
+  return 0;
 }
 
 #endif
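Two behavioural points in this rewrite: the four lane nonces are now refreshed in one step through noncev, which points at word 19 of the interleaved vdata, and the function no longer collects results in work->nonces and returns num_found; each qualifying lane is submitted immediately through submit_solution() and the return value is always 0, so the loop runs until max_nonce or a work restart.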
@@ -6,7 +6,7 @@
 #include "algo/keccak/sph_keccak.h"
 #include "lyra2.h"
 #include "algo-gate-api.h"
-#include "avxdefs.h"
+#include "simd-utils.h"
 #if defined(__AES__)
   #include "algo/groestl/aes_ni/hash-groestl256.h"
 #endif
@@ -42,10 +42,12 @@ void lyra2rev2_4way_hash( void *state, const void *input )
   blake256_4way( &ctx.blake, input + (64<<2), 16 );
   blake256_4way_close( &ctx.blake, vhash );
 
-  mm256_reinterleave_4x64( vhash64, vhash, 256 );
+  mm256_rintrlv_4x32_4x64( vhash64, vhash, 256 );
+
   keccak256_4way( &ctx.keccak, vhash64, 32 );
   keccak256_4way_close( &ctx.keccak, vhash64 );
-  mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
+
+  mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
 
   cubehashUpdateDigest( &ctx.cube, (byte*) hash0, (const byte*) hash0, 32 );
   cubehashInit( &ctx.cube, 256, 16, 32 );
@@ -60,10 +62,12 @@ void lyra2rev2_4way_hash( void *state, const void *input )
   LYRA2REV2( l2v2_wholeMatrix, hash2, 32, hash2, 32, hash2, 32, 1, 4, 4 );
   LYRA2REV2( l2v2_wholeMatrix, hash3, 32, hash3, 32, hash3, 32, 1, 4, 4 );
 
-  mm256_interleave_4x64( vhash64, hash0, hash1, hash2, hash3, 256 );
+  mm256_intrlv_4x64( vhash64, hash0, hash1, hash2, hash3, 256 );
+
   skein256_4way( &ctx.skein, vhash64, 32 );
   skein256_4way_close( &ctx.skein, vhash64 );
-  mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
+
+  mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 256 );
 
   cubehashInit( &ctx.cube, 256, 16, 32 );
   cubehashUpdateDigest( &ctx.cube, (byte*) hash0, (const byte*) hash0, 32 );
@@ -74,11 +78,10 @@ void lyra2rev2_4way_hash( void *state, const void *input )
   cubehashInit( &ctx.cube, 256, 16, 32 );
   cubehashUpdateDigest( &ctx.cube, (byte*) hash3, (const byte*) hash3, 32 );
 
-  mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, 256 );
-  bmw256_4way( &ctx.bmw, vhash, 32 );
-  bmw256_4way_close( &ctx.bmw, vhash );
+  mm128_intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 256 );
 
-  mm128_deinterleave_4x32( state, state+32, state+64, state+96, vhash, 256 );
+  bmw256_4way( &ctx.bmw, vhash, 32 );
+  bmw256_4way_close( &ctx.bmw, state );
 }
 
 int scanhash_lyra2rev2_4way( int thr_id, struct work *work, uint32_t max_nonce,
@@ -86,49 +89,44 @@ int scanhash_lyra2rev2_4way( int thr_id, struct work *work, uint32_t max_nonce,
 {
   uint32_t hash[8*4] __attribute__ ((aligned (64)));
   uint32_t vdata[20*4] __attribute__ ((aligned (64)));
-  uint32_t _ALIGN(64) edata[20];
+  uint32_t *hash7 = &(hash[7<<2]);
+  uint32_t lane_hash[8];
   uint32_t *pdata = work->data;
   uint32_t *ptarget = work->target;
   const uint32_t first_nonce = pdata[19];
   uint32_t n = first_nonce;
   const uint32_t Htarg = ptarget[7];
-  uint32_t *nonces = work->nonces;
-  int num_found = 0;
-  uint32_t *noncep = vdata + 76; // 19*4
+  __m128i *noncev = (__m128i*)vdata + 19;   // aligned
   /* int */ thr_id = mythr->id;  // thr_id arg is deprecated
 
   if ( opt_benchmark )
     ( (uint32_t*)ptarget )[7] = 0x0000ff;
 
-  swab32_array( edata, pdata, 20 );
-
-  mm128_interleave_4x32( vdata, edata, edata, edata, edata, 640 );
+  mm128_bswap_intrlv80_4x32( vdata, pdata );
 
   blake256_4way_init( &l2v2_4way_ctx.blake );
   blake256_4way( &l2v2_4way_ctx.blake, vdata, 64 );
 
-  do {
-    be32enc( noncep,   n   );
-    be32enc( noncep+1, n+1 );
-    be32enc( noncep+2, n+2 );
-    be32enc( noncep+3, n+3 );
-
+  do
+  {
+    *noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) );
     lyra2rev2_4way_hash( hash, vdata );
    pdata[19] = n;
 
-    for ( int i = 0; i < 4; i++ )
-    if ( (hash+(i<<3))[7] <= Htarg && fulltest( hash+(i<<3), ptarget ) )
+    for ( int lane = 0; lane < 4; lane++ ) if ( hash7[lane] <= Htarg )
    {
-      pdata[19] = n+i;
-      nonces[ num_found++ ] = n+i;
-      work_set_target_ratio( work, hash+(i<<3) );
+      mm128_extract_lane_4x32( lane_hash, hash, lane, 256 );
+      if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
+      {
+        pdata[19] = n + lane;
+        submit_solution( work, lane_hash, mythr, lane );
+      }
    }
    n += 4;
-  } while ( (num_found == 0) && (n < max_nonce-4)
-            && !work_restart[thr_id].restart);
+  } while ( (n < max_nonce-4) && !work_restart[thr_id].restart);
 
   *hashes_done = n - first_nonce + 1;
-  return num_found;
+  return 0;
 }
 
 #endif
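The hash7/lane_hash pattern introduced here avoids de-interleaving every lane on every pass: in 4x32 interleaved output, word 7 of lane `lane` sits at hash[7*4 + lane], so hash7[lane] is a cheap first filter, and only a candidate lane is expanded with mm128_extract_lane_4x32() before the full target check.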
@@ -35,7 +35,7 @@ void lyra2rev3_4way_hash( void *state, const void *input )
 
   blake256_4way( &ctx.blake, input, 80 );
   blake256_4way_close( &ctx.blake, vhash );
-  mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, 256 );
+  mm128_dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 256 );
 
   LYRA2REV3( l2v3_wholeMatrix, hash0, 32, hash0, 32, hash0, 32, 1, 4, 4 );
   LYRA2REV3( l2v3_wholeMatrix, hash1, 32, hash1, 32, hash1, 32, 1, 4, 4 );
@@ -55,10 +55,9 @@ void lyra2rev3_4way_hash( void *state, const void *input )
   LYRA2REV3( l2v3_wholeMatrix, hash2, 32, hash2, 32, hash2, 32, 1, 4, 4 );
   LYRA2REV3( l2v3_wholeMatrix, hash3, 32, hash3, 32, hash3, 32, 1, 4, 4 );
 
-  mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, 256 );
+  mm128_intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 256 );
   bmw256_4way( &ctx.bmw, vhash, 32 );
   bmw256_4way_close( &ctx.bmw, state );
-
 }
 
 int scanhash_lyra2rev3_4way( int thr_id, struct work *work, uint32_t max_nonce,
@@ -66,7 +65,6 @@ int scanhash_lyra2rev3_4way( int thr_id, struct work *work, uint32_t max_nonce,
 {
   uint32_t hash[8*4] __attribute__ ((aligned (64)));
   uint32_t vdata[20*4] __attribute__ ((aligned (64)));
-  uint32_t edata[20] __attribute__ ((aligned (64)));
   uint32_t *hash7 = &(hash[7<<2]);
   uint32_t lane_hash[8];
   uint32_t *pdata = work->data;
@@ -80,15 +78,7 @@ int scanhash_lyra2rev3_4way( int thr_id, struct work *work, uint32_t max_nonce,
   if ( opt_benchmark )
     ( (uint32_t*)ptarget )[7] = 0x0000ff;
 
-  // Need big endian data
-  casti_m128i( edata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) );
-  casti_m128i( edata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) );
-  casti_m128i( edata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) );
-  casti_m128i( edata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) );
-  casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
-
-  mm128_interleave_4x32( vdata, edata, edata, edata, edata, 640 );
+  mm128_bswap_intrlv80_4x32( vdata, pdata );
 
   do
   {
    *noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) );
@@ -99,22 +89,14 @@ int scanhash_lyra2rev3_4way( int thr_id, struct work *work, uint32_t max_nonce,
    for ( int lane = 0; lane < 4; lane++ ) if ( hash7[lane] <= Htarg )
    {
      mm128_extract_lane_4x32( lane_hash, hash, lane, 256 );
-
-     if ( fulltest( lane_hash, ptarget ) )
+     if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
      {
        pdata[19] = n + lane;
-       work_set_target_ratio( work, lane_hash );
-       if ( submit_work( mythr, work ) )
-          applog( LOG_NOTICE, "Share %d submitted by thread %d, lane %d.",
-                  accepted_share_count + rejected_share_count + 1,
-                  thr_id, lane );
-       else
-          applog( LOG_WARNING, "Failed to submit share." );
+       submit_solution( work, lane_hash, mythr, lane );
      }
    }
    n += 4;
  } while ( (n < max_nonce-4) && !work_restart[thr_id].restart);
 
  *hashes_done = n - first_nonce + 1;
  return 0;
 }
@@ -36,17 +36,12 @@ void lyra2z_4way_hash( void *state, const void *input )
   blake256_4way( &ctx_blake, input + (64*4), 16 );
   blake256_4way_close( &ctx_blake, vhash );
 
-  mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, 256 );
+  mm128_dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 256 );
 
-  LYRA2Z( lyra2z_4way_matrix, hash0, 32, hash0, 32, hash0, 32, 8, 8, 8 );
-  LYRA2Z( lyra2z_4way_matrix, hash1, 32, hash1, 32, hash1, 32, 8, 8, 8 );
-  LYRA2Z( lyra2z_4way_matrix, hash2, 32, hash2, 32, hash2, 32, 8, 8, 8 );
-  LYRA2Z( lyra2z_4way_matrix, hash3, 32, hash3, 32, hash3, 32, 8, 8, 8 );
-
-  memcpy( state,    hash0, 32 );
-  memcpy( state+32, hash1, 32 );
-  memcpy( state+64, hash2, 32 );
-  memcpy( state+96, hash3, 32 );
+  LYRA2Z( lyra2z_4way_matrix, state   , 32, hash0, 32, hash0, 32, 8, 8, 8 );
+  LYRA2Z( lyra2z_4way_matrix, state+32, 32, hash1, 32, hash1, 32, 8, 8, 8 );
+  LYRA2Z( lyra2z_4way_matrix, state+64, 32, hash2, 32, hash2, 32, 8, 8, 8 );
+  LYRA2Z( lyra2z_4way_matrix, state+96, 32, hash3, 32, hash3, 32, 8, 8, 8 );
 }
 
 int scanhash_lyra2z_4way( int thr_id, struct work *work, uint32_t max_nonce,
@@ -54,7 +49,6 @@ int scanhash_lyra2z_4way( int thr_id, struct work *work, uint32_t max_nonce,
 {
   uint32_t hash[8*4] __attribute__ ((aligned (64)));
   uint32_t vdata[20*4] __attribute__ ((aligned (64)));
-  uint32_t _ALIGN(64) edata[20];
   uint32_t *pdata = work->data;
   uint32_t *ptarget = work->target;
   const uint32_t Htarg = ptarget[7];
@@ -66,13 +60,7 @@ int scanhash_lyra2z_4way( int thr_id, struct work *work, uint32_t max_nonce,
   if ( opt_benchmark )
     ptarget[7] = 0x0000ff;
 
-  casti_m128i( edata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) );
-  casti_m128i( edata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) );
-  casti_m128i( edata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) );
-  casti_m128i( edata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) );
-  casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
-  mm128_interleave_4x32( vdata, edata, edata, edata, edata, 640 );
-
+  mm128_bswap_intrlv80_4x32( vdata, pdata );
   lyra2z_4way_midstate( vdata );
 
   do {
@@ -82,16 +70,11 @@ int scanhash_lyra2z_4way( int thr_id, struct work *work, uint32_t max_nonce,
    pdata[19] = n;
 
    for ( int i = 0; i < 4; i++ )
-   if ( (hash+(i<<3))[7] <= Htarg && fulltest( hash+(i<<3), ptarget ) )
+   if ( (hash+(i<<3))[7] <= Htarg && fulltest( hash+(i<<3), ptarget )
+        && !opt_benchmark )
    {
      pdata[19] = n+i;
-     work_set_target_ratio( work, hash+(i<<3) );
-     if ( submit_work( mythr, work ) )
-        applog( LOG_NOTICE, "Share %d submitted by thread %d, lane %d.",
-                accepted_share_count + rejected_share_count + 1,
-                thr_id, i );
-     else
-        applog( LOG_WARNING, "Failed to submit share." );
+     submit_solution( work, hash+(i<<3), mythr, i );
    }
    n += 4;
  } while ( (n < max_nonce-4) && !work_restart[thr_id].restart);
@@ -136,8 +119,8 @@ void lyra2z_8way_hash( void *state, const void *input )
   blake256_8way( &ctx_blake, input + (64*8), 16 );
   blake256_8way_close( &ctx_blake, vhash );
 
-  mm256_deinterleave_8x32( hash0, hash1, hash2, hash3,
+  mm256_dintrlv_8x32( hash0, hash1, hash2, hash3,
                       hash4, hash5, hash6, hash7, vhash, 256 );
 
   LYRA2Z( lyra2z_8way_matrix, hash0, 32, hash0, 32, hash0, 32, 8, 8, 8 );
   LYRA2Z( lyra2z_8way_matrix, hash1, 32, hash1, 32, hash1, 32, 8, 8, 8 );
@@ -163,7 +146,6 @@ int scanhash_lyra2z_8way( int thr_id, struct work *work, uint32_t max_nonce,
 {
   uint32_t hash[8*8] __attribute__ ((aligned (64)));
   uint32_t vdata[20*8] __attribute__ ((aligned (64)));
-  uint32_t _ALIGN(64) edata[20];
   uint32_t *pdata = work->data;
   uint32_t *ptarget = work->target;
   const uint32_t Htarg = ptarget[7];
@@ -175,13 +157,7 @@ int scanhash_lyra2z_8way( int thr_id, struct work *work, uint32_t max_nonce,
   if ( opt_benchmark )
     ptarget[7] = 0x0000ff;
 
-  casti_m256i( edata, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) );
-  casti_m256i( edata, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) );
-  casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
-
-  mm256_interleave_8x32( vdata, edata, edata, edata, edata,
-                         edata, edata, edata, edata, 640 );
-
+  mm256_bswap_intrlv80_8x32( vdata, pdata );
   lyra2z_8way_midstate( vdata );
 
   do {
@@ -191,16 +167,11 @@ int scanhash_lyra2z_8way( int thr_id, struct work *work, uint32_t max_nonce,
    pdata[19] = n;
 
    for ( int i = 0; i < 8; i++ )
-   if ( (hash+(i<<3))[7] <= Htarg && fulltest( hash+(i<<3), ptarget ) )
+   if ( (hash+(i<<3))[7] <= Htarg && fulltest( hash+(i<<3), ptarget )
+        && !opt_benchmark )
    {
      pdata[19] = n+i;
-     work_set_target_ratio( work, hash+(i<<3) );
-     if ( submit_work( mythr, work ) )
-        applog( LOG_NOTICE, "Share %d submitted by thread %d, lane %d.",
-                accepted_share_count + rejected_share_count + 1,
-                thr_id, i );
-     else
-        applog( LOG_WARNING, "Failed to submit share." );
+     submit_solution( work, hash+(i<<3), mythr, i );
    }
    n += 8;
  } while ( (n < max_nonce-8) && !work_restart[thr_id].restart);
@@ -3,7 +3,7 @@
 #include "lyra2-gate.h"
 #include "lyra2.h"
 #include "algo/blake/sph_blake.h"
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 __thread uint64_t* lyra2z_matrix;
 
@@ -1,7 +1,7 @@
 #include <memory.h>
 #include "algo-gate-api.h"
 #include "lyra2.h"
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 __thread uint64_t* lyra2z330_wholeMatrix;
 
@@ -18,38 +18,44 @@ void lyra2z330_hash(void *state, const void *input, uint32_t height)
 int scanhash_lyra2z330( int thr_id, struct work *work, uint32_t max_nonce,
                         uint64_t *hashes_done, struct thr_info *mythr )
 {
   uint32_t hash[8] __attribute__ ((aligned (64)));
   uint32_t endiandata[20] __attribute__ ((aligned (64)));
   uint32_t *pdata = work->data;
   uint32_t *ptarget = work->target;
   const uint32_t Htarg = ptarget[7];
   const uint32_t first_nonce = pdata[19];
   uint32_t nonce = first_nonce;
   /* int */ thr_id = mythr->id;  // thr_id arg is deprecated
 
   if (opt_benchmark)
     ptarget[7] = 0x0000ff;
 
-  for (int i=0; i < 19; i++) {
-    be32enc(&endiandata[i], pdata[i]);
-  }
+  casti_m128i( endiandata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) );
+  casti_m128i( endiandata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) );
+  casti_m128i( endiandata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) );
+  casti_m128i( endiandata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) );
+  casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
 
-  do {
+  do
+  {
    be32enc(&endiandata[19], nonce);
    lyra2z330_hash( hash, endiandata, work->height );
 
-   if (hash[7] <= Htarg && fulltest(hash, ptarget)) {
+   if ( hash[7] <= Htarg && fulltest(hash, ptarget) && !opt_benchmark )
+   {
      work_set_target_ratio(work, hash);
      pdata[19] = nonce;
-     *hashes_done = pdata[19] - first_nonce;
-     return 1;
+     if ( submit_work( mythr, work ) )
+        applog( LOG_NOTICE, "Share %d submitted by thread %d",
+                accepted_share_count + rejected_share_count + 1,
+                mythr->id );
+     else
+        applog( LOG_WARNING, "Failed to submit share." );
    }
    nonce++;
-
  } while (nonce < max_nonce && !work_restart[thr_id].restart);
 
  pdata[19] = nonce;
  *hashes_done = pdata[19] - first_nonce + 1;
  return 0;
 }
 
 void lyra2z330_set_target( struct work* work, double job_diff )
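The lyra2z330 loop changes semantics as well as layout: the old code returned 1 on the first qualifying hash, while the new code submits the share in place (guarded by !opt_benchmark), keeps scanning until max_nonce or a restart, and always returns 0, in line with the other scanhash functions converted in this compare. The header is also byte-swapped with five casti_m128i/mm128_bswap_32 stores instead of the per-word be32enc loop.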
@@ -50,11 +50,11 @@ void phi2_hash(void *state, const void *input)
   unsigned char _ALIGN(128) hashA[64];
   unsigned char _ALIGN(128) hashB[64];
 
   phi2_ctx_holder ctx __attribute__ ((aligned (64)));
   memcpy( &ctx, &phi2_ctx, sizeof(phi2_ctx) );
 
   cubehashUpdateDigest( &ctx.cube, (byte*)hashB, (const byte*)input,
                         phi2_has_roots ? 144 : 80 );
 
   LYRA2RE( &hashA[ 0], 32, &hashB[ 0], 32, &hashB[ 0], 32, 1, 8, 8 );
   LYRA2RE( &hashA[32], 32, &hashB[32], 32, &hashB[32], 32, 1, 8, 8 );
@@ -63,17 +63,17 @@ void phi2_hash(void *state, const void *input)
   sph_jh512_close( &ctx.jh, (void*)hash );
 
   if ( hash[0] & 1 )
   {
     sph_gost512( &ctx.gost, (const void*)hash, 64 );
     sph_gost512_close( &ctx.gost, (void*)hash );
   }
   else
   {
 #if defined(__AES__)
     update_final_echo ( &ctx.echo1, (BitSequence *)hash,
                         (const BitSequence *)hash, 512 );
     update_final_echo ( &ctx.echo2, (BitSequence *)hash,
                         (const BitSequence *)hash, 512 );
 #else
     sph_echo512( &ctx.echo1, (const void*)hash, 64 );
     sph_echo512_close( &ctx.echo1, (void*)hash );
@@ -51,7 +51,7 @@ inline void initState( uint64_t State[/*16*/] )
   state[3] = _mm256_set_epi64x( blake2b_IV[7], blake2b_IV[6],
                                 blake2b_IV[5], blake2b_IV[4] );
 
-#elif defined (__SSE4_2__)
+#elif defined (__SSE2__)
 
   __m128i* state = (__m128i*)State;
 
@@ -137,7 +137,7 @@ inline void squeeze( uint64_t *State, byte *Out, unsigned int len )
   //Squeezes remaining bytes
   memcpy_256( out, state, ( len_m256i % BLOCK_LEN_M256I ) );
 
-#elif defined (__SSE4_2__)
+#elif defined (__SSE2__)
 
   const int len_m128i = len / 16;
   const int fullBlocks = len_m128i / BLOCK_LEN_M128I;
@@ -205,7 +205,7 @@ inline void absorbBlock( uint64_t *State, const uint64_t *In )
   _mm256_store_si256( (__m256i*)State + 2, state2 );
   _mm256_store_si256( (__m256i*)State + 3, state3 );
 
-#elif defined (__SSE4_2__)
+#elif defined (__SSE2__)
 
   __m128i* state = (__m128i*)State;
   __m128i* in    = (__m128i*)In;
@@ -273,7 +273,7 @@ inline void absorbBlockBlake2Safe( uint64_t *State, const uint64_t *In )
   _mm256_store_si256( (__m256i*)State + 2, state2 );
   _mm256_store_si256( (__m256i*)State + 3, state3 );
 
-#elif defined (__SSE4_2__)
+#elif defined (__SSE2__)
 
   __m128i* state = (__m128i*)State;
   __m128i* in    = (__m128i*)In;
@@ -355,7 +355,7 @@ inline void reducedSqueezeRow0( uint64_t* State, uint64_t* rowOut,
   _mm256_store_si256( (__m256i*)State + 2, state2 );
   _mm256_store_si256( (__m256i*)State + 3, state3 );
 
-#elif defined (__SSE4_2__)
+#elif defined (__SSE2__)
 
   __m128i* state = (__m128i*)State;
   __m128i state0 = _mm_load_si128( state );
@@ -494,7 +494,7 @@ inline void reducedDuplexRow1( uint64_t *State, uint64_t *rowIn,
   _mm256_store_si256( (__m256i*)State + 2, state2 );
   _mm256_store_si256( (__m256i*)State + 3, state3 );
 
-#elif defined (__SSE4_2__)
+#elif defined (__SSE2__)
 
   __m128i* state = (__m128i*)State;
   __m128i state0 = _mm_load_si128( state );
@@ -694,7 +694,7 @@ inline void reducedDuplexRowSetup( uint64_t *State, uint64_t *rowIn,
   _mm256_store_si256( (__m256i*)State + 2, state2 );
   _mm256_store_si256( (__m256i*)State + 3, state3 );
 
-#elif defined (__SSE4_2__)
+#elif defined (__SSE2__)
 
   __m128i* in    = (__m128i*)rowIn;
   __m128i* inout = (__m128i*)rowInOut;
@@ -713,9 +713,9 @@ inline void reducedDuplexRowSetup( uint64_t *State, uint64_t *rowIn,
   __m128i* state = (__m128i*)State;
 
   // For the last round in this function not optimized for AVX
-  uint64_t* ptrWordIn = rowIn;        //In Lyra2: pointer to prev
-  uint64_t* ptrWordInOut = rowInOut;  //In Lyra2: pointer to row*
-  uint64_t* ptrWordOut = rowOut + (nCols-1)*BLOCK_LEN_INT64; //In Lyra2: pointer to row
+  // uint64_t* ptrWordIn = rowIn;        //In Lyra2: pointer to prev
+  // uint64_t* ptrWordInOut = rowInOut;  //In Lyra2: pointer to row*
+  // uint64_t* ptrWordOut = rowOut + (nCols-1)*BLOCK_LEN_INT64; //In Lyra2: pointer to row
 
   for ( i = 0; i < nCols; i++ )
   {
@@ -750,6 +750,28 @@ inline void reducedDuplexRowSetup( uint64_t *State, uint64_t *rowIn,
      out[4] = _mm_xor_si128( state[4], in[4] );
      out[5] = _mm_xor_si128( state[5], in[5] );
 
+     __m128i t0, t1;
+     t0 = _mm_srli_si128( state[0], 8 );
+     t1 = _mm_srli_si128( state[1], 8 );
+     inout[0] = _mm_xor_si128( inout[0],
+                   _mm_or_si128( _mm_slli_si128( state[0], 8 ),
+                                 _mm_srli_si128( state[5], 8 ) ) );
+     inout[1] = _mm_xor_si128( inout[1],
+                   _mm_or_si128( _mm_slli_si128( state[1], 8 ), t0 ) );
+     t0 = _mm_srli_si128( state[2], 8 );
+     inout[2] = _mm_xor_si128( inout[2],
+                   _mm_or_si128( _mm_slli_si128( state[2], 8 ), t1 ) );
+     t1 = _mm_srli_si128( state[3], 8 );
+     inout[3] = _mm_xor_si128( inout[3],
+                   _mm_or_si128( _mm_slli_si128( state[3], 8 ), t0 ) );
+     t0 = _mm_srli_si128( state[4], 8 );
+     inout[4] = _mm_xor_si128( inout[4],
+                   _mm_or_si128( _mm_slli_si128( state[4], 8 ), t1 ) );
+     inout[5] = _mm_xor_si128( inout[5],
+                   _mm_or_si128( _mm_slli_si128( state[5], 8 ), t0 ) );
+
+/*
      ptrWordInOut[0] ^= State[11];
      ptrWordInOut[1] ^= State[0];
      ptrWordInOut[2] ^= State[1];
@@ -768,7 +790,7 @@ inline void reducedDuplexRowSetup( uint64_t *State, uint64_t *rowIn,
      ptrWordIn += BLOCK_LEN_INT64;
      //Output: goes to previous column
      ptrWordOut -= BLOCK_LEN_INT64;
+*/
      inout += BLOCK_LEN_M128I;
      in    += BLOCK_LEN_M128I;
      out   -= BLOCK_LEN_M128I;
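The block added above is the SSE2 path XORing the in/out row with the sponge state rotated by one 64-bit word, which is what the now-commented scalar lines spell out (ptrWordInOut[0] ^= State[11], ptrWordInOut[1] ^= State[0], and so on). Since each __m128i packs two adjacent 64-bit words, the rotated pair is stitched together from a byte-shift of the current register and of its neighbour. A compact loop form of the same computation, with illustrative names (the diff unrolls it and reuses t0/t1 instead of recomputing the shifted halves):

```c
/* Sketch: XOR a 12-word (uint64) row with the first 12 sponge state words
   rotated by one 64-bit position, using only SSE2 128-bit byte shifts.
   inout[i] holds row words {2i, 2i+1}; state[i] holds state words {2i, 2i+1}. */
#include <emmintrin.h>

static void xor_rotated_state_sse2( __m128i inout[6], const __m128i state[6] )
{
   for ( int i = 0; i < 6; i++ )
   {
      __m128i hi = _mm_slli_si128( state[i], 8 );              /* word 2i   -> high lane */
      __m128i lo = _mm_srli_si128( state[(i + 5) % 6], 8 );    /* word 2i-1 -> low lane  */
      inout[i] = _mm_xor_si128( inout[i], _mm_or_si128( hi, lo ) );
   }
}
```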
@@ -930,7 +952,7 @@ inline void reducedDuplexRow( uint64_t *State, uint64_t *rowIn,
   _mm256_store_si256( (__m256i*)State + 2, state2 );
   _mm256_store_si256( (__m256i*)State + 3, state3 );
 
-#elif defined(__SSE4_2__)
+#elif defined (__SSE2__)
 
   __m128i* state = (__m128i*)State;
   __m128i* in    = (__m128i*)rowIn;
@@ -23,7 +23,7 @@
 #define SPONGE_H_
 
 #include <stdint.h>
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 #if defined(__GNUC__)
 #define ALIGN __attribute__ ((aligned(32)))
@@ -59,7 +59,7 @@ static inline uint64_t rotr64( const uint64_t w, const unsigned c ){
 // returns void, updates all args
 #define G_4X64(a,b,c,d) \
    a = _mm256_add_epi64( a, b ); \
-   d = mm256_ror_64( _mm256_xor_si256( d, a), 32 ); \
+   d = mm256_ror_64( _mm256_xor_si256( d, a ), 32 ); \
    c = _mm256_add_epi64( c, d ); \
    b = mm256_ror_64( _mm256_xor_si256( b, c ), 24 ); \
    a = _mm256_add_epi64( a, b ); \
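For background, G_4X64 is four parallel copies of the Blake2b-style quarter-round used by Lyra2's sponge (it omits the message-word additions). The first two rotation amounts, 32 and 24, are visible in the hunk; the remaining two rounds use 16 and 63, per the Blake2b specification:

$$
\begin{aligned}
a &\leftarrow a + b, &\quad d &\leftarrow (d \oplus a) \ggg 32,\\
c &\leftarrow c + d, &\quad b &\leftarrow (b \oplus c) \ggg 24,\\
a &\leftarrow a + b, &\quad d &\leftarrow (d \oplus a) \ggg 16,\\
c &\leftarrow c + d, &\quad b &\leftarrow (b \oplus c) \ggg 63.
\end{aligned}
$$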
@@ -6,7 +6,7 @@
 
 #if defined(__SSE4_2__)
 
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 typedef struct
 {
@@ -42,7 +42,7 @@
 
 #include <stddef.h>
 #include "sph_types.h"
-#include "avxdefs.h"
+#include "simd-utils.h"
 
 #if defined(__SSE2__)
 //#if defined(__SSE4_2__)
@@ -122,12 +122,11 @@ typedef struct {
 } sha256_11way_context;
 
 void sha256_11way_init( sha256_11way_context *ctx );
-void sha256_11way( sha256_11way_context *ctx, const void *datax,
-                   void *datay, void *dataz, size_t len );
+void sha256_11way_update( sha256_11way_context *ctx, const void *datax,
+                          const void *datay, const void *dataz, size_t len );
 void sha256_11way_close( sha256_11way_context *ctx, void *dstx, void *dstyx,
                          void *dstz );
 
-#endif
-#endif
-#endif
+#endif  // __AVX2__
+#endif  // __SSE2__
+#endif  // SHA256_4WAY_H__
@@ -1,3 +1,4 @@
|
|||||||
|
#if 0
|
||||||
|
|
||||||
#include <stddef.h>
|
#include <stddef.h>
|
||||||
#include <string.h>
|
#include <string.h>
|
||||||
@@ -9,7 +10,7 @@
|
|||||||
// naming convention for variables and macros
|
// naming convention for variables and macros
|
||||||
// VARx: AVX2 8 way 32 bit
|
// VARx: AVX2 8 way 32 bit
|
||||||
// VARy: MMX 2 way 32 bit
|
// VARy: MMX 2 way 32 bit
|
||||||
// VARz: 32 bit integer
|
// VARz: scalar integer 32 bit
|
||||||
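The x/y/z suffixes encode the 8 + 2 + 1 = 11 lane split used throughout this file: eight 32-bit lanes held in one AVX2 register, two more in one MMX register, and a final scalar lane. A hypothetical illustration of one 11-way word under that convention (the struct and field names are made up for clarity, not taken from the source):

typedef struct
{
   __m256i  wx;    // VARx: lanes 0..7, one 32-bit word per AVX2 lane
   __m64    wy;    // VARy: lanes 8..9, one 32-bit word per MMX lane
   uint32_t wz;    // VARz: lane 10, plain scalar word
} eleven_way_word;  // hypothetical, for illustration only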
|
|
||||||
|
|
||||||
static const uint32_t H256[8] =
|
static const uint32_t H256[8] =
|
||||||
@@ -18,7 +19,7 @@ static const uint32_t H256[8] =
|
|||||||
0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
|
0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
|
||||||
};
|
};
|
||||||
|
|
||||||
static const uont32_t K256[64] =
|
static const uint32_t K256[64] =
|
||||||
{
|
{
|
||||||
0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
|
0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
|
||||||
0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
|
0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
|
||||||
@@ -57,29 +58,25 @@ static const uont32_t K256[64] =
|
|||||||
|
|
||||||
#define MAJz(X, Y, Z) ( ( (X) & (Y) ) | ( ( (X) | (Y) ) & (Z) ) )
|
#define MAJz(X, Y, Z) ( ( (X) & (Y) ) | ( ( (X) | (Y) ) & (Z) ) )
|
||||||
|
|
||||||
|
|
||||||
#define BSG2_0x(x) \
|
#define BSG2_0x(x) \
|
||||||
_mm256_xor_si256( _mm256_xor_si256( \
|
_mm256_xor_si256( _mm256_xor_si256( \
|
||||||
mm256_ror_32(x,2), mm256_ror_32(x,13) ), mm256_ror_32( x,22) )
|
mm256_ror_32(x,2), mm256_ror_32(x,13) ), _mm256_srli_epi32(x,22) )
|
||||||
|
|
||||||
#define BSG2_0y(x) \
|
#define BSG2_0y(x) \
|
||||||
_mm_xor_si64( _mm_xor_si64( \
|
_mm_xor_si64( _mm_xor_si64( \
|
||||||
mm64_ror_32(x,2), mm64_ror_32(x,13) ), mm64_ror_32( x,22) )
|
mm64_ror_32(x,2), mm64_ror_32(x,13) ), _mm_srli_pi32(x,22) )
|
||||||
|
|
||||||
#define BSG2_0z(x) ( ( ror_32(x,2) ^ ror_32(x,13) ) ^ ror_32(x,22) )
|
|
||||||
|
|
||||||
|
#define BSG2_0z(x) ( u32_ror_32(x,2) ^ u32_ror_32(x,13) ^ ((x)>>22) )
|
||||||
|
|
||||||
#define BSG2_1x(x) \
|
#define BSG2_1x(x) \
|
||||||
_mm256_xor_si256( _mm256_xor_si256( \
|
_mm256_xor_si256( _mm256_xor_si256( \
|
||||||
mm256_ror_32(x,6), mm256_ror_32(x,11) ), mm256_ror_32( x,25) )
|
mm256_ror_32(x,6), mm256_ror_32(x,11) ), _mm256_srli_epi32(x,25) )
|
||||||
|
|
||||||
#define BSG2_1y(x) \
|
#define BSG2_1y(x) \
|
||||||
_mm_xor_si64( _mm_xor_si64( \
|
_mm_xor_si64( _mm_xor_si64( \
|
||||||
mm64_ror_32(x,6), mm64_ror_32(x,11) ), mm64_ror_32( x,25) )
|
mm64_ror_32(x,6), mm64_ror_32(x,11) ), _mm_srli_pi32(x,25) )
|
||||||
|
|
||||||
#define BSG2_1z(x) \
|
|
||||||
(mm256_ror_32(x,6) ^ mm256_ror_32(x,11) ^ mm256_ror_32( x,25) )
|
|
||||||
|
|
||||||
|
#define BSG2_1z(x) ( u32_ror_32(x,6) ^ u32_ror_32(x,11) ^ ((x)>>25) )
|
||||||
|
|
||||||
#define SSG2_0x(x) \
|
#define SSG2_0x(x) \
|
||||||
_mm256_xor_si256( _mm256_xor_si256( \
|
_mm256_xor_si256( _mm256_xor_si256( \
|
||||||
@@ -87,30 +84,35 @@ static const uont32_t K256[64] =
|
|||||||
|
|
||||||
#define SSG2_0y(x) \
|
#define SSG2_0y(x) \
|
||||||
_mm_xor_si64( _mm_xor_si64( \
|
_mm_xor_si64( _mm_xor_si64( \
|
||||||
mm64_ror_32(x,7), mm64_ror_32(x,18) ), _mm64_srli_pi32(x,3) )
|
mm64_ror_32(x,7), mm64_ror_32(x,18) ), _mm_srli_pi32(x,3) )
|
||||||
|
|
||||||
#define SSG2_0z(x) ( ror_32(x,7) ^ ror_32(x,18) ^ ((x)>>3) )
|
|
||||||
|
|
||||||
|
#define SSG2_0z(x) (( u32_ror_32(x,7) ^ u32_ror_32(x,18) ) ^ ((x)>>3) )
|
||||||
|
|
||||||
#define SSG2_1x(x) \
|
#define SSG2_1x(x) \
|
||||||
_mm256_xor_si256( _mm256_xor_si256( \
|
_mm256_xor_si256( _mm256_xor_si256( \
|
||||||
mm256_ror_32(x,17), mm256_ror_32(x,19) ), _mm256_srli_epi32(x,10) )
|
mm256_ror_32(x,17), mm256_ror_32(x,19) ), _mm256_srli_epi32(x,10) )
|
||||||
|
|
||||||
|
#define SSG2_1y(x) \
|
||||||
|
_mm_xor_si64( _mm_xor_si64( \
|
||||||
|
mm64_ror_32(x,17), mm64_ror_32(x,19) ), _mm_srli_pi32(x,10) )
|
||||||
|
|
||||||
|
#define SSG2_1z(x) ( u32_ror_32(x,17) ^ u32_ror_32(x,19) ^ ((x)>>10) )
|
||||||
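For comparison with the x/y/z variants above, the FIPS 180-4 definitions of the four SHA-256 sigma functions are given below in scalar form, using the same u32_ror_32 rotate helper the z macros rely on. Note that in the standard all three terms of the two big sigmas are rotations; only the small sigmas end in a plain right shift.

// FIPS 180-4 reference, 32-bit scalar form.
#define REF_BSG2_0(x) ( u32_ror_32(x, 2) ^ u32_ror_32(x,13) ^ u32_ror_32(x,22) )
#define REF_BSG2_1(x) ( u32_ror_32(x, 6) ^ u32_ror_32(x,11) ^ u32_ror_32(x,25) )
#define REF_SSG2_0(x) ( u32_ror_32(x, 7) ^ u32_ror_32(x,18) ^ ((x) >>  3) )
#define REF_SSG2_1(x) ( u32_ror_32(x,17) ^ u32_ror_32(x,19) ^ ((x) >> 10) )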
|
|
||||||
#define SHA2x_MEXP( a, b, c, d ) \
|
#define SHA2x_MEXP( a, b, c, d ) \
|
||||||
_mm256_add_epi32( _mm256_add_epi32( _mm256_add_epi32( \
|
_mm256_add_epi32( _mm256_add_epi32( _mm256_add_epi32( \
|
||||||
SSG2_1x( Wx[a] ), Wx[b] ), SSG2_0x( Wx[c] ) ), Wx[d] );
|
SSG2_1x( Wx[a] ), Wx[b] ), SSG2_0x( Wx[c] ) ), Wx[d] )
|
||||||
|
|
||||||
#define SHA2y_MEXP( a, b, c, d ) \
|
#define SHA2y_MEXP( a, b, c, d ) \
|
||||||
_mm_add_pi32( _mm_add_pi32( _mm_add_pi32( \
|
_mm_add_pi32( _mm_add_pi32( _mm_add_pi32( \
|
||||||
SSG2_1y( Wy[a] ), Wy[b] ), SSG2_0y( Wy[c] ) ), Wy[d] );
|
SSG2_1y( Wy[a] ), Wy[b] ), SSG2_0y( Wy[c] ) ), Wy[d] )
|
||||||
|
|
||||||
#define SHA2z_MEXP( a, b, c, d ) \
|
#define SHA2z_MEXP( a, b, c, d ) \
|
||||||
( SSG2_1z( Wz[a] ) + Wz[b] + SSG2_0z( Wz[c] ) + Wz[d] );
|
( SSG2_1z( Wz[a] ) + Wz[b] + SSG2_0z( Wz[c] ) + Wz[d] )
|
||||||
|
|
||||||
|
|
||||||
#define SHA2s_11WAY_STEP( Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx, \
|
#define SHA2s_11WAY_STEP( Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx, \
|
||||||
Ay, By, Cy, Dy, Ey, Fy, Gy, Hy, \
|
Ay, By, Cy, Dy, Ey, Fy, Gy, Hy, \
|
||||||
Ax, Bx, Cz, Dz, Ez, Fz, Gz, Hz, i, j) \
|
Az, Bz, Cz, Dz, Ez, Fz, Gz, Hz, i, j) \
|
||||||
do { \
|
do { \
|
||||||
__m256i T1x, T2x; \
|
__m256i T1x, T2x; \
|
||||||
__m64 T1y, T2y; \
|
__m64 T1y, T2y; \
|
||||||
@@ -119,22 +121,22 @@ do { \
|
|||||||
_mm256_add_epi32( Hx, BSG2_1x(Ex) ), CHx(Ex, Fx, Gx) ), \
|
_mm256_add_epi32( Hx, BSG2_1x(Ex) ), CHx(Ex, Fx, Gx) ), \
|
||||||
_mm256_set1_epi32( K256[( (j)+(i) )] ) ), Wx[i] ); \
|
_mm256_set1_epi32( K256[( (j)+(i) )] ) ), Wx[i] ); \
|
||||||
T1y = _mm_add_pi32( _mm_add_pi32( _mm_add_pi32( \
|
T1y = _mm_add_pi32( _mm_add_pi32( _mm_add_pi32( \
|
||||||
_mm_add_pi32( H, BSG2_1x(Ey) ), CHx(Ey, Fy, Gy) ), \
|
_mm_add_pi32( Hy, BSG2_1y(Ey) ), CHy(Ey, Fy, Gy) ), \
|
||||||
_mm_set1_pi32( K256[( (j)+(i) )] ) ), Wy[i] ); \
|
_mm_set1_pi32( K256[( (j)+(i) )] ) ), Wy[i] ); \
|
||||||
T1z = Hz + BSG2_1z( Ez ) + CHz( Ez, Fz, Gz ) + K256[ ((j)+(i)) ] + Wz[i]; \
|
T1z = Hz + BSG2_1z( Ez ) + CHz( Ez, Fz, Gz ) + K256[ ((j)+(i)) ] + Wz[i]; \
|
||||||
T2x = _mm256_add_epi32( BSG2_0x(Ax), MAJx(Ax, Bx, Cx) ); \
|
T2x = _mm256_add_epi32( BSG2_0x(Ax), MAJx(Ax, Bx, Cx) ); \
|
||||||
T2y = _mm256_add_epi32( BSG2_0y(Ay), MAJy(Ay, By, Cy) ); \
|
T2y = _mm_add_pi32( BSG2_0y(Ay), MAJy(Ay, By, Cy) ); \
|
||||||
T2z = BSG2_0z( Az ) + MAJz( Az, Bz, Cz ); \ \
|
T2z = BSG2_0z( Az ) + MAJz( Az, Bz, Cz ); \
|
||||||
Dx = _mm256_add_epi32( Dx, T1x ); \
|
Dx = _mm256_add_epi32( Dx, T1x ); \
|
||||||
Dy = _mm256_add_epi32( Dy, T1y ); \
|
Dy = _mm_add_pi32( Dy, T1y ); \
|
||||||
Dz = Dz + T1z; \
|
Dz = Dz + T1z; \
|
||||||
Hx = _mm256_add_epi32( T1x, T2x ); \
|
Hx = _mm256_add_epi32( T1x, T2x ); \
|
||||||
Hy = _mm256_add_epi32( T1y, T2y ); \
|
Hy = _mm_add_pi32( T1y, T2y ); \
|
||||||
Hz = T1z + T2z; \
|
Hz = T1z + T2z; \
|
||||||
} while (0)
|
} while (0)
|
||||||
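The step macro above performs the same T1/T2 round update at all three widths in lock step. Stripped to one lane, the scalar round it mirrors looks like the sketch below, built only from the z-suffix helpers already defined in this file (CHz, MAJz, BSG2_*z) and the K256/Wz arrays used by the macro.

// One scalar SHA-256 round, the single-lane shadow of SHA2s_11WAY_STEP.
#define REF_SHA256_STEP( A, B, C, D, E, F, G, H, i, j ) \
do { \
   uint32_t T1 = H + BSG2_1z( E ) + CHz( E, F, G ) + K256[ (j)+(i) ] + Wz[i]; \
   uint32_t T2 = BSG2_0z( A ) + MAJz( A, B, C ); \
   D += T1; \
   H  = T1 + T2; \
} while (0)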
|
|
||||||
sha256_8way_round( __m256i *inx, __m256i rx[8], __m64 *iny, __m64 *ry[8],
|
void sha256_11way_round( __m256i *inx, __m256i rx[8], __m64 *iny, __m64 ry[8],
|
||||||
uint32_t inz, uint32_t *rz[8] )
|
uint32_t *inz, uint32_t rz[8] )
|
||||||
{
|
{
|
||||||
__m256i Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx;
|
__m256i Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx;
|
||||||
__m256i Wx[16];
|
__m256i Wx[16];
|
||||||
@@ -169,43 +171,52 @@ sha256_8way_round( __m256i *inx, __m256i rx[8], __m64 *iny, __m64 *ry[8],
|
|||||||
|
|
||||||
Wx[ 6] = mm256_bswap_32( inx[ 6] );
|
Wx[ 6] = mm256_bswap_32( inx[ 6] );
|
||||||
Wy[ 6] = mm64_bswap_32( iny[ 6] );
|
Wy[ 6] = mm64_bswap_32( iny[ 6] );
|
||||||
Wz[ 6] = bswap_32( inx[ 6] );
|
Wz[ 6] = bswap_32( inz[ 6] );
|
||||||
|
|
||||||
Wx[ 7] = mm256_bswap_32( inx[ 7] );
|
Wx[ 7] = mm256_bswap_32( inx[ 7] );
|
||||||
Wy[ 7] = mm64_bswap_32( iny[ 7] );
|
Wy[ 7] = mm64_bswap_32( iny[ 7] );
|
||||||
Wz[ 7] = bswap_32( inx[ 7] );
|
Wz[ 7] = bswap_32( inz[ 7] );
|
||||||
|
|
||||||
Wx[ 8] = mm256_bswap_32( inx[ 8] );
|
Wx[ 8] = mm256_bswap_32( inx[ 8] );
|
||||||
Wy[ 8] = mm64_bswap_32( iny[ 8] );
|
Wy[ 8] = mm64_bswap_32( iny[ 8] );
|
||||||
Wz[ 8] = bswap_32( inx[ 8] );
|
Wz[ 8] = bswap_32( inz[ 8] );
|
||||||
|
|
||||||
Wx[ 9] = mm256_bswap_32( inx[ 9] );
|
Wx[ 9] = mm256_bswap_32( inx[ 9] );
|
||||||
Wy[ 9] = mm64_bswap_32( iny[ 9] );
|
Wy[ 9] = mm64_bswap_32( iny[ 9] );
|
||||||
Wz[ 9] = bswap_32( inx[ 9] );
|
Wz[ 9] = bswap_32( inz[ 9] );
|
||||||
|
|
||||||
Wx[10] = mm256_bswap_32( inx[10] );
|
Wx[10] = mm256_bswap_32( inx[10] );
|
||||||
Wy[10] = mm64_bswap_32( iny[10] );
|
Wy[10] = mm64_bswap_32( iny[10] );
|
||||||
Wz[10] = bswap_32( inx[10] );
|
Wz[10] = bswap_32( inz[10] );
|
||||||
|
|
||||||
Wx[11] = mm256_bswap_32( inx[11] );
|
Wx[11] = mm256_bswap_32( inx[11] );
|
||||||
Wy[11] = mm64_bswap_32( iny[11] );
|
Wy[11] = mm64_bswap_32( iny[11] );
|
||||||
Wz[11] = bswap_32( inx[11] );
|
Wz[11] = bswap_32( inz[11] );
|
||||||
|
|
||||||
Wx[12] = mm256_bswap_32( inx[12] );
|
Wx[12] = mm256_bswap_32( inx[12] );
|
||||||
Wy[12] = mm64_bswap_32( iny[12] );
|
Wy[12] = mm64_bswap_32( iny[12] );
|
||||||
Wz[12] = bswap_32( inx[12] );
|
Wz[12] = bswap_32( inz[12] );
|
||||||
|
|
||||||
Wx[13] = mm256_bswap_32( inx[13] );
|
Wx[13] = mm256_bswap_32( inx[13] );
|
||||||
Wy[13] = mm64_bswap_32( iny[13] );
|
Wy[13] = mm64_bswap_32( iny[13] );
|
||||||
Wz[13] = bswap_32( inx[13] );
|
Wz[13] = bswap_32( inz[13] );
|
||||||
|
|
||||||
Wx[14] = mm256_bswap_32( inx[14] );
|
Wx[14] = mm256_bswap_32( inx[14] );
|
||||||
Wy[14] = mm64_bswap_32( iny[14] );
|
Wy[14] = mm64_bswap_32( iny[14] );
|
||||||
Wz[14] = bswap_32( inx[14] );
|
Wz[14] = bswap_32( inz[14] );
|
||||||
|
|
||||||
Wx[15] = mm256_bswap_32( inx[15] );
|
Wx[15] = mm256_bswap_32( inx[15] );
|
||||||
Wy[15] = mm64_bswap_32( iny[15] );
|
Wy[15] = mm64_bswap_32( iny[15] );
|
||||||
Wz[15] = bswap_32( inx[15] );
|
Wz[15] = bswap_32( inz[15] );
|
||||||
|
|
||||||
|
Ax = rx[0]; Ay = ry[0]; Az = rz[0];
|
||||||
|
Bx = rx[1]; By = ry[1]; Bz = rz[1];
|
||||||
|
Cx = rx[2]; Cy = ry[2]; Cz = rz[2];
|
||||||
|
Dx = rx[3]; Dy = ry[3]; Dz = rz[3];
|
||||||
|
Ex = rx[4]; Ey = ry[4]; Ez = rz[4];
|
||||||
|
Fx = rx[5]; Fy = ry[5]; Fz = rz[5];
|
||||||
|
Gx = rx[6]; Gy = ry[6]; Gz = rz[6];
|
||||||
|
Hx = rx[7]; Hy = ry[7]; Hz = rz[7];
|
||||||
|
|
||||||
SHA2s_11WAY_STEP( Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx,
|
SHA2s_11WAY_STEP( Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx,
|
||||||
Ay, By, Cy, Dy, Ey, Fy, Gy, Hy,
|
Ay, By, Cy, Dy, Ey, Fy, Gy, Hy,
|
||||||
@@ -325,52 +336,52 @@ sha256_8way_round( __m256i *inx, __m256i rx[8], __m64 *iny, __m64 *ry[8],
|
|||||||
|
|
||||||
SHA2s_11WAY_STEP( Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx,
|
SHA2s_11WAY_STEP( Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx,
|
||||||
Ay, By, Cy, Dy, Ey, Fy, Gy, Hy,
|
Ay, By, Cy, Dy, Ey, Fy, Gy, Hy,
|
||||||
Az, By, Cz, Dz, Ez, Fy, Gz, Hz, 0, j );
|
Az, Bz, Cz, Dz, Ez, Fz, Gz, Hz, 0, j );
|
||||||
SHA2s_11WAY_STEP( Hx, Ax, Bx, Cx, Dx, Ex, Fx, Gx,
|
SHA2s_11WAY_STEP( Hx, Ax, Bx, Cx, Dx, Ex, Fx, Gx,
|
||||||
Hy, Ay, By, Cy, Dy, Ey, Fy, Gy,
|
Hy, Ay, By, Cy, Dy, Ey, Fy, Gy,
|
||||||
HZ, Az, By, Cz, Dz, Ez, Fy, Gz, 1, j );
|
Hz, Az, Bz, Cz, Dz, Ez, Fz, Gz, 1, j );
|
||||||
SHA2s_11WAY_STEP( Gx, Hx, Ax, Bx, Cx, Dx, Ex, Fx,
|
SHA2s_11WAY_STEP( Gx, Hx, Ax, Bx, Cx, Dx, Ex, Fx,
|
||||||
Gy, Hy, Ay, By, Cy, Dy, Ey, Fy,
|
Gy, Hy, Ay, By, Cy, Dy, Ey, Fy,
|
||||||
Gz, HZ, Az, By, Cz, Dz, Ez, Fy, 2, j );
|
Gz, Hz, Az, Bz, Cz, Dz, Ez, Fz, 2, j );
|
||||||
SHA2s_11WAY_STEP( Fx, Gx, Hx, Ax, Bx, Cx, Dx, Ex,
|
SHA2s_11WAY_STEP( Fx, Gx, Hx, Ax, Bx, Cx, Dx, Ex,
|
||||||
Fy, Gy, Hy, Ay, By, Cy, Dy, Ey,
|
Fy, Gy, Hy, Ay, By, Cy, Dy, Ey,
|
||||||
Fz, Gz, HZ, Az, By, Cz, Dz, Ez, 3, j );
|
Fz, Gz, Hz, Az, Bz, Cz, Dz, Ez, 3, j );
|
||||||
SHA2s_11WAY_STEP( Ex, Fx, Gx, Hx, Ax, Bx, Cx, Dx,
|
SHA2s_11WAY_STEP( Ex, Fx, Gx, Hx, Ax, Bx, Cx, Dx,
|
||||||
Ey, Fy, Gy, Hy, Ay, By, Cy, Dy,
|
Ey, Fy, Gy, Hy, Ay, By, Cy, Dy,
|
||||||
Ez, Fz, Gz, HZ, Az, By, Cz, Dz, 4, j );
|
Ez, Fz, Gz, Hz, Az, Bz, Cz, Dz, 4, j );
|
||||||
SHA2s_11WAY_STEP( Dx, Ex, Fx, Gx, Hx, Ax, Bx, Cx,
|
SHA2s_11WAY_STEP( Dx, Ex, Fx, Gx, Hx, Ax, Bx, Cx,
|
||||||
Dy, Ey, Fy, Gy, Hy, Ay, By, Cy,
|
Dy, Ey, Fy, Gy, Hy, Ay, By, Cy,
|
||||||
Dz, Ez, Fz, Gz, HZ, Az, By, Cz, 5, j );
|
Dz, Ez, Fz, Gz, Hz, Az, Bz, Cz, 5, j );
|
||||||
SHA2s_11WAY_STEP( Cx, Dx, Ex, Fx, Gx, Hx, Ax, Bx,
|
SHA2s_11WAY_STEP( Cx, Dx, Ex, Fx, Gx, Hx, Ax, Bx,
|
||||||
Cy, Dy, Ey, Fy, Gy, Hy, Ay, By,
|
Cy, Dy, Ey, Fy, Gy, Hy, Ay, By,
|
||||||
Cz, Dz, Ez, Fz, Gz, HZ, Az, By, 6, j );
|
Cz, Dz, Ez, Fz, Gz, Hz, Az, Bz, 6, j );
|
||||||
SHA2s_11WAY_STEP( Bx, Cx, Dx, Ex, Fx, Gx, Hx, Ax,
|
SHA2s_11WAY_STEP( Bx, Cx, Dx, Ex, Fx, Gx, Hx, Ax,
|
||||||
By, Cy, Dy, Ey, Fy, Gy, Hy, Ay,
|
By, Cy, Dy, Ey, Fy, Gy, Hy, Ay,
|
||||||
Bz, Cz, Dz, Ez, Fz, Gz, HZ, Az, 7, j );
|
Bz, Cz, Dz, Ez, Fz, Gz, Hz, Az, 7, j );
|
||||||
SHA2s_11WAY_STEP( Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx,
|
SHA2s_11WAY_STEP( Ax, Bx, Cx, Dx, Ex, Fx, Gx, Hx,
|
||||||
Ay, By, Cy, Dy, Ey, Fy, Gy, Hy,
|
Ay, By, Cy, Dy, Ey, Fy, Gy, Hy,
|
||||||
Az, By, Cz, Dz, Ez, Fy, Gz, Hz, 8, j );
|
Az, Bz, Cz, Dz, Ez, Fz, Gz, Hz, 8, j );
|
||||||
SHA2s_11WAY_STEP( Hx, Ax, Bx, Cx, Dx, Ex, Fx, Gx,
|
SHA2s_11WAY_STEP( Hx, Ax, Bx, Cx, Dx, Ex, Fx, Gx,
|
||||||
Hy, Ay, By, Cy, Dy, Ey, Fy, Gy,
|
Hy, Ay, By, Cy, Dy, Ey, Fy, Gy,
|
||||||
HZ, Az, By, Cz, Dz, Ez, Fy, Gz, 9, j );
|
Hz, Az, Bz, Cz, Dz, Ez, Fz, Gz, 9, j );
|
||||||
SHA2s_11WAY_STEP( Gx, Hx, Ax, Bx, Cx, Dx, Ex, Fx,
|
SHA2s_11WAY_STEP( Gx, Hx, Ax, Bx, Cx, Dx, Ex, Fx,
|
||||||
Gy, Hy, Ay, By, Cy, Dy, Ey, Fy,
|
Gy, Hy, Ay, By, Cy, Dy, Ey, Fy,
|
||||||
Gz, HZ, Az, By, Cz, Dz, Ez, Fy, 10, j );
|
Gz, Hz, Az, Bz, Cz, Dz, Ez, Fz, 10, j );
|
||||||
SHA2s_11WAY_STEP( Fx, Gx, Hx, Ax, Bx, Cx, Dx, Ex,
|
SHA2s_11WAY_STEP( Fx, Gx, Hx, Ax, Bx, Cx, Dx, Ex,
|
||||||
Fy, Gy, Hy, Ay, By, Cy, Dy, Ey,
|
Fy, Gy, Hy, Ay, By, Cy, Dy, Ey,
|
||||||
Fz, Gz, HZ, Az, By, Cz, Dz, Ez, 11, j );
|
Fz, Gz, Hz, Az, Bz, Cz, Dz, Ez, 11, j );
|
||||||
SHA2s_11WAY_STEP( Ex, Fx, Gx, Hx, Ax, Bx, Cx, Dx,
|
SHA2s_11WAY_STEP( Ex, Fx, Gx, Hx, Ax, Bx, Cx, Dx,
|
||||||
Ey, Fy, Gy, Hy, Ay, By, Cy, Dy,
|
Ey, Fy, Gy, Hy, Ay, By, Cy, Dy,
|
||||||
Ez, Fz, Gz, HZ, Az, By, Cz, Dz, 12, j );
|
Ez, Fz, Gz, Hz, Az, Bz, Cz, Dz, 12, j );
|
||||||
SHA2s_11WAY_STEP( Dx, Ex, Fx, Gx, Hx, Ax, Bx, Cx,
|
SHA2s_11WAY_STEP( Dx, Ex, Fx, Gx, Hx, Ax, Bx, Cx,
|
||||||
Dy, Ey, Fy, Gy, Hy, Ay, By, Cy,
|
Dy, Ey, Fy, Gy, Hy, Ay, By, Cy,
|
||||||
Dz, Ez, Fz, Gz, HZ, Az, By, Cz, 13, j );
|
Dz, Ez, Fz, Gz, Hz, Az, Bz, Cz, 13, j );
|
||||||
SHA2s_11WAY_STEP( Cx, Dx, Ex, Fx, Gx, Hx, Ax, Bx,
|
SHA2s_11WAY_STEP( Cx, Dx, Ex, Fx, Gx, Hx, Ax, Bx,
|
||||||
Cy, Dy, Ey, Fy, Gy, Hy, Ay, By,
|
Cy, Dy, Ey, Fy, Gy, Hy, Ay, By,
|
||||||
Cz, Dz, Ez, Fz, Gz, HZ, Az, By, 14, j );
|
Cz, Dz, Ez, Fz, Gz, Hz, Az, Bz, 14, j );
|
||||||
SHA2s_11WAY_STEP( Bx, Cx, Dx, Ex, Fx, Gx, Hx, Ax,
|
SHA2s_11WAY_STEP( Bx, Cx, Dx, Ex, Fx, Gx, Hx, Ax,
|
||||||
By, Cy, Dy, Ey, Fy, Gy, Hy, Ay,
|
By, Cy, Dy, Ey, Fy, Gy, Hy, Ay,
|
||||||
Bz, Cz, Dz, Ez, Fz, Gz, HZ, Az, 15, j );
|
Bz, Cz, Dz, Ez, Fz, Gz, Hz, Az, 15, j );
|
||||||
}
|
}
|
||||||
|
|
||||||
rx[0] = _mm256_add_epi32( rx[0], Ax );
|
rx[0] = _mm256_add_epi32( rx[0], Ax );
|
||||||
@@ -384,7 +395,7 @@ sha256_8way_round( __m256i *inx, __m256i rx[8], __m64 *iny, __m64 *ry[8],
|
|||||||
rz[3] = rz[3]+ Dz;
|
rz[3] = rz[3]+ Dz;
|
||||||
rx[4] = _mm256_add_epi32( rx[4], Ex );
|
rx[4] = _mm256_add_epi32( rx[4], Ex );
|
||||||
ry[4] = _mm_add_pi32( ry[4], Ey );
|
ry[4] = _mm_add_pi32( ry[4], Ey );
|
||||||
rz[4] = rz[4], Ez;
|
rz[4] = rz[4]+ Ez;
|
||||||
rx[5] = _mm256_add_epi32( rx[5], Fx );
|
rx[5] = _mm256_add_epi32( rx[5], Fx );
|
||||||
ry[5] = _mm_add_pi32( ry[5], Fy );
|
ry[5] = _mm_add_pi32( ry[5], Fy );
|
||||||
rz[5] = rz[5]+ Fz;
|
rz[5] = rz[5]+ Fz;
|
||||||
@@ -397,7 +408,7 @@ sha256_8way_round( __m256i *inx, __m256i rx[8], __m64 *iny, __m64 *ry[8],
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void sha256_8way_init( sha256_11way_context *ctx )
|
void sha256_11way_init( sha256_11way_context *ctx )
|
||||||
{
|
{
|
||||||
ctx->count_high = ctx->count_low = 0;
|
ctx->count_high = ctx->count_low = 0;
|
||||||
ctx->valx[0] = _mm256_set1_epi32( H256[0] );
|
ctx->valx[0] = _mm256_set1_epi32( H256[0] );
|
||||||
@@ -416,12 +427,12 @@ void sha256_8way_init( sha256_11way_context *ctx )
|
|||||||
ctx->valy[6] = _mm_set1_pi32( H256[0] );
|
ctx->valy[6] = _mm_set1_pi32( H256[0] );
|
||||||
ctx->valx[7] = _mm256_set1_epi32( H256[0] );
|
ctx->valx[7] = _mm256_set1_epi32( H256[0] );
|
||||||
ctx->valy[7] = _mm_set1_pi32( H256[0] );
|
ctx->valy[7] = _mm_set1_pi32( H256[0] );
|
||||||
memscpy( ctx->valz, H256, 32 );
|
memcpy( ctx->valz, H256, 32 );
|
||||||
}
|
}
|
||||||
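The init routine above loads every lane from the H256 table, which holds the standard SHA-256 initial hash values from FIPS 180-4 (the first 32 bits of the fractional parts of the square roots of the first eight primes). A compact sketch of the intended per-word broadcast, written as a loop rather than the unrolled form in the file:

// Sketch: broadcast the FIPS 180-4 initial values into all 11 lanes.
static void lanes_init( __m256i vx[8], __m64 vy[8], uint32_t vz[8] )
{
   for ( int i = 0; i < 8; i++ )
   {
      vx[i] = _mm256_set1_epi32( H256[i] );   // 8 AVX2 lanes
      vy[i] = _mm_set1_pi32( H256[i] );       // 2 MMX lanes
      vz[i] = H256[i];                        // scalar lane
   }
}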
|
|
||||||
|
|
||||||
void sha256_11way( sha256_11way_context *ctx, const void *datax,
|
void sha256_11way_update( sha256_11way_context *ctx, const void *datax,
|
||||||
const void *datay, const void *dataz, size_t len )
|
const void *datay, const void *dataz, size_t len )
|
||||||
{
|
{
|
||||||
__m256i *vdatax = (__m256i*) datax;
|
__m256i *vdatax = (__m256i*) datax;
|
||||||
__m64 *vdatay = (__m64*) datay;
|
__m64 *vdatay = (__m64*) datay;
|
||||||
@@ -439,27 +450,27 @@ void sha256_11way( sha256_11way_context *ctx, const void *datax,
|
|||||||
if ( clen > len )
|
if ( clen > len )
|
||||||
clen = len;
|
clen = len;
|
||||||
memcpy_256( ctx->bufx + (ptr>>2), vdatax + (ptr>>2), clen>>2 );
|
memcpy_256( ctx->bufx + (ptr>>2), vdatax + (ptr>>2), clen>>2 );
|
||||||
memcpy_64 ( ctx->bufy + (ptr>>2), vdatay + (ptr>>2), clen>>2 );
|
memcpy_m64( ctx->bufy + (ptr>>2), vdatay + (ptr>>2), clen>>2 );
|
||||||
memcpy ( ctx->bufz + ptr, sdataz + ptr, clen );
|
memcpy ( ctx->bufz + ptr, idataz + ptr, clen );
|
||||||
ptr += clen;
|
ptr += clen;
|
||||||
len -= clen;
|
len -= clen;
|
||||||
if ( ptr == buf_size )
|
if ( ptr == buf_size )
|
||||||
{
|
{
|
||||||
sha256_11way_round( ctx->bufx, ctx->valx,
|
sha256_11way_round( ctx->bufx, ctx->valx,
|
||||||
ctx->bufy, ctx->valy,
|
ctx->bufy, ctx->valy,
|
||||||
ctx->bufz, ctx->valzx, );
|
ctx->bufz, ctx->valz );
|
||||||
ptr = 0;
|
ptr = 0;
|
||||||
}
|
}
|
||||||
clow = sc->count_low;
|
clow = ctx->count_low;
|
||||||
clow2 = clow + clen;
|
clow2 = clow + clen;
|
||||||
sc->count_low = clow2;
|
ctx->count_low = clow2;
|
||||||
if ( clow2 < clow )
|
if ( clow2 < clow )
|
||||||
sc->count_high++;
|
ctx->count_high++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
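The update routine keeps the running message length in two 32-bit counters and propagates the carry when the low word wraps. At close time those counters become the 64-bit bit count appended to the final block; a sketch of the usual sph-style conversion is below (an assumption about the arrangement, since those exact lines fall outside this hunk):

// Sketch: turn the two 32-bit byte counters into the 64-bit bit count
// written big-endian into the last 8 bytes of the padding block.
uint32_t low  = ctx->count_low;
uint32_t high = ( ctx->count_high << 3 ) | ( low >> 29 );
low <<= 3;
// 'high' then 'low' are stored as the final two big-endian words.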
|
|
||||||
|
|
||||||
void sha256_11way_close( sha256_11way_context *ctx, void *dstx, void dsty,
|
void sha256_11way_close( sha256_11way_context *ctx, void *dstx, void *dsty,
|
||||||
void *dstz)
|
void *dstz)
|
||||||
{
|
{
|
||||||
unsigned ptr, u;
|
unsigned ptr, u;
|
||||||
@@ -476,20 +487,20 @@ void sha256_11way_close( sha256_11way_context *ctx, void *dstx, void dsty,
|
|||||||
if ( ptr > pad )
|
if ( ptr > pad )
|
||||||
{
|
{
|
||||||
memset_zero_256( ctx->bufx + (ptr>>2), (buf_size - ptr) >> 2 );
|
memset_zero_256( ctx->bufx + (ptr>>2), (buf_size - ptr) >> 2 );
|
||||||
memset_zero_64( ctx->bufy + (ptr>>2), (buf_size - ptr) >> 2 );
|
memset_zero_m64( ctx->bufy + (ptr>>2), (buf_size - ptr) >> 2 );
|
||||||
memset( ctx->bufz + (ptr>>2), 0, (buf_size - ptr) >> 2 );
|
memset( ctx->bufz + (ptr>>2), 0, (buf_size - ptr) >> 2 );
|
||||||
sha256_11way_round( ctx->bufx, ctx->valx,
|
sha256_11way_round( ctx->bufx, ctx->valx,
|
||||||
ctx->bufy, ctx->valy,
|
ctx->bufy, ctx->valy,
|
||||||
ctx->bufz, ctx->valz );
|
ctx->bufz, ctx->valz );
|
||||||
memset_zero_256( ctx->bufx, pad >> 2 );
|
memset_zero_256( ctx->bufx, pad >> 2 );
|
||||||
memset_zero_64( ctx->bufy, pad >> 2 );
|
memset_zero_m64( ctx->bufy, pad >> 2 );
|
||||||
memset( ctx->bufz, 0, pad >> 2 );
|
memset( ctx->bufz, 0, pad >> 2 );
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
memset_zero_256( ctx->bufx + (ptr>>2), (pad - ptr) >> 2 );
|
memset_zero_256( ctx->bufx + (ptr>>2), (pad - ptr) >> 2 );
|
||||||
memset_zero_64( ctx->bufy + (ptr>>2), (pad - ptr) >> 2 );
|
memset_zero_m64( ctx->bufy + (ptr>>2), (pad - ptr) >> 2 );
|
||||||
memset( ctx->bufz + (ptr>>2), 0 (pad - ptr) >> 2 );
|
memset( ctx->bufz + (ptr>>2), 0, (pad - ptr) >> 2 );
|
||||||
}
|
}
|
||||||
|
|
||||||
low = ctx->count_low;
|
low = ctx->count_low;
|
||||||
@@ -511,9 +522,9 @@ void sha256_11way_close( sha256_11way_context *ctx, void *dstx, void dsty,
|
|||||||
ctx->bufz[ ( pad+4 ) >> 2 ] =
|
ctx->bufz[ ( pad+4 ) >> 2 ] =
|
||||||
bswap_32( low );
|
bswap_32( low );
|
||||||
|
|
||||||
sha256_8way_round( ctx->bufx, ctx->valx,
|
sha256_11way_round( ctx->bufx, ctx->valx,
|
||||||
ctx->bufy, ctx->valy,
|
ctx->bufy, ctx->valy,
|
||||||
ctx->bufz, ctx->valz, );
|
ctx->bufz, ctx->valz );
|
||||||
|
|
||||||
for ( u = 0; u < 8; u ++ )
|
for ( u = 0; u < 8; u ++ )
|
||||||
{
|
{
|
||||||
@@ -523,4 +534,5 @@ void sha256_11way_close( sha256_11way_context *ctx, void *dstx, void dsty,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
#endif // 0
|
@@ -36,7 +36,6 @@ int scanhash_sha256q_8way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
{
|
{
|
||||||
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
|
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
|
||||||
uint32_t hash[8*8] __attribute__ ((aligned (32)));
|
uint32_t hash[8*8] __attribute__ ((aligned (32)));
|
||||||
uint32_t edata[20] __attribute__ ((aligned (32)));;
|
|
||||||
uint32_t *pdata = work->data;
|
uint32_t *pdata = work->data;
|
||||||
uint32_t *ptarget = work->target;
|
uint32_t *ptarget = work->target;
|
||||||
const uint32_t Htarg = ptarget[7];
|
const uint32_t Htarg = ptarget[7];
|
||||||
@@ -59,12 +58,7 @@ int scanhash_sha256q_8way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
0 };
|
0 };
|
||||||
|
|
||||||
// Need big endian data
|
// Need big endian data
|
||||||
casti_m256i( edata, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) );
|
mm256_bswap_intrlv80_8x32( vdata, pdata );
|
||||||
casti_m256i( edata, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) );
|
|
||||||
casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
|
|
||||||
|
|
||||||
mm256_interleave_8x32( vdata, edata, edata, edata, edata,
|
|
||||||
edata, edata, edata, edata, 640 );
|
|
||||||
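The replaced lines byte-swapped the 80-byte block header into a scratch buffer and then interleaved eight copies; the new mm256_bswap_intrlv80_8x32 helper does both in one pass. As an illustration of the resulting layout, a plain reference loop is sketched below, assuming the word-major arrangement the 8x32 interleave helpers use (word i of lane l at index i*8 + l); it is not the optimized helper itself.

// Reference behaviour: bswap an 80-byte header and replicate it into 8 lanes.
static void bswap_intrlv80_8x32_ref( uint32_t *vdata, const uint32_t *pdata )
{
   for ( int i = 0; i < 20; i++ )                   // 20 words = 80 bytes
   {
      uint32_t w = __builtin_bswap32( pdata[i] );   // need big endian data
      for ( int lane = 0; lane < 8; lane++ )
         vdata[ i*8 + lane ] = w;                   // same header in every lane
   }
}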
sha256_8way_init( &sha256_ctx8 );
|
sha256_8way_init( &sha256_ctx8 );
|
||||||
sha256_8way( &sha256_ctx8, vdata, 64 );
|
sha256_8way( &sha256_ctx8, vdata, 64 );
|
||||||
|
|
||||||
@@ -73,11 +67,10 @@ int scanhash_sha256q_8way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
uint32_t mask = masks[m];
|
uint32_t mask = masks[m];
|
||||||
do
|
do
|
||||||
{
|
{
|
||||||
*noncev = mm256_bswap_32(
|
*noncev = mm256_bswap_32(
|
||||||
_mm256_set_epi32( n+7, n+6, n+5, n+4, n+3, n+2, n+1, n ) );
|
_mm256_set_epi32( n+7, n+6, n+5, n+4, n+3, n+2, n+1, n ) );
|
||||||
|
|
||||||
pdata[19] = n;
|
|
||||||
|
|
||||||
|
pdata[19] = n;
|
||||||
sha256q_8way_hash( hash, vdata );
|
sha256q_8way_hash( hash, vdata );
|
||||||
|
|
||||||
uint32_t *hash7 = &(hash[7<<3]);
|
uint32_t *hash7 = &(hash[7<<3]);
|
||||||
@@ -86,27 +79,19 @@ int scanhash_sha256q_8way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
if ( !( hash7[ lane ] & mask ) )
|
if ( !( hash7[ lane ] & mask ) )
|
||||||
{
|
{
|
||||||
// deinterleave hash for lane
|
// deinterleave hash for lane
|
||||||
uint32_t lane_hash[8];
|
uint32_t lane_hash[8];
|
||||||
mm256_extract_lane_8x32( lane_hash, hash, lane, 256 );
|
mm256_extract_lane_8x32( lane_hash, hash, lane, 256 );
|
||||||
|
|
||||||
if ( fulltest( lane_hash, ptarget ) )
|
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
|
||||||
{
|
{
|
||||||
pdata[19] = n + lane;
|
pdata[19] = n + lane;
|
||||||
work_set_target_ratio( work, lane_hash );
|
submit_solution( work, lane_hash, mythr, lane );
|
||||||
if ( submit_work( mythr, work ) )
|
}
|
||||||
applog( LOG_NOTICE, "Share %d submitted by thread %d, lane %d.",
|
}
|
||||||
accepted_share_count + rejected_share_count + 1,
|
|
||||||
thr_id, lane );
|
|
||||||
else
|
|
||||||
applog( LOG_WARNING, "Failed to submit share." );
|
|
||||||
}
|
|
||||||
}
|
|
||||||
n += 8;
|
n += 8;
|
||||||
|
|
||||||
} while ( (n < max_nonce-10) && !work_restart[thr_id].restart );
|
} while ( (n < max_nonce-10) && !work_restart[thr_id].restart );
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
*hashes_done = n - first_nonce + 1;
|
*hashes_done = n - first_nonce + 1;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
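In the scan loop above, hash7 = &(hash[7<<3]) works because the 8-way result stays interleaved 8x32: word w of lane l sits at hash[ w*8 + l ], so the eight copies of output word 7, the most significant word compared against the target, are contiguous at offset 56. A short sketch of that quick-reject pattern, assuming the same layout:

// Quick reject on the high hash word before paying for a full lane test.
uint32_t *hash7 = &hash[ 7*8 ];               // same address as hash + (7<<3)
for ( int lane = 0; lane < 8; lane++ )
   if ( !( hash7[ lane ] & mask ) )
   {
      // only now deinterleave this lane and run the full target comparison
   }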
@@ -146,7 +131,6 @@ int scanhash_sha256q_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
uint32_t hash[8*4] __attribute__ ((aligned (32)));
|
uint32_t hash[8*4] __attribute__ ((aligned (32)));
|
||||||
uint32_t *hash7 = &(hash[7<<2]);
|
uint32_t *hash7 = &(hash[7<<2]);
|
||||||
uint32_t lane_hash[8];
|
uint32_t lane_hash[8];
|
||||||
uint32_t edata[20] __attribute__ ((aligned (32)));;
|
|
||||||
uint32_t *pdata = work->data;
|
uint32_t *pdata = work->data;
|
||||||
uint32_t *ptarget = work->target;
|
uint32_t *ptarget = work->target;
|
||||||
const uint32_t Htarg = ptarget[7];
|
const uint32_t Htarg = ptarget[7];
|
||||||
@@ -168,13 +152,7 @@ int scanhash_sha256q_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
0xFFFF0000,
|
0xFFFF0000,
|
||||||
0 };
|
0 };
|
||||||
|
|
||||||
casti_m128i( edata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) );
|
mm128_bswap_intrlv80_4x32( vdata, pdata );
|
||||||
casti_m128i( edata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) );
|
|
||||||
casti_m128i( edata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) );
|
|
||||||
casti_m128i( edata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) );
|
|
||||||
casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
|
|
||||||
|
|
||||||
mm128_interleave_4x32( vdata, edata, edata, edata, edata, 640 );
|
|
||||||
sha256_4way_init( &sha256_ctx4 );
|
sha256_4way_init( &sha256_ctx4 );
|
||||||
sha256_4way( &sha256_ctx4, vdata, 64 );
|
sha256_4way( &sha256_ctx4, vdata, 64 );
|
||||||
|
|
||||||
@@ -183,7 +161,7 @@ int scanhash_sha256q_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
uint32_t mask = masks[m];
|
uint32_t mask = masks[m];
|
||||||
do {
|
do {
|
||||||
*noncev = mm128_bswap_32( _mm_set_epi32( n+3,n+2,n+1,n ) );
|
*noncev = mm128_bswap_32( _mm_set_epi32( n+3,n+2,n+1,n ) );
|
||||||
pdata[19] = n;
|
pdata[19] = n;
|
||||||
|
|
||||||
sha256q_4way_hash( hash, vdata );
|
sha256q_4way_hash( hash, vdata );
|
||||||
|
|
||||||
@@ -192,25 +170,16 @@ int scanhash_sha256q_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
{
|
{
|
||||||
mm128_extract_lane_4x32( lane_hash, hash, lane, 256 );
|
mm128_extract_lane_4x32( lane_hash, hash, lane, 256 );
|
||||||
|
|
||||||
if ( fulltest( lane_hash, ptarget ) )
|
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
|
||||||
{
|
{
|
||||||
pdata[19] = n + lane;
|
pdata[19] = n + lane;
|
||||||
work_set_target_ratio( work, lane_hash );
|
submit_solution( work, lane_hash, mythr, lane );
|
||||||
if ( submit_work( mythr, work ) )
|
|
||||||
applog( LOG_NOTICE, "Share %d submitted by thread %d, lane %d.",
|
|
||||||
accepted_share_count + rejected_share_count + 1,
|
|
||||||
thr_id, lane );
|
|
||||||
else
|
|
||||||
applog( LOG_WARNING, "Failed to submit share." );
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
n += 4;
|
||||||
n += 4;
|
|
||||||
|
|
||||||
} while ( (n < max_nonce - 4) && !work_restart[thr_id].restart );
|
} while ( (n < max_nonce - 4) && !work_restart[thr_id].restart );
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
*hashes_done = n - first_nonce + 1;
|
*hashes_done = n - first_nonce + 1;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@@ -5,6 +5,137 @@
|
|||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
#include "sha2-hash-4way.h"
|
#include "sha2-hash-4way.h"
|
||||||
|
|
||||||
|
#if defined(SHA256T_11WAY)
|
||||||
|
|
||||||
|
static __thread sha256_11way_context sha256_ctx11 __attribute__ ((aligned (64)));
|
||||||
|
|
||||||
|
void sha256t_11way_hash( void *outx, void *outy, void *outz, const void *inpx,
|
||||||
|
const void *inpy, const void*inpz )
|
||||||
|
{
|
||||||
|
uint32_t hashx[8*8] __attribute__ ((aligned (64)));
|
||||||
|
uint32_t hashy[8*2] __attribute__ ((aligned (64)));
|
||||||
|
uint32_t hashz[8] __attribute__ ((aligned (64)));
|
||||||
|
sha256_11way_context ctx;
|
||||||
|
const void *inpx64 = inpx+(64<<3);
|
||||||
|
const void *inpy64 = inpy+(64<<1);
|
||||||
|
const void *inpz64 = inpz+ 64;
|
||||||
|
|
||||||
|
memcpy( &ctx, &sha256_ctx11, sizeof ctx );
|
||||||
|
sha256_11way_update( &ctx, inpx64, inpy64, inpz64, 16 );
|
||||||
|
sha256_11way_close( &ctx, hashx, hashy, hashz );
|
||||||
|
|
||||||
|
sha256_11way_init( &ctx );
|
||||||
|
sha256_11way_update( &ctx, hashx, hashy, hashz, 32 );
|
||||||
|
sha256_11way_close( &ctx, hashx, hashy, hashz );
|
||||||
|
|
||||||
|
sha256_11way_init( &ctx );
|
||||||
|
sha256_11way_update( &ctx, hashx, hashy, hashz, 32 );
|
||||||
|
sha256_11way_close( &ctx, outx, outy, outz );
|
||||||
|
}
|
||||||
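sha256t_11way_hash resumes from a midstate saved over the first 64 header bytes, hashes the remaining 16 bytes, then runs SHA-256 twice more over the 32-byte digest. In scalar terms the algorithm is simply a triple SHA-256 of the 80-byte header; the sketch below uses a hypothetical one-shot sha256( out, in, len ) helper, which is an assumption for illustration and not an API of this code base.

// Hypothetical scalar outline of sha256t: SHA-256 applied three times.
static void sha256t_ref( uint8_t out[32], const uint8_t header[80] )
{
   uint8_t h[32];
   sha256( h,   header, 80 );   // first pass over the full block header
   sha256( h,   h,      32 );   // second pass over the 32-byte digest
   sha256( out, h,      32 );   // third pass
}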
|
|
||||||
|
int scanhash_sha256t_11way( int thr_id, struct work *work, uint32_t max_nonce,
|
||||||
|
uint64_t *hashes_done, struct thr_info *mythr )
|
||||||
|
{
|
||||||
|
uint32_t datax[20*8] __attribute__ ((aligned (64)));
|
||||||
|
uint32_t datay[20*2] __attribute__ ((aligned (32)));
|
||||||
|
uint32_t dataz[20] __attribute__ ((aligned (32)));
|
||||||
|
uint32_t hashx[8*8] __attribute__ ((aligned (32)));
|
||||||
|
uint32_t hashy[8*2] __attribute__ ((aligned (32)));
|
||||||
|
uint32_t hashz[8] __attribute__ ((aligned (32)));
|
||||||
|
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
|
||||||
|
uint32_t *hash7;
|
||||||
|
uint32_t *pdata = work->data;
|
||||||
|
uint32_t *ptarget = work->target;
|
||||||
|
const uint32_t Htarg = ptarget[7];
|
||||||
|
const uint32_t first_nonce = pdata[19];
|
||||||
|
uint32_t n = first_nonce;
|
||||||
|
__m256i *noncex = (__m256i*) datax + 19;
|
||||||
|
__m64 *noncey = (__m64*) datay + 19;
|
||||||
|
uint32_t *noncez = (uint32_t*)dataz + 19;
|
||||||
|
/* int */ thr_id = mythr->id; // thr_id arg is deprecated
|
||||||
|
int i;
|
||||||
|
const uint64_t htmax[] = { 0,
|
||||||
|
0xF,
|
||||||
|
0xFF,
|
||||||
|
0xFFF,
|
||||||
|
0xFFFF,
|
||||||
|
0x10000000 };
|
||||||
|
const uint32_t masks[] = { 0xFFFFFFFF,
|
||||||
|
0xFFFFFFF0,
|
||||||
|
0xFFFFFF00,
|
||||||
|
0xFFFFF000,
|
||||||
|
0xFFFF0000,
|
||||||
|
0 };
|
||||||
|
|
||||||
|
// Use dataz (scalar) to stage bswapped data for the vectors.
|
||||||
|
casti_m256i( dataz, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) );
|
||||||
|
casti_m256i( dataz, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) );
|
||||||
|
casti_m128i( dataz, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
|
||||||
|
|
||||||
|
mm256_intrlv_8x32( datax, dataz, dataz, dataz, dataz,
|
||||||
|
dataz, dataz, dataz, dataz, 640 );
|
||||||
|
mm64_interleave_2x32( datay, dataz, dataz, 640 );
|
||||||
|
|
||||||
|
sha256_11way_init( &sha256_ctx11 );
|
||||||
|
sha256_11way_update( &sha256_ctx11, datax, datay, dataz, 64 );
|
||||||
|
|
||||||
|
for ( int m = 0; m < 6; m++ ) if ( Htarg <= htmax[m] )
|
||||||
|
{
|
||||||
|
uint32_t mask = masks[m];
|
||||||
|
do
|
||||||
|
{
|
||||||
|
*noncex = mm256_bswap_32(
|
||||||
|
_mm256_set_epi32( n+7, n+6, n+5, n+4, n+3, n+2, n+1, n ) );
|
||||||
|
*noncey = mm64_bswap_32( _mm_set_pi32( n+9, n+8 ) );
|
||||||
|
*noncez = bswap_32( n+10 );
|
||||||
|
|
||||||
|
pdata[19] = n;
|
||||||
|
|
||||||
|
sha256t_11way_hash( hashx, hashy, hashz, datax, datay, dataz );
|
||||||
|
|
||||||
|
if ( opt_benchmark ) { n += 11; continue; }
|
||||||
|
|
||||||
|
hash7 = &(hashx[7<<3]);
|
||||||
|
for ( i = 0; i < 8; i++ ) if ( !( hash7[ i ] & mask ) )
|
||||||
|
{
|
||||||
|
// deinterleave hash for lane
|
||||||
|
mm256_extract_lane_8x32( lane_hash, hashx, i, 256 );
|
||||||
|
if ( fulltest( lane_hash, ptarget ) )
|
||||||
|
{
|
||||||
|
pdata[19] = n + i;
|
||||||
|
submit_solution( work, lane_hash, mythr, i );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
hash7 = &(hashy[7<<1]);
|
||||||
|
for( i = 0; i < 2; i++ ) if ( !( hash7[ i ] & mask ) )
|

||||||
|
|
||||||
|
{
|
||||||
|
mm64_extract_lane_2x32( lane_hash, hashy, i, 256 );
|
||||||
|
if ( fulltest( lane_hash, ptarget ) )
|
||||||
|
{
|
||||||
|
pdata[19] = n + 8 + i;
|
||||||
|
submit_solution( work, lane_hash, mythr, i+8 );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ( !(hashz[7] & mask ) && fulltest( hashz, ptarget ) )
|
||||||
|
{
|
||||||
|
pdata[19] = n+10;
|
||||||
|
submit_solution( work, hashz, mythr, 10 );
|
||||||
|
}
|
||||||
|
n += 11;
|
||||||
|
|
||||||
|
} while ( (n < max_nonce-12) && !work_restart[thr_id].restart );
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
*hashes_done = n - first_nonce + 1;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif
|
||||||
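The 11-way scanner spreads eleven consecutive nonces over the three widths: n..n+7 fill the AVX2 lanes, n+8 and n+9 the MMX lanes, n+10 the scalar lane, and the loop then steps by 11. Ignoring the byte swaps and interleaved storage, the assignment per iteration reduces to the following sketch (the nonce_x/y/z names are placeholders):

// Per-iteration nonce assignment in the 11-way scan loop (sketch).
uint32_t nonce_x[8], nonce_y[2], nonce_z;           // placeholder staging
for ( int lane = 0; lane < 8; lane++ )
   nonce_x[ lane ] = n + lane;                      // AVX2 lanes 0..7
nonce_y[0] = n + 8;   nonce_y[1] = n + 9;           // MMX lanes 8..9
nonce_z    = n + 10;                                // scalar lane 10
n += 11;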
|
|
||||||
#if defined(SHA256T_8WAY)
|
#if defined(SHA256T_8WAY)
|
||||||
|
|
||||||
static __thread sha256_8way_context sha256_ctx8 __attribute__ ((aligned (64)));
|
static __thread sha256_8way_context sha256_ctx8 __attribute__ ((aligned (64)));
|
||||||
@@ -25,15 +156,15 @@ void sha256t_8way_hash( void* output, const void* input )
|
|||||||
sha256_8way_init( &ctx );
|
sha256_8way_init( &ctx );
|
||||||
sha256_8way( &ctx, vhash, 32 );
|
sha256_8way( &ctx, vhash, 32 );
|
||||||
sha256_8way_close( &ctx, output );
|
sha256_8way_close( &ctx, output );
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int scanhash_sha256t_8way( int thr_id, struct work *work, uint32_t max_nonce,
|
int scanhash_sha256t_8way( int thr_id, struct work *work, uint32_t max_nonce,
|
||||||
uint64_t *hashes_done, struct thr_info *mythr )
|
uint64_t *hashes_done, struct thr_info *mythr )
|
||||||
{
|
{
|
||||||
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
|
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
|
||||||
uint32_t hash[8*8] __attribute__ ((aligned (32)));
|
uint32_t hash[8*8] __attribute__ ((aligned (32)));
|
||||||
uint32_t edata[20] __attribute__ ((aligned (32)));;
|
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
|
||||||
|
uint32_t *hash7 = &(hash[7<<3]);
|
||||||
uint32_t *pdata = work->data;
|
uint32_t *pdata = work->data;
|
||||||
uint32_t *ptarget = work->target;
|
uint32_t *ptarget = work->target;
|
||||||
const uint32_t Htarg = ptarget[7];
|
const uint32_t Htarg = ptarget[7];
|
||||||
@@ -56,12 +187,7 @@ int scanhash_sha256t_8way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
0 };
|
0 };
|
||||||
|
|
||||||
// Need big endian data
|
// Need big endian data
|
||||||
casti_m256i( edata, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) );
|
mm256_bswap_intrlv80_8x32( vdata, pdata );
|
||||||
casti_m256i( edata, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) );
|
|
||||||
casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
|
|
||||||
|
|
||||||
mm256_interleave_8x32( vdata, edata, edata, edata, edata,
|
|
||||||
edata, edata, edata, edata, 640 );
|
|
||||||
sha256_8way_init( &sha256_ctx8 );
|
sha256_8way_init( &sha256_ctx8 );
|
||||||
sha256_8way( &sha256_ctx8, vdata, 64 );
|
sha256_8way( &sha256_ctx8, vdata, 64 );
|
||||||
|
|
||||||
@@ -70,40 +196,25 @@ int scanhash_sha256t_8way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
uint32_t mask = masks[m];
|
uint32_t mask = masks[m];
|
||||||
do
|
do
|
||||||
{
|
{
|
||||||
*noncev = mm256_bswap_32(
|
*noncev = mm256_bswap_32( _mm256_set_epi32(
|
||||||
_mm256_set_epi32( n+7, n+6, n+5, n+4, n+3, n+2, n+1, n ) );
|
n+7,n+6,n+5,n+4,n+3,n+2,n+1,n ) );
|
||||||
|
pdata[19] = n;
|
||||||
pdata[19] = n;
|
|
||||||
|
|
||||||
sha256t_8way_hash( hash, vdata );
|
sha256t_8way_hash( hash, vdata );
|
||||||
|
|
||||||
uint32_t *hash7 = &(hash[7<<3]);
|
|
||||||
|
|
||||||
for ( int lane = 0; lane < 8; lane++ )
|
for ( int lane = 0; lane < 8; lane++ )
|
||||||
if ( !( hash7[ lane ] & mask ) )
|
if ( !( hash7[ lane ] & mask ) )
|
||||||
{
|
{
|
||||||
// deinterleave hash for lane
|
// deinterleave hash for lane
|
||||||
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
|
mm256_extract_lane_8x32( lane_hash, hash, lane, 256 );
|
||||||
mm256_extract_lane_8x32( lane_hash, hash, lane, 256 );
|
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
|
||||||
|
|
||||||
if ( fulltest( lane_hash, ptarget ) )
|
|
||||||
{
|
{
|
||||||
pdata[19] = n + lane;
|
pdata[19] = n + lane;
|
||||||
work_set_target_ratio( work, lane_hash );
|
submit_solution( work, lane_hash, mythr, lane );
|
||||||
if ( submit_work( mythr, work ) )
|
}
|
||||||
applog( LOG_NOTICE, "Share %d submitted by thread %d, lane %d.",
|
}
|
||||||
accepted_share_count + rejected_share_count + 1,
|
|
||||||
thr_id, lane );
|
|
||||||
else
|
|
||||||
applog( LOG_WARNING, "Failed to submit share." );
|
|
||||||
}
|
|
||||||
}
|
|
||||||
n += 8;
|
n += 8;
|
||||||
|
|
||||||
} while ( (n < max_nonce-10) && !work_restart[thr_id].restart );
|
} while ( (n < max_nonce-10) && !work_restart[thr_id].restart );
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
*hashes_done = n - first_nonce + 1;
|
*hashes_done = n - first_nonce + 1;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@@ -130,7 +241,6 @@ void sha256t_4way_hash( void* output, const void* input )
|
|||||||
sha256_4way_init( &ctx );
|
sha256_4way_init( &ctx );
|
||||||
sha256_4way( &ctx, vhash, 32 );
|
sha256_4way( &ctx, vhash, 32 );
|
||||||
sha256_4way_close( &ctx, output );
|
sha256_4way_close( &ctx, output );
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int scanhash_sha256t_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
int scanhash_sha256t_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
||||||
@@ -139,7 +249,6 @@ int scanhash_sha256t_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
|
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
|
||||||
uint32_t hash[8*4] __attribute__ ((aligned (32)));
|
uint32_t hash[8*4] __attribute__ ((aligned (32)));
|
||||||
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
|
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
|
||||||
uint32_t edata[20] __attribute__ ((aligned (32)));;
|
|
||||||
uint32_t *hash7 = &(hash[7<<2]);
|
uint32_t *hash7 = &(hash[7<<2]);
|
||||||
uint32_t *pdata = work->data;
|
uint32_t *pdata = work->data;
|
||||||
uint32_t *ptarget = work->target;
|
uint32_t *ptarget = work->target;
|
||||||
@@ -155,20 +264,14 @@ int scanhash_sha256t_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
0xFFF,
|
0xFFF,
|
||||||
0xFFFF,
|
0xFFFF,
|
||||||
0x10000000 };
|
0x10000000 };
|
||||||
const uint32_t masks[] = { 0xFFFFFFFF,
|
const uint32_t masks[] = { 0xFFFFFFFF,
|
||||||
0xFFFFFFF0,
|
0xFFFFFFF0,
|
||||||
0xFFFFFF00,
|
0xFFFFFF00,
|
||||||
0xFFFFF000,
|
0xFFFFF000,
|
||||||
0xFFFF0000,
|
0xFFFF0000,
|
||||||
0 };
|
0 };
|
||||||
|
|
||||||
casti_m128i( edata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) );
|
mm128_bswap_intrlv80_4x32( vdata, pdata );
|
||||||
casti_m128i( edata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) );
|
|
||||||
casti_m128i( edata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) );
|
|
||||||
casti_m128i( edata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) );
|
|
||||||
casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
|
|
||||||
|
|
||||||
mm128_interleave_4x32( vdata, edata, edata, edata, edata, 640 );
|
|
||||||
sha256_4way_init( &sha256_ctx4 );
|
sha256_4way_init( &sha256_ctx4 );
|
||||||
sha256_4way( &sha256_ctx4, vdata, 64 );
|
sha256_4way( &sha256_ctx4, vdata, 64 );
|
||||||
|
|
||||||
@@ -177,7 +280,7 @@ int scanhash_sha256t_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
uint32_t mask = masks[m];
|
uint32_t mask = masks[m];
|
||||||
do {
|
do {
|
||||||
*noncev = mm128_bswap_32( _mm_set_epi32( n+3,n+2,n+1,n ) );
|
*noncev = mm128_bswap_32( _mm_set_epi32( n+3,n+2,n+1,n ) );
|
||||||
pdata[19] = n;
|
pdata[19] = n;
|
||||||
|
|
||||||
sha256t_4way_hash( hash, vdata );
|
sha256t_4way_hash( hash, vdata );
|
||||||
|
|
||||||
@@ -185,26 +288,16 @@ int scanhash_sha256t_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
if ( !( hash7[ lane ] & mask ) )
|
if ( !( hash7[ lane ] & mask ) )
|
||||||
{
|
{
|
||||||
mm128_extract_lane_4x32( lane_hash, hash, lane, 256 );
|
mm128_extract_lane_4x32( lane_hash, hash, lane, 256 );
|
||||||
|
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
|
||||||
if ( fulltest( lane_hash, ptarget ) )
|
|
||||||
{
|
{
|
||||||
pdata[19] = n + lane;
|
pdata[19] = n + lane;
|
||||||
work_set_target_ratio( work, lane_hash );
|
submit_solution( work, lane_hash, mythr, lane );
|
||||||
if ( submit_work( mythr, work ) )
|
}
|
||||||
applog( LOG_NOTICE, "Share %d submitted by thread %d, lane %d.",
|
}
|
||||||
accepted_share_count + rejected_share_count + 1,
|
n += 4;
|
||||||
thr_id, lane );
|
|
||||||
else
|
|
||||||
applog( LOG_WARNING, "Failed to submit share." );
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
n += 4;
|
|
||||||
|
|
||||||
} while ( (n < max_nonce - 4) && !work_restart[thr_id].restart );
|
} while ( (n < max_nonce - 4) && !work_restart[thr_id].restart );
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
*hashes_done = n - first_nonce + 1;
|
*hashes_done = n - first_nonce + 1;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@@ -21,11 +21,7 @@ gate->optimizations = SHA_OPT;
|
|||||||
|
|
||||||
bool register_sha256q_algo( algo_gate_t* gate )
|
bool register_sha256q_algo( algo_gate_t* gate )
|
||||||
{
|
{
|
||||||
#if defined(SHA256T_8WAY)
|
#if defined(SHA256T_4WAY)
|
||||||
gate->optimizations = SSE2_OPT | AVX2_OPT | SHA_OPT;
|
|
||||||
gate->scanhash = (void*)&scanhash_sha256q_8way;
|
|
||||||
gate->hash = (void*)&sha256q_8way_hash;
|
|
||||||
#elif defined(SHA256T_4WAY)
|
|
||||||
gate->optimizations = SSE2_OPT | AVX2_OPT | SHA_OPT;
|
gate->optimizations = SSE2_OPT | AVX2_OPT | SHA_OPT;
|
||||||
gate->scanhash = (void*)&scanhash_sha256q_4way;
|
gate->scanhash = (void*)&scanhash_sha256q_4way;
|
||||||
gate->hash = (void*)&sha256q_4way_hash;
|
gate->hash = (void*)&sha256q_4way_hash;
|
||||||
|
@@ -6,7 +6,6 @@
|
|||||||
|
|
||||||
// Override multi way on ryzen, SHA is better.
|
// Override multi way on ryzen, SHA is better.
|
||||||
#if !defined(RYZEN_)
|
#if !defined(RYZEN_)
|
||||||
//#if defined(__SSE4_2__)
|
|
||||||
#if defined(__SSE2__)
|
#if defined(__SSE2__)
|
||||||
#define SHA256T_4WAY
|
#define SHA256T_4WAY
|
||||||
#endif
|
#endif
|
||||||
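Because the dedicated SHA extensions on Ryzen outperform the multi-way SIMD path, the width macros above are only defined when RYZEN_ is not set; the gate registration then picks whichever path was compiled in. An illustrative sketch of that dispatch, mirroring the registration functions and falling back to the scalar entry points declared later in this header:

// Illustrative gate dispatch: widest compiled path wins, else scalar.
#if defined(SHA256T_8WAY)
   gate->scanhash = (void*)&scanhash_sha256t_8way;
   gate->hash     = (void*)&sha256t_8way_hash;
#elif defined(SHA256T_4WAY)
   gate->scanhash = (void*)&scanhash_sha256t_4way;
   gate->hash     = (void*)&sha256t_4way_hash;
#else
   gate->scanhash = (void*)&scanhash_sha256t;
   gate->hash     = (void*)&sha256t_hash;
#endif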
@@ -26,8 +25,9 @@ int scanhash_sha256t_8way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
void sha256q_8way_hash( void *output, const void *input );
|
void sha256q_8way_hash( void *output, const void *input );
|
||||||
int scanhash_sha256q_8way( int thr_id, struct work *work, uint32_t max_nonce,
|
int scanhash_sha256q_8way( int thr_id, struct work *work, uint32_t max_nonce,
|
||||||
uint64_t *hashes_done, struct thr_info *mythr );
|
uint64_t *hashes_done, struct thr_info *mythr );
|
||||||
|
#endif
|
||||||
|
|
||||||
#elif defined(SHA256T_4WAY)
|
#if defined(SHA256T_4WAY)
|
||||||
|
|
||||||
void sha256t_4way_hash( void *output, const void *input );
|
void sha256t_4way_hash( void *output, const void *input );
|
||||||
int scanhash_sha256t_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
int scanhash_sha256t_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
||||||
@@ -35,7 +35,7 @@ int scanhash_sha256t_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
void sha256q_4way_hash( void *output, const void *input );
|
void sha256q_4way_hash( void *output, const void *input );
|
||||||
int scanhash_sha256q_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
int scanhash_sha256q_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
||||||
uint64_t *hashes_done, struct thr_info *mythr );
|
uint64_t *hashes_done, struct thr_info *mythr );
|
||||||
#else
|
#endif
|
||||||
|
|
||||||
void sha256t_hash( void *output, const void *input );
|
void sha256t_hash( void *output, const void *input );
|
||||||
int scanhash_sha256t( int thr_id, struct work *work, uint32_t max_nonce,
|
int scanhash_sha256t( int thr_id, struct work *work, uint32_t max_nonce,
|
||||||
@@ -46,5 +46,3 @@ int scanhash_sha256q( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|
||||||
|
@@ -40,7 +40,7 @@
|
|||||||
|
|
||||||
#include <stddef.h>
|
#include <stddef.h>
|
||||||
#include "algo/sha/sph_types.h"
|
#include "algo/sha/sph_types.h"
|
||||||
#include "avxdefs.h"
|
#include "simd-utils.h"
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
extern "C"{
|
extern "C"{
|
||||||
|
@@ -3,7 +3,7 @@
|
|||||||
|
|
||||||
#if defined(__AVX2__)
|
#if defined(__AVX2__)
|
||||||
|
|
||||||
#include "avxdefs.h"
|
#include "simd-utils.h"
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
unsigned char buf[128<<1];
|
unsigned char buf[128<<1];
|
||||||
|
@@ -36,7 +36,7 @@
|
|||||||
#ifdef __AES__
|
#ifdef __AES__
|
||||||
|
|
||||||
#include "sph_shavite.h"
|
#include "sph_shavite.h"
|
||||||
#include "avxdefs.h"
|
#include "simd-utils.h"
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
extern "C"{
|
extern "C"{
|
||||||
|
@@ -5,7 +5,7 @@
|
|||||||
|
|
||||||
#if defined(__AVX2__)
|
#if defined(__AVX2__)
|
||||||
|
|
||||||
#include "avxdefs.h"
|
#include "simd-utils.h"
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
uint32_t A[ 32*2 ] __attribute__((aligned(64)));
|
uint32_t A[ 32*2 ] __attribute__((aligned(64)));
|
||||||
|
@@ -17,13 +17,13 @@ void skeinhash_4way( void *state, const void *input )
|
|||||||
skein512_4way( &ctx_skein, input, 80 );
|
skein512_4way( &ctx_skein, input, 80 );
|
||||||
skein512_4way_close( &ctx_skein, vhash64 );
|
skein512_4way_close( &ctx_skein, vhash64 );
|
||||||
|
|
||||||
mm256_reinterleave_4x32( vhash32, vhash64, 512 );
|
mm256_rintrlv_4x64_4x32( vhash32, vhash64, 512 );
|
||||||
|
|
||||||
sha256_4way_init( &ctx_sha256 );
|
sha256_4way_init( &ctx_sha256 );
|
||||||
sha256_4way( &ctx_sha256, vhash32, 64 );
|
sha256_4way( &ctx_sha256, vhash32, 64 );
|
||||||
sha256_4way_close( &ctx_sha256, state );
|
sha256_4way_close( &ctx_sha256, state );
|
||||||
|
|
||||||
mm128_deinterleave_4x32( state, state+32, state+64, state+96,
|
mm128_dintrlv_4x32( state, state+32, state+64, state+96,
|
||||||
vhash32, 256 );
|
vhash32, 256 );
|
||||||
}
|
}
|
||||||
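skeinhash_4way keeps the Skein-512 output interleaved 4x64, but the following SHA-256 stage wants 4x32, so mm256_rintrlv_4x64_4x32 reshuffles the same bytes in place of a full deinterleave/reinterleave round trip. A reference sketch of that conversion is below, assuming little-endian lane words and the word-major layouts the interleave helpers use; it shows the intent, not the optimized implementation.

// Reference: 4-lane 64-bit interleave -> 4-lane 32-bit interleave.
// src[ q*4 + l ] is 64-bit word q of lane l; dst[ w*4 + l ] is 32-bit word w.
static void rintrlv_4x64_4x32_ref( uint32_t *dst, const uint64_t *src,
                                   int bit_len )
{
   for ( int q = 0; q < bit_len/64; q++ )
      for ( int l = 0; l < 4; l++ )
      {
         uint64_t v = src[ q*4 + l ];
         dst[ (2*q  )*4 + l ] = (uint32_t) v;            // low half first
         dst[ (2*q+1)*4 + l ] = (uint32_t)( v >> 32 );   // then high half
      }
}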
|
|
||||||
@@ -48,7 +48,7 @@ int scanhash_skein_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
|
|
||||||
swab32_array( edata, pdata, 20 );
|
swab32_array( edata, pdata, 20 );
|
||||||
|
|
||||||
mm256_interleave_4x64( vdata, edata, edata, edata, edata, 640 );
|
mm256_intrlv_4x64( vdata, edata, edata, edata, edata, 640 );
|
||||||
|
|
||||||
uint32_t *noncep = vdata + 73; // 9*8 + 1
|
uint32_t *noncep = vdata + 73; // 9*8 + 1
|
||||||
|
|
||||||
|
@@ -49,7 +49,7 @@ extern "C"{
|
|||||||
|
|
||||||
#include <stddef.h>
|
#include <stddef.h>
|
||||||
#include "algo/sha/sph_types.h"
|
#include "algo/sha/sph_types.h"
|
||||||
#include "avxdefs.h"
|
#include "simd-utils.h"
|
||||||
|
|
||||||
// Output size in bits
|
// Output size in bits
|
||||||
#define SPH_SIZE_skein256 256
|
#define SPH_SIZE_skein256 256
|
||||||
|
@@ -59,7 +59,7 @@
|
|||||||
#include <sys/types.h>
|
#include <sys/types.h>
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
#include <string.h>
|
#include <string.h>
|
||||||
#include "avxdefs.h"
|
#include "simd-utils.h"
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
extern "C" {
|
extern "C" {
|
||||||
|
@@ -52,7 +52,7 @@
|
|||||||
|
|
||||||
#include <stddef.h>
|
#include <stddef.h>
|
||||||
#include "algo/sha/sph_types.h"
|
#include "algo/sha/sph_types.h"
|
||||||
#include "avxdefs.h"
|
#include "simd-utils.h"
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Output size (in bits) for WHIRLPOOL.
|
* Output size (in bits) for WHIRLPOOL.
|
||||||
|
@@ -49,10 +49,10 @@ void polytimos_4way_hash( void *output, const void *input )
|
|||||||
|
|
||||||
// Need to convert from 64 bit interleaved to 32 bit interleaved.
|
// Need to convert from 64 bit interleaved to 32 bit interleaved.
|
||||||
uint32_t vhash32[16*4];
|
uint32_t vhash32[16*4];
|
||||||
mm256_reinterleave_4x32( vhash32, vhash, 512 );
|
mm256_rintrlv_4x64_4x32( vhash32, vhash, 512 );
|
||||||
shabal512_4way( &ctx.shabal, vhash32, 64 );
|
shabal512_4way( &ctx.shabal, vhash32, 64 );
|
||||||
shabal512_4way_close( &ctx.shabal, vhash32 );
|
shabal512_4way_close( &ctx.shabal, vhash32 );
|
||||||
mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash32, 512 );
|
mm128_dintrlv_4x32( hash0, hash1, hash2, hash3, vhash32, 512 );
|
||||||
|
|
||||||
update_final_echo ( &ctx.echo, (BitSequence *)hash0,
|
update_final_echo ( &ctx.echo, (BitSequence *)hash0,
|
||||||
(const BitSequence *)hash0, 512 );
|
(const BitSequence *)hash0, 512 );
|
||||||
@@ -66,13 +66,13 @@ void polytimos_4way_hash( void *output, const void *input )
|
|||||||
update_final_echo( &ctx.echo, (BitSequence *)hash3,
|
update_final_echo( &ctx.echo, (BitSequence *)hash3,
|
||||||
(const BitSequence *) hash3, 512 );
|
(const BitSequence *) hash3, 512 );
|
||||||
|
|
||||||
mm256_interleave_2x128( vhash, hash0, hash1, 512 );
|
mm256_intrlv_2x128( vhash, hash0, hash1, 512 );
|
||||||
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
|
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
|
||||||
mm256_deinterleave_2x128( hash0, hash1, vhash, 512 );
|
mm256_dintrlv_2x128( hash0, hash1, vhash, 512 );
|
||||||
mm256_interleave_2x128( vhash, hash2, hash3, 512 );
|
mm256_intrlv_2x128( vhash, hash2, hash3, 512 );
|
||||||
luffa_2way_init( &ctx.luffa, 512 );
|
luffa_2way_init( &ctx.luffa, 512 );
|
||||||
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
|
luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 );
|
||||||
mm256_deinterleave_2x128( hash2, hash3, vhash, 512 );
|
mm256_dintrlv_2x128( hash2, hash3, vhash, 512 );
|
||||||
|
|
||||||
sph_fugue512( &ctx.fugue, hash0, 64 );
|
sph_fugue512( &ctx.fugue, hash0, 64 );
|
||||||
sph_fugue512_close( &ctx.fugue, hash0 );
|
sph_fugue512_close( &ctx.fugue, hash0 );
|
||||||
|
@@ -293,7 +293,7 @@ void x16r_4way_hash( void* output, const void* input )
|
|||||||
}
|
}
|
||||||
|
|
||||||
int scanhash_x16r_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
int scanhash_x16r_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
||||||
uint64_t *hashes_done )
|
uint64_t *hashes_done, struct thr_info *mythr)
|
||||||
{
|
{
|
||||||
uint32_t hash[4*16] __attribute__ ((aligned (64)));
|
uint32_t hash[4*16] __attribute__ ((aligned (64)));
|
||||||
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
|
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
|
||||||
@@ -303,6 +303,7 @@ int scanhash_x16r_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
const uint32_t Htarg = ptarget[7];
|
const uint32_t Htarg = ptarget[7];
|
||||||
const uint32_t first_nonce = pdata[19];
|
const uint32_t first_nonce = pdata[19];
|
||||||
uint32_t n = first_nonce;
|
uint32_t n = first_nonce;
|
||||||
|
/* int */ thr_id = mythr->id; // thr_id arg is deprecated
|
||||||
uint32_t *nonces = work->nonces;
|
uint32_t *nonces = work->nonces;
|
||||||
int num_found = 0;
|
int num_found = 0;
|
||||||
uint32_t *noncep = vdata + 73; // 9*8 + 1
|
uint32_t *noncep = vdata + 73; // 9*8 + 1
|
||||||
|
@@ -35,7 +35,7 @@ void x16s_getAlgoString( const uint8_t* prevblock, char *output )
|
|||||||
bool register_x16r_algo( algo_gate_t* gate )
|
bool register_x16r_algo( algo_gate_t* gate )
|
||||||
{
|
{
|
||||||
#if defined (X16R_4WAY)
|
#if defined (X16R_4WAY)
|
||||||
init_x16r_4way_ctx();
|
// init_x16r_4way_ctx();
|
||||||
gate->scanhash = (void*)&scanhash_x16r_4way;
|
gate->scanhash = (void*)&scanhash_x16r_4way;
|
||||||
gate->hash = (void*)&x16r_4way_hash;
|
gate->hash = (void*)&x16r_4way_hash;
|
||||||
#else
|
#else
|
||||||
@@ -52,7 +52,7 @@ bool register_x16r_algo( algo_gate_t* gate )
|
|||||||
bool register_x16s_algo( algo_gate_t* gate )
|
bool register_x16s_algo( algo_gate_t* gate )
|
||||||
{
|
{
|
||||||
#if defined (X16R_4WAY)
|
#if defined (X16R_4WAY)
|
||||||
init_x16r_4way_ctx();
|
// init_x16r_4way_ctx();
|
||||||
gate->scanhash = (void*)&scanhash_x16r_4way;
|
gate->scanhash = (void*)&scanhash_x16r_4way;
|
||||||
gate->hash = (void*)&x16r_4way_hash;
|
gate->hash = (void*)&x16r_4way_hash;
|
||||||
#else
|
#else
|
||||||
|
@@ -2,7 +2,7 @@
|
|||||||
#define X16R_GATE_H__ 1
|
#define X16R_GATE_H__ 1
|
||||||
|
|
||||||
#include "algo-gate-api.h"
|
#include "algo-gate-api.h"
|
||||||
#include "avxdefs.h"
|
#include "simd-utils.h"
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
#if defined(__AVX2__) && defined(__AES__)
|
#if defined(__AVX2__) && defined(__AES__)
|
||||||
@@ -41,7 +41,7 @@ bool register_x16s_algo( algo_gate_t* gate );
|
|||||||
void x16r_4way_hash( void *state, const void *input );
|
void x16r_4way_hash( void *state, const void *input );
|
||||||
|
|
||||||
int scanhash_x16r_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
int scanhash_x16r_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
||||||
uint64_t *hashes_done );
|
uint64_t *hashes_done, struct thr_info *mythr );
|
||||||
|
|
||||||
void init_x16r_4way_ctx();
|
void init_x16r_4way_ctx();
|
||||||
|
|
||||||
@@ -50,7 +50,7 @@ void init_x16r_4way_ctx();
|
|||||||
void x16r_hash( void *state, const void *input );
|
void x16r_hash( void *state, const void *input );
|
||||||
|
|
||||||
int scanhash_x16r( int thr_id, struct work *work, uint32_t max_nonce,
|
int scanhash_x16r( int thr_id, struct work *work, uint32_t max_nonce,
|
||||||
uint64_t *hashes_done );
|
uint64_t *hashes_done, struct thr_info *mythr );
|
||||||
|
|
||||||
void init_x16r_ctx();
|
void init_x16r_ctx();
|
||||||
|
|
||||||
|
@@ -184,7 +184,7 @@ void x16r_hash( void* output, const void* input )
|
|||||||
}
|
}
|
||||||
|
|
||||||
int scanhash_x16r( int thr_id, struct work *work, uint32_t max_nonce,
|
int scanhash_x16r( int thr_id, struct work *work, uint32_t max_nonce,
|
||||||
uint64_t *hashes_done )
|
uint64_t *hashes_done, struct thr_info *mythr )
|
||||||
{
|
{
|
||||||
uint32_t _ALIGN(128) hash32[8];
|
uint32_t _ALIGN(128) hash32[8];
|
||||||
uint32_t _ALIGN(128) endiandata[20];
|
uint32_t _ALIGN(128) endiandata[20];
|
||||||
@@ -192,6 +192,7 @@ int scanhash_x16r( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
uint32_t *ptarget = work->target;
|
uint32_t *ptarget = work->target;
|
||||||
const uint32_t Htarg = ptarget[7];
|
const uint32_t Htarg = ptarget[7];
|
||||||
const uint32_t first_nonce = pdata[19];
|
const uint32_t first_nonce = pdata[19];
|
||||||
|
/* int */ thr_id = mythr->id; // thr_id arg is deprecated
|
||||||
uint32_t nonce = first_nonce;
|
uint32_t nonce = first_nonce;
|
||||||
volatile uint8_t *restart = &(work_restart[thr_id].restart);
|
volatile uint8_t *restart = &(work_restart[thr_id].restart);
|
||||||
|
|
||||||
|
@@ -299,7 +299,7 @@ extern void hmq1725hash(void *state, const void *input)
|
|||||||
}
|
}
|
||||||
|
|
||||||
int scanhash_hmq1725( int thr_id, struct work *work, int32_t max_nonce,
|
int scanhash_hmq1725( int thr_id, struct work *work, int32_t max_nonce,
|
||||||
uint64_t *hashes_done )
|
uint64_t *hashes_done, struct thr_info *mythr )
|
||||||
{
|
{
|
||||||
uint32_t endiandata[32] __attribute__((aligned(64)));
|
uint32_t endiandata[32] __attribute__((aligned(64)));
|
||||||
uint32_t hash64[8] __attribute__((aligned(64)));
|
uint32_t hash64[8] __attribute__((aligned(64)));
|
||||||
@@ -307,6 +307,7 @@ int scanhash_hmq1725( int thr_id, struct work *work, int32_t max_nonce,
|
|||||||
uint32_t *ptarget = work->target;
|
uint32_t *ptarget = work->target;
|
||||||
uint32_t n = pdata[19] - 1;
|
uint32_t n = pdata[19] - 1;
|
||||||
const uint32_t first_nonce = pdata[19];
|
const uint32_t first_nonce = pdata[19];
|
||||||
|
/* int */ thr_id = mythr->id; // thr_id arg is deprecated
|
||||||
//const uint32_t Htarg = ptarget[7];
|
//const uint32_t Htarg = ptarget[7];
|
||||||
|
|
||||||
//we need bigendian data...
|
//we need bigendian data...
|
||||||
|
@@ -47,30 +47,6 @@ union _sonoa_4way_context_overlay
 };

 typedef union _sonoa_4way_context_overlay sonoa_4way_context_overlay;
-/*
-sonoa_4way_ctx_holder sonoa_4way_ctx __attribute__ ((aligned (64)));
-
-void init_sonoa_4way_ctx()
-{
-    blake512_4way_init( &sonoa_4way_ctx.blake );
-    bmw512_4way_init( &sonoa_4way_ctx.bmw );
-    init_groestl( &sonoa_4way_ctx.groestl, 64 );
-    skein512_4way_init( &sonoa_4way_ctx.skein );
-    jh512_4way_init( &sonoa_4way_ctx.jh );
-    keccak512_4way_init( &sonoa_4way_ctx.keccak );
-    luffa_2way_init( &sonoa_4way_ctx.luffa, 512 );
-    cube_2way_init( &sonoa_4way_ctx.cube, 512, 16, 32 );
-    shavite512_2way_init( &sonoa_4way_ctx.shavite );
-    simd_2way_init( &sonoa_4way_ctx.simd, 512 );
-    init_echo( &sonoa_4way_ctx.echo, 512 );
-    hamsi512_4way_init( &sonoa_4way_ctx.hamsi );
-    sph_fugue512_init( &sonoa_4way_ctx.fugue );
-    shabal512_4way_init( &sonoa_4way_ctx.shabal );
-    sph_whirlpool_init( &sonoa_4way_ctx.whirlpool );
-    sha512_4way_init( &sonoa_4way_ctx.sha512 );
-    haval256_5_4way_init( &sonoa_4way_ctx.haval );
-};
-*/

 void sonoa_4way_hash( void *state, const void *input )
 {
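The block removed above is the old pre-initialized static context holder. With the context-overlay union, the per-algorithm contexts share one block of stack memory and each one is initialized immediately before use, so no global state or memcpy of a template is needed. A rough sketch of the pattern, using the 4-way init/update/close entry points shown elsewhere in this diff (the union name, member types, and example_hash_fragment are illustrative only):

#include <stdint.h>

union example_overlay
{
    blake512_4way_context blake;
    bmw512_4way_context   bmw;
    // ... one member per algorithm in the chain
};
typedef union example_overlay example_overlay;

void example_hash_fragment( void *vhash )
{
    example_overlay ctx;                     // one allocation for the whole chain

    blake512_4way_init( &ctx.blake );        // init just-in-time, then use
    blake512_4way( &ctx.blake, vhash, 64 );
    blake512_4way_close( &ctx.blake, vhash );

    bmw512_4way_init( &ctx.bmw );            // next stage reuses the same storage
    bmw512_4way( &ctx.bmw, vhash, 64 );
    bmw512_4way_close( &ctx.bmw, vhash );
}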
@@ -82,8 +58,6 @@ void sonoa_4way_hash( void *state, const void *input )
    uint64_t vhashA[8*4] __attribute__ ((aligned (64)));
    uint64_t vhashB[8*4] __attribute__ ((aligned (64)));
    sonoa_4way_context_overlay ctx;
-// sonoa_4way_ctx_holder ctx __attribute__ ((aligned (64)));
-// memcpy( &ctx, &sonoa_4way_ctx, sizeof(sonoa_4way_ctx) );

 // 1

@@ -95,7 +69,7 @@ void sonoa_4way_hash( void *state, const void *input )
    bmw512_4way( &ctx.bmw, vhash, 64 );
    bmw512_4way_close( &ctx.bmw, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -106,7 +80,7 @@ void sonoa_4way_hash( void *state, const void *input )
    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    skein512_4way_init( &ctx.skein );
    skein512_4way( &ctx.skein, vhash, 64 );
@@ -120,7 +94,7 @@ void sonoa_4way_hash( void *state, const void *input )
    keccak512_4way( &ctx.keccak, vhash, 64 );
    keccak512_4way_close( &ctx.keccak, vhash );

-   mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 );
+   mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );

    luffa_2way_init( &ctx.luffa, 512 );
    luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -142,8 +116,8 @@ void sonoa_4way_hash( void *state, const void *input )
    simd_2way_init( &ctx.simd, 512 );
    simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );

-   mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 );
-   mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 );
+   mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
+   mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );

    init_echo( &ctx.echo, 512 );
    update_final_echo( &ctx.echo, (BitSequence *)hash0,
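The renames above (mm256_interleave_* to mm256_intrlv_*, deinterleave to dintrlv, reinterleave to rintrlv) are mechanical; the data layout is unchanged. For reference, the 4x64 form packs four independent 64-bit-word streams so that each group of four consecutive words holds one word per lane. A scalar sketch of that layout (not the SIMD implementation in simd-utils; the function name here is illustrative):

#include <stdint.h>

// Pack four 64-bit-word streams lane-by-lane: dst holds s0[i], s1[i], s2[i], s3[i]
// for each word index i.  Deinterleaving reverses the same indexing.
static void intrlv_4x64_sketch( uint64_t *dst, const uint64_t *s0, const uint64_t *s1,
                                const uint64_t *s2, const uint64_t *s3, int bit_len )
{
   const int nwords = bit_len / 64;
   for ( int i = 0; i < nwords; i++ )
   {
      dst[ 4*i + 0 ] = s0[i];
      dst[ 4*i + 1 ] = s1[i];
      dst[ 4*i + 2 ] = s2[i];
      dst[ 4*i + 3 ] = s3[i];
   }
}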
@@ -160,13 +134,13 @@ void sonoa_4way_hash( void *state, const void *input )

 // 2

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    bmw512_4way_init( &ctx.bmw );
    bmw512_4way( &ctx.bmw, vhash, 64 );
    bmw512_4way_close( &ctx.bmw, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -177,7 +151,7 @@ void sonoa_4way_hash( void *state, const void *input )
    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    skein512_4way_init( &ctx.skein );
    skein512_4way( &ctx.skein, vhash, 64 );
@@ -191,7 +165,7 @@ void sonoa_4way_hash( void *state, const void *input )
    keccak512_4way( &ctx.keccak, vhash, 64 );
    keccak512_4way_close( &ctx.keccak, vhash );

-   mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 );
+   mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );

    luffa_2way_init( &ctx.luffa, 512 );
    luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -213,8 +187,8 @@ void sonoa_4way_hash( void *state, const void *input )
    simd_2way_init( &ctx.simd, 512 );
    simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );

-   mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 );
-   mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 );
+   mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
+   mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );

    init_echo( &ctx.echo, 512 );
    update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -229,7 +203,7 @@ void sonoa_4way_hash( void *state, const void *input )
    update_final_echo( &ctx.echo, (BitSequence *)hash3,
                       (const BitSequence *) hash3, 512 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    hamsi512_4way_init( &ctx.hamsi );
    hamsi512_4way( &ctx.hamsi, vhash, 64 );
@@ -241,7 +215,7 @@ void sonoa_4way_hash( void *state, const void *input )
    bmw512_4way( &ctx.bmw, vhash, 64 );
    bmw512_4way_close( &ctx.bmw, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -252,7 +226,7 @@ void sonoa_4way_hash( void *state, const void *input )
    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    skein512_4way_init( &ctx.skein );
    skein512_4way( &ctx.skein, vhash, 64 );
@@ -266,7 +240,7 @@ void sonoa_4way_hash( void *state, const void *input )
    keccak512_4way( &ctx.keccak, vhash, 64 );
    keccak512_4way_close( &ctx.keccak, vhash );

-   mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 );
+   mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );

    luffa_2way_init( &ctx.luffa, 512 );
    luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -288,8 +262,8 @@ void sonoa_4way_hash( void *state, const void *input )
    simd_2way_init( &ctx.simd, 512 );
    simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );

-   mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 );
-   mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 );
+   mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
+   mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );

    init_echo( &ctx.echo, 512 );
    update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -304,13 +278,13 @@ void sonoa_4way_hash( void *state, const void *input )
    update_final_echo( &ctx.echo, (BitSequence *)hash3,
                       (const BitSequence *) hash3, 512 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    hamsi512_4way_init( &ctx.hamsi );
    hamsi512_4way( &ctx.hamsi, vhash, 64 );
    hamsi512_4way_close( &ctx.hamsi, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

    sph_fugue512_init( &ctx.fugue );
    sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -326,13 +300,13 @@ void sonoa_4way_hash( void *state, const void *input )
    sph_fugue512_close( &ctx.fugue, hash3 );

 // 4
-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    bmw512_4way_init( &ctx.bmw );
    bmw512_4way( &ctx.bmw, vhash, 64 );
    bmw512_4way_close( &ctx.bmw, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -343,7 +317,7 @@ void sonoa_4way_hash( void *state, const void *input )
    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    skein512_4way_init( &ctx.skein );
    skein512_4way( &ctx.skein, vhash, 64 );
@@ -357,7 +331,7 @@ void sonoa_4way_hash( void *state, const void *input )
    keccak512_4way( &ctx.keccak, vhash, 64 );
    keccak512_4way_close( &ctx.keccak, vhash );

-   mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 );
+   mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );

    luffa_2way_init( &ctx.luffa, 512 );
    luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -379,8 +353,8 @@ void sonoa_4way_hash( void *state, const void *input )
    simd_2way_init( &ctx.simd, 512 );
    simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );

-   mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 );
-   mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 );
+   mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
+   mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );

    init_echo( &ctx.echo, 512 );
    update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -395,13 +369,13 @@ void sonoa_4way_hash( void *state, const void *input )
    update_final_echo( &ctx.echo, (BitSequence *)hash3,
                       (const BitSequence *) hash3, 512 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    hamsi512_4way_init( &ctx.hamsi );
    hamsi512_4way( &ctx.hamsi, vhash, 64 );
    hamsi512_4way_close( &ctx.hamsi, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

    sph_fugue512_init( &ctx.fugue );
    sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -416,19 +390,19 @@ void sonoa_4way_hash( void *state, const void *input )
    sph_fugue512( &ctx.fugue, hash3, 64 );
    sph_fugue512_close( &ctx.fugue, hash3 );

-   mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm128_intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );

    shabal512_4way_init( &ctx.shabal );
    shabal512_4way( &ctx.shabal, vhash, 64 );
    shabal512_4way_close( &ctx.shabal, vhash );

-   mm256_reinterleave_4x32_4x64( vhashB, vhash, 512 );
+   mm256_rintrlv_4x32_4x64( vhashB, vhash, 512 );

    hamsi512_4way_init( &ctx.hamsi );
    hamsi512_4way( &ctx.hamsi, vhashB, 64 );
    hamsi512_4way_close( &ctx.hamsi, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

    init_echo( &ctx.echo, 512 );
    update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -443,8 +417,8 @@ void sonoa_4way_hash( void *state, const void *input )
    update_final_echo( &ctx.echo, (BitSequence *)hash3,
                       (const BitSequence *) hash3, 512 );

-   mm256_interleave_2x128( vhashA, hash0, hash1, 512 );
-   mm256_interleave_2x128( vhashB, hash2, hash3, 512 );
+   mm256_intrlv_2x128( vhashA, hash0, hash1, 512 );
+   mm256_intrlv_2x128( vhashB, hash2, hash3, 512 );

    shavite512_2way_init( &ctx.shavite );
    shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, 64 );
@@ -452,19 +426,19 @@ void sonoa_4way_hash( void *state, const void *input )
    shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 );

 // 5
-   mm256_reinterleave_2x128_4x64( vhash, vhashA, vhashB, 512 );
+   mm256_rintrlv_2x128_4x64( vhash, vhashA, vhashB, 512 );

    bmw512_4way_init( &ctx.bmw );
    bmw512_4way( &ctx.bmw, vhash, 64 );
    bmw512_4way_close( &ctx.bmw, vhash );

-   mm256_reinterleave_4x64_4x32( vhashB, vhash, 512 );
+   mm256_rintrlv_4x64_4x32( vhashB, vhash, 512 );

    shabal512_4way_init( &ctx.shabal );
    shabal512_4way( &ctx.shabal, vhashB, 64 );
    shabal512_4way_close( &ctx.shabal, vhash );

-   mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm128_dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );

    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -475,7 +449,7 @@ void sonoa_4way_hash( void *state, const void *input )
    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    skein512_4way_init( &ctx.skein );
    skein512_4way( &ctx.skein, vhash, 64 );
@@ -489,7 +463,7 @@ void sonoa_4way_hash( void *state, const void *input )
    keccak512_4way( &ctx.keccak, vhash, 64 );
    keccak512_4way_close( &ctx.keccak, vhash );

-   mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 );
+   mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );

    luffa_2way_init( &ctx.luffa, 512 );
    luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -511,8 +485,8 @@ void sonoa_4way_hash( void *state, const void *input )
    simd_2way_init( &ctx.simd, 512 );
    simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );

-   mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 );
-   mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 );
+   mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
+   mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );

    init_echo( &ctx.echo, 512 );
    update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -527,13 +501,13 @@ void sonoa_4way_hash( void *state, const void *input )
    update_final_echo( &ctx.echo, (BitSequence *)hash3,
                       (const BitSequence *) hash3, 512 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    hamsi512_4way_init( &ctx.hamsi );
    hamsi512_4way( &ctx.hamsi, vhash, 64 );
    hamsi512_4way_close( &ctx.hamsi, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

    sph_fugue512_init( &ctx.fugue );
    sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -548,13 +522,13 @@ void sonoa_4way_hash( void *state, const void *input )
    sph_fugue512( &ctx.fugue, hash3, 64 );
    sph_fugue512_close( &ctx.fugue, hash3 );

-   mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm128_intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );

    shabal512_4way_init( &ctx.shabal );
    shabal512_4way( &ctx.shabal, vhash, 64 );
    shabal512_4way_close( &ctx.shabal, vhash );

-   mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm128_dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );

    sph_whirlpool_init( &ctx.whirlpool );
    sph_whirlpool( &ctx.whirlpool, hash0, 64 );
@@ -571,13 +545,13 @@ void sonoa_4way_hash( void *state, const void *input )

 // 6

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    bmw512_4way_init( &ctx.bmw );
    bmw512_4way( &ctx.bmw, vhash, 64 );
    bmw512_4way_close( &ctx.bmw, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -588,7 +562,7 @@ void sonoa_4way_hash( void *state, const void *input )
    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    skein512_4way_init( &ctx.skein );
    skein512_4way( &ctx.skein, vhash, 64 );
@@ -602,7 +576,7 @@ void sonoa_4way_hash( void *state, const void *input )
    keccak512_4way( &ctx.keccak, vhash, 64 );
    keccak512_4way_close( &ctx.keccak, vhash );

-   mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 );
+   mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );

    luffa_2way_init( &ctx.luffa, 512 );
    luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -624,8 +598,8 @@ void sonoa_4way_hash( void *state, const void *input )
    simd_2way_init( &ctx.simd, 512 );
    simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );

-   mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 );
-   mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 );
+   mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
+   mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );

    init_echo( &ctx.echo, 512 );
    update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -640,13 +614,13 @@ void sonoa_4way_hash( void *state, const void *input )
    update_final_echo( &ctx.echo, (BitSequence *)hash3,
                       (const BitSequence *) hash3, 512 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    hamsi512_4way_init( &ctx.hamsi );
    hamsi512_4way( &ctx.hamsi, vhash, 64 );
    hamsi512_4way_close( &ctx.hamsi, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

    sph_fugue512_init( &ctx.fugue );
    sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -661,13 +635,13 @@ void sonoa_4way_hash( void *state, const void *input )
    sph_fugue512( &ctx.fugue, hash3, 64 );
    sph_fugue512_close( &ctx.fugue, hash3 );

-   mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm128_intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );

    shabal512_4way_init( &ctx.shabal );
    shabal512_4way( &ctx.shabal, vhash, 64 );
    shabal512_4way_close( &ctx.shabal, vhash );

-   mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm128_dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );

    sph_whirlpool_init( &ctx.whirlpool );
    sph_whirlpool( &ctx.whirlpool, hash0, 64 );
@@ -682,13 +656,13 @@ void sonoa_4way_hash( void *state, const void *input )
    sph_whirlpool( &ctx.whirlpool, hash3, 64 );
    sph_whirlpool_close( &ctx.whirlpool, hash3 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    sha512_4way_init( &ctx.sha512 );
    sha512_4way( &ctx.sha512, vhash, 64 );
    sha512_4way_close( &ctx.sha512, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

    sph_whirlpool_init( &ctx.whirlpool );
    sph_whirlpool( &ctx.whirlpool, hash0, 64 );
@@ -705,13 +679,13 @@ void sonoa_4way_hash( void *state, const void *input )

 // 7

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    bmw512_4way_init( &ctx.bmw );
    bmw512_4way( &ctx.bmw, vhash, 64 );
    bmw512_4way_close( &ctx.bmw, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
@@ -722,7 +696,7 @@ void sonoa_4way_hash( void *state, const void *input )
    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    skein512_4way_init( &ctx.skein );
    skein512_4way( &ctx.skein, vhash, 64 );
@@ -736,7 +710,7 @@ void sonoa_4way_hash( void *state, const void *input )
    keccak512_4way( &ctx.keccak, vhash, 64 );
    keccak512_4way_close( &ctx.keccak, vhash );

-   mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 );
+   mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );

    luffa_2way_init( &ctx.luffa, 512 );
    luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -758,8 +732,8 @@ void sonoa_4way_hash( void *state, const void *input )
    simd_2way_init( &ctx.simd, 512 );
    simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );

-   mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 );
-   mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 );
+   mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
+   mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );

    init_echo( &ctx.echo, 512 );
    update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -774,13 +748,13 @@ void sonoa_4way_hash( void *state, const void *input )
    update_final_echo( &ctx.echo, (BitSequence *)hash3,
                       (const BitSequence *) hash3, 512 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    hamsi512_4way_init( &ctx.hamsi );
    hamsi512_4way( &ctx.hamsi, vhash, 64 );
    hamsi512_4way_close( &ctx.hamsi, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

    sph_fugue512_init( &ctx.fugue );
    sph_fugue512( &ctx.fugue, hash0, 64 );
@@ -795,13 +769,13 @@ void sonoa_4way_hash( void *state, const void *input )
    sph_fugue512( &ctx.fugue, hash3, 64 );
    sph_fugue512_close( &ctx.fugue, hash3 );

-   mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm128_intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );

    shabal512_4way_init( &ctx.shabal );
    shabal512_4way( &ctx.shabal, vhash, 64 );
    shabal512_4way_close( &ctx.shabal, vhash );

-   mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm128_dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );

    sph_whirlpool_init( &ctx.whirlpool );
    sph_whirlpool( &ctx.whirlpool, hash0, 64 );
@@ -816,13 +790,13 @@ void sonoa_4way_hash( void *state, const void *input )
    sph_whirlpool( &ctx.whirlpool, hash3, 64 );
    sph_whirlpool_close( &ctx.whirlpool, hash3 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    sha512_4way_init( &ctx.sha512 );
    sha512_4way( &ctx.sha512, vhash, 64 );
    sha512_4way_close( &ctx.sha512, vhash );

-   mm256_reinterleave_4x64_4x32( vhashB, vhash, 512 );
+   mm256_rintrlv_4x64_4x32( vhashB, vhash, 512 );

    haval256_5_4way_init( &ctx.haval );
    haval256_5_4way( &ctx.haval, vhashB, 64 );
@@ -836,7 +810,6 @@ int scanhash_sonoa_4way( int thr_id, struct work *work, uint32_t max_nonce,
    uint32_t *hash7 = &(hash[7<<2]);
    uint32_t lane_hash[8];
    uint32_t vdata[24*4] __attribute__ ((aligned (64)));
-   uint32_t endiandata[20] __attribute__((aligned(64)));
    uint32_t *pdata = work->data;
    uint32_t *ptarget = work->target;
    uint32_t n = pdata[19];
@@ -850,19 +823,13 @@ int scanhash_sonoa_4way( int thr_id, struct work *work, uint32_t max_nonce,
                         0xFFFFF000, 0xFFFF0000, 0 };

    // Need big endian data
-   casti_m256i( endiandata, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) );
-   casti_m256i( endiandata, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) );
-   casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
-
-   uint64_t *edata = (uint64_t*)endiandata;
-   mm256_interleave_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
-
+   mm256_bswap_intrlv80_4x64( vdata, pdata );
    for ( int m=0; m < 6; m++ ) if ( Htarg <= htmax[m] )
    {
       uint32_t mask = masks[m];
       do
       {
-         *noncev = mm256_interleave_blend_32( mm256_bswap_32(
+         *noncev = mm256_intrlv_blend_32( mm256_bswap_32(
                    _mm256_set_epi32( n+3, 0,n+2, 0,n+1, 0, n, 0 ) ),
                    *noncev );
          sonoa_4way_hash( hash, vdata );
@@ -871,17 +838,10 @@ int scanhash_sonoa_4way( int thr_id, struct work *work, uint32_t max_nonce,
          if ( ( ( hash7[ lane ] & mask ) == 0 ) )
          {
             mm128_extract_lane_4x32( lane_hash, hash, lane, 256 );
-            if ( fulltest( lane_hash, ptarget ) )
+            if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
             {
                pdata[19] = n + lane;
-               work_set_target_ratio( work, lane_hash );
-               if ( submit_work( mythr, work ) )
-                  applog( LOG_NOTICE,
-                           "Share %d submitted by thread %d, lane %d.",
-                           accepted_share_count + rejected_share_count + 1,
-                           thr_id, lane );
-               else
-                  applog( LOG_WARNING, "Failed to submit share." );
+               submit_solution( work, lane_hash, mythr, lane );
             }
          }
          n += 4;
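mm256_bswap_intrlv80_4x64 collapses the old three-step header preparation (byte-swap into endiandata, cast to 64-bit words, interleave 640 bits) into a single call. A scalar model of what that helper is expected to do, byte-swapping the 20 words of the 80-byte block header and broadcasting them into all four 64-bit lanes of vdata (sketch only; the real helper in simd-utils does this with vector loads):

#include <stdint.h>

static void bswap_intrlv80_4x64_sketch( void *vdst, const void *src )
{
   uint32_t edata[20];                         // big-endian copy of the header
   uint64_t *d = (uint64_t*)vdst;
   const uint64_t *e = (const uint64_t*)edata;

   for ( int i = 0; i < 20; i++ )
      edata[i] = __builtin_bswap32( ((const uint32_t*)src)[i] );

   for ( int i = 0; i < 10; i++ )              // 10 x 64-bit words = 80 bytes
      d[4*i] = d[4*i+1] = d[4*i+2] = d[4*i+3] = e[i];   // same data in all 4 lanes
}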
@@ -24,7 +24,6 @@
 #include "algo/haval/haval-hash-4way.h"
 #include "algo/sha/sha2-hash-4way.h"

-//typedef struct {
 union _x17_4way_context_overlay
 {
    blake512_4way_context blake;
@@ -47,30 +46,6 @@ union _x17_4way_context_overlay
 };
 typedef union _x17_4way_context_overlay x17_4way_context_overlay;

-/*
-x17_4way_ctx_holder x17_4way_ctx __attribute__ ((aligned (64)));
-
-void init_x17_4way_ctx()
-{
-    blake512_4way_init( &x17_4way_ctx.blake );
-    bmw512_4way_init( &x17_4way_ctx.bmw );
-    init_groestl( &x17_4way_ctx.groestl, 64 );
-    skein512_4way_init( &x17_4way_ctx.skein );
-    jh512_4way_init( &x17_4way_ctx.jh );
-    keccak512_4way_init( &x17_4way_ctx.keccak );
-    luffa_2way_init( &x17_4way_ctx.luffa, 512 );
-    cube_2way_init( &x17_4way_ctx.cube, 512, 16, 32 );
-    shavite512_2way_init( &x17_4way_ctx.shavite );
-    simd_2way_init( &x17_4way_ctx.simd, 512 );
-    init_echo( &x17_4way_ctx.echo, 512 );
-    hamsi512_4way_init( &x17_4way_ctx.hamsi );
-    sph_fugue512_init( &x17_4way_ctx.fugue );
-    shabal512_4way_init( &x17_4way_ctx.shabal );
-    sph_whirlpool_init( &x17_4way_ctx.whirlpool );
-    sha512_4way_init( &x17_4way_ctx.sha512 );
-    haval256_5_4way_init( &x17_4way_ctx.haval );
-};
-*/
 void x17_4way_hash( void *state, const void *input )
 {
    uint64_t hash0[8] __attribute__ ((aligned (64)));
@@ -81,7 +56,6 @@ void x17_4way_hash( void *state, const void *input )
    uint64_t vhashA[8*4] __attribute__ ((aligned (64)));
    uint64_t vhashB[8*4] __attribute__ ((aligned (64)));
    x17_4way_context_overlay ctx;
-// memcpy( &ctx, &x17_4way_ctx, sizeof(x17_4way_ctx) );

 // 1 Blake parallel 4 way 64 bit
    blake512_4way_init( &ctx.blake );
@@ -94,7 +68,7 @@ void x17_4way_hash( void *state, const void *input )
    bmw512_4way_close( &ctx.bmw, vhash );

 // Serialize
-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

 // 3 Groestl
    init_groestl( &ctx.groestl, 64 );
@@ -107,7 +81,7 @@ void x17_4way_hash( void *state, const void *input )
    update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );

 // Parallellize
-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

 // 4 Skein parallel 4 way 64 bit
    skein512_4way_init( &ctx.skein );
@@ -125,7 +99,7 @@ void x17_4way_hash( void *state, const void *input )
    keccak512_4way_close( &ctx.keccak, vhash );

 // 7 Luffa parallel 2 way 128 bit
-   mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 );
+   mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, 512 );

    luffa_2way_init( &ctx.luffa, 512 );
    luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 );
@@ -150,8 +124,8 @@ void x17_4way_hash( void *state, const void *input )
    simd_2way_init( &ctx.simd, 512 );
    simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 );

-   mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 );
-   mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 );
+   mm256_dintrlv_2x128( hash0, hash1, vhashA, 512 );
+   mm256_dintrlv_2x128( hash2, hash3, vhashB, 512 );

 // 11 Echo serial
    init_echo( &ctx.echo, 512 );
@@ -168,13 +142,13 @@ void x17_4way_hash( void *state, const void *input )
                       (const BitSequence *) hash3, 512 );

 // 12 Hamsi parallel 4 way 64 bit
-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    hamsi512_4way_init( &ctx.hamsi );
    hamsi512_4way( &ctx.hamsi, vhash, 64 );
    hamsi512_4way_close( &ctx.hamsi, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

 // 13 Fugue serial
    sph_fugue512_init( &ctx.fugue );
@@ -191,13 +165,13 @@ void x17_4way_hash( void *state, const void *input )
    sph_fugue512_close( &ctx.fugue, hash3 );

 // 14 Shabal, parallel 4 way 32 bit
-   mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm128_intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );

    shabal512_4way_init( &ctx.shabal );
    shabal512_4way( &ctx.shabal, vhash, 64 );
    shabal512_4way_close( &ctx.shabal, vhash );

-   mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, 512 );
+   mm128_dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, 512 );

 // 15 Whirlpool serial
    sph_whirlpool_init( &ctx.whirlpool );
@@ -214,19 +188,18 @@ void x17_4way_hash( void *state, const void *input )
    sph_whirlpool_close( &ctx.whirlpool, hash3 );

 // 16 SHA512 parallel 64 bit
-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

    sha512_4way_init( &ctx.sha512 );
    sha512_4way( &ctx.sha512, vhash, 64 );
    sha512_4way_close( &ctx.sha512, vhash );

 // 17 Haval parallel 32 bit
-   mm256_reinterleave_4x64_4x32( vhashB, vhash, 512 );
+   mm256_rintrlv_4x64_4x32( vhashB, vhash, 512 );

    haval256_5_4way_init( &ctx.haval );
    haval256_5_4way( &ctx.haval, vhashB, 64 );
    haval256_5_4way_close( &ctx.haval, state );

 }

 int scanhash_x17_4way( int thr_id, struct work *work, uint32_t max_nonce,
@@ -236,7 +209,6 @@ int scanhash_x17_4way( int thr_id, struct work *work, uint32_t max_nonce,
    uint32_t *hash7 = &(hash[7<<2]);
    uint32_t lane_hash[8];
    uint32_t vdata[24*4] __attribute__ ((aligned (64)));
-   uint32_t endiandata[20] __attribute__((aligned(64)));
    uint32_t *pdata = work->data;
    uint32_t *ptarget = work->target;
    uint32_t n = pdata[19];
@@ -250,38 +222,24 @@ int scanhash_x17_4way( int thr_id, struct work *work, uint32_t max_nonce,
                         0xFFFFF000, 0xFFFF0000, 0 };

    // Need big endian data
-   casti_m256i( endiandata, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) );
-   casti_m256i( endiandata, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) );
-   casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
-
-   uint64_t *edata = (uint64_t*)endiandata;
-   mm256_interleave_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
-
+   mm256_bswap_intrlv80_4x64( vdata, pdata );
    for ( int m = 0; m < 6; m++ ) if ( Htarg <= htmax[m] )
    {
       uint32_t mask = masks[ m ];
       do
       {
-         *noncev = mm256_interleave_blend_32( mm256_bswap_32(
-                   _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ),
-                   *noncev );
+         *noncev = mm256_intrlv_blend_32( mm256_bswap_32(
                   _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
          x17_4way_hash( hash, vdata );

          for ( int lane = 0; lane < 4; lane++ )
          if ( ( hash7[ lane ] & mask ) == 0 )
          {
             mm128_extract_lane_4x32( lane_hash, hash, lane, 256 );
-            if ( fulltest( lane_hash, ptarget ) )
+            if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
             {
                pdata[19] = n + lane;
-               work_set_target_ratio( work, lane_hash );
-               if ( submit_work( mythr, work ) )
-                  applog( LOG_NOTICE,
-                           "Share %d submitted by thread %d, lane %d.",
-                           accepted_share_count + rejected_share_count + 1,
-                           thr_id, lane );
-               else
-                  applog( LOG_WARNING, "Failed to submit share." );
+               submit_solution( work, lane_hash, mythr, lane );
             }
          }
          n += 4;
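Both scanhash loops above pull a single lane's 256-bit result out of the 4x32-interleaved hash buffer before running fulltest and, if it passes and benchmark mode is off, submitting it. A scalar model of that lane extraction, mirroring what the mm128_extract_lane_4x32 call is expected to do (sketch only; the function name below is illustrative):

#include <stdint.h>

// For 4x32 interleaving, word i of lane L sits at index 4*i + L,
// so the 8 words of a 256-bit lane hash are gathered with stride 4.
static void extract_lane_4x32_sketch( uint32_t *lane_hash, const uint32_t *vhash,
                                      int lane, int bit_len )
{
   const int nwords = bit_len / 32;    // 256 bits -> 8 words
   for ( int i = 0; i < nwords; i++ )
      lane_hash[i] = vhash[ 4*i + lane ];
}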
@@ -71,7 +71,7 @@ void xevan_4way_hash( void *output, const void *input )
    bmw512_4way_close( &ctx.bmw, vhash );

 // Serial
-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );

    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0,
@@ -87,7 +87,7 @@ void xevan_4way_hash( void *output, const void *input )
                              dataLen<<3 );

 // Parallel 4way
-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );

    skein512_4way_init( &ctx.skein );
    skein512_4way( &ctx.skein, vhash, dataLen );
@@ -101,7 +101,7 @@ void xevan_4way_hash( void *output, const void *input )
    keccak512_4way( &ctx.keccak, vhash, dataLen );
    keccak512_4way_close( &ctx.keccak, vhash );

-   mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, dataLen<<3 );
+   mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, dataLen<<3 );

    luffa_2way_init( &ctx.luffa, 512 );
    luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, dataLen );
@@ -123,8 +123,8 @@ void xevan_4way_hash( void *output, const void *input )
    simd_2way_init( &ctx.simd, 512 );
    simd_2way_update_close( &ctx.simd, vhashB, vhashB, dataLen<<3 );

-   mm256_deinterleave_1x128( hash0, hash1, vhashA, dataLen<<3 );
-   mm256_deinterleave_1x128( hash2, hash3, vhashB, dataLen<<3 );
+   mm256_dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 );
+   mm256_dintrlv_2x128( hash2, hash3, vhashB, dataLen<<3 );

    init_echo( &ctx.echo, 512 );
    update_final_echo( &ctx.echo, (BitSequence *)hash0,
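In the hunk above the old 1x128 name is replaced by the 2x128 form used everywhere else in this diff: the buffers being unpacked are 2-way interleaved in 128-bit blocks. A scalar model of that deinterleave (sketch only; the function name below is illustrative):

#include <stdint.h>

// src alternates 128-bit blocks of two streams: A0 B0 A1 B1 ...
// Each 128-bit block is two 64-bit words, so blocks sit at stride 4 words.
static void dintrlv_2x128_sketch( uint64_t *d0, uint64_t *d1,
                                  const uint64_t *src, int bit_len )
{
   const int nblocks = bit_len / 128;
   for ( int i = 0; i < nblocks; i++ )
   {
      d0[2*i]   = src[4*i];     d0[2*i+1] = src[4*i+1];
      d1[2*i]   = src[4*i+2];   d1[2*i+1] = src[4*i+3];
   }
}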
@@ -139,13 +139,13 @@ void xevan_4way_hash( void *output, const void *input )
    update_final_echo( &ctx.echo, (BitSequence *)hash3,
                       (const BitSequence *) hash3, dataLen<<3 );
 // Parallel
-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );

    hamsi512_4way_init( &ctx.hamsi );
    hamsi512_4way( &ctx.hamsi, vhash, dataLen );
    hamsi512_4way_close( &ctx.hamsi, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );

    sph_fugue512_init( &ctx.fugue );
    sph_fugue512( &ctx.fugue, hash0, dataLen );
@@ -161,13 +161,13 @@ void xevan_4way_hash( void *output, const void *input )
    sph_fugue512_close( &ctx.fugue, hash3 );

 // Parallel 4way 32 bit
-   mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
+   mm128_intrlv_4x32( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );

    shabal512_4way_init( &ctx.shabal );
    shabal512_4way( &ctx.shabal, vhash, dataLen );
    shabal512_4way_close( &ctx.shabal, vhash );

-   mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
+   mm128_dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );

 // Serial
    sph_whirlpool_init( &ctx.whirlpool );
@@ -183,19 +183,19 @@ void xevan_4way_hash( void *output, const void *input )
    sph_whirlpool( &ctx.whirlpool, hash3, dataLen );
    sph_whirlpool_close( &ctx.whirlpool, hash3 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );

    sha512_4way_init( &ctx.sha512 );
    sha512_4way( &ctx.sha512, vhash, dataLen );
    sha512_4way_close( &ctx.sha512, vhash );

-   mm256_reinterleave_4x64_4x32( vhashA, vhash, dataLen<<3 );
+   mm256_rintrlv_4x64_4x32( vhashA, vhash, dataLen<<3 );

    haval256_5_4way_init( &ctx.haval );
    haval256_5_4way( &ctx.haval, vhashA, dataLen );
    haval256_5_4way_close( &ctx.haval, vhashA );

-   mm256_reinterleave_4x32_4x64( vhash, vhashA, dataLen<<3 );
+   mm256_rintrlv_4x32_4x64( vhash, vhashA, dataLen<<3 );

    memset( &vhash[ 4<<2 ], 0, (dataLen-32) << 2 );

@@ -207,7 +207,7 @@ void xevan_4way_hash( void *output, const void *input )
    bmw512_4way( &ctx.bmw, vhash, dataLen );
    bmw512_4way_close( &ctx.bmw, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );

    init_groestl( &ctx.groestl, 64 );
    update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0,
@@ -222,7 +222,7 @@ void xevan_4way_hash( void *output, const void *input )
    update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3,
                              dataLen<<3 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );

    skein512_4way_init( &ctx.skein );
    skein512_4way( &ctx.skein, vhash, dataLen );
@@ -236,7 +236,7 @@ void xevan_4way_hash( void *output, const void *input )
    keccak512_4way( &ctx.keccak, vhash, dataLen );
    keccak512_4way_close( &ctx.keccak, vhash );

-   mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, dataLen<<3 );
+   mm256_rintrlv_4x64_2x128( vhashA, vhashB, vhash, dataLen<<3 );

    luffa_2way_init( &ctx.luffa, 512 );
    luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, dataLen );
@@ -258,8 +258,8 @@ void xevan_4way_hash( void *output, const void *input )
    simd_2way_init( &ctx.simd, 512 );
    simd_2way_update_close( &ctx.simd, vhashB, vhashB, dataLen<<3 );

-   mm256_deinterleave_1x128( hash0, hash1, vhashA, dataLen<<3 );
-   mm256_deinterleave_1x128( hash2, hash3, vhashB, dataLen<<3 );
+   mm256_dintrlv_2x128( hash0, hash1, vhashA, dataLen<<3 );
+   mm256_dintrlv_2x128( hash2, hash3, vhashB, dataLen<<3 );

    init_echo( &ctx.echo, 512 );
    update_final_echo( &ctx.echo, (BitSequence *)hash0,
@@ -274,13 +274,13 @@ void xevan_4way_hash( void *output, const void *input )
    update_final_echo( &ctx.echo, (BitSequence *)hash3,
                       (const BitSequence *) hash3, dataLen<<3 );

-   mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
+   mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );

    hamsi512_4way_init( &ctx.hamsi );
    hamsi512_4way( &ctx.hamsi, vhash, dataLen );
    hamsi512_4way_close( &ctx.hamsi, vhash );

-   mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
+   mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );

    sph_fugue512_init( &ctx.fugue );
    sph_fugue512( &ctx.fugue, hash0, dataLen );
@@ -295,13 +295,13 @@ void xevan_4way_hash( void *output, const void *input )
    sph_fugue512( &ctx.fugue, hash3, dataLen );
    sph_fugue512_close( &ctx.fugue, hash3 );

-   mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
+   mm128_intrlv_4x32( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );

    shabal512_4way_init( &ctx.shabal );
    shabal512_4way( &ctx.shabal, vhash, dataLen );
    shabal512_4way_close( &ctx.shabal, vhash );

-   mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );
+   mm128_dintrlv_4x32( hash0, hash1, hash2, hash3, vhash, dataLen<<3 );

    sph_whirlpool_init( &ctx.whirlpool );
|
sph_whirlpool_init( &ctx.whirlpool );
|
||||||
sph_whirlpool( &ctx.whirlpool, hash0, dataLen );
|
sph_whirlpool( &ctx.whirlpool, hash0, dataLen );
|
||||||
@@ -316,13 +316,13 @@ void xevan_4way_hash( void *output, const void *input )
|
|||||||
sph_whirlpool( &ctx.whirlpool, hash3, dataLen );
|
sph_whirlpool( &ctx.whirlpool, hash3, dataLen );
|
||||||
sph_whirlpool_close( &ctx.whirlpool, hash3 );
|
sph_whirlpool_close( &ctx.whirlpool, hash3 );
|
||||||
|
|
||||||
mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
|
mm256_intrlv_4x64( vhash, hash0, hash1, hash2, hash3, dataLen<<3 );
|
||||||
|
|
||||||
sha512_4way_init( &ctx.sha512 );
|
sha512_4way_init( &ctx.sha512 );
|
||||||
sha512_4way( &ctx.sha512, vhash, dataLen );
|
sha512_4way( &ctx.sha512, vhash, dataLen );
|
||||||
sha512_4way_close( &ctx.sha512, vhash );
|
sha512_4way_close( &ctx.sha512, vhash );
|
||||||
|
|
||||||
mm256_reinterleave_4x64_4x32( vhashA, vhash, dataLen<<3 );
|
mm256_rintrlv_4x64_4x32( vhashA, vhash, dataLen<<3 );
|
||||||
|
|
||||||
haval256_5_4way_init( &ctx.haval );
|
haval256_5_4way_init( &ctx.haval );
|
||||||
haval256_5_4way( &ctx.haval, vhashA, dataLen );
|
haval256_5_4way( &ctx.haval, vhashA, dataLen );
|
||||||
@@ -336,7 +336,6 @@ int scanhash_xevan_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
uint32_t *hash7 = &(hash[7<<2]);
|
uint32_t *hash7 = &(hash[7<<2]);
|
||||||
uint32_t lane_hash[8];
|
uint32_t lane_hash[8];
|
||||||
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
|
uint32_t vdata[24*4] __attribute__ ((aligned (64)));
|
||||||
uint32_t _ALIGN(64) endiandata[20];
|
|
||||||
uint32_t *pdata = work->data;
|
uint32_t *pdata = work->data;
|
||||||
uint32_t *ptarget = work->target;
|
uint32_t *ptarget = work->target;
|
||||||
/* int */ thr_id = mythr->id; // thr_id arg is deprecated
|
/* int */ thr_id = mythr->id; // thr_id arg is deprecated
|
||||||
@@ -349,15 +348,9 @@ int scanhash_xevan_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
if ( opt_benchmark )
|
if ( opt_benchmark )
|
||||||
ptarget[7] = 0x0cff;
|
ptarget[7] = 0x0cff;
|
||||||
|
|
||||||
uint64_t *edata = (uint64_t*)endiandata;
|
mm256_bswap_intrlv80_4x64( vdata, pdata );
|
||||||
|
|
||||||
casti_m256i( edata, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) );
|
|
||||||
casti_m256i( edata, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) );
|
|
||||||
casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) );
|
|
||||||
mm256_interleave_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 );
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
*noncev = mm256_interleave_blend_32( mm256_bswap_32(
|
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
|
||||||
_mm256_set_epi32( n+3, 0,n+2, 0,n+1, 0, n, 0 ) ), *noncev );
|
_mm256_set_epi32( n+3, 0,n+2, 0,n+1, 0, n, 0 ) ), *noncev );
|
||||||
|
|
||||||
xevan_4way_hash( hash, vdata );
|
xevan_4way_hash( hash, vdata );
|
||||||
@@ -365,17 +358,10 @@ int scanhash_xevan_4way( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
if ( hash7[ lane ] <= Htarg )
|
if ( hash7[ lane ] <= Htarg )
|
||||||
{
|
{
|
||||||
mm128_extract_lane_4x32( lane_hash, hash, lane, 256 );
|
mm128_extract_lane_4x32( lane_hash, hash, lane, 256 );
|
||||||
if ( fulltest( lane_hash, ptarget ) )
|
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
|
||||||
{
|
{
|
||||||
pdata[19] = n + lane;
|
pdata[19] = n + lane;
|
||||||
work_set_target_ratio( work, lane_hash );
|
submit_solution( work, lane_hash, mythr, lane );
|
||||||
if ( submit_work( mythr, work ) )
|
|
||||||
applog( LOG_NOTICE,
|
|
||||||
"Share %d submitted by thread %d, lane %d.",
|
|
||||||
accepted_share_count + rejected_share_count + 1,
|
|
||||||
thr_id, lane );
|
|
||||||
else
|
|
||||||
applog( LOG_WARNING, "Failed to submit share." );
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
n += 4;
|
n += 4;
|
||||||
|
@@ -237,8 +237,7 @@ int scanhash_xevan( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
uint32_t _ALIGN(64) endiandata[20];
|
uint32_t _ALIGN(64) endiandata[20];
|
||||||
uint32_t *pdata = work->data;
|
uint32_t *pdata = work->data;
|
||||||
uint32_t *ptarget = work->target;
|
uint32_t *ptarget = work->target;
|
||||||
/* int */ thr_id = mythr->id; // thr_id arg is deprecated
|
/* int */ thr_id = mythr->id; // thr_id arg is deprecated
|
||||||
|
|
||||||
const uint32_t Htarg = ptarget[7];
|
const uint32_t Htarg = ptarget[7];
|
||||||
const uint32_t first_nonce = pdata[19];
|
const uint32_t first_nonce = pdata[19];
|
||||||
uint32_t nonce = first_nonce;
|
uint32_t nonce = first_nonce;
|
||||||
@@ -250,8 +249,7 @@ int scanhash_xevan( int thr_id, struct work *work, uint32_t max_nonce,
|
|||||||
for (int k=0; k < 19; k++)
|
for (int k=0; k < 19; k++)
|
||||||
be32enc(&endiandata[k], pdata[k]);
|
be32enc(&endiandata[k], pdata[k]);
|
||||||
|
|
||||||
xevan_blake512_midstate( endiandata );
|
xevan_blake512_midstate( endiandata );
|
||||||
|
|
||||||
do {
|
do {
|
||||||
be32enc(&endiandata[19], nonce);
|
be32enc(&endiandata[19], nonce);
|
||||||
xevan_hash(hash, endiandata);
|
xevan_hash(hash, endiandata);
|
||||||
|
@@ -100,9 +100,9 @@ rm -f config.status
|
|||||||
CFLAGS="-O3 -march=native -Wall" ./configure --with-curl
|
CFLAGS="-O3 -march=native -Wall" ./configure --with-curl
|
||||||
make -j 16
|
make -j 16
|
||||||
strip -s cpuminer.exe
|
strip -s cpuminer.exe
|
||||||
mv cpuminer.exe cpuminer-native.exe
|
#mv cpuminer.exe cpuminer-native.exe
|
||||||
strip -s cpuminer
|
strip -s cpuminer
|
||||||
mv cpuminer cpuminer-native
|
#mv cpuminer cpuminer-native
|
||||||
|
|
||||||
make clean || echo done
|
#make clean || echo done
|
||||||
|
|
||||||
|
20
configure
vendored
@@ -1,6 +1,6 @@
|
|||||||
#! /bin/sh
|
#! /bin/sh
|
||||||
# Guess values for system-dependent variables and create Makefiles.
|
# Guess values for system-dependent variables and create Makefiles.
|
||||||
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.2.2.
|
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.3.1.
|
||||||
#
|
#
|
||||||
#
|
#
|
||||||
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
|
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
|
||||||
@@ -577,8 +577,8 @@ MAKEFLAGS=
|
|||||||
# Identity of this package.
|
# Identity of this package.
|
||||||
PACKAGE_NAME='cpuminer-opt'
|
PACKAGE_NAME='cpuminer-opt'
|
||||||
PACKAGE_TARNAME='cpuminer-opt'
|
PACKAGE_TARNAME='cpuminer-opt'
|
||||||
PACKAGE_VERSION='3.9.2.2'
|
PACKAGE_VERSION='3.9.3.1'
|
||||||
PACKAGE_STRING='cpuminer-opt 3.9.2.2'
|
PACKAGE_STRING='cpuminer-opt 3.9.3.1'
|
||||||
PACKAGE_BUGREPORT=''
|
PACKAGE_BUGREPORT=''
|
||||||
PACKAGE_URL=''
|
PACKAGE_URL=''
|
||||||
|
|
||||||
@@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then
|
|||||||
# Omit some internal or obsolete options to make the list less imposing.
|
# Omit some internal or obsolete options to make the list less imposing.
|
||||||
# This message is too long to be a string in the A/UX 3.1 sh.
|
# This message is too long to be a string in the A/UX 3.1 sh.
|
||||||
cat <<_ACEOF
|
cat <<_ACEOF
|
||||||
\`configure' configures cpuminer-opt 3.9.2.2 to adapt to many kinds of systems.
|
\`configure' configures cpuminer-opt 3.9.3.1 to adapt to many kinds of systems.
|
||||||
|
|
||||||
Usage: $0 [OPTION]... [VAR=VALUE]...
|
Usage: $0 [OPTION]... [VAR=VALUE]...
|
||||||
|
|
||||||
@@ -1404,7 +1404,7 @@ fi
|
|||||||
|
|
||||||
if test -n "$ac_init_help"; then
|
if test -n "$ac_init_help"; then
|
||||||
case $ac_init_help in
|
case $ac_init_help in
|
||||||
short | recursive ) echo "Configuration of cpuminer-opt 3.9.2.2:";;
|
short | recursive ) echo "Configuration of cpuminer-opt 3.9.3.1:";;
|
||||||
esac
|
esac
|
||||||
cat <<\_ACEOF
|
cat <<\_ACEOF
|
||||||
|
|
||||||
@@ -1509,7 +1509,7 @@ fi
|
|||||||
test -n "$ac_init_help" && exit $ac_status
|
test -n "$ac_init_help" && exit $ac_status
|
||||||
if $ac_init_version; then
|
if $ac_init_version; then
|
||||||
cat <<\_ACEOF
|
cat <<\_ACEOF
|
||||||
cpuminer-opt configure 3.9.2.2
|
cpuminer-opt configure 3.9.3.1
|
||||||
generated by GNU Autoconf 2.69
|
generated by GNU Autoconf 2.69
|
||||||
|
|
||||||
Copyright (C) 2012 Free Software Foundation, Inc.
|
Copyright (C) 2012 Free Software Foundation, Inc.
|
||||||
@@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF
|
|||||||
This file contains any messages produced by compilers while
|
This file contains any messages produced by compilers while
|
||||||
running configure, to aid debugging if configure makes a mistake.
|
running configure, to aid debugging if configure makes a mistake.
|
||||||
|
|
||||||
It was created by cpuminer-opt $as_me 3.9.2.2, which was
|
It was created by cpuminer-opt $as_me 3.9.3.1, which was
|
||||||
generated by GNU Autoconf 2.69. Invocation command line was
|
generated by GNU Autoconf 2.69. Invocation command line was
|
||||||
|
|
||||||
$ $0 $@
|
$ $0 $@
|
||||||
@@ -2993,7 +2993,7 @@ fi
|
|||||||
|
|
||||||
# Define the identity of the package.
|
# Define the identity of the package.
|
||||||
PACKAGE='cpuminer-opt'
|
PACKAGE='cpuminer-opt'
|
||||||
VERSION='3.9.2.2'
|
VERSION='3.9.3.1'
|
||||||
|
|
||||||
|
|
||||||
cat >>confdefs.h <<_ACEOF
|
cat >>confdefs.h <<_ACEOF
|
||||||
@@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
|
|||||||
# report actual input values of CONFIG_FILES etc. instead of their
|
# report actual input values of CONFIG_FILES etc. instead of their
|
||||||
# values after options handling.
|
# values after options handling.
|
||||||
ac_log="
|
ac_log="
|
||||||
This file was extended by cpuminer-opt $as_me 3.9.2.2, which was
|
This file was extended by cpuminer-opt $as_me 3.9.3.1, which was
|
||||||
generated by GNU Autoconf 2.69. Invocation command line was
|
generated by GNU Autoconf 2.69. Invocation command line was
|
||||||
|
|
||||||
CONFIG_FILES = $CONFIG_FILES
|
CONFIG_FILES = $CONFIG_FILES
|
||||||
@@ -6756,7 +6756,7 @@ _ACEOF
|
|||||||
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
|
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
|
||||||
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
|
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
|
||||||
ac_cs_version="\\
|
ac_cs_version="\\
|
||||||
cpuminer-opt config.status 3.9.2.2
|
cpuminer-opt config.status 3.9.3.1
|
||||||
configured by $0, generated by GNU Autoconf 2.69,
|
configured by $0, generated by GNU Autoconf 2.69,
|
||||||
with options \\"\$ac_cs_config\\"
|
with options \\"\$ac_cs_config\\"
|
||||||
|
|
||||||
|
@@ -1,4 +1,4 @@
|
|||||||
AC_INIT([cpuminer-opt], [3.9.2.2])
|
AC_INIT([cpuminer-opt], [3.9.3.1])
|
||||||
|
|
||||||
AC_PREREQ([2.59c])
|
AC_PREREQ([2.59c])
|
||||||
AC_CANONICAL_SYSTEM
|
AC_CANONICAL_SYSTEM
|
||||||
|
84
cpu-miner.c
@@ -105,11 +105,11 @@ enum algos opt_algo = ALGO_NULL;
|
|||||||
int opt_scrypt_n = 0;
|
int opt_scrypt_n = 0;
|
||||||
int opt_pluck_n = 128;
|
int opt_pluck_n = 128;
|
||||||
int opt_n_threads = 0;
|
int opt_n_threads = 0;
|
||||||
#if ( __GNUC__ > 4 ) || ( ( __GNUC__ == 4 ) && ( __GNUC_MINOR__ >= 8 ) )
|
// Windows doesn't support 128 bit affinity mask.
|
||||||
|
#if defined(__linux) && defined(GCC_INT128)
|
||||||
#define AFFINITY_USES_UINT128 1
|
#define AFFINITY_USES_UINT128 1
|
||||||
uint128_t opt_affinity = i128_neg1;
|
uint128_t opt_affinity = -1LL;
|
||||||
#else
|
#else
|
||||||
#define AFFINITY_USES_UINT128 0
|
|
||||||
uint64_t opt_affinity = -1LL;
|
uint64_t opt_affinity = -1LL;
|
||||||
#endif
|
#endif
|
||||||
int opt_priority = 0;
|
int opt_priority = 0;
|
||||||
@@ -205,7 +205,8 @@ static inline void drop_policy(void)
|
|||||||
#define pthread_setaffinity_np(tid,sz,s) {} /* only do process affinity */
|
#define pthread_setaffinity_np(tid,sz,s) {} /* only do process affinity */
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if ( __GNUC__ > 4 ) || ( ( __GNUC__ == 4 ) && ( __GNUC_MINOR__ >= 8 ) )
|
// Linux affinity can use int128.
|
||||||
|
#if AFFINITY_USES_UINT128
|
||||||
static void affine_to_cpu_mask( int id, unsigned __int128 mask )
|
static void affine_to_cpu_mask( int id, unsigned __int128 mask )
|
||||||
#else
|
#else
|
||||||
static void affine_to_cpu_mask( int id, unsigned long long mask )
|
static void affine_to_cpu_mask( int id, unsigned long long mask )
|
||||||
@@ -218,7 +219,7 @@ static void affine_to_cpu_mask( int id, unsigned long long mask )
|
|||||||
for ( uint8_t i = 0; i < ncpus; i++ )
|
for ( uint8_t i = 0; i < ncpus; i++ )
|
||||||
{
|
{
|
||||||
// cpu mask
|
// cpu mask
|
||||||
#if ( __GNUC__ > 4 ) || ( ( __GNUC__ == 4 ) && ( __GNUC_MINOR__ >= 8 ) )
|
#if AFFINITY_USES_UINT128
|
||||||
if( ( mask & ( (unsigned __int128)1ULL << i ) ) ) CPU_SET( i, &set );
|
if( ( mask & ( (unsigned __int128)1ULL << i ) ) ) CPU_SET( i, &set );
|
||||||
#else
|
#else
|
||||||
if( (ncpus > 64) || ( mask & (1ULL << i) ) ) CPU_SET( i, &set );
|
if( (ncpus > 64) || ( mask & (1ULL << i) ) ) CPU_SET( i, &set );
|
||||||
@@ -239,6 +240,7 @@ static void affine_to_cpu_mask( int id, unsigned long long mask )
|
|||||||
#elif defined(WIN32) /* Windows */
|
#elif defined(WIN32) /* Windows */
|
||||||
static inline void drop_policy(void) { }
|
static inline void drop_policy(void) { }
|
||||||
|
|
||||||
|
// Windows CPU groups to manage more than 64 CPUs.
|
||||||
static void affine_to_cpu_mask( int id, unsigned long mask )
|
static void affine_to_cpu_mask( int id, unsigned long mask )
|
||||||
{
|
{
|
||||||
bool success;
|
bool success;
|
||||||
@@ -247,12 +249,12 @@ static void affine_to_cpu_mask( int id, unsigned long mask )
|
|||||||
// DWORD last_error;
|
// DWORD last_error;
|
||||||
|
|
||||||
if ( id == -1 )
|
if ( id == -1 )
|
||||||
success = SetProcessAffinityMask( GetCurrentProcess(), (DWORD_PTR)&mask );
|
success = SetProcessAffinityMask( GetCurrentProcess(), mask );
|
||||||
|
|
||||||
// Are Windows CPU Groups supported?
|
// Are Windows CPU Groups supported?
|
||||||
#if _WIN32_WINNT==0x0601
|
#if _WIN32_WINNT==0x0601
|
||||||
else if ( num_cpugroups == 1 )
|
else if ( num_cpugroups == 1 )
|
||||||
success = SetThreadAffinityMask( GetCurrentThread(), (DWORD_PTR)&mask );
|
success = SetThreadAffinityMask( GetCurrentThread(), mask );
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
// Find the correct cpu group
|
// Find the correct cpu group
|
||||||
@@ -265,7 +267,7 @@ static void affine_to_cpu_mask( int id, unsigned long mask )
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
cpu -= cpus;
|
cpu -= cpus;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (opt_debug)
|
if (opt_debug)
|
||||||
applog(LOG_DEBUG, "Binding thread %d to cpu %d on cpu group %d (mask %x)", id, cpu, group, (1ULL << cpu));
|
applog(LOG_DEBUG, "Binding thread %d to cpu %d on cpu group %d (mask %x)", id, cpu, group, (1ULL << cpu));
|
||||||
@@ -277,7 +279,7 @@ static void affine_to_cpu_mask( int id, unsigned long mask )
|
|||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
else
|
else
|
||||||
success = SetThreadAffinityMask( GetCurrentThread(), (DWORD_PTR)&mask );
|
success = SetThreadAffinityMask( GetCurrentThread(), mask );
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
if (!success)
|
if (!success)
|
||||||
@@ -1848,40 +1850,36 @@ static void *miner_thread( void *userdata )
|
|||||||
if ( num_cpus > 1 )
|
if ( num_cpus > 1 )
|
||||||
{
|
{
|
||||||
#if AFFINITY_USES_UINT128
|
#if AFFINITY_USES_UINT128
|
||||||
|
// Default affinity
|
||||||
if ( (opt_affinity == i128_neg1 ) && opt_n_threads > 1 )
|
if ( (opt_affinity == i128_neg1 ) && opt_n_threads > 1 )
|
||||||
{
|
{
|
||||||
if ( opt_debug )
|
if ( opt_debug )
|
||||||
applog( LOG_DEBUG,
|
applog( LOG_DEBUG, "Binding thread %d to cpu %d.",
|
||||||
"Binding thread %d to cpu %d (mask %016llx %016llx)",
|
thr_id, thr_id % num_cpus,
|
||||||
thr_id, thr_id % num_cpus,
|
u128_hi64( (uint128_t)1ULL << (thr_id % num_cpus) ),
|
||||||
i128_hi64( i128_neg1 << (thr_id % num_cpus) ),
|
u128_lo64( (uint128_t)1ULL << (thr_id % num_cpus) ) );
|
||||||
i128_lo64( i128_neg1 << (thr_id % num_cpus) ) );
|
affine_to_cpu_mask( thr_id, (uint128_t)1ULL << (thr_id % num_cpus) );
|
||||||
affine_to_cpu_mask( thr_id,
|
|
||||||
(uint128_t)1LL << (thr_id % num_cpus) );
|
|
||||||
|
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
if ( (opt_affinity == -1LL) && opt_n_threads > 1 )
|
if ( (opt_affinity == -1LL) && opt_n_threads > 1 )
|
||||||
{
|
{
|
||||||
if (opt_debug)
|
if (opt_debug)
|
||||||
applog( LOG_DEBUG, "Binding thread %d to cpu %d (mask %x)",
|
applog( LOG_DEBUG, "Binding thread %d to cpu %d.",
|
||||||
thr_id, thr_id % num_cpus, L << (thr_id % num_cpus)) ;
|
thr_id, thr_id % num_cpus, 1LL << (thr_id % num_cpus)) ;
|
||||||
affine_to_cpu_mask( thr_id, 1ULL << (thr_id % num_cpus) );
|
affine_to_cpu_mask( thr_id, 1ULL << (thr_id % num_cpus) );
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
else
|
else // Custom affinity
|
||||||
{
|
{
|
||||||
#if AFFINITY_USES_UINT128
|
#if AFFINITY_USES_UINT128
|
||||||
if (opt_debug)
|
if (opt_debug)
|
||||||
applog( LOG_DEBUG,
|
applog( LOG_DEBUG, "Binding thread %d to mask %016llx %016llx",
|
||||||
"Binding thread %d to cpu mask %016llx %016llx",
|
thr_id, u128_hi64( opt_affinity ),
|
||||||
thr_id, i128_hi64( i128_neg1 << (thr_id % num_cpus) ),
|
u128_lo64( opt_affinity ) );
|
||||||
i128_lo64( i128_neg1 << (thr_id % num_cpus) ) );
|
|
||||||
#else
|
#else
|
||||||
if (opt_debug)
|
if (opt_debug)
|
||||||
applog( LOG_DEBUG,
|
applog( LOG_DEBUG, "Binding thread %d to mask %016llx",
|
||||||
"Binding thread %d to cpu mask %016llx %016llx",
|
thr_id, opt_affinity );
|
||||||
thr_id, opt_affinity );
|
|
||||||
#endif
|
#endif
|
||||||
affine_to_cpu_mask( thr_id, opt_affinity );
|
affine_to_cpu_mask( thr_id, opt_affinity );
|
||||||
}
|
}
|
||||||
@@ -2926,11 +2924,13 @@ void parse_arg(int key, char *arg )
|
|||||||
// if ( ul > ( 1ULL << num_cpus ) - 1ULL )
|
// if ( ul > ( 1ULL << num_cpus ) - 1ULL )
|
||||||
// ul = -1LL;
|
// ul = -1LL;
|
||||||
#if AFFINITY_USES_UINT128
|
#if AFFINITY_USES_UINT128
|
||||||
// replicate the low 64 bits to make a full 128 bit mask
|
// replicate the low 64 bits to make a full 128 bit mask if there are more
|
||||||
opt_affinity = (uint128_t)(ul);
|
// than 64 CPUs, otherwise zero extend the upper half.
|
||||||
opt_affinity = (opt_affinity << 64 ) | (uint128_t)ul;
|
opt_affinity = (uint128_t)ul;
|
||||||
|
if ( num_cpus > 64 )
|
||||||
|
opt_affinity = (opt_affinity << 64 ) | (uint128_t)ul;
|
||||||
#else
|
#else
|
||||||
opt_affinity = ul;
|
opt_affinity = ul;
|
||||||
#endif
|
#endif
|
||||||
break;
|
break;
|
||||||
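To make the replication rule concrete, a minimal worked sketch (hypothetical values; uint128_t is the 128 bit type already used for opt_affinity):

// Example: --cpu-affinity 0x5 parsed on a machine reporting 72 CPUs.
// The low 64 bits are replicated into the upper half, so bits 0 and 2
// are set in both 64-CPU groups, i.e. CPUs 0, 2, 64 and 66.
uint128_t mask = (uint128_t)0x5ULL;            // ul from the command line
if ( 72 > 64 )                                 // num_cpus > 64
   mask = ( mask << 64 ) | (uint128_t)0x5ULL;  // 0x5 now in both 64 bit halves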
case 1021:
|
case 1021:
|
||||||
@@ -3330,20 +3330,18 @@ int main(int argc, char *argv[])
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (!rpc_userpass)
|
if (!rpc_userpass)
|
||||||
{
|
{
|
||||||
rpc_userpass = (char*) malloc(strlen(rpc_user) + strlen(rpc_pass) + 2);
|
rpc_userpass = (char*) malloc(strlen(rpc_user) + strlen(rpc_pass) + 2);
|
||||||
if (rpc_userpass)
|
if (rpc_userpass)
|
||||||
sprintf(rpc_userpass, "%s:%s", rpc_user, rpc_pass);
|
sprintf(rpc_userpass, "%s:%s", rpc_user, rpc_pass);
|
||||||
else
|
else
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// All options must be set before starting the gate
|
// All options must be set before starting the gate
|
||||||
if ( !register_algo_gate( opt_algo, &algo_gate ) )
|
if ( !register_algo_gate( opt_algo, &algo_gate ) ) exit(1);
|
||||||
exit(1);
|
|
||||||
|
|
||||||
if ( !check_cpu_capability() )
|
if ( !check_cpu_capability() ) exit(1);
|
||||||
exit(1);
|
|
||||||
|
|
||||||
pthread_mutex_init(&stats_lock, NULL);
|
pthread_mutex_init(&stats_lock, NULL);
|
||||||
pthread_mutex_init(&g_work_lock, NULL);
|
pthread_mutex_init(&g_work_lock, NULL);
|
||||||
@@ -3356,7 +3354,7 @@ int main(int argc, char *argv[])
|
|||||||
? (CURL_GLOBAL_ALL & ~CURL_GLOBAL_SSL)
|
? (CURL_GLOBAL_ALL & ~CURL_GLOBAL_SSL)
|
||||||
: CURL_GLOBAL_ALL;
|
: CURL_GLOBAL_ALL;
|
||||||
if (curl_global_init(flags))
|
if (curl_global_init(flags))
|
||||||
{
|
{
|
||||||
applog(LOG_ERR, "CURL initialization failed");
|
applog(LOG_ERR, "CURL initialization failed");
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
1590
interleave.h
File diff suppressed because it is too large
2
miner.h
@@ -363,7 +363,7 @@ struct work {
|
|||||||
char *job_id;
|
char *job_id;
|
||||||
size_t xnonce2_len;
|
size_t xnonce2_len;
|
||||||
unsigned char *xnonce2;
|
unsigned char *xnonce2;
|
||||||
uint32_t nonces[8];
|
uint32_t nonces[8]; // deprecated
|
||||||
} __attribute__ ((aligned (64)));
|
} __attribute__ ((aligned (64)));
|
||||||
|
|
||||||
struct stratum_job {
|
struct stratum_job {
|
||||||
|
183
simd-utils.h
Normal file
@@ -0,0 +1,183 @@
|
|||||||
|
#if !defined(SIMD_UTILS_H__)
|
||||||
|
#define SIMD_UTILS_H__ 1
|
||||||
|
|
||||||
|
//////////////////////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// SIMD utilities
|
||||||
|
//
|
||||||
|
// Not to be confused with the hashing function of the same name. This
|
||||||
|
// is about Single Instruction Multiple Data programming using CPU
|
||||||
|
// features such as SSE and AVX.
|
||||||
|
//
|
||||||
|
// This header is the entry point to a suite of macros and functions
|
||||||
|
// to perform basic operations on vectors that are useful in crypto
|
||||||
|
// mining. Some of these functions have native CPU support for scalar
|
||||||
|
// data but not for vectors. The main categories are bit rotation
|
||||||
|
// and endian byte swapping.
|
||||||
|
//
|
||||||
|
// An attempt was made to make the names as similar as possible to
|
||||||
|
// Intel's intrinsic function format. Most variations are to avoid
|
||||||
|
// confusion with actual Intel intrinsics, brevity, and clarity.
|
||||||
|
//
|
||||||
|
// This suite supports some operations on regular 64 bit integers
|
||||||
|
// as well as 128 bit integers available on recent versions of Linux
|
||||||
|
// and GCC.
|
||||||
|
//
|
||||||
|
// It also supports various vector sizes on CPUs that meet the minimum
|
||||||
|
// requirements.
|
||||||
|
//
|
||||||
|
// The minimum for any real work is a 64 bit CPU with SSE2,
|
||||||
|
// i.e. an Intel Core 2.
|
||||||
|
//
|
||||||
|
// Following are the minimum requirements for each vector size. There
|
||||||
|
// is no significant 64 bit vectorization therefore SSE2 is the practical
|
||||||
|
// minimum for using this code.
|
||||||
|
//
|
||||||
|
// MMX: 64 bit vectors
|
||||||
|
// SSE2: 128 bit vectors (64 bit CPUs only, such as Intel Core2).
|
||||||
|
// AVX2: 256 bit vectors (Starting with Intel Haswell and AMD Ryzen)
|
||||||
|
// AVX512: 512 bit vectors (still under development)
|
||||||
|
//
|
||||||
|
// Most functions are available at the stated levels but in rare cases
|
||||||
|
// a higher level feature may be required with no compatible alternative.
|
||||||
|
// Some SSE2 functions have versions optimized for higher feature levels
|
||||||
|
// such as SSSE3 or SSE4.1 that will be used automatically on capable
|
||||||
|
// CPUs.
|
||||||
|
//
|
||||||
|
// The vector size boundaries are respected to maintain compatibility.
|
||||||
|
// For example, an instruction introduced with AVX2 may improve 128 bit
|
||||||
|
// vector performance but will not be implemented. A CPU with AVX2 will
|
||||||
|
// tend to use 256 bit vectors. On a practical level AVX512 does introduce
|
||||||
|
// bit rotation instructions for 128 and 256 bit vectors in addition to
|
||||||
|
// its own 512 bit vectors. These will not be back ported to replace the
|
||||||
|
// SW implementations for the smaller vectors. This policy may be reviewed
|
||||||
|
// in the future once AVX512 is established.
|
||||||
|
//
|
||||||
|
// Strict alignment of data is required: 16 bytes for 128 bit vectors,
|
||||||
|
// 32 bytes for 256 bit vectors and 64 bytes for 512 bit vectors. 64 byte
|
||||||
|
// alignment is recommended in all cases for best cache alignment.
|
||||||
|
//
|
||||||
|
// Windows has problems with function vector arguments larger than
|
||||||
|
// 128 bits. Stack alignment is only guaranteed to 16 bytes. Always use
|
||||||
|
// pointers for larger vectors in function arguments. Macros can be
|
||||||
|
// used for larger value arguments.
|
||||||
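A minimal sketch of what these rules mean for callers, using the same attribute syntax as the miner's own buffers (the names are illustrative):

// 64 byte alignment satisfies the 128, 256 and 512 bit requirements and
// keeps the buffers cache line aligned. Large vectors are passed by pointer.
uint32_t vdata[ 24*4 ] __attribute__ ((aligned (64)));  // 4 interleaved input blocks
uint64_t vhash[  8*4 ] __attribute__ ((aligned (64)));  // 4 interleaved 512 bit hashes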
|
//
|
||||||
|
// An attempt was made to make the names as similar as possible to
|
||||||
|
// Intel's intrinsic function format. Most variations are to avoid
|
||||||
|
// confusion with actual Intel intrinsics, brevity, and clarity.
|
||||||
|
//
|
||||||
|
// The main differences are:
|
||||||
|
//
|
||||||
|
// - the leading underscore(s) "_" and the "i" are dropped from the
|
||||||
|
// prefix of vector instructions.
|
||||||
|
// - "mm64" and "mm128" used for 64 and 128 bit prefix respectively
|
||||||
|
// to avoid the ambiguity of "mm".
|
||||||
|
// - the element size does not include additional type specifiers
|
||||||
|
// like "epi".
|
||||||
|
// - some macros contain value args that are updated.
|
||||||
|
// - specialized shift and rotate functions that move elements around
|
||||||
|
// use the notation "1x32" to indicate the distance moved as units of
|
||||||
|
// the element size.
|
||||||
|
// - there is a subset of some functions for scalar data. They may have
|
||||||
|
// no prefix nor vec-size, just one size, the size of the data.
|
||||||
|
//
|
||||||
|
// Function names follow this pattern:
|
||||||
|
//
|
||||||
|
// prefix_op[esize]_[vsize]
|
||||||
|
//
|
||||||
|
// Prefix: usually the size of the largest vectors used. Following
|
||||||
|
// are some examples:
|
||||||
|
//
|
||||||
|
// u64: unsigned 64 bit integer function
|
||||||
|
// i128: signed 128 bit integer function
|
||||||
|
// m128: 128 bit vector identifier
|
||||||
|
// mm128: 128 bit vector function
|
||||||
|
//
|
||||||
|
// op: describes the operation of the function or names the data
|
||||||
|
// identifier.
|
||||||
|
//
|
||||||
|
// esize: optional, element size of operation
|
||||||
|
//
|
||||||
|
// vsize: optional, lane size used when a function operates on elements
|
||||||
|
// of vectors within lanes of a vector.
|
||||||
|
//
|
||||||
|
// Ex: mm256_ror1x64_128 rotates each 128 bit lane of a 256 bit vector
|
||||||
|
// right by 64 bits.
|
||||||
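Broken down, that example name reads as follows. The call at the end is purely illustrative of the convention, assuming the rotate above takes and returns a 256 bit vector:

// mm256_ ror 1x64 _128
//   |     |    |     +-- vsize: operate within each 128 bit lane
//   |     |    +-------- distance: one 64 bit element
//   |     +------------- op: rotate right
//   +-------------------- prefix: 256 bit vector function
__m256i v = _mm256_set_epi64x( 3, 2, 1, 0 );   // lanes { 3, 2 } and { 1, 0 }
__m256i r = mm256_ror1x64_128( v );            // lanes { 2, 3 } and { 0, 1 }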
|
//
|
||||||
|
// Some random thoughts about macros and inline functions, the pros and
|
||||||
|
// cons, when to use them, etc:
|
||||||
|
//
|
||||||
|
// Macros are very convenient and efficient for statement functions.
|
||||||
|
// Macro args are passed by value and modifications are seen by the caller.
|
||||||
|
// Macros should not generally call regular functions unless it is for a
|
||||||
|
// special purpose such as overloading a function name.
|
||||||
|
// Statement function macros that return a value should not end in ";"
|
||||||
|
// Statement function macros that return a value and don't modify input args
|
||||||
|
// may be used in function arguments and expressions.
|
||||||
|
// Macro args used in expressions should be protected ex: (x)+1
|
||||||
|
// Macros force inlining, function inlining can be overridden by the compiler.
|
||||||
|
// Inline functions are preferred when multiple statements or local variables
|
||||||
|
// are needed.
|
||||||
|
// The compiler can't do any syntax checking or type checking of args making
|
||||||
|
// macros difficult to debug.
|
||||||
|
// Although it is technically possible to access the caller's data without
|
||||||
|
// it being passed as arguments, it is good practice to always define
|
||||||
|
// arguments even if they have the same name.
|
||||||
|
//
|
||||||
|
// General guidelines for inline functions:
|
||||||
|
//
|
||||||
|
// Inline functions should not have loops, it defeats the purpose of inlining.
|
||||||
|
// Inline functions should be short, the benefit is lost and the memory cost
|
||||||
|
// increases if the function is referenced often.
|
||||||
|
// Inline functions may call other functions, inlined or not. It is convenient
|
||||||
|
// for wrapper functions whether or not the wrapped function is itself inlined.
|
||||||
|
// Care should be taken when unrolling loops that contain calls to inlined
|
||||||
|
// functions that may be large.
|
||||||
|
// Large code blocks used only once may use function inlining to
|
||||||
|
// improve high level code readability without the penalty of function
|
||||||
|
// overhead.
|
||||||
|
//
|
||||||
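A small illustration of that trade-off (hypothetical helpers, not part of this suite):

// One statement, no locals: a macro is convenient and always inlined.
#define u32_ror( x, c )  ( ( (x) >> (c) ) | ( (x) << ( 32 - (c) ) ) )

// Several statements and a local variable: an inline function is clearer
// and the compiler can still type check the arguments.
static inline uint32_t u32_bswap( uint32_t x )
{
   uint32_t r = ( x >> 24 ) | ( ( x >>  8 ) & 0x0000ff00 )
              | ( ( x <<  8 ) & 0x00ff0000 ) | ( x << 24 );
   return r;
}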
|
///////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
#include <inttypes.h>
|
||||||
|
#include <x86intrin.h>
|
||||||
|
#include <memory.h>
|
||||||
|
#include <stdbool.h>
|
||||||
|
// byteswap.h doesn't exist on Windows, find alternative
|
||||||
|
//#include <byteswap.h>
|
||||||
|
|
||||||
|
// Various types and overlays
|
||||||
|
#include "simd-utils/simd-types.h"
|
||||||
|
|
||||||
|
// 64 and 128 bit integers.
|
||||||
|
#include "simd-utils/simd-int.h"
|
||||||
|
|
||||||
|
#if defined(__MMX__)
|
||||||
|
|
||||||
|
// 64 bit vectors
|
||||||
|
#include "simd-utils/simd-mmx.h"
|
||||||
|
#include "simd-utils/intrlv-mmx.h"
|
||||||
|
#if defined(__SSE2__)
|
||||||
|
|
||||||
|
// 128 bit vectors
|
||||||
|
#include "simd-utils/simd-sse2.h"
|
||||||
|
#include "simd-utils/intrlv-sse2.h"
|
||||||
|
|
||||||
|
#if defined(__AVX2__)
|
||||||
|
|
||||||
|
// 256 bit vectors
|
||||||
|
#include "simd-utils/simd-avx2.h"
|
||||||
|
#include "simd-utils/intrlv-avx2.h"
|
||||||
|
|
||||||
|
// Skylake-X has all these
|
||||||
|
#if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||||
|
|
||||||
|
// 512 bit vectors
|
||||||
|
#include "simd-utils/simd-avx512.h"
|
||||||
|
#include "simd-utils/intrlv-avx512.h"
|
||||||
|
|
||||||
|
#endif // AVX512
|
||||||
|
#endif // AVX2
|
||||||
|
#endif // SSE2
|
||||||
|
#endif // MMX
|
||||||
|
#endif // SIMD_UTILS_H__
|
733
simd-utils/intrlv-avx2.h
Normal file
@@ -0,0 +1,733 @@
|
|||||||
|
#if !defined(INTRLV_AVX2_H__)
|
||||||
|
#define INTRLV_AVX2_H__ 1
|
||||||
|
|
||||||
|
#if defined(__AVX2__)
|
||||||
|
|
||||||
|
// Convenient short cuts for local use only
|
||||||
|
|
||||||
|
// Extract 64 bits from the low 128 bits of 256 bit vector.
|
||||||
|
#define extr64_cast128_256( a, n ) \
|
||||||
|
_mm_extract_epi64( _mm256_castsi256_si128( a ), n )
|
||||||
|
|
||||||
|
// Extract 32 bits from the low 128 bits of 256 bit vector.
|
||||||
|
#define extr32_cast128_256( a, n ) \
|
||||||
|
_mm_extract_epi32( _mm256_castsi256_si128( a ), n )
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// AVX2 256 Bit Vectors
|
||||||
|
//
|
||||||
|
|
||||||
|
#define mm256_put_64( s0, s1, s2, s3) \
|
||||||
|
_mm256_set_epi64x( *((const uint64_t*)(s3)), *((const uint64_t*)(s2)), \
|
||||||
|
*((const uint64_t*)(s1)), *((const uint64_t*)(s0)) )
|
||||||
|
|
||||||
|
#define mm256_put_32( s00, s01, s02, s03, s04, s05, s06, s07 ) \
|
||||||
|
_mm256_set_epi32( *((const uint32_t*)(s07)), *((const uint32_t*)(s06)), \
|
||||||
|
*((const uint32_t*)(s05)), *((const uint32_t*)(s04)), \
|
||||||
|
*((const uint32_t*)(s03)), *((const uint32_t*)(s02)), \
|
||||||
|
*((const uint32_t*)(s01)), *((const uint32_t*)(s00)) )
|
||||||
|
|
||||||
|
#define mm256_get_64( s, i0, i1, i2, i3 ) \
|
||||||
|
_mm256_set_epi64x( ((const uint64_t*)(s))[i3], ((const uint64_t*)(s))[i2], \
|
||||||
|
((const uint64_t*)(s))[i1], ((const uint64_t*)(s))[i0] )
|
||||||
|
|
||||||
|
#define mm256_get_32( s, i0, i1, i2, i3, i4, i5, i6, i7 ) \
|
||||||
|
_mm256_set_epi32( ((const uint32_t*)(s))[i7], ((const uint32_t*)(s))[i6], \
|
||||||
|
((const uint32_t*)(s))[i5], ((const uint32_t*)(s))[i4], \
|
||||||
|
((const uint32_t*)(s))[i3], ((const uint32_t*)(s))[i2], \
|
||||||
|
((const uint32_t*)(s))[i1], ((const uint32_t*)(s))[i0] )
|
||||||
|
|
||||||
|
|
||||||
|
// Blend 2 vectors alternating hi & lo: { hi[n], lo[n-1], ... hi[1], lo[0] }
|
||||||
|
#define mm256_intrlv_blend_128( hi, lo ) \
|
||||||
|
_mm256_blend_epi32( hi, lo, 0x0f )
|
||||||
|
|
||||||
|
#define mm256_intrlv_blend_64( hi, lo ) \
|
||||||
|
_mm256_blend_epi32( hi, lo, 0x33 )
|
||||||
|
|
||||||
|
#define mm256_intrlv_blend_32( hi, lo ) \
|
||||||
|
_mm256_blend_epi32( hi, lo, 0x55 )
|
||||||
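For reference, a minimal sketch of what the 32 bit blend produces (illustrative values; element 0 is the low element, per the Intel convention):

// 0x55 selects the even elements from lo and the odd elements from hi.
__m256i hi = _mm256_set_epi32(  7,  6,  5,  4,  3,  2,  1, 0 );
__m256i lo = _mm256_set_epi32( 70, 60, 50, 40, 30, 20, 10, 0 );
__m256i r  = mm256_intrlv_blend_32( hi, lo );
// r = {  7, 60,  5, 40,  3, 20,  1, 0 }
// This is how the 4way scanhash loops drop fresh nonces into already
// interleaved block data.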
|
|
||||||
|
// Interleave 8x32_256
|
||||||
|
#define mm256_intrlv_8x32_256( d, s0, s1, s2, s3, s4, s5, s6, s7 ) \
|
||||||
|
do { \
|
||||||
|
__m128i s0hi = mm128_extr_hi128_256( s0 ); \
|
||||||
|
__m128i s1hi = mm128_extr_hi128_256( s1 ); \
|
||||||
|
__m128i s2hi = mm128_extr_hi128_256( s2 ); \
|
||||||
|
__m128i s3hi = mm128_extr_hi128_256( s3 ); \
|
||||||
|
__m128i s4hi = mm128_extr_hi128_256( s4 ); \
|
||||||
|
__m128i s5hi = mm128_extr_hi128_256( s5 ); \
|
||||||
|
__m128i s6hi = mm128_extr_hi128_256( s6 ); \
|
||||||
|
__m128i s7hi = mm128_extr_hi128_256( s7 ); \
|
||||||
|
casti_m256i( d,0 ) = _mm256_set_epi32( \
|
||||||
|
extr32_cast128_256(s7,0), extr32_cast128_256(s6,0), \
|
||||||
|
extr32_cast128_256(s5,0), extr32_cast128_256(s4,0), \
|
||||||
|
extr32_cast128_256(s3,0), extr32_cast128_256(s2,0), \
|
||||||
|
extr32_cast128_256(s1,0), extr32_cast128_256(s0,0) ); \
|
||||||
|
casti_m256i( d,1 ) = _mm256_set_epi32( \
|
||||||
|
extr32_cast128_256(s7,1), extr32_cast128_256(s6,1), \
|
||||||
|
extr32_cast128_256(s5,1), extr32_cast128_256(s4,1), \
|
||||||
|
extr32_cast128_256(s3,1), extr32_cast128_256(s2,1), \
|
||||||
|
extr32_cast128_256(s1,1), extr32_cast128_256(s0,1) ); \
|
||||||
|
casti_m256i( d,2 ) = _mm256_set_epi32( \
|
||||||
|
extr32_cast128_256(s7,2), extr32_cast128_256(s6,2), \
|
||||||
|
extr32_cast128_256(s5,2), extr32_cast128_256(s4,2), \
|
||||||
|
extr32_cast128_256(s3,2), extr32_cast128_256(s2,2), \
|
||||||
|
extr32_cast128_256(s1,2), extr32_cast128_256(s0,2) ); \
|
||||||
|
casti_m256i( d,3 ) = _mm256_set_epi32( \
|
||||||
|
extr32_cast128_256(s7,3), extr32_cast128_256(s6,3), \
|
||||||
|
extr32_cast128_256(s5,3), extr32_cast128_256(s4,3), \
|
||||||
|
extr32_cast128_256(s3,3), extr32_cast128_256(s2,3), \
|
||||||
|
extr32_cast128_256(s1,3), extr32_cast128_256(s0,3) ); \
|
||||||
|
casti_m256i( d,4 ) = _mm256_set_epi32( \
|
||||||
|
mm128_extr_32(s7hi,0), mm128_extr_32(s6hi,0), \
|
||||||
|
mm128_extr_32(s5hi,0), mm128_extr_32(s4hi,0), \
|
||||||
|
mm128_extr_32(s3hi,0), mm128_extr_32(s2hi,0), \
|
||||||
|
mm128_extr_32(s1hi,0), mm128_extr_32(s0hi,0) ); \
|
||||||
|
casti_m256i( d,5 ) = _mm256_set_epi32( \
|
||||||
|
mm128_extr_32(s7hi,1), mm128_extr_32(s6hi,1), \
|
||||||
|
mm128_extr_32(s5hi,1), mm128_extr_32(s4hi,1), \
|
||||||
|
mm128_extr_32(s3hi,1), mm128_extr_32(s2hi,1), \
|
||||||
|
mm128_extr_32(s1hi,1), mm128_extr_32(s0hi,1) ); \
|
||||||
|
casti_m256i( d,6 ) = _mm256_set_epi32( \
|
||||||
|
mm128_extr_32(s7hi,2), mm128_extr_32(s6hi,2), \
|
||||||
|
mm128_extr_32(s5hi,2), mm128_extr_32(s4hi,2), \
|
||||||
|
mm128_extr_32(s3hi,2), mm128_extr_32(s2hi,2), \
|
||||||
|
mm128_extr_32(s1hi,2), mm128_extr_32(s0hi,2) ); \
|
||||||
|
casti_m256i( d,7 ) = _mm256_set_epi32( \
|
||||||
|
mm128_extr_32(s7hi,3), mm128_extr_32(s6hi,3), \
|
||||||
|
mm128_extr_32(s5hi,3), mm128_extr_32(s4hi,3), \
|
||||||
|
mm128_extr_32(s3hi,3), mm128_extr_32(s2hi,3), \
|
||||||
|
mm128_extr_32(s1hi,3), mm128_extr_32(s0hi,3) ); \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_intrlv_8x32_128( d, s0, s1, s2, s3, s4, s5, s6, s7 ) \
|
||||||
|
do { \
|
||||||
|
casti_m256i( d,0 ) = _mm256_set_epi32( \
|
||||||
|
mm128_extr_32(s7,0), mm128_extr_32(s6,0), \
|
||||||
|
mm128_extr_32(s5,0), mm128_extr_32(s4,0), \
|
||||||
|
mm128_extr_32(s3,0), mm128_extr_32(s2,0), \
|
||||||
|
mm128_extr_32(s1,0), mm128_extr_32(s0,0) ); \
|
||||||
|
casti_m256i( d,1 ) = _mm256_set_epi32( \
|
||||||
|
mm128_extr_32(s7,1), mm128_extr_32(s6,1), \
|
||||||
|
mm128_extr_32(s5,1), mm128_extr_32(s4,1), \
|
||||||
|
mm128_extr_32(s3,1), mm128_extr_32(s2,1), \
|
||||||
|
mm128_extr_32(s1,1), mm128_extr_32(s0,1) ); \
|
||||||
|
casti_m256i( d,2 ) = _mm256_set_epi32( \
|
||||||
|
mm128_extr_32(s7,2), mm128_extr_32(s6,2), \
|
||||||
|
mm128_extr_32(s5,2), mm128_extr_32(s4,2), \
|
||||||
|
mm128_extr_32(s3,2), mm128_extr_32(s2,2), \
|
||||||
|
mm128_extr_32(s1,2), mm128_extr_32(s0,2) ); \
|
||||||
|
casti_m256i( d,3 ) = _mm256_set_epi32( \
|
||||||
|
mm128_extr_32(s7,3), mm128_extr_32(s6,3), \
|
||||||
|
mm128_extr_32(s5,3), mm128_extr_32(s4,3), \
|
||||||
|
mm128_extr_32(s3,3), mm128_extr_32(s2,3), \
|
||||||
|
mm128_extr_32(s1,3), mm128_extr_32(s0,3) ); \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_bswap_intrlv_8x32_256( d, src ) \
|
||||||
|
do { \
|
||||||
|
__m256i s0 = mm256_bswap_32( src ); \
|
||||||
|
__m128i s1 = _mm256_extracti128_si256( s0, 1 ); \
|
||||||
|
casti_m256i( d, 0 ) = _mm256_set1_epi32( _mm_extract_epi32( \
|
||||||
|
_mm256_castsi256_si128( s0 ), 0 ) ); \
|
||||||
|
casti_m256i( d, 1 ) = _mm256_set1_epi32( _mm_extract_epi32( \
|
||||||
|
_mm256_castsi256_si128( s0 ), 1 ) ); \
|
||||||
|
casti_m256i( d, 2 ) = _mm256_set1_epi32( _mm_extract_epi32( \
|
||||||
|
_mm256_castsi256_si128( s0 ), 2 ) ); \
|
||||||
|
casti_m256i( d, 3 ) = _mm256_set1_epi32( _mm_extract_epi32( \
|
||||||
|
_mm256_castsi256_si128( s0 ), 3 ) ); \
|
||||||
|
casti_m256i( d, 4 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 0 ) ); \
|
||||||
|
casti_m256i( d, 5 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 1 ) ); \
|
||||||
|
casti_m256i( d, 6 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 2 ) ); \
|
||||||
|
casti_m256i( d, 7 ) = _mm256_set1_epi32( _mm_extract_epi32( s1, 3 ) ); \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_bswap_intrlv_8x32_128( d, src ) \
|
||||||
|
do { \
|
||||||
|
__m128i ss = mm128_bswap_32( src ); \
|
||||||
|
casti_m256i( d, 0 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 0 ) ); \
|
||||||
|
casti_m256i( d, 1 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 1 ) ); \
|
||||||
|
casti_m256i( d, 2 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 2 ) ); \
|
||||||
|
casti_m256i( d, 3 ) = _mm256_set1_epi32( _mm_extract_epi32( ss, 3 ) ); \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_dintrlv_8x32_256( d0, d1, d2, d3, d4, d5, d6, d7, s ) \
|
||||||
|
do { \
|
||||||
|
__m256i s0 = casti_m256i(s,0); \
|
||||||
|
__m256i s1 = casti_m256i(s,1); \
|
||||||
|
__m256i s2 = casti_m256i(s,2); \
|
||||||
|
__m256i s3 = casti_m256i(s,3); \
|
||||||
|
__m256i s4 = casti_m256i(s,4); \
|
||||||
|
__m256i s5 = casti_m256i(s,5); \
|
||||||
|
__m256i s6 = casti_m256i(s,6); \
|
||||||
|
__m256i s7 = casti_m256i(s,7); \
|
||||||
|
__m128i s0hi = _mm256_extracti128_si256( s0, 1 ); \
|
||||||
|
__m128i s1hi = _mm256_extracti128_si256( s1, 1 ); \
|
||||||
|
__m128i s2hi = _mm256_extracti128_si256( s2, 1 ); \
|
||||||
|
__m128i s3hi = _mm256_extracti128_si256( s3, 1 ); \
|
||||||
|
__m128i s4hi = _mm256_extracti128_si256( s4, 1 ); \
|
||||||
|
__m128i s5hi = _mm256_extracti128_si256( s5, 1 ); \
|
||||||
|
__m128i s6hi = _mm256_extracti128_si256( s6, 1 ); \
|
||||||
|
__m128i s7hi = _mm256_extracti128_si256( s7, 1 ); \
|
||||||
|
d0 = _mm256_set_epi32( \
|
||||||
|
extr32_cast128_256( s7, 0 ), extr32_cast128_256( s6, 0 ), \
|
||||||
|
extr32_cast128_256( s5, 0 ), extr32_cast128_256( s4, 0 ), \
|
||||||
|
extr32_cast128_256( s3, 0 ), extr32_cast128_256( s2, 0 ), \
|
||||||
|
extr32_cast128_256( s1, 0 ), extr32_cast128_256( s0, 0 ) );\
|
||||||
|
d1 = _mm256_set_epi32( \
|
||||||
|
extr32_cast128_256( s7, 1 ), extr32_cast128_256( s6, 1 ), \
|
||||||
|
extr32_cast128_256( s5, 1 ), extr32_cast128_256( s4, 1 ), \
|
||||||
|
extr32_cast128_256( s3, 1 ), extr32_cast128_256( s2, 1 ), \
|
||||||
|
extr32_cast128_256( s1, 1 ), extr32_cast128_256( s0, 1 ) );\
|
||||||
|
d2 = _mm256_set_epi32( \
|
||||||
|
extr32_cast128_256( s7, 2 ), extr32_cast128_256( s6, 2 ), \
|
||||||
|
extr32_cast128_256( s5, 2 ), extr32_cast128_256( s4, 2 ), \
|
||||||
|
extr32_cast128_256( s3, 2 ), extr32_cast128_256( s2, 2 ), \
|
||||||
|
extr32_cast128_256( s1, 2 ), extr32_cast128_256( s0, 2 ) );\
|
||||||
|
d3 = _mm256_set_epi32( \
|
||||||
|
extr32_cast128_256( s7, 3 ), extr32_cast128_256( s6, 3 ), \
|
||||||
|
extr32_cast128_256( s5, 3 ), extr32_cast128_256( s4, 3 ), \
|
||||||
|
extr32_cast128_256( s3, 3 ), extr32_cast128_256( s2, 3 ), \
|
||||||
|
extr32_cast128_256( s1, 3 ), extr32_cast128_256( s0, 3 ) );\
|
||||||
|
d4 = _mm256_set_epi32( \
|
||||||
|
_mm_extract_epi32( s7hi, 0 ), _mm_extract_epi32( s6hi, 0 ), \
|
||||||
|
_mm_extract_epi32( s5hi, 0 ), _mm_extract_epi32( s4hi, 0 ), \
|
||||||
|
_mm_extract_epi32( s3hi, 0 ), _mm_extract_epi32( s2hi, 0 ), \
|
||||||
|
_mm_extract_epi32( s1hi, 0 ), _mm_extract_epi32( s0hi, 0 ) ); \
|
||||||
|
d5 = _mm256_set_epi32( \
|
||||||
|
_mm_extract_epi32( s7hi, 1 ), _mm_extract_epi32( s6hi, 1 ), \
|
||||||
|
_mm_extract_epi32( s5hi, 1 ), _mm_extract_epi32( s4hi, 1 ), \
|
||||||
|
_mm_extract_epi32( s3hi, 1 ), _mm_extract_epi32( s2hi, 1 ), \
|
||||||
|
_mm_extract_epi32( s1hi, 1 ), _mm_extract_epi32( s0hi, 1 ) ); \
|
||||||
|
d6 = _mm256_set_epi32( \
|
||||||
|
_mm_extract_epi32( s7hi, 2 ), _mm_extract_epi32( s6hi, 2 ), \
|
||||||
|
_mm_extract_epi32( s5hi, 2 ), _mm_extract_epi32( s4hi, 2 ), \
|
||||||
|
_mm_extract_epi32( s3hi, 2 ), _mm_extract_epi32( s2hi, 2 ), \
|
||||||
|
_mm_extract_epi32( s1hi, 2 ), _mm_extract_epi32( s0hi, 2 ) ); \
|
||||||
|
d7 = _mm256_set_epi32( \
|
||||||
|
_mm_extract_epi32( s7hi, 3 ), _mm_extract_epi32( s6hi, 3 ), \
|
||||||
|
_mm_extract_epi32( s5hi, 3 ), _mm_extract_epi32( s4hi, 3 ), \
|
||||||
|
_mm_extract_epi32( s3hi, 3 ), _mm_extract_epi32( s2hi, 3 ), \
|
||||||
|
_mm_extract_epi32( s1hi, 3 ), _mm_extract_epi32( s0hi, 3 ) ); \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_dintrlv_8x32_128( d0, d1, d2, d3, d4, d5, d6, d7, s ) \
|
||||||
|
do { \
|
||||||
|
__m128i s0 = casti_m128i(s,0); \
|
||||||
|
__m128i s1 = casti_m128i(s,1); \
|
||||||
|
__m128i s2 = casti_m128i(s,2); \
|
||||||
|
__m128i s3 = casti_m128i(s,3); \
|
||||||
|
d0 = _mm_set_epi32( \
|
||||||
|
_mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \
|
||||||
|
_mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \
|
||||||
|
d1 = _mm_set_epi32( \
|
||||||
|
_mm_extract_epi32( s3, 1 ), _mm_extract_epi32( s2, 0 ), \
|
||||||
|
_mm_extract_epi32( s1, 1 ), _mm_extract_epi32( s0, 0 ) ); \
|
||||||
|
d2 = _mm_set_epi32( \
|
||||||
|
_mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \
|
||||||
|
_mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \
|
||||||
|
d3 = _mm_set_epi32( \
|
||||||
|
_mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \
|
||||||
|
_mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \
|
||||||
|
d4 = _mm_set_epi32( \
|
||||||
|
_mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \
|
||||||
|
_mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \
|
||||||
|
d5 = _mm_set_epi32( \
|
||||||
|
_mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \
|
||||||
|
_mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \
|
||||||
|
d6 = _mm_set_epi32( \
|
||||||
|
_mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \
|
||||||
|
_mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \
|
||||||
|
d7 = _mm_set_epi32( \
|
||||||
|
_mm_extract_epi32( s3, 0 ), _mm_extract_epi32( s2, 0 ), \
|
||||||
|
_mm_extract_epi32( s1, 0 ), _mm_extract_epi32( s0, 0 ) ); \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_intrlv_4x64_256( d, s0, s1, s2, s3 ) \
|
||||||
|
do { \
|
||||||
|
__m128i s0hi = _mm256_extracti128_si256( s0, 1 ); \
|
||||||
|
__m128i s1hi = _mm256_extracti128_si256( s1, 1 ); \
|
||||||
|
__m128i s2hi = _mm256_extracti128_si256( s2, 1 ); \
|
||||||
|
__m128i s3hi = _mm256_extracti128_si256( s3, 1 ); \
|
||||||
|
casti_m256i( d,0 ) = _mm256_set_epi64x( \
|
||||||
|
extr64_cast128_256( s3, 0 ), extr64_cast128_256( s2, 0 ), \
|
||||||
|
extr64_cast128_256( s1, 0 ), extr64_cast128_256( s0, 0 ) ); \
|
||||||
|
casti_m256i( d,1 ) = _mm256_set_epi64x( \
|
||||||
|
extr64_cast128_256( s3, 1 ), extr64_cast128_256( s2, 1 ), \
|
||||||
|
extr64_cast128_256( s1, 1 ), extr64_cast128_256( s0, 1 ) ); \
|
||||||
|
casti_m256i( d,2 ) = _mm256_set_epi64x( \
|
||||||
|
_mm_extract_epi64( s3hi,0 ), _mm_extract_epi64( s2hi,0 ), \
|
||||||
|
_mm_extract_epi64( s1hi,0 ), _mm_extract_epi64( s0hi,0 ) ); \
|
||||||
|
casti_m256i( d,3 ) = _mm256_set_epi64x( \
|
||||||
|
_mm_extract_epi64( s3hi,1 ), _mm_extract_epi64( s2hi,1 ), \
|
||||||
|
_mm_extract_epi64( s1hi,1 ), _mm_extract_epi64( s0hi,1 ) ); \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_intrlv_4x64_128( d, s0, s1, s2, s3 ) \
|
||||||
|
do { \
|
||||||
|
casti_m256i( d,0 ) = _mm256_set_epi64x( \
|
||||||
|
_mm_extract_epi64( s3, 0 ), _mm_extract_epi64( s2, 0 ), \
|
||||||
|
_mm_extract_epi64( s1, 0 ), _mm_extract_epi64( s0, 0 ) ); \
|
||||||
|
casti_m256i( d,1 ) = _mm256_set_epi64x( \
|
||||||
|
_mm_extract_epi64( s3, 1 ), _mm_extract_epi64( s2, 1 ), \
|
||||||
|
_mm_extract_epi64( s1, 1 ), _mm_extract_epi64( s0, 1 ) ); \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_bswap_intrlv_4x64_256( d, src ) \
|
||||||
|
do { \
|
||||||
|
__m256i s0 = mm256_bswap_32( src ); \
|
||||||
|
__m128i s1 = _mm256_extracti128_si256( s0, 1 ); \
|
||||||
|
casti_m256i( d,0 ) = _mm256_set1_epi64x( _mm_extract_epi64( \
|
||||||
|
_mm256_castsi256_si128( s0 ), 0 ) ); \
|
||||||
|
casti_m256i( d,1 ) = _mm256_set1_epi64x( _mm_extract_epi64( \
|
||||||
|
_mm256_castsi256_si128( s0 ), 1 ) ); \
|
||||||
|
casti_m256i( d,2 ) = _mm256_set1_epi64x( _mm_extract_epi64( s1, 0 ) ); \
|
||||||
|
casti_m256i( d,3 ) = _mm256_set1_epi64x( _mm_extract_epi64( s1, 1 ) ); \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_bswap_intrlv_4x64_128( d, src ) \
|
||||||
|
do { \
|
||||||
|
__m128i ss = mm128_bswap_32( src ); \
|
||||||
|
casti_m256i( d,0 ) = _mm256_set1_epi64x( _mm_extract_epi64( ss, 0 ) ); \
|
||||||
|
casti_m256i( d,1 ) = _mm256_set1_epi64x( _mm_extract_epi64( ss, 1 ) ); \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
// 4 lanes of 256 bits using 64 bit interleaving (standard final hash size)
|
||||||
|
static inline void mm256_dintrlv_4x64_256( void *d0, void *d1, void *d2,
|
||||||
|
void *d3, const int n, const void *src )
|
||||||
|
{
|
||||||
|
__m256i s0 = *( (__m256i*) src ); // s[0][1:0]
|
||||||
|
__m256i s1 = *( (__m256i*)(src+32) ); // s[1][1:0]
|
||||||
|
__m256i s2 = *( (__m256i*)(src+64) ); // s[2][1:0]
|
||||||
|
__m256i s3 = *( (__m256i*)(src+96) ); // s[3][2:0]
|
||||||
|
__m128i s0hi = _mm256_extracti128_si256( s0, 1 ); // s[0][3:2]
|
||||||
|
__m128i s1hi = _mm256_extracti128_si256( s1, 1 ); // s[1][3:2]
|
||||||
|
__m128i s2hi = _mm256_extracti128_si256( s2, 1 ); // s[2][3:2]
|
||||||
|
__m128i s3hi = _mm256_extracti128_si256( s3, 1 ); // s[3][3:2]
|
||||||
|
|
||||||
|
casti_m256i( d0,n ) = _mm256_set_epi64x(
|
||||||
|
extr64_cast128_256( s3, 0 ), extr64_cast128_256( s2, 0 ),
|
||||||
|
extr64_cast128_256( s1, 0 ), extr64_cast128_256( s0, 0 ) );
|
||||||
|
casti_m256i( d1,n ) = _mm256_set_epi64x(
|
||||||
|
extr64_cast128_256( s3, 1 ), extr64_cast128_256( s2, 1 ),
|
||||||
|
extr64_cast128_256( s1, 1 ), extr64_cast128_256( s0, 1 ) );
|
||||||
|
casti_m256i( d2,n ) = _mm256_set_epi64x(
|
||||||
|
_mm_extract_epi64( s3hi, 0 ), _mm_extract_epi64( s2hi, 0 ),
|
||||||
|
_mm_extract_epi64( s1hi, 0 ), _mm_extract_epi64( s0hi, 0 ) );
|
||||||
|
casti_m256i( d3,n ) = _mm256_set_epi64x(
|
||||||
|
_mm_extract_epi64( s3hi, 1 ), _mm_extract_epi64( s2hi, 1 ),
|
||||||
|
_mm_extract_epi64( s1hi, 1 ), _mm_extract_epi64( s0hi, 1 ) );
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// quarter avx2 block, 16 bytes * 4 lanes
|
||||||
|
// 4 lanes of 128 bits using 64 bit interleaving
|
||||||
|
// Used for last 16 bytes of 80 byte input, only used for testing.
|
||||||
|
static inline void mm128_dintrlv_4x64_128( void *d0, void *d1, void *d2,
|
||||||
|
void *d3, const int n, const void *src )
|
||||||
|
{
|
||||||
|
__m256i s0 = *( (__m256i*) src );
|
||||||
|
__m256i s1 = *( (__m256i*)(src+32) );
|
||||||
|
__m128i s0hi = _mm256_extracti128_si256( s0, 1 );
|
||||||
|
__m128i s1hi = _mm256_extracti128_si256( s1, 1 );
|
||||||
|
|
||||||
|
casti_m128i( d0,n ) = _mm_set_epi64x( extr64_cast128_256( s1 , 0 ),
|
||||||
|
extr64_cast128_256( s0 , 0 ) );
|
||||||
|
casti_m128i( d1,n ) = _mm_set_epi64x( extr64_cast128_256( s1 , 1 ),
|
||||||
|
extr64_cast128_256( s0 , 1 ) );
|
||||||
|
casti_m128i( d2,n ) = _mm_set_epi64x( _mm_extract_epi64( s1hi, 0 ),
|
||||||
|
_mm_extract_epi64( s0hi, 0 ) );
|
||||||
|
casti_m128i( d3,n ) = _mm_set_epi64x( _mm_extract_epi64( s1hi, 1 ),
|
||||||
|
_mm_extract_epi64( s0hi, 1 ) );
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
static inline void mm256_dintrlv_2x128x256( void *d0, void *d1,
|
||||||
|
const int n, const void *s )
|
||||||
|
{
|
||||||
|
casti_m256i( d0,n ) = mm256_get_64( s, 0, 1, 4, 5 );
|
||||||
|
casti_m256i( d1,n ) = mm256_get_64( s, 2, 3, 6, 7 );
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
//
|
||||||
|
|
||||||
|
// Interleave 8 source buffers containing 32 bit data into the destination
|
||||||
|
// vector
|
||||||
|
#define mm256_interleave_8x32 mm256_intrlv_8x32
|
||||||
|
static inline void mm256_intrlv_8x32( void *d, const void *s0,
|
||||||
|
const void *s1, const void *s2, const void *s3, const void *s4,
|
||||||
|
const void *s5, const void *s6, const void *s7, int bit_len )
|
||||||
|
{
|
||||||
|
mm256_intrlv_8x32_256( d , casti_m256i( s0,0 ), casti_m256i( s1,0 ),
|
||||||
|
casti_m256i( s2,0 ), casti_m256i( s3,0 ), casti_m256i( s4,0 ),
|
||||||
|
casti_m256i( s5,0 ), casti_m256i( s6,0 ), casti_m256i( s7,0 ) );
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
mm256_intrlv_8x32_256( d+256, casti_m256i( s0,1 ), casti_m256i( s1,1 ),
|
||||||
|
casti_m256i( s2,1 ), casti_m256i( s3,1 ), casti_m256i( s4,1 ),
|
||||||
|
casti_m256i( s5,1 ), casti_m256i( s6,1 ), casti_m256i( s7,1 ) );
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
if ( bit_len <= 640 )
|
||||||
|
{
|
||||||
|
mm256_intrlv_8x32_128( d+512, casti_m128i( s0,4 ), casti_m128i( s1,4 ),
|
||||||
|
casti_m128i( s2,4 ), casti_m128i( s3,4 ), casti_m128i( s4,4 ),
|
||||||
|
casti_m128i( s5,4 ), casti_m128i( s6,4 ), casti_m128i( s7,4 ) );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
mm256_intrlv_8x32_256( d+512, casti_m256i( s0,2 ), casti_m256i( s1,2 ),
|
||||||
|
casti_m256i( s2,2 ), casti_m256i( s3,2 ), casti_m256i( s4,2 ),
|
||||||
|
casti_m256i( s5,2 ), casti_m256i( s6,2 ), casti_m256i( s7,2 ) );
|
||||||
|
mm256_intrlv_8x32_256( d+768, casti_m256i( s0,3 ), casti_m256i( s1,3 ),
|
||||||
|
casti_m256i( s2,3 ), casti_m256i( s3,3 ), casti_m256i( s4,3 ),
|
||||||
|
casti_m256i( s5,3 ), casti_m256i( s6,3 ), casti_m256i( s7,3 ) );
|
||||||
|
// bit_len == 1024
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// Interleave 80 bytes of 32 bit data for 8 lanes.
|
||||||
|
static inline void mm256_bswap_intrlv80_8x32( void *d, const void *s )
|
||||||
|
{
|
||||||
|
mm256_bswap_intrlv_8x32_256( d , casti_m256i( s, 0 ) );
|
||||||
|
mm256_bswap_intrlv_8x32_256( d+256, casti_m256i( s, 1 ) );
|
||||||
|
mm256_bswap_intrlv_8x32_128( d+512, casti_m128i( s, 4 ) );
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deinterleave 8 buffers of 32 bit data from the source buffer.
|
||||||
|
// Sub-function can be called directly for 32 byte final hash.
|
||||||
|
#define mm256_deinterleave_8x32 mm256_dintrlv_8x32
|
||||||
|
static inline void mm256_dintrlv_8x32( void *d0, void *d1, void *d2,
|
||||||
|
void *d3, void *d4, void *d5, void *d6, void *d7,
|
||||||
|
const void *s, int bit_len )
|
||||||
|
{
|
||||||
|
mm256_dintrlv_8x32_256( casti_m256i(d0,0), casti_m256i(d1,0),
|
||||||
|
casti_m256i(d2,0), casti_m256i(d3,0), casti_m256i(d4,0),
|
||||||
|
casti_m256i(d5,0), casti_m256i(d6,0), casti_m256i(d7,0), s );
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
mm256_dintrlv_8x32_256( casti_m256i(d0,1), casti_m256i(d1,1),
|
||||||
|
casti_m256i(d2,1), casti_m256i(d3,1), casti_m256i(d4,1),
|
||||||
|
casti_m256i(d5,1), casti_m256i(d6,1), casti_m256i(d7,1), s+256 );
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
// short block, final 16 bytes of input data
|
||||||
|
if ( bit_len <= 640 )
|
||||||
|
{
|
||||||
|
mm128_dintrlv_8x32_128( casti_m128i(d0,2), casti_m128i(d1,2),
|
||||||
|
casti_m128i(d2,2), casti_m128i(d3,2), casti_m128i(d4,2),
|
||||||
|
casti_m128i(d5,2), casti_m128i(d6,2), casti_m128i(d7,2), s+512 );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// bitlen == 1024
|
||||||
|
mm256_dintrlv_8x32_256( casti_m256i(d0,2), casti_m256i(d1,2),
|
||||||
|
casti_m256i(d2,2), casti_m256i(d3,2), casti_m256i(d4,2),
|
||||||
|
casti_m256i(d5,2), casti_m256i(d6,2), casti_m256i(d7,2), s+512 );
|
||||||
|
mm256_dintrlv_8x32_256( casti_m256i(d0,3), casti_m256i(d1,3),
|
||||||
|
casti_m256i(d2,3), casti_m256i(d3,3), casti_m256i(d4,3),
|
||||||
|
casti_m256i(d5,3), casti_m256i(d6,3), casti_m256i(d7,3), s+768 );
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void mm256_extract_lane_8x32( void *d, const void *s,
|
||||||
|
const int lane, const int bit_len )
|
||||||
|
{
|
||||||
|
casti_m256i( d,0 ) = mm256_get_32(s, lane , lane+ 8, lane+ 16, lane+ 24,
|
||||||
|
lane+32, lane+ 40, lane+ 48, lane+ 56 );
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
casti_m256i( d,1 ) = mm256_get_32(s, lane+64, lane+ 72, lane+ 80, lane+ 88,
|
||||||
|
lane+96, lane+104, lane+112, lane+120 );
|
||||||
|
// bit_len == 512
|
||||||
|
}
|
||||||
|
|
||||||
|
// Interleave 4 source buffers containing 64 bit data into the destination
|
||||||
|
// buffer. Only bit_len 256, 512, 640 & 1024 are supported.
|
||||||
|
#define mm256_interleave_4x64 mm256_intrlv_4x64
|
||||||
|
static inline void mm256_intrlv_4x64( void *d, const void *s0,
|
||||||
|
const void *s1, const void *s2, const void *s3, int bit_len )
|
||||||
|
{
|
||||||
|
mm256_intrlv_4x64_256( d , casti_m256i(s0,0), casti_m256i(s1,0),
|
||||||
|
casti_m256i(s2,0), casti_m256i(s3,0) );
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
mm256_intrlv_4x64_256( d+128, casti_m256i(s0,1), casti_m256i(s1,1),
|
||||||
|
casti_m256i(s2,1), casti_m256i(s3,1) );
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
if ( bit_len <= 640 )
|
||||||
|
{
|
||||||
|
mm256_intrlv_4x64_128( d+256, casti_m128i(s0,4), casti_m128i(s1,4),
|
||||||
|
casti_m128i(s2,4), casti_m128i(s3,4) );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// bit_len == 1024
|
||||||
|
mm256_intrlv_4x64_256( d+256, casti_m256i(s0,2), casti_m256i(s1,2),
|
||||||
|
casti_m256i(s2,2), casti_m256i(s3,2) );
|
||||||
|
mm256_intrlv_4x64_256( d+384, casti_m256i(s0,3), casti_m256i(s1,3),
|
||||||
|
casti_m256i(s2,3), casti_m256i(s3,3) );
|
||||||
|
}
|
||||||
|
|
||||||
|
// Interleave 80 bytes of 32 bit data for 4 lanes of 64 bit interleaving.
static inline void mm256_bswap_intrlv80_4x64( void *d, const void *s )
{
   mm256_bswap_intrlv_4x64_256( d    , casti_m256i( s, 0 ) );
   mm256_bswap_intrlv_4x64_256( d+128, casti_m256i( s, 1 ) );
   mm256_bswap_intrlv_4x64_128( d+256, casti_m128i( s, 4 ) );
}
|
||||||
|
|
||||||
|
// Deinterleave 4 buffers of 64 bit data from the source buffer.
|
||||||
|
// bit_len must be 256, 512, 640 or 1024 bits.
|
||||||
|
// Requires overrun padding for 640 bit len.
|
||||||
|
#define mm256_deinterleave_4x64 mm256_dintrlv_4x64
|
||||||
|
static inline void mm256_dintrlv_4x64( void *d0, void *d1, void *d2,
|
||||||
|
void *d3, const void *s, int bit_len )
|
||||||
|
{
|
||||||
|
mm256_dintrlv_4x64_256( d0, d1, d2, d3, 0, s );
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
mm256_dintrlv_4x64_256( d0, d1, d2, d3, 1, s+128 );
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
// short block, final 16 bytes of input data
|
||||||
|
if ( bit_len <= 640 )
|
||||||
|
{
|
||||||
|
mm128_dintrlv_4x64_128( d0, d1, d2, d3, 4, s+256 );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// bit_len == 1024
|
||||||
|
mm256_dintrlv_4x64_256( d0, d1, d2, d3, 2, s+256 );
|
||||||
|
mm256_dintrlv_4x64_256( d0, d1, d2, d3, 3, s+384 );
|
||||||
|
}
|
||||||
|
|
||||||
|
// extract and deinterleave specified lane.
|
||||||
|
#define mm256_extract_lane_4x64_256 \
|
||||||
|
casti_m256i( d, 0 ) = mm256_get_64( s, lane, lane+4, lane+8, lane+12 )
|
||||||
|
static inline void mm256_extract_lane_4x64( void *d, const void *s,
|
||||||
|
const int lane, const int bit_len )
|
||||||
|
{
|
||||||
|
casti_m256i( d, 0 ) = mm256_get_64( s, lane, lane+4, lane+8, lane+12 );
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
casti_m256i( d, 1 ) = mm256_get_64( s, lane+16, lane+20, lane+24, lane+28 );
|
||||||
|
return;
|
||||||
|
}
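
// Usage sketch (illustrative, not from the original source): after a 4 way
// 64 bit hash, pull one lane's 32 byte result out of the interleaved state
// so it can be tested against the target. "hash" and "vhash" are
// hypothetical buffer names.
static inline void example_check_lane_4x64( uint32_t *hash, const void *vhash,
                                            const int lane )
{
   mm256_extract_lane_4x64( hash, vhash, lane, 256 );
   // hash[0..7] now holds the de-interleaved 256 bit hash for this lane.
}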
|
||||||
|
|
||||||
|
|
||||||
|
// Convert from 4x32 SSE2 interleaving to 4x64 AVX2.
|
||||||
|
// Can't do it in place
|
||||||
|
#define mm256_reinterleave_4x32_4x64 mm256_rintrlv_4x32_4x64
|
||||||
|
static inline void mm256_rintrlv_4x32_4x64( void *dst, void *src,
|
||||||
|
int bit_len )
|
||||||
|
{
|
||||||
|
__m256i* d = (__m256i*)dst;
|
||||||
|
uint32_t *s = (uint32_t*)src;
|
||||||
|
|
||||||
|
d[0] = _mm256_set_epi32( s[ 7],s[ 3],s[ 6],s[ 2],s[ 5],s[ 1],s[ 4],s[ 0] );
|
||||||
|
d[1] = _mm256_set_epi32( s[15],s[11],s[14],s[10],s[13],s[ 9],s[12],s[ 8] );
|
||||||
|
d[2] = _mm256_set_epi32( s[23],s[19],s[22],s[18],s[21],s[17],s[20],s[16] );
|
||||||
|
d[3] = _mm256_set_epi32( s[31],s[27],s[30],s[26],s[29],s[25],s[28],s[24] );
|
||||||
|
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
|
||||||
|
d[4] = _mm256_set_epi32( s[39],s[35],s[38],s[34],s[37],s[33],s[36],s[32] );
|
||||||
|
d[5] = _mm256_set_epi32( s[47],s[43],s[46],s[42],s[45],s[41],s[44],s[40] );
|
||||||
|
d[6] = _mm256_set_epi32( s[55],s[51],s[54],s[50],s[53],s[49],s[52],s[48] );
|
||||||
|
d[7] = _mm256_set_epi32( s[63],s[59],s[62],s[58],s[61],s[57],s[60],s[56] );
|
||||||
|
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
|
||||||
|
d[8] = _mm256_set_epi32( s[71],s[67],s[70],s[66],s[69],s[65],s[68],s[64] );
|
||||||
|
d[9] = _mm256_set_epi32( s[79],s[75],s[78],s[74],s[77],s[73],s[76],s[72] );
|
||||||
|
|
||||||
|
if ( bit_len <= 640 ) return;
|
||||||
|
|
||||||
|
d[10] = _mm256_set_epi32(s[87],s[83],s[86],s[82],s[85],s[81],s[84],s[80]);
|
||||||
|
d[11] = _mm256_set_epi32(s[95],s[91],s[94],s[90],s[93],s[89],s[92],s[88]);
|
||||||
|
|
||||||
|
d[12] = _mm256_set_epi32(s[103],s[99],s[102],s[98],s[101],s[97],s[100],s[96]);
|
||||||
|
d[13] = _mm256_set_epi32(s[111],s[107],s[110],s[106],s[109],s[105],s[108],s[104]);
|
||||||
|
d[14] = _mm256_set_epi32(s[119],s[115],s[118],s[114],s[117],s[113],s[116],s[112]);
|
||||||
|
d[15] = _mm256_set_epi32(s[127],s[123],s[126],s[122],s[125],s[121],s[124],s[120]);
|
||||||
|
// bit_len == 1024
|
||||||
|
}
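
// Reference sketch (illustrative only): the same 4x32 -> 4x64 conversion
// expressed with scalar indexing. In 4x32 form, 32 bit word w of lane l sits
// at s[ 4*w + l ]; in 4x64 form the low and high halves of 64 bit word q of
// lane l sit at d[ 8*q + 2*l ] and d[ 8*q + 2*l + 1 ].
static inline void rintrlv_4x32_4x64_ref( uint32_t *d, const uint32_t *s,
                                          const int bit_len )
{
   for ( int l = 0; l < 4; l++ )
      for ( int w = 0; w < bit_len/32; w++ )
         d[ 8*(w/2) + 2*l + (w & 1) ] = s[ 4*w + l ];
}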
|
||||||
|
|
||||||
|
// Convert 4x64 byte (256 bit) vectors to 4x32 (128 bit) vectors for AVX
|
||||||
|
// bit_len must be multiple of 64
|
||||||
|
#define mm256_reinterleave_4x64_4x32 mm256_rintrlv_4x64_4x32
|
||||||
|
static inline void mm256_rintrlv_4x64_4x32( void *dst, void *src,
|
||||||
|
int bit_len )
|
||||||
|
{
|
||||||
|
__m256i *d = (__m256i*)dst;
|
||||||
|
uint32_t *s = (uint32_t*)src;
|
||||||
|
|
||||||
|
d[0] = _mm256_set_epi32( s[ 7],s[ 5],s[ 3],s[ 1],s[ 6],s[ 4],s[ 2],s[ 0] );
|
||||||
|
d[1] = _mm256_set_epi32( s[15],s[13],s[11],s[ 9],s[14],s[12],s[10],s[ 8] );
|
||||||
|
d[2] = _mm256_set_epi32( s[23],s[21],s[19],s[17],s[22],s[20],s[18],s[16] );
|
||||||
|
d[3] = _mm256_set_epi32( s[31],s[29],s[27],s[25],s[30],s[28],s[26],s[24] );
|
||||||
|
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
|
||||||
|
d[4] = _mm256_set_epi32( s[39],s[37],s[35],s[33],s[38],s[36],s[34],s[32] );
|
||||||
|
d[5] = _mm256_set_epi32( s[47],s[45],s[43],s[41],s[46],s[44],s[42],s[40] );
|
||||||
|
d[6] = _mm256_set_epi32( s[55],s[53],s[51],s[49],s[54],s[52],s[50],s[48] );
|
||||||
|
d[7] = _mm256_set_epi32( s[63],s[61],s[59],s[57],s[62],s[60],s[58],s[56] );
|
||||||
|
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
|
||||||
|
d[8] = _mm256_set_epi32( s[71],s[69],s[67],s[65],s[70],s[68],s[66],s[64] );
|
||||||
|
d[9] = _mm256_set_epi32( s[79],s[77],s[75],s[73],s[78],s[76],s[74],s[72] );
|
||||||
|
|
||||||
|
if ( bit_len <= 640 ) return;
|
||||||
|
|
||||||
|
d[10] = _mm256_set_epi32( s[87],s[85],s[83],s[81],s[86],s[84],s[82],s[80] );
|
||||||
|
d[11] = _mm256_set_epi32( s[95],s[93],s[91],s[89],s[94],s[92],s[90],s[88] );
|
||||||
|
|
||||||
|
d[12] = _mm256_set_epi32( s[103],s[101],s[99],s[97],s[102],s[100],s[98],s[96] );
|
||||||
|
d[13] = _mm256_set_epi32( s[111],s[109],s[107],s[105],s[110],s[108],s[106],s[104] );
|
||||||
|
d[14] = _mm256_set_epi32( s[119],s[117],s[115],s[113],s[118],s[116],s[114],s[112] );
|
||||||
|
d[15] = _mm256_set_epi32( s[127],s[125],s[123],s[121],s[126],s[124],s[122],s[120] );
|
||||||
|
// bit_len == 1024
|
||||||
|
}
|
||||||
|
|
||||||
|
#define mm256_reinterleave_4x64_2x128 mm256_rintrlv_4x64_2x128
|
||||||
|
static inline void mm256_rintrlv_4x64_2x128( void *dst0, void *dst1,
|
||||||
|
const void *src, int bit_len )
|
||||||
|
{
|
||||||
|
__m256i* d0 = (__m256i*)dst0;
|
||||||
|
__m256i* d1 = (__m256i*)dst1;
|
||||||
|
uint64_t *s = (uint64_t*)src;
|
||||||
|
|
||||||
|
d0[0] = _mm256_set_epi64x( s[ 5], s[ 1], s[ 4], s[ 0] );
|
||||||
|
d1[0] = _mm256_set_epi64x( s[ 7], s[ 3], s[ 6], s[ 2] );
|
||||||
|
|
||||||
|
d0[1] = _mm256_set_epi64x( s[13], s[ 9], s[12], s[ 8] );
|
||||||
|
d1[1] = _mm256_set_epi64x( s[15], s[11], s[14], s[10] );
|
||||||
|
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
|
||||||
|
d0[2] = _mm256_set_epi64x( s[21], s[17], s[20], s[16] );
|
||||||
|
d1[2] = _mm256_set_epi64x( s[23], s[19], s[22], s[18] );
|
||||||
|
|
||||||
|
d0[3] = _mm256_set_epi64x( s[29], s[25], s[28], s[24] );
|
||||||
|
d1[3] = _mm256_set_epi64x( s[31], s[27], s[30], s[26] );
|
||||||
|
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
|
||||||
|
d0[4] = _mm256_set_epi64x( s[37], s[33], s[36], s[32] );
|
||||||
|
d1[4] = _mm256_set_epi64x( s[39], s[35], s[38], s[34] );
|
||||||
|
|
||||||
|
d0[5] = _mm256_set_epi64x( s[45], s[41], s[44], s[40] );
|
||||||
|
d1[5] = _mm256_set_epi64x( s[47], s[43], s[46], s[42] );
|
||||||
|
|
||||||
|
d0[6] = _mm256_set_epi64x( s[53], s[49], s[52], s[48] );
|
||||||
|
d1[6] = _mm256_set_epi64x( s[55], s[51], s[54], s[50] );
|
||||||
|
|
||||||
|
d0[7] = _mm256_set_epi64x( s[61], s[57], s[60], s[56] );
|
||||||
|
d1[7] = _mm256_set_epi64x( s[63], s[59], s[62], s[58] );
|
||||||
|
}
|
||||||
|
|
||||||
|
#define mm256_reinterleave_2x128_4x64 mm256_rintrlv_2x128_4x64
|
||||||
|
static inline void mm256_rintrlv_2x128_4x64( void *dst, const void *src0,
|
||||||
|
const void *src1, int bit_len )
|
||||||
|
{
|
||||||
|
__m256i* d = (__m256i*)dst;
|
||||||
|
uint64_t *s0 = (uint64_t*)src0;
|
||||||
|
uint64_t *s1 = (uint64_t*)src1;
|
||||||
|
|
||||||
|
d[ 0] = _mm256_set_epi64x( s1[2], s1[0], s0[2], s0[0] );
|
||||||
|
d[ 1] = _mm256_set_epi64x( s1[3], s1[1], s0[3], s0[1] );
|
||||||
|
d[ 2] = _mm256_set_epi64x( s1[6], s1[4], s0[6], s0[4] );
|
||||||
|
d[ 3] = _mm256_set_epi64x( s1[7], s1[5], s0[7], s0[5] );
|
||||||
|
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
|
||||||
|
d[ 4] = _mm256_set_epi64x( s1[10], s1[ 8], s0[10], s0[ 8] );
|
||||||
|
d[ 5] = _mm256_set_epi64x( s1[11], s1[ 9], s0[11], s0[ 9] );
|
||||||
|
d[ 6] = _mm256_set_epi64x( s1[14], s1[12], s0[14], s0[12] );
|
||||||
|
d[ 7] = _mm256_set_epi64x( s1[15], s1[13], s0[15], s0[13] );
|
||||||
|
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
|
||||||
|
d[ 8] = _mm256_set_epi64x( s1[18], s1[16], s0[18], s0[16] );
|
||||||
|
d[ 9] = _mm256_set_epi64x( s1[19], s1[17], s0[19], s0[17] );
|
||||||
|
d[10] = _mm256_set_epi64x( s1[22], s1[20], s0[22], s0[20] );
|
||||||
|
d[11] = _mm256_set_epi64x( s1[23], s1[21], s0[23], s0[21] );
|
||||||
|
|
||||||
|
d[12] = _mm256_set_epi64x( s1[26], s1[24], s0[26], s0[24] );
|
||||||
|
d[13] = _mm256_set_epi64x( s1[27], s1[25], s0[27], s0[25] );
|
||||||
|
d[14] = _mm256_set_epi64x( s1[30], s1[28], s0[30], s0[28] );
|
||||||
|
d[15] = _mm256_set_epi64x( s1[31], s1[29], s0[31], s0[29] );
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#define mm256_interleave_2x128 mm256_intrlv_2x128
|
||||||
|
static inline void mm256_intrlv_2x128( void *d, const void *s0,
                                       const void *s1, const int bit_len )
|
||||||
|
{
|
||||||
|
__m128i s1hi = _mm256_extracti128_si256( casti_m256i( s1,0), 1 );
|
||||||
|
__m128i s0hi = _mm256_extracti128_si256( casti_m256i( s0,0), 1 );
|
||||||
|
casti_m256i( d,0 ) = mm256_concat_128(
|
||||||
|
_mm256_castsi256_si128( casti_m256i( s1,0 ) ),
|
||||||
|
_mm256_castsi256_si128( casti_m256i( s0,0 ) ) );
|
||||||
|
casti_m256i( d,1 ) = mm256_concat_128( s1hi, s0hi );
|
||||||
|
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
s0hi = _mm256_extracti128_si256( casti_m256i( s0,1), 1 );
|
||||||
|
s1hi = _mm256_extracti128_si256( casti_m256i( s1,1), 1 );
|
||||||
|
casti_m256i( d,2 ) = mm256_concat_128(
|
||||||
|
_mm256_castsi256_si128( casti_m256i( s1,1 ) ),
|
||||||
|
_mm256_castsi256_si128( casti_m256i( s0,1 ) ) );
|
||||||
|
casti_m256i( d,3 ) = mm256_concat_128( s1hi, s0hi );
|
||||||
|
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
if ( bit_len <= 640 )
|
||||||
|
{
|
||||||
|
casti_m256i( d,4 ) = mm256_concat_128(
|
||||||
|
_mm256_castsi256_si128( casti_m256i( s1,2 ) ),
|
||||||
|
_mm256_castsi256_si128( casti_m256i( s0,2 ) ) );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
s0hi = _mm256_extracti128_si256( casti_m256i( s0,2), 1 );
|
||||||
|
s1hi = _mm256_extracti128_si256( casti_m256i( s1,2), 1 );
|
||||||
|
casti_m256i( d,4 ) = mm256_concat_128(
|
||||||
|
_mm256_castsi256_si128( casti_m256i( s1,2 ) ),
|
||||||
|
_mm256_castsi256_si128( casti_m256i( s0,2 ) ) );
|
||||||
|
casti_m256i( d,5 ) = mm256_concat_128( s1hi, s0hi );
|
||||||
|
|
||||||
|
s0hi = _mm256_extracti128_si256( casti_m256i( s0,3), 1 );
|
||||||
|
s1hi = _mm256_extracti128_si256( casti_m256i( s1,3), 1 );
|
||||||
|
casti_m256i( d,6 ) = mm256_concat_128(
|
||||||
|
_mm256_castsi256_si128( casti_m256i( s1,3 ) ),
|
||||||
|
_mm256_castsi256_si128( casti_m256i( s0,3 ) ) );
|
||||||
|
casti_m256i( d,7 ) = mm256_concat_128( s1hi, s0hi );
|
||||||
|
}
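
// Reference sketch (illustrative only): 2x128 interleaving in scalar terms.
// 128 bit chunk c of lane l (l = 0 or 1) is stored at 128 bit slot 2*c + l
// of the destination.
static inline void intrlv_2x128_ref( void *d, const void *s0, const void *s1,
                                     const int bit_len )
{
   for ( int c = 0; c < bit_len/128; c++ )
   {
      casti_m128i( d, 2*c     ) = casti_m128i( s0, c );
      casti_m128i( d, 2*c + 1 ) = casti_m128i( s1, c );
   }
}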
|
||||||
|
|
||||||
|
#define mm256_deinterleave_2x128 mm256_dintrlv_2x128
|
||||||
|
static inline void mm256_dintrlv_2x128( void *dst0, void *dst1, const void *s,
|
||||||
|
int bit_len )
|
||||||
|
{
|
||||||
|
__m256i *d0 = (__m256i*)dst0;
|
||||||
|
__m256i *d1 = (__m256i*)dst1;
|
||||||
|
|
||||||
|
__m256i s0 = casti_m256i( s, 0 );
|
||||||
|
__m256i s1 = casti_m256i( s, 1 );
|
||||||
|
d0[0] = _mm256_inserti128_si256( s0, mm128_extr_lo128_256( s1 ), 1 );
|
||||||
|
d1[0] = _mm256_inserti128_si256( s1, mm128_extr_hi128_256( s0 ), 0 );
|
||||||
|
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
|
||||||
|
s0 = casti_m256i( s, 2 );
|
||||||
|
s1 = casti_m256i( s, 3 );
|
||||||
|
d0[1] = _mm256_inserti128_si256( s0, mm128_extr_lo128_256( s1 ), 1 );
|
||||||
|
d1[1] = _mm256_inserti128_si256( s1, mm128_extr_hi128_256( s0 ), 0 );
|
||||||
|
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
|
||||||
|
s0 = casti_m256i( s, 4 );
|
||||||
|
s1 = casti_m256i( s, 5 );
|
||||||
|
d0[2] = _mm256_inserti128_si256( s0, mm128_extr_lo128_256( s1 ), 1 );
|
||||||
|
d1[2] = _mm256_inserti128_si256( s1, mm128_extr_hi128_256( s0 ), 0 );
|
||||||
|
|
||||||
|
s0 = casti_m256i( s, 6 );
|
||||||
|
s1 = casti_m256i( s, 7 );
|
||||||
|
d0[3] = _mm256_inserti128_si256( s0, mm128_extr_lo128_256( s1 ), 1 );
|
||||||
|
d1[3] = _mm256_inserti128_si256( s1, mm128_extr_hi128_256( s0 ), 0 );
|
||||||
|
}
|
||||||
|
|
||||||
|
#undef extr64_cast128_256
#undef extr32_cast128_256

#endif // AVX2
#endif // INTRLV_AVX2_H__

simd-utils/intrlv-avx512.h (new file, 679 lines)
@@ -0,0 +1,679 @@
#if !defined(INTRLV_AVX512_H__)
|
||||||
|
#define INTRLV_AVX512_H__ 1
|
||||||
|
|
||||||
|
#if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||||
|
|
||||||
|
// SSE2 functions used in AVX512 interleaving
|
||||||
|
|
||||||
|
// AVX512 block is 64 * 64 bytes
|
||||||
|
|
||||||
|
// quarter avx512 block, 16 bytes * 16 lanes
|
||||||
|
static inline void mm128_dintrlv_16x32x128( void *d00, void *d01,
|
||||||
|
void *d02, void *d03, void *d04, void *d05, void *d06, void *d07,
|
||||||
|
void *d08, void *d09, void *d10, void *d11, void *d12, void *d13,
|
||||||
|
void *d14, void *d15, const int n, const void *s )
|
||||||
|
{
|
||||||
|
cast_m128i( d00 ) = mm128_get_32( s, 0, 16, 32, 48 );
|
||||||
|
cast_m128i( d01 ) = mm128_get_32( s, 1, 17, 33, 49 );
|
||||||
|
cast_m128i( d02 ) = mm128_get_32( s, 2, 18, 34, 50 );
|
||||||
|
cast_m128i( d03 ) = mm128_get_32( s, 3, 19, 35, 51 );
|
||||||
|
cast_m128i( d04 ) = mm128_get_32( s, 4, 20, 36, 52 );
|
||||||
|
cast_m128i( d05 ) = mm128_get_32( s, 5, 21, 37, 53 );
|
||||||
|
cast_m128i( d06 ) = mm128_get_32( s, 6, 22, 38, 54 );
|
||||||
|
cast_m128i( d07 ) = mm128_get_32( s, 7, 23, 39, 55 );
|
||||||
|
cast_m128i( d08 ) = mm128_get_32( s, 8, 24, 40, 56 );
|
||||||
|
cast_m128i( d09 ) = mm128_get_32( s, 9, 25, 41, 57 );
|
||||||
|
cast_m128i( d10 ) = mm128_get_32( s, 10, 26, 42, 58 );
|
||||||
|
cast_m128i( d11 ) = mm128_get_32( s, 11, 27, 43, 59 );
|
||||||
|
cast_m128i( d12 ) = mm128_get_32( s, 12, 28, 44, 60 );
|
||||||
|
cast_m128i( d13 ) = mm128_get_32( s, 13, 29, 45, 61 );
|
||||||
|
cast_m128i( d14 ) = mm128_get_32( s, 14, 30, 46, 62 );
|
||||||
|
cast_m128i( d15 ) = mm128_get_32( s, 15, 31, 47, 63 );
|
||||||
|
}
|
||||||
|
|
||||||
|
// quarter avx512 block, 32 bytes * 8 lanes
|
||||||
|
// 8 lanes of 128 bits using 64 bit interleaving
|
||||||
|
// Used for last 16 bytes of 80 byte input, only used for testing.
|
||||||
|
static inline void mm128_dintrlv_8x64x128( void *d0, void *d1, void *d2,
|
||||||
|
void *d3, void *d4, void *d5, void *d6, void *d7,
|
||||||
|
const int n, const void *s )
|
||||||
|
{
|
||||||
|
casti_m128i( d0,n ) = mm128_get_64( s, 0, 8 );
|
||||||
|
casti_m128i( d1,n ) = mm128_get_64( s, 1, 9 );
|
||||||
|
casti_m128i( d2,n ) = mm128_get_64( s, 2, 10 );
|
||||||
|
casti_m128i( d3,n ) = mm128_get_64( s, 3, 11 );
|
||||||
|
casti_m128i( d4,n ) = mm128_get_64( s, 4, 12 );
|
||||||
|
casti_m128i( d5,n ) = mm128_get_64( s, 5, 13 );
|
||||||
|
casti_m128i( d6,n ) = mm128_get_64( s, 6, 14 );
|
||||||
|
casti_m128i( d7,n ) = mm128_get_64( s, 7, 15 );
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void mm128_dintrlv_4x128x128( void *d0, void *d1, void *d2,
|
||||||
|
void *d3, const int n, const void *s )
|
||||||
|
{
|
||||||
|
   casti_m128i( d0,n ) = mm128_get_64( s, 0, 1 );
   casti_m128i( d1,n ) = mm128_get_64( s, 2, 3 );
   casti_m128i( d2,n ) = mm128_get_64( s, 4, 5 );
   casti_m128i( d3,n ) = mm128_get_64( s, 6, 7 );
}
|
||||||
|
|
||||||
|
// AVX2 functions Used in AVX512 interleaving
|
||||||
|
|
||||||
|
static inline void mm256_dintrlv_16x32x256( void *d00, void *d01,
|
||||||
|
void *d02, void *d03, void *d04, void *d05,
|
||||||
|
void *d06, void *d07, void *d08, void *d09,
|
||||||
|
void *d10, void *d11, void *d12, void *d13,
|
||||||
|
void *d14, void *d15, const int n, const void *s )
|
||||||
|
{
|
||||||
|
casti_m256i( d00,n ) = mm256_get_32( s, 0, 16, 32, 48, 64, 80, 96,112 );
|
||||||
|
casti_m256i( d01,n ) = mm256_get_32( s, 1, 17, 33, 49, 65, 81, 97,113 );
|
||||||
|
casti_m256i( d02,n ) = mm256_get_32( s, 2, 18, 34, 50, 66, 82, 98,114 );
|
||||||
|
casti_m256i( d03,n ) = mm256_get_32( s, 3, 19, 35, 51, 67, 83, 99,115 );
|
||||||
|
casti_m256i( d04,n ) = mm256_get_32( s, 4, 20, 36, 52, 68, 84,100,116 );
|
||||||
|
casti_m256i( d05,n ) = mm256_get_32( s, 5, 21, 37, 53, 69, 85,101,117 );
|
||||||
|
casti_m256i( d06,n ) = mm256_get_32( s, 6, 22, 38, 54, 70, 86,102,118 );
|
||||||
|
casti_m256i( d07,n ) = mm256_get_32( s, 7, 23, 39, 55, 71, 87,103,119 );
|
||||||
|
casti_m256i( d08,n ) = mm256_get_32( s, 8, 24, 40, 56, 72, 88,104,120 );
|
||||||
|
casti_m256i( d09,n ) = mm256_get_32( s, 9, 25, 41, 57, 73, 89,105,121 );
|
||||||
|
casti_m256i( d10,n ) = mm256_get_32( s, 10, 26, 42, 58, 74, 90,106,122 );
|
||||||
|
casti_m256i( d11,n ) = mm256_get_32( s, 11, 27, 43, 59, 75, 91,107,123 );
|
||||||
|
casti_m256i( d12,n ) = mm256_get_32( s, 12, 28, 44, 60, 76, 92,108,124 );
|
||||||
|
casti_m256i( d13,n ) = mm256_get_32( s, 13, 29, 45, 61, 77, 93,109,125 );
|
||||||
|
casti_m256i( d14,n ) = mm256_get_32( s, 14, 30, 46, 62, 78, 94,110,126 );
|
||||||
|
casti_m256i( d15,n ) = mm256_get_32( s, 15, 31, 47, 63, 79, 95,111,127 );
|
||||||
|
}
|
||||||
|
|
||||||
|
// 8 lanes of 256 bits using 64 bit interleaving (standard final hash size)
|
||||||
|
static inline void mm256_dintrlv_8x64x256( void *d0, void *d1, void *d2,
|
||||||
|
void *d3, void *d4, void *d5, void *d6, void *d7,
|
||||||
|
const int n, const void *s )
|
||||||
|
{
|
||||||
|
casti_m256i( d0,n ) = mm256_get_64( s, 0, 8, 16, 24 );
|
||||||
|
casti_m256i( d1,n ) = mm256_get_64( s, 1, 9, 17, 25 );
|
||||||
|
casti_m256i( d2,n ) = mm256_get_64( s, 2, 10, 18, 26 );
|
||||||
|
casti_m256i( d3,n ) = mm256_get_64( s, 3, 11, 19, 27 );
|
||||||
|
casti_m256i( d4,n ) = mm256_get_64( s, 4, 12, 20, 28 );
|
||||||
|
casti_m256i( d5,n ) = mm256_get_64( s, 5, 13, 21, 29 );
|
||||||
|
casti_m256i( d6,n ) = mm256_get_64( s, 6, 14, 22, 30 );
|
||||||
|
casti_m256i( d7,n ) = mm256_get_64( s, 7, 15, 23, 31 );
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void mm256_dintrlv_4x128x256( void *d0, void *d1, void *d2,
|
||||||
|
void *d3, const int n, const void *s )
|
||||||
|
{
|
||||||
|
casti_m256i( d0,n ) = mm256_get_64( s, 0, 1, 8, 9 );
|
||||||
|
casti_m256i( d1,n ) = mm256_get_64( s, 2, 3, 10, 11 );
|
||||||
|
casti_m256i( d2,n ) = mm256_get_64( s, 4, 5, 12, 13 );
|
||||||
|
casti_m256i( d3,n ) = mm256_get_64( s, 6, 7, 14, 15 );
|
||||||
|
}
|
||||||
|
|
||||||
|
// AVX 512 helper functions.
|
||||||
|
//
|
||||||
|
// Macro functions returning vector.
|
||||||
|
// Abstracted typecasting, avoid temp pointers.
|
||||||
|
// Source arguments may be any 64 or 32 byte aligned pointer as appropriate.
|
||||||
|
|
||||||
|
#define mm512_put_64( s0, s1, s2, s3, s4, s5, s6, s7 ) \
|
||||||
|
_mm512_set_epi64( *((const uint64_t*)(s7)), *((const uint64_t*)(s6)), \
|
||||||
|
*((const uint64_t*)(s5)), *((const uint64_t*)(s4)), \
|
||||||
|
*((const uint64_t*)(s3)), *((const uint64_t*)(s2)), \
|
||||||
|
*((const uint64_t*)(s1)), *((const uint64_t*)(s0)) )
|
||||||
|
|
||||||
|
#define mm512_put_32( s00, s01, s02, s03, s04, s05, s06, s07, \
|
||||||
|
s08, s09, s10, s11, s12, s13, s14, s15 ) \
|
||||||
|
_mm512_set_epi32( *((const uint32_t*)(s15)), *((const uint32_t*)(s14)), \
|
||||||
|
*((const uint32_t*)(s13)), *((const uint32_t*)(s12)), \
|
||||||
|
*((const uint32_t*)(s11)), *((const uint32_t*)(s10)), \
|
||||||
|
*((const uint32_t*)(s09)), *((const uint32_t*)(s08)), \
|
||||||
|
*((const uint32_t*)(s07)), *((const uint32_t*)(s06)), \
|
||||||
|
*((const uint32_t*)(s05)), *((const uint32_t*)(s04)), \
|
||||||
|
*((const uint32_t*)(s03)), *((const uint32_t*)(s02)), \
|
||||||
|
*((const uint32_t*)(s01)), *((const uint32_t*)(s00)) )
|
||||||
|
|
||||||
|
#define mm512_get_64( s, i0, i1, i2, i3, i4, i5, i6, i7 ) \
|
||||||
|
_mm512_set_epi64( ((const uint64_t*)(s))[i7], ((const uint64_t*)(s))[i6], \
|
||||||
|
((const uint64_t*)(s))[i5], ((const uint64_t*)(s))[i4], \
|
||||||
|
((const uint64_t*)(s))[i3], ((const uint64_t*)(s))[i2], \
|
||||||
|
((const uint64_t*)(s))[i1], ((const uint64_t*)(s))[i0] )
|
||||||
|
|
||||||
|
#define mm512_get_32( s, i00, i01, i02, i03, i04, i05, i06, i07, \
|
||||||
|
i08, i09, i10, i11, i12, i13, i14, i15 ) \
|
||||||
|
_mm512_set_epi32( ((const uint32_t*)(s))[i15], ((const uint32_t*)(s))[i14], \
|
||||||
|
((const uint32_t*)(s))[i13], ((const uint32_t*)(s))[i12], \
|
||||||
|
((const uint32_t*)(s))[i11], ((const uint32_t*)(s))[i10], \
|
||||||
|
((const uint32_t*)(s))[i09], ((const uint32_t*)(s))[i08], \
|
||||||
|
((const uint32_t*)(s))[i07], ((const uint32_t*)(s))[i06], \
|
||||||
|
((const uint32_t*)(s))[i05], ((const uint32_t*)(s))[i04], \
|
||||||
|
((const uint32_t*)(s))[i03], ((const uint32_t*)(s))[i02], \
|
||||||
|
((const uint32_t*)(s))[i01], ((const uint32_t*)(s))[i00] )
|
||||||
|
|
||||||
|
// AVX512 has no blend; it can be done with permute2xvar but at what cost?
// It can also be done with shifting and mask-or'ing for 3 instructions with
// 1 dependency. Finally it can be done with a single _mm512_set but with
// 8 64 bit array index calculations and 8 pointer reads.
|
||||||
|
|
||||||
|
// Blend 2 vectors alternating hi & lo: { hi[n], lo[n-1], ... hi[1]. lo[0] }
|
||||||
|
#define mm512_interleave_blend_128( hi, lo ) \
|
||||||
|
_mm256_permute2xvar_epi64( hi, lo, _mm512_set_epi64( \
|
||||||
|
0x7, 0x6, 0x5, 0x4, 0xb, 0xa, 0x9, 0x8 )
|
||||||
|
|
||||||
|
#define mm512_interleave_blend_64( hi, lo ) \
|
||||||
|
_mm256_permute2xvar_epi64( hi, lo, _mm512_set_epi64( \
|
||||||
|
0x7, 0x6, 0xd, 0xc, 0x3, 0x2, 0x9, 0x8 )
|
||||||
|
|
||||||
|
#define mm512_interleave_blend_32( hi, lo ) \
|
||||||
|
_mm256_permute2xvar_epi32( hi, lo, _mm512_set_epi32( \
|
||||||
|
0x0f, 0x1e, 0x0d, 0x1c, 0x0b, 0x1a, 0x09, 0x18, \
|
||||||
|
0x07, 0x16, 0x05, 0x14, 0x03, 0x12, 0x01, 0x10 )
|
||||||
|
//
|
||||||
|
|
||||||
|
static inline void mm512_intrlv_16x32x512( void *d, const void *s00,
|
||||||
|
const void *s01, const void *s02, const void *s03, const void *s04,
|
||||||
|
const void *s05, const void *s06, const void *s07, const void *s08,
|
||||||
|
const void *s09, const void *s10, const void *s11, const void *s12,
|
||||||
|
const void *s13, const void *s14, const void *s15 )
|
||||||
|
{
|
||||||
|
casti_m512i( d, 0 ) = mm512_put_32(
|
||||||
|
s00, s01, s02, s03, s04, s05, s06, s07,
|
||||||
|
s08, s09, s10, s11, s12, s13, s14, s15 );
|
||||||
|
casti_m512i( d, 1 ) = mm512_put_32(
|
||||||
|
s00+ 4, s01+ 4, s02+ 4, s03+ 4, s04+ 4, s05+ 4, s06+ 4, s07+ 4,
|
||||||
|
s08+ 4, s09+ 4, s10+ 4, s11+ 4, s12+ 4, s13+ 4, s14+ 4, s15+ 4 );
|
||||||
|
casti_m512i( d, 2 ) = mm512_put_32(
|
||||||
|
s00+ 8, s01+ 8, s02+ 8, s03+ 8, s04+ 8, s05+ 8, s06+ 8, s07+ 8,
|
||||||
|
s08+ 8, s09+ 8, s10+ 8, s11+ 8, s12+ 8, s13+ 8, s14+ 8, s15+ 8 );
|
||||||
|
casti_m512i( d, 3 ) = mm512_put_32(
|
||||||
|
s00+12, s01+12, s02+12, s03+12, s04+12, s05+12, s06+12, s07+12,
|
||||||
|
s08+12, s09+12, s10+12, s11+12, s12+12, s13+12, s14+12, s15+12 );
|
||||||
|
casti_m512i( d, 4 ) = mm512_put_32(
|
||||||
|
s00+16, s01+16, s02+16, s03+16, s04+16, s05+16, s06+16, s07+16,
|
||||||
|
s08+16, s09+16, s10+16, s11+16, s12+16, s13+16, s14+16, s15+16 );
|
||||||
|
casti_m512i( d, 5 ) = mm512_put_32(
|
||||||
|
s00+20, s01+20, s02+20, s03+20, s04+20, s05+20, s06+20, s07+20,
|
||||||
|
s08+20, s09+20, s10+20, s11+20, s12+20, s13+20, s14+20, s15+20 );
|
||||||
|
casti_m512i( d, 6 ) = mm512_put_32(
|
||||||
|
s00+24, s01+24, s02+24, s03+24, s04+24, s05+24, s06+24, s07+24,
|
||||||
|
s08+24, s09+24, s10+24, s11+24, s12+24, s13+24, s14+24, s15+24 );
|
||||||
|
casti_m512i( d, 7 ) = mm512_put_32(
|
||||||
|
s00+28, s01+28, s02+28, s03+28, s04+28, s05+28, s06+28, s07+28,
|
||||||
|
s08+28, s09+28, s10+28, s11+28, s12+28, s13+28, s14+28, s15+28 );
|
||||||
|
   casti_m512i( d, 8 ) = mm512_put_32(
             s00+32, s01+32, s02+32, s03+32, s04+32, s05+32, s06+32, s07+32,
             s08+32, s09+32, s10+32, s11+32, s12+32, s13+32, s14+32, s15+32 );
   casti_m512i( d, 9 ) = mm512_put_32(
             s00+36, s01+36, s02+36, s03+36, s04+36, s05+36, s06+36, s07+36,
             s08+36, s09+36, s10+36, s11+36, s12+36, s13+36, s14+36, s15+36 );
   casti_m512i( d,10 ) = mm512_put_32(
             s00+40, s01+40, s02+40, s03+40, s04+40, s05+40, s06+40, s07+40,
             s08+40, s09+40, s10+40, s11+40, s12+40, s13+40, s14+40, s15+40 );
   casti_m512i( d,11 ) = mm512_put_32(
             s00+44, s01+44, s02+44, s03+44, s04+44, s05+44, s06+44, s07+44,
             s08+44, s09+44, s10+44, s11+44, s12+44, s13+44, s14+44, s15+44 );
   casti_m512i( d,12 ) = mm512_put_32(
             s00+48, s01+48, s02+48, s03+48, s04+48, s05+48, s06+48, s07+48,
             s08+48, s09+48, s10+48, s11+48, s12+48, s13+48, s14+48, s15+48 );
   casti_m512i( d,13 ) = mm512_put_32(
             s00+52, s01+52, s02+52, s03+52, s04+52, s05+52, s06+52, s07+52,
             s08+52, s09+52, s10+52, s11+52, s12+52, s13+52, s14+52, s15+52 );
   casti_m512i( d,14 ) = mm512_put_32(
             s00+56, s01+56, s02+56, s03+56, s04+56, s05+56, s06+56, s07+56,
             s08+56, s09+56, s10+56, s11+56, s12+56, s13+56, s14+56, s15+56 );
   casti_m512i( d,15 ) = mm512_put_32(
             s00+60, s01+60, s02+60, s03+60, s04+60, s05+60, s06+60, s07+60,
             s08+60, s09+60, s10+60, s11+60, s12+60, s13+60, s14+60, s15+60 );
}
|
||||||
|
|
||||||
|
static inline void mm512_intrlv_16x32x256( void *d, const void *s00,
|
||||||
|
const void *s01, const void *s02, const void *s03, const void *s04,
|
||||||
|
const void *s05, const void *s06, const void *s07, const void *s08,
|
||||||
|
const void *s09, const void *s10, const void *s11, const void *s12,
|
||||||
|
const void *s13, const void *s14, const void *s15 )
|
||||||
|
{
|
||||||
|
casti_m512i( d, 0 ) = mm512_put_32(
|
||||||
|
s00, s01, s02, s03, s04, s05, s06, s07,
|
||||||
|
s08, s09, s10, s11, s12, s13, s14, s15 );
|
||||||
|
casti_m512i( d, 1 ) = mm512_put_32(
|
||||||
|
s00+ 4, s01+ 4, s02+ 4, s03+ 4, s04+ 4, s05+ 4, s06+ 4, s07+ 4,
|
||||||
|
s08+ 4, s09+ 4, s10+ 4, s11+ 4, s12+ 4, s13+ 4, s14+ 4, s15+ 4 );
|
||||||
|
casti_m512i( d, 2 ) = mm512_put_32(
|
||||||
|
s00+ 8, s01+ 8, s02+ 8, s03+ 8, s04+ 8, s05+ 8, s06+ 8, s07+ 8,
|
||||||
|
s08+ 8, s09+ 8, s10+ 8, s11+ 8, s12+ 8, s13+ 8, s14+ 8, s15+ 8 );
|
||||||
|
casti_m512i( d, 3 ) = mm512_put_32(
|
||||||
|
s00+12, s01+12, s02+12, s03+12, s04+12, s05+12, s06+12, s07+12,
|
||||||
|
s08+12, s09+12, s10+12, s11+12, s12+12, s13+12, s14+12, s15+12 );
|
||||||
|
casti_m512i( d, 4 ) = mm512_put_32(
|
||||||
|
s00+16, s01+16, s02+16, s03+16, s04+16, s05+16, s06+16, s07+16,
|
||||||
|
s08+16, s09+16, s10+16, s11+16, s12+16, s13+16, s14+16, s15+16 );
|
||||||
|
casti_m512i( d, 5 ) = mm512_put_32(
|
||||||
|
s00+20, s01+20, s02+20, s03+20, s04+20, s05+20, s06+20, s07+20,
|
||||||
|
s08+20, s09+20, s10+20, s11+20, s12+20, s13+20, s14+20, s15+20 );
|
||||||
|
casti_m512i( d, 6 ) = mm512_put_32(
|
||||||
|
s00+24, s01+24, s02+24, s03+24, s04+24, s05+24, s06+24, s07+24,
|
||||||
|
s08+24, s09+24, s10+24, s11+24, s12+24, s13+24, s14+24, s15+24 );
|
||||||
|
casti_m512i( d, 7 ) = mm512_put_32(
|
||||||
|
s00+28, s01+28, s02+28, s03+28, s04+28, s05+28, s06+28, s07+28,
|
||||||
|
s08+28, s09+28, s10+28, s11+28, s12+28, s13+28, s14+28, s15+28 );
|
||||||
|
}
|
||||||
|
|
||||||
|
// Last 16 bytes of input
|
||||||
|
static inline void mm512_intrlv_16x32x128( void *d, const void *s00,
|
||||||
|
const void *s01, const void *s02, const void *s03, const void *s04,
|
||||||
|
const void *s05, const void *s06, const void *s07, const void *s08,
|
||||||
|
const void *s09, const void *s10, const void *s11, const void *s12,
|
||||||
|
const void *s13, const void *s14, const void *s15 )
|
||||||
|
{
|
||||||
|
casti_m512i( d, 0 ) = mm512_put_32(
|
||||||
|
s00, s01, s02, s03, s04, s05, s06, s07,
|
||||||
|
s08, s09, s10, s11, s12, s13, s14, s15 );
|
||||||
|
casti_m512i( d, 1 ) = mm512_put_32(
|
||||||
|
s00+ 4, s01+ 4, s02+ 4, s03+ 4, s04+ 4, s05+ 4, s06+ 4, s07+ 4,
|
||||||
|
s08+ 4, s09+ 4, s10+ 4, s11+ 4, s12+ 4, s13+ 4, s14+ 4, s15+ 4 );
|
||||||
|
casti_m512i( d, 2 ) = mm512_put_32(
|
||||||
|
s00+ 8, s01+ 8, s02+ 8, s03+ 8, s04+ 8, s05+ 8, s06+ 8, s07+ 8,
|
||||||
|
s08+ 8, s09+ 8, s10+ 8, s11+ 8, s12+ 8, s13+ 8, s14+ 8, s15+ 8 );
|
||||||
|
casti_m512i( d, 3 ) = mm512_put_32(
|
||||||
|
s00+12, s01+12, s02+12, s03+12, s04+12, s05+12, s06+12, s07+12,
|
||||||
|
s08+12, s09+12, s10+12, s11+12, s12+12, s13+12, s14+12, s15+12 );
|
||||||
|
}
|
||||||
|
|
||||||
|
// can be called directly for 64 byte hash.
|
||||||
|
static inline void mm512_dintrlv_16x32x512( void *d00, void *d01,
|
||||||
|
void *d02, void *d03, void *d04, void *d05, void *d06,
|
||||||
|
void *d07, void *d08, void *d09, void *d10, void *d11,
|
||||||
|
void *d12, void *d13, void *d14, void *d15, const int n,
|
||||||
|
const void *s )
|
||||||
|
{
|
||||||
|
casti_m512i(d00,n) = mm512_get_32( s, 0, 16, 32, 48, 64, 80, 96,112,
|
||||||
|
128,144,160,176,192,208,224,240 );
|
||||||
|
casti_m512i(d01,n) = mm512_get_32( s, 1, 17, 33, 49, 65, 81, 97,113,
|
||||||
|
129,145,161,177,193,209,225,241 );
|
||||||
|
casti_m512i(d02,n) = mm512_get_32( s, 2, 18, 34, 50, 66, 82, 98,114,
|
||||||
|
130,146,162,178,194,210,226,242 );
|
||||||
|
casti_m512i(d03,n) = mm512_get_32( s, 3, 19, 35, 51, 67, 83, 99,115,
|
||||||
|
131,147,163,179,195,211,227,243 );
|
||||||
|
casti_m512i(d04,n) = mm512_get_32( s, 4, 20, 36, 52, 68, 84,100,116,
|
||||||
|
132,148,164,180,196,212,228,244 );
|
||||||
|
casti_m512i(d05,n) = mm512_get_32( s, 5, 21, 37, 53, 69, 85,101,117,
|
||||||
|
133,149,165,181,197,213,229,245 );
|
||||||
|
casti_m512i(d06,n) = mm512_get_32( s, 6, 22, 38, 54, 70, 86,102,118,
|
||||||
|
134,150,166,182,198,214,230,246 );
|
||||||
|
casti_m512i(d07,n) = mm512_get_32( s, 7, 23, 39, 55, 71, 87,103,119,
|
||||||
|
135,151,167,183,199,215,231,247 );
|
||||||
|
casti_m512i(d08,n) = mm512_get_32( s, 8, 24, 40, 56, 72, 88,104,120,
|
||||||
|
136,152,168,184,200,216,232,248 );
|
||||||
|
casti_m512i(d09,n) = mm512_get_32( s, 9, 25, 41, 57, 73, 89,105,121,
|
||||||
|
137,153,169,185,201,217,233,249 );
|
||||||
|
casti_m512i(d10,n) = mm512_get_32( s, 10, 26, 42, 58, 74, 90,106,122,
|
||||||
|
138,154,170,186,202,218,234,250 );
|
||||||
|
casti_m512i(d11,n) = mm512_get_32( s, 11, 27, 43, 59, 75, 91,107,123,
|
||||||
|
139,155,171,187,203,219,235,251 );
|
||||||
|
casti_m512i(d12,n) = mm512_get_32( s, 12, 28, 44, 60, 76, 92,108,124,
|
||||||
|
140,156,172,188,204,220,236,252 );
|
||||||
|
casti_m512i(d13,n) = mm512_get_32( s, 13, 29, 45, 61, 77, 93,109,125,
|
||||||
|
141,157,173,189,205,221,237,253 );
|
||||||
|
casti_m512i(d14,n) = mm512_get_32( s, 14, 30, 46, 62, 78, 94,110,126,
|
||||||
|
142,158,174,190,206,222,238,254 );
|
||||||
|
casti_m512i(d15,n) = mm512_get_32( s, 15, 31, 47, 63, 79, 95,111,127,
|
||||||
|
143,159,175,191,207,223,239,255 );
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void mm512_intrlv_8x64x512( void *d, const void *s0,
|
||||||
|
const void *s1, const void *s2, const void *s3,
|
||||||
|
const void *s4, const void *s5, const void *s6,
|
||||||
|
const void *s7 )
|
||||||
|
{
|
||||||
|
casti_m512i( d,0 ) = mm512_put_64( s0, s1, s2, s3,
|
||||||
|
s4, s5, s6, s7 );
|
||||||
|
casti_m512i( d,1 ) = mm512_put_64( s0+ 8, s1+ 8, s2+ 8, s3+ 8,
|
||||||
|
s4+ 8, s5+ 8, s6+ 8, s7+ 8 );
|
||||||
|
casti_m512i( d,2 ) = mm512_put_64( s0+16, s1+16, s2+16, s3+16,
|
||||||
|
s4+16, s5+16, s6+16, s7+16 );
|
||||||
|
casti_m512i( d,3 ) = mm512_put_64( s0+24, s1+24, s2+24, s3+24,
|
||||||
|
s4+24, s5+24, s6+24, s7+24 );
|
||||||
|
casti_m512i( d,4 ) = mm512_put_64( s0+32, s1+32, s2+32, s3+32,
|
||||||
|
s4+32, s5+32, s6+32, s7+32 );
|
||||||
|
casti_m512i( d,5 ) = mm512_put_64( s0+40, s1+40, s2+40, s3+40,
|
||||||
|
s4+40, s5+40, s6+40, s7+40 );
|
||||||
|
casti_m512i( d,6 ) = mm512_put_64( s0+48, s1+48, s2+48, s3+48,
|
||||||
|
s4+48, s5+48, s6+48, s7+48 );
|
||||||
|
casti_m512i( d,7 ) = mm512_put_64( s0+56, s1+56, s2+56, s3+56,
|
||||||
|
s4+56, s5+56, s6+56, s7+56 );
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void mm512_intrlv_8x64x256( void *d, const void *s0,
|
||||||
|
const void *s1, const void *s2, const void *s3,
|
||||||
|
const void *s4, const void *s5, const void *s6,
|
||||||
|
const void *s7 )
|
||||||
|
{
|
||||||
|
casti_m512i( d,0 ) = mm512_put_64( s0, s1, s2, s3,
|
||||||
|
s4, s5, s6, s7 );
|
||||||
|
casti_m512i( d,1 ) = mm512_put_64( s0+ 8, s1+ 8, s2+ 8, s3+ 8,
|
||||||
|
s4+ 8, s5+ 8, s6+ 8, s7+ 8 );
|
||||||
|
casti_m512i( d,2 ) = mm512_put_64( s0+16, s1+16, s2+16, s3+16,
|
||||||
|
s4+16, s5+16, s6+16, s7+16 );
|
||||||
|
casti_m512i( d,3 ) = mm512_put_64( s0+24, s1+24, s2+24, s3+24,
|
||||||
|
s4+24, s5+24, s6+24, s7+24 );
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// 8 lanes of 512 bits using 64 bit interleaving (typical intermediate hash)
|
||||||
|
static inline void mm512_dintrlv_8x64x512( void *d0, void *d1, void *d2,
|
||||||
|
void *d3, void *d4, void *d5, void *d6, void *d7,
|
||||||
|
const int n, const void *s )
|
||||||
|
{
|
||||||
|
casti_m512i( d0,n ) = mm512_get_64( s, 0, 8, 16, 24, 32, 40, 48, 56 );
|
||||||
|
casti_m512i( d1,n ) = mm512_get_64( s, 1, 9, 17, 25, 33, 41, 49, 57 );
|
||||||
|
casti_m512i( d2,n ) = mm512_get_64( s, 2, 10, 18, 26, 34, 42, 50, 58 );
|
||||||
|
casti_m512i( d3,n ) = mm512_get_64( s, 3, 11, 19, 27, 35, 43, 51, 59 );
|
||||||
|
casti_m512i( d4,n ) = mm512_get_64( s, 4, 12, 20, 28, 36, 44, 52, 60 );
|
||||||
|
casti_m512i( d5,n ) = mm512_get_64( s, 5, 13, 21, 29, 37, 45, 53, 61 );
|
||||||
|
casti_m512i( d6,n ) = mm512_get_64( s, 6, 14, 22, 30, 38, 46, 54, 62 );
|
||||||
|
casti_m512i( d7,n ) = mm512_get_64( s, 7, 15, 23, 31, 39, 47, 55, 63 );
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void mm512_dintrlv_4x128x512( void *d0, void *d1, void *d2,
|
||||||
|
void *d3, const int n, const void *s )
|
||||||
|
{
|
||||||
|
   casti_m512i( d0,n ) = mm512_get_64( s, 0, 1,  8,  9, 16, 17, 24, 25 );
   casti_m512i( d1,n ) = mm512_get_64( s, 2, 3, 10, 11, 18, 19, 26, 27 );
   casti_m512i( d2,n ) = mm512_get_64( s, 4, 5, 12, 13, 20, 21, 28, 29 );
   casti_m512i( d3,n ) = mm512_get_64( s, 6, 7, 14, 15, 22, 23, 30, 31 );
}
|
||||||
|
|
||||||
|
// AVX-512 user facing functions.
|
||||||
|
|
||||||
|
static inline void mm512_intrlv_16x32( void *d, const void *s00,
|
||||||
|
const void *s01, const void *s02, const void *s03, const void *s04,
|
||||||
|
const void *s05, const void *s06, const void *s07, const void *s08,
|
||||||
|
const void *s09, const void *s10, const void *s11, const void *s12,
|
||||||
|
const void *s13, const void *s14, const void *s15, int bit_len )
|
||||||
|
{
|
||||||
|
if ( bit_len <= 256 )
|
||||||
|
{
|
||||||
|
mm512_intrlv_16x32x256( d, s00, s01, s02, s03, s04, s05, s06, s07,
|
||||||
|
s08, s09, s10, s11, s12, s13, s14, s15 );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
mm512_intrlv_16x32x512( d, s00, s01, s02, s03, s04, s05, s06, s07,
|
||||||
|
s08, s09, s10, s11, s12, s13, s14, s15 );
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
if ( bit_len <= 640 )
|
||||||
|
{
|
||||||
|
|
||||||
|
mm512_intrlv_16x32x128( d+1024, s00+64, s01+64, s02+64, s03+64,
|
||||||
|
s04+64, s05+64, s06+64, s07+64, s08+64, s09+64,
|
||||||
|
s10+64, s11+64, s12+64, s13+64, s14+64, s15+64 );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
mm512_intrlv_16x32x512( d+1024, s00+64, s01+64, s02+64, s03+64,
|
||||||
|
s04+64, s05+64, s06+64, s07+64, s08+64, s09+64,
|
||||||
|
s10+64, s11+64, s12+64, s13+64, s14+64, s15+64 );
|
||||||
|
// bit_len == 1024
|
||||||
|
}
|
||||||
|
|
||||||
|
// sub-functions can be called directly for 32 & 64 byte hash.
|
||||||
|
static inline void mm512_dintrlv_16x32( void *d00, void *d01, void *d02,
|
||||||
|
void *d03, void *d04, void *d05, void *d06, void *d07, void *d08,
|
||||||
|
void *d09, void *d10, void *d11, void *d12, void *d13, void *d14,
|
||||||
|
void *d15, const void *src, const int bit_len )
|
||||||
|
{
|
||||||
|
if ( bit_len <= 256 )
|
||||||
|
{
|
||||||
|
mm256_dintrlv_16x32x256( d00, d01, d02, d03, d04, d05, d06, d07,
|
||||||
|
d08, d09, d10, d11, d12, d13, d14, d15,
|
||||||
|
0,src );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
mm512_dintrlv_16x32x512( d00, d01, d02, d03, d04, d05, d06, d07,
|
||||||
|
d08, d09, d10, d11, d12, d13, d14, d15,
|
||||||
|
0, src );
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
if ( bit_len <= 640 )
|
||||||
|
{
|
||||||
|
// short block, final 16 bytes of input data.
|
||||||
|
mm128_dintrlv_16x32x128( d00, d01, d02, d03, d04, d05, d06, d07,
|
||||||
|
d08, d09, d10, d11, d12, d13, d14, d15,
|
||||||
|
1, src+1024 );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// bit_len == 1024
|
||||||
|
mm512_dintrlv_16x32x512( d00, d01, d02, d03, d04, d05, d06, d07,
|
||||||
|
d08, d09, d10, d11, d12, d13, d14, d15,
|
||||||
|
1, src+1024 );
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void mm512_extract_lane_16x32( void *dst, const void *src,
|
||||||
|
const int lane, const int bit_len )
|
||||||
|
{
|
||||||
|
if ( bit_len <= 256 )
|
||||||
|
{
|
||||||
|
cast_m256i( dst ) = mm256_get_32( src, lane, lane+16, lane+32, lane+48,
|
||||||
|
lane+64, lane+80, lane+96, lane+112 );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
   cast_m512i( dst ) = mm512_get_32( src, lane,     lane+ 16, lane+ 32, lane+ 48,
              lane+ 64, lane+ 80, lane+ 96, lane+112, lane+128, lane+144,
              lane+160, lane+176, lane+192, lane+208, lane+224, lane+240 );
}
|
||||||
|
|
||||||
|
//
|
||||||
|
|
||||||
|
static inline void mm512_intrlv_8x64( void *d, const void *s0,
|
||||||
|
const void *s1, const void *s2, const void *s3,
|
||||||
|
const void *s4, const void *s5, const void *s6,
|
||||||
|
const void *s7, int bit_len )
|
||||||
|
{
|
||||||
|
if ( bit_len <= 256 )
|
||||||
|
{
|
||||||
|
mm512_intrlv_8x64x256( d, s0, s1, s2, s3, s4, s5, s6, s7 );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
mm512_intrlv_8x64x512( d, s0, s1, s2, s3, s4, s5, s6, s7 );
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
if ( bit_len <= 640 )
|
||||||
|
{
|
||||||
|
casti_m512i( d, 8 ) = mm512_put_64( s7+64, s6+64, s5+64, s4+64,
|
||||||
|
s3+64, s2+64, s1+64, s0+64 );
|
||||||
|
casti_m512i( d, 9 ) = mm512_put_64( s7+72, s6+72, s5+72, s4+72,
|
||||||
|
s3+72, s2+72, s1+72, s0+72 );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// bitlen == 1024
|
||||||
|
mm512_intrlv_8x64x512( d+512, s0+64, s1+64, s2+64, s3+64,
|
||||||
|
s4+64, s5+64, s6+64, s7+64 );
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static inline void mm512_dintrlv_8x64( void *d0, void *d1, void *d2,
|
||||||
|
void *d3, void *d4, void *d5, void *d6, void *d7,
|
||||||
|
const void *s, const int bit_len )
|
||||||
|
{
|
||||||
|
if ( bit_len <= 256 )
|
||||||
|
{
|
||||||
|
mm256_dintrlv_8x64x256( d0, d1, d2, d3, d4, d5, d6, d7, 0, s );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
mm512_dintrlv_8x64x512( d0, d1, d2, d3, d4, d5, d6, d7, 0, s );
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
if ( bit_len <= 640 )
|
||||||
|
{
|
||||||
|
// short block, final 16 bytes of input data.
|
||||||
|
mm128_dintrlv_8x64x128( d0, d1, d2, d3, d4, d5, d6, d7, 1, s+512 );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// bit_len == 1024
|
||||||
|
mm512_dintrlv_8x64x512( d0, d1, d2, d3, d4, d5, d6, d7, 1, s+512 );
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract one lane from 64 bit interleaved data
|
||||||
|
static inline void mm512_extract_lane_8x64( void *d, const void *s,
|
||||||
|
const int lane, const int bit_len )
|
||||||
|
{
|
||||||
|
if ( bit_len <= 256 )
|
||||||
|
{
|
||||||
|
cast_m256i( d ) = mm256_get_64( s, lane, lane+8, lane+16, lane+24 );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// else bit_len == 512
|
||||||
|
cast_m512i( d ) = mm512_get_64( s, lane , lane+ 8, lane+16, lane+24,
|
||||||
|
lane+32, lane+40, lane+48, lane+56 );
|
||||||
|
}
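
// Usage sketch (illustrative, not from the original source): test one lane
// of an 8 way, 64 bit interleaved hash state. Buffer names are hypothetical.
static inline void example_check_lane_8x64( uint32_t *hash, const void *vhash,
                                            const int lane )
{
   mm512_extract_lane_8x64( hash, vhash, lane, 256 );
   // hash[0..7] now holds this lane's 256 bit result, ready to be compared
   // against the share target.
}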
|
||||||
|
|
||||||
|
//
|
||||||
|
|
||||||
|
static inline void mm512_intrlv_4x128( void *d, const void *s0,
|
||||||
|
const void *s1, const void *s2, const void *s3, const int bit_len )
|
||||||
|
{
|
||||||
|
casti_m512i( d, 0 ) = mm512_put_64( s0, s0+8, s1, s1+8,
|
||||||
|
s2, s2+8, s3, s3+8 );
|
||||||
|
casti_m512i( d, 1 ) = mm512_put_64( s0+16, s0+24, s1+16, s1+24,
|
||||||
|
s2+16, s2+24, s3+16, s3+24 );
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
|
||||||
|
casti_m512i( d, 2 ) = mm512_put_64( s0+32, s0+40, s1+32, s1+40,
|
||||||
|
s2+32, s2+40, s3+32, s3+40 );
|
||||||
|
casti_m512i( d, 3 ) = mm512_put_64( s0+48, s0+56, s1+48, s1+56,
|
||||||
|
s2+48, s2+56, s3+48, s3+56 );
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
|
||||||
|
casti_m512i( d, 4 ) = mm512_put_64( s0+64, s0+72, s1+64, s1+72,
|
||||||
|
s2+64, s2+72, s3+64, s3+72 );
|
||||||
|
if ( bit_len <= 640 ) return;
|
||||||
|
|
||||||
|
casti_m512i( d, 5 ) = mm512_put_64( s0+ 80, s0+ 88, s1+ 80, s1+ 88,
|
||||||
|
s2+ 80, s2+ 88, s3+ 80, s3+ 88 );
|
||||||
|
casti_m512i( d, 6 ) = mm512_put_64( s0+ 96, s0+104, s1+ 96, s1+104,
|
||||||
|
s2+ 96, s2+104, s3+ 96, s3+104 );
|
||||||
|
casti_m512i( d, 7 ) = mm512_put_64( s0+112, s0+120, s1+112, s1+120,
|
||||||
|
s2+112, s2+120, s3+112, s3+120 );
|
||||||
|
// bit_len == 1024
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void mm512_dintrlv_4x128( void *d0, void *d1, void *d2,
|
||||||
|
void *d3, const void *s, const int bit_len )
|
||||||
|
{
|
||||||
|
if ( bit_len <= 256 )
|
||||||
|
{
|
||||||
|
mm256_dintrlv_4x128x256( d0, d1, d2, d3, 0, s );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
mm512_dintrlv_4x128x512( d0, d1, d2, d3, 0, s );
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
if ( bit_len <= 640 )
|
||||||
|
{
|
||||||
|
mm128_dintrlv_4x128x128( d0, d1, d2, d3, 1, s+256 );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// bit_len == 1024
|
||||||
|
mm512_dintrlv_4x128x512( d0, d1, d2, d3, 1, s+256 );
|
||||||
|
}
|
||||||
|
|
||||||
|
// input one 8x64 buffer and return 2*4*128
|
||||||
|
static inline void mm512_rintrlv_8x64_4x128( void *dst0, void *dst1,
|
||||||
|
const void *src, int bit_len )
|
||||||
|
{
|
||||||
|
__m512i* d0 = (__m512i*)dst0;
|
||||||
|
__m512i* d1 = (__m512i*)dst1;
|
||||||
|
uint64_t *s = (uint64_t*)src;
|
||||||
|
|
||||||
|
d0[0] = _mm512_set_epi64( s[ 11], s[ 3], s[ 10], s[ 2],
|
||||||
|
s[ 9], s[ 1], s[ 8], s[ 0] );
|
||||||
|
d0[1] = _mm512_set_epi64( s[ 27], s[ 19], s[ 26], s[ 18],
|
||||||
|
s[ 25], s[ 17], s[ 24], s[ 16] );
|
||||||
|
d0[2] = _mm512_set_epi64( s[ 15], s[ 7], s[ 14], s[ 6],
|
||||||
|
s[ 13], s[ 5], s[ 12], s[ 4] );
|
||||||
|
d0[3] = _mm512_set_epi64( s[ 31], s[ 23], s[ 30], s[ 22],
|
||||||
|
s[ 29], s[ 21], s[ 28], s[ 20] );
|
||||||
|
d1[0] = _mm512_set_epi64( s[ 43], s[ 35], s[ 42], s[ 34],
|
||||||
|
s[ 41], s[ 33], s[ 40], s[ 32] );
|
||||||
|
d1[1] = _mm512_set_epi64( s[ 59], s[ 51], s[ 58], s[ 50],
|
||||||
|
s[ 57], s[ 49], s[ 56], s[ 48] );
|
||||||
|
d1[2] = _mm512_set_epi64( s[ 47], s[ 39], s[ 46], s[ 38],
|
||||||
|
s[ 45], s[ 37], s[ 44], s[ 36] );
|
||||||
|
d1[3] = _mm512_set_epi64( s[ 63], s[ 55], s[ 62], s[ 54],
|
||||||
|
s[ 61], s[ 53], s[ 60], s[ 52] );
|
||||||
|
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
|
||||||
|
d0[4] = _mm512_set_epi64( s[ 75], s[ 67], s[ 74], s[ 66],
|
||||||
|
s[ 73], s[ 65], s[ 72], s[ 64] );
|
||||||
|
d0[5] = _mm512_set_epi64( s[ 91], s[ 83], s[ 90], s[ 82],
|
||||||
|
s[ 89], s[ 81], s[ 88], s[ 80] );
|
||||||
|
d0[6] = _mm512_set_epi64( s[ 79], s[ 71], s[ 78], s[ 70],
|
||||||
|
s[ 77], s[ 69], s[ 76], s[ 68] );
|
||||||
|
d0[7] = _mm512_set_epi64( s[ 95], s[ 87], s[ 94], s[ 86],
|
||||||
|
s[ 93], s[ 85], s[ 92], s[ 84] );
|
||||||
|
d1[4] = _mm512_set_epi64( s[107], s[ 99], s[106], s[ 98],
|
||||||
|
s[105], s[ 97], s[104], s[ 96] );
|
||||||
|
d1[5] = _mm512_set_epi64( s[123], s[115], s[122], s[114],
|
||||||
|
s[121], s[113], s[120], s[112] );
|
||||||
|
d1[6] = _mm512_set_epi64( s[111], s[103], s[110], s[102],
|
||||||
|
s[109], s[101], s[108], s[100] );
|
||||||
|
d1[7] = _mm512_set_epi64( s[127], s[119], s[126], s[118],
|
||||||
|
s[125], s[117], s[124], s[116] );
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// input 2 4x128 return 8x64
|
||||||
|
static inline void mm512_rintrlv_4x128_8x64( void *dst, const void *src0,
|
||||||
|
const void *src1, int bit_len )
|
||||||
|
{
|
||||||
|
__m512i* d = (__m512i*)dst;
|
||||||
|
uint64_t *s0 = (uint64_t*)src0;
|
||||||
|
uint64_t *s1 = (uint64_t*)src1;
|
||||||
|
|
||||||
|
d[0] = _mm512_set_epi64( s1[ 6], s1[ 4], s1[ 2], s1[ 0],
|
||||||
|
s0[ 6], s0[ 4], s0[ 2], s0[ 0] );
|
||||||
|
d[1] = _mm512_set_epi64( s1[ 7], s1[ 5], s1[ 3], s1[ 1],
|
||||||
|
s0[ 7], s0[ 5], s0[ 3], s0[ 1] );
|
||||||
|
d[2] = _mm512_set_epi64( s1[14], s1[12], s1[10], s1[ 8],
|
||||||
|
s0[14], s0[12], s0[10], s0[ 8] );
|
||||||
|
d[3] = _mm512_set_epi64( s1[15], s1[13], s1[11], s1[ 9],
|
||||||
|
s0[15], s0[13], s0[11], s0[ 9] );
|
||||||
|
d[4] = _mm512_set_epi64( s1[22], s1[20], s1[18], s1[16],
|
||||||
|
s0[22], s0[20], s0[18], s0[16] );
|
||||||
|
   d[5] = _mm512_set_epi64( s1[23], s1[21], s1[19], s1[17],
                            s0[23], s0[21], s0[19], s0[17] );
   d[6] = _mm512_set_epi64( s1[30], s1[28], s1[26], s1[24],
                            s0[30], s0[28], s0[26], s0[24] );
|
||||||
|
d[7] = _mm512_set_epi64( s1[31], s1[29], s1[27], s1[25],
|
||||||
|
s0[31], s0[29], s0[27], s0[25] );
|
||||||
|
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
|
||||||
|
   d[ 8] = _mm512_set_epi64( s1[38], s1[36], s1[34], s1[32],
                             s0[38], s0[36], s0[34], s0[32] );
   d[ 9] = _mm512_set_epi64( s1[39], s1[37], s1[35], s1[33],
                             s0[39], s0[37], s0[35], s0[33] );
   d[10] = _mm512_set_epi64( s1[46], s1[44], s1[42], s1[40],
                             s0[46], s0[44], s0[42], s0[40] );
   d[11] = _mm512_set_epi64( s1[47], s1[45], s1[43], s1[41],
                             s0[47], s0[45], s0[43], s0[41] );
   d[12] = _mm512_set_epi64( s1[54], s1[52], s1[50], s1[48],
                             s0[54], s0[52], s0[50], s0[48] );
   d[13] = _mm512_set_epi64( s1[55], s1[53], s1[51], s1[49],
                             s0[55], s0[53], s0[51], s0[49] );
   d[14] = _mm512_set_epi64( s1[62], s1[60], s1[58], s1[56],
                             s0[62], s0[60], s0[58], s0[56] );
   d[15] = _mm512_set_epi64( s1[63], s1[61], s1[59], s1[57],
                             s0[63], s0[61], s0[59], s0[57] );
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void mm512_extract_lane_4x128( void *d, const void *s,
|
||||||
|
const int lane, const int bit_len )
|
||||||
|
{
|
||||||
|
int l = lane<<1;
|
||||||
|
if ( bit_len <= 256 )
|
||||||
|
{
|
||||||
|
cast_m256i( d ) = mm256_get_64( s, l, l+1, l+8, l+9 );
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// else bit_len == 512
|
||||||
|
cast_m512i( d ) = mm512_get_64( s, l , l+ 1, l+ 8, l+ 9,
|
||||||
|
l+16, l+17, l+24, l+25 );
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // AVX512
|
||||||
|
#endif // INTRLV_AVX512_H__

simd-utils/intrlv-mmx.h (new file, 126 lines)
@@ -0,0 +1,126 @@
#if !defined(INTRLV_MMX_H__)
|
||||||
|
#define INTRLV_MMX_H__ 1
|
||||||
|
|
||||||
|
#if defined(__MMX__)
|
||||||
|
|
||||||
|
//////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// MMX 64 bit vectors
|
||||||
|
|
||||||
|
#define mm64_put_32( s0, s1 ) \
|
||||||
|
_mm_set_pi32( *((const uint32_t*)(s1)), *((const uint32_t*)(s0)) )
|
||||||
|
|
||||||
|
#define mm64_get_32( s, i0, i1 ) \
|
||||||
|
_mm_set_pi32( ((const uint32_t*)(s))[i1], ((const uint32_t*)(s))[i0] )
|
||||||
|
|
||||||
|
// 1 MMX block, 8 bytes * 2 lanes
|
||||||
|
static inline void mm64_intrlv_2x32( void *d, const void *s0,
|
||||||
|
const void *s1, int len )
|
||||||
|
{
|
||||||
|
casti_m64( d, 0 ) = mm64_put_32( s0 , s1 );
|
||||||
|
casti_m64( d, 1 ) = mm64_put_32( s0+ 4, s1+ 4 );
|
||||||
|
casti_m64( d, 2 ) = mm64_put_32( s0+ 8, s1+ 8 );
|
||||||
|
casti_m64( d, 3 ) = mm64_put_32( s0+ 12, s1+ 12 );
|
||||||
|
casti_m64( d, 4 ) = mm64_put_32( s0+ 16, s1+ 16 );
|
||||||
|
casti_m64( d, 5 ) = mm64_put_32( s0+ 20, s1+ 20 );
|
||||||
|
casti_m64( d, 6 ) = mm64_put_32( s0+ 24, s1+ 24 );
|
||||||
|
casti_m64( d, 7 ) = mm64_put_32( s0+ 28, s1+ 28 );
|
||||||
|
|
||||||
|
if ( len <= 256 ) return;
|
||||||
|
|
||||||
|
casti_m64( d, 8 ) = mm64_put_32( s0+ 32, s1+ 32 );
|
||||||
|
casti_m64( d, 9 ) = mm64_put_32( s0+ 36, s1+ 36 );
|
||||||
|
casti_m64( d,10 ) = mm64_put_32( s0+ 40, s1+ 40 );
|
||||||
|
casti_m64( d,11 ) = mm64_put_32( s0+ 44, s1+ 44 );
|
||||||
|
casti_m64( d,12 ) = mm64_put_32( s0+ 48, s1+ 48 );
|
||||||
|
casti_m64( d,13 ) = mm64_put_32( s0+ 52, s1+ 52 );
|
||||||
|
casti_m64( d,14 ) = mm64_put_32( s0+ 56, s1+ 56 );
|
||||||
|
casti_m64( d,15 ) = mm64_put_32( s0+ 60, s1+ 60 );
|
||||||
|
|
||||||
|
if ( len <= 512 ) return;
|
||||||
|
|
||||||
|
casti_m64( d,16 ) = mm64_put_32( s0+ 64, s1+ 64 );
|
||||||
|
casti_m64( d,17 ) = mm64_put_32( s0+ 68, s1+ 68 );
|
||||||
|
casti_m64( d,18 ) = mm64_put_32( s0+ 72, s1+ 72 );
|
||||||
|
casti_m64( d,19 ) = mm64_put_32( s0+ 76, s1+ 76 );
|
||||||
|
|
||||||
|
if ( len <= 640 ) return;
|
||||||
|
casti_m64( d,20 ) = mm64_put_32( s0+ 80, s1+ 80 );
|
||||||
|
casti_m64( d,21 ) = mm64_put_32( s0+ 84, s1+ 84 );
|
||||||
|
casti_m64( d,22 ) = mm64_put_32( s0+ 88, s1+ 88 );
|
||||||
|
casti_m64( d,23 ) = mm64_put_32( s0+ 92, s1+ 92 );
|
||||||
|
casti_m64( d,24 ) = mm64_put_32( s0+ 96, s1+ 96 );
|
||||||
|
casti_m64( d,25 ) = mm64_put_32( s0+100, s1+100 );
|
||||||
|
casti_m64( d,26 ) = mm64_put_32( s0+104, s1+104 );
|
||||||
|
casti_m64( d,27 ) = mm64_put_32( s0+108, s1+108 );
|
||||||
|
casti_m64( d,28 ) = mm64_put_32( s0+112, s1+112 );
|
||||||
|
casti_m64( d,29 ) = mm64_put_32( s0+116, s1+116 );
|
||||||
|
casti_m64( d,30 ) = mm64_put_32( s0+120, s1+120 );
|
||||||
|
casti_m64( d,31 ) = mm64_put_32( s0+124, s1+124 );
|
||||||
|
}
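
// Reference sketch (illustrative only): the 2x32 interleave expressed with
// scalar indexing. 32 bit word w of lane l (l = 0 or 1) lands at d[ 2*w + l ].
static inline void intrlv_2x32_ref( uint32_t *d, const uint32_t *s0,
                                    const uint32_t *s1, const int bit_len )
{
   for ( int w = 0; w < bit_len/32; w++ )
   {
      d[ 2*w     ] = s0[ w ];
      d[ 2*w + 1 ] = s1[ w ];
   }
}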
|
||||||
|
|
||||||
|
static inline void mm64_dintrlv_2x32( void *d00, void *d01, const int n,
|
||||||
|
const void *s, int len )
|
||||||
|
{
|
||||||
|
casti_m64( d00,0 ) = mm64_get_32( s, 0, 2 );
|
||||||
|
casti_m64( d01,0 ) = mm64_get_32( s, 1, 3 );
|
||||||
|
casti_m64( d00,1 ) = mm64_get_32( s, 4, 6 );
|
||||||
|
casti_m64( d01,1 ) = mm64_get_32( s, 5, 7 );
|
||||||
|
casti_m64( d00,2 ) = mm64_get_32( s, 8, 10 );
|
||||||
|
casti_m64( d01,2 ) = mm64_get_32( s, 9, 11 );
|
||||||
|
casti_m64( d00,3 ) = mm64_get_32( s, 12, 14 );
|
||||||
|
casti_m64( d01,3 ) = mm64_get_32( s, 13, 15 );
|
||||||
|
|
||||||
|
if ( len <= 256 ) return;
|
||||||
|
|
||||||
|
casti_m64( d00,4 ) = mm64_get_32( s, 16, 18 );
|
||||||
|
casti_m64( d01,4 ) = mm64_get_32( s, 17, 19 );
|
||||||
|
casti_m64( d00,5 ) = mm64_get_32( s, 20, 22 );
|
||||||
|
casti_m64( d01,5 ) = mm64_get_32( s, 21, 23 );
|
||||||
|
casti_m64( d00,6 ) = mm64_get_32( s, 24, 26 );
|
||||||
|
casti_m64( d01,6 ) = mm64_get_32( s, 25, 27 );
|
||||||
|
casti_m64( d00,7 ) = mm64_get_32( s, 28, 30 );
|
||||||
|
casti_m64( d01,7 ) = mm64_get_32( s, 29, 31 );
|
||||||
|
|
||||||
|
if ( len <= 512 ) return;
|
||||||
|
|
||||||
|
casti_m64( d00,8 ) = mm64_get_32( s, 32, 34 );
|
||||||
|
casti_m64( d01,8 ) = mm64_get_32( s, 33, 35 );
|
||||||
|
casti_m64( d00,9 ) = mm64_get_32( s, 36, 38 );
|
||||||
|
casti_m64( d01,9 ) = mm64_get_32( s, 37, 39 );
|
||||||
|
|
||||||
|
if ( len <= 640 ) return;
|
||||||
|
casti_m64( d00,10 ) = mm64_get_32( s, 40, 42 );
|
||||||
|
casti_m64( d01,10 ) = mm64_get_32( s, 41, 43 );
|
||||||
|
casti_m64( d00,11 ) = mm64_get_32( s, 44, 46 );
|
||||||
|
casti_m64( d01,11 ) = mm64_get_32( s, 45, 47 );
|
||||||
|
casti_m64( d00,12 ) = mm64_get_32( s, 48, 50 );
|
||||||
|
casti_m64( d01,12 ) = mm64_get_32( s, 49, 51 );
|
||||||
|
casti_m64( d00,13 ) = mm64_get_32( s, 52, 54 );
|
||||||
|
casti_m64( d01,13 ) = mm64_get_32( s, 53, 55 );
|
||||||
|
casti_m64( d00,14 ) = mm64_get_32( s, 56, 58 );
|
||||||
|
casti_m64( d01,14 ) = mm64_get_32( s, 57, 59 );
|
||||||
|
casti_m64( d00,15 ) = mm64_get_32( s, 60, 62 );
|
||||||
|
casti_m64( d01,15 ) = mm64_get_32( s, 61, 63 );
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void mm64_extract_lane_2x32( void *d, const void *s,
|
||||||
|
const int lane, const int bit_len )
|
||||||
|
{
|
||||||
|
casti_m64( d, 0 ) = mm64_get_32( s, lane , lane+ 4 );
|
||||||
|
casti_m64( d, 1 ) = mm64_get_32( s, lane+ 8, lane+12 );
|
||||||
|
casti_m64( d, 2 ) = mm64_get_32( s, lane+16, lane+20 );
|
||||||
|
casti_m64( d, 3 ) = mm64_get_32( s, lane+24, lane+28 );
|
||||||
|
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
casti_m64( d, 4 ) = mm64_get_32( s, lane+32, lane+36 );
|
||||||
|
casti_m64( d, 5 ) = mm64_get_32( s, lane+40, lane+44 );
|
||||||
|
casti_m64( d, 6 ) = mm64_get_32( s, lane+48, lane+52 );
|
||||||
|
casti_m64( d, 7 ) = mm64_get_32( s, lane+56, lane+60 );
|
||||||
|
// bit_len == 512
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#endif // MMX
|
||||||
|
#endif // INTRLV_MMX_H__

simd-utils/intrlv-sse2.h (new file, 195 lines)
@@ -0,0 +1,195 @@
#if !defined(INTRLV_SSE2_H__)
|
||||||
|
#define INTRLV_SSE2_H__ 1
|
||||||
|
|
||||||
|
// Don't call _mm_extract_epi32 directly, it needs SSE4.1.
// Use the mm128_extr_32 wrapper instead, it covers both SSE4.1 & SSE2.
|
||||||
|
|
||||||
|
#if defined(__SSE2__)
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// SSE2 128 bit vectors
|
||||||
|
|
||||||
|
|
||||||
|
// Macros to abstract typecasting
|
||||||
|
|
||||||
|
// Interleave lanes
|
||||||
|
#define mm128_put_64( s0, s1) \
|
||||||
|
_mm_set_epi64x( *((const uint64_t*)(s1)), *((const uint64_t*)(s0)) )
|
||||||
|
|
||||||
|
#define mm128_put_32( s0, s1, s2, s3 ) \
|
||||||
|
_mm_set_epi32( *((const uint32_t*)(s3)), *((const uint32_t*)(s2)), \
|
||||||
|
*((const uint32_t*)(s1)), *((const uint32_t*)(s0)) )
|
||||||
|
|
||||||
|
// Deinterleave lanes
|
||||||
|
#define mm128_get_64( s, i0, i1 ) \
|
||||||
|
_mm_set_epi64x( ((const uint64_t*)(s))[i1], ((const uint64_t*)(s))[i0] )
|
||||||
|
|
||||||
|
#define mm128_get_32( s, i0, i1, i2, i3 ) \
|
||||||
|
_mm_set_epi32( ((const uint32_t*)(s))[i3], ((const uint32_t*)(s))[i2], \
|
||||||
|
((const uint32_t*)(s))[i1], ((const uint32_t*)(s))[i0] )
|
||||||
|
|
||||||
|
// blend 2 vectors while interleaving: { hi[n], lo[n-1], ... hi[1], lo[0] }
#define mm128_intrlv_blend_64( hi, lo ) \
                _mm_blend_epi16( hi, lo, 0x0f )
#define mm128_intrlv_blend_32( hi, lo ) \
                _mm_blend_epi16( hi, lo, 0x33 )

// 1 sse2 block, 16 bytes * 4 lanes
|
||||||
|
|
||||||
|
#define mm128_intrlv_4x32_128( d, s0, s1, s2, s3 )\
|
||||||
|
do { \
|
||||||
|
casti_m128i( d,0 ) = _mm_set_epi32( \
|
||||||
|
mm128_extr_32( s3, 0 ), mm128_extr_32( s2, 0 ), \
|
||||||
|
mm128_extr_32( s1, 0 ), mm128_extr_32( s0, 0 ) ); \
|
||||||
|
casti_m128i( d,1 ) = _mm_set_epi32( \
|
||||||
|
mm128_extr_32( s3, 1 ), mm128_extr_32( s2, 1 ), \
|
||||||
|
mm128_extr_32( s1, 1 ), mm128_extr_32( s0, 1 ) ); \
|
||||||
|
casti_m128i( d,2 ) = _mm_set_epi32( \
|
||||||
|
mm128_extr_32( s3, 2 ), mm128_extr_32( s2, 2 ), \
|
||||||
|
mm128_extr_32( s1, 2 ), mm128_extr_32( s0, 2 ) ); \
|
||||||
|
casti_m128i( d,3 ) = _mm_set_epi32( \
|
||||||
|
mm128_extr_32( s3, 3 ), mm128_extr_32( s2, 3 ), \
|
||||||
|
mm128_extr_32( s1, 3 ), mm128_extr_32( s0, 3 ) ); \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
static inline void mm128_dintrlv_4x32_128( void *d0, void *d1, void *d2,
|
||||||
|
void *d3, const void *src )
|
||||||
|
{
|
||||||
|
__m128i s0 = *(__m128i*) src;
|
||||||
|
__m128i s1 = *(__m128i*)(src+16);
|
||||||
|
__m128i s2 = *(__m128i*)(src+32);
|
||||||
|
__m128i s3 = *(__m128i*)(src+48);
|
||||||
|
|
||||||
|
*(__m128i*)d0 = _mm_set_epi32(
|
||||||
|
mm128_extr_32( s3,0 ), mm128_extr_32( s2,0 ),
|
||||||
|
mm128_extr_32( s1,0 ), mm128_extr_32( s0,0 ) );
|
||||||
|
*(__m128i*)d1 = _mm_set_epi32(
|
||||||
|
mm128_extr_32( s3,1 ), mm128_extr_32( s2,1 ),
|
||||||
|
mm128_extr_32( s1,1 ), mm128_extr_32( s0,1 ) );
|
||||||
|
*(__m128i*)d2 = _mm_set_epi32(
|
||||||
|
mm128_extr_32( s3,2 ), mm128_extr_32( s2,2 ),
|
||||||
|
mm128_extr_32( s1,2 ), mm128_extr_32( s0,2 ) );
|
||||||
|
*(__m128i*)d3 = _mm_set_epi32(
|
||||||
|
mm128_extr_32( s3,3 ), mm128_extr_32( s2,3 ),
|
||||||
|
mm128_extr_32( s1,3 ), mm128_extr_32( s0,3 ) );
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void mm128_intrlv_2x64x128( void *d, const void *s0,
|
||||||
|
const void *s1 )
|
||||||
|
{
|
||||||
|
casti_m128i( d,0 ) = mm128_put_64( s0, s1 );
|
||||||
|
casti_m128i( d,1 ) = mm128_put_64( s0+ 8, s1+ 8 );
|
||||||
|
casti_m128i( d,2 ) = mm128_put_64( s0+16, s1+16 );
|
||||||
|
casti_m128i( d,3 ) = mm128_put_64( s0+24, s1+24 );
|
||||||
|
}
|
||||||
|
|
||||||
|
#define mm128_bswap_intrlv_4x32_128( d, src ) \
|
||||||
|
do { \
|
||||||
|
__m128i ss = mm128_bswap_32( src );\
|
||||||
|
casti_m128i( d,0 ) = _mm_set1_epi32( mm128_extr_32( ss, 0 ) ); \
|
||||||
|
casti_m128i( d,1 ) = _mm_set1_epi32( mm128_extr_32( ss, 1 ) ); \
|
||||||
|
casti_m128i( d,2 ) = _mm_set1_epi32( mm128_extr_32( ss, 2 ) ); \
|
||||||
|
casti_m128i( d,3 ) = _mm_set1_epi32( mm128_extr_32( ss, 3 ) ); \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
|
||||||
|
//
|
||||||
|
// User functions.
|
||||||
|
|
||||||
|
// interleave 4 arrays of 32 bit elements for 128 bit processing
|
||||||
|
// bit_len must be 256, 512, 640 or 1024 bits.
|
||||||
|
#define mm128_interleave_4x32 mm128_intrlv_4x32
|
||||||
|
static inline void mm128_intrlv_4x32( void *d, const void *s0,
|
||||||
|
const void *s1, const void *s2, const void *s3, int bit_len )
|
||||||
|
{
|
||||||
|
mm128_intrlv_4x32_128( d , casti_m128i(s0,0), casti_m128i(s1,0),
|
||||||
|
casti_m128i(s2,0), casti_m128i(s3,0) );
|
||||||
|
mm128_intrlv_4x32_128( d+ 64, casti_m128i(s0,1), casti_m128i(s1,1),
|
||||||
|
casti_m128i(s2,1), casti_m128i(s3,1) );
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
mm128_intrlv_4x32_128( d+128, casti_m128i(s0,2), casti_m128i(s1,2),
|
||||||
|
casti_m128i(s2,2), casti_m128i(s3,2) );
|
||||||
|
mm128_intrlv_4x32_128( d+192, casti_m128i(s0,3), casti_m128i(s1,3),
|
||||||
|
casti_m128i(s2,3), casti_m128i(s3,3) );
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
mm128_intrlv_4x32_128( d+256, casti_m128i(s0,4), casti_m128i(s1,4),
|
||||||
|
casti_m128i(s2,4), casti_m128i(s3,4) );
|
||||||
|
if ( bit_len <= 640 ) return;
|
||||||
|
mm128_intrlv_4x32_128( d+320, casti_m128i(s0,5), casti_m128i(s1,5),
|
||||||
|
casti_m128i(s2,5), casti_m128i(s3,5) );
|
||||||
|
mm128_intrlv_4x32_128( d+384, casti_m128i(s0,6), casti_m128i(s1,6),
|
||||||
|
casti_m128i(s2,6), casti_m128i(s3,6) );
|
||||||
|
mm128_intrlv_4x32_128( d+448, casti_m128i(s0,7), casti_m128i(s1,7),
|
||||||
|
casti_m128i(s2,7), casti_m128i(s3,7) );
|
||||||
|
// bit_len == 1024
|
||||||
|
}
|
||||||
|
|
||||||
|
// Still used by decred due to odd data size: 180 bytes
|
||||||
|
// bit_len must be multiple of 32
|
||||||
|
#define mm128_interleave_4x32x mm128_intrlv_4x32x
|
||||||
|
static inline void mm128_intrlv_4x32x( void *dst, void *src0, void *src1,
|
||||||
|
void *src2, void *src3, int bit_len )
|
||||||
|
{
|
||||||
|
uint32_t *d = (uint32_t*)dst;
|
||||||
|
uint32_t *s0 = (uint32_t*)src0;
|
||||||
|
uint32_t *s1 = (uint32_t*)src1;
|
||||||
|
uint32_t *s2 = (uint32_t*)src2;
|
||||||
|
uint32_t *s3 = (uint32_t*)src3;
|
||||||
|
|
||||||
|
for ( int i = 0; i < bit_len >> 5; i++, d += 4 )
|
||||||
|
{
|
||||||
|
*d = *(s0+i);
|
||||||
|
*(d+1) = *(s1+i);
|
||||||
|
*(d+2) = *(s2+i);
|
||||||
|
*(d+3) = *(s3+i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#define mm128_deinterleave_4x32 mm128_dintrlv_4x32
|
||||||
|
static inline void mm128_dintrlv_4x32( void *d0, void *d1, void *d2,
|
||||||
|
void *d3, const void *s, int bit_len )
|
||||||
|
{
|
||||||
|
mm128_dintrlv_4x32_128( d0 , d1 , d2 , d3 , s );
|
||||||
|
mm128_dintrlv_4x32_128( d0+ 16, d1+ 16, d2+ 16, d3+ 16, s+ 64 );
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
mm128_dintrlv_4x32_128( d0+ 32, d1+ 32, d2+ 32, d3+ 32, s+128 );
|
||||||
|
mm128_dintrlv_4x32_128( d0+ 48, d1+ 48, d2+ 48, d3+ 48, s+192 );
|
||||||
|
if ( bit_len <= 512 ) return;
|
||||||
|
mm128_dintrlv_4x32_128( d0+ 64, d1+ 64, d2+ 64, d3+ 64, s+256 );
|
||||||
|
if ( bit_len <= 640 ) return;
|
||||||
|
mm128_dintrlv_4x32_128( d0+ 80, d1+ 80, d2+ 80, d3+ 80, s+320 );
|
||||||
|
mm128_dintrlv_4x32_128( d0+ 96, d1+ 96, d2+ 96, d3+ 96, s+384 );
|
||||||
|
mm128_dintrlv_4x32_128( d0+112, d1+112, d2+112, d3+112, s+448 );
|
||||||
|
// bit_len == 1024
|
||||||
|
}
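
// Usage sketch (illustrative only, buffer names and sizes are assumptions,
// not part of the original interface): pack four independent 256 bit states
// for 4 way 32 bit SSE2 processing, then unpack them again. vdata must be
// 16 byte aligned.
static inline void example_4way_pack_unpack( uint32_t *s0, uint32_t *s1,
                                             uint32_t *s2, uint32_t *s3 )
{
   uint32_t vdata[ 4*8 ] __attribute__ ((aligned (16)));
   mm128_intrlv_4x32( vdata, s0, s1, s2, s3, 256 );
   // ... 4 way work on vdata goes here ...
   mm128_dintrlv_4x32( s0, s1, s2, s3, vdata, 256 );
}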
|
||||||
|
|
||||||
|
// extract and deinterleave specified lane.
|
||||||
|
static inline void mm128_extract_lane_4x32( void *d, const void *s,
|
||||||
|
const int lane, const int bit_len )
|
||||||
|
{
|
||||||
|
casti_m128i( d, 0 ) =
|
||||||
|
mm128_get_32( s, lane , lane+ 4, lane+ 8, lane+12 );
|
||||||
|
casti_m128i( d, 1 ) =
|
||||||
|
mm128_get_32( s, lane+16, lane+20, lane+24, lane+28 );
|
||||||
|
if ( bit_len <= 256 ) return;
|
||||||
|
casti_m128i( d, 2 ) =
|
||||||
|
mm128_get_32( s, lane+32, lane+36, lane+40, lane+44 );
|
||||||
|
casti_m128i( d, 3 ) =
|
||||||
|
mm128_get_32( s, lane+48, lane+52, lane+56, lane+60 );
|
||||||
|
// bit_len == 512
|
||||||
|
}
|
||||||
|
|
||||||
|
// Interleave 80 bytes of 32 bit data for 4 lanes.
|
||||||
|
static inline void mm128_bswap_intrlv80_4x32( void *d, const void *s )
|
||||||
|
{
|
||||||
|
mm128_bswap_intrlv_4x32_128( d , casti_m128i( s, 0 ) );
|
||||||
|
mm128_bswap_intrlv_4x32_128( d+ 64, casti_m128i( s, 1 ) );
|
||||||
|
mm128_bswap_intrlv_4x32_128( d+128, casti_m128i( s, 2 ) );
|
||||||
|
mm128_bswap_intrlv_4x32_128( d+192, casti_m128i( s, 3 ) );
|
||||||
|
mm128_bswap_intrlv_4x32_128( d+256, casti_m128i( s, 4 ) );
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // SSE2
|
||||||
|
#endif // INTRLV_SSE2_H__
|
||||||
|
|
new file: simd-utils/simd-avx2.h (484 lines)
@@ -0,0 +1,484 @@
|
|||||||
|
#if !defined(SIMD_AVX2_H__)
|
||||||
|
#define SIMD_AVX2_H__ 1
|
||||||
|
|
||||||
|
#if defined(__AVX2__)
|
||||||
|
|
||||||
|
/////////////////////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// AVX2 256 bit vectors
|
||||||
|
//
|
||||||
|
// AVX2 is required for integer support of 256 bit vectors.
|
||||||
|
// Some 256 bit vector utilities require AVX512 or have more efficient
|
||||||
|
// AVX512 implementations. They will be selected automatically but their use
|
||||||
|
// is limited because 256 bit vectors are less likely to be used when 512
|
||||||
|
// is available.
|
||||||
|
|
||||||
|
// Vector type overlays used by compile time vector constants.
|
||||||
|
// Constants of these types reside in memory.
|
||||||
|
|
||||||
|
|
||||||
|
// Compile time vector constants and initializers.
|
||||||
|
//
|
||||||
|
// The following macro constants and functions should only be used
|
||||||
|
// for compile time initialization of constant and variable vector
|
||||||
|
// arrays. These constants use memory, use _mm256_set at run time to
|
||||||
|
// avoid using memory.
|
||||||
|
|
||||||
|
#define mm256_const_64( x3, x2, x1, x0 ) {{ x3, x2, x1, x0 }}
|
||||||
|
#define mm256_const1_64( x ) {{ x,x,x,x }}
|
||||||
|
|
||||||
|
#define mm256_const_32( x7, x6, x5, x4, x3, x2, x1, x0 ) \
|
||||||
|
{{ x7, x6, x5, x4, x3, x2, x1, x0 }}
|
||||||
|
#define mm256_const1_32( x ) {{ x,x,x,x, x,x,x,x }}
|
||||||
|
|
||||||
|
#define mm256_const_16( x15, x14, x13, x12, x11, x10, x09, x08, \
|
||||||
|
x07, x06, x05, x04, x03, x02, x01, x00 ) \
|
||||||
|
{{ x15, x14, x13, x12, x11, x10, x09, x08, \
|
||||||
|
x07, x06, x05, x04, x03, x02, x01, x00 }}
|
||||||
|
#define mm256_const1_16( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }}
|
||||||
|
|
||||||
|
#define mm256_const_8( x31, x30, x29, x28, x27, x26, x25, x24, \
|
||||||
|
x23, x22, x21, x20, x19, x18, x17, x16, \
|
||||||
|
x15, x14, x13, x12, x11, x10, x09, x08, \
|
||||||
|
x07, x06, x05, x04, x03, x02, x01, x00 ) \
|
||||||
|
{{ x31, x30, x29, x28, x27, x26, x25, x24, \
|
||||||
|
x23, x22, x21, x20, x19, x18, x17, x16, \
|
||||||
|
x15, x14, x13, x12, x11, x10, x09, x08, \
|
||||||
|
x07, x06, x05, x04, x03, x02, x01, x00 }}
|
||||||
|
#define mm256_const1_8( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x, \
|
||||||
|
x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }}
|
||||||
|
|
||||||
|
// Predefined compile time constant vectors.
|
||||||
|
// Use Pseudo constants at run time for all simple constant vectors.
|
||||||
|
#define c256_zero mm256_const1_64( 0ULL )
|
||||||
|
#define c256_one_256 mm256_const_64( 0ULL, 0ULL, 0ULL, 1ULL )
|
||||||
|
#define c256_one_128 mm256_const_64( 0ULL, 1ULL, 0ULL, 1ULL )
|
||||||
|
#define c256_one_64 mm256_const1_64( 1ULL )
|
||||||
|
#define c256_one_32 mm256_const1_32( 1UL )
|
||||||
|
#define c256_one_16 mm256_const1_16( 1U )
|
||||||
|
#define c256_one_8 mm256_const1_8( 1U )
|
||||||
|
#define c256_neg1 mm256_const1_64( 0xFFFFFFFFFFFFFFFFULL )
|
||||||
|
#define c256_neg1_64 mm256_const1_64( 0xFFFFFFFFFFFFFFFFULL )
|
||||||
|
#define c256_neg1_32 mm256_const1_32( 0xFFFFFFFFUL )
|
||||||
|
#define c256_neg1_16 mm256_const1_16( 0xFFFFU )
|
||||||
|
#define c256_neg1_8 mm256_const1_8( 0xFFU )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Pseudo constants.
|
||||||
|
// These can't be used for compile time initialization but are preferable
|
||||||
|
// for simple constant vectors at run time.
|
||||||
|
|
||||||
|
#define m256_zero _mm256_setzero_si256()
|
||||||
|
#define m256_one_256 _mm256_set_epi64x( 0ULL, 0ULL, 0ULL, 1ULL )
|
||||||
|
#define m256_one_128 _mm256_set_epi64x( 0ULL, 1ULL, 0ULL, 1ULL )
|
||||||
|
#define m256_one_64 _mm256_set1_epi64x( 1ULL )
|
||||||
|
#define m256_one_32 _mm256_set1_epi32( 1UL )
|
||||||
|
#define m256_one_16 _mm256_set1_epi16( 1U )
|
||||||
|
#define m256_one_8 _mm256_set1_epi8( 1U )
|
||||||
|
#define m256_neg1 _mm256_set1_epi64x( 0xFFFFFFFFFFFFFFFFULL )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Basic operations without SIMD equivalent
|
||||||
|
|
||||||
|
// Bitwise not ( ~x )
|
||||||
|
#define mm256_not( x )       _mm256_xor_si256( (x), m256_neg1 )
|
||||||
|
|
||||||
|
// Unary negation of each element ( -a )
|
||||||
|
#define mm256_negate_64( a ) _mm256_sub_epi64( m256_zero, a )
|
||||||
|
#define mm256_negate_32( a ) _mm256_sub_epi32( m256_zero, a )
|
||||||
|
#define mm256_negate_16( a ) _mm256_sub_epi16( m256_zero, a )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Vector size conversion.
|
||||||
|
//
|
||||||
|
// Allows operations on either or both halves of a 256 bit vector serially.
|
||||||
|
// Handy for parallel AES.
|
||||||
|
// Caveats:
|
||||||
|
// _mm256_castsi256_si128 is free and without side effects.
|
||||||
|
// _mm256_castsi128_si256 is also free but leaves the high half
|
||||||
|
// undefined. That's ok if the hi half will be subsequently assigned.
// If assigning both, do lo first; if assigning only one, use
|
||||||
|
// _mm256_inserti128_si256.
|
||||||
|
//
|
||||||
|
#define mm128_extr_lo128_256( a ) _mm256_castsi256_si128( a )
|
||||||
|
#define mm128_extr_hi128_256( a ) _mm256_extracti128_si256( a, 1 )
|
||||||
|
|
||||||
|
// input __m128i, returns __m256i
|
||||||
|
// To build a 256 bit vector from 2 128 bit vectors lo must be done first.
|
||||||
|
// lo alone leaves hi undefined, hi alone leaves lo unchanged.
|
||||||
|
// Both cost one clock while preserving the other half.
|
||||||
|
// Insert b into specified half of a leaving other half of a unchanged.
|
||||||
|
#define mm256_ins_lo128_256( a, b ) _mm256_inserti128_si256( a, b, 0 )
|
||||||
|
#define mm256_ins_hi128_256( a, b ) _mm256_inserti128_si256( a, b, 1 )
|
||||||
|
|
||||||
|
// concatenate two 128 bit vectors into one 256 bit vector
|
||||||
|
#define mm256_concat_128( hi, lo ) \
|
||||||
|
mm256_ins_hi128_256( _mm256_castsi128_si256( lo ), hi )
|
||||||
|
|
||||||
|
// Parallel AES, for when x is expected to be in a 256 bit register.
|
||||||
|
#define mm256_aesenc_2x128( x ) \
|
||||||
|
mm256_concat_128( \
|
||||||
|
_mm_aesenc_si128( mm128_extr_hi128_256( x ), m128_zero ), \
|
||||||
|
_mm_aesenc_si128( mm128_extr_lo128_256( x ), m128_zero ) )
|
||||||
|
|
||||||
|
#define mm256_aesenckey_2x128( x, k ) \
|
||||||
|
mm256_concat_128( \
|
||||||
|
            _mm_aesenc_si128( mm128_extr_hi128_256( x ), \
                              mm128_extr_hi128_256( k ) ), \
            _mm_aesenc_si128( mm128_extr_lo128_256( x ), \
                              mm128_extr_lo128_256( k ) ) )
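
// Usage sketch (illustrative, not part of the original header): one AES
// round applied to both 128 bit halves of a 256 bit state with a 256 bit
// round key, via the macro above. Assumes AES-NI is enabled in addition
// to AVX2.
static inline __m256i example_aes_round_256( __m256i state, __m256i rkey )
{
   return mm256_aesenckey_2x128( state, rkey );
}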
|
||||||
|
|
||||||
|
#define mm256_paesenc_2x128( y, x ) do \
|
||||||
|
{ \
|
||||||
|
  __m128i *X = (__m128i*)x; \
  __m128i *Y = (__m128i*)y; \
  Y[0] = _mm_aesenc_si128( X[0], m128_zero ); \
  Y[1] = _mm_aesenc_si128( X[1], m128_zero ); \
|
||||||
|
} while(0);
|
||||||
|
|
||||||
|
// With pointers.
|
||||||
|
#define mm256_paesenckey_2x128( y, x, k ) do \
|
||||||
|
{ \
|
||||||
|
  __m128i *X = (__m128i*)x; \
  __m128i *Y = (__m128i*)y; \
  __m128i *K = (__m128i*)k; \
  Y[0] = _mm_aesenc_si128( X[0], K[0] ); \
  Y[1] = _mm_aesenc_si128( X[1], K[1] ); \
|
||||||
|
} while(0);
|
||||||
|
|
||||||
|
//
|
||||||
|
// Pointer casting
|
||||||
|
|
||||||
|
// p = any aligned pointer
|
||||||
|
// returns p as pointer to vector type, not very useful
|
||||||
|
#define castp_m256i(p) ((__m256i*)(p))
|
||||||
|
|
||||||
|
// p = any aligned pointer
|
||||||
|
// returns *p, watch your pointer arithmetic
|
||||||
|
#define cast_m256i(p) (*((__m256i*)(p)))
|
||||||
|
|
||||||
|
// p = any aligned pointer, i = scaled array index
|
||||||
|
// returns value p[i]
|
||||||
|
#define casti_m256i(p,i) (((__m256i*)(p))[(i)])
|
||||||
|
|
||||||
|
// p = any aligned pointer, o = scaled offset
|
||||||
|
// returns pointer p+o
|
||||||
|
#define casto_m256i(p,o) (((__m256i*)(p))+(o))
|
||||||
|
|
||||||
|
|
||||||
|
// Gather scatter
|
||||||
|
|
||||||
|
#define mm256_gather_64( d, s0, s1, s2, s3 ) \
|
||||||
|
((uint64_t*)(d))[0] = (uint64_t)(s0); \
|
||||||
|
((uint64_t*)(d))[1] = (uint64_t)(s1); \
|
||||||
|
((uint64_t*)(d))[2] = (uint64_t)(s2); \
|
||||||
|
((uint64_t*)(d))[3] = (uint64_t)(s3);
|
||||||
|
|
||||||
|
#define mm256_gather_32( d, s0, s1, s2, s3, s4, s5, s6, s7 ) \
|
||||||
|
((uint32_t*)(d))[0] = (uint32_t)(s0); \
|
||||||
|
((uint32_t*)(d))[1] = (uint32_t)(s1); \
|
||||||
|
((uint32_t*)(d))[2] = (uint32_t)(s2); \
|
||||||
|
((uint32_t*)(d))[3] = (uint32_t)(s3); \
|
||||||
|
((uint32_t*)(d))[4] = (uint32_t)(s4); \
|
||||||
|
((uint32_t*)(d))[5] = (uint32_t)(s5); \
|
||||||
|
((uint32_t*)(d))[6] = (uint32_t)(s6); \
|
||||||
|
((uint32_t*)(d))[7] = (uint32_t)(s7);
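
// Usage sketch (illustrative, variable names are assumptions): gather one
// 32 bit word from each of eight per-lane hash buffers into contiguous
// memory, e.g. to collect lane results after 8 way hashing.
static inline void example_gather_lane_word( uint32_t *d, uint32_t **h,
                                             int word )
{
   mm256_gather_32( d, h[0][word], h[1][word], h[2][word], h[3][word],
                       h[4][word], h[5][word], h[6][word], h[7][word] );
}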
|
||||||
|
|
||||||
|
|
||||||
|
// Scatter data from contiguous memory.
|
||||||
|
// All arguments are pointers
|
||||||
|
#define mm256_scatter_64( d0, d1, d2, d3, s ) \
|
||||||
|
*((uint64_t*)(d0)) = ((uint64_t*)(s))[0]; \
|
||||||
|
*((uint64_t*)(d1)) = ((uint64_t*)(s))[1]; \
|
||||||
|
*((uint64_t*)(d2)) = ((uint64_t*)(s))[2]; \
|
||||||
|
*((uint64_t*)(d3)) = ((uint64_t*)(s))[3];
|
||||||
|
|
||||||
|
#define mm256_scatter_32( d0, d1, d2, d3, d4, d5, d6, d7, s ) \
|
||||||
|
*((uint32_t*)(d0)) = ((uint32_t*)(s))[0]; \
|
||||||
|
*((uint32_t*)(d1)) = ((uint32_t*)(s))[1]; \
|
||||||
|
*((uint32_t*)(d2)) = ((uint32_t*)(s))[2]; \
|
||||||
|
*((uint32_t*)(d3)) = ((uint32_t*)(s))[3]; \
|
||||||
|
*((uint32_t*)(d4)) = ((uint32_t*)(s))[4]; \
|
||||||
|
*((uint32_t*)(d5)) = ((uint32_t*)(s))[5]; \
|
||||||
|
*((uint32_t*)(d6)) = ((uint32_t*)(s))[6]; \
|
||||||
|
*((uint32_t*)(d7)) = ((uint32_t*)(s))[7];
|
||||||
|
|
||||||
|
|
||||||
|
//
|
||||||
|
// Memory functions
|
||||||
|
// n = number of 256 bit (32 byte) vectors
|
||||||
|
|
||||||
|
static inline void memset_zero_256( __m256i *dst, int n )
|
||||||
|
{ for ( int i = 0; i < n; i++ ) dst[i] = m256_zero; }
|
||||||
|
|
||||||
|
static inline void memset_256( __m256i *dst, const __m256i a, int n )
|
||||||
|
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }
|
||||||
|
|
||||||
|
static inline void memcpy_256( __m256i *dst, const __m256i *src, int n )
|
||||||
|
{ for ( int i = 0; i < n; i ++ ) dst[i] = src[i]; }
|
||||||
|
|
||||||
|
//
|
||||||
|
// Bit rotations.
|
||||||
|
//
|
||||||
|
// The only bit shift for more than 64 bits is with __int128.
|
||||||
|
//
|
||||||
|
// AVX512 has bit rotate for 256 bit vectors with 64 or 32 bit elements
|
||||||
|
// but is of little value
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rotate each element of v by c bits
|
||||||
|
#define mm256_ror_64( v, c ) \
|
||||||
|
_mm256_or_si256( _mm256_srli_epi64( v, c ), \
|
||||||
|
_mm256_slli_epi64( v, 64-(c) ) )
|
||||||
|
|
||||||
|
#define mm256_rol_64( v, c ) \
|
||||||
|
_mm256_or_si256( _mm256_slli_epi64( v, c ), \
|
||||||
|
_mm256_srli_epi64( v, 64-(c) ) )
|
||||||
|
|
||||||
|
#define mm256_ror_32( v, c ) \
|
||||||
|
_mm256_or_si256( _mm256_srli_epi32( v, c ), \
|
||||||
|
_mm256_slli_epi32( v, 32-(c) ) )
|
||||||
|
|
||||||
|
#define mm256_rol_32( v, c ) \
|
||||||
|
_mm256_or_si256( _mm256_slli_epi32( v, c ), \
|
||||||
|
_mm256_srli_epi32( v, 32-(c) ) )
|
||||||
|
|
||||||
|
#define mm256_ror_16( v, c ) \
|
||||||
|
_mm256_or_si256( _mm256_srli_epi16( v, c ), \
|
||||||
|
_mm256_slli_epi16( v, 16-(c) ) )
|
||||||
|
|
||||||
|
#define mm256_rol_16( v, c ) \
|
||||||
|
_mm256_or_si256( _mm256_slli_epi16( v, c ), \
|
||||||
|
_mm256_srli_epi16( v, 16-(c) ) )
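
// Illustrative self-check (assumption: not part of the original header):
// each 64 bit lane of mm256_ror_64 matches the scalar rotate of that lane.
// Valid for 0 < c < 64; shown here with c = 17.
static inline int example_ror_64_matches_scalar( uint64_t a )
{
   __m256i  v = _mm256_set1_epi64x( (long long)a );
   uint64_t r = (uint64_t)_mm256_extract_epi64( mm256_ror_64( v, 17 ), 0 );
   return r == ( ( a >> 17 ) | ( a << 47 ) );
}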
|
||||||
|
|
||||||
|
// Rotate bits in each element of v by the amount in corresponding element of
|
||||||
|
// index vector c
|
||||||
|
#define mm256_rorv_64( v, c ) \
|
||||||
|
_mm256_or_si256( \
|
||||||
|
_mm256_srlv_epi64( v, _mm256_set1_epi64x( c ) ), \
|
||||||
|
_mm256_sllv_epi64( v, _mm256_set1_epi64x( 64-(c) ) ) )
|
||||||
|
|
||||||
|
#define mm256_rolv_64( v, c ) \
|
||||||
|
_mm256_or_si256( \
|
||||||
|
_mm256_sllv_epi64( v, _mm256_set1_epi64x( c ) ), \
|
||||||
|
_mm256_srlv_epi64( v, _mm256_set1_epi64x( 64-(c) ) ) )
|
||||||
|
|
||||||
|
|
||||||
|
#define mm256_rorv_32( v, c ) \
|
||||||
|
_mm256_or_si256( \
|
||||||
|
_mm256_srlv_epi32( v, _mm256_set1_epi32( c ) ), \
|
||||||
|
_mm256_sllv_epi32( v, _mm256_set1_epi32( 32-(c) ) ) )
|
||||||
|
|
||||||
|
#define mm256_rolv_32( v, c ) \
|
||||||
|
_mm256_or_si256( \
|
||||||
|
_mm256_sllv_epi32( v, _mm256_set1_epi32( c ) ), \
|
||||||
|
_mm256_srlv_epi32( v, _mm256_set1_epi32( 32-(c) ) ) )
|
||||||
|
|
||||||
|
// AVX512 can do 16 bit elements.
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rotate elements across all lanes.
|
||||||
|
//
|
||||||
|
// AVX2 has no full vector permute for elements less than 32 bits.
|
||||||
|
// AVX512 has finer granularity full vector permutes.
|
||||||
|
|
||||||
|
// Swap 128 bit elements in 256 bit vector.
|
||||||
|
#define mm256_swap_128( v ) _mm256_permute4x64_epi64( v, 0x4e )
|
||||||
|
|
||||||
|
// Rotate 256 bit vector by one 64 bit element
|
||||||
|
#define mm256_ror_1x64( v ) _mm256_permute4x64_epi64( v, 0x39 )
|
||||||
|
#define mm256_rol_1x64( v ) _mm256_permute4x64_epi64( v, 0x93 )
|
||||||
|
|
||||||
|
// Rotate 256 bit vector by one 32 bit element.
|
||||||
|
#define mm256_ror_1x32( v ) \
|
||||||
|
_mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 0,7,6,5, 4,3,2,1 ) )
|
||||||
|
#define mm256_rol_1x32( v ) \
|
||||||
|
_mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 6,5,4,3, 2,1,0,7 ) )
|
||||||
|
|
||||||
|
// Rotate 256 bit vector by three 32 bit elements (96 bits).
|
||||||
|
#define mm256_ror_3x32( v ) \
|
||||||
|
_mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 2,1,0,7, 6,5,4,3 ) )
|
||||||
|
#define mm256_rol_3x32( v ) \
|
||||||
|
_mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 4,3,2,1, 0,7,6,5 ) )
|
||||||
|
|
||||||
|
// AVX512 can do 16 & 8 bit elements.
|
||||||
|
#if defined(__AVX512VL__)
|
||||||
|
|
||||||
|
// Rotate 256 bit vector by one 16 bit element.
|
||||||
|
#define mm256_ror_1x16( v ) \
|
||||||
|
_mm256_permutexvar_epi16( _mm256_set_epi16( \
|
||||||
|
0,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ), v )
|
||||||
|
|
||||||
|
#define mm256_rol_1x16( v ) \
|
||||||
|
_mm256_permutexvar_epi16( _mm256_set_epi16( \
|
||||||
|
14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,15 ), v )
|
||||||
|
|
||||||
|
// Rotate 256 bit vector by one byte.
|
||||||
|
#define mm256_ror_1x8( v ) \
|
||||||
|
_mm256_permutexvar_epi8( _mm256_set_epi8( \
|
||||||
|
0,31,30,29,28,27,26,25, 24,23,22,21,20,19,18,17, \
|
||||||
|
16,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ), v )
|
||||||
|
|
||||||
|
#define mm256_rol_1x8( v ) \
|
||||||
|
_mm256_permutexvar_epi8( _mm256_set_epi8( \
|
||||||
|
30,29,28,27,26,25,24,23, 22,21,20,19,18,17,16,15, \
|
||||||
|
14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,31 ), v )
|
||||||
|
|
||||||
|
#endif // AVX512
|
||||||
|
|
||||||
|
// Invert vector: {3,2,1,0} -> {0,1,2,3}
|
||||||
|
#define mm256_invert_64( v ) _mm256_permute4x64_epi64( v, 0x1b )
|
||||||
|
|
||||||
|
#define mm256_invert_32( v ) \
|
||||||
|
_mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 0,1,2,3,4,5,6,7 ) )
|
||||||
|
|
||||||
|
// AVX512 can do 16 & 8 bit elements.
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rotate elements within lanes of 256 bit vector.
|
||||||
|
|
||||||
|
// Swap 64 bit elements in each 128 bit lane.
|
||||||
|
#define mm256_swap64_128( v ) _mm256_shuffle_epi32( v, 0x4e )
|
||||||
|
|
||||||
|
// Rotate each 128 bit lane by one 32 bit element.
|
||||||
|
#define mm256_ror1x32_128( v ) _mm256_shuffle_epi32( v, 0x39 )
|
||||||
|
#define mm256_rol1x32_128( v ) _mm256_shuffle_epi32( v, 0x93 )
|
||||||
|
|
||||||
|
// Rotate each 128 bit lane by one 16 bit element.
|
||||||
|
#define mm256_rol1x16_128( v ) \
|
||||||
|
        _mm256_shuffle_epi8( v, _mm256_set_epi8( 13,12,11,10,  9, 8, 7, 6, \
                                                  5, 4, 3, 2,  1, 0,15,14, \
                                                 13,12,11,10,  9, 8, 7, 6, \
                                                  5, 4, 3, 2,  1, 0,15,14 ) )
|
||||||
|
#define mm256_ror1x16_128( v ) \
|
||||||
|
        _mm256_shuffle_epi8( v, _mm256_set_epi8(  1, 0,15,14, 13,12,11,10, \
                                                  9, 8, 7, 6,  5, 4, 3, 2, \
                                                  1, 0,15,14, 13,12,11,10, \
                                                  9, 8, 7, 6,  5, 4, 3, 2 ) )
|
||||||
|
|
||||||
|
// Rotate each 128 bit lane by one byte
|
||||||
|
#define mm256_rol1x8_128( v ) \
|
||||||
|
        _mm256_shuffle_epi8( v, _mm256_set_epi8( 14,13,12,11, 10, 9, 8, 7, \
                                                  6, 5, 4, 3,  2, 1, 0,15, \
                                                 14,13,12,11, 10, 9, 8, 7, \
                                                  6, 5, 4, 3,  2, 1, 0,15 ) )
|
||||||
|
#define mm256_ror1x8_128( v ) \
|
||||||
|
        _mm256_shuffle_epi8( v, _mm256_set_epi8(  0,15,14,13, 12,11,10, 9, \
                                                  8, 7, 6, 5,  4, 3, 2, 1, \
                                                  0,15,14,13, 12,11,10, 9, \
                                                  8, 7, 6, 5,  4, 3, 2, 1 ) )
|
||||||
|
|
||||||
|
// Rotate each 128 bit lane by c bytes.
|
||||||
|
#define mm256_bror_128( v, c ) \
|
||||||
|
_mm256_or_si256( _mm256_bsrli_epi128( v, c ), \
|
||||||
|
_mm256_bslli_epi128( v, 16-(c) ) )
|
||||||
|
#define mm256_brol_128( v, c ) \
|
||||||
|
_mm256_or_si256( _mm256_bslli_epi128( v, c ), \
|
||||||
|
_mm256_bsrli_epi128( v, 16-(c) ) )
|
||||||
|
|
||||||
|
// Swap 32 bit elements in each 64 bit lane
|
||||||
|
#define mm256_swap32_64( v ) _mm256_shuffle_epi32( v, 0xb1 )
|
||||||
|
|
||||||
|
#define mm256_ror16_64( v ) \
|
||||||
|
        _mm256_shuffle_epi8( v, _mm256_set_epi8(  9, 8,15,14, 13,12,11,10, \
                                                  1, 0, 7, 6,  5, 4, 3, 2, \
                                                  9, 8,15,14, 13,12,11,10, \
                                                  1, 0, 7, 6,  5, 4, 3, 2 ) )
|
||||||
|
#define mm256_rol16_64( v ) \
|
||||||
|
        _mm256_shuffle_epi8( v, _mm256_set_epi8( 13,12,11,10,  9, 8,15,14, \
                                                  5, 4, 3, 2,  1, 0, 7, 6, \
                                                 13,12,11,10,  9, 8,15,14, \
                                                  5, 4, 3, 2,  1, 0, 7, 6 ) )
|
||||||
|
|
||||||
|
|
||||||
|
// Swap 16 bit elements in each 32 bit lane
|
||||||
|
#define mm256_swap16_32( v ) \
        _mm256_shuffle_epi8( v, _mm256_set_epi8( 13,12,15,14,  9, 8,11,10, \
                                                  5, 4, 7, 6,  1, 0, 3, 2, \
                                                 13,12,15,14,  9, 8,11,10, \
                                                  5, 4, 7, 6,  1, 0, 3, 2 ) )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Swap bytes in vector elements, endian bswap.
|
||||||
|
#define mm256_bswap_64( v ) \
|
||||||
|
_mm256_shuffle_epi8( v, _mm256_set_epi8( 8, 9,10,11,12,13,14,15, \
|
||||||
|
0, 1, 2, 3, 4, 5, 6, 7, \
|
||||||
|
8, 9,10,11,12,13,14,15, \
|
||||||
|
0, 1, 2, 3, 4, 5, 6, 7 ) )
|
||||||
|
|
||||||
|
#define mm256_bswap_32( v ) \
|
||||||
|
_mm256_shuffle_epi8( v, _mm256_set_epi8( 12,13,14,15, 8, 9,10,11, \
|
||||||
|
4, 5, 6, 7, 0, 1, 2, 3, \
|
||||||
|
12,13,14,15, 8, 9,10,11, \
|
||||||
|
4, 5, 6, 7, 0, 1, 2, 3 ) )
|
||||||
|
|
||||||
|
#define mm256_bswap_16( v ) \
|
||||||
|
_mm256_shuffle_epi8( v, _mm256_set_epi8( 14,15, 12,13, 10,11, 8, 9, \
|
||||||
|
6, 7, 4, 5, 2, 3, 0, 1, \
|
||||||
|
14,15, 12,13, 10,11, 8, 9, \
|
||||||
|
6, 7, 4, 5, 2, 3, 0, 1 ) )
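
// Usage sketch (illustrative): byte swap eight big endian 32 bit words,
// e.g. one row of block header data, in a single operation. The pointer
// is assumed to be 32 byte aligned.
static inline __m256i example_load_be32_256( const void *p )
{
   return mm256_bswap_32( casti_m256i( p, 0 ) );
}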
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rotate two concatenated 256 bit vectors as one 512 bit vector by specified
|
||||||
|
// number of elements. Rotate is done in place, source arguments are
|
||||||
|
// overwritten.
|
||||||
|
// Some of these can use permute but appears to be slower. Maybe a Ryzen
|
||||||
|
// issue
|
||||||
|
|
||||||
|
#define mm256_swap256_512( v1, v2 ) \
|
||||||
|
v1 = _mm256_xor_si256(v1, v2); \
|
||||||
|
v2 = _mm256_xor_si256(v1, v2); \
|
||||||
|
v1 = _mm256_xor_si256(v1, v2);
|
||||||
|
|
||||||
|
#define mm256_ror1x128_512( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m256i t = _mm256_alignr_epi8( v1, v2, 16 ); \
|
||||||
|
v1 = _mm256_alignr_epi8( v2, v1, 16 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_rol1x128_512( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m256i t = _mm256_alignr_epi8( v1, v2, 16 ); \
|
||||||
|
v2 = _mm256_alignr_epi8( v2, v1, 16 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_ror1x64_512( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m256i t = _mm256_alignr_epi8( v1, v2, 8 ); \
|
||||||
|
v1 = _mm256_alignr_epi8( v2, v1, 8 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_rol1x64_512( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m256i t = _mm256_alignr_epi8( v1, v2, 24 ); \
|
||||||
|
v2 = _mm256_alignr_epi8( v2, v1, 24 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_ror1x32_512( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m256i t = _mm256_alignr_epi8( v1, v2, 4 ); \
|
||||||
|
v1 = _mm256_alignr_epi8( v2, v1, 4 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_rol1x32_512( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m256i t = _mm256_alignr_epi8( v1, v2, 28 ); \
|
||||||
|
v2 = _mm256_alignr_epi8( v2, v1, 28 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_ror1x16_512( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m256i t = _mm256_alignr_epi8( v1, v2, 2 ); \
|
||||||
|
v1 = _mm256_alignr_epi8( v2, v1, 2 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_rol1x16_512( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m256i t = _mm256_alignr_epi8( v1, v2, 30 ); \
|
||||||
|
v2 = _mm256_alignr_epi8( v2, v1, 30 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_ror1x8_512( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m256i t = _mm256_alignr_epi8( v1, v2, 1 ); \
|
||||||
|
v1 = _mm256_alignr_epi8( v2, v1, 1 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm256_rol1x8_512( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m256i t = _mm256_alignr_epi8( v1, v2, 31 ); \
|
||||||
|
v2 = _mm256_alignr_epi8( v2, v1, 31 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#endif // __AVX2__
|
||||||
|
#endif // SIMD_AVX2_H__
|
||||||
|
|
new file: simd-utils/simd-avx512.h (604 lines)
@@ -0,0 +1,604 @@
|
|||||||
|
#if !defined(SIMD_AVX512_H__)
|
||||||
|
#define SIMD_AVX512_H__ 1
|
||||||
|
|
||||||
|
#if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||||
|
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// Some extensions in AVX512 supporting operations on
|
||||||
|
// smaller elements in 256 bit vectors.
|
||||||
|
|
||||||
|
// Variable rotate, each element rotates by corresponding index.
|
||||||
|
#define mm256_rorv_16( v, c ) \
|
||||||
|
_mm256_or_si256( \
|
||||||
|
_mm256_srlv_epi16( v, _mm256_set1_epi16( c ) ), \
|
||||||
|
_mm256_sllv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )
|
||||||
|
|
||||||
|
#define mm256_rolv_16( v, c ) \
|
||||||
|
_mm256_or_si256( \
|
||||||
|
_mm256_sllv_epi16( v, _mm256_set1_epi16( c ) ), \
|
||||||
|
_mm256_srlv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )
|
||||||
|
|
||||||
|
// Invert vector: {7,6,5,4,3,2,1,0} -> {0,1,2,3,4,5,6,7}
|
||||||
|
#define mm256_invert_16( v ) \
   _mm256_permutexvar_epi16( _mm256_set_epi16( 0, 1, 2, 3, 4, 5, 6, 7, \
                                               8, 9,10,11,12,13,14,15 ), v )
|
||||||
|
|
||||||
|
#define mm256_invert_8( v ) \
   _mm256_permutexvar_epi8( _mm256_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, \
                                             8, 9,10,11,12,13,14,15, \
                                            16,17,18,19,20,21,22,23, \
                                            24,25,26,27,28,29,30,31 ), v )
|
||||||
|
|
||||||
|
//////////////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// AVX512 512 bit vectors
|
||||||
|
//
|
||||||
|
// Other AVX512 extensions that may be required for some functions.
|
||||||
|
// __AVX512VBMI__ __AVX512VAES__
|
||||||
|
//
|
||||||
|
// Experimental, not fully tested.
|
||||||
|
|
||||||
|
|
||||||
|
//
|
||||||
|
// Compile time vector constants and initializers.
|
||||||
|
//
|
||||||
|
// The following macro constants and functions should only be used
|
||||||
|
// for compile time initialization of constant and variable vector
|
||||||
|
// arrays. These constants use memory, use set instruction or pseudo
|
||||||
|
// constants at run time to avoid using memory.
|
||||||
|
|
||||||
|
// Constant initializers
|
||||||
|
#define mm512_const_64( x7, x6, x5, x4, x3, x2, x1, x0 ) \
|
||||||
|
{{ x7, x6, x5, x4, x3, x2, x1, x0 }}
|
||||||
|
|
||||||
|
#define mm512_const1_64( x ) {{ x,x,x,x, x,x,x,x }}
|
||||||
|
|
||||||
|
#define mm512_const_32( x15, x14, x13, x12, x11, x10, x09, x08, \
|
||||||
|
x07, x06, x05, x04, x03, x02, x01, x00 ) \
|
||||||
|
                 {{ x15, x14, x13, x12, x11, x10, x09, x08, \
|
||||||
|
x07, x06, x05, x04, x03, x02, x01, x00 }}
|
||||||
|
|
||||||
|
#define mm512_const1_32( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }}
|
||||||
|
|
||||||
|
#define mm512_const_16( x31, x30, x29, x28, x27, x26, x25, x24, \
|
||||||
|
x23, x22, x21, x20, x19, x18, x17, x16, \
|
||||||
|
x15, x14, x13, x12, x11, x10, x09, x08, \
|
||||||
|
x07, x06, x05, x04, x03, x02, x01, x00 ) \
|
||||||
|
{{ x31, x30, x29, x28, x27, x26, x25, x24, \
|
||||||
|
x23, x22, x21, x20, x19, x18, x17, x16, \
|
||||||
|
x15, x14, x13, x12, x11, x10, x09, x08, \
|
||||||
|
x07, x06, x05, x04, x03, x02, x01, x00 }}
|
||||||
|
|
||||||
|
#define mm512_const1_16( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x, \
|
||||||
|
x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }}
|
||||||
|
|
||||||
|
#define mm512_const_8( x63, x62, x61, x60, x59, x58, x57, x56, \
|
||||||
|
x55, x54, x53, x52, x51, x50, x49, x48, \
|
||||||
|
x47, x46, x45, x44, x43, x42, x41, x40, \
|
||||||
|
x39, x38, x37, x36, x35, x34, x33, x32, \
|
||||||
|
x31, x30, x29, x28, x27, x26, x25, x24, \
|
||||||
|
x23, x22, x21, x20, x19, x18, x17, x16, \
|
||||||
|
x15, x14, x13, x12, x11, x10, x09, x08, \
|
||||||
|
x07, x06, x05, x04, x03, x02, x01, x00 ) \
|
||||||
|
{{ x63, x62, x61, x60, x59, x58, x57, x56, \
|
||||||
|
x55, x54, x53, x52, x51, x50, x49, x48, \
|
||||||
|
x47, x46, x45, x44, x43, x42, x41, x40, \
|
||||||
|
x39, x38, x37, x36, x35, x34, x33, x32, \
|
||||||
|
x31, x30, x29, x28, x27, x26, x25, x24, \
|
||||||
|
x23, x22, x21, x20, x19, x18, x17, x16, \
|
||||||
|
x15, x14, x13, x12, x11, x10, x09, x08, \
|
||||||
|
x07, x06, x05, x04, x03, x02, x01, x00 }}
|
||||||
|
|
||||||
|
#define mm512_const1_8( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x, \
|
||||||
|
x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x, \
|
||||||
|
x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x, \
|
||||||
|
x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }}
|
||||||
|
|
||||||
|
// Predefined compile time constant vectors.
|
||||||
|
#define c512_zero mm512_const1_64( 0ULL )
|
||||||
|
#define c512_neg1 mm512_const1_64( 0xFFFFFFFFFFFFFFFFULL )
|
||||||
|
#define c512_one_512    mm512_const_64( 0ULL, 0ULL, 0ULL, 0ULL, \
|
||||||
|
0ULL, 0ULL, 0ULL, 1ULL )
|
||||||
|
#define c512_one_256 mm512_const_64( 0ULL, 0ULL, 0ULL, 1ULL, \
|
||||||
|
0ULL, 0ULL, 0ULL, 1ULL )
|
||||||
|
#define c512_one_128 mm512_const_64( 0ULL, 1ULL, 0ULL, 1ULL, \
|
||||||
|
0ULL, 1ULL, 0ULL, 1ULL )
|
||||||
|
#define c512_one_64 mm512_const1_64( 1ULL )
|
||||||
|
#define c512_one_32 mm512_const1_32( 1UL )
|
||||||
|
#define c512_one_16 mm512_const1_16( 1U )
|
||||||
|
#define c512_one_8 mm512_const1_8( 1U )
|
||||||
|
#define c512_neg1_64 mm512_const1_64( 0xFFFFFFFFFFFFFFFFULL )
|
||||||
|
#define c512_neg1_32 mm512_const1_32( 0xFFFFFFFFUL )
|
||||||
|
#define c512_neg1_16    mm512_const1_16( 0xFFFFU )
#define c512_neg1_8     mm512_const1_8( 0xFFU )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Pseudo constants.
|
||||||
|
|
||||||
|
// _mm512_setzero_si512 uses xor instruction. If needed frequently
|
||||||
|
// in a function it is better to define a register variable (const?)
|
||||||
|
// initialized to zero.
|
||||||
|
// It isn't clear to me yet how set or set1 actually work.
|
||||||
|
|
||||||
|
#define m512_zero _mm512_setzero_si512()
|
||||||
|
#define m512_one_512 _mm512_set_epi64( 0ULL, 0ULL, 0ULL, 0ULL, \
|
||||||
|
0ULL, 0ULL, 0ULL, 1ULL )
|
||||||
|
#define m512_one_256 _mm512_set4_epi64( 0ULL, 0ULL, 0ULL, 1ULL )
|
||||||
|
#define m512_one_128 _mm512_set4_epi64( 0ULL, 1ULL, 0ULL, 1ULL )
|
||||||
|
#define m512_one_64 _mm512_set1_epi64( 1ULL )
|
||||||
|
#define m512_one_32 _mm512_set1_epi32( 1UL )
|
||||||
|
#define m512_one_16 _mm512_set1_epi16( 1U )
|
||||||
|
#define m512_one_8 _mm512_set1_epi8( 1U )
|
||||||
|
#define m512_neg1 _mm512_set1_epi64( 0xFFFFFFFFFFFFFFFFULL )
|
||||||
|
|
||||||
|
|
||||||
|
//
|
||||||
|
// Basic operations without SIMD equivalent
|
||||||
|
|
||||||
|
#define mm512_not( x ) _mm512_xor_si512( x, m512_neg1 )
|
||||||
|
#define mm512_negate_64( x ) _mm512_sub_epi64( m512_zero, x )
|
||||||
|
#define mm512_negate_32( x ) _mm512_sub_epi32( m512_zero, x )
|
||||||
|
#define mm512_negate_16( x ) _mm512_sub_epi16( m512_zero, x )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Pointer casting
|
||||||
|
|
||||||
|
// p = any aligned pointer
|
||||||
|
// i = scaled array index
|
||||||
|
// o = scaled address offset
|
||||||
|
|
||||||
|
// returns p as pointer to vector
|
||||||
|
#define castp_m512i(p) ((__m512i*)(p))
|
||||||
|
|
||||||
|
// returns *p as vector value
|
||||||
|
#define cast_m512i(p) (*((__m512i*)(p)))
|
||||||
|
|
||||||
|
// returns p[i] as vector value
|
||||||
|
#define casti_m512i(p,i) (((__m512i*)(p))[(i)])
|
||||||
|
|
||||||
|
// returns p+o as pointer to vector
|
||||||
|
#define casto_m512i(p,o) (((__m512i*)(p))+(o))
|
||||||
|
|
||||||
|
// Gather scatter
|
||||||
|
|
||||||
|
#define mm512_gather_64( d, s0, s1, s2, s3, s4, s5, s6, s7 ) \
|
||||||
|
((uint64_t*)(d))[0] = (uint64_t)(s0); \
|
||||||
|
((uint64_t*)(d))[1] = (uint64_t)(s1); \
|
||||||
|
((uint64_t*)(d))[2] = (uint64_t)(s2); \
|
||||||
|
((uint64_t*)(d))[3] = (uint64_t)(s3); \
|
||||||
|
((uint64_t*)(d))[4] = (uint64_t)(s4); \
|
||||||
|
((uint64_t*)(d))[5] = (uint64_t)(s5); \
|
||||||
|
((uint64_t*)(d))[6] = (uint64_t)(s6); \
|
||||||
|
((uint64_t*)(d))[7] = (uint64_t)(s7);
|
||||||
|
|
||||||
|
|
||||||
|
#define mm512_gather_32( d, s00, s01, s02, s03, s04, s05, s06, s07, \
|
||||||
|
s08, s09, s10, s11, s12, s13, s14, s15 ) \
|
||||||
|
((uint32_t*)(d))[ 0] = (uint32_t)(s00); \
|
||||||
|
((uint32_t*)(d))[ 1] = (uint32_t)(s01); \
|
||||||
|
((uint32_t*)(d))[ 2] = (uint32_t)(s02); \
|
||||||
|
((uint32_t*)(d))[ 3] = (uint32_t)(s03); \
|
||||||
|
((uint32_t*)(d))[ 4] = (uint32_t)(s04); \
|
||||||
|
((uint32_t*)(d))[ 5] = (uint32_t)(s05); \
|
||||||
|
((uint32_t*)(d))[ 6] = (uint32_t)(s06); \
|
||||||
|
((uint32_t*)(d))[ 7] = (uint32_t)(s07); \
|
||||||
|
((uint32_t*)(d))[ 8] = (uint32_t)(s08); \
|
||||||
|
((uint32_t*)(d))[ 9] = (uint32_t)(s09); \
|
||||||
|
((uint32_t*)(d))[10] = (uint32_t)(s10); \
|
||||||
|
((uint32_t*)(d))[11] = (uint32_t)(s11); \
|
||||||
|
((uint32_t*)(d))[12] = (uint32_t)(s12); \
|
||||||
|
((uint32_t*)(d))[13] = (uint32_t)(s13); \
|
||||||
|
((uint32_t*)(d))[13] = (uint32_t)(s14); \
|
||||||
|
((uint32_t*)(d))[15] = (uint32_t)(s15);
|
||||||
|
|
||||||
|
// Scatter data from contiguous memory.
|
||||||
|
// All arguments are pointers
|
||||||
|
#define mm512_scatter_64( d0, d1, d2, d3, d4, d5, d6, d7, s ) \
|
||||||
|
*((uint64_t*)(d0)) = ((uint64_t*)(s))[0]; \
|
||||||
|
*((uint64_t*)(d1)) = ((uint64_t*)(s))[1]; \
|
||||||
|
*((uint64_t*)(d2)) = ((uint64_t*)(s))[2]; \
|
||||||
|
*((uint64_t*)(d3)) = ((uint64_t*)(s))[3]; \
|
||||||
|
*((uint64_t*)(d4)) = ((uint64_t*)(s))[4]; \
|
||||||
|
*((uint64_t*)(d5)) = ((uint64_t*)(s))[5]; \
|
||||||
|
*((uint64_t*)(d6)) = ((uint64_t*)(s))[6]; \
|
||||||
|
*((uint64_t*)(d7)) = ((uint64_t*)(s))[7];
|
||||||
|
|
||||||
|
|
||||||
|
#define mm512_scatter_32( d00, d01, d02, d03, d04, d05, d06, d07, \
|
||||||
|
d08, d09, d10, d11, d12, d13, d14, d15, s ) \
|
||||||
|
*((uint32_t*)(d00)) = ((uint32_t*)(s))[ 0]; \
|
||||||
|
*((uint32_t*)(d01)) = ((uint32_t*)(s))[ 1]; \
|
||||||
|
*((uint32_t*)(d02)) = ((uint32_t*)(s))[ 2]; \
|
||||||
|
*((uint32_t*)(d03)) = ((uint32_t*)(s))[ 3]; \
|
||||||
|
*((uint32_t*)(d04)) = ((uint32_t*)(s))[ 4]; \
|
||||||
|
*((uint32_t*)(d05)) = ((uint32_t*)(s))[ 5]; \
|
||||||
|
*((uint32_t*)(d06)) = ((uint32_t*)(s))[ 6]; \
|
||||||
|
*((uint32_t*)(d07)) = ((uint32_t*)(s))[ 7]; \
|
||||||
|
   *((uint32_t*)(d08)) = ((uint32_t*)(s))[ 8]; \
   *((uint32_t*)(d09)) = ((uint32_t*)(s))[ 9]; \
   *((uint32_t*)(d10)) = ((uint32_t*)(s))[10]; \
   *((uint32_t*)(d11)) = ((uint32_t*)(s))[11]; \
   *((uint32_t*)(d12)) = ((uint32_t*)(s))[12]; \
   *((uint32_t*)(d13)) = ((uint32_t*)(s))[13]; \
   *((uint32_t*)(d14)) = ((uint32_t*)(s))[14]; \
   *((uint32_t*)(d15)) = ((uint32_t*)(s))[15];
|
||||||
|
|
||||||
|
|
||||||
|
//
|
||||||
|
// Bit rotations.
|
||||||
|
|
||||||
|
// AVX512F has built-in bit fixed and variable rotation for 64 & 32 bit
|
||||||
|
// elements. There is no bit rotation or shift for larger elements.
|
||||||
|
//
|
||||||
|
// _mm512_rol_epi64, _mm512_ror_epi64, _mm512_rol_epi32, _mm512_ror_epi32
|
||||||
|
// _mm512_rolv_epi64, _mm512_rorv_epi64, _mm512_rolv_epi32, _mm512_rorv_epi32
|
||||||
|
//
|
||||||
|
// Here is a bit rotate for 16 bit elements:
|
||||||
|
#define mm512_ror_16( v, c ) \
|
||||||
|
_mm512_or_si512( _mm512_srli_epi16( v, c ), \
|
||||||
|
                    _mm512_slli_epi16( v, 16-(c) ) )
|
||||||
|
#define mm512_rol_16( v, c ) \
|
||||||
|
_mm512_or_si512( _mm512_slli_epi16( v, c ), \
|
||||||
|
                    _mm512_srli_epi16( v, 16-(c) ) )
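
// Illustrative identity (sketch, not part of the original header): rotating
// 16 bit elements by 8 bits swaps their two bytes, so mm512_ror_16( v, 8 )
// yields the same result as a 16 bit byte swap of v.
static inline __m512i example_bswap16_via_rotate( __m512i v )
{
   return mm512_ror_16( v, 8 );
}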
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rotate elements in 512 bit vector.
|
||||||
|
|
||||||
|
#define mm512_swap_256( v ) \
|
||||||
|
_mm512_permutexvar_epi64( v, _mm512_set_epi64( 3,2,1,0, 7,6,5,4 ) )
|
||||||
|
|
||||||
|
#define mm512_ror_1x128( v ) \
|
||||||
|
_mm512_permutexvar_epi64( v, _mm512_set_epi64( 1,0, 7,6, 5,4, 3,2 ) )
|
||||||
|
|
||||||
|
#define mm512_rol_1x128( v ) \
|
||||||
|
_mm512_permutexvar_epi64( v, _mm512_set_epi64( 5,4, 3,2, 1,0, 7,6 ) )
|
||||||
|
|
||||||
|
#define mm512_ror_1x64( v ) \
|
||||||
|
_mm512_permutexvar_epi64( v, _mm512_set_epi64( 0,7,6,5,4,3,2,1 ) )
|
||||||
|
|
||||||
|
#define mm512_rol_1x64( v ) \
|
||||||
|
_mm512_permutexvar_epi64( v, _mm512_set_epi64( 6,5,4,3,2,1,0,7 ) )
|
||||||
|
|
||||||
|
#define mm512_ror_1x32( v ) \
|
||||||
|
_mm512_permutexvar_epi32( v, _mm512_set_epi32( \
|
||||||
|
0,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ) )
|
||||||
|
|
||||||
|
#define mm512_rol_1x32( v ) \
|
||||||
|
_mm512_permutexvar_epi32( v, _mm512_set_epi32( \
|
||||||
|
14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 15 ) )
|
||||||
|
|
||||||
|
// Although documented to exist in AVX512F the _mm512_set_epi8 &
|
||||||
|
// _mm512_set_epi16 intrinsics fail to compile. Seems useful to have
|
||||||
|
// for endian byte swapping. Workaround by using _mm512_set_epi32.
|
||||||
|
// Ugly but it works.
|
||||||
|
|
||||||
|
#define mm512_ror_1x16( v ) \
|
||||||
|
_mm512_permutexvar_epi16( v, _mm512_set_epi32( \
|
||||||
|
0x0000001F, 0x001E001D, 0x001C001B, 0x001A0019, \
|
||||||
|
0X00180017, 0X00160015, 0X00140013, 0X00120011, \
|
||||||
|
0X0010000F, 0X000E000D, 0X000C000B, 0X000A0009, \
|
||||||
|
0X00080007, 0X00060005, 0X00040003, 0X00020001 ) )
|
||||||
|
|
||||||
|
#define mm512_rol_1x16( v ) \
|
||||||
|
   _mm512_permutexvar_epi16( v, _mm512_set_epi32( \
|
||||||
|
0x001E001D, 0x001C001B, 0x001A0019, 0x00180017, \
|
||||||
|
0X00160015, 0X00140013, 0X00120011, 0x0010000F, \
|
||||||
|
0X000E000D, 0X000C000B, 0X000A0009, 0X00080007, \
|
||||||
|
0X00060005, 0X00040003, 0X00020001, 0x0000001F ) )
|
||||||
|
|
||||||
|
|
||||||
|
#define mm512_ror_1x8( v ) \
|
||||||
|
   _mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
0x003F3E3D, 0x3C3B3A39, 0x38373635, 0x34333231, \
|
||||||
|
0x302F2E2D, 0x2C2B2A29, 0x28272625, 0x24232221, \
|
||||||
|
                      0x201F1E1D, 0x1C1B1A19, 0x18171615, 0x14131211, \
|
||||||
|
0x100F0E0D, 0x0C0B0A09, 0x08070605, 0x04030201 ) )
|
||||||
|
|
||||||
|
#define mm512_rol_1x8( v ) \
|
||||||
|
   _mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
                      0x3E3D3C3B, 0x3A393837, 0x36353433, 0x3231302F, \
|
||||||
|
0x2E2D2C2B, 0x2A292827, 0x26252423, 0x2221201F, \
|
||||||
|
0x1E1D1C1B, 0x1A191817, 0x16151413, 0x1211100F, \
|
||||||
|
0x0E0D0C0B, 0x0A090807, 0x06050403, 0x0201003F ) )
|
||||||
|
|
||||||
|
// Invert vector: {3,2,1,0} -> {0,1,2,3}
|
||||||
|
#define mm512_invert_128( v ) _mm512_permute4f128_epi32( v, 0x1b )
|
||||||
|
|
||||||
|
#define mm512_invert_64( v ) \
|
||||||
|
_mm512_permutex_epi64( v, _mm512_set_epi64( 0,1,2,3,4,5,6,7 ) )
|
||||||
|
|
||||||
|
#define mm512_invert_32( v ) \
|
||||||
|
_mm512_permutexvar_epi32( v, _mm512_set_epi32( \
|
||||||
|
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15 ) )
|
||||||
|
|
||||||
|
|
||||||
|
#define mm512_invert_16( v ) \
|
||||||
|
_mm512_permutexvar_epi16( v, _mm512_set_epi32( \
|
||||||
|
0x00000001, 0x00020003, 0x00040005, 0x00060007, \
|
||||||
|
0x00080009, 0x000A000B, 0x000C000D, 0x000E000F, \
|
||||||
|
0x00100011, 0x00120013, 0x00140015, 0x00160017, \
|
||||||
|
0x00180019, 0x001A001B, 0x001C001D, 0x001E001F ) )
|
||||||
|
|
||||||
|
#define mm512_invert_8( v ) \
|
||||||
|
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F, \
|
||||||
|
0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F, \
|
||||||
|
0x20212223, 0x24252627, 0x28292A2B, 0x2C2D2E2F, \
|
||||||
|
0x30313233, 0x34353637, 0x38393A3B, 0x3C3D3E3F ) )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rotate elements within 256 bit lanes of 512 bit vector.
|
||||||
|
|
||||||
|
// Swap hi & lo 128 bits in each 256 bit lane
|
||||||
|
#define mm512_swap128_256( v ) _mm512_permutex_epi64( v, 0x4e )
|
||||||
|
|
||||||
|
// Rotate 256 bit lanes by one 64 bit element
|
||||||
|
#define mm512_ror1x64_256( v ) _mm512_permutex_epi64( v, 0x39 )
|
||||||
|
#define mm512_rol1x64_256( v ) _mm512_permutex_epi64( v, 0x93 )
|
||||||
|
|
||||||
|
// Rotate 256 bit lanes by one 32 bit element
|
||||||
|
#define mm512_ror1x32_256( v ) \
|
||||||
|
_mm512_permutexvar_epi32( v, _mm512_set_epi32( \
|
||||||
|
8,15,14,13,12,11,10, 9, 0, 7, 6, 5, 4, 3, 2, 1 ) )
|
||||||
|
#define mm512_rol1x32_256( v ) \
|
||||||
|
_mm512_permutexvar_epi32( v, _mm512_set_epi32( \
|
||||||
|
14,13,12,11,10, 9, 8,15, 6, 5, 4, 3, 2, 1, 0, 7 ) )
|
||||||
|
#define mm512_ror1x16_256( v ) \
|
||||||
|
_mm512_permutexvar_epi16( v, _mm512_set_epi32( \
|
||||||
|
0x0010001F, 0x001E001D, 0x001C001B, 0x001A0019, \
|
||||||
|
0x00180017, 0x00160015, 0x00140013, 0x00120011, \
|
||||||
|
0x0000000F, 0x000E000D, 0x000C000B, 0x000A0009, \
|
||||||
|
0x00080007, 0x00060005, 0x00040003, 0x00020001 ) )
|
||||||
|
|
||||||
|
#define mm512_rol1x16_256( v ) \
|
||||||
|
_mm512_permutexvar_epi16( v, _mm512_set_epi32( \
|
||||||
|
0x001E001D, 0x001C001B, 0x001A0019, 0x00180017, \
|
||||||
|
0x00160015, 0x00140013, 0x00120011, 0x0000000F, \
|
||||||
|
0x000E000D, 0x000C000B, 0x000A0009, 0x00080007, \
|
||||||
|
0x00060005, 0x00040003, 0x00020001, 0x0000001F ) )
|
||||||
|
|
||||||
|
#define mm512_ror1x8_256( v ) \
|
||||||
|
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
0x203F3E3D, 0x3C3B3A39, 0x38373635, 0x34333231, \
|
||||||
|
0x302F2E2D, 0x2C2B2A29, 0x28272625, 0x24232221, \
|
||||||
|
0x001F1E1D, 0x1C1B1A19, 0x18171615, 0x14131211, \
|
||||||
|
0x100F0E0D, 0x0C0B0A09, 0x08070605, 0x04030201 ) )
|
||||||
|
|
||||||
|
#define mm512_rol1x8_256( v ) \
|
||||||
|
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
0x3E3D3C3B, 0x3A393837, 0x36353433, 0x3231302F, \
|
||||||
|
0x2E2D2C2B, 0x2A292827, 0x26252423, 0x2221203F, \
|
||||||
|
0x1E1D1C1B, 0x1A191817, 0x16151413, 0x1211100F, \
|
||||||
|
0x0E0D0C0B, 0x0A090807, 0x06050403, 0x0201001F ) )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rotate elements within 128 bit lanes of 512 bit vector.
|
||||||
|
|
||||||
|
// Swap hi & lo 64 bits in each 128 bit lane
|
||||||
|
#define mm512_swap64_128( v ) _mm512_permutex_epi64( v, 0xb1 )
|
||||||
|
|
||||||
|
// Rotate 128 bit lanes by one 32 bit element
|
||||||
|
#define mm512_ror1x32_128( v ) _mm512_shuffle_epi32( v, 0x39 )
|
||||||
|
#define mm512_rol1x32_128( v ) _mm512_shuffle_epi32( v, 0x93 )
|
||||||
|
|
||||||
|
#define mm512_ror1x16_128( v ) \
|
||||||
|
_mm512_permutexvar_epi16( v, _mm512_set_epi32( \
|
||||||
|
0x0018001F, 0x001E001D, 0x001C001B, 0x001A0019, \
|
||||||
|
0x00100017, 0x00160015, 0x00140013, 0x00120011, \
|
||||||
|
0x0008000F, 0x000E000D, 0x000C000B, 0x000A0009, \
|
||||||
|
0x00000007, 0x00060005, 0x00040003, 0x00020001 ) )
|
||||||
|
|
||||||
|
#define mm512_rol1x16_128( v ) \
|
||||||
|
_mm512_permutexvar_epi16( v, _mm512_set_epi32( \
|
||||||
|
0x001E001D, 0x001C001B, 0x001A0019, 0x0018001F, \
|
||||||
|
0x00160015, 0x00140013, 0x00120011, 0x00100017, \
|
||||||
|
0x000E000D, 0x000C000B, 0x000A0009, 0x0008000F, \
|
||||||
|
0x00060005, 0x00040003, 0x00020001, 0x00000007 ) )
|
||||||
|
|
||||||
|
#define mm512_ror1x8_128( v ) \
|
||||||
|
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
0x303F3E3D, 0x3C3B3A39, 0x38373635, 0x34333231, \
|
||||||
|
0x202F2E2D, 0x2C2B2A29, 0x28272625, 0x24232221, \
|
||||||
|
0x101F1E1D, 0x1C1B1A19, 0x18171615, 0x14131211, \
|
||||||
|
0x000F0E0D, 0x0C0B0A09, 0x08070605, 0x04030201 ) )
|
||||||
|
|
||||||
|
#define mm512_rol1x8_128( v ) \
|
||||||
|
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
                      0x3E3D3C3B, 0x3A393837, 0x36353433, 0x3231303F, \
|
||||||
|
0x2E2D2C2B, 0x2A292827, 0x26252423, 0x2221202F, \
|
||||||
|
0x1E1D1C1B, 0x1A191817, 0x16151413, 0x1211101F, \
|
||||||
|
0x0E0D0C0B, 0x0A090807, 0x06050403, 0x0201000F ) )
|
||||||
|
|
||||||
|
// Rotate 128 bit lanes by c bytes.
|
||||||
|
#define mm512_bror_128( v, c ) \
|
||||||
|
_mm512_or_si512( _mm512_bsrli_epi128( v, c ), \
|
||||||
|
_mm512_bslli_epi128( v, 16-(c) ) )
|
||||||
|
#define mm512_brol_128( v, c ) \
|
||||||
|
_mm512_or_si512( _mm512_bslli_epi128( v, c ), \
|
||||||
|
_mm512_bsrli_epi128( v, 16-(c) ) )
|
||||||
|
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rotate elements within 64 bit lanes.
|
||||||
|
|
||||||
|
// Swap 32 bit elements in each 64 bit lane
|
||||||
|
#define mm512_swap32_64( v ) _mm512_shuffle_epi32( v, 0xb1 )
|
||||||
|
|
||||||
|
// _mm512_set_epi8 doesn't seem to work
|
||||||
|
|
||||||
|
// Rotate each 64 bit lane by one 16 bit element.
|
||||||
|
#define mm512_ror1x16_64( v ) \
|
||||||
|
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
0x39383F3E, 0x3D3C3B3A, 0x31303736, 0x35343332, \
|
||||||
|
0x29282F2E, 0x2D2C2B2A, 0x21202726, 0x25242322, \
|
||||||
|
0x19181F1E, 0x1D1C1B1A, 0x11101716, 0x15141312, \
|
||||||
|
0x09080F0E, 0x0D0C0B0A, 0x01000706, 0x05040302 ) )
|
||||||
|
|
||||||
|
#define mm512_rol1x16_64( v ) \
|
||||||
|
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
                      0x3D3C3B3A, 0x39383F3E, 0x35343332, 0x31303736, \
                      0x2D2C2B2A, 0x29282F2E, 0x25242322, 0x21202726, \
                      0x1D1C1B1A, 0x19181F1E, 0x15141312, 0x11101716, \
|
||||||
|
0x0D0C0B0A, 0x09080F0E, 0x05040302, 0x01000706 ) )
|
||||||
|
|
||||||
|
// Rotate each 64 bit lane by one byte.
|
||||||
|
#define mm512_ror1x8_64( v ) \
|
||||||
|
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
0x383F3E3D, 0x3C3B3A39, 0x30373635, 0x34333231, \
|
||||||
|
0x282F2E2D, 0x2C2B2A29, 0x20272625, 0x24232221, \
|
||||||
|
0x181F1E1D, 0x1C1B1A19, 0x10171615, 0x14131211, \
|
||||||
|
                      0x080F0E0D, 0x0C0B0A09, 0x00070605, 0x04030201 ) )
|
||||||
|
#define mm512_rol1x8_64( v ) \
|
||||||
|
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
0x3E3D3C3B, 0x3A39383F, 0x36353433, 0x32313037, \
|
||||||
|
0x2E2D2C2B, 0x2A29282F, 0x26252423, 0x22212027, \
|
||||||
|
0x1E1D1C1B, 0x1A19181F, 0x16151413, 0x12111017, \
|
||||||
|
                      0x0E0D0C0B, 0x0A09080F, 0x06050403, 0x02010007 ) )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rotate elements within 32 bit lanes.
|
||||||
|
|
||||||
|
#define mm512_swap16_32( v ) \
|
||||||
|
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
0x001D001C, 0x001F001E, 0x00190018, 0x001B001A, \
|
||||||
|
0x00150014, 0x00170016, 0x00110010, 0x00130012, \
|
||||||
|
                      0x000D000C, 0x000F000E, 0x00090008, 0x000B000A, \
                      0x00050004, 0x00070006, 0x00010000, 0x00030002 ) )
|
||||||
|
|
||||||
|
#define mm512_ror1x8_32( v ) \
|
||||||
|
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
0x3C3F3E3D, 0x383B3A39, 0x34373635, 0x30333231, \
|
||||||
|
0x2C2F2E2D, 0x282B2A29, 0x24272625, 0x20232221, \
|
||||||
|
0x1C1F1E1D, 0x181B1A19, 0x14171615, 0x10131211, \
|
||||||
|
0x0C0F0E0D, 0x080B0A09, 0x04070605, 0x00030201 ) )
|
||||||
|
|
||||||
|
#define mm512_rol1x8_32( v ) \
|
||||||
|
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
0x3E3D3C3F, 0x3A39383B, 0x36353437, 0x32313033, \
|
||||||
|
0x2E2D2C2F, 0x2A29282B, 0x26252427, 0x22212023, \
|
||||||
|
0x1E1D1C1F, 0x1A19181B, 0x16151417, 0x12111013, \
|
||||||
|
0x0E0D0C0F, 0x0A09080B, 0x06050407, 0x02010003 ) )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Swap bytes in vector elements, vectorized bswap.
|
||||||
|
|
||||||
|
#define mm512_bswap_64( v ) \
|
||||||
|
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
                      0x38393A3B, 0x3C3D3E3F, 0x30313233, 0x34353637, \
                      0x28292A2B, 0x2C2D2E2F, 0x20212223, 0x24252627, \
|
||||||
|
0x18191A1B, 0x1C1D1E1F, 0x10111213, 0x14151617, \
|
||||||
|
0x08090A0B, 0x0C0D0E0F, 0x00010203, 0x04050607 ) )
|
||||||
|
|
||||||
|
#define mm512_bswap_32( v ) \
|
||||||
|
   _mm512_permutexvar_epi8( v, _mm512_set_epi32( \
                      0x3C3D3E3F, 0x38393A3B, 0x34353637, 0x30313233, \
                      0x2C2D2E2F, 0x28292A2B, 0x24252627, 0x20212223, \
                      0x1C1D1E1F, 0x18191A1B, 0x14151617, 0x10111213, \
                      0x0C0D0E0F, 0x08090A0B, 0x04050607, 0x00010203 ) )
|
||||||
|
|
||||||
|
#define mm512_bswap_16( v ) \
|
||||||
|
_mm512_permutexvar_epi8( v, _mm512_set_epi32( \
|
||||||
|
0x3E3F3C3D, 0x3A3B3839, 0x36373435, 0x32333031, \
|
||||||
|
0x2E2F2C2D, 0x2A2B2829, 0x26272425, 0x22232021, \
|
||||||
|
0x1E1F1C1D, 0x1A1B1819, 0x16171415, 0x12131011, \
|
||||||
|
0x0E0F0C0D, 0x0A0B0809, 0x06070405, 0x02030001 ) )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rotate elements from 2 512 bit vectors in place, source arguments
|
||||||
|
// are overwritten.
|
||||||
|
// These can all be done with 2 permutex2var instructions but they are
|
||||||
|
// slower than either xor or alignr.
|
||||||
|
|
||||||
|
#define mm512_swap512_1024(v1, v2) \
|
||||||
|
v1 = _mm512_xor_si512(v1, v2); \
|
||||||
|
v2 = _mm512_xor_si512(v1, v2); \
|
||||||
|
v1 = _mm512_xor_si512(v1, v2);
|
||||||
|
|
||||||
|
#define mm512_ror1x256_1024( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m512i t = _mm512_alignr_epi64( v1, v2, 4 ); \
|
||||||
|
v1 = _mm512_alignr_epi64( v2, v1, 4 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm512_rol1x256_1024( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m512i t = _mm512_alignr_epi64( v1, v2, 4 ); \
|
||||||
|
v2 = _mm512_alignr_epi64( v2, v1, 4 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm512_ror1x128_1024( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m512i t = _mm512_alignr_epi64( v1, v2, 2 ); \
|
||||||
|
v1 = _mm512_alignr_epi64( v2, v1, 2 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm512_rol1x128_1024( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m512i t = _mm512_alignr_epi64( v1, v2, 6 ); \
|
||||||
|
v2 = _mm512_alignr_epi64( v2, v1, 6 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm512_ror1x64_1024( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m512i t = _mm512_alignr_epi64( v1, v2, 1 ); \
|
||||||
|
v1 = _mm512_alignr_epi64( v2, v1, 1 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm512_rol1x64_1024( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m512i t = _mm512_alignr_epi64( v1, v2, 7 ); \
|
||||||
|
v2 = _mm512_alignr_epi64( v2, v1, 7 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm512_ror1x32_1024( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m512i t = _mm512_alignr_epi32( v1, v2, 1 ); \
|
||||||
|
v1 = _mm512_alignr_epi32( v2, v1, 1 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm512_rol1x32_1024( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m512i t = _mm512_alignr_epi32( v1, v2, 15 ); \
|
||||||
|
v2 = _mm512_alignr_epi32( v2, v1, 15 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm512_ror1x16_1024( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m512i t = _mm512_alignr_epi8( v1, v2, 2 ); \
|
||||||
|
v1 = _mm512_alignr_epi8( v2, v1, 2 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm512_rol1x16_1024( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m512i t = _mm512_alignr_epi8( v1, v2, 62 ); \
|
||||||
|
v2 = _mm512_alignr_epi8( v2, v1, 62 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm512_ror1x8_1024( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m512i t = _mm512_alignr_epi8( v1, v2, 1 ); \
|
||||||
|
v1 = _mm512_alignr_epi8( v2, v1, 1 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm512_rol1x8_1024( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m512i t = _mm512_alignr_epi8( v1, v2, 63 ); \
|
||||||
|
v2 = _mm512_alignr_epi8( v2, v1, 63 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#endif // AVX512
|
||||||
|
#endif // SIMD_AVX512_H__
|
new file: simd-utils/simd-int.h (84 lines)
@@ -0,0 +1,84 @@
|
|||||||
|
#if !defined(SIMD_SCALAR_H__)
|
||||||
|
#define SIMD_SCALAR_H__ 1
|
||||||
|
|
||||||
|
///////////////////////////////////
|
||||||
|
//
|
||||||
|
// Integers up to 64 bits.
|
||||||
|
//
|
||||||
|
|
||||||
|
|
||||||
|
// MMX has no extract instruction for 32 bit elements so this:
|
||||||
|
// Lo is trivial, high is a simple shift.
|
||||||
|
// Input may be uint64_t or __m64, returns uint32_t.
|
||||||
|
#define u64_extr_lo32(a) ( (uint32_t)( (uint64_t)(a) ) )
|
||||||
|
#define u64_extr_hi32(a) ( (uint32_t)( ((uint64_t)(a)) >> 32) )
|
||||||
|
|
||||||
|
#define u64_extr_32( a, n ) ( (uint32_t)( (a) >> ( ( 2-(n)) <<5 ) ) )
|
||||||
|
#define u64_extr_16( a, n ) ( (uint16_t)( (a) >> ( ( 4-(n)) <<4 ) ) )
|
||||||
|
#define u64_extr_8( a, n ) ( (uint8_t) ( (a) >> ( ( 8-(n)) <<3 ) ) )
|
||||||
|
|
||||||
|
|
||||||
|
// Rotate bits in various sized integers.
|
||||||
|
#define u64_ror_64( x, c ) \
|
||||||
|
(uint64_t)( ( (uint64_t)(x) >> (c) ) | ( (uint64_t)(x) << (64-(c)) ) )
|
||||||
|
#define u64_rol_64( x, c ) \
|
||||||
|
(uint64_t)( ( (uint64_t)(x) << (c) ) | ( (uint64_t)(x) >> (64-(c)) ) )
|
||||||
|
#define u32_ror_32( x, c ) \
|
||||||
|
(uint32_t)( ( (uint32_t)(x) >> (c) ) | ( (uint32_t)(x) << (32-(c)) ) )
|
||||||
|
#define u32_rol_32( x, c ) \
|
||||||
|
(uint32_t)( ( (uint32_t)(x) << (c) ) | ( (uint32_t)(x) >> (32-(c)) ) )
|
||||||
|
#define u16_ror_16( x, c ) \
|
||||||
|
(uint16_t)( ( (uint16_t)(x) >> (c) ) | ( (uint16_t)(x) << (16-(c)) ) )
|
||||||
|
#define u16_rol_16( x, c ) \
|
||||||
|
(uint16_t)( ( (uint16_t)(x) << (c) ) | ( (uint16_t)(x) >> (16-(c)) ) )
|
||||||
|
#define u8_ror_8( x, c ) \
|
||||||
|
(uint8_t) ( ( (uint8_t) (x) >> (c) ) | ( (uint8_t) (x) << ( 8-(c)) ) )
|
||||||
|
#define u8_rol_8( x, c ) \
|
||||||
|
(uint8_t) ( ( (uint8_t) (x) << (c) ) | ( (uint8_t) (x) >> ( 8-(c)) ) )
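A hedged example of the scalar rotate macros in use: SHA-256 small sigma0, which is ROTR 7 xor ROTR 18 xor SHR 3.

static inline uint32_t sha256_s0_example( uint32_t x )
{
   return u32_ror_32( x, 7 ) ^ u32_ror_32( x, 18 ) ^ ( x >> 3 );
}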
|
||||||
|
|
||||||
|
|
||||||
|
// 64 bit mem functions use integral sizes instead of bytes, data must
|
||||||
|
// be aligned to 64 bits. Mostly for scaled indexing convenience.
|
||||||
|
static inline void memcpy_64( uint64_t *dst, const uint64_t *src, int n )
|
||||||
|
{ for ( int i = 0; i < n; i++ ) dst[i] = src[i]; }
|
||||||
|
|
||||||
|
static inline void memset_zero_64( uint64_t *src, int n )
|
||||||
|
{ for ( int i = 0; i < n; i++ ) src[i] = 0ull; }
|
||||||
|
|
||||||
|
static inline void memset_64( uint64_t *dst, const uint64_t a, int n )
|
||||||
|
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }
|
||||||
|
|
||||||
|
#if defined (GCC_INT128)
|
||||||
|
|
||||||
|
///////////////////////////////////////
|
||||||
|
//
|
||||||
|
// 128 bit integers
|
||||||
|
//
|
||||||
|
|
||||||
|
// No real need or use.
|
||||||
|
#define i128_neg1 ((uint128_t)(-1LL))
|
||||||
|
|
||||||
|
// Extract specified 64 bit half of 128 bit integer.
|
||||||
|
// typecast should work for lo: (uint64_t)(x), test it!
|
||||||
|
#define u128_hi64( x ) ( (uint64_t)( (uint128_t)(x) >> 64 ) )
|
||||||
|
#define u128_lo64( x ) ( (uint64_t)( (uint128_t)(x) << 64 >> 64 ) )
|
||||||
|
// #define i128_lo64( x ) ((uint64_t)(x))
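A hedged sanity check for the note above; the plain typecast and u128_lo64 should agree.

static inline int u128_lo64_check_example( void )
{
   uint128_t x = ( (uint128_t)0x0123456789abcdefULL << 64 )
               | (uint128_t)0xfedcba9876543210ULL;
   return ( u128_lo64( x ) == (uint64_t)x )
       && ( u128_hi64( x ) == 0x0123456789abcdefULL );
}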
|
||||||
|
|
||||||
|
// Generic extract,
|
||||||
|
#define u128_extr_64( a, n ) ( (uint64_t)( (a) >> ( ( 2-(n)) <<6 ) ) )
|
||||||
|
#define u128_extr_32( a, n ) ( (uint32_t)( (a) >> ( ( 4-(n)) <<5 ) ) )
|
||||||
|
#define u128_extr_16( a, n ) ( (uint16_t)( (a) >> ( ( 8-(n)) <<4 ) ) )
|
||||||
|
#define u128_extr_8( a, n ) ( (uint8_t) ( (a) >> ( (16-(n)) <<3 ) ) )
|
||||||
|
|
||||||
|
|
||||||
|
// Not much need for this but it fills a gap.
|
||||||
|
#define u128_ror_128( x, c ) \
|
||||||
|
( ( (uint128_t)(x) >> (c) ) | ( (uint128_t)(x) << (128-(c)) ) )
|
||||||
|
#define u128_rol_128( x, c ) \
|
||||||
|
( ( (uint128_t)(x) << (c) ) | ( (uint128_t)(x) >> (128-(c)) ) )
|
||||||
|
|
||||||
|
#endif // GCC_INT128
|
||||||
|
|
||||||
|
#endif // SIMD_SCALAR_H__
|
||||||
|
|
||||||
|
|
simd-utils/simd-mmx.h (new file)
@@ -0,0 +1,135 @@
#if !defined(SIMD_MMX_H__)
|
||||||
|
#define SIMD_MMX_H__ 1
|
||||||
|
|
||||||
|
#if defined(__MMX__)
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// 64 bit MMX vectors.
|
||||||
|
//
|
||||||
|
// There are rumours MMX will be removed. Although casting with int64
// works there is likely some overhead to move the data to an MMX register
// and back.
|
||||||
|
|
||||||
|
|
||||||
|
// Pseudo constants
|
||||||
|
#define m64_zero _mm_setzero_si64()
|
||||||
|
#define m64_one_64 _mm_set_pi32( 0UL, 1UL )
|
||||||
|
#define m64_one_32 _mm_set1_pi32( 1UL )
|
||||||
|
#define m64_one_16 _mm_set1_pi16( 1U )
|
||||||
|
#define m64_one_8    _mm_set1_pi8(  1U )
|
||||||
|
#define m64_neg1 _mm_set1_pi32( 0xFFFFFFFFUL )
|
||||||
|
/* cast also works, which is better?
|
||||||
|
#define m64_zero ( (__m64)0ULL )
|
||||||
|
#define m64_one_64 ( (__m64)1ULL )
|
||||||
|
#define m64_one_32 ( (__m64)0x0000000100000001ULL )
|
||||||
|
#define m64_one_16 ( (__m64)0x0001000100010001ULL )
|
||||||
|
#define m64_one_8 ( (__m64)0x0101010101010101ULL )
|
||||||
|
#define m64_neg1 ( (__m64)0xFFFFFFFFFFFFFFFFULL )
|
||||||
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
#define casti_m64(p,i) (((__m64*)(p))[(i)])
|
||||||
|
|
||||||
|
// cast all arguments as they're likely to be uint64_t
|
||||||
|
|
||||||
|
// Bitwise not: ~(a)
|
||||||
|
#define mm64_not( a ) _mm_xor_si64( (__m64)a, m64_neg1 )
|
||||||
|
|
||||||
|
// Unary negate elements
|
||||||
|
#define mm64_negate_32( v ) _mm_sub_pi32( m64_zero, (__m64)v )
|
||||||
|
#define mm64_negate_16( v ) _mm_sub_pi16( m64_zero, (__m64)v )
|
||||||
|
#define mm64_negate_8( v ) _mm_sub_pi8( m64_zero, (__m64)v )
|
||||||
|
|
||||||
|
// Rotate bits in packed elements of 64 bit vector
|
||||||
|
#define mm64_rol_32( a, n ) \
|
||||||
|
_mm_or_si64( _mm_slli_pi32( (__m64)(a), n ), \
|
||||||
|
_mm_srli_pi32( (__m64)(a), 32-(n) ) )
|
||||||
|
|
||||||
|
#define mm64_ror_32( a, n ) \
|
||||||
|
_mm_or_si64( _mm_srli_pi32( (__m64)(a), n ), \
|
||||||
|
_mm_slli_pi32( (__m64)(a), 32-(n) ) )
|
||||||
|
|
||||||
|
#define mm64_rol_16( a, n ) \
|
||||||
|
_mm_or_si64( _mm_slli_pi16( (__m64)(a), n ), \
|
||||||
|
_mm_srli_pi16( (__m64)(a), 16-(n) ) )
|
||||||
|
|
||||||
|
#define mm64_ror_16( a, n ) \
|
||||||
|
_mm_or_si64( _mm_srli_pi16( (__m64)(a), n ), \
|
||||||
|
_mm_slli_pi16( (__m64)(a), 16-(n) ) )
|
||||||
|
|
||||||
|
// Rotate packed elements across lanes. Useful for byte swap and byte
// rotation.
|
||||||
|
|
||||||
|
// _mm_shuffle_pi8 requires SSSE3 while _mm_shuffle_pi16 requires SSE
|
||||||
|
// even though these are MMX instructions.
|
||||||
|
|
||||||
|
// Swap hi & lo 32 bits.
|
||||||
|
#define mm64_swap32( a ) _mm_shuffle_pi16( (__m64)(a), 0x4e )
|
||||||
|
|
||||||
|
#define mm64_ror1x16_64( a ) _mm_shuffle_pi16( (__m64)(a), 0x39 )
|
||||||
|
#define mm64_rol1x16_64( a ) _mm_shuffle_pi16( (__m64)(a), 0x93 )
|
||||||
|
|
||||||
|
// Swap hi & lo 16 bits of each 32 bit element
|
||||||
|
#define mm64_swap16_32( a ) _mm_shuffle_pi16( (__m64)(a), 0xb1 )
|
||||||
|
|
||||||
|
#if defined(__SSSE3__)
|
||||||
|
|
||||||
|
// Endian byte swap packed elements
|
||||||
|
// A vectorized version of the u64 bswap, use when data already in MMX reg.
|
||||||
|
#define mm64_bswap_64( v ) \
|
||||||
|
_mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 0,1,2,3,4,5,6,7 ) )
|
||||||
|
|
||||||
|
#define mm64_bswap_32( v ) \
|
||||||
|
_mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 4,5,6,7, 0,1,2,3 ) )
|
||||||
|
|
||||||
|
/*
|
||||||
|
#define mm64_bswap_16( v ) \
|
||||||
|
_mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 6,7, 4,5, 2,3, 0,1 ) );
|
||||||
|
*/
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
#define mm64_bswap_64( v ) \
|
||||||
|
(__m64)__builtin_bswap64( (uint64_t)v )
|
||||||
|
|
||||||
|
// This exists only for compatibility with CPUs without SSSE3. MMX doesn't
// have an extract 32 instruction so pointers are needed to access elements.
// It's more efficient for the caller to use scalar variables and call
// bswap_32 directly.
|
||||||
|
#define mm64_bswap_32( v ) \
|
||||||
|
_mm_set_pi32( __builtin_bswap32( ((uint32_t*)&v)[1] ), \
|
||||||
|
__builtin_bswap32( ((uint32_t*)&v)[0] ) )
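A hedged sketch of the advice above, with __builtin_bswap32 standing in for a scalar byte swap helper: swap the words while they are still scalars, then pack.

static inline __m64 pack_swapped_example( uint32_t w1, uint32_t w0 )
{
   return _mm_set_pi32( (int)__builtin_bswap32( w1 ),
                        (int)__builtin_bswap32( w0 ) );
}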
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Invert vector: {3,2,1,0} -> {0,1,2,3}
|
||||||
|
// Invert_64 is the same as bswap64
|
||||||
|
// Invert_32 is the same as swap32
|
||||||
|
|
||||||
|
#define mm64_invert_16( v ) _mm_shuffle_pi16( (__m64)v, 0x1b )
|
||||||
|
|
||||||
|
#if defined(__SSSE3__)
|
||||||
|
|
||||||
|
// An SSE2 version of this would be monstrous, shifting, masking and oring
// each byte individually.
|
||||||
|
#define mm64_invert_8( v ) \
|
||||||
|
_mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 0,1,2,3,4,5,6,7 ) )
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// 64 bit mem functions use integral sizes instead of bytes, data must
|
||||||
|
// be aligned to 64 bits.
|
||||||
|
static inline void memcpy_m64( __m64 *dst, const __m64 *src, int n )
|
||||||
|
{ for ( int i = 0; i < n; i++ ) dst[i] = src[i]; }
|
||||||
|
|
||||||
|
static inline void memset_zero_m64( __m64 *src, int n )
|
||||||
|
{ for ( int i = 0; i < n; i++ ) src[i] = (__m64)0ULL; }
|
||||||
|
|
||||||
|
static inline void memset_m64( __m64 *dst, const __m64 a, int n )
|
||||||
|
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }
|
||||||
|
|
||||||
|
#endif // MMX
|
||||||
|
|
||||||
|
#endif // SIMD_MMX_H__
|
||||||
|
|
simd-utils/simd-sse2.h (new file)
@@ -0,0 +1,428 @@
#if !defined(SIMD_SSE2_H__)
|
||||||
|
#define SIMD_SSE2_H__ 1
|
||||||
|
|
||||||
|
#if defined(__SSE2__)
|
||||||
|
|
||||||
|
//////////////////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// 128 bit SSE vectors
|
||||||
|
//
|
||||||
|
// SSE2 is generally required for full 128 bit support. Some functions
|
||||||
|
// are also optimized with SSSE3 or SSE4.1.
|
||||||
|
//
|
||||||
|
|
||||||
|
// Compile time constant initializers are type agnostic and can have
// a pointer handle of almost any type. All arguments must be scalar
// constants up to 64 bits. These initializers should only be used at
// compile time to initialize vector arrays. All data reside in memory.
|
||||||
|
|
||||||
|
#define mm128_const_64( x1, x0 ) {{ x1, x0 }}
|
||||||
|
#define mm128_const1_64( x ) {{ x, x }}
|
||||||
|
|
||||||
|
#define mm128_const_32( x3, x2, x1, x0 ) {{ x3, x2, x1, x0 }}
|
||||||
|
#define mm128_const1_32( x ) {{ x,x,x,x }}
|
||||||
|
|
||||||
|
#define mm128_const_16( x7, x6, x5, x4, x3, x2, x1, x0 ) \
|
||||||
|
{{ x7, x6, x5, x4, x3, x2, x1, x0 }}
|
||||||
|
#define mm128_const1_16( x ) {{ x,x,x,x, x,x,x,x }}
|
||||||
|
|
||||||
|
#define mm128_const_8( x15, x14, x13, x12, x11, x10, x09, x08, \
|
||||||
|
x07, x06, x05, x04, x03, x02, x01, x00 ) \
|
||||||
|
{{ x15, x14, x13, x12, x11, x10, x09, x08, \
|
||||||
|
x07, x06, x05, x04, x03, x02, x01, x00 }}
|
||||||
|
#define mm128_const1_8( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }}
|
||||||
|
|
||||||
|
// Compile time constants, use only for compile time initializing.
|
||||||
|
#define c128_zero mm128_const1_64( 0ULL )
|
||||||
|
#define c128_one_128 mm128_const_64( 0ULL, 1ULL )
|
||||||
|
#define c128_one_64 mm128_const1_64( 1ULL )
|
||||||
|
#define c128_one_32 mm128_const1_32( 1UL )
|
||||||
|
#define c128_one_16 mm128_const1_16( 1U )
|
||||||
|
#define c128_one_8 mm128_const1_8( 1U )
|
||||||
|
#define c128_neg1 mm128_const1_64( 0xFFFFFFFFFFFFFFFFULL )
|
||||||
|
#define c128_neg1_64 mm128_const1_64( 0xFFFFFFFFFFFFFFFFULL )
|
||||||
|
#define c128_neg1_32 mm128_const1_32( 0xFFFFFFFFUL )
|
||||||
|
#define c128_neg1_16   mm128_const1_16( 0xFFFFU )
|
||||||
|
#define c128_neg1_8    mm128_const1_8( 0xFFU )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Pseudo constants.
|
||||||
|
//
|
||||||
|
// These can't be used for compile time initialization.
|
||||||
|
// These should be used for all simple vectors.
|
||||||
|
//
|
||||||
|
// _mm_setzero_si128 uses the pxor instruction; it's unclear what _mm_set_epi
// does. Clearly it's faster than reading a memory resident constant. Assume
// set is also faster.
// If a pseudo constant is used often in a function it may be preferable
// to define a register variable to represent that constant:
//    register __m128i zero = _mm_setzero_si128();
// This reduces repeated references to a single register move.
|
||||||
|
|
||||||
|
#define m128_zero _mm_setzero_si128()
|
||||||
|
|
||||||
|
#define m128_one_128 _mm_set_epi64x( 0ULL, 1ULL )
|
||||||
|
#define m128_one_64 _mm_set1_epi64x( 1ULL )
|
||||||
|
#define m128_one_32 _mm_set1_epi32( 1UL )
|
||||||
|
#define m128_one_16 _mm_set1_epi16( 1U )
|
||||||
|
#define m128_one_8 _mm_set1_epi8( 1U )
|
||||||
|
|
||||||
|
#define m128_neg1 _mm_set1_epi64x( 0xFFFFFFFFFFFFFFFFULL )
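A hedged illustration of the register variable suggestion above: hoist a frequently used pseudo constant into a local so it is materialized once per function.

static inline __m128i not_both_example( __m128i a, __m128i b )
{
   const __m128i neg1 = m128_neg1;                    // materialized once, reused twice
   return _mm_and_si128( _mm_xor_si128( a, neg1 ),    // ~a
                         _mm_xor_si128( b, neg1 ) );  // ~b
}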
|
||||||
|
|
||||||
|
//
|
||||||
|
// Basic operations without equivalent SIMD intrinsic
|
||||||
|
|
||||||
|
// Bitwise not (~v)
|
||||||
|
#define mm128_not( v ) _mm_xor_si128( (v), m128_neg1 )
|
||||||
|
|
||||||
|
// Unary negation of elements
|
||||||
|
#define mm128_negate_64( v ) _mm_sub_epi64( m128_zero, v )
|
||||||
|
#define mm128_negate_32( v ) _mm_sub_epi32( m128_zero, v )
|
||||||
|
#define mm128_negate_16( v ) _mm_sub_epi16( m128_zero, v )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Vector pointer cast
|
||||||
|
|
||||||
|
// p = any aligned pointer
|
||||||
|
// returns p as pointer to vector type
|
||||||
|
#define castp_m128i(p) ((__m128i*)(p))
|
||||||
|
|
||||||
|
// p = any aligned pointer
|
||||||
|
// returns *p, watch your pointer arithmetic
|
||||||
|
#define cast_m128i(p) (*((__m128i*)(p)))
|
||||||
|
|
||||||
|
// p = any aligned pointer, i = scaled array index
|
||||||
|
// returns value p[i]
|
||||||
|
#define casti_m128i(p,i) (((__m128i*)(p))[(i)])
|
||||||
|
|
||||||
|
// p = any aligned pointer, o = scaled offset
|
||||||
|
// returns pointer p+o
|
||||||
|
#define casto_m128i(p,o) (((__m128i*)(p))+(o))
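A hedged usage sketch of the cast macros (assumes 16 byte aligned buffers; names are hypothetical).

static inline void xor_lane3_example( void *buf, const void *key )
{
   casti_m128i( buf, 3 ) = _mm_xor_si128( casti_m128i( buf, 3 ),
                                          casti_m128i( key, 3 ) );
}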
|
||||||
|
|
||||||
|
// SSE2 doesn't implement extract
|
||||||
|
#if defined(__SSE4_1__)
|
||||||
|
|
||||||
|
#define mm128_extr_64(a,n) _mm_extract_epi64( a, n )
|
||||||
|
#define mm128_extr_32(a,n) _mm_extract_epi32( a, n )
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
#define mm128_extr_64(a,n) (((uint64_t*)&a)[n])
|
||||||
|
#define mm128_extr_32(a,n) (((uint32_t*)&a)[n])
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
// Gather and scatter data.
// Surprise, they don't use vector instructions. Several reasons why.
// Since scalar data elements are being manipulated scalar instructions
// are most appropriate and can bypass vector registers. They are faster
// and more efficient on a per instruction basis due to the higher clock
// speed and greater availability of execution resources. It's good for
// interleaving data buffers for parallel processing.
// May suffer overhead if data is already in a vector register. This can
// usually be easily avoided by the coder. Sometimes _mm_set is simply better.
// These macros are likely to be used when transposing matrices rather than
// conversions of a single vector.
|
||||||
|
|
||||||
|
// Gather data elements into contiguous memory for vector use.
|
||||||
|
// Source args are appropriately sized value integers, destination arg is a
|
||||||
|
// type agnostic pointer.
|
||||||
|
// Vector alignment is not required, though likely. Appropriate integer
|
||||||
|
// alignment satisfies these macros.
|
||||||
|
|
||||||
|
// rewrite using insert
|
||||||
|
#define mm128_gather_64( d, s0, s1 ) \
|
||||||
|
((uint64_t*)d)[0] = (uint64_t)s0; \
|
||||||
|
((uint64_t*)d)[1] = (uint64_t)s1;
|
||||||
|
|
||||||
|
#define mm128_gather_32( d, s0, s1, s2, s3 ) \
|
||||||
|
((uint32_t*)d)[0] = (uint32_t)s0; \
|
||||||
|
((uint32_t*)d)[1] = (uint32_t)s1; \
|
||||||
|
((uint32_t*)d)[2] = (uint32_t)s2; \
|
||||||
|
((uint32_t*)d)[3] = (uint32_t)s3;
|
||||||
|
|
||||||
|
// Scatter data from contiguous memory.
|
||||||
|
#define mm128_scatter_64( d0, d1, s ) \
|
||||||
|
*( (uint64_t*)d0) = ((uint64_t*)s)[0]; \
|
||||||
|
*( (uint64_t*)d1) = ((uint64_t*)s)[1];
|
||||||
|
|
||||||
|
#define mm128_scatter_32( d0, d1, d2, d3, s ) \
|
||||||
|
*( (uint32_t*)d0) = ((uint32_t*)s)[0]; \
|
||||||
|
*( (uint32_t*)d1) = ((uint32_t*)s)[1]; \
|
||||||
|
*( (uint32_t*)d2) = ((uint32_t*)s)[2]; \
|
||||||
|
*( (uint32_t*)d3) = ((uint32_t*)s)[3];
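A hedged sketch of the interleaving use case described above: collect the same 32 bit word (e.g. word 19, the nonce in a Bitcoin style header) from four independent work items into one contiguous lane group. Names are hypothetical.

static inline void gather_nonces_example( uint32_t *lanes,
                        const uint32_t *d0, const uint32_t *d1,
                        const uint32_t *d2, const uint32_t *d3 )
{
   mm128_gather_32( lanes, d0[19], d1[19], d2[19], d3[19] );
}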
|
||||||
|
|
||||||
|
// Memory functions
// Mostly for convenience, avoids calculating bytes.
// Assumes data is aligned and integral.
// n = number of __m128i, bytes/16
|
||||||
|
|
||||||
|
static inline void memset_zero_128( __m128i *dst, int n )
|
||||||
|
{ for ( int i = 0; i < n; i++ ) dst[i] = m128_zero; }
|
||||||
|
|
||||||
|
static inline void memset_128( __m128i *dst, const __m128i a, int n )
|
||||||
|
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }
|
||||||
|
|
||||||
|
static inline void memcpy_128( __m128i *dst, const __m128i *src, int n )
|
||||||
|
{ for ( int i = 0; i < n; i ++ ) dst[i] = src[i]; }
|
||||||
|
|
||||||
|
|
||||||
|
//
|
||||||
|
// Bit rotations
|
||||||
|
|
||||||
|
// AVX512 has implemented bit rotation for 128 bit vectors with
|
||||||
|
// 64 and 32 bit elements. Not really useful.
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rotate each element of v by c bits
|
||||||
|
|
||||||
|
#define mm128_ror_64( v, c ) \
|
||||||
|
_mm_or_si128( _mm_srli_epi64( v, c ), _mm_slli_epi64( v, 64-(c) ) )
|
||||||
|
|
||||||
|
#define mm128_rol_64( v, c ) \
|
||||||
|
_mm_or_si128( _mm_slli_epi64( v, c ), _mm_srli_epi64( v, 64-(c) ) )
|
||||||
|
|
||||||
|
#define mm128_ror_32( v, c ) \
|
||||||
|
_mm_or_si128( _mm_srli_epi32( v, c ), _mm_slli_epi32( v, 32-(c) ) )
|
||||||
|
|
||||||
|
#define mm128_rol_32( v, c ) \
|
||||||
|
_mm_or_si128( _mm_slli_epi32( v, c ), _mm_srli_epi32( v, 32-(c) ) )
|
||||||
|
|
||||||
|
#define mm128_ror_16( v, c ) \
|
||||||
|
_mm_or_si128( _mm_srli_epi16( v, c ), _mm_slli_epi16( v, 16-(c) ) )
|
||||||
|
|
||||||
|
#define mm128_rol_16( v, c ) \
|
||||||
|
_mm_or_si128( _mm_slli_epi16( v, c ), _mm_srli_epi16( v, 16-(c) ) )
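A hedged example of the element rotates: SHA-256 small sigma0 computed on four independent 32 bit words at once.

static inline __m128i sha256_s0_4way_example( __m128i x )
{
   return _mm_xor_si128( _mm_xor_si128( mm128_ror_32( x, 7 ),
                                        mm128_ror_32( x, 18 ) ),
                         _mm_srli_epi32( x, 3 ) );
}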
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rotate elements across all lanes
|
||||||
|
|
||||||
|
#define mm128_swap_64( v ) _mm_shuffle_epi32( v, 0x4e )
|
||||||
|
|
||||||
|
#define mm128_ror_1x32( v ) _mm_shuffle_epi32( v, 0x39 )
|
||||||
|
#define mm128_rol_1x32( v ) _mm_shuffle_epi32( v, 0x93 )
|
||||||
|
|
||||||
|
#define mm128_ror_1x16( v ) \
|
||||||
|
_mm_shuffle_epi8( v, _mm_set_epi8( 1, 0,15,14,13,12,11,10, \
|
||||||
|
9, 8, 7, 6, 5, 4, 3, 2 ) )
|
||||||
|
#define mm128_rol_1x16( v ) \
|
||||||
|
_mm_shuffle_epi8( v, _mm_set_epi8( 13,12,11,10, 9, 8, 7, 6, \
|
||||||
|
5, 4, 3, 2, 1, 0,15,14 ) )
|
||||||
|
#define mm128_ror_1x8( v ) \
|
||||||
|
_mm_shuffle_epi8( v, _mm_set_epi8( 0,15,14,13,12,11,10, 9, \
|
||||||
|
8, 7, 6, 5, 4, 3, 2, 1 ) )
|
||||||
|
#define mm128_rol_1x8( v ) \
|
||||||
|
_mm_shuffle_epi8( v, _mm_set_epi8( 14,13,12,11,10, 9, 8, 7, \
|
||||||
|
6, 5, 4, 3, 2, 1, 0,15 ) )
|
||||||
|
|
||||||
|
// Rotate 16 byte (128 bit) vector by c bytes.
|
||||||
|
// Less efficient using shift but more versatile. Use only for odd numbered
// byte rotations. Use the shuffles above whenever possible.
|
||||||
|
#define mm128_bror( v, c ) \
|
||||||
|
_mm_or_si128( _mm_srli_si128( v, c ), _mm_slli_si128( v, 16-(c) ) )
|
||||||
|
|
||||||
|
#define mm128_brol( v, c ) \
|
||||||
|
_mm_or_si128( _mm_slli_si128( v, c ), _mm_srli_si128( v, 16-(c) ) )
|
||||||
|
|
||||||
|
// Invert vector: {3,2,1,0} -> {0,1,2,3}
|
||||||
|
#define mm128_invert_32( v ) _mm_shuffle_epi32( v, 0x1b )
|
||||||
|
|
||||||
|
#define mm128_invert_16( v ) \
|
||||||
|
_mm_shuffle_epi8( v, _mm_set_epi8( 1, 0, 3, 2, 5, 4, 7, 6, \
|
||||||
|
9, 8, 11,10, 13,12, 15,14 ) )
|
||||||
|
|
||||||
|
#define mm128_invert_8( v ) \
|
||||||
|
_mm_shuffle_epi8( v, _mm_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, \
|
||||||
|
8, 9,10,11,12,13,14,15 ) )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Rotate elements within lanes.
|
||||||
|
|
||||||
|
#define mm128_swap32_64( v ) _mm_shuffle_epi32( v, 0xb1 )
|
||||||
|
|
||||||
|
#define mm128_ror16_64( v ) _mm_shuffle_epi8( v, \
|
||||||
|
_mm_set_epi8( 9, 8,15,14,13,12,11,10, 1, 0, 7, 6, 5, 4, 3, 2 ) )
|
||||||
|
#define mm128_rol16_64( v ) _mm_shuffle_epi8( v, \
|
||||||
|
_mm_set_epi8( 13,12,11,10, 9, 8,15,14, 5, 4, 3, 2, 1, 0, 7, 6 ) )
|
||||||
|
|
||||||
|
|
||||||
|
#define mm128_swap16_32( v ) _mm_shuffle_epi8( v, \
|
||||||
|
_mm_set_epi8( 13,12,15,14, 9,8,11,10, 5,4,7,6, 1,0,3,2 ) )
|
||||||
|
|
||||||
|
//
|
||||||
|
// Endian byte swap.
|
||||||
|
|
||||||
|
#if defined(__SSSE3__)
|
||||||
|
|
||||||
|
#define mm128_bswap_64( v ) \
|
||||||
|
_mm_shuffle_epi8( v, _mm_set_epi8( 8, 9,10,11,12,13,14,15, \
|
||||||
|
0, 1, 2, 3, 4, 5, 6, 7 ) )
|
||||||
|
|
||||||
|
#define mm128_bswap_32( v ) \
|
||||||
|
_mm_shuffle_epi8( v, _mm_set_epi8( 12,13,14,15, 8, 9,10,11, \
|
||||||
|
4, 5, 6, 7, 0, 1, 2, 3 ) )
|
||||||
|
|
||||||
|
#define mm128_bswap_16( v ) \
|
||||||
|
_mm_shuffle_epi8( v, _mm_set_epi8( 14,15, 12,13, 10,11, 8, 9, \
|
||||||
|
6, 7, 4, 5, 2, 3, 0, 1 ) )
|
||||||
|
|
||||||
|
#else // SSE2
|
||||||
|
|
||||||
|
// Use inline function instead of macro due to multiple statements.
|
||||||
|
static inline __m128i mm128_bswap_64( __m128i v )
|
||||||
|
{
|
||||||
|
v = _mm_or_si128( _mm_slli_epi16( v, 8 ), _mm_srli_epi16( v, 8 ) );
|
||||||
|
v = _mm_shufflelo_epi16( v, _MM_SHUFFLE( 0, 1, 2, 3 ) );
|
||||||
|
return _mm_shufflehi_epi16( v, _MM_SHUFFLE( 0, 1, 2, 3 ) );
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline __m128i mm128_bswap_32( __m128i v )
|
||||||
|
{
|
||||||
|
v = _mm_or_si128( _mm_slli_epi16( v, 8 ), _mm_srli_epi16( v, 8 ) );
|
||||||
|
v = _mm_shufflelo_epi16( v, _MM_SHUFFLE( 2, 3, 0, 1 ) );
|
||||||
|
return _mm_shufflehi_epi16( v, _MM_SHUFFLE( 2, 3, 0, 1 ) );
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline __m128i mm128_bswap_16( __m128i v )
|
||||||
|
{
|
||||||
|
return _mm_or_si128( _mm_slli_epi16( v, 8 ), _mm_srli_epi16( v, 8 ) );
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // SSSE3 else SSE2
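A hedged usage sketch: byte swap the twenty 32 bit words of an 80 byte block header using whichever mm128_bswap_32 path above was compiled. Assumes 16 byte alignment; names are hypothetical.

static inline void bswap32_80_example( void *dst, const void *src )
{
   for ( int i = 0; i < 5; i++ )
      casti_m128i( dst, i ) = mm128_bswap_32( casti_m128i( src, i ) );
}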
|
||||||
|
//
|
||||||
|
// Rotate in place concatenated 128 bit vectors as one 256 bit vector.
|
||||||
|
|
||||||
|
// Swap 128 bit vectors.
|
||||||
|
|
||||||
|
#define mm128_swap128_256(v1, v2) \
|
||||||
|
v1 = _mm_xor_si128(v1, v2); \
|
||||||
|
v2 = _mm_xor_si128(v1, v2); \
|
||||||
|
v1 = _mm_xor_si128(v1, v2);
|
||||||
|
|
||||||
|
// Concatenate v1 & v2 and rotate as one 256 bit vector.
|
||||||
|
#if defined(__SSE4_1__)
|
||||||
|
|
||||||
|
#define mm128_ror1x64_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_alignr_epi8( v1, v2, 8 ); \
|
||||||
|
v1 = _mm_alignr_epi8( v2, v1, 8 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_rol1x64_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_alignr_epi8( v1, v2, 8 ); \
|
||||||
|
v2 = _mm_alignr_epi8( v2, v1, 8 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_ror1x32_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_alignr_epi8( v1, v2, 4 ); \
|
||||||
|
v1 = _mm_alignr_epi8( v2, v1, 4 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_rol1x32_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_alignr_epi8( v1, v2, 12 ); \
|
||||||
|
v2 = _mm_alignr_epi8( v2, v1, 12 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_ror1x16_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_alignr_epi8( v1, v2, 2 ); \
|
||||||
|
v1 = _mm_alignr_epi8( v2, v1, 2 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_rol1x16_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_alignr_epi8( v1, v2, 14 ); \
|
||||||
|
v2 = _mm_alignr_epi8( v2, v1, 14 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_ror1x8_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_alignr_epi8( v1, v2, 1 ); \
|
||||||
|
v1 = _mm_alignr_epi8( v2, v1, 1 ); \
|
||||||
|
v2 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_rol1x8_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_alignr_epi8( v1, v2, 15 ); \
|
||||||
|
v2 = _mm_alignr_epi8( v2, v1, 15 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#else // SSE2
|
||||||
|
|
||||||
|
#define mm128_ror1x64_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_srli_si128( v1, 8 ) | _mm_slli_si128( v2, 8 ); \
|
||||||
|
v2 = _mm_srli_si128( v2, 8 ) | _mm_slli_si128( v1, 8 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_rol1x64_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_slli_si128( v1, 8 ) | _mm_srli_si128( v2, 8 ); \
|
||||||
|
v2 = _mm_slli_si128( v2, 8 ) | _mm_srli_si128( v1, 8 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_ror1x32_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_srli_si128( v1, 4 ) | _mm_slli_si128( v2, 12 ); \
|
||||||
|
v2 = _mm_srli_si128( v2, 4 ) | _mm_slli_si128( v1, 12 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_rol1x32_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_slli_si128( v1, 4 ) | _mm_srli_si128( v2, 12 ); \
|
||||||
|
v2 = _mm_slli_si128( v2, 4 ) | _mm_srli_si128( v1, 12 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_ror1x16_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_srli_si128( v1, 2 ) | _mm_slli_si128( v2, 14 ); \
|
||||||
|
v2 = _mm_srli_si128( v2, 2 ) | _mm_slli_si128( v1, 14 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_rol1x16_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_slli_si128( v1, 2 ) | _mm_srli_si128( v2, 14 ); \
|
||||||
|
v2 = _mm_slli_si128( v2, 2 ) | _mm_srli_si128( v1, 14 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_ror1x8_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_srli_si128( v1, 1 ) | _mm_slli_si128( v2, 15 ); \
|
||||||
|
v2 = _mm_srli_si128( v2, 1 ) | _mm_slli_si128( v1, 15 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define mm128_rol1x8_256( v1, v2 ) \
|
||||||
|
do { \
|
||||||
|
__m128i t = _mm_slli_si128( v1, 1 ) | _mm_srli_si128( v2, 15 ); \
|
||||||
|
v2 = _mm_slli_si128( v2, 1 ) | _mm_srli_si128( v1, 15 ); \
|
||||||
|
v1 = t; \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#endif // SSE4.1 else SSE2
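A hedged usage sketch: rotate a 256 bit value held in two __m128i registers right by 32 bits, using whichever implementation above was selected at compile time.

static inline void ror32_256_example( __m128i *lo, __m128i *hi )
{
   __m128i l = *lo, h = *hi;
   mm128_ror1x32_256( l, h );
   *lo = l;
   *hi = h;
}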
|
||||||
|
|
||||||
|
#endif // __SSE2__
|
||||||
|
#endif // SIMD_SSE2_H__
|
simd-utils/simd-types.h (new file)
@@ -0,0 +1,398 @@
//////////////////////////////////////
//
// Type abstraction overlays designed for use in highly optimized
// straight line code operating on array structures. It uses direct
// struct member access instead of indexing to access array elements.
// Ex: array.u32_3 instead of array[3].
//
// Vector types are used to represent arrays. 64 and 128 bit vectors have
// corresponding 64 and 128 bit integer types.
//
// Data accesses are not tied to memory as arrays are. These structures
// can operate comfortably as register variables.
//
// Although the abstraction makes for transparent usage there is overhead.
// Extra move instructions are required when an operation requires a
// different register type. Additionally 128 bit operations, uint128_t
// and AES, can't be done in parallel with a 256 bit or larger vector.
// They require additional move instructions on top of the lack of
// improvement from parallelism.
//
// Move instruction overhead is incurred when moving among gpr, mmx
// and xmm registers. The number of extra moves is usually the number
// of elements in the vector. If both are the same size only one move
// is required. The number is doubled if the data is moved back.
//
// xmm and ymm registers are special, they are aliased. xmm registers
// overlay the lower 128 bits of the ymm registers. Accessing the data
// in the lower half of a ymm register by an xmm argument is free.
// The upper 128 bits need to be extracted and inserted like with other
// different sized data types.
//
// Integer types can be converted to differently sized integers without
// penalty.
//
// Conversions with penalty should be avoided as much as possible by grouping
// operations requiring the same register set.
//
// There are two algorithms for extracting and inserting data.
//
// There is the straightforward iterative method where each element is
// extracted or inserted in turn. The compiler evidently takes a different
// approach based on assembly code generated by a set intrinsic.
// To extract 64 bit or smaller elements from a 256 bit vector the compiler
// first extracts the upper 128 bits into a second xmm register. This
// eliminates a dependency between the upper and lower elements, allowing
// the CPU more opportunity at multiple operations per clock.
// This adds one additional instruction to the process. With AVX512
// another stage is added by first splitting the 512 bit vector into
// 2 256 bit vectors.
//
// xmm/ymm aliasing makes accessing the low half trivial and without cost.
// Accessing the upper half requires a move from the upper half of
// the source register to the lower half of the destination.
// It's a bigger issue with GPRs as there is no aliasing.
//
// Theoretically memory resident data could bypass the move and load
// the data directly into the desired register type. However this
// ignores the overhead to ensure coherency between register and memory,
// which is significantly more.
//
// Overlay avoids pointer dereferences and favours register move over
// memory load, notwithstanding compiler optimization.
//
// The syntax is ugly but can be abstracted with macros.


// Universal 64 bit overlay
// Avoids arrays and pointers, suitable as a register variable.
// Conversions are transparent but not free, cost is one MOV instruction.
// Facilitates manipulating 32 bit data in 64 bit pairs.
// Allows full use of 64 bit registers for 32 bit data, effectively doubling
// the size of the register set.
// Potentially up to 50% reduction in instructions depending on rate of
// conversion.
|
||||||
|
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////
|
||||||
|
//
|
||||||
|
// 128 bit integer
|
||||||
|
//
|
||||||
|
// Native type __int128 supported starting with GCC-4.8.
|
||||||
|
//
|
||||||
|
// __int128 uses two 64 bit GPRs to hold the data. The main benefits are
|
||||||
|
// for 128 bit arithmetic. Vectors are preferred when 128 bit arith
|
||||||
|
// is not required. int128 also works better with other integer sizes.
|
||||||
|
// Vectors benefit from wider registers.
|
||||||
|
//
|
||||||
|
// For safety use typecasting on all numeric arguments.
|
||||||
|
//
|
||||||
|
// Use typecasting for conversion to/from 128 bit vector:
|
||||||
|
// __m128i v128 = (__m128i)my_int128;
|
||||||
|
// __m256i v256 = _mm256_set_m128i( (__m128i)my_int128, (__m128i)my_int128 );
|
||||||
|
// my_int128 = (uint128_t)_mm256_extracti128_si256( v256, 1 );
|
||||||
|
|
||||||
|
// Compiler check for __int128 support
|
||||||
|
#if ( __GNUC__ > 4 ) || ( ( __GNUC__ == 4 ) && ( __GNUC_MINOR__ >= 8 ) )
|
||||||
|
#define GCC_INT128 1
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if !defined(GCC_INT128)
|
||||||
|
#warning "__int128 not supported, requires GCC-4.8 or newer."
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if defined(GCC_INT128)
|
||||||
|
|
||||||
|
// Familiar looking type names
|
||||||
|
typedef __int128 int128_t;
|
||||||
|
typedef unsigned __int128 uint128_t;
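A hedged, compilable version of the conversion notes above (names are hypothetical; requires SSE2 for __m128i).

static inline uint64_t u128_conv_example( uint128_t my_int128 )
{
   __m128i   v128 = (__m128i)my_int128;     // 128 bit integer -> vector
   uint128_t back = (uint128_t)v128;        // vector -> 128 bit integer
   return (uint64_t)( back >> 64 );         // upper half as a scalar
}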
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/////////////////////////////////////
|
||||||
|
//
|
||||||
|
// MMX 64 bit vector
|
||||||
|
//
|
||||||
|
|
||||||
|
|
||||||
|
// Emulates uint32_t[2]
|
||||||
|
struct _regarray_u32x2
|
||||||
|
{
|
||||||
|
uint32_t _0; uint32_t _1;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_u32x2 regarray_u32x2;
|
||||||
|
|
||||||
|
// Emulates uint16_t[4]
|
||||||
|
struct _regarray_u16x4
|
||||||
|
{
|
||||||
|
uint16_t _0; uint16_t _1; uint16_t _2; uint16_t _3;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_u16x4 regarray_u16x4;
|
||||||
|
|
||||||
|
// Emulates uint8_t[8]
|
||||||
|
struct _regarray_u8x8
|
||||||
|
{
|
||||||
|
uint8_t _0; uint8_t _1; uint8_t _2; uint8_t _3;
|
||||||
|
uint8_t _4; uint8_t _5; uint8_t _6; uint8_t _7;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_u8x8 regarray_u8x8;
|
||||||
|
|
||||||
|
// universal 64 bit overlay
|
||||||
|
union _regarray_64
|
||||||
|
{
|
||||||
|
regarray_u32x2 u32_; // uint32_t[2]
|
||||||
|
regarray_u16x4 u16_; // uint16_t[4]
|
||||||
|
regarray_u8x8 u8_; // uint8_t[8]
|
||||||
|
uint64_t u64;
|
||||||
|
__m64 v64;
|
||||||
|
};
|
||||||
|
typedef union _regarray_64 regarray_64;
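A hedged usage sketch of the 64 bit overlay: swap the two 32 bit halves of a word through struct members instead of shifts and masks.

static inline uint64_t swap_halves_example( uint64_t x )
{
   regarray_64 r;
   r.u64 = x;
   uint32_t t = r.u32_._0;
   r.u32_._0  = r.u32_._1;
   r.u32_._1  = t;
   return r.u64;
}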
|
||||||
|
|
||||||
|
/////
|
||||||
|
//
|
||||||
|
// SSE2
|
||||||
|
|
||||||
|
// Universal 128 bit overlay
|
||||||
|
//
|
||||||
|
// Avoids arrays and pointers, suitable as register variable.
|
||||||
|
// Designed for speed in straight line code with no loops.
|
||||||
|
//
|
||||||
|
// Conversions are transparent but not free, cost is one MOV instruction
|
||||||
|
// in each direction, except for lower half of ymm to/from xmm which are
|
||||||
|
// free.
|
||||||
|
//
|
||||||
|
// Facilitates two dimensional vectoring.
|
||||||
|
//
|
||||||
|
// 128 bit integer and AES can't be done in parallel. AES suffers extraction
|
||||||
|
// and insertion of the upper 128 bits. uint128_t suffers 4 times the cost
|
||||||
|
// with 2 64 bit extractions and 2 insertions for each 128 bit lane with
|
||||||
|
// single stage ymm <--> gpr for a total of 8 moves.
|
||||||
|
//
|
||||||
|
// Two stage conversion is possible which helps CPU instruction scheduling
|
||||||
|
// by removing a register dependency between the upper and lower 128 at the
|
||||||
|
// cost of two extra instructions (128 bit extract and insert). The compiler
|
||||||
|
// seems to prefer the 2 staged approach when using the set intrinsic.
|
||||||
|
|
||||||
|
// Use macros to simplify array access emulation.
|
||||||
|
// emulated array type: uint64_t a[4];
|
||||||
|
// array indexing: a[0], a[1]
|
||||||
|
// overlay emulation: a.u64_0, a.u64_1
|
||||||
|
// without macro: a.u64_._0, a.u64_._1
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
struct _regarray_u64x2
|
||||||
|
{
|
||||||
|
uint64_t _0; uint64_t _1;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_u64x2 regarray_u64x2;
|
||||||
|
|
||||||
|
struct _regarray_v64x2
|
||||||
|
{
|
||||||
|
__m64 _0; __m64 _1;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_v64x2 regarray_v64x2;
|
||||||
|
|
||||||
|
struct _regarray_u32x4
|
||||||
|
{
|
||||||
|
uint32_t _0; uint32_t _1; uint32_t _2; uint32_t _3;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_u32x4 regarray_u32x4;
|
||||||
|
|
||||||
|
struct _regarray_u16x8
|
||||||
|
{
|
||||||
|
uint16_t _0; uint16_t _1; uint16_t _2; uint16_t _3;
|
||||||
|
uint16_t _4; uint16_t _5; uint16_t _6; uint16_t _7;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_u16x8 regarray_u16x8;
|
||||||
|
|
||||||
|
struct _regarray_u8x16
|
||||||
|
{
|
||||||
|
uint8_t _0; uint8_t _1; uint8_t _2; uint8_t _3;
|
||||||
|
uint8_t _4; uint8_t _5; uint8_t _6; uint8_t _7;
|
||||||
|
uint8_t _8; uint8_t _9; uint8_t _a; uint8_t _b;
|
||||||
|
uint8_t _c; uint8_t _d; uint8_t _e; uint8_t _f;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_u8x16 regarray_u8x16;
|
||||||
|
|
||||||
|
|
||||||
|
union _register_array_m128v
|
||||||
|
{
|
||||||
|
#if defined(GCC_INT128)
|
||||||
|
uint128_t u128;
|
||||||
|
#endif
|
||||||
|
__m128i v128;
|
||||||
|
regarray_u64x2 u64_; // uint64_t[2]
|
||||||
|
regarray_v64x2 v64_; // __m64[2]
|
||||||
|
regarray_u32x4 u32_; // uint32_t[4]
|
||||||
|
regarray_u16x8  u16_;    // uint16_t[8]
|
||||||
|
regarray_u8x16 u8_; // uint8_t[16]
|
||||||
|
};
|
||||||
|
typedef union _register_array_m128v register_array_m128v;
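A hedged usage sketch of the 128 bit overlay: read the four 32 bit lanes of an __m128i through the union instead of extract instructions.

static inline uint32_t sum_lanes_example( __m128i v )
{
   register_array_m128v r;
   r.v128 = v;
   return r.u32_._0 + r.u32_._1 + r.u32_._2 + r.u32_._3;
}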
|
||||||
|
|
||||||
|
///////////////////
|
||||||
|
//
|
||||||
|
// AVX2
|
||||||
|
//
|
||||||
|
|
||||||
|
|
||||||
|
struct _regarray_v128x2
|
||||||
|
{
|
||||||
|
__m128i _0; __m128i _1;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_v128x2 regarray_v128x2;
|
||||||
|
|
||||||
|
struct _regarray_u128x2
|
||||||
|
{
|
||||||
|
uint128_t _0; uint128_t _1;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_u128x2 regarray_u128x2;
|
||||||
|
|
||||||
|
struct _regarray_u64x4
|
||||||
|
{
|
||||||
|
uint64_t _0; uint64_t _1; uint64_t _2; uint64_t _3;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_u64x4 regarray_u64x4;
|
||||||
|
|
||||||
|
struct _regarray_v64x4
|
||||||
|
{
|
||||||
|
__m64 _0; __m64 _1; __m64 _2; __m64 _3;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_v64x4 regarray_v64x4;
|
||||||
|
|
||||||
|
struct _regarray_u32x8
|
||||||
|
{
|
||||||
|
uint32_t _0; uint32_t _1; uint32_t _2; uint32_t _3;
|
||||||
|
uint32_t _4; uint32_t _5; uint32_t _6; uint32_t _7;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_u32x8 regarray_u32x8;
|
||||||
|
|
||||||
|
struct _regarray_u16x16
|
||||||
|
{
|
||||||
|
uint16_t _0; uint16_t _1; uint16_t _2; uint16_t _3;
|
||||||
|
uint16_t _4; uint16_t _5; uint16_t _6; uint16_t _7;
|
||||||
|
uint16_t _8; uint16_t _9; uint16_t _a; uint16_t _b;
|
||||||
|
uint16_t _c; uint16_t _d; uint16_t _e; uint16_t _f;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_u16x16 regarray_u16x16;
|
||||||
|
|
||||||
|
struct _regarray_u8x32
|
||||||
|
{
|
||||||
|
uint8_t _00; uint8_t _01; uint8_t _02; uint8_t _03;
|
||||||
|
uint8_t _04; uint8_t _05; uint8_t _06; uint8_t _07;
|
||||||
|
uint8_t _08; uint8_t _09; uint8_t _0a; uint8_t _0b;
|
||||||
|
uint8_t _0c; uint8_t _0d; uint8_t _0e; uint8_t _0f;
|
||||||
|
uint8_t _10; uint8_t _11; uint8_t _12; uint8_t _13;
|
||||||
|
uint8_t _14; uint8_t _15; uint8_t _16; uint8_t _17;
|
||||||
|
uint8_t _18; uint8_t _19; uint8_t _1a; uint8_t _1b;
|
||||||
|
uint8_t _1c; uint8_t _1d; uint8_t _1e; uint8_t _1f;
|
||||||
|
};
|
||||||
|
typedef struct _regarray_u8x32 regarray_u8x32;
|
||||||
|
|
||||||
|
union _regarray_v256
|
||||||
|
{
|
||||||
|
__m256i v256;
|
||||||
|
#if defined(GCC_INT128)
|
||||||
|
regarray_u128x2 u128_; // uint128_t[2]
|
||||||
|
#endif
|
||||||
|
regarray_v128x2 v128_; // __m128i[2]
|
||||||
|
regarray_v64x4 v64_;
|
||||||
|
regarray_u64x4 u64_;
|
||||||
|
regarray_u32x8 u32_;
|
||||||
|
regarray_u16x16 u16_;
|
||||||
|
regarray_u8x32 u8_;
|
||||||
|
};
|
||||||
|
typedef union _regarray_v256 regarray_v256;
|
||||||
|
|
||||||
|
////////////
|
||||||
|
//
|
||||||
|
// Abstraction macros to allow easy readability.
|
||||||
|
// Users may define their own list to suit their preferences
|
||||||
|
// such as upper case hex, leading zeros, multidimensional,
// alphabetic, day of week, etc.
|
||||||
|
|
||||||
|
#define v128_0 v128_._0
|
||||||
|
#define v128_1 v128_._1
|
||||||
|
|
||||||
|
#define u128_0 u128_._0
|
||||||
|
#define u128_1 u128_._1
|
||||||
|
|
||||||
|
#define v64_0 v64_._0
|
||||||
|
#define v64_1 v64_._1
|
||||||
|
#define v64_2 v64_._2
|
||||||
|
#define v64_3 v64_._3
|
||||||
|
|
||||||
|
#define u64_0 u64_._0
|
||||||
|
#define u64_1 u64_._1
|
||||||
|
#define u64_2 u64_._2
|
||||||
|
#define u64_3 u64_._3
|
||||||
|
|
||||||
|
#define u32_0 u32_._0
|
||||||
|
#define u32_1 u32_._1
|
||||||
|
#define u32_2 u32_._2
|
||||||
|
#define u32_3 u32_._3
|
||||||
|
#define u32_4 u32_._4
|
||||||
|
#define u32_5 u32_._5
|
||||||
|
#define u32_6 u32_._6
|
||||||
|
#define u32_7 u32_._7
|
||||||
|
|
||||||
|
#define u16_0 u16_._0
|
||||||
|
#define u16_1 u16_._1
|
||||||
|
#define u16_2 u16_._2
|
||||||
|
#define u16_3 u16_._3
|
||||||
|
#define u16_4 u16_._4
|
||||||
|
#define u16_5 u16_._5
|
||||||
|
#define u16_6 u16_._6
|
||||||
|
#define u16_7 u16_._7
|
||||||
|
#define u16_8 u16_._8
|
||||||
|
#define u16_9 u16_._9
|
||||||
|
#define u16_a u16_._a
|
||||||
|
#define u16_b u16_._b
|
||||||
|
#define u16_c u16_._c
|
||||||
|
#define u16_d u16_._d
|
||||||
|
#define u16_e u16_._e
|
||||||
|
#define u16_f u16_._f
|
||||||
|
|
||||||
|
#define u8_00 u8_._00
|
||||||
|
#define u8_01 u8_._01
|
||||||
|
#define u8_02 u8_._02
|
||||||
|
#define u8_03 u8_._03
|
||||||
|
#define u8_04 u8_._04
|
||||||
|
#define u8_05 u8_._05
|
||||||
|
#define u8_06 u8_._06
|
||||||
|
#define u8_07 u8_._07
|
||||||
|
#define u8_08 u8_._08
|
||||||
|
#define u8_09 u8_._09
|
||||||
|
#define u8_0a u8_._0a
|
||||||
|
#define u8_0b u8_._0b
|
||||||
|
#define u8_0c u8_._0c
|
||||||
|
#define u8_0d u8_._0d
|
||||||
|
#define u8_0e u8_._0e
|
||||||
|
#define u8_0f u8_._0f
|
||||||
|
#define u8_10 u8_._10
|
||||||
|
#define u8_11 u8_._11
|
||||||
|
#define u8_12 u8_._12
|
||||||
|
#define u8_13 u8_._13
|
||||||
|
#define u8_14 u8_._14
|
||||||
|
#define u8_15 u8_._15
|
||||||
|
#define u8_16 u8_._16
|
||||||
|
#define u8_17 u8_._17
|
||||||
|
#define u8_18 u8_._18
|
||||||
|
#define u8_19 u8_._19
|
||||||
|
#define u8_1a u8_._1a
|
||||||
|
#define u8_1b u8_._1b
|
||||||
|
#define u8_1c u8_._1c
|
||||||
|
#define u8_1d u8_._1d
|
||||||
|
#define u8_1e u8_._1e
|
||||||
|
#define u8_1f u8_._1f
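A hedged example of the shorthand macros above: a.u64_._1 is written a.u64_1, close to the array form a[1].

static inline uint64_t second_word_example( regarray_v256 a )
{
   return a.u64_1;     // expands to a.u64_._1
}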
|
||||||
|
|
||||||
|
|
||||||
|
// This is in use by, coincidentally, simd hash.
|
||||||
|
union _m256_v16 {
|
||||||
|
uint16_t u16[16];
|
||||||
|
__m256i v256;
|
||||||
|
};
|
||||||
|
typedef union _m256_v16 m256_v16;
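A hedged example of the m256_v16 overlay: build a 256 bit vector from sixteen 16 bit words without an _mm256_set_epi16 call.

static inline __m256i load_u16x16_example( const uint16_t *w )
{
   m256_v16 x;
   for ( int i = 0; i < 16; i++ ) x.u16[i] = w[i];
   return x.v256;
}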