Jay D Dee
2021-03-08 22:44:44 -05:00
parent dc6b007a18
commit 40089428c5
26 changed files with 601 additions and 2164 deletions


@@ -1,5 +1,9 @@
Instructions for compiling cpuminer-opt for Windows.

These instructions may be out of date. Please consult the wiki for
the latest:

https://github.com/JayDDee/cpuminer-opt/wiki/Compiling-from-source

Windows compilation using Visual Studio is not supported. Mingw64 is
used on a Linux system (bare metal or virtual machine) to cross-compile
@@ -24,79 +28,76 @@ Refer to Linux compile instructions and install required packages.
Additionally, install mingw-w64.

sudo apt-get install mingw-w64 libz-mingw-w64-dev

2. Create a local library directory for packages to be compiled in the next step. Suggested location is $HOME/usr/lib/

$ mkdir $HOME/usr/lib

3. Download and build other packages for mingw that don't have a mingw64 version available in the repositories.
Download the following source code packages from their respective and respected download locations, copy them to ~/usr/lib/ and uncompress them.

openssl: https://github.com/openssl/openssl/releases

curl: https://github.com/curl/curl/releases

gmp: https://gmplib.org/download/gmp/

In most cases the latest version is ok but it's safest to download the same major and minor version as included in your distribution. The following uses versions from Ubuntu 20.04. Change version numbers as required.
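For example, fetching and unpacking gmp could look like the following sketch. The version number shown is only illustrative; match it to your distribution.

$ cd $HOME/usr/lib
$ wget https://gmplib.org/download/gmp/gmp-6.2.0.tar.xz
$ tar xf gmp-6.2.0.tar.xz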
Run the following commands or follow the supplied instructions. Do not run "make install" unless you are using /usr/lib, which isn't recommended.

Some instructions insist on running "make check". If make check fails it may still work, YMMV.

You can speed up "make" by using all CPU cores available with "-j n" where n is the number of CPU threads you want to use.
openssl:

$ ./Configure mingw64 shared --cross-compile-prefix=x86_64-w64-mingw32-
$ make

Make may fail with an ld error, just ensure libcrypto-1_1-x64.dll is created.

curl:

$ ./configure --with-winssl --with-winidn --host=x86_64-w64-mingw32
$ make

gmp:

$ ./configure --host=x86_64-w64-mingw32
$ make
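For example, the whole gmp build in one pass, using all CPU cores, might look like this sketch. The directory name depends on the version you unpacked, and $(nproc) simply supplies the host's thread count to "-j".

$ cd $HOME/usr/lib/gmp-6.2.0
$ ./configure --host=x86_64-w64-mingw32
$ make -j $(nproc)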
4. Tweak the environment.

This step is required every time you log in, or the commands can be added to .bashrc.

Define some local variables to point to the local library.

$ export LOCAL_LIB="$HOME/usr/lib"
$ export LDFLAGS="-L$LOCAL_LIB/curl/lib/.libs -L$LOCAL_LIB/gmp/.libs -L$LOCAL_LIB/openssl"
$ export CONFIGURE_ARGS="--with-curl=$LOCAL_LIB/curl --with-crypto=$LOCAL_LIB/openssl --host=x86_64-w64-mingw32"

Adjust for gcc version:

$ export GCC_MINGW_LIB="/usr/lib/gcc/x86_64-w64-mingw32/9.3-win32"
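To persist the variables, a sketch of appending the same exports to .bashrc (the quoted EOF keeps the variables unexpanded; the gcc path is the 9.3-win32 one used above):

$ cat >> ~/.bashrc << 'EOF'
export LOCAL_LIB="$HOME/usr/lib"
export LDFLAGS="-L$LOCAL_LIB/curl/lib/.libs -L$LOCAL_LIB/gmp/.libs -L$LOCAL_LIB/openssl"
export CONFIGURE_ARGS="--with-curl=$LOCAL_LIB/curl --with-crypto=$LOCAL_LIB/openssl --host=x86_64-w64-mingw32"
export GCC_MINGW_LIB="/usr/lib/gcc/x86_64-w64-mingw32/9.3-win32"
EOF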
Create a release directory and copy some dll files previously built. This can be done outside of cpuminer-opt and only needs to be done once. If the release directory is in the cpuminer-opt directory it needs to be recreated every time a source package is decompressed.

$ mkdir release
$ cp /usr/x86_64-w64-mingw32/lib/zlib1.dll release/
$ cp /usr/x86_64-w64-mingw32/lib/libwinpthread-1.dll release/
$ cp $GCC_MINGW_LIB/libstdc++-6.dll release/
$ cp $GCC_MINGW_LIB/libgcc_s_seh-1.dll release/
$ cp $LOCAL_LIB/openssl/libcrypto-1_1-x64.dll release/
$ cp $LOCAL_LIB/curl/lib/.libs/libcurl-4.dll release/
The following steps need to be done every time a new source package is opened.

@@ -110,13 +111,73 @@ https://github.com/JayDDee/cpuminer-opt/releases

Decompress and change to the cpuminer-opt directory.

6. Prepare to compile

Create a link to the locally compiled version of gmp.h

$ ln -s $LOCAL_LIB/gmp-version/gmp.h ./gmp.h
$ ./autogen.sh
Configure the compiler for the CPU architecture of the host machine:
CFLAGS="-O3 -march=native -Wall" ./configure $CONFIGURE_ARGS
or cross compile for a specific CPU architecture:
CFLAGS="-O3 -march=znver1 -Wall" ./configure $CONFIGURE_ARGS
This will compile for AMD Ryzen.
You can compile more generically for a set of specific CPU features if you know what features you want:
CFLAGS="-O3 -maes -msse4.2 -Wall" ./configure $CONFIGURE_ARGS
This will compile for an older CPU that does not have AVX.
You can find several examples in README.txt
If you have a CPU with more than 64 threads and Windows 7 or higher you can enable the CPU Groups feature by adding the following to CFLAGS:
"-D_WIN32_WINNT=0x0601"
Once you have run configure successfully, run the compiler with n CPU threads:
$ make -j n
Copy cpuminer.exe to the release directory, compress and copy the release directory to a Windows system and run cpuminer.exe from the command line.
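For example, packaging the release folder for transfer might look like this sketch (assumes the zip utility is installed; the archive name is arbitrary):

$ cp cpuminer.exe release/
$ zip -r cpuminer-opt-win.zip release/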
Run cpuminer
In a command window change directories to the unzipped release folder. To get a list of all options:
cpuminer.exe --help
Command options are specific to where you mine. Refer to the pool's instructions on how to set them.
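For example, a typical stratum invocation might look like the following sketch; the algorithm, pool URL and wallet address are purely illustrative, substitute your pool's values:

cpuminer.exe -a x16r -o stratum+tcp://pool.example.com:3333 -u YourWalletAddress -p x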
Edit configure.ac to fix the libpthread package name.


@@ -65,6 +65,14 @@ If not what makes it happen or not happen?
Change Log
----------
v3.15.7
Added accepted/stale/rejected percentage to summary log report.
Added warning if share counters mismatch which could corrupt stats.
Linux: CPU temperature reporting is more responsive to rising temperature.
A few AVX2 & AVX512 tweaks.
Removed some dead code and other cleanup.
v3.15.6
Implement keccak pre-hash optimization for x16* algos.


@@ -55,8 +55,8 @@ MYALIGN const unsigned int mul2ipt[] = {0x728efc00, 0x6894e61a, 0x3fc3b14d, 0x2
#define ECHO_SUBBYTES(state, i, j) \ #define ECHO_SUBBYTES(state, i, j) \
state[i][j] = _mm_aesenc_si128(state[i][j], k1);\ state[i][j] = _mm_aesenc_si128(state[i][j], k1);\
state[i][j] = _mm_aesenc_si128(state[i][j], M128(zero));\ k1 = _mm_add_epi32(k1, M128(const1));\
k1 = _mm_add_epi32(k1, M128(const1)) state[i][j] = _mm_aesenc_si128(state[i][j], M128(zero))
#define ECHO_MIXBYTES(state1, state2, j, t1, t2, s2) \ #define ECHO_MIXBYTES(state1, state2, j, t1, t2, s2) \
s2 = _mm_add_epi8(state1[0][j], state1[0][j]);\ s2 = _mm_add_epi8(state1[0][j], state1[0][j]);\


@@ -10,22 +10,20 @@ static const unsigned int mul2ipt[] __attribute__ ((aligned (64))) =
0xfd5ba600, 0x2a8c71d7, 0x1eb845e3, 0xc96f9234 0xfd5ba600, 0x2a8c71d7, 0x1eb845e3, 0xc96f9234
}; };
*/ */
// do these need to be reversed?
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) #if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mul2mask \ //#define mul2mask m512_const2_64( 0, 0x00001b00 )
m512_const2_64( 0, 0x00001b00 )
//_mm512_set4_epi32( 0, 0, 0, 0x00001b00 ) //_mm512_set4_epi32( 0, 0, 0, 0x00001b00 )
// _mm512_set4_epi32( 0x00001b00, 0, 0, 0 ) //_mm512_set4_epi32( 0x00001b00, 0, 0, 0 )
#define lsbmask m512_const1_32( 0x01010101 ) //#define lsbmask m512_const1_32( 0x01010101 )
#define ECHO_SUBBYTES( state, i, j ) \ #define ECHO_SUBBYTES( state, i, j ) \
state[i][j] = _mm512_aesenc_epi128( state[i][j], k1 ); \ state[i][j] = _mm512_aesenc_epi128( state[i][j], k1 ); \
state[i][j] = _mm512_aesenc_epi128( state[i][j], m512_zero ); \ k1 = _mm512_add_epi32( k1, one ); \
k1 = _mm512_add_epi32( k1, m512_one_128 ); state[i][j] = _mm512_aesenc_epi128( state[i][j], m512_zero );
#define ECHO_MIXBYTES( state1, state2, j, t1, t2, s2 ) do \ #define ECHO_MIXBYTES( state1, state2, j, t1, t2, s2 ) do \
{ \ { \
@@ -140,6 +138,9 @@ void echo_4way_compress( echo_4way_context *ctx, const __m512i *pmsg,
unsigned int r, b, i, j; unsigned int r, b, i, j;
__m512i t1, t2, s2, k1; __m512i t1, t2, s2, k1;
__m512i _state[4][4], _state2[4][4], _statebackup[4][4]; __m512i _state[4][4], _state2[4][4], _statebackup[4][4];
__m512i one = m512_one_128;
__m512i mul2mask = m512_const2_64( 0, 0x00001b00 );
__m512i lsbmask = m512_const1_32( 0x01010101 );
_state[ 0 ][ 0 ] = ctx->state[ 0 ][ 0 ]; _state[ 0 ][ 0 ] = ctx->state[ 0 ][ 0 ];
_state[ 0 ][ 1 ] = ctx->state[ 0 ][ 1 ]; _state[ 0 ][ 1 ] = ctx->state[ 0 ][ 1 ];
@@ -406,8 +407,8 @@ int echo_4way_full( echo_4way_context *ctx, void *hashval, int nHashSize,
#define ECHO_SUBBYTES_2WAY( state, i, j ) \ #define ECHO_SUBBYTES_2WAY( state, i, j ) \
state[i][j] = _mm256_aesenc_epi128( state[i][j], k1 ); \ state[i][j] = _mm256_aesenc_epi128( state[i][j], k1 ); \
k1 = _mm256_add_epi32( k1, m256_one_128 ); \
state[i][j] = _mm256_aesenc_epi128( state[i][j], m256_zero ); \ state[i][j] = _mm256_aesenc_epi128( state[i][j], m256_zero ); \
k1 = _mm256_add_epi32( k1, m256_one_128 );
#define ECHO_MIXBYTES_2WAY( state1, state2, j, t1, t2, s2 ) do \ #define ECHO_MIXBYTES_2WAY( state1, state2, j, t1, t2, s2 ) do \
{ \ { \


@@ -14,7 +14,11 @@
#ifndef FUGUE_HASH_API_H #ifndef FUGUE_HASH_API_H
#define FUGUE_HASH_API_H #define FUGUE_HASH_API_H
#if defined(__AES__) #if defined(__AES__)
#if !defined(__SSE4_1__)
#error "Unsupported configuration, AES needs SSE4.1. Compile without AES."
#endif
#include "algo/sha/sha3_common.h" #include "algo/sha/sha3_common.h"
#include "simd-utils.h" #include "simd-utils.h"


@@ -51,7 +51,7 @@ int groestl256_4way_full( groestl256_4way_context* ctx, void* output,
const int hashlen_m128i = 32 >> 4; // bytes to __m128i const int hashlen_m128i = 32 >> 4; // bytes to __m128i
const int hash_offset = SIZE256 - hashlen_m128i; const int hash_offset = SIZE256 - hashlen_m128i;
int rem = ctx->rem_ptr; int rem = ctx->rem_ptr;
int blocks = len / SIZE256; uint64_t blocks = len / SIZE256;
__m512i* in = (__m512i*)input; __m512i* in = (__m512i*)input;
int i; int i;
@@ -89,21 +89,21 @@ int groestl256_4way_full( groestl256_4way_context* ctx, void* output,
if ( i == SIZE256 - 1 ) if ( i == SIZE256 - 1 )
{ {
// only 1 vector left in buffer, all padding at once // only 1 vector left in buffer, all padding at once
ctx->buffer[i] = m512_const2_64( (uint64_t)blocks << 56, 0x80 ); ctx->buffer[i] = m512_const2_64( blocks << 56, 0x80 );
} }
else else
{ {
// add first padding // add first padding
ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 ); ctx->buffer[i] = m512_const2_64( 0, 0x80 );
// add zero padding // add zero padding
for ( i += 1; i < SIZE256 - 1; i++ ) for ( i += 1; i < SIZE256 - 1; i++ )
ctx->buffer[i] = m512_zero; ctx->buffer[i] = m512_zero;
// add length padding, second last byte is zero unless blocks > 255 // add length padding, second last byte is zero unless blocks > 255
ctx->buffer[i] = m512_const2_64( (uint64_t)blocks << 56, 0 ); ctx->buffer[i] = m512_const2_64( blocks << 56, 0 );
} }
// digest final padding block and do output transform // digest final padding block and do output transform
TF512_4way( ctx->chaining, ctx->buffer ); TF512_4way( ctx->chaining, ctx->buffer );
OF512_4way( ctx->chaining ); OF512_4way( ctx->chaining );
@@ -122,7 +122,7 @@ int groestl256_4way_update_close( groestl256_4way_context* ctx, void* output,
const int hashlen_m128i = ctx->hashlen / 16; // bytes to __m128i const int hashlen_m128i = ctx->hashlen / 16; // bytes to __m128i
const int hash_offset = SIZE256 - hashlen_m128i; const int hash_offset = SIZE256 - hashlen_m128i;
int rem = ctx->rem_ptr; int rem = ctx->rem_ptr;
int blocks = len / SIZE256; uint64_t blocks = len / SIZE256;
__m512i* in = (__m512i*)input; __m512i* in = (__m512i*)input;
int i; int i;
@@ -146,20 +146,18 @@ int groestl256_4way_update_close( groestl256_4way_context* ctx, void* output,
if ( i == SIZE256 - 1 ) if ( i == SIZE256 - 1 )
{ {
// only 1 vector left in buffer, all padding at once // only 1 vector left in buffer, all padding at once
ctx->buffer[i] = m512_const1_128( _mm_set_epi8( ctx->buffer[i] = m512_const2_64( blocks << 56, 0x80 );
blocks, blocks>>8,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x80 ) );
} }
else else
{ {
// add first padding // add first padding
ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 ); ctx->buffer[i] = m512_const2_64( 0, 0x80 );
// add zero padding // add zero padding
for ( i += 1; i < SIZE256 - 1; i++ ) for ( i += 1; i < SIZE256 - 1; i++ )
ctx->buffer[i] = m512_zero; ctx->buffer[i] = m512_zero;
// add length padding, second last byte is zero unless blocks > 255 // add length padding, second last byte is zero unless blocks > 255
ctx->buffer[i] = m512_const1_128( _mm_set_epi8( ctx->buffer[i] = m512_const2_64( blocks << 56, 0 );
blocks, blocks>>8, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 ) );
} }
// digest final padding block and do output transform // digest final padding block and do output transform
@@ -209,23 +207,23 @@ int groestl256_2way_full( groestl256_2way_context* ctx, void* output,
const int hashlen_m128i = 32 >> 4; // bytes to __m128i const int hashlen_m128i = 32 >> 4; // bytes to __m128i
const int hash_offset = SIZE256 - hashlen_m128i; const int hash_offset = SIZE256 - hashlen_m128i;
int rem = ctx->rem_ptr; int rem = ctx->rem_ptr;
int blocks = len / SIZE256; uint64_t blocks = len / SIZE256;
__m256i* in = (__m256i*)input; __m256i* in = (__m256i*)input;
int i; int i;
if (ctx->chaining == NULL || ctx->buffer == NULL) if (ctx->chaining == NULL || ctx->buffer == NULL)
return 1; return 1;
for ( i = 0; i < SIZE256; i++ ) for ( i = 0; i < SIZE256; i++ )
{ {
ctx->chaining[i] = m256_zero; ctx->chaining[i] = m256_zero;
ctx->buffer[i] = m256_zero; ctx->buffer[i] = m256_zero;
} }
// The only non-zero in the IV is len. It can be hard coded. // The only non-zero in the IV is len. It can be hard coded.
ctx->chaining[ 3 ] = m256_const2_64( 0, 0x0100000000000000 ); ctx->chaining[ 3 ] = m256_const2_64( 0, 0x0100000000000000 );
ctx->buf_ptr = 0; ctx->buf_ptr = 0;
ctx->rem_ptr = 0; ctx->rem_ptr = 0;
// --- update --- // --- update ---
@@ -247,7 +245,7 @@ int groestl256_2way_full( groestl256_2way_context* ctx, void* output,
if ( i == SIZE256 - 1 ) if ( i == SIZE256 - 1 )
{ {
// only 1 vector left in buffer, all padding at once // only 1 vector left in buffer, all padding at once
ctx->buffer[i] = m256_const2_64( (uint64_t)blocks << 56, 0x80 ); ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 );
} }
else else
{ {
@@ -258,10 +256,10 @@ int groestl256_2way_full( groestl256_2way_context* ctx, void* output,
ctx->buffer[i] = m256_zero; ctx->buffer[i] = m256_zero;
// add length padding, second last byte is zero unless blocks > 255 // add length padding, second last byte is zero unless blocks > 255
ctx->buffer[i] = m256_const2_64( (uint64_t)blocks << 56, 0 ); ctx->buffer[i] = m256_const2_64( blocks << 56, 0 );
} }
// digest final padding block and do output transform // digest final padding block and do output transform
TF512_2way( ctx->chaining, ctx->buffer ); TF512_2way( ctx->chaining, ctx->buffer );
OF512_2way( ctx->chaining ); OF512_2way( ctx->chaining );
@@ -279,7 +277,7 @@ int groestl256_2way_update_close( groestl256_2way_context* ctx, void* output,
const int hashlen_m128i = ctx->hashlen / 16; // bytes to __m128i const int hashlen_m128i = ctx->hashlen / 16; // bytes to __m128i
const int hash_offset = SIZE256 - hashlen_m128i; const int hash_offset = SIZE256 - hashlen_m128i;
int rem = ctx->rem_ptr; int rem = ctx->rem_ptr;
int blocks = len / SIZE256; uint64_t blocks = len / SIZE256;
__m256i* in = (__m256i*)input; __m256i* in = (__m256i*)input;
int i; int i;
@@ -303,8 +301,7 @@ int groestl256_2way_update_close( groestl256_2way_context* ctx, void* output,
if ( i == SIZE256 - 1 ) if ( i == SIZE256 - 1 )
{ {
// only 1 vector left in buffer, all padding at once // only 1 vector left in buffer, all padding at once
ctx->buffer[i] = m256_const1_128( _mm_set_epi8( ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 );
blocks, blocks>>8,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x80 ) );
} }
else else
{ {
@@ -315,8 +312,7 @@ int groestl256_2way_update_close( groestl256_2way_context* ctx, void* output,
ctx->buffer[i] = m256_zero; ctx->buffer[i] = m256_zero;
// add length padding, second last byte is zero unless blocks > 255 // add length padding, second last byte is zero unless blocks > 255
ctx->buffer[i] = m256_const1_128( _mm_set_epi8( ctx->buffer[i] = m256_const2_64( blocks << 56, 0 );
blocks, blocks>>8, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 ) );
} }
// digest final padding block and do output transform // digest final padding block and do output transform


@@ -43,7 +43,7 @@ int groestl512_4way_update_close( groestl512_4way_context* ctx, void* output,
const int hashlen_m128i = 64 / 16; // bytes to __m128i const int hashlen_m128i = 64 / 16; // bytes to __m128i
const int hash_offset = SIZE512 - hashlen_m128i; const int hash_offset = SIZE512 - hashlen_m128i;
int rem = ctx->rem_ptr; int rem = ctx->rem_ptr;
int blocks = len / SIZE512; uint64_t blocks = len / SIZE512;
__m512i* in = (__m512i*)input; __m512i* in = (__m512i*)input;
int i; int i;
@@ -64,16 +64,14 @@ int groestl512_4way_update_close( groestl512_4way_context* ctx, void* output,
if ( i == SIZE512 - 1 ) if ( i == SIZE512 - 1 )
{ {
// only 1 vector left in buffer, all padding at once // only 1 vector left in buffer, all padding at once
ctx->buffer[i] = m512_const1_128( _mm_set_epi8( ctx->buffer[i] = m512_const2_64( blocks << 56, 0x80 );
blocks, blocks>>8,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x80 ) );
} }
else else
{ {
ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 ); ctx->buffer[i] = m512_const2_64( 0, 0x80 );
for ( i += 1; i < SIZE512 - 1; i++ ) for ( i += 1; i < SIZE512 - 1; i++ )
ctx->buffer[i] = m512_zero; ctx->buffer[i] = m512_zero;
ctx->buffer[i] = m512_const1_128( _mm_set_epi8( ctx->buffer[i] = m512_const2_64( blocks << 56, 0 );
blocks, blocks>>8, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 ) );
} }
TF1024_4way( ctx->chaining, ctx->buffer ); TF1024_4way( ctx->chaining, ctx->buffer );
@@ -124,7 +122,7 @@ int groestl512_4way_full( groestl512_4way_context* ctx, void* output,
} }
else else
{ {
ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 ); ctx->buffer[i] = m512_const2_64( 0, 0x80 );
for ( i += 1; i < SIZE512 - 1; i++ ) for ( i += 1; i < SIZE512 - 1; i++ )
ctx->buffer[i] = m512_zero; ctx->buffer[i] = m512_zero;
ctx->buffer[i] = m512_const2_64( blocks << 56, 0 ); ctx->buffer[i] = m512_const2_64( blocks << 56, 0 );
@@ -168,7 +166,7 @@ int groestl512_2way_update_close( groestl512_2way_context* ctx, void* output,
const int hashlen_m128i = 64 / 16; // bytes to __m128i const int hashlen_m128i = 64 / 16; // bytes to __m128i
const int hash_offset = SIZE512 - hashlen_m128i; const int hash_offset = SIZE512 - hashlen_m128i;
int rem = ctx->rem_ptr; int rem = ctx->rem_ptr;
int blocks = len / SIZE512; uint64_t blocks = len / SIZE512;
__m256i* in = (__m256i*)input; __m256i* in = (__m256i*)input;
int i; int i;
@@ -189,16 +187,14 @@ int groestl512_2way_update_close( groestl512_2way_context* ctx, void* output,
if ( i == SIZE512 - 1 ) if ( i == SIZE512 - 1 )
{ {
// only 1 vector left in buffer, all padding at once // only 1 vector left in buffer, all padding at once
ctx->buffer[i] = m256_const1_128( _mm_set_epi8( ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 );
blocks, blocks>>8,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x80 ) );
} }
else else
{ {
ctx->buffer[i] = m256_const2_64( 0, 0x80 ); ctx->buffer[i] = m256_const2_64( 0, 0x80 );
for ( i += 1; i < SIZE512 - 1; i++ ) for ( i += 1; i < SIZE512 - 1; i++ )
ctx->buffer[i] = m256_zero; ctx->buffer[i] = m256_zero;
ctx->buffer[i] = m256_const1_128( _mm_set_epi8( ctx->buffer[i] = m256_const2_64( blocks << 56, 0 );
blocks, blocks>>8, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 ) );
} }
TF1024_2way( ctx->chaining, ctx->buffer ); TF1024_2way( ctx->chaining, ctx->buffer );


@@ -548,7 +548,7 @@ static const sph_u32 T512[64][16] = {
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) #if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// Hamsi 8 way // Hamsi 8 way AVX512
#define INPUT_BIG8 \ #define INPUT_BIG8 \
do { \ do { \
@@ -849,13 +849,11 @@ void hamsi512_8way_update( hamsi_8way_big_context *sc, const void *data,
void hamsi512_8way_close( hamsi_8way_big_context *sc, void *dst ) void hamsi512_8way_close( hamsi_8way_big_context *sc, void *dst )
{ {
__m512i pad[1]; __m512i pad[1];
int ch, cl; uint32_t ch, cl;
sph_enc32be( &ch, sc->count_high ); sph_enc32be( &ch, sc->count_high );
sph_enc32be( &cl, sc->count_low + ( sc->partial_len << 3 ) ); sph_enc32be( &cl, sc->count_low + ( sc->partial_len << 3 ) );
pad[0] = _mm512_set_epi32( cl, ch, cl, ch, cl, ch, cl, ch, pad[0] = _mm512_set1_epi64( ((uint64_t)cl << 32 ) | (uint64_t)ch );
cl, ch, cl, ch, cl, ch, cl, ch );
// pad[0] = m512_const2_32( cl, ch );
sc->buf[0] = m512_const1_64( 0x80 ); sc->buf[0] = m512_const1_64( 0x80 );
hamsi_8way_big( sc, sc->buf, 1 ); hamsi_8way_big( sc, sc->buf, 1 );
hamsi_8way_big_final( sc, pad ); hamsi_8way_big_final( sc, pad );
@@ -863,11 +861,9 @@ void hamsi512_8way_close( hamsi_8way_big_context *sc, void *dst )
mm512_block_bswap_32( (__m512i*)dst, sc->h ); mm512_block_bswap_32( (__m512i*)dst, sc->h );
} }
#endif // AVX512 #endif // AVX512
// Hamsi 4 way AVX2
// Hamsi 4 way
#define INPUT_BIG \ #define INPUT_BIG \
do { \ do { \
@@ -1186,14 +1182,12 @@ void hamsi512_4way_update( hamsi_4way_big_context *sc, const void *data,
void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst ) void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst )
{ {
__m256i pad[1]; __m256i pad[1];
int ch, cl; uint32_t ch, cl;
sph_enc32be( &ch, sc->count_high ); sph_enc32be( &ch, sc->count_high );
sph_enc32be( &cl, sc->count_low + ( sc->partial_len << 3 ) ); sph_enc32be( &cl, sc->count_low + ( sc->partial_len << 3 ) );
pad[0] = _mm256_set_epi32( cl, ch, cl, ch, cl, ch, cl, ch ); pad[0] = _mm256_set1_epi64x( ((uint64_t)cl << 32 ) | (uint64_t)ch );
sc->buf[0] = m256_const1_64( 0x80 ); sc->buf[0] = m256_const1_64( 0x80 );
// sc->buf[0] = _mm256_set_epi32( 0UL, 0x80UL, 0UL, 0x80UL,
// 0UL, 0x80UL, 0UL, 0x80UL );
hamsi_big( sc, sc->buf, 1 ); hamsi_big( sc, sc->buf, 1 );
hamsi_big_final( sc, pad ); hamsi_big_final( sc, pad );


@@ -134,65 +134,47 @@
do { \ do { \
DECL64(c0); \ DECL64(c0); \
DECL64(c1); \ DECL64(c1); \
DECL64(c2); \
DECL64(c3); \
DECL64(c4); \
DECL64(bnn); \ DECL64(bnn); \
NOT64(bnn, b20); \ NOT64(bnn, b20); \
KHI_XO(c0, b00, b10, b20); \ KHI_XO(c0, b00, b10, b20); \
KHI_XO(c1, b10, bnn, b30); \ KHI_XO(c1, b10, bnn, b30); \
KHI_XA(c2, b20, b30, b40); \ KHI_XA(b20, b20, b30, b40); \
KHI_XO(c3, b30, b40, b00); \ KHI_XO(b30, b30, b40, b00); \
KHI_XA(c4, b40, b00, b10); \ KHI_XA(b40, b40, b00, b10); \
MOV64(b00, c0); \ MOV64(b00, c0); \
MOV64(b10, c1); \ MOV64(b10, c1); \
MOV64(b20, c2); \
MOV64(b30, c3); \
MOV64(b40, c4); \
NOT64(bnn, b41); \ NOT64(bnn, b41); \
KHI_XO(c0, b01, b11, b21); \ KHI_XO(c0, b01, b11, b21); \
KHI_XA(c1, b11, b21, b31); \ KHI_XA(c1, b11, b21, b31); \
KHI_XO(c2, b21, b31, bnn); \ KHI_XO(b21, b21, b31, bnn); \
KHI_XO(c3, b31, b41, b01); \ KHI_XO(b31, b31, b41, b01); \
KHI_XA(c4, b41, b01, b11); \ KHI_XA(b41, b41, b01, b11); \
MOV64(b01, c0); \ MOV64(b01, c0); \
MOV64(b11, c1); \ MOV64(b11, c1); \
MOV64(b21, c2); \
MOV64(b31, c3); \
MOV64(b41, c4); \
NOT64(bnn, b32); \ NOT64(bnn, b32); \
KHI_XO(c0, b02, b12, b22); \ KHI_XO(c0, b02, b12, b22); \
KHI_XA(c1, b12, b22, b32); \ KHI_XA(c1, b12, b22, b32); \
KHI_XA(c2, b22, bnn, b42); \ KHI_XA(b22, b22, bnn, b42); \
KHI_XO(c3, bnn, b42, b02); \ KHI_XO(b32, bnn, b42, b02); \
KHI_XA(c4, b42, b02, b12); \ KHI_XA(b42, b42, b02, b12); \
MOV64(b02, c0); \ MOV64(b02, c0); \
MOV64(b12, c1); \ MOV64(b12, c1); \
MOV64(b22, c2); \
MOV64(b32, c3); \
MOV64(b42, c4); \
NOT64(bnn, b33); \ NOT64(bnn, b33); \
KHI_XA(c0, b03, b13, b23); \ KHI_XA(c0, b03, b13, b23); \
KHI_XO(c1, b13, b23, b33); \ KHI_XO(c1, b13, b23, b33); \
KHI_XO(c2, b23, bnn, b43); \ KHI_XO(b23, b23, bnn, b43); \
KHI_XA(c3, bnn, b43, b03); \ KHI_XA(b33, bnn, b43, b03); \
KHI_XO(c4, b43, b03, b13); \ KHI_XO(b43, b43, b03, b13); \
MOV64(b03, c0); \ MOV64(b03, c0); \
MOV64(b13, c1); \ MOV64(b13, c1); \
MOV64(b23, c2); \
MOV64(b33, c3); \
MOV64(b43, c4); \
NOT64(bnn, b14); \ NOT64(bnn, b14); \
KHI_XA(c0, b04, bnn, b24); \ KHI_XA(c0, b04, bnn, b24); \
KHI_XO(c1, bnn, b24, b34); \ KHI_XO(c1, bnn, b24, b34); \
KHI_XA(c2, b24, b34, b44); \ KHI_XA(b24, b24, b34, b44); \
KHI_XO(c3, b34, b44, b04); \ KHI_XO(b34, b34, b44, b04); \
KHI_XA(c4, b44, b04, b14); \ KHI_XA(b44, b44, b04, b14); \
MOV64(b04, c0); \ MOV64(b04, c0); \
MOV64(b14, c1); \ MOV64(b14, c1); \
MOV64(b24, c2); \
MOV64(b34, c3); \
MOV64(b44, c4); \
} while (0) } while (0)
#ifdef IOTA #ifdef IOTA


@@ -66,6 +66,17 @@ static const uint32 CNS_INIT[128] __attribute((aligned(64))) = {
a = _mm512_xor_si512(a,c0);\ a = _mm512_xor_si512(a,c0);\
b = _mm512_xor_si512(b,c1); b = _mm512_xor_si512(b,c1);
#define MULT24W( a0, a1 ) \
do { \
__m512i b = _mm512_xor_si512( a0, \
_mm512_maskz_shuffle_epi32( 0xbbbb, a1, 16 ) ); \
a0 = _mm512_or_si512( _mm512_bsrli_epi128( b, 4 ), \
_mm512_bslli_epi128( a1,12 ) ); \
a1 = _mm512_or_si512( _mm512_bsrli_epi128( a1, 4 ), \
_mm512_bslli_epi128( b,12 ) ); \
} while(0)
/*
#define MULT24W( a0, a1, mask ) \ #define MULT24W( a0, a1, mask ) \
do { \ do { \
__m512i b = _mm512_xor_si512( a0, \ __m512i b = _mm512_xor_si512( a0, \
@@ -73,6 +84,7 @@ do { \
a0 = _mm512_or_si512( _mm512_bsrli_epi128(b,4), _mm512_bslli_epi128(a1,12) );\ a0 = _mm512_or_si512( _mm512_bsrli_epi128(b,4), _mm512_bslli_epi128(a1,12) );\
a1 = _mm512_or_si512( _mm512_bsrli_epi128(a1,4), _mm512_bslli_epi128(b,12) );\ a1 = _mm512_or_si512( _mm512_bsrli_epi128(a1,4), _mm512_bslli_epi128(b,12) );\
} while(0) } while(0)
*/
// confirm pointer arithmetic // confirm pointer arithmetic
// ok but use array indexes // ok but use array indexes
@@ -235,7 +247,6 @@ void rnd512_4way( luffa_4way_context *state, __m512i *msg )
__m512i msg0, msg1; __m512i msg0, msg1;
__m512i tmp[2]; __m512i tmp[2];
__m512i x[8]; __m512i x[8];
const __m512i MASK = m512_const2_64( 0, 0x00000000ffffffff );
t0 = chainv[0]; t0 = chainv[0];
t1 = chainv[1]; t1 = chainv[1];
@@ -249,7 +260,7 @@ void rnd512_4way( luffa_4way_context *state, __m512i *msg )
t0 = _mm512_xor_si512( t0, chainv[8] ); t0 = _mm512_xor_si512( t0, chainv[8] );
t1 = _mm512_xor_si512( t1, chainv[9] ); t1 = _mm512_xor_si512( t1, chainv[9] );
MULT24W( t0, t1, MASK ); MULT24W( t0, t1 );
msg0 = _mm512_shuffle_epi32( msg[0], 27 ); msg0 = _mm512_shuffle_epi32( msg[0], 27 );
msg1 = _mm512_shuffle_epi32( msg[1], 27 ); msg1 = _mm512_shuffle_epi32( msg[1], 27 );
@@ -268,68 +279,67 @@ void rnd512_4way( luffa_4way_context *state, __m512i *msg )
t0 = chainv[0]; t0 = chainv[0];
t1 = chainv[1]; t1 = chainv[1];
MULT24W( chainv[0], chainv[1], MASK ); MULT24W( chainv[0], chainv[1] );
chainv[0] = _mm512_xor_si512( chainv[0], chainv[2] ); chainv[0] = _mm512_xor_si512( chainv[0], chainv[2] );
chainv[1] = _mm512_xor_si512( chainv[1], chainv[3] ); chainv[1] = _mm512_xor_si512( chainv[1], chainv[3] );
MULT24W( chainv[2], chainv[3], MASK ); MULT24W( chainv[2], chainv[3] );
chainv[2] = _mm512_xor_si512(chainv[2], chainv[4]); chainv[2] = _mm512_xor_si512(chainv[2], chainv[4]);
chainv[3] = _mm512_xor_si512(chainv[3], chainv[5]); chainv[3] = _mm512_xor_si512(chainv[3], chainv[5]);
MULT24W( chainv[4], chainv[5], MASK ); MULT24W( chainv[4], chainv[5] );
chainv[4] = _mm512_xor_si512(chainv[4], chainv[6]); chainv[4] = _mm512_xor_si512(chainv[4], chainv[6]);
chainv[5] = _mm512_xor_si512(chainv[5], chainv[7]); chainv[5] = _mm512_xor_si512(chainv[5], chainv[7]);
MULT24W( chainv[6], chainv[7], MASK ); MULT24W( chainv[6], chainv[7] );
chainv[6] = _mm512_xor_si512(chainv[6], chainv[8]); chainv[6] = _mm512_xor_si512(chainv[6], chainv[8]);
chainv[7] = _mm512_xor_si512(chainv[7], chainv[9]); chainv[7] = _mm512_xor_si512(chainv[7], chainv[9]);
MULT24W( chainv[8], chainv[9], MASK ); MULT24W( chainv[8], chainv[9] );
chainv[8] = _mm512_xor_si512( chainv[8], t0 ); chainv[8] = _mm512_xor_si512( chainv[8], t0 );
chainv[9] = _mm512_xor_si512( chainv[9], t1 ); chainv[9] = _mm512_xor_si512( chainv[9], t1 );
t0 = chainv[8]; t0 = chainv[8];
t1 = chainv[9]; t1 = chainv[9];
MULT24W( chainv[8], chainv[9], MASK ); MULT24W( chainv[8], chainv[9] );
chainv[8] = _mm512_xor_si512( chainv[8], chainv[6] ); chainv[8] = _mm512_xor_si512( chainv[8], chainv[6] );
chainv[9] = _mm512_xor_si512( chainv[9], chainv[7] ); chainv[9] = _mm512_xor_si512( chainv[9], chainv[7] );
MULT24W( chainv[6], chainv[7], MASK ); MULT24W( chainv[6], chainv[7] );
chainv[6] = _mm512_xor_si512( chainv[6], chainv[4] ); chainv[6] = _mm512_xor_si512( chainv[6], chainv[4] );
chainv[7] = _mm512_xor_si512( chainv[7], chainv[5] ); chainv[7] = _mm512_xor_si512( chainv[7], chainv[5] );
MULT24W( chainv[4], chainv[5], MASK ); MULT24W( chainv[4], chainv[5] );
chainv[4] = _mm512_xor_si512( chainv[4], chainv[2] ); chainv[4] = _mm512_xor_si512( chainv[4], chainv[2] );
chainv[5] = _mm512_xor_si512( chainv[5], chainv[3] ); chainv[5] = _mm512_xor_si512( chainv[5], chainv[3] );
MULT24W( chainv[2], chainv[3], MASK ); MULT24W( chainv[2], chainv[3] );
chainv[2] = _mm512_xor_si512( chainv[2], chainv[0] ); chainv[2] = _mm512_xor_si512( chainv[2], chainv[0] );
chainv[3] = _mm512_xor_si512( chainv[3], chainv[1] ); chainv[3] = _mm512_xor_si512( chainv[3], chainv[1] );
MULT24W( chainv[0], chainv[1], MASK ); MULT24W( chainv[0], chainv[1] );
chainv[0] = _mm512_xor_si512( _mm512_xor_si512( chainv[0], t0 ), msg0 ); chainv[0] = _mm512_xor_si512( _mm512_xor_si512( chainv[0], t0 ), msg0 );
chainv[1] = _mm512_xor_si512( _mm512_xor_si512( chainv[1], t1 ), msg1 ); chainv[1] = _mm512_xor_si512( _mm512_xor_si512( chainv[1], t1 ), msg1 );
MULT24W( msg0, msg1, MASK ); MULT24W( msg0, msg1 );
chainv[2] = _mm512_xor_si512( chainv[2], msg0 ); chainv[2] = _mm512_xor_si512( chainv[2], msg0 );
chainv[3] = _mm512_xor_si512( chainv[3], msg1 ); chainv[3] = _mm512_xor_si512( chainv[3], msg1 );
MULT24W( msg0, msg1, MASK ); MULT24W( msg0, msg1 );
chainv[4] = _mm512_xor_si512( chainv[4], msg0 ); chainv[4] = _mm512_xor_si512( chainv[4], msg0 );
chainv[5] = _mm512_xor_si512( chainv[5], msg1 ); chainv[5] = _mm512_xor_si512( chainv[5], msg1 );
MULT24W( msg0, msg1, MASK ); MULT24W( msg0, msg1 );
chainv[6] = _mm512_xor_si512( chainv[6], msg0 ); chainv[6] = _mm512_xor_si512( chainv[6], msg0 );
chainv[7] = _mm512_xor_si512( chainv[7], msg1 ); chainv[7] = _mm512_xor_si512( chainv[7], msg1 );
MULT24W( msg0, msg1, MASK ); MULT24W( msg0, msg1);
chainv[8] = _mm512_xor_si512( chainv[8], msg0 ); chainv[8] = _mm512_xor_si512( chainv[8], msg0 );
chainv[9] = _mm512_xor_si512( chainv[9], msg1 ); chainv[9] = _mm512_xor_si512( chainv[9], msg1 );
MULT24W( msg0, msg1, MASK ); MULT24W( msg0, msg1 );
// replace with ror
chainv[3] = _mm512_rol_epi32( chainv[3], 1 ); chainv[3] = _mm512_rol_epi32( chainv[3], 1 );
chainv[5] = _mm512_rol_epi32( chainv[5], 2 ); chainv[5] = _mm512_rol_epi32( chainv[5], 2 );
chainv[7] = _mm512_rol_epi32( chainv[7], 3 ); chainv[7] = _mm512_rol_epi32( chainv[7], 3 );
@@ -496,7 +506,7 @@ int luffa_4way_update( luffa_4way_context *state, const void *data,
{ {
// remaining data bytes // remaining data bytes
buffer[0] = _mm512_shuffle_epi8( vdata[0], shuff_bswap32 ); buffer[0] = _mm512_shuffle_epi8( vdata[0], shuff_bswap32 );
buffer[1] = m512_const2_64( 0, 0x0000000080000000 ); buffer[1] = m512_const1_i128( 0x0000000080000000 );
} }
return 0; return 0;
} }
@@ -520,7 +530,7 @@ int luffa_4way_close( luffa_4way_context *state, void *hashval )
rnd512_4way( state, buffer ); rnd512_4way( state, buffer );
else else
{ // empty pad block, constant data { // empty pad block, constant data
msg[0] = m512_const2_64( 0, 0x0000000080000000 ); msg[0] = m512_const1_i128( 0x0000000080000000 );
msg[1] = m512_zero; msg[1] = m512_zero;
rnd512_4way( state, msg ); rnd512_4way( state, msg );
} }
@@ -583,13 +593,13 @@ int luffa512_4way_full( luffa_4way_context *state, void *output,
{ {
// padding of partial block // padding of partial block
msg[0] = _mm512_shuffle_epi8( vdata[ 0 ], shuff_bswap32 ); msg[0] = _mm512_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = m512_const2_64( 0, 0x0000000080000000 ); msg[1] = m512_const1_i128( 0x0000000080000000 );
rnd512_4way( state, msg ); rnd512_4way( state, msg );
} }
else else
{ {
// empty pad block // empty pad block
msg[0] = m512_const2_64( 0, 0x0000000080000000 ); msg[0] = m512_const1_i128( 0x0000000080000000 );
msg[1] = m512_zero; msg[1] = m512_zero;
rnd512_4way( state, msg ); rnd512_4way( state, msg );
} }
@@ -631,13 +641,13 @@ int luffa_4way_update_close( luffa_4way_context *state,
{ {
// padding of partial block // padding of partial block
msg[0] = _mm512_shuffle_epi8( vdata[ 0 ], shuff_bswap32 ); msg[0] = _mm512_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = m512_const2_64( 0, 0x0000000080000000 ); msg[1] = m512_const1_i128( 0x0000000080000000 );
rnd512_4way( state, msg ); rnd512_4way( state, msg );
} }
else else
{ {
// empty pad block // empty pad block
msg[0] = m512_const2_64( 0, 0x0000000080000000 ); msg[0] = m512_const1_i128( 0x0000000080000000 );
msg[1] = m512_zero; msg[1] = m512_zero;
rnd512_4way( state, msg ); rnd512_4way( state, msg );
} }
@@ -832,7 +842,7 @@ void rnd512_2way( luffa_2way_context *state, __m256i *msg )
__m256i msg0, msg1; __m256i msg0, msg1;
__m256i tmp[2]; __m256i tmp[2];
__m256i x[8]; __m256i x[8];
const __m256i MASK = m256_const2_64( 0, 0x00000000ffffffff ); const __m256i MASK = m256_const1_i128( 0x00000000ffffffff );
t0 = chainv[0]; t0 = chainv[0];
t1 = chainv[1]; t1 = chainv[1];
@@ -1088,7 +1098,7 @@ int luffa_2way_update( luffa_2way_context *state, const void *data,
{ {
// remaining data bytes // remaining data bytes
buffer[0] = _mm256_shuffle_epi8( vdata[0], shuff_bswap32 ); buffer[0] = _mm256_shuffle_epi8( vdata[0], shuff_bswap32 );
buffer[1] = m256_const2_64( 0, 0x0000000080000000 ); buffer[1] = m256_const1_i128( 0x0000000080000000 );
} }
return 0; return 0;
} }
@@ -1104,7 +1114,7 @@ int luffa_2way_close( luffa_2way_context *state, void *hashval )
rnd512_2way( state, buffer ); rnd512_2way( state, buffer );
else else
{ // empty pad block, constant data { // empty pad block, constant data
msg[0] = m256_const2_64( 0, 0x0000000080000000 ); msg[0] = m256_const1_i128( 0x0000000080000000 );
msg[1] = m256_zero; msg[1] = m256_zero;
rnd512_2way( state, msg ); rnd512_2way( state, msg );
} }
@@ -1159,13 +1169,13 @@ int luffa512_2way_full( luffa_2way_context *state, void *output,
{ {
// padding of partial block // padding of partial block
msg[0] = _mm256_shuffle_epi8( vdata[ 0 ], shuff_bswap32 ); msg[0] = _mm256_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = m256_const2_64( 0, 0x0000000080000000 ); msg[1] = m256_const1_i128( 0x0000000080000000 );
rnd512_2way( state, msg ); rnd512_2way( state, msg );
} }
else else
{ {
// empty pad block // empty pad block
msg[0] = m256_const2_64( 0, 0x0000000080000000 ); msg[0] = m256_const1_i128( 0x0000000080000000 );
msg[1] = m256_zero; msg[1] = m256_zero;
rnd512_2way( state, msg ); rnd512_2way( state, msg );
} }
@@ -1206,13 +1216,13 @@ int luffa_2way_update_close( luffa_2way_context *state,
{ {
// padding of partial block // padding of partial block
msg[0] = _mm256_shuffle_epi8( vdata[ 0 ], shuff_bswap32 ); msg[0] = _mm256_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = m256_const2_64( 0, 0x0000000080000000 ); msg[1] = m256_const1_i128( 0x0000000080000000 );
rnd512_2way( state, msg ); rnd512_2way( state, msg );
} }
else else
{ {
// empty pad block // empty pad block
msg[0] = m256_const2_64( 0, 0x0000000080000000 ); msg[0] = m256_const1_i128( 0x0000000080000000 );
msg[1] = m256_zero; msg[1] = m256_zero;
rnd512_2way( state, msg ); rnd512_2way( state, msg );
} }


@@ -23,7 +23,7 @@
#include "simd-utils.h" #include "simd-utils.h"
#include "luffa_for_sse2.h" #include "luffa_for_sse2.h"
#define MULT2(a0,a1) do \ #define MULT2( a0, a1 ) do \
{ \ { \
__m128i b = _mm_xor_si128( a0, _mm_shuffle_epi32( _mm_and_si128(a1,MASK), 16 ) ); \ __m128i b = _mm_xor_si128( a0, _mm_shuffle_epi32( _mm_and_si128(a1,MASK), 16 ) ); \
a0 = _mm_or_si128( _mm_srli_si128(b,4), _mm_slli_si128(a1,12) ); \ a0 = _mm_or_si128( _mm_srli_si128(b,4), _mm_slli_si128(a1,12) ); \
@@ -345,11 +345,11 @@ HashReturn update_and_final_luffa( hashState_luffa *state, BitSequence* output,
// 16 byte partial block exists for 80 byte len // 16 byte partial block exists for 80 byte len
if ( state->rembytes ) if ( state->rembytes )
// padding of partial block // padding of partial block
rnd512( state, m128_const_64( 0, 0x80000000 ), rnd512( state, m128_const_i128( 0x80000000 ),
mm128_bswap_32( cast_m128i( data ) ) ); mm128_bswap_32( cast_m128i( data ) ) );
else else
// empty pad block // empty pad block
rnd512( state, m128_zero, m128_const_64( 0, 0x80000000 ) ); rnd512( state, m128_zero, m128_const_i128( 0x80000000 ) );
finalization512( state, (uint32*) output ); finalization512( state, (uint32*) output );
if ( state->hashbitlen > 512 ) if ( state->hashbitlen > 512 )
@@ -394,11 +394,11 @@ int luffa_full( hashState_luffa *state, BitSequence* output, int hashbitlen,
// 16 byte partial block exists for 80 byte len // 16 byte partial block exists for 80 byte len
if ( state->rembytes ) if ( state->rembytes )
// padding of partial block // padding of partial block
rnd512( state, m128_const_64( 0, 0x80000000 ), rnd512( state, m128_const_i128( 0x80000000 ),
mm128_bswap_32( cast_m128i( data ) ) ); mm128_bswap_32( cast_m128i( data ) ) );
else else
// empty pad block // empty pad block
rnd512( state, m128_zero, m128_const_64( 0, 0x80000000 ) ); rnd512( state, m128_zero, m128_const_i128( 0x80000000 ) );
finalization512( state, (uint32*) output ); finalization512( state, (uint32*) output );
if ( state->hashbitlen > 512 ) if ( state->hashbitlen > 512 )
@@ -606,7 +606,6 @@ static void finalization512( hashState_luffa *state, uint32 *b )
casti_m256i( b, 0 ) = _mm256_shuffle_epi8( casti_m256i( b, 0 ) = _mm256_shuffle_epi8(
casti_m256i( hash, 0 ), shuff_bswap32 ); casti_m256i( hash, 0 ), shuff_bswap32 );
// casti_m256i( b, 0 ) = mm256_bswap_32( casti_m256i( hash, 0 ) );
rnd512( state, zero, zero ); rnd512( state, zero, zero );
@@ -621,7 +620,6 @@ static void finalization512( hashState_luffa *state, uint32 *b )
casti_m256i( b, 1 ) = _mm256_shuffle_epi8( casti_m256i( b, 1 ) = _mm256_shuffle_epi8(
casti_m256i( hash, 0 ), shuff_bswap32 ); casti_m256i( hash, 0 ), shuff_bswap32 );
// casti_m256i( b, 1 ) = mm256_bswap_32( casti_m256i( hash, 0 ) );
} }
#else #else


@@ -77,6 +77,7 @@ static const sph_u32 H256[8] = {
#else // no SHA #else // no SHA
/*
static const sph_u32 K[64] = { static const sph_u32 K[64] = {
SPH_C32(0x428A2F98), SPH_C32(0x71374491), SPH_C32(0x428A2F98), SPH_C32(0x71374491),
SPH_C32(0xB5C0FBCF), SPH_C32(0xE9B5DBA5), SPH_C32(0xB5C0FBCF), SPH_C32(0xE9B5DBA5),
@@ -111,6 +112,7 @@ static const sph_u32 K[64] = {
SPH_C32(0x90BEFFFA), SPH_C32(0xA4506CEB), SPH_C32(0x90BEFFFA), SPH_C32(0xA4506CEB),
SPH_C32(0xBEF9A3F7), SPH_C32(0xC67178F2) SPH_C32(0xBEF9A3F7), SPH_C32(0xC67178F2)
}; };
*/
#if SPH_SMALL_FOOTPRINT_SHA2 #if SPH_SMALL_FOOTPRINT_SHA2


@@ -23,14 +23,23 @@ static const uint32_t IV512[] =
_mm256_blend_epi32( mm256_ror128_32( a ), \ _mm256_blend_epi32( mm256_ror128_32( a ), \
mm256_ror128_32( b ), 0x88 ) mm256_ror128_32( b ), 0x88 )
#if defined(__VAES__)
#define mm256_aesenc_2x128( x, k ) \
_mm256_aesenc_epi128( x, _mm256_castsi128_si256( k ) )
#else
#define mm256_aesenc_2x128( x, k ) \
mm256_concat_128( _mm_aesenc_si128( mm128_extr_hi128_256( x ), k ), \
_mm_aesenc_si128( mm128_extr_lo128_256( x ), k ) )
#endif
static void static void
c512_2way( shavite512_2way_context *ctx, const void *msg ) c512_2way( shavite512_2way_context *ctx, const void *msg )
{ {
#if defined(__VAES__)
const __m256i zero = _mm256_setzero_si256();
#else
const __m128i zero = _mm_setzero_si128(); const __m128i zero = _mm_setzero_si128();
#endif
__m256i p0, p1, p2, p3, x; __m256i p0, p1, p2, p3, x;
__m256i k00, k01, k02, k03, k10, k11, k12, k13; __m256i k00, k01, k02, k03, k10, k11, k12, k13;
__m256i *m = (__m256i*)msg; __m256i *m = (__m256i*)msg;
@@ -308,7 +317,7 @@ void shavite512_2way_close( shavite512_2way_context *ctx, void *dst )
uint32_t vp = ctx->ptr>>5; uint32_t vp = ctx->ptr>>5;
// Terminating byte then zero pad // Terminating byte then zero pad
casti_m256i( buf, vp++ ) = m256_const2_64( 0, 0x0000000000000080 ); casti_m256i( buf, vp++ ) = m256_const1_i128( 0x0000000000000080 );
// Zero pad full vectors up to count // Zero pad full vectors up to count
for ( ; vp < 6; vp++ ) for ( ; vp < 6; vp++ )
@@ -388,13 +397,13 @@ void shavite512_2way_update_close( shavite512_2way_context *ctx, void *dst,
if ( vp == 0 ) // empty buf, xevan. if ( vp == 0 ) // empty buf, xevan.
{ {
casti_m256i( buf, 0 ) = m256_const2_64( 0, 0x0000000000000080 ); casti_m256i( buf, 0 ) = m256_const1_i128( 0x0000000000000080 );
memset_zero_256( (__m256i*)buf + 1, 5 ); memset_zero_256( (__m256i*)buf + 1, 5 );
ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0; ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0;
} }
else // half full buf, everyone else. else // half full buf, everyone else.
{ {
casti_m256i( buf, vp++ ) = m256_const2_64( 0, 0x0000000000000080 ); casti_m256i( buf, vp++ ) = m256_const1_i128( 0x0000000000000080 );
memset_zero_256( (__m256i*)buf + vp, 6 - vp ); memset_zero_256( (__m256i*)buf + vp, 6 - vp );
} }
@@ -478,13 +487,13 @@ void shavite512_2way_full( shavite512_2way_context *ctx, void *dst,
if ( vp == 0 ) // empty buf, xevan. if ( vp == 0 ) // empty buf, xevan.
{ {
casti_m256i( buf, 0 ) = m256_const2_64( 0, 0x0000000000000080 ); casti_m256i( buf, 0 ) = m256_const1_i128( 0x0000000000000080 );
memset_zero_256( (__m256i*)buf + 1, 5 ); memset_zero_256( (__m256i*)buf + 1, 5 );
ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0; ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0;
} }
else // half full buf, everyone else. else // half full buf, everyone else.
{ {
casti_m256i( buf, vp++ ) = m256_const2_64( 0, 0x0000000000000080 ); casti_m256i( buf, vp++ ) = m256_const1_i128( 0x0000000000000080 );
memset_zero_256( (__m256i*)buf + vp, 6 - vp ); memset_zero_256( (__m256i*)buf + vp, 6 - vp );
} }


@@ -292,7 +292,7 @@ void shavite512_4way_close( shavite512_4way_context *ctx, void *dst )
uint32_t vp = ctx->ptr>>6; uint32_t vp = ctx->ptr>>6;
// Terminating byte then zero pad // Terminating byte then zero pad
casti_m512i( buf, vp++ ) = m512_const2_64( 0, 0x0000000000000080 ); casti_m512i( buf, vp++ ) = m512_const1_i128( 0x0000000000000080 );
// Zero pad full vectors up to count // Zero pad full vectors up to count
for ( ; vp < 6; vp++ ) for ( ; vp < 6; vp++ )
@@ -372,13 +372,13 @@ void shavite512_4way_update_close( shavite512_4way_context *ctx, void *dst,
if ( vp == 0 ) // empty buf, xevan. if ( vp == 0 ) // empty buf, xevan.
{ {
casti_m512i( buf, 0 ) = m512_const2_64( 0, 0x0000000000000080 ); casti_m512i( buf, 0 ) = m512_const1_i128( 0x0000000000000080 );
memset_zero_512( (__m512i*)buf + 1, 5 ); memset_zero_512( (__m512i*)buf + 1, 5 );
ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0; ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0;
} }
else // half full buf, everyone else. else // half full buf, everyone else.
{ {
casti_m512i( buf, vp++ ) = m512_const2_64( 0, 0x0000000000000080 ); casti_m512i( buf, vp++ ) = m512_const1_i128( 0x0000000000000080 );
memset_zero_512( (__m512i*)buf + vp, 6 - vp ); memset_zero_512( (__m512i*)buf + vp, 6 - vp );
} }
@@ -463,13 +463,13 @@ void shavite512_4way_full( shavite512_4way_context *ctx, void *dst,
if ( vp == 0 ) // empty buf, xevan. if ( vp == 0 ) // empty buf, xevan.
{ {
casti_m512i( buf, 0 ) = m512_const2_64( 0, 0x0000000000000080 ); casti_m512i( buf, 0 ) = m512_const1_i128( 0x0000000000000080 );
memset_zero_512( (__m512i*)buf + 1, 5 ); memset_zero_512( (__m512i*)buf + 1, 5 );
ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0; ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0;
} }
else // half full buf, everyone else. else // half full buf, everyone else.
{ {
casti_m512i( buf, vp++ ) = m512_const2_64( 0, 0x0000000000000080 ); casti_m512i( buf, vp++ ) = m512_const1_i128( 0x0000000000000080 );
memset_zero_512( (__m512i*)buf + vp, 6 - vp ); memset_zero_512( (__m512i*)buf + vp, 6 - vp );
} }


@@ -1,47 +0,0 @@
/*
* Copyright (c) 2000 Jeroen Ruigrok van der Werven <asmodai@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: src/include/stdbool.h,v 1.6 2002/08/16 07:33:14 alfred Exp $
*/
#ifndef _STDBOOL_H_
#define _STDBOOL_H_
#define __bool_true_false_are_defined 1
#ifndef __cplusplus
#define false 0
#define true 1
//#define bool _Bool
//#if __STDC_VERSION__ < 199901L && __GNUC__ < 3
//typedef int _Bool;
//#endif
typedef int bool;
#endif /* !__cplusplus */
#endif /* !_STDBOOL_H_ */

File diff suppressed because it is too large.

configure (vendored)

@@ -1,6 +1,6 @@
#! /bin/sh #! /bin/sh
# Guess values for system-dependent variables and create Makefiles. # Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.15.6. # Generated by GNU Autoconf 2.69 for cpuminer-opt 3.15.7.
# #
# #
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -577,8 +577,8 @@ MAKEFLAGS=
# Identity of this package. # Identity of this package.
PACKAGE_NAME='cpuminer-opt' PACKAGE_NAME='cpuminer-opt'
PACKAGE_TARNAME='cpuminer-opt' PACKAGE_TARNAME='cpuminer-opt'
PACKAGE_VERSION='3.15.6' PACKAGE_VERSION='3.15.7'
PACKAGE_STRING='cpuminer-opt 3.15.6' PACKAGE_STRING='cpuminer-opt 3.15.7'
PACKAGE_BUGREPORT='' PACKAGE_BUGREPORT=''
PACKAGE_URL='' PACKAGE_URL=''
@@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing. # Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh. # This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF cat <<_ACEOF
\`configure' configures cpuminer-opt 3.15.6 to adapt to many kinds of systems. \`configure' configures cpuminer-opt 3.15.7 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]... Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1404,7 +1404,7 @@ fi
if test -n "$ac_init_help"; then if test -n "$ac_init_help"; then
case $ac_init_help in case $ac_init_help in
short | recursive ) echo "Configuration of cpuminer-opt 3.15.6:";; short | recursive ) echo "Configuration of cpuminer-opt 3.15.7:";;
esac esac
cat <<\_ACEOF cat <<\_ACEOF
@@ -1509,7 +1509,7 @@ fi
test -n "$ac_init_help" && exit $ac_status test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then if $ac_init_version; then
cat <<\_ACEOF cat <<\_ACEOF
cpuminer-opt configure 3.15.6 cpuminer-opt configure 3.15.7
generated by GNU Autoconf 2.69 generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc. Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake. running configure, to aid debugging if configure makes a mistake.
It was created by cpuminer-opt $as_me 3.15.6, which was It was created by cpuminer-opt $as_me 3.15.7, which was
generated by GNU Autoconf 2.69. Invocation command line was generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@ $ $0 $@
@@ -2993,7 +2993,7 @@ fi
# Define the identity of the package. # Define the identity of the package.
PACKAGE='cpuminer-opt' PACKAGE='cpuminer-opt'
VERSION='3.15.6' VERSION='3.15.7'
cat >>confdefs.h <<_ACEOF cat >>confdefs.h <<_ACEOF
@@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their # report actual input values of CONFIG_FILES etc. instead of their
# values after options handling. # values after options handling.
ac_log=" ac_log="
This file was extended by cpuminer-opt $as_me 3.15.6, which was This file was extended by cpuminer-opt $as_me 3.15.7, which was
generated by GNU Autoconf 2.69. Invocation command line was generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES CONFIG_FILES = $CONFIG_FILES
@@ -6756,7 +6756,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\ ac_cs_version="\\
cpuminer-opt config.status 3.15.6 cpuminer-opt config.status 3.15.7
configured by $0, generated by GNU Autoconf 2.69, configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\" with options \\"\$ac_cs_config\\"


@@ -1,4 +1,4 @@
AC_INIT([cpuminer-opt], [3.15.6]) AC_INIT([cpuminer-opt], [3.15.7])
AC_PREREQ([2.59c]) AC_PREREQ([2.59c])
AC_CANONICAL_SYSTEM AC_CANONICAL_SYSTEM


@@ -204,6 +204,7 @@ static double lowest_share = 9e99; // lowest accepted share diff
static double last_targetdiff = 0.; static double last_targetdiff = 0.;
#if !(defined(__WINDOWS__) || defined(_WIN64) || defined(_WIN32)) #if !(defined(__WINDOWS__) || defined(_WIN64) || defined(_WIN32))
static uint32_t hi_temp = 0; static uint32_t hi_temp = 0;
static uint32_t prev_temp = 0;
#endif #endif
@@ -998,32 +999,67 @@ static struct timeval last_submit_time = {0};
static inline int stats_ptr_incr( int p ) static inline int stats_ptr_incr( int p )
{ {
return ++p < s_stats_size ? p : 0; return ++p % s_stats_size;
} }
void report_summary_log( bool force ) void report_summary_log( bool force )
{ {
struct timeval now, et, uptime, start_time; struct timeval now, et, uptime, start_time;
pthread_mutex_lock( &stats_lock );
gettimeofday( &now, NULL ); gettimeofday( &now, NULL );
timeval_subtract( &et, &now, &five_min_start ); timeval_subtract( &et, &now, &five_min_start );
if ( !( force && ( submit_sum || ( et.tv_sec > 5 ) ) ) #if !(defined(__WINDOWS__) || defined(_WIN64) || defined(_WIN32))
&& ( et.tv_sec < 300 ) )
// Display CPU temperature and clock rate.
int curr_temp = cpu_temp(0);
static struct timeval cpu_temp_time = {0};
struct timeval diff;
if ( !opt_quiet || ( curr_temp >= 80 ) )
{ {
pthread_mutex_unlock( &stats_lock ); int wait_time = curr_temp >= 90 ? 5 : curr_temp >= 80 ? 30 :
return; curr_temp >= 70 ? 60 : 120;
timeval_subtract( &diff, &now, &cpu_temp_time );
if ( ( diff.tv_sec > wait_time )
|| ( ( curr_temp > prev_temp ) && ( curr_temp >= 75 ) ) )
{
char tempstr[32];
float lo_freq = 0., hi_freq = 0.;
memcpy( &cpu_temp_time, &now, sizeof(cpu_temp_time) );
linux_cpu_hilo_freq( &lo_freq, &hi_freq );
if ( use_colors && ( curr_temp >= 70 ) )
{
if ( curr_temp >= 80 )
sprintf( tempstr, "%s%d C%s", CL_RED, curr_temp, CL_WHT );
else
sprintf( tempstr, "%s%d C%s", CL_YLW, curr_temp, CL_WHT );
}
else
sprintf( tempstr, "%d C", curr_temp );
applog( LOG_NOTICE,"CPU temp: curr %s max %d, Freq: %.3f/%.3f GHz",
tempstr, hi_temp, lo_freq / 1e6, hi_freq / 1e6 );
if ( curr_temp > hi_temp ) hi_temp = curr_temp;
prev_temp = curr_temp;
}
} }
#endif
if ( !( force && ( submit_sum || ( et.tv_sec > 5 ) ) )
&& ( et.tv_sec < 300 ) )
return;
// collect and reset periodic counters // collect and reset periodic counters
pthread_mutex_lock( &stats_lock );
uint64_t submits = submit_sum; submit_sum = 0; uint64_t submits = submit_sum; submit_sum = 0;
uint64_t accepts = accept_sum; accept_sum = 0; uint64_t accepts = accept_sum; accept_sum = 0;
uint64_t rejects = reject_sum; reject_sum = 0; uint64_t rejects = reject_sum; reject_sum = 0;
uint64_t stales = stale_sum; stale_sum = 0; uint64_t stales = stale_sum; stale_sum = 0;
uint64_t solved = solved_sum; solved_sum = 0; uint64_t solved = solved_sum; solved_sum = 0;
memcpy( &start_time, &five_min_start, sizeof start_time ); memcpy( &start_time, &five_min_start, sizeof start_time );
memcpy( &five_min_start, &now, sizeof now ); memcpy( &five_min_start, &now, sizeof now );
@@ -1080,27 +1116,38 @@ void report_summary_log( bool force )
applog2( LOG_INFO,"Submitted %6d %6d", applog2( LOG_INFO,"Submitted %6d %6d",
submits, submitted_share_count ); submits, submitted_share_count );
applog2( LOG_INFO,"Accepted %6d %6d", applog2( LOG_INFO,"Accepted %6d %6d %5.1f%%",
accepts, accepted_share_count ); accepts, accepted_share_count,
100. * accepted_share_count / submitted_share_count );
if ( stale_share_count ) if ( stale_share_count )
applog2( LOG_INFO,"Stale %6d %6d", applog2( LOG_INFO,"Stale %6d %6d %5.1f%%",
stales, stale_share_count ); stales, stale_share_count,
100. * stale_share_count / submitted_share_count );
if ( rejected_share_count ) if ( rejected_share_count )
applog2( LOG_INFO,"Rejected %6d %6d", applog2( LOG_INFO,"Rejected %6d %6d %5.1f%%",
rejects, rejected_share_count ); rejects, rejected_share_count,
100. * rejected_share_count / submitted_share_count );
if ( solved_block_count ) if ( solved_block_count )
applog2( LOG_INFO,"Blocks Solved %6d %6d", applog2( LOG_INFO,"Blocks Solved %6d %6d",
solved, solved_block_count ); solved, solved_block_count );
applog2( LOG_INFO, "Hi/Lo Share Diff %.5g / %.5g", applog2( LOG_INFO, "Hi/Lo Share Diff %.5g / %.5g",
highest_share, lowest_share ); highest_share, lowest_share );
}
bool lowdiff_debug = false; static int64_t no_acks = 0;
if ( no_acks )
{
no_acks = submitted_share_count
- ( accepted_share_count + stale_share_count + rejected_share_count );
if ( no_acks ) // 2 consecutive cycles non zero
applog(LOG_WARNING,"Share count mismatch: %d, stats may be incorrect",
no_acks );
}
}
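// Illustrative sketch (hypothetical): cpu_temp() and linux_cpu_hilo_freq()
// used above are defined elsewhere in the tree. One common way to read a CPU
// temperature on Linux is the thermal sysfs interface; this stand-in is an
// assumption for illustration, not the project's actual implementation.
#include <stdio.h>
static inline int cpu_temp_sysfs_demo( void )
{
   FILE *f = fopen( "/sys/class/thermal/thermal_zone0/temp", "r" );
   int millideg = 0;
   if ( !f ) return 0;
   if ( fscanf( f, "%d", &millideg ) != 1 ) millideg = 0;
   fclose( f );
   return millideg / 1000;   // degrees Celsius, as logged above
}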
static int share_result( int result, struct work *work, static int share_result( int result, struct work *work,
const char *reason ) const char *reason )
{ {
double share_time = 0.; //, share_ratio = 0.; double share_time = 0.;
double hashrate = 0.; double hashrate = 0.;
int latency = 0; int latency = 0;
struct share_stats_t my_stats = {0}; struct share_stats_t my_stats = {0};
@@ -1141,11 +1188,6 @@ static int share_result( int result, struct work *work,
sizeof last_submit_time ); sizeof last_submit_time );
} }
/*
share_ratio = my_stats.net_diff == 0. ? 0. : my_stats.share_diff /
my_stats.net_diff;
*/
// check result // check result
if ( likely( result ) ) if ( likely( result ) )
{ {
@@ -2324,6 +2366,8 @@ static void *miner_thread( void *userdata )
pthread_mutex_unlock( &stats_lock ); pthread_mutex_unlock( &stats_lock );
} }
// This code is deprecated, scanhash should never return true.
// This remains as a backup in case some old implementations still exist.
// If unsubmitted nonce(s) found, submit now. // If unsubmitted nonce(s) found, submit now.
if ( unlikely( nonce_found && !opt_benchmark ) ) if ( unlikely( nonce_found && !opt_benchmark ) )
{ {
@@ -2350,48 +2394,6 @@ static void *miner_thread( void *userdata )
} }
} }
#if !(defined(__WINDOWS__) || defined(_WIN64) || defined(_WIN32))
// Display CPU temperature and clock rate.
int curr_temp, prev_hi_temp;
static struct timeval cpu_temp_time = {0};
pthread_mutex_lock( &stats_lock );
prev_hi_temp = hi_temp;
curr_temp = cpu_temp(0);
if ( curr_temp > hi_temp ) hi_temp = curr_temp;
pthread_mutex_unlock( &stats_lock );
if ( !opt_quiet || ( curr_temp >= 80 ) )
{
int wait_time = curr_temp >= 80 ? 20 : curr_temp >= 70 ? 60 : 120;
timeval_subtract( &diff, &tv_end, &cpu_temp_time );
if ( ( diff.tv_sec > wait_time ) || ( curr_temp > prev_hi_temp ) )
{
char tempstr[32];
float lo_freq = 0., hi_freq = 0.;
memcpy( &cpu_temp_time, &tv_end, sizeof(cpu_temp_time) );
linux_cpu_hilo_freq( &lo_freq, &hi_freq );
if ( use_colors && ( curr_temp >= 70 ) )
{
if ( curr_temp >= 80 )
sprintf( tempstr, "%s%d C%s", CL_RED, curr_temp, CL_WHT );
else
sprintf( tempstr, "%s%d C%s", CL_YLW, curr_temp, CL_WHT );
}
else
sprintf( tempstr, "%d C", curr_temp );
applog( LOG_NOTICE,"CPU temp: curr %s (max %d), Freq: %.3f/%.3f GHz",
tempstr, prev_hi_temp, lo_freq / 1e6, hi_freq / 1e6 );
}
}
#endif
// display hashrate // display hashrate
if ( unlikely( opt_hash_meter ) ) if ( unlikely( opt_hash_meter ) )
{ {
@@ -457,9 +457,6 @@ bool stratum_subscribe(struct stratum_ctx *sctx);
bool stratum_authorize(struct stratum_ctx *sctx, const char *user, const char *pass); bool stratum_authorize(struct stratum_ctx *sctx, const char *user, const char *pass);
bool stratum_handle_method(struct stratum_ctx *sctx, const char *s); bool stratum_handle_method(struct stratum_ctx *sctx, const char *s);
extern bool lowdiff_debug;
extern bool aes_ni_supported; extern bool aes_ni_supported;
extern char *rpc_user; extern char *rpc_user;
@@ -549,7 +546,7 @@ enum algos {
ALGO_LYRA2REV3, ALGO_LYRA2REV3,
ALGO_LYRA2Z, ALGO_LYRA2Z,
ALGO_LYRA2Z330, ALGO_LYRA2Z330,
ALGO_M7M, ALGO_M7M,
ALGO_MINOTAUR, ALGO_MINOTAUR,
ALGO_MYR_GR, ALGO_MYR_GR,
ALGO_NEOSCRYPT, ALGO_NEOSCRYPT,
@@ -131,7 +131,7 @@
// If a sequence of constants is to be used it can be more efficient to // If a sequence of constants is to be used it can be more efficient to
// use arithmetic with already existing constants to generate new ones. // use arithmetic with already existing constants to generate new ones.
// //
// ex: const __m512i one = _mm512_const1_64( 1 ); // ex: const __m512i one = m512_one_64;
// const __m512i two = _mm512_add_epi64( one, one ); // const __m512i two = _mm512_add_epi64( one, one );
// //
////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////
@@ -27,13 +27,15 @@
// All of the utilities here assume all data is in registers except // All of the utilities here assume all data is in registers except
// in rare cases where arguments are pointers. // in rare cases where arguments are pointers.
// //
// Some constants are generated using a memory overlay on the stack.
//
// Intrinsics automatically promote from REX to VEX when AVX is available // Intrinsics automatically promote from REX to VEX when AVX is available
// but ASM needs to be done manually. // but ASM needs to be done manually.
// //
/////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////
// Efficient and convenient moving bwtween GP & low bits of XMM. // Efficient and convenient moving between GP & low bits of XMM.
// Use VEX when available to give access to xmm8-15 and zero extend for // Use VEX when available to give access to xmm8-15 and zero extend for
// larger vectors. // larger vectors.
@@ -81,6 +83,23 @@ static inline uint32_t mm128_mov128_32( const __m128i a )
return n; return n;
} }
// Equivalent of set1, broadcast integer to all elements.
#define m128_const_i128( i ) mm128_mov64_128( i )
#define m128_const1_64( i ) _mm_shuffle_epi32( mm128_mov64_128( i ), 0x44 )
#define m128_const1_32( i ) _mm_shuffle_epi32( mm128_mov32_128( i ), 0x00 )
#if defined(__SSE4_1__)
// Assign 64 bit integers to respective elements: {hi, lo}
#define m128_const_64( hi, lo ) \
_mm_insert_epi64( mm128_mov64_128( lo ), hi, 1 )
#else // No insert in SSE2
#define m128_const_64 _mm_set_epi64x
#endif
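// Illustrative usage sketch (hypothetical, not referenced elsewhere):
// broadcast a 64 bit value that is already in a GP register, and build a
// vector from two distinct 64 bit constants.
static inline __m128i m128_const_demo( const uint64_t k )
{
   __m128i a = m128_const1_64( k );                       // { k, k }
   __m128i b = m128_const_64( 0x0123456789abcdefULL,
                              0xfedcba9876543210ULL );    // { hi, lo }
   return _mm_xor_si128( a, b );
}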
// Pseudo constants // Pseudo constants
#define m128_zero _mm_setzero_si128() #define m128_zero _mm_setzero_si128()
@@ -107,27 +126,53 @@ static inline __m128i mm128_neg1_fn()
} }
#define m128_neg1 mm128_neg1_fn() #define m128_neg1 mm128_neg1_fn()
// const functions work best when arguments are immediate constants or
// are known to be in registers. If data needs to be loaded from memory or cache
// use set.
// Equivalent of set1, broadcast 64 bit integer to all elements.
#define m128_const1_64( i ) _mm_shuffle_epi32( mm128_mov64_128( i ), 0x44 )
#define m128_const1_32( i ) _mm_shuffle_epi32( mm128_mov32_128( i ), 0x00 )
#if defined(__SSE4_1__) #if defined(__SSE4_1__)
// Assign 64 bit integers to respective elements: {hi, lo} /////////////////////////////
#define m128_const_64( hi, lo ) \ //
_mm_insert_epi64( mm128_mov64_128( lo ), hi, 1 ) // _mm_insert_ps( _mm128i v1, __m128i v2, imm8 c )
//
// Fast and powerful but very limited in its application.
// It requires SSE4.1 but only works with 128 bit vectors with 32 bit
// elements. There is no equivalent instruction for 256 bit or 512 bit vectors.
// There's no integer version. There's no 64 bit, 16 bit or byte element
// sizing. It's unique.
//
// It can:
// - zero 32 bit elements of a 128 bit vector.
// - extract any 32 bit element from one 128 bit vector and insert the
// data to any 32 bit element of another 128 bit vector, or the same vector.
// - do both simultaneously.
//
// It can be used as a more efficient replacement for _mm_insert_epi32
// or _mm_extract_epi32.
//
// Control byte definition:
// c[3:0] zero mask
// c[5:4] destination element selector
// c[7:6] source element selector
#else // No insert in SSE2 // Convert type and abbreviate name: e"x"tract "i"nsert "m"ask
#define mm128_xim_32( v1, v2, c ) \
_mm_castps_si128( _mm_insert_ps( _mm_castsi128_ps( v1 ), \
_mm_castsi128_ps( v2 ), c ) )
#define m128_const_64 _mm_set_epi64x // Some examples of simple operations:
#endif // Insert 32 bit integer into v at element c and return modified v.
static inline __m128i mm128_insert_32( const __m128i v, const uint32_t i,
const int c )
{ return mm128_xim_32( v, mm128_mov32_128( i ), c<<4 ); }
// Extract 32 bit element c from v and return as integer.
static inline uint32_t mm128_extract_32( const __m128i v, const int c )
{ return mm128_mov128_32( mm128_xim_32( v, v, c<<6 ) ); }
// Clear (zero) 32 bit elements based on bits set in 4 bit mask.
static inline __m128i mm128_mask_32( const __m128i v, const int m )
{ return mm128_xim_32( v, v, m ); }
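// Illustrative usage sketch (hypothetical, assumes SSE4.1): each helper
// above reduces to a single insertps.
static inline uint32_t mm128_xim_demo( void )
{
   __m128i v = _mm_set_epi32( 3, 2, 1, 0 );
   v = mm128_insert_32( v, 7, 2 );      // v = { 3, 7, 1, 0 }
   v = mm128_mask_32( v, 0x3 );         // zero elements 1 & 0: { 3, 7, 0, 0 }
   return mm128_extract_32( v, 2 );     // returns 7
}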
#endif // SSE4_1
// //
// Basic operations without equivalent SIMD intrinsic // Basic operations without equivalent SIMD intrinsic
@@ -140,11 +185,6 @@ static inline __m128i mm128_neg1_fn()
#define mm128_negate_32( v ) _mm_sub_epi32( m128_zero, v ) #define mm128_negate_32( v ) _mm_sub_epi32( m128_zero, v )
#define mm128_negate_16( v ) _mm_sub_epi16( m128_zero, v ) #define mm128_negate_16( v ) _mm_sub_epi16( m128_zero, v )
// Clear (zero) 32 bit elements based on bits set in 4 bit mask.
// Fast, avoids using vector mask, but only available for 128 bit vectors.
#define mm128_mask_32( a, mask ) \
_mm_castps_si128( _mm_insert_ps( _mm_castsi128_ps( a ), \
_mm_castsi128_ps( a ), mask ) )
// Add 4 values, fewer dependencies than sequential addition. // Add 4 values, fewer dependencies than sequential addition.
#define mm128_add4_64( a, b, c, d ) \ #define mm128_add4_64( a, b, c, d ) \
@@ -162,27 +202,6 @@ static inline __m128i mm128_neg1_fn()
#define mm128_xor4( a, b, c, d ) \ #define mm128_xor4( a, b, c, d ) \
_mm_xor_si128( _mm_xor_si128( a, b ), _mm_xor_si128( c, d ) ) _mm_xor_si128( _mm_xor_si128( a, b ), _mm_xor_si128( c, d ) )
// Horizontal vector testing
#if defined(__SSE4_1__)
#define mm128_allbits0( a ) _mm_testz_si128( a, a )
#define mm128_allbits1( a ) _mm_testc_si128( a, m128_neg1 )
// probably broken, avx2 is
//#define mm128_allbitsne( a ) _mm_testnzc_si128( a, m128_neg1 )
#define mm128_anybits0( a ) mm128_allbits1( a )
#define mm128_anybits1( a ) mm128_allbits0( a )
#else // SSE2
// Bit-wise test of entire vector, useful to test results of cmp.
#define mm128_anybits0( a ) (uint128_t)(a)
#define mm128_anybits1( a ) (((uint128_t)(a))+1)
#define mm128_allbits0( a ) ( !mm128_anybits1(a) )
#define mm128_allbits1( a ) ( !mm128_anybits0(a) )
#endif // SSE4.1 else SSE2
// //
// Vector pointer cast // Vector pointer cast
@@ -204,11 +223,6 @@ static inline __m128i mm128_neg1_fn()
#define casto_m128i(p,o) (((__m128i*)(p))+(o)) #define casto_m128i(p,o) (((__m128i*)(p))+(o))
// Memory functions
// Mostly for convenience, avoids calculating bytes.
// Assumes data is alinged and integral.
// n = number of __m128i, bytes/16
// Memory functions // Memory functions
// Mostly for convenience, avoids calculating bytes. // Mostly for convenience, avoids calculating bytes.
// Assumes data is aligned and integral. // Assumes data is aligned and integral.
@@ -256,14 +270,14 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
#define mm128_ror_32 _mm_ror_epi32 #define mm128_ror_32 _mm_ror_epi32
#define mm128_rol_32 _mm_rol_epi32 #define mm128_rol_32 _mm_rol_epi32
#else #else // SSE2
#define mm128_ror_64 mm128_ror_var_64 #define mm128_ror_64 mm128_ror_var_64
#define mm128_rol_64 mm128_rol_var_64 #define mm128_rol_64 mm128_rol_var_64
#define mm128_ror_32 mm128_ror_var_32 #define mm128_ror_32 mm128_ror_var_32
#define mm128_rol_32 mm128_rol_var_32 #define mm128_rol_32 mm128_rol_var_32
#endif // AVX512 else #endif // AVX512 else SSE2
#define mm128_ror_16( v, c ) \ #define mm128_ror_16( v, c ) \
_mm_or_si128( _mm_srli_epi16( v, c ), _mm_slli_epi16( v, 16-(c) ) ) _mm_or_si128( _mm_srli_epi16( v, c ), _mm_slli_epi16( v, 16-(c) ) )
@@ -280,58 +294,19 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
//#define mm128_swap_64( v ) _mm_alignr_epi8( v, v, 8 ) //#define mm128_swap_64( v ) _mm_alignr_epi8( v, v, 8 )
//#define mm128_ror_1x32( v ) _mm_alignr_epi8( v, v, 4 ) //#define mm128_ror_1x32( v ) _mm_alignr_epi8( v, v, 4 )
//#define mm128_rol_1x32( v ) _mm_alignr_epi8( v, v, 12 ) //#define mm128_rol_1x32( v ) _mm_alignr_epi8( v, v, 12 )
#define mm128_ror_1x16( v ) _mm_alignr_epi8( v, v, 2 )
#define mm128_rol_1x16( v ) _mm_alignr_epi8( v, v, 14 )
#define mm128_ror_1x8( v ) _mm_alignr_epi8( v, v, 1 )
#define mm128_rol_1x8( v ) _mm_alignr_epi8( v, v, 15 )
// Rotate by c bytes // Swap 32 bit elements in 64 bit lanes
#define mm128_ror_x8( v, c ) _mm_alignr_epi8( v, c ) #define mm128_swap64_32( v ) _mm_shuffle_epi32( v, 0xb1 )
#define mm128_rol_x8( v, c ) _mm_alignr_epi8( v, 16-(c) )
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm128_invert_32( v ) _mm_shuffle_epi32( v, 0x1b )
#if defined(__SSSE3__) #if defined(__SSSE3__)
#define mm128_invert_16( v ) \ // Rotate right by c bytes
_mm_shuffle_epi8( v, mm128_const_64( 0x0100030205040706, \ static inline __m128i mm128_ror_x8( const __m128i v, const int c )
0x09080b0a0d0c0f0e ) { return _mm_alignr_epi8( v, v, c ); }
#define mm128_invert_8( v ) \
_mm_shuffle_epi8( v, mm128_const_64( 0x0001020304050607, \
0x08090a0b0c0d0e0f )
#endif // SSSE3
//
// Rotate elements within lanes.
#define mm128_swap64_32( v ) _mm_shuffle_epi32( v, 0xb1 )
#define mm128_rol64_8( v, c ) \
_mm_or_si128( _mm_slli_epi64( v, ( ( (c)<<3 ) ), \
_mm_srli_epi64( v, ( ( 64 - ( (c)<<3 ) ) ) )
#define mm128_ror64_8( v, c ) \
_mm_or_si128( _mm_srli_epi64( v, ( ( (c)<<3 ) ), \
_mm_slli_epi64( v, ( ( 64 - ( (c)<<3 ) ) ) )
#define mm128_rol32_8( v, c ) \
_mm_or_si128( _mm_slli_epi32( v, ( ( (c)<<3 ) ), \
_mm_srli_epi32( v, ( ( 32 - ( (c)<<3 ) ) ) )
#define mm128_ror32_8( v, c ) \
_mm_or_si128( _mm_srli_epi32( v, ( ( (c)<<3 ) ), \
_mm_slli_epi32( v, ( ( 32 - ( (c)<<3 ) ) ) )
// //
// Endian byte swap. // Endian byte swap.
#if defined(__SSSE3__)
#define mm128_bswap_64( v ) \ #define mm128_bswap_64( v ) \
_mm_shuffle_epi8( v, m128_const_64( 0x08090a0b0c0d0e0f, \ _mm_shuffle_epi8( v, m128_const_64( 0x08090a0b0c0d0e0f, \
0x0001020304050607 ) ) 0x0001020304050607 ) )
@@ -374,7 +349,6 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
#else // SSE2 #else // SSE2
// Use inline function instead of macro due to multiple statements.
static inline __m128i mm128_bswap_64( __m128i v ) static inline __m128i mm128_bswap_64( __m128i v )
{ {
v = _mm_or_si128( _mm_slli_epi16( v, 8 ), _mm_srli_epi16( v, 8 ) ); v = _mm_or_si128( _mm_slli_epi16( v, 8 ), _mm_srli_epi16( v, 8 ) );
@@ -15,33 +15,35 @@
// is available. // is available.
// Move integer to low element of vector, other elements are set to zero. // Move integer to low element of vector, other elements are set to zero.
#define mm256_mov64_256( i ) _mm256_castsi128_si256( mm128_mov64_128( i ) )
#define mm256_mov32_256( i ) _mm256_castsi128_si256( mm128_mov32_128( i ) )
#define mm256_mov64_256( n ) _mm256_castsi128_si256( mm128_mov64_128( n ) ) // Move low element of vector to integer.
#define mm256_mov32_256( n ) _mm256_castsi128_si256( mm128_mov32_128( n ) ) #define mm256_mov256_64( v ) mm128_mov128_64( _mm256_castsi256_si128( v ) )
#define mm256_mov256_32( v ) mm128_mov128_32( _mm256_castsi256_si128( v ) )
#define mm256_mov256_64( a ) mm128_mov128_64( _mm256_castsi256_si128( a ) )
#define mm256_mov256_32( a ) mm128_mov128_32( _mm256_castsi256_si128( a ) )
// concatenate two 128 bit vectors into one 256 bit vector: { hi, lo } // concatenate two 128 bit vectors into one 256 bit vector: { hi, lo }
#define mm256_concat_128( hi, lo ) \ #define mm256_concat_128( hi, lo ) \
_mm256_inserti128_si256( _mm256_castsi128_si256( lo ), hi, 1 ) _mm256_inserti128_si256( _mm256_castsi128_si256( lo ), hi, 1 )
// Equavalent of set, move 64 bit integer constants to respective 64 bit // Equivalent of set, move 64 bit integer constants to respective 64 bit
// elements. // elements.
static inline __m256i m256_const_64( const uint64_t i3, const uint64_t i2, static inline __m256i m256_const_64( const uint64_t i3, const uint64_t i2,
const uint64_t i1, const uint64_t i0 ) const uint64_t i1, const uint64_t i0 )
{ {
__m128i hi, lo; union { __m256i m256i;
lo = mm128_mov64_128( i0 ); uint64_t u64[4]; } v;
hi = mm128_mov64_128( i2 ); v.u64[0] = i0; v.u64[1] = i1; v.u64[2] = i2; v.u64[3] = i3;
lo = _mm_insert_epi64( lo, i1, 1 ); return v.m256i;
hi = _mm_insert_epi64( hi, i3, 1 );
return mm256_concat_128( hi, lo );
} }
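// Illustrative sketch (hypothetical): equivalent in result to
// _mm256_set_epi64x( 3, 2, 1, 0 ), i.e. 64 bit elements { 3, 2, 1, 0 }
// from high to low.
static inline __m256i m256_const_64_demo( void )
{ return m256_const_64( 3, 2, 1, 0 ); }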
// Equivalent of set1, broadcast integer constant to all elements. // Equivalent of set1.
#define m256_const1_128( v ) _mm256_broadcastsi128_si256( v ) // 128 bit vector argument
#define m256_const1_128( v ) \
_mm256_permute4x64_epi64( _mm256_castsi128_si256( v ), 0x44 )
// 64 bit integer argument
#define m256_const1_i128( i ) m256_const1_128( mm128_mov64_128( i ) )
#define m256_const1_64( i ) _mm256_broadcastq_epi64( mm128_mov64_128( i ) ) #define m256_const1_64( i ) _mm256_broadcastq_epi64( mm128_mov64_128( i ) )
#define m256_const1_32( i ) _mm256_broadcastd_epi32( mm128_mov32_128( i ) ) #define m256_const1_32( i ) _mm256_broadcastd_epi32( mm128_mov32_128( i ) )
#define m256_const1_16( i ) _mm256_broadcastw_epi16( mm128_mov32_128( i ) ) #define m256_const1_16( i ) _mm256_broadcastw_epi16( mm128_mov32_128( i ) )
@@ -50,119 +52,29 @@ static inline __m256i m256_const_64( const uint64_t i3, const uint64_t i2,
#define m256_const2_64( i1, i0 ) \ #define m256_const2_64( i1, i0 ) \
m256_const1_128( m128_const_64( i1, i0 ) ) m256_const1_128( m128_const_64( i1, i0 ) )
#define m126_const2_32( i1, i0 ) \
m256_const1_64( ( (uint64_t)(i1) << 32 ) | ( (uint64_t)(i0) & 0xffffffff ) )
// //
// All SIMD constant macros are actually functions containing executable // All SIMD constant macros are actually functions containing executable
// code and therefore can't be used as compile time initializers. // code and therefore can't be used as compile time initializers.
#define m256_zero _mm256_setzero_si256() #define m256_zero _mm256_setzero_si256()
#define m256_one_256 mm256_mov64_256( 1 ) #define m256_one_256 mm256_mov64_256( 1 )
#define m256_one_128 \ #define m256_one_128 m256_const1_i128( 1 )
_mm256_permute4x64_epi64( _mm256_castsi128_si256( \ #define m256_one_64 _mm256_broadcastq_epi64( mm128_mov64_128( 1 ) )
mm128_mov64_128( 1 ) ), 0x44 ) #define m256_one_32 _mm256_broadcastd_epi32( mm128_mov64_128( 1 ) )
#define m256_one_64 _mm256_broadcastq_epi64( mm128_mov64_128( 1 ) ) #define m256_one_16 _mm256_broadcastw_epi16( mm128_mov64_128( 1 ) )
#define m256_one_32 _mm256_broadcastd_epi32( mm128_mov64_128( 1 ) ) #define m256_one_8 _mm256_broadcastb_epi8 ( mm128_mov64_128( 1 ) )
#define m256_one_16 _mm256_broadcastw_epi16( mm128_mov64_128( 1 ) )
#define m256_one_8 _mm256_broadcastb_epi8 ( mm128_mov64_128( 1 ) )
static inline __m256i mm256_neg1_fn() static inline __m256i mm256_neg1_fn()
{ {
__m256i a; __m256i v;
asm( "vpcmpeqq %0, %0, %0\n\t" : "=x"(a) ); asm( "vpcmpeqq %0, %0, %0\n\t" : "=x"(v) );
return a; return v;
} }
#define m256_neg1 mm256_neg1_fn() #define m256_neg1 mm256_neg1_fn()
// Consistent naming for similar operations.
// #define mm128_extr_lo128_256( v ) _mm256_castsi256_si128( v )
// Vector size conversion. #define mm128_extr_hi128_256( v ) _mm256_extracti128_si256( v, 1 )
//
// Allows operations on either or both halves of a 256 bit vector serially.
// Handy for parallel AES.
// Caveats when writing:
// _mm256_castsi256_si128 is free and without side effects.
// _mm256_castsi128_si256 is also free but leaves the high half
// undefined. That's ok if the hi half will be subseqnently assigned.
// If assigning both, do lo first, If assigning only 1, use
// _mm256_inserti128_si256.
//
#define mm128_extr_lo128_256( a ) _mm256_castsi256_si128( a )
#define mm128_extr_hi128_256( a ) _mm256_extracti128_si256( a, 1 )
// Extract integers from 256 bit vector, ineficient, avoid if possible..
#define mm256_extr_4x64( a3, a2, a1, a0, src ) \
do { \
__m128i hi = _mm256_extracti128_si256( src, 1 ); \
a0 = mm128_mov128_64( _mm256_castsi256_si128( src) ); \
a1 = _mm_extract_epi64( _mm256_castsi256_si128( src ), 1 ); \
a2 = mm128_mov128_64( hi ); \
a3 = _mm_extract_epi64( hi, 1 ); \
} while(0)
#define mm256_extr_8x32( a7, a6, a5, a4, a3, a2, a1, a0, src ) \
do { \
uint64_t t = _mm_extract_epi64( _mm256_castsi256_si128( src ), 1 ); \
__m128i hi = _mm256_extracti128_si256( src, 1 ); \
a0 = mm256_mov256_32( src ); \
a1 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 1 ); \
a2 = (uint32_t)( t ); \
a3 = (uint32_t)( t<<32 ); \
t = _mm_extract_epi64( hi, 1 ); \
a4 = mm128_mov128_32( hi ); \
a5 = _mm_extract_epi32( hi, 1 ); \
a6 = (uint32_t)( t ); \
a7 = (uint32_t)( t<<32 ); \
} while(0)
// Bytewise test of all 256 bits
#define mm256_all0_8( a ) \
( _mm256_movemask_epi8( a ) == 0 )
#define mm256_all1_8( a ) \
( _mm256_movemask_epi8( a ) == -1 )
#define mm256_anybits0( a ) \
( _mm256_movemask_epi8( a ) & 0xffffffff )
#define mm256_anybits1( a ) \
( ( _mm256_movemask_epi8( a ) & 0xffffffff ) != 0xffffffff )
// Bitwise test of all 256 bits
#define mm256_allbits0( a ) _mm256_testc_si256( a, m256_neg1 )
#define mm256_allbits1( a ) _mm256_testc_si256( m256_zero, a )
//#define mm256_anybits0( a ) !mm256_allbits1( a )
//#define mm256_anybits1( a ) !mm256_allbits0( a )
// Parallel AES, for when x is expected to be in a 256 bit register.
// Use same 128 bit key.
#if defined(__VAES__)
#define mm256_aesenc_2x128( x, k ) \
_mm256_aesenc_epi128( x, k )
#else
#define mm256_aesenc_2x128( x, k ) \
mm256_concat_128( _mm_aesenc_si128( mm128_extr_hi128_256( x ), k ), \
_mm_aesenc_si128( mm128_extr_lo128_256( x ), k ) )
#endif
#define mm256_paesenc_2x128( y, x, k ) do \
{ \
__m128i *X = (__m128i*)x; \
__m128i *Y = (__m128i*)y; \
Y[0] = _mm_aesenc_si128( X[0], k ); \
Y[1] = _mm_aesenc_si128( X[1], k ); \
} while(0);
// //
// Pointer casting // Pointer casting
@@ -201,13 +113,13 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
// //
// Basic operations without SIMD equivalent // Basic operations without SIMD equivalent
// Bitwise not ( ~x ) // Bitwise not ( ~v )
#define mm256_not( x ) _mm256_xor_si256( (x), m256_neg1 ) \ #define mm256_not( v ) _mm256_xor_si256( v, m256_neg1 ) \
// Unary negation of each element ( -a ) // Unary negation of each element ( -v )
#define mm256_negate_64( a ) _mm256_sub_epi64( m256_zero, a ) #define mm256_negate_64( v ) _mm256_sub_epi64( m256_zero, v )
#define mm256_negate_32( a ) _mm256_sub_epi32( m256_zero, a ) #define mm256_negate_32( v ) _mm256_sub_epi32( m256_zero, v )
#define mm256_negate_16( a ) _mm256_sub_epi16( m256_zero, a ) #define mm256_negate_16( v ) _mm256_sub_epi16( m256_zero, v )
// Add 4 values, fewer dependencies than sequential addition. // Add 4 values, fewer dependencies than sequential addition.
@@ -265,17 +177,14 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
#define mm256_ror_32 _mm256_ror_epi32 #define mm256_ror_32 _mm256_ror_epi32
#define mm256_rol_32 _mm256_rol_epi32 #define mm256_rol_32 _mm256_rol_epi32
#else #else // AVX2
// No AVX512, use fallback.
#define mm256_ror_64 mm256_ror_var_64 #define mm256_ror_64 mm256_ror_var_64
#define mm256_rol_64 mm256_rol_var_64 #define mm256_rol_64 mm256_rol_var_64
#define mm256_ror_32 mm256_ror_var_32 #define mm256_ror_32 mm256_ror_var_32
#define mm256_rol_32 mm256_rol_var_32 #define mm256_rol_32 mm256_rol_var_32
#endif // AVX512 else #endif // AVX512 else AVX2
#define mm256_ror_16( v, c ) \ #define mm256_ror_16( v, c ) \
_mm256_or_si256( _mm256_srli_epi16( v, c ), \ _mm256_or_si256( _mm256_srli_epi16( v, c ), \
@@ -285,46 +194,6 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
_mm256_or_si256( _mm256_slli_epi16( v, c ), \ _mm256_or_si256( _mm256_slli_epi16( v, c ), \
_mm256_srli_epi16( v, 16-(c) ) ) _mm256_srli_epi16( v, 16-(c) ) )
// Rotate bits in each element of v by the amount in corresponding element of
// index vector c
#define mm256_rorv_64( v, c ) \
_mm256_or_si256( \
_mm256_srlv_epi64( v, c ), \
_mm256_sllv_epi64( v, _mm256_sub_epi64( \
_mm256_set1_epi64x( 64 ), c ) ) )
#define mm256_rolv_64( v, c ) \
_mm256_or_si256( \
_mm256_sllv_epi64( v, c ), \
_mm256_srlv_epi64( v, _mm256_sub_epi64( \
_mm256_set1_epi64x( 64 ), c ) ) )
#define mm256_rorv_32( v, c ) \
_mm256_or_si256( \
_mm256_srlv_epi32( v, c ), \
_mm256_sllv_epi32( v, _mm256_sub_epi32( \
_mm256_set1_epi32( 32 ), c ) ) )
#define mm256_rolv_32( v, c ) \
_mm256_or_si256( \
_mm256_sllv_epi32( v, c ), \
_mm256_srlv_epi32( v, _mm256_sub_epi32( \
_mm256_set1_epi32( 32 ), c ) ) )
// AVX512 can do 16 bit elements.
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm256_rorv_16( v, c ) \
_mm256_or_si256( \
_mm256_srlv_epi16( v, _mm256_set1_epi16( c ) ), \
_mm256_sllv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )
#define mm256_rolv_16( v, c ) \
_mm256_or_si256( \
_mm256_sllv_epi16( v, _mm256_set1_epi16( c ) ), \
_mm256_srlv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )
#endif // AVX512
// //
// Rotate elements across all lanes. // Rotate elements across all lanes.
@@ -336,13 +205,26 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) #if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm256_swap_128( v ) _mm256_alignr_epi64( v, v, 2 ) static inline __m256i mm256_swap_128( const __m256i v )
#define mm256_ror_1x64( v ) _mm256_alignr_epi64( v, v, 1 ) { return _mm256_alignr_epi64( v, v, 2 ); }
#define mm256_rol_1x64( v ) _mm256_alignr_epi64( v, v, 3 )
#define mm256_ror_1x32( v ) _mm256_alignr_epi32( v, v, 1 ) static inline __m256i mm256_ror_1x64( const __m256i v )
#define mm256_rol_1x32( v ) _mm256_alignr_epi32( v, v, 7 ) { return _mm256_alignr_epi64( v, v, 1 ); }
#define mm256_ror_3x32( v ) _mm256_alignr_epi32( v, v, 3 )
#define mm256_rol_3x32( v ) _mm256_alignr_epi32( v, v, 5 ) static inline __m256i mm256_rol_1x64( const __m256i v )
{ return _mm256_alignr_epi64( v, v, 3 ); }
static inline __m256i mm256_ror_1x32( const __m256i v )
{ return _mm256_alignr_epi32( v, v, 1 ); }
static inline __m256i mm256_rol_1x32( const __m256i v )
{ return _mm256_alignr_epi32( v, v, 7 ); }
static inline __m256i mm256_ror_3x32( const __m256i v )
{ return _mm256_alignr_epi32( v, v, 3 ); }
static inline __m256i mm256_rol_3x32( const __m256i v )
{ return _mm256_alignr_epi32( v, v, 5 ); }
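// Illustrative sketch (hypothetical, assumes AVX512VL): rotating the
// 32 bit elements { 7,6,5,4,3,2,1,0 } right by one element gives
// { 0,7,6,5,4,3,2,1 }.
static inline __m256i mm256_ror_1x32_demo( void )
{ return mm256_ror_1x32( _mm256_set_epi32( 7, 6, 5, 4, 3, 2, 1, 0 ) ); }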
#else // AVX2 #else // AVX2
@@ -377,131 +259,18 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
#endif // AVX512 else AVX2 #endif // AVX512 else AVX2
// AVX512 can do 16 & 8 bit elements.
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// Rotate 256 bit vector by one 16 bit element.
#define mm256_ror_1x16( v ) \
_mm256_permutexvar_epi16( m256_const_64( \
0x0000000f000e000d, 0x000c000b000a0009, \
0x0008000700060005, 0x0004000300020001 ), v )
#define mm256_rol_1x16( v ) \
_mm256_permutexvar_epi16( m256_const_64( \
0x000e000d000c000b, 0x000a000900080007, \
0x0006000500040003, 0x000200010000000f ), v )
#if defined (__AVX512VBMI__)
// Rotate 256 bit vector by one byte.
#define mm256_ror_1x8( v ) _mm256_permutexvar_epi8( m256_const_64( \
0x001f1e1d1c1b1a19, 0x1817161514131211, \
0x100f0e0d0c0b0a09, 0x0807060504030201 ), v )
#define mm256_rol_1x8( v ) _mm256_permutexvar_epi16( m256_const_64( \
0x1e1d1c1b1a191817, 0x161514131211100f, \
0x0e0d0c0b0a090807, 0x060504030201001f ), v )
#endif // VBMI
#endif // AVX512
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm256_invert_64 ( v ) _mm256_permute4x64_epi64( v, 0x1b )
#define mm256_invert_32 ( v ) _mm256_permutevar8x32_epi32( v, \
m256_const_64( 0x0000000000000001, 0x0000000200000003 \
0x0000000400000005, 0x0000000600000007 )
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// Invert vector: {7,6,5,4,3,2,1,0} -> {0,1,2,3,4,5,6,7}
#define mm256_invert_16 ( v ) \
_mm256_permutexvar_epi16( m256_const_64( \
0x0000000100020003, 0x0004000500060007, \
0x00080009000a000b, 0x000c000d000e000f ), v )
#if defined(__AVX512VBMI__)
#define mm256_invert_8( v ) \
_mm256_permutexvar_epi8( m256_const_64( \
0x0001020304050607, 0x08090a0b0c0d0e0f, \
0x1011121314151617, 0x18191a1b1c1d1e1f ), v )
#endif // VBMI
#endif // AVX512
// //
// Rotate elements within each 128 bit lane of 256 bit vector. // Rotate elements within each 128 bit lane of 256 bit vector.
#define mm256_swap128_64( v ) _mm256_shuffle_epi32( v, 0x4e ) #define mm256_swap128_64( v ) _mm256_shuffle_epi32( v, 0x4e )
#define mm256_ror128_32( v ) _mm256_shuffle_epi32( v, 0x39 )
#define mm256_rol128_32( v ) _mm256_shuffle_epi32( v, 0x93 )
#define mm256_ror128_32( v ) _mm256_shuffle_epi32( v, 0x39 ) static inline __m256i mm256_ror128_x8( const __m256i v, const int c )
{ return _mm256_alignr_epi8( v, v, c ); }
#define mm256_rol128_32( v ) _mm256_shuffle_epi32( v, 0x93 )
#define mm256_ror128_x8( v, c ) _mm256_alignr_epi8( v, v, c )
/*
// Rotate each 128 bit lane by c elements.
#define mm256_ror128_8( v, c ) \
_mm256_or_si256( _mm256_bsrli_epi128( v, c ), \
_mm256_bslli_epi128( v, 16-(c) ) )
#define mm256_rol128_8( v, c ) \
_mm256_or_si256( _mm256_bslli_epi128( v, c ), \
_mm256_bsrli_epi128( v, 16-(c) ) )
*/
// Rotate elements in each 64 bit lane
#define mm256_swap64_32( v ) _mm256_shuffle_epi32( v, 0xb1 )
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm256_rol64_8( v, c ) _mm256_rol_epi64( v, ((c)<<3) )
#define mm256_ror64_8( v, c ) _mm256_ror_epi64( v, ((c)<<3) )
#else
#define mm256_rol64_8( v, c ) \
_mm256_or_si256( _mm256_slli_epi64( v, ( ( (c)<<3 ) ), \
_mm256_srli_epi64( v, ( ( 64 - ( (c)<<3 ) ) ) )
#define mm256_ror64_8( v, c ) \
_mm256_or_si256( _mm256_srli_epi64( v, ( ( (c)<<3 ) ), \
_mm256_slli_epi64( v, ( ( 64 - ( (c)<<3 ) ) ) )
#endif
// Rotate elements in each 32 bit lane
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm256_swap32_16( v ) _mm256_rol_epi32( v, 16 )
#define mm256_rol32_8( v ) _mm256_rol_epi32( v, 8 )
#define mm256_ror32_8( v ) _mm256_ror_epi32( v, 8 )
#else
#define mm256_swap32_16( v ) \
_mm256_or_si256( _mm256_slli_epi32( v, 16 ), \
_mm256_srli_epi32( v, 16 ) )
#define mm256_rol32_8( v ) \
_mm256_or_si256( _mm256_slli_epi32( v, 8 ), \
_mm256_srli_epi32( v, 8 ) )
#define mm256_ror32_8( v, c ) \
_mm256_or_si256( _mm256_srli_epi32( v, 8 ), \
_mm256_slli_epi32( v, 8 ) )
#endif
// Swap 32 bit elements in each 64 bit lane.
#define mm256_swap64_32( v ) _mm256_shuffle_epi32( v, 0xb1 )
// //
// Swap bytes in vector elements, endian bswap. // Swap bytes in vector elements, endian bswap.
@@ -26,9 +26,6 @@
// _mm512_permutex_epi64 only shuffles within 256 bit lanes. Permute // _mm512_permutex_epi64 only shuffles within 256 bit lanes. Permute
// usually shuffles across all lanes. // usually shuffles across all lanes.
// //
// Some instructions like cmp and blend use a mask regsiter now instead
// a mask vector.
//
// permutexvar has args reversed, index is first arg. Previously all // permutexvar has args reversed, index is first arg. Previously all
// permutes and shuffles have the index last. // permutes and shuffles have the index last.
// //
@@ -85,52 +82,43 @@
#define mm512_mov256_64( a ) mm128_mov128_64( _mm256_castsi512_si128( a ) ) #define mm512_mov256_64( a ) mm128_mov128_64( _mm256_castsi512_si128( a ) )
#define mm512_mov256_32( a ) mm128_mov128_32( _mm256_castsi512_si128( a ) ) #define mm512_mov256_32( a ) mm128_mov128_32( _mm256_castsi512_si128( a ) )
// A simple 128 bit permute, using function instead of macro avoids
// Insert and extract integers is a multistage operation. // problems if the v arg is passed as an expression.
// Insert integer into __m128i, then insert __m128i to __m256i, finally static inline __m512i mm512_perm_128( const __m512i v, const int c )
// insert __256i into __m512i. Reverse the order for extract. { return _mm512_shuffle_i64x2( v, v, c ); }
// Do not use __m512_insert_epi64 or _mm256_insert_epi64 to perform multiple
// inserts.
// Avoid small integers for multiple inserts.
// Shortcuts:
// Use castsi to reference the low bits of a vector or sub-vector. (free)
// Use mov to insert integer into low bits of vector or sub-vector. (cheap)
// Use _mm_insert only to reference the high bits of __m128i. (expensive)
// Sequence instructions to minimize data dependencies.
// Use const or const1 only when integer is either immediate or known to be in
// a GP register. Use set/set1 when data needs to be loaded from memory or
// cache.
// Concatenate two 256 bit vectors into one 512 bit vector {hi, lo} // Concatenate two 256 bit vectors into one 512 bit vector {hi, lo}
#define mm512_concat_256( hi, lo ) \ #define mm512_concat_256( hi, lo ) \
_mm512_inserti64x4( _mm512_castsi256_si512( lo ), hi, 1 ) _mm512_inserti64x4( _mm512_castsi256_si512( lo ), hi, 1 )
// Equivalent of set, assign 64 bit integers to respective 64 bit elements. // Equivalent of set, assign 64 bit integers to respective 64 bit elements.
// Use stack memory overlay
static inline __m512i m512_const_64( const uint64_t i7, const uint64_t i6, static inline __m512i m512_const_64( const uint64_t i7, const uint64_t i6,
const uint64_t i5, const uint64_t i4, const uint64_t i5, const uint64_t i4,
const uint64_t i3, const uint64_t i2, const uint64_t i3, const uint64_t i2,
const uint64_t i1, const uint64_t i0 ) const uint64_t i1, const uint64_t i0 )
{ {
__m256i hi, lo; union { __m512i m512i;
__m128i hi1, lo1; uint64_t u64[8]; } v;
lo = mm256_mov64_256( i0 ); v.u64[0] = i0; v.u64[1] = i1;
lo1 = mm128_mov64_128( i2 ); v.u64[2] = i2; v.u64[3] = i3;
hi = mm256_mov64_256( i4 ); v.u64[4] = i4; v.u64[5] = i5;
hi1 = mm128_mov64_128( i6 ); v.u64[6] = i6; v.u64[7] = i7;
lo = _mm256_castsi128_si256( return v.m512i;
_mm_insert_epi64( _mm256_castsi256_si128( lo ), i1, 1 ) );
lo1 = _mm_insert_epi64( lo1, i3, 1 );
hi = _mm256_castsi128_si256(
_mm_insert_epi64( _mm256_castsi256_si128( hi ), i5, 1 ) );
hi1 = _mm_insert_epi64( hi1, i7, 1 );
lo = _mm256_inserti128_si256( lo, lo1, 1 );
hi = _mm256_inserti128_si256( hi, hi1, 1 );
return mm512_concat_256( hi, lo );
} }
// Equivalent of set1, broadcast 64 bit constant to all 64 bit elements. // Equivalent of set1, broadcast lo element to all elements.
#define m512_const1_256( v ) _mm512_broadcast_i64x4( v ) static inline __m512i m512_const1_256( const __m256i v )
#define m512_const1_128( v ) _mm512_broadcast_i64x2( v ) { return _mm512_inserti64x4( _mm512_castsi256_si512( v ), v, 1 ); }
#define m512_const1_128( v ) \
mm512_perm_128( _mm512_castsi128_si512( v ), 0 )
// Integer input argument up to 64 bits
#define m512_const1_i128( i ) \
mm512_perm_128( _mm512_castsi128_si512( mm128_mov64_128( i ) ), 0 )
//#define m512_const1_256( v ) _mm512_broadcast_i64x4( v )
//#define m512_const1_128( v ) _mm512_broadcast_i64x2( v )
#define m512_const1_64( i ) _mm512_broadcastq_epi64( mm128_mov64_128( i ) ) #define m512_const1_64( i ) _mm512_broadcastq_epi64( mm128_mov64_128( i ) )
#define m512_const1_32( i ) _mm512_broadcastd_epi32( mm128_mov32_128( i ) ) #define m512_const1_32( i ) _mm512_broadcastd_epi32( mm128_mov32_128( i ) )
#define m512_const1_16( i ) _mm512_broadcastw_epi16( mm128_mov32_128( i ) ) #define m512_const1_16( i ) _mm512_broadcastw_epi16( mm128_mov32_128( i ) )
@@ -142,23 +130,17 @@ static inline __m512i m512_const_64( const uint64_t i7, const uint64_t i6,
#define m512_const2_64( i1, i0 ) \ #define m512_const2_64( i1, i0 ) \
m512_const1_128( m128_const_64( i1, i0 ) ) m512_const1_128( m128_const_64( i1, i0 ) )
#define m512_const2_32( i1, i0 ) \
m512_const1_64( ( (uint64_t)(i1) << 32 ) | ( (uint64_t)(i0) & 0xffffffff ) )
// { m128_1, m128_1, m128_0, m128_0 }
#define m512_const_2x128( v1, v0 ) \
m512_mask_blend_epi64( 0x0f, m512_const1_128( v1 ), m512_const1_128( v0 ) )
static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2, static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2,
const uint64_t i1, const uint64_t i0 ) const uint64_t i1, const uint64_t i0 )
{ {
__m256i lo = mm256_mov64_256( i0 ); union { __m512i m512i;
__m128i hi = mm128_mov64_128( i2 ); uint64_t u64[8]; } v;
lo = _mm256_castsi128_si256( v.u64[0] = v.u64[4] = i0;
_mm_insert_epi64( _mm256_castsi256_si128( v.u64[1] = v.u64[5] = i1;
lo ), i1, 1 ) ); v.u64[2] = v.u64[6] = i2;
hi = _mm_insert_epi64( hi, i3, 1 ); v.u64[3] = v.u64[7] = i3;
return _mm512_broadcast_i64x4( _mm256_inserti128_si256( lo, hi, 1 ) ); return v.m512i;
} }
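// Illustrative sketch (hypothetical): the 4 arguments are replicated across
// both 256 bit halves, so this returns { 3,2,1,0, 3,2,1,0 }.
static inline __m512i m512_const4_demo( void )
{ return m512_const4_64( 3, 2, 1, 0 ); }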
// //
@@ -170,14 +152,15 @@ static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2,
#define m512_zero _mm512_setzero_si512() #define m512_zero _mm512_setzero_si512()
#define m512_one_512 mm512_mov64_512( 1 ) #define m512_one_512 mm512_mov64_512( 1 )
#define m512_one_256 _mm512_broadcast_i64x4 ( mm256_mov64_256( 1 ) ) #define m512_one_256 _mm512_inserti64x4( m512_one_512, m256_one_256, 1 )
#define m512_one_128 _mm512_broadcast_i64x2 ( mm128_mov64_128( 1 ) ) #define m512_one_128 m512_const1_i128( 1 )
#define m512_one_64 _mm512_broadcastq_epi64( mm128_mov64_128( 1 ) ) #define m512_one_64 m512_const1_64( 1 )
#define m512_one_32 _mm512_broadcastd_epi32( mm128_mov64_128( 1 ) ) #define m512_one_32 m512_const1_32( 1 )
#define m512_one_16 _mm512_broadcastw_epi16( mm128_mov64_128( 1 ) ) #define m512_one_16 m512_const1_16( 1 )
#define m512_one_8 _mm512_broadcastb_epi8 ( mm128_mov64_128( 1 ) ) #define m512_one_8 m512_const1_8( 1 )
#define m512_neg1 m512_const1_64( 0xffffffffffffffff ) //#define m512_neg1 m512_const1_64( 0xffffffffffffffff )
#define m512_neg1 _mm512_movm_epi64( 0xff )
// //
// Basic operations without SIMD equivalent // Basic operations without SIMD equivalent
@@ -242,15 +225,6 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
_mm512_xor_si512( _mm512_xor_si512( a, b ), _mm512_xor_si512( c, d ) ) _mm512_xor_si512( _mm512_xor_si512( a, b ), _mm512_xor_si512( c, d ) )
// Horizontal vector testing
// Returns bit __mmask8
#define mm512_allbits0( a ) _mm512_cmpeq_epi64_mask( a, m512_zero )
#define mm512_allbits1( a ) _mm512_cmpeq_epi64_mask( a, m512_neg1 )
#define mm512_anybits0( a ) _mm512_cmpneq_epi64_mask( a, m512_neg1 )
#define mm512_anybits1( a ) _mm512_cmpneq_epi64_mask( a, m512_zero )
// //
// Bit rotations. // Bit rotations.
@@ -262,37 +236,47 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
// _mm512_rolv_epi64, _mm512_rorv_epi64, _mm512_rolv_epi32, _mm512_rorv_epi32 // _mm512_rolv_epi64, _mm512_rorv_epi64, _mm512_rolv_epi32, _mm512_rorv_epi32
// //
// For convenience and consistency with AVX2
#define mm512_ror_64 _mm512_ror_epi64 #define mm512_ror_64 _mm512_ror_epi64
#define mm512_rol_64 _mm512_rol_epi64 #define mm512_rol_64 _mm512_rol_epi64
#define mm512_ror_32 _mm512_ror_epi32 #define mm512_ror_32 _mm512_ror_epi32
#define mm512_rol_32 _mm512_rol_epi32 #define mm512_rol_32 _mm512_rol_epi32
#define mm512_ror_var_64( v, c ) \ static inline __m512i mm512_ror_var_64( const __m512i v, const int c )
_mm512_or_si512( _mm512_srli_epi64( v, c ), \ {
_mm512_slli_epi64( v, 64-(c) ) ) return _mm512_or_si512( _mm512_srli_epi64( v, c ),
_mm512_slli_epi64( v, 64-c ) );
}
#define mm512_rol_var_64( v, c ) \ static inline __m512i mm512_rol_var_64( const __m512i v, const int c )
_mm512_or_si512( _mm512_slli_epi64( v, c ), \ {
_mm512_srli_epi64( v, 64-(c) ) ) return _mm512_or_si512( _mm512_slli_epi64( v, c ),
_mm512_srli_epi64( v, 64-c ) );
}
#define mm512_ror_var_32( v, c ) \ static inline __m512i mm512_ror_var_32( const __m512i v, const int c )
_mm512_or_si512( _mm512_srli_epi32( v, c ), \ {
_mm512_slli_epi32( v, 32-(c) ) ) return _mm512_or_si512( _mm512_srli_epi32( v, c ),
_mm512_slli_epi32( v, 32-c ) );
}
#define mm512_rol_var_32( v, c ) \ static inline __m512i mm512_rol_var_32( const __m512i v, const int c )
_mm512_or_si512( _mm512_slli_epi32( v, c ), \ {
_mm512_srli_epi32( v, 32-(c) ) ) return _mm512_or_si512( _mm512_slli_epi32( v, c ),
_mm512_srli_epi32( v, 32-c ) );
}
// Here is a fixed bit rotate for 16 bit elements:
#define mm512_ror_16( v, c ) \
_mm512_or_si512( _mm512_srli_epi16( v, c ), \
_mm512_slli_epi16( v, 16-(c) )
#define mm512_rol_16( v, c ) \
_mm512_or_si512( _mm512_slli_epi16( v, c ), \
_mm512_srli_epi16( v, 16-(c) )
static inline __m512i mm512_ror_16( __m512i const v, const int c )
{
return _mm512_or_si512( _mm512_srli_epi16( v, c ),
_mm512_slli_epi16( v, 16-c ) );
}
static inline __m512i mm512_rol_16( const __m512i v, const int c )
{
return _mm512_or_si512( _mm512_slli_epi16( v, c ),
_mm512_srli_epi16( v, 16-c ) );
}
// Rotations using a vector control index are very slow due to overhead // Rotations using a vector control index are very slow due to overhead
// to generate the index vector. Repeated rotations using the same index // to generate the index vector. Repeated rotations using the same index
@@ -363,25 +347,32 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
// //
// Rotate elements in 512 bit vector. // Rotate elements in 512 bit vector.
static inline __m512i mm512_swap_256( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 4 ); }
#define mm512_swap_256( v ) _mm512_alignr_epi64( v, v, 4 ) static inline __m512i mm512_ror_1x128( const __m512i v )
{ return _mm512_alignr_epi64( v, v, 2 ); }
// 1x64 notation used to disinguish from bit rotation. static inline __m512i mm512_rol_1x128( const __m512i v )
#define mm512_ror_1x128( v ) _mm512_alignr_epi64( v, v, 2 ) { return _mm512_alignr_epi64( v, v, 6 ); }
#define mm512_rol_1x128( v ) _mm512_alignr_epi64( v, v, 6 )
#define mm512_ror_1x64( v ) _mm512_alignr_epi64( v, v, 1 ) static inline __m512i mm512_ror_1x64( const __m512i v )
#define mm512_rol_1x64( v ) _mm512_alignr_epi64( v, v, 7 ) { return _mm512_alignr_epi64( v, v, 1 ); }
#define mm512_ror_1x32( v ) _mm512_alignr_epi32( v, v, 1 ) static inline __m512i mm512_rol_1x64( const __m512i v )
#define mm512_rol_1x32( v ) _mm512_alignr_epi32( v, v, 15 ) { return _mm512_alignr_epi64( v, v, 7 ); }
// Generic for odd rotations static inline __m512i mm512_ror_1x32( const __m512i v )
#define mm512_ror_x64( v, n ) _mm512_alignr_epi64( v, v, n ) { return _mm512_alignr_epi32( v, v, 1 ); }
#define mm512_rol_x64( v, n ) _mm512_alignr_epi64( v, v, 8-(n) )
#define mm512_ror_x32( v, n ) _mm512_alignr_epi32( v, v, n ) static inline __m512i mm512_rol_1x32( const __m512i v )
#define mm512_rol_x32( v, n ) _mm512_alignr_epi32( v, v, 16-(n) ) { return _mm512_alignr_epi32( v, v, 15 ); }
static inline __m512i mm512_ror_x64( const __m512i v, const int n )
{ return _mm512_alignr_epi64( v, v, n ); }
static inline __m512i mm512_ror_x32( const __m512i v, const int n )
{ return _mm512_alignr_epi32( v, v, n ); }
#define mm512_ror_1x16( v ) \ #define mm512_ror_1x16( v ) \
_mm512_permutexvar_epi16( m512_const_64( \ _mm512_permutexvar_epi16( m512_const_64( \
@@ -411,38 +402,6 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
0x1E1D1C1B1A191817, 0x161514131211100F, \ 0x1E1D1C1B1A191817, 0x161514131211100F, \
0x0E0D0C0B0A090807, 0x060504030201003F ) ) 0x0E0D0C0B0A090807, 0x060504030201003F ) )
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm512_invert_256( v ) \
_mm512_permutexvar_epi64( v, m512_const_64( 3,2,1,0,7,6,5,4 ) )
#define mm512_invert_128( v ) \
_mm512_permutexvar_epi64( v, m512_const_64( 1,0,3,2,5,4,7,6 ) )
#define mm512_invert_64( v ) \
_mm512_permutexvar_epi64( v, m512_const_64( 0,1,2,3,4,5,6,7 ) )
#define mm512_invert_32( v ) \
_mm512_permutexvar_epi32( m512_const_64( \
0x0000000000000001,0x0000000200000003, \
0x0000000400000005,0x0000000600000007, \
0x0000000800000009,0x0000000a0000000b, \
0x0000000c0000000d,0x0000000e0000000f ), v )
#define mm512_invert_16( v ) \
_mm512_permutexvar_epi16( m512_const_64( \
0x0000000100020003, 0x0004000500060007, \
0x00080009000A000B, 0x000C000D000E000F, \
0x0010001100120013, 0x0014001500160017, \
0x00180019001A001B, 0x001C001D001E001F ), v )
#define mm512_invert_8( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x0001020304050607, 0x08090A0B0C0D0E0F, \
0x1011121314151617, 0x18191A1B1C1D1E1F, \
0x2021222324252627, 0x28292A2B2C2D2E2F, \
0x3031323334353637, 0x38393A3B3C3D3E3F ) )
// //
// Rotate elements within 256 bit lanes of 512 bit vector. // Rotate elements within 256 bit lanes of 512 bit vector.
@@ -450,11 +409,10 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
#define mm512_swap256_128( v ) _mm512_permutex_epi64( v, 0x4e ) #define mm512_swap256_128( v ) _mm512_permutex_epi64( v, 0x4e )
// Rotate 256 bit lanes by one 64 bit element // Rotate 256 bit lanes by one 64 bit element
#define mm512_ror256_64( v ) _mm512_permutex_epi64( v, 0x39 ) #define mm512_ror256_64( v ) _mm512_permutex_epi64( v, 0x39 )
#define mm512_rol256_64( v ) _mm512_permutex_epi64( v, 0x93 ) #define mm512_rol256_64( v ) _mm512_permutex_epi64( v, 0x93 )
// Rotate 256 bit lanes by one 32 bit element // Rotate 256 bit lanes by one 32 bit element
#define mm512_ror256_32( v ) \ #define mm512_ror256_32( v ) \
_mm512_permutexvar_epi32( m512_const_64( \ _mm512_permutexvar_epi32( m512_const_64( \
0x000000080000000f, 0x0000000e0000000d, \ 0x000000080000000f, 0x0000000e0000000d, \
@@ -488,68 +446,41 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
0x203f3e3d3c3b3a39, 0x3837363534333231, \ 0x203f3e3d3c3b3a39, 0x3837363534333231, \
0x302f2e2d2c2b2a29, 0x2827262524232221, \ 0x302f2e2d2c2b2a29, 0x2827262524232221, \
0x001f1e1d1c1b1a19, 0x1817161514131211, \ 0x001f1e1d1c1b1a19, 0x1817161514131211, \
0x100f0e0d0c0b0a09, 0x0807060504030201 ), v ) 0x100f0e0d0c0b0a09, 0x0807060504030201 ) )
#define mm512_rol256_8( v ) \ #define mm512_rol256_8( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \ _mm512_shuffle_epi8( v, m512_const_64( \
0x3e3d3c3b3a393837, 0x363534333231302f, \ 0x3e3d3c3b3a393837, 0x363534333231302f, \
0x2e2d2c2b2a292827, 0x262524232221203f, \ 0x2e2d2c2b2a292827, 0x262524232221203f, \
0x1e1d1c1b1a191817, 0x161514131211100f, \ 0x1e1d1c1b1a191817, 0x161514131211100f, \
0x0e0d0c0b0a090807, 0x060504030201001f ), v ) 0x0e0d0c0b0a090807, 0x060504030201001f ) )
// //
// Rotate elements within 128 bit lanes of 512 bit vector. // Rotate elements within 128 bit lanes of 512 bit vector.
// Swap hi & lo 64 bits in each 128 bit lane // Swap 64 bits in each 128 bit lane
#define mm512_swap128_64( v ) _mm512_shuffle_epi32( v, 0x4e ) #define mm512_swap128_64( v ) _mm512_shuffle_epi32( v, 0x4e )
// Rotate 128 bit lanes by one 32 bit element // Rotate 128 bit lanes by one 32 bit element
#define mm512_ror128_32( v ) _mm512_shuffle_epi32( v, 0x39 ) #define mm512_ror128_32( v ) _mm512_shuffle_epi32( v, 0x39 )
#define mm512_rol128_32( v ) _mm512_shuffle_epi32( v, 0x93 ) #define mm512_rol128_32( v ) _mm512_shuffle_epi32( v, 0x93 )
#define mm512_ror128_x8( v, c ) _mm512_alignr_epi8( v, v, c ) // Rotate right 128 bit lanes by c bytes
static inline __m512i mm512_ror128_x8( const __m512i v, const int c )
{ return _mm512_alignr_epi8( v, v, c ); }
/* // Swap 32 bits in each 64 bit lane.
// Rotate 128 bit lanes by c bytes, faster than building that monstrous #define mm512_swap64_32( v ) _mm512_shuffle_epi32( v, 0xb1 )
// constant above.
#define mm512_ror128_8( v, c ) \
_mm512_or_si512( _mm512_bsrli_epi128( v, c ), \
_mm512_bslli_epi128( v, 16-(c) ) )
#define mm512_rol128_8( v, c ) \
_mm512_or_si512( _mm512_bslli_epi128( v, c ), \
_mm512_bsrli_epi128( v, 16-(c) ) )
*/
//
// Rotate elements within 64 bit lanes.
#define mm512_rol64_x8( v, c ) _mm512_rol_epi64( v, ((c)<<3) )
#define mm512_ror64_x8( v, c ) _mm512_ror_epi64( v, ((c)<<3) )
// Swap 32 bit elements in each 64 bit lane
#define mm512_swap64_32( v ) _mm512_shuffle_epi32( v, 0xb1 )
// Rotate each 64 bit lane by one 16 bit element.
#define mm512_ror64_16( v ) _mm512_ror_epi64( v, 16 )
#define mm512_rol64_16( v ) _mm512_rol_epi64( v, 16 )
#define mm512_ror64_8( v ) _mm512_ror_epi64( v, 8 )
#define mm512_rol64_8( v ) _mm512_rol_epi64( v, 8 )
//
// Rotate elements within 32 bit lanes.
#define mm512_rol32_x8( v, c ) _mm512_rol_epi32( v, ((c)<<2) )
#define mm512_ror32_x8( v, c ) _mm512_ror_epi32( v, ((c)<<2) )
// //
// Rotate elements from 2 512 bit vectors in place, source arguments // Rotate elements from 2 512 bit vectors in place, source arguments
// are overwritten. // are overwritten.
#define mm512_swap1024_512(v1, v2) \ #define mm512_swap1024_512( v1, v2 ) \
v1 = _mm512_xor_si512(v1, v2); \ v1 = _mm512_xor_si512( v1, v2 ); \
v2 = _mm512_xor_si512(v1, v2); \ v2 = _mm512_xor_si512( v1, v2 ); \
v1 = _mm512_xor_si512(v1, v2); v1 = _mm512_xor_si512( v1, v2 );
#define mm512_ror1024_256( v1, v2 ) \ #define mm512_ror1024_256( v1, v2 ) \
do { \ do { \
@@ -1,18 +1,18 @@
#if !defined(SIMD_64_H__) #if !defined(SIMD_64_H__)
#define SIMD_64_H__ 1 #define SIMD_64_H__ 1
#if defined(__MMX__) #if defined(__MMX__) && defined(__SSE__)
//////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////
// //
// 64 bit MMX vectors. // 64 bit MMX vectors.
// //
// There are rumours MMX wil be removed. Although casting with int64 // This code is not used anywhere and likely never will. Its intent was
// works there is likely some overhead to move the data to An MMX register // to support 2 way parallel hashing using SSE2 for 64 bit, and MMX for 32
// and back. // bit hash functions, but was never implemented.
// Pseudo constants // Pseudo constants
/* /*
#define m64_zero _mm_setzero_si64() #define m64_zero _mm_setzero_si64()
#define m64_one_64 _mm_set_pi32( 0UL, 1UL ) #define m64_one_64 _mm_set_pi32( 0UL, 1UL )
@@ -30,79 +30,67 @@
#define casti_m64(p,i) (((__m64*)(p))[(i)]) #define casti_m64(p,i) (((__m64*)(p))[(i)])
// cast all arguments as the're likely to be uint64_t
// Bitwise not: ~(a) // Bitwise not: ~(a)
//#define mm64_not( a ) _mm_xor_si64( (__m64)a, m64_neg1 ) //#define mm64_not( a ) _mm_xor_si64( (__m64)a, m64_neg1 )
#define mm64_not( a ) ( (__m64)( ~( (uint64_t)(a) ) ) ) #define mm64_not( a ) ( (__m64)( ~( (uint64_t)(a) ) ) )
// Unary negate elements // Unary negate elements
#define mm64_negate_32( v ) _mm_sub_pi32( m64_zero, (__m64)v ) #define mm64_negate_32( v ) _mm_sub_pi32( m64_zero, v )
#define mm64_negate_16( v ) _mm_sub_pi16( m64_zero, (__m64)v ) #define mm64_negate_16( v ) _mm_sub_pi16( m64_zero, v )
#define mm64_negate_8( v ) _mm_sub_pi8( m64_zero, (__m64)v ) #define mm64_negate_8( v ) _mm_sub_pi8( m64_zero, v )
// Rotate bits in packed elements of 64 bit vector // Rotate bits in packed elements of 64 bit vector
#define mm64_rol_64( a, n ) \ #define mm64_rol_64( a, n ) \
_mm_or_si64( _mm_slli_si64( (__m64)(a), n ), \ _mm_or_si64( _mm_slli_si64( a, n ), \
_mm_srli_si64( (__m64)(a), 64-(n) ) ) _mm_srli_si64( a, 64-(n) ) )
#define mm64_ror_64( a, n ) \ #define mm64_ror_64( a, n ) \
_mm_or_si64( _mm_srli_si64( (__m64)(a), n ), \ _mm_or_si64( _mm_srli_si64( a, n ), \
_mm_slli_si64( (__m64)(a), 64-(n) ) ) _mm_slli_si64( a, 64-(n) ) )
#define mm64_rol_32( a, n ) \ #define mm64_rol_32( a, n ) \
_mm_or_si64( _mm_slli_pi32( (__m64)(a), n ), \ _mm_or_si64( _mm_slli_pi32( a, n ), \
_mm_srli_pi32( (__m64)(a), 32-(n) ) ) _mm_srli_pi32( a, 32-(n) ) )
#define mm64_ror_32( a, n ) \ #define mm64_ror_32( a, n ) \
_mm_or_si64( _mm_srli_pi32( (__m64)(a), n ), \ _mm_or_si64( _mm_srli_pi32( a, n ), \
_mm_slli_pi32( (__m64)(a), 32-(n) ) ) _mm_slli_pi32( a, 32-(n) ) )
#define mm64_rol_16( a, n ) \ #define mm64_rol_16( a, n ) \
_mm_or_si64( _mm_slli_pi16( (__m64)(a), n ), \ _mm_or_si64( _mm_slli_pi16( a, n ), \
_mm_srli_pi16( (__m64)(a), 16-(n) ) ) _mm_srli_pi16( a, 16-(n) ) )
#define mm64_ror_16( a, n ) \ #define mm64_ror_16( a, n ) \
_mm_or_si64( _mm_srli_pi16( (__m64)(a), n ), \ _mm_or_si64( _mm_srli_pi16( a, n ), \
_mm_slli_pi16( (__m64)(a), 16-(n) ) ) _mm_slli_pi16( a, 16-(n) ) )
// Rotate packed elements across lanes. Useful for byte swap and byte // Rotate packed elements across lanes. Useful for byte swap and byte
// rotation. // rotation.
// _mm_shuffle_pi8 requires SSSE3 while _mm_shuffle_pi16 requires SSE
// even though these are MMX instructions.
// Swap hi & lo 32 bits. // Swap hi & lo 32 bits.
#define mm64_swap32( a ) _mm_shuffle_pi16( (__m64)(a), 0x4e ) #define mm64_swap_32( a ) _mm_shuffle_pi16( a, 0x4e )
#define mm64_ror1x16_64( a ) _mm_shuffle_pi16( (__m64)(a), 0x39 ) #define mm64_ror64_1x16( a ) _mm_shuffle_pi16( a, 0x39 )
#define mm64_rol1x16_64( a ) _mm_shuffle_pi16( (__m64)(a), 0x93 ) #define mm64_rol64_1x16( a ) _mm_shuffle_pi16( a, 0x93 )
// Swap hi & lo 16 bits of each 32 bit element // Swap hi & lo 16 bits of each 32 bit element
#define mm64_swap16_32( a ) _mm_shuffle_pi16( (__m64)(a), 0xb1 ) #define mm64_swap32_16( a ) _mm_shuffle_pi16( a, 0xb1 )
#if defined(__SSSE3__) #if defined(__SSSE3__)
// Endian byte swap packed elements // Endian byte swap packed elements
// A vectorized version of the u64 bswap, use when data already in MMX reg.
#define mm64_bswap_64( v ) \
_mm_shuffle_pi8( (__m64)v, (__m64)0x0001020304050607 )
#define mm64_bswap_32( v ) \ #define mm64_bswap_32( v ) \
_mm_shuffle_pi8( (__m64)v, (__m64)0x0405060700010203 ) _mm_shuffle_pi8( v, (__m64)0x0405060700010203 )
#define mm64_bswap_16( v ) \ #define mm64_bswap_16( v ) \
_mm_shuffle_pi8( (__m64)v, (__m64)0x0607040502030001 ); _mm_shuffle_pi8( v, (__m64)0x0607040502030001 );
// Rotate right by c bytes
static inline __m64 mm64_ror_x8( __m64 v, const int c )
{ return _mm_alignr_pi8( v, v, c ); }
#else #else
#define mm64_bswap_64( v ) \
(__m64)__builtin_bswap64( (uint64_t)v )
// These exist only for compatibility with CPUs without SSSE3. MMX doesn't
// have an extract 32 instruction so pointers are needed to access elements.
// It's more efficient for the caller to use scalar variables and call
// bswap_32 directly.
#define mm64_bswap_32( v ) \ #define mm64_bswap_32( v ) \
_mm_set_pi32( __builtin_bswap32( ((uint32_t*)&v)[1] ), \ _mm_set_pi32( __builtin_bswap32( ((uint32_t*)&v)[1] ), \
__builtin_bswap32( ((uint32_t*)&v)[0] ) ) __builtin_bswap32( ((uint32_t*)&v)[0] ) )
@@ -115,17 +103,6 @@
#endif #endif
// 64 bit mem functions use integral sizes instead of bytes, data must
// be aligned to 64 bits.
static inline void memcpy_m64( __m64 *dst, const __m64 *src, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = src[i]; }
static inline void memset_zero_m64( __m64 *src, int n )
{ for ( int i = 0; i < n; i++ ) src[i] = (__m64)0ULL; }
static inline void memset_m64( __m64 *dst, const __m64 a, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }
#endif // MMX #endif // MMX
#endif // SIMD_64_H__ #endif // SIMD_64_H__
@@ -1,69 +1,16 @@
#if !defined(SIMD_INT_H__) #if !defined(SIMD_INT_H__)
#define SIMD_INT_H__ 1 #define SIMD_INT_H__ 1
///////////////////////////////////
//
// Integers up to 128 bits.
//
// These utilities enhance support for integers up to 128 bits.
// All standard operations are supported on 128 bit integers except
// numeric constant representation and IO. 128 bit integers must be built
// and displayed as 2 64 bit halves, just like the old times.
//
// Some utilities are also provided for smaller integers, most notably
// bit rotation.
// MMX has no extract instruction for 32 bit elements so this:
// Lo is trivial, high is a simple shift.
// Input may be uint64_t or __m64, returns uint32_t.
#define u64_extr_lo32(a) ( (uint32_t)( (uint64_t)(a) ) )
#define u64_extr_hi32(a) ( (uint32_t)( ((uint64_t)(a)) >> 32) )
#define u64_extr_32( a, n ) ( (uint32_t)( (a) >> ( ( 2-(n)) <<5 ) ) )
#define u64_extr_16( a, n ) ( (uint16_t)( (a) >> ( ( 4-(n)) <<4 ) ) )
#define u64_extr_8( a, n ) ( (uint8_t) ( (a) >> ( ( 8-(n)) <<3 ) ) )
// Rotate bits in various sized integers.
#define u64_ror_64( x, c ) \
(uint64_t)( ( (uint64_t)(x) >> (c) ) | ( (uint64_t)(x) << (64-(c)) ) )
#define u64_rol_64( x, c ) \
(uint64_t)( ( (uint64_t)(x) << (c) ) | ( (uint64_t)(x) >> (64-(c)) ) )
#define u32_ror_32( x, c ) \
(uint32_t)( ( (uint32_t)(x) >> (c) ) | ( (uint32_t)(x) << (32-(c)) ) )
#define u32_rol_32( x, c ) \
(uint32_t)( ( (uint32_t)(x) << (c) ) | ( (uint32_t)(x) >> (32-(c)) ) )
#define u16_ror_16( x, c ) \
(uint16_t)( ( (uint16_t)(x) >> (c) ) | ( (uint16_t)(x) << (16-(c)) ) )
#define u16_rol_16( x, c ) \
(uint16_t)( ( (uint16_t)(x) << (c) ) | ( (uint16_t)(x) >> (16-(c)) ) )
#define u8_ror_8( x, c ) \
(uint8_t) ( ( (uint8_t) (x) >> (c) ) | ( (uint8_t) (x) << ( 8-(c)) ) )
#define u8_rol_8( x, c ) \
(uint8_t) ( ( (uint8_t) (x) << (c) ) | ( (uint8_t) (x) >> ( 8-(c)) ) )
// Endian byte swap // Endian byte swap
#define bswap_64( a ) __builtin_bswap64( a ) #define bswap_64( a ) __builtin_bswap64( a )
#define bswap_32( a ) __builtin_bswap32( a ) #define bswap_32( a ) __builtin_bswap32( a )
// 64 bit mem functions use integral sizes instead of bytes, data must
// be aligned to 64 bits. Mostly for scaled indexing convenience.
static inline void memcpy_64( uint64_t *dst, const uint64_t *src, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = src[i]; }
static inline void memset_zero_64( uint64_t *src, int n )
{ for ( int i = 0; i < n; i++ ) src[i] = 0ull; }
static inline void memset_64( uint64_t *dst, const uint64_t a, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }
/////////////////////////////////////// ///////////////////////////////////////
// //
// 128 bit integers // 128 bit integers
// //
// 128 bit integers are inefficient and not a shortcut for __m128i. // 128 bit integers are inefficient and not a shortcut for __m128i.
// Native type __int128 supported starting with GCC-4.8. // Native type __int128 supported starting with GCC-4.8.
// //
// __int128 uses two 64 bit GPRs to hold the data. The main benefits are // __int128 uses two 64 bit GPRs to hold the data. The main benefits are
@@ -94,31 +41,12 @@ static inline void memset_64( uint64_t *dst, const uint64_t a, int n )
typedef __int128 int128_t; typedef __int128 int128_t;
typedef unsigned __int128 uint128_t; typedef unsigned __int128 uint128_t;
// Maybe usefull for making constants.
#define mk_uint128( hi, lo ) \
( ( (uint128_t)(hi) << 64 ) | ( (uint128_t)(lo) ) )
// Extracting the low bits is a trivial cast. // Extracting the low bits is a trivial cast.
// These specialized functions are optimized while providing a // These specialized functions are optimized while providing a
// consistent interface. // consistent interface.
#define u128_hi64( x ) ( (uint64_t)( (uint128_t)(x) >> 64 ) ) #define u128_hi64( x ) ( (uint64_t)( (uint128_t)(x) >> 64 ) )
#define u128_lo64( x ) ( (uint64_t)(x) ) #define u128_lo64( x ) ( (uint64_t)(x) )
// Generic extract, don't use for extracting low bits, cast instead.
#define u128_extr_64( a, n ) ( (uint64_t)( (a) >> ( ( 2-(n)) <<6 ) ) )
#define u128_extr_32( a, n ) ( (uint32_t)( (a) >> ( ( 4-(n)) <<5 ) ) )
#define u128_extr_16( a, n ) ( (uint16_t)( (a) >> ( ( 8-(n)) <<4 ) ) )
#define u128_extr_8( a, n ) ( (uint8_t) ( (a) >> ( (16-(n)) <<3 ) ) )
// Not much need for this but it fills a gap.
#define u128_ror_128( x, c ) \
( ( (uint128_t)(x) >> (c) ) | ( (uint128_t)(x) << (128-(c)) ) )
#define u128_rol_128( x, c ) \
( ( (uint128_t)(x) << (c) ) | ( (uint128_t)(x) >> (128-(c)) ) )
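// Illustrative example (hypothetical): 128 bit values are built and split
// as two 64 bit halves.
static inline uint64_t u128_demo( void )
{
   uint128_t x = ( (uint128_t)0x0123456789abcdefULL << 64 )
               | (uint128_t)0xfedcba9876543210ULL;
   return u128_hi64( x ) ^ u128_lo64( x );   // hi ^ lo
}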
#endif // GCC_INT128 #endif // GCC_INT128
#endif // SIMD_INT_H__ #endif // SIMD_INT_H__