mirror of
https://github.com/JayDDee/cpuminer-opt.git
synced 2025-09-17 23:44:27 +00:00
Compare commits
46 Commits
5b678d2481
90137b391e
8727d79182
17ccbc328f
0e3945ddb5
7d2ef7973d
e6fd9b1d69
1a234cbe53
47cc5dcff5
2cd1507c2e
9b905fccc8
92b3733925
19cc88d102
a053690170
3c5e8921b7
f3333b0070
902ec046dd
d0b4941321
40089428c5
dc6b007a18
06bfaa1249
6566e99a13
ccfccbadd5
45ecd0de14
4fa8fcea8b
c85fb3842b
cdd587537e
51a1d91abd
13563e2598
9571f85d53
0e69756634
9653bca1e2
1c0719e8a4
8b4b4dc613
e76feaced8
5e088d00d0
972d4d70db
e96a6bd699
fb9163185a
6e8b8ed34f
c0aadbcc99
3da149418a
720610cce5
cedcf4d070
81b50c3c71
0e1e88f53e
@@ -32,14 +32,26 @@ but different package names.

$ sudo apt-get install build-essential automake libssl-dev libcurl4-openssl-dev libjansson-dev libgmp-dev zlib1g-dev git

SHA support on AMD Ryzen CPUs requires gcc version 5 or higher and
openssl 1.1.0e or higher.

znver1 and znver2 should be recognized by most recent versions of GCC, and
znver3 is expected with GCC 11. GCC 11 also includes rocketlake support.
In the meantime here are some suggestions to compile for new CPUs:

"-march=znver1" for Ryzen 1000 & 2000 series, znver2 for 3000.
"-march=native" is usually the best choice, used by build.sh.

"-march=znver2 -mvaes" can be used for Ryzen 5000 if znver3 is not recognized.

"-mcascadelake -msha" or
"-mcometlake -mavx512 -msha" can be used for Rocket Lake.

Features can also be added individually:

"-msha" adds support for HW accelerated sha256.

"-mavx512" adds support for 512 bit vectors.

"-mvaes" adds support for parallel AES.

Additional instructions for static compilation can be found here:
https://lxadm.com/Static_compilation_of_cpuminer
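Before picking flags it can help to confirm what the compiler actually
enables for your CPU. A minimal check using standard GCC options, nothing
specific to cpuminer-opt:

$ gcc -march=native -Q --help=target | grep -E 'march|msha|mavx2|mavx512f|mvaes'

Then configure and build with the flags you settled on, for example:

$ CFLAGS="-O3 -march=native -Wall" ./configure --with-curl
$ make -j "$(nproc)"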
INSTALL_WINDOWS
@@ -1,5 +1,9 @@

Instructions for compiling cpuminer-opt for Windows.

These instructions may be out of date. Please consult the wiki for
the latest:

https://github.com/JayDDee/cpuminer-opt/wiki/Compiling-from-source

Windows compilation using Visual Studio is not supported. Mingw64 is
used on a Linux system (bare metal or virtual machine) to cross-compile

@@ -24,79 +28,76 @@ Refer to Linux compile instructions and install required packages.

Additionally, install mingw-w64.

$ sudo apt-get install mingw-w64 libz-mingw-w64-dev

2. Create a local library directory for packages to be compiled in the next
step. Suggested location is $HOME/usr/lib/

$ mkdir $HOME/usr/lib

3. Download and build other packages for mingw that don't have a mingw64
version available in the repositories.

Download the following source code packages from their respective and
respected download locations, copy them to $HOME/usr/lib/ and uncompress them.

openssl: https://github.com/openssl/openssl/releases
curl:    https://github.com/curl/curl/releases
gmp:     https://gmplib.org/download/gmp/

In most cases the latest version is ok but it's safest to download the same
major and minor version as included in your distribution. The following uses
versions from Ubuntu 20.04. Change version numbers as required.

Run the following commands or follow the supplied instructions. Do not run
"make install" unless you are using /usr/lib, which isn't recommended.

Some instructions insist on running "make check". If make check fails it may
still work, YMMV.

You can speed up "make" by using all CPU cores available with "-j n" where n
is the number of CPU threads you want to use.

openssl:

$ ./Configure mingw64 shared --cross-compile-prefix=x86_64-w64-mingw32-
$ make

Make may fail with an ld error, just ensure libcrypto-1_1-x64.dll is created.

curl:

$ ./configure --with-winssl --with-winidn --host=x86_64-w64-mingw32
$ make

gmp:

$ ./configure --host=x86_64-w64-mingw32
$ make

4. Tweak the environment.

This step is required every time you log in, or the commands can be added to
.bashrc.

Define some local variables to point to the local library.

$ export LOCAL_LIB="$HOME/usr/lib"

$ export LDFLAGS="-L$LOCAL_LIB/curl/lib/.libs -L$LOCAL_LIB/gmp/.libs -L$LOCAL_LIB/openssl"

$ export CONFIGURE_ARGS="--with-curl=$LOCAL_LIB/curl --with-crypto=$LOCAL_LIB/openssl --host=x86_64-w64-mingw32"

Adjust for the gcc version:

$ export GCC_MINGW_LIB="/usr/lib/gcc/x86_64-w64-mingw32/9.3-win32"

Create a release directory and copy some dll files previously built. This can
be done outside of cpuminer-opt and only needs to be done once. If the release
directory is in the cpuminer-opt directory it needs to be recreated every time
a source package is decompressed.

$ mkdir release
$ cp /usr/x86_64-w64-mingw32/lib/zlib1.dll release/
$ cp /usr/x86_64-w64-mingw32/lib/libwinpthread-1.dll release/
$ cp $GCC_MINGW_LIB/libstdc++-6.dll release/
$ cp $GCC_MINGW_LIB/libgcc_s_seh-1.dll release/
$ cp $LOCAL_LIB/openssl/libcrypto-1_1-x64.dll release/
$ cp $LOCAL_LIB/curl/lib/.libs/libcurl-4.dll release/

The following steps need to be done every time a new source package is
opened.

@@ -110,63 +111,48 @@ https://github.com/JayDDee/cpuminer-opt/releases

Decompress and change to the cpuminer-opt directory.

6. Compile

Create a link to the locally compiled version of gmp.h

$ ln -s $LOCAL_LIB/gmp-version/gmp.h ./gmp.h

$ ./autogen.sh

Configure the compiler for the CPU architecture of the host machine:

CFLAGS="-O3 -march=native -Wall" ./configure $CONFIGURE_ARGS

or cross compile for a specific CPU architecture:

CFLAGS="-O3 -march=znver1 -Wall" ./configure $CONFIGURE_ARGS

This will compile for AMD Ryzen.

You can compile more generically for a set of specific CPU features if you
know what features you want:

CFLAGS="-O3 -maes -msse4.2 -Wall" ./configure $CONFIGURE_ARGS

This will compile for an older CPU that does not have AVX.

You can find several examples in README.txt

If you have a CPU with more than 64 threads and Windows 7 or higher you can
enable the CPU Groups feature by adding the following to CFLAGS:

"-D_WIN32_WINNT=0x0601"
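For example, the define can simply be appended to the other tuning options,
reusing $CONFIGURE_ARGS from step 4:

CFLAGS="-O3 -march=native -Wall -D_WIN32_WINNT=0x0601" ./configure $CONFIGURE_ARGS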
Once you have run configure successfully, run the compiler with n CPU threads:

$ make -j n

Copy cpuminer.exe to the release directory, compress and copy the release
directory to a Windows system and run cpuminer.exe from the command line.

Run cpuminer

In a command window change directories to the unzipped release folder. To get
a list of all options:

cpuminer.exe --help

Command options are specific to where you mine. Refer to the pool's
instructions on how to set them.
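As an illustration only, with the pool URL, port and credentials below being
placeholders rather than recommendations, a typical stratum invocation looks
like:

cpuminer.exe -a x16rv2 -o stratum+tcp://pool.example.com:3333 -u YourWalletAddress -p x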
Makefile.am
@@ -21,6 +21,7 @@ cpuminer_SOURCES = \

  api.c \
  sysinfos.c \
  algo-gate-api.c\
  malloc-huge.c \
  algo/argon2/argon2a/argon2a.c \
  algo/argon2/argon2a/ar2/argon2.c \
  algo/argon2/argon2a/ar2/opt.c \

@@ -85,6 +86,7 @@ cpuminer_SOURCES = \

  algo/groestl/aes_ni/hash-groestl.c \
  algo/groestl/aes_ni/hash-groestl256.c \
  algo/fugue/sph_fugue.c \
  algo/fugue/fugue-aesni.c \
  algo/hamsi/sph_hamsi.c \
  algo/hamsi/hamsi-hash-4way.c \
  algo/haval/haval.c \

@@ -128,7 +130,7 @@ cpuminer_SOURCES = \

  algo/lyra2/allium.c \
  algo/lyra2/phi2-4way.c \
  algo/lyra2/phi2.c \
  algo//m7m/m7m.c \
  algo/m7m/m7m.c \
  algo/m7m/magimath.cpp \
  algo/nist5/nist5-gate.c \
  algo/nist5/nist5-4way.c \

@@ -157,13 +159,20 @@ cpuminer_SOURCES = \

  algo/ripemd/lbry.c \
  algo/ripemd/lbry-4way.c \
  algo/scrypt/scrypt.c \
  algo/scrypt/scrypt-core-4way.c \
  algo/scrypt/neoscrypt.c \
  algo/sha/sha256-hash.c \
  algo/sha/sph_sha2.c \
  algo/sha/sph_sha2big.c \
  algo/sha/sha256-hash-4way.c \
  algo/sha/sha512-hash-4way.c \
  algo/sha/sha256-hash-opt.c \
  algo/sha/sha256-hash-2way-ni.c \
  algo/sha/hmac-sha256-hash.c \
  algo/sha/hmac-sha256-hash-4way.c \
  algo/sha/sha256d.c \
  algo/sha/sha2.c \
  algo/sha/sha256d-4way.c \
  algo/sha/sha256t-gate.c \
  algo/sha/sha256t-4way.c \
  algo/sha/sha256t.c \

@@ -190,6 +199,11 @@ cpuminer_SOURCES = \

  algo/sm3/sm3-hash-4way.c \
  algo/swifftx/swifftx.c \
  algo/tiger/sph_tiger.c \
  algo/verthash/verthash-gate.c \
  algo/verthash/Verthash.c \
  algo/verthash/fopen_utf8.c \
  algo/verthash/tiny_sha3/sha3.c \
  algo/verthash/tiny_sha3/sha3-4way.c \
  algo/whirlpool/sph_whirlpool.c \
  algo/whirlpool/whirlpool-hash-4way.c \
  algo/whirlpool/whirlpool-gate.c \

@@ -256,6 +270,7 @@ cpuminer_SOURCES = \

  algo/x16/hex.c \
  algo/x16/x21s-4way.c \
  algo/x16/x21s.c \
  algo/x16/minotaur.c \
  algo/x17/x17-gate.c \
  algo/x17/x17.c \
  algo/x17/x17-4way.c \
README.md
@@ -37,25 +37,25 @@ Requirements
------------

1. An x86_64 architecture CPU with a minimum of SSE2 support. This includes
Intel Core2 and newer and AMD equivalents. Further optimizations are available
on some algorithms for CPUs with AES, AVX, AVX2, SHA, AVX512 and VAES.

Older CPUs are supported by cpuminer-multi by TPruvot but at reduced
performance.

ARM and Aarch64 CPUs are not supported.

2. 64 bit Linux or Windows OS. Ubuntu and Fedora based distributions,
including Mint and Centos, are known to work and have all dependencies
in their repositories. Others may work but may require more effort. Older
versions such as Centos 6 don't work due to missing features.
64 bit Windows OS is supported with mingw_w64 and msys or pre-built binaries.

MacOS, OSx and Android are not supported.

3. Stratum pool supporting stratum+tcp:// or stratum+ssl:// protocols or
RPC getwork using http:// or https://. GBT is YMMV.

Supported Algorithms
--------------------

@@ -89,10 +89,11 @@ Supported Algorithms

  lyra2h        Hppcoin
  lyra2re       lyra2
  lyra2rev2     lyra2v2
  lyra2rev3     lyrav2v3
  lyra2z
  lyra2z330     Lyra2 330 rows, Zoin (ZOI)
  m7m           Magi (XMG)
  minotaur      Ringcoin (RNG)
  myr-gr        Myriad-Groestl
  neoscrypt     NeoScrypt(128, 2, 1)
  nist5         Nist5

@@ -121,6 +122,7 @@ Supported Algorithms

  tribus        Denarius (DNR)
  vanilla       blake256r8vnl (VCash)
  veltor        (VLT)
  verthash      Vertcoin
  whirlpool
  whirlpoolx
  x11           Dash

@@ -133,7 +135,7 @@ Supported Algorithms

  x14           X14
  x15           X15
  x16r
  x16rv2
  x16rt         Gincoin (GIN)
  x16rt-veil    Veil (VEIL)
  x16s          Pigeoncoin (PGN)

@@ -152,6 +154,27 @@ Supported Algorithms

  yespower-b2b  generic yespower + blake2b
  zr5           Ziftr

Many variations of scrypt based algos can be mined by specifying their
parameters:

scryptn2:      --algo scrypt --param-n 1048576

cpupower:      --algo yespower --param-key "CPUpower: The number of CPU working or available for proof-of-work mining"

power2b:       --algo yespower-b2b --param-n 2048 --param-r 32 --param-key "Now I am become Death, the destroyer of worlds"

sugarchain:    --algo yespower --param-n 2048 --param-r 32 --param-key "Satoshi Nakamoto 31/Oct/2008 Proof-of-work is essentially one-CPU-one-vote"

yespoweriots:  --algo yespower --param-n 2048 --param-key "Iots is committed to the development of IOT"

yespowerlitb:  --algo yespower --param-n 2048 --param-r 32 --param-key "LITBpower: The number of LITB working or available for proof-of-work mini"

yespoweric:    --algo yespower --param-n 2048 --param-r 32 --param-key "IsotopeC"

yespowerurx:   --algo yespower --param-n 2048 --param-r 32 --param-key "UraniumX"

yespowerltncg: --algo yespower --param-n 2048 --param-r 32 --param-key "LTNCGYES"
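For illustration, these parameter options combine with the usual connection
options; the pool URL and wallet below are placeholders:

cpuminer -a yespower --param-n 2048 --param-r 32 --param-key "UraniumX" -o stratum+tcp://pool.example.com:4550 -u YourWalletAddress -p x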
Errata
------
README.txt
@@ -1,8 +1,12 @@

This file is included in the Windows binary package. Compile instructions
for Linux and Windows can be found in RELEASE_NOTES.

This package is officially available only from:
https://github.com/JayDDee/cpuminer-opt
No other sources should be trusted.

cpuminer is a console program that is executed from a DOS or Powershell
prompt. There is no GUI and no mouse support.

Miner programs are often flagged as malware by antivirus programs. This is
a false positive, they are flagged simply because they are cryptocurrency

@@ -10,18 +14,18 @@ miners. The source code is open for anyone to inspect. If you don't trust
the software, don't use it.

Choose the exe that best matches your CPU's features or use trial and
error to find the fastest one that works. Pay attention to
the features listed at cpuminer startup to ensure you are mining at
optimum speed using the best available features.

Architecture names and compile options used are only provided for
mainstream desktop CPUs. Budget CPUs like Pentium and Celeron are often
missing some features. Check your CPU.

Support for AMD CPUs older than Ryzen is incomplete and without specific
recommendations. Find the best fit. CPUs older than Piledriver, including
Athlon x2 and Phenom II x4, are not supported by cpuminer-opt due to an
incompatible implementation of SSE2 on these CPUs.

More information for Intel and AMD CPU architectures and their features
can be found on Wikipedia.

@@ -30,15 +34,34 @@ https://en.wikipedia.org/wiki/List_of_Intel_CPU_microarchitectures

https://en.wikipedia.org/wiki/List_of_AMD_CPU_microarchitectures

File name                      Architecture name

cpuminer-sse2.exe              Core2, Nehalem, generic x86_64 with SSE2
cpuminer-aes-sse42.exe         Westmere
cpuminer-avx.exe               Sandybridge, Ivybridge
cpuminer-avx2.exe              Haswell, Skylake, Kabylake, Coffeelake, Cometlake
cpuminer-avx2-sha.exe          AMD Zen1, Zen2
cpuminer-avx2-sha-vaes.exe     Intel Alderlake*, AMD Zen3
cpuminer-avx512.exe            Intel HEDT Skylake-X, Cascadelake
cpuminer-avx512-sha-vaes.exe   Icelake, Tigerlake, Rocketlake

Exe name                 Compile flags              Arch name

cpuminer-sse2.exe        "-msse2"                   Core2, Nehalem
cpuminer-aes-sse42.exe   "-march=westmere"          Westmere
cpuminer-avx.exe         "-march=corei7-avx"        Sandybridge
cpuminer-avx2.exe        "-march=core-avx2 -maes"   Haswell, Skylake, Coffeelake
cpuminer-avx512.exe      "-march=skylake-avx512"    Skylake-X, Cascadelake-X
cpuminer-zen             "-march=znver1"            AMD Ryzen, Threadripper

* Alderlake is a hybrid architecture. With the E-cores disabled it may be
possible to enable AVX512 on the P-cores and use the avx512-sha-vaes
build. This is not officially supported by Intel at time of writing.
Check for current information.

Notes about included DLL files:

Downloading DLL files from alternative sources presents an inherent
security risk if their source is unknown. All DLL files included have
been copied from the Ubuntu-20.04 installation or compiled by me from
source code obtained from the author's official repository. The exact
procedure is documented in the build instructions for Windows:

https://github.com/JayDDee/cpuminer-opt/wiki/Compiling-from-source

Some DLL files may already be installed on the system by Windows or third
party packages. They often will work and may be used instead of the included
file.

If you like this software feel free to donate:
RELEASE_NOTES
@@ -44,7 +44,7 @@ Please include the following information:

1. CPU model, operating system, cpuminer-opt version (must be latest),
binary file for Windows, changes to default build procedure for Linux.

2. Exact command line (except user and pw) and initial output showing
the above requested info.

3. Additional program output showing any error messages or other

@@ -65,6 +65,406 @@ If not what makes it happen or not happen?

Change Log
----------

v3.19.6

#363 Fixed a stratum bug where the first job may be ignored, delaying the
start of hashing.
Fixed handling of nonce exhaustion when hashing a fast algo with extranonce
disabled.
Small optimization to Shavite.

v3.19.5

Enhanced stratum-keepalive preemptively resets the stratum connection
before the server does, to avoid lost shares.

Added build-msys2.sh script for easier compiling on Windows, see Wiki for
details.

X16RT: eliminate unnecessary recalculations of the hash order.

Fix a few compiler warnings.

Fixed log colour error when a block is solved.

v3.19.4

#359: Fix verthash memory allocation for non-hugepages, broken in v3.19.3.

New option stratum-keepalive prevents stratum timeouts when no shares are
submitted for several minutes due to high difficulty.

Fixed a bug displaying optimizations for some algos.

v3.19.3

Linux: Faster verthash (+25%), scryptn2 (+2%) when huge pages are available.

Small speed up for Hamsi AVX2 & AVX512, Keccak AVX512.

v3.19.2

Fixed log displaying incorrect memory usage for scrypt, broken in v3.19.1.

Reduce log noise when replies to submitted shares are lost due to stratum
errors.

Fugue prehash optimization for X16r family AVX2 & AVX512.

Small speed improvement for Hamsi AVX2 & AVX512.

Win: With CPU groups enabled the number of CPUs displayed in the ASCII art
affinity map is the number of CPUs in a CPU group, was number of CPUs up to 64.

v3.19.1

Changes to Windows binaries package:
- builds for CPUs with AVX or lower have CPU groups disabled,
- zen3 build renamed to avx2-sha-vaes to support Alderlake as well as Zen3,
- zen build renamed to avx2-sha, supports Zen1 & Zen2,
- avx512-sha build removed, Rocketlake CPUs can use avx512-sha-vaes,
- see README.txt for compatibility details.

Fixed a few compiler warnings that are new in GCC 11.
Other minor fixes.

v3.19.0

Windows binaries now built with support for CPU groups, requires Windows 7.

Changes to cpu-affinity:
- PR#346: Fixed incorrect CPU affinity on Windows built for CPU groups,
- added support for CPU affinity for up to 256 threads or CPUs,
- streamlined code for more efficient initialization of miner threads,
- precise affining of each miner thread to a specific CPU,
- added an option to disable CPU affinity with "--cpu-affinity 0"

Faster sha256t with AVX512 & AVX2.

Added stratum error count to stats log, reported only when non-zero.

v3.18.2

Issue #342, fixed Groestl AES on Windows, broken in v3.18.0.

AVX512 for sha256d.

SSE42 and AVX may now be displayed as mining features at startup.
This is hard coded for each algo, and is only implemented for scrypt
at this time as it is the only algo with significant performance differences
with those features.

Fixed an issue where a high hashrate algo could cause excessive invalid hash
rate log reports when starting up in benchmark mode.

v3.18.1

More speed for scrypt:
- additional scryptn2 optimizations for all CPU architectures,
- AVX2 is now used by default on CPUs with SHA but not AVX512,
- scrypt:1024 performance lost in v3.18.0 is restored,
- AVX512 & AVX2 improvements to scrypt:1024.

Big speedup for SwiFFTx AVX2 & SSE4.1: x22i +55%, x25x +22%.

Issue #337: fixed a problem that could display negative stats values in the
first summary report if the report was forced prematurely due to a stratum
diff change. The stats will still be invalid but should display zeros.

v3.18.0

Complete rewrite of Scrypt code, optimized for large N factor (scryptn2):
- AVX512 & SHA support for sha256, AVX512 has priority,
- up to 50% increase in hashrate,
- memory requirements reduced 30-60% depending on CPU architecture,
- memory usage displayed at startup,
- scrypt, default N=1024 (LTC), will likely perform slower.

Improved stale share detection and handling for Scrypt with large N factor:
- abort and discard partially computed hash when new work is detected,
- quicker response to new job, less time wasted mining stale job.

Improved stale share handling for all algorithms:
- report possible stale share when new work received with a previously
submitted share still pending,
- when new work is detected report the submission of an already completed,
otherwise valid, but likely stale, share,
- fixed incorrect block height in stale share log.

Small performance improvements to sha, bmw, cube & hamsi for AVX512 & AVX2.

When stratum disconnects miner threads go to idle until reconnected.

Colour changes to some logs.

Some low level function name changes for clarity and consistency.

The reference hashrate in the summary log and the benchmark total hashrate
are now the mean hashrate for the session.

v3.17.1

Fixed Windows build for AES+SSE4.2 (Westmere), was missing AES.
More ternary logic optimizations for AVX512, AVX512+VAES, and AVX512+AES.
Fixed my-gr algo for VAES.

v3.17.0

AVX512 optimized using ternary logic instructions.
Faster sha256t on all CPU architectures: AVX512 +30%, SHA +30%, AVX2 +9%.
Use SHA on supported CPUs to produce merkle hash.
Fixed byte order in Extranonce2 log & replaced Block height with Job ID.

v3.16.5

#329: Fixed GBT incorrect target diff in stats, second attempt.
Fixed formatting error in share result log when --no-color option is used.

v3.16.4

Faster sha512 and sha256 when not using SHA CPU extension.
#329: Fixed GBT incorrect target diff in stats.

v3.16.3

#313 Fix compile error with GCC 11.
Incremental improvements to verthash.

v3.16.2

Verthash: midstate prehash optimization for all architectures.
Verthash: AVX2 optimization.
GBT: added support for Bech32 addresses.
Linux: added CPU frequency to benchmark log.
Fixed integer overflow in time calculations.

v3.16.1

New options for verthash:
--data-file to specify the name, and optionally the path, of the verthash
data file, default is "verthash.dat" in the current directory.
--verify to perform the data file integrity check at startup, default is
not to verify data file integrity.
Support for creation of default verthash data file if:
1) --data-file option is not used,
2) no default data file is found in the current directory, and,
3) --verify option is used.
More detailed logs related to verthash data file.
Small verthash performance improvement.
Fixed detection of corrupt stats caused by networking issues.
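For illustration, the new options combine with a normal command line like the
following, where the pool URL and wallet are placeholders:

cpuminer -a verthash --data-file verthash.dat --verify -o stratum+tcp://pool.example.com:5040 -u YourWalletAddress -p x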
v3.16.0

Added verthash algo.

v3.15.7

Added accepted/stale/rejected percentage to summary log report.
Added warning if share counters mismatch which could corrupt stats.
Linux: CPU temperature reporting is more responsive to rising temperature.
A few AVX2 & AVX512 tweaks.
Removed some dead code and other cleanup.

v3.15.6

Implement keccak pre-hash optimization for x16* algos.
Move conditional mining test to before get_new_work in miner thread.
Add test for share reject reason when solo mining.
Add support for floating point, as well as integer, "networkhasps" in
RPC getmininginfo method.

v3.15.5

Fix stratum jobs lost if 2 jobs received in less than one second.

v3.15.4

Fixed yescryptr16 broken in v3.15.3.

v3.15.3

Yescrypt algos now use yespower v0.5, a little faster.
New implementation of sha256 using SHA CPU extension.
Replace Openssl with SPH for sha256 & sha512.
AVX512 optimization for sha256t & sha256q.
Faster sha256t, sha256q, x21s, x22i & x25x on CPUs with SHA without AVX512.
AVX512+SHA build for Intel Rocketlake added to Windows binary package.

v3.15.2

Zen3 AVX2+VAES optimization for x16*, x17, sonoa, xevan, x21s, x22i, x25x,
allium.
Zen3 (AVX2+SHA+VAES) build added to Windows binary package.

v3.15.1

Fix compile on AMD Zen3 CPUs with VAES.
Force new work immediately after solving a block solo.

v3.15.0

Fugue optimized with AES, improves many sha3 algos.
Minotaur algo optimized for all architectures.
Fixed neoscrypt BUG log.

v3.14.3

#265: more mutex changes to reduce blocking with high thread count.

#267: fixed hodl algo potential memory alignment issue,
add warning when thread count is not valid for mining hodl algo.

v3.14.2

The second line of the Share Accepted log is no longer displayed,
new Xnonce log is added and other small log tweaks.

#265: Cleanup use of mutex.

v3.14.1

GBT and getwork log changes:
fixed missing TTF in New Block log,
ntime no longer byte-swapped for display in New Work log,
fixed zero effective hash rate in Periodic Report log,
deleted "Current block is..." log.

Renamed stratum "New Job" log to "New Work" to be consistent with the solo
version of the log. Added more data to both versions.

v3.14.0

Changes to solo mining:
- segwit is supported by getblocktemplate,
- longpolling is not working and is disabled,
- Periodic Report log is output,
- New Block log includes TTF estimates,
- Stratum thread no longer created when using getwork or GBT.

Fixed BUG log mining sha256d.

v3.13.1.1

Fixed Windows crash mining minotaur algo.

Fixed GCC 10 compile again.
Added -fno-common to testing to be consistent with GCC 10 default.

v3.13.1

Added minotaur algo for Ringcoin.

v3.13.0.1

Issue #262: Fixed xevan AVX2 invalid shares.

v3.13.0

Updated Windows binaries compiled with GCC 9. Included DLLs also updated.
Icelake build (cpuminer-avx512-sha-vaes.exe) now included in Windows
binaries package.

No source code changes.

v3.12.8.2

Fixed x12 AVX2 rejects.
Fixed phi AVX2 crash.

v3.12.8.1

Issue #261: Fixed yescryptr8g invalid shares.

v3.12.8

Yespower sha256 prehash made thread safe.

Rewrote diff conversion functions from scratch to be simpler and use
long double (float80) and int128 arithmetic for improved accuracy and
precision.

Some code cleanup and assorted small changes.
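A conversion in this style can be sketched as follows. This is a hedged
illustration using long double, not the actual cpuminer-opt code; the
decimal constant is the standard difficulty-1 target (0xFFFF << 208):

#include <stdint.h>

// Sketch: convert a 256-bit share target (8 little-endian 32-bit words)
// to a difficulty value using long double (float80) arithmetic.
long double target_to_diff_sketch( const uint32_t *target )
{
   long double t = 0.0L;
   for ( int i = 7; i >= 0; i-- )   // most significant word first
      t = t * 4294967296.0L + (long double)target[i];
   if ( t <= 0.0L ) return 0.0L;
   return 26959535291011309493156476344723991336010898738574164086137773096960.0L / t;
}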
v3.12.7

Issue #257: fixed a file descriptor leak which caused the CPU temperature
and frequency query to report zeros after mining for a couple of hours.

Issue #253: stale share reduction for yescrypt, sonoa.

v3.12.6.1

Issue #252: Fixed SSL mining (stratum+tcps://)

Issue #254: Fixed benchmark.

Issue #253: Implemented stale share reduction for yespower, x25x, x22i, x21s,
x16*, scryptn2, more to come.

v3.12.6

Issue #246: improved stale share detection for getwork.

Improved precision of target_to_diff conversion from 4 digits to 20+.

Display hash and target debug data for all rejected shares.

A graphical representation of CPU affinity is displayed when using --threads.

Added highest and lowest accepted share to summary log.

Other small changes to logs to improve consistency and clarity.

v3.12.5

Issues #246 & #251: fixed incorrect share diff for stratum and getwork,
fixed incorrect target diff for getwork. Stats should now be correct for
getwork as well as stratum.

Issue #252: Fixed stratum+tcps not using curl ssl.

Getwork: reduce stale blocks, faster response to new work.

Added ntime to new job/work logs.

README.md now lists the parameters for yespower variations that don't have
a specific algo name.

v3.12.4.6

Issue #246: fixed getwork repeated new block logs with same height. New work
for the same block is now reported as "New work" instead of "New block".
Also added a check that work is new before generating "New work" log.

Added target diff to getwork new block log.

Changed share ratio in share result log to simple fraction, no longer %.

Added debug log to display mininginfo, use -D.

v3.12.4.5

Issue #246: better stale share detection for getwork, and enhanced logging
of stale shares for stratum & getwork.

Issue #251: fixed incorrect share difficulty and share ratio in share
result log.

Changed submit log to include share diff and block height.

Small cosmetic changes to logs.

v3.12.4.4

Issue #246: Fixed net hashrate in getwork block log,
removed duplicate getwork block log,
other small tweaks to stats logs for getwork.

Issue #248: Fixed chronic stale shares with scrypt:1048576 (scryptn2).

v3.12.4.3

Fixed segfault in new block log for getwork.

Disabled silent discarding of stale work after the submit is logged.

v3.12.4.2

Issue #245: fixed getwork stale shares, solo mining with getwork now works.
aclocal.m4 (vendored)
@@ -1,6 +1,6 @@

# generated automatically by aclocal 1.15.1 -*- Autoconf -*-
# generated automatically by aclocal 1.16.1 -*- Autoconf -*-

# Copyright (C) 1996-2017 Free Software Foundation, Inc.
# Copyright (C) 1996-2018 Free Software Foundation, Inc.

# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -20,7 +20,7 @@ You have another version of autoconf.  It may work, but is not guaranteed to.

If you have problems, you may need to regenerate the build system entirely.
To do so, use the procedure documented by the package, typically 'autoreconf'.])])

# Copyright (C) 2002-2017 Free Software Foundation, Inc.
# Copyright (C) 2002-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -32,10 +32,10 @@ To do so, use the procedure documented by the package, typically 'autoreconf'.])

# generated from the m4 files accompanying Automake X.Y.
# (This private macro should not be called outside this file.)
AC_DEFUN([AM_AUTOMAKE_VERSION],
[am__api_version='1.15'
[am__api_version='1.16'
dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
dnl require some minimum version.  Point them to the right macro.
m4_if([$1], [1.15.1], [],
m4_if([$1], [1.16.1], [],
      [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
])

@@ -51,14 +51,14 @@ m4_define([_AM_AUTOCONF_VERSION], [])

# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced.
# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
[AM_AUTOMAKE_VERSION([1.15.1])dnl
[AM_AUTOMAKE_VERSION([1.16.1])dnl
m4_ifndef([AC_AUTOCONF_VERSION],
  [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])

# Figure out how to run the assembler. -*- Autoconf -*-

# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -78,7 +78,7 @@ _AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl

# AM_AUX_DIR_EXPAND -*- Autoconf -*-

# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -130,7 +130,7 @@ am_aux_dir=`cd "$ac_aux_dir" && pwd`

# AM_CONDITIONAL -*- Autoconf -*-

# Copyright (C) 1997-2017 Free Software Foundation, Inc.
# Copyright (C) 1997-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -161,7 +161,7 @@ AC_CONFIG_COMMANDS_PRE(

Usually this means the macro was only invoked conditionally.]])
fi])])

# Copyright (C) 1999-2017 Free Software Foundation, Inc.
# Copyright (C) 1999-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -352,13 +352,12 @@ _AM_SUBST_NOTMAKE([am__nodep])dnl

# Generate code to set up dependency tracking. -*- Autoconf -*-

# Copyright (C) 1999-2017 Free Software Foundation, Inc.
# Copyright (C) 1999-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.

# _AM_OUTPUT_DEPENDENCY_COMMANDS
# ------------------------------
AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],

@@ -366,49 +365,41 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],

# Older Autoconf quotes --file arguments for eval, but not when files
# are listed without --file.  Let's play safe and only enable the eval
# if we detect the quoting.
case $CONFIG_FILES in
*\'*) eval set x "$CONFIG_FILES" ;;
*)   set x $CONFIG_FILES ;;
esac
# TODO: see whether this extra hack can be removed once we start
# requiring Autoconf 2.70 or later.
AS_CASE([$CONFIG_FILES],
[*\'*], [eval set x "$CONFIG_FILES"],
[*], [set x $CONFIG_FILES])
shift
for mf
# Used to flag and report bootstrapping failures.
am_rc=0
for am_mf
do
# Strip MF so we end up with the name of the file.
mf=`echo "$mf" | sed -e 's/:.*$//'`
# Check whether this is an Automake generated Makefile or not.
# We used to match only the files named 'Makefile.in', but
# some people rename them; so instead we look at the file content.
# Grep'ing the first line is not enough: some people post-process
# each Makefile.in and add a new line on top of each file to say so.
# Grep'ing the whole file is not good either: AIX grep has a line
am_mf=`AS_ECHO(["$am_mf"]) | sed -e 's/:.*$//'`
# Check whether this is an Automake generated Makefile which includes
# dependency-tracking related rules and includes.
# Grep'ing the whole file directly is not great: AIX grep has a line
# limit of 2048, but all sed's we know have understand at least 4000.
if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then
dirpart=`AS_DIRNAME("$mf")`
else
continue
fi
# Extract the definition of DEPDIR, am__include, and am__quote
# from the Makefile without running 'make'.
DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
test -z "$DEPDIR" && continue
am__include=`sed -n 's/^am__include = //p' < "$mf"`
test -z "$am__include" && continue
am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
# Find all dependency output files, they are included files with
# $(DEPDIR) in their names.  We invoke sed twice because it is the
# simplest approach to changing $(DEPDIR) to its actual value in the
# expansion.
for file in `sed -n "
s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do
# Make sure the directory exists.
test -f "$dirpart/$file" && continue
fdir=`AS_DIRNAME(["$file"])`
AS_MKDIR_P([$dirpart/$fdir])
# echo "creating $dirpart/$file"
echo '# dummy' > "$dirpart/$file"
done
sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \
  || continue
am_dirpart=`AS_DIRNAME(["$am_mf"])`
am_filepart=`AS_BASENAME(["$am_mf"])`
AM_RUN_LOG([cd "$am_dirpart" \
  && sed -e '/# am--include-marker/d' "$am_filepart" \
  | $MAKE -f - am--depfiles]) || am_rc=$?
done
if test $am_rc -ne 0; then
AC_MSG_FAILURE([Something went wrong bootstrapping makefile fragments
for automatic dependency tracking.  Try re-running configure with the
'--disable-dependency-tracking' option to at least be able to build
the package (albeit without support for automatic dependency tracking).])
fi
AS_UNSET([am_dirpart])
AS_UNSET([am_filepart])
AS_UNSET([am_mf])
AS_UNSET([am_rc])
rm -f conftest-deps.mk
}
])# _AM_OUTPUT_DEPENDENCY_COMMANDS

@@ -417,18 +408,17 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],

# -----------------------------
# This macro should only be invoked once -- use via AC_REQUIRE.
#
# This code is only required when automatic dependency tracking
# is enabled.  FIXME.  This creates each '.P' file that we will
# need in order to bootstrap the dependency handling code.
# This code is only required when automatic dependency tracking is enabled.
# This creates each '.Po' and '.Plo' makefile fragment that we'll need in
# order to bootstrap the dependency handling code.
AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
[AC_CONFIG_COMMANDS([depfiles],
[test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS],
[AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"])
])
[AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}"])])

# Do all the work for Automake. -*- Autoconf -*-

# Copyright (C) 1996-2017 Free Software Foundation, Inc.
# Copyright (C) 1996-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -515,8 +505,8 @@ AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl

AC_REQUIRE([AC_PROG_MKDIR_P])dnl
# For better backward compatibility.  To be removed once Automake 1.9.x
# dies out for good.  For more background, see:
# <http://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
# <http://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
# <https://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
# <https://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
AC_SUBST([mkdir_p], ['$(MKDIR_P)'])
# We need awk for the "check" target (and possibly the TAP driver).  The
# system "awk" is bad on some platforms.

@@ -583,7 +573,7 @@ END

Aborting the configuration process, to ensure you take notice of the issue.

You can download and install GNU coreutils to get an 'rm' implementation
that behaves properly: <http://www.gnu.org/software/coreutils/>.
that behaves properly: <https://www.gnu.org/software/coreutils/>.

If you want to complete the configuration process using your problematic
'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM

@@ -625,7 +615,7 @@ for _am_header in $config_headers :; do

done
echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])

# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -646,7 +636,7 @@ if test x"${install_sh+set}" != xset; then

fi
AC_SUBST([install_sh])])

# Copyright (C) 2003-2017 Free Software Foundation, Inc.
# Copyright (C) 2003-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -668,7 +658,7 @@ AC_SUBST([am__leading_dot])])

# Add --enable-maintainer-mode option to configure. -*- Autoconf -*-
# From Jim Meyering

# Copyright (C) 1996-2017 Free Software Foundation, Inc.
# Copyright (C) 1996-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -703,7 +693,7 @@ AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles])

# Check to see how 'make' treats includes. -*- Autoconf -*-

# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -711,49 +701,42 @@ AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles])

# AM_MAKE_INCLUDE()
# -----------------
# Check to see how make treats includes.
# Check whether make has an 'include' directive that can support all
# the idioms we need for our automatic dependency tracking code.
AC_DEFUN([AM_MAKE_INCLUDE],
[am_make=${MAKE-make}
cat > confinc << 'END'
[AC_MSG_CHECKING([whether ${MAKE-make} supports the include directive])
cat > confinc.mk << 'END'
am__doit:
	@echo this is the am__doit target
	@echo this is the am__doit target >confinc.out
.PHONY: am__doit
END
# If we don't find an include directive, just comment out the code.
AC_MSG_CHECKING([for style of include used by $am_make])
am__include="#"
am__quote=
_am_result=none
# First try GNU make style include.
echo "include confinc" > confmf
# Ignore all kinds of additional output from 'make'.
case `$am_make -s -f confmf 2> /dev/null` in #(
*the\ am__doit\ target*)
am__include=include
am__quote=
_am_result=GNU
;;
esac
# Now try BSD make style include.
if test "$am__include" = "#"; then
echo '.include "confinc"' > confmf
case `$am_make -s -f confmf 2> /dev/null` in #(
*the\ am__doit\ target*)
am__include=.include
am__quote="\""
_am_result=BSD
;;
esac
fi
AC_SUBST([am__include])
AC_SUBST([am__quote])
AC_MSG_RESULT([$_am_result])
rm -f confinc confmf
])
# BSD make does it like this.
echo '.include "confinc.mk" # ignored' > confmf.BSD
# Other make implementations (GNU, Solaris 10, AIX) do it like this.
echo 'include confinc.mk # ignored' > confmf.GNU
_am_result=no
for s in GNU BSD; do
AM_RUN_LOG([${MAKE-make} -f confmf.$s && cat confinc.out])
AS_CASE([$?:`cat confinc.out 2>/dev/null`],
['0:this is the am__doit target'],
[AS_CASE([$s],
[BSD], [am__include='.include' am__quote='"'],
[am__include='include' am__quote=''])])
if test "$am__include" != "#"; then
_am_result="yes ($s style)"
break
fi
done
rm -f confinc.* confmf.*
AC_MSG_RESULT([${_am_result}])
AC_SUBST([am__include])])
AC_SUBST([am__quote])])

# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*-

# Copyright (C) 1997-2017 Free Software Foundation, Inc.
# Copyright (C) 1997-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -792,7 +775,7 @@ fi

# Helper functions for option handling. -*- Autoconf -*-

# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -821,7 +804,7 @@ AC_DEFUN([_AM_SET_OPTIONS],

AC_DEFUN([_AM_IF_OPTION],
[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])

# Copyright (C) 1999-2017 Free Software Foundation, Inc.
# Copyright (C) 1999-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -868,7 +851,7 @@ AC_LANG_POP([C])])

# For backward compatibility.
AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])])

# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -887,7 +870,7 @@ AC_DEFUN([AM_RUN_LOG],

# Check to make sure that the build environment is sane. -*- Autoconf -*-

# Copyright (C) 1996-2017 Free Software Foundation, Inc.
# Copyright (C) 1996-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -968,7 +951,7 @@ AC_CONFIG_COMMANDS_PRE(

rm -f conftest.file
])

# Copyright (C) 2009-2017 Free Software Foundation, Inc.
# Copyright (C) 2009-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -1028,7 +1011,7 @@ AC_SUBST([AM_BACKSLASH])dnl

_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl
])

# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -1056,7 +1039,7 @@ fi

INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
AC_SUBST([INSTALL_STRIP_PROGRAM])])

# Copyright (C) 2006-2017 Free Software Foundation, Inc.
# Copyright (C) 2006-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

@@ -1075,7 +1058,7 @@ AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])

# Check how to create a tarball. -*- Autoconf -*-

# Copyright (C) 2004-2017 Free Software Foundation, Inc.
# Copyright (C) 2004-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
algo-gate-api.c
@@ -15,8 +15,6 @@
 #include <stdbool.h>
 #include <memory.h>
 #include <unistd.h>
-#include <openssl/sha.h>
-//#include "miner.h"
 #include "algo-gate-api.h"

 // Define null and standard functions.
@@ -90,33 +88,172 @@ void algo_not_implemented()
 }

 // default null functions

+// deprecated, use generic as default
 int null_scanhash()
 {
    applog(LOG_WARNING,"SWERR: undefined scanhash function in algo_gate");
    return 0;
 }

-void null_hash()
+// Default generic scanhash can be used in many cases.
+int scanhash_generic( struct work *work, uint32_t max_nonce,
+                      uint64_t *hashes_done, struct thr_info *mythr )
+{
+   uint32_t edata[20] __attribute__((aligned(64)));
+   uint32_t hash[8]   __attribute__((aligned(64)));
+   uint32_t *pdata = work->data;
+   uint32_t *ptarget = work->target;
+   const uint32_t first_nonce = pdata[19];
+   const uint32_t last_nonce = max_nonce - 1;
+   uint32_t n = first_nonce;
+   const int thr_id = mythr->id;
+   const bool bench = opt_benchmark;
+
+   mm128_bswap32_80( edata, pdata );
+   do
+   {
+      edata[19] = n;
+      if ( likely( algo_gate.hash( hash, edata, thr_id ) ) )
+         if ( unlikely( valid_hash( hash, ptarget ) && !bench ) )
+         {
+            pdata[19] = bswap_32( n );
+            submit_solution( work, hash, mythr );
+         }
+      n++;
+   } while ( n < last_nonce && !work_restart[thr_id].restart );
+   *hashes_done = n - first_nonce;
+   pdata[19] = n;
+   return 0;
+}
+
+#if defined(__AVX2__)
+
+//int scanhash_4way_64_64( struct work *work, uint32_t max_nonce,
+//                         uint64_t *hashes_done, struct thr_info *mythr )
+
+//int scanhash_4way_64_640( struct work *work, uint32_t max_nonce,
+//                          uint64_t *hashes_done, struct thr_info *mythr )
+
+int scanhash_4way_64in_32out( struct work *work, uint32_t max_nonce,
+                              uint64_t *hashes_done, struct thr_info *mythr )
+{
+   uint32_t hash32[8*4]  __attribute__ ((aligned (64)));
+   uint32_t vdata[20*4]  __attribute__ ((aligned (64)));
+   uint32_t lane_hash[8] __attribute__ ((aligned (64)));
+   uint32_t *hash32_d7 = &(hash32[ 7*4 ]);
+   uint32_t *pdata = work->data;
+   const uint32_t *ptarget = work->target;
+   const uint32_t first_nonce = pdata[19];
+   const uint32_t last_nonce = max_nonce - 4;
+   __m256i *noncev = (__m256i*)vdata + 9;
+   uint32_t n = first_nonce;
+   const int thr_id = mythr->id;
+   const uint32_t targ32_d7 = ptarget[7];
+   const bool bench = opt_benchmark;
+
+   mm256_bswap32_intrlv80_4x64( vdata, pdata );
+   *noncev = mm256_intrlv_blend_32(
+                _mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
+   do
+   {
+      if ( likely( algo_gate.hash( hash32, vdata, thr_id ) ) )
+         for ( int lane = 0; lane < 4; lane++ )
+            if ( unlikely( hash32_d7[ lane ] <= targ32_d7 && !bench ) )
+            {
+               extr_lane_4x32( lane_hash, hash32, lane, 256 );
+               if ( valid_hash( lane_hash, ptarget ) )
+               {
+                  pdata[19] = bswap_32( n + lane );
+                  submit_solution( work, lane_hash, mythr );
+               }
+            }
+      *noncev = _mm256_add_epi32( *noncev,
+                                  m256_const1_64( 0x0000000400000000 ) );
+      n += 4;
+   } while ( likely( ( n <= last_nonce ) && !work_restart[thr_id].restart ) );
+   pdata[19] = n;
+   *hashes_done = n - first_nonce;
+   return 0;
+}
+
+//int scanhash_8way_32_32( struct work *work, uint32_t max_nonce,
+//                         uint64_t *hashes_done, struct thr_info *mythr )
+
+#endif
+
+#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
+
+//int scanhash_8way_64_64( struct work *work, uint32_t max_nonce,
+//                         uint64_t *hashes_done, struct thr_info *mythr )
+
+//int scanhash_8way_64_640( struct work *work, uint32_t max_nonce,
+//                          uint64_t *hashes_done, struct thr_info *mythr )
+
+int scanhash_8way_64in_32out( struct work *work, uint32_t max_nonce,
+                              uint64_t *hashes_done, struct thr_info *mythr )
+{
+   uint32_t hash32[8*8]  __attribute__ ((aligned (128)));
+   uint32_t vdata[20*8]  __attribute__ ((aligned (64)));
+   uint32_t lane_hash[8] __attribute__ ((aligned (64)));
+   uint32_t *hash32_d7 = &(hash32[7*8]);
+   uint32_t *pdata = work->data;
+   const uint32_t *ptarget = work->target;
+   const uint32_t first_nonce = pdata[19];
+   const uint32_t last_nonce = max_nonce - 8;
+   __m512i *noncev = (__m512i*)vdata + 9;
+   uint32_t n = first_nonce;
+   const int thr_id = mythr->id;
+   const uint32_t targ32_d7 = ptarget[7];
+   const bool bench = opt_benchmark;
+
+   mm512_bswap32_intrlv80_8x64( vdata, pdata );
+   *noncev = mm512_intrlv_blend_32(
+                _mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
+                                  n+3, 0, n+2, 0, n+1, 0, n,   0 ), *noncev );
+   do
+   {
+      if ( likely( algo_gate.hash( hash32, vdata, thr_id ) ) )
+         for ( int lane = 0; lane < 8; lane++ )
+            if ( unlikely( ( hash32_d7[ lane ] <= targ32_d7 ) && !bench ) )
+            {
+               extr_lane_8x32( lane_hash, hash32, lane, 256 );
+               if ( likely( valid_hash( lane_hash, ptarget ) ) )
+               {
+                  pdata[19] = bswap_32( n + lane );
+                  submit_solution( work, lane_hash, mythr );
+               }
+            }
+      *noncev = _mm512_add_epi32( *noncev,
+                                  m512_const1_64( 0x0000000800000000 ) );
+      n += 8;
+   } while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
+   pdata[19] = n;
+   *hashes_done = n - first_nonce;
+   return 0;
+}
+
+//int scanhash_16way_32_32( struct work *work, uint32_t max_nonce,
+//                          uint64_t *hashes_done, struct thr_info *mythr )
+
+#endif
+
+int null_hash()
+{
+   applog(LOG_WARNING,"SWERR: null_hash unsafe null function");
+};
+void null_hash_suw()
+{
+   applog(LOG_WARNING,"SWERR: null_hash_suw unsafe null function");
+   return 0;
+};

 void init_algo_gate( algo_gate_t* gate )
 {
   gate->miner_thread_init     = (void*)&return_true;
-  gate->scanhash              = (void*)&null_scanhash;
+  gate->scanhash              = (void*)&scanhash_generic;
   gate->hash                  = (void*)&null_hash;
   gate->hash_suw              = (void*)&null_hash_suw;
   gate->get_new_work          = (void*)&std_get_new_work;
   gate->work_decode           = (void*)&std_le_work_decode;
   gate->decode_extra_data     = (void*)&do_nothing;
   gate->gen_merkle_root       = (void*)&sha256d_gen_merkle_root;
   gate->stratum_gen_work      = (void*)&std_stratum_gen_work;
   gate->build_stratum_request = (void*)&std_le_build_stratum_request;
   gate->malloc_txs_request    = (void*)&std_malloc_txs_request;
   gate->submit_getwork_result = (void*)&std_le_submit_getwork_result;
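Worth noting about the 4-way and 8-way scanhashes above: they do not run the full 256-bit target comparison on every lane for every nonce. Each lane's hash is first screened by comparing only word 7 (the most significant 32 bits) against ptarget[7]; only a lane that passes is de-interleaved with extr_lane_*() and fully tested with valid_hash(). A scalar model of that screen, with a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>

// Scalar model of the hash32_d7 / targ32_d7 prefilter used by
// scanhash_4way_64in_32out and scanhash_8way_64in_32out. Nearly all
// nonces fail this cheap test, so lane extraction and the full
// 256-bit comparison run only on rare candidates.
static inline bool lane_may_solve( const uint32_t *hash_d7, int lane,
                                   uint32_t targ_d7 )
{
   return hash_d7[ lane ] <= targ_d7;
}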
@@ -140,9 +277,11 @@ void init_algo_gate( algo_gate_t* gate )
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wimplicit-function-declaration"

-// called by each thread that uses the gate
+// Called once by main
 bool register_algo_gate( int algo, algo_gate_t *gate )
 {
+  bool rc = false;
+
   if ( NULL == gate )
   {
      applog(LOG_ERR,"FAIL: algo_gate registration failed, NULL gate\n");
@@ -151,108 +290,108 @@ bool register_algo_gate( int algo, algo_gate_t *gate )

   init_algo_gate( gate );

-  switch (algo)
+  switch ( algo )
  {
-   case ALGO_ALLIUM:        register_allium_algo        ( gate ); break;
-   case ALGO_ANIME:         register_anime_algo         ( gate ); break;
-   case ALGO_ARGON2:        register_argon2_algo        ( gate ); break;
-   case ALGO_ARGON2D250:    register_argon2d_crds_algo  ( gate ); break;
-   case ALGO_ARGON2D500:    register_argon2d_dyn_algo   ( gate ); break;
-   case ALGO_ARGON2D4096:   register_argon2d4096_algo   ( gate ); break;
-   case ALGO_AXIOM:         register_axiom_algo         ( gate ); break;
-   case ALGO_BLAKE:         register_blake_algo         ( gate ); break;
-   case ALGO_BLAKE2B:       register_blake2b_algo       ( gate ); break;
-   case ALGO_BLAKE2S:       register_blake2s_algo       ( gate ); break;
-   case ALGO_BLAKECOIN:     register_blakecoin_algo     ( gate ); break;
-   case ALGO_BMW512:        register_bmw512_algo        ( gate ); break;
-   case ALGO_C11:           register_c11_algo           ( gate ); break;
-   case ALGO_DECRED:        register_decred_algo        ( gate ); break;
-   case ALGO_DEEP:          register_deep_algo          ( gate ); break;
-   case ALGO_DMD_GR:        register_dmd_gr_algo        ( gate ); break;
-   case ALGO_GROESTL:       register_groestl_algo       ( gate ); break;
-   case ALGO_HEX:           register_hex_algo           ( gate ); break;
-   case ALGO_HMQ1725:       register_hmq1725_algo       ( gate ); break;
-   case ALGO_HODL:          register_hodl_algo          ( gate ); break;
-   case ALGO_JHA:           register_jha_algo           ( gate ); break;
-   case ALGO_KECCAK:        register_keccak_algo        ( gate ); break;
-   case ALGO_KECCAKC:       register_keccakc_algo       ( gate ); break;
-   case ALGO_LBRY:          register_lbry_algo          ( gate ); break;
-   case ALGO_LYRA2H:        register_lyra2h_algo        ( gate ); break;
-   case ALGO_LYRA2RE:       register_lyra2re_algo       ( gate ); break;
-   case ALGO_LYRA2REV2:     register_lyra2rev2_algo     ( gate ); break;
-   case ALGO_LYRA2REV3:     register_lyra2rev3_algo     ( gate ); break;
-   case ALGO_LYRA2Z:        register_lyra2z_algo        ( gate ); break;
-   case ALGO_LYRA2Z330:     register_lyra2z330_algo     ( gate ); break;
-   case ALGO_M7M:           register_m7m_algo           ( gate ); break;
-   case ALGO_MYR_GR:        register_myriad_algo        ( gate ); break;
-   case ALGO_NEOSCRYPT:     register_neoscrypt_algo     ( gate ); break;
-   case ALGO_NIST5:         register_nist5_algo         ( gate ); break;
-   case ALGO_PENTABLAKE:    register_pentablake_algo    ( gate ); break;
-   case ALGO_PHI1612:       register_phi1612_algo       ( gate ); break;
-   case ALGO_PHI2:          register_phi2_algo          ( gate ); break;
-   case ALGO_POLYTIMOS:     register_polytimos_algo     ( gate ); break;
-   case ALGO_POWER2B:       register_power2b_algo       ( gate ); break;
-   case ALGO_QUARK:         register_quark_algo         ( gate ); break;
-   case ALGO_QUBIT:         register_qubit_algo         ( gate ); break;
-   case ALGO_SCRYPT:        register_scrypt_algo        ( gate ); break;
-   case ALGO_SHA256D:       register_sha256d_algo       ( gate ); break;
-   case ALGO_SHA256Q:       register_sha256q_algo       ( gate ); break;
-   case ALGO_SHA256T:       register_sha256t_algo       ( gate ); break;
-   case ALGO_SHA3D:         register_sha3d_algo         ( gate ); break;
-   case ALGO_SHAVITE3:      register_shavite_algo       ( gate ); break;
-   case ALGO_SKEIN:         register_skein_algo         ( gate ); break;
-   case ALGO_SKEIN2:        register_skein2_algo        ( gate ); break;
-   case ALGO_SKUNK:         register_skunk_algo         ( gate ); break;
-   case ALGO_SONOA:         register_sonoa_algo         ( gate ); break;
-   case ALGO_TIMETRAVEL:    register_timetravel_algo    ( gate ); break;
-   case ALGO_TIMETRAVEL10:  register_timetravel10_algo  ( gate ); break;
-   case ALGO_TRIBUS:        register_tribus_algo        ( gate ); break;
-   case ALGO_VANILLA:       register_vanilla_algo       ( gate ); break;
-   case ALGO_VELTOR:        register_veltor_algo        ( gate ); break;
-   case ALGO_WHIRLPOOL:     register_whirlpool_algo     ( gate ); break;
-   case ALGO_WHIRLPOOLX:    register_whirlpoolx_algo    ( gate ); break;
-   case ALGO_X11:           register_x11_algo           ( gate ); break;
-   case ALGO_X11EVO:        register_x11evo_algo        ( gate ); break;
-   case ALGO_X11GOST:       register_x11gost_algo       ( gate ); break;
-   case ALGO_X12:           register_x12_algo           ( gate ); break;
-   case ALGO_X13:           register_x13_algo           ( gate ); break;
-   case ALGO_X13BCD:        register_x13bcd_algo        ( gate ); break;
-   case ALGO_X13SM3:        register_x13sm3_algo        ( gate ); break;
-   case ALGO_X14:           register_x14_algo           ( gate ); break;
-   case ALGO_X15:           register_x15_algo           ( gate ); break;
-   case ALGO_X16R:          register_x16r_algo          ( gate ); break;
-   case ALGO_X16RV2:        register_x16rv2_algo        ( gate ); break;
-   case ALGO_X16RT:         register_x16rt_algo         ( gate ); break;
-   case ALGO_X16RT_VEIL:    register_x16rt_veil_algo    ( gate ); break;
-   case ALGO_X16S:          register_x16s_algo          ( gate ); break;
-   case ALGO_X17:           register_x17_algo           ( gate ); break;
-   case ALGO_X21S:          register_x21s_algo          ( gate ); break;
-   case ALGO_X22I:          register_x22i_algo          ( gate ); break;
-   case ALGO_X25X:          register_x25x_algo          ( gate ); break;
-   case ALGO_XEVAN:         register_xevan_algo         ( gate ); break;
-/* case ALGO_YESCRYPT:      register_yescrypt_05_algo   ( gate ); break;
-   case ALGO_YESCRYPTR8:    register_yescryptr8_05_algo ( gate ); break;
-   case ALGO_YESCRYPTR16:   register_yescryptr16_05_algo( gate ); break;
-   case ALGO_YESCRYPTR32:   register_yescryptr32_05_algo( gate ); break;
-*/
-   case ALGO_YESCRYPT:      register_yescrypt_algo      ( gate ); break;
-   case ALGO_YESCRYPTR8:    register_yescryptr8_algo    ( gate ); break;
-   case ALGO_YESCRYPTR8G:   register_yescryptr8g_algo   ( gate ); break;
-   case ALGO_YESCRYPTR16:   register_yescryptr16_algo   ( gate ); break;
-   case ALGO_YESCRYPTR32:   register_yescryptr32_algo   ( gate ); break;
-   case ALGO_YESPOWER:      register_yespower_algo      ( gate ); break;
-   case ALGO_YESPOWERR16:   register_yespowerr16_algo   ( gate ); break;
-   case ALGO_YESPOWER_B2B:  register_yespower_b2b_algo  ( gate ); break;
-   case ALGO_ZR5:           register_zr5_algo           ( gate ); break;
+   case ALGO_ALLIUM:        rc = register_allium_algo        ( gate ); break;
+   case ALGO_ANIME:         rc = register_anime_algo         ( gate ); break;
+   case ALGO_ARGON2:        rc = register_argon2_algo        ( gate ); break;
+   case ALGO_ARGON2D250:    rc = register_argon2d_crds_algo  ( gate ); break;
+   case ALGO_ARGON2D500:    rc = register_argon2d_dyn_algo   ( gate ); break;
+   case ALGO_ARGON2D4096:   rc = register_argon2d4096_algo   ( gate ); break;
+   case ALGO_AXIOM:         rc = register_axiom_algo         ( gate ); break;
+   case ALGO_BLAKE:         rc = register_blake_algo         ( gate ); break;
+   case ALGO_BLAKE2B:       rc = register_blake2b_algo       ( gate ); break;
+   case ALGO_BLAKE2S:       rc = register_blake2s_algo       ( gate ); break;
+   case ALGO_BLAKECOIN:     rc = register_blakecoin_algo     ( gate ); break;
+   case ALGO_BMW512:        rc = register_bmw512_algo        ( gate ); break;
+   case ALGO_C11:           rc = register_c11_algo           ( gate ); break;
+   case ALGO_DECRED:        rc = register_decred_algo        ( gate ); break;
+   case ALGO_DEEP:          rc = register_deep_algo          ( gate ); break;
+   case ALGO_DMD_GR:        rc = register_dmd_gr_algo        ( gate ); break;
+   case ALGO_GROESTL:       rc = register_groestl_algo       ( gate ); break;
+   case ALGO_HEX:           rc = register_hex_algo           ( gate ); break;
+   case ALGO_HMQ1725:       rc = register_hmq1725_algo       ( gate ); break;
+   case ALGO_HODL:          rc = register_hodl_algo          ( gate ); break;
+   case ALGO_JHA:           rc = register_jha_algo           ( gate ); break;
+   case ALGO_KECCAK:        rc = register_keccak_algo        ( gate ); break;
+   case ALGO_KECCAKC:       rc = register_keccakc_algo       ( gate ); break;
+   case ALGO_LBRY:          rc = register_lbry_algo          ( gate ); break;
+   case ALGO_LYRA2H:        rc = register_lyra2h_algo        ( gate ); break;
+   case ALGO_LYRA2RE:       rc = register_lyra2re_algo       ( gate ); break;
+   case ALGO_LYRA2REV2:     rc = register_lyra2rev2_algo     ( gate ); break;
+   case ALGO_LYRA2REV3:     rc = register_lyra2rev3_algo     ( gate ); break;
+   case ALGO_LYRA2Z:        rc = register_lyra2z_algo        ( gate ); break;
+   case ALGO_LYRA2Z330:     rc = register_lyra2z330_algo     ( gate ); break;
+   case ALGO_M7M:           rc = register_m7m_algo           ( gate ); break;
+   case ALGO_MINOTAUR:      rc = register_minotaur_algo      ( gate ); break;
+   case ALGO_MYR_GR:        rc = register_myriad_algo        ( gate ); break;
+   case ALGO_NEOSCRYPT:     rc = register_neoscrypt_algo     ( gate ); break;
+   case ALGO_NIST5:         rc = register_nist5_algo         ( gate ); break;
+   case ALGO_PENTABLAKE:    rc = register_pentablake_algo    ( gate ); break;
+   case ALGO_PHI1612:       rc = register_phi1612_algo       ( gate ); break;
+   case ALGO_PHI2:          rc = register_phi2_algo          ( gate ); break;
+   case ALGO_POLYTIMOS:     rc = register_polytimos_algo     ( gate ); break;
+   case ALGO_POWER2B:       rc = register_power2b_algo       ( gate ); break;
+   case ALGO_QUARK:         rc = register_quark_algo         ( gate ); break;
+   case ALGO_QUBIT:         rc = register_qubit_algo         ( gate ); break;
+   case ALGO_SCRYPT:        rc = register_scrypt_algo        ( gate ); break;
+   case ALGO_SHA256D:       rc = register_sha256d_algo       ( gate ); break;
+   case ALGO_SHA256Q:       rc = register_sha256q_algo       ( gate ); break;
+   case ALGO_SHA256T:       rc = register_sha256t_algo       ( gate ); break;
+   case ALGO_SHA3D:         rc = register_sha3d_algo         ( gate ); break;
+   case ALGO_SHAVITE3:      rc = register_shavite_algo       ( gate ); break;
+   case ALGO_SKEIN:         rc = register_skein_algo         ( gate ); break;
+   case ALGO_SKEIN2:        rc = register_skein2_algo        ( gate ); break;
+   case ALGO_SKUNK:         rc = register_skunk_algo         ( gate ); break;
+   case ALGO_SONOA:         rc = register_sonoa_algo         ( gate ); break;
+   case ALGO_TIMETRAVEL:    rc = register_timetravel_algo    ( gate ); break;
+   case ALGO_TIMETRAVEL10:  rc = register_timetravel10_algo  ( gate ); break;
+   case ALGO_TRIBUS:        rc = register_tribus_algo        ( gate ); break;
+   case ALGO_VANILLA:       rc = register_vanilla_algo       ( gate ); break;
+   case ALGO_VELTOR:        rc = register_veltor_algo        ( gate ); break;
+   case ALGO_VERTHASH:      rc = register_verthash_algo      ( gate ); break;
+   case ALGO_WHIRLPOOL:     rc = register_whirlpool_algo     ( gate ); break;
+   case ALGO_WHIRLPOOLX:    rc = register_whirlpoolx_algo    ( gate ); break;
+   case ALGO_X11:           rc = register_x11_algo           ( gate ); break;
+   case ALGO_X11EVO:        rc = register_x11evo_algo        ( gate ); break;
+   case ALGO_X11GOST:       rc = register_x11gost_algo       ( gate ); break;
+   case ALGO_X12:           rc = register_x12_algo           ( gate ); break;
+   case ALGO_X13:           rc = register_x13_algo           ( gate ); break;
+   case ALGO_X13BCD:        rc = register_x13bcd_algo        ( gate ); break;
+   case ALGO_X13SM3:        rc = register_x13sm3_algo        ( gate ); break;
+   case ALGO_X14:           rc = register_x14_algo           ( gate ); break;
+   case ALGO_X15:           rc = register_x15_algo           ( gate ); break;
+   case ALGO_X16R:          rc = register_x16r_algo          ( gate ); break;
+   case ALGO_X16RV2:        rc = register_x16rv2_algo        ( gate ); break;
+   case ALGO_X16RT:         rc = register_x16rt_algo         ( gate ); break;
+   case ALGO_X16RT_VEIL:    rc = register_x16rt_veil_algo    ( gate ); break;
+   case ALGO_X16S:          rc = register_x16s_algo          ( gate ); break;
+   case ALGO_X17:           rc = register_x17_algo           ( gate ); break;
+   case ALGO_X21S:          rc = register_x21s_algo          ( gate ); break;
+   case ALGO_X22I:          rc = register_x22i_algo          ( gate ); break;
+   case ALGO_X25X:          rc = register_x25x_algo          ( gate ); break;
+   case ALGO_XEVAN:         rc = register_xevan_algo         ( gate ); break;
+   case ALGO_YESCRYPT:      rc = register_yescrypt_05_algo   ( gate ); break;
+// case ALGO_YESCRYPT:      register_yescrypt_algo     ( gate ); break;
+   case ALGO_YESCRYPTR8:    rc = register_yescryptr8_05_algo ( gate ); break;
+// case ALGO_YESCRYPTR8:    register_yescryptr8_algo   ( gate ); break;
+   case ALGO_YESCRYPTR8G:   rc = register_yescryptr8g_algo   ( gate ); break;
+   case ALGO_YESCRYPTR16:   rc = register_yescryptr16_05_algo( gate ); break;
+// case ALGO_YESCRYPTR16:   register_yescryptr16_algo  ( gate ); break;
+   case ALGO_YESCRYPTR32:   rc = register_yescryptr32_05_algo( gate ); break;
+// case ALGO_YESCRYPTR32:   register_yescryptr32_algo  ( gate ); break;
+   case ALGO_YESPOWER:      rc = register_yespower_algo      ( gate ); break;
+   case ALGO_YESPOWERR16:   rc = register_yespowerr16_algo   ( gate ); break;
+   case ALGO_YESPOWER_B2B:  rc = register_yespower_b2b_algo  ( gate ); break;
+   case ALGO_ZR5:           rc = register_zr5_algo           ( gate ); break;
   default:
-    applog(LOG_ERR,"FAIL: algo_gate registration failed, unknown algo %s.\n", algo_names[opt_algo] );
+    applog(LOG_ERR,"BUG: unregistered algorithm %s.\n", algo_names[opt_algo] );
    return false;
  } // switch

-  // ensure required functions were defined.
-  if ( gate->scanhash == (void*)&null_scanhash )
+  if ( !rc )
  {
-   applog(LOG_ERR, "FAIL: Required algo_gate functions undefined\n");
+   applog(LOG_ERR, "FAIL: %s algorithm failed to initialize\n", algo_names[opt_algo] );
   return false;
  }
  return true;
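A minimal sketch of how an algo plugs into the reworked gate, assuming only what this diff shows (the rc return convention, scanhash_generic as the default, and the new int (*hash)(void*, const void*, int) signature); the algo name and both functions are hypothetical:

#include <string.h>
#include "algo-gate-api.h"

// Returning non-zero tells scanhash_generic the output is worth testing.
static int myalgo_hash( void *output, const void *input, int thr_id )
{
   (void)input; (void)thr_id;
   memset( output, 0, 32 );  // placeholder; real code hashes the 80-byte header
   return 1;
}

bool register_myalgo_algo( algo_gate_t *gate )
{
   gate->hash = (void*)&myalgo_hash;  // scanhash stays scanhash_generic
   return true;                       // becomes rc in register_algo_gate
}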
@@ -261,7 +400,6 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
 // restore warnings
 #pragma GCC diagnostic pop

-// run the alternate hash function for a specific algo
 void exec_hash_function( int algo, void *output, const void *pdata )
 {
   algo_gate_t gate;
@@ -281,7 +419,6 @@ void exec_hash_function( int algo, void *output, const void *pdata )
 const char* const algo_alias_map[][2] =
 {
 //   alias                proper
   { "argon2d-crds",      "argon2d250"   },
   { "argon2d-dyn",       "argon2d500"   },
   { "argon2d-uis",       "argon2d4096"  },
   { "bcd",               "x13bcd"       },
@@ -296,7 +433,6 @@ const char* const algo_alias_map[][2] =
   { "flax",              "c11"          },
   { "hsr",               "x13sm3"       },
   { "jackpot",           "jha"          },
   { "jane",              "scryptjane"   },
   { "lyra2",             "lyra2re"      },
   { "lyra2v2",           "lyra2rev2"    },
   { "lyra2v3",           "lyra2rev3"    },
114  algo-gate-api.h
@@ -1,3 +1,6 @@
+#ifndef __ALGO_GATE_API_H__
+#define __ALGO_GATE_API_H__ 1
+
 #include <stdlib.h>
 #include <stdbool.h>
 #include <stdint.h>
@@ -75,7 +78,7 @@
 // my hack at creating a set data type using bit masks. Set inclusion,
 // exclusion union and intersection operations are provided for convenience. In
 // some cases it may be desireable to use boolean algebra directly on the
-// data to perfomr set operations. Sets can be represented as single
+// data to perform set operations. Sets can be represented as single
 // elements, a bitwise OR of multiple elements, a bitwise OR of multiple
 // set variables or constants, or combinations of the above.
 // Examples:
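A short self-contained illustration of the bit-mask set idiom described above; set_contains is a hypothetical helper, and the flag values are copied from the defines in the next hunk:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t set_t;

#define AES_OPT   2
#define AVX2_OPT  0x10
#define SHA_OPT   0x20

// union and intersection are plain bitwise ops on the masks
static inline set_t set_union( set_t a, set_t b )     { return a | b; }
static inline set_t set_intersect( set_t a, set_t b ) { return a & b; }
static inline bool  set_contains( set_t s, set_t e )  { return ( s & e ) == e; }

static void set_example( void )
{
   set_t opts = set_union( AES_OPT, AVX2_OPT );   // { AES, AVX2 }
   bool has_sha = set_contains( opts, SHA_OPT );  // false
   (void)has_sha;
}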
@@ -90,10 +93,10 @@ typedef uint32_t set_t;
 #define AES_OPT      2
 #define SSE42_OPT    4
 #define AVX_OPT      8     // Sandybridge
-#define AVX2_OPT     0x10  // Haswell
-#define SHA_OPT      0x20  // sha256 (Ryzen, Ice Lake)
-#define AVX512_OPT   0x40  // AVX512- F, VL, DQ, BW (Skylake-X)
-#define VAES_OPT     0x80  // VAES (Ice Lake)
+#define AVX2_OPT     0x10  // Haswell, Zen1
+#define SHA_OPT      0x20  // Zen1, Icelake (sha256)
+#define AVX512_OPT   0x40  // Skylake-X (AVX512[F,VL,DQ,BW])
+#define VAES_OPT     0x80  // Icelake (VAES & AVX512)


 // return set containing all elements from sets a & b
@@ -110,27 +113,25 @@ inline bool set_excl ( set_t a, set_t b ) { return (a & b) == 0; }

 typedef struct
 {
-// mandatory functions, must be overwritten
+// Mandatory functions, one of these is mandatory. If a generic scanhash
+// is used a custom target hash function must be registered, with a custom
+// scanhash the target hash function can be called directly and doesn't need
+// to be registered with the gate.
 int ( *scanhash ) ( struct work*, uint32_t, uint64_t*, struct thr_info* );

 // optional unsafe, must be overwritten if algo uses function
-void ( *hash )     ( void*, const void*, uint32_t ) ;
-void ( *hash_suw ) ( void*, const void* );
+int ( *hash ) ( void*, const void*, int );

 //optional, safe to use default in most cases

-// Allocate thread local buffers and other initialization specific to miner
-// threads.
+// Called once by each miner thread to allocate thread local buffers and
+// other initialization specific to miner threads.
 bool ( *miner_thread_init ) ( int );

 // Generate global blockheader from stratum data.
 void ( *stratum_gen_work ) ( struct stratum_ctx*, struct work* );

 // Get thread local copy of blockheader with unique nonce.
 void ( *get_new_work ) ( struct work*, struct work*, int, uint32_t* );

 // Decode getwork blockheader
-bool ( *work_decode ) ( const json_t*, struct work* );
+bool ( *work_decode ) ( struct work* );

 // Extra getwork data
 void ( *decode_extra_data ) ( struct work*, uint64_t* );
@@ -151,7 +152,7 @@ void ( *build_stratum_request ) ( char*, struct work*, struct stratum_ctx* );

 char* ( *malloc_txs_request ) ( struct work* );

-// Big or little
+// Big endian or little endian
 void ( *set_work_data_endian ) ( struct work* );

 double ( *calc_network_diff ) ( struct work* );
@@ -163,9 +164,11 @@ bool ( *ready_to_mine ) ( struct work*, struct stratum_ctx*, int );
 bool ( *do_this_thread ) ( int );

 // After do_this_thread
-void ( *resync_threads ) ( struct work* );
+void ( *resync_threads ) ( int, struct work* );

 // No longer needed
 json_t* (*longpoll_rpc_call) ( CURL*, int*, char* );

 set_t optimizations;
 int ( *get_work_data_size ) ();
 int ntime_index;
@@ -203,31 +206,74 @@ void four_way_not_tested();
 #define STD_WORK_DATA_SIZE 128
 #define STD_WORK_CMP_SIZE  76

-#define JR2_NONCE_INDEX 39  // 8 bit offset
+//#define JR2_NONCE_INDEX 39  // 8 bit offset

 // These indexes are only used with JSON RPC2 and are not gated.
-#define JR2_WORK_CMP_INDEX_2 43
-#define JR2_WORK_CMP_SIZE_2  33
+//#define JR2_WORK_CMP_INDEX_2 43
+//#define JR2_WORK_CMP_SIZE_2  33

-// allways returns failure
+// deprecated, use generic instead
 int null_scanhash();

+// Default generic, may be used in many cases.
+// N-way is more complicated, requires many different implementations
+// depending on architecture, input format, and output format.
+// Naming convention is scanhash_[N]way_[input format]in_[output format]out
+// N = number of lanes
+// input/output format:
+// 32:  32 bit interleaved parallel lanes
+// 64:  64 bit interleaved parallel lanes
+// 640: input only,  not interleaved, contiguous serial 640 bit lanes.
+// 256: output only, not interleaved, contiguous serial 256 bit lanes.
+
+int scanhash_generic( struct work *work, uint32_t max_nonce,
+                      uint64_t *hashes_done, struct thr_info *mythr );
+
+#if defined(__AVX2__)
+
+//int scanhash_4way_64in_64out( struct work *work, uint32_t max_nonce,
+//                              uint64_t *hashes_done, struct thr_info *mythr );
+
+//int scanhash_4way_64in_256out( struct work *work, uint32_t max_nonce,
+//                               uint64_t *hashes_done, struct thr_info *mythr );
+
+int scanhash_4way_64in_32out( struct work *work, uint32_t max_nonce,
+                              uint64_t *hashes_done, struct thr_info *mythr );
+
+//int scanhash_8way_32in_32out( struct work *work, uint32_t max_nonce,
+//                              uint64_t *hashes_done, struct thr_info *mythr );
+
+#endif
+
+#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
+
+//int scanhash_8way_64in_64out( struct work *work, uint32_t max_nonce,
+//                              uint64_t *hashes_done, struct thr_info *mythr );
+
+//int scanhash_8way_64in_256out( struct work *work, uint32_t max_nonce,
+//                               uint64_t *hashes_done, struct thr_info *mythr );
+
+int scanhash_8way_64in_32out( struct work *work, uint32_t max_nonce,
+                              uint64_t *hashes_done, struct thr_info *mythr );
+
+//int scanhash_16way_32in_32out( struct work *work, uint32_t max_nonce,
+//                               uint64_t *hashes_done, struct thr_info *mythr );
+
+#endif
+
 // displays warning
-void null_hash    ();
-void null_hash_suw();
+int null_hash();

 // optional safe targets, default listed first unless noted.

 void std_get_new_work( struct work *work, struct work *g_work, int thr_id,
                        uint32_t* end_nonce_ptr );

 void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *work );

 void sha256d_gen_merkle_root( char *merkle_root, struct stratum_ctx *sctx );
 void SHA256_gen_merkle_root ( char *merkle_root, struct stratum_ctx *sctx );

-bool std_le_work_decode( const json_t *val, struct work *work );
-bool std_be_work_decode( const json_t *val, struct work *work );
+bool std_le_work_decode( struct work *work );
+bool std_be_work_decode( struct work *work );

 bool std_le_submit_getwork_result( CURL *curl, struct work *work );
 bool std_be_submit_getwork_result( CURL *curl, struct work *work );
@@ -237,7 +283,7 @@ void std_be_build_stratum_request( char *req, struct work *work );

 char* std_malloc_txs_request( struct work *work );

-// Default is do_nothing (assumed LE)
+// Default is do_nothing, little endian is assumed
 void set_work_data_big_endian( struct work *work );

 double std_calc_network_diff( struct work *work );
@@ -250,10 +296,6 @@ void std_build_block_header( struct work* g_work, uint32_t version,
 void std_build_extraheader( struct work *work, struct stratum_ctx *sctx );

 json_t* std_longpoll_rpc_call( CURL *curl, int *err, char *lp_url );
-//json_t* jr2_longpoll_rpc_call( CURL *curl, int *err );
-
-//bool std_stratum_handle_response( json_t *val );
-//bool jr2_stratum_handle_response( json_t *val );

 bool std_ready_to_mine( struct work* work, struct stratum_ctx* stratum,
                         int thr_id );
@@ -266,17 +308,12 @@ int std_get_work_data_size();
 // by calling the algo's register function.
 bool register_algo_gate( int algo, algo_gate_t *gate );

-// Called by algos toverride any default gate functions that are applicable
+// Called by algos to verride any default gate functions that are applicable
 // and do any other algo-specific initialization.
 // The register functions for all the algos can be declared here to reduce
 // compiler warnings but that's just more work for devs adding new algos.
 bool register_algo( algo_gate_t *gate );

-// Overrides a common set of functions used by RPC2 and other RPC2-specific
-// init. Called by algo's register function before initializing algo-specific
-// functions and data.
-//bool register_json_rpc2( algo_gate_t *gate );

 // use this to call the hash function of an algo directly, ie util.c test.
 void exec_hash_function( int algo, void *output, const void *pdata );

@@ -284,3 +321,4 @@ void exec_hash_function( int algo, void *output, const void *pdata );
 // algo name if valid alias, NULL if invalid alias or algo.
 void get_algo_alias( char **algo_or_alias );

+#endif
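The [N]way naming convention above is easiest to see with a scalar model of the 64-bit, 4-lane input interleave. Illustration only: the real kernels use mm256_bswap32_intrlv80_4x64, which also byte-swaps, and this reference loop and its name are hypothetical.

#include <stdint.h>

// "64 bit interleaved parallel lanes", 4 lanes: the 80-byte header
// (10 x 64-bit words) is replicated so word w of lane l sits at
// vdata[ w*4 + l ]. Byte swapping omitted.
static void intrlv_80b_4x64_ref( uint64_t *vdata, const uint64_t *pdata )
{
   for ( int w = 0; w < 10; w++ )
      for ( int lane = 0; lane < 4; lane++ )
         vdata[ w*4 + lane ] = pdata[ w ];
}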
@@ -344,7 +344,7 @@ static size_t
 detect_cpu(void) {
 	//union { uint8_t s[12]; uint32_t i[3]; } vendor_string;
 	//cpu_vendors_x86 vendor = cpu_nobody;
-	x86_regs regs;
+	x86_regs regs; regs.eax = regs.ebx = regs.ecx = 0;
 	uint32_t max_level, max_ext_level;
 	size_t cpu_flags = 0;
 #if defined(X86ASM_AVX) || defined(X86_64ASM_AVX)
@@ -460,4 +460,4 @@ get_top_cpuflag_desc(size_t flag) {
 #endif
 #endif

-#endif /* defined(CPU_X86) || defined(CPU_X86_64) */
+#endif /* defined(CPU_X86) || defined(CPU_X86_64) */
@@ -4,11 +4,12 @@ typedef void (FASTCALL *scrypt_ROMixfn)(scrypt_mix_word_t *X/*[chunkWords]*/, sc
 #endif

 /* romix pre/post nop function */
+/*
 static void asm_calling_convention
 scrypt_romix_nop(scrypt_mix_word_t *blocks, size_t nblocks) {
 	(void)blocks; (void)nblocks;
 }

+*/
 /* romix pre/post endian conversion function */
 static void asm_calling_convention
 scrypt_romix_convert_endian(scrypt_mix_word_t *blocks, size_t nblocks) {

@@ -37,6 +37,13 @@

 #if defined(__AVX512F__)

+static inline __m512i blamka( __m512i x, __m512i y )
+{
+   __m512i xy = _mm512_mul_epu32( x, y );
+   return _mm512_add_epi64( _mm512_add_epi64( x, y ),
+                            _mm512_add_epi64( xy, xy ) );
+}
+
 static void fill_block( __m512i *state, const block *ref_block,
                         block *next_block, int with_xor )
 {
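For reference, the blamka() added above computes x + y + 2*lo32(x)*lo32(y) in each 64-bit lane; _mm512_mul_epu32 multiplies only the low 32 bits of every 64-bit element, which the casts model here. A scalar equivalent (illustrative only):

#include <stdint.h>

// t = lo32(x) * lo32(y);  result = x + y + 2*t  (all mod 2^64)
static inline uint64_t blamka_ref( uint64_t x, uint64_t y )
{
   uint64_t xy = (uint64_t)(uint32_t)x * (uint32_t)y;
   return x + y + 2 * xy;
}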
@@ -328,9 +328,7 @@ static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {

 #include <immintrin.h>

-#define ror64(x, n) _mm512_ror_epi64((x), (n))
-
-static __m512i muladd(__m512i x, __m512i y)
+static inline __m512i muladd(__m512i x, __m512i y)
 {
     __m512i z = _mm512_mul_epu32(x, y);
     return _mm512_add_epi64(_mm512_add_epi64(x, y), _mm512_add_epi64(z, z));
@@ -344,8 +342,8 @@ static __m512i muladd(__m512i x, __m512i y)
         D0 = _mm512_xor_si512(D0, A0); \
         D1 = _mm512_xor_si512(D1, A1); \
 \
-        D0 = ror64(D0, 32); \
-        D1 = ror64(D1, 32); \
+        D0 = _mm512_ror_epi64(D0, 32); \
+        D1 = _mm512_ror_epi64(D1, 32); \
 \
         C0 = muladd(C0, D0); \
         C1 = muladd(C1, D1); \
@@ -353,8 +351,8 @@ static __m512i muladd(__m512i x, __m512i y)
         B0 = _mm512_xor_si512(B0, C0); \
         B1 = _mm512_xor_si512(B1, C1); \
 \
-        B0 = ror64(B0, 24); \
-        B1 = ror64(B1, 24); \
+        B0 = _mm512_ror_epi64(B0, 24); \
+        B1 = _mm512_ror_epi64(B1, 24); \
     } while ((void)0, 0)

 #define G2(A0, B0, C0, D0, A1, B1, C1, D1) \
@@ -365,8 +363,8 @@ static __m512i muladd(__m512i x, __m512i y)
         D0 = _mm512_xor_si512(D0, A0); \
         D1 = _mm512_xor_si512(D1, A1); \
 \
-        D0 = ror64(D0, 16); \
-        D1 = ror64(D1, 16); \
+        D0 = _mm512_ror_epi64(D0, 16); \
+        D1 = _mm512_ror_epi64(D1, 16); \
 \
         C0 = muladd(C0, D0); \
         C1 = muladd(C1, D1); \
@@ -374,8 +372,8 @@ static __m512i muladd(__m512i x, __m512i y)
         B0 = _mm512_xor_si512(B0, C0); \
         B1 = _mm512_xor_si512(B1, C1); \
 \
-        B0 = ror64(B0, 63); \
-        B1 = ror64(B1, 63); \
+        B0 = _mm512_ror_epi64(B0, 63); \
+        B1 = _mm512_ror_epi64(B1, 63); \
     } while ((void)0, 0)

 #define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
@@ -417,11 +415,10 @@ static __m512i muladd(__m512i x, __m512i y)

 #define SWAP_HALVES(A0, A1) \
     do { \
-        __m512i t0, t1; \
-        t0 = _mm512_shuffle_i64x2(A0, A1, _MM_SHUFFLE(1, 0, 1, 0)); \
-        t1 = _mm512_shuffle_i64x2(A0, A1, _MM_SHUFFLE(3, 2, 3, 2)); \
-        A0 = t0; \
-        A1 = t1; \
+        __m512i t; \
+        t  = _mm512_shuffle_i64x2(A0, A1, _MM_SHUFFLE(1, 0, 1, 0)); \
+        A1 = _mm512_shuffle_i64x2(A0, A1, _MM_SHUFFLE(3, 2, 3, 2)); \
+        A0 = t; \
     } while((void)0, 0)

 #define SWAP_QUARTERS(A0, A1) \
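In scalar terms the SWAP_HALVES rewrite above is purely a register-pressure change: with A0 = [a0 a1 a2 a3] and A1 = [b0 b1 b2 b3] viewed as 128-bit lanes, _MM_SHUFFLE(1, 0, 1, 0) gathers the low halves [a0 a1 b0 b1] and _MM_SHUFFLE(3, 2, 3, 2) the high halves [a2 a3 b2 b3], exactly as before; writing A1 directly from the second shuffle just eliminates one of the two temporaries.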
@@ -48,7 +48,7 @@ int scanhash_blake_4way( struct work *work, uint32_t max_nonce,
       if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
       {
          pdata[19] = n+i;
-         submit_lane_solution( work, hash+(i<<3), mythr, i );
+         submit_solution( work, hash+(i<<3), mythr );
       }
       n += 4;

@@ -107,7 +107,7 @@ int scanhash_blake_8way( struct work *work, uint32_t max_nonce,
       if ( (hash+i)[7] <= HTarget && fulltest( hash+i, ptarget ) )
       {
          pdata[19] = n+i;
-         submit_lane_solution( work, hash+(i<<3), mythr, i );
+         submit_solution( work, hash+(i<<3), mythr );
       }
       n += 8;

@@ -180,6 +180,7 @@ void blake512_8way_update( void *cc, const void *data, size_t len );
 void blake512_8way_close( void *cc, void *dst );
 void blake512_8way_full( blake_8way_big_context *sc, void * dst,
                          const void *data, size_t len );
+void blake512_8way_hash_le80( void *hash, const void *data );

 #endif  // AVX512
 #endif  // AVX2

@@ -669,14 +669,14 @@ do { \
    ROUND_S_8WAY(2); \
    ROUND_S_8WAY(3); \
   } \
-  H0 = _mm256_xor_si256( _mm256_xor_si256( V8, V0 ), H0 ); \
-  H1 = _mm256_xor_si256( _mm256_xor_si256( V9, V1 ), H1 ); \
-  H2 = _mm256_xor_si256( _mm256_xor_si256( VA, V2 ), H2 ); \
-  H3 = _mm256_xor_si256( _mm256_xor_si256( VB, V3 ), H3 ); \
-  H4 = _mm256_xor_si256( _mm256_xor_si256( VC, V4 ), H4 ); \
-  H5 = _mm256_xor_si256( _mm256_xor_si256( VD, V5 ), H5 ); \
-  H6 = _mm256_xor_si256( _mm256_xor_si256( VE, V6 ), H6 ); \
-  H7 = _mm256_xor_si256( _mm256_xor_si256( VF, V7 ), H7 ); \
+  H0 = mm256_xor3( V8, V0, H0 ); \
+  H1 = mm256_xor3( V9, V1, H1 ); \
+  H2 = mm256_xor3( VA, V2, H2 ); \
+  H3 = mm256_xor3( VB, V3, H3 ); \
+  H4 = mm256_xor3( VC, V4, H4 ); \
+  H5 = mm256_xor3( VD, V5, H5 ); \
+  H6 = mm256_xor3( VE, V6, H6 ); \
+  H7 = mm256_xor3( VF, V7, H7 ); \
 } while (0)


@@ -808,14 +808,14 @@ do { \
    ROUND_S_16WAY(2); \
    ROUND_S_16WAY(3); \
   } \
-  H0 = _mm512_xor_si512( _mm512_xor_si512( V8, V0 ), H0 ); \
-  H1 = _mm512_xor_si512( _mm512_xor_si512( V9, V1 ), H1 ); \
-  H2 = _mm512_xor_si512( _mm512_xor_si512( VA, V2 ), H2 ); \
-  H3 = _mm512_xor_si512( _mm512_xor_si512( VB, V3 ), H3 ); \
-  H4 = _mm512_xor_si512( _mm512_xor_si512( VC, V4 ), H4 ); \
-  H5 = _mm512_xor_si512( _mm512_xor_si512( VD, V5 ), H5 ); \
-  H6 = _mm512_xor_si512( _mm512_xor_si512( VE, V6 ), H6 ); \
-  H7 = _mm512_xor_si512( _mm512_xor_si512( VF, V7 ), H7 ); \
+  H0 = mm512_xor3( V8, V0, H0 ); \
+  H1 = mm512_xor3( V9, V1, H1 ); \
+  H2 = mm512_xor3( VA, V2, H2 ); \
+  H3 = mm512_xor3( VB, V3, H3 ); \
+  H4 = mm512_xor3( VC, V4, H4 ); \
+  H5 = mm512_xor3( VD, V5, H5 ); \
+  H6 = mm512_xor3( VE, V6, H6 ); \
+  H7 = mm512_xor3( VF, V7, H7 ); \
 } while (0)

 #endif
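mm256_xor3 / mm512_xor3 are three-input XOR helpers from this repo's SIMD utility layer; their definitions are not part of this diff. A sketch of the usual shape (assumed, not the repo's exact code): with AVX512VL the three-way XOR collapses into a single vpternlog instruction, truth table 0x96 being a ^ b ^ c.

#include <immintrin.h>

static inline __m256i xor3_sketch( __m256i a, __m256i b, __m256i c )
{
#if defined(__AVX512VL__)
   return _mm256_ternarylogic_epi64( a, b, c, 0x96 );      // one instruction
#else
   return _mm256_xor_si256( a, _mm256_xor_si256( b, c ) ); // two XORs
#endif
}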
@@ -45,7 +45,7 @@ int scanhash_blake2b_8way( struct work *work, uint32_t max_nonce,
          if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
          {
             pdata[19] = n + lane;
-            submit_lane_solution( work, lane_hash, mythr, lane );
+            submit_solution( work, lane_hash, mythr );
          }
       }
       n += 8;
@@ -100,7 +100,7 @@ int scanhash_blake2b_4way( struct work *work, uint32_t max_nonce,
          if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
          {
             pdata[19] = n + lane;
-            submit_lane_solution( work, lane_hash, mythr, lane );
+            submit_solution( work, lane_hash, mythr );
          }
       }
       n += 4;

@@ -122,14 +122,14 @@ static void blake2b_8way_compress( blake2b_8way_ctx *ctx, int last )
       B2B8W_G( 3, 4, 9, 14, m[ sigma[i][14] ], m[ sigma[i][15] ] );
    }

-   ctx->h[0] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[0], v[0] ), v[ 8] );
-   ctx->h[1] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[1], v[1] ), v[ 9] );
-   ctx->h[2] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[2], v[2] ), v[10] );
-   ctx->h[3] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[3], v[3] ), v[11] );
-   ctx->h[4] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[4], v[4] ), v[12] );
-   ctx->h[5] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[5], v[5] ), v[13] );
-   ctx->h[6] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[6], v[6] ), v[14] );
-   ctx->h[7] = _mm512_xor_si512( _mm512_xor_si512( ctx->h[7], v[7] ), v[15] );
+   ctx->h[0] = mm512_xor3( ctx->h[0], v[0], v[ 8] );
+   ctx->h[1] = mm512_xor3( ctx->h[1], v[1], v[ 9] );
+   ctx->h[2] = mm512_xor3( ctx->h[2], v[2], v[10] );
+   ctx->h[3] = mm512_xor3( ctx->h[3], v[3], v[11] );
+   ctx->h[4] = mm512_xor3( ctx->h[4], v[4], v[12] );
+   ctx->h[5] = mm512_xor3( ctx->h[5], v[5], v[13] );
+   ctx->h[6] = mm512_xor3( ctx->h[6], v[6], v[14] );
+   ctx->h[7] = mm512_xor3( ctx->h[7], v[7], v[15] );
 }

 int blake2b_8way_init( blake2b_8way_ctx *ctx )

@@ -17,7 +17,7 @@

 #if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

-ALIGN(128) typedef struct {
+typedef struct ALIGN( 64 ) {
    __m512i b[16]; // input buffer
    __m512i h[8];  // chained state
    uint64_t t[2]; // total number of bytes
@@ -35,7 +35,7 @@ void blake2b_8way_final( blake2b_8way_ctx *ctx, void *out );
 #if defined(__AVX2__)

 // state context
-ALIGN(128) typedef struct {
+typedef struct ALIGN( 64 ) {
    __m256i b[16]; // input buffer
    __m256i h[8];  // chained state
    uint64_t t[2]; // total number of bytes

@@ -49,7 +49,7 @@ int scanhash_blake2s_16way( struct work *work, uint32_t max_nonce,
          if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
          {
             pdata[19] = n + lane;
-            submit_lane_solution( work, lane_hash, mythr, lane );
+            submit_solution( work, lane_hash, mythr );
          }
       }
       n += 16;
@@ -104,7 +104,7 @@ int scanhash_blake2s_8way( struct work *work, uint32_t max_nonce,
          if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
          {
             pdata[19] = n + lane;
-            submit_lane_solution( work, lane_hash, mythr, lane );
+            submit_solution( work, lane_hash, mythr );
          }
       }
       n += 8;
@@ -157,7 +157,7 @@ int scanhash_blake2s_4way( struct work *work, uint32_t max_nonce,
          if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
          {
             pdata[19] = n + lane;
-            submit_lane_solution( work, lane_hash, mythr, lane );
+            submit_solution( work, lane_hash, mythr );
          }
       }
       n += 4;

@@ -4,7 +4,6 @@
 #include <stdint.h>
 #include "algo-gate-api.h"

-//#if defined(__SSE4_2__)
 #if defined(__SSE2__)
   #define BLAKE2S_4WAY
 #endif
@@ -27,8 +26,6 @@ int scanhash_blake2s_16way( struct work *work, uint32_t max_nonce,

 #elif defined (BLAKE2S_8WAY)

-//#if defined(BLAKE2S_8WAY)
-
 void blake2s_8way_hash( void *state, const void *input );
 int scanhash_blake2s_8way( struct work *work, uint32_t max_nonce,
                            uint64_t *hashes_done, struct thr_info *mythr );
@@ -368,7 +368,7 @@ do { \
   ROUND8W( 9 );

   for( size_t i = 0; i < 8; ++i )
-     S->h[i] = _mm256_xor_si256( _mm256_xor_si256( S->h[i], v[i] ), v[i + 8] );
+     S->h[i] = mm256_xor3( S->h[i], v[i], v[i + 8] );

 #undef G8W
 #undef ROUND8W
@@ -566,7 +566,7 @@ do { \
   ROUND16W( 9 );

   for( size_t i = 0; i < 8; ++i )
-     S->h[i] = _mm512_xor_si512( _mm512_xor_si512( S->h[i], v[i] ), v[i + 8] );
+     S->h[i] = mm512_xor3( S->h[i], v[i], v[i + 8] );

 #undef G16W
 #undef ROUND16W

@@ -60,7 +60,7 @@ typedef struct __blake2s_nway_param
 } blake2s_nway_param;
 #pragma pack(pop)

-ALIGN( 64 ) typedef struct __blake2s_4way_state
+typedef struct ALIGN( 64 ) __blake2s_4way_state
 {
    __m128i h[8];
    uint8_t buf[ BLAKE2S_BLOCKBYTES * 4 ];
@@ -80,7 +80,7 @@ int blake2s_4way_full_blocks( blake2s_4way_state *S, void *out,

 #if defined(__AVX2__)

-ALIGN( 64 ) typedef struct __blake2s_8way_state
+typedef struct ALIGN( 64 ) __blake2s_8way_state
 {
    __m256i h[8];
    uint8_t buf[ BLAKE2S_BLOCKBYTES * 8 ];
@@ -101,7 +101,7 @@ int blake2s_8way_full_blocks( blake2s_8way_state *S, void *out,

 #if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

-ALIGN( 128 ) typedef struct __blake2s_16way_state
+typedef struct ALIGN( 64 ) __blake2s_16way_state
 {
    __m512i h[8];
    uint8_t buf[ BLAKE2S_BLOCKBYTES * 16 ];

@@ -293,10 +293,6 @@ static const sph_u64 CB[16] = {
 		H5 = (state)->H[5]; \
 		H6 = (state)->H[6]; \
 		H7 = (state)->H[7]; \
-		S0 = (state)->S[0]; \
-		S1 = (state)->S[1]; \
-		S2 = (state)->S[2]; \
-		S3 = (state)->S[3]; \
 		T0 = (state)->T0; \
 		T1 = (state)->T1; \
 	} while (0)
@@ -310,10 +306,6 @@ static const sph_u64 CB[16] = {
 		(state)->H[5] = H5; \
 		(state)->H[6] = H6; \
 		(state)->H[7] = H7; \
-		(state)->S[0] = S0; \
-		(state)->S[1] = S1; \
-		(state)->S[2] = S2; \
-		(state)->S[3] = S3; \
 		(state)->T0 = T0; \
 		(state)->T1 = T1; \
 	} while (0)
@@ -348,7 +340,6 @@ static const sph_u64 CB[16] = {

 #define DECL_STATE64_8WAY \
    __m512i H0, H1, H2, H3, H4, H5, H6, H7; \
-   __m512i S0, S1, S2, S3; \
    uint64_t T0, T1;

 #define COMPRESS64_8WAY( buf ) do \
@@ -366,10 +357,10 @@ static const sph_u64 CB[16] = {
   V5 = H5; \
   V6 = H6; \
   V7 = H7; \
-  V8 = _mm512_xor_si512( S0, m512_const1_64( CB0 ) ); \
-  V9 = _mm512_xor_si512( S1, m512_const1_64( CB1 ) ); \
-  VA = _mm512_xor_si512( S2, m512_const1_64( CB2 ) ); \
-  VB = _mm512_xor_si512( S3, m512_const1_64( CB3 ) ); \
+  V8 = m512_const1_64( CB0 ); \
+  V9 = m512_const1_64( CB1 ); \
+  VA = m512_const1_64( CB2 ); \
+  VB = m512_const1_64( CB3 ); \
   VC = _mm512_xor_si512( _mm512_set1_epi64( T0 ), \
                          m512_const1_64( CB4 ) ); \
   VD = _mm512_xor_si512( _mm512_set1_epi64( T0 ), \
@@ -414,14 +405,14 @@ static const sph_u64 CB[16] = {
   ROUND_B_8WAY(3); \
   ROUND_B_8WAY(4); \
   ROUND_B_8WAY(5); \
-  H0 = mm512_xor4( V8, V0, S0, H0 ); \
-  H1 = mm512_xor4( V9, V1, S1, H1 ); \
-  H2 = mm512_xor4( VA, V2, S2, H2 ); \
-  H3 = mm512_xor4( VB, V3, S3, H3 ); \
-  H4 = mm512_xor4( VC, V4, S0, H4 ); \
-  H5 = mm512_xor4( VD, V5, S1, H5 ); \
-  H6 = mm512_xor4( VE, V6, S2, H6 ); \
-  H7 = mm512_xor4( VF, V7, S3, H7 ); \
+  H0 = mm512_xor3( V8, V0, H0 ); \
+  H1 = mm512_xor3( V9, V1, H1 ); \
+  H2 = mm512_xor3( VA, V2, H2 ); \
+  H3 = mm512_xor3( VB, V3, H3 ); \
+  H4 = mm512_xor3( VC, V4, H4 ); \
+  H5 = mm512_xor3( VD, V5, H5 ); \
+  H6 = mm512_xor3( VE, V6, H6 ); \
+  H7 = mm512_xor3( VF, V7, H7 ); \
 } while (0)

 void blake512_8way_compress( blake_8way_big_context *sc )
@@ -440,10 +431,10 @@ void blake512_8way_compress( blake_8way_big_context *sc )
   V5 = sc->H[5];
   V6 = sc->H[6];
   V7 = sc->H[7];
-  V8 = _mm512_xor_si512( sc->S[0], m512_const1_64( CB0 ) );
-  V9 = _mm512_xor_si512( sc->S[1], m512_const1_64( CB1 ) );
-  VA = _mm512_xor_si512( sc->S[2], m512_const1_64( CB2 ) );
-  VB = _mm512_xor_si512( sc->S[3], m512_const1_64( CB3 ) );
+  V8 = m512_const1_64( CB0 );
+  V9 = m512_const1_64( CB1 );
+  VA = m512_const1_64( CB2 );
+  VB = m512_const1_64( CB3 );
   VC = _mm512_xor_si512( _mm512_set1_epi64( sc->T0 ),
                          m512_const1_64( CB4 ) );
   VD = _mm512_xor_si512( _mm512_set1_epi64( sc->T0 ),
@@ -492,19 +483,18 @@ void blake512_8way_compress( blake_8way_big_context *sc )
   ROUND_B_8WAY(4);
   ROUND_B_8WAY(5);

-  sc->H[0] = mm512_xor4( V8, V0, sc->S[0], sc->H[0] );
-  sc->H[1] = mm512_xor4( V9, V1, sc->S[1], sc->H[1] );
-  sc->H[2] = mm512_xor4( VA, V2, sc->S[2], sc->H[2] );
-  sc->H[3] = mm512_xor4( VB, V3, sc->S[3], sc->H[3] );
-  sc->H[4] = mm512_xor4( VC, V4, sc->S[0], sc->H[4] );
-  sc->H[5] = mm512_xor4( VD, V5, sc->S[1], sc->H[5] );
-  sc->H[6] = mm512_xor4( VE, V6, sc->S[2], sc->H[6] );
-  sc->H[7] = mm512_xor4( VF, V7, sc->S[3], sc->H[7] );
+  sc->H[0] = mm512_xor3( V8, V0, sc->H[0] );
+  sc->H[1] = mm512_xor3( V9, V1, sc->H[1] );
+  sc->H[2] = mm512_xor3( VA, V2, sc->H[2] );
+  sc->H[3] = mm512_xor3( VB, V3, sc->H[3] );
+  sc->H[4] = mm512_xor3( VC, V4, sc->H[4] );
+  sc->H[5] = mm512_xor3( VD, V5, sc->H[5] );
+  sc->H[6] = mm512_xor3( VE, V6, sc->H[6] );
+  sc->H[7] = mm512_xor3( VF, V7, sc->H[7] );
 }

 void blake512_8way_init( blake_8way_big_context *sc )
 {
-   __m512i zero = m512_zero;
   casti_m512i( sc->H, 0 ) = m512_const1_64( 0x6A09E667F3BCC908 );
   casti_m512i( sc->H, 1 ) = m512_const1_64( 0xBB67AE8584CAA73B );
   casti_m512i( sc->H, 2 ) = m512_const1_64( 0x3C6EF372FE94F82B );
@@ -514,11 +504,6 @@ void blake512_8way_init( blake_8way_big_context *sc )
   casti_m512i( sc->H, 6 ) = m512_const1_64( 0x1F83D9ABFB41BD6B );
   casti_m512i( sc->H, 7 ) = m512_const1_64( 0x5BE0CD19137E2179 );

-   casti_m512i( sc->S, 0 ) = zero;
-   casti_m512i( sc->S, 1 ) = zero;
-   casti_m512i( sc->S, 2 ) = zero;
-   casti_m512i( sc->S, 3 ) = zero;
-
   sc->T0 = sc->T1 = 0;
   sc->ptr = 0;
 }
@@ -641,11 +626,6 @@ void blake512_8way_full( blake_8way_big_context *sc, void * dst,
   casti_m512i( sc->H, 6 ) = m512_const1_64( 0x1F83D9ABFB41BD6B );
   casti_m512i( sc->H, 7 ) = m512_const1_64( 0x5BE0CD19137E2179 );

-   casti_m512i( sc->S, 0 ) = m512_zero;
-   casti_m512i( sc->S, 1 ) = m512_zero;
-   casti_m512i( sc->S, 2 ) = m512_zero;
-   casti_m512i( sc->S, 3 ) = m512_zero;
-
   sc->T0 = sc->T1 = 0;
   sc->ptr = 0;

@@ -740,7 +720,6 @@ blake512_8way_close(void *cc, void *dst)

 #define DECL_STATE64_4WAY \
    __m256i H0, H1, H2, H3, H4, H5, H6, H7; \
-   __m256i S0, S1, S2, S3; \
    uint64_t T0, T1;

 #define COMPRESS64_4WAY do \
@@ -758,10 +737,10 @@ blake512_8way_close(void *cc, void *dst)
   V5 = H5; \
   V6 = H6; \
   V7 = H7; \
-  V8 = _mm256_xor_si256( S0, m256_const1_64( CB0 ) ); \
-  V9 = _mm256_xor_si256( S1, m256_const1_64( CB1 ) ); \
-  VA = _mm256_xor_si256( S2, m256_const1_64( CB2 ) ); \
-  VB = _mm256_xor_si256( S3, m256_const1_64( CB3 ) ); \
+  V8 = m256_const1_64( CB0 ); \
+  V9 = m256_const1_64( CB1 ); \
+  VA = m256_const1_64( CB2 ); \
+  VB = m256_const1_64( CB3 ); \
   VC = _mm256_xor_si256( _mm256_set1_epi64x( T0 ), \
                          m256_const1_64( CB4 ) ); \
   VD = _mm256_xor_si256( _mm256_set1_epi64x( T0 ), \
@@ -804,14 +783,14 @@ blake512_8way_close(void *cc, void *dst)
   ROUND_B_4WAY(3); \
   ROUND_B_4WAY(4); \
   ROUND_B_4WAY(5); \
-  H0 = mm256_xor4( V8, V0, S0, H0 ); \
-  H1 = mm256_xor4( V9, V1, S1, H1 ); \
-  H2 = mm256_xor4( VA, V2, S2, H2 ); \
-  H3 = mm256_xor4( VB, V3, S3, H3 ); \
-  H4 = mm256_xor4( VC, V4, S0, H4 ); \
-  H5 = mm256_xor4( VD, V5, S1, H5 ); \
-  H6 = mm256_xor4( VE, V6, S2, H6 ); \
-  H7 = mm256_xor4( VF, V7, S3, H7 ); \
+  H0 = mm256_xor3( V8, V0, H0 ); \
+  H1 = mm256_xor3( V9, V1, H1 ); \
+  H2 = mm256_xor3( VA, V2, H2 ); \
+  H3 = mm256_xor3( VB, V3, H3 ); \
+  H4 = mm256_xor3( VC, V4, H4 ); \
+  H5 = mm256_xor3( VD, V5, H5 ); \
+  H6 = mm256_xor3( VE, V6, H6 ); \
+  H7 = mm256_xor3( VF, V7, H7 ); \
 } while (0)


@@ -831,10 +810,10 @@ void blake512_4way_compress( blake_4way_big_context *sc )
   V5 = sc->H[5];
   V6 = sc->H[6];
   V7 = sc->H[7];
-  V8 = _mm256_xor_si256( sc->S[0], m256_const1_64( CB0 ) );
-  V9 = _mm256_xor_si256( sc->S[1], m256_const1_64( CB1 ) );
-  VA = _mm256_xor_si256( sc->S[2], m256_const1_64( CB2 ) );
-  VB = _mm256_xor_si256( sc->S[3], m256_const1_64( CB3 ) );
+  V8 = m256_const1_64( CB0 );
+  V9 = m256_const1_64( CB1 );
+  VA = m256_const1_64( CB2 );
+  VB = m256_const1_64( CB3 );
   VC = _mm256_xor_si256( _mm256_set1_epi64x( sc->T0 ),
                          m256_const1_64( CB4 ) );
   VD = _mm256_xor_si256( _mm256_set1_epi64x( sc->T0 ),
@@ -880,19 +859,18 @@ void blake512_4way_compress( blake_4way_big_context *sc )
   ROUND_B_4WAY(4);
   ROUND_B_4WAY(5);

-  sc->H[0] = mm256_xor4( V8, V0, sc->S[0], sc->H[0] );
-  sc->H[1] = mm256_xor4( V9, V1, sc->S[1], sc->H[1] );
-  sc->H[2] = mm256_xor4( VA, V2, sc->S[2], sc->H[2] );
-  sc->H[3] = mm256_xor4( VB, V3, sc->S[3], sc->H[3] );
-  sc->H[4] = mm256_xor4( VC, V4, sc->S[0], sc->H[4] );
-  sc->H[5] = mm256_xor4( VD, V5, sc->S[1], sc->H[5] );
-  sc->H[6] = mm256_xor4( VE, V6, sc->S[2], sc->H[6] );
-  sc->H[7] = mm256_xor4( VF, V7, sc->S[3], sc->H[7] );
+  sc->H[0] = mm256_xor3( V8, V0, sc->H[0] );
+  sc->H[1] = mm256_xor3( V9, V1, sc->H[1] );
+  sc->H[2] = mm256_xor3( VA, V2, sc->H[2] );
+  sc->H[3] = mm256_xor3( VB, V3, sc->H[3] );
+  sc->H[4] = mm256_xor3( VC, V4, sc->H[4] );
+  sc->H[5] = mm256_xor3( VD, V5, sc->H[5] );
+  sc->H[6] = mm256_xor3( VE, V6, sc->H[6] );
+  sc->H[7] = mm256_xor3( VF, V7, sc->H[7] );
 }

 void blake512_4way_init( blake_4way_big_context *sc )
 {
-   __m256i zero = m256_zero;
   casti_m256i( sc->H, 0 ) = m256_const1_64( 0x6A09E667F3BCC908 );
   casti_m256i( sc->H, 1 ) = m256_const1_64( 0xBB67AE8584CAA73B );
   casti_m256i( sc->H, 2 ) = m256_const1_64( 0x3C6EF372FE94F82B );
@@ -902,11 +880,6 @@ void blake512_4way_init( blake_4way_big_context *sc )
   casti_m256i( sc->H, 6 ) = m256_const1_64( 0x1F83D9ABFB41BD6B );
   casti_m256i( sc->H, 7 ) = m256_const1_64( 0x5BE0CD19137E2179 );

-   casti_m256i( sc->S, 0 ) = zero;
-   casti_m256i( sc->S, 1 ) = zero;
-   casti_m256i( sc->S, 2 ) = zero;
-   casti_m256i( sc->S, 3 ) = zero;
-
   sc->T0 = sc->T1 = 0;
   sc->ptr = 0;
 }
@@ -1026,11 +999,6 @@ void blake512_4way_full( blake_4way_big_context *sc, void * dst,
   casti_m256i( sc->H, 6 ) = m256_const1_64( 0x1F83D9ABFB41BD6B );
   casti_m256i( sc->H, 7 ) = m256_const1_64( 0x5BE0CD19137E2179 );

-   casti_m256i( sc->S, 0 ) = m256_zero;
-   casti_m256i( sc->S, 1 ) = m256_zero;
-   casti_m256i( sc->S, 2 ) = m256_zero;
-   casti_m256i( sc->S, 3 ) = m256_zero;
-
   sc->T0 = sc->T1 = 0;
   sc->ptr = 0;
@@ -49,7 +49,7 @@ int scanhash_blakecoin_4way( struct work *work, uint32_t max_nonce,
            && !opt_benchmark )
       {
          pdata[19] = n+i;
-         submit_lane_solution( work, hash+(i<<3), mythr, i );
+         submit_solution( work, hash+(i<<3), mythr );
       }
       n += 4;

@@ -108,7 +108,7 @@ int scanhash_blakecoin_8way( struct work *work, uint32_t max_nonce,
            && !opt_benchmark )
       {
          pdata[19] = n+i;
-         submit_lane_solution( work, hash+(i<<3), mythr, i );
+         submit_solution( work, hash+(i<<3), mythr );
       }
       n += 8;
    } while ( (n < max_nonce) && !work_restart[thr_id].restart );

@@ -62,7 +62,7 @@ int scanhash_decred_4way( struct work *work, uint32_t max_nonce,
       if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
       {
          pdata[DECRED_NONCE_INDEX] = n+i;
-         submit_lane_solution( work, hash+(i<<3), mythr, i );
+         submit_solution( work, hash+(i<<3), mythr );
       }
       n += 4;
    } while ( (n < max_nonce) && !work_restart[thr_id].restart );

@@ -8,7 +8,7 @@ uint32_t *decred_get_nonceptr( uint32_t *work_data )
    return &work_data[ DECRED_NONCE_INDEX ];
 }

-double decred_calc_network_diff( struct work* work )
+long double decred_calc_network_diff( struct work* work )
 {
    // sample for diff 43.281 : 1c05ea29
    // todo: endian reversed on longpoll could be zr5 specific...
@@ -16,7 +16,7 @@ double decred_calc_network_diff( struct work* work )
    uint32_t bits = ( nbits & 0xffffff );
    int16_t shift = ( swab32(nbits) & 0xff ); // 0x1c = 28
    int m;
-   double d = (double)0x0000ffff / (double)bits;
+   long double d = (long double)0x0000ffff / (long double)bits;

    for ( m = shift; m < 29; m++ )
        d *= 256.0;
@@ -25,7 +25,7 @@ double decred_calc_network_diff( struct work* work )
    if ( shift == 28 )
        d *= 256.0; // testnet
    if ( opt_debug_diff )
-      applog( LOG_DEBUG, "net diff: %f -> shift %u, bits %08x", d,
+      applog( LOG_DEBUG, "net diff: %f -> shift %u, bits %08x", (double)d,
              shift, bits );
    return net_diff;
 }
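The sample quoted in the comment above checks out. A worked version of the arithmetic for nbits 0x1c05ea29, simplified to take the shift from the top byte of the compact form (the real code recovers it via swab32 because of how nbits sits in the header):

#include <stdint.h>
#include <stdio.h>

int main( void )
{
   uint32_t nbits = 0x1c05ea29;        // sample from the comment
   uint32_t bits  = nbits & 0xffffff;  // 0x05ea29 = 387625
   int      shift = nbits >> 24;       // 0x1c = 28
   long double d  = (long double)0x0000ffff / (long double)bits; // ~0.16907
   for ( int m = shift; m < 29; m++ )  // one iteration: m = 28
      d *= 256.0;
   printf( "%.4Lf\n", d );             // prints 43.2817, i.e. the quoted 43.281
   return 0;
}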
@@ -70,7 +70,10 @@ void decred_be_build_stratum_request( char *req, struct work *work,
          rpc_user, work->job_id, xnonce2str, ntimestr, noncestr );
    free(xnonce2str);
 }

+#if !defined(min)
+#define min(a,b) (a>b ? (b) :(a))
+#endif

 void decred_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
 {
@@ -78,7 +81,6 @@ void decred_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
    uint32_t extraheader[32] = { 0 };
    int headersize = 0;
    uint32_t* extradata = (uint32_t*) sctx->xnonce1;
-   size_t t;
    int i;

    // getwork over stratum, getwork merkle + header passed in coinb1
@@ -87,9 +89,6 @@ void decred_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
                      sizeof(extraheader) );
    memcpy( extraheader, &sctx->job.coinbase[32], headersize );

-   // Increment extranonce2
-   for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ );
-
    // Assemble block header
    memset( g_work->data, 0, sizeof(g_work->data) );
    g_work->data[0] = le32dec( sctx->job.version );
@@ -105,7 +105,7 @@ int scanhash_pentablake_4way( struct work *work,
&& fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[19] = n + i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
submit_solution( work, hash+(i<<3), mythr );
}
n += 4;
@@ -323,7 +323,7 @@ int blake2s_final( blake2s_state *S, uint8_t *out, uint8_t outlen )

int blake2s( uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen )
{
blake2s_state S[1];
blake2s_state S;

/* Verify parameters */
if ( NULL == in ) return -1;
@@ -334,15 +334,15 @@ int blake2s( uint8_t *out, const void *in, const void *key, const uint8_t outlen

if( keylen > 0 )
{
if( blake2s_init_key( S, outlen, key, keylen ) < 0 ) return -1;
if( blake2s_init_key( &S, outlen, key, keylen ) < 0 ) return -1;
}
else
{
if( blake2s_init( S, outlen ) < 0 ) return -1;
if( blake2s_init( &S, outlen ) < 0 ) return -1;
}

blake2s_update( S, ( uint8_t * )in, inlen );
blake2s_final( S, out, outlen );
blake2s_update( &S, ( uint8_t * )in, inlen );
blake2s_final( &S, out, outlen );
return 0;
}
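The hunk above replaces the one-element array idiom S[1] with a plain stack variable and explicit address-of. A hypothetical caller of the one-shot API, using the signature from the hunk; the message is illustrative:

#include <stdint.h>
#include <string.h>

// Prototype as shown in the hunk above.
int blake2s( uint8_t *out, const void *in, const void *key,
             const uint8_t outlen, const uint64_t inlen, uint8_t keylen );

// Unkeyed 256-bit digest of a short message; returns 0 on success.
int hash_message( uint8_t digest[32] )
{
    const char msg[] = "abc";   // illustrative input
    return blake2s( digest, msg, NULL, 32, strlen( msg ), 0 );
}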
@@ -116,7 +116,7 @@ extern "C" {
uint8_t personal[BLAKE2S_PERSONALBYTES]; // 32
} blake2s_param;

ALIGN( 64 ) typedef struct __blake2s_state
typedef struct ALIGN( 64 ) __blake2s_state
{
uint32_t h[8];
uint32_t t[2];
@@ -18,7 +18,7 @@
#endif

// state context
ALIGN(64) typedef struct {
typedef ALIGN(64) struct {
uint8_t b[128]; // input buffer
uint64_t h[8]; // chained state
uint64_t t[2]; // total number of bytes
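Both hunks move the alignment attribute so it follows the struct keyword. A minimal sketch of the distinction, assuming a GCC/Clang-style ALIGN macro; the member layout is illustrative, not the real state struct:

#include <stdint.h>

#define ALIGN(x) __attribute__((aligned(x)))   // GCC/Clang style assumed

/* Placed before `typedef`, the attribute is not reliably attached to the
   type (some compilers ignore it or warn); placed after `struct`, it
   applies to the type itself, so every instance gets the alignment. */
typedef struct ALIGN(64) example_state {
    uint8_t  b[128];   // input buffer
    uint64_t h[8];     // chained state
} example_state_t;

_Static_assert( _Alignof(example_state_t) == 64, "alignment applied" );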
@@ -867,40 +867,35 @@ void compress_small_8way( const __m256i *M, const __m256i H[16],
qt[30] = expand2s8( qt, M, H, 30 );
qt[31] = expand2s8( qt, M, H, 31 );

xl = _mm256_xor_si256(
mm256_xor4( qt[16], qt[17], qt[18], qt[19] ),
mm256_xor4( qt[20], qt[21], qt[22], qt[23] ) );
xh = _mm256_xor_si256( xl, _mm256_xor_si256(
mm256_xor4( qt[24], qt[25], qt[26], qt[27] ),
mm256_xor4( qt[28], qt[29], qt[30], qt[31] ) ) );
xl = mm256_xor3( mm256_xor3( qt[16], qt[17], qt[18] ),
mm256_xor3( qt[19], qt[20], qt[21] ),
_mm256_xor_si256( qt[22], qt[23] ) );

xh = mm256_xor3( mm256_xor3( xl, qt[24], qt[25] ),
mm256_xor3( qt[26], qt[27], qt[28] ),
mm256_xor3( qt[29], qt[30], qt[31] ) );

#define DH1L( m, sl, sr, a, b, c ) \
_mm256_add_epi32( \
_mm256_xor_si256( M[m], \
_mm256_xor_si256( _mm256_slli_epi32( xh, sl ), \
_mm256_srli_epi32( qt[a], sr ) ) ), \
_mm256_xor_si256( _mm256_xor_si256( xl, qt[b] ), qt[c] ) )
_mm256_add_epi32( mm256_xor3( M[m], _mm256_slli_epi32( xh, sl ), \
_mm256_srli_epi32( qt[a], sr ) ), \
mm256_xor3( xl, qt[b], qt[c] ) )

#define DH1R( m, sl, sr, a, b, c ) \
_mm256_add_epi32( \
_mm256_xor_si256( M[m], \
_mm256_xor_si256( _mm256_srli_epi32( xh, sl ), \
_mm256_slli_epi32( qt[a], sr ) ) ), \
_mm256_xor_si256( _mm256_xor_si256( xl, qt[b] ), qt[c] ) )
_mm256_add_epi32( mm256_xor3( M[m], _mm256_srli_epi32( xh, sl ), \
_mm256_slli_epi32( qt[a], sr ) ), \
mm256_xor3( xl, qt[b], qt[c] ) )

#define DH2L( m, rl, sl, h, a, b, c ) \
_mm256_add_epi32( _mm256_add_epi32( \
mm256_rol_32( dH[h], rl ), \
_mm256_xor_si256( _mm256_xor_si256( xh, qt[a] ), M[m] )), \
_mm256_xor_si256( _mm256_slli_epi32( xl, sl ), \
_mm256_xor_si256( qt[b], qt[c] ) ) );
mm256_rol_32( dH[h], rl ), \
mm256_xor3( xh, qt[a], M[m] ) ), \
mm256_xor3( _mm256_slli_epi32( xl, sl ), qt[b], qt[c] ) )

#define DH2R( m, rl, sr, h, a, b, c ) \
_mm256_add_epi32( _mm256_add_epi32( \
mm256_rol_32( dH[h], rl ), \
_mm256_xor_si256( _mm256_xor_si256( xh, qt[a] ), M[m] )), \
_mm256_xor_si256( _mm256_srli_epi32( xl, sr ), \
_mm256_xor_si256( qt[b], qt[c] ) ) );
mm256_rol_32( dH[h], rl ), \
mm256_xor3( xh, qt[a], M[m] ) ), \
mm256_xor3( _mm256_srli_epi32( xl, sr ), qt[b], qt[c] ) )

dH[ 0] = DH1L( 0, 5, 5, 16, 24, 0 );
dH[ 1] = DH1R( 1, 7, 8, 17, 25, 1 );
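The pattern in this hunk, folding chains of two-input XORs and mm256_xor4 into mm256_xor3, pays off because a three-input XOR can be a single ternary-logic instruction on AVX512VL-capable CPUs. A minimal sketch of such a helper, not the project's actual macro definition:

#include <immintrin.h>

#if defined(__AVX512VL__)
// 0x96 is the truth table for a ^ b ^ c: one vpternlogq per three inputs.
static inline __m256i xor3_256( __m256i a, __m256i b, __m256i c )
{   return _mm256_ternarylogic_epi64( a, b, c, 0x96 );   }
#else
// Plain AVX2 fallback: two XOR instructions.
static inline __m256i xor3_256( __m256i a, __m256i b, __m256i c )
{   return _mm256_xor_si256( a, _mm256_xor_si256( b, c ) );   }
#endif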
@@ -924,88 +919,6 @@ void compress_small_8way( const __m256i *M, const __m256i H[16],
#undef DH2L
#undef DH2R

/*
dH[ 0] = _mm256_add_epi32(
_mm256_xor_si256( M[0],
_mm256_xor_si256( _mm256_slli_epi32( xh, 5 ),
_mm256_srli_epi32( qt[16], 5 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[24] ), qt[ 0] ));
dH[ 1] = _mm256_add_epi32(
_mm256_xor_si256( M[1],
_mm256_xor_si256( _mm256_srli_epi32( xh, 7 ),
_mm256_slli_epi32( qt[17], 8 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[25] ), qt[ 1] ));
dH[ 2] = _mm256_add_epi32(
_mm256_xor_si256( M[2],
_mm256_xor_si256( _mm256_srli_epi32( xh, 5 ),
_mm256_slli_epi32( qt[18], 5 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[26] ), qt[ 2] ));
dH[ 3] = _mm256_add_epi32(
_mm256_xor_si256( M[3],
_mm256_xor_si256( _mm256_srli_epi32( xh, 1 ),
_mm256_slli_epi32( qt[19], 5 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[27] ), qt[ 3] ));
dH[ 4] = _mm256_add_epi32(
_mm256_xor_si256( M[4],
_mm256_xor_si256( _mm256_srli_epi32( xh, 3 ),
_mm256_slli_epi32( qt[20], 0 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[28] ), qt[ 4] ));
dH[ 5] = _mm256_add_epi32(
_mm256_xor_si256( M[5],
_mm256_xor_si256( _mm256_slli_epi32( xh, 6 ),
_mm256_srli_epi32( qt[21], 6 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[29] ), qt[ 5] ));
dH[ 6] = _mm256_add_epi32(
_mm256_xor_si256( M[6],
_mm256_xor_si256( _mm256_srli_epi32( xh, 4 ),
_mm256_slli_epi32( qt[22], 6 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[30] ), qt[ 6] ));
dH[ 7] = _mm256_add_epi32(
_mm256_xor_si256( M[7],
_mm256_xor_si256( _mm256_srli_epi32( xh, 11 ),
_mm256_slli_epi32( qt[23], 2 ) ) ),
_mm256_xor_si256( _mm256_xor_si256( xl, qt[31] ), qt[ 7] ));
dH[ 8] = _mm256_add_epi32( _mm256_add_epi32(
mm256_rol_32( dH[4], 9 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[24] ), M[ 8] )),
_mm256_xor_si256( _mm256_slli_epi32( xl, 8 ),
_mm256_xor_si256( qt[23], qt[ 8] ) ) );
dH[ 9] = _mm256_add_epi32( _mm256_add_epi32(
mm256_rol_32( dH[5], 10 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[25] ), M[ 9] )),
_mm256_xor_si256( _mm256_srli_epi32( xl, 6 ),
_mm256_xor_si256( qt[16], qt[ 9] ) ) );
dH[10] = _mm256_add_epi32( _mm256_add_epi32(
mm256_rol_32( dH[6], 11 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[26] ), M[10] )),
_mm256_xor_si256( _mm256_slli_epi32( xl, 6 ),
_mm256_xor_si256( qt[17], qt[10] ) ) );
dH[11] = _mm256_add_epi32( _mm256_add_epi32(
mm256_rol_32( dH[7], 12 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[27] ), M[11] )),
_mm256_xor_si256( _mm256_slli_epi32( xl, 4 ),
_mm256_xor_si256( qt[18], qt[11] ) ) );
dH[12] = _mm256_add_epi32( _mm256_add_epi32(
mm256_rol_32( dH[0], 13 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[28] ), M[12] )),
_mm256_xor_si256( _mm256_srli_epi32( xl, 3 ),
_mm256_xor_si256( qt[19], qt[12] ) ) );
dH[13] = _mm256_add_epi32( _mm256_add_epi32(
mm256_rol_32( dH[1], 14 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[29] ), M[13] )),
_mm256_xor_si256( _mm256_srli_epi32( xl, 4 ),
_mm256_xor_si256( qt[20], qt[13] ) ) );
dH[14] = _mm256_add_epi32( _mm256_add_epi32(
mm256_rol_32( dH[2], 15 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[30] ), M[14] )),
_mm256_xor_si256( _mm256_srli_epi32( xl, 7 ),
_mm256_xor_si256( qt[21], qt[14] ) ) );
dH[15] = _mm256_add_epi32( _mm256_add_epi32(
mm256_rol_32( dH[3], 16 ),
_mm256_xor_si256( _mm256_xor_si256( xh, qt[31] ), M[15] )),
_mm256_xor_si256( _mm256_srli_epi32( xl, 2 ),
_mm256_xor_si256( qt[22], qt[15] ) ) );
*/
}

static const __m256i final_s8[16] =
@@ -1422,40 +1335,35 @@ void compress_small_16way( const __m512i *M, const __m512i H[16],
qt[30] = expand2s16( qt, M, H, 30 );
qt[31] = expand2s16( qt, M, H, 31 );

xl = _mm512_xor_si512(
mm512_xor4( qt[16], qt[17], qt[18], qt[19] ),
mm512_xor4( qt[20], qt[21], qt[22], qt[23] ) );
xh = _mm512_xor_si512( xl, _mm512_xor_si512(
mm512_xor4( qt[24], qt[25], qt[26], qt[27] ),
mm512_xor4( qt[28], qt[29], qt[30], qt[31] ) ) );
xl = mm512_xor3( mm512_xor3( qt[16], qt[17], qt[18] ),
mm512_xor3( qt[19], qt[20], qt[21] ),
_mm512_xor_si512( qt[22], qt[23] ) );

xh = mm512_xor3( mm512_xor3( xl, qt[24], qt[25] ),
mm512_xor3( qt[26], qt[27], qt[28] ),
mm512_xor3( qt[29], qt[30], qt[31] ) );

#define DH1L( m, sl, sr, a, b, c ) \
_mm512_add_epi32( \
_mm512_xor_si512( M[m], \
_mm512_xor_si512( _mm512_slli_epi32( xh, sl ), \
_mm512_srli_epi32( qt[a], sr ) ) ), \
_mm512_xor_si512( _mm512_xor_si512( xl, qt[b] ), qt[c] ) )
_mm512_add_epi32( mm512_xor3( M[m], _mm512_slli_epi32( xh, sl ), \
_mm512_srli_epi32( qt[a], sr ) ), \
mm512_xor3( xl, qt[b], qt[c] ) )

#define DH1R( m, sl, sr, a, b, c ) \
_mm512_add_epi32( \
_mm512_xor_si512( M[m], \
_mm512_xor_si512( _mm512_srli_epi32( xh, sl ), \
_mm512_slli_epi32( qt[a], sr ) ) ), \
_mm512_xor_si512( _mm512_xor_si512( xl, qt[b] ), qt[c] ) )
_mm512_add_epi32( mm512_xor3( M[m], _mm512_srli_epi32( xh, sl ), \
_mm512_slli_epi32( qt[a], sr ) ), \
mm512_xor3( xl, qt[b], qt[c] ) )

#define DH2L( m, rl, sl, h, a, b, c ) \
_mm512_add_epi32( _mm512_add_epi32( \
mm512_rol_32( dH[h], rl ), \
_mm512_xor_si512( _mm512_xor_si512( xh, qt[a] ), M[m] )), \
_mm512_xor_si512( _mm512_slli_epi32( xl, sl ), \
_mm512_xor_si512( qt[b], qt[c] ) ) );
mm512_rol_32( dH[h], rl ), \
mm512_xor3( xh, qt[a], M[m] ) ), \
mm512_xor3( _mm512_slli_epi32( xl, sl ), qt[b], qt[c] ) )

#define DH2R( m, rl, sr, h, a, b, c ) \
_mm512_add_epi32( _mm512_add_epi32( \
mm512_rol_32( dH[h], rl ), \
_mm512_xor_si512( _mm512_xor_si512( xh, qt[a] ), M[m] )), \
_mm512_xor_si512( _mm512_srli_epi32( xl, sr ), \
_mm512_xor_si512( qt[b], qt[c] ) ) );
mm512_rol_32( dH[h], rl ), \
mm512_xor3( xh, qt[a], M[m] ) ), \
mm512_xor3( _mm512_srli_epi32( xl, sr ), qt[b], qt[c] ) )

dH[ 0] = DH1L( 0, 5, 5, 16, 24, 0 );
dH[ 1] = DH1R( 1, 7, 8, 17, 25, 1 );
@@ -46,7 +46,7 @@ int scanhash_bmw512_8way( struct work *work, uint32_t max_nonce,
if ( fulltest( lane_hash, ptarget ) )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
submit_solution( work, lane_hash, mythr );
}
}
n += 8;
@@ -99,7 +99,7 @@ int scanhash_bmw512_4way( struct work *work, uint32_t max_nonce,
if ( fulltest( lane_hash, ptarget ) )
{
pdata[19] = n + lane;
submit_lane_solution( work, lane_hash, mythr, lane );
submit_solution( work, lane_hash, mythr );
}
}
n += 4;
@@ -594,22 +594,15 @@ void bmw512_2way_close( bmw_2way_big_context *ctx, void *dst )
#define rb6(x) mm256_rol_64( x, 43 )
#define rb7(x) mm256_rol_64( x, 53 )

#define rol_off_64( M, j, off ) \
mm256_rol_64( M[ ( (j) + (off) ) & 0xF ] , \
( ( (j) + (off) ) & 0xF ) + 1 )
#define rol_off_64( M, j ) \
mm256_rol_64( M[ (j) & 0xF ], ( (j) & 0xF ) + 1 )

#define add_elt_b( M, H, j ) \
_mm256_xor_si256( \
_mm256_add_epi64( \
_mm256_sub_epi64( _mm256_add_epi64( rol_off_64( M, j, 0 ), \
rol_off_64( M, j, 3 ) ), \
rol_off_64( M, j, 10 ) ), \
_mm256_set1_epi64x( ( (j) + 16 ) * 0x0555555555555555ULL ) ), \
H[ ( (j)+7 ) & 0xF ] )
#define add_elt_b( mj0, mj3, mj10, h, K ) \
_mm256_xor_si256( h, _mm256_add_epi64( K, \
_mm256_sub_epi64( _mm256_add_epi64( mj0, mj3 ), mj10 ) ) )


#define expand1b( qt, M, H, i ) \
_mm256_add_epi64( mm256_add4_64( \
#define expand1_b( qt, i ) \
mm256_add4_64( \
mm256_add4_64( sb1( qt[ (i)-16 ] ), sb2( qt[ (i)-15 ] ), \
sb3( qt[ (i)-14 ] ), sb0( qt[ (i)-13 ] )), \
mm256_add4_64( sb1( qt[ (i)-12 ] ), sb2( qt[ (i)-11 ] ), \
@@ -617,11 +610,10 @@ void bmw512_2way_close( bmw_2way_big_context *ctx, void *dst )
mm256_add4_64( sb1( qt[ (i)- 8 ] ), sb2( qt[ (i)- 7 ] ), \
sb3( qt[ (i)- 6 ] ), sb0( qt[ (i)- 5 ] )), \
mm256_add4_64( sb1( qt[ (i)- 4 ] ), sb2( qt[ (i)- 3 ] ), \
sb3( qt[ (i)- 2 ] ), sb0( qt[ (i)- 1 ] ) ) ), \
add_elt_b( M, H, (i)-16 ) )
sb3( qt[ (i)- 2 ] ), sb0( qt[ (i)- 1 ] ) ) )

#define expand2b( qt, M, H, i) \
_mm256_add_epi64( mm256_add4_64( \
#define expand2_b( qt, i) \
mm256_add4_64( \
mm256_add4_64( qt[ (i)-16 ], rb1( qt[ (i)-15 ] ), \
qt[ (i)-14 ], rb2( qt[ (i)-13 ] ) ), \
mm256_add4_64( qt[ (i)-12 ], rb3( qt[ (i)-11 ] ), \
@@ -629,159 +621,98 @@ void bmw512_2way_close( bmw_2way_big_context *ctx, void *dst )
mm256_add4_64( qt[ (i)- 8 ], rb5( qt[ (i)- 7 ] ), \
qt[ (i)- 6 ], rb6( qt[ (i)- 5 ] ) ), \
mm256_add4_64( qt[ (i)- 4 ], rb7( qt[ (i)- 3 ] ), \
sb4( qt[ (i)- 2 ] ), sb5( qt[ (i)- 1 ] ) ) ), \
add_elt_b( M, H, (i)-16 ) )


sb4( qt[ (i)- 2 ] ), sb5( qt[ (i)- 1 ] ) ) )

#define Wb0 \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 5], H[ 5] ), \
_mm256_xor_si256( M[ 7], H[ 7] ) ), \
_mm256_xor_si256( M[10], H[10] ) ), \
_mm256_add_epi64( _mm256_xor_si256( M[13], H[13] ), \
_mm256_xor_si256( M[14], H[14] ) ) )
_mm256_add_epi64( _mm256_sub_epi64( mh[ 5], mh[ 7] ), mh[10] ), \
_mm256_add_epi64( mh[13], mh[14] ) )

#define Wb1 \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 6], H[ 6] ), \
_mm256_xor_si256( M[ 8], H[ 8] ) ), \
_mm256_xor_si256( M[11], H[11] ) ), \
_mm256_sub_epi64( _mm256_xor_si256( M[14], H[14] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
_mm256_add_epi64( _mm256_sub_epi64( mh[ 6], mh[ 8] ), mh[11] ), \
_mm256_sub_epi64( mh[14], mh[15] ) )

#define Wb2 \
_mm256_sub_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( _mm256_xor_si256( M[ 0], H[ 0] ), \
_mm256_xor_si256( M[ 7], H[ 7] ) ), \
_mm256_xor_si256( M[ 9], H[ 9] ) ), \
_mm256_sub_epi64( _mm256_xor_si256( M[12], H[12] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
_mm256_add_epi64( _mm256_add_epi64( mh[ 0], mh[ 7] ), mh[ 9] ), \
_mm256_sub_epi64( mh[12], mh[15] ) )

#define Wb3 \
_mm256_sub_epi64( \
_mm256_add_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 0], H[ 0] ), \
_mm256_xor_si256( M[ 1], H[ 1] ) ), \
_mm256_xor_si256( M[ 8], H[ 8] ) ), \
_mm256_sub_epi64( _mm256_xor_si256( M[10], H[10] ), \
_mm256_xor_si256( M[13], H[13] ) ) )
_mm256_add_epi64( _mm256_sub_epi64( mh[ 0], mh[ 1] ), mh[ 8] ), \
_mm256_sub_epi64( mh[10], \
mh[13] ) )

#define Wb4 \
_mm256_sub_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( _mm256_xor_si256( M[ 1], H[ 1] ), \
_mm256_xor_si256( M[ 2], H[ 2] ) ), \
_mm256_xor_si256( M[ 9], H[ 9] ) ), \
_mm256_add_epi64( _mm256_xor_si256( M[11], H[11] ), \
_mm256_xor_si256( M[14], H[14] ) ) )
_mm256_add_epi64( _mm256_add_epi64( mh[ 1], mh[ 2] ), mh[ 9] ), \
_mm256_add_epi64( mh[11], mh[14] ) )

#define Wb5 \
_mm256_sub_epi64( \
_mm256_add_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 3], H[ 3] ), \
_mm256_xor_si256( M[ 2], H[ 2] ) ), \
_mm256_xor_si256( M[10], H[10] ) ), \
_mm256_sub_epi64( _mm256_xor_si256( M[12], H[12] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
_mm256_add_epi64( _mm256_sub_epi64( mh[ 3], mh[ 2] ), mh[10] ), \
_mm256_sub_epi64( mh[12], mh[15] ) )

#define Wb6 \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 4], H[ 4] ), \
_mm256_xor_si256( M[ 0], H[ 0] ) ), \
_mm256_xor_si256( M[ 3], H[ 3] ) ), \
_mm256_sub_epi64( _mm256_xor_si256( M[11], H[11] ), \
_mm256_xor_si256( M[13], H[13] ) ) )
_mm256_sub_epi64( _mm256_sub_epi64( mh[ 4], mh[ 0] ), mh[ 3] ), \
_mm256_sub_epi64( mh[11], mh[13] ) )

#define Wb7 \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 1], H[ 1] ), \
_mm256_xor_si256( M[ 4], H[ 4] ) ), \
_mm256_xor_si256( M[ 5], H[ 5] ) ), \
_mm256_add_epi64( _mm256_xor_si256( M[12], H[12] ), \
_mm256_xor_si256( M[14], H[14] ) ) )
_mm256_sub_epi64( _mm256_sub_epi64( mh[ 1], mh[ 4] ), mh[ 5] ), \
_mm256_add_epi64( mh[12], mh[14] ) )

#define Wb8 \
_mm256_add_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 2], H[ 2] ), \
_mm256_xor_si256( M[ 5], H[ 5] ) ), \
_mm256_xor_si256( M[ 6], H[ 6] ) ), \
_mm256_sub_epi64( _mm256_xor_si256( M[13], H[13] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
_mm256_sub_epi64( _mm256_sub_epi64( mh[ 2], mh[ 5] ), mh[ 6] ), \
_mm256_sub_epi64( mh[13], mh[15] ) )

#define Wb9 \
_mm256_sub_epi64( \
_mm256_add_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 0], H[ 0] ), \
_mm256_xor_si256( M[ 3], H[ 3] ) ), \
_mm256_xor_si256( M[ 6], H[ 6] ) ), \
_mm256_sub_epi64( _mm256_xor_si256( M[ 7], H[ 7] ), \
_mm256_xor_si256( M[14], H[14] ) ) )
_mm256_add_epi64( _mm256_sub_epi64( mh[ 0], mh[ 3] ), mh[ 6] ), \
_mm256_sub_epi64( mh[ 7], mh[14] ) )

#define Wb10 \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 8], H[ 8] ), \
_mm256_xor_si256( M[ 1], H[ 1] ) ), \
_mm256_xor_si256( M[ 4], H[ 4] ) ), \
_mm256_sub_epi64( _mm256_xor_si256( M[ 7], H[ 7] ), \
_mm256_xor_si256( M[15], H[15] ) ) )
_mm256_sub_epi64( _mm256_sub_epi64( mh[ 8], mh[ 1] ), mh[ 4] ), \
_mm256_sub_epi64( mh[ 7], mh[15] ) )

#define Wb11 \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 8], H[ 8] ), \
_mm256_xor_si256( M[ 0], H[ 0] ) ), \
_mm256_xor_si256( M[ 2], H[ 2] ) ), \
_mm256_sub_epi64( _mm256_xor_si256( M[ 5], H[ 5] ), \
_mm256_xor_si256( M[ 9], H[ 9] ) ) )
_mm256_sub_epi64( _mm256_sub_epi64( mh[ 8], mh[ 0] ), mh[ 2] ), \
_mm256_sub_epi64( mh[ 5], mh[ 9] ) )

#define Wb12 \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_add_epi64( _mm256_xor_si256( M[ 1], H[ 1] ), \
_mm256_xor_si256( M[ 3], H[ 3] ) ), \
_mm256_xor_si256( M[ 6], H[ 6] ) ), \
_mm256_sub_epi64( _mm256_xor_si256( M[ 9], H[ 9] ), \
_mm256_xor_si256( M[10], H[10] ) ) )
_mm256_sub_epi64( _mm256_add_epi64( mh[ 1], mh[ 3] ), mh[ 6] ), \
_mm256_sub_epi64( mh[ 9], mh[10] ) )

#define Wb13 \
_mm256_add_epi64( \
_mm256_add_epi64( \
_mm256_add_epi64( _mm256_xor_si256( M[ 2], H[ 2] ), \
_mm256_xor_si256( M[ 4], H[ 4] ) ), \
_mm256_xor_si256( M[ 7], H[ 7] ) ), \
_mm256_add_epi64( _mm256_xor_si256( M[10], H[10] ), \
_mm256_xor_si256( M[11], H[11] ) ) )
_mm256_add_epi64( _mm256_add_epi64( mh[ 2], mh[ 4] ), mh[ 7] ), \
_mm256_add_epi64( mh[10], mh[11] ) )

#define Wb14 \
_mm256_sub_epi64( \
_mm256_add_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[ 3], H[ 3] ), \
_mm256_xor_si256( M[ 5], H[ 5] ) ), \
_mm256_xor_si256( M[ 8], H[ 8] ) ), \
_mm256_add_epi64( _mm256_xor_si256( M[11], H[11] ), \
_mm256_xor_si256( M[12], H[12] ) ) )
_mm256_add_epi64( _mm256_sub_epi64( mh[ 3], mh[ 5] ), mh[ 8] ), \
_mm256_add_epi64( mh[11], mh[12] ) )

#define Wb15 \
_mm256_sub_epi64( \
_mm256_sub_epi64( \
_mm256_sub_epi64( _mm256_xor_si256( M[12], H[12] ), \
_mm256_xor_si256( M[ 4], H[4] ) ), \
_mm256_xor_si256( M[ 6], H[ 6] ) ), \
_mm256_sub_epi64( _mm256_xor_si256( M[ 9], H[ 9] ), \
_mm256_xor_si256( M[13], H[13] ) ) )
_mm256_sub_epi64( _mm256_sub_epi64( mh[12], mh[ 4] ), mh[ 6] ), \
_mm256_sub_epi64( mh[ 9], mh[13] ) )


void compress_big( const __m256i *M, const __m256i H[16], __m256i dH[16] )
{
__m256i qt[32], xl, xh;
__m256i mh[16];
int i;

for ( i = 0; i < 16; i++ )
mh[i] = _mm256_xor_si256( M[i], H[i] );

qt[ 0] = _mm256_add_epi64( sb0( Wb0 ), H[ 1] );
qt[ 1] = _mm256_add_epi64( sb1( Wb1 ), H[ 2] );
@@ -799,22 +730,60 @@ void compress_big( const __m256i *M, const __m256i H[16], __m256i dH[16] )
qt[13] = _mm256_add_epi64( sb3( Wb13), H[14] );
qt[14] = _mm256_add_epi64( sb4( Wb14), H[15] );
qt[15] = _mm256_add_epi64( sb0( Wb15), H[ 0] );
qt[16] = expand1b( qt, M, H, 16 );
qt[17] = expand1b( qt, M, H, 17 );
qt[18] = expand2b( qt, M, H, 18 );
qt[19] = expand2b( qt, M, H, 19 );
qt[20] = expand2b( qt, M, H, 20 );
qt[21] = expand2b( qt, M, H, 21 );
qt[22] = expand2b( qt, M, H, 22 );
qt[23] = expand2b( qt, M, H, 23 );
qt[24] = expand2b( qt, M, H, 24 );
qt[25] = expand2b( qt, M, H, 25 );
qt[26] = expand2b( qt, M, H, 26 );
qt[27] = expand2b( qt, M, H, 27 );
qt[28] = expand2b( qt, M, H, 28 );
qt[29] = expand2b( qt, M, H, 29 );
qt[30] = expand2b( qt, M, H, 30 );
qt[31] = expand2b( qt, M, H, 31 );

__m256i mj[16];
for ( i = 0; i < 16; i++ )
mj[i] = rol_off_64( M, i );

qt[16] = add_elt_b( mj[ 0], mj[ 3], mj[10], H[ 7],
(const __m256i)_mm256_set1_epi64x( 16 * 0x0555555555555555ULL ) );
qt[17] = add_elt_b( mj[ 1], mj[ 4], mj[11], H[ 8],
(const __m256i)_mm256_set1_epi64x( 17 * 0x0555555555555555ULL ) );
qt[18] = add_elt_b( mj[ 2], mj[ 5], mj[12], H[ 9],
(const __m256i)_mm256_set1_epi64x( 18 * 0x0555555555555555ULL ) );
qt[19] = add_elt_b( mj[ 3], mj[ 6], mj[13], H[10],
(const __m256i)_mm256_set1_epi64x( 19 * 0x0555555555555555ULL ) );
qt[20] = add_elt_b( mj[ 4], mj[ 7], mj[14], H[11],
(const __m256i)_mm256_set1_epi64x( 20 * 0x0555555555555555ULL ) );
qt[21] = add_elt_b( mj[ 5], mj[ 8], mj[15], H[12],
(const __m256i)_mm256_set1_epi64x( 21 * 0x0555555555555555ULL ) );
qt[22] = add_elt_b( mj[ 6], mj[ 9], mj[ 0], H[13],
(const __m256i)_mm256_set1_epi64x( 22 * 0x0555555555555555ULL ) );
qt[23] = add_elt_b( mj[ 7], mj[10], mj[ 1], H[14],
(const __m256i)_mm256_set1_epi64x( 23 * 0x0555555555555555ULL ) );
qt[24] = add_elt_b( mj[ 8], mj[11], mj[ 2], H[15],
(const __m256i)_mm256_set1_epi64x( 24 * 0x0555555555555555ULL ) );
qt[25] = add_elt_b( mj[ 9], mj[12], mj[ 3], H[ 0],
(const __m256i)_mm256_set1_epi64x( 25 * 0x0555555555555555ULL ) );
qt[26] = add_elt_b( mj[10], mj[13], mj[ 4], H[ 1],
(const __m256i)_mm256_set1_epi64x( 26 * 0x0555555555555555ULL ) );
qt[27] = add_elt_b( mj[11], mj[14], mj[ 5], H[ 2],
(const __m256i)_mm256_set1_epi64x( 27 * 0x0555555555555555ULL ) );
qt[28] = add_elt_b( mj[12], mj[15], mj[ 6], H[ 3],
(const __m256i)_mm256_set1_epi64x( 28 * 0x0555555555555555ULL ) );
qt[29] = add_elt_b( mj[13], mj[ 0], mj[ 7], H[ 4],
(const __m256i)_mm256_set1_epi64x( 29 * 0x0555555555555555ULL ) );
qt[30] = add_elt_b( mj[14], mj[ 1], mj[ 8], H[ 5],
(const __m256i)_mm256_set1_epi64x( 30 * 0x0555555555555555ULL ) );
qt[31] = add_elt_b( mj[15], mj[ 2], mj[ 9], H[ 6],
(const __m256i)_mm256_set1_epi64x( 31 * 0x0555555555555555ULL ) );
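In the rewritten code above, the rotated message words are computed once into mj[] and the additive constant is passed in, so it folds to a compile-time vector constant at each call site instead of being recomputed inside the old macro. A scalar sketch of what one add_elt_b now computes, for a single 64-bit lane at qt index idx (16..31):

#include <stdint.h>

static inline uint64_t add_elt_scalar( uint64_t mj0, uint64_t mj3,
                                       uint64_t mj10, uint64_t h, int idx )
{
    // K is fixed per expansion index, matching idx * 0x0555... above.
    const uint64_t K = (uint64_t)idx * 0x0555555555555555ULL;
    return h ^ ( K + ( mj0 + mj3 - mj10 ) );
}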
qt[16] = _mm256_add_epi64( qt[16], expand1_b( qt, 16 ) );
qt[17] = _mm256_add_epi64( qt[17], expand1_b( qt, 17 ) );
qt[18] = _mm256_add_epi64( qt[18], expand2_b( qt, 18 ) );
qt[19] = _mm256_add_epi64( qt[19], expand2_b( qt, 19 ) );
qt[20] = _mm256_add_epi64( qt[20], expand2_b( qt, 20 ) );
qt[21] = _mm256_add_epi64( qt[21], expand2_b( qt, 21 ) );
qt[22] = _mm256_add_epi64( qt[22], expand2_b( qt, 22 ) );
qt[23] = _mm256_add_epi64( qt[23], expand2_b( qt, 23 ) );
qt[24] = _mm256_add_epi64( qt[24], expand2_b( qt, 24 ) );
qt[25] = _mm256_add_epi64( qt[25], expand2_b( qt, 25 ) );
qt[26] = _mm256_add_epi64( qt[26], expand2_b( qt, 26 ) );
qt[27] = _mm256_add_epi64( qt[27], expand2_b( qt, 27 ) );
qt[28] = _mm256_add_epi64( qt[28], expand2_b( qt, 28 ) );
qt[29] = _mm256_add_epi64( qt[29], expand2_b( qt, 29 ) );
qt[30] = _mm256_add_epi64( qt[30], expand2_b( qt, 30 ) );
qt[31] = _mm256_add_epi64( qt[31], expand2_b( qt, 31 ) );

xl = _mm256_xor_si256(
mm256_xor4( qt[16], qt[17], qt[18], qt[19] ),
@@ -823,7 +792,6 @@ void compress_big( const __m256i *M, const __m256i H[16], __m256i dH[16] )
mm256_xor4( qt[24], qt[25], qt[26], qt[27] ),
mm256_xor4( qt[28], qt[29], qt[30], qt[31] ) ) );

#define DH1L( m, sl, sr, a, b, c ) \
_mm256_add_epi64( \
_mm256_xor_si256( M[m], \
@@ -1066,21 +1034,15 @@ bmw512_4way_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
#define r8b6(x) mm512_rol_64( x, 43 )
#define r8b7(x) mm512_rol_64( x, 53 )

#define rol8w_off_64( M, j, off ) \
mm512_rol_64( M[ ( (j) + (off) ) & 0xF ] , \
( ( (j) + (off) ) & 0xF ) + 1 )
#define rol8w_off_64( M, j ) \
mm512_rol_64( M[ (j) & 0xF ], ( (j) & 0xF ) + 1 )

#define add_elt_b8( M, H, j ) \
_mm512_xor_si512( \
_mm512_add_epi64( \
_mm512_sub_epi64( _mm512_add_epi64( rol8w_off_64( M, j, 0 ), \
rol8w_off_64( M, j, 3 ) ), \
rol8w_off_64( M, j, 10 ) ), \
_mm512_set1_epi64( ( (j) + 16 ) * 0x0555555555555555ULL ) ), \
H[ ( (j)+7 ) & 0xF ] )
#define add_elt_b8( mj0, mj3, mj10, h, K ) \
_mm512_xor_si512( h, _mm512_add_epi64( K, \
_mm512_sub_epi64( _mm512_add_epi64( mj0, mj3 ), mj10 ) ) )

#define expand1b8( qt, M, H, i ) \
_mm512_add_epi64( mm512_add4_64( \
#define expand1_b8( qt, i ) \
mm512_add4_64( \
mm512_add4_64( s8b1( qt[ (i)-16 ] ), s8b2( qt[ (i)-15 ] ), \
s8b3( qt[ (i)-14 ] ), s8b0( qt[ (i)-13 ] )), \
mm512_add4_64( s8b1( qt[ (i)-12 ] ), s8b2( qt[ (i)-11 ] ), \
@@ -1088,11 +1050,10 @@ bmw512_4way_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
mm512_add4_64( s8b1( qt[ (i)- 8 ] ), s8b2( qt[ (i)- 7 ] ), \
s8b3( qt[ (i)- 6 ] ), s8b0( qt[ (i)- 5 ] )), \
mm512_add4_64( s8b1( qt[ (i)- 4 ] ), s8b2( qt[ (i)- 3 ] ), \
s8b3( qt[ (i)- 2 ] ), s8b0( qt[ (i)- 1 ] ) ) ), \
add_elt_b8( M, H, (i)-16 ) )
s8b3( qt[ (i)- 2 ] ), s8b0( qt[ (i)- 1 ] ) ) )

#define expand2b8( qt, M, H, i) \
_mm512_add_epi64( mm512_add4_64( \
#define expand2_b8( qt, i) \
mm512_add4_64( \
mm512_add4_64( qt[ (i)-16 ], r8b1( qt[ (i)-15 ] ), \
qt[ (i)-14 ], r8b2( qt[ (i)-13 ] ) ), \
mm512_add4_64( qt[ (i)-12 ], r8b3( qt[ (i)-11 ] ), \
@@ -1100,157 +1061,97 @@ bmw512_4way_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
mm512_add4_64( qt[ (i)- 8 ], r8b5( qt[ (i)- 7 ] ), \
qt[ (i)- 6 ], r8b6( qt[ (i)- 5 ] ) ), \
mm512_add4_64( qt[ (i)- 4 ], r8b7( qt[ (i)- 3 ] ), \
s8b4( qt[ (i)- 2 ] ), s8b5( qt[ (i)- 1 ] ) ) ), \
add_elt_b8( M, H, (i)-16 ) )
s8b4( qt[ (i)- 2 ] ), s8b5( qt[ (i)- 1 ] ) ) )
#define W8b0 \
_mm512_add_epi64( \
_mm512_add_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 5], H[ 5] ), \
_mm512_xor_si512( M[ 7], H[ 7] ) ), \
_mm512_xor_si512( M[10], H[10] ) ), \
_mm512_add_epi64( _mm512_xor_si512( M[13], H[13] ), \
_mm512_xor_si512( M[14], H[14] ) ) )
_mm512_add_epi64( _mm512_sub_epi64( mh[ 5], mh[ 7] ), mh[10] ), \
_mm512_add_epi64( mh[13], mh[14] ) )

#define W8b1 \
_mm512_add_epi64( \
_mm512_add_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 6], H[ 6] ), \
_mm512_xor_si512( M[ 8], H[ 8] ) ), \
_mm512_xor_si512( M[11], H[11] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[14], H[14] ), \
_mm512_xor_si512( M[15], H[15] ) ) )
_mm512_add_epi64( _mm512_sub_epi64( mh[ 6], mh[ 8] ), mh[11] ), \
_mm512_sub_epi64( mh[14], mh[15] ) )

#define W8b2 \
_mm512_sub_epi64( \
_mm512_add_epi64( \
_mm512_add_epi64( _mm512_xor_si512( M[ 0], H[ 0] ), \
_mm512_xor_si512( M[ 7], H[ 7] ) ), \
_mm512_xor_si512( M[ 9], H[ 9] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[12], H[12] ), \
_mm512_xor_si512( M[15], H[15] ) ) )
_mm512_add_epi64( _mm512_add_epi64( mh[ 0], mh[ 7] ), mh[ 9] ), \
_mm512_sub_epi64( mh[12], mh[15] ) )

#define W8b3 \
_mm512_sub_epi64( \
_mm512_add_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 0], H[ 0] ), \
_mm512_xor_si512( M[ 1], H[ 1] ) ), \
_mm512_xor_si512( M[ 8], H[ 8] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[10], H[10] ), \
_mm512_xor_si512( M[13], H[13] ) ) )
_mm512_add_epi64( _mm512_sub_epi64( mh[ 0], mh[ 1] ), mh[ 8] ), \
_mm512_sub_epi64( mh[10], mh[13] ) )

#define W8b4 \
_mm512_sub_epi64( \
_mm512_add_epi64( \
_mm512_add_epi64( _mm512_xor_si512( M[ 1], H[ 1] ), \
_mm512_xor_si512( M[ 2], H[ 2] ) ), \
_mm512_xor_si512( M[ 9], H[ 9] ) ), \
_mm512_add_epi64( _mm512_xor_si512( M[11], H[11] ), \
_mm512_xor_si512( M[14], H[14] ) ) )
_mm512_add_epi64( _mm512_add_epi64( mh[ 1], mh[ 2] ), mh[ 9] ), \
_mm512_add_epi64( mh[11], mh[14] ) )

#define W8b5 \
_mm512_sub_epi64( \
_mm512_add_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 3], H[ 3] ), \
_mm512_xor_si512( M[ 2], H[ 2] ) ), \
_mm512_xor_si512( M[10], H[10] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[12], H[12] ), \
_mm512_xor_si512( M[15], H[15] ) ) )
_mm512_add_epi64( _mm512_sub_epi64( mh[ 3], mh[ 2] ), mh[10] ), \
_mm512_sub_epi64( mh[12], mh[15] ) )

#define W8b6 \
_mm512_sub_epi64( \
_mm512_sub_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 4], H[ 4] ), \
_mm512_xor_si512( M[ 0], H[ 0] ) ), \
_mm512_xor_si512( M[ 3], H[ 3] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[11], H[11] ), \
_mm512_xor_si512( M[13], H[13] ) ) )
_mm512_sub_epi64( _mm512_sub_epi64( mh[ 4], mh[ 0] ), mh[ 3] ), \
_mm512_sub_epi64( mh[11], mh[13] ) )

#define W8b7 \
_mm512_sub_epi64( \
_mm512_sub_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 1], H[ 1] ), \
_mm512_xor_si512( M[ 4], H[ 4] ) ), \
_mm512_xor_si512( M[ 5], H[ 5] ) ), \
_mm512_add_epi64( _mm512_xor_si512( M[12], H[12] ), \
_mm512_xor_si512( M[14], H[14] ) ) )
_mm512_sub_epi64( _mm512_sub_epi64( mh[ 1], mh[ 4] ), mh[ 5] ), \
_mm512_add_epi64( mh[12], mh[14] ) )

#define W8b8 \
_mm512_add_epi64( \
_mm512_sub_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 2], H[ 2] ), \
_mm512_xor_si512( M[ 5], H[ 5] ) ), \
_mm512_xor_si512( M[ 6], H[ 6] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[13], H[13] ), \
_mm512_xor_si512( M[15], H[15] ) ) )
_mm512_sub_epi64( _mm512_sub_epi64( mh[ 2], mh[ 5] ), mh[ 6] ), \
_mm512_sub_epi64( mh[13], mh[15] ) )

#define W8b9 \
_mm512_sub_epi64( \
_mm512_add_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 0], H[ 0] ), \
_mm512_xor_si512( M[ 3], H[ 3] ) ), \
_mm512_xor_si512( M[ 6], H[ 6] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[ 7], H[ 7] ), \
_mm512_xor_si512( M[14], H[14] ) ) )
_mm512_add_epi64( _mm512_sub_epi64( mh[ 0], mh[ 3] ), mh[ 6] ), \
_mm512_sub_epi64( mh[ 7], mh[14] ) )

#define W8b10 \
_mm512_sub_epi64( \
_mm512_sub_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 8], H[ 8] ), \
_mm512_xor_si512( M[ 1], H[ 1] ) ), \
_mm512_xor_si512( M[ 4], H[ 4] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[ 7], H[ 7] ), \
_mm512_xor_si512( M[15], H[15] ) ) )
_mm512_sub_epi64( _mm512_sub_epi64( mh[ 8], mh[ 1] ), mh[ 4] ), \
_mm512_sub_epi64( mh[ 7], mh[15] ) )

#define W8b11 \
_mm512_sub_epi64( \
_mm512_sub_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 8], H[ 8] ), \
_mm512_xor_si512( M[ 0], H[ 0] ) ), \
_mm512_xor_si512( M[ 2], H[ 2] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[ 5], H[ 5] ), \
_mm512_xor_si512( M[ 9], H[ 9] ) ) )
_mm512_sub_epi64( _mm512_sub_epi64( mh[ 8], mh[ 0] ), mh[ 2] ), \
_mm512_sub_epi64( mh[ 5], mh[ 9] ) )

#define W8b12 \
_mm512_sub_epi64( \
_mm512_sub_epi64( \
_mm512_add_epi64( _mm512_xor_si512( M[ 1], H[ 1] ), \
_mm512_xor_si512( M[ 3], H[ 3] ) ), \
_mm512_xor_si512( M[ 6], H[ 6] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[ 9], H[ 9] ), \
_mm512_xor_si512( M[10], H[10] ) ) )
_mm512_sub_epi64( _mm512_add_epi64( mh[ 1], mh[ 3] ), mh[ 6] ), \
_mm512_sub_epi64( mh[ 9], mh[10] ) )

#define W8b13 \
_mm512_add_epi64( \
_mm512_add_epi64( \
_mm512_add_epi64( _mm512_xor_si512( M[ 2], H[ 2] ), \
_mm512_xor_si512( M[ 4], H[ 4] ) ), \
_mm512_xor_si512( M[ 7], H[ 7] ) ), \
_mm512_add_epi64( _mm512_xor_si512( M[10], H[10] ), \
_mm512_xor_si512( M[11], H[11] ) ) )
_mm512_add_epi64( _mm512_add_epi64( mh[ 2], mh[ 4] ), mh[ 7] ), \
_mm512_add_epi64( mh[10], mh[11] ) )

#define W8b14 \
_mm512_sub_epi64( \
_mm512_add_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[ 3], H[ 3] ), \
_mm512_xor_si512( M[ 5], H[ 5] ) ), \
_mm512_xor_si512( M[ 8], H[ 8] ) ), \
_mm512_add_epi64( _mm512_xor_si512( M[11], H[11] ), \
_mm512_xor_si512( M[12], H[12] ) ) )
_mm512_add_epi64( _mm512_sub_epi64( mh[ 3], mh[ 5] ), mh[ 8] ), \
_mm512_add_epi64( mh[11], mh[12] ) )

#define W8b15 \
_mm512_sub_epi64( \
_mm512_sub_epi64( \
_mm512_sub_epi64( _mm512_xor_si512( M[12], H[12] ), \
_mm512_xor_si512( M[ 4], H[4] ) ), \
_mm512_xor_si512( M[ 6], H[ 6] ) ), \
_mm512_sub_epi64( _mm512_xor_si512( M[ 9], H[ 9] ), \
_mm512_xor_si512( M[13], H[13] ) ) )
_mm512_sub_epi64( _mm512_sub_epi64( mh[12], mh[ 4] ), mh[ 6] ), \
_mm512_sub_epi64( mh[ 9], mh[13] ) )
void compress_big_8way( const __m512i *M, const __m512i H[16],
__m512i dH[16] )
{
__m512i qt[32], xl, xh;
__m512i mh[16];
int i;

for ( i = 0; i < 16; i++ )
mh[i] = _mm512_xor_si512( M[i], H[i] );

qt[ 0] = _mm512_add_epi64( s8b0( W8b0 ), H[ 1] );
qt[ 1] = _mm512_add_epi64( s8b1( W8b1 ), H[ 2] );
@@ -1268,57 +1169,90 @@ void compress_big_8way( const __m512i *M, const __m512i H[16],
qt[13] = _mm512_add_epi64( s8b3( W8b13), H[14] );
qt[14] = _mm512_add_epi64( s8b4( W8b14), H[15] );
qt[15] = _mm512_add_epi64( s8b0( W8b15), H[ 0] );
qt[16] = expand1b8( qt, M, H, 16 );
qt[17] = expand1b8( qt, M, H, 17 );
qt[18] = expand2b8( qt, M, H, 18 );
qt[19] = expand2b8( qt, M, H, 19 );
qt[20] = expand2b8( qt, M, H, 20 );
qt[21] = expand2b8( qt, M, H, 21 );
qt[22] = expand2b8( qt, M, H, 22 );
qt[23] = expand2b8( qt, M, H, 23 );
qt[24] = expand2b8( qt, M, H, 24 );
qt[25] = expand2b8( qt, M, H, 25 );
qt[26] = expand2b8( qt, M, H, 26 );
qt[27] = expand2b8( qt, M, H, 27 );
qt[28] = expand2b8( qt, M, H, 28 );
qt[29] = expand2b8( qt, M, H, 29 );
qt[30] = expand2b8( qt, M, H, 30 );
qt[31] = expand2b8( qt, M, H, 31 );

xl = _mm512_xor_si512(
mm512_xor4( qt[16], qt[17], qt[18], qt[19] ),
mm512_xor4( qt[20], qt[21], qt[22], qt[23] ) );
xh = _mm512_xor_si512( xl, _mm512_xor_si512(
mm512_xor4( qt[24], qt[25], qt[26], qt[27] ),
mm512_xor4( qt[28], qt[29], qt[30], qt[31] ) ) );
__m512i mj[16];
for ( i = 0; i < 16; i++ )
mj[i] = rol8w_off_64( M, i );

qt[16] = add_elt_b8( mj[ 0], mj[ 3], mj[10], H[ 7],
(const __m512i)_mm512_set1_epi64( 16 * 0x0555555555555555ULL ) );
qt[17] = add_elt_b8( mj[ 1], mj[ 4], mj[11], H[ 8],
(const __m512i)_mm512_set1_epi64( 17 * 0x0555555555555555ULL ) );
qt[18] = add_elt_b8( mj[ 2], mj[ 5], mj[12], H[ 9],
(const __m512i)_mm512_set1_epi64( 18 * 0x0555555555555555ULL ) );
qt[19] = add_elt_b8( mj[ 3], mj[ 6], mj[13], H[10],
(const __m512i)_mm512_set1_epi64( 19 * 0x0555555555555555ULL ) );
qt[20] = add_elt_b8( mj[ 4], mj[ 7], mj[14], H[11],
(const __m512i)_mm512_set1_epi64( 20 * 0x0555555555555555ULL ) );
qt[21] = add_elt_b8( mj[ 5], mj[ 8], mj[15], H[12],
(const __m512i)_mm512_set1_epi64( 21 * 0x0555555555555555ULL ) );
qt[22] = add_elt_b8( mj[ 6], mj[ 9], mj[ 0], H[13],
(const __m512i)_mm512_set1_epi64( 22 * 0x0555555555555555ULL ) );
qt[23] = add_elt_b8( mj[ 7], mj[10], mj[ 1], H[14],
(const __m512i)_mm512_set1_epi64( 23 * 0x0555555555555555ULL ) );
qt[24] = add_elt_b8( mj[ 8], mj[11], mj[ 2], H[15],
(const __m512i)_mm512_set1_epi64( 24 * 0x0555555555555555ULL ) );
qt[25] = add_elt_b8( mj[ 9], mj[12], mj[ 3], H[ 0],
(const __m512i)_mm512_set1_epi64( 25 * 0x0555555555555555ULL ) );
qt[26] = add_elt_b8( mj[10], mj[13], mj[ 4], H[ 1],
(const __m512i)_mm512_set1_epi64( 26 * 0x0555555555555555ULL ) );
qt[27] = add_elt_b8( mj[11], mj[14], mj[ 5], H[ 2],
(const __m512i)_mm512_set1_epi64( 27 * 0x0555555555555555ULL ) );
qt[28] = add_elt_b8( mj[12], mj[15], mj[ 6], H[ 3],
(const __m512i)_mm512_set1_epi64( 28 * 0x0555555555555555ULL ) );
qt[29] = add_elt_b8( mj[13], mj[ 0], mj[ 7], H[ 4],
(const __m512i)_mm512_set1_epi64( 29 * 0x0555555555555555ULL ) );
qt[30] = add_elt_b8( mj[14], mj[ 1], mj[ 8], H[ 5],
(const __m512i)_mm512_set1_epi64( 30 * 0x0555555555555555ULL ) );
qt[31] = add_elt_b8( mj[15], mj[ 2], mj[ 9], H[ 6],
(const __m512i)_mm512_set1_epi64( 31 * 0x0555555555555555ULL ) );

qt[16] = _mm512_add_epi64( qt[16], expand1_b8( qt, 16 ) );
qt[17] = _mm512_add_epi64( qt[17], expand1_b8( qt, 17 ) );
qt[18] = _mm512_add_epi64( qt[18], expand2_b8( qt, 18 ) );
qt[19] = _mm512_add_epi64( qt[19], expand2_b8( qt, 19 ) );
qt[20] = _mm512_add_epi64( qt[20], expand2_b8( qt, 20 ) );
qt[21] = _mm512_add_epi64( qt[21], expand2_b8( qt, 21 ) );
qt[22] = _mm512_add_epi64( qt[22], expand2_b8( qt, 22 ) );
qt[23] = _mm512_add_epi64( qt[23], expand2_b8( qt, 23 ) );
qt[24] = _mm512_add_epi64( qt[24], expand2_b8( qt, 24 ) );
qt[25] = _mm512_add_epi64( qt[25], expand2_b8( qt, 25 ) );
qt[26] = _mm512_add_epi64( qt[26], expand2_b8( qt, 26 ) );
qt[27] = _mm512_add_epi64( qt[27], expand2_b8( qt, 27 ) );
qt[28] = _mm512_add_epi64( qt[28], expand2_b8( qt, 28 ) );
qt[29] = _mm512_add_epi64( qt[29], expand2_b8( qt, 29 ) );
qt[30] = _mm512_add_epi64( qt[30], expand2_b8( qt, 30 ) );
qt[31] = _mm512_add_epi64( qt[31], expand2_b8( qt, 31 ) );

xl = mm512_xor3( mm512_xor3( qt[16], qt[17], qt[18] ),
mm512_xor3( qt[19], qt[20], qt[21] ),
_mm512_xor_si512( qt[22], qt[23] ) );

xh = mm512_xor3( mm512_xor3( xl, qt[24], qt[25] ),
mm512_xor3( qt[26], qt[27], qt[28] ),
mm512_xor3( qt[29], qt[30], qt[31] ) );

#define DH1L( m, sl, sr, a, b, c ) \
_mm512_add_epi64( \
_mm512_xor_si512( M[m], \
_mm512_xor_si512( _mm512_slli_epi64( xh, sl ), \
_mm512_srli_epi64( qt[a], sr ) ) ), \
_mm512_xor_si512( _mm512_xor_si512( xl, qt[b] ), qt[c] ) )
_mm512_add_epi64( mm512_xor3( M[m], _mm512_slli_epi64( xh, sl ), \
_mm512_srli_epi64( qt[a], sr ) ), \
mm512_xor3( xl, qt[b], qt[c] ) )

#define DH1R( m, sl, sr, a, b, c ) \
_mm512_add_epi64( \
_mm512_xor_si512( M[m], \
_mm512_xor_si512( _mm512_srli_epi64( xh, sl ), \
_mm512_slli_epi64( qt[a], sr ) ) ), \
_mm512_xor_si512( _mm512_xor_si512( xl, qt[b] ), qt[c] ) )
_mm512_add_epi64( mm512_xor3( M[m], _mm512_srli_epi64( xh, sl ), \
_mm512_slli_epi64( qt[a], sr ) ), \
mm512_xor3( xl, qt[b], qt[c] ) )

#define DH2L( m, rl, sl, h, a, b, c ) \
_mm512_add_epi64( _mm512_add_epi64( \
mm512_rol_64( dH[h], rl ), \
_mm512_xor_si512( _mm512_xor_si512( xh, qt[a] ), M[m] )), \
_mm512_xor_si512( _mm512_slli_epi64( xl, sl ), \
_mm512_xor_si512( qt[b], qt[c] ) ) );

mm512_rol_64( dH[h], rl ), \
mm512_xor3( xh, qt[a], M[m] ) ), \
mm512_xor3( _mm512_slli_epi64( xl, sl ), qt[b], qt[c] ) )

#define DH2R( m, rl, sr, h, a, b, c ) \
_mm512_add_epi64( _mm512_add_epi64( \
mm512_rol_64( dH[h], rl ), \
_mm512_xor_si512( _mm512_xor_si512( xh, qt[a] ), M[m] )), \
_mm512_xor_si512( _mm512_srli_epi64( xl, sr ), \
_mm512_xor_si512( qt[b], qt[c] ) ) );
mm512_rol_64( dH[h], rl ), \
mm512_xor3( xh, qt[a], M[m] ) ), \
mm512_xor3( _mm512_srli_epi64( xl, sr ), qt[b], qt[c] ) )

dH[ 0] = DH1L( 0, 5, 5, 16, 24, 0 );
@@ -98,6 +98,138 @@ static void transform_4way( cube_4way_context *sp )
_mm512_store_si512( (__m512i*)sp->h + 7, x7 );
}

// 8 ways, 4 way parallel double buffered
static void transform_4way_2buf( cube_4way_2buf_context *sp )
{
int r;
const int rounds = sp->rounds;

__m512i x0, x1, x2, x3, x4, x5, x6, x7;
__m512i y0, y1, y2, y3, y4, y5, y6, y7;
__m512i tx0, tx1, ty0, ty1;

x0 = _mm512_load_si512( (__m512i*)sp->h0 );
x1 = _mm512_load_si512( (__m512i*)sp->h0 + 1 );
x2 = _mm512_load_si512( (__m512i*)sp->h0 + 2 );
x3 = _mm512_load_si512( (__m512i*)sp->h0 + 3 );
x4 = _mm512_load_si512( (__m512i*)sp->h0 + 4 );
x5 = _mm512_load_si512( (__m512i*)sp->h0 + 5 );
x6 = _mm512_load_si512( (__m512i*)sp->h0 + 6 );
x7 = _mm512_load_si512( (__m512i*)sp->h0 + 7 );

y0 = _mm512_load_si512( (__m512i*)sp->h1 );
y1 = _mm512_load_si512( (__m512i*)sp->h1 + 1 );
y2 = _mm512_load_si512( (__m512i*)sp->h1 + 2 );
y3 = _mm512_load_si512( (__m512i*)sp->h1 + 3 );
y4 = _mm512_load_si512( (__m512i*)sp->h1 + 4 );
y5 = _mm512_load_si512( (__m512i*)sp->h1 + 5 );
y6 = _mm512_load_si512( (__m512i*)sp->h1 + 6 );
y7 = _mm512_load_si512( (__m512i*)sp->h1 + 7 );

for ( r = 0; r < rounds; ++r )
{
x4 = _mm512_add_epi32( x0, x4 );
y4 = _mm512_add_epi32( y0, y4 );
tx0 = x0;
ty0 = y0;
x5 = _mm512_add_epi32( x1, x5 );
y5 = _mm512_add_epi32( y1, y5 );
tx1 = x1;
ty1 = y1;
x0 = mm512_rol_32( x2, 7 );
y0 = mm512_rol_32( y2, 7 );
x6 = _mm512_add_epi32( x2, x6 );
y6 = _mm512_add_epi32( y2, y6 );
x1 = mm512_rol_32( x3, 7 );
y1 = mm512_rol_32( y3, 7 );
x7 = _mm512_add_epi32( x3, x7 );
y7 = _mm512_add_epi32( y3, y7 );

x2 = mm512_rol_32( tx0, 7 );
y2 = mm512_rol_32( ty0, 7 );
x0 = _mm512_xor_si512( x0, x4 );
y0 = _mm512_xor_si512( y0, y4 );
x4 = mm512_swap128_64( x4 );
x3 = mm512_rol_32( tx1, 7 );
y3 = mm512_rol_32( ty1, 7 );
y4 = mm512_swap128_64( y4 );

x1 = _mm512_xor_si512( x1, x5 );
y1 = _mm512_xor_si512( y1, y5 );
x5 = mm512_swap128_64( x5 );
x2 = _mm512_xor_si512( x2, x6 );
y2 = _mm512_xor_si512( y2, y6 );
y5 = mm512_swap128_64( y5 );
x3 = _mm512_xor_si512( x3, x7 );
y3 = _mm512_xor_si512( y3, y7 );

x6 = mm512_swap128_64( x6 );
x4 = _mm512_add_epi32( x0, x4 );
y4 = _mm512_add_epi32( y0, y4 );
y6 = mm512_swap128_64( y6 );
x5 = _mm512_add_epi32( x1, x5 );
y5 = _mm512_add_epi32( y1, y5 );
x7 = mm512_swap128_64( x7 );
x6 = _mm512_add_epi32( x2, x6 );
y6 = _mm512_add_epi32( y2, y6 );
tx0 = x0;
ty0 = y0;
y7 = mm512_swap128_64( y7 );
tx1 = x2;
ty1 = y2;
x0 = mm512_rol_32( x1, 11 );
y0 = mm512_rol_32( y1, 11 );

x7 = _mm512_add_epi32( x3, x7 );
y7 = _mm512_add_epi32( y3, y7 );

x1 = mm512_rol_32( tx0, 11 );
y1 = mm512_rol_32( ty0, 11 );
x0 = _mm512_xor_si512( x0, x4 );
x4 = mm512_swap64_32( x4 );
y0 = _mm512_xor_si512( y0, y4 );
x2 = mm512_rol_32( x3, 11 );
y4 = mm512_swap64_32( y4 );
y2 = mm512_rol_32( y3, 11 );
x1 = _mm512_xor_si512( x1, x5 );
x5 = mm512_swap64_32( x5 );
y1 = _mm512_xor_si512( y1, y5 );
x3 = mm512_rol_32( tx1, 11 );
y5 = mm512_swap64_32( y5 );
y3 = mm512_rol_32( ty1, 11 );

x2 = _mm512_xor_si512( x2, x6 );
x6 = mm512_swap64_32( x6 );
y2 = _mm512_xor_si512( y2, y6 );
y6 = mm512_swap64_32( y6 );
x3 = _mm512_xor_si512( x3, x7 );
x7 = mm512_swap64_32( x7 );
y3 = _mm512_xor_si512( y3, y7 );

y7 = mm512_swap64_32( y7 );
}

_mm512_store_si512( (__m512i*)sp->h0, x0 );
_mm512_store_si512( (__m512i*)sp->h0 + 1, x1 );
_mm512_store_si512( (__m512i*)sp->h0 + 2, x2 );
_mm512_store_si512( (__m512i*)sp->h0 + 3, x3 );
_mm512_store_si512( (__m512i*)sp->h0 + 4, x4 );
_mm512_store_si512( (__m512i*)sp->h0 + 5, x5 );
_mm512_store_si512( (__m512i*)sp->h0 + 6, x6 );
_mm512_store_si512( (__m512i*)sp->h0 + 7, x7 );

_mm512_store_si512( (__m512i*)sp->h1, y0 );
_mm512_store_si512( (__m512i*)sp->h1 + 1, y1 );
_mm512_store_si512( (__m512i*)sp->h1 + 2, y2 );
_mm512_store_si512( (__m512i*)sp->h1 + 3, y3 );
_mm512_store_si512( (__m512i*)sp->h1 + 4, y4 );
_mm512_store_si512( (__m512i*)sp->h1 + 5, y5 );
_mm512_store_si512( (__m512i*)sp->h1 + 6, y6 );
_mm512_store_si512( (__m512i*)sp->h1 + 7, y7 );
}

int cube_4way_init( cube_4way_context *sp, int hashbitlen, int rounds,
int blockbytes )
{
@@ -219,6 +351,67 @@ int cube_4way_full( cube_4way_context *sp, void *output, int hashbitlen,
return 0;
}

int cube_4way_2buf_full( cube_4way_2buf_context *sp,
void *output0, void *output1, int hashbitlen,
const void *data0, const void *data1, size_t size )
{
__m512i *h0 = (__m512i*)sp->h0;
__m512i *h1 = (__m512i*)sp->h1;
__m128i *iv = (__m128i*)( hashbitlen == 512 ? (__m128i*)IV512
: (__m128i*)IV256 );
sp->hashlen = hashbitlen/128;
sp->blocksize = 32/16;
sp->rounds = 16;
sp->pos = 0;

h1[0] = h0[0] = m512_const1_128( iv[0] );
h1[1] = h0[1] = m512_const1_128( iv[1] );
h1[2] = h0[2] = m512_const1_128( iv[2] );
h1[3] = h0[3] = m512_const1_128( iv[3] );
h1[4] = h0[4] = m512_const1_128( iv[4] );
h1[5] = h0[5] = m512_const1_128( iv[5] );
h1[6] = h0[6] = m512_const1_128( iv[6] );
h1[7] = h0[7] = m512_const1_128( iv[7] );

const int len = size >> 4;
const __m512i *in0 = (__m512i*)data0;
const __m512i *in1 = (__m512i*)data1;
__m512i *hash0 = (__m512i*)output0;
__m512i *hash1 = (__m512i*)output1;
int i;

for ( i = 0; i < len; i++ )
{
sp->h0[ sp->pos ] = _mm512_xor_si512( sp->h0[ sp->pos ], in0[i] );
sp->h1[ sp->pos ] = _mm512_xor_si512( sp->h1[ sp->pos ], in1[i] );
sp->pos++;
if ( sp->pos == sp->blocksize )
{
transform_4way_2buf( sp );
sp->pos = 0;
}
}

// pos is zero for 64 byte data, 1 for 80 byte data.
__m512i tmp = m512_const2_64( 0, 0x0000000000000080 );
sp->h0[ sp->pos ] = _mm512_xor_si512( sp->h0[ sp->pos ], tmp );
sp->h1[ sp->pos ] = _mm512_xor_si512( sp->h1[ sp->pos ], tmp );

transform_4way_2buf( sp );

tmp = m512_const2_64( 0x0000000100000000, 0 );
sp->h0[7] = _mm512_xor_si512( sp->h0[7], tmp );
sp->h1[7] = _mm512_xor_si512( sp->h1[7], tmp );

for ( i = 0; i < 10; ++i )
transform_4way_2buf( sp );

memcpy( hash0, sp->h0, sp->hashlen<<6);
memcpy( hash1, sp->h1, sp->hashlen<<6);

return 0;
}
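A hypothetical call site for the new double-buffered one-shot function above, hashing two independent 4-way-interleaved 80-byte inputs to 512-bit lane hashes. The buffer names are illustrative, and the declarations are assumed to come from the project's cubehash 4-way header:

void hash_two_streams( void *out0, void *out1,
                       const void *in0, const void *in1 )
{
    cube_4way_2buf_context ctx;   // fully (re)initialized by the call
    cube_4way_2buf_full( &ctx, out0, out1, 512, in0, in1, 80 );
}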
int cube_4way_update_close( cube_4way_context *sp, void *output,
const void *data, size_t size )
@@ -259,6 +452,21 @@ int cube_4way_update_close( cube_4way_context *sp, void *output,

// 2 way 128

// This isn't expected to be used with AVX512, so the HW rotate instruction
// is assumed not available.
// Use double buffering to optimize serial bit rotations. Full double
// buffering isn't practical because it needs twice as many registers,
// and AVX2 has only half as many as AVX512.
#define ROL2( out0, out1, in0, in1, c ) \
{ \
__m256i t0 = _mm256_slli_epi32( in0, c ); \
__m256i t1 = _mm256_slli_epi32( in1, c ); \
out0 = _mm256_srli_epi32( in0, 32-(c) ); \
out1 = _mm256_srli_epi32( in1, 32-(c) ); \
out0 = _mm256_or_si256( out0, t0 ); \
out1 = _mm256_or_si256( out1, t1 ); \
}

static void transform_2way( cube_2way_context *sp )
{
int r;
@@ -283,35 +491,31 @@ static void transform_2way( cube_2way_context *sp )
x7 = _mm256_add_epi32( x3, x7 );
y0 = x0;
y1 = x1;
x0 = mm256_rol_32( x2, 7 );
x1 = mm256_rol_32( x3, 7 );
x2 = mm256_rol_32( y0, 7 );
x3 = mm256_rol_32( y1, 7 );
ROL2( x0, x1, x2, x3, 7 );
ROL2( x2, x3, y0, y1, 7 );
x0 = _mm256_xor_si256( x0, x4 );
x1 = _mm256_xor_si256( x1, x5 );
x2 = _mm256_xor_si256( x2, x6 );
x3 = _mm256_xor_si256( x3, x7 );
x4 = mm256_swap128_64( x4 );
x5 = mm256_swap128_64( x5 );
x6 = mm256_swap128_64( x6 );
x7 = mm256_swap128_64( x7 );
x4 = _mm256_add_epi32( x0, x4 );
x5 = _mm256_add_epi32( x1, x5 );
x6 = _mm256_add_epi32( x2, x6 );
x7 = _mm256_add_epi32( x3, x7 );
y0 = x0;
y1 = x2;
x0 = mm256_rol_32( x1, 11 );
x1 = mm256_rol_32( y0, 11 );
x2 = mm256_rol_32( x3, 11 );
x3 = mm256_rol_32( y1, 11 );
x0 = _mm256_xor_si256( x0, x4 );
x1 = _mm256_xor_si256( x1, x5 );
x2 = _mm256_xor_si256( x2, x6 );
x5 = mm256_swap128_64( x5 );
x3 = _mm256_xor_si256( x3, x7 );
x4 = _mm256_add_epi32( x0, x4 );
x6 = mm256_swap128_64( x6 );
y0 = x0;
x5 = _mm256_add_epi32( x1, x5 );
x7 = mm256_swap128_64( x7 );
x6 = _mm256_add_epi32( x2, x6 );
y1 = x2;
ROL2( x0, x1, x1, y0, 11 );
x7 = _mm256_add_epi32( x3, x7 );
ROL2( x2, x3, x3, y1, 11 );
x0 = _mm256_xor_si256( x0, x4 );
x4 = mm256_swap64_32( x4 );
x1 = _mm256_xor_si256( x1, x5 );
x5 = mm256_swap64_32( x5 );
x2 = _mm256_xor_si256( x2, x6 );
x6 = mm256_swap64_32( x6 );
x3 = _mm256_xor_si256( x3, x7 );
x7 = mm256_swap64_32( x7 );
}
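For contrast with ROL2 above: when AVX512VL is available, the same rotate is a single hardware instruction per 256-bit vector, which is why the software shift/shift/or sequence and its double buffering are only worth doing on plain AVX2. A minimal sketch; the rotate count must be a compile-time immediate:

#include <immintrin.h>

#define ROL32_HW( x, c ) _mm256_rol_epi32( x, c )  // AVX512F + AVX512VL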
|
@@ -17,41 +17,41 @@ struct _cube_4way_context
|
||||
int pos;
|
||||
} __attribute__ ((aligned (128)));
|
||||
|
||||
struct _cube_4way_2buf_context
|
||||
{
|
||||
__m512i h0[8];
|
||||
__m512i h1[8];
|
||||
int hashlen;
|
||||
int rounds;
|
||||
int blocksize;
|
||||
int pos;
|
||||
} __attribute__ ((aligned (128)));
|
||||
|
||||
|
||||
typedef struct _cube_4way_context cube_4way_context;
|
||||
|
||||
typedef struct _cube_4way_2buf_context cube_4way_2buf_context;
|
||||
|
||||
int cube_4way_init( cube_4way_context* sp, int hashbitlen, int rounds,
|
||||
int blockbytes );
|
||||
int blockbytes );
|
||||
|
||||
int cube_4way_update( cube_4way_context *sp, const void *data, size_t size );
|
||||
|
||||
int cube_4way_close( cube_4way_context *sp, void *output );
|
||||
|
||||
int cube_4way_update_close( cube_4way_context *sp, void *output,
|
||||
const void *data, size_t size );
|
||||
|
||||
int cube_4way_full( cube_4way_context *sp, void *output, int hashbitlen,
|
||||
const void *data, size_t size );
|
||||
|
||||
int cube_4x256_full( cube_4way_context *sp, void *output, int hashbitlen,
|
||||
const void *data, size_t size );
|
||||
|
||||
#define cube512_4way_init( sp ) cube_4way_update( sp, 512 )
|
||||
#define cube512_4way_update cube_4way_update
|
||||
#define cube512_4way_update_close cube_4way_update
|
||||
#define cube512_4way_close cube_4way_update
|
||||
#define cube512_4way_full( sp, output, data, size ) \
|
||||
cube_4way_full( sp, output, 512, data, size )
|
||||
#define cube512_4x256_full( sp, output, data, size ) \
|
||||
cube_4x256_full( sp, output, 512, data, size )
|
||||
|
||||
#define cube256_4way_init( sp ) cube_4way_update( sp, 256 )
|
||||
#define cube256_4way_update cube_4way_update
|
||||
#define cube256_4way_update_close cube_4way_update
|
||||
#define cube256_4way_close cube_4way_update
|
||||
#define cube256_4way_full( sp, output, data, size ) \
|
||||
cube_4way_full( sp, output, 256, data, size )
|
||||
#define cube256_4x256_full( sp, output, data, size ) \
|
||||
cube_4x256_full( sp, output, 256, data, size )
|
||||
int cube_4way_2buf_full( cube_4way_2buf_context *sp,
|
||||
void *output0, void *output1, int hashbitlen,
|
||||
const void *data0, const void *data1, size_t size );
|
||||
|
||||
#endif
|
||||
|
||||
// 2x128, 2 way parallel SSE2
|
||||
// 2x128, 2 way parallel AVX2
|
||||
|
||||
struct _cube_2way_context
|
||||
{
|
||||
|
@@ -31,10 +31,14 @@ static void transform( cubehashParam *sp )
   for ( r = 0; r < rounds; ++r )
   {
      x1 = _mm512_add_epi32( x0, x1 );
      x0 = _mm512_xor_si512( mm512_rol_32( mm512_swap_256( x0 ), 7 ), x1 );
      x1 = _mm512_add_epi32( x0, mm512_swap128_64( x1 ) );
      x0 = _mm512_xor_si512( mm512_rol_32(
                                  mm512_swap256_128( x0 ), 11 ), x1 );
      x0 = mm512_swap_256( x0 );
      x0 = mm512_rol_32( x0, 7 );
      x0 = _mm512_xor_si512( x0, x1 );
      x1 = mm512_swap128_64( x1 );
      x1 = _mm512_add_epi32( x0, x1 );
      x0 = mm512_swap256_128( x0 );
      x0 = mm512_rol_32( x0, 11 );
      x0 = _mm512_xor_si512( x0, x1 );
      x1 = mm512_swap64_32( x1 );
   }
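The transform is built entirely from fixed lane permutations plus add/rol/xor. A sketch of two of the swap helpers, assuming they are thin wrappers over permute intrinsics (the real definitions live in simd-utils.h):

// swap the two 256-bit halves of a 512-bit vector
#define mm512_swap_256( v )    _mm512_shuffle_i64x2( v, v, 0x4e )
// swap the two 64-bit halves within each 128-bit lane
#define mm512_swap128_64( v )  _mm512_shuffle_epi32( v, 0x4e )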
@@ -53,10 +53,24 @@ MYALIGN const unsigned int zero[] = {0x00000000, 0x00000000, 0x00000000, 0x000
MYALIGN const unsigned int mul2ipt[] = {0x728efc00, 0x6894e61a, 0x3fc3b14d, 0x25d9ab57, 0xfd5ba600, 0x2a8c71d7, 0x1eb845e3, 0xc96f9234};


#define ECHO_SUBBYTES4(state, j) \
   state[0][j] = _mm_aesenc_si128(state[0][j], k1);\
   k1 = _mm_add_epi32(k1, M128(const1));\
   state[1][j] = _mm_aesenc_si128(state[1][j], k1);\
   k1 = _mm_add_epi32(k1, M128(const1));\
   state[2][j] = _mm_aesenc_si128(state[2][j], k1);\
   k1 = _mm_add_epi32(k1, M128(const1));\
   state[3][j] = _mm_aesenc_si128(state[3][j], k1);\
   k1 = _mm_add_epi32(k1, M128(const1));\
   state[0][j] = _mm_aesenc_si128(state[0][j], m128_zero ); \
   state[1][j] = _mm_aesenc_si128(state[1][j], m128_zero ); \
   state[2][j] = _mm_aesenc_si128(state[2][j], m128_zero ); \
   state[3][j] = _mm_aesenc_si128(state[3][j], m128_zero )

#define ECHO_SUBBYTES(state, i, j) \
   state[i][j] = _mm_aesenc_si128(state[i][j], k1);\
   state[i][j] = _mm_aesenc_si128(state[i][j], M128(zero));\
   k1 = _mm_add_epi32(k1, M128(const1))
   k1 = _mm_add_epi32(k1, M128(const1));\
   state[i][j] = _mm_aesenc_si128(state[i][j], M128(zero))

#define ECHO_MIXBYTES(state1, state2, j, t1, t2, s2) \
   s2 = _mm_add_epi8(state1[0][j], state1[0][j]);\
@@ -73,7 +87,7 @@ MYALIGN const unsigned int mul2ipt[] = {0x728efc00, 0x6894e61a, 0x3fc3b14d, 0x2
   t1 = _mm_and_si128(t1, M128(lsbmask));\
   t2 = _mm_shuffle_epi8(M128(mul2mask), t1);\
   s2 = _mm_xor_si128(s2, t2);\
   state2[0][j] = _mm_xor_si128(state2[0][j], _mm_xor_si128(s2, state1[1][(j + 1) & 3]));\
   state2[0][j] = mm128_xor3(state2[0][j], s2, state1[1][(j + 1) & 3] );\
   state2[1][j] = _mm_xor_si128(state2[1][j], s2);\
   state2[2][j] = _mm_xor_si128(state2[2][j], state1[1][(j + 1) & 3]);\
   state2[3][j] = _mm_xor_si128(state2[3][j], state1[1][(j + 1) & 3]);\
@@ -83,7 +97,7 @@ MYALIGN const unsigned int mul2ipt[] = {0x728efc00, 0x6894e61a, 0x3fc3b14d, 0x2
   t2 = _mm_shuffle_epi8(M128(mul2mask), t1);\
   s2 = _mm_xor_si128(s2, t2);\
   state2[0][j] = _mm_xor_si128(state2[0][j], state1[2][(j + 2) & 3]);\
   state2[1][j] = _mm_xor_si128(state2[1][j], _mm_xor_si128(s2, state1[2][(j + 2) & 3]));\
   state2[1][j] = mm128_xor3(state2[1][j], s2, state1[2][(j + 2) & 3] );\
   state2[2][j] = _mm_xor_si128(state2[2][j], s2);\
   state2[3][j] = _mm_xor_si128(state2[3][j], state1[2][(j + 2) & 3]);\
   s2 = _mm_add_epi8(state1[3][(j + 3) & 3], state1[3][(j + 3) & 3]);\
@@ -93,10 +107,29 @@ MYALIGN const unsigned int mul2ipt[] = {0x728efc00, 0x6894e61a, 0x3fc3b14d, 0x2
   s2 = _mm_xor_si128(s2, t2);\
   state2[0][j] = _mm_xor_si128(state2[0][j], state1[3][(j + 3) & 3]);\
   state2[1][j] = _mm_xor_si128(state2[1][j], state1[3][(j + 3) & 3]);\
   state2[2][j] = _mm_xor_si128(state2[2][j], _mm_xor_si128(s2, state1[3][(j + 3) & 3]));\
   state2[2][j] = mm128_xor3(state2[2][j], s2, state1[3][(j + 3) & 3] );\
   state2[3][j] = _mm_xor_si128(state2[3][j], s2)


#define ECHO_ROUND_UNROLL2 \
   ECHO_SUBBYTES4(_state, 0);\
   ECHO_SUBBYTES4(_state, 1);\
   ECHO_SUBBYTES4(_state, 2);\
   ECHO_SUBBYTES4(_state, 3);\
   ECHO_MIXBYTES(_state, _state2, 0, t1, t2, s2);\
   ECHO_MIXBYTES(_state, _state2, 1, t1, t2, s2);\
   ECHO_MIXBYTES(_state, _state2, 2, t1, t2, s2);\
   ECHO_MIXBYTES(_state, _state2, 3, t1, t2, s2);\
   ECHO_SUBBYTES4(_state2, 0);\
   ECHO_SUBBYTES4(_state2, 1);\
   ECHO_SUBBYTES4(_state2, 2);\
   ECHO_SUBBYTES4(_state2, 3);\
   ECHO_MIXBYTES(_state2, _state, 0, t1, t2, s2);\
   ECHO_MIXBYTES(_state2, _state, 1, t1, t2, s2);\
   ECHO_MIXBYTES(_state2, _state, 2, t1, t2, s2);\
   ECHO_MIXBYTES(_state2, _state, 3, t1, t2, s2)

/*
#define ECHO_ROUND_UNROLL2 \
   ECHO_SUBBYTES(_state, 0, 0);\
   ECHO_SUBBYTES(_state, 1, 0);\
@@ -138,7 +171,7 @@ MYALIGN const unsigned int mul2ipt[] = {0x728efc00, 0x6894e61a, 0x3fc3b14d, 0x2
   ECHO_MIXBYTES(_state2, _state, 1, t1, t2, s2);\
   ECHO_MIXBYTES(_state2, _state, 2, t1, t2, s2);\
   ECHO_MIXBYTES(_state2, _state, 3, t1, t2, s2)
*/


#define SAVESTATE(dst, src)\
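This hunk's main change folds pairs of chained XORs into a 3-input helper. A minimal sketch of how such a helper is typically defined (the project's real definition lives in simd-utils, not in this hunk): with AVX512VL, a single vpternlogd computes a ^ b ^ c in one instruction, where imm8 0x96 is the truth table of 3-way XOR.

#include <immintrin.h>

#if defined(__AVX512VL__)
#define mm128_xor3( a, b, c ) _mm_ternarylogic_epi32( a, b, c, 0x96 )
#else
#define mm128_xor3( a, b, c ) _mm_xor_si128( a, _mm_xor_si128( b, c ) )
#endif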
@@ -1,5 +1,4 @@
//#if 0
#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__VAES__)

#include "simd-utils.h"
#include "echo-hash-4way.h"
@@ -11,18 +10,27 @@ static const unsigned int mul2ipt[] __attribute__ ((aligned (64))) =
     0xfd5ba600, 0x2a8c71d7, 0x1eb845e3, 0xc96f9234
};
*/
// do these need to be reversed?

#define mul2mask \
   _mm512_set4_epi32( 0, 0, 0, 0x00001b00 )
// _mm512_set4_epi32( 0x00001b00, 0, 0, 0 )
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

#define lsbmask m512_const1_32( 0x01010101 )

#define ECHO_SUBBYTES4(state, j) \
   state[0][j] = _mm512_aesenc_epi128( state[0][j], k1 ); \
   k1 = _mm512_add_epi32( k1, one ); \
   state[1][j] = _mm512_aesenc_epi128( state[1][j], k1 ); \
   k1 = _mm512_add_epi32( k1, one ); \
   state[2][j] = _mm512_aesenc_epi128( state[2][j], k1 ); \
   k1 = _mm512_add_epi32( k1, one ); \
   state[3][j] = _mm512_aesenc_epi128( state[3][j], k1 ); \
   k1 = _mm512_add_epi32( k1, one ); \
   state[0][j] = _mm512_aesenc_epi128( state[0][j], m512_zero ); \
   state[1][j] = _mm512_aesenc_epi128( state[1][j], m512_zero ); \
   state[2][j] = _mm512_aesenc_epi128( state[2][j], m512_zero ); \
   state[3][j] = _mm512_aesenc_epi128( state[3][j], m512_zero )

#define ECHO_SUBBYTES( state, i, j ) \
   state[i][j] = _mm512_aesenc_epi128( state[i][j], k1 ); \
   state[i][j] = _mm512_aesenc_epi128( state[i][j], m512_zero ); \
   k1 = _mm512_add_epi32( k1, m512_one_128 );
   k1 = _mm512_add_epi32( k1, one ); \
   state[i][j] = _mm512_aesenc_epi128( state[i][j], m512_zero );
#define ECHO_MIXBYTES( state1, state2, j, t1, t2, s2 ) do \
{ \
@@ -30,87 +38,104 @@ static const unsigned int mul2ipt[] __attribute__ ((aligned (64))) =
   const int j2 = ( (j)+2 ) & 3; \
   const int j3 = ( (j)+3 ) & 3; \
   s2 = _mm512_add_epi8( state1[ 0 ][ j ], state1[ 0 ][ j ] ); \
   t1 = _mm512_srli_epi16( state1[ 0 ][ j ], 7 ); \
   t1 = _mm512_and_si512( t1, lsbmask ); \
   t2 = _mm512_shuffle_epi8( mul2mask, t1 ); \
   s2 = _mm512_xor_si512( s2, t2 ); \
   state2[ 0 ][ j ] = s2; \
   state2[ 1 ][ j ] = state1[ 0 ][ j ]; \
   state2[ 2 ][ j ] = state1[ 0 ][ j ]; \
   state2[ 3 ][ j ] = _mm512_xor_si512( s2, state1[ 0 ][ j ] ); \
   s2 = _mm512_add_epi8( state1[ 1 ][ j1 ], state1[ 1 ][ j1 ] ); \
   t1 = _mm512_srli_epi16( state1[ 1 ][ j1 ], 7 ); \
   t1 = _mm512_and_si512( t1, lsbmask ); \
   t2 = _mm512_shuffle_epi8( mul2mask, t1 ); \
   s2 = _mm512_xor_si512( s2, t2 ); \
   state2[ 0 ][ j ] = _mm512_xor_si512( state2[ 0 ][ j ], \
                      _mm512_xor_si512( s2, state1[ 1 ][ j1 ] ) ); \
   state2[ 1 ][ j ] = _mm512_xor_si512( state2[ 1 ][ j ], s2 ); \
   state2[ 2 ][ j ] = _mm512_xor_si512( state2[ 2 ][ j ], state1[ 1 ][ j1 ] ); \
   state2[ 3 ][ j ] = _mm512_xor_si512( state2[ 3 ][ j ], state1[ 1 ][ j1 ] ); \
   s2 = _mm512_add_epi8( state1[ 2 ][ j2 ], state1[ 2 ][ j2 ] ); \
   t1 = _mm512_srli_epi16( state1[ 2 ][ j2 ], 7 ); \
   t1 = _mm512_and_si512( t1, lsbmask ); \
   t2 = _mm512_shuffle_epi8( mul2mask, t1 ); \
   s2 = _mm512_xor_si512( s2, t2 ); \
   state2[ 0 ][ j ] = _mm512_xor_si512( state2[ 0 ][ j ], state1[ 2 ][ j2 ] ); \
   state2[ 1 ][ j ] = _mm512_xor_si512( state2[ 1 ][ j ], \
                      _mm512_xor_si512( s2, state1[ 2 ][ j2 ] ) ); \
   state2[ 2 ][ j ] = _mm512_xor_si512( state2[ 2 ][ j ], s2 ); \
   state2[ 3 ][ j ] = _mm512_xor_si512( state2[ 3 ][ j ], state1[ 2 ][ j2 ] ); \
   s2 = _mm512_add_epi8( state1[ 3 ][ j3 ], state1[ 3 ][ j3 ] ); \
   t1 = _mm512_srli_epi16( state1[ 3 ][ j3 ], 7 ); \
   t1 = _mm512_and_si512( t1, lsbmask ); \
   t2 = _mm512_shuffle_epi8( mul2mask, t1 ); \
   s2 = _mm512_xor_si512( s2, t2 ); \
   state2[ 0 ][ j ] = _mm512_xor_si512( state2[ 0 ][ j ], state1[ 3 ][ j3 ] ); \
   state2[ 1 ][ j ] = _mm512_xor_si512( state2[ 1 ][ j ], state1[ 3 ][ j3 ] ); \
   state2[ 2 ][ j ] = _mm512_xor_si512( state2[ 2 ][ j ], \
                      _mm512_xor_si512( s2, state1[ 3 ][ j3 ] ) ); \
   state2[ 3 ][ j ] = _mm512_xor_si512( state2[ 3 ][ j ], s2 ); \
   t1 = _mm512_srli_epi16( state1[ 0 ][ j ], 7 ); \
   t1 = _mm512_and_si512( t1, lsbmask ); \
   t2 = _mm512_shuffle_epi8( mul2mask, t1 ); \
   s2 = _mm512_xor_si512( s2, t2 ); \
   state2[ 0 ][ j ] = s2; \
   state2[ 1 ][ j ] = state1[ 0 ][ j ]; \
   state2[ 2 ][ j ] = state1[ 0 ][ j ]; \
   state2[ 3 ][ j ] = _mm512_xor_si512( s2, state1[ 0 ][ j ] ); \
   s2 = _mm512_add_epi8( state1[ 1 ][ j1 ], state1[ 1 ][ j1 ] ); \
   t1 = _mm512_srli_epi16( state1[ 1 ][ j1 ], 7 ); \
   t1 = _mm512_and_si512( t1, lsbmask ); \
   t2 = _mm512_shuffle_epi8( mul2mask, t1 ); \
   s2 = _mm512_xor_si512( s2, t2 ); \
   state2[ 0 ][ j ] = mm512_xor3( state2[ 0 ][ j ], s2, state1[ 1 ][ j1 ] ); \
   state2[ 1 ][ j ] = _mm512_xor_si512( state2[ 1 ][ j ], s2 ); \
   state2[ 2 ][ j ] = _mm512_xor_si512( state2[ 2 ][ j ], state1[ 1 ][ j1 ] ); \
   state2[ 3 ][ j ] = _mm512_xor_si512( state2[ 3 ][ j ], state1[ 1 ][ j1 ] ); \
   s2 = _mm512_add_epi8( state1[ 2 ][ j2 ], state1[ 2 ][ j2 ] ); \
   t1 = _mm512_srli_epi16( state1[ 2 ][ j2 ], 7 ); \
   t1 = _mm512_and_si512( t1, lsbmask ); \
   t2 = _mm512_shuffle_epi8( mul2mask, t1 ); \
   s2 = _mm512_xor_si512( s2, t2 ); \
   state2[ 0 ][ j ] = _mm512_xor_si512( state2[ 0 ][ j ], state1[ 2 ][ j2 ] ); \
   state2[ 1 ][ j ] = mm512_xor3( state2[ 1 ][ j ], s2, state1[ 2 ][ j2 ] ); \
   state2[ 2 ][ j ] = _mm512_xor_si512( state2[ 2 ][ j ], s2 ); \
   state2[ 3 ][ j ] = _mm512_xor_si512( state2[ 3 ][ j ], state1[ 2 ][ j2 ] ); \
   s2 = _mm512_add_epi8( state1[ 3 ][ j3 ], state1[ 3 ][ j3 ] ); \
   t1 = _mm512_srli_epi16( state1[ 3 ][ j3 ], 7 ); \
   t1 = _mm512_and_si512( t1, lsbmask ); \
   t2 = _mm512_shuffle_epi8( mul2mask, t1 ); \
   s2 = _mm512_xor_si512( s2, t2 ); \
   state2[ 0 ][ j ] = _mm512_xor_si512( state2[ 0 ][ j ], state1[ 3 ][ j3 ] ); \
   state2[ 1 ][ j ] = _mm512_xor_si512( state2[ 1 ][ j ], state1[ 3 ][ j3 ] ); \
   state2[ 2 ][ j ] = mm512_xor3( state2[ 2 ][ j ], s2, state1[ 3 ][ j3 ] ); \
   state2[ 3 ][ j ] = _mm512_xor_si512( state2[ 3 ][ j ], s2 ); \
} while(0)
#define ECHO_ROUND_UNROLL2 \
   ECHO_SUBBYTES4(_state, 0);\
   ECHO_SUBBYTES4(_state, 1);\
   ECHO_SUBBYTES4(_state, 2);\
   ECHO_SUBBYTES4(_state, 3);\
   ECHO_MIXBYTES(_state, _state2, 0, t1, t2, s2);\
   ECHO_MIXBYTES(_state, _state2, 1, t1, t2, s2);\
   ECHO_MIXBYTES(_state, _state2, 2, t1, t2, s2);\
   ECHO_MIXBYTES(_state, _state2, 3, t1, t2, s2);\
   ECHO_SUBBYTES4(_state2, 0);\
   ECHO_SUBBYTES4(_state2, 1);\
   ECHO_SUBBYTES4(_state2, 2);\
   ECHO_SUBBYTES4(_state2, 3);\
   ECHO_MIXBYTES(_state2, _state, 0, t1, t2, s2);\
   ECHO_MIXBYTES(_state2, _state, 1, t1, t2, s2);\
   ECHO_MIXBYTES(_state2, _state, 2, t1, t2, s2);\
   ECHO_MIXBYTES(_state2, _state, 3, t1, t2, s2)

/*
#define ECHO_ROUND_UNROLL2 \
   ECHO_SUBBYTES(_state, 0, 0);\
   ECHO_SUBBYTES(_state, 1, 0);\
   ECHO_SUBBYTES(_state, 2, 0);\
   ECHO_SUBBYTES(_state, 3, 0);\
   ECHO_SUBBYTES(_state, 0, 1);\
   ECHO_SUBBYTES(_state, 1, 1);\
   ECHO_SUBBYTES(_state, 2, 1);\
   ECHO_SUBBYTES(_state, 3, 1);\
   ECHO_SUBBYTES(_state, 0, 2);\
   ECHO_SUBBYTES(_state, 1, 2);\
   ECHO_SUBBYTES(_state, 2, 2);\
   ECHO_SUBBYTES(_state, 3, 2);\
   ECHO_SUBBYTES(_state, 0, 3);\
   ECHO_SUBBYTES(_state, 1, 3);\
   ECHO_SUBBYTES(_state, 2, 3);\
   ECHO_SUBBYTES(_state, 3, 3);\
   ECHO_MIXBYTES(_state, _state2, 0, t1, t2, s2);\
   ECHO_MIXBYTES(_state, _state2, 1, t1, t2, s2);\
   ECHO_MIXBYTES(_state, _state2, 2, t1, t2, s2);\
   ECHO_MIXBYTES(_state, _state2, 3, t1, t2, s2);\
   ECHO_SUBBYTES(_state2, 0, 0);\
   ECHO_SUBBYTES(_state2, 1, 0);\
   ECHO_SUBBYTES(_state2, 2, 0);\
   ECHO_SUBBYTES(_state2, 3, 0);\
   ECHO_SUBBYTES(_state2, 0, 1);\
   ECHO_SUBBYTES(_state2, 1, 1);\
   ECHO_SUBBYTES(_state2, 2, 1);\
   ECHO_SUBBYTES(_state2, 3, 1);\
   ECHO_SUBBYTES(_state2, 0, 2);\
   ECHO_SUBBYTES(_state2, 1, 2);\
   ECHO_SUBBYTES(_state2, 2, 2);\
   ECHO_SUBBYTES(_state2, 3, 2);\
   ECHO_SUBBYTES(_state2, 0, 3);\
   ECHO_SUBBYTES(_state2, 1, 3);\
   ECHO_SUBBYTES(_state2, 2, 3);\
   ECHO_SUBBYTES(_state2, 3, 3);\
   ECHO_MIXBYTES(_state2, _state, 0, t1, t2, s2);\
   ECHO_MIXBYTES(_state2, _state, 1, t1, t2, s2);\
   ECHO_MIXBYTES(_state2, _state, 2, t1, t2, s2);\
   ECHO_MIXBYTES(_state2, _state, 3, t1, t2, s2)
*/
#define SAVESTATE(dst, src)\
   dst[0][0] = src[0][0];\
@@ -137,6 +162,9 @@ void echo_4way_compress( echo_4way_context *ctx, const __m512i *pmsg,
   unsigned int r, b, i, j;
   __m512i t1, t2, s2, k1;
   __m512i _state[4][4], _state2[4][4], _statebackup[4][4];
   __m512i one = m512_one_128;
   __m512i mul2mask = m512_const2_64( 0, 0x00001b00 );
   __m512i lsbmask = m512_const1_32( 0x01010101 );

   _state[ 0 ][ 0 ] = ctx->state[ 0 ][ 0 ];
   _state[ 0 ][ 1 ] = ctx->state[ 0 ][ 1 ];
@@ -224,43 +252,43 @@ void echo_4way_compress( echo_4way_context *ctx, const __m512i *pmsg,

int echo_4way_init( echo_4way_context *ctx, int nHashSize )
{
   int i, j;

   ctx->k = m512_zero;
   ctx->processed_bits = 0;
   ctx->uBufferBytes = 0;

   switch( nHashSize )
   {
      case 256:
         ctx->uHashSize = 256;
         ctx->uBlockLength = 192;
         ctx->uRounds = 8;
         ctx->hashsize = _mm512_set4_epi32( 0, 0, 0, 0x100 );
         ctx->const1536 = _mm512_set4_epi32( 0, 0, 0, 0x600 );
         ctx->hashsize = m512_const2_64( 0, 0x100 );
         ctx->const1536 = m512_const2_64( 0, 0x600 );
         break;

      case 512:
         ctx->uHashSize = 512;
         ctx->uBlockLength = 128;
         ctx->uRounds = 10;
         ctx->hashsize = _mm512_set4_epi32( 0, 0, 0, 0x200 );
         ctx->const1536 = _mm512_set4_epi32( 0, 0, 0, 0x400 );
         ctx->hashsize = m512_const2_64( 0, 0x200 );
         ctx->const1536 = m512_const2_64( 0, 0x400 );
         break;

      default:
         return 1;
   }

   for( i = 0; i < 4; i++ )
      for( j = 0; j < nHashSize / 256; j++ )
         ctx->state[ i ][ j ] = ctx->hashsize;

   for( i = 0; i < 4; i++ )
      for( j = nHashSize / 256; j < 4; j++ )
         ctx->state[ i ][ j ] = m512_zero;

   return 0;
}
||||
|
||||
int echo_4way_update_close( echo_4way_context *state, void *hashval,
@@ -285,17 +313,13 @@ int echo_4way_update_close( echo_4way_context *state, void *hashval,
      vlen = databitlen / 128;   // * 4 lanes / 128 bits per lane
      memcpy_512( state->buffer, data, vlen );
      state->processed_bits += (unsigned int)( databitlen );
      remainingbits = _mm512_set4_epi32( 0, 0, 0, databitlen );
      remainingbits = m512_const2_64( 0, (uint64_t)databitlen );
   }

   state->buffer[ vlen ] = _mm512_set4_epi32( 0, 0, 0, 0x80 );
   state->buffer[ vlen ] = m512_const2_64( 0, 0x80 );
   memset_zero_512( state->buffer + vlen + 1, vblen - vlen - 2 );
   state->buffer[ vblen-2 ] =
                  _mm512_set4_epi32( (uint32_t)state->uHashSize << 16, 0, 0, 0 );
   state->buffer[ vblen-1 ] =
                  _mm512_set4_epi64( 0, state->processed_bits,
                                     0, state->processed_bits );
   state->buffer[ vblen-2 ] = m512_const2_64( (uint64_t)state->uHashSize << 48, 0 );
   state->buffer[ vblen-1 ] = m512_const2_64( 0, state->processed_bits );

   state->k = _mm512_add_epi64( state->k, remainingbits );
   state->k = _mm512_sub_epi64( state->k, state->const1536 );
@@ -328,16 +352,16 @@ int echo_4way_full( echo_4way_context *ctx, void *hashval, int nHashSize,
         ctx->uHashSize = 256;
         ctx->uBlockLength = 192;
         ctx->uRounds = 8;
         ctx->hashsize = _mm512_set4_epi32( 0, 0, 0, 0x100 );
         ctx->const1536 = _mm512_set4_epi32( 0, 0, 0, 0x600 );
         ctx->hashsize = m512_const2_64( 0, 0x100 );
         ctx->const1536 = m512_const2_64( 0, 0x600 );
         break;

      case 512:
         ctx->uHashSize = 512;
         ctx->uBlockLength = 128;
         ctx->uRounds = 10;
         ctx->hashsize = _mm512_set4_epi32( 0, 0, 0, 0x200 );
         ctx->const1536 = _mm512_set4_epi32( 0, 0, 0, 0x400 );
         ctx->hashsize = m512_const2_64( 0, 0x200 );
         ctx->const1536 = m512_const2_64( 0, 0x400 );
         break;

      default:
@@ -372,17 +396,14 @@ int echo_4way_full( echo_4way_context *ctx, void *hashval, int nHashSize,
      vlen = databitlen / 128;   // * 4 lanes / 128 bits per lane
      memcpy_512( ctx->buffer, data, vlen );
      ctx->processed_bits += (unsigned int)( databitlen );
      remainingbits = _mm512_set4_epi32( 0, 0, 0, databitlen );
      remainingbits = m512_const2_64( 0, databitlen );
   }

   ctx->buffer[ vlen ] = _mm512_set4_epi32( 0, 0, 0, 0x80 );
   ctx->buffer[ vlen ] = m512_const2_64( 0, 0x80 );
   memset_zero_512( ctx->buffer + vlen + 1, vblen - vlen - 2 );
   ctx->buffer[ vblen-2 ] =
                  _mm512_set4_epi32( (uint32_t)ctx->uHashSize << 16, 0, 0, 0 );
   ctx->buffer[ vblen-1 ] =
                  _mm512_set4_epi64( 0, ctx->processed_bits,
                                     0, ctx->processed_bits );
   ctx->buffer[ vblen-2 ] = m512_const2_64( (uint64_t)ctx->uHashSize << 48, 0 );
   ctx->buffer[ vblen-1 ] = m512_const2_64( 0, ctx->processed_bits );

   ctx->k = _mm512_add_epi64( ctx->k, remainingbits );
   ctx->k = _mm512_sub_epi64( ctx->k, ctx->const1536 );
@@ -400,5 +421,414 @@ int echo_4way_full( echo_4way_context *ctx, void *hashval, int nHashSize,
   return 0;
}

#endif // AVX512

#endif
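The close/full routines above build ECHO's final block: a 0x80 byte after the message, the hash length in bits near the end, and the processed bit count in the last 16 bytes. A scalar single-lane sketch of the same layout, derived from the vector constants above and hedged accordingly (names and the exact byte offsets are this editor's reading, not taken from the repo):

#include <stdint.h>
#include <string.h>

// buf is one 192- or 128-byte ECHO input block; msglen must leave room
// for the 18-byte trailer.  Little-endian layout assumed throughout.
static void echo_pad_block_sketch( uint8_t *buf, size_t blocklen,
                                   size_t msglen, uint16_t hashbits,
                                   uint64_t processed_bits )
{
   memset( buf + msglen, 0, blocklen - msglen );
   buf[ msglen ] = 0x80;                           // append a single 1 bit
   memcpy( buf + blocklen - 18, &hashbits, 2 );    // digest size in bits
   memcpy( buf + blocklen - 16, &processed_bits, 8 );  // message bit count
}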
||||
// AVX2 + VAES

#define mul2mask_2way m256_const2_64( 0, 0x0000000000001b00 )

#define lsbmask_2way m256_const1_32( 0x01010101 )

#define ECHO_SUBBYTES4_2WAY( state, j ) \
   state[0][j] = _mm256_aesenc_epi128( state[0][j], k1 ); \
   k1 = _mm256_add_epi32( k1, m256_one_128 ); \
   state[1][j] = _mm256_aesenc_epi128( state[1][j], k1 ); \
   k1 = _mm256_add_epi32( k1, m256_one_128 ); \
   state[2][j] = _mm256_aesenc_epi128( state[2][j], k1 ); \
   k1 = _mm256_add_epi32( k1, m256_one_128 ); \
   state[3][j] = _mm256_aesenc_epi128( state[3][j], k1 ); \
   k1 = _mm256_add_epi32( k1, m256_one_128 ); \
   state[0][j] = _mm256_aesenc_epi128( state[0][j], m256_zero ); \
   state[1][j] = _mm256_aesenc_epi128( state[1][j], m256_zero ); \
   state[2][j] = _mm256_aesenc_epi128( state[2][j], m256_zero ); \
   state[3][j] = _mm256_aesenc_epi128( state[3][j], m256_zero )

#define ECHO_SUBBYTES_2WAY( state, i, j ) \
   state[i][j] = _mm256_aesenc_epi128( state[i][j], k1 ); \
   k1 = _mm256_add_epi32( k1, m256_one_128 ); \
   state[i][j] = _mm256_aesenc_epi128( state[i][j], m256_zero )
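_mm256_aesenc_epi128 is the VAES instruction that runs one AES round on each 128-bit lane independently. For context, a hedged sketch of the equivalent on AES-NI-only hardware (hypothetical helper, not part of this diff), which is why the 2-way path is gated on VAES:

#include <immintrin.h>

static inline __m256i aesenc_2way_fallback( __m256i x, __m256i k )
{
   __m128i lo = _mm_aesenc_si128( _mm256_castsi256_si128( x ),
                                  _mm256_castsi256_si128( k ) );
   __m128i hi = _mm_aesenc_si128( _mm256_extracti128_si256( x, 1 ),
                                  _mm256_extracti128_si256( k, 1 ) );
   return _mm256_inserti128_si256( _mm256_castsi128_si256( lo ), hi, 1 );
}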
#define ECHO_MIXBYTES_2WAY( state1, state2, j, t1, t2, s2 ) do \
|
||||
{ \
|
||||
const int j1 = ( (j)+1 ) & 3; \
|
||||
const int j2 = ( (j)+2 ) & 3; \
|
||||
const int j3 = ( (j)+3 ) & 3; \
|
||||
s2 = _mm256_add_epi8( state1[ 0 ] [j ], state1[ 0 ][ j ] ); \
|
||||
t1 = _mm256_srli_epi16( state1[ 0 ][ j ], 7 ); \
|
||||
t1 = _mm256_and_si256( t1, lsbmask_2way );\
|
||||
t2 = _mm256_shuffle_epi8( mul2mask_2way, t1 ); \
|
||||
s2 = _mm256_xor_si256( s2, t2 ); \
|
||||
state2[ 0 ] [j ] = s2; \
|
||||
state2[ 1 ] [j ] = state1[ 0 ][ j ]; \
|
||||
state2[ 2 ] [j ] = state1[ 0 ][ j ]; \
|
||||
state2[ 3 ] [j ] = _mm256_xor_si256( s2, state1[ 0 ][ j ] );\
|
||||
s2 = _mm256_add_epi8( state1[ 1 ][ j1 ], state1[ 1 ][ j1 ] ); \
|
||||
t1 = _mm256_srli_epi16( state1[ 1 ][ j1 ], 7 ); \
|
||||
t1 = _mm256_and_si256( t1, lsbmask_2way ); \
|
||||
t2 = _mm256_shuffle_epi8( mul2mask_2way, t1 ); \
|
||||
s2 = _mm256_xor_si256( s2, t2 );\
|
||||
state2[ 0 ][ j ] = _mm256_xor_si256( state2[ 0 ][ j ], \
|
||||
_mm256_xor_si256( s2, state1[ 1 ][ j1 ] ) ); \
|
||||
state2[ 1 ][ j ] = _mm256_xor_si256( state2[ 1 ][ j ], s2 ); \
|
||||
state2[ 2 ][ j ] = _mm256_xor_si256( state2[ 2 ][ j ], state1[ 1 ][ j1 ] ); \
|
||||
state2[ 3 ][ j ] = _mm256_xor_si256( state2[ 3 ][ j ], state1[ 1 ][ j1 ] ); \
|
||||
s2 = _mm256_add_epi8( state1[ 2 ][ j2 ], state1[ 2 ][ j2 ] ); \
|
||||
t1 = _mm256_srli_epi16( state1[ 2 ][ j2 ], 7 ); \
|
||||
t1 = _mm256_and_si256( t1, lsbmask_2way ); \
|
||||
t2 = _mm256_shuffle_epi8( mul2mask_2way, t1 ); \
|
||||
s2 = _mm256_xor_si256( s2, t2 ); \
|
||||
state2[ 0 ][ j ] = _mm256_xor_si256( state2[ 0 ][ j ], state1[ 2 ][ j2 ] ); \
|
||||
state2[ 1 ][ j ] = _mm256_xor_si256( state2[ 1 ][ j ], \
|
||||
_mm256_xor_si256( s2, state1[ 2 ][ j2 ] ) ); \
|
||||
state2[ 2 ][ j ] = _mm256_xor_si256( state2[ 2 ][ j ], s2 ); \
|
||||
state2[ 3 ][ j ] = _mm256_xor_si256( state2[ 3][ j ], state1[ 2 ][ j2 ] ); \
|
||||
s2 = _mm256_add_epi8( state1[ 3 ][ j3 ], state1[ 3 ][ j3 ] ); \
|
||||
t1 = _mm256_srli_epi16( state1[ 3 ][ j3 ], 7 ); \
|
||||
t1 = _mm256_and_si256( t1, lsbmask_2way ); \
|
||||
t2 = _mm256_shuffle_epi8( mul2mask_2way, t1 ); \
|
||||
s2 = _mm256_xor_si256( s2, t2 ); \
|
||||
state2[ 0 ][ j ] = _mm256_xor_si256( state2[ 0 ][ j ], state1[ 3 ][ j3 ] ); \
|
||||
state2[ 1 ][ j ] = _mm256_xor_si256( state2[ 1 ][ j ], state1[ 3 ][ j3 ] ); \
|
||||
state2[ 2 ][ j ] = _mm256_xor_si256( state2[ 2 ][ j ], \
|
||||
_mm256_xor_si256( s2, state1[ 3 ][ j3] ) ); \
|
||||
state2[ 3 ][ j ] = _mm256_xor_si256( state2[ 3 ][ j ], s2 ); \
|
||||
} while(0)
|
||||
|
||||
#define ECHO_ROUND_UNROLL2_2WAY \
|
||||
ECHO_SUBBYTES4_2WAY(_state, 0);\
|
||||
ECHO_SUBBYTES4_2WAY(_state, 1);\
|
||||
ECHO_SUBBYTES4_2WAY(_state, 2);\
|
||||
ECHO_SUBBYTES4_2WAY(_state, 3);\
|
||||
ECHO_MIXBYTES_2WAY(_state, _state2, 0, t1, t2, s2);\
|
||||
ECHO_MIXBYTES_2WAY(_state, _state2, 1, t1, t2, s2);\
|
||||
ECHO_MIXBYTES_2WAY(_state, _state2, 2, t1, t2, s2);\
|
||||
ECHO_MIXBYTES_2WAY(_state, _state2, 3, t1, t2, s2);\
|
||||
ECHO_SUBBYTES4_2WAY(_state2, 0);\
|
||||
ECHO_SUBBYTES4_2WAY(_state2, 1);\
|
||||
ECHO_SUBBYTES4_2WAY(_state2, 2);\
|
||||
ECHO_SUBBYTES4_2WAY(_state2, 3);\
|
||||
ECHO_MIXBYTES_2WAY(_state2, _state, 0, t1, t2, s2);\
|
||||
ECHO_MIXBYTES_2WAY(_state2, _state, 1, t1, t2, s2);\
|
||||
ECHO_MIXBYTES_2WAY(_state2, _state, 2, t1, t2, s2);\
|
||||
ECHO_MIXBYTES_2WAY(_state2, _state, 3, t1, t2, s2)
|
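Every MIXBYTES variant in this diff uses the same byte-wise GF(2^8) doubling ("xtime") idiom. Since SSE/AVX has no 8-bit shift, the code adds each byte to itself (a left shift by 1), recovers each byte's old msb with a 16-bit shift plus mask, and uses pshufb against a table holding 0x1b to conditionally apply the AES reduction polynomial. A hedged standalone sketch (hypothetical helper name):

#include <immintrin.h>

static inline __m256i gf256_mul2_2way( __m256i x, __m256i mul2mask,
                                       __m256i lsbmask )
{
   __m256i d  = _mm256_add_epi8( x, x );                  // x << 1 per byte
   __m256i hi = _mm256_and_si256( _mm256_srli_epi16( x, 7 ), lsbmask );
   // byte is 0 or 1; shuffle picks 0x00 or 0x1b from the table
   return _mm256_xor_si256( d, _mm256_shuffle_epi8( mul2mask, hi ) );
}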
||||
|
||||
/*
|
||||
#define ECHO_ROUND_UNROLL2_2WAY \
|
||||
ECHO_SUBBYTES_2WAY(_state, 0, 0);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 1, 0);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 2, 0);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 3, 0);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 0, 1);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 1, 1);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 2, 1);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 3, 1);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 0, 2);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 1, 2);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 2, 2);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 3, 2);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 0, 3);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 1, 3);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 2, 3);\
|
||||
ECHO_SUBBYTES_2WAY(_state, 3, 3);\
|
||||
ECHO_MIXBYTES_2WAY(_state, _state2, 0, t1, t2, s2);\
|
||||
ECHO_MIXBYTES_2WAY(_state, _state2, 1, t1, t2, s2);\
|
||||
ECHO_MIXBYTES_2WAY(_state, _state2, 2, t1, t2, s2);\
|
||||
ECHO_MIXBYTES_2WAY(_state, _state2, 3, t1, t2, s2);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 0, 0);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 1, 0);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 2, 0);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 3, 0);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 0, 1);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 1, 1);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 2, 1);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 3, 1);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 0, 2);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 1, 2);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 2, 2);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 3, 2);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 0, 3);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 1, 3);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 2, 3);\
|
||||
ECHO_SUBBYTES_2WAY(_state2, 3, 3);\
|
||||
ECHO_MIXBYTES_2WAY(_state2, _state, 0, t1, t2, s2);\
|
||||
ECHO_MIXBYTES_2WAY(_state2, _state, 1, t1, t2, s2);\
|
||||
ECHO_MIXBYTES_2WAY(_state2, _state, 2, t1, t2, s2);\
|
||||
ECHO_MIXBYTES_2WAY(_state2, _state, 3, t1, t2, s2)
|
||||
*/
|
||||
|
||||
#define SAVESTATE_2WAY(dst, src)\
|
||||
dst[0][0] = src[0][0];\
|
||||
dst[0][1] = src[0][1];\
|
||||
dst[0][2] = src[0][2];\
|
||||
dst[0][3] = src[0][3];\
|
||||
dst[1][0] = src[1][0];\
|
||||
dst[1][1] = src[1][1];\
|
||||
dst[1][2] = src[1][2];\
|
||||
dst[1][3] = src[1][3];\
|
||||
dst[2][0] = src[2][0];\
|
||||
dst[2][1] = src[2][1];\
|
||||
dst[2][2] = src[2][2];\
|
||||
dst[2][3] = src[2][3];\
|
||||
dst[3][0] = src[3][0];\
|
||||
dst[3][1] = src[3][1];\
|
||||
dst[3][2] = src[3][2];\
|
||||
dst[3][3] = src[3][3]
|
||||
|
||||
// blockcount always 1
|
||||
void echo_2way_compress( echo_2way_context *ctx, const __m256i *pmsg,
|
||||
unsigned int uBlockCount )
|
||||
{
|
||||
unsigned int r, b, i, j;
|
||||
__m256i t1, t2, s2, k1;
|
||||
__m256i _state[4][4], _state2[4][4], _statebackup[4][4];
|
||||
|
||||
_state[ 0 ][ 0 ] = ctx->state[ 0 ][ 0 ];
|
||||
_state[ 0 ][ 1 ] = ctx->state[ 0 ][ 1 ];
|
||||
_state[ 0 ][ 2 ] = ctx->state[ 0 ][ 2 ];
|
||||
_state[ 0 ][ 3 ] = ctx->state[ 0 ][ 3 ];
|
||||
_state[ 1 ][ 0 ] = ctx->state[ 1 ][ 0 ];
|
||||
_state[ 1 ][ 1 ] = ctx->state[ 1 ][ 1 ];
|
||||
_state[ 1 ][ 2 ] = ctx->state[ 1 ][ 2 ];
|
||||
_state[ 1 ][ 3 ] = ctx->state[ 1 ][ 3 ];
|
||||
_state[ 2 ][ 0 ] = ctx->state[ 2 ][ 0 ];
|
||||
_state[ 2 ][ 1 ] = ctx->state[ 2 ][ 1 ];
|
||||
_state[ 2 ][ 2 ] = ctx->state[ 2 ][ 2 ];
|
||||
_state[ 2 ][ 3 ] = ctx->state[ 2 ][ 3 ];
|
||||
_state[ 3 ][ 0 ] = ctx->state[ 3 ][ 0 ];
|
||||
_state[ 3 ][ 1 ] = ctx->state[ 3 ][ 1 ];
|
||||
_state[ 3 ][ 2 ] = ctx->state[ 3 ][ 2 ];
|
||||
_state[ 3 ][ 3 ] = ctx->state[ 3 ][ 3 ];
|
||||
|
||||
for ( b = 0; b < uBlockCount; b++ )
|
||||
{
|
||||
ctx->k = _mm256_add_epi64( ctx->k, ctx->const1536 );
|
||||
|
||||
for( j = ctx->uHashSize / 256; j < 4; j++ )
|
||||
{
|
||||
for ( i = 0; i < 4; i++ )
|
||||
{
|
||||
_state[ i ][ j ] = _mm256_load_si256(
|
||||
pmsg + 4 * (j - (ctx->uHashSize / 256)) + i );
|
||||
}
|
||||
}
|
||||
|
||||
// save state
|
||||
SAVESTATE_2WAY( _statebackup, _state );
|
||||
|
||||
k1 = ctx->k;
|
||||
|
||||
for ( r = 0; r < ctx->uRounds / 2; r++ )
|
||||
{
|
||||
ECHO_ROUND_UNROLL2_2WAY;
|
||||
}
|
||||
|
||||
if ( ctx->uHashSize == 256 )
|
||||
{
|
||||
for ( i = 0; i < 4; i++ )
|
||||
{
|
||||
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
|
||||
_state[ i ][ 1 ] );
|
||||
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
|
||||
_state[ i ][ 2 ] );
|
||||
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
|
||||
_state[ i ][ 3 ] );
|
||||
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
|
||||
_statebackup[ i ][ 0 ] );
|
||||
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
|
||||
_statebackup[ i ][ 1 ] );
|
||||
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
|
||||
_statebackup[ i ][ 2 ] ) ;
|
||||
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
|
||||
_statebackup[ i ][ 3 ] );
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for ( i = 0; i < 4; i++ )
|
||||
{
|
||||
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
|
||||
_state[ i ][ 2 ] );
|
||||
_state[ i ][ 1 ] = _mm256_xor_si256( _state[ i ][ 1 ],
|
||||
_state[ i ][ 3 ] );
|
||||
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
|
||||
_statebackup[ i ][ 0 ] );
|
||||
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ] [0 ],
|
||||
_statebackup[ i ][ 2 ] );
|
||||
_state[ i ][ 1 ] = _mm256_xor_si256( _state[ i ][ 1 ],
|
||||
_statebackup[ i ][ 1 ] );
|
||||
_state[ i ][ 1 ] = _mm256_xor_si256( _state[ i ][ 1 ],
|
||||
_statebackup[ i ][ 3 ] );
|
||||
}
|
||||
}
|
||||
pmsg += ctx->uBlockLength;
|
||||
}
|
||||
SAVESTATE_2WAY(ctx->state, _state);
|
||||
|
||||
}
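The end of echo_2way_compress above is ECHO's feed-forward (BigFinal): the post-round state is XOR-folded column-wise together with the backup of the input state. A hedged scalar sketch of the 512-bit branch, folding one 32-bit limb at a time:

#include <stdint.h>

// w is the state after the rounds, v the saved input; 4x4 128-bit words,
// each shown as four 32-bit limbs.
static void echo_bigfinal_512_sketch( uint32_t w[4][4][4],
                                      const uint32_t v[4][4][4] )
{
   for ( int i = 0; i < 4; i++ )
      for ( int k = 0; k < 4; k++ )
      {
         w[i][0][k] ^= w[i][2][k] ^ v[i][0][k] ^ v[i][2][k];
         w[i][1][k] ^= w[i][3][k] ^ v[i][1][k] ^ v[i][3][k];
      }
}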
|
||||
int echo_2way_init( echo_2way_context *ctx, int nHashSize )
{
   int i, j;

   ctx->k = m256_zero;
   ctx->processed_bits = 0;
   ctx->uBufferBytes = 0;

   switch( nHashSize )
   {
      case 256:
         ctx->uHashSize = 256;
         ctx->uBlockLength = 192;
         ctx->uRounds = 8;
         ctx->hashsize = m256_const2_64( 0, 0x100 );
         ctx->const1536 = m256_const2_64( 0, 0x600 );
         break;

      case 512:
         ctx->uHashSize = 512;
         ctx->uBlockLength = 128;
         ctx->uRounds = 10;
         ctx->hashsize = m256_const2_64( 0, 0x200 );
         ctx->const1536 = m256_const2_64( 0, 0x400 );
         break;

      default:
         return 1;
   }

   for( i = 0; i < 4; i++ )
      for( j = 0; j < nHashSize / 256; j++ )
         ctx->state[ i ][ j ] = ctx->hashsize;

   for( i = 0; i < 4; i++ )
      for( j = nHashSize / 256; j < 4; j++ )
         ctx->state[ i ][ j ] = m256_zero;

   return 0;
}
int echo_2way_update_close( echo_2way_context *state, void *hashval,
                            const void *data, int databitlen )
{
   // Per-lane byte lengths seen here are 32 (possibly), 64, 80 or 128.
   // All but a full 128 byte block are partial; a full block is
   // compressed immediately below.

   int vlen = databitlen / 128;   // * 2 lanes / 128 bits per lane
   const int vblen = state->uBlockLength / 16;   // 16 bytes per lane
   __m256i remainingbits;

   if ( databitlen == 1024 )
   {
      echo_2way_compress( state, data, 1 );
      state->processed_bits = 1024;
      remainingbits = m256_const2_64( 0, -1024 );
      vlen = 0;
   }
   else
   {
      memcpy_256( state->buffer, data, vlen );
      state->processed_bits += (unsigned int)( databitlen );
      remainingbits = m256_const2_64( 0, databitlen );
   }

   state->buffer[ vlen ] = m256_const2_64( 0, 0x80 );
   memset_zero_256( state->buffer + vlen + 1, vblen - vlen - 2 );
   state->buffer[ vblen-2 ] = m256_const2_64( (uint64_t)state->uHashSize << 48, 0 );
   state->buffer[ vblen-1 ] = m256_const2_64( 0, state->processed_bits );

   state->k = _mm256_add_epi64( state->k, remainingbits );
   state->k = _mm256_sub_epi64( state->k, state->const1536 );

   echo_2way_compress( state, state->buffer, 1 );

   _mm256_store_si256( (__m256i*)hashval + 0, state->state[ 0 ][ 0 ] );
   _mm256_store_si256( (__m256i*)hashval + 1, state->state[ 1 ][ 0 ] );

   if ( state->uHashSize == 512 )
   {
      _mm256_store_si256( (__m256i*)hashval + 2, state->state[ 2 ][ 0 ] );
      _mm256_store_si256( (__m256i*)hashval + 3, state->state[ 3 ][ 0 ] );
   }
   return 0;
}
||||
|
||||
int echo_2way_full( echo_2way_context *ctx, void *hashval, int nHashSize,
|
||||
const void *data, int datalen )
|
||||
{
|
||||
int i, j;
|
||||
int databitlen = datalen * 8;
|
||||
ctx->k = m256_zero;
|
||||
ctx->processed_bits = 0;
|
||||
ctx->uBufferBytes = 0;
|
||||
|
||||
switch( nHashSize )
|
||||
{
|
||||
case 256:
|
||||
ctx->uHashSize = 256;
|
||||
ctx->uBlockLength = 192;
|
||||
ctx->uRounds = 8;
|
||||
ctx->hashsize = m256_const2_64( 0, 0x100 );
|
||||
ctx->const1536 = m256_const2_64( 0, 0x600 );
|
||||
break;
|
||||
|
||||
case 512:
|
||||
ctx->uHashSize = 512;
|
||||
ctx->uBlockLength = 128;
|
||||
ctx->uRounds = 10;
|
||||
ctx->hashsize = m256_const2_64( 0, 0x200 );
|
||||
ctx->const1536 = m256_const2_64( 0, 0x400 );
|
||||
break;
|
||||
|
||||
default:
|
||||
return 1;
|
||||
}
|
||||
|
||||
for( i = 0; i < 4; i++ )
|
||||
for( j = 0; j < nHashSize / 256; j++ )
|
||||
ctx->state[ i ][ j ] = ctx->hashsize;
|
||||
|
||||
for( i = 0; i < 4; i++ )
|
||||
for( j = nHashSize / 256; j < 4; j++ )
|
||||
ctx->state[ i ][ j ] = m256_zero;
|
||||
|
||||
int vlen = datalen / 32;
|
||||
const int vblen = ctx->uBlockLength / 16; // 16 bytes per lane
|
||||
__m256i remainingbits;
|
||||
|
||||
if ( databitlen == 1024 )
|
||||
{
|
||||
echo_2way_compress( ctx, data, 1 );
|
||||
ctx->processed_bits = 1024;
|
||||
remainingbits = m256_const2_64( 0, -1024 );
|
||||
vlen = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
vlen = databitlen / 128; // * 4 lanes / 128 bits per lane
|
||||
memcpy_256( ctx->buffer, data, vlen );
|
||||
ctx->processed_bits += (unsigned int)( databitlen );
|
||||
remainingbits = m256_const2_64( 0, databitlen );
|
||||
}
|
||||
|
||||
ctx->buffer[ vlen ] = m256_const2_64( 0, 0x80 );
|
||||
memset_zero_256( ctx->buffer + vlen + 1, vblen - vlen - 2 );
|
||||
ctx->buffer[ vblen-2 ] = m256_const2_64( (uint64_t)ctx->uHashSize << 48, 0 );
|
||||
ctx->buffer[ vblen-1 ] = m256_const2_64( 0, ctx->processed_bits );
|
||||
|
||||
ctx->k = _mm256_add_epi64( ctx->k, remainingbits );
|
||||
ctx->k = _mm256_sub_epi64( ctx->k, ctx->const1536 );
|
||||
|
||||
echo_2way_compress( ctx, ctx->buffer, 1 );
|
||||
|
||||
_mm256_store_si256( (__m256i*)hashval + 0, ctx->state[ 0 ][ 0] );
|
||||
_mm256_store_si256( (__m256i*)hashval + 1, ctx->state[ 1 ][ 0] );
|
||||
|
||||
if ( ctx->uHashSize == 512 )
|
||||
{
|
||||
_mm256_store_si256( (__m256i*)hashval + 2, ctx->state[ 2 ][ 0 ] );
|
||||
_mm256_store_si256( (__m256i*)hashval + 3, ctx->state[ 3 ][ 0 ] );
|
||||
}
|
||||
return 0;
|
||||
}
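A hedged usage sketch of the one-shot wrapper defined just above: hashing two 80-byte block headers per call, assuming the caller supplies data already 2x128-bit interleaved as elsewhere in cpuminer-opt.

#include <immintrin.h>
#include "echo-hash-4way.h"

static void echo512_2way_example( const void *vdata, void *vhash )
{
   echo_2way_context ctx;
   // expands to echo_2way_full( &ctx, vhash, 512, vdata, 80 )
   echo512_2way_full( &ctx, vhash, vdata, 80 );
}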
|
||||
|
||||
|
||||
#endif // VAES
|
||||
|
@@ -1,10 +1,12 @@
#if !defined(ECHO_HASH_4WAY_H__)
#define ECHO_HASH_4WAY_H__ 1

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__VAES__)

#include "simd-utils.h"

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

typedef struct
{
   __m512i state[4][4];
@@ -20,6 +22,7 @@ typedef struct
   unsigned int processed_bits;

} echo_4way_context __attribute__ ((aligned (64)));
#define echo512_4way_context echo_4way_context

int echo_4way_init( echo_4way_context *state, int hashbitlen );
#define echo512_4way_init( state ) echo_4way_init( state, 512 )
@@ -29,8 +32,8 @@ int echo_4way_update( echo_4way_context *state, const void *data,
                      unsigned int databitlen);
#define echo512_4way_update echo_4way_update

int echo_close( echo_4way_context *state, void *hashval );
#define echo512_4way_close echo_4way_close
// int echo_4way_close( echo_4way_context *state, void *hashval );
// #define echo512_4way_close echo_4way_close

int echo_4way_update_close( echo_4way_context *state, void *hashval,
                            const void *data, int databitlen );
@@ -43,5 +46,45 @@ int echo_4way_full( echo_4way_context *ctx, void *hashval, int nHashSize,
#define echo256_4way_full( state, hashval, data, datalen ) \
        echo_4way_full( state, hashval, 256, data, datalen )

#endif
#endif
#endif // AVX512

typedef struct
{
   __m256i state[4][4];
   __m256i buffer[ 4 * 192 / 16 ];   // 4x128 interleaved 192 bytes
   __m256i k;
   __m256i hashsize;
   __m256i const1536;

   unsigned int uRounds;
   unsigned int uHashSize;
   unsigned int uBlockLength;
   unsigned int uBufferBytes;
   unsigned int processed_bits;

} echo_2way_context __attribute__ ((aligned (64)));
#define echo512_2way_context echo_2way_context

int echo_2way_init( echo_2way_context *state, int hashbitlen );
#define echo512_2way_init( state ) echo_2way_init( state, 512 )
#define echo256_2way_init( state ) echo_2way_init( state, 256 )

int echo_2way_update( echo_2way_context *state, const void *data,
                      unsigned int databitlen);
#define echo512_2way_update echo_2way_update

int echo_2way_update_close( echo_2way_context *state, void *hashval,
                            const void *data, int databitlen );
#define echo512_2way_update_close echo_2way_update_close

int echo_2way_full( echo_2way_context *ctx, void *hashval, int nHashSize,
                    const void *data, int datalen );
#define echo512_2way_full( state, hashval, data, datalen ) \
        echo_2way_full( state, hashval, 512, data, datalen )
#define echo256_2way_full( state, hashval, data, datalen ) \
        echo_2way_full( state, hashval, 256, data, datalen )

#endif // VAES

#endif // ECHO_HASH_4WAY_H__
590  algo/fugue/fugue-aesni.c  Normal file
@@ -0,0 +1,590 @@
/*
 * file    : fugue_vperm.c
 * version : 1.0.208
 * date    : 14.12.2010
 *
 * - vperm and aes_ni implementations of hash function Fugue
 * - implements NIST hash api
 * - assumes that message length is a multiple of 8 bits
 * - _FUGUE_VPERM_ must be defined if compiling with ../main.c
 * - default version is vperm, define AES_NI for aes_ni version
 *
 * Cagdas Calik
 * ccalik@metu.edu.tr
 * Institute of Applied Mathematics, Middle East Technical University, Turkey.
 *
 */

#if defined(__AES__)
#include <x86intrin.h>

#include <memory.h>
#include "fugue-aesni.h"

MYALIGN const unsigned long long _supermix1a[]  = {0x0202010807020100, 0x0a05000f06010c0b};
MYALIGN const unsigned long long _supermix1b[]  = {0x0b0d080703060504, 0x0e0a090c050e0f0a};
MYALIGN const unsigned long long _supermix1c[]  = {0x0402060c070d0003, 0x090a060580808080};
MYALIGN const unsigned long long _supermix1d[]  = {0x808080800f0e0d0c, 0x0f0e0d0c80808080};
MYALIGN const unsigned long long _supermix2a[]  = {0x07020d0880808080, 0x0b06010c050e0f0a};
MYALIGN const unsigned long long _supermix4a[]  = {0x000f0a050c0b0601, 0x0302020404030e09};
MYALIGN const unsigned long long _supermix4b[]  = {0x07020d08080e0d0d, 0x07070908050e0f0a};
MYALIGN const unsigned long long _supermix4c[]  = {0x0706050403020000, 0x0302000007060504};
MYALIGN const unsigned long long _supermix7a[]  = {0x010c0b060d080702, 0x0904030e03000104};
MYALIGN const unsigned long long _supermix7b[]  = {0x8080808080808080, 0x0504070605040f06};
MYALIGN const unsigned long long _k_n[]         = {0x4E4E4E4E4E4E4E4E, 0x1B1B1B1B0E0E0E0E};
MYALIGN const unsigned char _shift_one_mask[]   = {7, 4, 5, 6, 11, 8, 9, 10, 15, 12, 13, 14, 3, 0, 1, 2};
MYALIGN const unsigned char _shift_four_mask[]  = {13, 14, 15, 12, 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8};
MYALIGN const unsigned char _shift_seven_mask[] = {10, 11, 8, 9, 14, 15, 12, 13, 2, 3, 0, 1, 6, 7, 4, 5};
MYALIGN const unsigned char _aes_shift_rows[]   = {0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11};
MYALIGN const unsigned int  _inv_shift_rows[]   = {0x070a0d00, 0x0b0e0104, 0x0f020508, 0x0306090c};
MYALIGN const unsigned int  _mul2mask[]         = {0x1b1b0000, 0x00000000, 0x00000000, 0x00000000};
MYALIGN const unsigned int  _mul4mask[]         = {0x2d361b00, 0x00000000, 0x00000000, 0x00000000};
MYALIGN const unsigned int  _lsbmask2[]         = {0x03030303, 0x03030303, 0x03030303, 0x03030303};

MYALIGN const unsigned int _IV512[] = {
   0x00000000, 0x00000000, 0x7ea50788, 0x00000000,
   0x75af16e6, 0xdbe4d3c5, 0x27b09aac, 0x00000000,
   0x17f115d9, 0x54cceeb6, 0x0b02e806, 0x00000000,
   0xd1ef924a, 0xc9e2c6aa, 0x9813b2dd, 0x00000000,
   0x3858e6ca, 0x3f207f43, 0xe778ea25, 0x00000000,
   0xd6dd1f95, 0x1dd16eda, 0x67353ee1, 0x00000000};
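A hedged note on the layout the macros below assume, as this editor reads it: Fugue-512's state is a ring of 36 32-bit words S[0..35], packed here into twelve __m128i with three active words each (the fourth lane is scratch), so TIX/CMIX/SMIX can work on whole columns with SSE operations. Rather than physically rotating the ring, Compress512 cycles ctx->base through 0..2 using pre-rotated copies of the round code, and Final512 converts that into a word offset with base = (36 - 12 * ctx->base) % 36.

// Ring addressing as used by LOADCOLUMN/STORECOLUMN further down.
static inline unsigned fugue_word_index( unsigned base, unsigned k )
{
   return ( base + k ) % 36;
}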
#if defined(__SSE4_1__)
|
||||
|
||||
#define PACK_S0(s0, s1, t1)\
|
||||
s0 = _mm_castps_si128(_mm_insert_ps(_mm_castsi128_ps(s0), _mm_castsi128_ps(s1), 0x30))
|
||||
|
||||
#define UNPACK_S0(s0, s1, t1)\
|
||||
s1 = _mm_castps_si128(_mm_insert_ps(_mm_castsi128_ps(s1), _mm_castsi128_ps(s0), 0xc0));\
|
||||
s0 = mm128_mask_32( s0, 8 )
|
||||
|
||||
#define CMIX(s1, s2, r1, r2, t1, t2)\
|
||||
t1 = s1;\
|
||||
t1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(t1), _mm_castsi128_ps(s2), _MM_SHUFFLE(3, 0, 2, 1)));\
|
||||
r1 = _mm_xor_si128(r1, t1);\
|
||||
r2 = _mm_xor_si128(r2, t1);
|
||||
|
||||
#else // SSE2
|
||||
|
||||
#define PACK_S0(s0, s1, t1)\
|
||||
t1 = _mm_shuffle_epi32(s1, _MM_SHUFFLE(0, 3, 3, 3));\
|
||||
s0 = _mm_xor_si128(s0, t1);
|
||||
|
||||
#define UNPACK_S0(s0, s1, t1)\
|
||||
t1 = _mm_shuffle_epi32(s0, _MM_SHUFFLE(3, 3, 3, 3));\
|
||||
s1 = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(s1), _mm_castsi128_ps(t1)));\
|
||||
s0 = mm128_mask_32( s0, 8 )
|
||||
|
||||
#define CMIX(s1, s2, r1, r2, t1, t2)\
|
||||
t1 = _mm_shuffle_epi32(s1, 0xf9);\
|
||||
t2 = _mm_shuffle_epi32(s2, 0xcf);\
|
||||
t1 = _mm_xor_si128(t1, t2);\
|
||||
r1 = _mm_xor_si128(r1, t1);\
|
||||
r2 = _mm_xor_si128(r2, t1)
|
||||
|
||||
#endif
|
||||
|
||||
#define TIX256(msg, s10, s8, s24, s0, t1, t2, t3)\
|
||||
t1 = _mm_shuffle_epi32(s0, _MM_SHUFFLE(3, 3, 0, 3));\
|
||||
s10 = _mm_xor_si128(s10, t1);\
|
||||
t1 = _mm_castps_si128(_mm_load_ss((float*)msg));\
|
||||
s0 = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(s0), _mm_castsi128_ps(t1)));\
|
||||
t1 = _mm_slli_si128(t1, 8);\
|
||||
s8 = _mm_xor_si128(s8, t1);\
|
||||
t1 = _mm_shuffle_epi32(s24, _MM_SHUFFLE(3, 3, 0, 3));\
|
||||
s0 = _mm_xor_si128(s0, t1)
|
||||
|
||||
|
||||
#define TIX384(msg, s16, s8, s27, s30, s0, s4, t1, t2, t3)\
|
||||
t1 = _mm_shuffle_epi32(s0, _MM_SHUFFLE(3, 3, 0, 3));\
|
||||
s16 = _mm_xor_si128(s16, t1);\
|
||||
t1 = _mm_castps_si128(_mm_load_ss((float*)msg));\
|
||||
s0 = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(s0), _mm_castsi128_ps(t1)));\
|
||||
t1 = _mm_slli_si128(t1, 8);\
|
||||
s8 = _mm_xor_si128(s8, t1);\
|
||||
t1 = _mm_shuffle_epi32(s27, _MM_SHUFFLE(3, 3, 0, 3));\
|
||||
s0 = _mm_xor_si128(s0, t1);\
|
||||
t1 = _mm_shuffle_epi32(s30, _MM_SHUFFLE(3, 3, 0, 3));\
|
||||
s4 = _mm_xor_si128(s4, t1)
|
||||
|
||||
#define TIX512(msg, s22, s8, s24, s27, s30, s0, s4, s7, t1, t2, t3)\
|
||||
t1 = _mm_shuffle_epi32(s0, _MM_SHUFFLE(3, 3, 0, 3));\
|
||||
s22 = _mm_xor_si128(s22, t1);\
|
||||
t1 = _mm_castps_si128(_mm_load_ss((float*)msg));\
|
||||
s0 = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(s0), _mm_castsi128_ps(t1)));\
|
||||
t1 = _mm_slli_si128(t1, 8);\
|
||||
s8 = _mm_xor_si128(s8, t1);\
|
||||
t1 = _mm_shuffle_epi32(s24, _MM_SHUFFLE(3, 3, 0, 3));\
|
||||
s0 = _mm_xor_si128(s0, t1);\
|
||||
t1 = _mm_shuffle_epi32(s27, _MM_SHUFFLE(3, 3, 0, 3));\
|
||||
s4 = _mm_xor_si128(s4, t1);\
|
||||
t1 = _mm_shuffle_epi32(s30, _MM_SHUFFLE(3, 3, 0, 3));\
|
||||
s7 = _mm_xor_si128(s7, t1)
|
||||
|
||||
#define PRESUPERMIX(t0, t1, t2, t3, t4)\
|
||||
t2 = t0;\
|
||||
t3 = _mm_add_epi8(t0, t0);\
|
||||
t4 = _mm_add_epi8(t3, t3);\
|
||||
t1 = _mm_srli_epi16(t0, 6);\
|
||||
t1 = _mm_and_si128(t1, M128(_lsbmask2));\
|
||||
t3 = _mm_xor_si128(t3, _mm_shuffle_epi8(M128(_mul2mask), t1));\
|
||||
t0 = _mm_xor_si128(t4, _mm_shuffle_epi8(M128(_mul4mask), t1))
|
||||
|
||||
/*
|
||||
#define PRESUPERMIX(x, t1, s1, s2, t2)\
|
||||
s1 = x;\
|
||||
s2 = _mm_add_epi8(x, x);\
|
||||
t2 = _mm_add_epi8(s2, s2);\
|
||||
t1 = _mm_srli_epi16(x, 6);\
|
||||
t1 = _mm_and_si128(t1, M128(_lsbmask2));\
|
||||
s2 = _mm_xor_si128(s2, _mm_shuffle_epi8(M128(_mul2mask), t1));\
|
||||
x = _mm_xor_si128(t2, _mm_shuffle_epi8(M128(_mul4mask), t1))
|
||||
*/
|
||||
|
||||
#define SUBSTITUTE(r0, _t2 )\
|
||||
_t2 = _mm_shuffle_epi8(r0, M128(_inv_shift_rows));\
|
||||
_t2 = _mm_aesenclast_si128( _t2, m128_zero )
|
||||
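Why SUBSTITUTE works: _mm_aesenclast_si128 performs ShiftRows, then SubBytes, then XOR with the round key. Feeding it the inverse-ShiftRows permutation of the input together with an all-zero key therefore yields a pure 16-byte AES S-box lookup, which is exactly Fugue's SMIX substitution layer. A hedged standalone sketch (hypothetical helper name):

#include <immintrin.h>

static inline __m128i aes_subbytes_sketch( __m128i x, __m128i inv_shift_rows )
{
   x = _mm_shuffle_epi8( x, inv_shift_rows );   // cancel the coming ShiftRows
   return _mm_aesenclast_si128( x, _mm_setzero_si128() );
}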
|
||||
#define SUPERMIX(t0, t1, t2, t3, t4)\
|
||||
t2 = t0;\
|
||||
t3 = _mm_add_epi8(t0, t0);\
|
||||
t4 = _mm_add_epi8(t3, t3);\
|
||||
t1 = _mm_srli_epi16(t0, 6);\
|
||||
t1 = _mm_and_si128(t1, M128(_lsbmask2));\
|
||||
t0 = _mm_xor_si128(t4, _mm_shuffle_epi8(M128(_mul4mask), t1)); \
|
||||
t4 = _mm_shuffle_epi8(t2, M128(_supermix1b));\
|
||||
t3 = _mm_xor_si128(t3, _mm_shuffle_epi8(M128(_mul2mask), t1));\
|
||||
t1 = _mm_shuffle_epi8(t4, M128(_supermix1c));\
|
||||
t4 = _mm_xor_si128(t4, t1);\
|
||||
t1 = _mm_shuffle_epi8(t4, M128(_supermix1d));\
|
||||
t4 = _mm_xor_si128(t4, t1);\
|
||||
t1 = _mm_shuffle_epi8(t2, M128(_supermix1a));\
|
||||
t2 = mm128_xor3(t2, t3, t0 );\
|
||||
t2 = _mm_shuffle_epi8(t2, M128(_supermix7a));\
|
||||
t4 = mm128_xor3( t4, t1, t2 ); \
|
||||
t2 = _mm_shuffle_epi8(t2, M128(_supermix7b));\
|
||||
t3 = _mm_shuffle_epi8(t3, M128(_supermix2a));\
|
||||
t1 = _mm_shuffle_epi8(t0, M128(_supermix4a));\
|
||||
t0 = _mm_shuffle_epi8(t0, M128(_supermix4b));\
|
||||
t4 = mm128_xor3( t4, t2, t1 ); \
|
||||
t0 = _mm_xor_si128(t0, t3);\
|
||||
t4 = mm128_xor3(t4, t0, _mm_shuffle_epi8(t0, M128(_supermix4c)));
|
||||
|
||||
/*
|
||||
#define SUPERMIX(t0, t1, t2, t3, t4)\
|
||||
PRESUPERMIX(t0, t1, t2, t3, t4);\
|
||||
POSTSUPERMIX(t0, t1, t2, t3, t4)
|
||||
*/
|
||||
|
||||
#define POSTSUPERMIX(t0, t1, t2, t3, t4)\
|
||||
t1 = _mm_shuffle_epi8(t2, M128(_supermix1b));\
|
||||
t4 = t1;\
|
||||
t1 = _mm_shuffle_epi8(t1, M128(_supermix1c));\
|
||||
t4 = _mm_xor_si128(t4, t1);\
|
||||
t1 = _mm_shuffle_epi8(t4, M128(_supermix1d));\
|
||||
t4 = _mm_xor_si128(t4, t1);\
|
||||
t1 = _mm_shuffle_epi8(t2, M128(_supermix1a));\
|
||||
t4 = _mm_xor_si128(t4, t1);\
|
||||
t2 = mm128_xor3(t2, t3, t0 );\
|
||||
t2 = _mm_shuffle_epi8(t2, M128(_supermix7a));\
|
||||
t4 = _mm_xor_si128(t4, t2);\
|
||||
t2 = _mm_shuffle_epi8(t2, M128(_supermix7b));\
|
||||
t4 = _mm_xor_si128(t4, t2);\
|
||||
t3 = _mm_shuffle_epi8(t3, M128(_supermix2a));\
|
||||
t1 = _mm_shuffle_epi8(t0, M128(_supermix4a));\
|
||||
t4 = _mm_xor_si128(t4, t1);\
|
||||
t0 = _mm_shuffle_epi8(t0, M128(_supermix4b));\
|
||||
t0 = _mm_xor_si128(t0, t3);\
|
||||
t4 = _mm_xor_si128(t4, t0);\
|
||||
t0 = _mm_shuffle_epi8(t0, M128(_supermix4c));\
|
||||
t4 = _mm_xor_si128(t4, t0)
|
||||
|
||||
#define SUBROUND512_3(r1a, r1b, r1c, r1d, r2a, r2b, r2c, r2d, r3a, r3b, r3c, r3d)\
|
||||
CMIX(r1a, r1b, r1c, r1d, _t0, _t1);\
|
||||
PACK_S0(r1c, r1a, _t0);\
|
||||
SUBSTITUTE(r1c, _t2 );\
|
||||
SUPERMIX(_t2, _t3, _t0, _t1, r1c);\
|
||||
_t0 = _mm_shuffle_epi32(r1c, 0x39);\
|
||||
r2c = _mm_xor_si128(r2c, _t0);\
|
||||
_t0 = mm128_mask_32( _t0, 8 ); \
|
||||
r2d = _mm_xor_si128(r2d, _t0);\
|
||||
UNPACK_S0(r1c, r1a, _t3);\
|
||||
SUBSTITUTE(r2c, _t2 );\
|
||||
SUPERMIX(_t2, _t3, _t0, _t1, r2c);\
|
||||
_t0 = _mm_shuffle_epi32(r2c, 0x39);\
|
||||
r3c = _mm_xor_si128(r3c, _t0);\
|
||||
_t0 = mm128_mask_32( _t0, 8 ); \
|
||||
r3d = _mm_xor_si128(r3d, _t0);\
|
||||
UNPACK_S0(r2c, r2a, _t3);\
|
||||
SUBSTITUTE(r3c, _t2 );\
|
||||
SUPERMIX(_t2, _t3, _t0, _t1, r3c);\
|
||||
UNPACK_S0(r3c, r3a, _t3)
|
||||
|
||||
#define SUBROUND512_4(r1a, r1b, r1c, r1d, r2a, r2b, r2c, r2d, r3a, r3b, r3c, r3d, r4a, r4b, r4c, r4d)\
|
||||
CMIX(r1a, r1b, r1c, r1d, _t0, _t1);\
|
||||
PACK_S0(r1c, r1a, _t0);\
|
||||
SUBSTITUTE( r1c, _t2 );\
|
||||
SUPERMIX(_t2, _t3, _t0, _t1, r1c);\
|
||||
_t0 = _mm_shuffle_epi32(r1c, 0x39);\
|
||||
r2c = _mm_xor_si128(r2c, _t0);\
|
||||
_t0 = mm128_mask_32( _t0, 8 ); \
|
||||
r2d = _mm_xor_si128(r2d, _t0);\
|
||||
UNPACK_S0(r1c, r1a, _t3);\
|
||||
SUBSTITUTE(r2c, _t2 );\
|
||||
SUPERMIX(_t2, _t3, _t0, _t1, r2c);\
|
||||
_t0 = _mm_shuffle_epi32(r2c, 0x39);\
|
||||
r3c = _mm_xor_si128(r3c, _t0);\
|
||||
_t0 = mm128_mask_32( _t0, 8 ); \
|
||||
r3d = _mm_xor_si128(r3d, _t0);\
|
||||
UNPACK_S0(r2c, r2a, _t3);\
|
||||
SUBSTITUTE( r3c, _t2 );\
|
||||
SUPERMIX(_t2, _t3, _t0, _t1, r3c);\
|
||||
_t0 = _mm_shuffle_epi32(r3c, 0x39);\
|
||||
r4c = _mm_xor_si128(r4c, _t0);\
|
||||
_t0 = mm128_mask_32( _t0, 8 ); \
|
||||
r4d = _mm_xor_si128(r4d, _t0);\
|
||||
UNPACK_S0(r3c, r3a, _t3);\
|
||||
SUBSTITUTE( r4c, _t2 );\
|
||||
SUPERMIX(_t2, _t3, _t0, _t1, r4c);\
|
||||
UNPACK_S0(r4c, r4a, _t3)
|
||||
|
||||
#define LOADCOLUMN(x, s, a)\
|
||||
block[0] = col[(base + a + 0) % s];\
|
||||
block[1] = col[(base + a + 1) % s];\
|
||||
block[2] = col[(base + a + 2) % s];\
|
||||
block[3] = col[(base + a + 3) % s];\
|
||||
x = _mm_load_si128((__m128i*)block)
|
||||
|
||||
#define STORECOLUMN(x, s)\
|
||||
_mm_store_si128((__m128i*)block, x);\
|
||||
col[(base + 0) % s] = block[0];\
|
||||
col[(base + 1) % s] = block[1];\
|
||||
col[(base + 2) % s] = block[2];\
|
||||
col[(base + 3) % s] = block[3]
|
||||
|
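The ring-buffer trick behind LOADCOLUMN/STORECOLUMN and the ROR3/ROR8/ROR9 comments in Final512 below: instead of rotating the 36-word state, the code keeps a moving origin `base` and indexes modulo 36, so "rotate right by r" is just an index adjustment. A one-line sketch:

static inline unsigned ror_base( unsigned base, unsigned r )
{
   return ( base + 36 - r ) % 36;   // e.g. ROR3 -> base = (base + 33) % 36
}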
||||
void Compress512(hashState_fugue *ctx, const unsigned char *pmsg, unsigned int uBlockCount)
|
||||
{
|
||||
__m128i _t0, _t1, _t2, _t3;
|
||||
|
||||
switch(ctx->base)
|
||||
{
|
||||
case 1:
|
||||
TIX512( pmsg, ctx->state[3], ctx->state[10], ctx->state[4],
|
||||
ctx->state[5], ctx->state[ 6], ctx->state[8],
|
||||
ctx->state[9], ctx->state[10], _t0, _t1, _t2 );
|
||||
|
||||
SUBROUND512_4( ctx->state[8], ctx->state[9], ctx->state[7],
|
||||
ctx->state[1], ctx->state[7], ctx->state[8],
|
||||
ctx->state[6], ctx->state[0], ctx->state[6],
|
||||
ctx->state[7], ctx->state[5], ctx->state[11],
|
||||
ctx->state[5], ctx->state[6], ctx->state[4],
|
||||
ctx->state[10] );
|
||||
ctx->base++;
|
||||
pmsg += 4;
|
||||
uBlockCount--;
|
||||
if( uBlockCount == 0 ) break;
|
||||
|
||||
case 2:
|
||||
TIX512( pmsg, ctx->state[11], ctx->state[6], ctx->state[0],
|
||||
ctx->state[ 1], ctx->state[2], ctx->state[4],
|
||||
ctx->state[ 5], ctx->state[6], _t0, _t1, _t2);
|
||||
|
||||
SUBROUND512_4( ctx->state[4], ctx->state[5], ctx->state[3],
|
||||
ctx->state[9], ctx->state[3], ctx->state[4],
|
||||
ctx->state[2], ctx->state[8], ctx->state[2],
|
||||
ctx->state[3], ctx->state[1], ctx->state[7],
|
||||
ctx->state[1], ctx->state[2], ctx->state[0],
|
||||
ctx->state[6]);
|
||||
|
||||
ctx->base = 0;
|
||||
pmsg += 4;
|
||||
uBlockCount--;
|
||||
break;
|
||||
}
|
||||
|
||||
while( uBlockCount > 0 )
|
||||
{
|
||||
   TIX512( pmsg, ctx->state[ 7],ctx->state[2],ctx->state[8],ctx->state[9],
                 ctx->state[10],ctx->state[0],ctx->state[1],ctx->state[2],
                 _t0, _t1, _t2 );
   SUBROUND512_4( ctx->state[0], ctx->state[1],ctx->state[11],ctx->state[5],
                  ctx->state[11],ctx->state[0],ctx->state[10],ctx->state[4],
                  ctx->state[10],ctx->state[11],ctx->state[9],ctx->state[3],
                  ctx->state[9],ctx->state[10],ctx->state[8],ctx->state[2] );

   ctx->base++;
   pmsg += 4;
   uBlockCount--;
   if( uBlockCount == 0 ) break;

   TIX512( pmsg, ctx->state[3],ctx->state[10],ctx->state[4],ctx->state[5],
                 ctx->state[6],ctx->state[8], ctx->state[9],ctx->state[10],
                 _t0, _t1, _t2 );

   SUBROUND512_4( ctx->state[8],ctx->state[9],ctx->state[7],ctx->state[1],
                  ctx->state[7],ctx->state[8],ctx->state[6],ctx->state[0],
                  ctx->state[6],ctx->state[7],ctx->state[5],ctx->state[11],
                  ctx->state[5],ctx->state[6],ctx->state[4],ctx->state[10] );

   ctx->base++;
   pmsg += 4;
   uBlockCount--;
   if( uBlockCount == 0 ) break;

   TIX512( pmsg, ctx->state[11],ctx->state[6],ctx->state[0],ctx->state[1],
                 ctx->state[2], ctx->state[4],ctx->state[5],ctx->state[6],
                 _t0, _t1, _t2);
   SUBROUND512_4( ctx->state[4],ctx->state[5],ctx->state[3],ctx->state[9],
                  ctx->state[3],ctx->state[4],ctx->state[2],ctx->state[8],
                  ctx->state[2],ctx->state[3],ctx->state[1],ctx->state[7],
                  ctx->state[1],ctx->state[2],ctx->state[0],ctx->state[6]);

   ctx->base = 0;
   pmsg += 4;
   uBlockCount--;
   }

}

void Final512(hashState_fugue *ctx, BitSequence *hashval)
{
   unsigned int block[4] __attribute__ ((aligned (32)));
   unsigned int col[36] __attribute__ ((aligned (16)));
   unsigned int i, base;
   __m128i r0, _t0, _t1, _t2, _t3;

   for(i = 0; i < 12; i++)
   {
      _mm_store_si128((__m128i*)block, ctx->state[i]);

      col[3 * i + 0] = block[0];
      col[3 * i + 1] = block[1];
      col[3 * i + 2] = block[2];
   }

   base = (36 - (12 * ctx->base)) % 36;

   for(i = 0; i < 32; i++)
   {
      // ROR3
      base = (base + 33) % 36;

      // CMIX
      col[(base + 0) % 36] ^= col[(base + 4) % 36];
      col[(base + 1) % 36] ^= col[(base + 5) % 36];
      col[(base + 2) % 36] ^= col[(base + 6) % 36];
      col[(base + 18) % 36] ^= col[(base + 4) % 36];
      col[(base + 19) % 36] ^= col[(base + 5) % 36];
      col[(base + 20) % 36] ^= col[(base + 6) % 36];

      // SMIX
      LOADCOLUMN(r0, 36, 0);
      SUBSTITUTE(r0, _t2);
      SUPERMIX(_t2, _t3, _t0, _t1, r0);
      STORECOLUMN(r0, 36);
   }

   for(i = 0; i < 13; i++)
   {
      // S4 += S0; S9 += S0; S18 += S0; S27 += S0;
      col[(base + 4) % 36] ^= col[(base + 0) % 36];
      col[(base + 9) % 36] ^= col[(base + 0) % 36];
      col[(base + 18) % 36] ^= col[(base + 0) % 36];
      col[(base + 27) % 36] ^= col[(base + 0) % 36];

      // ROR9
      base = (base + 27) % 36;

      // SMIX
      LOADCOLUMN(r0, 36, 0);
      SUBSTITUTE(r0, _t2);
      SUPERMIX(_t2, _t3, _t0, _t1, r0);
      STORECOLUMN(r0, 36);

      // S4 += S0; S10 += S0; S18 += S0; S27 += S0;
      col[(base + 4) % 36] ^= col[(base + 0) % 36];
      col[(base + 10) % 36] ^= col[(base + 0) % 36];
      col[(base + 18) % 36] ^= col[(base + 0) % 36];
      col[(base + 27) % 36] ^= col[(base + 0) % 36];

      // ROR9
      base = (base + 27) % 36;

      // SMIX
      LOADCOLUMN(r0, 36, 0);
      SUBSTITUTE(r0, _t2);
      SUPERMIX(_t2, _t3, _t0, _t1, r0);
      STORECOLUMN(r0, 36);

      // S4 += S0; S10 += S0; S19 += S0; S27 += S0;
      col[(base + 4) % 36] ^= col[(base + 0) % 36];
      col[(base + 10) % 36] ^= col[(base + 0) % 36];
      col[(base + 19) % 36] ^= col[(base + 0) % 36];
      col[(base + 27) % 36] ^= col[(base + 0) % 36];

      // ROR9
      base = (base + 27) % 36;

      // SMIX
      LOADCOLUMN(r0, 36, 0);
      SUBSTITUTE(r0, _t2);
      SUPERMIX(_t2, _t3, _t0, _t1, r0);
      STORECOLUMN(r0, 36);

      // S4 += S0; S10 += S0; S19 += S0; S28 += S0;
      col[(base + 4) % 36] ^= col[(base + 0) % 36];
      col[(base + 10) % 36] ^= col[(base + 0) % 36];
      col[(base + 19) % 36] ^= col[(base + 0) % 36];
      col[(base + 28) % 36] ^= col[(base + 0) % 36];

      // ROR8
      base = (base + 28) % 36;

      // SMIX
      LOADCOLUMN(r0, 36, 0);
      SUBSTITUTE(r0, _t2);
      SUPERMIX(_t2, _t3, _t0, _t1, r0);
      STORECOLUMN(r0, 36);
   }

   // S4 += S0; S9 += S0; S18 += S0; S27 += S0;
   col[(base + 4) % 36] ^= col[(base + 0) % 36];
   col[(base + 9) % 36] ^= col[(base + 0) % 36];
   col[(base + 18) % 36] ^= col[(base + 0) % 36];
   col[(base + 27) % 36] ^= col[(base + 0) % 36];

   // Transform to the standard basis and store output; S1 || S2 || S3 || S4
   LOADCOLUMN(r0, 36, 1);
   _mm_store_si128((__m128i*)hashval, r0);

   // Transform to the standard basis and store output; S9 || S10 || S11 || S12
   LOADCOLUMN(r0, 36, 9);
   _mm_store_si128((__m128i*)hashval + 1, r0);

   // Transform to the standard basis and store output; S18 || S19 || S20 || S21
   LOADCOLUMN(r0, 36, 18);
   _mm_store_si128((__m128i*)hashval + 2, r0);

   // Transform to the standard basis and store output; S27 || S28 || S29 || S30
   LOADCOLUMN(r0, 36, 27);
   _mm_store_si128((__m128i*)hashval + 3, r0);
}

HashReturn fugue512_Init(hashState_fugue *ctx, int nHashSize)
{
   int i;
   ctx->processed_bits = 0;
   ctx->uBufferBytes = 0;
   ctx->base = 0;

   ctx->uHashSize = 512;
   ctx->uBlockLength = 4;

   for(i = 0; i < 6; i++)
      ctx->state[i] = m128_zero;

   ctx->state[6]  = _mm_load_si128((__m128i*)_IV512 + 0);
   ctx->state[7]  = _mm_load_si128((__m128i*)_IV512 + 1);
   ctx->state[8]  = _mm_load_si128((__m128i*)_IV512 + 2);
   ctx->state[9]  = _mm_load_si128((__m128i*)_IV512 + 3);
   ctx->state[10] = _mm_load_si128((__m128i*)_IV512 + 4);
   ctx->state[11] = _mm_load_si128((__m128i*)_IV512 + 5);

   return SUCCESS;
}

HashReturn fugue512_Update(hashState_fugue *state, const void *data, DataLength databitlen)
{
   unsigned int uByteLength, uBlockCount, uRemainingBytes;

   uByteLength = (unsigned int)(databitlen / 8);

   if(state->uBufferBytes + uByteLength >= state->uBlockLength)
   {
      if(state->uBufferBytes != 0)
      {
         // Fill the buffer
         memcpy(state->buffer + state->uBufferBytes, (void*)data, state->uBlockLength - state->uBufferBytes);

         // Process the buffer
         Compress512(state, state->buffer, 1);

         state->processed_bits += state->uBlockLength * 8;
         data += state->uBlockLength - state->uBufferBytes;
         uByteLength -= state->uBlockLength - state->uBufferBytes;
      }

      // buffer now does not contain any unprocessed bytes

      uBlockCount = uByteLength / state->uBlockLength;
      uRemainingBytes = uByteLength % state->uBlockLength;

      if(uBlockCount > 0)
      {
         Compress512(state, data, uBlockCount);

         state->processed_bits += uBlockCount * state->uBlockLength * 8;
         data += uBlockCount * state->uBlockLength;
      }

      if(uRemainingBytes > 0)
      {
         memcpy(state->buffer, (void*)data, uRemainingBytes);
      }

      state->uBufferBytes = uRemainingBytes;
   }
   else
   {
      memcpy(state->buffer + state->uBufferBytes, (void*)data, uByteLength);
      state->uBufferBytes += uByteLength;
   }

   return SUCCESS;
}

HashReturn fugue512_Final(hashState_fugue *state, void *hashval)
{
   unsigned int i;
   BitSequence lengthbuf[8] __attribute__((aligned(64)));

   // Update message bit count
   state->processed_bits += state->uBufferBytes * 8;

   // Pad the remaining buffer bytes with zero
   if(state->uBufferBytes != 0)
   {
      if ( state->uBufferBytes != state->uBlockLength)
         memset(state->buffer + state->uBufferBytes, 0, state->uBlockLength - state->uBufferBytes);

      Compress512(state, state->buffer, 1);
   }

   // Last two blocks are message length in bits
   for(i = 0; i < 8; i++)
      lengthbuf[i] = ((state->processed_bits) >> (8 * (7 - i))) & 0xff;

   // Process the last two blocks
   Compress512(state, lengthbuf, 2);

   // Finalization
   Final512(state, hashval);

   return SUCCESS;
}


HashReturn fugue512_full(hashState_fugue *hs, void *hashval, const void *data, DataLength databitlen)
{
   fugue512_Init(hs, 512);
   fugue512_Update(hs, data, databitlen*8);
   fugue512_Final(hs, hashval);
   return SUCCESS;
}

#endif // AES

algo/fugue/fugue-aesni.h  (new file, 61 lines)
@@ -0,0 +1,61 @@
/*
 * file    : hash_api.h
 * version : 1.0.208
 * date    : 14.12.2010
 *
 * Fugue vperm implementation Hash API
 *
 * Cagdas Calik
 * ccalik@metu.edu.tr
 * Institute of Applied Mathematics, Middle East Technical University, Turkey.
 *
 */

#ifndef FUGUE_HASH_API_H
#define FUGUE_HASH_API_H

#if defined(__AES__)

#if !defined(__SSE4_1__)
  #error "Unsupported configuration, AES needs SSE4.1. Compile without AES."
#endif

#include "algo/sha/sha3_common.h"
#include "simd-utils.h"


typedef struct
{
   __m128i      state[12];
   unsigned int base;

   unsigned int uHashSize;
   unsigned int uBlockLength;
   unsigned int uBufferBytes;
   DataLength   processed_bits;
   BitSequence  buffer[4];

} hashState_fugue __attribute__ ((aligned (64)));


// These functions are deprecated, use the lower case macro aliases that use
// the standard interface. This will be cleaned up at a later date.
HashReturn fugue512_Init(hashState_fugue *state, int hashbitlen);

HashReturn fugue512_Update(hashState_fugue *state, const void *data, DataLength databitlen);

HashReturn fugue512_Final(hashState_fugue *state, void *hashval);

#define fugue512_init( state ) \
   fugue512_Init( state, 512 )
#define fugue512_update( state, data, len ) \
   fugue512_Update( state, data, (len)<<3 )
#define fugue512_final \
   fugue512_Final


HashReturn fugue512_full(hashState_fugue *hs, void *hashval, const void *data, DataLength databitlen);

#endif // AES
#endif // HASH_API_H
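
For reference, a minimal usage sketch of the lowercase wrapper API declared above. This is hypothetical caller code, not part of the commit; the function and variable names are illustrative only.

   // Hypothetical example: hashing an 80 byte block header into a
   // 64 byte digest with the standard-interface macros. Length is
   // passed in bytes; the macro shifts it to bits for the deprecated
   // camel-case function.
   static void example_fugue512_hash( void *digest64, const void *header80 )
   {
      hashState_fugue ctx;
      fugue512_init( &ctx );                  // expands to fugue512_Init( &ctx, 512 )
      fugue512_update( &ctx, header80, 80 );  // 80 bytes, (len)<<3 = 640 bits
      fugue512_final( &ctx, digest64 );       // writes the 64 byte hash
   }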

@@ -67,11 +67,9 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
 * xmm[j] will be lost
 * xmm[k] has to be all 0x1b */
#define MUL2(i, j, k){\
  j = _mm_xor_si128(j, j);\
  j = _mm_cmpgt_epi8(j, i);\
  j = _mm_cmpgt_epi8( m128_zero, i);\
  i = _mm_add_epi8(i, i);\
  j = _mm_and_si128(j, k);\
  i = _mm_xor_si128(i, j);\
  i = mm128_xorand(i, j, k );\
}
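
The rewrite above saves instructions: comparing m128_zero directly against i replaces the zero-then-compare pair, and the trailing and+xor pair is fused into mm128_xorand. For orientation, a reference-only scalar equivalent of what MUL2 computes per byte (GF(2^8) doubling with the AES polynomial, often called xtime):

   // Reference-only sketch, not part of the patch. MUL2 doubles 16
   // bytes at once; cmpgt yields 0xff for bytes with the high bit set,
   // which selects the 0x1b reduction constant held in k.
   static inline unsigned char xtime( unsigned char x )
   {
      unsigned char mask = (x & 0x80) ? 0xff : 0x00;   // sign of the byte
      return (unsigned char)((x << 1) ^ (mask & 0x1b));
   }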

/**/
@@ -93,6 +91,96 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
  We almost fit into 16 registers, need only 3 spills to memory.
  This implementation costs 7.7 c/b giving total speed on SNB: 10.7c/b.
  K. Matusiewicz, 2011/05/29 */

#if defined(__AVX512VL__)

#define MixBytes(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
  /* t_i = a_i + a_{i+1} */\
  b6 = a0;\
  b7 = a1;\
  a0 = _mm_xor_si128(a0, a1);\
  b0 = a2;\
  a1 = _mm_xor_si128(a1, a2);\
  b1 = a3;\
  TEMP2 = _mm_xor_si128(a2, a3);\
  b2 = a4;\
  a3 = _mm_xor_si128(a3, a4);\
  b3 = a5;\
  a4 = _mm_xor_si128(a4, a5);\
  b4 = a6;\
  a5 = _mm_xor_si128(a5, a6);\
  b5 = a7;\
  a6 = _mm_xor_si128(a6, a7);\
  a7 = _mm_xor_si128(a7, b6);\
  \
  /* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
  TEMP0 = mm128_xor3( b0, a4, a6 ); \
  /* spill values y_4, y_5 to memory */\
  TEMP1 = mm128_xor3( b1, a5, a7 );\
  b2 = mm128_xor3( b2, a6, a0 ); \
  /* save values t0, t1, t2 to xmm8, xmm9 and memory */\
  b0 = a0;\
  b3 = mm128_xor3( b3, a7, a1 ); \
  b1 = a1;\
  b6 = mm128_xor3( b6, a4, TEMP2 ); \
  b4 = mm128_xor3( b4, a0, TEMP2 ); \
  b7 = mm128_xor3( b7, a5, a3 ); \
  b5 = mm128_xor3( b5, a1, a3 ); \
  \
  /* compute x_i = t_i + t_{i+3} */\
  a0 = _mm_xor_si128(a0, a3);\
  a1 = _mm_xor_si128(a1, a4);\
  a2 = _mm_xor_si128(TEMP2, a5);\
  a3 = _mm_xor_si128(a3, a6);\
  a4 = _mm_xor_si128(a4, a7);\
  a5 = _mm_xor_si128(a5, b0);\
  a6 = _mm_xor_si128(a6, b1);\
  a7 = _mm_xor_si128(a7, TEMP2);\
  \
  /* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
  /* compute w_i : add y_{i+4} */\
  b1 = m128_const1_64( 0x1b1b1b1b1b1b1b1b );\
  MUL2(a0, b0, b1);\
  a0 = _mm_xor_si128(a0, TEMP0);\
  MUL2(a1, b0, b1);\
  a1 = _mm_xor_si128(a1, TEMP1);\
  MUL2(a2, b0, b1);\
  a2 = _mm_xor_si128(a2, b2);\
  MUL2(a3, b0, b1);\
  a3 = _mm_xor_si128(a3, b3);\
  MUL2(a4, b0, b1);\
  a4 = _mm_xor_si128(a4, b4);\
  MUL2(a5, b0, b1);\
  a5 = _mm_xor_si128(a5, b5);\
  MUL2(a6, b0, b1);\
  a6 = _mm_xor_si128(a6, b6);\
  MUL2(a7, b0, b1);\
  a7 = _mm_xor_si128(a7, b7);\
  \
  /* compute v_i : double w_i */\
  /* add to y_4 y_5 .. v3, v4, ... */\
  MUL2(a0, b0, b1);\
  b5 = _mm_xor_si128(b5, a0);\
  MUL2(a1, b0, b1);\
  b6 = _mm_xor_si128(b6, a1);\
  MUL2(a2, b0, b1);\
  b7 = _mm_xor_si128(b7, a2);\
  MUL2(a5, b0, b1);\
  b2 = _mm_xor_si128(b2, a5);\
  MUL2(a6, b0, b1);\
  b3 = _mm_xor_si128(b3, a6);\
  MUL2(a7, b0, b1);\
  b4 = _mm_xor_si128(b4, a7);\
  MUL2(a3, b0, b1);\
  MUL2(a4, b0, b1);\
  b0 = TEMP0;\
  b1 = TEMP1;\
  b0 = _mm_xor_si128(b0, a3);\
  b1 = _mm_xor_si128(b1, a4);\
}/*MixBytes*/

#else

#define MixBytes(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
  /* t_i = a_i + a_{i+1} */\
  b6 = a0;\
@@ -189,6 +277,8 @@ static const __m128i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003 };
  b1 = _mm_xor_si128(b1, a4);\
}/*MixBytes*/

#endif


/* one round
 * a0-a7 = input rows

@@ -58,11 +58,9 @@ static const __m128i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e };
 * xmm[j] will be lost
 * xmm[k] has to be all 0x1b */
#define MUL2(i, j, k){\
  j = _mm_xor_si128(j, j);\
  j = _mm_cmpgt_epi8(j, i);\
  j = _mm_cmpgt_epi8( m128_zero, i);\
  i = _mm_add_epi8(i, i);\
  j = _mm_and_si128(j, k);\
  i = _mm_xor_si128(i, j);\
  i = mm128_xorand(i, j, k );\
}

/* Yet another implementation of MixBytes.
@@ -82,6 +80,96 @@ static const __m128i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e };
  We almost fit into 16 registers, need only 3 spills to memory.
  This implementation costs 7.7 c/b giving total speed on SNB: 10.7c/b.
  K. Matusiewicz, 2011/05/29 */

#if defined(__AVX512VL__)

#define MixBytes(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
  /* t_i = a_i + a_{i+1} */\
  b6 = a0;\
  b7 = a1;\
  a0 = _mm_xor_si128(a0, a1);\
  b0 = a2;\
  a1 = _mm_xor_si128(a1, a2);\
  b1 = a3;\
  TEMP2 = _mm_xor_si128(a2, a3);\
  b2 = a4;\
  a3 = _mm_xor_si128(a3, a4);\
  b3 = a5;\
  a4 = _mm_xor_si128(a4, a5);\
  b4 = a6;\
  a5 = _mm_xor_si128(a5, a6);\
  b5 = a7;\
  a6 = _mm_xor_si128(a6, a7);\
  a7 = _mm_xor_si128(a7, b6);\
  \
  /* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
  TEMP0 = mm128_xor3( b0, a4, a6 ); \
  /* spill values y_4, y_5 to memory */\
  TEMP1 = mm128_xor3( b1, a5, a7 );\
  b2 = mm128_xor3( b2, a6, a0 ); \
  /* save values t0, t1, t2 to xmm8, xmm9 and memory */\
  b0 = a0;\
  b3 = mm128_xor3( b3, a7, a1 ); \
  b1 = a1;\
  b6 = mm128_xor3( b6, a4, TEMP2 ); \
  b4 = mm128_xor3( b4, a0, TEMP2 ); \
  b7 = mm128_xor3( b7, a5, a3 ); \
  b5 = mm128_xor3( b5, a1, a3 ); \
  \
  /* compute x_i = t_i + t_{i+3} */\
  a0 = _mm_xor_si128(a0, a3);\
  a1 = _mm_xor_si128(a1, a4);\
  a2 = _mm_xor_si128(TEMP2, a5);\
  a3 = _mm_xor_si128(a3, a6);\
  a4 = _mm_xor_si128(a4, a7);\
  a5 = _mm_xor_si128(a5, b0);\
  a6 = _mm_xor_si128(a6, b1);\
  a7 = _mm_xor_si128(a7, TEMP2);\
  \
  /* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
  /* compute w_i : add y_{i+4} */\
  b1 = m128_const1_64( 0x1b1b1b1b1b1b1b1b );\
  MUL2(a0, b0, b1);\
  a0 = _mm_xor_si128(a0, TEMP0);\
  MUL2(a1, b0, b1);\
  a1 = _mm_xor_si128(a1, TEMP1);\
  MUL2(a2, b0, b1);\
  a2 = _mm_xor_si128(a2, b2);\
  MUL2(a3, b0, b1);\
  a3 = _mm_xor_si128(a3, b3);\
  MUL2(a4, b0, b1);\
  a4 = _mm_xor_si128(a4, b4);\
  MUL2(a5, b0, b1);\
  a5 = _mm_xor_si128(a5, b5);\
  MUL2(a6, b0, b1);\
  a6 = _mm_xor_si128(a6, b6);\
  MUL2(a7, b0, b1);\
  a7 = _mm_xor_si128(a7, b7);\
  \
  /* compute v_i : double w_i */\
  /* add to y_4 y_5 .. v3, v4, ... */\
  MUL2(a0, b0, b1);\
  b5 = _mm_xor_si128(b5, a0);\
  MUL2(a1, b0, b1);\
  b6 = _mm_xor_si128(b6, a1);\
  MUL2(a2, b0, b1);\
  b7 = _mm_xor_si128(b7, a2);\
  MUL2(a5, b0, b1);\
  b2 = _mm_xor_si128(b2, a5);\
  MUL2(a6, b0, b1);\
  b3 = _mm_xor_si128(b3, a6);\
  MUL2(a7, b0, b1);\
  b4 = _mm_xor_si128(b4, a7);\
  MUL2(a3, b0, b1);\
  MUL2(a4, b0, b1);\
  b0 = TEMP0;\
  b1 = TEMP1;\
  b0 = _mm_xor_si128(b0, a3);\
  b1 = _mm_xor_si128(b1, a4);\
}/*MixBytes*/

#else

#define MixBytes(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
  /* t_i = a_i + a_{i+1} */\
  b6 = a0;\
@@ -178,6 +266,8 @@ static const __m128i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e };
  b1 = _mm_xor_si128(b1, a4);\
}/*MixBytes*/

#endif

/* one round
 * i = round number
 * a0-a7 = input rows

@@ -43,7 +43,8 @@
#define ROUNDS (ROUNDS1024)
//#endif

#define ROTL64(a,n) ((((a)<<(n))|((a)>>(64-(n))))&li_64(ffffffffffffffff))
//#define ROTL64(a,n) ((((a)<<(n))|((a)>>(64-(n))))&li_64(ffffffffffffffff))
#define ROTL64(a,n) rol64( a, n )

#if (PLATFORM_BYTE_ORDER == IS_BIG_ENDIAN)
#define EXT_BYTE(var,n) ((u8)((u64)(var) >> (8*(7-(n)))))

@@ -63,7 +63,8 @@ typedef crypto_uint64 u64;
//#define ROUNDS (ROUNDS1024)
//#endif

#define ROTL64(a,n) ((((a)<<(n))|((a)>>(64-(n))))&li_64(ffffffffffffffff))
//#define ROTL64(a,n) ((((a)<<(n))|((a)>>(64-(n))))&li_64(ffffffffffffffff))
#define ROTL64(a,n) rol64( a, n )

#if (PLATFORM_BYTE_ORDER == IS_BIG_ENDIAN)
#define EXT_BYTE(var,n) ((u8)((u64)(var) >> (8*(7-(n)))))

@@ -53,7 +53,7 @@ int scanhash_groestl_4way( struct work *work, uint32_t max_nonce,
      if ( fulltest( hash+(lane<<3), ptarget) && !opt_benchmark )
      {
         pdata[19] = n + lane;
         submit_lane_solution( work, hash+(lane<<3), mythr, lane );
         submit_solution( work, hash+(lane<<3), mythr );
      }
      n += 4;
   } while ( ( n < last_nonce ) && !work_restart[thr_id].restart );

@@ -15,7 +15,9 @@
#include "miner.h"
#include "simd-utils.h"

#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX2__) && defined(__VAES__)

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)


int groestl256_4way_init( groestl256_4way_context* ctx, uint64_t hashlen )
@@ -43,13 +45,13 @@ int groestl256_4way_init( groestl256_4way_context* ctx, uint64_t hashlen )
}

int groestl256_4way_full( groestl256_4way_context* ctx, void* output,
                          const void* input, uint64_t databitlen )
                          const void* input, uint64_t datalen )
{
   const int len = (int)databitlen / 128;
   const int hashlen_m128i = 32 / 16;   // bytes to __m128i
   const int len = (int)datalen >> 4;
   const int hashlen_m128i = 32 >> 4;   // bytes to __m128i
   const int hash_offset = SIZE256 - hashlen_m128i;
   int rem = ctx->rem_ptr;
   int blocks = len / SIZE256;
   uint64_t blocks = len / SIZE256;
   __m512i* in = (__m512i*)input;
   int i;

@@ -87,21 +89,21 @@ int groestl256_4way_full( groestl256_4way_context* ctx, void* output,
   if ( i == SIZE256 - 1 )
   {
      // only 1 vector left in buffer, all padding at once
      ctx->buffer[i] = m512_const2_64( (uint64_t)blocks << 56, 0x80 );
      ctx->buffer[i] = m512_const2_64( blocks << 56, 0x80 );
   }
   else
   {
      // add first padding
      ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 );
      ctx->buffer[i] = m512_const2_64( 0, 0x80 );
      // add zero padding
      for ( i += 1; i < SIZE256 - 1; i++ )
         ctx->buffer[i] = m512_zero;

      // add length padding, second last byte is zero unless blocks > 255
      ctx->buffer[i] = m512_const2_64( (uint64_t)blocks << 56, 0 );
      ctx->buffer[i] = m512_const2_64( blocks << 56, 0 );
   }

   // digest final padding block and do output transform
   TF512_4way( ctx->chaining, ctx->buffer );

   OF512_4way( ctx->chaining );
@@ -120,7 +122,7 @@ int groestl256_4way_update_close( groestl256_4way_context* ctx, void* output,
   const int hashlen_m128i = ctx->hashlen / 16;   // bytes to __m128i
   const int hash_offset = SIZE256 - hashlen_m128i;
   int rem = ctx->rem_ptr;
   int blocks = len / SIZE256;
   uint64_t blocks = len / SIZE256;
   __m512i* in = (__m512i*)input;
   int i;

@@ -144,20 +146,18 @@ int groestl256_4way_update_close( groestl256_4way_context* ctx, void* output,
   if ( i == SIZE256 - 1 )
   {
      // only 1 vector left in buffer, all padding at once
      ctx->buffer[i] = m512_const1_128( _mm_set_epi8(
                 blocks, blocks>>8,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x80 ) );
      ctx->buffer[i] = m512_const2_64( blocks << 56, 0x80 );
   }
   else
   {
      // add first padding
      ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 );
      ctx->buffer[i] = m512_const2_64( 0, 0x80 );
      // add zero padding
      for ( i += 1; i < SIZE256 - 1; i++ )
         ctx->buffer[i] = m512_zero;

      // add length padding, second last byte is zero unless blocks > 255
      ctx->buffer[i] = m512_const1_128( _mm_set_epi8(
                 blocks, blocks>>8, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 ) );
      ctx->buffer[i] = m512_const2_64( blocks << 56, 0 );
   }

   // digest final padding block and do output transform
@@ -172,5 +172,159 @@ int groestl256_4way_update_close( groestl256_4way_context* ctx, void* output,
   return 0;
}
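
A note on the padding shortcut introduced above. Groestl ends the final block with a 64-bit big-endian block count; on the little-endian x86 targets this SIMD code already requires, (blocks << 56) lands the low byte of the count in the last byte of the block, exactly where the big-endian field ends. The replaced _mm_set_epi8 form also stored blocks>>8 in the second-last byte, hence the comment that the shortcut holds only while blocks <= 255. A reference-only sketch of the length lane:

   // Reference-only, assuming a little-endian target: the last 8 bytes
   // of the padding block as one uint64 lane. Valid while blocks <= 255,
   // which easily covers the short inputs hashed by this miner.
   uint64_t length_lane = (uint64_t)blocks << 56;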

#endif // VAES
#endif // AVX512

// AVX2 + VAES

int groestl256_2way_init( groestl256_2way_context* ctx, uint64_t hashlen )
{
   int i;

   ctx->hashlen = hashlen;

   if (ctx->chaining == NULL || ctx->buffer == NULL)
      return 1;

   for ( i = 0; i < SIZE256; i++ )
   {
      ctx->chaining[i] = m256_zero;
      ctx->buffer[i] = m256_zero;
   }

   // The only non-zero in the IV is len. It can be hard coded.
   ctx->chaining[ 3 ] = m256_const2_64( 0, 0x0100000000000000 );

   ctx->buf_ptr = 0;
   ctx->rem_ptr = 0;

   return 0;
}

int groestl256_2way_full( groestl256_2way_context* ctx, void* output,
                          const void* input, uint64_t datalen )
{
   const int len = (int)datalen >> 4;
   const int hashlen_m128i = 32 >> 4;   // bytes to __m128i
   const int hash_offset = SIZE256 - hashlen_m128i;
   int rem = ctx->rem_ptr;
   uint64_t blocks = len / SIZE256;
   __m256i* in = (__m256i*)input;
   int i;

   if (ctx->chaining == NULL || ctx->buffer == NULL)
      return 1;

   for ( i = 0; i < SIZE256; i++ )
   {
      ctx->chaining[i] = m256_zero;
      ctx->buffer[i] = m256_zero;
   }

   // The only non-zero in the IV is len. It can be hard coded.
   ctx->chaining[ 3 ] = m256_const2_64( 0, 0x0100000000000000 );
   ctx->buf_ptr = 0;
   ctx->rem_ptr = 0;

   // --- update ---

   // digest any full blocks, process directly from input
   for ( i = 0; i < blocks; i++ )
      TF512_2way( ctx->chaining, &in[ i * SIZE256 ] );
   ctx->buf_ptr = blocks * SIZE256;

   // copy any remaining data to buffer, it may already contain data
   // from a previous update for a midstate precalc
   for ( i = 0; i < len % SIZE256; i++ )
      ctx->buffer[ rem + i ] = in[ ctx->buf_ptr + i ];
   i += rem;   // use i as rem_ptr in final

   //--- final ---

   blocks++;   // adjust for final block

   if ( i == SIZE256 - 1 )
   {
      // only 1 vector left in buffer, all padding at once
      ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 );
   }
   else
   {
      // add first padding
      ctx->buffer[i] = m256_const2_64( 0, 0x80 );
      // add zero padding
      for ( i += 1; i < SIZE256 - 1; i++ )
         ctx->buffer[i] = m256_zero;

      // add length padding, second last byte is zero unless blocks > 255
      ctx->buffer[i] = m256_const2_64( blocks << 56, 0 );
   }

   // digest final padding block and do output transform
   TF512_2way( ctx->chaining, ctx->buffer );

   OF512_2way( ctx->chaining );

   // store hash result in output
   for ( i = 0; i < hashlen_m128i; i++ )
      casti_m256i( output, i ) = ctx->chaining[ hash_offset + i ];

   return 0;
}

int groestl256_2way_update_close( groestl256_2way_context* ctx, void* output,
                                  const void* input, uint64_t databitlen )
{
   const int len = (int)databitlen / 128;
   const int hashlen_m128i = ctx->hashlen / 16;   // bytes to __m128i
   const int hash_offset = SIZE256 - hashlen_m128i;
   int rem = ctx->rem_ptr;
   uint64_t blocks = len / SIZE256;
   __m256i* in = (__m256i*)input;
   int i;

   // --- update ---

   // digest any full blocks, process directly from input
   for ( i = 0; i < blocks; i++ )
      TF512_2way( ctx->chaining, &in[ i * SIZE256 ] );
   ctx->buf_ptr = blocks * SIZE256;

   // copy any remaining data to buffer, it may already contain data
   // from a previous update for a midstate precalc
   for ( i = 0; i < len % SIZE256; i++ )
      ctx->buffer[ rem + i ] = in[ ctx->buf_ptr + i ];
   i += rem;   // use i as rem_ptr in final

   //--- final ---

   blocks++;   // adjust for final block

   if ( i == SIZE256 - 1 )
   {
      // only 1 vector left in buffer, all padding at once
      ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 );
   }
   else
   {
      // add first padding
      ctx->buffer[i] = m256_const2_64( 0, 0x80 );
      // add zero padding
      for ( i += 1; i < SIZE256 - 1; i++ )
         ctx->buffer[i] = m256_zero;

      // add length padding, second last byte is zero unless blocks > 255
      ctx->buffer[i] = m256_const2_64( blocks << 56, 0 );
   }

   // digest final padding block and do output transform
   TF512_2way( ctx->chaining, ctx->buffer );

   OF512_2way( ctx->chaining );

   // store hash result in output
   for ( i = 0; i < hashlen_m128i; i++ )
      casti_m256i( output, i ) = ctx->chaining[ hash_offset + i ];

   return 0;
}

#endif // VAES
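
A minimal usage sketch of the new 2-way interface, following the calling pattern of the other *_2way code in this tree. Hypothetical caller code, not part of the commit; vdata and vhash are assumed to be 2-way interleaved buffers (128 bits per lane), and datalen is assumed to be bytes per lane, as implied by the >> 4 conversion above.

   // Hypothetical example: one-shot 2-way Groestl-256 over two
   // interleaved 80 byte inputs, producing two interleaved digests.
   groestl256_2way_context ctx;
   groestl256_2way_full( &ctx, vhash, vdata, 80 );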

@@ -18,8 +18,8 @@
#endif
#include <stdlib.h>

#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX2__) && defined(__VAES__)

#define LENGTH (256)

//#include "brg_endian.h"
@@ -48,6 +48,8 @@

#define SIZE256 (SIZE_512/16)

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

typedef struct {
   __attribute__ ((aligned (128))) __m512i chaining[SIZE256];
   __attribute__ ((aligned (64))) __m512i buffer[SIZE256];
@@ -55,7 +57,7 @@ typedef struct {
   int blk_count;    // SIZE_m128i
   int buf_ptr;      // __m128i offset
   int rem_ptr;
   int databitlen;   // bits
// int databitlen;   // bits
} groestl256_4way_context;


@@ -74,5 +76,25 @@ int groestl256_4way_update_close( groestl256_4way_context*, void*,
int groestl256_4way_full( groestl256_4way_context*, void*,
                          const void*, uint64_t );

#endif
#endif
#endif // AVX512

typedef struct {
   __attribute__ ((aligned (128))) __m256i chaining[SIZE256];
   __attribute__ ((aligned (64))) __m256i buffer[SIZE256];
   int hashlen;      // byte
   int blk_count;    // SIZE_m128i
   int buf_ptr;      // __m128i offset
   int rem_ptr;
// int databitlen;   // bits
} groestl256_2way_context;

int groestl256_2way_init( groestl256_2way_context*, uint64_t );

int groestl256_2way_update_close( groestl256_2way_context*, void*,
                                  const void*, uint64_t );

int groestl256_2way_full( groestl256_2way_context*, void*,
                          const void*, uint64_t );

#endif // VAES
#endif // GROESTL256_HASH_4WAY_H__

@@ -7,13 +7,13 @@
 * This code is placed in the public domain
 */


#if !defined(GROESTL256_INTR_4WAY_H__)
#define GROESTL256_INTR_4WAY_H__ 1

#include "groestl256-hash-4way.h"

#if defined(__VAES__)
#if defined(__AVX2__) && defined(__VAES__)

static const __m128i round_const_l0[] __attribute__ ((aligned (64))) =
{
   { 0x7060504030201000, 0xffffffffffffffff },
@@ -42,6 +42,8 @@ static const __m128i round_const_l7[] __attribute__ ((aligned (64))) =
   { 0x0000000000000000, 0x8696a6b6c6d6e6f6 }
};

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

static const __m512i TRANSP_MASK = { 0x0d0509010c040800, 0x0f070b030e060a02,
                                     0x1d1519111c141810, 0x1f171b131e161a12,
                                     0x2d2529212c242820, 0x2f272b232e262a22,
@@ -94,11 +96,9 @@ static const __m512i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e,
 * xmm[j] will be lost
 * xmm[k] has to be all 0x1b */
#define MUL2(i, j, k){\
  j = _mm512_xor_si512(j, j);\
  j = _mm512_movm_epi8( _mm512_cmpgt_epi8_mask(j, i) );\
  j = _mm512_movm_epi8( _mm512_cmpgt_epi8_mask( m512_zero, i) );\
  i = _mm512_add_epi8(i, i);\
  j = _mm512_and_si512(j, k);\
  i = _mm512_xor_si512(i, j);\
  i = mm512_xorand( i, j, k );\
}

/* Yet another implementation of MixBytes.
@@ -118,6 +118,95 @@ static const __m512i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e,
  We almost fit into 16 registers, need only 3 spills to memory.
  This implementation costs 7.7 c/b giving total speed on SNB: 10.7c/b.
  K. Matusiewicz, 2011/05/29 */

#define MixBytes( a0, a1, a2, a3, a4, a5, a6, a7, \
                  b0, b1, b2, b3, b4, b5, b6, b7) { \
  /* t_i = a_i + a_{i+1} */\
  b6 = a0; \
  b7 = a1; \
  a0 = _mm512_xor_si512( a0, a1 ); \
  b0 = a2; \
  a1 = _mm512_xor_si512( a1, a2 ); \
  b1 = a3; \
  TEMP2 = _mm512_xor_si512( a2, a3 ); \
  b2 = a4; \
  a3 = _mm512_xor_si512( a3, a4 ); \
  b3 = a5; \
  a4 = _mm512_xor_si512( a4, a5 );\
  b4 = a6; \
  a5 = _mm512_xor_si512( a5, a6 ); \
  b5 = a7; \
  a6 = _mm512_xor_si512( a6, a7 ); \
  a7 = _mm512_xor_si512( a7, b6 ); \
  \
  /* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
  TEMP0 = mm512_xor3( b0, a4, a6 ); \
  /* spill values y_4, y_5 to memory */\
  TEMP1 = mm512_xor3( b1, a5, a7 ); \
  b2 = mm512_xor3( b2, a6, a0 ); \
  /* save values t0, t1, t2 to xmm8, xmm9 and memory */\
  b0 = a0; \
  b3 = mm512_xor3( b3, a7, a1 ); \
  b1 = a1; \
  b6 = mm512_xor3( b6, a4, TEMP2 ); \
  b4 = mm512_xor3( b4, a0, TEMP2 ); \
  b7 = mm512_xor3( b7, a5, a3 ); \
  b5 = mm512_xor3( b5, a1, a3 ); \
  \
  /* compute x_i = t_i + t_{i+3} */\
  a0 = _mm512_xor_si512( a0, a3 ); \
  a1 = _mm512_xor_si512( a1, a4 ); \
  a2 = _mm512_xor_si512( TEMP2, a5 ); \
  a3 = _mm512_xor_si512( a3, a6 ); \
  a4 = _mm512_xor_si512( a4, a7 ); \
  a5 = _mm512_xor_si512( a5, b0 ); \
  a6 = _mm512_xor_si512( a6, b1 ); \
  a7 = _mm512_xor_si512( a7, TEMP2 ); \
  \
  /* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
  /* compute w_i : add y_{i+4} */\
  b1 = m512_const1_64( 0x1b1b1b1b1b1b1b1b ); \
  MUL2( a0, b0, b1 ); \
  a0 = _mm512_xor_si512( a0, TEMP0 ); \
  MUL2( a1, b0, b1 ); \
  a1 = _mm512_xor_si512( a1, TEMP1 ); \
  MUL2( a2, b0, b1 ); \
  a2 = _mm512_xor_si512( a2, b2 ); \
  MUL2( a3, b0, b1 ); \
  a3 = _mm512_xor_si512( a3, b3 ); \
  MUL2( a4, b0, b1 ); \
  a4 = _mm512_xor_si512( a4, b4 ); \
  MUL2( a5, b0, b1 ); \
  a5 = _mm512_xor_si512( a5, b5 ); \
  MUL2( a6, b0, b1 ); \
  a6 = _mm512_xor_si512( a6, b6 ); \
  MUL2( a7, b0, b1 ); \
  a7 = _mm512_xor_si512( a7, b7 ); \
  \
  /* compute v_i : double w_i */\
  /* add to y_4 y_5 .. v3, v4, ... */\
  MUL2( a0, b0, b1 ); \
  b5 = _mm512_xor_si512( b5, a0 ); \
  MUL2( a1, b0, b1 ); \
  b6 = _mm512_xor_si512( b6, a1 ); \
  MUL2( a2, b0, b1 ); \
  b7 = _mm512_xor_si512( b7, a2 ); \
  MUL2( a5, b0, b1 ); \
  b2 = _mm512_xor_si512( b2, a5 ); \
  MUL2( a6, b0, b1 ); \
  b3 = _mm512_xor_si512( b3, a6 ); \
  MUL2( a7, b0, b1 ); \
  b4 = _mm512_xor_si512( b4, a7 ); \
  MUL2( a3, b0, b1 ); \
  MUL2( a4, b0, b1 ); \
  b0 = TEMP0;\
  b1 = TEMP1;\
  b0 = _mm512_xor_si512( b0, a3 ); \
  b1 = _mm512_xor_si512( b1, a4 ); \
}/*MixBytes*/


#if 0
#define MixBytes(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
  /* t_i = a_i + a_{i+1} */\
  b6 = a0;\
@@ -213,7 +302,7 @@ static const __m512i SUBSH_MASK7 = { 0x090c000306080b07, 0x02050f0a0d01040e,
  b0 = _mm512_xor_si512(b0, a3);\
  b1 = _mm512_xor_si512(b1, a4);\
}/*MixBytes*/

#endif

#define ROUND(i, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
  /* AddRoundConstant */\
@@ -499,5 +588,398 @@ void OF512_4way( __m512i* chaining )
   chaining[3] = xmm11;
}

#endif // AVX512

static const __m256i TRANSP_MASK_2WAY =
   { 0x0d0509010c040800, 0x0f070b030e060a02,
     0x1d1519111c141810, 0x1f171b131e161a12 };

static const __m256i SUBSH_MASK0_2WAY =
   { 0x0c0f0104070b0e00, 0x03060a0d08020509,
     0x1c1f1114171b1e10, 0x13161a1d18121519 };

static const __m256i SUBSH_MASK1_2WAY =
   { 0x0e090205000d0801, 0x04070c0f0a03060b,
     0x1e191215101d1801, 0x14171c1f1a13161b };

static const __m256i SUBSH_MASK2_2WAY =
   { 0x080b0306010f0a02, 0x05000e090c04070d,
     0x181b1316111f1a12, 0x15101e191c14171d };

static const __m256i SUBSH_MASK3_2WAY =
   { 0x0a0d040702090c03, 0x0601080b0e05000f,
     0x1a1d141712191c13, 0x1611181b1e15101f };

static const __m256i SUBSH_MASK4_2WAY =
   { 0x0b0e0500030a0d04, 0x0702090c0f060108,
     0x1b1e1510131a1d14, 0x1712191c1f161118 };

static const __m256i SUBSH_MASK5_2WAY =
   { 0x0d080601040c0f05, 0x00030b0e0907020a,
     0x1d181611141c1f15, 0x10131b1e1917121a };

static const __m256i SUBSH_MASK6_2WAY =
   { 0x0f0a0702050e0906, 0x01040d080b00030c,
     0x1f1a1712151e1916, 0x11141d181b10131c };

static const __m256i SUBSH_MASK7_2WAY =
   { 0x090c000306080b07, 0x02050f0a0d01040e,
     0x191c101316181b17, 0x12151f1a1d11141e, };

#define tos(a) #a
#define tostr(a) tos(a)

/* xmm[i] will be multiplied by 2
 * xmm[j] will be lost
 * xmm[k] has to be all 0x1b */
#define MUL2_2WAY(i, j, k){\
  j = _mm256_xor_si256(j, j);\
  j = _mm256_cmpgt_epi8(j, i );\
  i = _mm256_add_epi8(i, i);\
  j = _mm256_and_si256(j, k);\
  i = _mm256_xor_si256(i, j);\
}

#define MixBytes_2way(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
  /* t_i = a_i + a_{i+1} */\
  b6 = a0;\
  b7 = a1;\
  a0 = _mm256_xor_si256(a0, a1);\
  b0 = a2;\
  a1 = _mm256_xor_si256(a1, a2);\
  b1 = a3;\
  a2 = _mm256_xor_si256(a2, a3);\
  b2 = a4;\
  a3 = _mm256_xor_si256(a3, a4);\
  b3 = a5;\
  a4 = _mm256_xor_si256(a4, a5);\
  b4 = a6;\
  a5 = _mm256_xor_si256(a5, a6);\
  b5 = a7;\
  a6 = _mm256_xor_si256(a6, a7);\
  a7 = _mm256_xor_si256(a7, b6);\
  \
  /* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
  b0 = _mm256_xor_si256(b0, a4);\
  b6 = _mm256_xor_si256(b6, a4);\
  b1 = _mm256_xor_si256(b1, a5);\
  b7 = _mm256_xor_si256(b7, a5);\
  b2 = _mm256_xor_si256(b2, a6);\
  b0 = _mm256_xor_si256(b0, a6);\
  /* spill values y_4, y_5 to memory */\
  TEMP0 = b0;\
  b3 = _mm256_xor_si256(b3, a7);\
  b1 = _mm256_xor_si256(b1, a7);\
  TEMP1 = b1;\
  b4 = _mm256_xor_si256(b4, a0);\
  b2 = _mm256_xor_si256(b2, a0);\
  /* save values t0, t1, t2 to xmm8, xmm9 and memory */\
  b0 = a0;\
  b5 = _mm256_xor_si256(b5, a1);\
  b3 = _mm256_xor_si256(b3, a1);\
  b1 = a1;\
  b6 = _mm256_xor_si256(b6, a2);\
  b4 = _mm256_xor_si256(b4, a2);\
  TEMP2 = a2;\
  b7 = _mm256_xor_si256(b7, a3);\
  b5 = _mm256_xor_si256(b5, a3);\
  \
  /* compute x_i = t_i + t_{i+3} */\
  a0 = _mm256_xor_si256(a0, a3);\
  a1 = _mm256_xor_si256(a1, a4);\
  a2 = _mm256_xor_si256(a2, a5);\
  a3 = _mm256_xor_si256(a3, a6);\
  a4 = _mm256_xor_si256(a4, a7);\
  a5 = _mm256_xor_si256(a5, b0);\
  a6 = _mm256_xor_si256(a6, b1);\
  a7 = _mm256_xor_si256(a7, TEMP2);\
  \
  /* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
  /* compute w_i : add y_{i+4} */\
  b1 = m256_const1_64( 0x1b1b1b1b1b1b1b1b );\
  MUL2_2WAY(a0, b0, b1);\
  a0 = _mm256_xor_si256(a0, TEMP0);\
  MUL2_2WAY(a1, b0, b1);\
  a1 = _mm256_xor_si256(a1, TEMP1);\
  MUL2_2WAY(a2, b0, b1);\
  a2 = _mm256_xor_si256(a2, b2);\
  MUL2_2WAY(a3, b0, b1);\
  a3 = _mm256_xor_si256(a3, b3);\
  MUL2_2WAY(a4, b0, b1);\
  a4 = _mm256_xor_si256(a4, b4);\
  MUL2_2WAY(a5, b0, b1);\
  a5 = _mm256_xor_si256(a5, b5);\
  MUL2_2WAY(a6, b0, b1);\
  a6 = _mm256_xor_si256(a6, b6);\
  MUL2_2WAY(a7, b0, b1);\
  a7 = _mm256_xor_si256(a7, b7);\
  \
  /* compute v_i : double w_i */\
  /* add to y_4 y_5 .. v3, v4, ... */\
  MUL2_2WAY(a0, b0, b1);\
  b5 = _mm256_xor_si256(b5, a0);\
  MUL2_2WAY(a1, b0, b1);\
  b6 = _mm256_xor_si256(b6, a1);\
  MUL2_2WAY(a2, b0, b1);\
  b7 = _mm256_xor_si256(b7, a2);\
  MUL2_2WAY(a5, b0, b1);\
  b2 = _mm256_xor_si256(b2, a5);\
  MUL2_2WAY(a6, b0, b1);\
  b3 = _mm256_xor_si256(b3, a6);\
  MUL2_2WAY(a7, b0, b1);\
  b4 = _mm256_xor_si256(b4, a7);\
  MUL2_2WAY(a3, b0, b1);\
  MUL2_2WAY(a4, b0, b1);\
  b0 = TEMP0;\
  b1 = TEMP1;\
  b0 = _mm256_xor_si256(b0, a3);\
  b1 = _mm256_xor_si256(b1, a4);\
}/*MixBytes*/

#define ROUND_2WAY(i, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
  /* AddRoundConstant */\
  b1 = m256_const2_64( 0xffffffffffffffff, 0 ); \
  a0 = _mm256_xor_si256( a0, m256_const1_128( round_const_l0[i] ) );\
  a1 = _mm256_xor_si256( a1, b1 );\
  a2 = _mm256_xor_si256( a2, b1 );\
  a3 = _mm256_xor_si256( a3, b1 );\
  a4 = _mm256_xor_si256( a4, b1 );\
  a5 = _mm256_xor_si256( a5, b1 );\
  a6 = _mm256_xor_si256( a6, b1 );\
  a7 = _mm256_xor_si256( a7, m256_const1_128( round_const_l7[i] ) );\
  \
  /* ShiftBytes + SubBytes (interleaved) */\
  b0 = _mm256_xor_si256( b0, b0 );\
  a0 = _mm256_shuffle_epi8( a0, SUBSH_MASK0_2WAY );\
  a0 = _mm256_aesenclast_epi128(a0, b0 );\
  a1 = _mm256_shuffle_epi8( a1, SUBSH_MASK1_2WAY );\
  a1 = _mm256_aesenclast_epi128(a1, b0 );\
  a2 = _mm256_shuffle_epi8( a2, SUBSH_MASK2_2WAY );\
  a2 = _mm256_aesenclast_epi128(a2, b0 );\
  a3 = _mm256_shuffle_epi8( a3, SUBSH_MASK3_2WAY );\
  a3 = _mm256_aesenclast_epi128(a3, b0 );\
  a4 = _mm256_shuffle_epi8( a4, SUBSH_MASK4_2WAY );\
  a4 = _mm256_aesenclast_epi128(a4, b0 );\
  a5 = _mm256_shuffle_epi8( a5, SUBSH_MASK5_2WAY );\
  a5 = _mm256_aesenclast_epi128(a5, b0 );\
  a6 = _mm256_shuffle_epi8( a6, SUBSH_MASK6_2WAY );\
  a6 = _mm256_aesenclast_epi128(a6, b0 );\
  a7 = _mm256_shuffle_epi8( a7, SUBSH_MASK7_2WAY );\
  a7 = _mm256_aesenclast_epi128( a7, b0 );\
  \
  /* MixBytes */\
  MixBytes_2way(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7);\
  \
}

/* 10 rounds, P and Q in parallel */
#define ROUNDS_P_Q_2WAY(){\
  ROUND_2WAY(0, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
  ROUND_2WAY(1, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
  ROUND_2WAY(2, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
  ROUND_2WAY(3, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
  ROUND_2WAY(4, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
  ROUND_2WAY(5, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
  ROUND_2WAY(6, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
  ROUND_2WAY(7, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
  ROUND_2WAY(8, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
  ROUND_2WAY(9, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
}

#define Matrix_Transpose_A_2way(i0, i1, i2, i3, o1, o2, o3, t0){\
  t0 = TRANSP_MASK_2WAY;\
  \
  i0 = _mm256_shuffle_epi8( i0, t0 );\
  i1 = _mm256_shuffle_epi8( i1, t0 );\
  i2 = _mm256_shuffle_epi8( i2, t0 );\
  i3 = _mm256_shuffle_epi8( i3, t0 );\
  \
  o1 = i0;\
  t0 = i2;\
  \
  i0 = _mm256_unpacklo_epi16( i0, i1 );\
  o1 = _mm256_unpackhi_epi16( o1, i1 );\
  i2 = _mm256_unpacklo_epi16( i2, i3 );\
  t0 = _mm256_unpackhi_epi16( t0, i3 );\
  \
  i0 = _mm256_shuffle_epi32( i0, 216 );\
  o1 = _mm256_shuffle_epi32( o1, 216 );\
  i2 = _mm256_shuffle_epi32( i2, 216 );\
  t0 = _mm256_shuffle_epi32( t0, 216 );\
  \
  o2 = i0;\
  o3 = o1;\
  \
  i0 = _mm256_unpacklo_epi32( i0, i2 );\
  o1 = _mm256_unpacklo_epi32( o1, t0 );\
  o2 = _mm256_unpackhi_epi32( o2, i2 );\
  o3 = _mm256_unpackhi_epi32( o3, t0 );\
}/**/

#define Matrix_Transpose_B_2way(i0, i1, i2, i3, i4, i5, i6, i7, o1, o2, o3, o4, o5, o6, o7){\
  o1 = i0;\
  o2 = i1;\
  i0 = _mm256_unpacklo_epi64( i0, i4 );\
  o1 = _mm256_unpackhi_epi64( o1, i4 );\
  o3 = i1;\
  o4 = i2;\
  o2 = _mm256_unpacklo_epi64( o2, i5 );\
  o3 = _mm256_unpackhi_epi64( o3, i5 );\
  o5 = i2;\
  o6 = i3;\
  o4 = _mm256_unpacklo_epi64( o4, i6 );\
  o5 = _mm256_unpackhi_epi64( o5, i6 );\
  o7 = i3;\
  o6 = _mm256_unpacklo_epi64( o6, i7 );\
  o7 = _mm256_unpackhi_epi64( o7, i7 );\
}/**/

#define Matrix_Transpose_B_INV_2way(i0, i1, i2, i3, i4, i5, i6, i7, o0, o1, o2, o3){\
  o0 = i0;\
  i0 = _mm256_unpacklo_epi64( i0, i1 );\
  o0 = _mm256_unpackhi_epi64( o0, i1 );\
  o1 = i2;\
  i2 = _mm256_unpacklo_epi64( i2, i3 );\
  o1 = _mm256_unpackhi_epi64( o1, i3 );\
  o2 = i4;\
  i4 = _mm256_unpacklo_epi64( i4, i5 );\
  o2 = _mm256_unpackhi_epi64( o2, i5 );\
  o3 = i6;\
  i6 = _mm256_unpacklo_epi64( i6, i7 );\
  o3 = _mm256_unpackhi_epi64( o3, i7 );\
}/**/

#define Matrix_Transpose_O_B_2way(i0, i1, i2, i3, i4, i5, i6, i7, t0){\
  t0 = _mm256_xor_si256( t0, t0 );\
  i1 = i0;\
  i3 = i2;\
  i5 = i4;\
  i7 = i6;\
  i0 = _mm256_unpacklo_epi64( i0, t0 );\
  i1 = _mm256_unpackhi_epi64( i1, t0 );\
  i2 = _mm256_unpacklo_epi64( i2, t0 );\
  i3 = _mm256_unpackhi_epi64( i3, t0 );\
  i4 = _mm256_unpacklo_epi64( i4, t0 );\
  i5 = _mm256_unpackhi_epi64( i5, t0 );\
  i6 = _mm256_unpacklo_epi64( i6, t0 );\
  i7 = _mm256_unpackhi_epi64( i7, t0 );\
}/**/

#define Matrix_Transpose_O_B_INV_2way(i0, i1, i2, i3, i4, i5, i6, i7){\
  i0 = _mm256_unpacklo_epi64( i0, i1 );\
  i2 = _mm256_unpacklo_epi64( i2, i3 );\
  i4 = _mm256_unpacklo_epi64( i4, i5 );\
  i6 = _mm256_unpacklo_epi64( i6, i7 );\
}/**/

void TF512_2way( __m256i* chaining, __m256i* message )
{
   static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
   static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
   static __m256i TEMP0;
   static __m256i TEMP1;
   static __m256i TEMP2;

   /* load message into registers xmm12 - xmm15 */
   xmm12 = message[0];
   xmm13 = message[1];
   xmm14 = message[2];
   xmm15 = message[3];

   /* transform message M from column ordering into row ordering */
   /* we first put two rows (64 bit) of the message into one 128-bit xmm register */
   Matrix_Transpose_A_2way(xmm12, xmm13, xmm14, xmm15, xmm2, xmm6, xmm7, xmm0);

   /* load previous chaining value */
   /* we first put two rows (64 bit) of the CV into one 128-bit xmm register */
   xmm8 = chaining[0];
   xmm0 = chaining[1];
   xmm4 = chaining[2];
   xmm5 = chaining[3];

   /* xor message to CV get input of P */
   /* result: CV+M in xmm8, xmm0, xmm4, xmm5 */
   xmm8 = _mm256_xor_si256( xmm8, xmm12 );
   xmm0 = _mm256_xor_si256( xmm0, xmm2 );
   xmm4 = _mm256_xor_si256( xmm4, xmm6 );
   xmm5 = _mm256_xor_si256( xmm5, xmm7 );

   /* there are now 2 rows of the Groestl state (P and Q) in each xmm register */
   /* unpack to get 1 row of P (64 bit) and Q (64 bit) into one xmm register */
   /* result: the 8 rows of P and Q in xmm8 - xmm12 */
   Matrix_Transpose_B_2way(xmm8, xmm0, xmm4, xmm5, xmm12, xmm2, xmm6, xmm7, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);

   /* compute the two permutations P and Q in parallel */
   ROUNDS_P_Q_2WAY();

   /* unpack again to get two rows of P or two rows of Q in one xmm register */
   Matrix_Transpose_B_INV_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3);

   /* xor output of P and Q */
   /* result: P(CV+M)+Q(M) in xmm0...xmm3 */
   xmm0 = _mm256_xor_si256( xmm0, xmm8 );
   xmm1 = _mm256_xor_si256( xmm1, xmm10 );
   xmm2 = _mm256_xor_si256( xmm2, xmm12 );
   xmm3 = _mm256_xor_si256( xmm3, xmm14 );

   /* xor CV (feed-forward) */
   /* result: P(CV+M)+Q(M)+CV in xmm0...xmm3 */
   xmm0 = _mm256_xor_si256( xmm0, (chaining[0]) );
   xmm1 = _mm256_xor_si256( xmm1, (chaining[1]) );
   xmm2 = _mm256_xor_si256( xmm2, (chaining[2]) );
   xmm3 = _mm256_xor_si256( xmm3, (chaining[3]) );

   /* store CV */
   chaining[0] = xmm0;
   chaining[1] = xmm1;
   chaining[2] = xmm2;
   chaining[3] = xmm3;

   return;
}
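
For orientation: taken together, TF512_2way is the standard Groestl compression step for the short (512-bit state) variant, computed once per 64-byte block and per lane. In scalar notation, reference only:

   f( h, m ) = P( h ^ m ) ^ Q( m ) ^ h

P and Q are the two 10-round permutations computed side by side by ROUNDS_P_Q_2WAY, and the final xor with h is the feed-forward applied just before the store.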

void OF512_2way( __m256i* chaining )
{
   static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
   static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
   static __m256i TEMP0;
   static __m256i TEMP1;
   static __m256i TEMP2;

   /* load CV into registers xmm8, xmm10, xmm12, xmm14 */
   xmm8 = chaining[0];
   xmm10 = chaining[1];
   xmm12 = chaining[2];
   xmm14 = chaining[3];

   /* there are now 2 rows of the CV in one xmm register */
   /* unpack to get 1 row of P (64 bit) into one half of an xmm register */
   /* result: the 8 input rows of P in xmm8 - xmm15 */
   Matrix_Transpose_O_B_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0);

   /* compute the permutation P */
   /* result: the output of P(CV) in xmm8 - xmm15 */
   ROUNDS_P_Q_2WAY();

   /* unpack again to get two rows of P in one xmm register */
   /* result: P(CV) in xmm8, xmm10, xmm12, xmm14 */
   Matrix_Transpose_O_B_INV_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);

   /* xor CV to P output (feed-forward) */
   /* result: P(CV)+CV in xmm8, xmm10, xmm12, xmm14 */
   xmm8 = _mm256_xor_si256( xmm8, (chaining[0]) );
   xmm10 = _mm256_xor_si256( xmm10, (chaining[1]) );
   xmm12 = _mm256_xor_si256( xmm12, (chaining[2]) );
   xmm14 = _mm256_xor_si256( xmm14, (chaining[3]) );

   /* transform state back from row ordering into column ordering */
   /* result: final hash value in xmm9, xmm11 */
   Matrix_Transpose_A_2way(xmm8, xmm10, xmm12, xmm14, xmm4, xmm9, xmm11, xmm0);

   /* we only need to return the truncated half of the state */
   chaining[2] = xmm9;
   chaining[3] = xmm11;
}

#endif // VAES
#endif // GROESTL512_INTR_4WAY_H__
#endif // GROESTL256_INTR_4WAY_H__

@@ -15,7 +15,9 @@
#include "miner.h"
#include "simd-utils.h"

#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX2__) && defined(__VAES__)

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

int groestl512_4way_init( groestl512_4way_context* ctx, uint64_t hashlen )
{
@@ -41,7 +43,7 @@ int groestl512_4way_update_close( groestl512_4way_context* ctx, void* output,
   const int hashlen_m128i = 64 / 16;   // bytes to __m128i
   const int hash_offset = SIZE512 - hashlen_m128i;
   int rem = ctx->rem_ptr;
   int blocks = len / SIZE512;
   uint64_t blocks = len / SIZE512;
   __m512i* in = (__m512i*)input;
   int i;

@@ -62,16 +64,14 @@ int groestl512_4way_update_close( groestl512_4way_context* ctx, void* output,
   if ( i == SIZE512 - 1 )
   {
      // only 1 vector left in buffer, all padding at once
      ctx->buffer[i] = m512_const1_128( _mm_set_epi8(
                 blocks, blocks>>8,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x80 ) );
      ctx->buffer[i] = m512_const2_64( blocks << 56, 0x80 );
   }
   else
   {
      ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 );
      ctx->buffer[i] = m512_const2_64( 0, 0x80 );
      for ( i += 1; i < SIZE512 - 1; i++ )
         ctx->buffer[i] = m512_zero;
      ctx->buffer[i] = m512_const1_128( _mm_set_epi8(
                 blocks, blocks>>8, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 ) );
      ctx->buffer[i] = m512_const2_64( blocks << 56, 0 );
   }

   TF1024_4way( ctx->chaining, ctx->buffer );
@@ -122,7 +122,7 @@ int groestl512_4way_full( groestl512_4way_context* ctx, void* output,
   }
   else
   {
      ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 );
      ctx->buffer[i] = m512_const2_64( 0, 0x80 );
      for ( i += 1; i < SIZE512 - 1; i++ )
         ctx->buffer[i] = m512_zero;
      ctx->buffer[i] = m512_const2_64( blocks << 56, 0 );
@@ -137,5 +137,128 @@ int groestl512_4way_full( groestl512_4way_context* ctx, void* output,
   return 0;
}

#endif // AVX512


// AVX2 + VAES

int groestl512_2way_init( groestl512_2way_context* ctx, uint64_t hashlen )
{
   if (ctx->chaining == NULL || ctx->buffer == NULL)
      return 1;

   memset_zero_256( ctx->chaining, SIZE512 );
   memset_zero_256( ctx->buffer, SIZE512 );

   // The only non-zero in the IV is len. It can be hard coded.
   ctx->chaining[ 6 ] = m256_const2_64( 0x0200000000000000, 0 );

   ctx->buf_ptr = 0;
   ctx->rem_ptr = 0;

   return 0;
}

int groestl512_2way_update_close( groestl512_2way_context* ctx, void* output,
                                  const void* input, uint64_t databitlen )
{
   const int len = (int)databitlen / 128;
   const int hashlen_m128i = 64 / 16;   // bytes to __m128i
   const int hash_offset = SIZE512 - hashlen_m128i;
   int rem = ctx->rem_ptr;
   uint64_t blocks = len / SIZE512;
   __m256i* in = (__m256i*)input;
   int i;

   // --- update ---

   for ( i = 0; i < blocks; i++ )
      TF1024_2way( ctx->chaining, &in[ i * SIZE512 ] );
   ctx->buf_ptr = blocks * SIZE512;

   for ( i = 0; i < len % SIZE512; i++ )
      ctx->buffer[ rem + i ] = in[ ctx->buf_ptr + i ];
   i += rem;

   //--- final ---

   blocks++;   // adjust for final block

   if ( i == SIZE512 - 1 )
   {
      // only 1 vector left in buffer, all padding at once
      ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 );
   }
   else
   {
      ctx->buffer[i] = m256_const2_64( 0, 0x80 );
      for ( i += 1; i < SIZE512 - 1; i++ )
         ctx->buffer[i] = m256_zero;
      ctx->buffer[i] = m256_const2_64( blocks << 56, 0 );
   }

   TF1024_2way( ctx->chaining, ctx->buffer );
   OF1024_2way( ctx->chaining );

   for ( i = 0; i < hashlen_m128i; i++ )
      casti_m256i( output, i ) = ctx->chaining[ hash_offset + i ];

   return 0;
}

int groestl512_2way_full( groestl512_2way_context* ctx, void* output,
                          const void* input, uint64_t datalen )
{
   const int len = (int)datalen >> 4;
   const int hashlen_m128i = 64 >> 4;   // bytes to __m128i
   const int hash_offset = SIZE512 - hashlen_m128i;
   uint64_t blocks = len / SIZE512;
   __m256i* in = (__m256i*)input;
   int i;

   // --- init ---

   memset_zero_256( ctx->chaining, SIZE512 );
   memset_zero_256( ctx->buffer, SIZE512 );
   ctx->chaining[ 6 ] = m256_const2_64( 0x0200000000000000, 0 );
   ctx->buf_ptr = 0;
   ctx->rem_ptr = 0;

   // --- update ---

   for ( i = 0; i < blocks; i++ )
      TF1024_2way( ctx->chaining, &in[ i * SIZE512 ] );
   ctx->buf_ptr = blocks * SIZE512;

   for ( i = 0; i < len % SIZE512; i++ )
      ctx->buffer[ ctx->rem_ptr + i ] = in[ ctx->buf_ptr + i ];
   i += ctx->rem_ptr;

   // --- close ---

   blocks++;

   if ( i == SIZE512 - 1 )
   {
      // only 1 vector left in buffer, all padding at once
      ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 );
   }
   else
   {
      ctx->buffer[i] = m256_const2_64( 0, 0x80 );
      for ( i += 1; i < SIZE512 - 1; i++ )
         ctx->buffer[i] = m256_zero;
      ctx->buffer[i] = m256_const2_64( blocks << 56, 0 );
   }

   TF1024_2way( ctx->chaining, ctx->buffer );
   OF1024_2way( ctx->chaining );

   for ( i = 0; i < hashlen_m128i; i++ )
      casti_m256i( output, i ) = ctx->chaining[ hash_offset + i ];

   return 0;
}

#endif // VAES


@@ -10,7 +10,7 @@
#endif
#include <stdlib.h>

#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX2__) && defined(__VAES__)

#define LENGTH (512)

@@ -36,20 +36,19 @@

#define SIZE512 (SIZE_1024/16)

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

typedef struct {
   __attribute__ ((aligned (128))) __m512i chaining[SIZE512];
   __attribute__ ((aligned (64))) __m512i buffer[SIZE512];
   int blk_count;    // SIZE_m128i
   int buf_ptr;      // __m128i offset
   int rem_ptr;
   int databitlen;   // bits
} groestl512_4way_context;


int groestl512_4way_init( groestl512_4way_context*, uint64_t );

//int reinit_groestl( hashState_groestl* );

int groestl512_4way_update( groestl512_4way_context*, const void*,
                            uint64_t );
int groestl512_4way_close( groestl512_4way_context*, void* );
@@ -58,5 +57,29 @@ int groestl512_4way_update_close( groestl512_4way_context*, void*,
int groestl512_4way_full( groestl512_4way_context*, void*,
                          const void*, uint64_t );

#endif // AVX512

// AVX2 + VAES

typedef struct {
   __attribute__ ((aligned (128))) __m256i chaining[SIZE512];
   __attribute__ ((aligned (64))) __m256i buffer[SIZE512];
   int blk_count;    // SIZE_m128i
   int buf_ptr;      // __m128i offset
   int rem_ptr;
} groestl512_2way_context;


int groestl512_2way_init( groestl512_2way_context*, uint64_t );

int groestl512_2way_update( groestl512_2way_context*, const void*,
                            uint64_t );
int groestl512_2way_close( groestl512_2way_context*, void* );
int groestl512_2way_update_close( groestl512_2way_context*, void*,
                                  const void*, uint64_t );
int groestl512_2way_full( groestl512_2way_context*, void*,
                          const void*, uint64_t );


#endif // VAES
#endif // GROESTL512_HASH_4WAY_H__
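
As with the 256-bit variant, a minimal usage sketch of the 2-way Groestl-512 interface declared above. Hypothetical caller code, not part of the commit; vdata and vhash are assumed to be 2-way interleaved buffers and datalen is assumed to be bytes per lane.

   // Hypothetical example: one-shot 2-way Groestl-512 over two
   // interleaved 80 byte inputs; vhash receives two interleaved
   // 64 byte digests.
   groestl512_2way_context ctx;
   groestl512_2way_full( &ctx, vhash, vdata, 80 );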
|
||||
|
@@ -7,13 +7,12 @@
|
||||
* This code is placed in the public domain
|
||||
*/
|
||||
|
||||
|
||||
#if !defined(GROESTL512_INTR_4WAY_H__)
|
||||
#define GROESTL512_INTR_4WAY_H__ 1
|
||||
|
||||
#include "groestl512-hash-4way.h"
|
||||
|
||||
#if defined(__VAES__)
|
||||
#if defined(__AVX2__) && defined(__VAES__)
|
||||
|
||||
static const __m128i round_const_p[] __attribute__ ((aligned (64))) =
|
||||
{
|
||||
@@ -51,6 +50,8 @@ static const __m128i round_const_q[] __attribute__ ((aligned (64))) =
|
||||
{ 0x8292a2b2c2d2e2f2, 0x0212223242526272 }
|
||||
};
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
|
||||
static const __m512i TRANSP_MASK = { 0x0d0509010c040800, 0x0f070b030e060a02,
|
||||
0x1d1519111c141810, 0x1f171b131e161a12,
|
||||
0x2d2529212c242820, 0x2f272b232e262a22,
|
||||
@@ -103,11 +104,9 @@ static const __m512i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003,
|
||||
* xmm[j] will be lost
|
||||
* xmm[k] has to be all 0x1b */
|
||||
#define MUL2(i, j, k){\
|
||||
j = _mm512_xor_si512(j, j);\
|
||||
j = _mm512_movm_epi8( _mm512_cmpgt_epi8_mask(j, i) );\
|
||||
j = _mm512_movm_epi8( _mm512_cmpgt_epi8_mask( m512_zero, i) );\
|
||||
i = _mm512_add_epi8(i, i);\
|
||||
j = _mm512_and_si512(j, k);\
|
||||
i = _mm512_xor_si512(i, j);\
|
||||
i = mm512_xorand( i, j, k );\
|
||||
}
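
/* What MUL2 computes per byte lane is doubling in GF(2^8) with the AES
   reduction polynomial, i.e. xtime(): the compare builds a mask of lanes
   whose top bit is set, and 0x1b is folded back into exactly those lanes
   after the shift. A scalar reference sketch, for illustration only, not
   part of this commit: */

static inline uint8_t gf256_double_sketch( uint8_t x )
{
   return (uint8_t)( ( x << 1 ) ^ ( ( x & 0x80 ) ? 0x1b : 0x00 ) );
}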

/**/
@@ -129,100 +128,90 @@ static const __m512i SUBSH_MASK7 = { 0x06090c0f0205080b, 0x0e0104070a0d0003,
   We almost fit into 16 registers, need only 3 spills to memory.
   This implementation costs 7.7 c/b giving total speed on SNB: 10.7c/b.
   K. Matusiewicz, 2011/05/29 */
#define MixBytes(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
#define MixBytes( a0, a1, a2, a3, a4, a5, a6, a7, \
                  b0, b1, b2, b3, b4, b5, b6, b7) { \
  /* t_i = a_i + a_{i+1} */\
  b6 = a0;\
  b7 = a1;\
  a0 = _mm512_xor_si512(a0, a1);\
  b0 = a2;\
  a1 = _mm512_xor_si512(a1, a2);\
  b1 = a3;\
  a2 = _mm512_xor_si512(a2, a3);\
  b2 = a4;\
  a3 = _mm512_xor_si512(a3, a4);\
  b3 = a5;\
  a4 = _mm512_xor_si512(a4, a5);\
  b4 = a6;\
  a5 = _mm512_xor_si512(a5, a6);\
  b5 = a7;\
  a6 = _mm512_xor_si512(a6, a7);\
  a7 = _mm512_xor_si512(a7, b6);\
  b6 = a0; \
  b7 = a1; \
  a0 = _mm512_xor_si512( a0, a1 ); \
  b0 = a2; \
  a1 = _mm512_xor_si512( a1, a2 ); \
  b1 = a3; \
  TEMP2 = _mm512_xor_si512( a2, a3 ); \
  b2 = a4; \
  a3 = _mm512_xor_si512( a3, a4 ); \
  b3 = a5; \
  a4 = _mm512_xor_si512( a4, a5 );\
  b4 = a6; \
  a5 = _mm512_xor_si512( a5, a6 ); \
  b5 = a7; \
  a6 = _mm512_xor_si512( a6, a7 ); \
  a7 = _mm512_xor_si512( a7, b6 ); \
  \
  /* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
  b0 = _mm512_xor_si512(b0, a4);\
  b6 = _mm512_xor_si512(b6, a4);\
  b1 = _mm512_xor_si512(b1, a5);\
  b7 = _mm512_xor_si512(b7, a5);\
  b2 = _mm512_xor_si512(b2, a6);\
  b0 = _mm512_xor_si512(b0, a6);\
  TEMP0 = mm512_xor3( b0, a4, a6 ); \
  /* spill values y_4, y_5 to memory */\
  TEMP0 = b0;\
  b3 = _mm512_xor_si512(b3, a7);\
  b1 = _mm512_xor_si512(b1, a7);\
  TEMP1 = b1;\
  b4 = _mm512_xor_si512(b4, a0);\
  b2 = _mm512_xor_si512(b2, a0);\
  TEMP1 = mm512_xor3( b1, a5, a7 ); \
  b2 = mm512_xor3( b2, a6, a0 ); \
  /* save values t0, t1, t2 to xmm8, xmm9 and memory */\
  b0 = a0;\
  b5 = _mm512_xor_si512(b5, a1);\
  b3 = _mm512_xor_si512(b3, a1);\
  b1 = a1;\
  b6 = _mm512_xor_si512(b6, a2);\
  b4 = _mm512_xor_si512(b4, a2);\
  TEMP2 = a2;\
  b7 = _mm512_xor_si512(b7, a3);\
  b5 = _mm512_xor_si512(b5, a3);\
  b0 = a0; \
  b3 = mm512_xor3( b3, a7, a1 ); \
  b1 = a1; \
  b6 = mm512_xor3( b6, a4, TEMP2 ); \
  b4 = mm512_xor3( b4, a0, TEMP2 ); \
  b7 = mm512_xor3( b7, a5, a3 ); \
  b5 = mm512_xor3( b5, a1, a3 ); \
  \
  /* compute x_i = t_i + t_{i+3} */\
  a0 = _mm512_xor_si512(a0, a3);\
  a1 = _mm512_xor_si512(a1, a4);\
  a2 = _mm512_xor_si512(a2, a5);\
  a3 = _mm512_xor_si512(a3, a6);\
  a4 = _mm512_xor_si512(a4, a7);\
  a5 = _mm512_xor_si512(a5, b0);\
  a6 = _mm512_xor_si512(a6, b1);\
  a7 = _mm512_xor_si512(a7, TEMP2);\
  a0 = _mm512_xor_si512( a0, a3 ); \
  a1 = _mm512_xor_si512( a1, a4 ); \
  a2 = _mm512_xor_si512( TEMP2, a5 ); \
  a3 = _mm512_xor_si512( a3, a6 ); \
  a4 = _mm512_xor_si512( a4, a7 ); \
  a5 = _mm512_xor_si512( a5, b0 ); \
  a6 = _mm512_xor_si512( a6, b1 ); \
  a7 = _mm512_xor_si512( a7, TEMP2 ); \
  \
  /* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
  /* compute w_i : add y_{i+4} */\
  b1 = m512_const1_64( 0x1b1b1b1b1b1b1b1b );\
  MUL2(a0, b0, b1);\
  a0 = _mm512_xor_si512(a0, TEMP0);\
  MUL2(a1, b0, b1);\
  a1 = _mm512_xor_si512(a1, TEMP1);\
  MUL2(a2, b0, b1);\
  a2 = _mm512_xor_si512(a2, b2);\
  MUL2(a3, b0, b1);\
  a3 = _mm512_xor_si512(a3, b3);\
  MUL2(a4, b0, b1);\
  a4 = _mm512_xor_si512(a4, b4);\
  MUL2(a5, b0, b1);\
  a5 = _mm512_xor_si512(a5, b5);\
  MUL2(a6, b0, b1);\
  a6 = _mm512_xor_si512(a6, b6);\
  MUL2(a7, b0, b1);\
  a7 = _mm512_xor_si512(a7, b7);\
  b1 = m512_const1_64( 0x1b1b1b1b1b1b1b1b ); \
  MUL2( a0, b0, b1 ); \
  a0 = _mm512_xor_si512( a0, TEMP0 ); \
  MUL2( a1, b0, b1 ); \
  a1 = _mm512_xor_si512( a1, TEMP1 ); \
  MUL2( a2, b0, b1 ); \
  a2 = _mm512_xor_si512( a2, b2 ); \
  MUL2( a3, b0, b1 ); \
  a3 = _mm512_xor_si512( a3, b3 ); \
  MUL2( a4, b0, b1 ); \
  a4 = _mm512_xor_si512( a4, b4 ); \
  MUL2( a5, b0, b1 ); \
  a5 = _mm512_xor_si512( a5, b5 ); \
  MUL2( a6, b0, b1 ); \
  a6 = _mm512_xor_si512( a6, b6 ); \
  MUL2( a7, b0, b1 ); \
  a7 = _mm512_xor_si512( a7, b7 ); \
  \
  /* compute v_i : double w_i */\
  /* add to y_4 y_5 .. v3, v4, ... */\
  MUL2(a0, b0, b1);\
  b5 = _mm512_xor_si512(b5, a0);\
  MUL2(a1, b0, b1);\
  b6 = _mm512_xor_si512(b6, a1);\
  MUL2(a2, b0, b1);\
  b7 = _mm512_xor_si512(b7, a2);\
  MUL2(a5, b0, b1);\
  b2 = _mm512_xor_si512(b2, a5);\
  MUL2(a6, b0, b1);\
  b3 = _mm512_xor_si512(b3, a6);\
  MUL2(a7, b0, b1);\
  b4 = _mm512_xor_si512(b4, a7);\
  MUL2(a3, b0, b1);\
  MUL2(a4, b0, b1);\
  MUL2( a0, b0, b1 ); \
  b5 = _mm512_xor_si512( b5, a0 ); \
  MUL2( a1, b0, b1 ); \
  b6 = _mm512_xor_si512( b6, a1 ); \
  MUL2( a2, b0, b1 ); \
  b7 = _mm512_xor_si512( b7, a2 ); \
  MUL2( a5, b0, b1 ); \
  b2 = _mm512_xor_si512( b2, a5 ); \
  MUL2( a6, b0, b1 ); \
  b3 = _mm512_xor_si512( b3, a6 ); \
  MUL2( a7, b0, b1 ); \
  b4 = _mm512_xor_si512( b4, a7 ); \
  MUL2( a3, b0, b1 ); \
  MUL2( a4, b0, b1 ); \
  b0 = TEMP0;\
  b1 = TEMP1;\
  b0 = _mm512_xor_si512(b0, a3);\
  b1 = _mm512_xor_si512(b1, a4);\
  b0 = _mm512_xor_si512( b0, a3 ); \
  b1 = _mm512_xor_si512( b1, a4 ); \
}/*MixBytes*/

/* one round
@@ -661,5 +650,576 @@ void OF1024_4way( __m512i* chaining )
   return;
}

#endif   // AVX512

// AVX2 + VAES

static const __m256i TRANSP_MASK_2WAY =
   { 0x0d0509010c040800, 0x0f070b030e060a02,
     0x1d1519111c141810, 0x1f171b131e161a12 };

static const __m256i SUBSH_MASK0_2WAY =
   { 0x0b0e0104070a0d00, 0x0306090c0f020508,
     0x1b1e1114171a1d10, 0x1316191c1f121518 };

static const __m256i SUBSH_MASK1_2WAY =
   { 0x0c0f0205080b0e01, 0x04070a0d00030609,
     0x1c1f1215181b1e11, 0x14171a1d10131619 };

static const __m256i SUBSH_MASK2_2WAY =
   { 0x0d000306090c0f02, 0x05080b0e0104070a,
     0x1d101316191c1f12, 0x15181b1e1114171a };

static const __m256i SUBSH_MASK3_2WAY =
   { 0x0e0104070a0d0003, 0x06090c0f0205080b,
     0x1e1114171a1d1013, 0x16191c1f1215181b };

static const __m256i SUBSH_MASK4_2WAY =
   { 0x0f0205080b0e0104, 0x070a0d000306090c,
     0x1f1215181b1e1114, 0x171a1d101316191c };

static const __m256i SUBSH_MASK5_2WAY =
   { 0x000306090c0f0205, 0x080b0e0104070a0d,
     0x101316191c1f1215, 0x181b1e1114171a1d };

static const __m256i SUBSH_MASK6_2WAY =
   { 0x0104070a0d000306, 0x090c0f0205080b0e,
     0x1114171a1d101316, 0x191c1f1215181b1e };

static const __m256i SUBSH_MASK7_2WAY =
   { 0x06090c0f0205080b, 0x0e0104070a0d0003,
     0x16191c1f1215181b, 0x1e1114171a1d1013 };

#define tos(a)    #a
#define tostr(a)  tos(a)

/* xmm[i] will be multiplied by 2
 * xmm[j] will be lost
 * xmm[k] has to be all 0x1b */
#define MUL2_2WAY(i, j, k){\
  j = _mm256_cmpgt_epi8( m256_zero, i );\
  i = _mm256_add_epi8(i, i);\
  i = mm256_xorand( i, j, k );\
}

#define MixBytes_2way(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
  /* t_i = a_i + a_{i+1} */\
  b6 = a0;\
  b7 = a1;\
  a0 = _mm256_xor_si256(a0, a1);\
  b0 = a2;\
  a1 = _mm256_xor_si256(a1, a2);\
  b1 = a3;\
  a2 = _mm256_xor_si256(a2, a3);\
  b2 = a4;\
  a3 = _mm256_xor_si256(a3, a4);\
  b3 = a5;\
  a4 = _mm256_xor_si256(a4, a5);\
  b4 = a6;\
  a5 = _mm256_xor_si256(a5, a6);\
  b5 = a7;\
  a6 = _mm256_xor_si256(a6, a7);\
  a7 = _mm256_xor_si256(a7, b6);\
  \
  /* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
  b0 = _mm256_xor_si256(b0, a4);\
  b6 = _mm256_xor_si256(b6, a4);\
  b1 = _mm256_xor_si256(b1, a5);\
  b7 = _mm256_xor_si256(b7, a5);\
  b2 = _mm256_xor_si256(b2, a6);\
  b0 = _mm256_xor_si256(b0, a6);\
  /* spill values y_4, y_5 to memory */\
  TEMP0 = b0;\
  b3 = _mm256_xor_si256(b3, a7);\
  b1 = _mm256_xor_si256(b1, a7);\
  TEMP1 = b1;\
  b4 = _mm256_xor_si256(b4, a0);\
  b2 = _mm256_xor_si256(b2, a0);\
  /* save values t0, t1, t2 to xmm8, xmm9 and memory */\
  b0 = a0;\
  b5 = _mm256_xor_si256(b5, a1);\
  b3 = _mm256_xor_si256(b3, a1);\
  b1 = a1;\
  b6 = _mm256_xor_si256(b6, a2);\
  b4 = _mm256_xor_si256(b4, a2);\
  TEMP2 = a2;\
  b7 = _mm256_xor_si256(b7, a3);\
  b5 = _mm256_xor_si256(b5, a3);\
  \
  /* compute x_i = t_i + t_{i+3} */\
  a0 = _mm256_xor_si256(a0, a3);\
  a1 = _mm256_xor_si256(a1, a4);\
  a2 = _mm256_xor_si256(a2, a5);\
  a3 = _mm256_xor_si256(a3, a6);\
  a4 = _mm256_xor_si256(a4, a7);\
  a5 = _mm256_xor_si256(a5, b0);\
  a6 = _mm256_xor_si256(a6, b1);\
  a7 = _mm256_xor_si256(a7, TEMP2);\
  \
  /* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
  /* compute w_i : add y_{i+4} */\
  b1 = m256_const1_64( 0x1b1b1b1b1b1b1b1b );\
  MUL2_2WAY(a0, b0, b1);\
  a0 = _mm256_xor_si256(a0, TEMP0);\
  MUL2_2WAY(a1, b0, b1);\
  a1 = _mm256_xor_si256(a1, TEMP1);\
  MUL2_2WAY(a2, b0, b1);\
  a2 = _mm256_xor_si256(a2, b2);\
  MUL2_2WAY(a3, b0, b1);\
  a3 = _mm256_xor_si256(a3, b3);\
  MUL2_2WAY(a4, b0, b1);\
  a4 = _mm256_xor_si256(a4, b4);\
  MUL2_2WAY(a5, b0, b1);\
  a5 = _mm256_xor_si256(a5, b5);\
  MUL2_2WAY(a6, b0, b1);\
  a6 = _mm256_xor_si256(a6, b6);\
  MUL2_2WAY(a7, b0, b1);\
  a7 = _mm256_xor_si256(a7, b7);\
  \
  /* compute v_i : double w_i */\
  /* add to y_4 y_5 .. v3, v4, ... */\
  MUL2_2WAY(a0, b0, b1);\
  b5 = _mm256_xor_si256(b5, a0);\
  MUL2_2WAY(a1, b0, b1);\
  b6 = _mm256_xor_si256(b6, a1);\
  MUL2_2WAY(a2, b0, b1);\
  b7 = _mm256_xor_si256(b7, a2);\
  MUL2_2WAY(a5, b0, b1);\
  b2 = _mm256_xor_si256(b2, a5);\
  MUL2_2WAY(a6, b0, b1);\
  b3 = _mm256_xor_si256(b3, a6);\
  MUL2_2WAY(a7, b0, b1);\
  b4 = _mm256_xor_si256(b4, a7);\
  MUL2_2WAY(a3, b0, b1);\
  MUL2_2WAY(a4, b0, b1);\
  b0 = TEMP0;\
  b1 = TEMP1;\
  b0 = _mm256_xor_si256(b0, a3);\
  b1 = _mm256_xor_si256(b1, a4);\
}/*MixBytes*/

/* one round
 * a0-a7 = input rows
 * b0-b7 = output rows
 */
#define SUBMIX_2WAY(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
  /* SubBytes */\
  b0 = _mm256_xor_si256( b0, b0 );\
  a0 = _mm256_aesenclast_epi128( a0, b0 );\
  a1 = _mm256_aesenclast_epi128( a1, b0 );\
  a2 = _mm256_aesenclast_epi128( a2, b0 );\
  a3 = _mm256_aesenclast_epi128( a3, b0 );\
  a4 = _mm256_aesenclast_epi128( a4, b0 );\
  a5 = _mm256_aesenclast_epi128( a5, b0 );\
  a6 = _mm256_aesenclast_epi128( a6, b0 );\
  a7 = _mm256_aesenclast_epi128( a7, b0 );\
  /* MixBytes */\
  MixBytes_2way(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7);\
}

#define ROUNDS_P_2WAY(){\
  uint8_t round_counter = 0;\
  for ( round_counter = 0; round_counter < 14; round_counter += 2 ) \
  { \
    /* AddRoundConstant P1024 */\
    xmm8 = _mm256_xor_si256( xmm8, m256_const1_128( \
                             casti_m128i( round_const_p, round_counter ) ) ); \
    /* ShiftBytes P1024 + pre-AESENCLAST */\
    xmm8  = _mm256_shuffle_epi8( xmm8,  SUBSH_MASK0_2WAY ); \
    xmm9  = _mm256_shuffle_epi8( xmm9,  SUBSH_MASK1_2WAY );\
    xmm10 = _mm256_shuffle_epi8( xmm10, SUBSH_MASK2_2WAY );\
    xmm11 = _mm256_shuffle_epi8( xmm11, SUBSH_MASK3_2WAY );\
    xmm12 = _mm256_shuffle_epi8( xmm12, SUBSH_MASK4_2WAY );\
    xmm13 = _mm256_shuffle_epi8( xmm13, SUBSH_MASK5_2WAY );\
    xmm14 = _mm256_shuffle_epi8( xmm14, SUBSH_MASK6_2WAY );\
    xmm15 = _mm256_shuffle_epi8( xmm15, SUBSH_MASK7_2WAY );\
    /* SubBytes + MixBytes */\
    SUBMIX_2WAY(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
    \
    /* AddRoundConstant P1024 */\
    xmm0 = _mm256_xor_si256( xmm0, m256_const1_128( \
                             casti_m128i( round_const_p, round_counter+1 ) ) ); \
    /* ShiftBytes P1024 + pre-AESENCLAST */\
    xmm0 = _mm256_shuffle_epi8( xmm0, SUBSH_MASK0_2WAY );\
    xmm1 = _mm256_shuffle_epi8( xmm1, SUBSH_MASK1_2WAY );\
    xmm2 = _mm256_shuffle_epi8( xmm2, SUBSH_MASK2_2WAY );\
    xmm3 = _mm256_shuffle_epi8( xmm3, SUBSH_MASK3_2WAY );\
    xmm4 = _mm256_shuffle_epi8( xmm4, SUBSH_MASK4_2WAY );\
    xmm5 = _mm256_shuffle_epi8( xmm5, SUBSH_MASK5_2WAY );\
    xmm6 = _mm256_shuffle_epi8( xmm6, SUBSH_MASK6_2WAY );\
    xmm7 = _mm256_shuffle_epi8( xmm7, SUBSH_MASK7_2WAY );\
    /* SubBytes + MixBytes */\
    SUBMIX_2WAY(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
  }\
}

#define ROUNDS_Q_2WAY(){\
  uint8_t round_counter = 0;\
  for ( round_counter = 0; round_counter < 14; round_counter += 2) \
  { \
    /* AddRoundConstant Q1024 */\
    xmm1 = m256_neg1;\
    xmm8  = _mm256_xor_si256( xmm8,  xmm1 );\
    xmm9  = _mm256_xor_si256( xmm9,  xmm1 );\
    xmm10 = _mm256_xor_si256( xmm10, xmm1 );\
    xmm11 = _mm256_xor_si256( xmm11, xmm1 );\
    xmm12 = _mm256_xor_si256( xmm12, xmm1 );\
    xmm13 = _mm256_xor_si256( xmm13, xmm1 );\
    xmm14 = _mm256_xor_si256( xmm14, xmm1 );\
    xmm15 = _mm256_xor_si256( xmm15, m256_const1_128( \
                              casti_m128i( round_const_q, round_counter ) ) ); \
    /* ShiftBytes Q1024 + pre-AESENCLAST */\
    xmm8  = _mm256_shuffle_epi8( xmm8,  SUBSH_MASK1_2WAY );\
    xmm9  = _mm256_shuffle_epi8( xmm9,  SUBSH_MASK3_2WAY );\
    xmm10 = _mm256_shuffle_epi8( xmm10, SUBSH_MASK5_2WAY );\
    xmm11 = _mm256_shuffle_epi8( xmm11, SUBSH_MASK7_2WAY );\
    xmm12 = _mm256_shuffle_epi8( xmm12, SUBSH_MASK0_2WAY );\
    xmm13 = _mm256_shuffle_epi8( xmm13, SUBSH_MASK2_2WAY );\
    xmm14 = _mm256_shuffle_epi8( xmm14, SUBSH_MASK4_2WAY );\
    xmm15 = _mm256_shuffle_epi8( xmm15, SUBSH_MASK6_2WAY );\
    /* SubBytes + MixBytes */\
    SUBMIX_2WAY(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
    \
    /* AddRoundConstant Q1024 */\
    xmm9 = m256_neg1;\
    xmm0 = _mm256_xor_si256( xmm0, xmm9 );\
    xmm1 = _mm256_xor_si256( xmm1, xmm9 );\
    xmm2 = _mm256_xor_si256( xmm2, xmm9 );\
    xmm3 = _mm256_xor_si256( xmm3, xmm9 );\
    xmm4 = _mm256_xor_si256( xmm4, xmm9 );\
    xmm5 = _mm256_xor_si256( xmm5, xmm9 );\
    xmm6 = _mm256_xor_si256( xmm6, xmm9 );\
    xmm7 = _mm256_xor_si256( xmm7, m256_const1_128( \
                             casti_m128i( round_const_q, round_counter+1 ) ) ); \
    /* ShiftBytes Q1024 + pre-AESENCLAST */\
    xmm0 = _mm256_shuffle_epi8( xmm0, SUBSH_MASK1_2WAY );\
    xmm1 = _mm256_shuffle_epi8( xmm1, SUBSH_MASK3_2WAY );\
    xmm2 = _mm256_shuffle_epi8( xmm2, SUBSH_MASK5_2WAY );\
    xmm3 = _mm256_shuffle_epi8( xmm3, SUBSH_MASK7_2WAY );\
    xmm4 = _mm256_shuffle_epi8( xmm4, SUBSH_MASK0_2WAY );\
    xmm5 = _mm256_shuffle_epi8( xmm5, SUBSH_MASK2_2WAY );\
    xmm6 = _mm256_shuffle_epi8( xmm6, SUBSH_MASK4_2WAY );\
    xmm7 = _mm256_shuffle_epi8( xmm7, SUBSH_MASK6_2WAY );\
    /* SubBytes + MixBytes */\
    SUBMIX_2WAY(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
  }\
}

#define Matrix_Transpose_2way(i0, i1, i2, i3, i4, i5, i6, i7, t0, t1, t2, t3, t4, t5, t6, t7){\
  t0 = TRANSP_MASK_2WAY;\
  \
  i6 = _mm256_shuffle_epi8(i6, t0);\
  i0 = _mm256_shuffle_epi8(i0, t0);\
  i1 = _mm256_shuffle_epi8(i1, t0);\
  i2 = _mm256_shuffle_epi8(i2, t0);\
  i3 = _mm256_shuffle_epi8(i3, t0);\
  t1 = i2;\
  i4 = _mm256_shuffle_epi8(i4, t0);\
  i5 = _mm256_shuffle_epi8(i5, t0);\
  t2 = i4;\
  t3 = i6;\
  i7 = _mm256_shuffle_epi8(i7, t0);\
  \
  /* continue with unpack using 4 temp registers */\
  t0 = i0;\
  t2 = _mm256_unpackhi_epi16(t2, i5);\
  i4 = _mm256_unpacklo_epi16(i4, i5);\
  t3 = _mm256_unpackhi_epi16(t3, i7);\
  i6 = _mm256_unpacklo_epi16(i6, i7);\
  t0 = _mm256_unpackhi_epi16(t0, i1);\
  t1 = _mm256_unpackhi_epi16(t1, i3);\
  i2 = _mm256_unpacklo_epi16(i2, i3);\
  i0 = _mm256_unpacklo_epi16(i0, i1);\
  \
  /* shuffle with immediate */\
  t0 = _mm256_shuffle_epi32(t0, 216);\
  t1 = _mm256_shuffle_epi32(t1, 216);\
  t2 = _mm256_shuffle_epi32(t2, 216);\
  t3 = _mm256_shuffle_epi32(t3, 216);\
  i0 = _mm256_shuffle_epi32(i0, 216);\
  i2 = _mm256_shuffle_epi32(i2, 216);\
  i4 = _mm256_shuffle_epi32(i4, 216);\
  i6 = _mm256_shuffle_epi32(i6, 216);\
  \
  /* continue with unpack */\
  t4 = i0;\
  i0 = _mm256_unpacklo_epi32(i0, i2);\
  t4 = _mm256_unpackhi_epi32(t4, i2);\
  t5 = t0;\
  t0 = _mm256_unpacklo_epi32(t0, t1);\
  t5 = _mm256_unpackhi_epi32(t5, t1);\
  t6 = i4;\
  i4 = _mm256_unpacklo_epi32(i4, i6);\
  t7 = t2;\
  t6 = _mm256_unpackhi_epi32(t6, i6);\
  i2 = t0;\
  t2 = _mm256_unpacklo_epi32(t2, t3);\
  i3 = t0;\
  t7 = _mm256_unpackhi_epi32(t7, t3);\
  \
  /* there are now 2 rows in each xmm */\
  /* unpack to get 1 row of CV in each xmm */\
  i1 = i0;\
  i1 = _mm256_unpackhi_epi64(i1, i4);\
  i0 = _mm256_unpacklo_epi64(i0, i4);\
  i4 = t4;\
  i3 = _mm256_unpackhi_epi64(i3, t2);\
  i5 = t4;\
  i2 = _mm256_unpacklo_epi64(i2, t2);\
  i6 = t5;\
  i5 = _mm256_unpackhi_epi64(i5, t6);\
  i7 = t5;\
  i4 = _mm256_unpacklo_epi64(i4, t6);\
  i7 = _mm256_unpackhi_epi64(i7, t7);\
  i6 = _mm256_unpacklo_epi64(i6, t7);\
  /* transpose done */\
}/**/

#define Matrix_Transpose_INV_2way(i0, i1, i2, i3, i4, i5, i6, i7, o0, o1, o2, t0, t1, t2, t3, t4){\
  /* transpose matrix to get output format */\
  o1 = i0;\
  i0 = _mm256_unpacklo_epi64(i0, i1);\
  o1 = _mm256_unpackhi_epi64(o1, i1);\
  t0 = i2;\
  i2 = _mm256_unpacklo_epi64(i2, i3);\
  t0 = _mm256_unpackhi_epi64(t0, i3);\
  t1 = i4;\
  i4 = _mm256_unpacklo_epi64(i4, i5);\
  t1 = _mm256_unpackhi_epi64(t1, i5);\
  t2 = i6;\
  o0 = TRANSP_MASK_2WAY;\
  i6 = _mm256_unpacklo_epi64(i6, i7);\
  t2 = _mm256_unpackhi_epi64(t2, i7);\
  /* load transpose mask into a register, because it will be used 8 times */\
  i0 = _mm256_shuffle_epi8(i0, o0);\
  i2 = _mm256_shuffle_epi8(i2, o0);\
  i4 = _mm256_shuffle_epi8(i4, o0);\
  i6 = _mm256_shuffle_epi8(i6, o0);\
  o1 = _mm256_shuffle_epi8(o1, o0);\
  t0 = _mm256_shuffle_epi8(t0, o0);\
  t1 = _mm256_shuffle_epi8(t1, o0);\
  t2 = _mm256_shuffle_epi8(t2, o0);\
  /* continue with unpack using 4 temp registers */\
  t3 = i4;\
  o2 = o1;\
  o0 = i0;\
  t4 = t1;\
  \
  t3 = _mm256_unpackhi_epi16(t3, i6);\
  i4 = _mm256_unpacklo_epi16(i4, i6);\
  o0 = _mm256_unpackhi_epi16(o0, i2);\
  i0 = _mm256_unpacklo_epi16(i0, i2);\
  o2 = _mm256_unpackhi_epi16(o2, t0);\
  o1 = _mm256_unpacklo_epi16(o1, t0);\
  t4 = _mm256_unpackhi_epi16(t4, t2);\
  t1 = _mm256_unpacklo_epi16(t1, t2);\
  /* shuffle with immediate */\
  i4 = _mm256_shuffle_epi32(i4, 216);\
  t3 = _mm256_shuffle_epi32(t3, 216);\
  o1 = _mm256_shuffle_epi32(o1, 216);\
  o2 = _mm256_shuffle_epi32(o2, 216);\
  i0 = _mm256_shuffle_epi32(i0, 216);\
  o0 = _mm256_shuffle_epi32(o0, 216);\
  t1 = _mm256_shuffle_epi32(t1, 216);\
  t4 = _mm256_shuffle_epi32(t4, 216);\
  /* continue with unpack */\
  i1 = i0;\
  i3 = o0;\
  i5 = o1;\
  i7 = o2;\
  i0 = _mm256_unpacklo_epi32(i0, i4);\
  i1 = _mm256_unpackhi_epi32(i1, i4);\
  o0 = _mm256_unpacklo_epi32(o0, t3);\
  i3 = _mm256_unpackhi_epi32(i3, t3);\
  o1 = _mm256_unpacklo_epi32(o1, t1);\
  i5 = _mm256_unpackhi_epi32(i5, t1);\
  o2 = _mm256_unpacklo_epi32(o2, t4);\
  i7 = _mm256_unpackhi_epi32(i7, t4);\
  /* transpose done */\
}/**/

void INIT_2way( __m256i *chaining )
{
   static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
   static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;

   /* load IV into registers xmm8 - xmm15 */
   xmm8  = chaining[0];
   xmm9  = chaining[1];
   xmm10 = chaining[2];
   xmm11 = chaining[3];
   xmm12 = chaining[4];
   xmm13 = chaining[5];
   xmm14 = chaining[6];
   xmm15 = chaining[7];

   /* transform chaining value from column ordering into row ordering */
   Matrix_Transpose_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);

   /* store transposed IV */
   chaining[0] = xmm8;
   chaining[1] = xmm9;
   chaining[2] = xmm10;
   chaining[3] = xmm11;
   chaining[4] = xmm12;
   chaining[5] = xmm13;
   chaining[6] = xmm14;
   chaining[7] = xmm15;
}

void TF1024_2way( __m256i *chaining, const __m256i *message )
{
   static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
   static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
   static __m256i QTEMP[8];
   static __m256i TEMP0;
   static __m256i TEMP1;
   static __m256i TEMP2;

   /* load message into registers xmm8 - xmm15 (Q = message) */
   xmm8  = message[0];
   xmm9  = message[1];
   xmm10 = message[2];
   xmm11 = message[3];
   xmm12 = message[4];
   xmm13 = message[5];
   xmm14 = message[6];
   xmm15 = message[7];

   /* transform message M from column ordering into row ordering */
   Matrix_Transpose_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);

   /* store message M (Q input) for later */
   QTEMP[0] = xmm8;
   QTEMP[1] = xmm9;
   QTEMP[2] = xmm10;
   QTEMP[3] = xmm11;
   QTEMP[4] = xmm12;
   QTEMP[5] = xmm13;
   QTEMP[6] = xmm14;
   QTEMP[7] = xmm15;

   /* xor CV to message to get P input */
   /* result: CV+M in xmm8...xmm15 */
   xmm8  = _mm256_xor_si256( xmm8,  (chaining[0]) );
   xmm9  = _mm256_xor_si256( xmm9,  (chaining[1]) );
   xmm10 = _mm256_xor_si256( xmm10, (chaining[2]) );
   xmm11 = _mm256_xor_si256( xmm11, (chaining[3]) );
   xmm12 = _mm256_xor_si256( xmm12, (chaining[4]) );
   xmm13 = _mm256_xor_si256( xmm13, (chaining[5]) );
   xmm14 = _mm256_xor_si256( xmm14, (chaining[6]) );
   xmm15 = _mm256_xor_si256( xmm15, (chaining[7]) );

   /* compute permutation P */
   /* result: P(CV+M) in xmm8...xmm15 */
   ROUNDS_P_2WAY();

   /* xor CV to P output (feed-forward) */
   /* result: P(CV+M)+CV in xmm8...xmm15 */
   xmm8  = _mm256_xor_si256( xmm8,  (chaining[0]) );
   xmm9  = _mm256_xor_si256( xmm9,  (chaining[1]) );
   xmm10 = _mm256_xor_si256( xmm10, (chaining[2]) );
   xmm11 = _mm256_xor_si256( xmm11, (chaining[3]) );
   xmm12 = _mm256_xor_si256( xmm12, (chaining[4]) );
   xmm13 = _mm256_xor_si256( xmm13, (chaining[5]) );
   xmm14 = _mm256_xor_si256( xmm14, (chaining[6]) );
   xmm15 = _mm256_xor_si256( xmm15, (chaining[7]) );

   /* store P(CV+M)+CV */
   chaining[0] = xmm8;
   chaining[1] = xmm9;
   chaining[2] = xmm10;
   chaining[3] = xmm11;
   chaining[4] = xmm12;
   chaining[5] = xmm13;
   chaining[6] = xmm14;
   chaining[7] = xmm15;

   /* load message M (Q input) into xmm8-15 */
   xmm8  = QTEMP[0];
   xmm9  = QTEMP[1];
   xmm10 = QTEMP[2];
   xmm11 = QTEMP[3];
   xmm12 = QTEMP[4];
   xmm13 = QTEMP[5];
   xmm14 = QTEMP[6];
   xmm15 = QTEMP[7];

   /* compute permutation Q */
   /* result: Q(M) in xmm8...xmm15 */
   ROUNDS_Q_2WAY();

   /* xor Q output */
   /* result: P(CV+M)+CV+Q(M) in xmm8...xmm15 */
   xmm8  = _mm256_xor_si256( xmm8,  (chaining[0]) );
   xmm9  = _mm256_xor_si256( xmm9,  (chaining[1]) );
   xmm10 = _mm256_xor_si256( xmm10, (chaining[2]) );
   xmm11 = _mm256_xor_si256( xmm11, (chaining[3]) );
   xmm12 = _mm256_xor_si256( xmm12, (chaining[4]) );
   xmm13 = _mm256_xor_si256( xmm13, (chaining[5]) );
   xmm14 = _mm256_xor_si256( xmm14, (chaining[6]) );
   xmm15 = _mm256_xor_si256( xmm15, (chaining[7]) );

   /* store CV */
   chaining[0] = xmm8;
   chaining[1] = xmm9;
   chaining[2] = xmm10;
   chaining[3] = xmm11;
   chaining[4] = xmm12;
   chaining[5] = xmm13;
   chaining[6] = xmm14;
   chaining[7] = xmm15;

   return;
}

void OF1024_2way( __m256i* chaining )
{
   static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
   static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
   static __m256i TEMP0;
   static __m256i TEMP1;
   static __m256i TEMP2;

   /* load CV into registers xmm8 - xmm15 */
   xmm8  = chaining[0];
   xmm9  = chaining[1];
   xmm10 = chaining[2];
   xmm11 = chaining[3];
   xmm12 = chaining[4];
   xmm13 = chaining[5];
   xmm14 = chaining[6];
   xmm15 = chaining[7];

   /* compute permutation P */
   /* result: P(CV) in xmm8...xmm15 */
   ROUNDS_P_2WAY();

   /* xor CV to P output (feed-forward) */
   /* result: P(CV)+CV in xmm8...xmm15 */
   xmm8  = _mm256_xor_si256( xmm8,  (chaining[0]) );
   xmm9  = _mm256_xor_si256( xmm9,  (chaining[1]) );
   xmm10 = _mm256_xor_si256( xmm10, (chaining[2]) );
   xmm11 = _mm256_xor_si256( xmm11, (chaining[3]) );
   xmm12 = _mm256_xor_si256( xmm12, (chaining[4]) );
   xmm13 = _mm256_xor_si256( xmm13, (chaining[5]) );
   xmm14 = _mm256_xor_si256( xmm14, (chaining[6]) );
   xmm15 = _mm256_xor_si256( xmm15, (chaining[7]) );

   /* transpose CV back from row ordering to column ordering */
   /* result: final hash value in xmm0, xmm6, xmm13, xmm15 */
   Matrix_Transpose_INV_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm4, xmm0, xmm6, xmm1, xmm2, xmm3, xmm5, xmm7);

   /* we only need to return the truncated half of the state */
   chaining[4] = xmm0;
   chaining[5] = xmm6;
   chaining[6] = xmm13;
   chaining[7] = xmm15;

   return;
}



#endif   // VAES
#endif   // GROESTL512_INTR_4WAY_H__

@@ -11,7 +11,7 @@
 #else
 #include "sph_groestl.h"
 #endif
#include <openssl/sha.h>
#include "algo/sha/sha256-hash.h"

 typedef struct {
 #ifdef __AES__
@@ -19,7 +19,6 @@ typedef struct {
 #else
   sph_groestl512_context  groestl;
 #endif
   SHA256_CTX              sha;
 } myrgr_ctx_holder;

 myrgr_ctx_holder myrgr_ctx;
@@ -31,7 +30,6 @@ void init_myrgr_ctx()
 #else
   sph_groestl512_init( &myrgr_ctx.groestl );
 #endif
   SHA256_Init( &myrgr_ctx.sha );
 }

 void myriad_hash(void *output, const void *input)
@@ -39,54 +37,54 @@ void myriad_hash(void *output, const void *input)
   myrgr_ctx_holder ctx;
   memcpy( &ctx, &myrgr_ctx, sizeof(myrgr_ctx) );

   uint32_t _ALIGN(32) hash[16];
   uint32_t _ALIGN(32) hash[16];

#ifdef __AES__
   update_groestl( &ctx.groestl, (char*)input, 640 );
   final_groestl( &ctx.groestl, (char*)hash);
#else
   sph_groestl512(&ctx.groestl, input, 80);
   sph_groestl512_close(&ctx.groestl, hash);
   sph_groestl512(&ctx.groestl, input, 80);
   sph_groestl512_close(&ctx.groestl, hash);
#endif

   SHA256_Update( &ctx.sha, (unsigned char*)hash, 64 );
   SHA256_Final( (unsigned char*)hash, &ctx.sha );
   sha256_full( hash, hash, 64 );

   memcpy(output, hash, 32);
   memcpy(output, hash, 32);
}

int scanhash_myriad( struct work *work, uint32_t max_nonce,
                     uint64_t *hashes_done, struct thr_info *mythr )
{
   uint32_t _ALIGN(64) endiandata[20];
   uint32_t _ALIGN(64) endiandata[20];
   uint32_t *pdata = work->data;
   uint32_t *ptarget = work->target;
   const uint32_t first_nonce = pdata[19];
   uint32_t nonce = first_nonce;
   int thr_id = mythr->id;  // thr_id arg is deprecated
   const uint32_t first_nonce = pdata[19];
   uint32_t nonce = first_nonce;
   int thr_id = mythr->id;

   if (opt_benchmark)
      ((uint32_t*)ptarget)[7] = 0x0000ff;
   if (opt_benchmark)
      ((uint32_t*)ptarget)[7] = 0x0000ff;

   swab32_array( endiandata, pdata, 20 );
   swab32_array( endiandata, pdata, 20 );

   do {
      const uint32_t Htarg = ptarget[7];
      uint32_t hash[8];
      be32enc(&endiandata[19], nonce);
      myriad_hash(hash, endiandata);
   do {
      const uint32_t Htarg = ptarget[7];
      uint32_t hash[8];
      be32enc(&endiandata[19], nonce);
      myriad_hash(hash, endiandata);

      if (hash[7] <= Htarg && fulltest(hash, ptarget)) {
         pdata[19] = nonce;
         *hashes_done = pdata[19] - first_nonce;
         return 1;
      }
      nonce++;
      if (hash[7] <= Htarg && fulltest(hash, ptarget))
      {
         pdata[19] = nonce;
         *hashes_done = pdata[19] - first_nonce;
         return 1;
      }
      nonce++;

   } while (nonce < max_nonce && !work_restart[thr_id].restart);
   } while (nonce < max_nonce && !work_restart[thr_id].restart);

   pdata[19] = nonce;
   *hashes_done = pdata[19] - first_nonce + 1;
   return 0;
   pdata[19] = nonce;
   *hashes_done = pdata[19] - first_nonce + 1;
   return 0;
}
#endif
@@ -44,6 +44,7 @@ void myriad_8way_hash( void *output, const void *input )

   rintrlv_8x64_4x128( vhashA, vhashB, input, 640 );
   groestl512_4way_update_close( &ctx.groestl, vhashA, vhashA, 640 );
   memcpy( &ctx.groestl, &myrgr_8way_ctx.groestl, sizeof(groestl512_4way_context) );
   groestl512_4way_update_close( &ctx.groestl, vhashB, vhashB, 640 );

   uint32_t hash0[20] __attribute__ ((aligned (64)));
@@ -58,8 +59,6 @@ void myriad_8way_hash( void *output, const void *input )
// rintrlv_4x128_8x32( vhash, vhashA, vhashB, 512 );
   dintrlv_4x128_512( hash0, hash1, hash2, hash3, vhashA );
   dintrlv_4x128_512( hash4, hash5, hash6, hash7, vhashB );
   intrlv_8x32_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5,
                    hash6, hash7 );

#else

@@ -76,27 +75,27 @@ void myriad_8way_hash( void *output, const void *input )
                hash4, hash5, hash6, hash7, input, 640 );

   update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 640 );
   memcpy( &ctx.groestl, &myrgr_4way_ctx.groestl, sizeof(hashState_groestl) );
   memcpy( &ctx.groestl, &myrgr_8way_ctx.groestl, sizeof(hashState_groestl) );
   update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 640 );
   memcpy( &ctx.groestl, &myrgr_4way_ctx.groestl, sizeof(hashState_groestl) );
   memcpy( &ctx.groestl, &myrgr_8way_ctx.groestl, sizeof(hashState_groestl) );
   update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 640 );
   memcpy( &ctx.groestl, &myrgr_4way_ctx.groestl, sizeof(hashState_groestl) );
   memcpy( &ctx.groestl, &myrgr_8way_ctx.groestl, sizeof(hashState_groestl) );
   update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 640 );
   memcpy( &ctx.groestl, &myrgr_4way_ctx.groestl, sizeof(hashState_groestl) );
   memcpy( &ctx.groestl, &myrgr_8way_ctx.groestl, sizeof(hashState_groestl) );
   update_and_final_groestl( &ctx.groestl, (char*)hash4, (char*)hash4, 640 );
   memcpy( &ctx.groestl, &myrgr_4way_ctx.groestl, sizeof(hashState_groestl) );
   memcpy( &ctx.groestl, &myrgr_8way_ctx.groestl, sizeof(hashState_groestl) );
   update_and_final_groestl( &ctx.groestl, (char*)hash5, (char*)hash5, 640 );
   memcpy( &ctx.groestl, &myrgr_4way_ctx.groestl, sizeof(hashState_groestl) );
   memcpy( &ctx.groestl, &myrgr_8way_ctx.groestl, sizeof(hashState_groestl) );
   update_and_final_groestl( &ctx.groestl, (char*)hash6, (char*)hash6, 640 );
   memcpy( &ctx.groestl, &myrgr_4way_ctx.groestl, sizeof(hashState_groestl) );
   memcpy( &ctx.groestl, &myrgr_8way_ctx.groestl, sizeof(hashState_groestl) );
   update_and_final_groestl( &ctx.groestl, (char*)hash7, (char*)hash7, 640 );
   memcpy( &ctx.groestl, &myrgr_4way_ctx.groestl, sizeof(hashState_groestl) );

   intrlv_8x32( vhash, hash0, hash1, hash2, hash3,
                hash4, hash5, hash6, hash7, 512 );
   memcpy( &ctx.groestl, &myrgr_8way_ctx.groestl, sizeof(hashState_groestl) );

#endif

   intrlv_8x32_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5,
                    hash6, hash7 );

   sha256_8way_update( &ctx.sha, vhash, 64 );
   sha256_8way_close( &ctx.sha, output );
}
@@ -143,7 +142,7 @@ int scanhash_myriad_8way( struct work *work, uint32_t max_nonce,
         if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
         {
            pdata[19] = n + lane;
            submit_lane_solution( work, lane_hash, mythr, lane );
            submit_solution( work, lane_hash, mythr );
         }
      }
      n += 8;
@@ -226,7 +225,7 @@ int scanhash_myriad_4way( struct work *work, uint32_t max_nonce,
         if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
         {
            pdata[19] = n + lane;
            submit_lane_solution( work, lane_hash, mythr, lane );
            submit_solution( work, lane_hash, mythr );
         }
      }
      n += 4;

@@ -545,39 +545,33 @@ static const sph_u32 T512[64][16] = {
 #define sE   c7
 #define sF   m7


#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

// Hamsi 8 way
// Hamsi 8 way AVX512

// Intel says _mm512_movepi64_mask has (1L/1T) timing while
// _mm512_cmplt_epi64_mask has (3L/1T) timing; however, when tested hashing
// X13 on an i9-9940x, cmplt with zero was 3% faster than movepi.

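// For illustration, a sketch of the two equivalent forms being compared
// above (not part of this commit): both collapse the sign bit of each
// 64-bit lane into a __mmask8, so either could drive the masked XORs below.
//
//    __mmask8 m1 = _mm512_movepi64_mask( v );                  // AVX512DQ
//    __mmask8 m2 = _mm512_cmplt_epi64_mask( v, _mm512_setzero_si512() );
//
// m1 == m2 for every v; this file uses the cmplt form per the timing note.
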
#define INPUT_BIG8 \
do { \
  __m512i db = *buf; \
  const uint64_t *tp = (uint64_t*)&T512[0][0]; \
  m0 = m1 = m2 = m3 = m4 = m5 = m6 = m7 = m512_zero; \
  __m512i db = _mm512_ror_epi64( *buf, 1 ); \
  const __m512i zero = m512_zero; \
  const uint64_t *tp = (const uint64_t*)T512; \
  m0 = m1 = m2 = m3 = m4 = m5 = m6 = m7 = zero; \
  for ( int u = 0; u < 64; u++ ) \
  { \
    __m512i dm = _mm512_and_si512( db, m512_one_64 ); \
    dm = mm512_negate_32( _mm512_or_si512( dm, \
                          _mm512_slli_epi64( dm, 32 ) ) ); \
    m0 = _mm512_xor_si512( m0, _mm512_and_si512( dm, \
                           m512_const1_64( tp[0] ) ) ); \
    m1 = _mm512_xor_si512( m1, _mm512_and_si512( dm, \
                           m512_const1_64( tp[1] ) ) ); \
    m2 = _mm512_xor_si512( m2, _mm512_and_si512( dm, \
                           m512_const1_64( tp[2] ) ) ); \
    m3 = _mm512_xor_si512( m3, _mm512_and_si512( dm, \
                           m512_const1_64( tp[3] ) ) ); \
    m4 = _mm512_xor_si512( m4, _mm512_and_si512( dm, \
                           m512_const1_64( tp[4] ) ) ); \
    m5 = _mm512_xor_si512( m5, _mm512_and_si512( dm, \
                           m512_const1_64( tp[5] ) ) ); \
    m6 = _mm512_xor_si512( m6, _mm512_and_si512( dm, \
                           m512_const1_64( tp[6] ) ) ); \
    m7 = _mm512_xor_si512( m7, _mm512_and_si512( dm, \
                           m512_const1_64( tp[7] ) ) ); \
    const __mmask8 dm = _mm512_cmplt_epi64_mask( db, zero ); \
    m0 = _mm512_mask_xor_epi64( m0, dm, m0, m512_const1_64( tp[0] ) ); \
    m1 = _mm512_mask_xor_epi64( m1, dm, m1, m512_const1_64( tp[1] ) ); \
    m2 = _mm512_mask_xor_epi64( m2, dm, m2, m512_const1_64( tp[2] ) ); \
    m3 = _mm512_mask_xor_epi64( m3, dm, m3, m512_const1_64( tp[3] ) ); \
    m4 = _mm512_mask_xor_epi64( m4, dm, m4, m512_const1_64( tp[4] ) ); \
    m5 = _mm512_mask_xor_epi64( m5, dm, m5, m512_const1_64( tp[5] ) ); \
    m6 = _mm512_mask_xor_epi64( m6, dm, m6, m512_const1_64( tp[6] ) ); \
    m7 = _mm512_mask_xor_epi64( m7, dm, m7, m512_const1_64( tp[7] ) ); \
    db = _mm512_ror_epi64( db, 1 ); \
    tp += 8; \
    db = _mm512_srli_epi64( db, 1 ); \
  } \
} while (0)

@@ -585,20 +579,13 @@ do { \
do { \
  __m512i t; \
  t = a; \
  a = _mm512_and_si512( a, c ); \
  a = _mm512_xor_si512( a, d ); \
  c = _mm512_xor_si512( c, b ); \
  c = _mm512_xor_si512( c, a ); \
  d = _mm512_or_si512( d, t ); \
  d = _mm512_xor_si512( d, b ); \
  a = mm512_xorand( d, a, c ); \
  c = mm512_xor3( a, b, c ); \
  b = mm512_xoror( b, d, t ); \
  t = _mm512_xor_si512( t, c ); \
  b = d; \
  d = _mm512_or_si512( d, t ); \
  d = _mm512_xor_si512( d, a ); \
  a = _mm512_and_si512( a, b ); \
  t = _mm512_xor_si512( t, a ); \
  b = _mm512_xor_si512( b, d ); \
  b = _mm512_xor_si512( b, t ); \
  d = mm512_xoror( a, b, t ); \
  t = mm512_xorand( t, a, b ); \
  b = mm512_xor3( b, d, t ); \
  a = c; \
  c = b; \
  b = d; \
@@ -609,14 +596,12 @@ do { \
do { \
  a = mm512_rol_32( a, 13 ); \
  c = mm512_rol_32( c,  3 ); \
  b = _mm512_xor_si512( b, _mm512_xor_si512( a, c ) ); \
  d = _mm512_xor_si512( d, _mm512_xor_si512( c, \
                        _mm512_slli_epi32( a, 3 ) ) ); \
  b = mm512_xor3( a, b, c ); \
  d = mm512_xor3( d, c, _mm512_slli_epi32( a, 3 ) ); \
  b = mm512_rol_32( b, 1 ); \
  d = mm512_rol_32( d, 7 ); \
  a = _mm512_xor_si512( a, _mm512_xor_si512( b, d ) ); \
  c = _mm512_xor_si512( c, _mm512_xor_si512( d, \
                        _mm512_slli_epi32( b, 7 ) ) ); \
  a = mm512_xor3( a, b, d ); \
  c = mm512_xor3( c, d, _mm512_slli_epi32( b, 7 ) ); \
  a = mm512_rol_32( a,  5 ); \
  c = mm512_rol_32( c, 22 ); \
} while (0)
@@ -626,162 +611,192 @@ do { \

#define READ_STATE_BIG8(sc) \
do { \
  c0 = sc->h[0x0]; \
  c1 = sc->h[0x1]; \
  c2 = sc->h[0x2]; \
  c3 = sc->h[0x3]; \
  c4 = sc->h[0x4]; \
  c5 = sc->h[0x5]; \
  c6 = sc->h[0x6]; \
  c7 = sc->h[0x7]; \
  c0 = sc->h[0]; \
  c1 = sc->h[1]; \
  c2 = sc->h[2]; \
  c3 = sc->h[3]; \
  c4 = sc->h[4]; \
  c5 = sc->h[5]; \
  c6 = sc->h[6]; \
  c7 = sc->h[7]; \
} while (0)

#define WRITE_STATE_BIG8(sc) \
do { \
  sc->h[0x0] = c0; \
  sc->h[0x1] = c1; \
  sc->h[0x2] = c2; \
  sc->h[0x3] = c3; \
  sc->h[0x4] = c4; \
  sc->h[0x5] = c5; \
  sc->h[0x6] = c6; \
  sc->h[0x7] = c7; \
  sc->h[0] = c0; \
  sc->h[1] = c1; \
  sc->h[2] = c2; \
  sc->h[3] = c3; \
  sc->h[4] = c4; \
  sc->h[5] = c5; \
  sc->h[6] = c6; \
  sc->h[7] = c7; \
} while (0)


#define ROUND_BIG8(rc, alpha) \
#define ROUND_BIG8( alpha ) \
do { \
  __m512i t0, t1, t2, t3; \
  s0 = _mm512_xor_si512( s0, m512_const1_64( \
                 ( (uint64_t)(rc) << 32 ) ^ ( (uint64_t*)(alpha) )[ 0] ) ); \
  s1 = _mm512_xor_si512( s1, m512_const1_64( ( (uint64_t*)(alpha) )[ 1] ) ); \
  s2 = _mm512_xor_si512( s2, m512_const1_64( ( (uint64_t*)(alpha) )[ 2] ) ); \
  s3 = _mm512_xor_si512( s3, m512_const1_64( ( (uint64_t*)(alpha) )[ 3] ) ); \
  s4 = _mm512_xor_si512( s4, m512_const1_64( ( (uint64_t*)(alpha) )[ 4] ) ); \
  s5 = _mm512_xor_si512( s5, m512_const1_64( ( (uint64_t*)(alpha) )[ 5] ) ); \
  s6 = _mm512_xor_si512( s6, m512_const1_64( ( (uint64_t*)(alpha) )[ 6] ) ); \
  s7 = _mm512_xor_si512( s7, m512_const1_64( ( (uint64_t*)(alpha) )[ 7] ) ); \
  s8 = _mm512_xor_si512( s8, m512_const1_64( ( (uint64_t*)(alpha) )[ 8] ) ); \
  s9 = _mm512_xor_si512( s9, m512_const1_64( ( (uint64_t*)(alpha) )[ 9] ) ); \
  sA = _mm512_xor_si512( sA, m512_const1_64( ( (uint64_t*)(alpha) )[10] ) ); \
  sB = _mm512_xor_si512( sB, m512_const1_64( ( (uint64_t*)(alpha) )[11] ) ); \
  sC = _mm512_xor_si512( sC, m512_const1_64( ( (uint64_t*)(alpha) )[12] ) ); \
  sD = _mm512_xor_si512( sD, m512_const1_64( ( (uint64_t*)(alpha) )[13] ) ); \
  sE = _mm512_xor_si512( sE, m512_const1_64( ( (uint64_t*)(alpha) )[14] ) ); \
  sF = _mm512_xor_si512( sF, m512_const1_64( ( (uint64_t*)(alpha) )[15] ) ); \
  s0 = _mm512_xor_si512( s0, alpha[ 0] ); /* m0 */ \
  s1 = _mm512_xor_si512( s1, alpha[ 1] ); /* c0 */ \
  s2 = _mm512_xor_si512( s2, alpha[ 2] ); /* m1 */ \
  s3 = _mm512_xor_si512( s3, alpha[ 3] ); /* c1 */ \
  s4 = _mm512_xor_si512( s4, alpha[ 4] ); /* c2 */ \
  s5 = _mm512_xor_si512( s5, alpha[ 5] ); /* m2 */ \
  s6 = _mm512_xor_si512( s6, alpha[ 6] ); /* c3 */ \
  s7 = _mm512_xor_si512( s7, alpha[ 7] ); /* m3 */ \
  s8 = _mm512_xor_si512( s8, alpha[ 8] ); /* m4 */ \
  s9 = _mm512_xor_si512( s9, alpha[ 9] ); /* c4 */ \
  sA = _mm512_xor_si512( sA, alpha[10] ); /* m5 */ \
  sB = _mm512_xor_si512( sB, alpha[11] ); /* c5 */ \
  sC = _mm512_xor_si512( sC, alpha[12] ); /* c6 */ \
  sD = _mm512_xor_si512( sD, alpha[13] ); /* m6 */ \
  sE = _mm512_xor_si512( sE, alpha[14] ); /* c7 */ \
  sF = _mm512_xor_si512( sF, alpha[15] ); /* m7 */ \
  \
  SBOX8( s0, s4, s8, sC ); \
  SBOX8( s1, s5, s9, sD ); \
  SBOX8( s2, s6, sA, sE ); \
  SBOX8( s3, s7, sB, sF ); \
  SBOX8( s0, s4, s8, sC ); /* ( m0, c2, m4, c6 ) */ \
  SBOX8( s1, s5, s9, sD ); /* ( c0, m2, c4, m6 ) */ \
  SBOX8( s2, s6, sA, sE ); /* ( m1, c3, m5, c7 ) */ \
  SBOX8( s3, s7, sB, sF ); /* ( c1, m3, c5, m7 ) */ \
  \
  t1 = _mm512_mask_blend_epi32( 0xaaaa, _mm512_bsrli_epi128( s4, 4 ), \
                                        _mm512_bslli_epi128( s5, 4 ) ); \
  t3 = _mm512_mask_blend_epi32( 0xaaaa, _mm512_bsrli_epi128( sD, 4 ), \
                                        _mm512_bslli_epi128( sE, 4 ) ); \
  s4 = mm512_swap64_32( s4 ); \
  s5 = mm512_swap64_32( s5 ); \
  sD = mm512_swap64_32( sD ); \
  sE = mm512_swap64_32( sE ); \
  t1 = _mm512_mask_blend_epi32( 0xaaaa, s4, s5 ); \
  t3 = _mm512_mask_blend_epi32( 0xaaaa, sD, sE ); \
  L8( s0, t1, s9, t3 ); \
  s4 = _mm512_mask_blend_epi32( 0xaaaa, s4, _mm512_bslli_epi128( t1, 4 ) ); \
  s5 = _mm512_mask_blend_epi32( 0x5555, s5, _mm512_bsrli_epi128( t1, 4 ) ); \
  sD = _mm512_mask_blend_epi32( 0xaaaa, sD, _mm512_bslli_epi128( t3, 4 ) ); \
  sE = _mm512_mask_blend_epi32( 0x5555, sE, _mm512_bsrli_epi128( t3, 4 ) ); \
  s4 = _mm512_mask_blend_epi32( 0x5555, s4, t1 ); \
  s5 = _mm512_mask_blend_epi32( 0xaaaa, s5, t1 ); \
  sD = _mm512_mask_blend_epi32( 0x5555, sD, t3 ); \
  sE = _mm512_mask_blend_epi32( 0xaaaa, sE, t3 ); \
  \
  t1 = _mm512_mask_blend_epi32( 0xaaaa, _mm512_bsrli_epi128( s5, 4 ), \
                                        _mm512_bslli_epi128( s6, 4 ) ); \
  t3 = _mm512_mask_blend_epi32( 0xaaaa, _mm512_bsrli_epi128( sE, 4 ), \
                                        _mm512_bslli_epi128( sF, 4 ) ); \
  s6 = mm512_swap64_32( s6 ); \
  sF = mm512_swap64_32( sF ); \
  t1 = _mm512_mask_blend_epi32( 0xaaaa, s5, s6 ); \
  t3 = _mm512_mask_blend_epi32( 0xaaaa, sE, sF ); \
  L8( s1, t1, sA, t3 ); \
  s5 = _mm512_mask_blend_epi32( 0xaaaa, s5, _mm512_bslli_epi128( t1, 4 ) ); \
  s6 = _mm512_mask_blend_epi32( 0x5555, s6, _mm512_bsrli_epi128( t1, 4 ) ); \
  sE = _mm512_mask_blend_epi32( 0xaaaa, sE, _mm512_bslli_epi128( t3, 4 ) ); \
  sF = _mm512_mask_blend_epi32( 0x5555, sF, _mm512_bsrli_epi128( t3, 4 ) ); \
  s5 = _mm512_mask_blend_epi32( 0x5555, s5, t1 ); \
  s6 = _mm512_mask_blend_epi32( 0xaaaa, s6, t1 ); \
  sE = _mm512_mask_blend_epi32( 0x5555, sE, t3 ); \
  sF = _mm512_mask_blend_epi32( 0xaaaa, sF, t3 ); \
  \
  t1 = _mm512_mask_blend_epi32( 0xaaaa, _mm512_bsrli_epi128( s6, 4 ), \
                                        _mm512_bslli_epi128( s7, 4 ) ); \
  t3 = _mm512_mask_blend_epi32( 0xaaaa, _mm512_bsrli_epi128( sF, 4 ), \
                                        _mm512_bslli_epi128( sC, 4 ) ); \
  s7 = mm512_swap64_32( s7 ); \
  sC = mm512_swap64_32( sC ); \
  t1 = _mm512_mask_blend_epi32( 0xaaaa, s6, s7 ); \
  t3 = _mm512_mask_blend_epi32( 0xaaaa, sF, sC ); \
  L8( s2, t1, sB, t3 ); \
  s6 = _mm512_mask_blend_epi32( 0xaaaa, s6, _mm512_bslli_epi128( t1, 4 ) ); \
  s7 = _mm512_mask_blend_epi32( 0x5555, s7, _mm512_bsrli_epi128( t1, 4 ) ); \
  sF = _mm512_mask_blend_epi32( 0xaaaa, sF, _mm512_bslli_epi128( t3, 4 ) ); \
  sC = _mm512_mask_blend_epi32( 0x5555, sC, _mm512_bsrli_epi128( t3, 4 ) ); \
  s6 = _mm512_mask_blend_epi32( 0x5555, s6, t1 ); \
  s7 = _mm512_mask_blend_epi32( 0xaaaa, s7, t1 ); \
  sF = _mm512_mask_blend_epi32( 0x5555, sF, t3 ); \
  sC = _mm512_mask_blend_epi32( 0xaaaa, sC, t3 ); \
  s6 = mm512_swap64_32( s6 ); \
  sF = mm512_swap64_32( sF ); \
  \
  t1 = _mm512_mask_blend_epi32( 0xaaaa, _mm512_bsrli_epi128( s7, 4 ), \
                                        _mm512_bslli_epi128( s4, 4 ) ); \
  t3 = _mm512_mask_blend_epi32( 0xaaaa, _mm512_bsrli_epi128( sC, 4 ), \
                                        _mm512_bslli_epi128( sD, 4 ) ); \
  t1 = _mm512_mask_blend_epi32( 0xaaaa, s7, s4 ); \
  t3 = _mm512_mask_blend_epi32( 0xaaaa, sC, sD ); \
  L8( s3, t1, s8, t3 ); \
  s7 = _mm512_mask_blend_epi32( 0xaaaa, s7, _mm512_bslli_epi128( t1, 4 ) ); \
  s4 = _mm512_mask_blend_epi32( 0x5555, s4, _mm512_bsrli_epi128( t1, 4 ) ); \
  sC = _mm512_mask_blend_epi32( 0xaaaa, sC, _mm512_bslli_epi128( t3, 4 ) ); \
  sD = _mm512_mask_blend_epi32( 0x5555, sD, _mm512_bsrli_epi128( t3, 4 ) ); \
  s7 = _mm512_mask_blend_epi32( 0x5555, s7, t1 ); \
  s4 = _mm512_mask_blend_epi32( 0xaaaa, s4, t1 ); \
  sC = _mm512_mask_blend_epi32( 0x5555, sC, t3 ); \
  sD = _mm512_mask_blend_epi32( 0xaaaa, sD, t3 ); \
  s7 = mm512_swap64_32( s7 ); \
  sC = mm512_swap64_32( sC ); \
  \
  t0 = _mm512_mask_blend_epi32( 0xaaaa, s0, _mm512_bslli_epi128( s8, 4 ) ); \
  t0 = _mm512_mask_blend_epi32( 0xaaaa, s0, mm512_swap64_32( s8 ) ); \
  t1 = _mm512_mask_blend_epi32( 0xaaaa, s1, s9 ); \
  t2 = _mm512_mask_blend_epi32( 0xaaaa, _mm512_bsrli_epi128( s2, 4 ), sA ); \
  t3 = _mm512_mask_blend_epi32( 0xaaaa, _mm512_bsrli_epi128( s3, 4 ), \
                                        _mm512_bslli_epi128( sB, 4 ) ); \
  t2 = _mm512_mask_blend_epi32( 0xaaaa, mm512_swap64_32( s2 ), sA ); \
  t3 = _mm512_mask_blend_epi32( 0x5555, s3, sB ); \
  t3 = mm512_swap64_32( t3 ); \
  L8( t0, t1, t2, t3 ); \
  t3 = mm512_swap64_32( t3 ); \
  s0 = _mm512_mask_blend_epi32( 0x5555, s0, t0 ); \
  s8 = _mm512_mask_blend_epi32( 0x5555, s8, _mm512_bsrli_epi128( t0, 4 ) ); \
  s8 = _mm512_mask_blend_epi32( 0x5555, s8, mm512_swap64_32( t0 ) ); \
  s1 = _mm512_mask_blend_epi32( 0x5555, s1, t1 ); \
  s9 = _mm512_mask_blend_epi32( 0xaaaa, s9, t1 ); \
  s2 = _mm512_mask_blend_epi32( 0xaaaa, s2, _mm512_bslli_epi128( t2, 4 ) ); \
  s2 = _mm512_mask_blend_epi32( 0xaaaa, s2, mm512_swap64_32( t2 ) ); \
  sA = _mm512_mask_blend_epi32( 0xaaaa, sA, t2 ); \
  s3 = _mm512_mask_blend_epi32( 0xaaaa, s3, _mm512_bslli_epi128( t3, 4 ) ); \
  sB = _mm512_mask_blend_epi32( 0x5555, sB, _mm512_bsrli_epi128( t3, 4 ) ); \
  s3 = _mm512_mask_blend_epi32( 0xaaaa, s3, t3 ); \
  sB = _mm512_mask_blend_epi32( 0x5555, sB, t3 ); \
  \
  t0 = _mm512_mask_blend_epi32( 0xaaaa, _mm512_bsrli_epi128( s4, 4 ), sC ); \
  t1 = _mm512_mask_blend_epi32( 0xaaaa, _mm512_bsrli_epi128( s5, 4 ), \
                                        _mm512_bslli_epi128( sD, 4 ) ); \
  t2 = _mm512_mask_blend_epi32( 0xaaaa, s6, _mm512_bslli_epi128( sE, 4 ) ); \
  t0 = _mm512_mask_blend_epi32( 0xaaaa, s4, sC ); \
  t1 = _mm512_mask_blend_epi32( 0xaaaa, s5, sD ); \
  t2 = _mm512_mask_blend_epi32( 0xaaaa, s6, sE ); \
  t3 = _mm512_mask_blend_epi32( 0xaaaa, s7, sF ); \
  L8( t0, t1, t2, t3 ); \
  s4 = _mm512_mask_blend_epi32( 0xaaaa, s4, _mm512_bslli_epi128( t0, 4 ) ); \
  s4 = _mm512_mask_blend_epi32( 0x5555, s4, t0 ); \
  sC = _mm512_mask_blend_epi32( 0xaaaa, sC, t0 ); \
  s5 = _mm512_mask_blend_epi32( 0xaaaa, s5, _mm512_bslli_epi128( t1, 4 ) ); \
  sD = _mm512_mask_blend_epi32( 0x5555, sD, _mm512_bsrli_epi128( t1, 4 ) ); \
  s5 = _mm512_mask_blend_epi32( 0x5555, s5, t1 ); \
  sD = _mm512_mask_blend_epi32( 0xaaaa, sD, t1 ); \
  s6 = _mm512_mask_blend_epi32( 0x5555, s6, t2 ); \
  sE = _mm512_mask_blend_epi32( 0x5555, sE, _mm512_bsrli_epi128( t2, 4 ) ); \
  sE = _mm512_mask_blend_epi32( 0xaaaa, sE, t2 ); \
  s7 = _mm512_mask_blend_epi32( 0x5555, s7, t3 ); \
  sF = _mm512_mask_blend_epi32( 0xaaaa, sF, t3 ); \
  s4 = mm512_swap64_32( s4 ); \
  s5 = mm512_swap64_32( s5 ); \
  sD = mm512_swap64_32( sD ); \
  sE = mm512_swap64_32( sE ); \
} while (0)

#define P_BIG8 \
do { \
  ROUND_BIG8(0, alpha_n); \
  ROUND_BIG8(1, alpha_n); \
  ROUND_BIG8(2, alpha_n); \
  ROUND_BIG8(3, alpha_n); \
  ROUND_BIG8(4, alpha_n); \
  ROUND_BIG8(5, alpha_n); \
  __m512i alpha[16]; \
  const uint64_t A0 = ( (uint64_t*)alpha_n )[0]; \
  for( int i = 0; i < 16; i++ ) \
     alpha[i] = m512_const1_64( ( (uint64_t*)alpha_n )[i] ); \
  ROUND_BIG8( alpha ); \
  alpha[0] = m512_const1_64( (1ULL << 32) ^ A0 ); \
  ROUND_BIG8( alpha ); \
  alpha[0] = m512_const1_64( (2ULL << 32) ^ A0 ); \
  ROUND_BIG8( alpha ); \
  alpha[0] = m512_const1_64( (3ULL << 32) ^ A0 ); \
  ROUND_BIG8( alpha ); \
  alpha[0] = m512_const1_64( (4ULL << 32) ^ A0 ); \
  ROUND_BIG8( alpha ); \
  alpha[0] = m512_const1_64( (5ULL << 32) ^ A0 ); \
  ROUND_BIG8( alpha ); \
} while (0)
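
/* Why only alpha[0] is rebuilt between rounds above: in this code the round
   number perturbs just the first alpha constant (see the old
   ROUND_BIG8(rc, alpha), which XORed (rc << 32) into s0 only), so the other
   15 broadcast constants are loop-invariant. A scalar sketch of the
   relation, for illustration only:

      uint64_t alpha0_for_round( uint64_t a0, uint32_t r )
      {
         return ( (uint64_t)r << 32 ) ^ a0;  // round counter in high 32 bits
      }

   which is exactly the (rULL << 32) ^ A0 expressions used above. */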
|
||||
|
||||
#define PF_BIG8 \
|
||||
do { \
|
||||
ROUND_BIG8( 0, alpha_f); \
|
||||
ROUND_BIG8( 1, alpha_f); \
|
||||
ROUND_BIG8( 2, alpha_f); \
|
||||
ROUND_BIG8( 3, alpha_f); \
|
||||
ROUND_BIG8( 4, alpha_f); \
|
||||
ROUND_BIG8( 5, alpha_f); \
|
||||
ROUND_BIG8( 6, alpha_f); \
|
||||
ROUND_BIG8( 7, alpha_f); \
|
||||
ROUND_BIG8( 8, alpha_f); \
|
||||
ROUND_BIG8( 9, alpha_f); \
|
||||
ROUND_BIG8(10, alpha_f); \
|
||||
ROUND_BIG8(11, alpha_f); \
|
||||
__m512i alpha[16]; \
|
||||
const uint64_t A0 = ( (uint64_t*)alpha_f )[0]; \
|
||||
for( int i = 0; i < 16; i++ ) \
|
||||
alpha[i] = m512_const1_64( ( (uint64_t*)alpha_f )[i] ); \
|
||||
ROUND_BIG8( alpha ); \
|
||||
alpha[0] = m512_const1_64( ( 1ULL << 32) ^ A0 ); \
|
||||
ROUND_BIG8( alpha ); \
|
||||
alpha[0] = m512_const1_64( ( 2ULL << 32) ^ A0 ); \
|
||||
ROUND_BIG8( alpha ); \
|
||||
alpha[0] = m512_const1_64( ( 3ULL << 32) ^ A0 ); \
|
||||
ROUND_BIG8( alpha ); \
|
||||
alpha[0] = m512_const1_64( ( 4ULL << 32) ^ A0 ); \
|
||||
ROUND_BIG8( alpha ); \
|
||||
alpha[0] = m512_const1_64( ( 5ULL << 32) ^ A0 ); \
|
||||
ROUND_BIG8( alpha ); \
|
||||
alpha[0] = m512_const1_64( ( 6ULL << 32) ^ A0 ); \
|
||||
ROUND_BIG8( alpha ); \
|
||||
alpha[0] = m512_const1_64( ( 7ULL << 32) ^ A0 ); \
|
||||
ROUND_BIG8( alpha ); \
|
||||
alpha[0] = m512_const1_64( ( 8ULL << 32) ^ A0 ); \
|
||||
ROUND_BIG8( alpha ); \
|
||||
alpha[0] = m512_const1_64( ( 9ULL << 32) ^ A0 ); \
|
||||
ROUND_BIG8( alpha ); \
|
||||
alpha[0] = m512_const1_64( (10ULL << 32) ^ A0 ); \
|
||||
ROUND_BIG8( alpha ); \
|
||||
alpha[0] = m512_const1_64( (11ULL << 32) ^ A0 ); \
|
||||
ROUND_BIG8( alpha ); \
|
||||
} while (0)
|
||||
|
||||
#define T_BIG8 \
|
||||
do { /* order is important */ \
|
||||
c7 = sc->h[ 0x7 ] = _mm512_xor_si512( sc->h[ 0x7 ], sB ); \
|
||||
c6 = sc->h[ 0x6 ] = _mm512_xor_si512( sc->h[ 0x6 ], sA ); \
|
||||
c5 = sc->h[ 0x5 ] = _mm512_xor_si512( sc->h[ 0x5 ], s9 ); \
|
||||
c4 = sc->h[ 0x4 ] = _mm512_xor_si512( sc->h[ 0x4 ], s8 ); \
|
||||
c3 = sc->h[ 0x3 ] = _mm512_xor_si512( sc->h[ 0x3 ], s3 ); \
|
||||
c2 = sc->h[ 0x2 ] = _mm512_xor_si512( sc->h[ 0x2 ], s2 ); \
|
||||
c1 = sc->h[ 0x1 ] = _mm512_xor_si512( sc->h[ 0x1 ], s1 ); \
|
||||
c0 = sc->h[ 0x0 ] = _mm512_xor_si512( sc->h[ 0x0 ], s0 ); \
|
||||
c7 = sc->h[ 7 ] = _mm512_xor_si512( sc->h[ 7 ], sB ); /* c5 */ \
|
||||
c6 = sc->h[ 6 ] = _mm512_xor_si512( sc->h[ 6 ], sA ); /* m5 */ \
|
||||
c5 = sc->h[ 5 ] = _mm512_xor_si512( sc->h[ 5 ], s9 ); /* c4 */ \
|
||||
c4 = sc->h[ 4 ] = _mm512_xor_si512( sc->h[ 4 ], s8 ); /* m4 */ \
|
||||
c3 = sc->h[ 3 ] = _mm512_xor_si512( sc->h[ 3 ], s3 ); /* c1 */ \
|
||||
c2 = sc->h[ 2 ] = _mm512_xor_si512( sc->h[ 2 ], s2 ); /* m1 */ \
|
||||
c1 = sc->h[ 1 ] = _mm512_xor_si512( sc->h[ 1 ], s1 ); /* c0 */ \
|
||||
c0 = sc->h[ 0 ] = _mm512_xor_si512( sc->h[ 0 ], s0 ); /* m0 */ \
|
||||
} while (0)

void hamsi_8way_big( hamsi_8way_big_context *sc, __m512i *buf, size_t num )
@@ -818,7 +833,6 @@ void hamsi_8way_big_final( hamsi_8way_big_context *sc, __m512i *buf )
WRITE_STATE_BIG8( sc );
}

void hamsi512_8way_init( hamsi_8way_big_context *sc )
{
sc->partial_len = 0;
@@ -849,13 +863,11 @@ void hamsi512_8way_update( hamsi_8way_big_context *sc, const void *data,
void hamsi512_8way_close( hamsi_8way_big_context *sc, void *dst )
{
__m512i pad[1];
int ch, cl;
uint32_t ch, cl;

sph_enc32be( &ch, sc->count_high );
sph_enc32be( &cl, sc->count_low + ( sc->partial_len << 3 ) );
pad[0] = _mm512_set_epi32( cl, ch, cl, ch, cl, ch, cl, ch,
                           cl, ch, cl, ch, cl, ch, cl, ch );
// pad[0] = m512_const2_32( cl, ch );
pad[0] = _mm512_set1_epi64( ((uint64_t)cl << 32 ) | (uint64_t)ch );
sc->buf[0] = m512_const1_64( 0x80 );
hamsi_8way_big( sc, sc->buf, 1 );
hamsi_8way_big_final( sc, pad );
@@ -863,22 +875,19 @@ void hamsi512_8way_close( hamsi_8way_big_context *sc, void *dst )
mm512_block_bswap_32( (__m512i*)dst, sc->h );
}

#endif // AVX512

// Hamsi 4 way
// Hamsi 4 way AVX2

#define INPUT_BIG \
do { \
__m256i db = *buf; \
const uint64_t *tp = (uint64_t*)&T512[0][0]; \
m0 = m1 = m2 = m3 = m4 = m5 = m6 = m7 = m256_zero; \
for ( int u = 0; u < 64; u++ ) \
const __m256i zero = m256_zero; \
const uint64_t *tp = (const uint64_t*)T512; \
m0 = m1 = m2 = m3 = m4 = m5 = m6 = m7 = zero; \
for ( int u = 63; u >= 0; u-- ) \
{ \
__m256i dm = _mm256_and_si256( db, m256_one_64 ) ; \
dm = mm256_negate_32( _mm256_or_si256( dm, \
_mm256_slli_epi64( dm, 32 ) ) ); \
__m256i dm = _mm256_cmpgt_epi64( zero, _mm256_slli_epi64( db, u ) ); \
m0 = _mm256_xor_si256( m0, _mm256_and_si256( dm, \
m256_const1_64( tp[0] ) ) ); \
m1 = _mm256_xor_si256( m1, _mm256_and_si256( dm, \
@@ -896,7 +905,6 @@ do { \
m7 = _mm256_xor_si256( m7, _mm256_and_si256( dm, \
m256_const1_64( tp[7] ) ) ); \
tp += 8; \
db = _mm256_srli_epi64( db, 1 ); \
} \
} while (0)
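/* The rewritten INPUT_BIG tests message bit u by shifting it into the
   sign position: _mm256_cmpgt_epi64( zero, db << u ) yields an all-ones
   64-bit lane when the bit is set, replacing the old AND/negate mask
   construction and the running right shift of db. */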

@@ -945,180 +953,192 @@ do { \

#define READ_STATE_BIG(sc) \
do { \
c0 = sc->h[0x0]; \
c1 = sc->h[0x1]; \
c2 = sc->h[0x2]; \
c3 = sc->h[0x3]; \
c4 = sc->h[0x4]; \
c5 = sc->h[0x5]; \
c6 = sc->h[0x6]; \
c7 = sc->h[0x7]; \
c0 = sc->h[0]; \
c1 = sc->h[1]; \
c2 = sc->h[2]; \
c3 = sc->h[3]; \
c4 = sc->h[4]; \
c5 = sc->h[5]; \
c6 = sc->h[6]; \
c7 = sc->h[7]; \
} while (0)

#define WRITE_STATE_BIG(sc) \
do { \
sc->h[0x0] = c0; \
sc->h[0x1] = c1; \
sc->h[0x2] = c2; \
sc->h[0x3] = c3; \
sc->h[0x4] = c4; \
sc->h[0x5] = c5; \
sc->h[0x6] = c6; \
sc->h[0x7] = c7; \
sc->h[0] = c0; \
sc->h[1] = c1; \
sc->h[2] = c2; \
sc->h[3] = c3; \
sc->h[4] = c4; \
sc->h[5] = c5; \
sc->h[6] = c6; \
sc->h[7] = c7; \
} while (0)

/*
#define s0 m0
#define s1 c0
#define s2 m1
#define s3 c1
#define s4 c2
#define s5 m2
#define s6 c3
#define s7 m3
#define s8 m4
#define s9 c4
#define sA m5
#define sB c5
#define sC c6
#define sD m6
#define sE c7
#define sF m7
*/

#define ROUND_BIG(rc, alpha) \
#define ROUND_BIG( alpha ) \
do { \
__m256i t0, t1, t2, t3; \
s0 = _mm256_xor_si256( s0, m256_const1_64( \
( (uint64_t)(rc) << 32 ) ^ ( (uint64_t*)(alpha) )[ 0] ) ); \
s1 = _mm256_xor_si256( s1, m256_const1_64( ( (uint64_t*)(alpha) )[ 1] ) ); \
s2 = _mm256_xor_si256( s2, m256_const1_64( ( (uint64_t*)(alpha) )[ 2] ) ); \
s3 = _mm256_xor_si256( s3, m256_const1_64( ( (uint64_t*)(alpha) )[ 3] ) ); \
s4 = _mm256_xor_si256( s4, m256_const1_64( ( (uint64_t*)(alpha) )[ 4] ) ); \
s5 = _mm256_xor_si256( s5, m256_const1_64( ( (uint64_t*)(alpha) )[ 5] ) ); \
s6 = _mm256_xor_si256( s6, m256_const1_64( ( (uint64_t*)(alpha) )[ 6] ) ); \
s7 = _mm256_xor_si256( s7, m256_const1_64( ( (uint64_t*)(alpha) )[ 7] ) ); \
s8 = _mm256_xor_si256( s8, m256_const1_64( ( (uint64_t*)(alpha) )[ 8] ) ); \
s9 = _mm256_xor_si256( s9, m256_const1_64( ( (uint64_t*)(alpha) )[ 9] ) ); \
sA = _mm256_xor_si256( sA, m256_const1_64( ( (uint64_t*)(alpha) )[10] ) ); \
sB = _mm256_xor_si256( sB, m256_const1_64( ( (uint64_t*)(alpha) )[11] ) ); \
sC = _mm256_xor_si256( sC, m256_const1_64( ( (uint64_t*)(alpha) )[12] ) ); \
sD = _mm256_xor_si256( sD, m256_const1_64( ( (uint64_t*)(alpha) )[13] ) ); \
sE = _mm256_xor_si256( sE, m256_const1_64( ( (uint64_t*)(alpha) )[14] ) ); \
sF = _mm256_xor_si256( sF, m256_const1_64( ( (uint64_t*)(alpha) )[15] ) ); \
s0 = _mm256_xor_si256( s0, alpha[ 0] ); \
s1 = _mm256_xor_si256( s1, alpha[ 1] ); \
s2 = _mm256_xor_si256( s2, alpha[ 2] ); \
s3 = _mm256_xor_si256( s3, alpha[ 3] ); \
s4 = _mm256_xor_si256( s4, alpha[ 4] ); \
s5 = _mm256_xor_si256( s5, alpha[ 5] ); \
s6 = _mm256_xor_si256( s6, alpha[ 6] ); \
s7 = _mm256_xor_si256( s7, alpha[ 7] ); \
s8 = _mm256_xor_si256( s8, alpha[ 8] ); \
s9 = _mm256_xor_si256( s9, alpha[ 9] ); \
sA = _mm256_xor_si256( sA, alpha[10] ); \
sB = _mm256_xor_si256( sB, alpha[11] ); \
sC = _mm256_xor_si256( sC, alpha[12] ); \
sD = _mm256_xor_si256( sD, alpha[13] ); \
sE = _mm256_xor_si256( sE, alpha[14] ); \
sF = _mm256_xor_si256( sF, alpha[15] ); \
\
SBOX( s0, s4, s8, sC ); \
SBOX( s1, s5, s9, sD ); \
SBOX( s2, s6, sA, sE ); \
SBOX( s3, s7, sB, sF ); \
\
t1 = _mm256_blend_epi32( _mm256_bsrli_epi128( s4, 4 ), \
_mm256_bslli_epi128( s5, 4 ), 0xAA ); \
t3 = _mm256_blend_epi32( _mm256_bsrli_epi128( sD, 4 ), \
_mm256_bslli_epi128( sE, 4 ), 0xAA ); \
s4 = mm256_swap64_32( s4 ); \
s5 = mm256_swap64_32( s5 ); \
sD = mm256_swap64_32( sD ); \
sE = mm256_swap64_32( sE ); \
t1 = _mm256_blend_epi32( s4, s5, 0xaa ); \
t3 = _mm256_blend_epi32( sD, sE, 0xaa ); \
L( s0, t1, s9, t3 ); \
s4 = _mm256_blend_epi32( s4, _mm256_bslli_epi128( t1, 4 ), 0xAA );\
s5 = _mm256_blend_epi32( s5, _mm256_bsrli_epi128( t1, 4 ), 0x55 );\
sD = _mm256_blend_epi32( sD, _mm256_bslli_epi128( t3, 4 ), 0xAA );\
sE = _mm256_blend_epi32( sE, _mm256_bsrli_epi128( t3, 4 ), 0x55 );\
s4 = _mm256_blend_epi32( s4, t1, 0x55 ); \
s5 = _mm256_blend_epi32( s5, t1, 0xaa ); \
sD = _mm256_blend_epi32( sD, t3, 0x55 ); \
sE = _mm256_blend_epi32( sE, t3, 0xaa ); \
\
t1 = _mm256_blend_epi32( _mm256_bsrli_epi128( s5, 4 ), \
_mm256_bslli_epi128( s6, 4 ), 0xAA ); \
t3 = _mm256_blend_epi32( _mm256_bsrli_epi128( sE, 4 ), \
_mm256_bslli_epi128( sF, 4 ), 0xAA ); \
s6 = mm256_swap64_32( s6 ); \
sF = mm256_swap64_32( sF ); \
t1 = _mm256_blend_epi32( s5, s6, 0xaa ); \
t3 = _mm256_blend_epi32( sE, sF, 0xaa ); \
L( s1, t1, sA, t3 ); \
s5 = _mm256_blend_epi32( s5, _mm256_bslli_epi128( t1, 4 ), 0xAA );\
s6 = _mm256_blend_epi32( s6, _mm256_bsrli_epi128( t1, 4 ), 0x55 );\
sE = _mm256_blend_epi32( sE, _mm256_bslli_epi128( t3, 4 ), 0xAA );\
sF = _mm256_blend_epi32( sF, _mm256_bsrli_epi128( t3, 4 ), 0x55 );\
s5 = _mm256_blend_epi32( s5, t1, 0x55 ); \
s6 = _mm256_blend_epi32( s6, t1, 0xaa ); \
sE = _mm256_blend_epi32( sE, t3, 0x55 ); \
sF = _mm256_blend_epi32( sF, t3, 0xaa ); \
\
t1 = _mm256_blend_epi32( _mm256_bsrli_epi128( s6, 4 ), \
_mm256_bslli_epi128( s7, 4 ), 0xAA ); \
t3 = _mm256_blend_epi32( _mm256_bsrli_epi128( sF, 4 ), \
_mm256_bslli_epi128( sC, 4 ), 0xAA ); \
s7 = mm256_swap64_32( s7 ); \
sC = mm256_swap64_32( sC ); \
t1 = _mm256_blend_epi32( s6, s7, 0xaa ); \
t3 = _mm256_blend_epi32( sF, sC, 0xaa ); \
L( s2, t1, sB, t3 ); \
s6 = _mm256_blend_epi32( s6, _mm256_bslli_epi128( t1, 4 ), 0xAA );\
s7 = _mm256_blend_epi32( s7, _mm256_bsrli_epi128( t1, 4 ), 0x55 );\
sF = _mm256_blend_epi32( sF, _mm256_bslli_epi128( t3, 4 ), 0xAA );\
sC = _mm256_blend_epi32( sC, _mm256_bsrli_epi128( t3, 4 ), 0x55 );\
s6 = _mm256_blend_epi32( s6, t1, 0x55 ); \
s7 = _mm256_blend_epi32( s7, t1, 0xaa ); \
sF = _mm256_blend_epi32( sF, t3, 0x55 ); \
sC = _mm256_blend_epi32( sC, t3, 0xaa ); \
s6 = mm256_swap64_32( s6 ); \
sF = mm256_swap64_32( sF ); \
\
t1 = _mm256_blend_epi32( _mm256_bsrli_epi128( s7, 4 ), \
_mm256_bslli_epi128( s4, 4 ), 0xAA ); \
t3 = _mm256_blend_epi32( _mm256_bsrli_epi128( sC, 4 ), \
_mm256_bslli_epi128( sD, 4 ), 0xAA ); \
t1 = _mm256_blend_epi32( s7, s4, 0xaa ); \
t3 = _mm256_blend_epi32( sC, sD, 0xaa ); \
L( s3, t1, s8, t3 ); \
s7 = _mm256_blend_epi32( s7, _mm256_bslli_epi128( t1, 4 ), 0xAA );\
s4 = _mm256_blend_epi32( s4, _mm256_bsrli_epi128( t1, 4 ), 0x55 );\
sC = _mm256_blend_epi32( sC, _mm256_bslli_epi128( t3, 4 ), 0xAA );\
sD = _mm256_blend_epi32( sD, _mm256_bsrli_epi128( t3, 4 ), 0x55 );\
s7 = _mm256_blend_epi32( s7, t1, 0x55 ); \
s4 = _mm256_blend_epi32( s4, t1, 0xaa ); \
sC = _mm256_blend_epi32( sC, t3, 0x55 ); \
sD = _mm256_blend_epi32( sD, t3, 0xaa ); \
s7 = mm256_swap64_32( s7 ); \
sC = mm256_swap64_32( sC ); \
\
t0 = _mm256_blend_epi32( s0, _mm256_bslli_epi128( s8, 4 ), 0xAA ); \
t1 = _mm256_blend_epi32( s1, s9, 0xAA ); \
t2 = _mm256_blend_epi32( _mm256_bsrli_epi128( s2, 4 ), sA, 0xAA ); \
t3 = _mm256_blend_epi32( _mm256_bsrli_epi128( s3, 4 ), \
_mm256_bslli_epi128( sB, 4 ), 0xAA ); \
t0 = _mm256_blend_epi32( s0, mm256_swap64_32( s8 ), 0xaa ); \
t1 = _mm256_blend_epi32( s1, s9, 0xaa ); \
t2 = _mm256_blend_epi32( mm256_swap64_32( s2 ), sA, 0xaa ); \
t3 = _mm256_blend_epi32( s3, sB, 0x55 ); \
t3 = mm256_swap64_32( t3 ); \
L( t0, t1, t2, t3 ); \
t3 = mm256_swap64_32( t3 ); \
s0 = _mm256_blend_epi32( s0, t0, 0x55 ); \
s8 = _mm256_blend_epi32( s8, _mm256_bsrli_epi128( t0, 4 ), 0x55 ); \
s8 = _mm256_blend_epi32( s8, mm256_swap64_32( t0 ), 0x55 ); \
s1 = _mm256_blend_epi32( s1, t1, 0x55 ); \
s9 = _mm256_blend_epi32( s9, t1, 0xAA ); \
s2 = _mm256_blend_epi32( s2, _mm256_bslli_epi128( t2, 4 ), 0xAA ); \
sA = _mm256_blend_epi32( sA, t2, 0xAA ); \
s3 = _mm256_blend_epi32( s3, _mm256_bslli_epi128( t3, 4 ), 0xAA ); \
sB = _mm256_blend_epi32( sB, _mm256_bsrli_epi128( t3, 4 ), 0x55 ); \
s9 = _mm256_blend_epi32( s9, t1, 0xaa ); \
s2 = _mm256_blend_epi32( s2, mm256_swap64_32( t2 ), 0xaa ); \
sA = _mm256_blend_epi32( sA, t2, 0xaa ); \
s3 = _mm256_blend_epi32( s3, t3, 0xaa ); \
sB = _mm256_blend_epi32( sB, t3, 0x55 ); \
\
t0 = _mm256_blend_epi32( _mm256_bsrli_epi128( s4, 4 ), sC, 0xAA ); \
t1 = _mm256_blend_epi32( _mm256_bsrli_epi128( s5, 4 ), \
_mm256_bslli_epi128( sD, 4 ), 0xAA ); \
t2 = _mm256_blend_epi32( s6, _mm256_bslli_epi128( sE, 4 ), 0xAA ); \
t3 = _mm256_blend_epi32( s7, sF, 0xAA ); \
t0 = _mm256_blend_epi32( s4, sC, 0xaa ); \
t1 = _mm256_blend_epi32( s5, sD, 0xaa ); \
t2 = _mm256_blend_epi32( s6, sE, 0xaa ); \
t3 = _mm256_blend_epi32( s7, sF, 0xaa ); \
L( t0, t1, t2, t3 ); \
s4 = _mm256_blend_epi32( s4, _mm256_bslli_epi128( t0, 4 ), 0xAA ); \
sC = _mm256_blend_epi32( sC, t0, 0xAA ); \
s5 = _mm256_blend_epi32( s5, _mm256_bslli_epi128( t1, 4 ), 0xAA ); \
sD = _mm256_blend_epi32( sD, _mm256_bsrli_epi128( t1, 4 ), 0x55 ); \
s4 = _mm256_blend_epi32( s4, t0, 0x55 ); \
sC = _mm256_blend_epi32( sC, t0, 0xaa ); \
s5 = _mm256_blend_epi32( s5, t1, 0x55 ); \
sD = _mm256_blend_epi32( sD, t1, 0xaa ); \
s6 = _mm256_blend_epi32( s6, t2, 0x55 ); \
sE = _mm256_blend_epi32( sE, _mm256_bsrli_epi128( t2, 4 ), 0x55 ); \
sE = _mm256_blend_epi32( sE, t2, 0xaa ); \
s7 = _mm256_blend_epi32( s7, t3, 0x55 ); \
sF = _mm256_blend_epi32( sF, t3, 0xAA ); \
sF = _mm256_blend_epi32( sF, t3, 0xaa ); \
s4 = mm256_swap64_32( s4 ); \
s5 = mm256_swap64_32( s5 ); \
sD = mm256_swap64_32( sD ); \
sE = mm256_swap64_32( sE ); \
} while (0)
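/* The realigned ROUND_BIG pairs lanes for the L calls with
   mm256_swap64_32 (a 32-bit rotation of each 64-bit lane) plus single
   blends, replacing the byte-shift (bslli/bsrli) and blend sequences
   of the original. */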

#define P_BIG \
do { \
ROUND_BIG(0, alpha_n); \
ROUND_BIG(1, alpha_n); \
ROUND_BIG(2, alpha_n); \
ROUND_BIG(3, alpha_n); \
ROUND_BIG(4, alpha_n); \
ROUND_BIG(5, alpha_n); \
__m256i alpha[16]; \
const uint64_t A0 = ( (uint64_t*)alpha_n )[0]; \
for( int i = 0; i < 16; i++ ) \
alpha[i] = m256_const1_64( ( (uint64_t*)alpha_n )[i] ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( (1ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( (2ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( (3ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( (4ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( (5ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
} while (0)

#define PF_BIG \
do { \
ROUND_BIG( 0, alpha_f); \
ROUND_BIG( 1, alpha_f); \
ROUND_BIG( 2, alpha_f); \
ROUND_BIG( 3, alpha_f); \
ROUND_BIG( 4, alpha_f); \
ROUND_BIG( 5, alpha_f); \
ROUND_BIG( 6, alpha_f); \
ROUND_BIG( 7, alpha_f); \
ROUND_BIG( 8, alpha_f); \
ROUND_BIG( 9, alpha_f); \
ROUND_BIG(10, alpha_f); \
ROUND_BIG(11, alpha_f); \
__m256i alpha[16]; \
const uint64_t A0 = ( (uint64_t*)alpha_f )[0]; \
for( int i = 0; i < 16; i++ ) \
alpha[i] = m256_const1_64( ( (uint64_t*)alpha_f )[i] ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( ( 1ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( ( 2ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( ( 3ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( ( 4ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( ( 5ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( ( 6ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( ( 7ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( ( 8ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( ( 9ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( (10ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
alpha[0] = m256_const1_64( (11ULL << 32) ^ A0 ); \
ROUND_BIG( alpha ); \
} while (0)

#define T_BIG \
do { /* order is important */ \
c7 = sc->h[ 0x7 ] = _mm256_xor_si256( sc->h[ 0x7 ], sB ); \
c6 = sc->h[ 0x6 ] = _mm256_xor_si256( sc->h[ 0x6 ], sA ); \
c5 = sc->h[ 0x5 ] = _mm256_xor_si256( sc->h[ 0x5 ], s9 ); \
c4 = sc->h[ 0x4 ] = _mm256_xor_si256( sc->h[ 0x4 ], s8 ); \
c3 = sc->h[ 0x3 ] = _mm256_xor_si256( sc->h[ 0x3 ], s3 ); \
c2 = sc->h[ 0x2 ] = _mm256_xor_si256( sc->h[ 0x2 ], s2 ); \
c1 = sc->h[ 0x1 ] = _mm256_xor_si256( sc->h[ 0x1 ], s1 ); \
c0 = sc->h[ 0x0 ] = _mm256_xor_si256( sc->h[ 0x0 ], s0 ); \
c7 = sc->h[ 7 ] = _mm256_xor_si256( sc->h[ 7 ], sB ); \
c6 = sc->h[ 6 ] = _mm256_xor_si256( sc->h[ 6 ], sA ); \
c5 = sc->h[ 5 ] = _mm256_xor_si256( sc->h[ 5 ], s9 ); \
c4 = sc->h[ 4 ] = _mm256_xor_si256( sc->h[ 4 ], s8 ); \
c3 = sc->h[ 3 ] = _mm256_xor_si256( sc->h[ 3 ], s3 ); \
c2 = sc->h[ 2 ] = _mm256_xor_si256( sc->h[ 2 ], s2 ); \
c1 = sc->h[ 1 ] = _mm256_xor_si256( sc->h[ 1 ], s1 ); \
c0 = sc->h[ 0 ] = _mm256_xor_si256( sc->h[ 0 ], s0 ); \
} while (0)

void hamsi_big( hamsi_4way_big_context *sc, __m256i *buf, size_t num )
@@ -1186,14 +1206,12 @@ void hamsi512_4way_update( hamsi_4way_big_context *sc, const void *data,
void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst )
{
__m256i pad[1];
int ch, cl;
uint32_t ch, cl;

sph_enc32be( &ch, sc->count_high );
sph_enc32be( &cl, sc->count_low + ( sc->partial_len << 3 ) );
pad[0] = _mm256_set_epi32( cl, ch, cl, ch, cl, ch, cl, ch );
pad[0] = _mm256_set1_epi64x( ((uint64_t)cl << 32 ) | (uint64_t)ch );
sc->buf[0] = m256_const1_64( 0x80 );
// sc->buf[0] = _mm256_set_epi32( 0UL, 0x80UL, 0UL, 0x80UL,
//                                0UL, 0x80UL, 0UL, 0x80UL );
hamsi_big( sc, sc->buf, 1 );
hamsi_big_final( sc, pad );

@@ -522,50 +522,53 @@ do { \

// Haval-256 8 way 32 bit avx2

#if defined (__AVX512VL__)

// ( ~( a ^ b ) ) & c
#define mm256_andnotxor( a, b, c ) \
   _mm256_ternarylogic_epi32( a, b, c, 0x82 )
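/* Ternary truth table 0x82: f(a,b,c) = (~(a ^ b)) & c is 1 only for
   inputs 001 and 111, i.e. bits 1 and 7 of the immediate. */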

#else

#define mm256_andnotxor( a, b, c ) \
   _mm256_andnot_si256( _mm256_xor_si256( a, b ), c )

#endif

#define F1_8W(x6, x5, x4, x3, x2, x1, x0) \
_mm256_xor_si256( x0, \
_mm256_xor_si256( _mm256_and_si256(_mm256_xor_si256( x0, x4 ), x1 ), \
_mm256_xor_si256( _mm256_and_si256( x2, x5 ), \
_mm256_and_si256( x3, x6 ) ) ) ) \
mm256_xor3( x0, mm256_andxor( x1, x0, x4 ), \
_mm256_xor_si256( _mm256_and_si256( x2, x5 ), \
_mm256_and_si256( x3, x6 ) ) ) \

#define F2_8W(x6, x5, x4, x3, x2, x1, x0) \
_mm256_xor_si256( \
_mm256_and_si256( x2, \
_mm256_xor_si256( _mm256_andnot_si256( x3, x1 ), \
_mm256_xor_si256( _mm256_and_si256( x4, x5 ), \
_mm256_xor_si256( x6, x0 ) ) ) ), \
_mm256_xor_si256( \
_mm256_and_si256( x4, _mm256_xor_si256( x1, x5 ) ), \
_mm256_xor_si256( _mm256_and_si256( x3, x5 ), x0 ) ) ) \
mm256_xor3( mm256_andxor( x2, _mm256_andnot_si256( x3, x1 ), \
mm256_xor3( _mm256_and_si256( x4, x5 ), x6, x0 ) ), \
mm256_andxor( x4, x1, x5 ), \
mm256_xorand( x0, x3, x5 ) ) \

#define F3_8W(x6, x5, x4, x3, x2, x1, x0) \
_mm256_xor_si256( \
_mm256_and_si256( x3, \
_mm256_xor_si256( _mm256_and_si256( x1, x2 ), \
_mm256_xor_si256( x6, x0 ) ) ), \
_mm256_xor_si256( _mm256_xor_si256(_mm256_and_si256( x1, x4 ), \
_mm256_and_si256( x2, x5 ) ), x0 ) )
mm256_xor3( x0, \
_mm256_and_si256( x3, \
mm256_xor3( _mm256_and_si256( x1, x2 ), x6, x0 ) ), \
_mm256_xor_si256( _mm256_and_si256( x1, x4 ), \
_mm256_and_si256( x2, x5 ) ) )

#define F4_8W(x6, x5, x4, x3, x2, x1, x0) \
_mm256_xor_si256( \
_mm256_xor_si256( \
_mm256_and_si256( x3, \
_mm256_xor_si256( _mm256_xor_si256( _mm256_and_si256( x1, x2 ), \
_mm256_or_si256( x4, x6 ) ), x5 ) ), \
_mm256_and_si256( x4, \
_mm256_xor_si256( _mm256_xor_si256( _mm256_and_si256( mm256_not(x2), x5 ), \
_mm256_xor_si256( x1, x6 ) ), x0 ) ) ), \
_mm256_xor_si256( _mm256_and_si256( x2, x6 ), x0 ) )

mm256_xor3( \
mm256_andxor( x3, x5, \
_mm256_xor_si256( _mm256_and_si256( x1, x2 ), \
_mm256_or_si256( x4, x6 ) ) ), \
_mm256_and_si256( x4, \
mm256_xor3( x0, _mm256_andnot_si256( x2, x5 ), \
_mm256_xor_si256( x1, x6 ) ) ), \
mm256_xorand( x0, x2, x6 ) )

#define F5_8W(x6, x5, x4, x3, x2, x1, x0) \
_mm256_xor_si256( \
_mm256_and_si256( x0, \
mm256_not( _mm256_xor_si256( \
_mm256_and_si256( _mm256_and_si256( x1, x2 ), x3 ), x5 ) ) ), \
_mm256_xor_si256( _mm256_xor_si256( _mm256_and_si256( x1, x4 ), \
_mm256_and_si256( x2, x5 ) ), \
_mm256_and_si256( x3, x6 ) ) )
mm256_andnotxor( mm256_and3( x1, x2, x3 ), x5, x0 ), \
mm256_xor3( _mm256_and_si256( x1, x4 ), \
_mm256_and_si256( x2, x5 ), \
_mm256_and_si256( x3, x6 ) ) )

#define FP3_1_8W(x6, x5, x4, x3, x2, x1, x0) \
F1_8W(x1, x0, x3, x5, x6, x2, x4)
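/* The F1..F5 rewrites above compute the same boolean functions with
   3-input helpers (mm256_xor3, mm256_andxor, mm256_xorand, mm256_and3),
   each of which can presumably compile to a single vpternlogd when
   AVX512VL is available. */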

@@ -99,9 +99,13 @@ void hodl_build_block_header( struct work* g_work, uint32_t version,
// called only by thread 0, saves a backup of g_work
void hodl_get_new_work( struct work* work, struct work* g_work)
{
work_free( &hodl_work );
work_copy( &hodl_work, g_work );
hodl_work.data[ algo_gate.nonce_index ] = ( clock() + rand() ) % 9999;
// pthread_rwlock_rdlock( &g_work_lock );

work_free( &hodl_work );
work_copy( &hodl_work, g_work );
hodl_work.data[ algo_gate.nonce_index ] = ( clock() + rand() ) % 9999;

// pthread_rwlock_unlock( &g_work_lock );
}

json_t *hodl_longpoll_rpc_call( CURL *curl, int *err, char* lp_url )
@@ -121,7 +125,7 @@ json_t *hodl_longpoll_rpc_call( CURL *curl, int *err, char* lp_url )
}

// called by every thread, copies the backup to each thread's work.
void hodl_resync_threads( struct work* work )
void hodl_resync_threads( int thr_id, struct work* work )
{
int nonce_index = algo_gate.nonce_index;
pthread_barrier_wait( &hodl_barrier );
@@ -131,6 +135,7 @@ void hodl_resync_threads( struct work* work )
work_copy( work, &hodl_work );
}
work->data[ nonce_index ] = swab32( hodl_work.data[ nonce_index ] );
work_restart[thr_id].restart = 0;
}
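// hodl_resync_threads() now receives the thread id so each worker can
// clear its own work_restart flag after copying the backup work.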

bool hodl_do_this_thread( int thr_id )
@@ -155,11 +160,10 @@ bool register_hodl_algo( algo_gate_t* gate )
applog( LOG_ERR, "Only CPUs with AES are supported, use legacy version.");
return false;
#endif
// if ( TOTAL_CHUNKS % opt_n_threads )
// {
//   applog(LOG_ERR,"Thread count must be power of 2.");
//   return false;
// }

if ( GARBAGE_SIZE % opt_n_threads )
applog( LOG_WARNING,"WARNING: Thread count must be power of 2. Miner may crash or produce invalid hash!" );

pthread_barrier_init( &hodl_barrier, NULL, opt_n_threads );
gate->optimizations = SSE42_OPT | AES_OPT | AVX2_OPT;
gate->scanhash = (void*)&hodl_scanhash;
@@ -171,7 +175,7 @@ bool register_hodl_algo( algo_gate_t* gate )
gate->resync_threads = (void*)&hodl_resync_threads;
gate->do_this_thread = (void*)&hodl_do_this_thread;
gate->work_cmp_size = 76;
hodl_scratchbuf = (unsigned char*)malloc( 1 << 30 );
hodl_scratchbuf = (unsigned char*)_mm_malloc( 1 << 30, 64 );
allow_getwork = false;
opt_target_factor = 8388608.0;
return ( hodl_scratchbuf != NULL );

@@ -7,6 +7,7 @@
#include "hodl-gate.h"
#include "hodl-wolf.h"
#include "miner.h"
#include "algo/sha/sha256d.h"

#if defined(__AES__)

@@ -70,7 +71,7 @@ int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce,
uint32_t *ptarget = work->target;
int threadNumber = mythr->id;
CacheEntry *Garbage = (CacheEntry*)hodl_scratchbuf;
CacheEntry Cache[AES_PARALLEL_N];
CacheEntry Cache[AES_PARALLEL_N] __attribute__ ((aligned (64)));
__m128i* data[AES_PARALLEL_N];
const __m128i* next[AES_PARALLEL_N];
uint32_t CollisionCount = 0;

@@ -45,6 +45,6 @@ void sha512Compute32b_parallel(
uint64_t *data[SHA512_PARALLEL_N],
uint64_t *digest[SHA512_PARALLEL_N]);

void sha512ProcessBlock(Sha512Context *context);
void sha512ProcessBlock(Sha512Context contexti[2] );

#endif
@@ -51,15 +51,15 @@ extern "C"{
do { \
__m512i cc = _mm512_set1_epi64( c ); \
x3 = mm512_not( x3 ); \
x0 = _mm512_xor_si512( x0, _mm512_andnot_si512( x2, cc ) ); \
tmp = _mm512_xor_si512( cc, _mm512_and_si512( x0, x1 ) ); \
x0 = _mm512_xor_si512( x0, _mm512_and_si512( x2, x3 ) ); \
x3 = _mm512_xor_si512( x3, _mm512_andnot_si512( x1, x2 ) ); \
x1 = _mm512_xor_si512( x1, _mm512_and_si512( x0, x2 ) ); \
x2 = _mm512_xor_si512( x2, _mm512_andnot_si512( x3, x0 ) ); \
x0 = _mm512_xor_si512( x0, _mm512_or_si512( x1, x3 ) ); \
x3 = _mm512_xor_si512( x3, _mm512_and_si512( x1, x2 ) ); \
x1 = _mm512_xor_si512( x1, _mm512_and_si512( tmp, x0 ) ); \
x0 = mm512_xorandnot( x0, x2, cc ); \
tmp = mm512_xorand( cc, x0, x1 ); \
x0 = mm512_xorand( x0, x2, x3 ); \
x3 = mm512_xorandnot( x3, x1, x2 ); \
x1 = mm512_xorand( x1, x0, x2 ); \
x2 = mm512_xorandnot( x2, x3, x0 ); \
x0 = mm512_xoror( x0, x1, x3 ); \
x3 = mm512_xorand( x3, x1, x2 ); \
x1 = mm512_xorand( x1, tmp, x0 ); \
x2 = _mm512_xor_si512( x2, tmp ); \
} while (0)
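/* As used here, the fused helpers read: mm512_xorand(a,b,c) = a ^ (b & c),
   mm512_xorandnot(a,b,c) = a ^ (~b & c), mm512_xoror(a,b,c) = a ^ (b | c);
   each presumably maps to a single vpternlogq. */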

@@ -67,11 +67,11 @@ do { \
do { \
x4 = _mm512_xor_si512( x4, x1 ); \
x5 = _mm512_xor_si512( x5, x2 ); \
x6 = _mm512_xor_si512( x6, _mm512_xor_si512( x3, x0 ) ); \
x6 = mm512_xor3( x6, x3, x0 ); \
x7 = _mm512_xor_si512( x7, x0 ); \
x0 = _mm512_xor_si512( x0, x5 ); \
x1 = _mm512_xor_si512( x1, x6 ); \
x2 = _mm512_xor_si512( x2, _mm512_xor_si512( x7, x4 ) ); \
x2 = mm512_xor3( x2, x7, x4 ); \
x3 = _mm512_xor_si512( x3, x4 ); \
} while (0)

@@ -318,12 +318,12 @@ static const sph_u64 C[] = {
#define Wz_8W(x, c, n) \
do { \
__m512i t = _mm512_slli_epi64( _mm512_and_si512(x ## h, (c)), (n) ); \
x ## h = _mm512_or_si512( _mm512_and_si512( \
_mm512_srli_epi64(x ## h, (n)), (c)), t ); \
x ## h = mm512_orand( t, _mm512_srli_epi64( x ## h, (n) ), (c) ); \
t = _mm512_slli_epi64( _mm512_and_si512(x ## l, (c)), (n) ); \
x ## l = _mm512_or_si512( _mm512_and_si512((x ## l >> (n)), (c)), t ); \
x ## l = mm512_orand( t, (x ## l >> (n)), (c) ); \
} while (0)


#define W80(x) Wz_8W(x, m512_const1_64( 0x5555555555555555 ), 1 )
#define W81(x) Wz_8W(x, m512_const1_64( 0x3333333333333333 ), 2 )
#define W82(x) Wz_8W(x, m512_const1_64( 0x0F0F0F0F0F0F0F0F ), 4 )
@@ -129,7 +129,7 @@ int scanhash_jha_4way( struct work *work, uint32_t max_nonce,
if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;
submit_lane_solution( work, lane_hash, mythr, i );
submit_solution( work, lane_hash, mythr );
}
}
n += 4;

@@ -45,7 +45,7 @@ int scanhash_keccak_8way( struct work *work, uint32_t max_nonce,
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm512_add_epi32( *noncev,
@@ -97,7 +97,7 @@ int scanhash_keccak_4way( struct work *work, uint32_t max_nonce,
if ( valid_hash( lane_hash, ptarget ))
{
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm256_add_epi32( *noncev,
@@ -1,5 +1,6 @@
#include "keccak-gate.h"
#include "sph_keccak.h"
#include "algo/sha/sha256d.h"

int hard_coded_eb = 1;


@@ -53,7 +53,8 @@ static const uint64_t RC[] = {
#define WRITE_STATE(sc)

#define MOV64(d, s) (d = s)
#define XOR64_IOTA XOR64
#define XOR64_IOTA XOR


#define LPAR (
#define RPAR )
@@ -70,12 +71,16 @@ static const uint64_t RC[] = {

// Targeted macros, keccak-macros.h is included for each target.

#define DECL64(x) __m512i x
#define XOR64(d, a, b) (d = _mm512_xor_si512(a,b))
#define AND64(d, a, b) (d = _mm512_and_si512(a,b))
#define OR64(d, a, b) (d = _mm512_or_si512(a,b))
#define NOT64(d, s) (d = _mm512_xor_si512(s,m512_neg1))
#define ROL64(d, v, n) (d = mm512_rol_64(v, n))
#define DECL64(x) __m512i x
#define XOR(d, a, b) (d = _mm512_xor_si512(a,b))
#define XOR64 XOR
#define AND64(d, a, b) (d = _mm512_and_si512(a,b))
#define OR64(d, a, b) (d = _mm512_or_si512(a,b))
#define NOT64(d, s) (d = _mm512_xor_si512(s,m512_neg1))
#define ROL64(d, v, n) (d = mm512_rol_64(v, n))
#define XOROR(d, a, b, c) (d = mm512_xoror(a, b, c))
#define XORAND(d, a, b, c) (d = mm512_xorand(a, b, c))
#define XOR3( d, a, b, c ) (d = mm512_xor3( a, b, c ))

#include "keccak-macros.c"

@@ -233,12 +238,15 @@ keccak512_8way_close(void *cc, void *dst)
#undef INPUT_BUF
#undef DECL64
#undef XOR64
#undef XOR
#undef AND64
#undef OR64
#undef NOT64
#undef ROL64
#undef KECCAK_F_1600

#undef XOROR
#undef XORAND
#undef XOR3
#endif // AVX512

// AVX2
@@ -250,11 +258,15 @@ keccak512_8way_close(void *cc, void *dst)
} while (0)

#define DECL64(x) __m256i x
#define XOR64(d, a, b) (d = _mm256_xor_si256(a,b))
#define XOR(d, a, b) (d = _mm256_xor_si256(a,b))
#define XOR64 XOR
#define AND64(d, a, b) (d = _mm256_and_si256(a,b))
#define OR64(d, a, b) (d = _mm256_or_si256(a,b))
#define NOT64(d, s) (d = _mm256_xor_si256(s,m256_neg1))
#define ROL64(d, v, n) (d = mm256_rol_64(v, n))
#define XOROR(d, a, b, c) (d = _mm256_xor_si256(a, _mm256_or_si256(b, c)))
#define XORAND(d, a, b, c) (d = _mm256_xor_si256(a, _mm256_and_si256(b, c)))
#define XOR3( d, a, b, c ) (d = mm256_xor3( a, b, c ))

#include "keccak-macros.c"

@@ -414,10 +426,14 @@ keccak512_4way_close(void *cc, void *dst)
#undef INPUT_BUF
#undef DECL64
#undef XOR64
#undef XOR
#undef AND64
#undef OR64
#undef NOT64
#undef ROL64
#undef KECCAK_F_1600
#undef XOROR
#undef XORAND
#undef XOR3

#endif // AVX2

@@ -1,6 +1,19 @@
#ifdef TH_ELT
#undef TH_ELT
#endif

#define TH_ELT(t, c0, c1, c2, c3, c4, d0, d1, d2, d3, d4) do { \
DECL64(tt0); \
DECL64(tt1); \
XOR3( tt0, d0, d1, d4 ); \
XOR( tt1, d2, d3 ); \
XOR( tt0, tt0, tt1 ); \
ROL64( tt0, tt0, 1 ); \
XOR3( tt1, c0, c1, c4 ); \
XOR3( tt0, tt0, c2, c3 ); \
XOR( t, tt0, tt1 ); \
} while (0)
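/* The new TH_ELT computes the Keccak theta column parity directly:
   t = ROL64( d0^d1^d2^d3^d4, 1 ) ^ ( c0^c1^c2^c3^c4 ), folding the
   XOR trees with the 3-input XOR3 macro. */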
/*
#define TH_ELT(t, c0, c1, c2, c3, c4, d0, d1, d2, d3, d4) do { \
DECL64(tt0); \
DECL64(tt1); \
@@ -17,7 +30,7 @@
XOR64(tt2, tt2, tt3); \
XOR64(t, tt0, tt2); \
} while (0)
*/
#ifdef THETA
#undef THETA
#endif
@@ -110,20 +123,34 @@
#ifdef KHI_XO
#undef KHI_XO
#endif

#define KHI_XO(d, a, b, c) do { \
XOROR(d, a, b, c); \
} while (0)

/*
#define KHI_XO(d, a, b, c) do { \
DECL64(kt); \
OR64(kt, b, c); \
XOR64(d, a, kt); \
} while (0)
*/

#ifdef KHI_XA
#undef KHI_XA
#endif

#define KHI_XA(d, a, b, c) do { \
XORAND(d, a, b, c); \
} while (0)

/*
#define KHI_XA(d, a, b, c) do { \
DECL64(kt); \
AND64(kt, b, c); \
XOR64(d, a, kt); \
} while (0)
*/

#ifdef KHI
#undef KHI
@@ -134,65 +161,47 @@
do { \
DECL64(c0); \
DECL64(c1); \
DECL64(c2); \
DECL64(c3); \
DECL64(c4); \
DECL64(bnn); \
NOT64(bnn, b20); \
KHI_XO(c0, b00, b10, b20); \
KHI_XO(c1, b10, bnn, b30); \
KHI_XA(c2, b20, b30, b40); \
KHI_XO(c3, b30, b40, b00); \
KHI_XA(c4, b40, b00, b10); \
KHI_XA(b20, b20, b30, b40); \
KHI_XO(b30, b30, b40, b00); \
KHI_XA(b40, b40, b00, b10); \
MOV64(b00, c0); \
MOV64(b10, c1); \
MOV64(b20, c2); \
MOV64(b30, c3); \
MOV64(b40, c4); \
NOT64(bnn, b41); \
KHI_XO(c0, b01, b11, b21); \
KHI_XA(c1, b11, b21, b31); \
KHI_XO(c2, b21, b31, bnn); \
KHI_XO(c3, b31, b41, b01); \
KHI_XA(c4, b41, b01, b11); \
KHI_XO(b21, b21, b31, bnn); \
KHI_XO(b31, b31, b41, b01); \
KHI_XA(b41, b41, b01, b11); \
MOV64(b01, c0); \
MOV64(b11, c1); \
MOV64(b21, c2); \
MOV64(b31, c3); \
MOV64(b41, c4); \
NOT64(bnn, b32); \
KHI_XO(c0, b02, b12, b22); \
KHI_XA(c1, b12, b22, b32); \
KHI_XA(c2, b22, bnn, b42); \
KHI_XO(c3, bnn, b42, b02); \
KHI_XA(c4, b42, b02, b12); \
KHI_XA(b22, b22, bnn, b42); \
KHI_XO(b32, bnn, b42, b02); \
KHI_XA(b42, b42, b02, b12); \
MOV64(b02, c0); \
MOV64(b12, c1); \
MOV64(b22, c2); \
MOV64(b32, c3); \
MOV64(b42, c4); \
NOT64(bnn, b33); \
KHI_XA(c0, b03, b13, b23); \
KHI_XO(c1, b13, b23, b33); \
KHI_XO(c2, b23, bnn, b43); \
KHI_XA(c3, bnn, b43, b03); \
KHI_XO(c4, b43, b03, b13); \
KHI_XO(b23, b23, bnn, b43); \
KHI_XA(b33, bnn, b43, b03); \
KHI_XO(b43, b43, b03, b13); \
MOV64(b03, c0); \
MOV64(b13, c1); \
MOV64(b23, c2); \
MOV64(b33, c3); \
MOV64(b43, c4); \
NOT64(bnn, b14); \
KHI_XA(c0, b04, bnn, b24); \
KHI_XO(c1, bnn, b24, b34); \
KHI_XA(c2, b24, b34, b44); \
KHI_XO(c3, b34, b44, b04); \
KHI_XA(c4, b44, b04, b14); \
KHI_XA(b24, b24, b34, b44); \
KHI_XO(b34, b34, b44, b04); \
KHI_XA(b44, b44, b04, b14); \
MOV64(b04, c0); \
MOV64(b14, c1); \
MOV64(b24, c2); \
MOV64(b34, c3); \
MOV64(b44, c4); \
} while (0)

#ifdef IOTA
@@ -201,6 +210,7 @@
#define IOTA(r) XOR64_IOTA(a00, a00, r)

#ifdef P0
#undef P0
#undef P1
#undef P2
#undef P3

@@ -52,7 +52,7 @@ int scanhash_sha3d_8way( struct work *work, uint32_t max_nonce,
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm512_add_epi32( *noncev,
@@ -111,7 +111,7 @@ int scanhash_sha3d_4way( struct work *work, uint32_t max_nonce,
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = bswap_32( n + lane );
submit_lane_solution( work, lane_hash, mythr, lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm256_add_epi32( *noncev,

@@ -66,6 +66,17 @@ static const uint32 CNS_INIT[128] __attribute((aligned(64))) = {
a = _mm512_xor_si512(a,c0);\
b = _mm512_xor_si512(b,c1);

#define MULT24W( a0, a1 ) \
do { \
__m512i b = _mm512_xor_si512( a0, \
_mm512_maskz_shuffle_epi32( 0xbbbb, a1, 16 ) ); \
a0 = _mm512_or_si512( _mm512_bsrli_epi128( b, 4 ), \
_mm512_bslli_epi128( a1,12 ) ); \
a1 = _mm512_or_si512( _mm512_bsrli_epi128( a1, 4 ), \
_mm512_bslli_epi128( b,12 ) ); \
} while(0)
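/* MULT24W is Luffa's multiply-by-2 message-injection step. The masked
   shuffle (_mm512_maskz_shuffle_epi32 with k = 0xbbbb) zeroes dword 2 of
   each 128-bit lane while replicating dword 0, reproducing the old
   AND-with-MASK plus shuffle pair, which is why the MASK argument is
   gone. */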

/*
#define MULT24W( a0, a1, mask ) \
do { \
__m512i b = _mm512_xor_si512( a0, \
@@ -73,6 +84,7 @@ do { \
a0 = _mm512_or_si512( _mm512_bsrli_epi128(b,4), _mm512_bslli_epi128(a1,12) );\
a1 = _mm512_or_si512( _mm512_bsrli_epi128(a1,4), _mm512_bslli_epi128(b,12) );\
} while(0)
*/

// confirm pointer arithmetic
// ok but use array indexes
@@ -85,6 +97,21 @@ do { \
MIXWORD4W(*(x+3),*(x+7),*t,*(t+1));\
ADD_CONSTANT4W(*x, *(x+4), c0, c1);

#define SUBCRUMB4W(a0,a1,a2,a3,t)\
t = a0;\
a0 = mm512_xoror( a3, a0, a1 ); \
a2 = _mm512_xor_si512(a2,a3);\
a1 = _mm512_ternarylogic_epi64( a1, a3, t, 0x87 ); /* a1 xnor (a3 & t) */ \
a3 = mm512_xorand( a2, a3, t ); \
a2 = mm512_xorand( a1, a2, a0);\
a1 = _mm512_or_si512(a1,a3);\
a3 = _mm512_xor_si512(a3,a2);\
t = _mm512_xor_si512(t,a1);\
a2 = _mm512_and_si512(a2,a1);\
a1 = mm512_xnor(a1,a0);\
a0 = t;
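/* 0x87 is the truth table of ~( a1 ^ ( a3 & t ) ): 1 at inputs 000, 001,
   010 and 111, matching the "a1 xnor (a3 & t)" note above. */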

/*
#define SUBCRUMB4W(a0,a1,a2,a3,t)\
t = _mm512_load_si512(&a0);\
a0 = _mm512_or_si512(a0,a1);\
@@ -103,7 +130,25 @@ do { \
a2 = _mm512_and_si512(a2,a1);\
a1 = _mm512_xor_si512(a1,a0);\
a0 = _mm512_load_si512(&t);
*/

#define MIXWORD4W(a,b,t1,t2)\
b = _mm512_xor_si512(a,b);\
t1 = _mm512_slli_epi32(a,2);\
t2 = _mm512_srli_epi32(a,30);\
a = mm512_xoror( b, t1, t2 ); \
t1 = _mm512_slli_epi32(b,14);\
t2 = _mm512_srli_epi32(b,18);\
b = _mm512_or_si512(t1,t2);\
b = mm512_xoror( a, t1, t2 ); \
t1 = _mm512_slli_epi32(a,10);\
t2 = _mm512_srli_epi32(a,22);\
a = mm512_xoror( b, t1, t2 ); \
t1 = _mm512_slli_epi32(b,1);\
t2 = _mm512_srli_epi32(b,31);\
b = _mm512_or_si512(t1,t2);
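/* Each 32-bit rotation in MIXWORD4W is built from a shift pair;
   mm512_xoror folds the OR of the two shifted halves and the following
   XOR into one operation. */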

/*
#define MIXWORD4W(a,b,t1,t2)\
b = _mm512_xor_si512(a,b);\
t1 = _mm512_slli_epi32(a,2);\
@@ -121,6 +166,7 @@ do { \
t1 = _mm512_slli_epi32(b,1);\
t2 = _mm512_srli_epi32(b,31);\
b = _mm512_or_si512(t1,t2);
*/

#define STEP_PART24W(a0,a1,t0,t1,c0,c1,tmp0,tmp1)\
a1 = _mm512_shuffle_epi32(a1,147);\
@@ -235,21 +281,13 @@ void rnd512_4way( luffa_4way_context *state, __m512i *msg )
__m512i msg0, msg1;
__m512i tmp[2];
__m512i x[8];
const __m512i MASK = m512_const2_64( 0, 0x00000000ffffffff );

t0 = chainv[0];
t1 = chainv[1];
t0 = mm512_xor3( chainv[0], chainv[2], chainv[4] );
t1 = mm512_xor3( chainv[1], chainv[3], chainv[5] );
t0 = mm512_xor3( t0, chainv[6], chainv[8] );
t1 = mm512_xor3( t1, chainv[7], chainv[9] );

t0 = _mm512_xor_si512( t0, chainv[2] );
t1 = _mm512_xor_si512( t1, chainv[3] );
t0 = _mm512_xor_si512( t0, chainv[4] );
t1 = _mm512_xor_si512( t1, chainv[5] );
t0 = _mm512_xor_si512( t0, chainv[6] );
t1 = _mm512_xor_si512( t1, chainv[7] );
t0 = _mm512_xor_si512( t0, chainv[8] );
t1 = _mm512_xor_si512( t1, chainv[9] );

MULT24W( t0, t1, MASK );
MULT24W( t0, t1 );

msg0 = _mm512_shuffle_epi32( msg[0], 27 );
msg1 = _mm512_shuffle_epi32( msg[1], 27 );
@@ -268,68 +306,67 @@ void rnd512_4way( luffa_4way_context *state, __m512i *msg )
t0 = chainv[0];
t1 = chainv[1];

MULT24W( chainv[0], chainv[1], MASK );
MULT24W( chainv[0], chainv[1] );
chainv[0] = _mm512_xor_si512( chainv[0], chainv[2] );
chainv[1] = _mm512_xor_si512( chainv[1], chainv[3] );

MULT24W( chainv[2], chainv[3], MASK );
MULT24W( chainv[2], chainv[3] );
chainv[2] = _mm512_xor_si512(chainv[2], chainv[4]);
chainv[3] = _mm512_xor_si512(chainv[3], chainv[5]);

MULT24W( chainv[4], chainv[5], MASK );
MULT24W( chainv[4], chainv[5] );
chainv[4] = _mm512_xor_si512(chainv[4], chainv[6]);
chainv[5] = _mm512_xor_si512(chainv[5], chainv[7]);

MULT24W( chainv[6], chainv[7], MASK );
MULT24W( chainv[6], chainv[7] );
chainv[6] = _mm512_xor_si512(chainv[6], chainv[8]);
chainv[7] = _mm512_xor_si512(chainv[7], chainv[9]);

MULT24W( chainv[8], chainv[9], MASK );
MULT24W( chainv[8], chainv[9] );
chainv[8] = _mm512_xor_si512( chainv[8], t0 );
chainv[9] = _mm512_xor_si512( chainv[9], t1 );

t0 = chainv[8];
t1 = chainv[9];

MULT24W( chainv[8], chainv[9], MASK );
MULT24W( chainv[8], chainv[9] );
chainv[8] = _mm512_xor_si512( chainv[8], chainv[6] );
chainv[9] = _mm512_xor_si512( chainv[9], chainv[7] );

MULT24W( chainv[6], chainv[7], MASK );
MULT24W( chainv[6], chainv[7] );
chainv[6] = _mm512_xor_si512( chainv[6], chainv[4] );
chainv[7] = _mm512_xor_si512( chainv[7], chainv[5] );

MULT24W( chainv[4], chainv[5], MASK );
MULT24W( chainv[4], chainv[5] );
chainv[4] = _mm512_xor_si512( chainv[4], chainv[2] );
chainv[5] = _mm512_xor_si512( chainv[5], chainv[3] );

MULT24W( chainv[2], chainv[3], MASK );
MULT24W( chainv[2], chainv[3] );
chainv[2] = _mm512_xor_si512( chainv[2], chainv[0] );
chainv[3] = _mm512_xor_si512( chainv[3], chainv[1] );

MULT24W( chainv[0], chainv[1], MASK );
chainv[0] = _mm512_xor_si512( _mm512_xor_si512( chainv[0], t0 ), msg0 );
chainv[1] = _mm512_xor_si512( _mm512_xor_si512( chainv[1], t1 ), msg1 );
MULT24W( chainv[0], chainv[1] );
chainv[0] = mm512_xor3( chainv[0], t0, msg0 );
chainv[1] = mm512_xor3( chainv[1], t1, msg1 );

MULT24W( msg0, msg1, MASK );
MULT24W( msg0, msg1 );
chainv[2] = _mm512_xor_si512( chainv[2], msg0 );
chainv[3] = _mm512_xor_si512( chainv[3], msg1 );

MULT24W( msg0, msg1, MASK );
MULT24W( msg0, msg1 );
chainv[4] = _mm512_xor_si512( chainv[4], msg0 );
chainv[5] = _mm512_xor_si512( chainv[5], msg1 );

MULT24W( msg0, msg1, MASK );
MULT24W( msg0, msg1 );
chainv[6] = _mm512_xor_si512( chainv[6], msg0 );
chainv[7] = _mm512_xor_si512( chainv[7], msg1 );

MULT24W( msg0, msg1, MASK );
MULT24W( msg0, msg1);
chainv[8] = _mm512_xor_si512( chainv[8], msg0 );
chainv[9] = _mm512_xor_si512( chainv[9], msg1 );

MULT24W( msg0, msg1, MASK );
MULT24W( msg0, msg1 );

// replace with ror
chainv[3] = _mm512_rol_epi32( chainv[3], 1 );
chainv[5] = _mm512_rol_epi32( chainv[5], 2 );
chainv[7] = _mm512_rol_epi32( chainv[7], 3 );
@@ -388,19 +425,11 @@ void finalization512_4way( luffa_4way_context *state, uint32 *b )

/*---- blank round with m=0 ----*/
rnd512_4way( state, zero );

t[0] = chainv[0];
t[1] = chainv[1];

t[0] = _mm512_xor_si512( t[0], chainv[2] );
t[1] = _mm512_xor_si512( t[1], chainv[3] );
t[0] = _mm512_xor_si512( t[0], chainv[4] );
t[1] = _mm512_xor_si512( t[1], chainv[5] );
t[0] = _mm512_xor_si512( t[0], chainv[6] );
t[1] = _mm512_xor_si512( t[1], chainv[7] );
t[0] = _mm512_xor_si512( t[0], chainv[8] );
t[1] = _mm512_xor_si512( t[1], chainv[9] );

t[0] = mm512_xor3( chainv[0], chainv[2], chainv[4] );
t[1] = mm512_xor3( chainv[1], chainv[3], chainv[5] );
t[0] = mm512_xor3( t[0], chainv[6], chainv[8] );
t[1] = mm512_xor3( t[1], chainv[7], chainv[9] );
t[0] = _mm512_shuffle_epi32( t[0], 27 );
t[1] = _mm512_shuffle_epi32( t[1], 27 );
@@ -496,7 +525,7 @@ int luffa_4way_update( luffa_4way_context *state, const void *data,
{
// remaining data bytes
buffer[0] = _mm512_shuffle_epi8( vdata[0], shuff_bswap32 );
buffer[1] = m512_const2_64( 0, 0x0000000080000000 );
buffer[1] = m512_const1_i128( 0x0000000080000000 );
}
return 0;
}
@@ -520,7 +549,7 @@ int luffa_4way_close( luffa_4way_context *state, void *hashval )
rnd512_4way( state, buffer );
else
{ // empty pad block, constant data
msg[0] = m512_const2_64( 0, 0x0000000080000000 );
msg[0] = m512_const1_i128( 0x0000000080000000 );
msg[1] = m512_zero;
rnd512_4way( state, msg );
}
@@ -583,13 +612,13 @@ int luffa512_4way_full( luffa_4way_context *state, void *output,
{
// padding of partial block
msg[0] = _mm512_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = m512_const2_64( 0, 0x0000000080000000 );
msg[1] = m512_const1_i128( 0x0000000080000000 );
rnd512_4way( state, msg );
}
else
{
// empty pad block
msg[0] = m512_const2_64( 0, 0x0000000080000000 );
msg[0] = m512_const1_i128( 0x0000000080000000 );
msg[1] = m512_zero;
rnd512_4way( state, msg );
}
@@ -631,13 +660,13 @@ int luffa_4way_update_close( luffa_4way_context *state,
{
// padding of partial block
msg[0] = _mm512_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = m512_const2_64( 0, 0x0000000080000000 );
msg[1] = m512_const1_i128( 0x0000000080000000 );
rnd512_4way( state, msg );
}
else
{
// empty pad block
msg[0] = m512_const2_64( 0, 0x0000000080000000 );
msg[0] = m512_const1_i128( 0x0000000080000000 );
msg[1] = m512_zero;
rnd512_4way( state, msg );
}
@@ -666,8 +695,6 @@ do { \
a1 = _mm256_or_si256( _mm256_srli_si256(a1,4), _mm256_slli_si256(b,12) ); \
} while(0)
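/* Same multiply-by-2 step as MULT24W above, for the 2 way (AVX2) code;
   _mm256_srli_si256/_mm256_slli_si256 shift bytes within each 128-bit
   lane. */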

// confirm pointer arithmetic
// ok but use array indexes
#define STEP_PART(x,c0,c1,t)\
SUBCRUMB(*x,*(x+1),*(x+2),*(x+3),*t);\
SUBCRUMB(*(x+5),*(x+6),*(x+7),*(x+4),*t);\
@@ -678,23 +705,23 @@ do { \
ADD_CONSTANT(*x, *(x+4), c0, c1);

#define SUBCRUMB(a0,a1,a2,a3,t)\
t = _mm256_load_si256(&a0);\
t = a0;\
a0 = _mm256_or_si256(a0,a1);\
a2 = _mm256_xor_si256(a2,a3);\
a1 = _mm256_andnot_si256(a1, m256_neg1 );\
a1 = mm256_not( a1 );\
a0 = _mm256_xor_si256(a0,a3);\
a3 = _mm256_and_si256(a3,t);\
a1 = _mm256_xor_si256(a1,a3);\
a3 = _mm256_xor_si256(a3,a2);\
a2 = _mm256_and_si256(a2,a0);\
a0 = _mm256_andnot_si256(a0, m256_neg1 );\
a0 = mm256_not( a0 );\
a2 = _mm256_xor_si256(a2,a1);\
a1 = _mm256_or_si256(a1,a3);\
t = _mm256_xor_si256(t,a1);\
a3 = _mm256_xor_si256(a3,a2);\
a2 = _mm256_and_si256(a2,a1);\
a1 = _mm256_xor_si256(a1,a0);\
a0 = _mm256_load_si256(&t);\
a0 = t;\

#define MIXWORD(a,b,t1,t2)\
b = _mm256_xor_si256(a,b);\
@@ -832,7 +859,7 @@ void rnd512_2way( luffa_2way_context *state, __m256i *msg )
__m256i msg0, msg1;
__m256i tmp[2];
__m256i x[8];
const __m256i MASK = m256_const2_64( 0, 0x00000000ffffffff );
const __m256i MASK = m256_const1_i128( 0x00000000ffffffff );

t0 = chainv[0];
t1 = chainv[1];
@@ -1088,7 +1115,7 @@ int luffa_2way_update( luffa_2way_context *state, const void *data,
{
// remaining data bytes
buffer[0] = _mm256_shuffle_epi8( vdata[0], shuff_bswap32 );
buffer[1] = m256_const2_64( 0, 0x0000000080000000 );
buffer[1] = m256_const1_i128( 0x0000000080000000 );
}
return 0;
}
@@ -1104,7 +1131,7 @@ int luffa_2way_close( luffa_2way_context *state, void *hashval )
rnd512_2way( state, buffer );
else
{ // empty pad block, constant data
msg[0] = m256_const2_64( 0, 0x0000000080000000 );
msg[0] = m256_const1_i128( 0x0000000080000000 );
msg[1] = m256_zero;
rnd512_2way( state, msg );
}
@@ -1159,13 +1186,13 @@ int luffa512_2way_full( luffa_2way_context *state, void *output,
{
// padding of partial block
msg[0] = _mm256_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = m256_const2_64( 0, 0x0000000080000000 );
msg[1] = m256_const1_i128( 0x0000000080000000 );
rnd512_2way( state, msg );
}
else
{
// empty pad block
msg[0] = m256_const2_64( 0, 0x0000000080000000 );
msg[0] = m256_const1_i128( 0x0000000080000000 );
msg[1] = m256_zero;
rnd512_2way( state, msg );
}
@@ -1206,13 +1233,13 @@ int luffa_2way_update_close( luffa_2way_context *state,
{
// padding of partial block
msg[0] = _mm256_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = m256_const2_64( 0, 0x0000000080000000 );
msg[1] = m256_const1_i128( 0x0000000080000000 );
rnd512_2way( state, msg );
}
else
{
// empty pad block
msg[0] = m256_const2_64( 0, 0x0000000080000000 );
msg[0] = m256_const1_i128( 0x0000000080000000 );
msg[1] = m256_zero;
rnd512_2way( state, msg );
}
@@ -23,7 +23,7 @@
#include "simd-utils.h"
#include "luffa_for_sse2.h"

#define MULT2(a0,a1) do \
#define MULT2( a0, a1 ) do \
{ \
__m128i b = _mm_xor_si128( a0, _mm_shuffle_epi32( _mm_and_si128(a1,MASK), 16 ) ); \
a0 = _mm_or_si128( _mm_srli_si128(b,4), _mm_slli_si128(a1,12) ); \
@@ -345,11 +345,11 @@ HashReturn update_and_final_luffa( hashState_luffa *state, BitSequence* output,
// 16 byte partial block exists for 80 byte len
if ( state->rembytes )
// padding of partial block
rnd512( state, m128_const_64( 0, 0x80000000 ),
rnd512( state, m128_const_i128( 0x80000000 ),
mm128_bswap_32( cast_m128i( data ) ) );
else
// empty pad block
rnd512( state, m128_zero, m128_const_64( 0, 0x80000000 ) );
rnd512( state, m128_zero, m128_const_i128( 0x80000000 ) );

finalization512( state, (uint32*) output );
if ( state->hashbitlen > 512 )
@@ -394,11 +394,11 @@ int luffa_full( hashState_luffa *state, BitSequence* output, int hashbitlen,
// 16 byte partial block exists for 80 byte len
if ( state->rembytes )
// padding of partial block
rnd512( state, m128_const_64( 0, 0x80000000 ),
rnd512( state, m128_const_i128( 0x80000000 ),
mm128_bswap_32( cast_m128i( data ) ) );
else
// empty pad block
rnd512( state, m128_zero, m128_const_64( 0, 0x80000000 ) );
rnd512( state, m128_zero, m128_const_i128( 0x80000000 ) );

finalization512( state, (uint32*) output );
if ( state->hashbitlen > 512 )
@@ -606,7 +606,6 @@ static void finalization512( hashState_luffa *state, uint32 *b )

casti_m256i( b, 0 ) = _mm256_shuffle_epi8(
casti_m256i( hash, 0 ), shuff_bswap32 );
// casti_m256i( b, 0 ) = mm256_bswap_32( casti_m256i( hash, 0 ) );

rnd512( state, zero, zero );

@@ -621,7 +620,6 @@ static void finalization512( hashState_luffa *state, uint32 *b )

casti_m256i( b, 1 ) = _mm256_shuffle_epi8(
casti_m256i( hash, 0 ), shuff_bswap32 );
// casti_m256i( b, 1 ) = mm256_bswap_32( casti_m256i( hash, 0 ) );
}

#else
@@ -16,7 +16,7 @@
|
||||
typedef struct {
|
||||
blake256_16way_context blake;
|
||||
keccak256_8way_context keccak;
|
||||
cube_4way_context cube;
|
||||
cube_4way_2buf_context cube;
|
||||
skein256_8way_context skein;
|
||||
#if defined(__VAES__)
|
||||
groestl256_4way_context groestl;
|
||||
@@ -30,13 +30,7 @@ static __thread allium_16way_ctx_holder allium_16way_ctx;
|
||||
bool init_allium_16way_ctx()
|
||||
{
|
||||
keccak256_8way_init( &allium_16way_ctx.keccak );
|
||||
cube_4way_init( &allium_16way_ctx.cube, 256, 16, 32 );
|
||||
skein256_8way_init( &allium_16way_ctx.skein );
|
||||
#if defined(__VAES__)
|
||||
groestl256_4way_init( &allium_16way_ctx.groestl, 32 );
|
||||
#else
|
||||
init_groestl256( &allium_16way_ctx.groestl, 32 );
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -75,7 +69,6 @@ void allium_16way_hash( void *state, const void *input )
|
||||
intrlv_8x64( vhashB, hash8, hash9, hash10, hash11, hash12, hash13, hash14,
|
||||
hash15, 256 );
|
||||
|
||||
// rintrlv_8x32_8x64( vhashA, vhash, 256 );
|
||||
keccak256_8way_update( &ctx.keccak, vhashA, 32 );
|
||||
keccak256_8way_close( &ctx.keccak, vhashA);
|
||||
keccak256_8way_init( &ctx.keccak );
|
||||
@@ -111,12 +104,11 @@ void allium_16way_hash( void *state, const void *input )
|
||||
intrlv_2x256( vhash, hash14, hash15, 256 );
|
||||
LYRA2RE_2WAY( vhash, 32, vhash, 32, 1, 8, 8 );
|
||||
dintrlv_2x256( hash14, hash15, vhash, 256 );
|
||||
|
||||
|
||||
intrlv_4x128( vhashA, hash0, hash1, hash2, hash3, 256 );
|
||||
intrlv_4x128( vhashB, hash4, hash5, hash6, hash7, 256 );
|
||||
|
||||
cube_4way_full( &ctx.cube, vhashA, 256, vhashA, 32 );
|
||||
cube_4way_full( &ctx.cube, vhashB, 256, vhashB, 32 );
|
||||
cube_4way_2buf_full( &ctx.cube, vhashA, vhashB, 256, vhashA, vhashB, 32 );
|
||||
|
||||
dintrlv_4x128( hash0, hash1, hash2, hash3, vhashA, 256 );
|
||||
dintrlv_4x128( hash4, hash5, hash6, hash7, vhashB, 256 );
|
||||
@@ -124,8 +116,7 @@ void allium_16way_hash( void *state, const void *input )
|
||||
intrlv_4x128( vhashA, hash8, hash9, hash10, hash11, 256 );
|
||||
intrlv_4x128( vhashB, hash12, hash13, hash14, hash15, 256 );
|
||||
|
||||
cube_4way_full( &ctx.cube, vhashA, 256, vhashA, 32 );
|
||||
cube_4way_full( &ctx.cube, vhashB, 256, vhashB, 32 );
|
||||
cube_4way_2buf_full( &ctx.cube, vhashA, vhashB, 256, vhashA, vhashB, 32 );
|
||||
|
||||
dintrlv_4x128( hash8, hash9, hash10, hash11, vhashA, 256 );
|
||||
dintrlv_4x128( hash12, hash13, hash14, hash15, vhashB, 256 );
|
||||
@@ -174,24 +165,19 @@ void allium_16way_hash( void *state, const void *input )
|
||||
#if defined(__VAES__)
|
||||
|
||||
intrlv_4x128( vhash, hash0, hash1, hash2, hash3, 256 );
|
||||
|
||||
groestl256_4way_full( &ctx.groestl, vhash, vhash, 256 );
|
||||
|
||||
groestl256_4way_full( &ctx.groestl, vhash, vhash, 32 );
|
||||
dintrlv_4x128( state, state+32, state+64, state+96, vhash, 256 );
|
||||
|
||||
intrlv_4x128( vhash, hash4, hash5, hash6, hash7, 256 );
|
||||
|
||||
groestl256_4way_full( &ctx.groestl, vhash, vhash, 256 );
|
||||
|
||||
groestl256_4way_full( &ctx.groestl, vhash, vhash, 32 );
|
||||
dintrlv_4x128( state+128, state+160, state+192, state+224, vhash, 256 );
|
||||
|
||||
intrlv_4x128( vhash, hash8, hash9, hash10, hash11, 256 );
|
||||
|
||||
groestl256_4way_full( &ctx.groestl, vhash, vhash, 256 );
|
||||
|
||||
groestl256_4way_full( &ctx.groestl, vhash, vhash, 32 );
|
||||
dintrlv_4x128( state+256, state+288, state+320, state+352, vhash, 256 );
|
||||
intrlv_4x128( vhash, hash12, hash13, hash14, hash15, 256 );
|
||||
|
||||
groestl256_4way_full( &ctx.groestl, vhash, vhash, 256 );
|
||||
|
||||
intrlv_4x128( vhash, hash12, hash13, hash14, hash15, 256 );
|
||||
groestl256_4way_full( &ctx.groestl, vhash, vhash, 32 );
|
||||
dintrlv_4x128( state+384, state+416, state+448, state+480, vhash, 256 );
|
||||
|
||||
#else
|
||||
@@ -245,7 +231,7 @@ int scanhash_allium_16way( struct work *work, uint32_t max_nonce,
|
||||
if ( unlikely( valid_hash( hash+(lane<<3), ptarget ) && !bench ) )
|
||||
{
|
||||
pdata[19] = bswap_32( n + lane );
|
||||
submit_lane_solution( work, hash+(lane<<3), mythr, lane );
|
||||
submit_solution( work, hash+(lane<<3), mythr );
|
||||
}
|
||||
*noncev = _mm512_add_epi32( *noncev, m512_const1_32( 16 ) );
|
||||
n += 16;
|
||||
@@ -260,10 +246,13 @@ int scanhash_allium_16way( struct work *work, uint32_t max_nonce,
|
||||
typedef struct {
|
||||
blake256_8way_context blake;
|
||||
keccak256_4way_context keccak;
|
||||
cubehashParam cube;
|
||||
cube_2way_context cube;
|
||||
skein256_4way_context skein;
|
||||
#if defined(__VAES__)
|
||||
groestl256_2way_context groestl;
|
||||
#else
|
||||
hashState_groestl256 groestl;
|
||||
|
||||
#endif
|
||||
} allium_8way_ctx_holder;
|
||||
|
||||
static __thread allium_8way_ctx_holder allium_8way_ctx;
|
||||
@@ -271,9 +260,7 @@ static __thread allium_8way_ctx_holder allium_8way_ctx;
|
||||
bool init_allium_8way_ctx()
|
||||
{
|
||||
keccak256_4way_init( &allium_8way_ctx.keccak );
|
||||
cubehashInit( &allium_8way_ctx.cube, 256, 16, 32 );
|
||||
skein256_4way_init( &allium_8way_ctx.skein );
|
||||
init_groestl256( &allium_8way_ctx.groestl, 32 );
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -296,7 +283,7 @@ void allium_8way_hash( void *hash, const void *input )
|
||||
blake256_8way_close( &ctx.blake, vhashA );
|
||||
|
||||
dintrlv_8x32( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
|
||||
vhashA, 256 );
|
||||
vhashA, 256 );
|
||||
intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 256 );
|
||||
intrlv_4x64( vhashB, hash4, hash5, hash6, hash7, 256 );
|
||||
|
||||
@@ -318,21 +305,20 @@ void allium_8way_hash( void *hash, const void *input )
    LYRA2RE( hash6, 32, hash6, 32, hash6, 32, 1, 8, 8 );
    LYRA2RE( hash7, 32, hash7, 32, hash7, 32, 1, 8, 8 );

-   cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*)hash0, 32 );
-   cubehashInit( &ctx.cube, 256, 16, 32 );
-   cubehashUpdateDigest( &ctx.cube, (byte*)hash1, (const byte*)hash1, 32 );
-   cubehashInit( &ctx.cube, 256, 16, 32 );
-   cubehashUpdateDigest( &ctx.cube, (byte*)hash2, (const byte*)hash2, 32 );
-   cubehashInit( &ctx.cube, 256, 16, 32 );
-   cubehashUpdateDigest( &ctx.cube, (byte*)hash3, (const byte*)hash3, 32 );
-   cubehashInit( &ctx.cube, 256, 16, 32 );
-   cubehashUpdateDigest( &ctx.cube, (byte*)hash4, (const byte*)hash4, 32 );
-   cubehashInit( &ctx.cube, 256, 16, 32 );
-   cubehashUpdateDigest( &ctx.cube, (byte*)hash5, (const byte*)hash5, 32 );
-   cubehashInit( &ctx.cube, 256, 16, 32 );
-   cubehashUpdateDigest( &ctx.cube, (byte*)hash6, (const byte*)hash6, 32 );
-   cubehashInit( &ctx.cube, 256, 16, 32 );
-   cubehashUpdateDigest( &ctx.cube, (byte*)hash7, (const byte*)hash7, 32 );
+   intrlv_2x128( vhashA, hash0, hash1, 256 );
+   intrlv_2x128( vhashB, hash2, hash3, 256 );
+   cube_2way_full( &ctx.cube, vhashA, 256, vhashA, 32 );
+   cube_2way_full( &ctx.cube, vhashB, 256, vhashB, 32 );
+   dintrlv_2x128( hash0, hash1, vhashA, 256 );
+   dintrlv_2x128( hash2, hash3, vhashB, 256 );
+
+   intrlv_2x128( vhashA, hash4, hash5, 256 );
+   intrlv_2x128( vhashB, hash6, hash7, 256 );
+   cube_2way_full( &ctx.cube, vhashA, 256, vhashA, 32 );
+   cube_2way_full( &ctx.cube, vhashB, 256, vhashB, 32 );
+   dintrlv_2x128( hash4, hash5, vhashA, 256 );
+   dintrlv_2x128( hash6, hash7, vhashB, 256 );

    LYRA2RE( hash0, 32, hash0, 32, hash0, 32, 1, 8, 8 );
    LYRA2RE( hash1, 32, hash1, 32, hash1, 32, 1, 8, 8 );
@@ -352,9 +338,28 @@ void allium_8way_hash( void *hash, const void *input )
    skein256_4way_update( &ctx.skein, vhashB, 32 );
    skein256_4way_close( &ctx.skein, vhashB );

+#if defined(__VAES__)
+
+   uint64_t vhashC[4*2] __attribute__ ((aligned (64)));
+   uint64_t vhashD[4*2] __attribute__ ((aligned (64)));
+
+   rintrlv_4x64_2x128( vhashC, vhashD, vhashA, 256 );
+   groestl256_2way_full( &ctx.groestl, vhashC, vhashC, 32 );
+   groestl256_2way_full( &ctx.groestl, vhashD, vhashD, 32 );
+   dintrlv_2x128( hash0, hash1, vhashC, 256 );
+   dintrlv_2x128( hash2, hash3, vhashD, 256 );
+
+   rintrlv_4x64_2x128( vhashC, vhashD, vhashB, 256 );
+   groestl256_2way_full( &ctx.groestl, vhashC, vhashC, 32 );
+   groestl256_2way_full( &ctx.groestl, vhashD, vhashD, 32 );
+   dintrlv_2x128( hash4, hash5, vhashC, 256 );
+   dintrlv_2x128( hash6, hash7, vhashD, 256 );
+
+#else
+
    dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, 256 );
    dintrlv_4x64( hash4, hash5, hash6, hash7, vhashB, 256 );

    groestl256_full( &ctx.groestl, hash0, hash0, 256 );
    groestl256_full( &ctx.groestl, hash1, hash1, 256 );
    groestl256_full( &ctx.groestl, hash2, hash2, 256 );
@@ -363,6 +368,8 @@ void allium_8way_hash( void *hash, const void *input )
    groestl256_full( &ctx.groestl, hash5, hash5, 256 );
    groestl256_full( &ctx.groestl, hash6, hash6, 256 );
    groestl256_full( &ctx.groestl, hash7, hash7, 256 );
+
+#endif
 }
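A note on the VAES data movement above, inferred only from the helper names and the de-interleave calls that follow (an assumption, not stated in the commit):

   // vhashA/vhashB hold lanes 0-3 and 4-7 interleaved 4x64 after Skein.
   // groestl256_2way consumes 2 lanes interleaved 128 bits wide, so
   // rintrlv_4x64_2x128 re-interleaves four lanes into two 2-lane vectors:
   // vhashC gets lanes 0,1 and vhashD lanes 2,3, as confirmed by the
   // dintrlv_2x128( hash0, hash1, vhashC, 256 ) call that follows it.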
 int scanhash_allium_8way( struct work *work, uint32_t max_nonce,
@@ -394,7 +401,7 @@ int scanhash_allium_8way( struct work *work, uint32_t max_nonce,
       if ( unlikely( valid_hash( lane_hash, ptarget ) && !bench ) )
       {
          pdata[19] = bswap_32( n + lane );
-         submit_lane_solution( work, lane_hash, mythr, lane );
+         submit_solution( work, lane_hash, mythr );
       }
    }
    n += 8;
@@ -187,7 +187,8 @@ bool register_allium_algo( algo_gate_t* gate )
   gate->scanhash = (void*)&scanhash_allium;
   gate->hash = (void*)&allium_hash;
 #endif
-  gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT | VAES_OPT;
+  gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT
+                      | VAES_OPT;
  opt_target_factor = 256.0;
  return true;
};
@@ -215,9 +216,6 @@ void phi2_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
    size_t t;

    algo_gate.gen_merkle_root( merkle_tree, sctx );
-   // Increment extranonce2
-   for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ );
-   // Assemble block header
    algo_gate.build_block_header( g_work, le32dec( sctx->job.version ),
                   (uint32_t*) sctx->job.prevhash, (uint32_t*) merkle_tree,
                   le32dec( sctx->job.ntime ), le32dec(sctx->job.nbits), NULL );
@@ -225,7 +223,6 @@ void phi2_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
       g_work->data[ 20+t ] = ((uint32_t*)sctx->job.extra)[t];
    }

 bool register_phi2_algo( algo_gate_t* gate )
 {
    gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT | VAES_OPT;
@@ -76,7 +76,7 @@ int scanhash_lyra2h_4way( struct work *work, uint32_t max_nonce,
          && !opt_benchmark )
       {
          pdata[19] = n+i;
-         submit_lane_solution( work, hash+(i<<3), mythr, i );
+         submit_solution( work, hash+(i<<3), mythr );
       }
    n += 4;
    } while ( (n < max_nonce-4) && !work_restart[thr_id].restart);
@@ -200,7 +200,7 @@ int scanhash_lyra2rev2_16way( struct work *work, const uint32_t max_nonce,
       if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
       {
          pdata[19] = bswap_32( n + lane );
-         submit_lane_solution( work, lane_hash, mythr, lane );
+         submit_solution( work, lane_hash, mythr );
       }
    }
    *noncev = _mm512_add_epi32( *noncev, m512_const1_32( 16 ) );
@@ -342,7 +342,7 @@ int scanhash_lyra2rev2_8way( struct work *work, const uint32_t max_nonce,
       if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
       {
          pdata[19] = bswap_32( n + lane );
-         submit_lane_solution( work, lane_hash, mythr, lane );
+         submit_solution( work, lane_hash, mythr );
       }
    }
    *noncev = _mm256_add_epi32( *noncev, m256_const1_32( 8 ) );
@@ -469,7 +469,7 @@ int scanhash_lyra2rev2_4way( struct work *work, uint32_t max_nonce,
       if ( valid_hash( lane_hash, ptarget ) && !opt_benchmark )
       {
          pdata[19] = n + lane;
-         submit_lane_solution( work, lane_hash, mythr, lane );
+         submit_solution( work, lane_hash, mythr );
       }
    }
    n += 4;
@@ -165,7 +165,7 @@ int scanhash_lyra2rev3_16way( struct work *work, const uint32_t max_nonce,
       if ( likely( valid_hash( lane_hash, ptarget ) && !opt_benchmark ) )
       {
          pdata[19] = n + lane;
-         submit_lane_solution( work, lane_hash, mythr, lane );
+         submit_solution( work, lane_hash, mythr );
       }
    }
    n += 16;
@@ -284,7 +284,7 @@ int scanhash_lyra2rev3_8way( struct work *work, const uint32_t max_nonce,
       if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
       {
          pdata[19] = bswap_32( n + lane );
-         submit_lane_solution( work, lane_hash, mythr, lane );
+         submit_solution( work, lane_hash, mythr );
       }
    }
    *noncev = _mm256_add_epi32( *noncev, m256_const1_32( 8 ) );
@@ -386,7 +386,7 @@ int scanhash_lyra2rev3_4way( struct work *work, const uint32_t max_nonce,
       if ( valid_hash( lane_hash, ptarget ) && !opt_benchmark )
       {
          pdata[19] = bswap_32( n + lane );
-         submit_lane_solution( work, lane_hash, mythr, lane );
+         submit_solution( work, lane_hash, mythr );
       }
    }
    *noncev = _mm_add_epi32( *noncev, m128_const1_32( 4 ) );
@@ -49,7 +49,7 @@ void lyra2z_16way_hash( void *state, const void *input )

    dintrlv_16x32( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
                   hash8, hash9, hash10, hash11 ,hash12, hash13, hash14, hash15,
                   vhash, 256 );

    intrlv_2x256( vhash, hash0, hash1, 256 );
    LYRA2Z_2WAY( lyra2z_16way_matrix, vhash, 32, vhash, 32, 8, 8, 8 );
@@ -124,7 +124,7 @@ int scanhash_lyra2z_16way( struct work *work, uint32_t max_nonce,
       if ( unlikely( valid_hash( lane_hash, ptarget ) && !bench ) )
       {
          pdata[19] = bswap_32( n + lane );
-         submit_lane_solution( work, lane_hash, mythr, lane );
+         submit_solution( work, lane_hash, mythr );
       }
    }
    *noncev = _mm512_add_epi32( *noncev, m512_const1_32( 16 ) );
@@ -222,7 +222,7 @@ int scanhash_lyra2z_8way( struct work *work, uint32_t max_nonce,
       if ( unlikely( valid_hash( lane_hash, ptarget ) && !bench ) )
       {
          pdata[19] = bswap_32( n + lane );
-         submit_lane_solution( work, lane_hash, mythr, lane );
+         submit_solution( work, lane_hash, mythr );
       }
    }
    *noncev = _mm256_add_epi32( *noncev, m256_const1_32( 8 ) );
@@ -301,7 +301,7 @@ int scanhash_lyra2z_4way( struct work *work, uint32_t max_nonce,
       if ( unlikely( valid_hash( lane_hash, ptarget ) && !bench ) )
       {
          pdata[19] = bswap_32( n + lane );
-         submit_lane_solution( work, lane_hash, mythr, lane );
+         submit_solution( work, lane_hash, mythr );
       }
    }
    *noncev = _mm_add_epi32( *noncev, m128_const1_32( 4 ) );
@@ -68,7 +68,7 @@ bool lyra2z330_thread_init()

 bool register_lyra2z330_algo( algo_gate_t* gate )
 {
-   gate->optimizations = SSE42_OPT | AVX2_OPT;
+   gate->optimizations = SSE2_OPT | AVX2_OPT;
    gate->miner_thread_init = (void*)&lyra2z330_thread_init;
    gate->scanhash = (void*)&scanhash_lyra2z330;
    gate->hash = (void*)&lyra2z330_hash;
@@ -4,7 +4,7 @@
 #include "algo/gost/sph_gost.h"
 #include "algo/cubehash/cubehash_sse2.h"
 #include "lyra2.h"
-#if defined(__VAES__)
+#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
   #include "algo/echo/echo-hash-4way.h"
 #elif defined(__AES__)
   #include "algo/echo/aes_ni/hash_api.h"
@@ -302,7 +302,7 @@ int scanhash_phi2_8way( struct work *work, uint32_t max_nonce,
          if ( valid_hash( lane_hash, ptarget ) )
          {
             be32enc( pdata + 19, n + lane );
-            submit_lane_solution( work, lane_hash, mythr, lane );
+            submit_solution( work, lane_hash, mythr );
          }
       }
    n += 8;
@@ -483,7 +483,7 @@ int scanhash_phi2_4way( struct work *work, uint32_t max_nonce,
          if ( valid_hash( lane_hash, ptarget ) )
          {
             be32enc( pdata + 19, n + lane );
-            submit_lane_solution( work, lane_hash, mythr, lane );
+            submit_solution( work, lane_hash, mythr );
          }
       }
    edata[ 19 ] += 4;
@@ -66,13 +66,13 @@ static inline uint64_t rotr64( const uint64_t w, const unsigned c ){

 #define LYRA_ROUND_2WAY_AVX512( s0, s1, s2, s3 ) \
    G2W_4X64( s0, s1, s2, s3 ); \
-   s1 = mm512_ror256_64( s1); \
+   s3 = mm512_shufll256_64( s3 ); \
+   s1 = mm512_shuflr256_64( s1); \
    s2 = mm512_swap256_128( s2 ); \
-   s3 = mm512_rol256_64( s3 ); \
    G2W_4X64( s0, s1, s2, s3 ); \
-   s1 = mm512_rol256_64( s1 ); \
-   s2 = mm512_swap256_128( s2 ); \
-   s3 = mm512_ror256_64( s3 );
+   s3 = mm512_shuflr256_64( s3 ); \
+   s1 = mm512_shufll256_64( s1 ); \
+   s2 = mm512_swap256_128( s2 );

 #define LYRA_12_ROUNDS_2WAY_AVX512( s0, s1, s2, s3 ) \
    LYRA_ROUND_2WAY_AVX512( s0, s1, s2, s3 ) \
@@ -107,13 +107,13 @@ static inline uint64_t rotr64( const uint64_t w, const unsigned c ){

 #define LYRA_ROUND_AVX2( s0, s1, s2, s3 ) \
    G_4X64( s0, s1, s2, s3 ); \
-   s1 = mm256_ror_1x64( s1); \
+   s3 = mm256_shufll_64( s3 ); \
+   s1 = mm256_shuflr_64( s1); \
    s2 = mm256_swap_128( s2 ); \
-   s3 = mm256_rol_1x64( s3 ); \
    G_4X64( s0, s1, s2, s3 ); \
-   s1 = mm256_rol_1x64( s1 ); \
-   s2 = mm256_swap_128( s2 ); \
-   s3 = mm256_ror_1x64( s3 );
+   s3 = mm256_shuflr_64( s3 ); \
+   s1 = mm256_shufll_64( s1 ); \
+   s2 = mm256_swap_128( s2 );

 #define LYRA_12_ROUNDS_AVX2( s0, s1, s2, s3 ) \
    LYRA_ROUND_AVX2( s0, s1, s2, s3 ) \
@@ -148,14 +148,14 @@ static inline uint64_t rotr64( const uint64_t w, const unsigned c ){
 #define LYRA_ROUND_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \
    G_2X64( s0, s2, s4, s6 ); \
    G_2X64( s1, s3, s5, s7 ); \
-   mm128_ror256_64( s2, s3 ); \
+   mm128_vrol256_64( s6, s7 ); \
+   mm128_vror256_64( s2, s3 ); \
    mm128_swap256_128( s4, s5 ); \
-   mm128_rol256_64( s6, s7 ); \
    G_2X64( s0, s2, s4, s6 ); \
    G_2X64( s1, s3, s5, s7 ); \
-   mm128_rol256_64( s2, s3 ); \
-   mm128_swap256_128( s4, s5 ); \
-   mm128_ror256_64( s6, s7 );
+   mm128_vror256_64( s6, s7 ); \
+   mm128_vrol256_64( s2, s3 ); \
+   mm128_swap256_128( s4, s5 );

 #define LYRA_12_ROUNDS_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \
    LYRA_ROUND_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \
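The G_2X64 / G_4X64 / G2W_4X64 macros referenced above vectorize Blake2b's G mixing function, which Lyra2's sponge reuses; the renames in these hunks (ror/rol to shuflr/shufll) relabel what are lane shuffles rather than bit rotations. For reference, a scalar sketch of G with the standard Blake2b rotation constants (a sketch, not taken from this diff; rotr64 is defined in the same file):

   #define G_SCALAR( a, b, c, d ) \
      a += b; d = rotr64( d ^ a, 32 ); \
      c += d; b = rotr64( b ^ c, 24 ); \
      a += b; d = rotr64( d ^ a, 16 ); \
      c += d; b = rotr64( b ^ c, 63 );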
@@ -12,8 +12,8 @@
 #include "algo/tiger/sph_tiger.h"
 #include "algo/whirlpool/sph_whirlpool.h"
 #include "algo/ripemd/sph_ripemd.h"
-#include <openssl/sha.h>
+#include "algo/sha/sph_sha2.h"
+#include "algo/sha/sha256-hash.h"

 #define EPSa DBL_EPSILON
 #define EPS1 DBL_EPSILON
@@ -105,8 +105,8 @@ uint32_t sw2_( int nnounce )
 }

 typedef struct {
-   SHA256_CTX sha256;
-   SHA512_CTX sha512;
+   sha256_context sha256;
+   sph_sha512_context sha512;
    sph_keccak512_context keccak;
    sph_whirlpool_context whirlpool;
    sph_haval256_5_context haval;
|
||||
|
||||
void init_m7m_ctx()
|
||||
{
|
||||
SHA256_Init( &m7m_ctx.sha256 );
|
||||
SHA512_Init( &m7m_ctx.sha512 );
|
||||
sha256_ctx_init( &m7m_ctx.sha256 );
|
||||
sph_sha512_init( &m7m_ctx.sha512 );
|
||||
sph_keccak512_init( &m7m_ctx.keccak );
|
||||
sph_whirlpool_init( &m7m_ctx.whirlpool );
|
||||
sph_haval256_5_init( &m7m_ctx.haval );
|
||||
@@ -143,11 +143,10 @@ int scanhash_m7m_hash( struct work* work, uint64_t max_nonce,
    uint32_t hash[8] __attribute__((aligned(64)));
    uint8_t bhash[7][64] __attribute__((aligned(64)));
    uint32_t n = pdata[19] - 1;
-   int thr_id = mythr->id;  // thr_id arg is deprecated
+   int thr_id = mythr->id;
    uint32_t usw_, mpzscale;
    const uint32_t first_nonce = pdata[19];
    char data_str[161], hash_str[65], target_str[65];
-   //uint8_t *bdata = 0;
    uint8_t bdata[8192] __attribute__ ((aligned (64)));
    int i, digits;
    int bytes;
@@ -155,12 +154,11 @@ int scanhash_m7m_hash( struct work* work, uint64_t max_nonce,

    m7m_ctx_holder ctx1, ctx2 __attribute__ ((aligned (64)));
    memcpy( &ctx1, &m7m_ctx, sizeof(m7m_ctx) );
-   SHA256_CTX ctxf_sha256;

    memcpy(data, pdata, 80);

-   SHA256_Update( &ctx1.sha256, data, M7_MIDSTATE_LEN );
-   SHA512_Update( &ctx1.sha512, data, M7_MIDSTATE_LEN );
+   sha256_update( &ctx1.sha256, data, M7_MIDSTATE_LEN );
+   sph_sha512( &ctx1.sha512, data, M7_MIDSTATE_LEN );
    sph_keccak512( &ctx1.keccak, data, M7_MIDSTATE_LEN );
    sph_whirlpool( &ctx1.whirlpool, data, M7_MIDSTATE_LEN );
    sph_haval256_5( &ctx1.haval, data, M7_MIDSTATE_LEN );
@@ -191,11 +189,11 @@ int scanhash_m7m_hash( struct work* work, uint64_t max_nonce,

       memcpy( &ctx2, &ctx1, sizeof(m7m_ctx) );

-      SHA256_Update( &ctx2.sha256, data_p64, 80 - M7_MIDSTATE_LEN );
-      SHA256_Final( (unsigned char*) (bhash[0]), &ctx2.sha256 );
+      sha256_update( &ctx2.sha256, data_p64, 80 - M7_MIDSTATE_LEN );
+      sha256_final( &ctx2.sha256, bhash[0] );

-      SHA512_Update( &ctx2.sha512, data_p64, 80 - M7_MIDSTATE_LEN );
-      SHA512_Final( (unsigned char*) (bhash[1]), &ctx2.sha512 );
+      sph_sha512( &ctx2.sha512, data_p64, 80 - M7_MIDSTATE_LEN );
+      sph_sha512_close( &ctx2.sha512, bhash[1] );

       sph_keccak512( &ctx2.keccak, data_p64, 80 - M7_MIDSTATE_LEN );
       sph_keccak512_close( &ctx2.keccak, (void*)(bhash[2]) );
@@ -227,9 +225,7 @@ int scanhash_m7m_hash( struct work* work, uint64_t max_nonce,
       bytes = mpz_sizeinbase(product, 256);
       mpz_export((void *)bdata, NULL, -1, 1, 0, 0, product);

-      SHA256_Init( &ctxf_sha256 );
-      SHA256_Update( &ctxf_sha256, bdata, bytes );
-      SHA256_Final( (unsigned char*) hash, &ctxf_sha256 );
+      sha256_full( hash, bdata, bytes );

       digits=(int)((sqrt((double)(n/2))*(1.+EPS))/9000+75);
       mp_bitcnt_t prec = (long int)(digits*BITS_PER_DIGIT+16);
@@ -262,18 +258,11 @@ int scanhash_m7m_hash( struct work* work, uint64_t max_nonce,
          mpzscale=bytes;
          mpz_export(bdata, NULL, -1, 1, 0, 0, product);

-         SHA256_Init( &ctxf_sha256 );
-         SHA256_Update( &ctxf_sha256, bdata, bytes );
-         SHA256_Final( (unsigned char*) hash, &ctxf_sha256 );
+         sha256_full( hash, bdata, bytes );
       }

-      if ( unlikely( hash[7] <= ptarget[7] ) )
-      if ( likely( fulltest( hash, ptarget ) && !opt_benchmark ) )
+      if ( unlikely( valid_hash( (uint64_t*)hash, (uint64_t*)ptarget )
+                     && !opt_benchmark ) )
+//    if ( unlikely( hash[7] <= ptarget[7] ) )
+//    if ( likely( fulltest( hash, ptarget ) && !opt_benchmark ) )
       {
          if ( opt_debug )
          {
@@ -311,7 +300,7 @@ bool register_m7m_algo( algo_gate_t *gate )
 {
    gate->optimizations = SHA_OPT;
    init_m7m_ctx();
-   gate->scanhash = (void*)scanhash_m7m_hash;
+   gate->scanhash = (void*)&scanhash_m7m_hash;
    gate->build_stratum_request = (void*)&std_be_build_stratum_request;
    gate->work_decode = (void*)&std_be_work_decode;
    gate->submit_getwork_result = (void*)&std_be_submit_getwork_result;
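The sha256_full / sha256_ctx_init / sha256_update / sha256_final calls introduced above replace the OpenSSL SHA256_Init/Update/Final triples. A sketch of the assumed one-shot semantics, using only names that appear in this diff:

   // Assumed equivalence inferred from the call sites, not the actual
   // implementation:
   void sha256_full( void *digest, const void *data, size_t len )
   {
      sha256_context ctx;
      sha256_ctx_init( &ctx );
      sha256_update( &ctx, data, len );
      sha256_final( &ctx, digest );
   }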
|
@@ -108,7 +108,7 @@ int scanhash_nist5_8way( struct work *work, uint32_t max_nonce,
|
||||
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
|
||||
{
|
||||
pdata[19] = n + lane;
|
||||
submit_lane_solution( work, lane_hash, mythr, lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
n += 8;
|
||||
@@ -196,7 +196,7 @@ int scanhash_nist5_4way( struct work *work, uint32_t max_nonce,
|
||||
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
|
||||
{
|
||||
pdata[19] = n + lane;
|
||||
submit_lane_solution( work, lane_hash, mythr, lane );
|
||||
submit_solution( work, lane_hash, mythr );
|
||||
}
|
||||
}
|
||||
n += 4;
|
||||
|
@@ -156,6 +156,8 @@ int scanhash_zr5( struct work *work, uint32_t max_nonce,
 void zr5_get_new_work( struct work* work, struct work* g_work, int thr_id,
                        uint32_t* end_nonce_ptr )
 {
+//   pthread_rwlock_rdlock( &g_work_lock );
+
    // ignore POK in first word
    const int wkcmp_sz = 72;   // (19-1) * sizeof(uint32_t)
    uint32_t *nonceptr = work->data + algo_gate.nonce_index;
@@ -171,6 +173,8 @@ void zr5_get_new_work( struct work* work, struct work* g_work, int thr_id,
    }
    else
       ++(*nonceptr);
+
+//   pthread_rwlock_unlock( &g_work_lock );
 }

 void zr5_display_pok( struct work* work )
@@ -312,10 +312,26 @@ do { \
    BUPDATE1_8W( 7, 1 ); \
 } while (0)

+#if defined(__AVX512VL__)
+
+#define GAMMA_8W(n0, n1, n2, n4) \
+   ( g ## n0 = _mm256_ternarylogic_epi32( a ## n0, a ## n2, a ## n1, 0x4b ) )
+
+#define THETA_8W(n0, n1, n2, n4) \
+   ( g ## n0 = mm256_xor3( a ## n0, a ## n1, a ## n4 ) )
+
+#else
+
 #define GAMMA_8W(n0, n1, n2, n4) \
    (g ## n0 = _mm256_xor_si256( a ## n0, \
                _mm256_or_si256( a ## n1, mm256_not( a ## n2 ) ) ) )

+#define THETA_8W(n0, n1, n2, n4) \
+   ( g ## n0 = _mm256_xor_si256( a ## n0, _mm256_xor_si256( a ## n1, \
+                                                            a ## n4 ) ) )
+
+#endif
+
 #define PI_ALL_8W do { \
    a0  = g0; \
    a1  = mm256_rol_32( g7, 1 ); \
@@ -336,9 +352,6 @@ do { \
    a16 = mm256_rol_32( g10, 8 ); \
 } while (0)

-#define THETA_8W(n0, n1, n2, n4) \
-   ( g ## n0 = _mm256_xor_si256( a ## n0, _mm256_xor_si256( a ## n1, \
-                                                            a ## n4 ) ) )
-
 #define SIGMA_ALL_8W do { \
    a0 = _mm256_xor_si256( g0, m256_one_32 ); \
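The AVX512VL GAMMA_8W above folds g = a ^ (b | ~c) into a single _mm256_ternarylogic_epi32. With operands passed as ( a##n0, a##n2, a##n1 ), result bit i of the truth table is indexed by (A<<2 | B<<1 | C), and the 0x4b immediate can be verified with a standalone scalar check (not from the commit):

   #include <assert.h>
   static void check_ternlog_0x4b( void )
   {
      unsigned imm = 0;
      // Enumerate A ^ ( C | !B ) over all 8 operand combinations.
      for ( int A = 0; A < 2; A++ )
      for ( int B = 0; B < 2; B++ )
      for ( int C = 0; C < 2; C++ )
         imm |= ( ( A ^ ( C | !B ) ) & 1u ) << ( A*4 + B*2 + C );
      assert( imm == 0x4b );   // 0b01001011
   }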
@@ -223,7 +223,7 @@ int scanhash_anime_8way( struct work *work, uint32_t max_nonce,
         if ( valid_hash( lane_hash, ptarget ) )
         {
            pdata[19] = bswap_32( n + lane );
-           submit_lane_solution( work, lane_hash, mythr, lane );
+           submit_solution( work, lane_hash, mythr );
         }
      }
      *noncev = _mm512_add_epi32( *noncev,
@@ -383,7 +383,7 @@ int scanhash_anime_4way( struct work *work, uint32_t max_nonce,
         if ( valid_hash( lane_hash, ptarget ) )
         {
            pdata[19] = bswap_32( n + lane );
-           submit_lane_solution( work, lane_hash, mythr, lane );
+           submit_solution( work, lane_hash, mythr );
         }
      }
      *noncev = _mm256_add_epi32( *noncev,
@@ -16,7 +16,7 @@
 #include "algo/simd/simd-hash-2way.h"
 #include "algo/echo/aes_ni/hash_api.h"
 #include "algo/hamsi/hamsi-hash-4way.h"
-#include "algo/fugue/sph_fugue.h"
+#include "algo/fugue/fugue-aesni.h"
 #include "algo/shabal/shabal-hash-4way.h"
 #include "algo/whirlpool/sph_whirlpool.h"
 #include "algo/haval/haval-hash-4way.h"
@@ -40,7 +40,7 @@ union _hmq1725_8way_context_overlay
    cube_4way_context cube;
    simd_4way_context simd;
    hamsi512_8way_context hamsi;
-   sph_fugue512_context fugue;
+   hashState_fugue fugue;
    shabal512_8way_context shabal;
    sph_whirlpool_context whirlpool;
    sha512_8way_context sha512;
@@ -363,14 +363,14 @@ extern void hmq1725_8way_hash(void *state, const void *input)
    dintrlv_8x64_512( hash0, hash1, hash2, hash3,
                      hash4, hash5, hash6, hash7, vhash );

-   sph_fugue512_full( &ctx.fugue, hash0, hash0, 64 );
-   sph_fugue512_full( &ctx.fugue, hash1, hash1, 64 );
-   sph_fugue512_full( &ctx.fugue, hash2, hash2, 64 );
-   sph_fugue512_full( &ctx.fugue, hash3, hash3, 64 );
-   sph_fugue512_full( &ctx.fugue, hash4, hash4, 64 );
-   sph_fugue512_full( &ctx.fugue, hash5, hash5, 64 );
-   sph_fugue512_full( &ctx.fugue, hash6, hash6, 64 );
-   sph_fugue512_full( &ctx.fugue, hash7, hash7, 64 );
+   fugue512_full( &ctx.fugue, hash0, hash0, 64 );
+   fugue512_full( &ctx.fugue, hash1, hash1, 64 );
+   fugue512_full( &ctx.fugue, hash2, hash2, 64 );
+   fugue512_full( &ctx.fugue, hash3, hash3, 64 );
+   fugue512_full( &ctx.fugue, hash4, hash4, 64 );
+   fugue512_full( &ctx.fugue, hash5, hash5, 64 );
+   fugue512_full( &ctx.fugue, hash6, hash6, 64 );
+   fugue512_full( &ctx.fugue, hash7, hash7, 64 );

    intrlv_8x64_512( vhash, hash0, hash1, hash2, hash3,
                     hash4, hash5, hash6, hash7 );
@@ -459,21 +459,21 @@ extern void hmq1725_8way_hash(void *state, const void *input)
                           m512_zero );

    if ( hash0[0] & mask )
-      sph_fugue512_full( &ctx.fugue, hash0, hash0, 64 );
+      fugue512_full( &ctx.fugue, hash0, hash0, 64 );
    if ( hash1[0] & mask )
-      sph_fugue512_full( &ctx.fugue, hash1, hash1, 64 );
+      fugue512_full( &ctx.fugue, hash1, hash1, 64 );
    if ( hash2[0] & mask )
-      sph_fugue512_full( &ctx.fugue, hash2, hash2, 64 );
+      fugue512_full( &ctx.fugue, hash2, hash2, 64 );
    if ( hash3[0] & mask )
-      sph_fugue512_full( &ctx.fugue, hash3, hash3, 64 );
+      fugue512_full( &ctx.fugue, hash3, hash3, 64 );
    if ( hash4[0] & mask )
-      sph_fugue512_full( &ctx.fugue, hash4, hash4, 64 );
+      fugue512_full( &ctx.fugue, hash4, hash4, 64 );
    if ( hash5[0] & mask )
-      sph_fugue512_full( &ctx.fugue, hash5, hash5, 64 );
+      fugue512_full( &ctx.fugue, hash5, hash5, 64 );
    if ( hash6[0] & mask )
-      sph_fugue512_full( &ctx.fugue, hash6, hash6, 64 );
+      fugue512_full( &ctx.fugue, hash6, hash6, 64 );
    if ( hash7[0] & mask )
-      sph_fugue512_full( &ctx.fugue, hash7, hash7, 64 );
+      fugue512_full( &ctx.fugue, hash7, hash7, 64 );

    intrlv_8x64_512( vhashA, hash0, hash1, hash2, hash3,
                     hash4, hash5, hash6, hash7 );
@@ -596,7 +596,7 @@ int scanhash_hmq1725_8way( struct work *work, uint32_t max_nonce,
         if ( valid_hash( lane_hash, ptarget ) )
         {
            pdata[19] = bswap_32( n + lane );
-           submit_lane_solution( work, lane_hash, mythr, lane );
+           submit_solution( work, lane_hash, mythr );
         }
      }
      *noncev = _mm512_add_epi32( *noncev,
@@ -628,7 +628,7 @@ union _hmq1725_4way_context_overlay
    simd_2way_context simd;
    hashState_echo echo;
    hamsi512_4way_context hamsi;
-   sph_fugue512_context fugue;
+   hashState_fugue fugue;
    shabal512_4way_context shabal;
    sph_whirlpool_context whirlpool;
    sha512_4way_context sha512;
@@ -846,10 +846,10 @@ extern void hmq1725_4way_hash(void *state, const void *input)

    dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );

-   sph_fugue512_full( &ctx.fugue, hash0, hash0, 64 );
-   sph_fugue512_full( &ctx.fugue, hash1, hash1, 64 );
-   sph_fugue512_full( &ctx.fugue, hash2, hash2, 64 );
-   sph_fugue512_full( &ctx.fugue, hash3, hash3, 64 );
+   fugue512_full( &ctx.fugue, hash0, hash0, 64 );
+   fugue512_full( &ctx.fugue, hash1, hash1, 64 );
+   fugue512_full( &ctx.fugue, hash2, hash2, 64 );
+   fugue512_full( &ctx.fugue, hash3, hash3, 64 );

    // In this situation serial simd seems to be faster.

@@ -912,7 +912,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
    sph_whirlpool512_full( &ctx.whirlpool, hash2, hash2, 64 );
    sph_whirlpool512_full( &ctx.whirlpool, hash3, hash3, 64 );

-   // A = fugue serial, B = sha512 prarallel
+   // A = fugue serial, B = sha512 parallel

    intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );

@@ -920,13 +920,13 @@ extern void hmq1725_4way_hash(void *state, const void *input)
    h_mask = _mm256_movemask_epi8( vh_mask );

    if ( hash0[0] & mask )
-      sph_fugue512_full( &ctx.fugue, hash0, hash0, 64 );
+      fugue512_full( &ctx.fugue, hash0, hash0, 64 );
    if ( hash1[0] & mask )
-      sph_fugue512_full( &ctx.fugue, hash1, hash1, 64 );
+      fugue512_full( &ctx.fugue, hash1, hash1, 64 );
    if ( hash2[0] & mask )
-      sph_fugue512_full( &ctx.fugue, hash2, hash2, 64 );
+      fugue512_full( &ctx.fugue, hash2, hash2, 64 );
    if ( hash3[0] & mask )
-      sph_fugue512_full( &ctx.fugue, hash3, hash3, 64 );
+      fugue512_full( &ctx.fugue, hash3, hash3, 64 );

    intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );

@@ -1018,7 +1018,7 @@ int scanhash_hmq1725_4way( struct work *work, uint32_t max_nonce,
         if ( valid_hash( lane_hash, ptarget ) )
         {
            pdata[19] = bswap_32( n + lane );
-           submit_lane_solution( work, lane_hash, mythr, lane );
+           submit_solution( work, lane_hash, mythr );
         }
      }
      *noncev = _mm256_add_epi32( *noncev,
@@ -17,13 +17,15 @@
 #include "algo/shabal/sph_shabal.h"
 #include "algo/whirlpool/sph_whirlpool.h"
 #include "algo/haval/sph-haval.h"
-#include <openssl/sha.h>
+#include "algo/sha/sph_sha2.h"
 #if defined(__AES__)
   #include "algo/groestl/aes_ni/hash-groestl.h"
   #include "algo/echo/aes_ni/hash_api.h"
+  #include "algo/fugue/fugue-aesni.h"
 #else
   #include "algo/groestl/sph_groestl.h"
   #include "algo/echo/sph_echo.h"
+  #include "algo/fugue/sph_fugue.h"
 #endif
 #include "algo/luffa/luffa_for_sse2.h"
 #include "algo/cubehash/cubehash_sse2.h"
@@ -40,17 +42,18 @@ typedef struct {
    sph_shavite512_context shavite1, shavite2;
    hashState_sd simd1, simd2;
    sph_hamsi512_context hamsi1;
-   sph_fugue512_context fugue1, fugue2;
    sph_shabal512_context shabal1;
    sph_whirlpool_context whirlpool1, whirlpool2, whirlpool3, whirlpool4;
-   SHA512_CTX sha1, sha2;
+   sph_sha512_context sha1, sha2;
    sph_haval256_5_context haval1, haval2;
 #if defined(__AES__)
    hashState_echo echo1, echo2;
    hashState_groestl groestl1, groestl2;
+   hashState_fugue fugue1, fugue2;
 #else
    sph_groestl512_context groestl1, groestl2;
    sph_echo512_context echo1, echo2;
+   sph_fugue512_context fugue1, fugue2;
 #endif
 } hmq1725_ctx_holder;

@@ -88,8 +91,13 @@ void init_hmq1725_ctx()

    sph_hamsi512_init(&hmq1725_ctx.hamsi1);

+#if defined(__AES__)
+   fugue512_Init( &hmq1725_ctx.fugue1, 512 );
+   fugue512_Init( &hmq1725_ctx.fugue2, 512 );
+#else
    sph_fugue512_init(&hmq1725_ctx.fugue1);
    sph_fugue512_init(&hmq1725_ctx.fugue2);
+#endif

    sph_shabal512_init(&hmq1725_ctx.shabal1);

@@ -98,8 +106,8 @@ void init_hmq1725_ctx()
    sph_whirlpool_init(&hmq1725_ctx.whirlpool3);
    sph_whirlpool_init(&hmq1725_ctx.whirlpool4);

-   SHA512_Init( &hmq1725_ctx.sha1 );
-   SHA512_Init( &hmq1725_ctx.sha2 );
+   sph_sha512_init( &hmq1725_ctx.sha1 );
+   sph_sha512_init( &hmq1725_ctx.sha2 );

    sph_haval256_5_init(&hmq1725_ctx.haval1);
    sph_haval256_5_init(&hmq1725_ctx.haval2);
@@ -235,8 +243,13 @@ extern void hmq1725hash(void *state, const void *input)
    sph_hamsi512 (&h_ctx.hamsi1, hashA, 64);    //3
    sph_hamsi512_close(&h_ctx.hamsi1, hashB);   //4

+#if defined(__AES__)
+   fugue512_Update( &h_ctx.fugue1, hashB, 512 );   //2 ////
+   fugue512_Final( &h_ctx.fugue1, hashA );         //3
+#else
    sph_fugue512 (&h_ctx.fugue1, hashB, 64);    //2 ////
    sph_fugue512_close(&h_ctx.fugue1, hashA);   //3
+#endif

    if ( hashA[0] & mask )   //4
    {
@@ -262,13 +275,18 @@ extern void hmq1725hash(void *state, const void *input)

    if ( hashB[0] & mask )   //7
    {
+#if defined(__AES__)
+      fugue512_Update( &h_ctx.fugue2, hashB, 512 );   //
+      fugue512_Final( &h_ctx.fugue2, hashA );         //8
+#else
       sph_fugue512 (&h_ctx.fugue2, hashB, 64);    //
       sph_fugue512_close(&h_ctx.fugue2, hashA);   //8
+#endif
    }
    else
    {
-      SHA512_Update( &h_ctx.sha1, hashB, 64 );
-      SHA512_Final( (unsigned char*) hashA, &h_ctx.sha1 );
+      sph_sha512( &h_ctx.sha1, hashB, 64 );
+      sph_sha512_close( &h_ctx.sha1, hashA );
    }

 #if defined(__AES__)
@@ -279,8 +297,8 @@ extern void hmq1725hash(void *state, const void *input)
    sph_groestl512_close(&h_ctx.groestl2, hashB);   //4
 #endif

-   SHA512_Update( &h_ctx.sha2, hashB, 64 );
-   SHA512_Final( (unsigned char*) hashA, &h_ctx.sha2 );
+   sph_sha512( &h_ctx.sha2, hashB, 64 );
+   sph_sha512_close( &h_ctx.sha2, hashA );

    if ( hashA[0] & mask )   //4
    {
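fugue512_full is used as a one-shot throughout the 4-way and 8-way paths above. Judging only from the call sites, it takes the length in bytes, while the serial path's fugue512_Update takes bits (512 is passed for the same 64-byte buffer); a sketch of the assumed wrapper:

   // Assumption from call sites only, not the actual implementation:
   void fugue512_full_sketch( hashState_fugue *ctx, void *out,
                              const void *in, size_t len_bytes )
   {
      fugue512_Init( ctx, 512 );
      fugue512_Update( ctx, in, len_bytes * 8 );   // bit count, per hmq1725hash
      fugue512_Final( ctx, out );
   }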
@@ -127,10 +127,8 @@ void quark_8way_hash( void *state, const void *input )

    rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );

-   if ( ( vh_mask & 0x0f ) != 0x0f )
-      groestl512_4way_full( &ctx.groestl, vhashA, vhashA, 64 );
-   if ( ( vh_mask & 0xf0 ) != 0xf0 )
-      groestl512_4way_full( &ctx.groestl, vhashB, vhashB, 64 );
+   groestl512_4way_full( &ctx.groestl, vhashA, vhashA, 64 );
+   groestl512_4way_full( &ctx.groestl, vhashB, vhashB, 64 );

    rintrlv_4x128_8x64( vhash, vhashA, vhashB, 512 );

@@ -139,22 +137,14 @@ void quark_8way_hash( void *state, const void *input )
    dintrlv_8x64( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
                  vhash, 512 );

-   if ( hash0[0] & 8 )
-      groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
-   if ( hash1[0] & 8 )
-      groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
-   if ( hash2[0] & 8)
-      groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
-   if ( hash3[0] & 8 )
-      groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
-   if ( hash4[0] & 8 )
-      groestl512_full( &ctx.groestl, (char*)hash4, (char*)hash4, 512 );
-   if ( hash5[0] & 8 )
-      groestl512_full( &ctx.groestl, (char*)hash5, (char*)hash5, 512 );
-   if ( hash6[0] & 8 )
-      groestl512_full( &ctx.groestl, (char*)hash6, (char*)hash6, 512 );
-   if ( hash7[0] & 8 )
-      groestl512_full( &ctx.groestl, (char*)hash7, (char*)hash7, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash4, (char*)hash4, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash5, (char*)hash5, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash6, (char*)hash6, 512 );
+   groestl512_full( &ctx.groestl, (char*)hash7, (char*)hash7, 512 );

    intrlv_8x64( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
                 512 );
@@ -235,7 +225,7 @@ int scanhash_quark_8way( struct work *work, uint32_t max_nonce,
         if ( valid_hash( lane_hash, ptarget ) )
         {
            pdata[19] = bswap_32( n + lane );
-           submit_lane_solution( work, lane_hash, mythr, lane );
+           submit_solution( work, lane_hash, mythr );
         }
      }
      *noncev = _mm512_add_epi32( *noncev,
@@ -408,7 +398,7 @@ int scanhash_quark_4way( struct work *work, uint32_t max_nonce,
         if ( valid_hash( lane_hash, ptarget ) )
         {
            pdata[19] = bswap_32( n + lane );
-           submit_lane_solution( work, lane_hash, mythr, lane );
+           submit_solution( work, lane_hash, mythr );
         }
      }
      *noncev = _mm256_add_epi32( *noncev,
@@ -106,13 +106,13 @@ int scanhash_deep_2way( struct work *work,uint32_t max_nonce,
      if ( fulltest( hash, ptarget) && !opt_benchmark )
      {
         pdata[19] = n;
-        submit_lane_solution( work, hash, mythr, 0 );
+        submit_solution( work, hash, mythr );
      }
      if ( !( (hash+8)[7] & mask ) )
      if ( fulltest( hash+8, ptarget) && !opt_benchmark )
      {
         pdata[19] = n+1;
-        submit_lane_solution( work, hash+8, mythr, 1 );
+        submit_solution( work, hash+8, mythr );
      }
      n += 2;
   } while ( ( n < max_nonce ) && !work_restart[thr_id].restart );
@@ -153,7 +153,7 @@ int scanhash_qubit_4way( struct work *work,uint32_t max_nonce,
      if ( likely( fulltest( hash+(lane<<3), ptarget) && !opt_benchmark ) )
      {
         pdata[19] = n + lane;
-        submit_lane_solution( work, hash+(lane<<3), mythr, lane );
+        submit_solution( work, hash+(lane<<3), mythr );
      }
   n += 4;
   } while ( ( n < max_nonce-4 ) && !work_restart[thr_id].restart );
@@ -255,13 +255,13 @@ int scanhash_qubit_2way( struct work *work,uint32_t max_nonce,
      if ( likely( fulltest( hash, ptarget) && !opt_benchmark ) )
      {
         pdata[19] = n;
-        submit_lane_solution( work, hash, mythr, 0 );
+        submit_solution( work, hash, mythr );
      }
      if ( unlikely( ( (hash+8))[7] <= Htarg ) )
      if ( likely( fulltest( hash+8, ptarget) && !opt_benchmark ) )
      {
         pdata[19] = n+1;
-        submit_lane_solution( work, hash+8, mythr, 1 );
+        submit_solution( work, hash+8, mythr );
      }
      n += 2;
   } while ( ( n < max_nonce ) && !work_restart[thr_id].restart );
@@ -132,7 +132,7 @@ int scanhash_lbry_16way( struct work *work, uint32_t max_nonce,
      if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
      {
         pdata[27] = n + i;
-        submit_lane_solution( work, lane_hash, mythr, i );
+        submit_solution( work, lane_hash, mythr );
      }
   }
   n += 16;
@@ -251,7 +251,7 @@ int scanhash_lbry_8way( struct work *work, uint32_t max_nonce,
      if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
      {
         pdata[27] = n + i;
-        submit_lane_solution( work, lane_hash, mythr, i );
+        submit_solution( work, lane_hash, mythr );
      }
   }
   n += 8;
@@ -4,7 +4,7 @@
 #include <string.h>
 #include <stdio.h>

-double lbry_calc_network_diff( struct work *work )
+long double lbry_calc_network_diff( struct work *work )
 {
    // sample for diff 43.281 : 1c05ea29
    // todo: endian reversed on longpoll could be zr5 specific...
@@ -12,7 +12,7 @@ double lbry_calc_network_diff( struct work *work )
    uint32_t nbits = swab32( work->data[ LBRY_NBITS_INDEX ] );
    uint32_t bits = (nbits & 0xffffff);
    int16_t shift = (swab32(nbits) & 0xff); // 0x1c = 28
-   double d = (double)0x0000ffff / (double)bits;
+   long double d = (long double)0x0000ffff / (long double)bits;

    for (int m=shift; m < 29; m++) d *= 256.0;
    for (int m=29; m < shift; m++) d /= 256.0;
|
||||
void lbry_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
|
||||
{
|
||||
unsigned char merkle_root[64] = { 0 };
|
||||
size_t t;
|
||||
int i;
|
||||
|
||||
algo_gate.gen_merkle_root( merkle_root, sctx );
|
||||
// Increment extranonce2
|
||||
for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ );
|
||||
// Assemble block header
|
||||
|
||||
memset( g_work->data, 0, sizeof(g_work->data) );
|
||||
g_work->data[0] = le32dec( sctx->job.version );
|
||||
|
@@ -7,28 +7,23 @@
 #include <string.h>
 #include <stdio.h>
 #include "sph_ripemd.h"
-#include <openssl/sha.h>
+#include "algo/sha/sha256-hash.h"

 void lbry_hash(void* output, const void* input)
 {
-   SHA256_CTX ctx_sha256 __attribute__ ((aligned (64)));
-   SHA512_CTX ctx_sha512 __attribute__ ((aligned (64)));
+   sha256_context ctx_sha256 __attribute__ ((aligned (64)));
+   sph_sha512_context ctx_sha512 __attribute__ ((aligned (64)));
    sph_ripemd160_context ctx_ripemd __attribute__ ((aligned (64)));
    uint32_t _ALIGN(64) hashA[16];
    uint32_t _ALIGN(64) hashB[16];
    uint32_t _ALIGN(64) hashC[16];

-   SHA256_Init( &ctx_sha256 );
-   SHA256_Update( &ctx_sha256, input, 112 );
-   SHA256_Final( (unsigned char*) hashA, &ctx_sha256 );
-
-   SHA256_Init( &ctx_sha256 );
-   SHA256_Update( &ctx_sha256, hashA, 32 );
-   SHA256_Final( (unsigned char*) hashA, &ctx_sha256 );
+   sha256_full( hashA, input, 112 );
+   sha256_full( hashA, hashA, 32 );

-   SHA512_Init( &ctx_sha512 );
-   SHA512_Update( &ctx_sha512, hashA, 32 );
-   SHA512_Final( (unsigned char*) hashA, &ctx_sha512 );
+   sph_sha512_init( &ctx_sha512 );
+   sph_sha512( &ctx_sha512, hashA, 32 );
+   sph_sha512_close( &ctx_sha512, hashA );

    sph_ripemd160_init( &ctx_ripemd );
    sph_ripemd160 ( &ctx_ripemd, hashA, 32 );
@@ -38,15 +33,13 @@ void lbry_hash(void* output, const void* input)
    sph_ripemd160 ( &ctx_ripemd, hashA+8, 32 );
    sph_ripemd160_close( &ctx_ripemd, hashC );

-   SHA256_Init( &ctx_sha256 );
-   SHA256_Update( &ctx_sha256, hashB, 20 );
-   SHA256_Update( &ctx_sha256, hashC, 20 );
-   SHA256_Final( (unsigned char*) hashA, &ctx_sha256 );
-
-   SHA256_Init( &ctx_sha256 );
-   SHA256_Update( &ctx_sha256, hashA, 32 );
-   SHA256_Final( (unsigned char*) hashA, &ctx_sha256 );
+   sha256_ctx_init( &ctx_sha256 );
+   sha256_update( &ctx_sha256, hashB, 20 );
+   sha256_update( &ctx_sha256, hashC, 20 );
+   sha256_final( &ctx_sha256, hashA );
+
+   sha256_full( hashA, hashA, 32 );

    memcpy( output, hashA, 32 );
 }
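Condensed, the rewritten lbry_hash computes the same chain as before, only the SHA backend changed (the lines elided between the two hunks close the first RIPEMD leg into hashB):

   // A   = sha256( sha256( input[0..111] ) )
   // A   = sha512( A )                   // 64 bytes
   // B   = ripemd160( A[ 0..31] )        // first sha512 half
   // C   = ripemd160( A[32..63] )        // second sha512 half
   // out = sha256( sha256( B || C ) )    // 20 + 20 byte concatenation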
@@ -35,6 +35,7 @@

 #include "sph_ripemd.h"

+#if 0
 /*
  * Round functions for RIPEMD (original).
  */
@@ -46,6 +47,7 @@ static const sph_u32 oIV[5] = {
    SPH_C32(0x67452301), SPH_C32(0xEFCDAB89),
    SPH_C32(0x98BADCFE), SPH_C32(0x10325476)
 };
+#endif

 /*
  * Round functions for RIPEMD-128 and RIPEMD-160.
@@ -63,6 +65,8 @@ static const sph_u32 IV[5] = {

 #define ROTL    SPH_ROTL32

+#if 0
+
 /* ===================================================================== */
 /*
  * RIPEMD (original hash, deprecated).
@@ -479,7 +483,7 @@ sph_ripemd_comp(const sph_u32 msg[16], sph_u32 val[4])
  * One round of RIPEMD-128.  The data must be aligned for 32-bit access.
  */
 static void
-ripemd128_round(const unsigned char *data, sph_u32 r[5])
+ripemd128_round(const unsigned char *data, sph_u32 r[4])
 {
 #if SPH_LITTLE_FAST

@@ -539,6 +543,8 @@ sph_ripemd128_comp(const sph_u32 msg[16], sph_u32 val[4])
 #undef RIPEMD128_IN
 }

+#endif
+
 /* ===================================================================== */
 /*
  * RIPEMD-160.
@@ -84,6 +84,7 @@
  * can be cloned by copying the context (e.g. with a simple
  * <code>memcpy()</code>).
  */
+#if 0
 typedef struct {
 #ifndef DOXYGEN_IGNORE
    unsigned char buf[64];    /* first field, for alignment */
@@ -204,6 +205,8 @@ void sph_ripemd128_close(void *cc, void *dst);
  */
 void sph_ripemd128_comp(const sph_u32 msg[16], sph_u32 val[4]);

+#endif
+
 /* ===================================================================== */

 /**
@@ -69,8 +69,12 @@ typedef unsigned int uint;
 #define SCRYPT_HASH_BLOCK_SIZE 64U
 #define SCRYPT_HASH_DIGEST_SIZE 32U

-#define ROTL32(a,b) (((a) << (b)) | ((a) >> (32 - b)))
-#define ROTR32(a,b) (((a) >> (b)) | ((a) << (32 - b)))
+//#define ROTL32(a,b) (((a) << (b)) | ((a) >> (32 - b)))
+//#define ROTR32(a,b) (((a) >> (b)) | ((a) << (32 - b)))
+
+#define ROTL32(a,b) rol32(a,b)
+#define ROTR32(a,b) ror32(a,b)

 #define U8TO32_BE(p) \
    (((uint32_t)((p)[0]) << 24) | ((uint32_t)((p)[1]) << 16) | \
@@ -1051,16 +1055,16 @@ int scanhash_neoscrypt( struct work *work,
    uint32_t _ALIGN(64) hash[8];
    const uint32_t Htarg = ptarget[7];
    const uint32_t first_nonce = pdata[19];
-   int thr_id = mythr->id;  // thr_id arg is deprecated
+   int thr_id = mythr->id;

    while (pdata[19] < max_nonce && !work_restart[thr_id].restart)
    {
       neoscrypt((uint8_t *) hash, (uint8_t *) pdata );

       /* Quick hash check */
-      if (hash[7] <= Htarg && fulltest_le(hash, ptarget)) {
-         *hashes_done = pdata[19] - first_nonce + 1;
-         return 1;
-      }
+      if (hash[7] <= Htarg && fulltest_le(hash, ptarget))
+      {
+         submit_solution( work, hash, mythr );
+      }

       pdata[19]++;
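The new ROTL32/ROTR32 delegate to rol32/ror32 helpers; the commented-out originals above give their exact meaning, so the assumed helpers are the usual portable rotates that compilers lower to a single instruction:

   static inline uint32_t rol32( uint32_t a, unsigned b )
   {  return ( a << b ) | ( a >> ( 32 - b ) );  }

   static inline uint32_t ror32( uint32_t a, unsigned b )
   {  return ( a >> b ) | ( a << ( 32 - b ) );  }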
algo/scrypt/scrypt-core-4way.c (new file, 3265 lines): diff suppressed because it is too large.

algo/scrypt/scrypt-core-4way.h (new file, 70 lines):
@@ -0,0 +1,70 @@
#ifndef SCRYPT_CORE_4WAY_H__
#define SCRYPT_CORE_4WAY_H__

#include "simd-utils.h"
#include <stdlib.h>
#include <stdint.h>

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

void scrypt_core_16way( __m512i *X, __m512i *V, const uint32_t N );

// Serial SIMD over 4 way parallel
void scrypt_core_simd128_4way( __m128i *X, __m128i *V, const uint32_t N );

// 4 way parallel over serial SIMD
void scrypt_core_4way_simd128( __m512i *X, __m512i *V, const uint32_t N );

#endif

#if defined(__AVX2__)

void scrypt_core_8way( __m256i *X, __m256i *V, uint32_t N );

// 2 way parallel over SIMD128
void scrypt_core_2way_simd128( __m256i *X, __m256i *V, const uint32_t N );

// Double buffered 2 way parallel over SIMD128
void scrypt_core_2way_simd128_2buf( __m256i *X, __m256i *V, const uint32_t N );

// Triple buffered 2 way parallel over SIMD128
void scrypt_core_2way_simd128_3buf( __m256i *X, __m256i *V, const uint32_t N );

// Serial SIMD128 over 2 way parallel
void scrypt_core_simd128_2way( uint64_t *X, uint64_t *V, const uint32_t N );

// Double buffered simd over parallel
void scrypt_core_simd128_2way_2buf( uint64_t *X, uint64_t *V, const uint32_t N );

// Triple buffered 2 way
void scrypt_core_simd128_2way_3buf( uint64_t *X, uint64_t *V, const uint32_t N );

// Quadruple buffered
void scrypt_core_simd128_2way_4buf( uint64_t *X, uint64_t *V, const uint32_t N );

#endif

#if defined(__SSE2__)

// Parallel 4 way, 4x memory
void scrypt_core_4way( __m128i *X, __m128i *V, const uint32_t N );

// Linear SIMD 1 way, 1x memory, lowest
void scrypt_core_simd128( uint32_t *X, uint32_t *V, const uint32_t N );

// Double buffered, 2x memory
void scrypt_core_simd128_2buf( uint32_t *X, uint32_t *V, const uint32_t N );

// Triple buffered
void scrypt_core_simd128_3buf( uint32_t *X, uint32_t *V, const uint32_t N );

// Quadruple buffered, 4x memory
void scrypt_core_simd128_4buf( uint32_t *X, uint32_t *V, const uint32_t N );

#endif

// For reference only
void scrypt_core_1way( uint32_t *X, uint32_t *V, const uint32_t N );

#endif
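A note on the naming scheme, inferred from the comments above (an assumption, the commit itself does not spell it out):

   // *_Nway             N nonces in parallel SIMD lanes
   // *_simd128          one stream, 128-bit vectors across the Salsa core
   // *_2buf/_3buf/_4buf 2/3/4 independent streams interleaved through one
   //                    core to hide memory latency; memory use scales
   //                    with the buffer count
   // combinations such as 2way_simd128 / simd128_2way stack the two
   // dimensions in the order named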
algo/scrypt/scrypt-core-ref.c (new file, 206 lines):
@@ -0,0 +1,206 @@
#include "scrypt-core-ref.h"
|
||||
|
||||
#define ROTL(a, b) (((a) << (b)) | ((a) >> (32 - (b))))
|
||||
|
||||
static void xor_salsa8(uint32_t * const B, const uint32_t * const C)
|
||||
{
|
||||
uint32_t x0 = (B[ 0] ^= C[ 0]),
|
||||
x1 = (B[ 1] ^= C[ 1]),
|
||||
x2 = (B[ 2] ^= C[ 2]),
|
||||
x3 = (B[ 3] ^= C[ 3]);
|
||||
uint32_t x4 = (B[ 4] ^= C[ 4]),
|
||||
x5 = (B[ 5] ^= C[ 5]),
|
||||
x6 = (B[ 6] ^= C[ 6]),
|
||||
x7 = (B[ 7] ^= C[ 7]);
|
||||
uint32_t x8 = (B[ 8] ^= C[ 8]),
|
||||
x9 = (B[ 9] ^= C[ 9]),
|
||||
xa = (B[10] ^= C[10]),
|
||||
xb = (B[11] ^= C[11]);
|
||||
uint32_t xc = (B[12] ^= C[12]),
|
||||
xd = (B[13] ^= C[13]),
|
||||
xe = (B[14] ^= C[14]),
|
||||
xf = (B[15] ^= C[15]);
|
||||
|
||||
/* Operate on columns. */
|
||||
x4 ^= ROTL(x0 + xc, 7);
|
||||
x9 ^= ROTL(x5 + x1, 7);
|
||||
xe ^= ROTL(xa + x6, 7);
|
||||
x3 ^= ROTL(xf + xb, 7);
|
||||
x8 ^= ROTL(x4 + x0, 9);
|
||||
xd ^= ROTL(x9 + x5, 9);
|
||||
x2 ^= ROTL(xe + xa, 9);
|
||||
x7 ^= ROTL(x3 + xf, 9);
|
||||
xc ^= ROTL(x8 + x4, 13);
|
||||
x1 ^= ROTL(xd + x9, 13);
|
||||
x6 ^= ROTL(x2 + xe, 13);
|
||||
xb ^= ROTL(x7 + x3, 13);
|
||||
x0 ^= ROTL(xc + x8, 18);
|
||||
x5 ^= ROTL(x1 + xd, 18);
|
||||
xa ^= ROTL(x6 + x2, 18);
|
||||
xf ^= ROTL(xb + x7, 18);
|
||||
|
||||
/* Operate on rows. */
|
||||
x1 ^= ROTL(x0 + x3, 7);
|
||||
x6 ^= ROTL(x5 + x4, 7);
|
||||
xb ^= ROTL(xa + x9, 7);
|
||||
xc ^= ROTL(xf + xe, 7);
|
||||
x2 ^= ROTL(x1 + x0, 9);
|
||||
x7 ^= ROTL(x6 + x5, 9);
|
||||
x8 ^= ROTL(xb + xa, 9);
|
||||
xd ^= ROTL(xc + xf, 9);
|
||||
x3 ^= ROTL(x2 + x1, 13);
|
||||
x4 ^= ROTL(x7 + x6, 13);
|
||||
x9 ^= ROTL(x8 + xb, 13);
|
||||
xe ^= ROTL(xd + xc, 13);
|
||||
x0 ^= ROTL(x3 + x2, 18);
|
||||
x5 ^= ROTL(x4 + x7, 18);
|
||||
xa ^= ROTL(x9 + x8, 18);
|
||||
xf ^= ROTL(xe + xd, 18);
|
||||
|
||||
/* Operate on columns. */
|
||||
x4 ^= ROTL(x0 + xc, 7);
|
||||
x9 ^= ROTL(x5 + x1, 7);
|
||||
xe ^= ROTL(xa + x6, 7);
|
||||
x3 ^= ROTL(xf + xb, 7);
|
||||
x8 ^= ROTL(x4 + x0, 9);
|
||||
xd ^= ROTL(x9 + x5, 9);
|
||||
x2 ^= ROTL(xe + xa, 9);
|
||||
x7 ^= ROTL(x3 + xf, 9);
|
||||
xc ^= ROTL(x8 + x4, 13);
|
||||
x1 ^= ROTL(xd + x9, 13);
|
||||
x6 ^= ROTL(x2 + xe, 13);
|
||||
xb ^= ROTL(x7 + x3, 13);
|
||||
x0 ^= ROTL(xc + x8, 18);
|
||||
x5 ^= ROTL(x1 + xd, 18);
|
||||
xa ^= ROTL(x6 + x2, 18);
|
||||
xf ^= ROTL(xb + x7, 18);
|
||||
|
||||
/* Operate on rows. */
|
||||
x1 ^= ROTL(x0 + x3, 7);
|
||||
x6 ^= ROTL(x5 + x4, 7);
|
||||
xb ^= ROTL(xa + x9, 7);
|
||||
xc ^= ROTL(xf + xe, 7);
|
||||
x2 ^= ROTL(x1 + x0, 9);
|
||||
x7 ^= ROTL(x6 + x5, 9);
|
||||
x8 ^= ROTL(xb + xa, 9);
|
||||
xd ^= ROTL(xc + xf, 9);
|
||||
x3 ^= ROTL(x2 + x1, 13);
|
||||
x4 ^= ROTL(x7 + x6, 13);
|
||||
x9 ^= ROTL(x8 + xb, 13);
|
||||
xe ^= ROTL(xd + xc, 13);
|
||||
x0 ^= ROTL(x3 + x2, 18);
|
||||
x5 ^= ROTL(x4 + x7, 18);
|
||||
xa ^= ROTL(x9 + x8, 18);
|
||||
xf ^= ROTL(xe + xd, 18);
|
||||
|
||||
/* Operate on columns. */
|
||||
x4 ^= ROTL(x0 + xc, 7);
|
||||
x9 ^= ROTL(x5 + x1, 7);
|
||||
xe ^= ROTL(xa + x6, 7);
|
||||
x3 ^= ROTL(xf + xb, 7);
|
||||
x8 ^= ROTL(x4 + x0, 9);
|
||||
xd ^= ROTL(x9 + x5, 9);
|
||||
x2 ^= ROTL(xe + xa, 9);
|
||||
x7 ^= ROTL(x3 + xf, 9);
|
||||
xc ^= ROTL(x8 + x4, 13);
|
||||
x1 ^= ROTL(xd + x9, 13);
|
||||
x6 ^= ROTL(x2 + xe, 13);
|
||||
xb ^= ROTL(x7 + x3, 13);
|
||||
x0 ^= ROTL(xc + x8, 18);
|
||||
x5 ^= ROTL(x1 + xd, 18);
|
||||
xa ^= ROTL(x6 + x2, 18);
|
||||
xf ^= ROTL(xb + x7, 18);
|
||||
|
||||
/* Operate on rows. */
|
||||
x1 ^= ROTL(x0 + x3, 7);
|
||||
x6 ^= ROTL(x5 + x4, 7);
|
||||
xb ^= ROTL(xa + x9, 7);
|
||||
xc ^= ROTL(xf + xe, 7);
|
||||
x2 ^= ROTL(x1 + x0, 9);
|
||||
x7 ^= ROTL(x6 + x5, 9);
|
||||
x8 ^= ROTL(xb + xa, 9);
|
||||
xd ^= ROTL(xc + xf, 9);
|
||||
x3 ^= ROTL(x2 + x1, 13);
|
||||
x4 ^= ROTL(x7 + x6, 13);
|
||||
x9 ^= ROTL(x8 + xb, 13);
|
||||
xe ^= ROTL(xd + xc, 13);
|
||||
x0 ^= ROTL(x3 + x2, 18);
|
||||
x5 ^= ROTL(x4 + x7, 18);
|
||||
xa ^= ROTL(x9 + x8, 18);
|
||||
xf ^= ROTL(xe + xd, 18);
|
||||
|
||||
/* Operate on columns. */
|
||||
x4 ^= ROTL(x0 + xc, 7);
|
||||
x9 ^= ROTL(x5 + x1, 7);
|
||||
xe ^= ROTL(xa + x6, 7);
|
||||
x3 ^= ROTL(xf + xb, 7);
|
||||
x8 ^= ROTL(x4 + x0, 9);
|
||||
xd ^= ROTL(x9 + x5, 9);
|
||||
x2 ^= ROTL(xe + xa, 9);
|
||||
x7 ^= ROTL(x3 + xf, 9);
|
||||
xc ^= ROTL(x8 + x4, 13);
|
||||
x1 ^= ROTL(xd + x9, 13);
|
||||
x6 ^= ROTL(x2 + xe, 13);
|
||||
xb ^= ROTL(x7 + x3, 13);
|
||||
x0 ^= ROTL(xc + x8, 18);
|
||||
x5 ^= ROTL(x1 + xd, 18);
|
||||
xa ^= ROTL(x6 + x2, 18);
|
||||
xf ^= ROTL(xb + x7, 18);
|
||||
|
||||
/* Operate on rows. */
|
||||
x1 ^= ROTL(x0 + x3, 7);
|
||||
x6 ^= ROTL(x5 + x4, 7);
|
||||
xb ^= ROTL(xa + x9, 7);
|
||||
xc ^= ROTL(xf + xe, 7);
|
||||
x2 ^= ROTL(x1 + x0, 9);
|
||||
x7 ^= ROTL(x6 + x5, 9);
|
||||
x8 ^= ROTL(xb + xa, 9);
|
||||
xd ^= ROTL(xc + xf, 9);
|
||||
x3 ^= ROTL(x2 + x1, 13);
|
||||
x4 ^= ROTL(x7 + x6, 13);
|
||||
x9 ^= ROTL(x8 + xb, 13);
|
||||
xe ^= ROTL(xd + xc, 13);
|
||||
x0 ^= ROTL(x3 + x2, 18);
|
||||
x5 ^= ROTL(x4 + x7, 18);
|
||||
xa ^= ROTL(x9 + x8, 18);
|
||||
xf ^= ROTL(xe + xd, 18);
|
||||
|
||||
B[ 0] += x0;
|
||||
B[ 1] += x1;
|
||||
B[ 2] += x2;
|
||||
B[ 3] += x3;
|
||||
B[ 4] += x4;
|
||||
B[ 5] += x5;
|
||||
B[ 6] += x6;
|
||||
B[ 7] += x7;
|
||||
B[ 8] += x8;
|
||||
B[ 9] += x9;
|
||||
B[10] += xa;
|
||||
B[11] += xb;
|
||||
B[12] += xc;
|
||||
B[13] += xd;
|
||||
B[14] += xe;
|
||||
B[15] += xf;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param X input/ouput
|
||||
* @param V scratch buffer
|
||||
* @param N factor (def. 1024)
|
||||
*/
|
||||
void scrypt_core_ref(uint32_t *X, uint32_t *V, uint32_t N)
|
||||
{
|
||||
for (uint32_t i = 0; i < N; i++) {
|
||||
memcpy(&V[i * 32], X, 128);
|
||||
xor_salsa8(&X[0], &X[16]);
|
||||
xor_salsa8(&X[16], &X[0]);
|
||||
}
|
||||
for (uint32_t i = 0; i < N; i++) {
|
||||
uint32_t j = 32 * (X[16] & (N - 1));
|
||||
for (uint8_t k = 0; k < 32; k++)
|
||||
X[k] ^= V[j + k];
|
||||
xor_salsa8(&X[0], &X[16]);
|
||||
xor_salsa8(&X[16], &X[0]);
|
||||
}
|
||||
}
|
||||
|
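A minimal caller sketch for the reference core, with sizes taken from the code above (X is 32 words, V needs 32*N words, N defaults to 1024 per the comment):

   #include <stdint.h>
   #include <stdlib.h>
   #include "scrypt-core-ref.h"   // assumed to declare scrypt_core_ref()

   // Demo only: run the reference core on one 128-byte block pair X.
   void scrypt_core_ref_demo( uint32_t X[32] )
   {
      const uint32_t N = 1024;                   // cost factor, power of 2
      uint32_t *V = malloc( (size_t)N * 32 * sizeof(uint32_t) );  // 128 KiB
      if ( V == NULL ) return;
      scrypt_core_ref( X, V, N );
      free( V );
   }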
algo/scrypt/scrypt.c (1766 lines changed): diff suppressed because it is too large.

algo/sha/hmac-sha256-hash-4way.c (new file, 440 lines):
@@ -0,0 +1,440 @@
|
||||
/*-
|
||||
* Copyright 2005,2007,2009 Colin Percival
|
||||
* Copywright 2020 JayDDee246@gmail.com
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include "hmac-sha256-hash-4way.h"
|
||||
#include "compat.h"
|
||||
|
||||
// HMAC 4-way SSE2
|
||||
|
||||
/**
|
||||
* HMAC_SHA256_Buf(K, Klen, in, len, digest):
|
||||
* Compute the HMAC-SHA256 of ${len} bytes from ${in} using the key ${K} of
|
||||
* length ${Klen}, and write the result to ${digest}.
|
||||
*/
|
||||
void
|
||||
hmac_sha256_4way_full( void *digest, const void *K, size_t Klen,
|
||||
const void *in, size_t len )
|
||||
{
|
||||
hmac_sha256_4way_context ctx;
|
||||
hmac_sha256_4way_init( &ctx, K, Klen );
|
||||
hmac_sha256_4way_update( &ctx, in, len );
|
||||
hmac_sha256_4way_close( &ctx, digest );
|
||||
}
|
||||
|
||||
/* Initialize an HMAC-SHA256 operation with the given key. */
|
||||
void
|
||||
hmac_sha256_4way_init( hmac_sha256_4way_context *ctx, const void *_K,
|
||||
size_t Klen )
|
||||
{
|
||||
unsigned char pad[64*4] __attribute__ ((aligned (64)));
|
||||
unsigned char khash[32*4] __attribute__ ((aligned (64)));
|
||||
const unsigned char * K = _K;
|
||||
size_t i;
|
||||
|
||||
/* If Klen > 64, the key is really SHA256(K). */
|
||||
if ( Klen > 64 )
|
||||
{
|
||||
sha256_4way_init( &ctx->ictx );
|
||||
sha256_4way_update( &ctx->ictx, K, Klen );
|
||||
sha256_4way_close( &ctx->ictx, khash );
|
||||
K = khash;
|
||||
Klen = 32;
|
||||
}
|
||||
|
||||
/* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
|
||||
sha256_4way_init( &ctx->ictx );
|
||||
memset( pad, 0x36, 64*4 );
|
||||
|
||||
for ( i = 0; i < Klen; i++ )
|
||||
casti_m128i( pad, i ) = _mm_xor_si128( casti_m128i( pad, i ),
|
||||
casti_m128i( K, i ) );
|
||||
|
||||
sha256_4way_update( &ctx->ictx, pad, 64 );
|
||||
|
||||
/* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
|
||||
sha256_4way_init( &ctx->octx );
|
||||
memset( pad, 0x5c, 64*4 );
|
||||
for ( i = 0; i < Klen/4; i++ )
|
||||
casti_m128i( pad, i ) = _mm_xor_si128( casti_m128i( pad, i ),
|
||||
casti_m128i( K, i ) );
|
||||
sha256_4way_update( &ctx->octx, pad, 64 );
|
||||
}

/* Add bytes to the HMAC-SHA256 operation. */
void
hmac_sha256_4way_update( hmac_sha256_4way_context *ctx, const void *in,
                         size_t len )
{
   /* Feed data to the inner SHA256 operation. */
   sha256_4way_update( &ctx->ictx, in, len );
}

/* Finish an HMAC-SHA256 operation. */
void
hmac_sha256_4way_close( hmac_sha256_4way_context *ctx, void *digest )
{
   unsigned char ihash[32*4] __attribute__ ((aligned (64)));

   /* Finish the inner SHA256 operation. */
   sha256_4way_close( &ctx->ictx, ihash );

   /* Feed the inner hash to the outer SHA256 operation. */
   sha256_4way_update( &ctx->octx, ihash, 32 );

   /* Finish the outer SHA256 operation. */
   sha256_4way_close( &ctx->octx, digest );
}

/**
 * PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, c, buf, dkLen):
 * Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
 * write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
 */
void
pbkdf2_sha256_4way( uint8_t *buf, size_t dkLen,
                    const uint8_t *passwd, size_t passwdlen,
                    const uint8_t *salt, size_t saltlen, uint64_t c )
{
   hmac_sha256_4way_context PShctx, hctx;
   uint8_t _ALIGN(128) T[32*4];
   uint8_t _ALIGN(128) U[32*4];
   __m128i ivec;
   size_t i, clen;
   uint64_t j;
   int k;

   /* Compute HMAC state after processing P and S. */
   hmac_sha256_4way_init( &PShctx, passwd, passwdlen );
   hmac_sha256_4way_update( &PShctx, salt, saltlen );

   /* Iterate through the blocks. */
   for ( i = 0; i * 32 < dkLen; i++ )
   {
      /* Generate INT(i + 1). */
      ivec = _mm_set1_epi32( bswap_32( i+1 ) );

      /* Compute U_1 = PRF(P, S || INT(i)). */
      memcpy( &hctx, &PShctx, sizeof(hmac_sha256_4way_context) );
      hmac_sha256_4way_update( &hctx, &ivec, 4 );
      hmac_sha256_4way_close( &hctx, U );

      /* T_i = U_1 ... */
      memcpy( T, U, 32*4 );

      for ( j = 2; j <= c; j++ )
      {
         /* Compute U_j. */
         hmac_sha256_4way_init( &hctx, passwd, passwdlen );
         hmac_sha256_4way_update( &hctx, U, 32 );
         hmac_sha256_4way_close( &hctx, U );

         /* ... xor U_j ... */
         for ( k = 0; k < 8; k++ )
            casti_m128i( T, k ) = _mm_xor_si128( casti_m128i( T, k ),
                                                 casti_m128i( U, k ) );
      }

      /* Copy as many bytes as necessary into buf. */
      clen = dkLen - i * 32;
      if ( clen > 32 )
         clen = 32;
      memcpy( &buf[ i*32*4 ], T, clen*4 );
   }
}
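
/* Note on the vectorized PBKDF2 above (added commentary): the four lanes
 * run the same iteration count c in lockstep, so this computes PBKDF2 for
 * four independent (passwd, salt) streams at once.  ivec is one 32-bit
 * big-endian block index broadcast to every lane, which is why it is fed
 * to the HMAC with a per-lane length of 4 bytes, and why each output block
 * lands at byte offset i*32*4: every 32-byte logical block occupies 128
 * interleaved bytes.  dkLen, passwdlen and saltlen are per-lane sizes.
 */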

#if defined(__AVX2__)

// HMAC 8-way AVX2

void
hmac_sha256_8way_full( void *digest, const void *K, size_t Klen,
                       const void *in, size_t len )
{
   hmac_sha256_8way_context ctx;
   hmac_sha256_8way_init( &ctx, K, Klen );
   hmac_sha256_8way_update( &ctx, in, len );
   hmac_sha256_8way_close( &ctx, digest );
}

/* Initialize an HMAC-SHA256 operation with the given key. */
void
hmac_sha256_8way_init( hmac_sha256_8way_context *ctx, const void *_K,
                       size_t Klen )
{
   unsigned char pad[64*8] __attribute__ ((aligned (128)));
   unsigned char khash[32*8] __attribute__ ((aligned (128)));
   const unsigned char * K = _K;
   size_t i;

   /* If Klen > 64, the key is really SHA256(K). */
   if ( Klen > 64 )
   {
      sha256_8way_init( &ctx->ictx );
      sha256_8way_update( &ctx->ictx, K, Klen );
      sha256_8way_close( &ctx->ictx, khash );
      K = khash;
      Klen = 32;
   }

   /* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
   sha256_8way_init( &ctx->ictx );
   memset( pad, 0x36, 64*8 );

   for ( i = 0; i < Klen/4; i++ )
      casti_m256i( pad, i ) = _mm256_xor_si256( casti_m256i( pad, i ),
                                                casti_m256i( K, i ) );

   sha256_8way_update( &ctx->ictx, pad, 64 );

   /* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
   sha256_8way_init( &ctx->octx );
   memset( pad, 0x5c, 64*8 );
   for ( i = 0; i < Klen/4; i++ )
      casti_m256i( pad, i ) = _mm256_xor_si256( casti_m256i( pad, i ),
                                                casti_m256i( K, i ) );
   sha256_8way_update( &ctx->octx, pad, 64 );
}

/* Add bytes to the HMAC-SHA256 operation. */
void
hmac_sha256_8way_update( hmac_sha256_8way_context *ctx, const void *in,
                         size_t len )
{
   /* Feed data to the inner SHA256 operation. */
   sha256_8way_update( &ctx->ictx, in, len );
}

/* Finish an HMAC-SHA256 operation. */
void
hmac_sha256_8way_close( hmac_sha256_8way_context *ctx, void *digest )
{
   unsigned char ihash[32*8] __attribute__ ((aligned (128)));

   /* Finish the inner SHA256 operation. */
   sha256_8way_close( &ctx->ictx, ihash );

   /* Feed the inner hash to the outer SHA256 operation. */
   sha256_8way_update( &ctx->octx, ihash, 32 );

   /* Finish the outer SHA256 operation. */
   sha256_8way_close( &ctx->octx, digest );
}

/**
 * PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, c, buf, dkLen):
 * Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
 * write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
 */
void
pbkdf2_sha256_8way( uint8_t *buf, size_t dkLen, const uint8_t *passwd,
                    size_t passwdlen, const uint8_t *salt, size_t saltlen,
                    uint64_t c )
{
   hmac_sha256_8way_context PShctx, hctx;
   uint8_t _ALIGN(128) T[32*8];
   uint8_t _ALIGN(128) U[32*8];
   size_t i, clen;
   uint64_t j;
   int k;

   /* Compute HMAC state after processing P and S. */
   hmac_sha256_8way_init( &PShctx, passwd, passwdlen );

   // saltlen can be an odd number of bytes
   hmac_sha256_8way_update( &PShctx, salt, saltlen );

   /* Iterate through the blocks. */
   for ( i = 0; i * 32 < dkLen; i++ )
   {
      __m256i ivec = _mm256_set1_epi32( bswap_32( i+1 ) );

      /* Compute U_1 = PRF(P, S || INT(i)). */
      memcpy( &hctx, &PShctx, sizeof(hmac_sha256_8way_context) );
      hmac_sha256_8way_update( &hctx, &ivec, 4 );
      hmac_sha256_8way_close( &hctx, U );

      /* T_i = U_1 ... */
      memcpy( T, U, 32*8 );

      for ( j = 2; j <= c; j++ )
      {
         /* Compute U_j. */
         hmac_sha256_8way_init( &hctx, passwd, passwdlen );
         hmac_sha256_8way_update( &hctx, U, 32 );
         hmac_sha256_8way_close( &hctx, U );

         /* ... xor U_j ... */
         for ( k = 0; k < 8; k++ )
            casti_m256i( T, k ) = _mm256_xor_si256( casti_m256i( T, k ),
                                                    casti_m256i( U, k ) );
      }

      /* Copy as many bytes as necessary into buf. */
      clen = dkLen - i * 32;
      if ( clen > 32 )
         clen = 32;
      memcpy( &buf[ i*32*8 ], T, clen*8 );
   }
}
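
/* The 8-way path (added commentary) is the same algorithm widened to eight
 * 32-bit-interleaved lanes: T and U grow to 32*8 bytes, each __m256i XOR
 * covers 4 logical bytes across 8 lanes, and output blocks land at byte
 * offset i*32*8 with clen*8 bytes copied.  Inputs must be interleaved
 * 8x32 rather than 4x32.
 */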

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

// HMAC 16-way AVX512

void
hmac_sha256_16way_full( void *digest, const void *K, size_t Klen,
                        const void *in, size_t len )
{
   hmac_sha256_16way_context ctx;
   hmac_sha256_16way_init( &ctx, K, Klen );
   hmac_sha256_16way_update( &ctx, in, len );
   hmac_sha256_16way_close( &ctx, digest );
}

void
hmac_sha256_16way_init( hmac_sha256_16way_context *ctx, const void *_K,
                        size_t Klen )
{
   unsigned char pad[64*16] __attribute__ ((aligned (128)));
   unsigned char khash[32*16] __attribute__ ((aligned (128)));
   const unsigned char * K = _K;
   size_t i;

   /* If Klen > 64, the key is really SHA256(K). */
   if ( Klen > 64 )
   {
      sha256_16way_init( &ctx->ictx );
      sha256_16way_update( &ctx->ictx, K, Klen );
      sha256_16way_close( &ctx->ictx, khash );
      K = khash;
      Klen = 32;
   }

   /* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
   sha256_16way_init( &ctx->ictx );
   memset( pad, 0x36, 64*16 );

   /* One __m512i covers 4 key bytes across the 16 lanes, hence Klen/4. */
   for ( i = 0; i < Klen/4; i++ )
      casti_m512i( pad, i ) = _mm512_xor_si512( casti_m512i( pad, i ),
                                                casti_m512i( K, i ) );
   sha256_16way_update( &ctx->ictx, pad, 64 );

   /* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
   sha256_16way_init( &ctx->octx );
   memset( pad, 0x5c, 64*16 );
   for ( i = 0; i < Klen/4; i++ )
      casti_m512i( pad, i ) = _mm512_xor_si512( casti_m512i( pad, i ),
                                                casti_m512i( K, i ) );
   sha256_16way_update( &ctx->octx, pad, 64 );
}

/* Add bytes to the HMAC-SHA256 operation. */
void
hmac_sha256_16way_update( hmac_sha256_16way_context *ctx, const void *in,
                          size_t len )
{
   /* Feed data to the inner SHA256 operation. */
   sha256_16way_update( &ctx->ictx, in, len );
}

/* Finish an HMAC-SHA256 operation. */
void
hmac_sha256_16way_close( hmac_sha256_16way_context *ctx, void *digest )
{
   unsigned char ihash[32*16] __attribute__ ((aligned (128)));

   /* Finish the inner SHA256 operation. */
   sha256_16way_close( &ctx->ictx, ihash );

   /* Feed the inner hash to the outer SHA256 operation. */
   sha256_16way_update( &ctx->octx, ihash, 32 );

   /* Finish the outer SHA256 operation. */
   sha256_16way_close( &ctx->octx, digest );
}

/**
 * PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, c, buf, dkLen):
 * Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
 * write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
 */
void
pbkdf2_sha256_16way( uint8_t *buf, size_t dkLen,
                     const uint8_t *passwd, size_t passwdlen,
                     const uint8_t *salt, size_t saltlen, uint64_t c )
{
   hmac_sha256_16way_context PShctx, hctx;
   uint8_t _ALIGN(128) T[32*16];
   uint8_t _ALIGN(128) U[32*16];
   __m512i ivec;
   size_t i, clen;
   uint64_t j;
   int k;

   /* Compute HMAC state after processing P and S. */
   hmac_sha256_16way_init( &PShctx, passwd, passwdlen );
   hmac_sha256_16way_update( &PShctx, salt, saltlen );

   /* Iterate through the blocks. */
   for ( i = 0; i * 32 < dkLen; i++ )
   {
      /* Generate INT(i + 1). */
      ivec = _mm512_set1_epi32( bswap_32( i+1 ) );

      /* Compute U_1 = PRF(P, S || INT(i)). */
      memcpy( &hctx, &PShctx, sizeof(hmac_sha256_16way_context) );
      hmac_sha256_16way_update( &hctx, &ivec, 4 );
      hmac_sha256_16way_close( &hctx, U );

      /* T_i = U_1 ... */
      memcpy( T, U, 32*16 );

      for ( j = 2; j <= c; j++ )
      {
         /* Compute U_j. */
         hmac_sha256_16way_init( &hctx, passwd, passwdlen );
         hmac_sha256_16way_update( &hctx, U, 32 );
         hmac_sha256_16way_close( &hctx, U );

         /* ... xor U_j ... */
         for ( k = 0; k < 8; k++ )
            casti_m512i( T, k ) = _mm512_xor_si512( casti_m512i( T, k ),
                                                    casti_m512i( U, k ) );
      }

      /* Copy as many bytes as necessary into buf. */
      clen = dkLen - i * 32;
      if ( clen > 32 )
         clen = 32;
      memcpy( &buf[ i*32*16 ], T, clen*16 );
   }
}

#endif // AVX512
#endif // AVX2
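
/* Sanity-check idea (added commentary, hypothetical test): interleave the
 * same key and message into every lane, run hmac_sha256_4way_full, then
 * de-interleave and compare each lane against a scalar reference such as
 * OpenSSL's HMAC( EVP_sha256(), ... ).  All lanes must match the scalar
 * digest; a single differing lane usually indicates an interleaving or
 * key-padding error rather than a SHA-256 core bug.
 */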
107
algo/sha/hmac-sha256-hash-4way.h
Normal file
@@ -0,0 +1,107 @@
/*-
 * Copyright 2005,2007,2009 Colin Percival
 * Copyright 2020 JayDDee@gmail.com
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libmd/sha256_Y.h,v 1.2 2006/01/17 15:35:56 phk Exp $
 */

#ifndef HMAC_SHA256_4WAY_H__
#define HMAC_SHA256_4WAY_H__

// Tested only 8-way with null pers

#include <sys/types.h>
#include <stdint.h>
#include "simd-utils.h"
#include "sha-hash-4way.h"

typedef struct _hmac_sha256_4way_context
{
   sha256_4way_context ictx;
   sha256_4way_context octx;
} hmac_sha256_4way_context;

//void SHA256_Buf( const void *, size_t len, uint8_t digest[32] );
void hmac_sha256_4way_init( hmac_sha256_4way_context *, const void *, size_t );
void hmac_sha256_4way_update( hmac_sha256_4way_context *, const void *,
                              size_t );
void hmac_sha256_4way_close( hmac_sha256_4way_context *, void* );
void hmac_sha256_4way_full( void*, const void *, size_t Klen, const void *,
                            size_t len );

/**
 * PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, c, buf, dkLen):
 * Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
 * write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
 */
void pbkdf2_sha256_4way( uint8_t *, size_t, const uint8_t *, size_t,
                         const uint8_t *, size_t, uint64_t );
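
/* Reminder for callers (added commentary): every length and dkLen in these
 * prototypes is a per-lane byte count, and every buffer is expected in the
 * interleaved lane format produced by the simd-utils interleave helpers.
 * Plain contiguous buffers must be interleaved before calling in and
 * de-interleaved afterwards.
 */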

#if defined(__AVX2__)

typedef struct _hmac_sha256_8way_context
{
   sha256_8way_context ictx;
   sha256_8way_context octx;
} hmac_sha256_8way_context;

//void SHA256_Buf( const void *, size_t len, uint8_t digest[32] );
void hmac_sha256_8way_init( hmac_sha256_8way_context *, const void *, size_t );
void hmac_sha256_8way_update( hmac_sha256_8way_context *, const void *,
                              size_t );
void hmac_sha256_8way_close( hmac_sha256_8way_context *, void* );
void hmac_sha256_8way_full( void*, const void *, size_t Klen, const void *,
                            size_t len );

void pbkdf2_sha256_8way( uint8_t *, size_t, const uint8_t *, size_t,
                         const uint8_t *, size_t, uint64_t );

#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)

typedef struct _hmac_sha256_16way_context
{
   sha256_16way_context ictx;
   sha256_16way_context octx;
} hmac_sha256_16way_context;

//void SHA256_Buf( const void *, size_t len, uint8_t digest[32] );
void hmac_sha256_16way_init( hmac_sha256_16way_context *,
                             const void *, size_t );
void hmac_sha256_16way_update( hmac_sha256_16way_context *, const void *,
                               size_t );
void hmac_sha256_16way_close( hmac_sha256_16way_context *, void* );
void hmac_sha256_16way_full( void*, const void *, size_t Klen, const void *,
                             size_t len );

void pbkdf2_sha256_16way( uint8_t *, size_t, const uint8_t *, size_t,
                          const uint8_t *, size_t, uint64_t );

#endif // AVX512
#endif // AVX2

#endif // HMAC_SHA256_4WAY_H__
Some files were not shown because too many files have changed in this diff.