From 77c5ae80ab021593edd6960563428dc4afe4a195 Mon Sep 17 00:00:00 2001 From: Jay D Dee Date: Thu, 30 May 2019 16:59:49 -0400 Subject: [PATCH] v3.9.1 --- INSTALL_LINUX | 126 ++ INSTALL_WINDOWS | 173 ++ Makefile.am | 18 +- README.md | 7 +- README.txt | 11 +- RELEASE_NOTES | 184 +- algo-gate-api.c | 7 + algo-gate-api.h | 14 +- .../argon2d/{thread.c => argon2d_thread.c} | 2 +- .../argon2d/{thread.h => argon2d_thread.h} | 0 algo/argon2/argon2d/argon2d/core.c | 2 +- algo/blake/blake-hash-4way.h | 27 +- ...blake-hash-4way.c => blake256-hash-4way.c} | 798 ++------ algo/blake/blake256-hash-4way.c.new | 322 +++ algo/blake/blake512-hash-4way.c | 701 +++++++ algo/cubehash/cube-hash-2way.c | 74 +- algo/cubehash/cubehash_sse2.c | 76 +- algo/hodl/aes.c | 4 +- algo/hodl/hodl-gate.c | 38 +- algo/hodl/hodl-wolf.c | 10 +- algo/hodl/sha512-avx.h | 8 +- algo/hodl/sha512_avx.c | 1 + algo/luffa/luffa_for_sse2.c | 13 + algo/lyra2/allium-4way.c | 6 +- algo/lyra2/lyra2-gate.c | 2 +- algo/lyra2/lyra2-gate.h | 4 +- algo/lyra2/lyra2re.c | 21 +- algo/lyra2/lyra2rev2-4way.c | 14 +- algo/lyra2/lyra2rev3-4way.c | 59 +- algo/lyra2/lyra2rev3.c | 71 +- algo/lyra2/sponge.h | 14 +- algo/m7m.c | 49 - algo/panama/sph_panama.c | 334 ++++ algo/panama/sph_panama.h | 118 ++ algo/quark/anime-4way.c | 48 +- algo/radiogatun/sph_radiogatun.c | 1003 ++++++++++ algo/radiogatun/sph_radiogatun.h | 186 ++ algo/ripemd/lbry-gate.h | 3 + algo/ripemd/lbry.c | 30 - algo/scryptjane/scrypt-jane-portable-x86.h | 2 + algo/sha/sha256t-4way.c | 88 +- algo/sha/sha256t-gate.h | 9 +- algo/sha/sha256t.c | 3 +- algo/shabal/shabal-hash-4way.c | 32 +- algo/shavite/shavite-hash-2way.c | 406 ++++ algo/shavite/shavite-hash-2way.h | 25 + algo/shavite/sph-shavite-aesni.c | 62 +- algo/shavite/sse2/shavite.c | 1764 ----------------- algo/skein/skein-gate.h | 3 + algo/skein/skein.c | 11 - algo/x12/x12-4way.c | 6 +- algo/x12/x12.c | 44 +- algo/{x17 => x16}/x16r-4way.c | 8 +- algo/{x17 => x16}/x16r-gate.c | 0 algo/{x17 => x16}/x16r-gate.h | 0 algo/{x17 => x16}/x16r.c | 30 +- algo/x17/hmq1725.c | 75 +- algo/x17/sonoa-4way.c | 872 ++++++++ algo/x17/sonoa-gate.c | 18 + algo/x17/sonoa-gate.h | 32 + algo/x17/sonoa.c | 648 ++++++ algo/x17/x17-4way.c | 197 +- algo/x17/x17-gate.c | 4 +- algo/x17/x17-gate.h | 6 +- algo/x17/x17.c | 233 ++- algo/x17/xevan.c | 77 +- algo/x20/x20r-gate.c | 34 + algo/x20/x20r-gate.h | 58 + algo/x20/x20r.c | 275 +++ algo/yescrypt/sha256_Y.c | 37 - algo/yescrypt/sha256_Y.h | 5 - algo/yescrypt/yescrypt-simd.c | 14 - algo/yespower/yespower-opt.c | 3 +- api.c | 30 +- avxdefs.h | 653 +++--- build-allarch.sh | 26 +- configure | 20 +- configure.ac | 2 +- cpu-miner.c | 58 +- interleave.h | 109 + miner.h | 15 +- winbuild-cross.sh | 40 +- 82 files changed, 6906 insertions(+), 3706 deletions(-) create mode 100644 INSTALL_LINUX create mode 100644 INSTALL_WINDOWS rename algo/argon2/argon2d/argon2d/{thread.c => argon2d_thread.c} (98%) rename algo/argon2/argon2d/argon2d/{thread.h => argon2d_thread.h} (100%) rename algo/blake/{blake-hash-4way.c => blake256-hash-4way.c} (55%) create mode 100644 algo/blake/blake256-hash-4way.c.new create mode 100644 algo/blake/blake512-hash-4way.c create mode 100644 algo/panama/sph_panama.c create mode 100644 algo/panama/sph_panama.h create mode 100644 algo/radiogatun/sph_radiogatun.c create mode 100644 algo/radiogatun/sph_radiogatun.h create mode 100644 algo/shavite/shavite-hash-2way.c create mode 100644 algo/shavite/shavite-hash-2way.h delete mode 100644 algo/shavite/sse2/shavite.c rename algo/{x17 => x16}/x16r-4way.c (98%) rename 
algo/{x17 => x16}/x16r-gate.c (100%) rename algo/{x17 => x16}/x16r-gate.h (100%) rename algo/{x17 => x16}/x16r.c (98%) create mode 100644 algo/x17/sonoa-4way.c create mode 100644 algo/x17/sonoa-gate.c create mode 100644 algo/x17/sonoa-gate.h create mode 100644 algo/x17/sonoa.c create mode 100644 algo/x20/x20r-gate.c create mode 100644 algo/x20/x20r-gate.h create mode 100644 algo/x20/x20r.c
diff --git a/INSTALL_LINUX b/INSTALL_LINUX new file mode 100644 index 0000000..a9934a3 --- /dev/null +++ b/INSTALL_LINUX @@ -0,0 +1,126 @@
+
+
+Requirements:
+
+Intel Core2 or newer, or AMD Steamroller or newer CPU. ARM CPUs are not
+supported.
+64 bit Linux operating system. Apple is not supported.
+
+Building on Linux, prerequisites:
+
+It is assumed users know how to install packages on their system and
+are able to compile standard source packages. This is basic Linux and
+beyond the scope of cpuminer-opt. Regardless, compiling is trivial if you
+follow the instructions.
+
+Make sure you have the basic development packages installed.
+Here is a good start:
+
+http://askubuntu.com/questions/457526/how-to-install-cpuminer-in-ubuntu
+
+Install any additional dependencies needed by cpuminer-opt. The list below
+includes some of the ones that may not be in the default install and need to
+be installed manually. There may be others; read the error messages, as they
+will give a clue to the missing package.
+
+The following command should install everything you need on Debian based
+distributions such as Ubuntu:
+
+sudo apt-get install build-essential libssl-dev libcurl4-openssl-dev libjansson-dev libgmp-dev automake zlib1g-dev
+
+build-essential (Development Tools package group on Fedora)
+automake
+libjansson-dev
+libgmp-dev
+libcurl4-openssl-dev
+libssl-dev
+lib-thread
+zlib1g-dev
+
+SHA support on AMD Ryzen CPUs requires gcc version 5 or higher and
+openssl 1.1.0e or higher. Add one of the following, depending on the
+compiler version, to CFLAGS:
+"-march=native" or "-march=znver1" or "-msha".
+
+Due to poor AVX2 performance on Ryzen, users should add -DRYZEN_ to CFLAGS
+to override multiway AVX2 on algos with sha256, and use SHA instead.
+
+Additional instructions for static compilation can be found here:
+https://lxadm.com/Static_compilation_of_cpuminer
+Static builds should only be considered in a homogeneous HW and SW environment.
+Local builds will always have the best performance and compatibility.
+
+Extract the cpuminer source.
+
+tar xvzf cpuminer-opt-x.y.z.tar.gz
+cd cpuminer-opt-x.y.z
+
+Run ./build.sh to build on Linux or execute the following commands.
+
+./autogen.sh
+CFLAGS="-O3 -march=native -Wall" ./configure --with-curl
+make
+
+Start mining.
+
+./cpuminer -a algo -o url -u username -p password
+
+Windows
+
+Precompiled Windows binaries are built on a Linux host using Mingw
+with a more recent compiler than the following Windows hosted procedure.
+
+Building on Windows, prerequisites:
+
+msys
+mingw_w64
+Visual C++ redistributable 2008 X64
+openssl
+
+Install msys and mingw_w64; this is only needed once.
+
+Unpack msys into C:\msys or your preferred directory.
+
+Install mingw_w64 from win-builds.
+Follow the instructions, check "msys or cygwin" and "x86_64", and accept the
+default existing msys installation.
+
+Open a msys shell by double clicking on msys.bat.
+Note that the msys shell uses Linux syntax for file specifications, "C:\" is
+mounted at "/c/".
+
+Add the mingw bin directory to the PATH variable:
+PATH="/c/msys/opt/windows_64/bin/:$PATH"
+
+Installation complete, compile cpuminer-opt.
+
+Unpack the cpuminer-opt source files using tar from the msys shell, or using
+7zip or a similar Windows program.
+
+In the msys shell, cd to the miner directory.
+cd /c/path/to/cpuminer-opt
+
+Run build.sh to build on Windows or execute the following commands.
+
+./autogen.sh
+CFLAGS="-O3 -march=native -Wall" ./configure --with-curl
+make
+
+Start mining.
+
+cpuminer.exe -a algo -o url -u user -p password
+
+The following tips may be useful for older AMD CPUs.
+
+AMD CPUs older than Steamroller, including Athlon x2 and Phenom II x4, are
+not supported by cpuminer-opt due to an incompatible implementation of SSE2
+on these CPUs. Some algos may crash the miner with an invalid instruction.
+Users are recommended to use an unoptimized miner such as cpuminer-multi.
+
+Some users with AMD CPUs without AES_NI have reported problems compiling
+with build.sh or "-march=native". Problems have included compile errors
+and poor performance. These users are recommended to compile manually,
+specifying "-march=btver1" on the configure command line.
+
+Support for even older x86_64 without AES_NI or SSE2 is not available.
+
diff --git a/INSTALL_WINDOWS b/INSTALL_WINDOWS new file mode 100644 index 0000000..75d6d70 --- /dev/null +++ b/INSTALL_WINDOWS @@ -0,0 +1,173 @@
+Instructions for compiling cpuminer-opt for Windows.
+
+
+Windows compilation using Visual Studio is not supported. Mingw64 is
+used on a Linux system (bare metal or virtual machine) to cross-compile
+cpuminer-opt executable binaries for Windows.
+
+These instructions were written for Debian and Ubuntu compatible distributions
+but should work on other major distributions as well. However, some of the
+package names or file paths may be different.
+
+It is assumed a Linux system is already available and running, and that the
+user has enough Linux knowledge to find and install packages and follow these
+instructions.
+
+First, it is a good idea to create a new user specifically for cross compiling.
+It keeps all the mingw stuff contained and isolated from the rest of the system.
+
+Step by step...
+
+1. Install the necessary packages from the distribution's repositories.
+
+Refer to the Linux compile instructions and install the required packages.
+
+Additionally, install mingw-w64.
+
+sudo apt-get install mingw-w64
+
+
+2. Create a local library directory for packages to be compiled in the next
+   step. The recommended location is $HOME/usr/lib/
+
+
+3. Download and build other packages for mingw that don't have a mingw64
+   version available in the repositories.
+
+Download the following source code packages from their respective and
+respected download locations, copy them to ~/usr/lib/ and uncompress them.
+
+openssl
+curl
+gmp
+
+In most cases the latest version is ok but it's safest to download
+the same major and minor version as included in your distribution.
+
+Run the following commands or follow the supplied instructions.
+Do not run "make install" unless you are using ~/usr/lib, which isn't
+recommended.
+
+Some instructions insist on running "make check". If make check fails
+it may still work, YMMV.
+
+You can speed up "make" by using all available CPU cores with "-j n" where
+n is the number of CPU threads you want to use.
+
+openssl:
+
+./Configure mingw64 shared --cross-compile-prefix=x86_64-w64-mingw32
+make
+
+curl:
+
+./configure --with-winssl --with-winidn --host=x86_64-w64-mingw32
+make
+
+gmp:
+
+./configure --host=x86_64-w64-mingw32
+make
+
+
+
+4. Tweak the environment.
+
+This step is required every time you log in, or the commands can be added to
+.bashrc.
+
+Define some local variables to point to the local libraries.
+
+export LOCAL_LIB="$HOME/usr/lib"
+
+export LDFLAGS="-L$LOCAL_LIB/curl/lib/.libs -L$LOCAL_LIB/gmp/.libs -L$LOCAL_LIB/openssl"
+
+export CONFIGURE_ARGS="--with-curl=$LOCAL_LIB/curl --with-crypto=$LOCAL_LIB/openssl --host=x86_64-w64-mingw32"
+
+Create a release directory and copy some dll files previously built.
+This can be done outside of cpuminer-opt and only needs to be done once.
+If the release directory is in the cpuminer-opt directory it needs to be
+recreated every time a source package is decompressed.
+
+mkdir release
+cp /usr/x86_64-w64-mingw32/lib/zlib1.dll release/
+cp /usr/x86_64-w64-mingw32/lib/libwinpthread-1.dll release/
+cp /usr/lib/gcc/x86_64-w64-mingw32/7.3-win32/libstdc++-6.dll release/
+cp /usr/lib/gcc/x86_64-w64-mingw32/7.3-win32/libgcc_s_seh-1.dll release/
+cp $LOCAL_LIB/openssl/libcrypto-1_1-x64.dll release/
+cp $LOCAL_LIB/curl/lib/.libs/libcurl-4.dll release/
+
+
+
+The following steps need to be done every time a new source package is
+opened.
+
+5. Download cpuminer-opt
+
+Download the latest source code package of cpuminer-opt to your desired
+location. .zip or .tar.gz, your choice.
+
+https://github.com/JayDDee/cpuminer-opt/releases
+
+Decompress it and change to the cpuminer-opt directory.
+
+
+
+6. Prepare to compile
+
+Create a link to the locally compiled version of gmp.h
+
+ln -s $LOCAL_LIB/gmp-version/gmp.h ./gmp.h
+
+Edit configure.ac to fix the pthread library name.
+
+sed -i 's/"-lpthread"/"-lpthreadGC2"/g' configure.ac
+
+
+7. Compile
+
+You can use the default compile if you intend to use cpuminer-opt on the
+same CPU and the virtual machine supports that architecture.
+
+./build.sh
+
+Otherwise you can compile manually while setting options in CFLAGS.
+
+Some common options:
+
+To compile for a specific CPU architecture:
+
+CFLAGS="-O3 -march=znver1 -Wall" ./configure --with-curl
+
+This will compile for AMD Ryzen.
+
+You can compile more generically for a set of specific CPU features
+if you know what features you want:
+
+CFLAGS="-O3 -maes -msse4.2 -Wall" ./configure --with-curl
+
+This will compile for an older CPU that does not have AVX.
+
+You can find several examples in build-allarch.sh
+
+If you have a CPU with more than 64 threads and Windows 7 or higher you
+can enable the CPU Groups feature:
+
+-D_WIN32_WINNT=0x0601
+
+Once you have run configure successfully, run make with n CPU threads:
+
+make -j n
+
+Copy cpuminer.exe to the release directory, compress and copy the release
+directory to a Windows system and run cpuminer.exe from the command line.
+
+Run cpuminer
+
+In a command window, change directories to the unzipped release folder.
+To get a list of all options:
+
+cpuminer.exe --help
+
+Command options are specific to where you mine. Refer to the pool's
+instructions on how to set them.
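+
+A note on the CPU Groups define mentioned above: the Windows processor-group
+APIs are only declared when the target API level is Windows 7 (0x0601) or
+newer, which is why the define is needed at compile time. A minimal
+stand-alone sketch, not taken from cpuminer-opt's source, of counting
+logical CPUs across all groups:
+
+#include <windows.h>
+
+#if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0601)
+/* Group-aware calls are visible only at the Windows 7 API level or newer. */
+static unsigned int count_all_cpus(void)
+{
+   unsigned int cpus = 0;
+   WORD group;
+   WORD groups = GetActiveProcessorGroupCount();
+   for ( group = 0; group < groups; group++ )
+      cpus += GetActiveProcessorCount( group );
+   return cpus;
+}
+#else
+/* Without the define only the calling process's group, at most 64 logical
+   CPUs, is visible. */
+static unsigned int count_all_cpus(void)
+{
+   SYSTEM_INFO si;
+   GetSystemInfo( &si );
+   return si.dwNumberOfProcessors;
+}
+#endif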
diff --git a/Makefile.am b/Makefile.am index f70e8c8..96a22d2 100644 --- a/Makefile.am +++ b/Makefile.am @@ -42,10 +42,11 @@ cpuminer_SOURCES = \ algo/argon2/argon2d/argon2d/argon2.c \ algo/argon2/argon2d/argon2d/core.c \ algo/argon2/argon2d/argon2d/opt.c \ - algo/argon2/argon2d/argon2d/thread.c \ + algo/argon2/argon2d/argon2d/argon2d_thread.c \ algo/argon2/argon2d/argon2d/encoding.c \ algo/blake/sph_blake.c \ - algo/blake/blake-hash-4way.c \ + algo/blake/blake256-hash-4way.c \ + algo/blake/blake512-hash-4way.c \ algo/blake/blake-gate.c \ algo/blake/blake.c \ algo/blake/blake-4way.c \ @@ -136,6 +137,8 @@ cpuminer_SOURCES = \ algo/nist5/nist5-4way.c \ algo/nist5/nist5.c \ algo/nist5/zr5.c \ + algo/panama/sph_panama.c \ + algo/radiogatun/sph_radiogatun.c \ algo/pluck.c \ algo/quark/quark-gate.c \ algo/quark/quark.c \ @@ -167,6 +170,7 @@ cpuminer_SOURCES = \ algo/shabal/shabal-hash-4way.c \ algo/shavite/sph_shavite.c \ algo/shavite/sph-shavite-aesni.c \ + algo/shavite/shavite-hash-2way.c \ algo/shavite/shavite.c \ algo/simd/sph_simd.c \ algo/simd/nist.c \ @@ -240,16 +244,20 @@ cpuminer_SOURCES = \ algo/x15/x15-gate.c \ algo/x15/x15.c \ algo/x15/x15-4way.c \ + algo/x16/x16r-gate.c \ + algo/x16/x16r.c \ + algo/x16/x16r-4way.c \ algo/x17/x17-gate.c \ algo/x17/x17.c \ algo/x17/x17-4way.c \ algo/x17/xevan-gate.c \ algo/x17/xevan.c \ algo/x17/xevan-4way.c \ - algo/x17/x16r-gate.c \ - algo/x17/x16r.c \ - algo/x17/x16r-4way.c \ algo/x17/hmq1725.c \ + algo/x17/sonoa-gate.c \ + algo/x17/sonoa-4way.c \ + algo/x17/sonoa.c \ + algo/x20/x20r.c \ algo/yescrypt/yescrypt.c \ algo/yescrypt/sha256_Y.c \ algo/yescrypt/yescrypt-best.c \ diff --git a/README.md b/README.md index 7f5a1d9..793c0cf 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,8 @@ https://bitcointalk.org/index.php?topic=1326803.0 mailto://jayddee246@gmail.com -See file RELEASE_NOTES for change log and compile instructions. +See file RELEASE_NOTES for change log and INSTALL_LINUX or INSTALL_WINDOWS +for compile instructions. Requirements ------------ @@ -78,6 +79,7 @@ Supported Algorithms lyra2h Hppcoin lyra2re lyra2 lyra2rev2 lyra2v2, Vertcoin + lyra2rev3 lyrav2v3, Vertcoin lyra2z Zcoin (XZC) lyra2z330 Lyra2 330 rows, Zoin (ZOI) m7m Magi (XMG) @@ -100,6 +102,7 @@ Supported Algorithms skein Skein+Sha (Skeincoin) skein2 Double Skein (Woodcoin) skunk Signatum (SIGT) + sonoa Sono timetravel Machinecoin (MAC) timetravel10 Bitcore tribus Denarius (DNR) @@ -130,6 +133,8 @@ Supported Algorithms Errata ------ +Cryptonight and variants are no longer supported, use another miner. + Neoscrypt crashes on Windows, use legacy version. AMD CPUs older than Piledriver, including Athlon x2 and Phenom II x4, are not diff --git a/README.txt b/README.txt index 0a0ecbf..ebe9be9 100644 --- a/README.txt +++ b/README.txt @@ -15,7 +15,8 @@ the features listed at cpuminer startup to ensure you are mining at optimum speed using all the available features. Architecture names and compile options used are only provided for Intel -Core series. Pentium and Celeron often have fewer features. +Core series. Even the newest Pentium and Celeron CPUs are often missing +features. AMD CPUs older than Piledriver, including Athlon x2 and Phenom II x4, are not supported by cpuminer-opt due to an incompatible implementation of SSE2 on @@ -27,17 +28,13 @@ Changes in v3.8.4 may have improved compatibility with some of these CPUs. 
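The table below lists the compile flags each Windows binary is built with. As
a rough illustration of how a host CPU can be checked at run time for the
features a given binary assumes, here is a small stand-alone sketch using
GCC's __builtin_cpu_supports; it is an example only, not cpuminer-opt's own
startup check:

#include <stdio.h>

int main(void)
{
   /* Bail out early instead of crashing later with an illegal instruction. */
   if ( !__builtin_cpu_supports( "sse2" ) )
   {
      fprintf( stderr, "This CPU lacks SSE2 and cannot run any build.\n" );
      return 1;
   }
   printf( "sse4.2: %s\n", __builtin_cpu_supports( "sse4.2" ) ? "yes" : "no" );
   printf( "avx:    %s\n", __builtin_cpu_supports( "avx" )    ? "yes" : "no" );
   printf( "avx2:   %s\n", __builtin_cpu_supports( "avx2" )   ? "yes" : "no" );
   return 0;
}

Matching the reported features against the table is a quick way to pick a
suitable exe.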
Exe name Compile flags Arch name cpuminer-sse2.exe "-msse2" Core2, Nehalem -cpuminer-aes-sse42.exe "-march=westmere" Westmere, Sandy-Ivybridge +cpuminer-aes-sse42.exe "-march=westmere" Westmere cpuminer-avx.exe "-march=corei7-avx" Sandy-Ivybridge cpuminer-avx2.exe "-march=core-avx2" Haswell, Sky-Kaby-Coffeelake -cpuminer-avx2-sha.exe "-march=core-avx2 -msha" Ryzen +cpuminer-zen "-march=znver1 -DRYZEN_" Ryzen If you like this software feel free to donate: BTC: 12tdvfF7KmAsihBXQXynT6E6th2c2pByTT -ETH: 0x72122edabcae9d3f57eab0729305a425f6fef6d0 -LTC: LdUwoHJnux9r9EKqFWNvAi45kQompHk6e8 -BCH: 1QKYkB6atn4P7RFozyziAXLEnurwnUM1cQ -BTG: GVUyECtRHeC5D58z9F3nGGfVQndwnsPnHQ diff --git a/RELEASE_NOTES b/RELEASE_NOTES index c987ae8..2c0624c 100644 --- a/RELEASE_NOTES +++ b/RELEASE_NOTES @@ -1,11 +1,11 @@ -puminer-opt now supports HW SHA acceleration available on AMD Ryzen CPUs. +cpuminer-opt is a console program run from the command line using the +keyboard, not the mouse. + +cpuminer-opt now supports HW SHA acceleration available on AMD Ryzen CPUs. This feature requires recent SW including GCC version 5 or higher and openssl version 1.1 or higher. It may also require using "-march=znver1" compile flag. -HW SHA support is only available when compiled from source, Windows binaries -are not yet available. - cpuminer-opt is a console program, if you're using a mouse you're doing it wrong. @@ -25,140 +25,62 @@ required. Compile Instructions -------------------- -Requirements: +See INSTALL_LINUX or INSTALL_WINDOWS fror compile instruuctions + +Requirements +------------ Intel Core2 or newer, or AMD Steamroller or newer CPU. ARM CPUs are not supported. + 64 bit Linux or Windows operating system. Apple is not supported. -Building on linux prerequisites: - -It is assumed users know how to install packages on their system and -be able to compile standard source packages. This is basic Linux and -beyond the scope of cpuminer-opt. - -Make sure you have the basic development packages installed. -Here is a good start: - -http://askubuntu.com/questions/457526/how-to-install-cpuminer-in-ubuntu - -Install any additional dependencies needed by cpuminer-opt. The list below -are some of the ones that may not be in the default install and need to -be installed manually. There may be others, read the error messages they -will give a clue as to the missing package. - -The following command should install everything you need on Debian based -distributions such as Ubuntu: - -sudo apt-get install build-essential libssl-dev libcurl4-openssl-dev libjansson-dev libgmp-dev automake zlib1g-dev - -build-essential (for Ubuntu, Development Tools package group on Fedora) -automake -libjansson-dev -libgmp-dev -libcurl4-openssl-dev -libssl-dev -pthreads -zlib - -SHA support on AMD Ryzen CPUs requires gcc version 5 or higher and openssl 1.1 -or higher. Reports of improved performiance on Ryzen when using openssl 1.0.2 -have been due to AVX and AVX2 optimizations added to that version. -Additional improvements are expected on Ryzen with openssl 1.1. -"-march-znver1" or "-msha". - -Additional instructions for static compilalation can be found here: -https://lxadm.com/Static_compilation_of_cpuminer -Static builds should only considered in a homogeneous HW and SW environment. -Local builds will always have the best performance and compatibility. - -Extract cpuminer source. - -tar xvzf cpuminer-opt-x.y.z.tar.gz -cd cpuminer-opt-x.y.z - -Run ./build.sh to build on Linux or execute the following commands. 
- -./autogen.sh -CFLAGS="-O3 -march=native -Wall" ./configure --with-curl -make - -Additional optional compile flags, add the following to CFLAGS to activate: - --DUSE_SPH_SHA (deprecated) - -SPH may give slightly better performance on algos that use sha256 when using -openssl 1.0.1 or older. Openssl 1.0.2 adds AVX2 and 1.1 adds SHA and perform -better than SPH. This option is ignored when 4-way is used, even for CPUs -with SHA. - -Start mining. - -./cpuminer -a algo -o url -u username -p password - -Windows - -Precompiled Windows binaries are built on a Linux host using Mingw -with a more recent compiler than the following Windows hosted procedure. - -Building on Windows prerequisites: - -msys -mingw_w64 -Visual C++ redistributable 2008 X64 -openssl - -Install msys and mingw_w64, only needed once. - -Unpack msys into C:\msys or your preferred directory. - -Install mingw_w64 from win-builds. -Follow instructions, check "msys or cygwin" and "x86_64" and accept default -existing msys instalation. - -Open a msys shell by double clicking on msys.bat. -Note that msys shell uses linux syntax for file specifications, "C:\" is -mounted at "/c/". - -Add mingw bin directory to PATH variable -PATH="/c/msys/opt/windows_64/bin/:$PATH" - -Instalation complete, compile cpuminer-opt. - -Unpack cpuminer-opt source files using tar from msys shell, or using 7zip -or similar Windows program. - -In msys shell cd to miner directory. -cd /c/path/to/cpuminer-opt - -Run build.sh to build on Windows or execute the following commands. - -./autogen.sh -CFLAGS="-O3 -march=native -Wall" ./configure --with-curl -make - -Start mining - -cpuminer.exe -a algo -o url -u user -p password - -The following tips may be useful for older AMD CPUs. - -AMD CPUs older than Steamroller, including Athlon x2 and Phenom II x4, are -not supported by cpuminer-opt due to an incompatible implementation of SSE2 -on these CPUs. Some algos may crash the miner with an invalid instruction. -Users are recommended to use an unoptimized miner such as cpuminer-multi. - -Some users with AMD CPUs without AES_NI have reported problems compiling -with build.sh or "-march=native". Problems have included compile errors -and poor performance. These users are recommended to compile manually -specifying "-march=btver1" on the configure command line. - -Support for even older x86_64 without AES_NI or SSE2 is not availble. - - Change Log ---------- +v3.9.1 + +Fixed AVX2 version of anime algo. + +Added sonoa algo. + +Added "-DRYZEN_" compile option for Ryzen to override 4-way hashing when algo +contains sha256 and use SHA instead. This is due to a combination of +the introduction of HW SHA support combined with the poor performance +of AVX2 on Ryzen. The Windows binaries package replaces cpuminer-avx2-sha +with cpuminer-zen compiled with the override. Refer to the build instructions +for more information. + +Ongoing restructuring to streamline the process, reduce latency, +reduce memory usage and unnecessary copying of data. Most of these +will not result in a notoceably higher reported hashrate as the +change simply reduces the time wasted that wasn't factored into the +hash rate reported by the miner. In short, less dead time resulting in +a higher net hashrate. + +One of these measures to reduce latency also results in an enhanced +share submission message including the share number*, the CPU thread, +and the vector lane that found the solution. 
The time difference between
+the share submission and acceptance (or rejection) response indicates
+network latency. One other effect of this change is a reduction in hash
+meter messages because the scan function no longer exits when a share is
+found. Scan cycles will run longer and submit multiple shares per cycle.
+*The share number is anticipated and includes both accepted and rejected
+shares. Because the share is anticipated and not synchronized, it may be
+incorrect in times of very rapid share submission. Under most conditions
+it should be easy to match the submission with the corresponding response.
+
+Removed the "-DUSE_SPH_SHA" option; all users should have a recent version of
+openssl installed: v1.0.2 (Ubuntu 16.04) or better. Ryzen SHA requires
+v1.1.0 or better. Ryzen SHA is not used when hashing multi-way parallel.
+Ryzen SHA is available in the Windows binaries release package.
+
+Improved compile instructions, now in separate files: INSTALL_LINUX and
+INSTALL_WINDOWS. The Windows instructions are used to build the binaries
+release package. It's built on a Linux system either running as a virtual
+machine or a separate computer. At this time there is no known way to
+build natively on a Windows system.
+
v3.9.0.1
Isolate Windows CPU groups code when CPU groups support not explicitly defined.
@@ -171,6 +93,7 @@ Prep work for AVX512.
Added lyra2rev3 for the vertcoin algo change.
Added yespower, yespowerr16 (Yenten)
Added phi2 algo for LUX
+Discontinued support for cryptonight and variants.
v3.8.8.1
@@ -354,6 +277,7 @@ Changed default sha256 and sha512 to openssl.
This should be used when compiling with openssl 1.0.2 or higher (Ubuntu 16.04). This should increase the hashrate for yescrypt, yescryptr16, m7m, xevan, skein, myr-gr & others when openssl 1.0.2 is installed.
+Note: -DUSE_SPH_SHA has been removed in v3.9.1.
Users with openssl 1.0.1 (Ubuntu 14.04) may get better performance by adding "-DUSE_SPH_SHA" to CFLAGS. Windows binaries are compiled with -DUSE_SPH_SHA and won't get the speedup.
diff --git a/algo-gate-api.c b/algo-gate-api.c index 0f9cd02..9948e08 100644 --- a/algo-gate-api.c +++ b/algo-gate-api.c
@@ -214,6 +214,7 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
 case ALGO_SKEIN: register_skein_algo ( gate ); break;
 case ALGO_SKEIN2: register_skein2_algo ( gate ); break;
 case ALGO_SKUNK: register_skunk_algo ( gate ); break;
+ case ALGO_SONOA: register_sonoa_algo ( gate ); break;
 case ALGO_TIMETRAVEL: register_timetravel_algo ( gate ); break;
 case ALGO_TIMETRAVEL10: register_timetravel10_algo ( gate ); break;
 case ALGO_TRIBUS: register_tribus_algo ( gate ); break;
@@ -266,6 +267,10 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
// override std defaults with jr2 defaults
bool register_json_rpc2( algo_gate_t *gate )
{
+ applog(LOG_WARNING,"\nCryptonight algorithm and variants are no longer");
+ applog(LOG_WARNING,"supported by cpuminer-opt. Shares submitted will");
+ applog(LOG_WARNING,"likely be rejected.
Proceed at your own risk.\n"); + gate->wait_for_diff = (void*)&do_nothing; gate->get_new_work = (void*)&jr2_get_new_work; gate->get_nonceptr = (void*)&jr2_get_nonceptr; @@ -354,3 +359,5 @@ void get_algo_alias( char** algo_or_alias ) } } +#undef ALIAS +#undef PROPER diff --git a/algo-gate-api.h b/algo-gate-api.h index c018856..326e759 100644 --- a/algo-gate-api.h +++ b/algo-gate-api.h @@ -109,8 +109,15 @@ inline bool set_excl ( set_t a, set_t b ) { return (a & b) == 0; } typedef struct { +// special case, only one target, provides a callback for scanhash to +// submit work with less overhead. +// bool (*submit_work ) ( struct thr_info*, const struct work* ); + // mandatory functions, must be overwritten -int ( *scanhash ) ( int, struct work*, uint32_t, uint64_t* ); +// Added a 5th arg for the thread_info structure to replace the int thr id +// in the first arg. Both will co-exist during the trasition. +//int ( *scanhash ) ( int, struct work*, uint32_t, uint64_t* ); +int ( *scanhash ) ( int, struct work*, uint32_t, uint64_t*, struct thr_info* ); // optional unsafe, must be overwritten if algo uses function void ( *hash ) ( void*, const void*, uint32_t ) ; @@ -188,6 +195,11 @@ void four_way_not_tested(); // allways returns failure int null_scanhash(); +// The one and only, a callback for scanhash. + + +bool submit_work( struct thr_info *thr, const struct work *work_in ); + // displays warning void null_hash (); void null_hash_suw(); diff --git a/algo/argon2/argon2d/argon2d/thread.c b/algo/argon2/argon2d/argon2d/argon2d_thread.c similarity index 98% rename from algo/argon2/argon2d/argon2d/thread.c rename to algo/argon2/argon2d/argon2d/argon2d_thread.c index e099a00..41eca42 100644 --- a/algo/argon2/argon2d/argon2d/thread.c +++ b/algo/argon2/argon2d/argon2d/argon2d_thread.c @@ -17,7 +17,7 @@ #if !defined(ARGON2_NO_THREADS) -#include "thread.h" +#include "argon2d_thread.h" #if defined(_WIN32) #include #endif diff --git a/algo/argon2/argon2d/argon2d/thread.h b/algo/argon2/argon2d/argon2d/argon2d_thread.h similarity index 100% rename from algo/argon2/argon2d/argon2d/thread.h rename to algo/argon2/argon2d/argon2d/argon2d_thread.h diff --git a/algo/argon2/argon2d/argon2d/core.c b/algo/argon2/argon2d/argon2d/core.c index 0a5b4e8..2a2986a 100644 --- a/algo/argon2/argon2d/argon2d/core.c +++ b/algo/argon2/argon2d/argon2d/core.c @@ -30,7 +30,7 @@ #include #include "core.h" -#include "thread.h" +#include "argon2d_thread.h" #include "../blake2/blake2.h" #include "../blake2/blake2-impl.h" diff --git a/algo/blake/blake-hash-4way.h b/algo/blake/blake-hash-4way.h index 037503a..199bedd 100644 --- a/algo/blake/blake-hash-4way.h +++ b/algo/blake/blake-hash-4way.h @@ -37,7 +37,7 @@ #ifndef __BLAKE_HASH_4WAY__ #define __BLAKE_HASH_4WAY__ 1 -#ifdef __SSE4_2__ +//#ifdef __SSE4_2__ #ifdef __cplusplus extern "C"{ @@ -57,19 +57,22 @@ extern "C"{ // Blake-256 4 way typedef struct { - __m128i buf[16] __attribute__ ((aligned (64))); - __m128i H[8]; - __m128i S[4]; + unsigned char buf[64<<2]; + uint32_t H[8<<2]; + uint32_t S[4<<2]; +// __m128i buf[16] __attribute__ ((aligned (64))); +// __m128i H[8]; +// __m128i S[4]; size_t ptr; - sph_u32 T0, T1; + uint32_t T0, T1; int rounds; // 14 for blake, 8 for blakecoin & vanilla -} blake_4way_small_context; +} blake_4way_small_context __attribute__ ((aligned (64))); // Default 14 rounds typedef blake_4way_small_context blake256_4way_context; -void blake256_4way_init(void *cc); -void blake256_4way(void *cc, const void *data, size_t len); -void blake256_4way_close(void *cc, void 
*dst); +void blake256_4way_init(void *ctx); +void blake256_4way(void *ctx, const void *data, size_t len); +void blake256_4way_close(void *ctx, void *dst); // 14 rounds, blake, decred typedef blake_4way_small_context blake256r14_4way_context; @@ -132,12 +135,10 @@ void blake512_4way_close(void *cc, void *dst); void blake512_4way_addbits_and_close( void *cc, unsigned ub, unsigned n, void *dst); -#endif +#endif // AVX2 #ifdef __cplusplus } #endif -#endif - -#endif +#endif // BLAKE_HASH_4WAY_H__ diff --git a/algo/blake/blake-hash-4way.c b/algo/blake/blake256-hash-4way.c similarity index 55% rename from algo/blake/blake-hash-4way.c rename to algo/blake/blake256-hash-4way.c index 04d57e2..180b040 100644 --- a/algo/blake/blake-hash-4way.c +++ b/algo/blake/blake256-hash-4way.c @@ -30,9 +30,10 @@ * @author Thomas Pornin */ -#if defined (__SSE4_2__) +//#if defined (__SSE4_2__) #include +#include #include #include @@ -60,26 +61,12 @@ extern "C"{ // Blake-256 -static const sph_u32 IV256[8] = { - SPH_C32(0x6A09E667), SPH_C32(0xBB67AE85), - SPH_C32(0x3C6EF372), SPH_C32(0xA54FF53A), - SPH_C32(0x510E527F), SPH_C32(0x9B05688C), - SPH_C32(0x1F83D9AB), SPH_C32(0x5BE0CD19) +static const uint32_t IV256[8] = +{ + 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, + 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 }; -#if defined (__AVX2__) - -// Blake-512 - -static const sph_u64 IV512[8] = { - SPH_C64(0x6A09E667F3BCC908), SPH_C64(0xBB67AE8584CAA73B), - SPH_C64(0x3C6EF372FE94F82B), SPH_C64(0xA54FF53A5F1D36F1), - SPH_C64(0x510E527FADE682D1), SPH_C64(0x9B05688C2B3E6C1F), - SPH_C64(0x1F83D9ABFB41BD6B), SPH_C64(0x5BE0CD19137E2179) -}; - -#endif - #if SPH_COMPACT_BLAKE_32 || SPH_COMPACT_BLAKE_64 // Blake-256 4 & 8 way, Blake-512 4 way @@ -317,47 +304,6 @@ static const sph_u32 CS[16] = { #endif -#if defined(__AVX2__) - -// Blake-512 4 way - -#define CBx(r, i) CBx_(Z ## r ## i) -#define CBx_(n) CBx__(n) -#define CBx__(n) CB ## n - -#define CB0 SPH_C64(0x243F6A8885A308D3) -#define CB1 SPH_C64(0x13198A2E03707344) -#define CB2 SPH_C64(0xA4093822299F31D0) -#define CB3 SPH_C64(0x082EFA98EC4E6C89) -#define CB4 SPH_C64(0x452821E638D01377) -#define CB5 SPH_C64(0xBE5466CF34E90C6C) -#define CB6 SPH_C64(0xC0AC29B7C97C50DD) -#define CB7 SPH_C64(0x3F84D5B5B5470917) -#define CB8 SPH_C64(0x9216D5D98979FB1B) -#define CB9 SPH_C64(0xD1310BA698DFB5AC) -#define CBA SPH_C64(0x2FFD72DBD01ADFB7) -#define CBB SPH_C64(0xB8E1AFED6A267E96) -#define CBC SPH_C64(0xBA7C9045F12C7F99) -#define CBD SPH_C64(0x24A19947B3916CF7) -#define CBE SPH_C64(0x0801F2E2858EFC16) -#define CBF SPH_C64(0x636920D871574E69) - -#if SPH_COMPACT_BLAKE_64 -// not used -static const sph_u64 CB[16] = { - SPH_C64(0x243F6A8885A308D3), SPH_C64(0x13198A2E03707344), - SPH_C64(0xA4093822299F31D0), SPH_C64(0x082EFA98EC4E6C89), - SPH_C64(0x452821E638D01377), SPH_C64(0xBE5466CF34E90C6C), - SPH_C64(0xC0AC29B7C97C50DD), SPH_C64(0x3F84D5B5B5470917), - SPH_C64(0x9216D5D98979FB1B), SPH_C64(0xD1310BA698DFB5AC), - SPH_C64(0x2FFD72DBD01ADFB7), SPH_C64(0xB8E1AFED6A267E96), - SPH_C64(0xBA7C9045F12C7F99), SPH_C64(0x24A19947B3916CF7), - SPH_C64(0x0801F2E2858EFC16), SPH_C64(0x636920D871574E69) -}; - -#endif - -#endif #define GS_4WAY( m0, m1, c0, c1, a, b, c, d ) \ do { \ @@ -411,125 +357,41 @@ do { \ #endif -#if defined (__AVX2__) - -// Blake-256 8 way - -#define GS_8WAY( m0, m1, c0, c1, a, b, c, d ) \ -do { \ - a = _mm256_add_epi32( _mm256_add_epi32( _mm256_xor_si256( \ - _mm256_set1_epi32( c1 ), m0 ), b ), a ); \ - d = mm256_ror_32( _mm256_xor_si256( d, a ), 16 ); \ - c = _mm256_add_epi32( c, d ); \ 
- b = mm256_ror_32( _mm256_xor_si256( b, c ), 12 ); \ - a = _mm256_add_epi32( _mm256_add_epi32( _mm256_xor_si256( \ - _mm256_set1_epi32( c0 ), m1 ), b ), a ); \ - d = mm256_ror_32( _mm256_xor_si256( d, a ), 8 ); \ - c = _mm256_add_epi32( c, d ); \ - b = mm256_ror_32( _mm256_xor_si256( b, c ), 7 ); \ -} while (0) - -#define ROUND_S_8WAY(r) do { \ - GS_8WAY(Mx(r, 0), Mx(r, 1), CSx(r, 0), CSx(r, 1), V0, V4, V8, VC); \ - GS_8WAY(Mx(r, 2), Mx(r, 3), CSx(r, 2), CSx(r, 3), V1, V5, V9, VD); \ - GS_8WAY(Mx(r, 4), Mx(r, 5), CSx(r, 4), CSx(r, 5), V2, V6, VA, VE); \ - GS_8WAY(Mx(r, 6), Mx(r, 7), CSx(r, 6), CSx(r, 7), V3, V7, VB, VF); \ - GS_8WAY(Mx(r, 8), Mx(r, 9), CSx(r, 8), CSx(r, 9), V0, V5, VA, VF); \ - GS_8WAY(Mx(r, A), Mx(r, B), CSx(r, A), CSx(r, B), V1, V6, VB, VC); \ - GS_8WAY(Mx(r, C), Mx(r, D), CSx(r, C), CSx(r, D), V2, V7, V8, VD); \ - GS_8WAY(Mx(r, E), Mx(r, F), CSx(r, E), CSx(r, F), V3, V4, V9, VE); \ -} while (0) - -// Blake-512 4 way - -#define GB_4WAY(m0, m1, c0, c1, a, b, c, d) do { \ - a = _mm256_add_epi64( _mm256_add_epi64( _mm256_xor_si256( \ - _mm256_set_epi64x( c1, c1, c1, c1 ), m0 ), b ), a ); \ - d = mm256_ror_64( _mm256_xor_si256( d, a ), 32 ); \ - c = _mm256_add_epi64( c, d ); \ - b = mm256_ror_64( _mm256_xor_si256( b, c ), 25 ); \ - a = _mm256_add_epi64( _mm256_add_epi64( _mm256_xor_si256( \ - _mm256_set_epi64x( c0, c0, c0, c0 ), m1 ), b ), a ); \ - d = mm256_ror_64( _mm256_xor_si256( d, a ), 16 ); \ - c = _mm256_add_epi64( c, d ); \ - b = mm256_ror_64( _mm256_xor_si256( b, c ), 11 ); \ -} while (0) - -#if SPH_COMPACT_BLAKE_64 -// not used -#define ROUND_B_4WAY(r) do { \ - GB_4WAY(M[sigma[r][0x0]], M[sigma[r][0x1]], \ - CB[sigma[r][0x0]], CB[sigma[r][0x1]], V0, V4, V8, VC); \ - GB_4WAY(M[sigma[r][0x2]], M[sigma[r][0x3]], \ - CB[sigma[r][0x2]], CB[sigma[r][0x3]], V1, V5, V9, VD); \ - GB_4WAY(M[sigma[r][0x4]], M[sigma[r][0x5]], \ - CB[sigma[r][0x4]], CB[sigma[r][0x5]], V2, V6, VA, VE); \ - GB_4WAY(M[sigma[r][0x6]], M[sigma[r][0x7]], \ - CB[sigma[r][0x6]], CB[sigma[r][0x7]], V3, V7, VB, VF); \ - GB_4WAY(M[sigma[r][0x8]], M[sigma[r][0x9]], \ - CB[sigma[r][0x8]], CB[sigma[r][0x9]], V0, V5, VA, VF); \ - GB_4WAY(M[sigma[r][0xA]], M[sigma[r][0xB]], \ - CB[sigma[r][0xA]], CB[sigma[r][0xB]], V1, V6, VB, VC); \ - GB_4WAY(M[sigma[r][0xC]], M[sigma[r][0xD]], \ - CB[sigma[r][0xC]], CB[sigma[r][0xD]], V2, V7, V8, VD); \ - GB_4WAY(M[sigma[r][0xE]], M[sigma[r][0xF]], \ - CB[sigma[r][0xE]], CB[sigma[r][0xF]], V3, V4, V9, VE); \ -} while (0) - -#else -//current_impl -#define ROUND_B_4WAY(r) do { \ - GB_4WAY(Mx(r, 0), Mx(r, 1), CBx(r, 0), CBx(r, 1), V0, V4, V8, VC); \ - GB_4WAY(Mx(r, 2), Mx(r, 3), CBx(r, 2), CBx(r, 3), V1, V5, V9, VD); \ - GB_4WAY(Mx(r, 4), Mx(r, 5), CBx(r, 4), CBx(r, 5), V2, V6, VA, VE); \ - GB_4WAY(Mx(r, 6), Mx(r, 7), CBx(r, 6), CBx(r, 7), V3, V7, VB, VF); \ - GB_4WAY(Mx(r, 8), Mx(r, 9), CBx(r, 8), CBx(r, 9), V0, V5, VA, VF); \ - GB_4WAY(Mx(r, A), Mx(r, B), CBx(r, A), CBx(r, B), V1, V6, VB, VC); \ - GB_4WAY(Mx(r, C), Mx(r, D), CBx(r, C), CBx(r, D), V2, V7, V8, VD); \ - GB_4WAY(Mx(r, E), Mx(r, F), CBx(r, E), CBx(r, F), V3, V4, V9, VE); \ - } while (0) - -#endif - -#endif - -// Blake-256 4 way - #define DECL_STATE32_4WAY \ __m128i H0, H1, H2, H3, H4, H5, H6, H7; \ __m128i S0, S1, S2, S3; \ - sph_u32 T0, T1; + uint32_t T0, T1; #define READ_STATE32_4WAY(state) do { \ - H0 = (state)->H[0]; \ - H1 = (state)->H[1]; \ - H2 = (state)->H[2]; \ - H3 = (state)->H[3]; \ - H4 = (state)->H[4]; \ - H5 = (state)->H[5]; \ - H6 = (state)->H[6]; \ - H7 = (state)->H[7]; \ - S0 = (state)->S[0]; \ 
- S1 = (state)->S[1]; \ - S2 = (state)->S[2]; \ - S3 = (state)->S[3]; \ + H0 = casti_m128i( state->H, 0 ); \ + H1 = casti_m128i( state->H, 1 ); \ + H2 = casti_m128i( state->H, 2 ); \ + H3 = casti_m128i( state->H, 3 ); \ + H4 = casti_m128i( state->H, 4 ); \ + H5 = casti_m128i( state->H, 5 ); \ + H6 = casti_m128i( state->H, 6 ); \ + H7 = casti_m128i( state->H, 7 ); \ + S0 = casti_m128i( state->S, 0 ); \ + S1 = casti_m128i( state->S, 1 ); \ + S2 = casti_m128i( state->S, 2 ); \ + S3 = casti_m128i( state->S, 3 ); \ T0 = (state)->T0; \ T1 = (state)->T1; \ } while (0) #define WRITE_STATE32_4WAY(state) do { \ - (state)->H[0] = H0; \ - (state)->H[1] = H1; \ - (state)->H[2] = H2; \ - (state)->H[3] = H3; \ - (state)->H[4] = H4; \ - (state)->H[5] = H5; \ - (state)->H[6] = H6; \ - (state)->H[7] = H7; \ - (state)->S[0] = S0; \ - (state)->S[1] = S1; \ - (state)->S[2] = S2; \ - (state)->S[3] = S3; \ + casti_m128i( state->H, 0 ) = H0; \ + casti_m128i( state->H, 1 ) = H1; \ + casti_m128i( state->H, 2 ) = H2; \ + casti_m128i( state->H, 3 ) = H3; \ + casti_m128i( state->H, 4 ) = H4; \ + casti_m128i( state->H, 5 ) = H5; \ + casti_m128i( state->H, 6 ) = H6; \ + casti_m128i( state->H, 7 ) = H7; \ + casti_m128i( state->S, 0 ) = S0; \ + casti_m128i( state->S, 1 ) = S1; \ + casti_m128i( state->S, 2 ) = S2; \ + casti_m128i( state->S, 3 ) = S3; \ (state)->T0 = T0; \ (state)->T1 = T1; \ } while (0) @@ -616,30 +478,30 @@ do { \ V5 = H5; \ V6 = H6; \ V7 = H7; \ - V8 = _mm_xor_si128( S0, _mm_set_epi32( CS0, CS0, CS0, CS0 ) ); \ - V9 = _mm_xor_si128( S1, _mm_set_epi32( CS1, CS1, CS1, CS1 ) ); \ - VA = _mm_xor_si128( S2, _mm_set_epi32( CS2, CS2, CS2, CS2 ) ); \ - VB = _mm_xor_si128( S3, _mm_set_epi32( CS3, CS3, CS3, CS3 ) ); \ + V8 = _mm_xor_si128( S0, _mm_set1_epi32( CS0 ) ); \ + V9 = _mm_xor_si128( S1, _mm_set1_epi32( CS1 ) ); \ + VA = _mm_xor_si128( S2, _mm_set1_epi32( CS2 ) ); \ + VB = _mm_xor_si128( S3, _mm_set1_epi32( CS3 ) ); \ VC = _mm_xor_si128( _mm_set1_epi32( T0 ), _mm_set1_epi32( CS4 ) ); \ VD = _mm_xor_si128( _mm_set1_epi32( T0 ), _mm_set1_epi32( CS5 ) ); \ VE = _mm_xor_si128( _mm_set1_epi32( T1 ), _mm_set1_epi32( CS6 ) ); \ VF = _mm_xor_si128( _mm_set1_epi32( T1 ), _mm_set1_epi32( CS7 ) ); \ - M0 = mm128_bswap_32( * buf ); \ - M1 = mm128_bswap_32( *(buf+1) ); \ - M2 = mm128_bswap_32( *(buf+2) ); \ - M3 = mm128_bswap_32( *(buf+3) ); \ - M4 = mm128_bswap_32( *(buf+4) ); \ - M5 = mm128_bswap_32( *(buf+5) ); \ - M6 = mm128_bswap_32( *(buf+6) ); \ - M7 = mm128_bswap_32( *(buf+7) ); \ - M8 = mm128_bswap_32( *(buf+8) ); \ - M9 = mm128_bswap_32( *(buf+9) ); \ - MA = mm128_bswap_32( *(buf+10) ); \ - MB = mm128_bswap_32( *(buf+11) ); \ - MC = mm128_bswap_32( *(buf+12) ); \ - MD = mm128_bswap_32( *(buf+13) ); \ - ME = mm128_bswap_32( *(buf+14) ); \ - MF = mm128_bswap_32( *(buf+15) ); \ + M0 = mm128_bswap_32( buf[ 0] ); \ + M1 = mm128_bswap_32( buf[ 1] ); \ + M2 = mm128_bswap_32( buf[ 2] ); \ + M3 = mm128_bswap_32( buf[ 3] ); \ + M4 = mm128_bswap_32( buf[ 4] ); \ + M5 = mm128_bswap_32( buf[ 5] ); \ + M6 = mm128_bswap_32( buf[ 6] ); \ + M7 = mm128_bswap_32( buf[ 7] ); \ + M8 = mm128_bswap_32( buf[ 8] ); \ + M9 = mm128_bswap_32( buf[ 9] ); \ + MA = mm128_bswap_32( buf[10] ); \ + MB = mm128_bswap_32( buf[11] ); \ + MC = mm128_bswap_32( buf[12] ); \ + MD = mm128_bswap_32( buf[13] ); \ + ME = mm128_bswap_32( buf[14] ); \ + MF = mm128_bswap_32( buf[15] ); \ ROUND_S_4WAY(0); \ ROUND_S_4WAY(1); \ ROUND_S_4WAY(2); \ @@ -673,6 +535,31 @@ do { \ // Blake-256 8 way +#define GS_8WAY( m0, m1, c0, c1, a, b, c, d ) \ +do { \ + a = 
_mm256_add_epi32( _mm256_add_epi32( _mm256_xor_si256( \ + _mm256_set1_epi32( c1 ), m0 ), b ), a ); \ + d = mm256_ror_32( _mm256_xor_si256( d, a ), 16 ); \ + c = _mm256_add_epi32( c, d ); \ + b = mm256_ror_32( _mm256_xor_si256( b, c ), 12 ); \ + a = _mm256_add_epi32( _mm256_add_epi32( _mm256_xor_si256( \ + _mm256_set1_epi32( c0 ), m1 ), b ), a ); \ + d = mm256_ror_32( _mm256_xor_si256( d, a ), 8 ); \ + c = _mm256_add_epi32( c, d ); \ + b = mm256_ror_32( _mm256_xor_si256( b, c ), 7 ); \ +} while (0) + +#define ROUND_S_8WAY(r) do { \ + GS_8WAY(Mx(r, 0), Mx(r, 1), CSx(r, 0), CSx(r, 1), V0, V4, V8, VC); \ + GS_8WAY(Mx(r, 2), Mx(r, 3), CSx(r, 2), CSx(r, 3), V1, V5, V9, VD); \ + GS_8WAY(Mx(r, 4), Mx(r, 5), CSx(r, 4), CSx(r, 5), V2, V6, VA, VE); \ + GS_8WAY(Mx(r, 6), Mx(r, 7), CSx(r, 6), CSx(r, 7), V3, V7, VB, VF); \ + GS_8WAY(Mx(r, 8), Mx(r, 9), CSx(r, 8), CSx(r, 9), V0, V5, VA, VF); \ + GS_8WAY(Mx(r, A), Mx(r, B), CSx(r, A), CSx(r, B), V1, V6, VB, VC); \ + GS_8WAY(Mx(r, C), Mx(r, D), CSx(r, C), CSx(r, D), V2, V7, V8, VD); \ + GS_8WAY(Mx(r, E), Mx(r, F), CSx(r, E), CSx(r, F), V3, V4, V9, VE); \ +} while (0) + #define DECL_STATE32_8WAY \ __m256i H0, H1, H2, H3, H4, H5, H6, H7; \ __m256i S0, S1, S2, S3; \ @@ -787,312 +674,136 @@ do { \ S3 ), H7 ); \ } while (0) -// Blake-512 4 way - -#define DECL_STATE64_4WAY \ - __m256i H0, H1, H2, H3, H4, H5, H6, H7; \ - __m256i S0, S1, S2, S3; \ - sph_u64 T0, T1; - -#define READ_STATE64_4WAY(state) do { \ - H0 = (state)->H[0]; \ - H1 = (state)->H[1]; \ - H2 = (state)->H[2]; \ - H3 = (state)->H[3]; \ - H4 = (state)->H[4]; \ - H5 = (state)->H[5]; \ - H6 = (state)->H[6]; \ - H7 = (state)->H[7]; \ - S0 = (state)->S[0]; \ - S1 = (state)->S[1]; \ - S2 = (state)->S[2]; \ - S3 = (state)->S[3]; \ - T0 = (state)->T0; \ - T1 = (state)->T1; \ - } while (0) - -#define WRITE_STATE64_4WAY(state) do { \ - (state)->H[0] = H0; \ - (state)->H[1] = H1; \ - (state)->H[2] = H2; \ - (state)->H[3] = H3; \ - (state)->H[4] = H4; \ - (state)->H[5] = H5; \ - (state)->H[6] = H6; \ - (state)->H[7] = H7; \ - (state)->S[0] = S0; \ - (state)->S[1] = S1; \ - (state)->S[2] = S2; \ - (state)->S[3] = S3; \ - (state)->T0 = T0; \ - (state)->T1 = T1; \ - } while (0) - -#if SPH_COMPACT_BLAKE_64 - -// not used -#define COMPRESS64_4WAY do { \ - __m256i M[16]; \ - __m256i V0, V1, V2, V3, V4, V5, V6, V7; \ - __m256i V8, V9, VA, VB, VC, VD, VE, VF; \ - unsigned r; \ - V0 = H0; \ - V1 = H1; \ - V2 = H2; \ - V3 = H3; \ - V4 = H4; \ - V5 = H5; \ - V6 = H6; \ - V7 = H7; \ - V8 = _mm256_xor_si256( S0, _mm256_set_epi64x( CB0, CB0, CB0, CB0 ) ); \ - V9 = _mm256_xor_si256( S1, _mm256_set_epi64x( CB1, CB1, CB1, CB1 ) ); \ - VA = _mm256_xor_si256( S2, _mm256_set_epi64x( CB2, CB2, CB2, CB2 ) ); \ - VB = _mm256_xor_si256( S3, _mm256_set_epi64x( CB3, CB3, CB3, CB3 ) ); \ - VC = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \ - _mm256_set_epi64x( CB4, CB4, CB4, CB4 ) ); \ - VD = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \ - _mm256_set_epi64x( CB5, CB5, CB5, CB5 ) ); \ - VE = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \ - _mm256_set_epi64x( CB6, CB6, CB6, CB6 ) ); \ - VF = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \ - _mm256_set_epi64x( CB7, CB7, CB7, CB7 ) ); \ - M[0x0] = mm256_bswap_64( *(buf+0) ); \ - M[0x1] = mm256_bswap_64( *(buf+1) ); \ - M[0x2] = mm256_bswap_64( *(buf+2) ); \ - M[0x3] = mm256_bswap_64( *(buf+3) ); \ - M[0x4] = mm256_bswap_64( *(buf+4) ); \ - M[0x5] = mm256_bswap_64( *(buf+5) ); \ - M[0x6] = mm256_bswap_64( *(buf+6) ); \ - M[0x7] = mm256_bswap_64( 
*(buf+7) ); \ - M[0x8] = mm256_bswap_64( *(buf+8) ); \ - M[0x9] = mm256_bswap_64( *(buf+9) ); \ - M[0xA] = mm256_bswap_64( *(buf+10) ); \ - M[0xB] = mm256_bswap_64( *(buf+11) ); \ - M[0xC] = mm256_bswap_64( *(buf+12) ); \ - M[0xD] = mm256_bswap_64( *(buf+13) ); \ - M[0xE] = mm256_bswap_64( *(buf+14) ); \ - M[0xF] = mm256_bswap_64( *(buf+15) ); \ - for (r = 0; r < 16; r ++) \ - ROUND_B_4WAY(r); \ - H0 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S0, V0 ), V8 ), H0 ); \ - H1 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S1, V1 ), V9 ), H1 ); \ - H2 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S2, V2 ), VA ), H2 ); \ - H3 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S3, V3 ), VB ), H3 ); \ - H4 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S0, V4 ), VC ), H4 ); \ - H5 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S1, V5 ), VD ), H5 ); \ - H6 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S2, V6 ), VE ), H6 ); \ - H7 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S3, V7 ), VF ), H7 ); \ - } while (0) - -#else - -//current impl - -#define COMPRESS64_4WAY do { \ - __m256i M0, M1, M2, M3, M4, M5, M6, M7; \ - __m256i M8, M9, MA, MB, MC, MD, ME, MF; \ - __m256i V0, V1, V2, V3, V4, V5, V6, V7; \ - __m256i V8, V9, VA, VB, VC, VD, VE, VF; \ - V0 = H0; \ - V1 = H1; \ - V2 = H2; \ - V3 = H3; \ - V4 = H4; \ - V5 = H5; \ - V6 = H6; \ - V7 = H7; \ - V8 = _mm256_xor_si256( S0, _mm256_set_epi64x( CB0, CB0, CB0, CB0 ) ); \ - V9 = _mm256_xor_si256( S1, _mm256_set_epi64x( CB1, CB1, CB1, CB1 ) ); \ - VA = _mm256_xor_si256( S2, _mm256_set_epi64x( CB2, CB2, CB2, CB2 ) ); \ - VB = _mm256_xor_si256( S3, _mm256_set_epi64x( CB3, CB3, CB3, CB3 ) ); \ - VC = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \ - _mm256_set_epi64x( CB4, CB4, CB4, CB4 ) ); \ - VD = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \ - _mm256_set_epi64x( CB5, CB5, CB5, CB5 ) ); \ - VE = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \ - _mm256_set_epi64x( CB6, CB6, CB6, CB6 ) ); \ - VF = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \ - _mm256_set_epi64x( CB7, CB7, CB7, CB7 ) ); \ - M0 = mm256_bswap_64( *(buf + 0) ); \ - M1 = mm256_bswap_64( *(buf + 1) ); \ - M2 = mm256_bswap_64( *(buf + 2) ); \ - M3 = mm256_bswap_64( *(buf + 3) ); \ - M4 = mm256_bswap_64( *(buf + 4) ); \ - M5 = mm256_bswap_64( *(buf + 5) ); \ - M6 = mm256_bswap_64( *(buf + 6) ); \ - M7 = mm256_bswap_64( *(buf + 7) ); \ - M8 = mm256_bswap_64( *(buf + 8) ); \ - M9 = mm256_bswap_64( *(buf + 9) ); \ - MA = mm256_bswap_64( *(buf + 10) ); \ - MB = mm256_bswap_64( *(buf + 11) ); \ - MC = mm256_bswap_64( *(buf + 12) ); \ - MD = mm256_bswap_64( *(buf + 13) ); \ - ME = mm256_bswap_64( *(buf + 14) ); \ - MF = mm256_bswap_64( *(buf + 15) ); \ - ROUND_B_4WAY(0); \ - ROUND_B_4WAY(1); \ - ROUND_B_4WAY(2); \ - ROUND_B_4WAY(3); \ - ROUND_B_4WAY(4); \ - ROUND_B_4WAY(5); \ - ROUND_B_4WAY(6); \ - ROUND_B_4WAY(7); \ - ROUND_B_4WAY(8); \ - ROUND_B_4WAY(9); \ - ROUND_B_4WAY(0); \ - ROUND_B_4WAY(1); \ - ROUND_B_4WAY(2); \ - ROUND_B_4WAY(3); \ - ROUND_B_4WAY(4); \ - ROUND_B_4WAY(5); \ - H0 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S0, V0 ), V8 ), H0 ); \ - H1 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S1, V1 ), V9 ), H1 ); \ - H2 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S2, V2 ), VA ), H2 ); \ - H3 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S3, V3 ), VB ), H3 ); \ - H4 = _mm256_xor_si256( 
_mm256_xor_si256( \ - _mm256_xor_si256( S0, V4 ), VC ), H4 ); \ - H5 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S1, V5 ), VD ), H5 ); \ - H6 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S2, V6 ), VE ), H6 ); \ - H7 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S3, V7 ), VF ), H7 ); \ - } while (0) - -#endif #endif // Blake-256 4 way -static const sph_u32 salt_zero_4way_small[4] = { 0, 0, 0, 0 }; +static const uint32_t salt_zero_4way_small[4] = { 0, 0, 0, 0 }; static void -blake32_4way_init( blake_4way_small_context *sc, const sph_u32 *iv, - const sph_u32 *salt, int rounds ) +blake32_4way_init( blake_4way_small_context *ctx, const uint32_t *iv, + const uint32_t *salt, int rounds ) { - int i; - for ( i = 0; i < 8; i++ ) - sc->H[i] = _mm_set1_epi32( iv[i] ); - for ( i = 0; i < 4; i++ ) - sc->S[i] = _mm_set1_epi32( salt[i] ); - sc->T0 = sc->T1 = 0; - sc->ptr = 0; - sc->rounds = rounds; + casti_m128i( ctx->H, 0 ) = _mm_set1_epi32( iv[0] ); + casti_m128i( ctx->H, 1 ) = _mm_set1_epi32( iv[1] ); + casti_m128i( ctx->H, 2 ) = _mm_set1_epi32( iv[2] ); + casti_m128i( ctx->H, 3 ) = _mm_set1_epi32( iv[3] ); + casti_m128i( ctx->H, 4 ) = _mm_set1_epi32( iv[4] ); + casti_m128i( ctx->H, 5 ) = _mm_set1_epi32( iv[5] ); + casti_m128i( ctx->H, 6 ) = _mm_set1_epi32( iv[6] ); + casti_m128i( ctx->H, 7 ) = _mm_set1_epi32( iv[7] ); + + casti_m128i( ctx->S, 0 ) = m128_zero; + casti_m128i( ctx->S, 1 ) = m128_zero; + casti_m128i( ctx->S, 2 ) = m128_zero; + casti_m128i( ctx->S, 3 ) = m128_zero; +/* + sc->S[0] = _mm_set1_epi32( salt[0] ); + sc->S[1] = _mm_set1_epi32( salt[1] ); + sc->S[2] = _mm_set1_epi32( salt[2] ); + sc->S[3] = _mm_set1_epi32( salt[3] ); +*/ + ctx->T0 = ctx->T1 = 0; + ctx->ptr = 0; + ctx->rounds = rounds; } static void -blake32_4way( blake_4way_small_context *sc, const void *data, size_t len ) +blake32_4way( blake_4way_small_context *ctx, const void *data, size_t len ) { - __m128i *vdata = (__m128i*)data; - __m128i *buf; - size_t ptr; - const int buf_size = 64; // number of elements, sizeof/4 + __m128i *buf = (__m128i*)ctx->buf; + size_t bptr = ctx->ptr<<2; + size_t vptr = ctx->ptr >> 2; + size_t blen = len << 2; DECL_STATE32_4WAY - buf = sc->buf; - ptr = sc->ptr; - if ( len < buf_size - ptr ) + + if ( blen < (sizeof ctx->buf) - bptr ) { - memcpy_128( buf + (ptr>>2), vdata, len>>2 ); - ptr += len; - sc->ptr = ptr; + memcpy( buf + vptr, data, (sizeof ctx->buf) - bptr ); + bptr += blen; + ctx->ptr = bptr>>2; return; } - READ_STATE32_4WAY(sc); - while ( len > 0 ) + READ_STATE32_4WAY( ctx ); + while ( blen > 0 ) { - size_t clen; + size_t clen = ( sizeof ctx->buf ) - bptr; - clen = buf_size - ptr; - if ( clen > len ) - clen = len; - memcpy_128( buf + (ptr>>2), vdata, clen>>2 ); - ptr += clen; - vdata += (clen>>2); - len -= clen; - if ( ptr == buf_size ) + if ( clen > blen ) + clen = blen; + memcpy( buf + vptr, data, clen ); + bptr += clen; + data = (const unsigned char *)data + clen; + blen -= clen; + if ( bptr == ( sizeof ctx->buf ) ) { - if ( ( T0 = SPH_T32(T0 + 512) ) < 512 ) - T1 = SPH_T32(T1 + 1); - COMPRESS32_4WAY( sc->rounds ); - ptr = 0; + if ( ( T0 = T0 + 512 ) < 512 ) + T1 = T1 + 1; + COMPRESS32_4WAY( ctx->rounds ); + bptr = 0; } } - WRITE_STATE32_4WAY(sc); - sc->ptr = ptr; + WRITE_STATE32_4WAY( ctx ); + ctx->ptr = bptr>>2; } static void -blake32_4way_close( blake_4way_small_context *sc, unsigned ub, unsigned n, +blake32_4way_close( blake_4way_small_context *ctx, unsigned ub, unsigned n, void *dst, size_t out_size_w32 ) { -// union { - __m128i buf[16]; 
-// sph_u32 dummy; -// } u; - size_t ptr, k; - unsigned bit_len; - sph_u32 th, tl; - __m128i *out; - - ptr = sc->ptr; - bit_len = ((unsigned)ptr << 3); - buf[ptr>>2] = _mm_set1_epi32( 0x80 ); - tl = sc->T0 + bit_len; - th = sc->T1; + __m128i buf[16] __attribute__ ((aligned (64))); + size_t ptr = ctx->ptr; + size_t vptr = ctx->ptr>>2; + unsigned bit_len = ( (unsigned)ptr << 3 ); + uint32_t tl = ctx->T0 + bit_len; + uint32_t th = ctx->T1; if ( ptr == 0 ) { - sc->T0 = SPH_C32(0xFFFFFE00UL); - sc->T1 = SPH_C32(0xFFFFFFFFUL); + ctx->T0 = 0xFFFFFE00UL; + ctx->T1 = 0xFFFFFFFFUL; } - else if ( sc->T0 == 0 ) + else if ( ctx->T0 == 0 ) { - sc->T0 = SPH_C32(0xFFFFFE00UL) + bit_len; - sc->T1 = SPH_T32(sc->T1 - 1); + ctx->T0 = 0xFFFFFE00UL + bit_len; + ctx->T1 = ctx->T1 - 1; } else - sc->T0 -= 512 - bit_len; + ctx->T0 -= 512 - bit_len; - if ( ptr <= 52 ) + buf[vptr] = _mm_set1_epi32( 0x80 ); + + if ( vptr < 12 ) { - memset_zero_128( buf + (ptr>>2) + 1, (52 - ptr) >> 2 ); - if (out_size_w32 == 8) - buf[52>>2] = _mm_or_si128( buf[52>>2], - _mm_set1_epi32( 0x01000000UL ) ); - *(buf+(56>>2)) = mm128_bswap_32( _mm_set1_epi32( th ) ); - *(buf+(60>>2)) = mm128_bswap_32( _mm_set1_epi32( tl ) ); - blake32_4way( sc, buf + (ptr>>2), 64 - ptr ); + memset_zero_128( buf + vptr + 1, 13 - vptr ); + buf[ 13 ] = _mm_or_si128( buf[ 13 ], _mm_set1_epi32( 0x01000000UL ) ); + buf[ 14 ] = mm128_bswap_32( _mm_set1_epi32( th ) ); + buf[ 15 ] = mm128_bswap_32( _mm_set1_epi32( tl ) ); + blake32_4way( ctx, buf + vptr, 64 - ptr ); } else { - memset_zero_128( buf + (ptr>>2) + 1, (60-ptr) >> 2 ); - blake32_4way( sc, buf + (ptr>>2), 64 - ptr ); - sc->T0 = SPH_C32(0xFFFFFE00UL); - sc->T1 = SPH_C32(0xFFFFFFFFUL); - memset_zero_128( buf, 56>>2 ); - if (out_size_w32 == 8) - buf[52>>2] = _mm_set1_epi32( 0x01000000UL ); - *(buf+(56>>2)) = mm128_bswap_32( _mm_set1_epi32( th ) ); - *(buf+(60>>2)) = mm128_bswap_32( _mm_set1_epi32( tl ) ); - blake32_4way( sc, buf, 64 ); + memset_zero_128( buf + vptr + 1, (60-ptr) >> 2 ); + blake32_4way( ctx, buf + vptr, 64 - ptr ); + ctx->T0 = 0xFFFFFE00UL; + ctx->T1 = 0xFFFFFFFFUL; + memset_zero_128( buf, 56>>2 ); + buf[ 13 ] = _mm_or_si128( buf[ 13 ], _mm_set1_epi32( 0x01000000UL ) ); + buf[ 14 ] = mm128_bswap_32( _mm_set1_epi32( th ) ); + buf[ 15 ] = mm128_bswap_32( _mm_set1_epi32( tl ) ); + blake32_4way( ctx, buf, 64 ); } - out = (__m128i*)dst; - for ( k = 0; k < out_size_w32; k++ ) - out[k] = mm128_bswap_32( sc->H[k] ); + + casti_m128i( dst, 0 ) = mm128_bswap_32( casti_m128i( ctx->H, 0 ) ); + casti_m128i( dst, 1 ) = mm128_bswap_32( casti_m128i( ctx->H, 1 ) ); + casti_m128i( dst, 2 ) = mm128_bswap_32( casti_m128i( ctx->H, 2 ) ); + casti_m128i( dst, 3 ) = mm128_bswap_32( casti_m128i( ctx->H, 3 ) ); + casti_m128i( dst, 4 ) = mm128_bswap_32( casti_m128i( ctx->H, 4 ) ); + casti_m128i( dst, 5 ) = mm128_bswap_32( casti_m128i( ctx->H, 5 ) ); + casti_m128i( dst, 6 ) = mm128_bswap_32( casti_m128i( ctx->H, 6 ) ); + casti_m128i( dst, 7 ) = mm128_bswap_32( casti_m128i( ctx->H, 7 ) ); } #if defined (__AVX2__) @@ -1217,163 +928,32 @@ blake32_8way_close( blake_8way_small_context *sc, unsigned ub, unsigned n, out[k] = mm256_bswap_32( sc->H[k] ); } -// Blake-512 4 way - -static const sph_u64 salt_zero_big[4] = { 0, 0, 0, 0 }; - -static void -blake64_4way_init( blake_4way_big_context *sc, const sph_u64 *iv, - const sph_u64 *salt ) -{ - int i; - for ( i = 0; i < 8; i++ ) - sc->H[i] = _mm256_set1_epi64x( iv[i] ); - for ( i = 0; i < 4; i++ ) - sc->S[i] = _mm256_set1_epi64x( salt[i] ); - sc->T0 = sc->T1 = 0; - sc->ptr = 0; -} - 
-static void -blake64_4way( blake_4way_big_context *sc, const void *data, size_t len) -{ - __m256i *vdata = (__m256i*)data; - __m256i *buf; - size_t ptr; - DECL_STATE64_4WAY - - const int buf_size = 128; // sizeof/8 - - buf = sc->buf; - ptr = sc->ptr; - if ( len < (buf_size - ptr) ) - { - memcpy_256( buf + (ptr>>3), vdata, len>>3 ); - ptr += len; - sc->ptr = ptr; - return; - } - - READ_STATE64_4WAY(sc); - while ( len > 0 ) - { - size_t clen; - - clen = buf_size - ptr; - if ( clen > len ) - clen = len; - memcpy_256( buf + (ptr>>3), vdata, clen>>3 ); - ptr += clen; - vdata = vdata + (clen>>3); - len -= clen; - if (ptr == buf_size ) - { - if ((T0 = SPH_T64(T0 + 1024)) < 1024) - T1 = SPH_T64(T1 + 1); - COMPRESS64_4WAY; - ptr = 0; - } - } - WRITE_STATE64_4WAY(sc); - sc->ptr = ptr; -} - -static void -blake64_4way_close( blake_4way_big_context *sc, - unsigned ub, unsigned n, void *dst, size_t out_size_w64) -{ -// union { - __m256i buf[16]; -// sph_u64 dummy; -// } u; - size_t ptr, k; - unsigned bit_len; - uint64_t z, zz; - sph_u64 th, tl; - __m256i *out; - - ptr = sc->ptr; - bit_len = ((unsigned)ptr << 3); - z = 0x80 >> n; - zz = ((ub & -z) | z) & 0xFF; - buf[ptr>>3] = _mm256_set_epi64x( zz, zz, zz, zz ); - tl = sc->T0 + bit_len; - th = sc->T1; - if (ptr == 0 ) - { - sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00ULL); - sc->T1 = SPH_C64(0xFFFFFFFFFFFFFFFFULL); - } - else if ( sc->T0 == 0 ) - { - sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00ULL) + bit_len; - sc->T1 = SPH_T64(sc->T1 - 1); - } - else - { - sc->T0 -= 1024 - bit_len; - } - if ( ptr <= 104 ) - { - memset_zero_256( buf + (ptr>>3) + 1, (104-ptr) >> 3 ); - if ( out_size_w64 == 8 ) - buf[(104>>3)] = _mm256_or_si256( buf[(104>>3)], - _mm256_set1_epi64x( 0x0100000000000000ULL ) ); - *(buf+(112>>3)) = mm256_bswap_64( - _mm256_set_epi64x( th, th, th, th ) ); - *(buf+(120>>3)) = mm256_bswap_64( - _mm256_set_epi64x( tl, tl, tl, tl ) ); - - blake64_4way( sc, buf + (ptr>>3), 128 - ptr ); - } - else - { - memset_zero_256( buf + (ptr>>3) + 1, (120 - ptr) >> 3 ); - - blake64_4way( sc, buf + (ptr>>3), 128 - ptr ); - sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00ULL); - sc->T1 = SPH_C64(0xFFFFFFFFFFFFFFFFULL); - memset_zero_256( buf, 112>>3 ); - if ( out_size_w64 == 8 ) - buf[104>>3] = _mm256_set1_epi64x( 0x0100000000000000ULL ); - *(buf+(112>>3)) = mm256_bswap_64( - _mm256_set_epi64x( th, th, th, th ) ); - *(buf+(120>>3)) = mm256_bswap_64( - _mm256_set_epi64x( tl, tl, tl, tl ) ); - - blake64_4way( sc, buf, 128 ); - } - out = (__m256i*)dst; - for ( k = 0; k < out_size_w64; k++ ) - out[k] = mm256_bswap_64( sc->H[k] ); -} - #endif // Blake-256 4 way // default 14 rounds, backward copatibility void -blake256_4way_init(void *cc) +blake256_4way_init(void *ctx) { - blake32_4way_init( cc, IV256, salt_zero_4way_small, 14 ); + blake32_4way_init( ctx, IV256, salt_zero_4way_small, 14 ); } void -blake256_4way(void *cc, const void *data, size_t len) +blake256_4way(void *ctx, const void *data, size_t len) { - blake32_4way(cc, data, len); + blake32_4way(ctx, data, len); } void -blake256_4way_close(void *cc, void *dst) +blake256_4way_close(void *ctx, void *dst) { - blake32_4way_close(cc, 0, 0, dst, 8); + blake32_4way_close(ctx, 0, 0, dst, 8); } #if defined(__AVX2__) -// Blake-256 8way +// Blake-256 8 way void blake256_8way_init(void *cc) @@ -1473,38 +1053,8 @@ blake256r8_8way_close(void *cc, void *dst) #endif -// Blake-512 4 way - -#if defined (__AVX2__) - -void -blake512_4way_init(void *cc) -{ - blake64_4way_init(cc, IV512, salt_zero_big); -} - -void -blake512_4way(void *cc, const void *data, 
size_t len) -{ - blake64_4way(cc, data, len); -} - -void -blake512_4way_close(void *cc, void *dst) -{ - blake512_4way_addbits_and_close(cc, 0, 0, dst); -} - -void -blake512_4way_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst) -{ - blake64_4way_close(cc, ub, n, dst, 8); -} - -#endif - #ifdef __cplusplus } #endif -#endif +//#endif diff --git a/algo/blake/blake256-hash-4way.c.new b/algo/blake/blake256-hash-4way.c.new new file mode 100644 index 0000000..683c84c --- /dev/null +++ b/algo/blake/blake256-hash-4way.c.new @@ -0,0 +1,322 @@ +// convert blake256 32 bit to use 64 bit with serial vectoring +// +// cut calls to GS in half +// +// combine V +// v0 = {V0,V1} +// v1 = {V2,V3} +// v2 = {V4,V5} +// v3 = {V6,V7} +// v4 = {V8,V9} +// v5 = {VA,VB} +// v6 = {VC,VD} +// v7 = {VE,VF} +// +// v6x = {VD,VC} swap(VC,VD) swap(v6) +// v7x = {VF,VE} swap(VE,VF) swap(v7) +// +// V0 = v1v0 +// V1 = v3v2 +// V2 = v5v4 +// V3 = v7v6 +// V4 = v9v8 +// V5 = vbva +// V6 = vdvc +// V7 = vfve +// +// The rotate in ROUND is to effect straddle and unstraddle for the third +// and 4th iteration of GS. +// It concatenates 2 contiguous 256 bit vectors and extracts the middle +// 256 bits. After the transform they must be restored with only the +// chosen bits modified in the original 2 vectors. +// ror1x128 achieves this by putting the chosen bits in arg1, the "low" +// 256 bit vector and saving the untouched bits temporarily in arg0, the +// "high" 256 bit vector. Simply reverse the process to restore data back +// to original positions. + +// Use standard 4way when AVX2 is not available, use x2 mode with AVX2. +// +// Data is organised the same as 32 bit 4 way, in effect serial vectoring +// on top of parallel vectoring. Same data in the same place just taking +// two chunks at a time. +// +// Transparent to user, x2 mode used when AVX2 detected. +// Use existing 4way context but revert to scalar types. +// Same interleave function (128 bit) or x2 with 256 bit? +// User transparency would have to apply to interleave as well.
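The straddle/unstraddle described in these notes comes down to rotating a pair of 256-bit registers by 128 bits across their 512-bit concatenation, so the middle 256 bits land in one register, are transformed, and are then rotated back into place. The short sketch below shows that primitive with plain AVX2 intrinsics as a hedged illustration; ror1x128_512/rol1x128_512 echo the names used in the notes but are illustrative stand-ins, not the avxdefs.h macros the new file actually calls.

// straddle_demo.c - hedged sketch of the 128-bit rotate over a register pair.
// build: gcc -O2 -mavx2 -o straddle_demo straddle_demo.c
#include <stdint.h>
#include <stdio.h>
#include <immintrin.h>

// Rotate the 512-bit concatenation hi:lo right by 128 bits.
// Afterwards *lo holds the middle 256 bits of the original pair and
// *hi holds the two outer 128-bit halves.
static inline void ror1x128_512( __m256i *hi, __m256i *lo )
{
    __m256i outer = _mm256_permute2x128_si256( *hi, *lo, 0x21 ); // { lo.lo : hi.hi }
    *lo           = _mm256_permute2x128_si256( *hi, *lo, 0x03 ); // { hi.lo : lo.hi }
    *hi           = outer;
}

// Inverse rotation: restores the original layout.
static inline void rol1x128_512( __m256i *hi, __m256i *lo )
{
    __m256i t = _mm256_permute2x128_si256( *hi, *lo, 0x03 );     // { hi.lo : lo.hi }
    *lo       = _mm256_permute2x128_si256( *hi, *lo, 0x21 );     // { lo.lo : hi.hi }
    *hi       = t;
}

static void print_halves( const char *tag, __m256i v )
{
    uint32_t w[8];
    _mm256_storeu_si256( (__m256i*)w, v );
    printf( "%s  hi128=%08x..  lo128=%08x..\n", tag, w[4], w[0] );
}

int main()
{
    // hi = {V3:V2}, lo = {V7:V6} in the pairing described in the notes.
    __m256i hi = _mm256_set_epi32( 3,3,3,3, 2,2,2,2 );
    __m256i lo = _mm256_set_epi32( 7,7,7,7, 6,6,6,6 );

    ror1x128_512( &hi, &lo );
    print_halves( "after ror: hi", hi );   // outer halves  {V6:V3}
    print_halves( "after ror: lo", lo );   // middle 256 bits {V2:V7}
    // ... transform *lo here, then undo the straddle ...
    rol1x128_512( &hi, &lo );
    print_halves( "restored hi  ", hi );   // {V3:V2}
    print_halves( "restored lo  ", lo );   // {V7:V6}
    return 0;
}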
+// +// Use common 4way update and close + +/* +typedef struct { + unsigned char buf[64<<2]; + uint32_t H[8<<2]; + uint32_t S[4<<2]; + size_t ptr; + uint32_t T0, T1; + int rounds; // 14 for blake, 8 for blakecoin & vanilla +} blakex2_4way_small_context __attribute__ ((aligned (64))); +*/ + +static void +blake32x2_4way_init( blake_4way_small_context *ctx, const uint32_t *iv, + const uint32_t *salt, int rounds ) +{ + casti_m128i( ctx->H, 0 ) = _mm_set1_epi32( iv[0] ); + casti_m128i( ctx->H, 1 ) = _mm_set1_epi32( iv[1] ); + casti_m128i( ctx->H, 2 ) = _mm_set1_epi32( iv[2] ); + casti_m128i( ctx->H, 3 ) = _mm_set1_epi32( iv[3] ); + casti_m128i( ctx->H, 4 ) = _mm_set1_epi32( iv[4] ); + casti_m128i( ctx->H, 5 ) = _mm_set1_epi32( iv[5] ); + casti_m128i( ctx->H, 6 ) = _mm_set1_epi32( iv[6] ); + casti_m128i( ctx->H, 7 ) = _mm_set1_epi32( iv[7] ); + + casti_m128i( ctx->S, 0 ) = m128_zero; + casti_m128i( ctx->S, 1 ) = m128_zero; + casti_m128i( ctx->S, 2 ) = m128_zero; + casti_m128i( ctx->S, 3 ) = m128_zero; +/* + sc->S[0] = _mm_set1_epi32( salt[0] ); + sc->S[1] = _mm_set1_epi32( salt[1] ); + sc->S[2] = _mm_set1_epi32( salt[2] ); + sc->S[3] = _mm_set1_epi32( salt[3] ); +*/ + ctx->T0 = ctx->T1 = 0; + ctx->ptr = 0; + ctx->rounds = rounds; +} + +static void +blake32x2( blake_4way_small_context *ctx, const void *data, size_t len ) +{ + __m128i *buf = (__m256i*)ctx->buf; + size_t bptr = ctx->ptr << 2; + size_t vptr = ctx->ptr >> 3; + size_t blen = len << 2; +// unsigned char *buf = ctx->buf; +// size_t ptr = ctx->ptr<<4; // repurposed + DECL_STATE32x2 + +// buf = sc->buf; +// ptr = sc->ptr; + +// adjust len for use with ptr, clen, all absolute bytes. +// int blen = len<<2; + + if ( blen < (sizeof ctx->buf) - bptr ) + { + memcpy( buf + vptr, data, blen ); + ptr += blen; + ctx->ptr = bptr >> 2;; + return; + } + + READ_STATE32( ctx ); + while ( blen > 0 ) + { + size_t clen; + + clen = ( sizeof sc->buf ) - ptr; + if ( clen > blen ) + clen = blen; + memcpy( buf + vptr, data, clen ); + bptr += clen; + vptr = bptr >> 5; + data = (const unsigned char *)data + clen; + blen -= clen; + if ( bptr == sizeof ctx->buf ) + { + if ( ( T0 = T0 + 512 ) < 512 ) // not needed, will never rollover + T1 += 1; + COMPRESS32x2_4WAY( ctx->rounds ); + ptr = 0; + } + } + WRITE_STATE32x2( ctx ); + ctx->ptr = bptr >> 2; +} + +static void +blake32x2_4way_close( blake_4way_small_context *ctx, void *dst ) +{ + __m256i buf[8] __attribute__ ((aligned (64))); + size_t ptr = ctx->ptr; + size_t vptr = ctx->ptr>>2; + unsigned bit_len = ( (unsigned)ptr << 3 ); // one lane + uint32_t th = ctx->T1; + uint32_t tl = ctx->T0 + bit_len; + + if ( ptr == 0 ) + { + ctx->T0 = 0xFFFFFE00UL; + ctx->T1 = 0xFFFFFFFFUL; + } + else if ( ctx->T0 == 0 ) + { + ctx->T0 = 0xFFFFFE00UL + bit_len; + ctx->T1 -= 1; + } + else + ctx->T0 -= 512 - bit_len; + + // memset doesn't do ints + buf[ vptr ] = _mm256_set_epi32( 0,0,0,0, 0x80, 0x80, 0x80, 0x80 ); + + if ( vptr < 5 ) + { + memset_zero_256( buf + vptr + 1, 6 - vptr ); + buf[ 6 ] = _mm256_or_si256( vbuf[ 6 ], _mm256_set_epi32( + 0x01000000UL,0x01000000UL,0x01000000UL,0x01000000UL, 0,0,0,0 ) ); + buf[ 7 ] = mm256_bswap_32( _mm256_set_epi32( tl,tl,tl,tl, + th,th,th,th ) ); + blake32x2_4way( ctx, buf + vptr, 64 - ptr ); + } + else + { + memset_zero_256( vbuf + vptr + 1, 7 - vptr ); + blake32x2_4way( ctx, vbuf + ptr, 64 - ptr ); + ctx->T0 = 0xFFFFFE00UL; + ctx->T1 = 0xFFFFFFFFUL; + buf[ 6 ] = mm256_zero; + buf[ 6 ] = _mm256_set_epi32( 0,0,0,0, + 0x01000000UL,0x01000000UL,0x01000000UL,0x01000000UL ); + buf[ 7 ] = 
mm256_bswap_32( _mm256_set_epi32( tl, tl, tl, tl, + th, th, th, th ); + blake32x2_4way( ctx, buf, 64 ); + } + + casti_m256i( dst, 0 ) = mm256_bswap_32( casti_m256i( ctx->H, 0 ) ); + casti_m256i( dst, 1 ) = mm256_bswap_32( casti_m256i( ctx->H, 1 ) ); + casti_m256i( dst, 2 ) = mm256_bswap_32( casti_m256i( ctx->H, 2 ) ); + casti_m256i( dst, 3 ) = mm256_bswap_32( casti_m256i( ctx->H, 3 ) ); +} + + + + +#define DECL_STATE32x2_4WAY \ + __m256i H0, H1, H2, H3; \ + __m256i S0, S1; \ + uint32_t T0, T1; + +#define READ_STATE32x2_4WAY(state) do \ +{ \ + H0 = casti_m256i( state->H, 0 ); \ + H1 = casti_m256i( state->H, 1 ); \ + H2 = casti_m256i( state->H, 2 ); \ + H3 = casti_m256i( state->H, 3 ); \ + S0 = casti_m256i( state->S, 0 ); \ + S1 = casti_m256i( state->S, 1 ); \ + T0 = state->T0; \ + T1 = state->T1; \ + +#define WRITE_STATE32x2_4WAY(state) do { \ + casti_m256i( state->H, 0 ) = H0; \ + casti_m256i( state->H, 1 ) = H1; \ + casti_m256i( state->H, 2 ) = H2; \ + casti_m256i( state->H, 3 ) = H3; \ + casti_m256i( state->S, 0 ) = S0; \ + casti_m256i( state->S, 1 ) = S1; \ + state->T0 = T0; \ + state->T1 = T1; \ +} while (0) + + +#define GSx2_4WAY( m0m2, m1m3, c0c2, c1c3, a, b, c, d ) do \ +{ \ + a = _mm256_add_epi32( _mm256_add_epi32( _mm256_xor_si256( \ + _mm256_set_epi32( c1,c3, c1,c3, c1,c3, c1,c3 ), \ + _mm256_set_epi32( m0,m2, m0,m2, m0,m2, m0,m2 ) ), b ), a ); \ + d = mm256_ror_32( _mm_xor_si128( d, a ), 16 ); \ + c = _mm256_add_epi32( c, d ); \ + b = mm256_ror_32( _mm256_xor_si256( b, c ), 12 ); \ + a = _mm256_add_epi32( _mm256_add_epi32( _mm256_xor_si256( \ + _mm256_set_epi32( c0,c2, c0,c2, c0,c2, c0,c2 ), \ + _mm256_set_epi32( m1,m3, m1,m3, m1,m3, m1,m3 ) ), b ), a ); \ + d = mm256_ror_32( _mm256_xor_si256( d, a ), 8 ); \ + c = _mm256_add_epi32( c, d ); \ + b = mm256_ror_32( _mm256_xor_si256( b, c ), 7 ); \ +} while (0) + +#define ROUND_Sx2_4WAY(r) do \ +{ \ + GS2_4WAY( Mx(r, 0), Mx(r, 1), Mx(r, 2), Mx(r, 3), \ + CSx(r, 0), CSx(r, 1), CSx(r, 2), CSx(r, 3), V0, V2, V4, V6 ); \ + GS2_4WAY( Mx(r, 4), Mx(r, 5), Mx(r, 6), Mx(r, 7), \ + CSx(r, 4), CSx(r, 5), CSx(r, 6), CSx(r, 7), V1, V3, V5, V7 ); \ + mm256_ror1x128_512( V3, V2 ); \ + mm256_ror1x128_512( V6, V7 ); \ + GS2_4WAY( Mx(r, 8), Mx(r, 9), Mx(r, A), Mx(r, B), \ + CSx(r, 8), CSx(r, 9), CSx(r, A), CSx(r, B), V0, V2, V5, V7 ); \ + GS2_4WAY( Mx(r, C), Mx(r, D), Mx(r, C), Mx(r, D), \ + CSx(r, C), CSx(r, D), CSx(r, C), CSx(r, D), V1, V3, V4, V6 ); \ + mm256_rol1x128_512( V2, V3 ); \ + mm256_rol1x128_512( V7, V6 ); + +#define COMPRESS32x2_4WAY( rounds ) do \ +{ \ + __m256i M0, M1, M2, M3, M4, M5, M6, M7; \ + __m256i V0, V1, V2, V3, V4, V5, V6, V7; \ + unsigned r; \ + V0 = H0; \ + V1 = H1; \ + V2 = H2; \ + V3 = H3; \ + V4 = _mm256_xor_si256( S0, _mm256_set_epi32( CS1, CS1, CS1, CS1, \ + CS0, CS0, CS0, CS0 ) ); \ + V5 = _mm256_xor_si256( S1, _mm256_set_epi32( CS3, CS3, CS3, CS3, \ + CS2, CS2, CS2, CS2 ) ); \ + V6 = _mm256_xor_si256( _mm256_set1_epi32( T0 ), \ + _mm256_set_epi32( CS5, CS5, CS5, CS5, \ + CS4, CS4, CS4, CS4 ) ); \ + V7 = _mm256_xor_si256( _mm256_set1_epi32( T1 ), \ + _mm256_set_epi32( CS7, CS7, CS7, CS7, \ + CS6, CS6, CS6, CS6 ) ); \ + M0 = mm256_bswap_32( buf[ 0] ); \ + M1 = mm256_bswap_32( buf[ 1] ); \ + M2 = mm256_bswap_32( buf[ 2] ); \ + M3 = mm256_bswap_32( buf[ 3] ); \ + M4 = mm256_bswap_32( buf[ 4] ); \ + M5 = mm256_bswap_32( buf[ 5] ); \ + M6 = mm256_bswap_32( buf[ 6] ); \ + M7 = mm256_bswap_32( buf[ 7] ); \ + ROUND_Sx2_4WAY(0); \ + ROUND_Sx2_4WAY(1); \ + ROUND_Sx2_4WAY(2); \ + ROUND_Sx2_4WAY(3); \ + ROUND_Sx2_4WAY(4); \ + 
ROUND_Sx2_4WAY(5); \ + ROUND_Sx2_4WAY(6); \ + ROUND_Sx2_4WAY(7); \ + if (rounds == 14) \ + { \ + ROUND_Sx2_4WAY(8); \ + ROUND_Sx2_4WAY(9); \ + ROUND_Sx2_4WAY(0); \ + ROUND_Sx2_4WAY(1); \ + ROUND_Sx2_4WAY(2); \ + ROUND_Sx2_4WAY(3); \ + } \ + H0 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( V8, V0 ), S0 ), H0 ); \ + H1 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( V9, V1 ), S1 ), H1 ); \ + H2 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( VA, V2 ), S2 ), H2 ); \ + H3 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( VB, V3 ), S3 ), H3 ); \ +} while (0) + + + + + + + + + + + + + + diff --git a/algo/blake/blake512-hash-4way.c b/algo/blake/blake512-hash-4way.c new file mode 100644 index 0000000..0063e4d --- /dev/null +++ b/algo/blake/blake512-hash-4way.c @@ -0,0 +1,701 @@ +/* $Id: blake.c 252 2011-06-07 17:55:14Z tp $ */ +/* + * BLAKE implementation. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2007-2010 Projet RNRT SAPHIR + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + +#if defined (__AVX2__) + +#include +#include +#include + +#include "blake-hash-4way.h" + +#ifdef __cplusplus +extern "C"{ +#endif + +#if SPH_SMALL_FOOTPRINT && !defined SPH_SMALL_FOOTPRINT_BLAKE +#define SPH_SMALL_FOOTPRINT_BLAKE 1 +#endif + +#if SPH_64 && (SPH_SMALL_FOOTPRINT_BLAKE || !SPH_64_TRUE) +#define SPH_COMPACT_BLAKE_64 1 +#endif + +#ifdef _MSC_VER +#pragma warning (disable: 4146) +#endif + + +// Blake-512 + +static const sph_u64 IV512[8] = { + SPH_C64(0x6A09E667F3BCC908), SPH_C64(0xBB67AE8584CAA73B), + SPH_C64(0x3C6EF372FE94F82B), SPH_C64(0xA54FF53A5F1D36F1), + SPH_C64(0x510E527FADE682D1), SPH_C64(0x9B05688C2B3E6C1F), + SPH_C64(0x1F83D9ABFB41BD6B), SPH_C64(0x5BE0CD19137E2179) +}; + + +#if SPH_COMPACT_BLAKE_32 || SPH_COMPACT_BLAKE_64 + +// Blake-256 4 & 8 way, Blake-512 4 way + +static const unsigned sigma[16][16] = { + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, + { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, + { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, + { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, + { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }, + { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }, + { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }, + { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }, + { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, + { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, + { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, + { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, + { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } +}; + +#endif + +#define Z00 0 +#define Z01 1 +#define Z02 2 +#define Z03 3 +#define Z04 4 +#define Z05 5 +#define Z06 6 +#define Z07 7 +#define Z08 8 +#define Z09 9 +#define Z0A A +#define Z0B B +#define Z0C C +#define Z0D D +#define Z0E E +#define Z0F F + +#define Z10 E +#define Z11 A +#define Z12 4 +#define Z13 8 +#define Z14 9 +#define Z15 F +#define Z16 D +#define Z17 6 +#define Z18 1 +#define Z19 C +#define Z1A 0 +#define Z1B 2 +#define Z1C B +#define Z1D 7 +#define Z1E 5 +#define Z1F 3 + +#define Z20 B +#define Z21 8 +#define Z22 C +#define Z23 0 +#define Z24 5 +#define Z25 2 +#define Z26 F +#define Z27 D +#define Z28 A +#define Z29 E +#define Z2A 3 +#define Z2B 6 +#define Z2C 7 +#define Z2D 1 +#define Z2E 9 +#define Z2F 4 + +#define Z30 7 +#define Z31 9 +#define Z32 3 +#define Z33 1 +#define Z34 D +#define Z35 C +#define Z36 B +#define Z37 E +#define Z38 2 +#define Z39 6 +#define Z3A 5 +#define Z3B A +#define Z3C 4 +#define Z3D 0 +#define Z3E F +#define Z3F 8 + +#define Z40 9 +#define Z41 0 +#define Z42 5 +#define Z43 7 +#define Z44 2 +#define Z45 4 +#define Z46 A +#define Z47 F +#define Z48 E +#define Z49 1 +#define Z4A B +#define Z4B C +#define Z4C 6 +#define Z4D 8 +#define Z4E 3 +#define Z4F D + +#define Z50 2 +#define Z51 C +#define Z52 6 +#define Z53 A +#define Z54 0 +#define Z55 B +#define Z56 8 +#define Z57 3 +#define Z58 4 +#define Z59 D +#define Z5A 7 +#define Z5B 5 +#define Z5C F +#define Z5D E +#define Z5E 1 +#define Z5F 9 + +#define Z60 C +#define Z61 5 +#define Z62 1 +#define Z63 F +#define Z64 E +#define Z65 D +#define Z66 4 +#define Z67 A +#define Z68 0 +#define Z69 7 +#define Z6A 6 
+#define Z6B 3 +#define Z6C 9 +#define Z6D 2 +#define Z6E 8 +#define Z6F B + +#define Z70 D +#define Z71 B +#define Z72 7 +#define Z73 E +#define Z74 C +#define Z75 1 +#define Z76 3 +#define Z77 9 +#define Z78 5 +#define Z79 0 +#define Z7A F +#define Z7B 4 +#define Z7C 8 +#define Z7D 6 +#define Z7E 2 +#define Z7F A + +#define Z80 6 +#define Z81 F +#define Z82 E +#define Z83 9 +#define Z84 B +#define Z85 3 +#define Z86 0 +#define Z87 8 +#define Z88 C +#define Z89 2 +#define Z8A D +#define Z8B 7 +#define Z8C 1 +#define Z8D 4 +#define Z8E A +#define Z8F 5 + +#define Z90 A +#define Z91 2 +#define Z92 8 +#define Z93 4 +#define Z94 7 +#define Z95 6 +#define Z96 1 +#define Z97 5 +#define Z98 F +#define Z99 B +#define Z9A 9 +#define Z9B E +#define Z9C 3 +#define Z9D C +#define Z9E D +#define Z9F 0 + +#define Mx(r, i) Mx_(Z ## r ## i) +#define Mx_(n) Mx__(n) +#define Mx__(n) M ## n + +// Blake-512 4 way + +#define CBx(r, i) CBx_(Z ## r ## i) +#define CBx_(n) CBx__(n) +#define CBx__(n) CB ## n + +#define CB0 SPH_C64(0x243F6A8885A308D3) +#define CB1 SPH_C64(0x13198A2E03707344) +#define CB2 SPH_C64(0xA4093822299F31D0) +#define CB3 SPH_C64(0x082EFA98EC4E6C89) +#define CB4 SPH_C64(0x452821E638D01377) +#define CB5 SPH_C64(0xBE5466CF34E90C6C) +#define CB6 SPH_C64(0xC0AC29B7C97C50DD) +#define CB7 SPH_C64(0x3F84D5B5B5470917) +#define CB8 SPH_C64(0x9216D5D98979FB1B) +#define CB9 SPH_C64(0xD1310BA698DFB5AC) +#define CBA SPH_C64(0x2FFD72DBD01ADFB7) +#define CBB SPH_C64(0xB8E1AFED6A267E96) +#define CBC SPH_C64(0xBA7C9045F12C7F99) +#define CBD SPH_C64(0x24A19947B3916CF7) +#define CBE SPH_C64(0x0801F2E2858EFC16) +#define CBF SPH_C64(0x636920D871574E69) + +#if SPH_COMPACT_BLAKE_64 +// not used +static const sph_u64 CB[16] = { + SPH_C64(0x243F6A8885A308D3), SPH_C64(0x13198A2E03707344), + SPH_C64(0xA4093822299F31D0), SPH_C64(0x082EFA98EC4E6C89), + SPH_C64(0x452821E638D01377), SPH_C64(0xBE5466CF34E90C6C), + SPH_C64(0xC0AC29B7C97C50DD), SPH_C64(0x3F84D5B5B5470917), + SPH_C64(0x9216D5D98979FB1B), SPH_C64(0xD1310BA698DFB5AC), + SPH_C64(0x2FFD72DBD01ADFB7), SPH_C64(0xB8E1AFED6A267E96), + SPH_C64(0xBA7C9045F12C7F99), SPH_C64(0x24A19947B3916CF7), + SPH_C64(0x0801F2E2858EFC16), SPH_C64(0x636920D871574E69) +}; + +#endif + + +// Blake-512 4 way + +#define GB_4WAY(m0, m1, c0, c1, a, b, c, d) do { \ + a = _mm256_add_epi64( _mm256_add_epi64( _mm256_xor_si256( \ + _mm256_set_epi64x( c1, c1, c1, c1 ), m0 ), b ), a ); \ + d = mm256_ror_64( _mm256_xor_si256( d, a ), 32 ); \ + c = _mm256_add_epi64( c, d ); \ + b = mm256_ror_64( _mm256_xor_si256( b, c ), 25 ); \ + a = _mm256_add_epi64( _mm256_add_epi64( _mm256_xor_si256( \ + _mm256_set_epi64x( c0, c0, c0, c0 ), m1 ), b ), a ); \ + d = mm256_ror_64( _mm256_xor_si256( d, a ), 16 ); \ + c = _mm256_add_epi64( c, d ); \ + b = mm256_ror_64( _mm256_xor_si256( b, c ), 11 ); \ +} while (0) + +#if SPH_COMPACT_BLAKE_64 +// not used +#define ROUND_B_4WAY(r) do { \ + GB_4WAY(M[sigma[r][0x0]], M[sigma[r][0x1]], \ + CB[sigma[r][0x0]], CB[sigma[r][0x1]], V0, V4, V8, VC); \ + GB_4WAY(M[sigma[r][0x2]], M[sigma[r][0x3]], \ + CB[sigma[r][0x2]], CB[sigma[r][0x3]], V1, V5, V9, VD); \ + GB_4WAY(M[sigma[r][0x4]], M[sigma[r][0x5]], \ + CB[sigma[r][0x4]], CB[sigma[r][0x5]], V2, V6, VA, VE); \ + GB_4WAY(M[sigma[r][0x6]], M[sigma[r][0x7]], \ + CB[sigma[r][0x6]], CB[sigma[r][0x7]], V3, V7, VB, VF); \ + GB_4WAY(M[sigma[r][0x8]], M[sigma[r][0x9]], \ + CB[sigma[r][0x8]], CB[sigma[r][0x9]], V0, V5, VA, VF); \ + GB_4WAY(M[sigma[r][0xA]], M[sigma[r][0xB]], \ + CB[sigma[r][0xA]], CB[sigma[r][0xB]], V1, V6, VB, VC); \ + 
GB_4WAY(M[sigma[r][0xC]], M[sigma[r][0xD]], \ + CB[sigma[r][0xC]], CB[sigma[r][0xD]], V2, V7, V8, VD); \ + GB_4WAY(M[sigma[r][0xE]], M[sigma[r][0xF]], \ + CB[sigma[r][0xE]], CB[sigma[r][0xF]], V3, V4, V9, VE); \ +} while (0) + +#else +//current_impl +#define ROUND_B_4WAY(r) do { \ + GB_4WAY(Mx(r, 0), Mx(r, 1), CBx(r, 0), CBx(r, 1), V0, V4, V8, VC); \ + GB_4WAY(Mx(r, 2), Mx(r, 3), CBx(r, 2), CBx(r, 3), V1, V5, V9, VD); \ + GB_4WAY(Mx(r, 4), Mx(r, 5), CBx(r, 4), CBx(r, 5), V2, V6, VA, VE); \ + GB_4WAY(Mx(r, 6), Mx(r, 7), CBx(r, 6), CBx(r, 7), V3, V7, VB, VF); \ + GB_4WAY(Mx(r, 8), Mx(r, 9), CBx(r, 8), CBx(r, 9), V0, V5, VA, VF); \ + GB_4WAY(Mx(r, A), Mx(r, B), CBx(r, A), CBx(r, B), V1, V6, VB, VC); \ + GB_4WAY(Mx(r, C), Mx(r, D), CBx(r, C), CBx(r, D), V2, V7, V8, VD); \ + GB_4WAY(Mx(r, E), Mx(r, F), CBx(r, E), CBx(r, F), V3, V4, V9, VE); \ + } while (0) + +#endif + + +// Blake-512 4 way + +#define DECL_STATE64_4WAY \ + __m256i H0, H1, H2, H3, H4, H5, H6, H7; \ + __m256i S0, S1, S2, S3; \ + sph_u64 T0, T1; + +#define READ_STATE64_4WAY(state) do { \ + H0 = (state)->H[0]; \ + H1 = (state)->H[1]; \ + H2 = (state)->H[2]; \ + H3 = (state)->H[3]; \ + H4 = (state)->H[4]; \ + H5 = (state)->H[5]; \ + H6 = (state)->H[6]; \ + H7 = (state)->H[7]; \ + S0 = (state)->S[0]; \ + S1 = (state)->S[1]; \ + S2 = (state)->S[2]; \ + S3 = (state)->S[3]; \ + T0 = (state)->T0; \ + T1 = (state)->T1; \ + } while (0) + +#define WRITE_STATE64_4WAY(state) do { \ + (state)->H[0] = H0; \ + (state)->H[1] = H1; \ + (state)->H[2] = H2; \ + (state)->H[3] = H3; \ + (state)->H[4] = H4; \ + (state)->H[5] = H5; \ + (state)->H[6] = H6; \ + (state)->H[7] = H7; \ + (state)->S[0] = S0; \ + (state)->S[1] = S1; \ + (state)->S[2] = S2; \ + (state)->S[3] = S3; \ + (state)->T0 = T0; \ + (state)->T1 = T1; \ + } while (0) + +#if SPH_COMPACT_BLAKE_64 + +// not used +#define COMPRESS64_4WAY do { \ + __m256i M[16]; \ + __m256i V0, V1, V2, V3, V4, V5, V6, V7; \ + __m256i V8, V9, VA, VB, VC, VD, VE, VF; \ + unsigned r; \ + V0 = H0; \ + V1 = H1; \ + V2 = H2; \ + V3 = H3; \ + V4 = H4; \ + V5 = H5; \ + V6 = H6; \ + V7 = H7; \ + V8 = _mm256_xor_si256( S0, _mm256_set_epi64x( CB0, CB0, CB0, CB0 ) ); \ + V9 = _mm256_xor_si256( S1, _mm256_set_epi64x( CB1, CB1, CB1, CB1 ) ); \ + VA = _mm256_xor_si256( S2, _mm256_set_epi64x( CB2, CB2, CB2, CB2 ) ); \ + VB = _mm256_xor_si256( S3, _mm256_set_epi64x( CB3, CB3, CB3, CB3 ) ); \ + VC = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \ + _mm256_set_epi64x( CB4, CB4, CB4, CB4 ) ); \ + VD = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \ + _mm256_set_epi64x( CB5, CB5, CB5, CB5 ) ); \ + VE = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \ + _mm256_set_epi64x( CB6, CB6, CB6, CB6 ) ); \ + VF = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \ + _mm256_set_epi64x( CB7, CB7, CB7, CB7 ) ); \ + M[0x0] = mm256_bswap_64( *(buf+0) ); \ + M[0x1] = mm256_bswap_64( *(buf+1) ); \ + M[0x2] = mm256_bswap_64( *(buf+2) ); \ + M[0x3] = mm256_bswap_64( *(buf+3) ); \ + M[0x4] = mm256_bswap_64( *(buf+4) ); \ + M[0x5] = mm256_bswap_64( *(buf+5) ); \ + M[0x6] = mm256_bswap_64( *(buf+6) ); \ + M[0x7] = mm256_bswap_64( *(buf+7) ); \ + M[0x8] = mm256_bswap_64( *(buf+8) ); \ + M[0x9] = mm256_bswap_64( *(buf+9) ); \ + M[0xA] = mm256_bswap_64( *(buf+10) ); \ + M[0xB] = mm256_bswap_64( *(buf+11) ); \ + M[0xC] = mm256_bswap_64( *(buf+12) ); \ + M[0xD] = mm256_bswap_64( *(buf+13) ); \ + M[0xE] = mm256_bswap_64( *(buf+14) ); \ + M[0xF] = mm256_bswap_64( *(buf+15) ); \ + for (r = 0; r < 16; r ++) \ + ROUND_B_4WAY(r); \ + 
H0 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S0, V0 ), V8 ), H0 ); \ + H1 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S1, V1 ), V9 ), H1 ); \ + H2 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S2, V2 ), VA ), H2 ); \ + H3 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S3, V3 ), VB ), H3 ); \ + H4 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S0, V4 ), VC ), H4 ); \ + H5 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S1, V5 ), VD ), H5 ); \ + H6 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S2, V6 ), VE ), H6 ); \ + H7 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S3, V7 ), VF ), H7 ); \ + } while (0) + +#else + +//current impl + +#define COMPRESS64_4WAY do { \ + __m256i M0, M1, M2, M3, M4, M5, M6, M7; \ + __m256i M8, M9, MA, MB, MC, MD, ME, MF; \ + __m256i V0, V1, V2, V3, V4, V5, V6, V7; \ + __m256i V8, V9, VA, VB, VC, VD, VE, VF; \ + V0 = H0; \ + V1 = H1; \ + V2 = H2; \ + V3 = H3; \ + V4 = H4; \ + V5 = H5; \ + V6 = H6; \ + V7 = H7; \ + V8 = _mm256_xor_si256( S0, _mm256_set_epi64x( CB0, CB0, CB0, CB0 ) ); \ + V9 = _mm256_xor_si256( S1, _mm256_set_epi64x( CB1, CB1, CB1, CB1 ) ); \ + VA = _mm256_xor_si256( S2, _mm256_set_epi64x( CB2, CB2, CB2, CB2 ) ); \ + VB = _mm256_xor_si256( S3, _mm256_set_epi64x( CB3, CB3, CB3, CB3 ) ); \ + VC = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \ + _mm256_set_epi64x( CB4, CB4, CB4, CB4 ) ); \ + VD = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \ + _mm256_set_epi64x( CB5, CB5, CB5, CB5 ) ); \ + VE = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \ + _mm256_set_epi64x( CB6, CB6, CB6, CB6 ) ); \ + VF = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \ + _mm256_set_epi64x( CB7, CB7, CB7, CB7 ) ); \ + M0 = mm256_bswap_64( *(buf + 0) ); \ + M1 = mm256_bswap_64( *(buf + 1) ); \ + M2 = mm256_bswap_64( *(buf + 2) ); \ + M3 = mm256_bswap_64( *(buf + 3) ); \ + M4 = mm256_bswap_64( *(buf + 4) ); \ + M5 = mm256_bswap_64( *(buf + 5) ); \ + M6 = mm256_bswap_64( *(buf + 6) ); \ + M7 = mm256_bswap_64( *(buf + 7) ); \ + M8 = mm256_bswap_64( *(buf + 8) ); \ + M9 = mm256_bswap_64( *(buf + 9) ); \ + MA = mm256_bswap_64( *(buf + 10) ); \ + MB = mm256_bswap_64( *(buf + 11) ); \ + MC = mm256_bswap_64( *(buf + 12) ); \ + MD = mm256_bswap_64( *(buf + 13) ); \ + ME = mm256_bswap_64( *(buf + 14) ); \ + MF = mm256_bswap_64( *(buf + 15) ); \ + ROUND_B_4WAY(0); \ + ROUND_B_4WAY(1); \ + ROUND_B_4WAY(2); \ + ROUND_B_4WAY(3); \ + ROUND_B_4WAY(4); \ + ROUND_B_4WAY(5); \ + ROUND_B_4WAY(6); \ + ROUND_B_4WAY(7); \ + ROUND_B_4WAY(8); \ + ROUND_B_4WAY(9); \ + ROUND_B_4WAY(0); \ + ROUND_B_4WAY(1); \ + ROUND_B_4WAY(2); \ + ROUND_B_4WAY(3); \ + ROUND_B_4WAY(4); \ + ROUND_B_4WAY(5); \ + H0 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S0, V0 ), V8 ), H0 ); \ + H1 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S1, V1 ), V9 ), H1 ); \ + H2 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S2, V2 ), VA ), H2 ); \ + H3 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S3, V3 ), VB ), H3 ); \ + H4 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S0, V4 ), VC ), H4 ); \ + H5 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S1, V5 ), VD ), H5 ); \ + H6 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S2, V6 ), VE ), H6 ); \ + H7 = _mm256_xor_si256( _mm256_xor_si256( \ + _mm256_xor_si256( S3, V7 ), VF ), H7 ); \ + } while (0) + +#endif + +static const sph_u64 salt_zero_big[4] = { 0, 
0, 0, 0 }; + +static void +blake64_4way_init( blake_4way_big_context *sc, const sph_u64 *iv, + const sph_u64 *salt ) +{ + int i; + for ( i = 0; i < 8; i++ ) + sc->H[i] = _mm256_set1_epi64x( iv[i] ); + for ( i = 0; i < 4; i++ ) + sc->S[i] = _mm256_set1_epi64x( salt[i] ); + sc->T0 = sc->T1 = 0; + sc->ptr = 0; +} + +static void +blake64_4way( blake_4way_big_context *sc, const void *data, size_t len) +{ + __m256i *vdata = (__m256i*)data; + __m256i *buf; + size_t ptr; + DECL_STATE64_4WAY + + const int buf_size = 128; // sizeof/8 + + buf = sc->buf; + ptr = sc->ptr; + if ( len < (buf_size - ptr) ) + { + memcpy_256( buf + (ptr>>3), vdata, len>>3 ); + ptr += len; + sc->ptr = ptr; + return; + } + + READ_STATE64_4WAY(sc); + while ( len > 0 ) + { + size_t clen; + + clen = buf_size - ptr; + if ( clen > len ) + clen = len; + memcpy_256( buf + (ptr>>3), vdata, clen>>3 ); + ptr += clen; + vdata = vdata + (clen>>3); + len -= clen; + if (ptr == buf_size ) + { + if ((T0 = SPH_T64(T0 + 1024)) < 1024) + T1 = SPH_T64(T1 + 1); + COMPRESS64_4WAY; + ptr = 0; + } + } + WRITE_STATE64_4WAY(sc); + sc->ptr = ptr; +} + +static void +blake64_4way_close( blake_4way_big_context *sc, + unsigned ub, unsigned n, void *dst, size_t out_size_w64) +{ +// union { + __m256i buf[16]; +// sph_u64 dummy; +// } u; + size_t ptr, k; + unsigned bit_len; + uint64_t z, zz; + sph_u64 th, tl; + __m256i *out; + + ptr = sc->ptr; + bit_len = ((unsigned)ptr << 3); + z = 0x80 >> n; + zz = ((ub & -z) | z) & 0xFF; + buf[ptr>>3] = _mm256_set_epi64x( zz, zz, zz, zz ); + tl = sc->T0 + bit_len; + th = sc->T1; + if (ptr == 0 ) + { + sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00ULL); + sc->T1 = SPH_C64(0xFFFFFFFFFFFFFFFFULL); + } + else if ( sc->T0 == 0 ) + { + sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00ULL) + bit_len; + sc->T1 = SPH_T64(sc->T1 - 1); + } + else + { + sc->T0 -= 1024 - bit_len; + } + if ( ptr <= 104 ) + { + memset_zero_256( buf + (ptr>>3) + 1, (104-ptr) >> 3 ); + if ( out_size_w64 == 8 ) + buf[(104>>3)] = _mm256_or_si256( buf[(104>>3)], + _mm256_set1_epi64x( 0x0100000000000000ULL ) ); + *(buf+(112>>3)) = mm256_bswap_64( + _mm256_set_epi64x( th, th, th, th ) ); + *(buf+(120>>3)) = mm256_bswap_64( + _mm256_set_epi64x( tl, tl, tl, tl ) ); + + blake64_4way( sc, buf + (ptr>>3), 128 - ptr ); + } + else + { + memset_zero_256( buf + (ptr>>3) + 1, (120 - ptr) >> 3 ); + + blake64_4way( sc, buf + (ptr>>3), 128 - ptr ); + sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00ULL); + sc->T1 = SPH_C64(0xFFFFFFFFFFFFFFFFULL); + memset_zero_256( buf, 112>>3 ); + if ( out_size_w64 == 8 ) + buf[104>>3] = _mm256_set1_epi64x( 0x0100000000000000ULL ); + *(buf+(112>>3)) = mm256_bswap_64( + _mm256_set_epi64x( th, th, th, th ) ); + *(buf+(120>>3)) = mm256_bswap_64( + _mm256_set_epi64x( tl, tl, tl, tl ) ); + + blake64_4way( sc, buf, 128 ); + } + out = (__m256i*)dst; + for ( k = 0; k < out_size_w64; k++ ) + out[k] = mm256_bswap_64( sc->H[k] ); +} + +void +blake512_4way_init(void *cc) +{ + blake64_4way_init(cc, IV512, salt_zero_big); +} + +void +blake512_4way(void *cc, const void *data, size_t len) +{ + blake64_4way(cc, data, len); +} + +void +blake512_4way_close(void *cc, void *dst) +{ + blake512_4way_addbits_and_close(cc, 0, 0, dst); +} + +void +blake512_4way_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst) +{ + blake64_4way_close(cc, ub, n, dst, 8); +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/algo/cubehash/cube-hash-2way.c b/algo/cubehash/cube-hash-2way.c index 8d7781d..019e1d4 100644 --- a/algo/cubehash/cube-hash-2way.c +++ b/algo/cubehash/cube-hash-2way.c @@ -7,6 +7,24 
@@ // 2x128 +// The result of hashing 10 rounds of initial data which consists of params +// zero padded. +static const uint64_t IV256[] = +{ +0xCCD6F29FEA2BD4B4, 0x35481EAE63117E71, 0xE5D94E6322512D5B, 0xF4CC12BE7E624131, +0x42AF2070C2D0B696, 0x3361DA8CD0720C35, 0x8EF8AD8328CCECA4, 0x40E5FBAB4680AC00, +0x6107FBD5D89041C3, 0xF0B266796C859D41, 0x5FA2560309392549, 0x93CB628565C892FD, +0x9E4B4E602AF2B5AE, 0x85254725774ABFDD, 0x4AB6AAD615815AEB, 0xD6032C0A9CDAF8AF +}; + +static const uint64_t IV512[] = +{ +0x50F494D42AEA2A61, 0x4167D83E2D538B8B, 0xC701CF8C3FEE2313, 0x50AC5695CC39968E, +0xA647A8B34D42C787, 0x825B453797CF0BEF, 0xF22090C4EEF864D2, 0xA23911AED0E5CD33, +0x148FE485FCD398D9, 0xB64445321B017BEF, 0x2FF5781C6A536159, 0x0DBADEA991FA7934, +0xA5A70E75D65C8A2B, 0xBC796576B1C62456, 0xE7989AF11921C8F7, 0xD43E3B447795D246 +}; + static void transform_2way( cube_2way_context *sp ) { int r; @@ -45,10 +63,10 @@ static void transform_2way( cube_2way_context *sp ) x1 = _mm256_xor_si256( x1, x5 ); x2 = _mm256_xor_si256( x2, x6 ); x3 = _mm256_xor_si256( x3, x7 ); - x4 = mm256_swap128_64( x4 ); - x5 = mm256_swap128_64( x5 ); - x6 = mm256_swap128_64( x6 ); - x7 = mm256_swap128_64( x7 ); + x4 = mm256_swap64_128( x4 ); + x5 = mm256_swap64_128( x5 ); + x6 = mm256_swap64_128( x6 ); + x7 = mm256_swap64_128( x7 ); x4 = _mm256_add_epi32( x0, x4 ); x5 = _mm256_add_epi32( x1, x5 ); x6 = _mm256_add_epi32( x2, x6 ); @@ -69,10 +87,10 @@ static void transform_2way( cube_2way_context *sp ) x1 = _mm256_xor_si256( x1, x5 ); x2 = _mm256_xor_si256( x2, x6 ); x3 = _mm256_xor_si256( x3, x7 ); - x4 = mm256_swap64_32( x4 ); - x5 = mm256_swap64_32( x5 ); - x6 = mm256_swap64_32( x6 ); - x7 = mm256_swap64_32( x7 ); + x4 = mm256_swap32_64( x4 ); + x5 = mm256_swap32_64( x5 ); + x6 = mm256_swap32_64( x6 ); + x7 = mm256_swap32_64( x7 ); } _mm256_store_si256( (__m256i*)sp->h, x0 ); @@ -86,36 +104,26 @@ static void transform_2way( cube_2way_context *sp ) } -cube_2way_context cube_2way_ctx_cache __attribute__ ((aligned (64))); - -int cube_2way_reinit( cube_2way_context *sp ) -{ - memcpy( sp, &cube_2way_ctx_cache, sizeof(cube_2way_context) ); - return 0; -} - int cube_2way_init( cube_2way_context *sp, int hashbitlen, int rounds, - int blockbytes ) + int blockbytes ) { - int i; + const uint64_t* iv = hashbitlen == 512 ? 
IV512 : IV256; + sp->hashlen = hashbitlen/128; + sp->blocksize = blockbytes/16; + sp->rounds = rounds; + sp->pos = 0; - // all sizes of __m128i - cube_2way_ctx_cache.hashlen = hashbitlen/128; - cube_2way_ctx_cache.blocksize = blockbytes/16; - cube_2way_ctx_cache.rounds = rounds; - cube_2way_ctx_cache.pos = 0; + __m256i* h = (__m256i*)sp->h; - for ( i = 0; i < 8; ++i ) - cube_2way_ctx_cache.h[i] = m256_zero; + h[0] = _mm256_set_epi64x( iv[ 1], iv[ 0], iv[ 1], iv[ 0] ); + h[1] = _mm256_set_epi64x( iv[ 3], iv[ 2], iv[ 3], iv[ 2] ); + h[2] = _mm256_set_epi64x( iv[ 5], iv[ 4], iv[ 5], iv[ 4] ); + h[3] = _mm256_set_epi64x( iv[ 7], iv[ 6], iv[ 7], iv[ 6] ); + h[4] = _mm256_set_epi64x( iv[ 9], iv[ 8], iv[ 9], iv[ 8] ); + h[5] = _mm256_set_epi64x( iv[11], iv[10], iv[11], iv[10] ); + h[6] = _mm256_set_epi64x( iv[13], iv[12], iv[13], iv[12] ); + h[7] = _mm256_set_epi64x( iv[15], iv[14], iv[15], iv[14] ); - cube_2way_ctx_cache.h[0] = _mm256_set_epi32( - 0, rounds, blockbytes, hashbitlen / 8, - 0, rounds, blockbytes, hashbitlen / 8 ); - - for ( i = 0; i < 10; ++i ) - transform_2way( &cube_2way_ctx_cache ); - - memcpy( sp, &cube_2way_ctx_cache, sizeof(cube_2way_context) ); return 0; } diff --git a/algo/cubehash/cubehash_sse2.c b/algo/cubehash/cubehash_sse2.c index c4a9e67..fcb3d9e 100644 --- a/algo/cubehash/cubehash_sse2.c +++ b/algo/cubehash/cubehash_sse2.c @@ -14,6 +14,25 @@ #include #include #include "avxdefs.h" +#include + +// The result of hashing 10 rounds of initial data which is params and +// mostly zeros. +static const uint64_t IV256[] = +{ +0xCCD6F29FEA2BD4B4, 0x35481EAE63117E71, 0xE5D94E6322512D5B, 0xF4CC12BE7E624131, +0x42AF2070C2D0B696, 0x3361DA8CD0720C35, 0x8EF8AD8328CCECA4, 0x40E5FBAB4680AC00, +0x6107FBD5D89041C3, 0xF0B266796C859D41, 0x5FA2560309392549, 0x93CB628565C892FD, +0x9E4B4E602AF2B5AE, 0x85254725774ABFDD, 0x4AB6AAD615815AEB, 0xD6032C0A9CDAF8AF +}; + +static const uint64_t IV512[] = +{ +0x50F494D42AEA2A61, 0x4167D83E2D538B8B, 0xC701CF8C3FEE2313, 0x50AC5695CC39968E, +0xA647A8B34D42C787, 0x825B453797CF0BEF, 0xF22090C4EEF864D2, 0xA23911AED0E5CD33, +0x148FE485FCD398D9, 0xB64445321B017BEF, 0x2FF5781C6A536159, 0x0DBADEA991FA7934, +0xA5A70E75D65C8A2B, 0xBC796576B1C62456, 0xE7989AF11921C8F7, 0xD43E3B447795D246 +}; static void transform( cubehashParam *sp ) { @@ -128,48 +147,37 @@ static void transform( cubehashParam *sp ) #endif } // transform -// Cubehash context initializing is very expensive. -// Cache the intial value for faster reinitializing. -cubehashParam cube_ctx_cache __attribute__ ((aligned (64))); - -int cubehashReinit( cubehashParam *sp ) -{ - memcpy( sp, &cube_ctx_cache, sizeof(cubehashParam) ); - return SUCCESS; - -} - -// Initialize the cache then copy to sp. int cubehashInit(cubehashParam *sp, int hashbitlen, int rounds, int blockbytes) { - int i; + const uint64_t* iv = hashbitlen == 512 ? 
IV512 : IV256; + sp->hashlen = hashbitlen/128; + sp->blocksize = blockbytes/16; + sp->rounds = rounds; + sp->pos = 0; + +#if defined(__AVX2__) - if ( hashbitlen < 8 ) return BAD_HASHBITLEN; - if ( hashbitlen > 512 ) return BAD_HASHBITLEN; - if ( hashbitlen != 8 * (hashbitlen / 8) ) return BAD_HASHBITLEN; + __m256i* x = (__m256i*)sp->x; - /* Sanity checks */ - if ( rounds <= 0 || rounds > 32 ) - rounds = CUBEHASH_ROUNDS; - if ( blockbytes <= 0 || blockbytes >= 256) - blockbytes = CUBEHASH_BLOCKBYTES; + x[0] = _mm256_set_epi64x( iv[ 3], iv[ 2], iv[ 1], iv[ 0] ); + x[1] = _mm256_set_epi64x( iv[ 7], iv[ 6], iv[ 5], iv[ 4] ); + x[2] = _mm256_set_epi64x( iv[11], iv[10], iv[ 9], iv[ 8] ); + x[3] = _mm256_set_epi64x( iv[15], iv[14], iv[13], iv[12] ); - // all sizes of __m128i - cube_ctx_cache.hashlen = hashbitlen/128; - cube_ctx_cache.blocksize = blockbytes/16; - cube_ctx_cache.rounds = rounds; - cube_ctx_cache.pos = 0; +#else - for ( i = 0; i < 8; ++i ) - cube_ctx_cache.x[i] = _mm_setzero_si128();; + __m128i* x = (__m128i*)sp->x; - cube_ctx_cache.x[0] = _mm_set_epi32( 0, rounds, blockbytes, - hashbitlen / 8 ); + x[0] = _mm_set_epi64x( iv[ 1], iv[ 0] ); + x[1] = _mm_set_epi64x( iv[ 3], iv[ 2] ); + x[2] = _mm_set_epi64x( iv[ 5], iv[ 4] ); + x[3] = _mm_set_epi64x( iv[ 7], iv[ 6] ); + x[4] = _mm_set_epi64x( iv[ 9], iv[ 8] ); + x[5] = _mm_set_epi64x( iv[11], iv[10] ); + x[6] = _mm_set_epi64x( iv[13], iv[12] ); + x[7] = _mm_set_epi64x( iv[15], iv[14] ); - for ( i = 0; i < 10; ++i ) - transform( &cube_ctx_cache ); - - memcpy( sp, &cube_ctx_cache, sizeof(cubehashParam) ); +#endif return SUCCESS; } diff --git a/algo/hodl/aes.c b/algo/hodl/aes.c index ff1c7fa..4ea054f 100644 --- a/algo/hodl/aes.c +++ b/algo/hodl/aes.c @@ -3,7 +3,7 @@ #include "wolf-aes.h" #include "miner.h" -#ifndef NO_AES_NI +#if defined(__AES__) static inline void ExpandAESKey256_sub1(__m128i *tmp1, __m128i *tmp2) { @@ -151,7 +151,7 @@ void AES256CBC(__m128i** data, const __m128i** next, __m128i ExpandedKey[][16], } } -#else // NO AVX +#else // NO SSE4.2 static inline __m128i AES256Core(__m128i State, const __m128i *ExpandedKey) { diff --git a/algo/hodl/hodl-gate.c b/algo/hodl/hodl-gate.c index d1d80c4..b9d5e11 100644 --- a/algo/hodl/hodl-gate.c +++ b/algo/hodl/hodl-gate.c @@ -101,39 +101,6 @@ void hodl_build_block_header( struct work* g_work, uint32_t version, g_work->data[31] = 0x00000280; } -// hodl build_extra_header is redundant, hodl can use std_build_extra_header -// and call hodl_build_block_header. 
-#if 0 -void hodl_build_extraheader( struct work* g_work, struct stratum_ctx *sctx ) -{ - uchar merkle_tree[64] = { 0 }; - size_t t; -// int i; - - algo_gate.gen_merkle_root( merkle_tree, sctx ); - // Increment extranonce2 - for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ ); - - algo_gate.build_block_header( g_work, le32dec( sctx->job.version ), - (uint32_t*) sctx->job.prevhash, (uint32_t*) merkle_tree, - le32dec( sctx->job.ntime ), le32dec( sctx->job.nbits ) ); -/* - // Assemble block header - memset( g_work->data, 0, sizeof(g_work->data) ); - g_work->data[0] = le32dec( sctx->job.version ); - for ( i = 0; i < 8; i++ ) - g_work->data[1 + i] = le32dec( (uint32_t *) sctx->job.prevhash + i ); - for ( i = 0; i < 8; i++ ) - g_work->data[9 + i] = be32dec( (uint32_t *) merkle_root + i ); - - g_work->data[ algo_gate.ntime_index ] = le32dec( sctx->job.ntime ); - g_work->data[ algo_gate.nbits_index ] = le32dec( sctx->job.nbits ); - g_work->data[22] = 0x80000000; - g_work->data[31] = 0x00000280; -*/ -} -#endif - // called only by thread 0, saves a backup of g_work void hodl_get_new_work( struct work* work, struct work* g_work) { @@ -179,7 +146,7 @@ bool hodl_do_this_thread( int thr_id ) int hodl_scanhash( int thr_id, struct work* work, uint32_t max_nonce, uint64_t *hashes_done ) { -#ifndef NO_AES_NI +#if defined(__AES__) GenRandomGarbage( (CacheEntry*)hodl_scratchbuf, work->data, thr_id ); pthread_barrier_wait( &hodl_barrier ); return scanhash_hodl_wolf( thr_id, work, max_nonce, hashes_done ); @@ -189,7 +156,7 @@ int hodl_scanhash( int thr_id, struct work* work, uint32_t max_nonce, bool register_hodl_algo( algo_gate_t* gate ) { -#ifdef NO_AES_NI +#if defined(__AES__) applog( LOG_ERR, "Only CPUs with AES are supported, use legacy version."); return false; #endif @@ -207,7 +174,6 @@ bool register_hodl_algo( algo_gate_t* gate ) gate->build_stratum_request = (void*)&hodl_le_build_stratum_request; gate->malloc_txs_request = (void*)&hodl_malloc_txs_request; gate->build_block_header = (void*)&hodl_build_block_header; -// gate->build_extraheader = (void*)&hodl_build_extraheader; gate->resync_threads = (void*)&hodl_resync_threads; gate->do_this_thread = (void*)&hodl_do_this_thread; gate->work_cmp_size = 76; diff --git a/algo/hodl/hodl-wolf.c b/algo/hodl/hodl-wolf.c index 1ae2442..5747e7e 100644 --- a/algo/hodl/hodl-wolf.c +++ b/algo/hodl/hodl-wolf.c @@ -8,7 +8,7 @@ #include "hodl-wolf.h" #include "miner.h" -#ifndef NO_AES_NI +#if defined(__AES__) void GenerateGarbageCore( CacheEntry *Garbage, int ThreadID, int ThreadCount, void *MidHash ) @@ -139,7 +139,7 @@ int scanhash_hodl_wolf( int threadNumber, struct work* work, uint32_t max_nonce, return(0); -#else // no AVX +#else // no SSE4.2 uint32_t *pdata = work->data; uint32_t *ptarget = work->target; @@ -160,7 +160,6 @@ int scanhash_hodl_wolf( int threadNumber, struct work* work, uint32_t max_nonce, { // copy data to first l2 cache memcpy(Cache.dwords, Garbage + k, GARBAGE_SLICE_SIZE); -#ifndef NO_AES_NI for(int j = 0; j < AES_ITERATIONS; j++) { CacheEntry TmpXOR; @@ -184,7 +183,6 @@ int scanhash_hodl_wolf( int threadNumber, struct work* work, uint32_t max_nonce, AES256CBC( Cache.dqwords, TmpXOR.dqwords, ExpKey, TmpXOR.dqwords[ (GARBAGE_SLICE_SIZE / sizeof(__m128i)) - 1 ], 256 ); } -#endif // use last X bits as solution if( ( Cache.dwords[ (GARBAGE_SLICE_SIZE >> 2) - 1 ] & (COMPARE_SIZE - 1) ) < 1000 ) @@ -206,7 +204,7 @@ int scanhash_hodl_wolf( int threadNumber, struct work* work, uint32_t max_nonce, *hashes_done = CollisionCount; 
return(0); -#endif +#endif // SSE4.2 else } @@ -218,5 +216,5 @@ void GenRandomGarbage(CacheEntry *Garbage, uint32_t *pdata, int thr_id) GenerateGarbageCore(Garbage, thr_id, opt_n_threads, MidHash); } -#endif +#endif // AES diff --git a/algo/hodl/sha512-avx.h b/algo/hodl/sha512-avx.h index e9888e0..db7cca5 100644 --- a/algo/hodl/sha512-avx.h +++ b/algo/hodl/sha512-avx.h @@ -22,16 +22,20 @@ typedef struct #ifdef __AVX2__ __m256i h[8]; __m256i w[80]; -#else // AVX +#elif defined(__SSE4_2__) __m128i h[8]; __m128i w[80]; +#else + int dummy; #endif } Sha512Context; #ifdef __AVX2__ #define SHA512_PARALLEL_N 8 -#else // AVX +#elif defined(__SSE$_2__) #define SHA512_PARALLEL_N 4 +#else +#define SHA512_PARALLEL_N 1 // dummy value #endif //SHA-512 related functions diff --git a/algo/hodl/sha512_avx.c b/algo/hodl/sha512_avx.c index 7993f2e..b0ffc5d 100644 --- a/algo/hodl/sha512_avx.c +++ b/algo/hodl/sha512_avx.c @@ -1,4 +1,5 @@ #ifndef __AVX2__ + #ifdef __SSE4_2__ //#ifdef __AVX__ diff --git a/algo/luffa/luffa_for_sse2.c b/algo/luffa/luffa_for_sse2.c index f1b8214..36acc07 100644 --- a/algo/luffa/luffa_for_sse2.c +++ b/algo/luffa/luffa_for_sse2.c @@ -30,6 +30,19 @@ a1 = _mm_or_si128( _mm_srli_si128(a1,4), _mm_slli_si128(b,12) ); \ } while(0) +/* +static inline __m256i mult2_avx2( a ) +{ + __m128 a0, a0, b; + a0 = mm128_extractlo_256( a ); + a1 = mm128_extracthi_256( a ); + b = _mm_xor_si128( a0, _mm_shuffle_epi32( _mm_and_si128(a1,MASK), 16 ) ); + a0 = _mm_or_si128( _mm_srli_si128(b,4), _mm_slli_si128(a1,12) ); + a1 = _mm_or_si128( _mm_srli_si128(a1,4), _mm_slli_si128(b,12) ); + return mm256_concat_128( a1, a0 ); +} +*/ + #define STEP_PART(x,c,t)\ SUBCRUMB(*x,*(x+1),*(x+2),*(x+3),*t);\ SUBCRUMB(*(x+5),*(x+6),*(x+7),*(x+4),*t);\ diff --git a/algo/lyra2/allium-4way.c b/algo/lyra2/allium-4way.c index e83d27b..18d4655 100644 --- a/algo/lyra2/allium-4way.c +++ b/algo/lyra2/allium-4way.c @@ -55,11 +55,11 @@ void allium_4way_hash( void *state, const void *input ) LYRA2RE( hash3, 32, hash3, 32, hash3, 32, 1, 8, 8 ); cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*)hash0, 32 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 256, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*)hash1, (const byte*)hash1, 32 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 256, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*)hash2, (const byte*)hash2, 32 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 256, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*)hash3, (const byte*)hash3, 32 ); LYRA2RE( hash0, 32, hash0, 32, hash0, 32, 1, 8, 8 ); diff --git a/algo/lyra2/lyra2-gate.c b/algo/lyra2/lyra2-gate.c index 85bb851..4e23ba3 100644 --- a/algo/lyra2/lyra2-gate.c +++ b/algo/lyra2/lyra2-gate.c @@ -27,7 +27,7 @@ bool register_lyra2rev3_algo( algo_gate_t* gate ) gate->scanhash = (void*)&scanhash_lyra2rev3; gate->hash = (void*)&lyra2rev3_hash; #endif - gate->optimizations = SSE2_OPT | AES_OPT | SSE42_OPT | AVX2_OPT; + gate->optimizations = SSE2_OPT | SSE42_OPT | AVX2_OPT; gate->miner_thread_init = (void*)&lyra2rev3_thread_init; gate->set_target = (void*)&alt_set_target; return true; diff --git a/algo/lyra2/lyra2-gate.h b/algo/lyra2/lyra2-gate.h index be020e2..5e91742 100644 --- a/algo/lyra2/lyra2-gate.h +++ b/algo/lyra2/lyra2-gate.h @@ -17,14 +17,14 @@ bool register_lyra2rev3_algo( algo_gate_t* gate ); void lyra2rev3_4way_hash( void *state, const void *input ); int scanhash_lyra2rev3_4way( int thr_id, struct work *work, uint32_t max_nonce, - uint64_t *hashes_done ); + uint64_t 
*hashes_done, struct thr_info *mythr ); bool init_lyra2rev3_4way_ctx(); #else void lyra2rev3_hash( void *state, const void *input ); int scanhash_lyra2rev3( int thr_id, struct work *work, uint32_t max_nonce, - uint64_t *hashes_done ); + uint64_t *hashes_done, struct thr_info *mythr ); bool init_lyra2rev3_ctx(); #endif diff --git a/algo/lyra2/lyra2re.c b/algo/lyra2/lyra2re.c index a64781f..5d3a475 100644 --- a/algo/lyra2/lyra2re.c +++ b/algo/lyra2/lyra2re.c @@ -7,8 +7,7 @@ #include "lyra2.h" #include "algo-gate-api.h" #include "avxdefs.h" - -#ifndef NO_AES_NI +#if defined(__AES__) #include "algo/groestl/aes_ni/hash-groestl256.h" #endif @@ -18,10 +17,10 @@ typedef struct { sph_blake256_context blake; sph_keccak256_context keccak; sph_skein256_context skein; -#ifdef NO_AES_NI - sph_groestl256_context groestl; -#else +#if defined(__AES__) hashState_groestl256 groestl; +#else + sph_groestl256_context groestl; #endif } lyra2re_ctx_holder; @@ -33,10 +32,10 @@ void init_lyra2re_ctx() sph_blake256_init(&lyra2re_ctx.blake); sph_keccak256_init(&lyra2re_ctx.keccak); sph_skein256_init(&lyra2re_ctx.skein); -#ifdef NO_AES_NI - sph_groestl256_init(&lyra2re_ctx.groestl); -#else +#if defined(__AES__) init_groestl256( &lyra2re_ctx.groestl, 32 ); +#else + sph_groestl256_init(&lyra2re_ctx.groestl); #endif } @@ -72,11 +71,11 @@ void lyra2re_hash(void *state, const void *input) sph_skein256(&ctx.skein, hashA, 32); sph_skein256_close(&ctx.skein, hashB); -#ifdef NO_AES_NI +#if defined(__AES__) + update_and_final_groestl256( &ctx.groestl, hashA, hashB, 256 ); +#else sph_groestl256( &ctx.groestl, hashB, 32 ); sph_groestl256_close( &ctx.groestl, hashA ); -#else - update_and_final_groestl256( &ctx.groestl, hashA, hashB, 256 ); #endif memcpy(state, hashA, 32); diff --git a/algo/lyra2/lyra2rev2-4way.c b/algo/lyra2/lyra2rev2-4way.c index 456c42f..e814c2e 100644 --- a/algo/lyra2/lyra2rev2-4way.c +++ b/algo/lyra2/lyra2rev2-4way.c @@ -48,11 +48,11 @@ void lyra2rev2_4way_hash( void *state, const void *input ) mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash64, 256 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash0, (const byte*) hash0, 32 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 256, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash1, (const byte*) hash1, 32 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 256, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash2, (const byte*) hash2, 32 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 256, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash3, (const byte*) hash3, 32 ); LYRA2REV2( l2v2_wholeMatrix, hash0, 32, hash0, 32, hash0, 32, 1, 4, 4 ); @@ -65,13 +65,13 @@ void lyra2rev2_4way_hash( void *state, const void *input ) skein256_4way_close( &ctx.skein, vhash64 ); mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash64, 256 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 256, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash0, (const byte*) hash0, 32 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 256, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash1, (const byte*) hash1, 32 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 256, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash2, (const byte*) hash2, 32 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 256, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash3, (const byte*) hash3, 32 ); mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, 256 ); diff --git 
a/algo/lyra2/lyra2rev3-4way.c b/algo/lyra2/lyra2rev3-4way.c index acf2e5f..540439e 100644 --- a/algo/lyra2/lyra2rev3-4way.c +++ b/algo/lyra2/lyra2rev3-4way.c @@ -43,11 +43,11 @@ void lyra2rev3_4way_hash( void *state, const void *input ) LYRA2REV3( l2v3_wholeMatrix, hash3, 32, hash3, 32, hash3, 32, 1, 4, 4 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash0, (const byte*) hash0, 32 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 256, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash1, (const byte*) hash1, 32 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 256, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash2, (const byte*) hash2, 32 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 256, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash3, (const byte*) hash3, 32 ); LYRA2REV3( l2v3_wholeMatrix, hash0, 32, hash0, 32, hash0, 32, 1, 4, 4 ); @@ -57,54 +57,67 @@ void lyra2rev3_4way_hash( void *state, const void *input ) mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, 256 ); bmw256_4way( &ctx.bmw, vhash, 32 ); - bmw256_4way_close( &ctx.bmw, vhash ); + bmw256_4way_close( &ctx.bmw, state ); - mm128_deinterleave_4x32( state, state+32, state+64, state+96, vhash, 256 ); } int scanhash_lyra2rev3_4way( int thr_id, struct work *work, uint32_t max_nonce, - uint64_t *hashes_done ) + uint64_t *hashes_done, struct thr_info *mythr ) { uint32_t hash[8*4] __attribute__ ((aligned (64))); uint32_t vdata[20*4] __attribute__ ((aligned (64))); uint32_t edata[20] __attribute__ ((aligned (64))); + uint32_t *hash7 = &(hash[7<<2]); + uint32_t lane_hash[8]; uint32_t *pdata = work->data; uint32_t *ptarget = work->target; const uint32_t first_nonce = pdata[19]; uint32_t n = first_nonce; const uint32_t Htarg = ptarget[7]; - uint32_t *nonces = work->nonces; int num_found = 0; - uint32_t *noncep = vdata + 76; // 19*4 - + __m128i *noncev = (__m128i*)vdata + 19; // aligned + /* int */ thr_id = mythr->id; // thr_id arg is deprecated + if ( opt_benchmark ) ( (uint32_t*)ptarget )[7] = 0x0000ff; - swab32_array( edata, pdata, 20 ); + // Need big endian data + casti_m128i( edata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) ); + casti_m128i( edata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) ); + casti_m128i( edata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) ); + casti_m128i( edata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) ); + casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) ); + mm128_interleave_4x32( vdata, edata, edata, edata, edata, 640 ); - do { - be32enc( noncep, n ); - be32enc( noncep+1, n+1 ); - be32enc( noncep+2, n+2 ); - be32enc( noncep+3, n+3 ); + do + { + *noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) ); lyra2rev3_4way_hash( hash, vdata ); pdata[19] = n; - for ( int i = 0; i < 4; i++ ) - if ( (hash+(i<<3))[7] <= Htarg && fulltest( hash+(i<<3), ptarget ) ) + for ( int lane = 0; lane < 4; lane++ ) if ( hash7[lane] <= Htarg ) { - pdata[19] = n+i; - nonces[ num_found++ ] = n+i; - work_set_target_ratio( work, hash+(i<<3) ); + mm128_extract_lane_4x32( lane_hash, hash, lane, 256 ); + + if ( fulltest( lane_hash, ptarget ) ) + { + pdata[19] = n + lane; + work_set_target_ratio( work, lane_hash ); + if ( submit_work( mythr, work ) ) + applog( LOG_NOTICE, "Share %d submitted by thread %d, lane %d.", + accepted_share_count + rejected_share_count + 1, + thr_id, lane ); + else + applog( LOG_WARNING, "Failed to submit share." 
); + } } n += 4; - } while ( (num_found == 0) && (n < max_nonce-4) - && !work_restart[thr_id].restart); + } while ( (n < max_nonce-4) && !work_restart[thr_id].restart); *hashes_done = n - first_nonce + 1; - return num_found; + return 0; } #endif diff --git a/algo/lyra2/lyra2rev3.c b/algo/lyra2/lyra2rev3.c index 2da382e..3791f0e 100644 --- a/algo/lyra2/lyra2rev3.c +++ b/algo/lyra2/lyra2rev3.c @@ -8,7 +8,6 @@ typedef struct { cubehashParam cube; -// cubehashParam cube2; sph_blake256_context blake; sph_bmw256_context bmw; @@ -20,7 +19,6 @@ static __thread sph_blake256_context l2v3_blake_mid; bool init_lyra2rev3_ctx() { cubehashInit( &lyra2v3_ctx.cube, 256, 16, 32 ); -// cubehashInit( &lyra2v3_ctx.cube2, 256, 16, 32 ); sph_blake256_init( &lyra2v3_ctx.blake ); sph_bmw256_init( &lyra2v3_ctx.bmw ); return true; @@ -59,44 +57,51 @@ void lyra2rev3_hash( void *state, const void *input ) memcpy( state, hash, 32 ); } -int scanhash_lyra2rev3(int thr_id, struct work *work, - uint32_t max_nonce, uint64_t *hashes_done) +int scanhash_lyra2rev3( int thr_id, struct work *work, + uint32_t max_nonce, uint64_t *hashes_done, struct thr_info *mythr ) { - uint32_t *pdata = work->data; - uint32_t *ptarget = work->target; - uint32_t endiandata[20] __attribute__ ((aligned (64))); - uint32_t hash[8] __attribute__((aligned(64))); - const uint32_t first_nonce = pdata[19]; - uint32_t nonce = first_nonce; - const uint32_t Htarg = ptarget[7]; + uint32_t *pdata = work->data; + uint32_t *ptarget = work->target; + uint32_t endiandata[20] __attribute__ ((aligned (64))); + uint32_t hash[8] __attribute__((aligned(64))); + const uint32_t first_nonce = pdata[19]; + uint32_t nonce = first_nonce; + const uint32_t Htarg = ptarget[7]; + /* int */ thr_id = mythr->id; // thr_id arg is deprecated - if (opt_benchmark) - ((uint32_t*)ptarget)[7] = 0x0000ff; + if (opt_benchmark) + ((uint32_t*)ptarget)[7] = 0x0000ff; - swab32_array( endiandata, pdata, 20 ); + // need big endian data + casti_m128i( endiandata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) ); + casti_m128i( endiandata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) ); + casti_m128i( endiandata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) ); + casti_m128i( endiandata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) ); + casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) ); - l2v3_blake256_midstate( endiandata ); + l2v3_blake256_midstate( endiandata ); - do { - be32enc(&endiandata[19], nonce); - lyra2rev3_hash(hash, endiandata); + do + { + be32enc(&endiandata[19], nonce); + lyra2rev3_hash(hash, endiandata); - if (hash[7] <= Htarg ) - { - if( fulltest(hash, ptarget) ) - { - pdata[19] = nonce; - work_set_target_ratio( work, hash ); - *hashes_done = pdata[19] - first_nonce; - return 1; - } - } - nonce++; + if (hash[7] <= Htarg ) + { + if( fulltest(hash, ptarget) ) + { + pdata[19] = nonce; + work_set_target_ratio( work, hash ); + *hashes_done = pdata[19] - first_nonce; + return 1; + } + } + nonce++; - } while (nonce < max_nonce && !work_restart[thr_id].restart); + } while (nonce < max_nonce && !work_restart[thr_id].restart); - pdata[19] = nonce; - *hashes_done = pdata[19] - first_nonce + 1; - return 0; + pdata[19] = nonce; + *hashes_done = pdata[19] - first_nonce + 1; + return 0; } diff --git a/algo/lyra2/sponge.h b/algo/lyra2/sponge.h index a391e66..6c4104f 100644 --- a/algo/lyra2/sponge.h +++ b/algo/lyra2/sponge.h @@ -91,7 +91,7 @@ static inline uint64_t rotr64( const uint64_t w, const unsigned c ){ LYRA_ROUND_AVX2( s0, s1, s2, s3 ) \ LYRA_ROUND_AVX2( s0, s1, s2, 
s3 ) \ -#elif defined(__SSE4_2__) +#elif defined(__SSE2__) // process 2 columns in parallel // returns void, all args updated @@ -108,14 +108,14 @@ static inline uint64_t rotr64( const uint64_t w, const unsigned c ){ #define LYRA_ROUND_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \ G_2X64( s0, s2, s4, s6 ); \ G_2X64( s1, s3, s5, s7 ); \ - mm128_ror256_1x64( s2, s3 ); \ - mm128_swap256_128( s4, s5 ); \ - mm128_rol256_1x64( s6, s7 ); \ + mm128_rol1x64_256( s2, s3 ); \ + mm128_swap128_256( s4, s5 ); \ + mm128_rol1x64_256( s6, s7 ); \ G_2X64( s0, s2, s4, s6 ); \ G_2X64( s1, s3, s5, s7 ); \ - mm128_rol256_1x64( s2, s3 ); \ - mm128_swap256_128( s4, s5 ); \ - mm128_ror256_1x64( s6, s7 ); + mm128_rol1x64_256( s2, s3 ); \ + mm128_swap128_256( s4, s5 ); \ + mm128_ror1x64_256( s6, s7 ); #define LYRA_12_ROUNDS_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \ LYRA_ROUND_AVX(s0,s1,s2,s3,s4,s5,s6,s7) \ diff --git a/algo/m7m.c b/algo/m7m.c index f5077b4..2398fe7 100644 --- a/algo/m7m.c +++ b/algo/m7m.c @@ -7,7 +7,6 @@ #include #include #include -#include "algo/sha/sph_sha2.h" #include "algo/keccak/sph_keccak.h" #include "algo/haval/sph-haval.h" #include "algo/tiger/sph_tiger.h" @@ -117,13 +116,8 @@ uint32_t sw2_(int nnounce) } typedef struct { -#ifndef USE_SPH_SHA SHA256_CTX sha256; SHA512_CTX sha512; -#else - sph_sha256_context sha256; - sph_sha512_context sha512; -#endif sph_keccak512_context keccak; sph_whirlpool_context whirlpool; sph_haval256_5_context haval; @@ -135,13 +129,8 @@ m7m_ctx_holder m7m_ctx; void init_m7m_ctx() { -#ifndef USE_SPH_SHA SHA256_Init( &m7m_ctx.sha256 ); SHA512_Init( &m7m_ctx.sha512 ); -#else - sph_sha256_init( &m7m_ctx.sha256 ); - sph_sha512_init( &m7m_ctx.sha512 ); -#endif sph_keccak512_init( &m7m_ctx.keccak ); sph_whirlpool_init( &m7m_ctx.whirlpool ); sph_haval256_5_init( &m7m_ctx.haval ); @@ -176,28 +165,18 @@ int scanhash_m7m_hash( int thr_id, struct work* work, m7m_ctx_holder ctx1, ctx2 __attribute__ ((aligned (64))); memcpy( &ctx1, &m7m_ctx, sizeof(m7m_ctx) ); -#ifndef USE_SPH_SHA SHA256_CTX ctxf_sha256; -#else - sph_sha256_context ctxf_sha256; -#endif memcpy(data, pdata, 80); -#ifndef USE_SPH_SHA SHA256_Update( &ctx1.sha256, data, M7_MIDSTATE_LEN ); SHA512_Update( &ctx1.sha512, data, M7_MIDSTATE_LEN ); -#else - sph_sha256( &ctx1.sha256, data, M7_MIDSTATE_LEN ); - sph_sha512( &ctx1.sha512, data, M7_MIDSTATE_LEN ); -#endif sph_keccak512( &ctx1.keccak, data, M7_MIDSTATE_LEN ); sph_whirlpool( &ctx1.whirlpool, data, M7_MIDSTATE_LEN ); sph_haval256_5( &ctx1.haval, data, M7_MIDSTATE_LEN ); sph_tiger( &ctx1.tiger, data, M7_MIDSTATE_LEN ); sph_ripemd160( &ctx1.ripemd, data, M7_MIDSTATE_LEN ); -// the following calculations can be performed once and the results shared mpz_t magipi, magisw, product, bns0, bns1; mpf_t magifpi, magifpi0, mpt1, mpt2, mptmp, mpten; @@ -222,22 +201,11 @@ int scanhash_m7m_hash( int thr_id, struct work* work, memcpy( &ctx2, &ctx1, sizeof(m7m_ctx) ); -// with 4 way can a single midstate be shared among lanes? 
-// do sinlge round of midstate and inyerleave for final - -#ifndef USE_SPH_SHA SHA256_Update( &ctx2.sha256, data_p64, 80 - M7_MIDSTATE_LEN ); SHA256_Final( (unsigned char*) (bhash[0]), &ctx2.sha256 ); SHA512_Update( &ctx2.sha512, data_p64, 80 - M7_MIDSTATE_LEN ); SHA512_Final( (unsigned char*) (bhash[1]), &ctx2.sha512 ); -#else - sph_sha256( &ctx2.sha256, data_p64, 80 - M7_MIDSTATE_LEN ); - sph_sha256_close( &ctx2.sha256, (void*)(bhash[0]) ); - - sph_sha512( &ctx2.sha512, data_p64, 80 - M7_MIDSTATE_LEN ); - sph_sha512_close( &ctx2.sha512, (void*)(bhash[1]) ); -#endif sph_keccak512( &ctx2.keccak, data_p64, 80 - M7_MIDSTATE_LEN ); sph_keccak512_close( &ctx2.keccak, (void*)(bhash[2]) ); @@ -253,7 +221,6 @@ int scanhash_m7m_hash( int thr_id, struct work* work, sph_ripemd160( &ctx2.ripemd, data_p64, 80 - M7_MIDSTATE_LEN ); sph_ripemd160_close( &ctx2.ripemd, (void*)(bhash[6]) ); -// 4 way serial mpz_import(bns0, a, -1, p, -1, 0, bhash[0]); mpz_set(bns1, bns0); mpz_set(product, bns0); @@ -269,17 +236,10 @@ int scanhash_m7m_hash( int thr_id, struct work* work, bytes = mpz_sizeinbase(product, 256); mpz_export((void *)bdata, NULL, -1, 1, 0, 0, product); -#ifndef USE_SPH_SHA SHA256_Init( &ctxf_sha256 ); SHA256_Update( &ctxf_sha256, bdata, bytes ); SHA256_Final( (unsigned char*) hash, &ctxf_sha256 ); -#else - sph_sha256_init( &ctxf_sha256 ); - sph_sha256( &ctxf_sha256, bdata, bytes ); - sph_sha256_close( &ctxf_sha256, (void*)(hash) ); -#endif -// do once and share digits=(int)((sqrt((double)(n/2))*(1.+EPS))/9000+75); mp_bitcnt_t prec = (long int)(digits*BITS_PER_DIGIT+16); mpf_set_prec_raw(magifpi, prec); @@ -302,7 +262,6 @@ int scanhash_m7m_hash( int thr_id, struct work* work, mpz_set_f(magipi, magifpi); mpz_add(magipi,magipi,magisw); mpz_add(product,product,magipi); -// share magipi, product and do serial mpz_import(bns0, b, -1, p, -1, 0, (void*)(hash)); mpz_add(bns1, bns1, bns0); mpz_mul(product,product,bns1); @@ -312,18 +271,11 @@ int scanhash_m7m_hash( int thr_id, struct work* work, mpzscale=bytes; mpz_export(bdata, NULL, -1, 1, 0, 0, product); -#ifndef USE_SPH_SHA SHA256_Init( &ctxf_sha256 ); SHA256_Update( &ctxf_sha256, bdata, bytes ); SHA256_Final( (unsigned char*) hash, &ctxf_sha256 ); -#else - sph_sha256_init( &ctxf_sha256 ); - sph_sha256( &ctxf_sha256, bdata, bytes ); - sph_sha256_close( &ctxf_sha256, (void*)(hash) ); -#endif } -// this is the scanhash part const unsigned char *hash_ = (const unsigned char *)hash; const unsigned char *target_ = (const unsigned char *)ptarget; for ( i = 31; i >= 0; i-- ) @@ -354,7 +306,6 @@ int scanhash_m7m_hash( int thr_id, struct work* work, pdata[19] = n; -// do this in hashm7m out: mpf_set_prec_raw(magifpi, prec0); mpf_set_prec_raw(magifpi0, prec0); diff --git a/algo/panama/sph_panama.c b/algo/panama/sph_panama.c new file mode 100644 index 0000000..f3c27c7 --- /dev/null +++ b/algo/panama/sph_panama.c @@ -0,0 +1,334 @@ +/* $Id: panama.c 216 2010-06-08 09:46:57Z tp $ */ +/* + * PANAMA implementation. 
+ * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2007-2010 Projet RNRT SAPHIR + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + +#include +#include + +#include "sph_panama.h" + +#define LVAR17(b) sph_u32 \ + b ## 0, b ## 1, b ## 2, b ## 3, b ## 4, b ## 5, \ + b ## 6, b ## 7, b ## 8, b ## 9, b ## 10, b ## 11, \ + b ## 12, b ## 13, b ## 14, b ## 15, b ## 16; + +#define LVARS \ + LVAR17(a) \ + LVAR17(g) \ + LVAR17(p) \ + LVAR17(t) + +#define M17(macro) do { \ + macro( 0, 1, 2, 4); \ + macro( 1, 2, 3, 5); \ + macro( 2, 3, 4, 6); \ + macro( 3, 4, 5, 7); \ + macro( 4, 5, 6, 8); \ + macro( 5, 6, 7, 9); \ + macro( 6, 7, 8, 10); \ + macro( 7, 8, 9, 11); \ + macro( 8, 9, 10, 12); \ + macro( 9, 10, 11, 13); \ + macro(10, 11, 12, 14); \ + macro(11, 12, 13, 15); \ + macro(12, 13, 14, 16); \ + macro(13, 14, 15, 0); \ + macro(14, 15, 16, 1); \ + macro(15, 16, 0, 2); \ + macro(16, 0, 1, 3); \ + } while (0) + +#define BUPDATE1(n0, n2) do { \ + sc->buffer[ptr24][n0] ^= sc->buffer[ptr31][n2]; \ + sc->buffer[ptr31][n2] ^= INW1(n2); \ + } while (0) + +#define BUPDATE do { \ + BUPDATE1(0, 2); \ + BUPDATE1(1, 3); \ + BUPDATE1(2, 4); \ + BUPDATE1(3, 5); \ + BUPDATE1(4, 6); \ + BUPDATE1(5, 7); \ + BUPDATE1(6, 0); \ + BUPDATE1(7, 1); \ + } while (0) + +#define RSTATE(n0, n1, n2, n4) (a ## n0 = sc->state[n0]) + +#define WSTATE(n0, n1, n2, n4) (sc->state[n0] = a ## n0) + +#define GAMMA(n0, n1, n2, n4) \ + (g ## n0 = a ## n0 ^ (a ## n1 | SPH_T32(~a ## n2))) + +#define PI_ALL do { \ + p0 = g0; \ + p1 = SPH_ROTL32( g7, 1); \ + p2 = SPH_ROTL32(g14, 3); \ + p3 = SPH_ROTL32( g4, 6); \ + p4 = SPH_ROTL32(g11, 10); \ + p5 = SPH_ROTL32( g1, 15); \ + p6 = SPH_ROTL32( g8, 21); \ + p7 = SPH_ROTL32(g15, 28); \ + p8 = SPH_ROTL32( g5, 4); \ + p9 = SPH_ROTL32(g12, 13); \ + p10 = SPH_ROTL32( g2, 23); \ + p11 = SPH_ROTL32( g9, 2); \ + p12 = SPH_ROTL32(g16, 14); \ + p13 = SPH_ROTL32( g6, 27); \ + p14 = SPH_ROTL32(g13, 9); \ + p15 = SPH_ROTL32( g3, 24); \ + p16 = SPH_ROTL32(g10, 8); \ + } while (0) + +#define THETA(n0, n1, n2, n4) \ + (t ## n0 = p ## n0 ^ p ## n1 ^ p ## n4) + +#define SIGMA_ALL do { \ + a0 = t0 ^ 1; \ + a1 = t1 ^ INW2(0); \ + a2 = t2 ^ INW2(1); \ + a3 = t3 ^ INW2(2); \ + a4 = t4 ^ INW2(3); \ + a5 = t5 ^ INW2(4); \ + a6 = t6 ^ INW2(5); \ + a7 = t7 ^ INW2(6); \ + a8 = t8 ^ INW2(7); \ + a9 = t9 ^ sc->buffer[ptr16][0]; \ + a10 = t10 ^ 
sc->buffer[ptr16][1]; \ + a11 = t11 ^ sc->buffer[ptr16][2]; \ + a12 = t12 ^ sc->buffer[ptr16][3]; \ + a13 = t13 ^ sc->buffer[ptr16][4]; \ + a14 = t14 ^ sc->buffer[ptr16][5]; \ + a15 = t15 ^ sc->buffer[ptr16][6]; \ + a16 = t16 ^ sc->buffer[ptr16][7]; \ + } while (0) + +#define PANAMA_STEP do { \ + unsigned ptr16, ptr24, ptr31; \ + \ + ptr24 = (ptr0 - 8) & 31; \ + ptr31 = (ptr0 - 1) & 31; \ + BUPDATE; \ + M17(GAMMA); \ + PI_ALL; \ + M17(THETA); \ + ptr16 = ptr0 ^ 16; \ + SIGMA_ALL; \ + ptr0 = ptr31; \ + } while (0) + +/* + * These macros are used to compute + */ +#define INC0 1 +#define INC1 2 +#define INC2 3 +#define INC3 4 +#define INC4 5 +#define INC5 6 +#define INC6 7 +#define INC7 8 + +/* + * Push data by blocks of 32 bytes. "pbuf" must be 32-bit aligned. Each + * iteration processes 32 data bytes; "num" contains the number of + * iterations. + */ +static void +panama_push(sph_panama_context *sc, const unsigned char *pbuf, size_t num) +{ + LVARS + unsigned ptr0; +#if SPH_LITTLE_FAST +#define INW1(i) sph_dec32le_aligned(pbuf + 4 * (i)) +#else + sph_u32 X_var[8]; +#define INW1(i) X_var[i] +#endif +#define INW2(i) INW1(i) + + M17(RSTATE); + ptr0 = sc->buffer_ptr; + while (num -- > 0) { +#if !SPH_LITTLE_FAST + int i; + + for (i = 0; i < 8; i ++) + X_var[i] = sph_dec32le_aligned(pbuf + 4 * (i)); +#endif + PANAMA_STEP; + pbuf = (const unsigned char *)pbuf + 32; + } + M17(WSTATE); + sc->buffer_ptr = ptr0; + +#undef INW1 +#undef INW2 +} + +/* + * Perform the "pull" operation repeatedly ("num" times). The hash output + * will be extracted from the state afterwards. + */ +static void +panama_pull(sph_panama_context *sc, unsigned num) +{ + LVARS + unsigned ptr0; +#define INW1(i) INW_H1(INC ## i) +#define INW_H1(i) INW_H2(i) +#define INW_H2(i) a ## i +#define INW2(i) sc->buffer[ptr4][i] + + M17(RSTATE); + ptr0 = sc->buffer_ptr; + while (num -- > 0) { + unsigned ptr4; + + ptr4 = (ptr0 + 4) & 31; + PANAMA_STEP; + } + M17(WSTATE); + +#undef INW1 +#undef INW_H1 +#undef INW_H2 +#undef INW2 +} + +/* see sph_panama.h */ +void +sph_panama_init(void *cc) +{ + sph_panama_context *sc; + + sc = cc; + /* + * This is not completely conformant, but "it will work + * everywhere". Initial state consists of zeroes everywhere. + * Conceptually, the sph_u32 type may have padding bits which + * must not be set to 0; but such an architecture remains to + * be seen. 
+ */ + sc->data_ptr = 0; + memset(sc->buffer, 0, sizeof sc->buffer); + sc->buffer_ptr = 0; + memset(sc->state, 0, sizeof sc->state); +} + +#ifdef SPH_UPTR +static void +panama_short(void *cc, const void *data, size_t len) +#else +void +sph_panama(void *cc, const void *data, size_t len) +#endif +{ + sph_panama_context *sc; + unsigned current; + + sc = cc; + current = sc->data_ptr; + while (len > 0) { + unsigned clen; + + clen = (sizeof sc->data) - current; + if (clen > len) + clen = len; + memcpy(sc->data + current, data, clen); + data = (const unsigned char *)data + clen; + len -= clen; + current += clen; + if (current == sizeof sc->data) { + current = 0; + panama_push(sc, sc->data, 1); + } + } + sc->data_ptr = current; +} + +#ifdef SPH_UPTR +/* see sph_panama.h */ +void +sph_panama(void *cc, const void *data, size_t len) +{ + sph_panama_context *sc; + unsigned current; + size_t rlen; + + if (len < (2 * sizeof sc->data)) { + panama_short(cc, data, len); + return; + } + sc = cc; + current = sc->data_ptr; + if (current > 0) { + unsigned t; + + t = (sizeof sc->data) - current; + panama_short(sc, data, t); + data = (const unsigned char *)data + t; + len -= t; + } +#if !SPH_UNALIGNED + if (((SPH_UPTR)data & 3) != 0) { + panama_short(sc, data, len); + return; + } +#endif + panama_push(sc, data, len >> 5); + rlen = len & 31; + if (rlen > 0) + memcpy(sc->data, + (const unsigned char *)data + len - rlen, rlen); + sc->data_ptr = rlen; +} +#endif + +/* see sph_panama.h */ +void +sph_panama_close(void *cc, void *dst) +{ + sph_panama_context *sc; + unsigned current; + int i; + + sc = cc; + current = sc->data_ptr; + sc->data[current ++] = 0x01; + memset(sc->data + current, 0, (sizeof sc->data) - current); + panama_push(sc, sc->data, 1); + panama_pull(sc, 32); + for (i = 0; i < 8; i ++) + sph_enc32le((unsigned char *)dst + 4 * i, sc->state[i + 9]); + sph_panama_init(sc); +} diff --git a/algo/panama/sph_panama.h b/algo/panama/sph_panama.h new file mode 100644 index 0000000..6f9d3e8 --- /dev/null +++ b/algo/panama/sph_panama.h @@ -0,0 +1,118 @@ +/* $Id: sph_panama.h 154 2010-04-26 17:00:24Z tp $ */ +/** + * PANAMA interface. + * + * PANAMA has been published in: J. Daemen and C. Clapp, "Fast Hashing + * and Stream Encryption with PANAMA", Fast Software Encryption - + * FSE'98, LNCS 1372, Springer (1998), pp. 60--74. + * + * PANAMA is not fully defined with regards to endianness and related + * topics. This implementation follows strict little-endian conventions: + *
+ * <ul>
+ * <li>Each 32-byte input block is split into eight 32-bit words, the
+ * first (leftmost) word being numbered 0.</li>
+ * <li>Each such 32-bit word is decoded from memory in little-endian
+ * convention.</li>
+ * <li>The additional padding bit equal to "1" is added by considering
+ * the least significant bit in a byte to come first; practically, this
+ * means that a single byte of value 0x01 is appended to the (byte-oriented)
+ * message, and then 0 to 31 bytes of value 0x00.</li>
+ * <li>The output consists of eight 32-bit words; the word numbered 0 is
+ * written first (in leftmost position) and it is encoded in little-endian
+ * convention.</li>
+ * </ul>
+ * With these conventions, PANAMA is sometimes known as "PANAMA-LE". The + * PANAMA reference implementation uses our conventions for input, but + * prescribes no convention for output. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2007-2010 Projet RNRT SAPHIR + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @file sph_panama.h + * @author Thomas Pornin + */ + +#ifndef SPH_PANAMA_H__ +#define SPH_PANAMA_H__ + +#include +#include "algo/sha/sph_types.h" + +/** + * Output size (in bits) for PANAMA. + */ +#define SPH_SIZE_panama 256 + +/** + * This structure is a context for PANAMA computations: it contains the + * intermediate values and some data from the last entered block. Once + * a PANAMA computation has been performed, the context can be reused for + * another computation. + * + * The contents of this structure are private. A running PANAMA computation + * can be cloned by copying the context (e.g. with a simple + * memcpy()). + */ +typedef struct { +#ifndef DOXYGEN_IGNORE + unsigned char data[32]; /* first field, for alignment */ + unsigned data_ptr; + + sph_u32 buffer[32][8]; + unsigned buffer_ptr; + + sph_u32 state[17]; +#endif +} sph_panama_context; + +/** + * Initialize a PANAMA context. This process performs no memory allocation. + * + * @param cc the PANAMA context (pointer to a sph_panama_context) + */ +void sph_panama_init(void *cc); + +/** + * Process some data bytes. It is acceptable that len is zero + * (in which case this function does nothing). + * + * @param cc the PANAMA context + * @param data the input data + * @param len the input data length (in bytes) + */ +void sph_panama(void *cc, const void *data, size_t len); + +/** + * Terminate the current PANAMA computation and output the result into the + * provided buffer. The destination buffer must be wide enough to + * accomodate the result (32 bytes). The context is automatically + * reinitialized. 
+ * + * @param cc the PANAMA context + * @param dst the destination buffer + */ +void sph_panama_close(void *cc, void *dst); + +#endif diff --git a/algo/quark/anime-4way.c b/algo/quark/anime-4way.c index 9f15dcf..89ad927 100644 --- a/algo/quark/anime-4way.c +++ b/algo/quark/anime-4way.c @@ -48,36 +48,36 @@ void anime_4way_hash( void *state, const void *input ) __m256i* vhA = (__m256i*)vhashA; __m256i* vhB = (__m256i*)vhashB; __m256i vh_mask; - __m256i bit3_mask; bit3_mask = _mm256_set1_epi64x( 8 ); + const __m256i bit3_mask = _mm256_set1_epi64x( 8 ); int i; anime_4way_ctx_holder ctx; memcpy( &ctx, &anime_4way_ctx, sizeof(anime_4way_ctx) ); - bmw512_4way( &ctx.bmw, vhash, 80 ); + bmw512_4way( &ctx.bmw, input, 80 ); bmw512_4way_close( &ctx.bmw, vhash ); - blake512_4way( &ctx.blake, input, 64 ); + blake512_4way( &ctx.blake, vhash, 64 ); blake512_4way_close( &ctx.blake, vhash ); vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), m256_zero ); - mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); - update_and_final_groestl( &ctx.groestl, (char*)hash0, - (char*)hash0, 512 ); - reinit_groestl( &ctx.groestl ); - update_and_final_groestl( &ctx.groestl, (char*)hash1, - (char*)hash1, 512 ); - reinit_groestl( &ctx.groestl ); - update_and_final_groestl( &ctx.groestl, (char*)hash2, - (char*)hash2, 512 ); - reinit_groestl( &ctx.groestl ); - update_and_final_groestl( &ctx.groestl, (char*)hash3, - (char*)hash3, 512 ); - mm256_interleave_4x64( vhashA, hash0, hash1, hash2, hash3, 512 ); + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + update_and_final_groestl( &ctx.groestl, (char*)hash0, + (char*)hash0, 512 ); + reinit_groestl( &ctx.groestl ); + update_and_final_groestl( &ctx.groestl, (char*)hash1, + (char*)hash1, 512 ); + reinit_groestl( &ctx.groestl ); + update_and_final_groestl( &ctx.groestl, (char*)hash2, + (char*)hash2, 512 ); + reinit_groestl( &ctx.groestl ); + update_and_final_groestl( &ctx.groestl, (char*)hash3, + (char*)hash3, 512 ); + mm256_interleave_4x64( vhashA, hash0, hash1, hash2, hash3, 512 ); - skein512_4way( &ctx.skein, vhash, 64 ); - skein512_4way_close( &ctx.skein, vhashB ); + skein512_4way( &ctx.skein, vhash, 64 ); + skein512_4way_close( &ctx.skein, vhashB ); for ( i = 0; i < 8; i++ ) vh[i] = _mm256_blendv_epi8( vhA[i], vhB[i], vh_mask ); @@ -120,13 +120,13 @@ void anime_4way_hash( void *state, const void *input ) vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), m256_zero ); - keccak512_4way_init( &ctx.keccak ); - keccak512_4way( &ctx.keccak, vhash, 64 ); - keccak512_4way_close( &ctx.keccak, vhashA ); + keccak512_4way_init( &ctx.keccak ); + keccak512_4way( &ctx.keccak, vhash, 64 ); + keccak512_4way_close( &ctx.keccak, vhashA ); - jh512_4way_init( &ctx.jh ); - jh512_4way( &ctx.jh, vhash, 64 ); - jh512_4way_close( &ctx.jh, vhashB ); + jh512_4way_init( &ctx.jh ); + jh512_4way( &ctx.jh, vhash, 64 ); + jh512_4way_close( &ctx.jh, vhashB ); for ( i = 0; i < 8; i++ ) vh[i] = _mm256_blendv_epi8( vhA[i], vhB[i], vh_mask ); diff --git a/algo/radiogatun/sph_radiogatun.c b/algo/radiogatun/sph_radiogatun.c new file mode 100644 index 0000000..888b028 --- /dev/null +++ b/algo/radiogatun/sph_radiogatun.c @@ -0,0 +1,1003 @@ +/* $Id: radiogatun.c 226 2010-06-16 17:28:08Z tp $ */ +/* + * RadioGatun implementation. 
+ * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2007-2010 Projet RNRT SAPHIR + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @author Thomas Pornin + */ + +#include +#include + +#include "sph_radiogatun.h" + +#if SPH_SMALL_FOOTPRINT && !defined SPH_SMALL_FOOTPRINT_RADIOGATUN +#define SPH_SMALL_FOOTPRINT_RADIOGATUN 1 +#endif + +/* ======================================================================= */ +/* + * The core macros. We want to unroll 13 successive rounds so that the + * belt rotation becomes pure routing, solved at compilation time, with + * no unnecessary copying. We also wish all state variables to be + * independant local variables, so that the C compiler becomes free to + * map these on registers at it sees fit. This requires some heavy + * preprocessor trickeries, including a full addition macro modulo 13. + * + * These macros are size-independent. Some macros must be defined before + * use: + * WT evaluates to the type for a word (32-bit or 64-bit) + * T truncates a value to the proper word size + * ROR(x, n) right rotation of a word x, with explicit modular + * reduction of the rotation count n by the word size + * INW(i, j) input word j (0, 1, or 2) of block i (0 to 12) + * + * For INW, the input buffer is pointed to by "buf" which has type + * "const unsigned char *". 
+ */ + +#define MUL19(action) do { \ + action(0); \ + action(1); \ + action(2); \ + action(3); \ + action(4); \ + action(5); \ + action(6); \ + action(7); \ + action(8); \ + action(9); \ + action(10); \ + action(11); \ + action(12); \ + action(13); \ + action(14); \ + action(15); \ + action(16); \ + action(17); \ + action(18); \ + } while (0) + +#define DECL19(b) b ## 0, b ## 1, b ## 2, b ## 3, b ## 4, b ## 5, \ + b ## 6, b ## 7, b ## 8, b ## 9, b ## 10, b ## 11, \ + b ## 12, b ## 13, b ## 14, b ## 15, b ## 16, \ + b ## 17, b ## 18 + +#define M19_T7(i) M19_T7_(i) +#define M19_T7_(i) M19_T7_ ## i +#define M19_T7_0 0 +#define M19_T7_1 7 +#define M19_T7_2 14 +#define M19_T7_3 2 +#define M19_T7_4 9 +#define M19_T7_5 16 +#define M19_T7_6 4 +#define M19_T7_7 11 +#define M19_T7_8 18 +#define M19_T7_9 6 +#define M19_T7_10 13 +#define M19_T7_11 1 +#define M19_T7_12 8 +#define M19_T7_13 15 +#define M19_T7_14 3 +#define M19_T7_15 10 +#define M19_T7_16 17 +#define M19_T7_17 5 +#define M19_T7_18 12 + +#define M19_A1(i) M19_A1_(i) +#define M19_A1_(i) M19_A1_ ## i +#define M19_A1_0 1 +#define M19_A1_1 2 +#define M19_A1_2 3 +#define M19_A1_3 4 +#define M19_A1_4 5 +#define M19_A1_5 6 +#define M19_A1_6 7 +#define M19_A1_7 8 +#define M19_A1_8 9 +#define M19_A1_9 10 +#define M19_A1_10 11 +#define M19_A1_11 12 +#define M19_A1_12 13 +#define M19_A1_13 14 +#define M19_A1_14 15 +#define M19_A1_15 16 +#define M19_A1_16 17 +#define M19_A1_17 18 +#define M19_A1_18 0 + +#define M19_A2(i) M19_A2_(i) +#define M19_A2_(i) M19_A2_ ## i +#define M19_A2_0 2 +#define M19_A2_1 3 +#define M19_A2_2 4 +#define M19_A2_3 5 +#define M19_A2_4 6 +#define M19_A2_5 7 +#define M19_A2_6 8 +#define M19_A2_7 9 +#define M19_A2_8 10 +#define M19_A2_9 11 +#define M19_A2_10 12 +#define M19_A2_11 13 +#define M19_A2_12 14 +#define M19_A2_13 15 +#define M19_A2_14 16 +#define M19_A2_15 17 +#define M19_A2_16 18 +#define M19_A2_17 0 +#define M19_A2_18 1 + +#define M19_A4(i) M19_A4_(i) +#define M19_A4_(i) M19_A4_ ## i +#define M19_A4_0 4 +#define M19_A4_1 5 +#define M19_A4_2 6 +#define M19_A4_3 7 +#define M19_A4_4 8 +#define M19_A4_5 9 +#define M19_A4_6 10 +#define M19_A4_7 11 +#define M19_A4_8 12 +#define M19_A4_9 13 +#define M19_A4_10 14 +#define M19_A4_11 15 +#define M19_A4_12 16 +#define M19_A4_13 17 +#define M19_A4_14 18 +#define M19_A4_15 0 +#define M19_A4_16 1 +#define M19_A4_17 2 +#define M19_A4_18 3 + +#define ACC_a(i) ACC_a_(i) +#define ACC_a_(i) a ## i +#define ACC_atmp(i) ACC_atmp_(i) +#define ACC_atmp_(i) atmp ## i + +#define MILL1(i) (atmp ## i = a ## i ^ T(ACC_a(M19_A1(i)) \ + | ~ACC_a(M19_A2(i)))) +#define MILL2(i) (a ## i = ROR(ACC_atmp(M19_T7(i)), ((i * (i + 1)) >> 1))) +#define MILL3(i) (atmp ## i = a ## i ^ ACC_a(M19_A1(i)) ^ ACC_a(M19_A4(i))) +#define MILL4(i) (a ## i = atmp ## i ^ (i == 0)) + +#define MILL do { \ + WT DECL19(atmp); \ + MUL19(MILL1); \ + MUL19(MILL2); \ + MUL19(MILL3); \ + MUL19(MILL4); \ + } while (0) + +#define DECL13(b) b ## 0 ## _0, b ## 0 ## _1, b ## 0 ## _2, \ + b ## 1 ## _0, b ## 1 ## _1, b ## 1 ## _2, \ + b ## 2 ## _0, b ## 2 ## _1, b ## 2 ## _2, \ + b ## 3 ## _0, b ## 3 ## _1, b ## 3 ## _2, \ + b ## 4 ## _0, b ## 4 ## _1, b ## 4 ## _2, \ + b ## 5 ## _0, b ## 5 ## _1, b ## 5 ## _2, \ + b ## 6 ## _0, b ## 6 ## _1, b ## 6 ## _2, \ + b ## 7 ## _0, b ## 7 ## _1, b ## 7 ## _2, \ + b ## 8 ## _0, b ## 8 ## _1, b ## 8 ## _2, \ + b ## 9 ## _0, b ## 9 ## _1, b ## 9 ## _2, \ + b ## 10 ## _0, b ## 10 ## _1, b ## 10 ## _2, \ + b ## 11 ## _0, b ## 11 ## _1, b ## 11 ## _2, \ + b ## 12 ## _0, b ## 12 ## _1, b ## 12 ## _2 
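The "preprocessor trickeries" mentioned in the comment above come down to double expansion plus token pasting: an index addition modulo 19 (or 13) is resolved entirely at compile time, so the unrolled rounds never index the mill or belt through a run-time variable. A reduced sketch of the idiom, with hypothetical DEMO_* names and an "add 1 modulo 3" table standing in for the full modulo-19 tables:

/* The outer macro forces its argument to be expanded first; the inner macro
   then pastes it onto a prefix to select a precomputed (i + 1) mod 3 value. */
#define DEMO_NEXT(i)   DEMO_NEXT_(i)
#define DEMO_NEXT_(i)  DEMO_NEXT_ ## i
#define DEMO_NEXT_0 1
#define DEMO_NEXT_1 2
#define DEMO_NEXT_2 0

static const int demo_next[3] = { DEMO_NEXT(0), DEMO_NEXT(1), DEMO_NEXT(2) }; /* 1, 2, 0 */

M19_A1/M19_A2/M19_A4 above, and M13_A just below, are the same construction with full lookup tables modulo 19 and 13, which is what lets the belt rotation reduce to pure routing.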
+ +#define M13_A(i, j) M13_A_(i, j) +#define M13_A_(i, j) M13_A_ ## i ## _ ## j +#define M13_A_0_0 0 +#define M13_A_0_1 1 +#define M13_A_0_2 2 +#define M13_A_0_3 3 +#define M13_A_0_4 4 +#define M13_A_0_5 5 +#define M13_A_0_6 6 +#define M13_A_0_7 7 +#define M13_A_0_8 8 +#define M13_A_0_9 9 +#define M13_A_0_10 10 +#define M13_A_0_11 11 +#define M13_A_0_12 12 +#define M13_A_1_0 1 +#define M13_A_1_1 2 +#define M13_A_1_2 3 +#define M13_A_1_3 4 +#define M13_A_1_4 5 +#define M13_A_1_5 6 +#define M13_A_1_6 7 +#define M13_A_1_7 8 +#define M13_A_1_8 9 +#define M13_A_1_9 10 +#define M13_A_1_10 11 +#define M13_A_1_11 12 +#define M13_A_1_12 0 +#define M13_A_2_0 2 +#define M13_A_2_1 3 +#define M13_A_2_2 4 +#define M13_A_2_3 5 +#define M13_A_2_4 6 +#define M13_A_2_5 7 +#define M13_A_2_6 8 +#define M13_A_2_7 9 +#define M13_A_2_8 10 +#define M13_A_2_9 11 +#define M13_A_2_10 12 +#define M13_A_2_11 0 +#define M13_A_2_12 1 +#define M13_A_3_0 3 +#define M13_A_3_1 4 +#define M13_A_3_2 5 +#define M13_A_3_3 6 +#define M13_A_3_4 7 +#define M13_A_3_5 8 +#define M13_A_3_6 9 +#define M13_A_3_7 10 +#define M13_A_3_8 11 +#define M13_A_3_9 12 +#define M13_A_3_10 0 +#define M13_A_3_11 1 +#define M13_A_3_12 2 +#define M13_A_4_0 4 +#define M13_A_4_1 5 +#define M13_A_4_2 6 +#define M13_A_4_3 7 +#define M13_A_4_4 8 +#define M13_A_4_5 9 +#define M13_A_4_6 10 +#define M13_A_4_7 11 +#define M13_A_4_8 12 +#define M13_A_4_9 0 +#define M13_A_4_10 1 +#define M13_A_4_11 2 +#define M13_A_4_12 3 +#define M13_A_5_0 5 +#define M13_A_5_1 6 +#define M13_A_5_2 7 +#define M13_A_5_3 8 +#define M13_A_5_4 9 +#define M13_A_5_5 10 +#define M13_A_5_6 11 +#define M13_A_5_7 12 +#define M13_A_5_8 0 +#define M13_A_5_9 1 +#define M13_A_5_10 2 +#define M13_A_5_11 3 +#define M13_A_5_12 4 +#define M13_A_6_0 6 +#define M13_A_6_1 7 +#define M13_A_6_2 8 +#define M13_A_6_3 9 +#define M13_A_6_4 10 +#define M13_A_6_5 11 +#define M13_A_6_6 12 +#define M13_A_6_7 0 +#define M13_A_6_8 1 +#define M13_A_6_9 2 +#define M13_A_6_10 3 +#define M13_A_6_11 4 +#define M13_A_6_12 5 +#define M13_A_7_0 7 +#define M13_A_7_1 8 +#define M13_A_7_2 9 +#define M13_A_7_3 10 +#define M13_A_7_4 11 +#define M13_A_7_5 12 +#define M13_A_7_6 0 +#define M13_A_7_7 1 +#define M13_A_7_8 2 +#define M13_A_7_9 3 +#define M13_A_7_10 4 +#define M13_A_7_11 5 +#define M13_A_7_12 6 +#define M13_A_8_0 8 +#define M13_A_8_1 9 +#define M13_A_8_2 10 +#define M13_A_8_3 11 +#define M13_A_8_4 12 +#define M13_A_8_5 0 +#define M13_A_8_6 1 +#define M13_A_8_7 2 +#define M13_A_8_8 3 +#define M13_A_8_9 4 +#define M13_A_8_10 5 +#define M13_A_8_11 6 +#define M13_A_8_12 7 +#define M13_A_9_0 9 +#define M13_A_9_1 10 +#define M13_A_9_2 11 +#define M13_A_9_3 12 +#define M13_A_9_4 0 +#define M13_A_9_5 1 +#define M13_A_9_6 2 +#define M13_A_9_7 3 +#define M13_A_9_8 4 +#define M13_A_9_9 5 +#define M13_A_9_10 6 +#define M13_A_9_11 7 +#define M13_A_9_12 8 +#define M13_A_10_0 10 +#define M13_A_10_1 11 +#define M13_A_10_2 12 +#define M13_A_10_3 0 +#define M13_A_10_4 1 +#define M13_A_10_5 2 +#define M13_A_10_6 3 +#define M13_A_10_7 4 +#define M13_A_10_8 5 +#define M13_A_10_9 6 +#define M13_A_10_10 7 +#define M13_A_10_11 8 +#define M13_A_10_12 9 +#define M13_A_11_0 11 +#define M13_A_11_1 12 +#define M13_A_11_2 0 +#define M13_A_11_3 1 +#define M13_A_11_4 2 +#define M13_A_11_5 3 +#define M13_A_11_6 4 +#define M13_A_11_7 5 +#define M13_A_11_8 6 +#define M13_A_11_9 7 +#define M13_A_11_10 8 +#define M13_A_11_11 9 +#define M13_A_11_12 10 +#define M13_A_12_0 12 +#define M13_A_12_1 0 +#define M13_A_12_2 1 +#define M13_A_12_3 2 +#define 
M13_A_12_4 3 +#define M13_A_12_5 4 +#define M13_A_12_6 5 +#define M13_A_12_7 6 +#define M13_A_12_8 7 +#define M13_A_12_9 8 +#define M13_A_12_10 9 +#define M13_A_12_11 10 +#define M13_A_12_12 11 + +#define M13_N(i) M13_N_(i) +#define M13_N_(i) M13_N_ ## i +#define M13_N_0 12 +#define M13_N_1 11 +#define M13_N_2 10 +#define M13_N_3 9 +#define M13_N_4 8 +#define M13_N_5 7 +#define M13_N_6 6 +#define M13_N_7 5 +#define M13_N_8 4 +#define M13_N_9 3 +#define M13_N_10 2 +#define M13_N_11 1 +#define M13_N_12 0 + +#define ACC_b(i, k) ACC_b_(i, k) +#define ACC_b_(i, k) b ## i ## _ ## k + +#define ROUND_ELT(k, s) do { \ + if ((bj += 3) == 39) \ + bj = 0; \ + sc->b[bj + s] ^= a ## k; \ + } while (0) + +#define ROUND_SF(j) do { \ + size_t bj = (j) * 3; \ + ROUND_ELT(1, 0); \ + ROUND_ELT(2, 1); \ + ROUND_ELT(3, 2); \ + ROUND_ELT(4, 0); \ + ROUND_ELT(5, 1); \ + ROUND_ELT(6, 2); \ + ROUND_ELT(7, 0); \ + ROUND_ELT(8, 1); \ + ROUND_ELT(9, 2); \ + ROUND_ELT(10, 0); \ + ROUND_ELT(11, 1); \ + ROUND_ELT(12, 2); \ + MILL; \ + bj = (j) * 3; \ + a ## 13 ^= sc->b[bj + 0]; \ + a ## 14 ^= sc->b[bj + 1]; \ + a ## 15 ^= sc->b[bj + 2]; \ + } while (0) + +#define INPUT_SF(j, p0, p1, p2) do { \ + size_t bj = ((j) + 1) * 3; \ + if (bj == 39) \ + bj = 0; \ + sc->b[bj + 0] ^= (p0); \ + sc->b[bj + 1] ^= (p1); \ + sc->b[bj + 2] ^= (p2); \ + a16 ^= (p0); \ + a17 ^= (p1); \ + a18 ^= (p2); \ + } while (0) + + +#if SPH_SMALL_FOOTPRINT_RADIOGATUN + +#define ROUND ROUND_SF +#define INPUT INPUT_SF + +#else + +/* + * Round function R, on base j. The value j is such that B[0] is actually + * b[j] after the initial rotation. On the 13-round macro, j has the + * successive values 12, 11, 10... 1, 0. + */ +#define ROUND(j) do { \ + ACC_b(M13_A(1, j), 0) ^= a ## 1; \ + ACC_b(M13_A(2, j), 1) ^= a ## 2; \ + ACC_b(M13_A(3, j), 2) ^= a ## 3; \ + ACC_b(M13_A(4, j), 0) ^= a ## 4; \ + ACC_b(M13_A(5, j), 1) ^= a ## 5; \ + ACC_b(M13_A(6, j), 2) ^= a ## 6; \ + ACC_b(M13_A(7, j), 0) ^= a ## 7; \ + ACC_b(M13_A(8, j), 1) ^= a ## 8; \ + ACC_b(M13_A(9, j), 2) ^= a ## 9; \ + ACC_b(M13_A(10, j), 0) ^= a ## 10; \ + ACC_b(M13_A(11, j), 1) ^= a ## 11; \ + ACC_b(M13_A(12, j), 2) ^= a ## 12; \ + MILL; \ + a ## 13 ^= ACC_b(j, 0); \ + a ## 14 ^= ACC_b(j, 1); \ + a ## 15 ^= ACC_b(j, 2); \ + } while (0) + +#define INPUT(j, p0, p1, p2) do { \ + ACC_b(M13_A(1, j), 0) ^= (p0); \ + ACC_b(M13_A(1, j), 1) ^= (p1); \ + ACC_b(M13_A(1, j), 2) ^= (p2); \ + a16 ^= (p0); \ + a17 ^= (p1); \ + a18 ^= (p2); \ + } while (0) + +#endif + +#define MUL13(action) do { \ + action(0); \ + action(1); \ + action(2); \ + action(3); \ + action(4); \ + action(5); \ + action(6); \ + action(7); \ + action(8); \ + action(9); \ + action(10); \ + action(11); \ + action(12); \ + } while (0) + +#define MILL_READ_ELT(i) do { \ + a ## i = sc->a[i]; \ + } while (0) + +#define MILL_WRITE_ELT(i) do { \ + sc->a[i] = a ## i; \ + } while (0) + +#define STATE_READ_SF do { \ + MUL19(MILL_READ_ELT); \ + } while (0) + +#define STATE_WRITE_SF do { \ + MUL19(MILL_WRITE_ELT); \ + } while (0) + +#define PUSH13_SF do { \ + WT DECL19(a); \ + const unsigned char *buf; \ + \ + buf = data; \ + STATE_READ_SF; \ + while (len >= sizeof sc->data) { \ + size_t mk; \ + for (mk = 13; mk > 0; mk --) { \ + WT p0 = INW(0, 0); \ + WT p1 = INW(0, 1); \ + WT p2 = INW(0, 2); \ + INPUT_SF(mk - 1, p0, p1, p2); \ + ROUND_SF(mk - 1); \ + buf += (sizeof sc->data) / 13; \ + len -= (sizeof sc->data) / 13; \ + } \ + } \ + STATE_WRITE_SF; \ + return len; \ + } while (0) + +#if SPH_SMALL_FOOTPRINT_RADIOGATUN + +#define STATE_READ STATE_READ_SF 
+#define STATE_WRITE STATE_WRITE_SF +#define PUSH13 PUSH13_SF + +#else + +#define BELT_READ_ELT(i) do { \ + b ## i ## _0 = sc->b[3 * i + 0]; \ + b ## i ## _1 = sc->b[3 * i + 1]; \ + b ## i ## _2 = sc->b[3 * i + 2]; \ + } while (0) + +#define BELT_WRITE_ELT(i) do { \ + sc->b[3 * i + 0] = b ## i ## _0; \ + sc->b[3 * i + 1] = b ## i ## _1; \ + sc->b[3 * i + 2] = b ## i ## _2; \ + } while (0) + +#define STATE_READ do { \ + MUL13(BELT_READ_ELT); \ + MUL19(MILL_READ_ELT); \ + } while (0) + +#define STATE_WRITE do { \ + MUL13(BELT_WRITE_ELT); \ + MUL19(MILL_WRITE_ELT); \ + } while (0) + +/* + * Input data by chunks of 13*3 blocks. This is the body of the + * radiogatun32_push13() and radiogatun64_push13() functions. + */ +#define PUSH13 do { \ + WT DECL19(a), DECL13(b); \ + const unsigned char *buf; \ + \ + buf = data; \ + STATE_READ; \ + while (len >= sizeof sc->data) { \ + WT p0, p1, p2; \ + MUL13(PUSH13_ELT); \ + buf += sizeof sc->data; \ + len -= sizeof sc->data; \ + } \ + STATE_WRITE; \ + return len; \ + } while (0) + +#define PUSH13_ELT(k) do { \ + p0 = INW(k, 0); \ + p1 = INW(k, 1); \ + p2 = INW(k, 2); \ + INPUT(M13_N(k), p0, p1, p2); \ + ROUND(M13_N(k)); \ + } while (0) + +#endif + +#define BLANK13_SF do { \ + size_t mk = 13; \ + while (mk -- > 0) \ + ROUND_SF(mk); \ + } while (0) + +#define BLANK1_SF do { \ + WT tmp0, tmp1, tmp2; \ + ROUND_SF(12); \ + tmp0 = sc->b[36]; \ + tmp1 = sc->b[37]; \ + tmp2 = sc->b[38]; \ + memmove(sc->b + 3, sc->b, 36 * sizeof sc->b[0]); \ + sc->b[0] = tmp0; \ + sc->b[1] = tmp1; \ + sc->b[2] = tmp2; \ + } while (0) + +#if SPH_SMALL_FOOTPRINT_RADIOGATUN + +#define BLANK13 BLANK13_SF +#define BLANK1 BLANK1_SF + +#else + +/* + * Run 13 blank rounds. This macro expects the "a" and "b" state variables + * to be alread declared. + */ +#define BLANK13 MUL13(BLANK13_ELT) + +#define BLANK13_ELT(k) ROUND(M13_N(k)) + +#define MUL12(action) do { \ + action(0); \ + action(1); \ + action(2); \ + action(3); \ + action(4); \ + action(5); \ + action(6); \ + action(7); \ + action(8); \ + action(9); \ + action(10); \ + action(11); \ + } while (0) + +/* + * Run a single blank round, and physically rotate the belt. This is used + * for the last blank rounds, and the output rounds. This macro expects the + * "a" abd "b" state variables to be already declared. + */ +#define BLANK1 do { \ + WT tmp0, tmp1, tmp2; \ + ROUND(12); \ + tmp0 = b0_0; \ + tmp1 = b0_1; \ + tmp2 = b0_2; \ + MUL12(BLANK1_ELT); \ + b1_0 = tmp0; \ + b1_1 = tmp1; \ + b1_2 = tmp2; \ + } while (0) + +#define BLANK1_ELT(i) do { \ + ACC_b(M13_A(M13_N(i), 1), 0) = ACC_b(M13_N(i), 0); \ + ACC_b(M13_A(M13_N(i), 1), 1) = ACC_b(M13_N(i), 1); \ + ACC_b(M13_A(M13_N(i), 1), 2) = ACC_b(M13_N(i), 2); \ + } while (0) + +#endif + +#define NO_TOKEN + +/* + * Perform padding, then blank rounds, then output some words. This is + * the body of sph_radiogatun32_close() and sph_radiogatun64_close(). 
+ */ +#define CLOSE_SF(width) CLOSE_GEN(width, \ + NO_TOKEN, STATE_READ_SF, BLANK1_SF, BLANK13_SF) + +#if SPH_SMALL_FOOTPRINT_RADIOGATUN +#define CLOSE CLOSE_SF +#else +#define CLOSE(width) CLOSE_GEN(width, \ + WT DECL13(b);, STATE_READ, BLANK1, BLANK13) +#endif + +#define CLOSE_GEN(width, WTb13, state_read, blank1, blank13) do { \ + unsigned ptr, num; \ + unsigned char *out; \ + WT DECL19(a); \ + WTb13 \ + \ + ptr = sc->data_ptr; \ + sc->data[ptr ++] = 0x01; \ + memset(sc->data + ptr, 0, (sizeof sc->data) - ptr); \ + radiogatun ## width ## _push13(sc, sc->data, sizeof sc->data); \ + \ + num = 17; \ + for (;;) { \ + ptr += 3 * (width >> 3); \ + if (ptr > sizeof sc->data) \ + break; \ + num --; \ + } \ + \ + state_read; \ + if (num >= 13) { \ + blank13; \ + num -= 13; \ + } \ + while (num -- > 0) \ + blank1; \ + \ + num = 0; \ + out = dst; \ + for (;;) { \ + OUTW(out, a1); \ + out += width >> 3; \ + OUTW(out, a2); \ + out += width >> 3; \ + num += 2 * (width >> 3); \ + if (num >= 32) \ + break; \ + blank1; \ + } \ + INIT; \ + } while (0) + +/* + * Initialize context structure. + */ +#if SPH_LITTLE_ENDIAN || SPH_BIG_ENDIAN + +#define INIT do { \ + memset(sc->a, 0, sizeof sc->a); \ + memset(sc->b, 0, sizeof sc->b); \ + sc->data_ptr = 0; \ + } while (0) + +#else + +#define INIT do { \ + size_t u; \ + for (u = 0; u < 19; u ++) \ + sc->a[u] = 0; \ + for (u = 0; u < 39; u ++) \ + sc->b[u] = 0; \ + sc->data_ptr = 0; \ + } while (0) + +#endif + +/* ======================================================================= */ +/* + * RadioGatun[32]. + */ + +#if !SPH_NO_RG32 + +#undef WT +#define WT sph_u32 +#undef T +#define T SPH_T32 +#undef ROR +#define ROR(x, n) SPH_T32(((x) << ((32 - (n)) & 31)) | ((x) >> ((n) & 31))) +#undef INW +#define INW(i, j) sph_dec32le_aligned(buf + (4 * (3 * (i) + (j)))) +#undef OUTW +#define OUTW(b, v) sph_enc32le(b, v) + +/* + * Insert data by big chunks of 13*12 = 156 bytes. Returned value is the + * number of remaining bytes (between 0 and 155). This method assumes that + * the input data is suitably aligned. 
+ */ +static size_t +radiogatun32_push13(sph_radiogatun32_context *sc, const void *data, size_t len) +{ + PUSH13; +} + +/* see sph_radiogatun.h */ +void +sph_radiogatun32_init(void *cc) +{ + sph_radiogatun32_context *sc; + + sc = cc; + INIT; +} + +#ifdef SPH_UPTR +static void +radiogatun32_short(void *cc, const void *data, size_t len) +#else +/* see sph_radiogatun.h */ +void +sph_radiogatun32(void *cc, const void *data, size_t len) +#endif +{ + sph_radiogatun32_context *sc; + unsigned ptr; + + sc = cc; + ptr = sc->data_ptr; + while (len > 0) { + size_t clen; + + clen = (sizeof sc->data) - ptr; + if (clen > len) + clen = len; + memcpy(sc->data + ptr, data, clen); + data = (const unsigned char *)data + clen; + len -= clen; + ptr += clen; + if (ptr == sizeof sc->data) { + radiogatun32_push13(sc, sc->data, sizeof sc->data); + ptr = 0; + } + } + sc->data_ptr = ptr; +} + +#ifdef SPH_UPTR +/* see sph_radiogatun.h */ +void +sph_radiogatun32(void *cc, const void *data, size_t len) +{ + sph_radiogatun32_context *sc; + unsigned ptr; + size_t rlen; + + if (len < (2 * sizeof sc->data)) { + radiogatun32_short(cc, data, len); + return; + } + sc = cc; + ptr = sc->data_ptr; + if (ptr > 0) { + unsigned t; + + t = (sizeof sc->data) - ptr; + radiogatun32_short(sc, data, t); + data = (const unsigned char *)data + t; + len -= t; + } +#if !SPH_UNALIGNED + if (((SPH_UPTR)data & 3) != 0) { + radiogatun32_short(sc, data, len); + return; + } +#endif + rlen = radiogatun32_push13(sc, data, len); + memcpy(sc->data, (const unsigned char *)data + len - rlen, rlen); + sc->data_ptr = rlen; +} +#endif + +/* see sph_radiogatun.h */ +void +sph_radiogatun32_close(void *cc, void *dst) +{ + sph_radiogatun32_context *sc; + + sc = cc; + CLOSE(32); +} + +#endif + +/* ======================================================================= */ +/* + * RadioGatun[64]. Compiled only if a 64-bit or more type is available. + */ + +#if SPH_64 + +#if !SPH_NO_RG64 + +#undef WT +#define WT sph_u64 +#undef T +#define T SPH_T64 +#undef ROR +#define ROR(x, n) SPH_T64(((x) << ((64 - (n)) & 63)) | ((x) >> ((n) & 63))) +#undef INW +#define INW(i, j) sph_dec64le_aligned(buf + (8 * (3 * (i) + (j)))) +#undef OUTW +#define OUTW(b, v) sph_enc64le(b, v) + +/* + * On 32-bit x86, register pressure is such that using the small + * footprint version is a net gain (x2 speed), because that variant + * uses fewer local variables. + */ +#if SPH_I386_MSVC || SPH_I386_GCC || defined __i386__ +#undef PUSH13 +#define PUSH13 PUSH13_SF +#undef CLOSE +#define CLOSE CLOSE_SF +#endif + +/* + * Insert data by big chunks of 13*24 = 312 bytes. Returned value is the + * number of remaining bytes (between 0 and 311). This method assumes that + * the input data is suitably aligned. 
+ */ +static size_t +radiogatun64_push13(sph_radiogatun64_context *sc, const void *data, size_t len) +{ + PUSH13; +} + +/* see sph_radiogatun.h */ +void +sph_radiogatun64_init(void *cc) +{ + sph_radiogatun64_context *sc; + + sc = cc; + INIT; +} + +#ifdef SPH_UPTR +static void +radiogatun64_short(void *cc, const void *data, size_t len) +#else +/* see sph_radiogatun.h */ +void +sph_radiogatun64(void *cc, const void *data, size_t len) +#endif +{ + sph_radiogatun64_context *sc; + unsigned ptr; + + sc = cc; + ptr = sc->data_ptr; + while (len > 0) { + size_t clen; + + clen = (sizeof sc->data) - ptr; + if (clen > len) + clen = len; + memcpy(sc->data + ptr, data, clen); + data = (const unsigned char *)data + clen; + len -= clen; + ptr += clen; + if (ptr == sizeof sc->data) { + radiogatun64_push13(sc, sc->data, sizeof sc->data); + ptr = 0; + } + } + sc->data_ptr = ptr; +} + +#ifdef SPH_UPTR +/* see sph_radiogatun.h */ +void +sph_radiogatun64(void *cc, const void *data, size_t len) +{ + sph_radiogatun64_context *sc; + unsigned ptr; + size_t rlen; + + if (len < (2 * sizeof sc->data)) { + radiogatun64_short(cc, data, len); + return; + } + sc = cc; + ptr = sc->data_ptr; + if (ptr > 0) { + unsigned t; + + t = (sizeof sc->data) - ptr; + radiogatun64_short(sc, data, t); + data = (const unsigned char *)data + t; + len -= t; + } +#if !SPH_UNALIGNED + if (((SPH_UPTR)data & 7) != 0) { + radiogatun64_short(sc, data, len); + return; + } +#endif + rlen = radiogatun64_push13(sc, data, len); + memcpy(sc->data, (const unsigned char *)data + len - rlen, rlen); + sc->data_ptr = rlen; +} +#endif + +/* see sph_radiogatun.h */ +void +sph_radiogatun64_close(void *cc, void *dst) +{ + sph_radiogatun64_context *sc; + + sc = cc; + CLOSE(64); +} + +#endif + +#endif diff --git a/algo/radiogatun/sph_radiogatun.h b/algo/radiogatun/sph_radiogatun.h new file mode 100644 index 0000000..4e3888c --- /dev/null +++ b/algo/radiogatun/sph_radiogatun.h @@ -0,0 +1,186 @@ +/* $Id: sph_radiogatun.h 226 2010-06-16 17:28:08Z tp $ */ +/** + * RadioGatun interface. + * + * RadioGatun has been published in: G. Bertoni, J. Daemen, M. Peeters + * and G. Van Assche, "RadioGatun, a belt-and-mill hash function", + * presented at the Second Cryptographic Hash Workshop, Santa Barbara, + * August 24-25, 2006. The main Web site, containing that article, the + * reference code and some test vectors, appears to be currently located + * at the following URL: http://radiogatun.noekeon.org/ + * + * The presentation article does not specify endianness or padding. The + * reference code uses the following conventions, which we also apply + * here: + *
+ * <ul>
+ * <li>The input message is an integral number of sequences of three
+ * words. Each word is either a 32-bit or 64-bit word (depending on
+ * the version of RadioGatun).</li>
+ * <li>Input bytes are decoded into words using little-endian
+ * convention.</li>
+ * <li>Padding consists of a single bit of value 1, using little-endian
+ * convention within bytes (i.e. for a byte-oriented input, a single
+ * byte of value 0x01 is appended), then enough bits of value 0 to finish
+ * the current block.</li>
+ * <li>Output consists of 256 bits. Successive output words are encoded
+ * with little-endian convention.</li>
+ * </ul>
+ * These conventions are very close to those we use for PANAMA, which is + * a close ancestor or RadioGatun. + * + * RadioGatun is actually a family of functions, depending on some + * internal parameters. We implement here two functions, with a "belt + * length" of 13, a "belt width" of 3, and a "mill length" of 19. The + * RadioGatun[32] version uses 32-bit words, while the RadioGatun[64] + * variant uses 64-bit words. + * + * Strictly speaking, the name "RadioGatun" should use an acute accent + * on the "u", which we omitted here to keep strict ASCII-compatibility + * of this file. + * + * ==========================(LICENSE BEGIN)============================ + * + * Copyright (c) 2007-2010 Projet RNRT SAPHIR + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ===========================(LICENSE END)============================= + * + * @file sph_radiogatun.h + * @author Thomas Pornin + */ + +#ifndef SPH_RADIOGATUN_H__ +#define SPH_RADIOGATUN_H__ + +#include +#include "algo/sha/sph_types.h" + +/** + * Output size (in bits) for RadioGatun[32]. + */ +#define SPH_SIZE_radiogatun32 256 + +/** + * This structure is a context for RadioGatun[32] computations: it + * contains intermediate values and some data from the last entered + * block. Once a RadioGatun[32] computation has been performed, the + * context can be reused for another computation. + * + * The contents of this structure are private. A running RadioGatun[32] + * computation can be cloned by copying the context (e.g. with a + * simple memcpy()). + */ +typedef struct { +#ifndef DOXYGEN_IGNORE + unsigned char data[156]; /* first field, for alignment */ + unsigned data_ptr; + sph_u32 a[19], b[39]; +#endif +} sph_radiogatun32_context; + +/** + * Initialize a RadioGatun[32] context. This process performs no + * memory allocation. + * + * @param cc the RadioGatun[32] context (pointer to a + * sph_radiogatun32_context) + */ +void sph_radiogatun32_init(void *cc); + +/** + * Process some data bytes. It is acceptable that len is zero + * (in which case this function does nothing). + * + * @param cc the RadioGatun[32] context + * @param data the input data + * @param len the input data length (in bytes) + */ +void sph_radiogatun32(void *cc, const void *data, size_t len); + +/** + * Terminate the current RadioGatun[32] computation and output the + * result into the provided buffer. The destination buffer must be wide + * enough to accomodate the result (32 bytes). 
The context is + * automatically reinitialized. + * + * @param cc the RadioGatun[32] context + * @param dst the destination buffer + */ +void sph_radiogatun32_close(void *cc, void *dst); + +#if SPH_64 + +/** + * Output size (in bits) for RadioGatun[64]. + */ +#define SPH_SIZE_radiogatun64 256 + +/** + * This structure is a context for RadioGatun[64] computations: it + * contains intermediate values and some data from the last entered + * block. Once a RadioGatun[64] computation has been performed, the + * context can be reused for another computation. + * + * The contents of this structure are private. A running RadioGatun[64] + * computation can be cloned by copying the context (e.g. with a + * simple memcpy()). + */ +typedef struct { +#ifndef DOXYGEN_IGNORE + unsigned char data[312]; /* first field, for alignment */ + unsigned data_ptr; + sph_u64 a[19], b[39]; +#endif +} sph_radiogatun64_context; + +/** + * Initialize a RadioGatun[64] context. This process performs no + * memory allocation. + * + * @param cc the RadioGatun[64] context (pointer to a + * sph_radiogatun64_context) + */ +void sph_radiogatun64_init(void *cc); + +/** + * Process some data bytes. It is acceptable that len is zero + * (in which case this function does nothing). + * + * @param cc the RadioGatun[64] context + * @param data the input data + * @param len the input data length (in bytes) + */ +void sph_radiogatun64(void *cc, const void *data, size_t len); + +/** + * Terminate the current RadioGatun[64] computation and output the + * result into the provided buffer. The destination buffer must be wide + * enough to accomodate the result (32 bytes). The context is + * automatically reinitialized. + * + * @param cc the RadioGatun[64] context + * @param dst the destination buffer + */ +void sph_radiogatun64_close(void *cc, void *dst); + +#endif + +#endif diff --git a/algo/ripemd/lbry-gate.h b/algo/ripemd/lbry-gate.h index f3a604d..078ee81 100644 --- a/algo/ripemd/lbry-gate.h +++ b/algo/ripemd/lbry-gate.h @@ -4,10 +4,13 @@ #include "algo-gate-api.h" #include +// Overide multi way on ryzen, SHA is better. +#if !defined(RYZEN_) // need sha512 2 way AVX x2 or 1 way scalar x4 to support 4way AVX. 
#if defined(__AVX2__) #define LBRY_8WAY #endif +#endif #define LBRY_NTIME_INDEX 25 #define LBRY_NBITS_INDEX 26 diff --git a/algo/ripemd/lbry.c b/algo/ripemd/lbry.c index e7a01a2..ef4e242 100644 --- a/algo/ripemd/lbry.c +++ b/algo/ripemd/lbry.c @@ -4,24 +4,17 @@ #include #include #include "sph_ripemd.h" -#include "algo/sha/sph_sha2.h" #include void lbry_hash(void* output, const void* input) { -#ifndef USE_SPH_SHA SHA256_CTX ctx_sha256 __attribute__ ((aligned (64))); SHA512_CTX ctx_sha512 __attribute__ ((aligned (64))); -#else - sph_sha256_context ctx_sha256 __attribute__ ((aligned (64))); - sph_sha512_context ctx_sha512 __attribute__ ((aligned (64))); -#endif sph_ripemd160_context ctx_ripemd __attribute__ ((aligned (64))); uint32_t _ALIGN(64) hashA[16]; uint32_t _ALIGN(64) hashB[16]; uint32_t _ALIGN(64) hashC[16]; -#ifndef USE_SPH_SHA SHA256_Init( &ctx_sha256 ); SHA256_Update( &ctx_sha256, input, 112 ); SHA256_Final( (unsigned char*) hashA, &ctx_sha256 ); @@ -33,19 +26,6 @@ void lbry_hash(void* output, const void* input) SHA512_Init( &ctx_sha512 ); SHA512_Update( &ctx_sha512, hashA, 32 ); SHA512_Final( (unsigned char*) hashA, &ctx_sha512 ); -#else - sph_sha256_init( &ctx_sha256 ); - sph_sha256 ( &ctx_sha256, input, 112 ); - sph_sha256_close( &ctx_sha256, hashA ); - - sph_sha256_init( &ctx_sha256 ); - sph_sha256 ( &ctx_sha256, hashA, 32 ); - sph_sha256_close( &ctx_sha256, hashA ); - - sph_sha512_init( &ctx_sha512 ); - sph_sha512 ( &ctx_sha512, hashA, 32 ); - sph_sha512_close( &ctx_sha512, hashA ); -#endif sph_ripemd160_init( &ctx_ripemd ); sph_ripemd160 ( &ctx_ripemd, hashA, 32 ); @@ -55,7 +35,6 @@ void lbry_hash(void* output, const void* input) sph_ripemd160 ( &ctx_ripemd, hashA+8, 32 ); sph_ripemd160_close( &ctx_ripemd, hashC ); -#ifndef USE_SPH_SHA SHA256_Init( &ctx_sha256 ); SHA256_Update( &ctx_sha256, hashB, 20 ); SHA256_Update( &ctx_sha256, hashC, 20 ); @@ -64,16 +43,7 @@ void lbry_hash(void* output, const void* input) SHA256_Init( &ctx_sha256 ); SHA256_Update( &ctx_sha256, hashA, 32 ); SHA256_Final( (unsigned char*) hashA, &ctx_sha256 ); -#else - sph_sha256_init( &ctx_sha256 ); - sph_sha256 ( &ctx_sha256, hashB, 20 ); - sph_sha256 ( &ctx_sha256, hashC, 20 ); - sph_sha256_close( &ctx_sha256, hashA ); - sph_sha256_init( &ctx_sha256 ); - sph_sha256 ( &ctx_sha256, hashA, 32 ); - sph_sha256_close( &ctx_sha256, hashA ); -#endif memcpy( output, hashA, 32 ); } diff --git a/algo/scryptjane/scrypt-jane-portable-x86.h b/algo/scryptjane/scrypt-jane-portable-x86.h index d8325f0..29aaaae 100644 --- a/algo/scryptjane/scrypt-jane-portable-x86.h +++ b/algo/scryptjane/scrypt-jane-portable-x86.h @@ -296,6 +296,7 @@ get_xgetbv(uint32_t flags) { size_t cpu_detect_mask = (size_t)-1; #endif +#if 0 static size_t detect_cpu(void) { union { uint8_t s[12]; uint32_t i[3]; } vendor_string; @@ -354,6 +355,7 @@ detect_cpu(void) { return cpu_flags; } +#endif #if defined(SCRYPT_TEST_SPEED) static const char * diff --git a/algo/sha/sha256t-4way.c b/algo/sha/sha256t-4way.c index 56519f0..ca381f3 100644 --- a/algo/sha/sha256t-4way.c +++ b/algo/sha/sha256t-4way.c @@ -28,8 +28,8 @@ void sha256t_8way_hash( void* output, const void* input ) } -int scanhash_sha256t_8way( int thr_id, struct work *work, - uint32_t max_nonce, uint64_t *hashes_done ) +int scanhash_sha256t_8way( int thr_id, struct work *work, uint32_t max_nonce, + uint64_t *hashes_done, struct thr_info *mythr ) { uint32_t vdata[20*8] __attribute__ ((aligned (64))); uint32_t hash[8*8] __attribute__ ((aligned (32))); @@ -39,9 +39,8 @@ int scanhash_sha256t_8way( int 
thr_id, struct work *work, const uint32_t Htarg = ptarget[7]; const uint32_t first_nonce = pdata[19]; uint32_t n = first_nonce; - uint32_t *nonces = work->nonces; - int num_found = 0; - uint32_t *noncep = vdata + 152; // 19*8 + __m256i *noncev = (__m256i*)vdata + 19; // aligned + /* int */ thr_id = mythr->id; // thr_id arg is deprecated const uint64_t htmax[] = { 0, 0xF, @@ -56,27 +55,25 @@ int scanhash_sha256t_8way( int thr_id, struct work *work, 0xFFFF0000, 0 }; - for ( int k = 0; k < 20; k++ ) - be32enc( &edata[k], pdata[k] ); + // Need big endian data + casti_m256i( edata, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) ); + casti_m256i( edata, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) ); + casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) ); mm256_interleave_8x32( vdata, edata, edata, edata, edata, edata, edata, edata, edata, 640 ); sha256_8way_init( &sha256_ctx8 ); sha256_8way( &sha256_ctx8, vdata, 64 ); - + for ( int m = 0; m < 6; m++ ) if ( Htarg <= htmax[m] ) { uint32_t mask = masks[m]; - do { - be32enc( noncep, n ); - be32enc( noncep +1, n+1 ); - be32enc( noncep +2, n+2 ); - be32enc( noncep +3, n+3 ); - be32enc( noncep +4, n+4 ); - be32enc( noncep +5, n+5 ); - be32enc( noncep +6, n+6 ); - be32enc( noncep +7, n+7 ); - pdata[19] = n; + do + { + *noncev = mm256_bswap_32( + _mm256_set_epi32( n+7, n+6, n+5, n+4, n+3, n+2, n+1, n ) ); + + pdata[19] = n; sha256t_8way_hash( hash, vdata ); @@ -91,20 +88,24 @@ int scanhash_sha256t_8way( int thr_id, struct work *work, if ( fulltest( lane_hash, ptarget ) ) { - pdata[19] = n + lane; - nonces[ num_found++ ] = n + lane; - work_set_target_ratio( work, lane_hash ); + pdata[19] = n + lane; + work_set_target_ratio( work, lane_hash ); + if ( submit_work( mythr, work ) ) + applog( LOG_NOTICE, "Share %d submitted by thread %d, lane %d.", + accepted_share_count + rejected_share_count + 1, + thr_id, lane ); + else + applog( LOG_WARNING, "Failed to submit share." 
); } } n += 8; - } while ( (num_found == 0) && (n < max_nonce) - && !work_restart[thr_id].restart ); + } while ( (n < max_nonce-10) && !work_restart[thr_id].restart ); break; } *hashes_done = n - first_nonce + 1; - return num_found; + return 0; } #elif defined(SHA256T_4WAY) @@ -130,8 +131,8 @@ void sha256t_4way_hash( void* output, const void* input ) } -int scanhash_sha256t_4way( int thr_id, struct work *work, - uint32_t max_nonce, uint64_t *hashes_done ) +int scanhash_sha256t_4way( int thr_id, struct work *work, uint32_t max_nonce, + uint64_t *hashes_done, struct thr_info *mythr ) { uint32_t vdata[20*4] __attribute__ ((aligned (64))); uint32_t hash[8*4] __attribute__ ((aligned (32))); @@ -143,9 +144,8 @@ int scanhash_sha256t_4way( int thr_id, struct work *work, const uint32_t Htarg = ptarget[7]; const uint32_t first_nonce = pdata[19]; uint32_t n = first_nonce; - uint32_t *nonces = work->nonces; - int num_found = 0; - uint32_t *noncep = vdata + 76; // 19*4 + __m128i *noncev = (__m128i*)vdata + 19; // aligned + /* int */ thr_id = mythr->id; // thr_id arg is deprecated const uint64_t htmax[] = { 0, 0xF, @@ -160,8 +160,11 @@ int scanhash_sha256t_4way( int thr_id, struct work *work, 0xFFFF0000, 0 }; - for ( int k = 0; k < 19; k++ ) - be32enc( &edata[k], pdata[k] ); + casti_m128i( edata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) ); + casti_m128i( edata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) ); + casti_m128i( edata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) ); + casti_m128i( edata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) ); + casti_m128i( edata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) ); mm128_interleave_4x32( vdata, edata, edata, edata, edata, 640 ); sha256_4way_init( &sha256_ctx4 ); @@ -171,11 +174,8 @@ int scanhash_sha256t_4way( int thr_id, struct work *work, { uint32_t mask = masks[m]; do { - be32enc( noncep, n ); - be32enc( noncep +1, n+1 ); - be32enc( noncep +2, n+2 ); - be32enc( noncep +3, n+3 ); - pdata[19] = n; + *noncev = mm128_bswap_32( _mm_set_epi32( n+3,n+2,n+1,n ) ); + pdata[19] = n; sha256t_4way_hash( hash, vdata ); @@ -186,21 +186,25 @@ int scanhash_sha256t_4way( int thr_id, struct work *work, if ( fulltest( lane_hash, ptarget ) ) { - pdata[19] = n + lane; - nonces[ num_found++ ] = n + lane; - work_set_target_ratio( work, lane_hash ); + pdata[19] = n + lane; + work_set_target_ratio( work, lane_hash ); + if ( submit_work( mythr, work ) ) + applog( LOG_NOTICE, "Share %d submitted by thread %d, lane %d.", + accepted_share_count + rejected_share_count + 1, + thr_id, lane ); + else + applog( LOG_WARNING, "Failed to submit share." ); } } n += 4; - } while ( (num_found == 0) && (n < max_nonce) - && !work_restart[thr_id].restart ); + } while ( (n < max_nonce - 4) && !work_restart[thr_id].restart ); break; } *hashes_done = n - first_nonce + 1; - return num_found; + return 0; } #endif diff --git a/algo/sha/sha256t-gate.h b/algo/sha/sha256t-gate.h index 8728414..92a5945 100644 --- a/algo/sha/sha256t-gate.h +++ b/algo/sha/sha256t-gate.h @@ -4,12 +4,15 @@ #include #include "algo-gate-api.h" +// Override multi way on ryzen, SHA is better. 
+#if !defined(RYZEN_) #if defined(__SSE4_2__) #define SHA256T_4WAY #endif #if defined(__AVX2__) #define SHA256T_8WAY #endif +#endif bool register_blake2s_algo( algo_gate_t* gate ); @@ -17,18 +20,18 @@ bool register_blake2s_algo( algo_gate_t* gate ); void sha256t_8way_hash( void *output, const void *input ); int scanhash_sha256t_8way( int thr_id, struct work *work, uint32_t max_nonce, - uint64_t *hashes_done ); + uint64_t *hashes_done, struct thr_info *mythr ); #elif defined (SHA256T_4WAY) void sha256t_4way_hash( void *output, const void *input ); int scanhash_sha256t_4way( int thr_id, struct work *work, uint32_t max_nonce, - uint64_t *hashes_done ); + uint64_t *hashes_done, struct thr_info *mythr ); #else void sha256t_hash( void *output, const void *input ); int scanhash_sha256t( int thr_id, struct work *work, uint32_t max_nonce, - uint64_t *hashes_done ); + uint64_t *hashes_done, struct thr_info *mythr ); #endif diff --git a/algo/sha/sha256t.c b/algo/sha/sha256t.c index 9899188..549f8bf 100644 --- a/algo/sha/sha256t.c +++ b/algo/sha/sha256t.c @@ -39,7 +39,7 @@ void sha256t_hash( void* output, const void* input ) } int scanhash_sha256t( int thr_id, struct work *work, uint32_t max_nonce, - uint64_t *hashes_done) + uint64_t *hashes_done, struct thr_info *mythr ) { uint32_t *pdata = work->data; uint32_t *ptarget = work->target; @@ -52,6 +52,7 @@ int scanhash_sha256t( int thr_id, struct work *work, uint32_t max_nonce, uint32_t hash64[8] __attribute__((aligned(32))); #endif uint32_t endiandata[32]; + /* int */ thr_id = mythr->id; // thr_id arg is deprecated uint64_t htmax[] = { 0, diff --git a/algo/shabal/shabal-hash-4way.c b/algo/shabal/shabal-hash-4way.c index 90061c3..383e936 100644 --- a/algo/shabal/shabal-hash-4way.c +++ b/algo/shabal/shabal-hash-4way.c @@ -248,22 +248,22 @@ do { \ */ #define SWAP_BC \ do { \ - mm128_swap256_128( B0, C0 ); \ - mm128_swap256_128( B1, C1 ); \ - mm128_swap256_128( B2, C2 ); \ - mm128_swap256_128( B3, C3 ); \ - mm128_swap256_128( B4, C4 ); \ - mm128_swap256_128( B5, C5 ); \ - mm128_swap256_128( B6, C6 ); \ - mm128_swap256_128( B7, C7 ); \ - mm128_swap256_128( B8, C8 ); \ - mm128_swap256_128( B9, C9 ); \ - mm128_swap256_128( BA, CA ); \ - mm128_swap256_128( BB, CB ); \ - mm128_swap256_128( BC, CC ); \ - mm128_swap256_128( BD, CD ); \ - mm128_swap256_128( BE, CE ); \ - mm128_swap256_128( BF, CF ); \ + mm128_swap128_256( B0, C0 ); \ + mm128_swap128_256( B1, C1 ); \ + mm128_swap128_256( B2, C2 ); \ + mm128_swap128_256( B3, C3 ); \ + mm128_swap128_256( B4, C4 ); \ + mm128_swap128_256( B5, C5 ); \ + mm128_swap128_256( B6, C6 ); \ + mm128_swap128_256( B7, C7 ); \ + mm128_swap128_256( B8, C8 ); \ + mm128_swap128_256( B9, C9 ); \ + mm128_swap128_256( BA, CA ); \ + mm128_swap128_256( BB, CB ); \ + mm128_swap128_256( BC, CC ); \ + mm128_swap128_256( BD, CD ); \ + mm128_swap128_256( BE, CE ); \ + mm128_swap128_256( BF, CF ); \ } while (0) #define PERM_ELT(xa0, xa1, xb0, xb1, xb2, xb3, xc, xm) \ diff --git a/algo/shavite/shavite-hash-2way.c b/algo/shavite/shavite-hash-2way.c new file mode 100644 index 0000000..15149df --- /dev/null +++ b/algo/shavite/shavite-hash-2way.c @@ -0,0 +1,406 @@ +#include "shavite-hash-2way.h" +#include "algo/sha/sph_types.h" + +#include + +#if defined(__AVX2__) + +static const uint32_t IV512[] = +{ + 0x72FCCDD8, 0x79CA4727, 0x128A077B, 0x40D55AEC, + 0xD1901A06, 0x430AE307, 0xB29F5CD1, 0xDF07FBFC, + 0x8E45D73D, 0x681AB538, 0xBDE86578, 0xDD577E47, + 0xE275EADE, 0x502D9FCD, 0xB9357178, 0x022A4B9A +}; + +#define mm256_ror2x256hi_1x32( a, b ) \ + 
_mm256_blend_epi32( mm256_ror1x32_128( a ), \ + mm256_ror1x32_128( b ), 0x88 ) + +static void +c512_2way( shavite512_2way_context *ctx, const void *msg ) +{ + __m256i p0, p1, p2, p3, x; + __m256i k00, k01, k02, k03, k10, k11, k12, k13; + __m256i *m = (__m256i*)msg; + __m256i *h = (__m256i*)ctx->h; + int r; + + p0 = h[0]; + p1 = h[1]; + p2 = h[2]; + p3 = h[3]; + + // round + k00 = m[0]; + x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k00 ) ); + k01 = m[1]; + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) ); + k02 = m[2]; + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) ); + k03 = m[3]; + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) ); + + p0 = _mm256_xor_si256( p0, x ); + + k10 = m[4]; + x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k10 ) ); + k11 = m[5]; + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) ); + k12 = m[6]; + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) ); + k13 = m[7]; + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) ); + + p2 = _mm256_xor_si256( p2, x ); + + for ( r = 0; r < 3; r ++ ) + { + // round 1, 5, 9 + + k00 = _mm256_xor_si256( k13, mm256_ror1x32_128( + mm256_aesenc_2x128( k00 ) ) ); + + if ( r == 0 ) + k00 = _mm256_xor_si256( k00, _mm256_set_epi32( + ~ctx->count3, ctx->count2, ctx->count1, ctx->count0, + ~ctx->count3, ctx->count2, ctx->count1, ctx->count0 ) ); + + x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k00 ) ); + k01 = _mm256_xor_si256( k00, + mm256_ror1x32_128( mm256_aesenc_2x128( k01 ) ) ); + + if ( r == 1 ) + k01 = _mm256_xor_si256( k01, _mm256_set_epi32( + ~ctx->count0, ctx->count1, ctx->count2, ctx->count3, + ~ctx->count0, ctx->count1, ctx->count2, ctx->count3 ) ); + + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) ); + k02 = _mm256_xor_si256( k01, + mm256_ror1x32_128( mm256_aesenc_2x128( k02 ) ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) ); + k03 = _mm256_xor_si256( k02, + mm256_ror1x32_128( mm256_aesenc_2x128( k03 ) ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) ); + + p3 = _mm256_xor_si256( p3, x ); + + k10 = _mm256_xor_si256( k03, + mm256_ror1x32_128( mm256_aesenc_2x128( k10 ) ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k10 ) ); + k11 = _mm256_xor_si256( k10, + mm256_ror1x32_128( mm256_aesenc_2x128( k11 ) ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) ); + k12 = _mm256_xor_si256( k11, + mm256_ror1x32_128( mm256_aesenc_2x128( k12 ) ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) ); + k13 = _mm256_xor_si256( k12, + mm256_ror1x32_128( mm256_aesenc_2x128( k13 ) ) ); + + if ( r == 2 ) + k13 = _mm256_xor_si256( k13, _mm256_set_epi32( + ~ctx->count1, ctx->count0, ctx->count3, ctx->count2, + ~ctx->count1, ctx->count0, ctx->count3, ctx->count2 ) ); + + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) ); + p1 = _mm256_xor_si256( p1, x ); + + // round 2, 6, 10 + + k00 = _mm256_xor_si256( k00, mm256_ror2x256hi_1x32( k12, k13 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k00 ) ); + k01 = _mm256_xor_si256( k01, mm256_ror2x256hi_1x32( k13, k00 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) ); + k02 = _mm256_xor_si256( k02, mm256_ror2x256hi_1x32( k00, k01 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) ); + k03 = _mm256_xor_si256( k03, mm256_ror2x256hi_1x32( k01, k02 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) ); + + p2 = _mm256_xor_si256( p2, x ); + + k10 = _mm256_xor_si256( k10, mm256_ror2x256hi_1x32( k02, k03 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k10 ) ); + k11 = _mm256_xor_si256( k11, mm256_ror2x256hi_1x32( k03, k10 ) ); 
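Two avxdefs.h helpers carry this compression function: mm256_aesenc_2x128, which as used here applies one unkeyed AES round to each 128-bit lane, and mm256_ror2x256hi_1x32, defined just above from mm256_ror1x32_128 plus a blend. Assuming mm256_ror1x32_128 rotates each 128-bit lane right by one 32-bit element, the macro yields, per lane, the three high words of its first argument with the low word of the second on top, mirroring the mm128_ror256hi_1x32 used by the single-stream SSE code. A sketch in plain intrinsics (helper names illustrative):

#include <immintrin.h>

// One AES round with an all-zero round key on each 128-bit lane.
// Plain AVX2 has no 256-bit AESENC, so split, encrypt, recombine.
// (With VAES this collapses to a single _mm256_aesenc_epi128.)
static inline __m256i aesenc_2x128( __m256i x )
{
   __m128i lo = _mm_aesenc_si128( _mm256_castsi256_si128( x ),
                                  _mm_setzero_si128() );
   __m128i hi = _mm_aesenc_si128( _mm256_extracti128_si256( x, 1 ),
                                  _mm_setzero_si128() );
   return _mm256_inserti128_si256( _mm256_castsi128_si256( lo ), hi, 1 );
}

// Per 128-bit lane the result is { b0, a3, a2, a1 }: drop a's low word
// and append b's low word, as mm128_ror256hi_1x32 does in the SSE path.
static inline __m256i ror2x256hi_1x32( __m256i a, __m256i b )
{
   return _mm256_blend_epi32( _mm256_shuffle_epi32( a, 0x39 ),
                              _mm256_shuffle_epi32( b, 0x39 ), 0x88 );
}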
+ x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) ); + k12 = _mm256_xor_si256( k12, mm256_ror2x256hi_1x32( k10, k11 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) ); + k13 = _mm256_xor_si256( k13, mm256_ror2x256hi_1x32( k11, k12 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) ); + + p0 = _mm256_xor_si256( p0, x ); + + // round 3, 7, 11 + + k00 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k00 ) ), k13 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k00 ) ); + k01 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k01 ) ), k00 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) ); + k02 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k02 ) ), k01 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) ); + k03 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k03 ) ), k02 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) ); + + p1 = _mm256_xor_si256( p1, x ); + + k10 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k10 ) ), k03 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k10 ) ); + k11 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k11 ) ), k10 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) ); + k12 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k12 ) ), k11 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) ); + k13 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k13 ) ), k12 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) ); + + p3 = _mm256_xor_si256( p3, x ); + + // round 4, 8, 12 + + k00 = _mm256_xor_si256( k00, mm256_ror2x256hi_1x32( k12, k13 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k00 ) ); + k01 = _mm256_xor_si256( k01, mm256_ror2x256hi_1x32( k13, k00 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) ); + k02 = _mm256_xor_si256( k02, mm256_ror2x256hi_1x32( k00, k01 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) ); + k03 = _mm256_xor_si256( k03, mm256_ror2x256hi_1x32( k01, k02 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) ); + + p0 = _mm256_xor_si256( p0, x ); + + k10 = _mm256_xor_si256( k10, mm256_ror2x256hi_1x32( k02, k03 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k10 ) ); + k11 = _mm256_xor_si256( k11, mm256_ror2x256hi_1x32( k03, k10 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) ); + k12 = _mm256_xor_si256( k12, mm256_ror2x256hi_1x32( k10, k11 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) ); + k13 = _mm256_xor_si256( k13, mm256_ror2x256hi_1x32( k11, k12 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) ); + + p2 = _mm256_xor_si256( p2, x ); + + } + + // round 13 + + k00 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k00 ) ), k13 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k00 ) ); + k01 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k01 ) ), k00 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) ); + k02 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k02 ) ), k01 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) ); + k03 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k03 ) ), k02 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) ); + + p3 = _mm256_xor_si256( p3, x ); + + k10 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k10 ) ), k03 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k10 ) ); + k11 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k11 ) ), k10 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) ); + + k12 = 
mm256_ror1x32_128( mm256_aesenc_2x128( k12 ) ); + k12 = _mm256_xor_si256( k12, _mm256_xor_si256( k11, _mm256_set_epi32( + ~ctx->count2, ctx->count3, ctx->count0, ctx->count1, + ~ctx->count2, ctx->count3, ctx->count0, ctx->count1 ) ) ); + + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) ); + k13 = _mm256_xor_si256( mm256_ror1x32_128( + mm256_aesenc_2x128( k13 ) ), k12 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) ); + + p1 = _mm256_xor_si256( p1, x ); + + h[0] = _mm256_xor_si256( h[0], p2 ); + h[1] = _mm256_xor_si256( h[1], p3 ); + h[2] = _mm256_xor_si256( h[2], p0 ); + h[3] = _mm256_xor_si256( h[3], p1 ); +} + +void shavite512_2way_init( shavite512_2way_context *ctx ) +{ + casti_m256i( ctx->h, 0 ) = + _mm256_set_epi32( IV512[ 3], IV512[ 2], IV512[ 1], IV512[ 0], + IV512[ 3], IV512[ 2], IV512[ 1], IV512[ 0] ); + casti_m256i( ctx->h, 1 ) = + _mm256_set_epi32( IV512[ 7], IV512[ 6], IV512[ 5], IV512[ 4], + IV512[ 7], IV512[ 6], IV512[ 5], IV512[ 4] ); + casti_m256i( ctx->h, 2 ) = + _mm256_set_epi32( IV512[11], IV512[10], IV512[ 9], IV512[ 8], + IV512[11], IV512[10], IV512[ 9], IV512[ 8] ); + casti_m256i( ctx->h, 3 ) = + _mm256_set_epi32( IV512[15], IV512[14], IV512[13], IV512[12], + IV512[15], IV512[14], IV512[13], IV512[12] ); + ctx->ptr = 0; + ctx->count0 = 0; + ctx->count1 = 0; + ctx->count2 = 0; + ctx->count3 = 0; +} + +void shavite512_2way_update( shavite512_2way_context *ctx, const void *data, + size_t len ) +{ + unsigned char *buf = ctx->buf; + size_t ptr = ctx->ptr; + + while ( len > 0 ) + { + size_t clen; + + clen = (sizeof ctx->buf) - ptr; + if ( clen > len << 1 ) + clen = len << 1; + memcpy( buf + ptr, data, clen ); + data = (const unsigned char *)data + clen; + ptr += clen; + len -= clen >> 1; + if ( ptr == sizeof ctx->buf ) + { + if ( ( ctx->count0 = ctx->count0 + 1024 ) == 0 ) + { + ctx->count1 = ctx->count1 + 1; + if ( ctx->count1 == 0 ) + { + ctx->count2 = ctx->count2 + 1; + if ( ctx->count2 == 0 ) + ctx->count3 = ctx->count3 + 1; + } + } + c512_2way( ctx, buf ); + ptr = 0; + } + } + ctx->ptr = ptr; +} + +void shavite512_2way_close( shavite512_2way_context *ctx, void *dst ) +{ + unsigned char *buf; + union + { + uint32_t u32[4]; + uint16_t u16[8]; + } count; + + buf = ctx->buf; + uint32_t vp = ctx->ptr>>5; + + // Terminating byte then zero pad + casti_m256i( buf, vp++ ) = _mm256_set_epi32( 0,0,0,0x80, 0,0,0,0x80 ); + + // Zero pad full vectors up to count + for ( ; vp < 6; vp++ ) + casti_m256i( buf, vp ) = m256_zero; + + // Count = { 0, 16, 64, 80 }. Outsize = 16 u32 = 512 bits = 0x0200 + // Count is misaligned to 16 bits and straddles a vector. + // Use u32 overlay to stage then u16 to load buf. 
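For reference, the staging that follows rebuilds, in each 128-bit lane, the same tail bytes the scalar shavite_big_close writes: the four 32-bit bit counters land at byte offset 110 of the 128-byte block, two bytes short of a 16-byte boundary, which is why they are staged through a u16 overlay and split across buf vectors 6 and 7, with the 0x0200 digest-size word closing the block. A scalar sketch of that per-lane layout (helper name illustrative, little-endian host assumed; the 0x80 terminator and zero padding are written separately, as above):

#include <stdint.h>
#include <string.h>

// Tail of one 128-byte SHAvite-512 block, per lane:
//   [110..125]  count0..count3, little-endian 32-bit words
//   [126..127]  0x0200, the digest size in bits, little-endian
static void shavite512_tail( uint8_t blk[128], uint32_t c0, uint32_t c1,
                             uint32_t c2, uint32_t c3 )
{
   uint32_t cnt[4] = { c0, c1, c2, c3 };
   memcpy( blk + 110, cnt, 16 );    // little-endian host assumed
   blk[126] = 0x00;                 // 512 = 0x0200
   blk[127] = 0x02;
}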
+ count.u32[0] = ctx->count0 += (ctx->ptr << 2); // ptr/2 * 8 + count.u32[1] = ctx->count1; + count.u32[2] = ctx->count2; + count.u32[3] = ctx->count3; + + casti_m256i( buf, 6 ) = _mm256_set_epi16( count.u16[0], 0,0,0,0,0,0,0, + count.u16[0], 0,0,0,0,0,0,0 ); + casti_m256i( buf, 7 ) = _mm256_set_epi16( + 0x0200 , count.u16[7], count.u16[6], count.u16[5], + count.u16[4], count.u16[3], count.u16[2], count.u16[1], + 0x0200 , count.u16[7], count.u16[6], count.u16[5], + count.u16[4], count.u16[3], count.u16[2], count.u16[1] ); + + c512_2way( ctx, buf); + + casti_m256i( dst, 0 ) = casti_m256i( ctx->h, 0 ); + casti_m256i( dst, 1 ) = casti_m256i( ctx->h, 1 ); + casti_m256i( dst, 2 ) = casti_m256i( ctx->h, 2 ); + casti_m256i( dst, 3 ) = casti_m256i( ctx->h, 3 ); +} + +void shavite512_2way_update_close( shavite512_2way_context *ctx, void *dst, + const void *data, size_t len ) +{ + unsigned char *buf = ctx->buf; + size_t ptr = ctx->ptr; + + // process full blocks and load buf with remainder. + while ( len > 0 ) + { + size_t clen; + + clen = (sizeof ctx->buf) - ptr; + if ( clen > len << 1 ) + clen = len << 1; + memcpy( buf + ptr, data, clen ); + data = (const unsigned char *)data + clen; + ptr += clen; + len -= clen >> 1; + if ( ptr == sizeof ctx->buf ) + { + if ( ( ctx->count0 = ctx->count0 + 1024 ) == 0 ) + { + ctx->count1 = ctx->count1 + 1; + if ( ctx->count1 == 0 ) + { + ctx->count2 = ctx->count2 + 1; + if ( ctx->count2 == 0 ) + ctx->count3 = ctx->count3 + 1; + } + } + c512_2way( ctx, buf ); + ptr = 0; + } + } + + uint32_t vp = ptr>>5; + + // Terminating byte then zero pad + casti_m256i( buf, vp++ ) = _mm256_set_epi32( 0,0,0,0x80, 0,0,0,0x80 ); + + // Zero pad full vectors up to count + for ( ; vp < 6; vp++ ) + casti_m256i( buf, vp ) = m256_zero; + + // Count = { 0, 16, 64, 80 }. Outsize = 16 u32 = 512 bits = 0x0200 + // Count is misaligned to 16 bits and straddles a vector. + // Use u32 overlay to stage then u16 to load buf. 
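One detail of the 2-way buffering above worth spelling out: len counts bytes per lane while buf holds both lanes interleaved, so a full 256-byte buffer is one 128-byte block per lane and the bit counter advances by 1024 each time c512_2way runs, with carries rippling through the four counter words. A minimal sketch of that carry chain (names illustrative):

#include <stdint.h>

// 128-bit message bit counter kept as four u32 words; one full
// interleaved buffer is a 128-byte (1024-bit) block per lane.
typedef struct { uint32_t c0, c1, c2, c3; } bitcount128;

static void bitcount128_add_block( bitcount128 *c )
{
   if ( ( c->c0 += 1024 ) == 0 )      // propagate the carry on wrap
      if ( ++c->c1 == 0 )
         if ( ++c->c2 == 0 )
            ++c->c3;
}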
+ union + { + uint32_t u32[4]; + uint16_t u16[8]; + } count; + + count.u32[0] = ctx->count0 += (ptr << 2); // ptr/2 * 8 + count.u32[1] = ctx->count1; + count.u32[2] = ctx->count2; + count.u32[3] = ctx->count3; + + casti_m256i( buf, 6 ) = _mm256_set_epi16( count.u16[0], 0,0,0,0,0,0,0, + count.u16[0], 0,0,0,0,0,0,0 ); + casti_m256i( buf, 7 ) = _mm256_set_epi16( + 0x0200 , count.u16[7], count.u16[6], count.u16[5], + count.u16[4], count.u16[3], count.u16[2], count.u16[1], + 0x0200 , count.u16[7], count.u16[6], count.u16[5], + count.u16[4], count.u16[3], count.u16[2], count.u16[1] ); + + c512_2way( ctx, buf); + + casti_m256i( dst, 0 ) = casti_m256i( ctx->h, 0 ); + casti_m256i( dst, 1 ) = casti_m256i( ctx->h, 1 ); + casti_m256i( dst, 2 ) = casti_m256i( ctx->h, 2 ); + casti_m256i( dst, 3 ) = casti_m256i( ctx->h, 3 ); +} + +#endif // AVX2 diff --git a/algo/shavite/shavite-hash-2way.h b/algo/shavite/shavite-hash-2way.h new file mode 100644 index 0000000..28ca009 --- /dev/null +++ b/algo/shavite/shavite-hash-2way.h @@ -0,0 +1,25 @@ +#ifndef SHAVITE_HASH_2WAY_H__ +#define SHAVITE_HASH_2WAY_H__ + +#if defined(__AVX2__) + +#include "avxdefs.h" + +typedef struct { + unsigned char buf[128<<1]; + uint32_t h[16<<1]; + size_t ptr; + uint32_t count0, count1, count2, count3; +} shavite512_2way_context __attribute__ ((aligned (64))); + +void shavite512_2way_init( shavite512_2way_context *ctx ); +void shavite512_2way_update( shavite512_2way_context *ctx, const void *data, + size_t len ); +void shavite512_2way_close( shavite512_2way_context *ctx, void *dst ); +void shavite512_2way_update_close( shavite512_2way_context *ctx, void *dst, + const void *data, size_t len ); + +#endif // AVX2 + +#endif // SHAVITE_HASH_2WAY_H__ + diff --git a/algo/shavite/sph-shavite-aesni.c b/algo/shavite/sph-shavite-aesni.c index eeaf922..6046659 100644 --- a/algo/shavite/sph-shavite-aesni.c +++ b/algo/shavite/sph-shavite-aesni.c @@ -102,35 +102,31 @@ c512( sph_shavite_big_context *sc, const void *msg ) k00 = m[0]; x = _mm_xor_si128( p1, k00 ); x = _mm_aesenc_si128( x, m128_zero ); - k01 = m[1]; x = _mm_xor_si128( x, k01 ); x = _mm_aesenc_si128( x, m128_zero ); - k02 = m[2]; x = _mm_xor_si128( x, k02 ); x = _mm_aesenc_si128( x, m128_zero ); - k03 = m[3]; x = _mm_xor_si128( x, k03 ); x = _mm_aesenc_si128( x, m128_zero ); + p0 = _mm_xor_si128( p0, x ); k10 = m[4]; x = _mm_xor_si128( p3, k10 ); x = _mm_aesenc_si128( x, m128_zero ); - k11 = m[5]; x = _mm_xor_si128( x, k11 ); x = _mm_aesenc_si128( x, m128_zero ); - k12 = m[6]; x = _mm_xor_si128( x, k12 ); x = _mm_aesenc_si128( x, m128_zero ); - k13 = m[7]; x = _mm_xor_si128( x, k13 ); x = _mm_aesenc_si128( x, m128_zero ); + p2 = _mm_xor_si128( p2, x ); for ( r = 0; r < 3; r ++ ) @@ -156,15 +152,15 @@ c512( sph_shavite_big_context *sc, const void *msg ) x = _mm_aesenc_si128( x, m128_zero ); k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) ); k02 = _mm_xor_si128( k02, k01 ); - x = _mm_xor_si128( x, k02 ); x = _mm_aesenc_si128( x, m128_zero ); k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) ); k03 = _mm_xor_si128( k03, k02 ); - x = _mm_xor_si128( x, k03 ); x = _mm_aesenc_si128( x, m128_zero ); + p3 = _mm_xor_si128( p3, x ); + k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) ); k10 = _mm_xor_si128( k10, k03 ); @@ -172,12 +168,10 @@ c512( sph_shavite_big_context *sc, const void *msg ) x = _mm_aesenc_si128( x, m128_zero ); k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) ); k11 = _mm_xor_si128( k11, k10 ); - x = _mm_xor_si128( x, k11 ); x = _mm_aesenc_si128( x, 
m128_zero ); k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) ); k12 = _mm_xor_si128( k12, k11 ); - x = _mm_xor_si128( x, k12 ); x = _mm_aesenc_si128( x, m128_zero ); k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) ); @@ -196,118 +190,103 @@ c512( sph_shavite_big_context *sc, const void *msg ) k00 = _mm_xor_si128( k00, mm128_ror256hi_1x32( k12, k13 ) ); x = _mm_xor_si128( p3, k00 ); x = _mm_aesenc_si128( x, m128_zero ); - k01 = _mm_xor_si128( k01, mm128_ror256hi_1x32( k13, k00 ) ); x = _mm_xor_si128( x, k01 ); x = _mm_aesenc_si128( x, m128_zero ); - k02 = _mm_xor_si128( k02, mm128_ror256hi_1x32( k00, k01 ) ); x = _mm_xor_si128( x, k02 ); x = _mm_aesenc_si128( x, m128_zero ); - k03 = _mm_xor_si128( k03, mm128_ror256hi_1x32( k01, k02 ) ); x = _mm_xor_si128( x, k03 ); x = _mm_aesenc_si128( x, m128_zero ); p2 = _mm_xor_si128( p2, x ); + k10 = _mm_xor_si128( k10, mm128_ror256hi_1x32( k02, k03 ) ); x = _mm_xor_si128( p1, k10 ); x = _mm_aesenc_si128( x, m128_zero ); - k11 = _mm_xor_si128( k11, mm128_ror256hi_1x32( k03, k10 ) ); x = _mm_xor_si128( x, k11 ); x = _mm_aesenc_si128( x, m128_zero ); - k12 = _mm_xor_si128( k12, mm128_ror256hi_1x32( k10, k11 ) ); x = _mm_xor_si128( x, k12 ); x = _mm_aesenc_si128( x, m128_zero ); - k13 = _mm_xor_si128( k13, mm128_ror256hi_1x32( k11, k12 ) ); x = _mm_xor_si128( x, k13 ); x = _mm_aesenc_si128( x, m128_zero ); + p0 = _mm_xor_si128( p0, x ); // round 3, 7, 11 k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) ); k00 = _mm_xor_si128( k00, k13 ); - x = _mm_xor_si128( p2, k00 ); x = _mm_aesenc_si128( x, m128_zero ); - k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) ); k01 = _mm_xor_si128( k01, k00 ); - x = _mm_xor_si128( x, k01 ); x = _mm_aesenc_si128( x, m128_zero ); k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) ); k02 = _mm_xor_si128( k02, k01 ); - x = _mm_xor_si128( x, k02 ); x = _mm_aesenc_si128( x, m128_zero ); k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) ); k03 = _mm_xor_si128( k03, k02 ); - x = _mm_xor_si128( x, k03 ); x = _mm_aesenc_si128( x, m128_zero ); + p1 = _mm_xor_si128( p1, x ); + k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) ); k10 = _mm_xor_si128( k10, k03 ); - x = _mm_xor_si128( p0, k10 ); x = _mm_aesenc_si128( x, m128_zero ); k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) ); k11 = _mm_xor_si128( k11, k10 ); - x = _mm_xor_si128( x, k11 ); x = _mm_aesenc_si128( x, m128_zero ); k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) ); k12 = _mm_xor_si128( k12, k11 ); - x = _mm_xor_si128( x, k12 ); x = _mm_aesenc_si128( x, m128_zero ); k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) ); k13 = _mm_xor_si128( k13, k12 ); - x = _mm_xor_si128( x, k13 ); x = _mm_aesenc_si128( x, m128_zero ); + p3 = _mm_xor_si128( p3, x ); // round 4, 8, 12 k00 = _mm_xor_si128( k00, mm128_ror256hi_1x32( k12, k13 ) ); - x = _mm_xor_si128( p1, k00 ); x = _mm_aesenc_si128( x, m128_zero ); k01 = _mm_xor_si128( k01, mm128_ror256hi_1x32( k13, k00 ) ); - x = _mm_xor_si128( x, k01 ); x = _mm_aesenc_si128( x, m128_zero ); k02 = _mm_xor_si128( k02, mm128_ror256hi_1x32( k00, k01 ) ); - x = _mm_xor_si128( x, k02 ); x = _mm_aesenc_si128( x, m128_zero ); k03 = _mm_xor_si128( k03, mm128_ror256hi_1x32( k01, k02 ) ); - x = _mm_xor_si128( x, k03 ); x = _mm_aesenc_si128( x, m128_zero ); - p0 = _mm_xor_si128( p0, x ); - k10 = _mm_xor_si128( k10, mm128_ror256hi_1x32( k02, k03 ) ); + p0 = _mm_xor_si128( p0, x ); + + k10 = _mm_xor_si128( k10, mm128_ror256hi_1x32( k02, k03 ) ); x = _mm_xor_si128( p3, k10 ); x 
= _mm_aesenc_si128( x, m128_zero ); k11 = _mm_xor_si128( k11, mm128_ror256hi_1x32( k03, k10 ) ); - x = _mm_xor_si128( x, k11 ); x = _mm_aesenc_si128( x, m128_zero ); k12 = _mm_xor_si128( k12, mm128_ror256hi_1x32( k10, k11 ) ); - x = _mm_xor_si128( x, k12 ); x = _mm_aesenc_si128( x, m128_zero ); k13 = _mm_xor_si128( k13, mm128_ror256hi_1x32( k11, k12 ) ); - x = _mm_xor_si128( x, k13 ); x = _mm_aesenc_si128( x, m128_zero ); + p2 = _mm_xor_si128( p2, x ); } @@ -315,46 +294,41 @@ c512( sph_shavite_big_context *sc, const void *msg ) k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) ); k00 = _mm_xor_si128( k00, k13 ); - x = _mm_xor_si128( p0, k00 ); x = _mm_aesenc_si128( x, m128_zero ); k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) ); k01 = _mm_xor_si128( k01, k00 ); - x = _mm_xor_si128( x, k01 ); x = _mm_aesenc_si128( x, m128_zero ); k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) ); k02 = _mm_xor_si128( k02, k01 ); - x = _mm_xor_si128( x, k02 ); x = _mm_aesenc_si128( x, m128_zero ); k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) ); k03 = _mm_xor_si128( k03, k02 ); - x = _mm_xor_si128( x, k03 ); x = _mm_aesenc_si128( x, m128_zero ); + p3 = _mm_xor_si128( p3, x ); + k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) ); k10 = _mm_xor_si128( k10, k03 ); - x = _mm_xor_si128( p2, k10 ); x = _mm_aesenc_si128( x, m128_zero ); k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) ); k11 = _mm_xor_si128( k11, k10 ); - x = _mm_xor_si128( x, k11 ); x = _mm_aesenc_si128( x, m128_zero ); k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) ); k12 = _mm_xor_si128( k12, _mm_xor_si128( k11, _mm_set_epi32( ~sc->count2, sc->count3, sc->count0, sc->count1 ) ) ); - x = _mm_xor_si128( x, k12 ); x = _mm_aesenc_si128( x, m128_zero ); k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) ); k13 = _mm_xor_si128( k13, k12 ); - x = _mm_xor_si128( x, k13 ); x = _mm_aesenc_si128( x, m128_zero ); + p1 = _mm_xor_si128( p1, x ); h[0] = _mm_xor_si128( h[0], p2 ); @@ -427,6 +401,9 @@ shavite_big_aesni_close( sph_shavite_big_context *sc, unsigned ub, unsigned n, count1 = sc->count1; count2 = sc->count2; count3 = sc->count3; + + + z = 0x80 >> n; z = ((ub & -z) | z) & 0xFF; if (ptr == 0 && n == 0) { @@ -443,6 +420,7 @@ shavite_big_aesni_close( sph_shavite_big_context *sc, unsigned ub, unsigned n, memset(buf, 0, 110); sc->count0 = sc->count1 = sc->count2 = sc->count3 = 0; } + sph_enc32le(buf + 110, count0); sph_enc32le(buf + 114, count1); sph_enc32le(buf + 118, count2); diff --git a/algo/shavite/sse2/shavite.c b/algo/shavite/sse2/shavite.c deleted file mode 100644 index 85074f3..0000000 --- a/algo/shavite/sse2/shavite.c +++ /dev/null @@ -1,1764 +0,0 @@ -/* $Id: shavite.c 227 2010-06-16 17:28:38Z tp $ */ -/* - * SHAvite-3 implementation. - * - * ==========================(LICENSE BEGIN)============================ - * - * Copyright (c) 2007-2010 Projet RNRT SAPHIR - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * ===========================(LICENSE END)============================= - * - * @author Thomas Pornin - */ - -#include -#include - -#include "sph_shavite.h" - -#ifdef __cplusplus -extern "C"{ -#endif - -#if SPH_SMALL_FOOTPRINT && !defined SPH_SMALL_FOOTPRINT_SHAVITE -#define SPH_SMALL_FOOTPRINT_SHAVITE 1 -#endif - -#ifdef _MSC_VER -#pragma warning (disable: 4146) -#endif - -#define C32 SPH_C32 - -/* - * As of round 2 of the SHA-3 competition, the published reference - * implementation and test vectors are wrong, because they use - * big-endian AES tables while the internal decoding uses little-endian. - * The code below follows the specification. To turn it into a code - * which follows the reference implementation (the one called "BugFix" - * on the SHAvite-3 web site, published on Nov 23rd, 2009), comment out - * the code below (from the '#define AES_BIG_ENDIAN...' to the definition - * of the AES_ROUND_NOKEY macro) and replace it with the version which - * is commented out afterwards. - */ - -#define AES_BIG_ENDIAN 0 -#include "aes_helper.c" - -static const sph_u32 IV224[] = { - C32(0x6774F31C), C32(0x990AE210), C32(0xC87D4274), C32(0xC9546371), - C32(0x62B2AEA8), C32(0x4B5801D8), C32(0x1B702860), C32(0x842F3017) -}; - -static const sph_u32 IV256[] = { - C32(0x49BB3E47), C32(0x2674860D), C32(0xA8B392AC), C32(0x021AC4E6), - C32(0x409283CF), C32(0x620E5D86), C32(0x6D929DCB), C32(0x96CC2A8B) -}; - -static const sph_u32 IV384[] = { - C32(0x83DF1545), C32(0xF9AAEC13), C32(0xF4803CB0), C32(0x11FE1F47), - C32(0xDA6CD269), C32(0x4F53FCD7), C32(0x950529A2), C32(0x97908147), - C32(0xB0A4D7AF), C32(0x2B9132BF), C32(0x226E607D), C32(0x3C0F8D7C), - C32(0x487B3F0F), C32(0x04363E22), C32(0x0155C99C), C32(0xEC2E20D3) -}; - -static const sph_u32 IV512[] = { - C32(0x72FCCDD8), C32(0x79CA4727), C32(0x128A077B), C32(0x40D55AEC), - C32(0xD1901A06), C32(0x430AE307), C32(0xB29F5CD1), C32(0xDF07FBFC), - C32(0x8E45D73D), C32(0x681AB538), C32(0xBDE86578), C32(0xDD577E47), - C32(0xE275EADE), C32(0x502D9FCD), C32(0xB9357178), C32(0x022A4B9A) -}; - -#define AES_ROUND_NOKEY(x0, x1, x2, x3) do { \ - sph_u32 t0 = (x0); \ - sph_u32 t1 = (x1); \ - sph_u32 t2 = (x2); \ - sph_u32 t3 = (x3); \ - AES_ROUND_NOKEY_LE(t0, t1, t2, t3, x0, x1, x2, x3); \ - } while (0) - -/* - * This is the code needed to match the "reference implementation" as - * published on Nov 23rd, 2009, instead of the published specification. 
- * - -#define AES_BIG_ENDIAN 1 -#include "aes_helper.c" - -static const sph_u32 IV224[] = { - C32(0xC4C67795), C32(0xC0B1817F), C32(0xEAD88924), C32(0x1ABB1BB0), - C32(0xE0C29152), C32(0xBDE046BA), C32(0xAEEECF99), C32(0x58D509D8) -}; - -static const sph_u32 IV256[] = { - C32(0x3EECF551), C32(0xBF10819B), C32(0xE6DC8559), C32(0xF3E23FD5), - C32(0x431AEC73), C32(0x79E3F731), C32(0x98325F05), C32(0xA92A31F1) -}; - -static const sph_u32 IV384[] = { - C32(0x71F48510), C32(0xA903A8AC), C32(0xFE3216DD), C32(0x0B2D2AD4), - C32(0x6672900A), C32(0x41032819), C32(0x15A7D780), C32(0xB3CAB8D9), - C32(0x34EF4711), C32(0xDE019FE8), C32(0x4D674DC4), C32(0xE056D96B), - C32(0xA35C016B), C32(0xDD903BA7), C32(0x8C1B09B4), C32(0x2C3E9F25) -}; - -static const sph_u32 IV512[] = { - C32(0xD5652B63), C32(0x25F1E6EA), C32(0xB18F48FA), C32(0xA1EE3A47), - C32(0xC8B67B07), C32(0xBDCE48D3), C32(0xE3937B78), C32(0x05DB5186), - C32(0x613BE326), C32(0xA11FA303), C32(0x90C833D4), C32(0x79CEE316), - C32(0x1E1AF00F), C32(0x2829B165), C32(0x23B25F80), C32(0x21E11499) -}; - -#define AES_ROUND_NOKEY(x0, x1, x2, x3) do { \ - sph_u32 t0 = (x0); \ - sph_u32 t1 = (x1); \ - sph_u32 t2 = (x2); \ - sph_u32 t3 = (x3); \ - AES_ROUND_NOKEY_BE(t0, t1, t2, t3, x0, x1, x2, x3); \ - } while (0) - - */ - -#define KEY_EXPAND_ELT(k0, k1, k2, k3) do { \ - sph_u32 kt; \ - AES_ROUND_NOKEY(k1, k2, k3, k0); \ - kt = (k0); \ - (k0) = (k1); \ - (k1) = (k2); \ - (k2) = (k3); \ - (k3) = kt; \ - } while (0) - -#if SPH_SMALL_FOOTPRINT_SHAVITE - -/* - * This function assumes that "msg" is aligned for 32-bit access. - */ -static void -c256(sph_shavite_small_context *sc, const void *msg) -{ - sph_u32 p0, p1, p2, p3, p4, p5, p6, p7; - sph_u32 rk[144]; - size_t u; - int r, s; - -#if SPH_LITTLE_ENDIAN - memcpy(rk, msg, 64); -#else - for (u = 0; u < 16; u += 4) { - rk[u + 0] = sph_dec32le_aligned( - (const unsigned char *)msg + (u << 2) + 0); - rk[u + 1] = sph_dec32le_aligned( - (const unsigned char *)msg + (u << 2) + 4); - rk[u + 2] = sph_dec32le_aligned( - (const unsigned char *)msg + (u << 2) + 8); - rk[u + 3] = sph_dec32le_aligned( - (const unsigned char *)msg + (u << 2) + 12); - } -#endif - u = 16; - for (r = 0; r < 4; r ++) { - for (s = 0; s < 2; s ++) { - sph_u32 x0, x1, x2, x3; - - x0 = rk[u - 15]; - x1 = rk[u - 14]; - x2 = rk[u - 13]; - x3 = rk[u - 16]; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk[u + 0] = x0 ^ rk[u - 4]; - rk[u + 1] = x1 ^ rk[u - 3]; - rk[u + 2] = x2 ^ rk[u - 2]; - rk[u + 3] = x3 ^ rk[u - 1]; - if (u == 16) { - rk[ 16] ^= sc->count0; - rk[ 17] ^= SPH_T32(~sc->count1); - } else if (u == 56) { - rk[ 57] ^= sc->count1; - rk[ 58] ^= SPH_T32(~sc->count0); - } - u += 4; - - x0 = rk[u - 15]; - x1 = rk[u - 14]; - x2 = rk[u - 13]; - x3 = rk[u - 16]; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk[u + 0] = x0 ^ rk[u - 4]; - rk[u + 1] = x1 ^ rk[u - 3]; - rk[u + 2] = x2 ^ rk[u - 2]; - rk[u + 3] = x3 ^ rk[u - 1]; - if (u == 84) { - rk[ 86] ^= sc->count1; - rk[ 87] ^= SPH_T32(~sc->count0); - } else if (u == 124) { - rk[124] ^= sc->count0; - rk[127] ^= SPH_T32(~sc->count1); - } - u += 4; - } - for (s = 0; s < 4; s ++) { - rk[u + 0] = rk[u - 16] ^ rk[u - 3]; - rk[u + 1] = rk[u - 15] ^ rk[u - 2]; - rk[u + 2] = rk[u - 14] ^ rk[u - 1]; - rk[u + 3] = rk[u - 13] ^ rk[u - 0]; - u += 4; - } - } - - p0 = sc->h[0x0]; - p1 = sc->h[0x1]; - p2 = sc->h[0x2]; - p3 = sc->h[0x3]; - p4 = sc->h[0x4]; - p5 = sc->h[0x5]; - p6 = sc->h[0x6]; - p7 = sc->h[0x7]; - u = 0; - for (r = 0; r < 6; r ++) { - sph_u32 x0, x1, x2, x3; - - x0 = p4 ^ rk[u ++]; - x1 = p5 ^ rk[u ++]; - x2 = p6 ^ 
rk[u ++]; - x3 = p7 ^ rk[u ++]; - AES_ROUND_NOKEY(x0, x1, x2, x3); - x0 ^= rk[u ++]; - x1 ^= rk[u ++]; - x2 ^= rk[u ++]; - x3 ^= rk[u ++]; - AES_ROUND_NOKEY(x0, x1, x2, x3); - x0 ^= rk[u ++]; - x1 ^= rk[u ++]; - x2 ^= rk[u ++]; - x3 ^= rk[u ++]; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p0 ^= x0; - p1 ^= x1; - p2 ^= x2; - p3 ^= x3; - - x0 = p0 ^ rk[u ++]; - x1 = p1 ^ rk[u ++]; - x2 = p2 ^ rk[u ++]; - x3 = p3 ^ rk[u ++]; - AES_ROUND_NOKEY(x0, x1, x2, x3); - x0 ^= rk[u ++]; - x1 ^= rk[u ++]; - x2 ^= rk[u ++]; - x3 ^= rk[u ++]; - AES_ROUND_NOKEY(x0, x1, x2, x3); - x0 ^= rk[u ++]; - x1 ^= rk[u ++]; - x2 ^= rk[u ++]; - x3 ^= rk[u ++]; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p4 ^= x0; - p5 ^= x1; - p6 ^= x2; - p7 ^= x3; - } - sc->h[0x0] ^= p0; - sc->h[0x1] ^= p1; - sc->h[0x2] ^= p2; - sc->h[0x3] ^= p3; - sc->h[0x4] ^= p4; - sc->h[0x5] ^= p5; - sc->h[0x6] ^= p6; - sc->h[0x7] ^= p7; -} - -#else - -/* - * This function assumes that "msg" is aligned for 32-bit access. - */ -static void -c256(sph_shavite_small_context *sc, const void *msg) -{ - sph_u32 p0, p1, p2, p3, p4, p5, p6, p7; - sph_u32 x0, x1, x2, x3; - sph_u32 rk0, rk1, rk2, rk3, rk4, rk5, rk6, rk7; - sph_u32 rk8, rk9, rkA, rkB, rkC, rkD, rkE, rkF; - - p0 = sc->h[0x0]; - p1 = sc->h[0x1]; - p2 = sc->h[0x2]; - p3 = sc->h[0x3]; - p4 = sc->h[0x4]; - p5 = sc->h[0x5]; - p6 = sc->h[0x6]; - p7 = sc->h[0x7]; - /* round 0 */ - rk0 = sph_dec32le_aligned((const unsigned char *)msg + 0); - x0 = p4 ^ rk0; - rk1 = sph_dec32le_aligned((const unsigned char *)msg + 4); - x1 = p5 ^ rk1; - rk2 = sph_dec32le_aligned((const unsigned char *)msg + 8); - x2 = p6 ^ rk2; - rk3 = sph_dec32le_aligned((const unsigned char *)msg + 12); - x3 = p7 ^ rk3; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk4 = sph_dec32le_aligned((const unsigned char *)msg + 16); - x0 ^= rk4; - rk5 = sph_dec32le_aligned((const unsigned char *)msg + 20); - x1 ^= rk5; - rk6 = sph_dec32le_aligned((const unsigned char *)msg + 24); - x2 ^= rk6; - rk7 = sph_dec32le_aligned((const unsigned char *)msg + 28); - x3 ^= rk7; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk8 = sph_dec32le_aligned((const unsigned char *)msg + 32); - x0 ^= rk8; - rk9 = sph_dec32le_aligned((const unsigned char *)msg + 36); - x1 ^= rk9; - rkA = sph_dec32le_aligned((const unsigned char *)msg + 40); - x2 ^= rkA; - rkB = sph_dec32le_aligned((const unsigned char *)msg + 44); - x3 ^= rkB; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p0 ^= x0; - p1 ^= x1; - p2 ^= x2; - p3 ^= x3; - /* round 1 */ - rkC = sph_dec32le_aligned((const unsigned char *)msg + 48); - x0 = p0 ^ rkC; - rkD = sph_dec32le_aligned((const unsigned char *)msg + 52); - x1 = p1 ^ rkD; - rkE = sph_dec32le_aligned((const unsigned char *)msg + 56); - x2 = p2 ^ rkE; - rkF = sph_dec32le_aligned((const unsigned char *)msg + 60); - x3 = p3 ^ rkF; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk0, rk1, rk2, rk3); - rk0 ^= rkC ^ sc->count0; - rk1 ^= rkD ^ SPH_T32(~sc->count1); - rk2 ^= rkE; - rk3 ^= rkF; - x0 ^= rk0; - x1 ^= rk1; - x2 ^= rk2; - x3 ^= rk3; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk4, rk5, rk6, rk7); - rk4 ^= rk0; - rk5 ^= rk1; - rk6 ^= rk2; - rk7 ^= rk3; - x0 ^= rk4; - x1 ^= rk5; - x2 ^= rk6; - x3 ^= rk7; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p4 ^= x0; - p5 ^= x1; - p6 ^= x2; - p7 ^= x3; - /* round 2 */ - KEY_EXPAND_ELT(rk8, rk9, rkA, rkB); - rk8 ^= rk4; - rk9 ^= rk5; - rkA ^= rk6; - rkB ^= rk7; - x0 = p4 ^ rk8; - x1 = p5 ^ rk9; - x2 = p6 ^ rkA; - x3 = p7 ^ rkB; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rkC, rkD, rkE, rkF); - rkC ^= rk8; - rkD ^= rk9; - rkE ^= rkA; - 
rkF ^= rkB; - x0 ^= rkC; - x1 ^= rkD; - x2 ^= rkE; - x3 ^= rkF; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk0 ^= rkD; - x0 ^= rk0; - rk1 ^= rkE; - x1 ^= rk1; - rk2 ^= rkF; - x2 ^= rk2; - rk3 ^= rk0; - x3 ^= rk3; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p0 ^= x0; - p1 ^= x1; - p2 ^= x2; - p3 ^= x3; - /* round 3 */ - rk4 ^= rk1; - x0 = p0 ^ rk4; - rk5 ^= rk2; - x1 = p1 ^ rk5; - rk6 ^= rk3; - x2 = p2 ^ rk6; - rk7 ^= rk4; - x3 = p3 ^ rk7; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk8 ^= rk5; - x0 ^= rk8; - rk9 ^= rk6; - x1 ^= rk9; - rkA ^= rk7; - x2 ^= rkA; - rkB ^= rk8; - x3 ^= rkB; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rkC ^= rk9; - x0 ^= rkC; - rkD ^= rkA; - x1 ^= rkD; - rkE ^= rkB; - x2 ^= rkE; - rkF ^= rkC; - x3 ^= rkF; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p4 ^= x0; - p5 ^= x1; - p6 ^= x2; - p7 ^= x3; - /* round 4 */ - KEY_EXPAND_ELT(rk0, rk1, rk2, rk3); - rk0 ^= rkC; - rk1 ^= rkD; - rk2 ^= rkE; - rk3 ^= rkF; - x0 = p4 ^ rk0; - x1 = p5 ^ rk1; - x2 = p6 ^ rk2; - x3 = p7 ^ rk3; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk4, rk5, rk6, rk7); - rk4 ^= rk0; - rk5 ^= rk1; - rk6 ^= rk2; - rk7 ^= rk3; - x0 ^= rk4; - x1 ^= rk5; - x2 ^= rk6; - x3 ^= rk7; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk8, rk9, rkA, rkB); - rk8 ^= rk4; - rk9 ^= rk5 ^ sc->count1; - rkA ^= rk6 ^ SPH_T32(~sc->count0); - rkB ^= rk7; - x0 ^= rk8; - x1 ^= rk9; - x2 ^= rkA; - x3 ^= rkB; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p0 ^= x0; - p1 ^= x1; - p2 ^= x2; - p3 ^= x3; - /* round 5 */ - KEY_EXPAND_ELT(rkC, rkD, rkE, rkF); - rkC ^= rk8; - rkD ^= rk9; - rkE ^= rkA; - rkF ^= rkB; - x0 = p0 ^ rkC; - x1 = p1 ^ rkD; - x2 = p2 ^ rkE; - x3 = p3 ^ rkF; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk0 ^= rkD; - x0 ^= rk0; - rk1 ^= rkE; - x1 ^= rk1; - rk2 ^= rkF; - x2 ^= rk2; - rk3 ^= rk0; - x3 ^= rk3; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk4 ^= rk1; - x0 ^= rk4; - rk5 ^= rk2; - x1 ^= rk5; - rk6 ^= rk3; - x2 ^= rk6; - rk7 ^= rk4; - x3 ^= rk7; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p4 ^= x0; - p5 ^= x1; - p6 ^= x2; - p7 ^= x3; - /* round 6 */ - rk8 ^= rk5; - x0 = p4 ^ rk8; - rk9 ^= rk6; - x1 = p5 ^ rk9; - rkA ^= rk7; - x2 = p6 ^ rkA; - rkB ^= rk8; - x3 = p7 ^ rkB; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rkC ^= rk9; - x0 ^= rkC; - rkD ^= rkA; - x1 ^= rkD; - rkE ^= rkB; - x2 ^= rkE; - rkF ^= rkC; - x3 ^= rkF; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk0, rk1, rk2, rk3); - rk0 ^= rkC; - rk1 ^= rkD; - rk2 ^= rkE; - rk3 ^= rkF; - x0 ^= rk0; - x1 ^= rk1; - x2 ^= rk2; - x3 ^= rk3; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p0 ^= x0; - p1 ^= x1; - p2 ^= x2; - p3 ^= x3; - /* round 7 */ - KEY_EXPAND_ELT(rk4, rk5, rk6, rk7); - rk4 ^= rk0; - rk5 ^= rk1; - rk6 ^= rk2 ^ sc->count1; - rk7 ^= rk3 ^ SPH_T32(~sc->count0); - x0 = p0 ^ rk4; - x1 = p1 ^ rk5; - x2 = p2 ^ rk6; - x3 = p3 ^ rk7; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk8, rk9, rkA, rkB); - rk8 ^= rk4; - rk9 ^= rk5; - rkA ^= rk6; - rkB ^= rk7; - x0 ^= rk8; - x1 ^= rk9; - x2 ^= rkA; - x3 ^= rkB; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rkC, rkD, rkE, rkF); - rkC ^= rk8; - rkD ^= rk9; - rkE ^= rkA; - rkF ^= rkB; - x0 ^= rkC; - x1 ^= rkD; - x2 ^= rkE; - x3 ^= rkF; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p4 ^= x0; - p5 ^= x1; - p6 ^= x2; - p7 ^= x3; - /* round 8 */ - rk0 ^= rkD; - x0 = p4 ^ rk0; - rk1 ^= rkE; - x1 = p5 ^ rk1; - rk2 ^= rkF; - x2 = p6 ^ rk2; - rk3 ^= rk0; - x3 = p7 ^ rk3; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk4 ^= rk1; - x0 ^= rk4; - rk5 ^= rk2; - x1 ^= rk5; - rk6 ^= rk3; - x2 ^= rk6; - rk7 ^= rk4; - x3 ^= rk7; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk8 ^= rk5; - x0 ^= 
rk8; - rk9 ^= rk6; - x1 ^= rk9; - rkA ^= rk7; - x2 ^= rkA; - rkB ^= rk8; - x3 ^= rkB; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p0 ^= x0; - p1 ^= x1; - p2 ^= x2; - p3 ^= x3; - /* round 9 */ - rkC ^= rk9; - x0 = p0 ^ rkC; - rkD ^= rkA; - x1 = p1 ^ rkD; - rkE ^= rkB; - x2 = p2 ^ rkE; - rkF ^= rkC; - x3 = p3 ^ rkF; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk0, rk1, rk2, rk3); - rk0 ^= rkC; - rk1 ^= rkD; - rk2 ^= rkE; - rk3 ^= rkF; - x0 ^= rk0; - x1 ^= rk1; - x2 ^= rk2; - x3 ^= rk3; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk4, rk5, rk6, rk7); - rk4 ^= rk0; - rk5 ^= rk1; - rk6 ^= rk2; - rk7 ^= rk3; - x0 ^= rk4; - x1 ^= rk5; - x2 ^= rk6; - x3 ^= rk7; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p4 ^= x0; - p5 ^= x1; - p6 ^= x2; - p7 ^= x3; - /* round 10 */ - KEY_EXPAND_ELT(rk8, rk9, rkA, rkB); - rk8 ^= rk4; - rk9 ^= rk5; - rkA ^= rk6; - rkB ^= rk7; - x0 = p4 ^ rk8; - x1 = p5 ^ rk9; - x2 = p6 ^ rkA; - x3 = p7 ^ rkB; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rkC, rkD, rkE, rkF); - rkC ^= rk8 ^ sc->count0; - rkD ^= rk9; - rkE ^= rkA; - rkF ^= rkB ^ SPH_T32(~sc->count1); - x0 ^= rkC; - x1 ^= rkD; - x2 ^= rkE; - x3 ^= rkF; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk0 ^= rkD; - x0 ^= rk0; - rk1 ^= rkE; - x1 ^= rk1; - rk2 ^= rkF; - x2 ^= rk2; - rk3 ^= rk0; - x3 ^= rk3; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p0 ^= x0; - p1 ^= x1; - p2 ^= x2; - p3 ^= x3; - /* round 11 */ - rk4 ^= rk1; - x0 = p0 ^ rk4; - rk5 ^= rk2; - x1 = p1 ^ rk5; - rk6 ^= rk3; - x2 = p2 ^ rk6; - rk7 ^= rk4; - x3 = p3 ^ rk7; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk8 ^= rk5; - x0 ^= rk8; - rk9 ^= rk6; - x1 ^= rk9; - rkA ^= rk7; - x2 ^= rkA; - rkB ^= rk8; - x3 ^= rkB; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rkC ^= rk9; - x0 ^= rkC; - rkD ^= rkA; - x1 ^= rkD; - rkE ^= rkB; - x2 ^= rkE; - rkF ^= rkC; - x3 ^= rkF; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p4 ^= x0; - p5 ^= x1; - p6 ^= x2; - p7 ^= x3; - sc->h[0x0] ^= p0; - sc->h[0x1] ^= p1; - sc->h[0x2] ^= p2; - sc->h[0x3] ^= p3; - sc->h[0x4] ^= p4; - sc->h[0x5] ^= p5; - sc->h[0x6] ^= p6; - sc->h[0x7] ^= p7; -} - -#endif - -#if SPH_SMALL_FOOTPRINT_SHAVITE - -/* - * This function assumes that "msg" is aligned for 32-bit access. 
- */ -static void -c512(sph_shavite_big_context *sc, const void *msg) -{ - sph_u32 p0, p1, p2, p3, p4, p5, p6, p7; - sph_u32 p8, p9, pA, pB, pC, pD, pE, pF; - sph_u32 rk[448]; - size_t u; - int r, s; - -#if SPH_LITTLE_ENDIAN - memcpy(rk, msg, 128); -#else - for (u = 0; u < 32; u += 4) { - rk[u + 0] = sph_dec32le_aligned( - (const unsigned char *)msg + (u << 2) + 0); - rk[u + 1] = sph_dec32le_aligned( - (const unsigned char *)msg + (u << 2) + 4); - rk[u + 2] = sph_dec32le_aligned( - (const unsigned char *)msg + (u << 2) + 8); - rk[u + 3] = sph_dec32le_aligned( - (const unsigned char *)msg + (u << 2) + 12); - } -#endif - u = 32; - for (;;) { - for (s = 0; s < 4; s ++) { - sph_u32 x0, x1, x2, x3; - - x0 = rk[u - 31]; - x1 = rk[u - 30]; - x2 = rk[u - 29]; - x3 = rk[u - 32]; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk[u + 0] = x0 ^ rk[u - 4]; - rk[u + 1] = x1 ^ rk[u - 3]; - rk[u + 2] = x2 ^ rk[u - 2]; - rk[u + 3] = x3 ^ rk[u - 1]; - if (u == 32) { - rk[ 32] ^= sc->count0; - rk[ 33] ^= sc->count1; - rk[ 34] ^= sc->count2; - rk[ 35] ^= SPH_T32(~sc->count3); - } else if (u == 440) { - rk[440] ^= sc->count1; - rk[441] ^= sc->count0; - rk[442] ^= sc->count3; - rk[443] ^= SPH_T32(~sc->count2); - } - u += 4; - - x0 = rk[u - 31]; - x1 = rk[u - 30]; - x2 = rk[u - 29]; - x3 = rk[u - 32]; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk[u + 0] = x0 ^ rk[u - 4]; - rk[u + 1] = x1 ^ rk[u - 3]; - rk[u + 2] = x2 ^ rk[u - 2]; - rk[u + 3] = x3 ^ rk[u - 1]; - if (u == 164) { - rk[164] ^= sc->count3; - rk[165] ^= sc->count2; - rk[166] ^= sc->count1; - rk[167] ^= SPH_T32(~sc->count0); - } else if (u == 316) { - rk[316] ^= sc->count2; - rk[317] ^= sc->count3; - rk[318] ^= sc->count0; - rk[319] ^= SPH_T32(~sc->count1); - } - u += 4; - } - if (u == 448) - break; - for (s = 0; s < 8; s ++) { - rk[u + 0] = rk[u - 32] ^ rk[u - 7]; - rk[u + 1] = rk[u - 31] ^ rk[u - 6]; - rk[u + 2] = rk[u - 30] ^ rk[u - 5]; - rk[u + 3] = rk[u - 29] ^ rk[u - 4]; - u += 4; - } - } - - p0 = sc->h[0x0]; - p1 = sc->h[0x1]; - p2 = sc->h[0x2]; - p3 = sc->h[0x3]; - p4 = sc->h[0x4]; - p5 = sc->h[0x5]; - p6 = sc->h[0x6]; - p7 = sc->h[0x7]; - p8 = sc->h[0x8]; - p9 = sc->h[0x9]; - pA = sc->h[0xA]; - pB = sc->h[0xB]; - pC = sc->h[0xC]; - pD = sc->h[0xD]; - pE = sc->h[0xE]; - pF = sc->h[0xF]; - u = 0; - for (r = 0; r < 14; r ++) { -#define C512_ELT(l0, l1, l2, l3, r0, r1, r2, r3) do { \ - sph_u32 x0, x1, x2, x3; \ - x0 = r0 ^ rk[u ++]; \ - x1 = r1 ^ rk[u ++]; \ - x2 = r2 ^ rk[u ++]; \ - x3 = r3 ^ rk[u ++]; \ - AES_ROUND_NOKEY(x0, x1, x2, x3); \ - x0 ^= rk[u ++]; \ - x1 ^= rk[u ++]; \ - x2 ^= rk[u ++]; \ - x3 ^= rk[u ++]; \ - AES_ROUND_NOKEY(x0, x1, x2, x3); \ - x0 ^= rk[u ++]; \ - x1 ^= rk[u ++]; \ - x2 ^= rk[u ++]; \ - x3 ^= rk[u ++]; \ - AES_ROUND_NOKEY(x0, x1, x2, x3); \ - x0 ^= rk[u ++]; \ - x1 ^= rk[u ++]; \ - x2 ^= rk[u ++]; \ - x3 ^= rk[u ++]; \ - AES_ROUND_NOKEY(x0, x1, x2, x3); \ - l0 ^= x0; \ - l1 ^= x1; \ - l2 ^= x2; \ - l3 ^= x3; \ - } while (0) - -#define WROT(a, b, c, d) do { \ - sph_u32 t = d; \ - d = c; \ - c = b; \ - b = a; \ - a = t; \ - } while (0) - - C512_ELT(p0, p1, p2, p3, p4, p5, p6, p7); - C512_ELT(p8, p9, pA, pB, pC, pD, pE, pF); - - WROT(p0, p4, p8, pC); - WROT(p1, p5, p9, pD); - WROT(p2, p6, pA, pE); - WROT(p3, p7, pB, pF); - -#undef C512_ELT -#undef WROT - } - sc->h[0x0] ^= p0; - sc->h[0x1] ^= p1; - sc->h[0x2] ^= p2; - sc->h[0x3] ^= p3; - sc->h[0x4] ^= p4; - sc->h[0x5] ^= p5; - sc->h[0x6] ^= p6; - sc->h[0x7] ^= p7; - sc->h[0x8] ^= p8; - sc->h[0x9] ^= p9; - sc->h[0xA] ^= pA; - sc->h[0xB] ^= pB; - sc->h[0xC] ^= pC; - sc->h[0xD] ^= pD; - 
sc->h[0xE] ^= pE; - sc->h[0xF] ^= pF; -} - -#else - -/* - * This function assumes that "msg" is aligned for 32-bit access. - */ -static void -c512(sph_shavite_big_context *sc, const void *msg) -{ - sph_u32 p0, p1, p2, p3, p4, p5, p6, p7; - sph_u32 p8, p9, pA, pB, pC, pD, pE, pF; - sph_u32 x0, x1, x2, x3; - sph_u32 rk00, rk01, rk02, rk03, rk04, rk05, rk06, rk07; - sph_u32 rk08, rk09, rk0A, rk0B, rk0C, rk0D, rk0E, rk0F; - sph_u32 rk10, rk11, rk12, rk13, rk14, rk15, rk16, rk17; - sph_u32 rk18, rk19, rk1A, rk1B, rk1C, rk1D, rk1E, rk1F; - int r; - - p0 = sc->h[0x0]; - p1 = sc->h[0x1]; - p2 = sc->h[0x2]; - p3 = sc->h[0x3]; - p4 = sc->h[0x4]; - p5 = sc->h[0x5]; - p6 = sc->h[0x6]; - p7 = sc->h[0x7]; - p8 = sc->h[0x8]; - p9 = sc->h[0x9]; - pA = sc->h[0xA]; - pB = sc->h[0xB]; - pC = sc->h[0xC]; - pD = sc->h[0xD]; - pE = sc->h[0xE]; - pF = sc->h[0xF]; - /* round 0 */ - rk00 = sph_dec32le_aligned((const unsigned char *)msg + 0); - x0 = p4 ^ rk00; - rk01 = sph_dec32le_aligned((const unsigned char *)msg + 4); - x1 = p5 ^ rk01; - rk02 = sph_dec32le_aligned((const unsigned char *)msg + 8); - x2 = p6 ^ rk02; - rk03 = sph_dec32le_aligned((const unsigned char *)msg + 12); - x3 = p7 ^ rk03; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk04 = sph_dec32le_aligned((const unsigned char *)msg + 16); - x0 ^= rk04; - rk05 = sph_dec32le_aligned((const unsigned char *)msg + 20); - x1 ^= rk05; - rk06 = sph_dec32le_aligned((const unsigned char *)msg + 24); - x2 ^= rk06; - rk07 = sph_dec32le_aligned((const unsigned char *)msg + 28); - x3 ^= rk07; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk08 = sph_dec32le_aligned((const unsigned char *)msg + 32); - x0 ^= rk08; - rk09 = sph_dec32le_aligned((const unsigned char *)msg + 36); - x1 ^= rk09; - rk0A = sph_dec32le_aligned((const unsigned char *)msg + 40); - x2 ^= rk0A; - rk0B = sph_dec32le_aligned((const unsigned char *)msg + 44); - x3 ^= rk0B; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk0C = sph_dec32le_aligned((const unsigned char *)msg + 48); - x0 ^= rk0C; - rk0D = sph_dec32le_aligned((const unsigned char *)msg + 52); - x1 ^= rk0D; - rk0E = sph_dec32le_aligned((const unsigned char *)msg + 56); - x2 ^= rk0E; - rk0F = sph_dec32le_aligned((const unsigned char *)msg + 60); - x3 ^= rk0F; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p0 ^= x0; - p1 ^= x1; - p2 ^= x2; - p3 ^= x3; - rk10 = sph_dec32le_aligned((const unsigned char *)msg + 64); - x0 = pC ^ rk10; - rk11 = sph_dec32le_aligned((const unsigned char *)msg + 68); - x1 = pD ^ rk11; - rk12 = sph_dec32le_aligned((const unsigned char *)msg + 72); - x2 = pE ^ rk12; - rk13 = sph_dec32le_aligned((const unsigned char *)msg + 76); - x3 = pF ^ rk13; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk14 = sph_dec32le_aligned((const unsigned char *)msg + 80); - x0 ^= rk14; - rk15 = sph_dec32le_aligned((const unsigned char *)msg + 84); - x1 ^= rk15; - rk16 = sph_dec32le_aligned((const unsigned char *)msg + 88); - x2 ^= rk16; - rk17 = sph_dec32le_aligned((const unsigned char *)msg + 92); - x3 ^= rk17; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk18 = sph_dec32le_aligned((const unsigned char *)msg + 96); - x0 ^= rk18; - rk19 = sph_dec32le_aligned((const unsigned char *)msg + 100); - x1 ^= rk19; - rk1A = sph_dec32le_aligned((const unsigned char *)msg + 104); - x2 ^= rk1A; - rk1B = sph_dec32le_aligned((const unsigned char *)msg + 108); - x3 ^= rk1B; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk1C = sph_dec32le_aligned((const unsigned char *)msg + 112); - x0 ^= rk1C; - rk1D = sph_dec32le_aligned((const unsigned char *)msg + 116); - x1 ^= rk1D; - rk1E = sph_dec32le_aligned((const unsigned 
char *)msg + 120); - x2 ^= rk1E; - rk1F = sph_dec32le_aligned((const unsigned char *)msg + 124); - x3 ^= rk1F; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p8 ^= x0; - p9 ^= x1; - pA ^= x2; - pB ^= x3; - - for (r = 0; r < 3; r ++) { - /* round 1, 5, 9 */ - KEY_EXPAND_ELT(rk00, rk01, rk02, rk03); - rk00 ^= rk1C; - rk01 ^= rk1D; - rk02 ^= rk1E; - rk03 ^= rk1F; - if (r == 0) { - rk00 ^= sc->count0; - rk01 ^= sc->count1; - rk02 ^= sc->count2; - rk03 ^= SPH_T32(~sc->count3); - } - x0 = p0 ^ rk00; - x1 = p1 ^ rk01; - x2 = p2 ^ rk02; - x3 = p3 ^ rk03; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk04, rk05, rk06, rk07); - rk04 ^= rk00; - rk05 ^= rk01; - rk06 ^= rk02; - rk07 ^= rk03; - if (r == 1) { - rk04 ^= sc->count3; - rk05 ^= sc->count2; - rk06 ^= sc->count1; - rk07 ^= SPH_T32(~sc->count0); - } - x0 ^= rk04; - x1 ^= rk05; - x2 ^= rk06; - x3 ^= rk07; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk08, rk09, rk0A, rk0B); - rk08 ^= rk04; - rk09 ^= rk05; - rk0A ^= rk06; - rk0B ^= rk07; - x0 ^= rk08; - x1 ^= rk09; - x2 ^= rk0A; - x3 ^= rk0B; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk0C, rk0D, rk0E, rk0F); - rk0C ^= rk08; - rk0D ^= rk09; - rk0E ^= rk0A; - rk0F ^= rk0B; - x0 ^= rk0C; - x1 ^= rk0D; - x2 ^= rk0E; - x3 ^= rk0F; - AES_ROUND_NOKEY(x0, x1, x2, x3); - pC ^= x0; - pD ^= x1; - pE ^= x2; - pF ^= x3; - KEY_EXPAND_ELT(rk10, rk11, rk12, rk13); - rk10 ^= rk0C; - rk11 ^= rk0D; - rk12 ^= rk0E; - rk13 ^= rk0F; - x0 = p8 ^ rk10; - x1 = p9 ^ rk11; - x2 = pA ^ rk12; - x3 = pB ^ rk13; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk14, rk15, rk16, rk17); - rk14 ^= rk10; - rk15 ^= rk11; - rk16 ^= rk12; - rk17 ^= rk13; - x0 ^= rk14; - x1 ^= rk15; - x2 ^= rk16; - x3 ^= rk17; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk18, rk19, rk1A, rk1B); - rk18 ^= rk14; - rk19 ^= rk15; - rk1A ^= rk16; - rk1B ^= rk17; - x0 ^= rk18; - x1 ^= rk19; - x2 ^= rk1A; - x3 ^= rk1B; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk1C, rk1D, rk1E, rk1F); - rk1C ^= rk18; - rk1D ^= rk19; - rk1E ^= rk1A; - rk1F ^= rk1B; - if (r == 2) { - rk1C ^= sc->count2; - rk1D ^= sc->count3; - rk1E ^= sc->count0; - rk1F ^= SPH_T32(~sc->count1); - } - x0 ^= rk1C; - x1 ^= rk1D; - x2 ^= rk1E; - x3 ^= rk1F; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p4 ^= x0; - p5 ^= x1; - p6 ^= x2; - p7 ^= x3; - /* round 2, 6, 10 */ - rk00 ^= rk19; - x0 = pC ^ rk00; - rk01 ^= rk1A; - x1 = pD ^ rk01; - rk02 ^= rk1B; - x2 = pE ^ rk02; - rk03 ^= rk1C; - x3 = pF ^ rk03; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk04 ^= rk1D; - x0 ^= rk04; - rk05 ^= rk1E; - x1 ^= rk05; - rk06 ^= rk1F; - x2 ^= rk06; - rk07 ^= rk00; - x3 ^= rk07; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk08 ^= rk01; - x0 ^= rk08; - rk09 ^= rk02; - x1 ^= rk09; - rk0A ^= rk03; - x2 ^= rk0A; - rk0B ^= rk04; - x3 ^= rk0B; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk0C ^= rk05; - x0 ^= rk0C; - rk0D ^= rk06; - x1 ^= rk0D; - rk0E ^= rk07; - x2 ^= rk0E; - rk0F ^= rk08; - x3 ^= rk0F; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p8 ^= x0; - p9 ^= x1; - pA ^= x2; - pB ^= x3; - rk10 ^= rk09; - x0 = p4 ^ rk10; - rk11 ^= rk0A; - x1 = p5 ^ rk11; - rk12 ^= rk0B; - x2 = p6 ^ rk12; - rk13 ^= rk0C; - x3 = p7 ^ rk13; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk14 ^= rk0D; - x0 ^= rk14; - rk15 ^= rk0E; - x1 ^= rk15; - rk16 ^= rk0F; - x2 ^= rk16; - rk17 ^= rk10; - x3 ^= rk17; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk18 ^= rk11; - x0 ^= rk18; - rk19 ^= rk12; - x1 ^= rk19; - rk1A ^= rk13; - x2 ^= rk1A; - rk1B ^= rk14; - x3 ^= rk1B; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk1C ^= rk15; - x0 ^= rk1C; - rk1D ^= rk16; - x1 ^= rk1D; - 
rk1E ^= rk17; - x2 ^= rk1E; - rk1F ^= rk18; - x3 ^= rk1F; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p0 ^= x0; - p1 ^= x1; - p2 ^= x2; - p3 ^= x3; - /* round 3, 7, 11 */ - KEY_EXPAND_ELT(rk00, rk01, rk02, rk03); - rk00 ^= rk1C; - rk01 ^= rk1D; - rk02 ^= rk1E; - rk03 ^= rk1F; - x0 = p8 ^ rk00; - x1 = p9 ^ rk01; - x2 = pA ^ rk02; - x3 = pB ^ rk03; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk04, rk05, rk06, rk07); - rk04 ^= rk00; - rk05 ^= rk01; - rk06 ^= rk02; - rk07 ^= rk03; - x0 ^= rk04; - x1 ^= rk05; - x2 ^= rk06; - x3 ^= rk07; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk08, rk09, rk0A, rk0B); - rk08 ^= rk04; - rk09 ^= rk05; - rk0A ^= rk06; - rk0B ^= rk07; - x0 ^= rk08; - x1 ^= rk09; - x2 ^= rk0A; - x3 ^= rk0B; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk0C, rk0D, rk0E, rk0F); - rk0C ^= rk08; - rk0D ^= rk09; - rk0E ^= rk0A; - rk0F ^= rk0B; - x0 ^= rk0C; - x1 ^= rk0D; - x2 ^= rk0E; - x3 ^= rk0F; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p4 ^= x0; - p5 ^= x1; - p6 ^= x2; - p7 ^= x3; - KEY_EXPAND_ELT(rk10, rk11, rk12, rk13); - rk10 ^= rk0C; - rk11 ^= rk0D; - rk12 ^= rk0E; - rk13 ^= rk0F; - x0 = p0 ^ rk10; - x1 = p1 ^ rk11; - x2 = p2 ^ rk12; - x3 = p3 ^ rk13; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk14, rk15, rk16, rk17); - rk14 ^= rk10; - rk15 ^= rk11; - rk16 ^= rk12; - rk17 ^= rk13; - x0 ^= rk14; - x1 ^= rk15; - x2 ^= rk16; - x3 ^= rk17; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk18, rk19, rk1A, rk1B); - rk18 ^= rk14; - rk19 ^= rk15; - rk1A ^= rk16; - rk1B ^= rk17; - x0 ^= rk18; - x1 ^= rk19; - x2 ^= rk1A; - x3 ^= rk1B; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk1C, rk1D, rk1E, rk1F); - rk1C ^= rk18; - rk1D ^= rk19; - rk1E ^= rk1A; - rk1F ^= rk1B; - x0 ^= rk1C; - x1 ^= rk1D; - x2 ^= rk1E; - x3 ^= rk1F; - AES_ROUND_NOKEY(x0, x1, x2, x3); - pC ^= x0; - pD ^= x1; - pE ^= x2; - pF ^= x3; - /* round 4, 8, 12 */ - rk00 ^= rk19; - x0 = p4 ^ rk00; - rk01 ^= rk1A; - x1 = p5 ^ rk01; - rk02 ^= rk1B; - x2 = p6 ^ rk02; - rk03 ^= rk1C; - x3 = p7 ^ rk03; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk04 ^= rk1D; - x0 ^= rk04; - rk05 ^= rk1E; - x1 ^= rk05; - rk06 ^= rk1F; - x2 ^= rk06; - rk07 ^= rk00; - x3 ^= rk07; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk08 ^= rk01; - x0 ^= rk08; - rk09 ^= rk02; - x1 ^= rk09; - rk0A ^= rk03; - x2 ^= rk0A; - rk0B ^= rk04; - x3 ^= rk0B; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk0C ^= rk05; - x0 ^= rk0C; - rk0D ^= rk06; - x1 ^= rk0D; - rk0E ^= rk07; - x2 ^= rk0E; - rk0F ^= rk08; - x3 ^= rk0F; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p0 ^= x0; - p1 ^= x1; - p2 ^= x2; - p3 ^= x3; - rk10 ^= rk09; - x0 = pC ^ rk10; - rk11 ^= rk0A; - x1 = pD ^ rk11; - rk12 ^= rk0B; - x2 = pE ^ rk12; - rk13 ^= rk0C; - x3 = pF ^ rk13; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk14 ^= rk0D; - x0 ^= rk14; - rk15 ^= rk0E; - x1 ^= rk15; - rk16 ^= rk0F; - x2 ^= rk16; - rk17 ^= rk10; - x3 ^= rk17; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk18 ^= rk11; - x0 ^= rk18; - rk19 ^= rk12; - x1 ^= rk19; - rk1A ^= rk13; - x2 ^= rk1A; - rk1B ^= rk14; - x3 ^= rk1B; - AES_ROUND_NOKEY(x0, x1, x2, x3); - rk1C ^= rk15; - x0 ^= rk1C; - rk1D ^= rk16; - x1 ^= rk1D; - rk1E ^= rk17; - x2 ^= rk1E; - rk1F ^= rk18; - x3 ^= rk1F; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p8 ^= x0; - p9 ^= x1; - pA ^= x2; - pB ^= x3; - } - /* round 13 */ - KEY_EXPAND_ELT(rk00, rk01, rk02, rk03); - rk00 ^= rk1C; - rk01 ^= rk1D; - rk02 ^= rk1E; - rk03 ^= rk1F; - x0 = p0 ^ rk00; - x1 = p1 ^ rk01; - x2 = p2 ^ rk02; - x3 = p3 ^ rk03; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk04, rk05, rk06, rk07); - rk04 ^= rk00; - 
rk05 ^= rk01; - rk06 ^= rk02; - rk07 ^= rk03; - x0 ^= rk04; - x1 ^= rk05; - x2 ^= rk06; - x3 ^= rk07; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk08, rk09, rk0A, rk0B); - rk08 ^= rk04; - rk09 ^= rk05; - rk0A ^= rk06; - rk0B ^= rk07; - x0 ^= rk08; - x1 ^= rk09; - x2 ^= rk0A; - x3 ^= rk0B; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk0C, rk0D, rk0E, rk0F); - rk0C ^= rk08; - rk0D ^= rk09; - rk0E ^= rk0A; - rk0F ^= rk0B; - x0 ^= rk0C; - x1 ^= rk0D; - x2 ^= rk0E; - x3 ^= rk0F; - AES_ROUND_NOKEY(x0, x1, x2, x3); - pC ^= x0; - pD ^= x1; - pE ^= x2; - pF ^= x3; - KEY_EXPAND_ELT(rk10, rk11, rk12, rk13); - rk10 ^= rk0C; - rk11 ^= rk0D; - rk12 ^= rk0E; - rk13 ^= rk0F; - x0 = p8 ^ rk10; - x1 = p9 ^ rk11; - x2 = pA ^ rk12; - x3 = pB ^ rk13; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk14, rk15, rk16, rk17); - rk14 ^= rk10; - rk15 ^= rk11; - rk16 ^= rk12; - rk17 ^= rk13; - x0 ^= rk14; - x1 ^= rk15; - x2 ^= rk16; - x3 ^= rk17; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk18, rk19, rk1A, rk1B); - rk18 ^= rk14 ^ sc->count1; - rk19 ^= rk15 ^ sc->count0; - rk1A ^= rk16 ^ sc->count3; - rk1B ^= rk17 ^ SPH_T32(~sc->count2); - x0 ^= rk18; - x1 ^= rk19; - x2 ^= rk1A; - x3 ^= rk1B; - AES_ROUND_NOKEY(x0, x1, x2, x3); - KEY_EXPAND_ELT(rk1C, rk1D, rk1E, rk1F); - rk1C ^= rk18; - rk1D ^= rk19; - rk1E ^= rk1A; - rk1F ^= rk1B; - x0 ^= rk1C; - x1 ^= rk1D; - x2 ^= rk1E; - x3 ^= rk1F; - AES_ROUND_NOKEY(x0, x1, x2, x3); - p4 ^= x0; - p5 ^= x1; - p6 ^= x2; - p7 ^= x3; - sc->h[0x0] ^= p8; - sc->h[0x1] ^= p9; - sc->h[0x2] ^= pA; - sc->h[0x3] ^= pB; - sc->h[0x4] ^= pC; - sc->h[0x5] ^= pD; - sc->h[0x6] ^= pE; - sc->h[0x7] ^= pF; - sc->h[0x8] ^= p0; - sc->h[0x9] ^= p1; - sc->h[0xA] ^= p2; - sc->h[0xB] ^= p3; - sc->h[0xC] ^= p4; - sc->h[0xD] ^= p5; - sc->h[0xE] ^= p6; - sc->h[0xF] ^= p7; -} - -#endif - -static void -shavite_small_init(sph_shavite_small_context *sc, const sph_u32 *iv) -{ - memcpy(sc->h, iv, sizeof sc->h); - sc->ptr = 0; - sc->count0 = 0; - sc->count1 = 0; -} - -static void -shavite_small_core(sph_shavite_small_context *sc, const void *data, size_t len) -{ - unsigned char *buf; - size_t ptr; - - buf = sc->buf; - ptr = sc->ptr; - while (len > 0) { - size_t clen; - - clen = (sizeof sc->buf) - ptr; - if (clen > len) - clen = len; - memcpy(buf + ptr, data, clen); - data = (const unsigned char *)data + clen; - ptr += clen; - len -= clen; - if (ptr == sizeof sc->buf) { - if ((sc->count0 = SPH_T32(sc->count0 + 512)) == 0) - sc->count1 = SPH_T32(sc->count1 + 1); - c256(sc, buf); - ptr = 0; - } - } - sc->ptr = ptr; -} - -static void -shavite_small_close(sph_shavite_small_context *sc, - unsigned ub, unsigned n, void *dst, size_t out_size_w32) -{ - unsigned char *buf; - size_t ptr, u; - unsigned z; - sph_u32 count0, count1; - - buf = sc->buf; - ptr = sc->ptr; - count0 = (sc->count0 += (ptr << 3) + n); - count1 = sc->count1; - z = 0x80 >> n; - z = ((ub & -z) | z) & 0xFF; - if (ptr == 0 && n == 0) { - buf[0] = 0x80; - memset(buf + 1, 0, 53); - sc->count0 = sc->count1 = 0; - } else if (ptr < 54) { - buf[ptr ++] = z; - memset(buf + ptr, 0, 54 - ptr); - } else { - buf[ptr ++] = z; - memset(buf + ptr, 0, 64 - ptr); - c256(sc, buf); - memset(buf, 0, 54); - sc->count0 = sc->count1 = 0; - } - sph_enc32le(buf + 54, count0); - sph_enc32le(buf + 58, count1); - buf[62] = out_size_w32 << 5; - buf[63] = out_size_w32 >> 3; - c256(sc, buf); - for (u = 0; u < out_size_w32; u ++) - sph_enc32le((unsigned char *)dst + (u << 2), sc->h[u]); -} - -static void -shavite_big_init(sph_shavite_big_context *sc, const 
sph_u32 *iv) -{ - memcpy(sc->h, iv, sizeof sc->h); - sc->ptr = 0; - sc->count0 = 0; - sc->count1 = 0; - sc->count2 = 0; - sc->count3 = 0; -} - -static void -shavite_big_core(sph_shavite_big_context *sc, const void *data, size_t len) -{ - unsigned char *buf; - size_t ptr; - - buf = sc->buf; - ptr = sc->ptr; - while (len > 0) { - size_t clen; - - clen = (sizeof sc->buf) - ptr; - if (clen > len) - clen = len; - memcpy(buf + ptr, data, clen); - data = (const unsigned char *)data + clen; - ptr += clen; - len -= clen; - if (ptr == sizeof sc->buf) { - if ((sc->count0 = SPH_T32(sc->count0 + 1024)) == 0) { - sc->count1 = SPH_T32(sc->count1 + 1); - if (sc->count1 == 0) { - sc->count2 = SPH_T32(sc->count2 + 1); - if (sc->count2 == 0) { - sc->count3 = SPH_T32( - sc->count3 + 1); - } - } - } - c512(sc, buf); - ptr = 0; - } - } - sc->ptr = ptr; -} - -static void -shavite_big_close(sph_shavite_big_context *sc, - unsigned ub, unsigned n, void *dst, size_t out_size_w32) -{ - unsigned char *buf; - size_t ptr, u; - unsigned z; - sph_u32 count0, count1, count2, count3; - - buf = sc->buf; - ptr = sc->ptr; - count0 = (sc->count0 += (ptr << 3) + n); - count1 = sc->count1; - count2 = sc->count2; - count3 = sc->count3; - z = 0x80 >> n; - z = ((ub & -z) | z) & 0xFF; - if (ptr == 0 && n == 0) { - buf[0] = 0x80; - memset(buf + 1, 0, 109); - sc->count0 = sc->count1 = sc->count2 = sc->count3 = 0; - } else if (ptr < 110) { - buf[ptr ++] = z; - memset(buf + ptr, 0, 110 - ptr); - } else { - buf[ptr ++] = z; - memset(buf + ptr, 0, 128 - ptr); - c512(sc, buf); - memset(buf, 0, 110); - sc->count0 = sc->count1 = sc->count2 = sc->count3 = 0; - } - sph_enc32le(buf + 110, count0); - sph_enc32le(buf + 114, count1); - sph_enc32le(buf + 118, count2); - sph_enc32le(buf + 122, count3); - buf[126] = out_size_w32 << 5; - buf[127] = out_size_w32 >> 3; - c512(sc, buf); - for (u = 0; u < out_size_w32; u ++) - sph_enc32le((unsigned char *)dst + (u << 2), sc->h[u]); -} - -/* see sph_shavite.h */ -void -sph_shavite224_init(void *cc) -{ - shavite_small_init(cc, IV224); -} - -/* see sph_shavite.h */ -void -sph_shavite224(void *cc, const void *data, size_t len) -{ - shavite_small_core(cc, data, len); -} - -/* see sph_shavite.h */ -void -sph_shavite224_close(void *cc, void *dst) -{ - shavite_small_close(cc, 0, 0, dst, 7); - shavite_small_init(cc, IV224); -} - -/* see sph_shavite.h */ -void -sph_shavite224_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst) -{ - shavite_small_close(cc, ub, n, dst, 7); - shavite_small_init(cc, IV224); -} - -/* see sph_shavite.h */ -void -sph_shavite256_init(void *cc) -{ - shavite_small_init(cc, IV256); -} - -/* see sph_shavite.h */ -void -sph_shavite256(void *cc, const void *data, size_t len) -{ - shavite_small_core(cc, data, len); -} - -/* see sph_shavite.h */ -void -sph_shavite256_close(void *cc, void *dst) -{ - shavite_small_close(cc, 0, 0, dst, 8); - shavite_small_init(cc, IV256); -} - -/* see sph_shavite.h */ -void -sph_shavite256_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst) -{ - shavite_small_close(cc, ub, n, dst, 8); - shavite_small_init(cc, IV256); -} - -/* see sph_shavite.h */ -void -sph_shavite384_init(void *cc) -{ - shavite_big_init(cc, IV384); -} - -/* see sph_shavite.h */ -void -sph_shavite384(void *cc, const void *data, size_t len) -{ - shavite_big_core(cc, data, len); -} - -/* see sph_shavite.h */ -void -sph_shavite384_close(void *cc, void *dst) -{ - shavite_big_close(cc, 0, 0, dst, 12); - shavite_big_init(cc, IV384); -} - -/* see sph_shavite.h */ -void 
-sph_shavite384_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst) -{ - shavite_big_close(cc, ub, n, dst, 12); - shavite_big_init(cc, IV384); -} - -/* see sph_shavite.h */ -void -sph_shavite512_init(void *cc) -{ - shavite_big_init(cc, IV512); -} - -/* see sph_shavite.h */ -void -sph_shavite512(void *cc, const void *data, size_t len) -{ - shavite_big_core(cc, data, len); -} - -/* see sph_shavite.h */ -void -sph_shavite512_close(void *cc, void *dst) -{ - shavite_big_close(cc, 0, 0, dst, 16); - shavite_big_init(cc, IV512); -} - -/* see sph_shavite.h */ -void -sph_shavite512_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst) -{ - shavite_big_close(cc, ub, n, dst, 16); - shavite_big_init(cc, IV512); -} - -#ifdef __cplusplus -} -#endif \ No newline at end of file diff --git a/algo/skein/skein-gate.h b/algo/skein/skein-gate.h index c90f153..3fee094 100644 --- a/algo/skein/skein-gate.h +++ b/algo/skein/skein-gate.h @@ -3,9 +3,12 @@ #include #include "algo-gate-api.h" +// Override multi way on ryzen, SHA is better. +#if !defined(RYZEN_) #if defined(__AVX2__) #define SKEIN_4WAY #endif +#endif #if defined(SKEIN_4WAY) diff --git a/algo/skein/skein.c b/algo/skein/skein.c index 60f8614..90d3863 100644 --- a/algo/skein/skein.c +++ b/algo/skein/skein.c @@ -3,31 +3,20 @@ #include #include "sph_skein.h" #include -#include "algo/sha/sph_sha2.h" void skeinhash(void *state, const void *input) { uint32_t hash[16] __attribute__ ((aligned (64))); sph_skein512_context ctx_skein; -#ifndef USE_SPH_SHA SHA256_CTX ctx_sha256; -#else - sph_sha256_context ctx_sha256; -#endif sph_skein512_init( &ctx_skein ); sph_skein512( &ctx_skein, input, 80 ); sph_skein512_close( &ctx_skein, hash ); -#ifndef USE_SPH_SHA SHA256_Init( &ctx_sha256 ); SHA256_Update( &ctx_sha256, (unsigned char*)hash, 64 ); SHA256_Final( (unsigned char*) hash, &ctx_sha256 ); -#else - sph_sha256_init( &ctx_sha256 ); - sph_sha256( &ctx_sha256, hash, 64 ); - sph_sha256_close( &ctx_sha256, hash ); -#endif memcpy(state, hash, 32); } diff --git a/algo/x12/x12-4way.c b/algo/x12/x12-4way.c index c24d61b..1b1731c 100644 --- a/algo/x12/x12-4way.c +++ b/algo/x12/x12-4way.c @@ -114,11 +114,11 @@ void x12_4way_hash( void *state, const void *input ) // 8 Cubehash cubehashUpdateDigest( &ctx.cube, (byte*)hash0, (const byte*) hash0, 64 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 512, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*)hash1, (const byte*) hash1, 64 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 512, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*)hash2, (const byte*) hash2, 64 ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 512, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*)hash3, (const byte*) hash3, 64 ); // 9 Shavite diff --git a/algo/x12/x12.c b/algo/x12/x12.c index 0603bba..ce6f2a4 100644 --- a/algo/x12/x12.c +++ b/algo/x12/x12.c @@ -17,8 +17,6 @@ #include "algo/simd/sph_simd.h" #include "algo/echo/sph_echo.h" #include "algo/hamsi/sph_hamsi.h" -//#include "algo/fugue/sph_fugue.h" - #include "algo/luffa/luffa_for_sse2.h" #include "algo/cubehash/cubehash_sse2.h" #include "algo/simd/nist.h" @@ -27,45 +25,42 @@ #include "algo/keccak/sse2/keccak.c" #include "algo/skein/sse2/skein.c" #include "algo/jh/sse2/jh_sse2_opt64.h" - -#ifndef NO_AES_NI +#if defined(__AES__) #include "algo/groestl/aes_ni/hash-groestl.h" #include "algo/echo/aes_ni/hash_api.h" #endif typedef struct { -#ifdef NO_AES_NI - sph_groestl512_context groestl; - sph_echo512_context echo; -#else +#if defined(__AES__) 
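/*
 * A minimal sketch, assuming -DRYZEN_ is supplied in CFLAGS as the build
 * notes suggest, of what the skein-gate.h guard above selects: with RYZEN_
 * defined the AVX2 4-way Skein path is never compiled and skeinhash()
 * falls through to the scalar code, whose SHA256 step (OpenSSL SHA256_*)
 * is faster on Ryzen thanks to the CPU's SHA extensions.
 */
#if !defined(RYZEN_) && defined(__AVX2__)
  #define SKEIN_4WAY        /* 4 lanes per 256-bit register */
#else
  /* scalar sph_skein512 + OpenSSL SHA256 path is built instead */
#endif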
hashState_groestl groestl; hashState_echo echo; +#else + sph_groestl512_context groestl; + sph_echo512_context echo; #endif hashState_luffa luffa; cubehashParam cubehash; sph_shavite512_context shavite; hashState_sd simd; sph_hamsi512_context hamsi; -// sph_fugue512_context fugue; } x12_ctx_holder; x12_ctx_holder x12_ctx; void init_x12_ctx() { -#ifdef NO_AES_NI - sph_groestl512_init(&x12_ctx.groestl); - sph_echo512_init(&x12_ctx.echo); -#else +#if defined(__AES__) init_echo( &x12_ctx.echo, 512 ); init_groestl (&x12_ctx.groestl, 64 ); +#else + sph_groestl512_init(&x12_ctx.groestl); + sph_echo512_init(&x12_ctx.echo); #endif init_luffa( &x12_ctx.luffa, 512 ); cubehashInit( &x12_ctx.cubehash, 512, 16, 32 ); sph_shavite512_init( &x12_ctx.shavite ); init_sd( &x12_ctx.simd, 512 ); sph_hamsi512_init( &x12_ctx.hamsi ); -// sph_fugue512_init( &x13_ctx.fugue ); }; void x12hash(void *output, const void *input) @@ -108,12 +103,12 @@ void x12hash(void *output, const void *input) //---groetl---- -#ifdef NO_AES_NI - sph_groestl512 (&ctx.groestl, hash, 64); - sph_groestl512_close(&ctx.groestl, hash); -#else +#if defined(__AES__) update_and_final_groestl( &ctx.groestl, (char*)hash, (const char*)hash, 512 ); +#else + sph_groestl512 (&ctx.groestl, hash, 64); + sph_groestl512_close(&ctx.groestl, hash); #endif //---skein4--- @@ -153,23 +148,18 @@ void x12hash(void *output, const void *input) //11---echo--- -#ifdef NO_AES_NI - sph_echo512(&ctx.echo, hash, 64); - sph_echo512_close(&ctx.echo, hashB); -#else +#if defined(__AES__) update_final_echo ( &ctx.echo, (BitSequence *)hashB, (const BitSequence *)hash, 512 ); +#else + sph_echo512(&ctx.echo, hash, 64); + sph_echo512_close(&ctx.echo, hashB); #endif // 12 Hamsi sph_hamsi512(&ctx.hamsi, hashB, 64); sph_hamsi512_close(&ctx.hamsi, hash); -/* - // 13 Fugue - sph_fugue512(&ctx.fugue, hash, 64); - sph_fugue512_close(&ctx.fugue, hashB); -*/ asm volatile ("emms"); memcpy(output, hashB, 32); } diff --git a/algo/x17/x16r-4way.c b/algo/x16/x16r-4way.c similarity index 98% rename from algo/x17/x16r-4way.c rename to algo/x16/x16r-4way.c index b02b882..ff2c775 100644 --- a/algo/x17/x16r-4way.c +++ b/algo/x16/x16r-4way.c @@ -183,16 +183,16 @@ void x16r_4way_hash( void* output, const void* input ) mm256_deinterleave_2x128( hash2, hash3, vhash, 512 ); break; case CUBEHASH: - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 512, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash0, (const byte*)in0, size ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 512, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash1, (const byte*)in1, size ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 512, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash2, (const byte*)in2, size ); - cubehashReinit( &ctx.cube ); + cubehashInit( &ctx.cube, 512, 16, 32 ); cubehashUpdateDigest( &ctx.cube, (byte*) hash3, (const byte*)in3, size ); break; diff --git a/algo/x17/x16r-gate.c b/algo/x16/x16r-gate.c similarity index 100% rename from algo/x17/x16r-gate.c rename to algo/x16/x16r-gate.c diff --git a/algo/x17/x16r-gate.h b/algo/x16/x16r-gate.h similarity index 100% rename from algo/x17/x16r-gate.h rename to algo/x16/x16r-gate.h diff --git a/algo/x17/x16r.c b/algo/x16/x16r.c similarity index 98% rename from algo/x17/x16r.c rename to algo/x16/x16r.c index a17154d..a0cd2db 100644 --- a/algo/x17/x16r.c +++ b/algo/x16/x16r.c @@ -25,7 +25,7 @@ #include "algo/shabal/sph_shabal.h" #include "algo/whirlpool/sph_whirlpool.h" #include -#ifndef NO_AES_NI +#if defined(__AES__) 
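/*
 * A minimal sketch of the feature-test change running through x12.c and
 * x16r.c above: selection now keys off the compiler's own __AES__ macro
 * (set by -maes or -march=native on AES-capable CPUs) instead of the
 * hand-maintained NO_AES_NI define.  groestl512_one_shot() is a
 * hypothetical wrapper, shown only to make the dispatch shape explicit;
 * the functions it calls are the ones used in the hunks above.
 */
#if defined(__AES__)
  #include "algo/groestl/aes_ni/hash-groestl.h"
  static void groestl512_one_shot( void *out, const void *in )
  {
     hashState_groestl ctx;
     init_groestl( &ctx, 64 );
     update_and_final_groestl( &ctx, (char*)out, (const char*)in, 512 );
  }
#else
  #include "algo/groestl/sph_groestl.h"
  static void groestl512_one_shot( void *out, const void *in )
  {
     sph_groestl512_context ctx;
     sph_groestl512_init( &ctx );
     sph_groestl512( &ctx, in, 64 );
     sph_groestl512_close( &ctx, out );
  }
#endif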
#include "algo/echo/aes_ni/hash_api.h" #include "algo/groestl/aes_ni/hash-groestl.h" #endif @@ -34,12 +34,12 @@ static __thread uint32_t s_ntime = UINT32_MAX; static __thread char hashOrder[X16R_HASH_FUNC_COUNT + 1] = { 0 }; typedef struct { -#ifdef NO_AES_NI - sph_groestl512_context groestl; - sph_echo512_context echo; -#else +#if defined(__AES__) hashState_echo echo; hashState_groestl groestl; +#else + sph_groestl512_context groestl; + sph_echo512_context echo; #endif sph_blake512_context blake; sph_bmw512_context bmw; @@ -95,14 +95,14 @@ void x16r_hash( void* output, const void* input ) sph_bmw512_close(&ctx.bmw, hash); break; case GROESTL: -#ifdef NO_AES_NI - sph_groestl512_init( &ctx.groestl ); - sph_groestl512( &ctx.groestl, in, size ); - sph_groestl512_close(&ctx.groestl, hash); -#else +#if defined(__AES__) init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash, (const char*)in, size<<3 ); +#else + sph_groestl512_init( &ctx.groestl ); + sph_groestl512( &ctx.groestl, in, size ); + sph_groestl512_close(&ctx.groestl, hash); #endif break; case SKEIN: @@ -141,14 +141,14 @@ void x16r_hash( void* output, const void* input ) (const BitSequence*)in, size<<3 ); break; case ECHO: -#ifdef NO_AES_NI - sph_echo512_init( &ctx.echo ); - sph_echo512( &ctx.echo, in, size ); - sph_echo512_close( &ctx.echo, hash ); -#else +#if defined(__AES__) init_echo( &ctx.echo, 512 ); update_final_echo ( &ctx.echo, (BitSequence *)hash, (const BitSequence*)in, size<<3 ); +#else + sph_echo512_init( &ctx.echo ); + sph_echo512( &ctx.echo, in, size ); + sph_echo512_close( &ctx.echo, hash ); #endif break; case HAMSI: diff --git a/algo/x17/hmq1725.c b/algo/x17/hmq1725.c index 1c49d2f..da370ed 100644 --- a/algo/x17/hmq1725.c +++ b/algo/x17/hmq1725.c @@ -16,10 +16,9 @@ #include "algo/fugue/sph_fugue.h" #include "algo/shabal/sph_shabal.h" #include "algo/whirlpool/sph_whirlpool.h" -#include "algo/sha/sph_sha2.h" #include "algo/haval/sph-haval.h" #include -#ifndef NO_AES_NI +#if defined(__AES__) #include "algo/groestl/aes_ni/hash-groestl.h" #include "algo/echo/aes_ni/hash_api.h" #endif @@ -42,18 +41,14 @@ typedef struct { sph_fugue512_context fugue1, fugue2; sph_shabal512_context shabal1; sph_whirlpool_context whirlpool1, whirlpool2, whirlpool3, whirlpool4; -#ifndef USE_SPH_SHA SHA512_CTX sha1, sha2; -#else - sph_sha512_context sha1, sha2; -#endif sph_haval256_5_context haval1, haval2; -#ifdef NO_AES_NI - sph_groestl512_context groestl1, groestl2; - sph_echo512_context echo1, echo2; -#else +#if defined(__AES__) hashState_echo echo1, echo2; hashState_groestl groestl1, groestl2; +#else + sph_groestl512_context groestl1, groestl2; + sph_echo512_context echo1, echo2; #endif } hmq1725_ctx_holder; @@ -101,26 +96,22 @@ void init_hmq1725_ctx() sph_whirlpool_init(&hmq1725_ctx.whirlpool3); sph_whirlpool_init(&hmq1725_ctx.whirlpool4); -#ifndef USE_SPH_SHA SHA512_Init( &hmq1725_ctx.sha1 ); SHA512_Init( &hmq1725_ctx.sha2 ); -#else - sph_sha512_init(&hmq1725_ctx.sha1); - sph_sha512_init(&hmq1725_ctx.sha2); -#endif + sph_haval256_5_init(&hmq1725_ctx.haval1); sph_haval256_5_init(&hmq1725_ctx.haval2); -#ifdef NO_AES_NI - sph_groestl512_init( &hmq1725_ctx.groestl1 ); - sph_groestl512_init( &hmq1725_ctx.groestl2 ); - sph_echo512_init( &hmq1725_ctx.echo1 ); - sph_echo512_init( &hmq1725_ctx.echo2 ); -#else +#if defined(__AES__) init_echo( &hmq1725_ctx.echo1, 512 ); init_echo( &hmq1725_ctx.echo2, 512 ); init_groestl( &hmq1725_ctx.groestl1, 64 ); init_groestl( &hmq1725_ctx.groestl2, 64 ); +#else + sph_groestl512_init( 
&hmq1725_ctx.groestl1 ); + sph_groestl512_init( &hmq1725_ctx.groestl2 ); + sph_echo512_init( &hmq1725_ctx.echo1 ); + sph_echo512_init( &hmq1725_ctx.echo2 ); #endif } @@ -151,12 +142,12 @@ extern void hmq1725hash(void *state, const void *input) if ( hashB[0] & mask ) //1 { -#ifdef NO_AES_NI +#if defined(__AES__) + update_and_final_groestl( &h_ctx.groestl1, (char*)hashA, + (const char*)hashB, 512 ); +#else sph_groestl512 (&h_ctx.groestl1, hashB, 64); //1 sph_groestl512_close(&h_ctx.groestl1, hashA); //2 -#else - update_and_final_groestl( &h_ctx.groestl1, (char*)hashA, - (const char*)hashB, 512 ); #endif } else @@ -217,12 +208,12 @@ extern void hmq1725hash(void *state, const void *input) memset(&hashB[8], 0, 32); } -#ifdef NO_AES_NI - sph_echo512 (&h_ctx.echo1, hashB, 64); //5 - sph_echo512_close(&h_ctx.echo1, hashA); //6 -#else +#if defined(__AES__) update_final_echo ( &h_ctx.echo1, (BitSequence *)hashA, (const BitSequence *)hashB, 512 ); +#else + sph_echo512 (&h_ctx.echo1, hashB, 64); //5 + sph_echo512_close(&h_ctx.echo1, hashA); //6 #endif sph_blake512 (&h_ctx.blake2, hashA, 64); //6 @@ -247,12 +238,12 @@ extern void hmq1725hash(void *state, const void *input) if ( hashA[0] & mask ) //4 { -#ifdef NO_AES_NI - sph_echo512 (&h_ctx.echo2, hashA, 64); // - sph_echo512_close(&h_ctx.echo2, hashB); //5 -#else +#if defined(__AES__) update_final_echo ( &h_ctx.echo2, (BitSequence *)hashB, (const BitSequence *)hashA, 512 ); +#else + sph_echo512 (&h_ctx.echo2, hashA, 64); // + sph_echo512_close(&h_ctx.echo2, hashB); //5 #endif } else @@ -274,30 +265,20 @@ extern void hmq1725hash(void *state, const void *input) } else { -#ifndef USE_SPH_SHA SHA512_Update( &h_ctx.sha1, hashB, 64 ); SHA512_Final( (unsigned char*) hashA, &h_ctx.sha1 ); -#else - sph_sha512 (&h_ctx.sha1, hashB, 64); //7 - sph_sha512_close(&h_ctx.sha1, hashA); //8 -#endif } -#ifdef NO_AES_NI - sph_groestl512 (&h_ctx.groestl2, hashA, 64); //3 - sph_groestl512_close(&h_ctx.groestl2, hashB); //4 -#else +#if defined(__AES__) update_and_final_groestl( &h_ctx.groestl2, (char*)hashB, (const char*)hashA, 512 ); +#else + sph_groestl512 (&h_ctx.groestl2, hashA, 64); //3 + sph_groestl512_close(&h_ctx.groestl2, hashB); //4 #endif -#ifndef USE_SPH_SHA SHA512_Update( &h_ctx.sha2, hashB, 64 ); SHA512_Final( (unsigned char*) hashA, &h_ctx.sha2 ); -#else - sph_sha512 (&h_ctx.sha2, hashB, 64); //2 - sph_sha512_close(&h_ctx.sha2, hashA); //3 -#endif if ( hashA[0] & mask ) //4 { diff --git a/algo/x17/sonoa-4way.c b/algo/x17/sonoa-4way.c new file mode 100644 index 0000000..11a2a37 --- /dev/null +++ b/algo/x17/sonoa-4way.c @@ -0,0 +1,872 @@ +#include "sonoa-gate.h" + +#if defined(SONOA_4WAY) + +#include +#include +#include +#include +#include "algo/blake/blake-hash-4way.h" +#include "algo/bmw/bmw-hash-4way.h" +#include "algo/groestl/aes_ni/hash-groestl.h" +#include "algo/skein/skein-hash-4way.h" +#include "algo/jh/jh-hash-4way.h" +#include "algo/keccak/keccak-hash-4way.h" +#include "algo/luffa/luffa-hash-2way.h" +#include "algo/cubehash/cube-hash-2way.h" +#include "algo/shavite/sph_shavite.h" +#include "algo/shavite/shavite-hash-2way.h" +#include "algo/simd/simd-hash-2way.h" +#include "algo/echo/aes_ni/hash_api.h" +#include "algo/hamsi/hamsi-hash-4way.h" +#include "algo/fugue/sph_fugue.h" +#include "algo/shabal/shabal-hash-4way.h" +#include "algo/whirlpool/sph_whirlpool.h" +#include "algo/haval/haval-hash-4way.h" +#include "algo/sha/sha2-hash-4way.h" + +typedef struct { + blake512_4way_context blake; + bmw512_4way_context bmw; + hashState_groestl groestl; + 
skein512_4way_context skein; + jh512_4way_context jh; + keccak512_4way_context keccak; + luffa_2way_context luffa; + cube_2way_context cube; + shavite512_2way_context shavite; + simd_2way_context simd; + hashState_echo echo; + hamsi512_4way_context hamsi; + sph_fugue512_context fugue; + shabal512_4way_context shabal; + sph_whirlpool_context whirlpool; + sha512_4way_context sha512; + haval256_5_4way_context haval; +} sonoa_4way_ctx_holder; + +sonoa_4way_ctx_holder sonoa_4way_ctx __attribute__ ((aligned (64))); + +void init_sonoa_4way_ctx() +{ + blake512_4way_init( &sonoa_4way_ctx.blake ); + bmw512_4way_init( &sonoa_4way_ctx.bmw ); + init_groestl( &sonoa_4way_ctx.groestl, 64 ); + skein512_4way_init( &sonoa_4way_ctx.skein ); + jh512_4way_init( &sonoa_4way_ctx.jh ); + keccak512_4way_init( &sonoa_4way_ctx.keccak ); + luffa_2way_init( &sonoa_4way_ctx.luffa, 512 ); + cube_2way_init( &sonoa_4way_ctx.cube, 512, 16, 32 ); + shavite512_2way_init( &sonoa_4way_ctx.shavite ); + simd_2way_init( &sonoa_4way_ctx.simd, 512 ); + init_echo( &sonoa_4way_ctx.echo, 512 ); + hamsi512_4way_init( &sonoa_4way_ctx.hamsi ); + sph_fugue512_init( &sonoa_4way_ctx.fugue ); + shabal512_4way_init( &sonoa_4way_ctx.shabal ); + sph_whirlpool_init( &sonoa_4way_ctx.whirlpool ); + sha512_4way_init( &sonoa_4way_ctx.sha512 ); + haval256_5_4way_init( &sonoa_4way_ctx.haval ); +}; + +void sonoa_4way_hash( void *state, const void *input ) +{ + uint64_t hash0[8] __attribute__ ((aligned (64))); + uint64_t hash1[8] __attribute__ ((aligned (64))); + uint64_t hash2[8] __attribute__ ((aligned (64))); + uint64_t hash3[8] __attribute__ ((aligned (64))); + uint64_t vhash[8*4] __attribute__ ((aligned (64))); + uint64_t vhashA[8*4] __attribute__ ((aligned (64))); + uint64_t vhashB[8*4] __attribute__ ((aligned (64))); + sonoa_4way_ctx_holder ctx __attribute__ ((aligned (64))); + memcpy( &ctx, &sonoa_4way_ctx, sizeof(sonoa_4way_ctx) ); + +// 1 + + blake512_4way( &ctx.blake, input, 80 ); + blake512_4way_close( &ctx.blake, vhash ); + + bmw512_4way( &ctx.bmw, vhash, 64 ); + bmw512_4way_close( &ctx.bmw, vhash ); + + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + + update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + skein512_4way( &ctx.skein, vhash, 64 ); + skein512_4way_close( &ctx.skein, vhash ); + + jh512_4way( &ctx.jh, vhash, 64 ); + jh512_4way_close( &ctx.jh, vhash ); + + keccak512_4way( &ctx.keccak, vhash, 64 ); + keccak512_4way_close( &ctx.keccak, vhash ); + + mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 ); + + luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); + luffa_2way_init( &ctx.luffa, 512 ); + luffa_2way_update_close( &ctx.luffa, vhashB, vhashB, 64 ); + + cube_2way_update_close( &ctx.cube, vhashA, vhashA, 64 ); + cube_2way_init( &ctx.cube, 512, 16, 32 ); + cube_2way_update_close( &ctx.cube, vhashB, vhashB, 64 ); + + shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, 64 ); + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 ); + + simd_2way_update_close( &ctx.simd, vhashA, vhashA, 512 ); + simd_2way_init( 
&ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); + + mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 ); + mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 ); + + update_final_echo( &ctx.echo, (BitSequence *)hash0, + (const BitSequence *) hash0, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash1, + (const BitSequence *) hash1, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash2, + (const BitSequence *) hash2, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash3, + (const BitSequence *) hash3, 512 ); + +// 2 + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + bmw512_4way_init( &ctx.bmw ); + bmw512_4way( &ctx.bmw, vhash, 64 ); + bmw512_4way_close( &ctx.bmw, vhash ); + + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + skein512_4way_init( &ctx.skein ); + skein512_4way( &ctx.skein, vhash, 64 ); + skein512_4way_close( &ctx.skein, vhash ); + + jh512_4way_init( &ctx.jh ); + jh512_4way( &ctx.jh, vhash, 64 ); + jh512_4way_close( &ctx.jh, vhash ); + + keccak512_4way_init( &ctx.keccak ); + keccak512_4way( &ctx.keccak, vhash, 64 ); + keccak512_4way_close( &ctx.keccak, vhash ); + + mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 ); + + luffa_2way_init( &ctx.luffa, 512 ); + luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); + luffa_2way_init( &ctx.luffa, 512 ); + luffa_2way_update_close( &ctx.luffa, vhashB, vhashB, 64 ); + + cube_2way_init( &ctx.cube, 512, 16, 32 ); + cube_2way_update_close( &ctx.cube, vhashA, vhashA, 64 ); + cube_2way_init( &ctx.cube, 512, 16, 32 ); + cube_2way_update_close( &ctx.cube, vhashB, vhashB, 64 ); + + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, 64 ); + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 ); + + simd_2way_init( &ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashA, vhashA, 512 ); + simd_2way_init( &ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); + + mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 ); + mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 ); + + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash0, + (const BitSequence *) hash0, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash1, + (const BitSequence *) hash1, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash2, + (const BitSequence *) hash2, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash3, + (const BitSequence *) hash3, 512 ); + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + hamsi512_4way( &ctx.hamsi, vhash, 64 ); + hamsi512_4way_close( &ctx.hamsi, vhash ); + +// 3 + bmw512_4way_init( &ctx.bmw ); + bmw512_4way( &ctx.bmw, vhash, 64 ); + bmw512_4way_close( 
&ctx.bmw, vhash ); + + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + skein512_4way_init( &ctx.skein ); + skein512_4way( &ctx.skein, vhash, 64 ); + skein512_4way_close( &ctx.skein, vhash ); + + jh512_4way_init( &ctx.jh ); + jh512_4way( &ctx.jh, vhash, 64 ); + jh512_4way_close( &ctx.jh, vhash ); + + keccak512_4way_init( &ctx.keccak ); + keccak512_4way( &ctx.keccak, vhash, 64 ); + keccak512_4way_close( &ctx.keccak, vhash ); + + mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 ); + + luffa_2way_init( &ctx.luffa, 512 ); + luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); + luffa_2way_init( &ctx.luffa, 512 ); + luffa_2way_update_close( &ctx.luffa, vhashB, vhashB, 64 ); + + cube_2way_init( &ctx.cube, 512, 16, 32 ); + cube_2way_update_close( &ctx.cube, vhashA, vhashA, 64 ); + cube_2way_init( &ctx.cube, 512, 16, 32 ); + cube_2way_update_close( &ctx.cube, vhashB, vhashB, 64 ); + + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, 64 ); + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 ); + + simd_2way_init( &ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashA, vhashA, 512 ); + simd_2way_init( &ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); + + mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 ); + mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 ); + + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash0, + (const BitSequence *) hash0, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash1, + (const BitSequence *) hash1, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash2, + (const BitSequence *) hash2, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash3, + (const BitSequence *) hash3, 512 ); + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + hamsi512_4way_init( &ctx.hamsi ); + hamsi512_4way( &ctx.hamsi, vhash, 64 ); + hamsi512_4way_close( &ctx.hamsi, vhash ); + + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + + sph_fugue512( &ctx.fugue, hash0, 64 ); + sph_fugue512_close( &ctx.fugue, hash0 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash1, 64 ); + sph_fugue512_close( &ctx.fugue, hash1 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash2, 64 ); + sph_fugue512_close( &ctx.fugue, hash2 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash3, 64 ); + sph_fugue512_close( &ctx.fugue, hash3 ); + +// 4 + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + bmw512_4way_init( &ctx.bmw ); + bmw512_4way( &ctx.bmw, vhash, 64 ); + bmw512_4way_close( &ctx.bmw, vhash ); + + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); + 
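/*
 * A minimal sketch, written with plain loops instead of the avxdefs.h
 * helpers, of the lane layout the mm256_interleave_4x64 and
 * mm256_deinterleave_4x64 calls above are assumed to use: 64-bit word i of
 * lane l sits at v[ i*4 + l ], so one 256-bit load fetches the same word
 * from all four candidate hashes at once.
 */
#include <stdint.h>

static void interleave_4x64_ref( uint64_t *v, const uint64_t *h0,
                                 const uint64_t *h1, const uint64_t *h2,
                                 const uint64_t *h3, int bit_len )
{
   for ( int i = 0; i < bit_len / 64; i++ )
   {
      v[ 4*i     ] = h0[i];
      v[ 4*i + 1 ] = h1[i];
      v[ 4*i + 2 ] = h2[i];
      v[ 4*i + 3 ] = h3[i];
   }
}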
init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + skein512_4way_init( &ctx.skein ); + skein512_4way( &ctx.skein, vhash, 64 ); + skein512_4way_close( &ctx.skein, vhash ); + + jh512_4way_init( &ctx.jh ); + jh512_4way( &ctx.jh, vhash, 64 ); + jh512_4way_close( &ctx.jh, vhash ); + + keccak512_4way_init( &ctx.keccak ); + keccak512_4way( &ctx.keccak, vhash, 64 ); + keccak512_4way_close( &ctx.keccak, vhash ); + + mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 ); + + luffa_2way_init( &ctx.luffa, 512 ); + luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); + luffa_2way_init( &ctx.luffa, 512 ); + luffa_2way_update_close( &ctx.luffa, vhashB, vhashB, 64 ); + + cube_2way_init( &ctx.cube, 512, 16, 32 ); + cube_2way_update_close( &ctx.cube, vhashA, vhashA, 64 ); + cube_2way_init( &ctx.cube, 512, 16, 32 ); + cube_2way_update_close( &ctx.cube, vhashB, vhashB, 64 ); + + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, 64 ); + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 ); + + simd_2way_init( &ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashA, vhashA, 512 ); + simd_2way_init( &ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); + + mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 ); + mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 ); + + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash0, + (const BitSequence *) hash0, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash1, + (const BitSequence *) hash1, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash2, + (const BitSequence *) hash2, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash3, + (const BitSequence *) hash3, 512 ); + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + hamsi512_4way_init( &ctx.hamsi ); + hamsi512_4way( &ctx.hamsi, vhash, 64 ); + hamsi512_4way_close( &ctx.hamsi, vhash ); + + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash0, 64 ); + sph_fugue512_close( &ctx.fugue, hash0 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash1, 64 ); + sph_fugue512_close( &ctx.fugue, hash1 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash2, 64 ); + sph_fugue512_close( &ctx.fugue, hash2 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash3, 64 ); + sph_fugue512_close( &ctx.fugue, hash3 ); + + mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, 512 ); + + shabal512_4way( &ctx.shabal, vhash, 64 ); + shabal512_4way_close( &ctx.shabal, vhash ); + + mm256_reinterleave_4x64( vhashB, vhash, 512 ); + + hamsi512_4way_init( &ctx.hamsi ); + hamsi512_4way( &ctx.hamsi, vhashB, 64 ); + hamsi512_4way_close( &ctx.hamsi, vhash ); + + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash0, + (const BitSequence *) hash0, 512 ); + init_echo( 
&ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash1, + (const BitSequence *) hash1, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash2, + (const BitSequence *) hash2, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash3, + (const BitSequence *) hash3, 512 ); + + mm256_interleave_2x128( vhashA, hash0, hash1, 512 ); + mm256_interleave_2x128( vhashB, hash2, hash3, 512 ); + + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, 64 ); + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 ); + +// 5 + mm256_reinterleave_2x128_4x64( vhash, vhashA, vhashB, 512 ); + + bmw512_4way_init( &ctx.bmw ); + bmw512_4way( &ctx.bmw, vhash, 64 ); + bmw512_4way_close( &ctx.bmw, vhash ); + + mm256_reinterleave_4x32( vhashB, vhash, 512 ); + + shabal512_4way_init( &ctx.shabal ); + shabal512_4way( &ctx.shabal, vhashB, 64 ); + shabal512_4way_close( &ctx.shabal, vhash ); + + mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, 512 ); + + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + skein512_4way_init( &ctx.skein ); + skein512_4way( &ctx.skein, vhash, 64 ); + skein512_4way_close( &ctx.skein, vhash ); + + jh512_4way_init( &ctx.jh ); + jh512_4way( &ctx.jh, vhash, 64 ); + jh512_4way_close( &ctx.jh, vhash ); + + keccak512_4way_init( &ctx.keccak ); + keccak512_4way( &ctx.keccak, vhash, 64 ); + keccak512_4way_close( &ctx.keccak, vhash ); + + mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 ); + + luffa_2way_init( &ctx.luffa, 512 ); + luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); + luffa_2way_init( &ctx.luffa, 512 ); + luffa_2way_update_close( &ctx.luffa, vhashB, vhashB, 64 ); + + cube_2way_init( &ctx.cube, 512, 16, 32 ); + cube_2way_update_close( &ctx.cube, vhashA, vhashA, 64 ); + cube_2way_init( &ctx.cube, 512, 16, 32 ); + cube_2way_update_close( &ctx.cube, vhashB, vhashB, 64 ); + + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, 64 ); + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 ); + + simd_2way_init( &ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashA, vhashA, 512 ); + simd_2way_init( &ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); + + mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 ); + mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 ); + + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash0, + (const BitSequence *) hash0, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash1, + (const BitSequence *) hash1, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash2, + (const BitSequence *) hash2, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash3, + (const BitSequence *) hash3, 512 ); + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, 
hash3, 512 ); + + hamsi512_4way_init( &ctx.hamsi ); + hamsi512_4way( &ctx.hamsi, vhash, 64 ); + hamsi512_4way_close( &ctx.hamsi, vhash ); + + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash0, 64 ); + sph_fugue512_close( &ctx.fugue, hash0 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash1, 64 ); + sph_fugue512_close( &ctx.fugue, hash1 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash2, 64 ); + sph_fugue512_close( &ctx.fugue, hash2 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash3, 64 ); + sph_fugue512_close( &ctx.fugue, hash3 ); + + mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, 512 ); + + shabal512_4way_init( &ctx.shabal ); + shabal512_4way( &ctx.shabal, vhash, 64 ); + shabal512_4way_close( &ctx.shabal, vhash ); + + mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, 512 ); + + sph_whirlpool( &ctx.whirlpool, hash0, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash0 ); + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash1, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash1 ); + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash2, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash2 ); + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash3, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash3 ); + +// 6 + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + bmw512_4way_init( &ctx.bmw ); + bmw512_4way( &ctx.bmw, vhash, 64 ); + bmw512_4way_close( &ctx.bmw, vhash ); + + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + skein512_4way_init( &ctx.skein ); + skein512_4way( &ctx.skein, vhash, 64 ); + skein512_4way_close( &ctx.skein, vhash ); + + jh512_4way_init( &ctx.jh ); + jh512_4way( &ctx.jh, vhash, 64 ); + jh512_4way_close( &ctx.jh, vhash ); + + keccak512_4way_init( &ctx.keccak ); + keccak512_4way( &ctx.keccak, vhash, 64 ); + keccak512_4way_close( &ctx.keccak, vhash ); + + mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 ); + + luffa_2way_init( &ctx.luffa, 512 ); + luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); + luffa_2way_init( &ctx.luffa, 512 ); + luffa_2way_update_close( &ctx.luffa, vhashB, vhashB, 64 ); + + cube_2way_init( &ctx.cube, 512, 16, 32 ); + cube_2way_update_close( &ctx.cube, vhashA, vhashA, 64 ); + cube_2way_init( &ctx.cube, 512, 16, 32 ); + cube_2way_update_close( &ctx.cube, vhashB, vhashB, 64 ); + + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, 64 ); + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 ); + + simd_2way_init( &ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashA, vhashA, 512 ); + simd_2way_init( &ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); + + mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 ); + 
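/*
 * A minimal sketch of why several lane widths appear in this function:
 * the blake, bmw, skein, jh, keccak, hamsi and sha512 4-way variants are
 * fed 4x64-interleaved data (four lanes of 64-bit words per 256-bit
 * vector); luffa, cubehash, shavite and simd are 2-way variants fed 2x128
 * data (hash0/hash1 in vhashA, hash2/hash3 in vhashB); shabal and haval
 * take 4x32 data.  The reinterleave_* helpers only reshuffle words between
 * these layouts.  The reference loop below shows the assumed 2x128 layout:
 * each lane contributes one 128-bit (two 64-bit word) chunk per vector.
 */
#include <stdint.h>

static void interleave_2x128_ref( uint64_t *v, const uint64_t *h0,
                                  const uint64_t *h1, int bit_len )
{
   for ( int i = 0; i < bit_len / 64; i += 2 )
   {
      v[ 2*i     ] = h0[i];   v[ 2*i + 1 ] = h0[i+1];
      v[ 2*i + 2 ] = h1[i];   v[ 2*i + 3 ] = h1[i+1];
   }
}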
mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 ); + + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash0, + (const BitSequence *) hash0, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash1, + (const BitSequence *) hash1, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash2, + (const BitSequence *) hash2, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash3, + (const BitSequence *) hash3, 512 ); + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + hamsi512_4way_init( &ctx.hamsi ); + hamsi512_4way( &ctx.hamsi, vhash, 64 ); + hamsi512_4way_close( &ctx.hamsi, vhash ); + + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash0, 64 ); + sph_fugue512_close( &ctx.fugue, hash0 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash1, 64 ); + sph_fugue512_close( &ctx.fugue, hash1 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash2, 64 ); + sph_fugue512_close( &ctx.fugue, hash2 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash3, 64 ); + sph_fugue512_close( &ctx.fugue, hash3 ); + + mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, 512 ); + + shabal512_4way_init( &ctx.shabal ); + shabal512_4way( &ctx.shabal, vhash, 64 ); + shabal512_4way_close( &ctx.shabal, vhash ); + + mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, 512 ); + + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash0, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash0 ); + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash1, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash1 ); + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash2, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash2 ); + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash3, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash3 ); + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + sha512_4way( &ctx.sha512, vhash, 64 ); + sha512_4way_close( &ctx.sha512, vhash ); + + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash0, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash0 ); + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash1, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash1 ); + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash2, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash2 ); + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash3, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash3 ); + +// 7 + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + bmw512_4way_init( &ctx.bmw ); + bmw512_4way( &ctx.bmw, vhash, 64 ); + bmw512_4way_close( &ctx.bmw, vhash ); + + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 ); + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, 
(char*)hash3, (char*)hash3, 512 ); + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + skein512_4way_init( &ctx.skein ); + skein512_4way( &ctx.skein, vhash, 64 ); + skein512_4way_close( &ctx.skein, vhash ); + + jh512_4way_init( &ctx.jh ); + jh512_4way( &ctx.jh, vhash, 64 ); + jh512_4way_close( &ctx.jh, vhash ); + + keccak512_4way_init( &ctx.keccak ); + keccak512_4way( &ctx.keccak, vhash, 64 ); + keccak512_4way_close( &ctx.keccak, vhash ); + + mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 ); + + luffa_2way_init( &ctx.luffa, 512 ); + luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); + luffa_2way_init( &ctx.luffa, 512 ); + luffa_2way_update_close( &ctx.luffa, vhashB, vhashB, 64 ); + + cube_2way_init( &ctx.cube, 512, 16, 32 ); + cube_2way_update_close( &ctx.cube, vhashA, vhashA, 64 ); + cube_2way_init( &ctx.cube, 512, 16, 32 ); + cube_2way_update_close( &ctx.cube, vhashB, vhashB, 64 ); + + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, 64 ); + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 ); + + simd_2way_init( &ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashA, vhashA, 512 ); + simd_2way_init( &ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); + + mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 ); + mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 ); + + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash0, + (const BitSequence *) hash0, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash1, + (const BitSequence *) hash1, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash2, + (const BitSequence *) hash2, 512 ); + init_echo( &ctx.echo, 512 ); + update_final_echo( &ctx.echo, (BitSequence *)hash3, + (const BitSequence *) hash3, 512 ); + + mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + hamsi512_4way_init( &ctx.hamsi ); + hamsi512_4way( &ctx.hamsi, vhash, 64 ); + hamsi512_4way_close( &ctx.hamsi, vhash ); + + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash0, 64 ); + sph_fugue512_close( &ctx.fugue, hash0 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash1, 64 ); + sph_fugue512_close( &ctx.fugue, hash1 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash2, 64 ); + sph_fugue512_close( &ctx.fugue, hash2 ); + sph_fugue512_init( &ctx.fugue ); + sph_fugue512( &ctx.fugue, hash3, 64 ); + sph_fugue512_close( &ctx.fugue, hash3 ); + + mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, 512 ); + + shabal512_4way_init( &ctx.shabal ); + shabal512_4way( &ctx.shabal, vhash, 64 ); + shabal512_4way_close( &ctx.shabal, vhash ); + + mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, 512 ); + + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash0, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash0 ); + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash1, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash1 ); + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash2, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash2 ); + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool( &ctx.whirlpool, hash3, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash3 ); + + mm256_interleave_4x64( vhash, hash0, 
hash1, hash2, hash3, 512 ); + + sha512_4way_init( &ctx.sha512 ); + sha512_4way( &ctx.sha512, vhash, 64 ); + sha512_4way_close( &ctx.sha512, vhash ); + + mm256_reinterleave_4x32( vhashB, vhash, 512 ); + + haval256_5_4way( &ctx.haval, vhashB, 64 ); + haval256_5_4way_close( &ctx.haval, state ); + +} + +int scanhash_sonoa_4way( int thr_id, struct work *work, uint32_t max_nonce, + uint64_t *hashes_done, struct thr_info *mythr ) +{ + uint32_t hash[4*8] __attribute__ ((aligned (64))); + uint32_t *hash7 = &(hash[7<<2]); + uint32_t lane_hash[8]; + uint32_t vdata[24*4] __attribute__ ((aligned (64))); + uint32_t endiandata[20] __attribute__((aligned(64))); + uint32_t *pdata = work->data; + uint32_t *ptarget = work->target; + uint32_t n = pdata[19]; + const uint32_t first_nonce = pdata[19]; + uint32_t *nonces = work->nonces; + int num_found = 0; + __m256i *noncev = (__m256i*)vdata + 9; // aligned +// uint32_t *noncep = vdata + 73; // 9*8 + 1 + const uint32_t Htarg = ptarget[7]; + /* int */ thr_id = mythr->id; // thr_id arg is deprecated + uint64_t htmax[] = { 0, 0xF, 0xFF, + 0xFFF, 0xFFFF, 0x10000000 }; + uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00, + 0xFFFFF000, 0xFFFF0000, 0 }; + + // Need big endian data + casti_m256i( endiandata, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) ); + casti_m256i( endiandata, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) ); + casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) ); + + uint64_t *edata = (uint64_t*)endiandata; + mm256_interleave_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); + + for ( int m=0; m < 6; m++ ) if ( Htarg <= htmax[m] ) + { + uint32_t mask = masks[m]; + do + { + *noncev = mm256_interleave_blend_32( mm256_bswap_32( + _mm256_set_epi32( n+3, 0,n+2, 0,n+1, 0, n, 0 ) ), + *noncev ); + sonoa_4way_hash( hash, vdata ); + + for ( int lane = 0; lane < 4; lane++ ) + if ( ( ( hash7[ lane ] & mask ) == 0 ) ) + { + mm128_extract_lane_4x32( lane_hash, hash, lane, 256 ); + if ( fulltest( lane_hash, ptarget ) ) + { + pdata[19] = n + lane; + nonces[ num_found++ ] = n + lane; + work_set_target_ratio( work, lane_hash ); + } + } + n += 4; + } while ( ( num_found == 0 ) && ( n < max_nonce ) + && !work_restart[thr_id].restart ); + break; + } + + *hashes_done = n - first_nonce + 1; + return num_found; +} + +#endif diff --git a/algo/x17/sonoa-gate.c b/algo/x17/sonoa-gate.c new file mode 100644 index 0000000..e74073b --- /dev/null +++ b/algo/x17/sonoa-gate.c @@ -0,0 +1,18 @@ +#include "sonoa-gate.h" + +bool register_sonoa_algo( algo_gate_t* gate ) +{ +#if defined (SONOA_4WAY) + init_sonoa_4way_ctx(); + gate->scanhash = (void*)&scanhash_sonoa_4way; + gate->hash = (void*)&sonoa_4way_hash; +#else + init_sonoa_ctx(); + gate->scanhash = (void*)&scanhash_sonoa; + gate->hash = (void*)&sonoa_hash; +#endif + gate->get_max64 = (void*)&get_max64_0x1ffff; + gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT; + return true; +}; + diff --git a/algo/x17/sonoa-gate.h b/algo/x17/sonoa-gate.h new file mode 100644 index 0000000..f00efd8 --- /dev/null +++ b/algo/x17/sonoa-gate.h @@ -0,0 +1,32 @@ +#ifndef SONOA_GATE_H__ +#define SONOA_GATE_H__ 1 + +#include "algo-gate-api.h" +#include + +#if defined(__AVX2__) && defined(__AES__) + #define SONOA_4WAY +#endif + +bool register_sonoa_algo( algo_gate_t* gate ); + +#if defined(SONOA_4WAY) + +void sonoa_4way_hash( void *state, const void *input ); + +int scanhash_sonoa_4way( int thr_id, struct work *work, uint32_t max_nonce, + uint64_t *hashes_done, struct thr_info *mythr ); + +void 
init_sonoa_4way_ctx(); + +#endif + +void sonoa_hash( void *state, const void *input ); + +int scanhash_sonoa( int thr_id, struct work *work, uint32_t max_nonce, + uint64_t *hashes_done, struct thr_info *mythr ); + +void init_sonoa_ctx(); + +#endif + diff --git a/algo/x17/sonoa.c b/algo/x17/sonoa.c new file mode 100644 index 0000000..5ace927 --- /dev/null +++ b/algo/x17/sonoa.c @@ -0,0 +1,648 @@ +#include "sonoa-gate.h" +#include +#include +#include +#include +#include "algo/blake/sph_blake.h" +#include "algo/bmw/sph_bmw.h" +#include "algo/groestl/sph_groestl.h" +#include "algo/jh/sph_jh.h" +#include "algo/keccak/sph_keccak.h" +#include "algo/skein/sph_skein.h" +#include "algo/shavite/sph_shavite.h" +#include "algo/hamsi/sph_hamsi.h" +#include "algo/fugue/sph_fugue.h" +#include "algo/shabal/sph_shabal.h" +#include "algo/whirlpool/sph_whirlpool.h" +#include "algo/haval/sph-haval.h" +#include "algo/luffa/luffa_for_sse2.h" +#include "algo/cubehash/cubehash_sse2.h" +#include "algo/simd/nist.h" +#include "algo/blake/sse2/blake.c" +#include "algo/bmw/sse2/bmw.c" +#include "algo/keccak/sse2/keccak.c" +#include "algo/skein/sse2/skein.c" +#include "algo/jh/sse2/jh_sse2_opt64.h" +#include +#if defined(__AES__) + #include "algo/echo/aes_ni/hash_api.h" + #include "algo/groestl/aes_ni/hash-groestl.h" +#else + #include "algo/groestl/sph_groestl.h" + #include "algo/echo/sph_echo.h" +#endif + +typedef struct { + sph_blake512_context blake; + sph_bmw512_context bmw; +#if defined(__AES__) + hashState_echo echo; + hashState_groestl groestl; +#else + sph_groestl512_context groestl; + sph_echo512_context echo; +#endif + sph_jh512_context jh; + sph_keccak512_context keccak; + sph_skein512_context skein; + hashState_luffa luffa; + cubehashParam cubehash; + sph_shavite512_context shavite; + hashState_sd simd; + sph_hamsi512_context hamsi; + sph_fugue512_context fugue; + sph_shabal512_context shabal; + sph_whirlpool_context whirlpool; + SHA512_CTX sha512; + sph_haval256_5_context haval; +} sonoa_ctx_holder; + +sonoa_ctx_holder sonoa_ctx __attribute__ ((aligned (64))); + +void init_sonoa_ctx() +{ + sph_blake512_init( &sonoa_ctx.blake); + sph_bmw512_init( &sonoa_ctx.bmw); +#if defined(__AES__) + init_echo( &sonoa_ctx.echo, 512 ); + init_groestl( &sonoa_ctx.groestl, 64 ); +#else + sph_groestl512_init(&sonoa_ctx.groestl ); + sph_echo512_init( &sonoa_ctx.echo ); +#endif + sph_skein512_init( &sonoa_ctx.skein); + sph_jh512_init( &sonoa_ctx.jh); + sph_keccak512_init( &sonoa_ctx.keccak ); + init_luffa( &sonoa_ctx.luffa, 512 ); + cubehashInit( &sonoa_ctx.cubehash, 512, 16, 32 ); + sph_shavite512_init( &sonoa_ctx.shavite ); + init_sd( &sonoa_ctx.simd, 512 ); + sph_hamsi512_init( &sonoa_ctx.hamsi ); + sph_fugue512_init( &sonoa_ctx.fugue ); + sph_shabal512_init( &sonoa_ctx.shabal ); + sph_whirlpool_init( &sonoa_ctx.whirlpool ); + SHA512_Init( &sonoa_ctx.sha512 ); + sph_haval256_5_init(&sonoa_ctx.haval); +}; + +void sonoa_hash( void *state, const void *input ) +{ + uint8_t hash[128] __attribute__ ((aligned (64))); + sonoa_ctx_holder ctx __attribute__ ((aligned (64))); + memcpy( &ctx, &sonoa_ctx, sizeof(sonoa_ctx) ); + + sph_blake512(&ctx.blake, input, 80); + sph_blake512_close(&ctx.blake, hash); + + sph_bmw512(&ctx.bmw, hash, 64); + sph_bmw512_close(&ctx.bmw, hash); + +#if defined(__AES__) + update_and_final_groestl( &ctx.groestl, (char*)hash, + (const char*)hash, 512 ); +#else + sph_groestl512(&ctx.groestl, hash, 64); + sph_groestl512_close(&ctx.groestl, hash); +#endif + + sph_skein512(&ctx.skein, hash, 64); + 
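/*
 * A minimal sketch of the context-reuse idiom used by sonoa_hash() above,
 * with a hypothetical two-pass BMW chain standing in for the full
 * 17-algorithm passes: a prototype holder is initialised once at algo
 * registration, each call memcpy's it into a local copy, and members that
 * recur in later passes are re-initialised in place before being reused.
 */
#include <string.h>
#include "algo/bmw/sph_bmw.h"

typedef struct { sph_bmw512_context bmw; } mini_ctx_holder;  /* hypothetical */
static mini_ctx_holder mini_ctx;

static void mini_init() { sph_bmw512_init( &mini_ctx.bmw ); }  /* once */

static void mini_hash( void *out, const void *in )
{
   mini_ctx_holder ctx;
   memcpy( &ctx, &mini_ctx, sizeof ctx );        /* cheap per-call copy */
   sph_bmw512( &ctx.bmw, in, 64 );               /* pass 1: pre-initialised */
   sph_bmw512_close( &ctx.bmw, out );
   sph_bmw512_init( &ctx.bmw );                  /* re-init before pass 2 */
   sph_bmw512( &ctx.bmw, out, 64 );
   sph_bmw512_close( &ctx.bmw, out );
}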
sph_skein512_close(&ctx.skein, hash); + + sph_jh512(&ctx.jh, hash, 64); + sph_jh512_close(&ctx.jh, hash); + + sph_keccak512(&ctx.keccak, hash, 64); + sph_keccak512_close(&ctx.keccak, hash); + + update_and_final_luffa( &ctx.luffa, (BitSequence*)hash, + (const BitSequence*)hash, 64 ); + + cubehashUpdateDigest( &ctx.cubehash, (byte*) hash, + (const byte*)hash, 64 ); + + sph_shavite512(&ctx.shavite, hash, 64); + sph_shavite512_close(&ctx.shavite, hash); + + update_final_sd( &ctx.simd, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); + +#if defined(__AES__) + update_final_echo ( &ctx.echo, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); +#else + sph_echo512(&ctx.echo, hash, 64); + sph_echo512_close(&ctx.echo, hash); +#endif + +// + + sph_bmw512_init( &ctx.bmw); + sph_bmw512(&ctx.bmw, hash, 64); + sph_bmw512_close(&ctx.bmw, hash); + +#if defined(__AES__) + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash, + (const char*)hash, 512 ); +#else + sph_groestl512_init(&ctx.groestl ); + sph_groestl512(&ctx.groestl, hash, 64); + sph_groestl512_close(&ctx.groestl, hash); +#endif + + sph_skein512_init( &ctx.skein); + sph_skein512(&ctx.skein, hash, 64); + sph_skein512_close(&ctx.skein, hash); + + sph_jh512_init( &ctx.jh); + sph_jh512(&ctx.jh, hash, 64); + sph_jh512_close(&ctx.jh, hash); + + sph_keccak512_init( &ctx.keccak ); + sph_keccak512(&ctx.keccak, hash, 64); + sph_keccak512_close(&ctx.keccak, hash); + + init_luffa( &ctx.luffa, 512 ); + update_and_final_luffa( &ctx.luffa, (BitSequence*)hash, + (const BitSequence*)hash, 64 ); + + cubehashInit( &ctx.cubehash, 512, 16, 32 ); + cubehashUpdateDigest( &ctx.cubehash, (byte*) hash, + (const byte*)hash, 64 ); + + sph_shavite512_init( &ctx.shavite ); + sph_shavite512(&ctx.shavite, hash, 64); + sph_shavite512_close(&ctx.shavite, hash); + + init_sd( &ctx.simd, 512 ); + update_final_sd( &ctx.simd, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); + +#if defined(__AES__) + init_echo( &ctx.echo, 512 ); + update_final_echo ( &ctx.echo, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); +#else + sph_echo512_init( &ctx.echo ); + sph_echo512(&ctx.echo, hash, 64); + sph_echo512_close(&ctx.echo, hash); +#endif + + sph_hamsi512(&ctx.hamsi, hash, 64); + sph_hamsi512_close(&ctx.hamsi, hash); + +// + + sph_bmw512_init( &ctx.bmw); + sph_bmw512(&ctx.bmw, hash, 64); + sph_bmw512_close(&ctx.bmw, hash); + +#if defined(__AES__) + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash, + (const char*)hash, 512 ); +#else + sph_groestl512_init(&ctx.groestl ); + sph_groestl512(&ctx.groestl, hash, 64); + sph_groestl512_close(&ctx.groestl, hash); +#endif + + sph_skein512_init( &ctx.skein); + sph_skein512(&ctx.skein, hash, 64); + sph_skein512_close(&ctx.skein, hash); + + sph_jh512_init( &ctx.jh); + sph_jh512(&ctx.jh, hash, 64); + sph_jh512_close(&ctx.jh, hash); + + sph_keccak512_init( &ctx.keccak ); + sph_keccak512(&ctx.keccak, hash, 64); + sph_keccak512_close(&ctx.keccak, hash); + + init_luffa( &ctx.luffa, 512 ); + update_and_final_luffa( &ctx.luffa, (BitSequence*)hash, + (const BitSequence*)hash, 64 ); + + cubehashInit( &ctx.cubehash, 512, 16, 32 ); + cubehashUpdateDigest( &ctx.cubehash, (byte*) hash, + (const byte*)hash, 64 ); + + sph_shavite512_init( &ctx.shavite ); + sph_shavite512(&ctx.shavite, hash, 64); + sph_shavite512_close(&ctx.shavite, hash); + + init_sd( &ctx.simd, 512 ); + update_final_sd( &ctx.simd, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); + +#if 
defined(__AES__) + init_echo( &ctx.echo, 512 ); + update_final_echo ( &ctx.echo, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); +#else + sph_echo512_init( &ctx.echo ); + sph_echo512(&ctx.echo, hash, 64); + sph_echo512_close(&ctx.echo, hash); +#endif + + sph_hamsi512_init( &ctx.hamsi ); + sph_hamsi512(&ctx.hamsi, hash, 64); + sph_hamsi512_close(&ctx.hamsi, hash); + + sph_fugue512(&ctx.fugue, hash, 64); + sph_fugue512_close(&ctx.fugue, hash); + +// + + sph_bmw512_init( &ctx.bmw); + sph_bmw512(&ctx.bmw, hash, 64); + sph_bmw512_close(&ctx.bmw, hash); + +#if defined(__AES__) + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash, + (const char*)hash, 512 ); +#else + sph_groestl512_init(&ctx.groestl ); + sph_groestl512(&ctx.groestl, hash, 64); + sph_groestl512_close(&ctx.groestl, hash); +#endif + + sph_skein512_init( &ctx.skein); + sph_skein512(&ctx.skein, hash, 64); + sph_skein512_close(&ctx.skein, hash); + + sph_jh512_init( &ctx.jh); + sph_jh512(&ctx.jh, hash, 64); + sph_jh512_close(&ctx.jh, hash); + + sph_keccak512_init( &ctx.keccak ); + sph_keccak512(&ctx.keccak, hash, 64); + sph_keccak512_close(&ctx.keccak, hash); + + init_luffa( &ctx.luffa, 512 ); + update_and_final_luffa( &ctx.luffa, (BitSequence*)hash, + (const BitSequence*)hash, 64 ); + + cubehashInit( &ctx.cubehash, 512, 16, 32 ); + cubehashUpdateDigest( &ctx.cubehash, (byte*) hash, + (const byte*)hash, 64 ); + + sph_shavite512_init( &ctx.shavite ); + sph_shavite512(&ctx.shavite, hash, 64); + sph_shavite512_close(&ctx.shavite, hash); + + init_sd( &ctx.simd, 512 ); + update_final_sd( &ctx.simd, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); + +#if defined(__AES__) + init_echo( &ctx.echo, 512 ); + update_final_echo ( &ctx.echo, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); +#else + sph_echo512_init( &ctx.echo ); + sph_echo512(&ctx.echo, hash, 64); + sph_echo512_close(&ctx.echo, hash); +#endif + + sph_hamsi512_init( &ctx.hamsi ); + sph_hamsi512(&ctx.hamsi, hash, 64); + sph_hamsi512_close(&ctx.hamsi, hash); + + sph_fugue512_init( &ctx.fugue ); + sph_fugue512(&ctx.fugue, hash, 64); + sph_fugue512_close(&ctx.fugue, hash); + + sph_shabal512(&ctx.shabal, hash, 64); + sph_shabal512_close(&ctx.shabal, hash); + + sph_hamsi512_init( &ctx.hamsi ); + sph_hamsi512(&ctx.hamsi, hash, 64); + sph_hamsi512_close(&ctx.hamsi, hash); + +#if defined(__AES__) + init_echo( &ctx.echo, 512 ); + update_final_echo ( &ctx.echo, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); +#else + sph_echo512_init( &ctx.echo ); + sph_echo512(&ctx.echo, hash, 64); + sph_echo512_close(&ctx.echo, hash); +#endif + + sph_shavite512_init( &ctx.shavite ); + sph_shavite512(&ctx.shavite, hash, 64); + sph_shavite512_close(&ctx.shavite, hash); + +// + + sph_bmw512_init( &ctx.bmw); + sph_bmw512(&ctx.bmw, hash, 64); + sph_bmw512_close(&ctx.bmw, hash); + + sph_shabal512_init( &ctx.shabal ); + sph_shabal512(&ctx.shabal, hash, 64); + sph_shabal512_close(&ctx.shabal, hash); + +#if defined(__AES__) + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash, + (const char*)hash, 512 ); +#else + sph_groestl512_init(&ctx.groestl ); + sph_groestl512(&ctx.groestl, hash, 64); + sph_groestl512_close(&ctx.groestl, hash); +#endif + + sph_skein512_init( &ctx.skein); + sph_skein512(&ctx.skein, hash, 64); + sph_skein512_close(&ctx.skein, hash); + + sph_jh512_init( &ctx.jh); + sph_jh512(&ctx.jh, hash, 64); + sph_jh512_close(&ctx.jh, hash); + + sph_keccak512_init( &ctx.keccak ); + sph_keccak512(&ctx.keccak, 
hash, 64); + sph_keccak512_close(&ctx.keccak, hash); + + init_luffa( &ctx.luffa, 512 ); + update_and_final_luffa( &ctx.luffa, (BitSequence*)hash, + (const BitSequence*)hash, 64 ); + + cubehashInit( &ctx.cubehash, 512, 16, 32 ); + cubehashUpdateDigest( &ctx.cubehash, (byte*) hash, + (const byte*)hash, 64 ); + + sph_shavite512_init( &ctx.shavite ); + sph_shavite512(&ctx.shavite, hash, 64); + sph_shavite512_close(&ctx.shavite, hash); + + init_sd( &ctx.simd, 512 ); + update_final_sd( &ctx.simd, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); + +#if defined(__AES__) + init_echo( &ctx.echo, 512 ); + update_final_echo ( &ctx.echo, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); +#else + sph_echo512_init( &ctx.echo ); + sph_echo512(&ctx.echo, hash, 64); + sph_echo512_close(&ctx.echo, hash); +#endif + + sph_hamsi512_init( &ctx.hamsi ); + sph_hamsi512(&ctx.hamsi, hash, 64); + sph_hamsi512_close(&ctx.hamsi, hash); + + sph_fugue512_init( &ctx.fugue ); + sph_fugue512(&ctx.fugue, hash, 64); + sph_fugue512_close(&ctx.fugue, hash); + + sph_shabal512_init( &ctx.shabal ); + sph_shabal512(&ctx.shabal, hash, 64); + sph_shabal512_close(&ctx.shabal, hash); + + sph_whirlpool(&ctx.whirlpool, hash, 64); + sph_whirlpool_close(&ctx.whirlpool, hash); + +// + sph_bmw512_init( &ctx.bmw); + sph_bmw512(&ctx.bmw, hash, 64); + sph_bmw512_close(&ctx.bmw, hash); + +#if defined(__AES__) + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash, + (const char*)hash, 512 ); +#else + sph_groestl512_init(&ctx.groestl ); + sph_groestl512(&ctx.groestl, hash, 64); + sph_groestl512_close(&ctx.groestl, hash); +#endif + + sph_skein512_init( &ctx.skein); + sph_skein512(&ctx.skein, hash, 64); + sph_skein512_close(&ctx.skein, hash); + + sph_jh512_init( &ctx.jh); + sph_jh512(&ctx.jh, hash, 64); + sph_jh512_close(&ctx.jh, hash); + + sph_keccak512_init( &ctx.keccak ); + sph_keccak512(&ctx.keccak, hash, 64); + sph_keccak512_close(&ctx.keccak, hash); + + init_luffa( &ctx.luffa, 512 ); + update_and_final_luffa( &ctx.luffa, (BitSequence*)hash, + (const BitSequence*)hash, 64 ); + + cubehashInit( &ctx.cubehash, 512, 16, 32 ); + cubehashUpdateDigest( &ctx.cubehash, (byte*) hash, + (const byte*)hash, 64 ); + + sph_shavite512_init( &ctx.shavite ); + sph_shavite512(&ctx.shavite, hash, 64); + sph_shavite512_close(&ctx.shavite, hash); + + init_sd( &ctx.simd, 512 ); + update_final_sd( &ctx.simd, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); + +#if defined(__AES__) + init_echo( &ctx.echo, 512 ); + update_final_echo ( &ctx.echo, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); +#else + sph_echo512_init( &ctx.echo ); + sph_echo512(&ctx.echo, hash, 64); + sph_echo512_close(&ctx.echo, hash); +#endif + + sph_hamsi512_init( &ctx.hamsi ); + sph_hamsi512(&ctx.hamsi, hash, 64); + sph_hamsi512_close(&ctx.hamsi, hash); + + sph_fugue512_init( &ctx.fugue ); + sph_fugue512(&ctx.fugue, hash, 64); + sph_fugue512_close(&ctx.fugue, hash); + + sph_shabal512_init( &ctx.shabal ); + sph_shabal512(&ctx.shabal, hash, 64); + sph_shabal512_close(&ctx.shabal, hash); + + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool(&ctx.whirlpool, hash, 64); + sph_whirlpool_close(&ctx.whirlpool, hash); + + SHA512_Update( &ctx.sha512, hash, 64 ); + SHA512_Final( (unsigned char*) hash, &ctx.sha512 ); + + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool(&ctx.whirlpool, hash, 64); + sph_whirlpool_close(&ctx.whirlpool, hash); + +// + + sph_bmw512_init( &ctx.bmw); + sph_bmw512(&ctx.bmw, hash, 64); + 
sph_bmw512_close(&ctx.bmw, hash); + +#if defined(__AES__) + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash, + (const char*)hash, 512 ); +#else + sph_groestl512_init(&ctx.groestl ); + sph_groestl512(&ctx.groestl, hash, 64); + sph_groestl512_close(&ctx.groestl, hash); +#endif + + sph_skein512_init( &ctx.skein); + sph_skein512(&ctx.skein, hash, 64); + sph_skein512_close(&ctx.skein, hash); + + sph_jh512_init( &ctx.jh); + sph_jh512(&ctx.jh, hash, 64); + sph_jh512_close(&ctx.jh, hash); + + sph_keccak512_init( &ctx.keccak ); + sph_keccak512(&ctx.keccak, hash, 64); + sph_keccak512_close(&ctx.keccak, hash); + + init_luffa( &ctx.luffa, 512 ); + update_and_final_luffa( &ctx.luffa, (BitSequence*)hash, + (const BitSequence*)hash, 64 ); + + cubehashInit( &ctx.cubehash, 512, 16, 32 ); + cubehashUpdateDigest( &ctx.cubehash, (byte*) hash, + (const byte*)hash, 64 ); + + sph_shavite512_init( &ctx.shavite ); + sph_shavite512(&ctx.shavite, hash, 64); + sph_shavite512_close(&ctx.shavite, hash); + + init_sd( &ctx.simd, 512 ); + update_final_sd( &ctx.simd, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); + +#if defined(__AES__) + init_echo( &ctx.echo, 512 ); + update_final_echo ( &ctx.echo, (BitSequence *)hash, + (const BitSequence *)hash, 512 ); +#else + sph_echo512_init( &ctx.echo ); + sph_echo512(&ctx.echo, hash, 64); + sph_echo512_close(&ctx.echo, hash); +#endif + + sph_hamsi512_init( &ctx.hamsi ); + sph_hamsi512(&ctx.hamsi, hash, 64); + sph_hamsi512_close(&ctx.hamsi, hash); + + sph_fugue512_init( &ctx.fugue ); + sph_fugue512(&ctx.fugue, hash, 64); + sph_fugue512_close(&ctx.fugue, hash); + + sph_shabal512_init( &ctx.shabal ); + sph_shabal512(&ctx.shabal, hash, 64); + sph_shabal512_close(&ctx.shabal, hash); + + sph_whirlpool_init( &ctx.whirlpool ); + sph_whirlpool(&ctx.whirlpool, hash, 64); + sph_whirlpool_close(&ctx.whirlpool, hash); + + SHA512_Init( &ctx.sha512 ); + SHA512_Update( &ctx.sha512, hash, 64 ); + SHA512_Final( (unsigned char*) hash, &ctx.sha512 ); + + sph_haval256_5(&ctx.haval,(const void*) hash, 64); + sph_haval256_5_close(&ctx.haval, hash); + + memcpy(state, hash, 32); +} + +int scanhash_sonoa( int thr_id, struct work *work, uint32_t max_nonce, + uint64_t *hashes_done, struct thr_info *mythr ) +{ +uint32_t _ALIGN(128) hash32[8]; +uint32_t _ALIGN(128) endiandata[20]; +uint32_t *pdata = work->data; +uint32_t *ptarget = work->target; +const uint32_t first_nonce = pdata[19]; +const uint32_t Htarg = ptarget[7]; +uint32_t n = pdata[19] - 1; +/* int */ thr_id = mythr->id; // thr_id arg is deprecated + + uint64_t htmax[] = + { + 0, + 0xF, + 0xFF, + 0xFFF, + 0xFFFF, + 0x10000000 + }; + uint32_t masks[] = + { + 0xFFFFFFFF, + 0xFFFFFFF0, + 0xFFFFFF00, + 0xFFFFF000, + 0xFFFF0000, + 0 + }; + + + // we need bigendian data... 
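+ // The next five statements convert the first 80 bytes (20 words) of the block header to big endian, 16 bytes at a time, using the vectored byte swap; the nonce in endiandata[19] is re-encoded separately on every pass of the search loop below.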
+ casti_m128i( endiandata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) ); + casti_m128i( endiandata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) ); + casti_m128i( endiandata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) ); + casti_m128i( endiandata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) ); + casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) ); + +#ifdef DEBUG_ALGO + printf("[%d] Htarg=%X\n", thr_id, Htarg); +#endif + for ( int m = 0; m < 6; m++ ) + { + if ( Htarg <= htmax[m] ) + { + uint32_t mask = masks[m]; + do + { + pdata[19] = ++n; + be32enc(&endiandata[19], n); + sonoa_hash(hash32, endiandata); +#ifndef DEBUG_ALGO + if ( ( !( hash32[7] & mask ) ) && fulltest( hash32, ptarget ) ) + { + work_set_target_ratio( work, hash32 ); + *hashes_done = n - first_nonce + 1; + return 1; + } +#else + if (!(n % 0x1000) && !thr_id) printf("."); + if ( !(hash32[7] & mask) ) + { + printf("[%d]",thr_id); + if ( fulltest( hash32, ptarget ) ) + { + work_set_target_ratio( work, hash32 ); + *hashes_done = n - first_nonce + 1; + return 1; + } + } +#endif + } while (n < max_nonce && !work_restart[thr_id].restart); + // see blake.c if else to understand the loop on htmax => mask + break; + } + } + + *hashes_done = n - first_nonce + 1; + pdata[19] = n; + return 0; +} diff --git a/algo/x17/x17-4way.c b/algo/x17/x17-4way.c index f7df6a8..c57c3ad 100644 --- a/algo/x17/x17-4way.c +++ b/algo/x17/x17-4way.c @@ -15,6 +15,7 @@ #include "algo/luffa/luffa-hash-2way.h" #include "algo/cubehash/cube-hash-2way.h" #include "algo/shavite/sph_shavite.h" +#include "algo/shavite/shavite-hash-2way.h" #include "algo/simd/simd-hash-2way.h" #include "algo/echo/aes_ni/hash_api.h" #include "algo/hamsi/hamsi-hash-4way.h" @@ -24,7 +25,9 @@ #include "algo/haval/haval-hash-4way.h" #include "algo/sha/sha2-hash-4way.h" -typedef struct { +//typedef struct { +union _x17_4way_context_overlay +{ blake512_4way_context blake; bmw512_4way_context bmw; hashState_groestl groestl; @@ -33,7 +36,7 @@ typedef struct { keccak512_4way_context keccak; luffa_2way_context luffa; cube_2way_context cube; - sph_shavite512_context shavite; + shavite512_2way_context shavite; simd_2way_context simd; hashState_echo echo; hamsi512_4way_context hamsi; @@ -42,8 +45,10 @@ typedef struct { sph_whirlpool_context whirlpool; sha512_4way_context sha512; haval256_5_4way_context haval; -} x17_4way_ctx_holder; +}; +typedef union _x17_4way_context_overlay x17_4way_context_overlay; +/* x17_4way_ctx_holder x17_4way_ctx __attribute__ ((aligned (64))); void init_x17_4way_ctx() @@ -56,16 +61,17 @@ void init_x17_4way_ctx() keccak512_4way_init( &x17_4way_ctx.keccak ); luffa_2way_init( &x17_4way_ctx.luffa, 512 ); cube_2way_init( &x17_4way_ctx.cube, 512, 16, 32 ); - sph_shavite512_init( &x17_4way_ctx.shavite ); + shavite512_2way_init( &x17_4way_ctx.shavite ); simd_2way_init( &x17_4way_ctx.simd, 512 ); init_echo( &x17_4way_ctx.echo, 512 ); hamsi512_4way_init( &x17_4way_ctx.hamsi ); sph_fugue512_init( &x17_4way_ctx.fugue ); shabal512_4way_init( &x17_4way_ctx.shabal ); + sph_whirlpool_init( &x17_4way_ctx.whirlpool ); sha512_4way_init( &x17_4way_ctx.sha512 ); haval256_5_4way_init( &x17_4way_ctx.haval ); }; - +*/ void x17_4way_hash( void *state, const void *input ) { uint64_t hash0[8] __attribute__ ((aligned (64))); @@ -73,155 +79,159 @@ void x17_4way_hash( void *state, const void *input ) uint64_t hash2[8] __attribute__ ((aligned (64))); uint64_t hash3[8] __attribute__ ((aligned (64))); uint64_t vhash[8*4] __attribute__ ((aligned (64))); + uint64_t vhashA[8*4] 
__attribute__ ((aligned (64))); uint64_t vhashB[8*4] __attribute__ ((aligned (64))); - x17_4way_ctx_holder ctx; - memcpy( &ctx, &x17_4way_ctx, sizeof(x17_4way_ctx) ); + x17_4way_context_overlay ctx; +// memcpy( &ctx, &x17_4way_ctx, sizeof(x17_4way_ctx) ); - // 1 Blake 4 way 64 bit + // 1 Blake parallel 4 way 64 bit + blake512_4way_init( &ctx.blake ); blake512_4way( &ctx.blake, input, 80 ); blake512_4way_close( &ctx.blake, vhash ); // 2 Bmw + bmw512_4way_init( &ctx.bmw ); bmw512_4way( &ctx.bmw, vhash, 64 ); bmw512_4way_close( &ctx.bmw, vhash ); - // Serial + // Serialize mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 3 Groestl + init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash0, (char*)hash0, 512 ); - memcpy( &ctx.groestl, &x17_4way_ctx.groestl, sizeof(hashState_groestl) ); + init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash1, (char*)hash1, 512 ); - memcpy( &ctx.groestl, &x17_4way_ctx.groestl, sizeof(hashState_groestl) ); + init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash2, (char*)hash2, 512 ); - memcpy( &ctx.groestl, &x17_4way_ctx.groestl, sizeof(hashState_groestl) ); + init_groestl( &ctx.groestl, 64 ); update_and_final_groestl( &ctx.groestl, (char*)hash3, (char*)hash3, 512 ); - // Parallel 4way + // Parallellize mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); - // 4 Skein + // 4 Skein parallel 4 way 64 bit + skein512_4way_init( &ctx.skein ); skein512_4way( &ctx.skein, vhash, 64 ); skein512_4way_close( &ctx.skein, vhash ); // 5 JH + jh512_4way_init( &ctx.jh ); jh512_4way( &ctx.jh, vhash, 64 ); jh512_4way_close( &ctx.jh, vhash ); // 6 Keccak + keccak512_4way_init( &ctx.keccak ); keccak512_4way( &ctx.keccak, vhash, 64 ); keccak512_4way_close( &ctx.keccak, vhash ); - mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); + // 7 Luffa parallel 2 way 128 bit + mm256_reinterleave_4x64_2x128( vhashA, vhashB, vhash, 512 ); - // 7 Luffa parallel 2 way - mm256_interleave_2x128( vhash, hash0, hash1, 512 ); - mm256_interleave_2x128( vhashB, hash2, hash3, 512 ); - luffa_2way_update_close( &ctx.luffa, vhash, vhash, 64 ); + luffa_2way_init( &ctx.luffa, 512 ); + luffa_2way_update_close( &ctx.luffa, vhashA, vhashA, 64 ); luffa_2way_init( &ctx.luffa, 512 ); luffa_2way_update_close( &ctx.luffa, vhashB, vhashB, 64 ); - // 8 Cubehash parallel 2 way - cube_2way_update_close( &ctx.cube, vhash, vhash, 64 ); - cube_2way_reinit( &ctx.cube ); + // 8 Cubehash + cube_2way_init( &ctx.cube, 512, 16, 32 ); + cube_2way_update_close( &ctx.cube, vhashA, vhashA, 64 ); + cube_2way_init( &ctx.cube, 512, 16, 32 ); cube_2way_update_close( &ctx.cube, vhashB, vhashB, 64 ); - mm256_deinterleave_2x128( hash0, hash1, vhash, 512 ); + // 9 Shavite + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashA, vhashA, 64 ); + shavite512_2way_init( &ctx.shavite ); + shavite512_2way_update_close( &ctx.shavite, vhashB, vhashB, 64 ); + + // 10 Simd + simd_2way_init( &ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashA, vhashA, 512 ); + simd_2way_init( &ctx.simd, 512 ); + simd_2way_update_close( &ctx.simd, vhashB, vhashB, 512 ); + + mm256_deinterleave_2x128( hash0, hash1, vhashA, 512 ); mm256_deinterleave_2x128( hash2, hash3, vhashB, 512 ); - // 9 Shavite serial - sph_shavite512( &ctx.shavite, hash0, 64 ); - sph_shavite512_close( &ctx.shavite, hash0 ); - memcpy( &ctx.shavite, &x17_4way_ctx.shavite, - sizeof(sph_shavite512_context) ); - 
sph_shavite512( &ctx.shavite, hash1, 64 ); - sph_shavite512_close( &ctx.shavite, hash1 ); - memcpy( &ctx.shavite, &x17_4way_ctx.shavite, - sizeof(sph_shavite512_context) ); - sph_shavite512( &ctx.shavite, hash2, 64 ); - sph_shavite512_close( &ctx.shavite, hash2 ); - memcpy( &ctx.shavite, &x17_4way_ctx.shavite, - sizeof(sph_shavite512_context) ); - sph_shavite512( &ctx.shavite, hash3, 64 ); - sph_shavite512_close( &ctx.shavite, hash3 ); - - // 10 Simd parallel 2 way 128 bit - mm256_interleave_2x128( vhash, hash0, hash1, 512 ); - simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_deinterleave_2x128( hash0, hash1, vhash, 512 ); - mm256_interleave_2x128( vhash, hash2, hash3, 512 ); - simd_2way_init( &ctx.simd, 512 ); - simd_2way_update_close( &ctx.simd, vhash, vhash, 512 ); - mm256_deinterleave_2x128( hash2, hash3, vhash, 512 ); - // 11 Echo serial + init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash0, (const BitSequence *) hash0, 512 ); - memcpy( &ctx.echo, &x17_4way_ctx.echo, sizeof(hashState_echo) ); + init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash1, (const BitSequence *) hash1, 512 ); - memcpy( &ctx.echo, &x17_4way_ctx.echo, sizeof(hashState_echo) ); + init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash2, (const BitSequence *) hash2, 512 ); - memcpy( &ctx.echo, &x17_4way_ctx.echo, sizeof(hashState_echo) ); + init_echo( &ctx.echo, 512 ); update_final_echo( &ctx.echo, (BitSequence *)hash3, (const BitSequence *) hash3, 512 ); // 12 Hamsi parallel 4 way 64 bit mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + hamsi512_4way_init( &ctx.hamsi ); hamsi512_4way( &ctx.hamsi, vhash, 64 ); hamsi512_4way_close( &ctx.hamsi, vhash ); + mm256_deinterleave_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); // 13 Fugue serial + sph_fugue512_init( &ctx.fugue ); sph_fugue512( &ctx.fugue, hash0, 64 ); sph_fugue512_close( &ctx.fugue, hash0 ); - memcpy( &ctx.fugue, &x17_4way_ctx.fugue, sizeof(sph_fugue512_context) ); + sph_fugue512_init( &ctx.fugue ); sph_fugue512( &ctx.fugue, hash1, 64 ); sph_fugue512_close( &ctx.fugue, hash1 ); - memcpy( &ctx.fugue, &x17_4way_ctx.fugue, sizeof(sph_fugue512_context) ); + sph_fugue512_init( &ctx.fugue ); sph_fugue512( &ctx.fugue, hash2, 64 ); sph_fugue512_close( &ctx.fugue, hash2 ); - memcpy( &ctx.fugue, &x17_4way_ctx.fugue, sizeof(sph_fugue512_context) ); + sph_fugue512_init( &ctx.fugue ); sph_fugue512( &ctx.fugue, hash3, 64 ); sph_fugue512_close( &ctx.fugue, hash3 ); - // 14 Shabal, parallel 4 way 32 bit SSE + // 14 Shabal, parallel 4 way 32 bit mm128_interleave_4x32( vhash, hash0, hash1, hash2, hash3, 512 ); + + shabal512_4way_init( &ctx.shabal ); shabal512_4way( &ctx.shabal, vhash, 64 ); shabal512_4way_close( &ctx.shabal, vhash ); + mm128_deinterleave_4x32( hash0, hash1, hash2, hash3, vhash, 512 ); - // 15 Whirlpool + // 15 Whirlpool serial + sph_whirlpool_init( &ctx.whirlpool ); sph_whirlpool( &ctx.whirlpool, hash0, 64 ); sph_whirlpool_close( &ctx.whirlpool, hash0 ); - memcpy( &ctx.whirlpool, &x17_4way_ctx.whirlpool, - sizeof(sph_whirlpool_context) ); + sph_whirlpool_init( &ctx.whirlpool ); sph_whirlpool( &ctx.whirlpool, hash1, 64 ); sph_whirlpool_close( &ctx.whirlpool, hash1 ); - memcpy( &ctx.whirlpool, &x17_4way_ctx.whirlpool, - sizeof(sph_whirlpool_context) ); + sph_whirlpool_init( &ctx.whirlpool ); sph_whirlpool( &ctx.whirlpool, hash2, 64 ); sph_whirlpool_close( &ctx.whirlpool, hash2 ); - memcpy( &ctx.whirlpool, &x17_4way_ctx.whirlpool, - 
sizeof(sph_whirlpool_context) ); + sph_whirlpool_init( &ctx.whirlpool ); sph_whirlpool( &ctx.whirlpool, hash3, 64 ); sph_whirlpool_close( &ctx.whirlpool, hash3 ); // 16 SHA512 parallel 64 bit mm256_interleave_4x64( vhash, hash0, hash1, hash2, hash3, 512 ); + + sha512_4way_init( &ctx.sha512 ); sha512_4way( &ctx.sha512, vhash, 64 ); sha512_4way_close( &ctx.sha512, vhash ); // 17 Haval parallel 32 bit mm256_reinterleave_4x32( vhashB, vhash, 512 ); + + haval256_5_4way_init( &ctx.haval ); haval256_5_4way( &ctx.haval, vhashB, 64 ); haval256_5_4way_close( &ctx.haval, state ); } int scanhash_x17_4way( int thr_id, struct work *work, uint32_t max_nonce, - uint64_t *hashes_done ) + uint64_t *hashes_done, struct thr_info *mythr ) { uint32_t hash[4*8] __attribute__ ((aligned (64))); uint32_t *hash7 = &(hash[7<<2]); @@ -234,49 +244,48 @@ int scanhash_x17_4way( int thr_id, struct work *work, uint32_t max_nonce, const uint32_t first_nonce = pdata[19]; uint32_t *nonces = work->nonces; int num_found = 0; - uint32_t *noncep = vdata + 73; // 9*8 + 1 + __m256i *noncev = (__m256i*)vdata + 9; // aligned + /* int */ thr_id = mythr->id; // thr_id arg is deprecated const uint32_t Htarg = ptarget[7]; uint64_t htmax[] = { 0, 0xF, 0xFF, 0xFFF, 0xFFFF, 0x10000000 }; uint32_t masks[] = { 0xFFFFFFFF, 0xFFFFFFF0, 0xFFFFFF00, 0xFFFFF000, 0xFFFF0000, 0 }; - // big endian encode 0..18 uint32_t, 64 bits at a time - swab32_array( endiandata, pdata, 20 ); + // Need big endian data + casti_m256i( endiandata, 0 ) = mm256_bswap_32( casti_m256i( pdata, 0 ) ); + casti_m256i( endiandata, 1 ) = mm256_bswap_32( casti_m256i( pdata, 1 ) ); + casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) ); uint64_t *edata = (uint64_t*)endiandata; mm256_interleave_4x64( (uint64_t*)vdata, edata, edata, edata, edata, 640 ); - for ( int m=0; m < 6; m++ ) - if ( Htarg <= htmax[m] ) - { - uint32_t mask = masks[m]; - do - { - be32enc( noncep, n ); - be32enc( noncep+2, n+1 ); - be32enc( noncep+4, n+2 ); - be32enc( noncep+6, n+3 ); + for ( int m=0; m < 6; m++ ) if ( Htarg <= htmax[m] ) + { + uint32_t mask = masks[m]; + do + { + *noncev = mm256_interleave_blend_32( mm256_bswap_32( + _mm256_set_epi32( n+3, 0,n+2, 0,n+1, 0, n, 0 ) ), + *noncev ); + x17_4way_hash( hash, vdata ); - x17_4way_hash( hash, vdata ); - - for ( int lane = 0; lane < 4; lane++ ) - if ( ( ( hash7[ lane ] & mask ) == 0 ) ) - { - mm128_extract_lane_4x32( lane_hash, hash, lane, 256 ); - - if ( fulltest( lane_hash, ptarget ) ) - { - pdata[19] = n + lane; - nonces[ num_found++ ] = n + lane; - work_set_target_ratio( work, lane_hash ); - } - } - n += 4; - } while ( ( num_found == 0 ) && ( n < max_nonce ) + for ( int lane = 0; lane < 4; lane++ ) + if ( ( ( hash7[ lane ] & mask ) == 0 ) ) + { + mm128_extract_lane_4x32( lane_hash, hash, lane, 256 ); + if ( fulltest( lane_hash, ptarget ) ) + { + pdata[19] = n + lane; + nonces[ num_found++ ] = n + lane; + work_set_target_ratio( work, lane_hash ); + } + } + n += 4; + } while ( ( num_found == 0 ) && ( n < max_nonce ) && !work_restart[thr_id].restart ); - break; - } + break; + } *hashes_done = n - first_nonce + 1; return num_found; diff --git a/algo/x17/x17-gate.c b/algo/x17/x17-gate.c index 9ab2da5..c1cf1b0 100644 --- a/algo/x17/x17-gate.c +++ b/algo/x17/x17-gate.c @@ -3,10 +3,12 @@ bool register_x17_algo( algo_gate_t* gate ) { #if defined (X17_4WAY) - init_x17_4way_ctx(); +printf("register x17 4way\n"); +// init_x17_4way_ctx(); gate->scanhash = (void*)&scanhash_x17_4way; gate->hash = (void*)&x17_4way_hash; #else +printf("register 
x17 no 4way\n"); init_x17_ctx(); gate->scanhash = (void*)&scanhash_x17; gate->hash = (void*)&x17_hash; diff --git a/algo/x17/x17-gate.h b/algo/x17/x17-gate.h index 32dadff..45faec4 100644 --- a/algo/x17/x17-gate.h +++ b/algo/x17/x17-gate.h @@ -15,16 +15,16 @@ bool register_x17_algo( algo_gate_t* gate ); void x17_4way_hash( void *state, const void *input ); int scanhash_x17_4way( int thr_id, struct work *work, uint32_t max_nonce, - uint64_t *hashes_done ); + uint64_t *hashes_done, struct thr_info *mythr ); -void init_x17_4way_ctx(); +//void init_x17_4way_ctx(); #endif void x17_hash( void *state, const void *input ); int scanhash_x17( int thr_id, struct work *work, uint32_t max_nonce, - uint64_t *hashes_done ); + uint64_t *hashes_done, struct thr_info *mythr ); void init_x17_ctx(); diff --git a/algo/x17/x17.c b/algo/x17/x17.c index 33ad4d2..591161e 100644 --- a/algo/x17/x17.c +++ b/algo/x17/x17.c @@ -5,7 +5,6 @@ #include #include "algo/blake/sph_blake.h" #include "algo/bmw/sph_bmw.h" -#include "algo/groestl/sph_groestl.h" #include "algo/jh/sph_jh.h" #include "algo/keccak/sph_keccak.h" #include "algo/skein/sph_skein.h" @@ -13,14 +12,11 @@ #include "algo/luffa/sph_luffa.h" #include "algo/cubehash/sph_cubehash.h" #include "algo/simd/sph_simd.h" -#include "algo/echo/sph_echo.h" #include "algo/hamsi/sph_hamsi.h" #include "algo/fugue/sph_fugue.h" #include "algo/shabal/sph_shabal.h" #include "algo/whirlpool/sph_whirlpool.h" -#include "algo/sha/sph_sha2.h" #include "algo/haval/sph-haval.h" - #include "algo/luffa/luffa_for_sse2.h" #include "algo/cubehash/cubehash_sse2.h" #include "algo/simd/nist.h" @@ -30,18 +26,21 @@ #include "algo/skein/sse2/skein.c" #include "algo/jh/sse2/jh_sse2_opt64.h" #include -#ifndef NO_AES_NI +#if defined(__AES__) #include "algo/echo/aes_ni/hash_api.h" #include "algo/groestl/aes_ni/hash-groestl.h" +#else + #include "algo/groestl/sph_groestl.h" + #include "algo/echo/sph_echo.h" #endif typedef struct { -#ifdef NO_AES_NI - sph_groestl512_context groestl; - sph_echo512_context echo; -#else - hashState_echo echo; +#if defined(__AES__) hashState_groestl groestl; + hashState_echo echo; +#else + sph_groestl512_context groestl; + sph_echo512_context echo; #endif hashState_luffa luffa; cubehashParam cubehash; @@ -51,11 +50,7 @@ typedef struct { sph_fugue512_context fugue; sph_shabal512_context shabal; sph_whirlpool_context whirlpool; -#ifndef USE_SPH_SHA SHA512_CTX sha512; -#else - sph_sha512_context sha512; -#endif sph_haval256_5_context haval; } x17_ctx_holder; @@ -63,12 +58,12 @@ x17_ctx_holder x17_ctx __attribute__ ((aligned (64))); void init_x17_ctx() { -#ifdef NO_AES_NI +#if defined(__AES__) + init_groestl( &x17_ctx.groestl, 64 ); + init_echo( &x17_ctx.echo, 512 ); +#else sph_groestl512_init(&x17_ctx.groestl ); sph_echo512_init(&x17_ctx.echo); -#else - init_echo( &x17_ctx.echo, 512 ); - init_groestl( &x17_ctx.groestl, 64 ); #endif init_luffa( &x17_ctx.luffa, 512 ); cubehashInit( &x17_ctx.cubehash, 512, 16, 32 ); @@ -78,11 +73,7 @@ void init_x17_ctx() sph_fugue512_init( &x17_ctx.fugue ); sph_shabal512_init( &x17_ctx.shabal ); sph_whirlpool_init( &x17_ctx.whirlpool ); -#ifndef USE_SPH_SHA SHA512_Init( &x17_ctx.sha512 ); -#else - sph_sha512_init(&x17_ctx.sha512); -#endif sph_haval256_5_init(&x17_ctx.haval); }; @@ -123,12 +114,12 @@ void x17_hash(void *output, const void *input) //---groestl---- -#ifdef NO_AES_NI - sph_groestl512(&ctx.groestl, hash, 64); - sph_groestl512_close(&ctx.groestl, hash); -#else +#if defined(__AES__) update_and_final_groestl( &ctx.groestl, (char*)hash, 
(const char*)hash, 512 ); +#else + sph_groestl512( &ctx.groestl, hash, 64 ); + sph_groestl512_close( &ctx.groestl, hash ); #endif //---skein4--- @@ -151,134 +142,136 @@ void x17_hash(void *output, const void *input) KEC_C; //--- luffa7 - update_and_final_luffa( &ctx.luffa, (BitSequence*)hashB, + update_and_final_luffa( &ctx.luffa, (BitSequence*)hash, (const BitSequence*)hash, 64 ); // 8 Cube cubehashUpdateDigest( &ctx.cubehash, (byte*) hash, - (const byte*)hashB, 64 ); + (const byte*)hash, 64 ); // 9 Shavite sph_shavite512( &ctx.shavite, hash, 64); - sph_shavite512_close( &ctx.shavite, hashB); + sph_shavite512_close( &ctx.shavite, hash); // 10 Simd - update_final_sd( &ctx.simd, (BitSequence *)hash, - (const BitSequence *)hashB, 512 ); + update_final_sd( &ctx.simd, (BitSequence*)hash, + (const BitSequence*)hash, 512 ); //11---echo--- -#ifdef NO_AES_NI - sph_echo512(&ctx.echo, hash, 64); - sph_echo512_close(&ctx.echo, hashB); +#if defined(__AES__) + update_final_echo ( &ctx.echo, (BitSequence*)hash, + (const BitSequence*)hash, 512 ); #else - update_final_echo ( &ctx.echo, (BitSequence *)hashB, - (const BitSequence *)hash, 512 ); + sph_echo512( &ctx.echo, hash, 64 ); + sph_echo512_close( &ctx.echo, hash ); #endif // X13 algos // 12 Hamsi - sph_hamsi512(&ctx.hamsi, hashB, 64); - sph_hamsi512_close(&ctx.hamsi, hash); + sph_hamsi512( &ctx.hamsi, hash, 64 ); + sph_hamsi512_close( &ctx.hamsi, hash ); // 13 Fugue - sph_fugue512(&ctx.fugue, hash, 64); - sph_fugue512_close(&ctx.fugue, hashB); + sph_fugue512(&ctx.fugue, hash, 64 ); + sph_fugue512_close(&ctx.fugue, hash ); // X14 Shabal - sph_shabal512(&ctx.shabal, hashB, 64); - sph_shabal512_close(&ctx.shabal, hash); + sph_shabal512(&ctx.shabal, hash, 64); + sph_shabal512_close( &ctx.shabal, hash ); // X15 Whirlpool - sph_whirlpool(&ctx.whirlpool, hash, 64); - sph_whirlpool_close(&ctx.whirlpool, hashB); + sph_whirlpool( &ctx.whirlpool, hash, 64 ); + sph_whirlpool_close( &ctx.whirlpool, hash ); -#ifndef USE_SPH_SHA - SHA512_Update( &ctx.sha512, hashB, 64 ); - SHA512_Final( (unsigned char*) hash, &ctx.sha512 ); -#else - sph_sha512(&ctx.sha512,(const void*) hashB, 64); - sph_sha512_close(&ctx.sha512,(void*) hash); -#endif - sph_haval256_5(&ctx.haval,(const void*) hash, 64); - sph_haval256_5_close(&ctx.haval,hashB); + SHA512_Update( &ctx.sha512, hash, 64 ); + SHA512_Final( (unsigned char*)hash, &ctx.sha512 ); - - asm volatile ("emms"); - memcpy(output, hashB, 32); + sph_haval256_5( &ctx.haval, (const void*)hash, 64 ); + sph_haval256_5_close( &ctx.haval, output ); } -int scanhash_x17(int thr_id, struct work *work, - uint32_t max_nonce, uint64_t *hashes_done) +int scanhash_x17( int thr_id, struct work *work, uint32_t max_nonce, + uint64_t *hashes_done, struct thr_info *mythr) { - uint32_t endiandata[20] __attribute__((aligned(64))); - uint32_t hash64[8] __attribute__((aligned(64))); - uint32_t *pdata = work->data; - uint32_t *ptarget = work->target; - uint32_t n = pdata[19] - 1; - const uint32_t first_nonce = pdata[19]; - const uint32_t Htarg = ptarget[7]; + uint32_t endiandata[20] __attribute__((aligned(64))); + uint32_t hash64[8] __attribute__((aligned(64))); + uint32_t *pdata = work->data; + uint32_t *ptarget = work->target; + uint32_t n = pdata[19] - 1; + const uint32_t first_nonce = pdata[19]; + const uint32_t Htarg = ptarget[7]; + /* int */ thr_id = mythr->id; // thr_id arg is deprecated - uint64_t htmax[] = { - 0, - 0xF, - 0xFF, - 0xFFF, - 0xFFFF, - 0x10000000 - }; - uint32_t masks[] = { - 0xFFFFFFFF, - 0xFFFFFFF0, - 0xFFFFFF00, - 0xFFFFF000, - 
0xFFFF0000, - 0 - }; + uint64_t htmax[] = + { + 0, + 0xF, + 0xFF, + 0xFFF, + 0xFFFF, + 0x10000000 + }; + uint32_t masks[] = + { + 0xFFFFFFFF, + 0xFFFFFFF0, + 0xFFFFFF00, + 0xFFFFF000, + 0xFFFF0000, + 0 + }; - // we need bigendian data... - swab32_array( endiandata, pdata, 20 ); + // we need bigendian data... + casti_m128i( endiandata, 0 ) = mm128_bswap_32( casti_m128i( pdata, 0 ) ); + casti_m128i( endiandata, 1 ) = mm128_bswap_32( casti_m128i( pdata, 1 ) ); + casti_m128i( endiandata, 2 ) = mm128_bswap_32( casti_m128i( pdata, 2 ) ); + casti_m128i( endiandata, 3 ) = mm128_bswap_32( casti_m128i( pdata, 3 ) ); + casti_m128i( endiandata, 4 ) = mm128_bswap_32( casti_m128i( pdata, 4 ) ); #ifdef DEBUG_ALGO - if (Htarg != 0) - printf("[%d] Htarg=%X\n", thr_id, Htarg); + if ( Htarg != 0 ) + printf( "[%d] Htarg=%X\n", thr_id, Htarg ); #endif - for (int m=0; m < 6; m++) { - if (Htarg <= htmax[m]) { - uint32_t mask = masks[m]; - do { - pdata[19] = ++n; - be32enc(&endiandata[19], n); - x17_hash(hash64, endiandata); + for ( int m = 0; m < 6; m++ ) + { + if ( Htarg <= htmax[m] ) + { + uint32_t mask = masks[m]; + do + { + pdata[19] = ++n; + be32enc( &endiandata[19], n ); + x17_hash( hash64, endiandata ); #ifndef DEBUG_ALGO - if (!(hash64[7] & mask)) - { - if ( fulltest(hash64, ptarget)) { - *hashes_done = n - first_nonce + 1; - return true; - } -// else -// { -// applog(LOG_INFO, "Result does not validate on CPU!"); -// } - } + if ( !( hash64[7] & mask ) ) + { + if ( fulltest( hash64, ptarget ) ) + { + *hashes_done = n - first_nonce + 1; + return true; + } +// else +// applog(LOG_INFO, "Result does not validate on CPU!"); + } #else - if (!(n % 0x1000) && !thr_id) printf("."); - if (!(hash64[7] & mask)) { - printf("[%d]",thr_id); - if (fulltest(hash64, ptarget)) { - work_set_target_ratio( work, hash64 ); - *hashes_done = n - first_nonce + 1; - return true; - } - } -#endif - } while (n < max_nonce && !work_restart[thr_id].restart); - // see blake.c if else to understand the loop on htmax => mask - break; + if ( !( n % 0x1000 ) && !thr_id ) printf("."); + if ( !( hash64[7] & mask ) ) + { + printf("[%d]",thr_id); + if ( fulltest( hash64, ptarget ) ) + { + work_set_target_ratio( work, hash64 ); + *hashes_done = n - first_nonce + 1; + return true; + } } +#endif + } while (n < max_nonce && !work_restart[thr_id].restart); + // see blake.c if else to understand the loop on htmax => mask + break; } - - *hashes_done = n - first_nonce + 1; - pdata[19] = n; - return 0; + } + *hashes_done = n - first_nonce + 1; + pdata[19] = n; + return 0; } diff --git a/algo/x17/xevan.c b/algo/x17/xevan.c index 8335400..b35c657 100644 --- a/algo/x17/xevan.c +++ b/algo/x17/xevan.c @@ -16,17 +16,16 @@ #include "algo/fugue/sph_fugue.h" #include "algo/shabal/sph_shabal.h" #include "algo/whirlpool/sph_whirlpool.h" -#include "algo/sha/sph_sha2.h" #include "algo/haval/sph-haval.h" #include "algo/simd/nist.h" #include "algo/cubehash/cubehash_sse2.h" #include -#ifdef NO_AES_NI - #include "algo/groestl/sph_groestl.h" - #include "algo/echo/sph_echo.h" -#else +#if defined(__AES__) #include "algo/groestl/aes_ni/hash-groestl.h" #include "algo/echo/aes_ni/hash_api.h" +#else + #include "algo/groestl/sph_groestl.h" + #include "algo/echo/sph_echo.h" #endif typedef struct { @@ -43,18 +42,14 @@ typedef struct { sph_fugue512_context fugue; sph_shabal512_context shabal; sph_whirlpool_context whirlpool; -#ifndef USE_SPH_SHA SHA512_CTX sha512; -#else - sph_sha512_context sha512; -#endif sph_haval256_5_context haval; -#ifdef NO_AES_NI - sph_groestl512_context 
groestl; - sph_echo512_context echo; -#else +#if defined(__AES__) hashState_echo echo; hashState_groestl groestl; +#else + sph_groestl512_context groestl; + sph_echo512_context echo; #endif } xevan_ctx_holder; @@ -77,18 +72,14 @@ void init_xevan_ctx() sph_fugue512_init( &xevan_ctx.fugue ); sph_shabal512_init( &xevan_ctx.shabal ); sph_whirlpool_init( &xevan_ctx.whirlpool ); -#ifndef USE_SPH_SHA SHA512_Init( &xevan_ctx.sha512 ); -#else - sph_sha512_init(&xevan_ctx.sha512); -#endif sph_haval256_5_init(&xevan_ctx.haval); -#ifdef NO_AES_NI - sph_groestl512_init( &xevan_ctx.groestl ); - sph_echo512_init( &xevan_ctx.echo ); -#else +#if defined(__AES__) init_groestl( &xevan_ctx.groestl, 64 ); init_echo( &xevan_ctx.echo, 512 ); +#else + sph_groestl512_init( &xevan_ctx.groestl ); + sph_echo512_init( &xevan_ctx.echo ); #endif }; @@ -117,12 +108,12 @@ void xevan_hash(void *output, const void *input) sph_bmw512(&ctx.bmw, hash, dataLen); sph_bmw512_close(&ctx.bmw, hash); -#ifdef NO_AES_NI +#if defined(__AES__) + update_and_final_groestl( &ctx.groestl, (char*)hash, + (const char*)hash, dataLen*8 ); +#else sph_groestl512(&ctx.groestl, hash, dataLen); sph_groestl512_close(&ctx.groestl, hash); -#else - update_and_final_groestl( &ctx.groestl, (char*)hash, - (const char*)hash, dataLen*8 ); #endif sph_skein512(&ctx.skein, hash, dataLen); @@ -146,12 +137,12 @@ void xevan_hash(void *output, const void *input) update_final_sd( &ctx.simd, (BitSequence *)hash, (const BitSequence *)hash, dataLen*8 ); -#ifdef NO_AES_NI +#if defined(__AES__) + update_final_echo( &ctx.echo, (BitSequence *) hash, + (const BitSequence *) hash, dataLen*8 ); +#else sph_echo512(&ctx.echo, hash, dataLen); sph_echo512_close(&ctx.echo, hash); -#else - update_final_echo( &ctx.echo, (BitSequence *) hash, - (const BitSequence *) hash, dataLen*8 ); #endif sph_hamsi512(&ctx.hamsi, hash, dataLen); @@ -166,13 +157,9 @@ void xevan_hash(void *output, const void *input) sph_whirlpool(&ctx.whirlpool, hash, dataLen); sph_whirlpool_close(&ctx.whirlpool, hash); -#ifndef USE_SPH_SHA SHA512_Update( &ctx.sha512, hash, dataLen ); SHA512_Final( (unsigned char*) hash, &ctx.sha512 ); -#else - sph_sha512(&ctx.sha512,(const void*) hash, dataLen); - sph_sha512_close(&ctx.sha512,(void*) hash); -#endif + sph_haval256_5(&ctx.haval,(const void*) hash, dataLen); sph_haval256_5_close(&ctx.haval, hash); @@ -186,12 +173,12 @@ void xevan_hash(void *output, const void *input) sph_bmw512(&ctx.bmw, hash, dataLen); sph_bmw512_close(&ctx.bmw, hash); -#ifdef NO_AES_NI - sph_groestl512(&ctx.groestl, hash, dataLen); - sph_groestl512_close(&ctx.groestl, hash); -#else +#if defined(__AES__) update_and_final_groestl( &ctx.groestl, (char*)hash, (const BitSequence*)hash, dataLen*8 ); +#else + sph_groestl512(&ctx.groestl, hash, dataLen); + sph_groestl512_close(&ctx.groestl, hash); #endif sph_skein512(&ctx.skein, hash, dataLen); @@ -214,12 +201,12 @@ void xevan_hash(void *output, const void *input) update_final_sd( &ctx.simd, (BitSequence *)hash, (const BitSequence *)hash, dataLen*8 ); -#ifdef NO_AES_NI - sph_echo512(&ctx.echo, hash, dataLen); - sph_echo512_close(&ctx.echo, hash); -#else +#if defined(__AES__) update_final_echo( &ctx.echo, (BitSequence *) hash, (const BitSequence *) hash, dataLen*8 ); +#else + sph_echo512(&ctx.echo, hash, dataLen); + sph_echo512_close(&ctx.echo, hash); #endif sph_hamsi512(&ctx.hamsi, hash, dataLen); @@ -234,13 +221,9 @@ void xevan_hash(void *output, const void *input) sph_whirlpool(&ctx.whirlpool, hash, dataLen); sph_whirlpool_close(&ctx.whirlpool, hash); 
-#ifndef USE_SPH_SHA SHA512_Update( &ctx.sha512, hash, dataLen ); SHA512_Final( (unsigned char*) hash, &ctx.sha512 ); -#else - sph_sha512(&ctx.sha512,(const void*) hash, dataLen); - sph_sha512_close(&ctx.sha512,(void*) hash); -#endif + sph_haval256_5(&ctx.haval,(const void*) hash, dataLen); sph_haval256_5_close(&ctx.haval, hash); diff --git a/algo/x20/x20r-gate.c b/algo/x20/x20r-gate.c new file mode 100644 index 0000000..36113b7 --- /dev/null +++ b/algo/x20/x20r-gate.c @@ -0,0 +1,34 @@ +#include "x20r-gate.h" + +void x20r_getAlgoString( const uint8_t* prevblock, char *output ) +{ + char *sptr = output; + + for ( int j = 0; j < X20R_HASH_FUNC_COUNT; j++ ) + { + char b = (19 - j) >> 1; // 20 ascii hex chars, reversed + uint8_t algoDigit = (j & 1) ? prevblock[b] & 0xF : prevblock[b] >> 4; + if (algoDigit >= 10) + sprintf(sptr, "%c", 'A' + (algoDigit - 10)); + else + sprintf(sptr, "%u", (uint32_t) algoDigit); + sptr++; + } + *sptr = '\0'; +} + +bool register_x20r_algo( algo_gate_t* gate ) +{ +#if defined (X20R_4WAY) + gate->scanhash = (void*)&scanhash_x20r_4way; + gate->hash = (void*)&x20r_4way_hash; +#else + gate->scanhash = (void*)&scanhash_x20r; + gate->hash = (void*)&x20r_hash; +#endif + gate->set_target = (void*)&alt_set_target; + gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT; + x20_r_s_getAlgoString = (void*)&x20r_getAlgoString; + return true; +}; + diff --git a/algo/x20/x20r-gate.h b/algo/x20/x20r-gate.h new file mode 100644 index 0000000..359b6d4 --- /dev/null +++ b/algo/x20/x20r-gate.h @@ -0,0 +1,58 @@ +#ifndef X20R_GATE_H__ +#define X20R_GATE_H__ 1 + +#include "algo-gate-api.h" +#include + +/* +#if defined(__AVX2__) && defined(__AES__) + #define X20R_4WAY +#endif +*/ + +enum x20r_Algo { + BLAKE = 0, + BMW, + GROESTL, + JH, + KECCAK, + SKEIN, + LUFFA, + CUBEHASH, + SHAVITE, + SIMD, + ECHO, + HAMSI, + FUGUE, + SHABAL, + WHIRLPOOL, + SHA_512, + HAVAL, // 256-bits output + GOST, + RADIOGATUN, // 256-bits output + PANAMA, // 256-bits output + X20R_HASH_FUNC_COUNT +}; + +void (*x20_r_s_getAlgoString) ( const uint8_t*, char* ); + +void x20r_getAlgoString( const uint8_t* prevblock, char *output ); + +bool register_x20r_algo( algo_gate_t* gate ); + +#if defined(X20R_4WAY) + +void x20r_4way_hash( void *state, const void *input ); + +int scanhash_x20r_4way( int thr_id, struct work *work, uint32_t max_nonce, + uint64_t *hashes_done ); + +#endif + +void x20r_hash( void *state, const void *input ); + +int scanhash_x20r( int thr_id, struct work *work, uint32_t max_nonce, + uint64_t *hashes_done ); + +#endif + diff --git a/algo/x20/x20r.c b/algo/x20/x20r.c new file mode 100644 index 0000000..7b98990 --- /dev/null +++ b/algo/x20/x20r.c @@ -0,0 +1,275 @@ +#include "x20r-gate.h" + +#include +#include +#include + +#include "algo/blake/sph_blake.h" +#include "algo/bmw/sph_bmw.h" +#include "algo/jh/sph_jh.h" +#include "algo/keccak/sph_keccak.h" +#include "algo/skein/sph_skein.h" +#include "algo/shavite/sph_shavite.h" +#include "algo/hamsi/sph_hamsi.h" +#include "algo/fugue/sph_fugue.h" +#include "algo/shabal/sph_shabal.h" +#include "algo/whirlpool/sph_whirlpool.h" +#include "algo/haval/sph-haval.h" +#include "algo/radiogatun/sph_radiogatun.h" +#include "algo/panama/sph_panama.h" +#include "algo/gost/sph_gost.h" +#include +#if defined(__AES__) + #include "algo/echo/aes_ni/hash_api.h" + #include "algo/groestl/aes_ni/hash-groestl.h" +#else + #include "algo/groestl/sph_groestl.h" + #include "algo/echo/sph_echo.h" +#endif +#include "algo/luffa/luffa_for_sse2.h" +#include "algo/cubehash/cubehash_sse2.h" 
+#include "algo/simd/nist.h" + + +static __thread uint32_t s_ntime = UINT32_MAX; +static __thread char hashOrder[X20R_HASH_FUNC_COUNT + 1] = { 0 }; + +union _x20r_context_overlay +{ + sph_blake512_context blake; + sph_bmw512_context bmw; +#if defined(__AES__) + hashState_groestl groestl; + hashState_echo echo; +#else + sph_groestl512_context groestl; + sph_echo512_context echo; +#endif + sph_skein512_context skein; + sph_jh512_context jh; + sph_keccak512_context keccak; + hashState_luffa luffa; + cubehashParam cube; + hashState_sd simd; + sph_shavite512_context shavite; + sph_hamsi512_context hamsi; + sph_fugue512_context fugue; + sph_shabal512_context shabal; + sph_whirlpool_context whirlpool; + SHA512_CTX sha512; + sph_haval256_5_context haval; + sph_gost512_context gost; + sph_radiogatun64_context radiogatun; + sph_panama_context panama; +}; +typedef union _x20r_context_overlay x20r_context_overlay; + +void x20r_hash(void* output, const void* input) +{ + uint32_t _ALIGN(128) hash[64/4]; + x20r_context_overlay ctx; +/* + sph_blake512_context ctx_blake; + sph_bmw512_context ctx_bmw; + sph_groestl512_context ctx_groestl; + sph_skein512_context ctx_skein; + sph_jh512_context ctx_jh; + sph_keccak512_context ctx_keccak; + sph_luffa512_context ctx_luffa; + sph_cubehash512_context ctx_cubehash; + sph_shavite512_context ctx_shavite; + sph_simd512_context ctx_simd; + sph_echo512_context ctx_echo; + sph_hamsi512_context ctx_hamsi; + sph_fugue512_context ctx_fugue; + sph_shabal512_context ctx_shabal; + sph_whirlpool_context ctx_whirlpool; + sph_sha512_context ctx_sha512; + sph_haval256_5_context ctx_haval; + sph_gost512_context ctx_gost; + sph_radiogatun64_context ctx_radiogatun; + sph_panama_context ctx_panama; +*/ + void *in = (void*) input; + int size = 80; + + if ( s_ntime == UINT32_MAX ) + { + const uint8_t* in8 = (uint8_t*) input; + x20_r_s_getAlgoString(&in8[4], hashOrder); + } + + for (int i = 0; i < 20; i++) + { + const char elem = hashOrder[i]; + const uint8_t algo = elem >= 'A' ? 
elem - 'A' + 10 : elem - '0'; + + switch ( algo ) + { + case BLAKE: + sph_blake512_init(&ctx.blake); + sph_blake512(&ctx.blake, in, size); + sph_blake512_close(&ctx.blake, hash); + break; + case BMW: + sph_bmw512_init(&ctx.bmw); + sph_bmw512(&ctx.bmw, in, size); + sph_bmw512_close(&ctx.bmw, hash); + break; + case GROESTL: +#if defined(__AES__) + init_groestl( &ctx.groestl, 64 ); + update_and_final_groestl( &ctx.groestl, (char*)hash, + (const char*)in, size<<3 ); +#else + sph_groestl512_init(&ctx.groestl); + sph_groestl512(&ctx.groestl, in, size); + sph_groestl512_close(&ctx.groestl, hash); +#endif + break; + case SKEIN: + sph_skein512_init(&ctx.skein); + sph_skein512(&ctx.skein, in, size); + sph_skein512_close(&ctx.skein, hash); + break; + case JH: + sph_jh512_init(&ctx.jh); + sph_jh512(&ctx.jh, in, size); + sph_jh512_close(&ctx.jh, hash); + break; + case KECCAK: + sph_keccak512_init(&ctx.keccak); + sph_keccak512(&ctx.keccak, in, size); + sph_keccak512_close(&ctx.keccak, hash); + break; + case LUFFA: + init_luffa( &ctx.luffa, 512 ); + update_and_final_luffa( &ctx.luffa, (BitSequence*)hash, + (const BitSequence*)in, size ); + break; + case CUBEHASH: + cubehashInit( &ctx.cube, 512, 16, 32 ); + cubehashUpdateDigest( &ctx.cube, (byte*) hash, + (const byte*)in, size ); + break; + case SHAVITE: + sph_shavite512_init(&ctx.shavite); + sph_shavite512(&ctx.shavite, in, size); + sph_shavite512_close(&ctx.shavite, hash); + break; + case SIMD: + init_sd( &ctx.simd, 512 ); + update_final_sd( &ctx.simd, (BitSequence *)hash, + (const BitSequence *)in, size<<3 ); + break; + case ECHO: +#if defined(__AES__) + init_echo( &ctx.echo, 512 ); + update_final_echo ( &ctx.echo, (BitSequence *)hash, + (const BitSequence *)in, size<<3 ); +#else + sph_echo512_init(&ctx.echo); + sph_echo512(&ctx.echo, in, size); + sph_echo512_close(&ctx.echo, hash); +#endif + break; + case HAMSI: + sph_hamsi512_init(&ctx.hamsi); + sph_hamsi512(&ctx.hamsi, in, size); + sph_hamsi512_close(&ctx.hamsi, hash); + break; + case FUGUE: + sph_fugue512_init(&ctx.fugue); + sph_fugue512(&ctx.fugue, in, size); + sph_fugue512_close(&ctx.fugue, hash); + break; + case SHABAL: + sph_shabal512_init(&ctx.shabal); + sph_shabal512(&ctx.shabal, in, size); + sph_shabal512_close(&ctx.shabal, hash); + break; + case WHIRLPOOL: + sph_whirlpool_init(&ctx.whirlpool); + sph_whirlpool(&ctx.whirlpool, in, size); + sph_whirlpool_close(&ctx.whirlpool, hash); + break; + case SHA_512: + SHA512_Init( &ctx.sha512 ); + SHA512_Update( &ctx.sha512, in, size ); + SHA512_Final( (unsigned char*) hash, &ctx.sha512 ); + break; + case HAVAL: + sph_haval256_5_init(&ctx.haval); + sph_haval256_5(&ctx.haval, in, size); + sph_haval256_5_close(&ctx.haval, hash); + memset(&hash[8], 0, 32); + break; + case GOST: + sph_gost512_init(&ctx.gost); + sph_gost512(&ctx.gost, in, size); + sph_gost512_close(&ctx.gost, hash); + break; + case RADIOGATUN: + sph_radiogatun64_init(&ctx.radiogatun); + sph_radiogatun64(&ctx.radiogatun, in, size); + sph_radiogatun64_close(&ctx.radiogatun, hash); + memset(&hash[8], 0, 32); + break; + case PANAMA: + sph_panama_init(&ctx.panama); + sph_panama(&ctx.panama, in, size); + sph_panama_close(&ctx.panama, hash); + memset(&hash[8], 0, 32); + break; + } + in = (void*) hash; + size = 64; + } + memcpy(output, hash, 32); +} + +int scanhash_x20r( int thr_id, struct work *work, uint32_t max_nonce, + uint64_t *hashes_done ) +{ + uint32_t _ALIGN(128) hash32[8]; + uint32_t _ALIGN(128) endiandata[20]; + uint32_t *pdata = work->data; + uint32_t *ptarget = work->target; + const 
uint32_t Htarg = ptarget[7]; + const uint32_t first_nonce = pdata[19]; + uint32_t nonce = first_nonce; + volatile uint8_t *restart = &(work_restart[thr_id].restart); + + for (int k=0; k < 19; k++) + be32enc( &endiandata[k], pdata[k] ); + + if ( s_ntime != pdata[17] ) + { + uint32_t ntime = swab32(pdata[17]); + x20_r_s_getAlgoString( (const char*) (&endiandata[1]), hashOrder ); + s_ntime = ntime; + if (opt_debug && !thr_id) applog(LOG_DEBUG, "hash order %s (%08x)", hashOrder, ntime); + } + + if ( opt_benchmark ) + ptarget[7] = 0x0cff; + + do { + be32enc( &endiandata[19], nonce ); + x20r_hash( hash32, endiandata ); + + if ( hash32[7] <= Htarg && fulltest( hash32, ptarget ) ) + { + work_set_target_ratio( work, hash32 ); + pdata[19] = nonce; + *hashes_done = pdata[19] - first_nonce; + return 1; + } + nonce++; + + } while (nonce < max_nonce && !(*restart)); + + pdata[19] = nonce; + *hashes_done = pdata[19] - first_nonce + 1; + return 0; +} diff --git a/algo/yescrypt/sha256_Y.c b/algo/yescrypt/sha256_Y.c index 4ba00a0..62265bf 100644 --- a/algo/yescrypt/sha256_Y.c +++ b/algo/yescrypt/sha256_Y.c @@ -299,48 +299,26 @@ HMAC_SHA256_Init(HMAC_SHA256_CTX * ctx, const void * _K, size_t Klen) /* If Klen > 64, the key is really SHA256(K). */ if (Klen > 64) { -#ifndef USE_SPH_SHA SHA256_Init(&ctx->ictx); SHA256_Update(&ctx->ictx, K, Klen); SHA256_Final(khash, &ctx->ictx); -#else - SHA256_Init_Y(&ctx->ictx); - SHA256_Update_Y(&ctx->ictx, K, Klen); - SHA256_Final_Y(khash, &ctx->ictx); -#endif K = khash; Klen = 32; } /* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */ -#ifndef USE_SPH_SHA SHA256_Init(&ctx->ictx); -#else - SHA256_Init_Y(&ctx->ictx); -#endif memset(pad, 0x36, 64); for (i = 0; i < Klen; i++) pad[i] ^= K[i]; -#ifndef USE_SPH_SHA SHA256_Update(&ctx->ictx, pad, 64); -#else - SHA256_Update_Y(&ctx->ictx, pad, 64); -#endif /* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */ -#ifndef USE_SPH_SHA SHA256_Init(&ctx->octx); -#else - SHA256_Init_Y(&ctx->octx); -#endif memset(pad, 0x5c, 64); for (i = 0; i < Klen; i++) pad[i] ^= K[i]; -#ifndef USE_SPH_SHA SHA256_Update(&ctx->octx, pad, 64); -#else - SHA256_Update_Y(&ctx->octx, pad, 64); -#endif /* Clean the stack. */ //memset(khash, 0, 32); @@ -352,11 +330,7 @@ HMAC_SHA256_Update(HMAC_SHA256_CTX * ctx, const void *in, size_t len) { /* Feed data to the inner SHA256 operation. */ -#ifndef USE_SPH_SHA SHA256_Update(&ctx->ictx, in, len); -#else - SHA256_Update_Y(&ctx->ictx, in, len); -#endif } /* Finish an HMAC-SHA256 operation. */ @@ -365,7 +339,6 @@ HMAC_SHA256_Final(unsigned char digest[32], HMAC_SHA256_CTX * ctx) { unsigned char ihash[32]; -#ifndef USE_SPH_SHA /* Finish the inner SHA256 operation. */ SHA256_Final(ihash, &ctx->ictx); @@ -374,16 +347,6 @@ HMAC_SHA256_Final(unsigned char digest[32], HMAC_SHA256_CTX * ctx) /* Finish the outer SHA256 operation. */ SHA256_Final(digest, &ctx->octx); -#else - /* Finish the inner SHA256 operation. */ - SHA256_Final_Y(ihash, &ctx->ictx); - - /* Feed the inner hash to the outer SHA256 operation. */ - SHA256_Update_Y(&ctx->octx, ihash, 32); - - /* Finish the outer SHA256 operation. */ - SHA256_Final_Y(digest, &ctx->octx); -#endif /* Clean the stack. 
*/ //memset(ihash, 0, 32); diff --git a/algo/yescrypt/sha256_Y.h b/algo/yescrypt/sha256_Y.h index 2d6f3ee..4912e43 100644 --- a/algo/yescrypt/sha256_Y.h +++ b/algo/yescrypt/sha256_Y.h @@ -47,13 +47,8 @@ typedef struct HMAC_SHA256Context { */ typedef struct HMAC_SHA256Context { -#ifndef USE_SPH_SHA SHA256_CTX ictx; SHA256_CTX octx; -#else - SHA256_CTX_Y ictx; - SHA256_CTX_Y octx; -#endif } HMAC_SHA256_CTX; void SHA256_Init_Y(SHA256_CTX_Y *); diff --git a/algo/yescrypt/yescrypt-simd.c b/algo/yescrypt/yescrypt-simd.c index 5aeca04..edecb60 100644 --- a/algo/yescrypt/yescrypt-simd.c +++ b/algo/yescrypt/yescrypt-simd.c @@ -1303,17 +1303,10 @@ yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local, S = (uint8_t *)XY + XY_size; if (t || flags) { -#ifndef USE_SPH_SHA SHA256_CTX ctx; SHA256_Init(&ctx); SHA256_Update(&ctx, passwd, passwdlen); SHA256_Final(sha256, &ctx); -#else - SHA256_CTX_Y ctx; - SHA256_Init_Y(&ctx); - SHA256_Update_Y(&ctx, passwd, passwdlen); - SHA256_Final_Y(sha256, &ctx); -#endif passwd = sha256; passwdlen = sizeof(sha256); } @@ -1372,17 +1365,10 @@ yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local, } /* Compute StoredKey */ { -#ifndef USE_SPH_SHA SHA256_CTX ctx; SHA256_Init(&ctx); SHA256_Update(&ctx, sha256, sizeof(sha256)); SHA256_Final(buf, &ctx); -#else - SHA256_CTX_Y ctx; - SHA256_Init_Y(&ctx); - SHA256_Update_Y(&ctx, sha256, sizeof(sha256)); - SHA256_Final_Y(buf, &ctx); -#endif } } diff --git a/algo/yespower/yespower-opt.c b/algo/yespower/yespower-opt.c index 3159800..b92e21c 100644 --- a/algo/yespower/yespower-opt.c +++ b/algo/yespower/yespower-opt.c @@ -49,6 +49,7 @@ * no slowdown from the prefixes is generally observed on AMD CPUs supporting * XOP, some slowdown is sometimes observed on Intel CPUs with AVX. */ +/* #ifdef __XOP__ #warning "Note: XOP is enabled. That's great." #elif defined(__AVX__) @@ -60,7 +61,7 @@ #else #warning "Note: building generic code for non-x86. That's OK." #endif - +*/ /* * The SSE4 code version has fewer instructions than the generic SSE2 version, * but all of the instructions are SIMD, thereby wasting the scalar execution diff --git a/api.c b/api.c index 5184309..8999397 100644 --- a/api.c +++ b/api.c @@ -96,9 +96,9 @@ extern char *opt_api_allow; extern int opt_api_listen; /* port */ extern int opt_api_remote; extern double global_hashrate; -extern uint32_t accepted_count; -extern uint32_t rejected_count; -extern uint32_t solved_count; +//extern uint32_t accepted_count; +//extern uint32_t rejected_count; +//extern uint32_t solved_count; #define cpu_threads opt_n_threads @@ -136,7 +136,7 @@ static char *getsummary( char *params ) char algo[64]; *algo = '\0'; time_t ts = time(NULL); double uptime = difftime(ts, startup); - double accps = (60.0 * accepted_count) / (uptime ? uptime : 1.0); + double accps = (60.0 * accepted_share_count) / (uptime ? uptime : 1.0); double diff = net_diff > 0. ? 
net_diff : stratum_diff; char diff_str[16]; double hrate = (double)global_hashrate; @@ -157,16 +157,18 @@ static char *getsummary( char *params ) sprintf( diff_str, "%.6f", diff); *buffer = '\0'; - sprintf( buffer, "NAME=%s;VER=%s;API=%s;" - "ALGO=%s;CPUS=%d;URL=%s;" - "HS=%.2f;KHS=%.2f;ACC=%d;REJ=%d;SOL=%d;" - "ACCMN=%.3f;DIFF=%s;TEMP=%.1f;FAN=%d;FREQ=%d;" - "UPTIME=%.0f;TS=%u|", - PACKAGE_NAME, PACKAGE_VERSION, APIVERSION, - algo, opt_n_threads, short_url, hrate, hrate/1000.0, - accepted_count, rejected_count, solved_count, - accps, diff_str, cpu.cpu_temp, cpu.cpu_fan, cpu.cpu_clock, - uptime, (uint32_t) ts); + sprintf( buffer, + "NAME=%s;VER=%s;API=%s;" + "ALGO=%s;CPUS=%d;URL=%s;" + "HS=%.2f;KHS=%.2f;ACC=%d;REJ=%d;SOL=%d;" + "ACCMN=%.3f;DIFF=%s;TEMP=%.1f;FAN=%d;FREQ=%d;" + "UPTIME=%.0f;TS=%u|", + PACKAGE_NAME, PACKAGE_VERSION, APIVERSION, + algo, opt_n_threads, short_url, + hrate, hrate/1000.0, accepted_share_count, rejected_share_count, + solved_block_count, + accps, diff_str, cpu.cpu_temp, cpu.cpu_fan, cpu.cpu_clock, + uptime, (uint32_t) ts); return buffer; } diff --git a/avxdefs.h b/avxdefs.h index f477564..2f021e1 100644 --- a/avxdefs.h +++ b/avxdefs.h @@ -27,15 +27,16 @@ // eliminate some extraneous characters. The main ones are: // - the leading underscore(s) "_" and the "i" are dropped from the prefix, // - "mm64" and "mm128" used for 64 and 128 bit prefix respectively to avoid -// the ambiguity of mm, +// the ambiguity of "mm", // - the element size does not include additional type specifiers like "epi", // - some macros contain value args that are updated, // - specialized shift and rotate functions that move elements around // use the notation "1x32" to indicate the distance moved as units of // the element size. +// // -// [prefix]_[op][op_size]_[lane_size] -// +// prefix_op[esize]_[vec_size] +// // prefix: indicates the vector type of the returned value of a function, // the type of the vector args of a function or the type of a data // identifier. @@ -52,14 +53,14 @@ // c512: 512 bit constant vector data // mm512: 512 bit vector intrinsic function // -// op: describes the operation of the function or names the constant -// identifier. +// op: describes the operation of the function or names the data identifier. // -// op_size: optional, used if the size of the operation is different than the -// size specified in the prefix. +// op_size: optional, element size of operation +// vsize: optional, lane size used when a function operates on elements of +// vectors within lanes. // -// lane_size: optional, used when a function operates on lanes of packed -// elements within a vector. +// Ex: mm256_ror1x64_128 rotates each 128 bit lane of a 256 bit vector +// right by 64 bits. // // Macros vs inline functions: // @@ -80,7 +81,7 @@ // they being passed as arguments it is good practice to always define // arguments even if they have the same name. // -// General tips for inline functions: +// General guidelines for inline functions: // // Inline functions should not have loops, it defeats the purpose of inlining. // Inline functions should be short, the benefit is lost and the memory cost @@ -98,12 +99,21 @@ #include #include +// 64 bit seems completely useless + //////////////////////////////////////////////////////////////// // // 64 bit MMX vectors. // -// There are rumours MMX wil be removed. - +// There are rumours MMX wil be removed. Although casting with int64 +// works there is likely some overhead to move the data to An MMX register +// and back. 
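+// For example, mm64_bswap_32 below can be fed a uint64_t directly: the +// (__m64) cast inside the macro moves the value to an MMX register for the +// shuffle and the caller casts the __m64 result back out, paying for that +// round trip.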
+// Byte swap and rotation may be more efficient using an MMX shuffle +// except that it won't compile due to a "target specific option mismatch" +// with "inlining failed in call to always inline". MMX was designed for +// 32 bit CPUs and might not work on 64 bit CPUs where the CPU has full +// support for 64 bit operations without vectoring. +// // Universal 64 bit overlay union _m64v { @@ -151,51 +161,68 @@ typedef union _m64_v16 m64_v16; #define m64_neg1 ( (__m64)0xFFFFFFFFFFFFFFFFULL ) */ + +#define casti_m64(p,i) (((__m64*)(p))[(i)]) + + +// cast all arguments as the're likely uint64_t + // Bitwise not: ~(a) -#define mm64_not( a ) _mm_xor_si64( a, m64_neg1 ) +#define mm64_not( a ) _mm_xor_si64( (__m64)a, m64_neg1 ) // Unary negate elements -#define mm64_negate_32( v ) _mm_sub_pi32( m64_zero, v ) -#define mm64_negate_16( v ) _mm_sub_pi16( m64_zero, v ) +#define mm64_negate_32( v ) _mm_sub_pi32( m64_zero, (__m64)v ) +#define mm64_negate_16( v ) _mm_sub_pi16( m64_zero, (__m64)v ) // Rotate bits in packed elements of 64 bit vector -#define mm64_rotl_32( a, n ) \ - _mm_or_si64( _mm_slli_pi32( a, n ), _mm_srli_pi32( a, 32-(n) ) ) +#define mm64_rol_32( a, n ) \ + _mm_or_si64( _mm_slli_pi32( a, n ), _mm_srli_pi32( (__m64)a, 32-(n) ) ) -#define mm64_rotr_32( a, n ) \ - _mm_or_si64( _mm_srli_pi32( a, n ), _mm_slli_pi32( a, 32-(n) ) ) +#define mm64_ror_32( a, n ) \ + _mm_or_si64( _mm_srli_pi32( a, n ), _mm_slli_pi32( (__m64)a, 32-(n) ) ) -#define mm64_rotl_16( a, n ) \ - _mm_or_si64( _mm_slli_pi16( a, n ), _mm_srli_pi16( a, 16-(n) ) ) +#define mm64_rol_16( a, n ) \ + _mm_or_si64( _mm_slli_pi16( a, n ), _mm_srli_pi16( (__m64)a, 16-(n) ) ) -#define mm64_rotr_16( a, n ) \ - _mm_or_si64( _mm_srli_pi16( a, n ), _mm_slli_pi16( a, 16-(n) ) ) +#define mm64_ror_16( a, n ) \ + _mm_or_si64( _mm_srli_pi16( a, n ), _mm_slli_pi16( (__m64)a, 16-(n) ) ) -// Rotate packed elements accross lanes +// Rotate packed elements accross lanes. Useful for byte swap and byte +// rotation. + +// _mm_shuffle_pi8 requires SSSE3 while _mm_shuffle_pi16 requires SSE +// even though these are MMX instructions. // Swap hi & lo 32 bits. -#define mm64_swap_32( a ) _mm_shuffle_pi16( a, 0x4e ) +#define mm64_swap32( a ) _mm_shuffle_pi16( (__m64)a, 0x4e ) + +#define mm64_ror1x16_64( a ) _mm_shuffle_pi16( (__m64)a, 0x39 ) +#define mm64_rol1x16_64( a ) _mm_shuffle_pi16( (__m64)a, 0x93 ) // Swap hi & lo 16 bits of each 32 bit element -#define mm64_swap32_16( a ) _mm_shuffle_pi16( a, 0xb1 ) +#define mm64_swap16_32( a ) _mm_shuffle_pi16( (__m64)a, 0xb1 ) -#define mm64_ror_1x16( v ) _mm_shuffle_pi16( v, 0x39 ) -#define mm64_rol_1x16( v ) _mm_shuffle_pi16( v, 0x93 ) +#if defined(__SSSE3__) // Endian byte swap packed elements #define mm64_bswap_32( v ) \ - _mm_shuffle_pi8( v, _mm_set_pi8( 4,5,6,7, 0,1,2,3 ) ) + _mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 4,5,6,7, 0,1,2,3 ) ) #define mm64_bswap_16( v ) \ - _mm_shuffle_pi8( v, _mm_set_pi8( 6,7, 4,5, 2,3, 0,1 ) ); + _mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 6,7, 4,5, 2,3, 0,1 ) ); + +#endif // Invert vector: {3,2,1,0} -> {0,1,2,3} -#define mm64_invert_16( v ) _mm_shuffle_pi16( a, 0x1b ) - -#define mm64_invert_8( v ) \ - _mm_shuffle_pi8( v, _mm_set_pi8( 0,1,2,3,4,5,6,7 ) ); +#define mm64_invert_16( v ) _mm_shuffle_pi16( (__m64)v, 0x1b ) +#if defined(__SSSE3__) + +#define mm64_invert_8( v ) \ + _mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 0,1,2,3,4,5,6,7 ) ); + +#endif // A couple of 64 bit scalar functions. restrictive, data must be aligned and // integral. 
The compiler can probably do just as well with memset. @@ -268,8 +295,8 @@ typedef union _m128_v8 m128_v8; // Compile time constant initializers are type agnostic and can have // a pointer handle of almost any type. All arguments must be scalar constants. -// These iniitializers should only be used at compile time to initialize -// vector arrays. All data reside in memory. +// up to 64 bits. These iniitializers should only be used at compile time +// to initialize vector arrays. All data reside in memory. #define mm128_const_64( x1, x0 ) {{ x1, x0 }} #define mm128_const1_64( x ) {{ x, x }} @@ -383,7 +410,6 @@ typedef union _m128_v8 m128_v8; ((uint32_t*)d)[3] = (uint32_t)s3; // Scatter data from contiguous memory. -// All arguments are pointers #define mm128_scatter_64( d0, d1, s ) \ *( (uint64_t*)d0) = ((uint64_t*)s)[0]; \ *( (uint64_t*)d1) = ((uint64_t*)s)[1]; @@ -457,9 +483,6 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n ) #define mm128_ror_1x32( v ) _mm_shuffle_epi32( v, 0x39 ) #define mm128_rol_1x32( v ) _mm_shuffle_epi32( v, 0x93 ) -#define mm128_swap32_16( v ) _mm_shuffle_epi8( v, \ - _mm_set_epi8( 13,12,15,14, 9,8,11,10, 5,4,7,6, 1,0,3,2 ) - #define mm128_ror_1x16( v ) \ _mm_shuffle_epi8( v, _mm_set_epi8( 1, 0,15,14,13,12,11,10 \ 9, 8, 7, 6, 5, 4, 3, 2 ) ) @@ -496,9 +519,15 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n ) // // Rotate elements within lanes. -#define mm128_swap64_32( v ) _mm_shuffle_epi32( v, 0xb1 ) +#define mm128_swap32_64( v ) _mm_shuffle_epi32( v, 0xb1 ) -#define mm128_swap32_16( v ) _mm_shuffle_epi8( v, \ +#define mm128_ror16_64( v ) _mm_shuffle_epi8( v, \ + _mm_set_epi8( 9, 8,15,14,13,12,11,10, 1, 0, 7, 6, 5, 4, 3, 2 ) +#define mm128_rol16_64( v ) _mm_shuffle_epi8( v, \ + _mm_set_epi8( 13,12,11,10, 9, 8,15,14, 5, 4, 3, 2, 1, 0, 7, 6 ) + + +#define mm128_swap16_32( v ) _mm_shuffle_epi8( v, \ _mm_set_epi8( 13,12,15,14, 9,8,11,10, 5,4,7,6, 1,0,3,2 ) // @@ -543,75 +572,134 @@ static inline __m128i mm128_bswap_16( __m128i v ) #endif // SSSE3 else SSE2 // -// Concatenate 128 bit vectors v1 & v2 to form a 256 bit vector then rotate it -// in place. Source arguments are overwritten. +// Rotate in place concatenated 128 bit vectors as one 256 bit vector. -#define mm128_swap256_128(v1, v2) \ +// Swap 128 bit vectorse. + +#define mm128_swap128_256(v1, v2) \ v1 = _mm_xor_si128(v1, v2); \ v2 = _mm_xor_si128(v1, v2); \ v1 = _mm_xor_si128(v1, v2); +// Concatenate v1 & v2 and rotate as one 256 bit vector. #if defined(__SSE4_1__) -// There are no SSE2 compatible versions of these functions. 
- -#define mm128_ror256_1x64( v1, v2 ) \ +#define mm128_ror1x64_256( v1, v2 ) \ do { \ - __m128i t = _mm_alignr_epi8( v1, v2, 8 ); \ - v1 = _mm_alignr_epi8( v2, v1, 8 ); \ - v2 = t; \ + __m128i t = _mm_alignr_epi8( v1, v2, 8 ); \ + v1 = _mm_alignr_epi8( v2, v1, 8 ); \ + v2 = t; \ } while(0) -#define mm128_rol256_1x64( v1, v2 ) \ +#define mm128_rol1x64_256( v1, v2 ) \ do { \ - __m128i t = _mm_alignr_epi8( v1, v2, 8 ); \ - v2 = _mm_alignr_epi8( v2, v1, 8 ); \ - v1 = t; \ + __m128i t = _mm_alignr_epi8( v1, v2, 8 ); \ + v2 = _mm_alignr_epi8( v2, v1, 8 ); \ + v1 = t; \ } while(0) -#define mm128_ror256_1x32( v1, v2 ) \ +#define mm128_ror1x32_256( v1, v2 ) \ do { \ - __m128i t = _mm_alignr_epi8( v1, v2, 4 ); \ - v1 = _mm_alignr_epi8( v2, v1, 4 ); \ - v2 = t; \ + __m128i t = _mm_alignr_epi8( v1, v2, 4 ); \ + v1 = _mm_alignr_epi8( v2, v1, 4 ); \ + v2 = t; \ } while(0) -#define mm128_rol256_1x32( v1, v2 ) \ +#define mm128_rol1x32_256( v1, v2 ) \ do { \ - __m128i t = _mm_alignr_epi8( v1, v2, 12 ); \ - v2 = _mm_alignr_epi8( v2, v1, 12 ); \ - v1 = t; \ + __m128i t = _mm_alignr_epi8( v1, v2, 12 ); \ + v2 = _mm_alignr_epi8( v2, v1, 12 ); \ + v1 = t; \ } while(0) -#define mm128_ror256_1x16( v1, v2 ) \ +#define mm128_ror1x16_256( v1, v2 ) \ do { \ - __m128i t = _mm_alignr_epi8( v1, v2, 2 ); \ - v1 = _mm_alignr_epi8( v2, v1, 2 ); \ - v2 = t; \ + __m128i t = _mm_alignr_epi8( v1, v2, 2 ); \ + v1 = _mm_alignr_epi8( v2, v1, 2 ); \ + v2 = t; \ } while(0) -#define mm128_rol256_1x16( v1, v2 ) \ +#define mm128_rol1x16_256( v1, v2 ) \ do { \ - __m128i t = _mm_alignr_epi8( v1, v2, 14 ); \ - v2 = _mm_alignr_epi8( v2, v1, 14 ); \ - v1 = t; \ + __m128i t = _mm_alignr_epi8( v1, v2, 14 ); \ + v2 = _mm_alignr_epi8( v2, v1, 14 ); \ + v1 = t; \ } while(0) -#define mm128_ror256_1x8( v1, v2 ) \ +#define mm128_ror1x8_256( v1, v2 ) \ do { \ - __m128i t = _mm_alignr_epi8( v1, v2, 1 ); \ - v1 = _mm_alignr_epi8( v2, v1, 1 ); \ - v2 = t; \ + __m128i t = _mm_alignr_epi8( v1, v2, 1 ); \ + v1 = _mm_alignr_epi8( v2, v1, 1 ); \ + v2 = t; \ } while(0) -#define mm128_rol256_1x8( v1, v2 ) \ +#define mm128_rol1x8_256( v1, v2 ) \ do { \ - __m128i t = _mm_alignr_epi8( v1, v2, 15 ); \ - v2 = _mm_alignr_epi8( v2, v1, 15 ); \ - v1 = t; \ + __m128i t = _mm_alignr_epi8( v1, v2, 15 ); \ + v2 = _mm_alignr_epi8( v2, v1, 15 ); \ + v1 = t; \ } while(0) -#endif // SSE4.1 +#else // SSE2 + +#define mm128_ror1x64_256( v1, v2 ) \ +do { \ + __m128i t = _mm_srli_si128( v1, 8 ) | _mm_slli_si128( v2, 24 ); \ + v2 = _mm_srli_si128( v2, 8 ) | _mm_slli_si128( v1, 24 ); \ + v1 = t; \ +} while(0) + +#define mm128_rol1x64_256( v1, v2 ) \ +do { \ + __m128i t = _mm_slli_si128( v1, 8 ) | _mm_srli_si128( v2, 24 ); \ + v2 = _mm_slli_si128( v2, 8 ) | _mm_srli_si128( v1, 24 ); \ + v1 = t; \ +} while(0) + +#define mm128_ror1x32_256( v1, v2 ) \ +do { \ + __m128i t = _mm_srli_si128( v1, 4 ) | _mm_slli_si128( v2, 28 ); \ + v2 = _mm_srli_si128( v2, 4 ) | _mm_slli_si128( v1, 28 ); \ + v1 = t; \ +} while(0) + +#define mm128_rol1x32_256( v1, v2 ) \ +do { \ + __m128i t = _mm_slli_si128( v1, 4 ) | _mm_srli_si128( v2, 28 ); \ + v2 = _mm_slli_si128( v2, 4 ) | _mm_srli_si128( v1, 28 ); \ + v1 = t; \ +} while(0) + +#define mm128_ror1x16_256( v1, v2 ) \ +do { \ + __m128i t = _mm_srli_si128( v1, 2 ) | _mm_slli_si128( v2, 30 ); \ + v2 = _mm_srli_si128( v2, 2 ) | _mm_slli_si128( v1, 30 ); \ + v1 = t; \ +} while(0) + +#define mm128_rol1x16_256( v1, v2 ) \ +do { \ + __m128i t = _mm_slli_si128( v1, 2 ) | _mm_srli_si128( v2, 30 ); \ + v2 = _mm_slli_si128( v2, 2 ) | _mm_srli_si128( v1, 30 ); \ + 
v1 = t; \ +} while(0) + +#define mm128_ror1x8_256( v1, v2 ) \ +do { \ + __m128i t = _mm_srli_si128( v1, 1 ) | _mm_slli_si128( v2, 31 ); \ + v2 = _mm_srli_si128( v2, 1 ) | _mm_slli_si128( v1, 31 ); \ + v1 = t; \ +} while(0) + +#define mm128_rol1x8_256( v1, v2 ) \ +do { \ + __m128i t = _mm_slli_si128( v1, 1 ) | _mm_srli_si128( v2, 31 ); \ + v2 = _mm_slli_si128( v2, 1 ) | _mm_srli_si128( v1, 31 ); \ + v1 = t; \ +} while(0) + + +#endif // SSE4.1 else SSE2 #if defined (__AVX2__) @@ -746,6 +834,64 @@ typedef union _m256_v8 m256_v8; #define mm256_negate_32( a ) _mm256_sub_epi32( m256_zero, a ) #define mm256_negate_16( a ) _mm256_sub_epi16( m256_zero, a ) +// +// Vector size conversion. +// +// Allows operations on either or both halves of a 256 bit vector serially. +// Handy for parallel AES. +// Caveats: +// _mm256_castsi256_si128 is free and without side effects. +// _mm256_castsi128_si256 is also free but leaves the high half undefined. +// To avoid side effects use _mm256_inserti128_si256 for the lo half +// unless the hi half is to be subsequently overwritten anyway. +#define mm128_ext_lo128_256( a ) _mm256_castsi256_si128( a ) +#define mm128_ext_hi128_256( a ) _mm256_extracti128_si256( a, 1 ) + +// input __m128i, returns __m256i +// To build a 256 bit vector from 2 128 bit vectors lo must be done first. +// lo alone leaves hi undefined, hi alone leaves lo unchanged. +// Both cost one clock while preserving the other half.. +// Insert b into specified half of a leaving other half of a unchanged. +#define mm256_ins_lo128_256( a, b ) _mm256_inserti128_si256( a, b, 0 ) +#define mm256_ins_hi128_256( a, b ) _mm256_inserti128_si256( a, b, 1 ) + +// concatenate two 128 bit vectors into one 256 bit vector +#define mm256_concat_128( hi, lo ) \ + mm256_ins_hi128_256( _mm256_castsi128_si256( lo ), hi ) + +// Parallel AES, for when x is expected to be in a 256 bit register. +#define mm256_aesenc_2x128( x ) \ + mm256_concat_128( \ + _mm_aesenc_si128( mm128_ext_hi128_256( x ), m128_zero ), \ + _mm_aesenc_si128( mm128_ext_lo128_256( x ), m128_zero ) ) + +#define mm256_aesenckey_2x128( x, k ) \ + mm256_concat_128( \ + _mm_aesenc_si128( mm128_ext_hi128_256( x ), \ + mm128_ext_lo128_256( k ) ), \ + _mm_aesenc_si128( mm128_ext_hi128_256( x ), \ + mm128_ext_lo128_256( k ) ) ) + +#define mm256_paesenc_2x128( y, x ) do \ +{ \ + __m256i *X = (__m256i*)x; \ + __m256i *Y = (__m256i*)y; \ + y[0] = _mm_aesenc_si128( x[0], m128_zero ); \ + y[1] = _mm_aesenc_si128( x[1], m128_zero ); \ +} while(0); + +// With pointers. +#define mm256_paesenckey_2x128( y, x, k ) do \ +{ \ + __m256i *X = (__m256i*)x; \ + __m256i *Y = (__m256i*)y; \ + __m256i *K = (__m256i*)ky; \ + y[0] = _mm_aesenc_si128( x[0], K[0] ); \ + y[1] = _mm_aesenc_si128( x[1], K[1] ); \ +} while(0); + + + // // Pointer casting @@ -973,37 +1119,43 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n ) // Rotate elements within lanes of 256 bit vector. // Swap 64 bit elements in each 128 bit lane. -#define mm256_swap128_64( v ) _mm256_shuffle_epi32( v, 0x4e ) +#define mm256_swap64_128( v ) _mm256_shuffle_epi32( v, 0x4e ) // Rotate each 128 bit lane by one 32 bit element. -#define mm256_ror128_1x32( v ) _mm256_shuffle_epi32( v, 0x39 ) -#define mm256_rol128_1x32( v ) _mm256_shuffle_epi32( v, 0x93 ) +#define mm256_ror1x32_128( v ) _mm256_shuffle_epi32( v, 0x39 ) +#define mm256_rol1x32_128( v ) _mm256_shuffle_epi32( v, 0x93 ) // Rotate each 128 bit lane by one 16 bit element. 
-#define mm256_rol128_1x16( v ) \ +#define mm256_rol1x16_128( v ) \ _mm256_shuffle_epi8( 13,12,11,10, 9,8,7,6, 5,4,3,2, 1,0,15,14 ) -#define mm256_ror128_1x16( v ) \ +#define mm256_ror1x16_128( v ) \ _mm256_shuffle_epi8( 1,0,15,14, 13,12,11,10, 9,8,7,6, 5,4,3,2 ) // Rotate each 128 bit lane by one byte -#define mm256_rol128_1x8( v ) \ +#define mm256_rol1x8_128( v ) \ _mm256_shuffle_epi8( 14, 13,12,11, 10,9,8,7, 6,5,4,3, 2,1,0,15 ) -#define mm256_ror128_1x8( v ) \ +#define mm256_ror1x8_128( v ) \ _mm256_shuffle_epi8( 0,15,14,13, 12,11,10,9, 8,7,6,5, 4,3,2,1 ) // Rotate each 128 bit lane by c bytes. -#define mm256_ror128_x8( v, c ) \ +#define mm256_bror_128( v, c ) \ _mm256_or_si256( _mm256_bsrli_epi128( v, c ), \ _mm256_bslli_epi128( v, 16-(c) ) ) -#define mm256_rol128_x8( v, c ) \ +#define mm256_brol_128( v, c ) \ _mm256_or_si256( _mm256_bslli_epi128( v, c ), \ _mm256_bsrli_epi128( v, 16-(c) ) ) // Swap 32 bit elements in each 64 bit lane -#define mm256_swap64_32( v ) _mm256_shuffle_epi32( v, 0xb1 ) +#define mm256_swap32_64( v ) _mm256_shuffle_epi32( v, 0xb1 ) + +#define mm256_ror16_64( v ) \ + _mm256_shuffle_epi8( 9, 8,15,14,13,12,11,10, 1, 0, 7, 6, 5, 4, 3, 2 ); +#define mm256_rol16_64( v ) \ + _mm256_shuffle_epi8( 13,12,11,10, 9, 8,15,14, 5, 4, 3, 2, 1, 0, 7, 6 ); + // Swap 16 bit elements in each 32 bit lane -#define mm256_swap32_16( v ) _mm256_shuffle_epi8( v, \ +#define mm256_swap16_32( v ) _mm256_shuffle_epi8( v, \ _mm_set_epi8( 13,12,15,14, 9,8,11,10, 5,4,7,6, 1,0,3,2 ) // @@ -1034,75 +1186,75 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n ) // Some of these can use permute but appears to be slower. Maybe a Ryzen // issue -#define mm256_swap512_256 (v1, v2) \ +#define mm256_swap256_512 (v1, v2) \ v1 = _mm256_xor_si256(v1, v2); \ v2 = _mm256_xor_si256(v1, v2); \ v1 = _mm256_xor_si256(v1, v2); -#define mm256_ror512_1x128( v1, v2 ) \ +#define mm256_ror1x128_512( v1, v2 ) \ do { \ __m256i t = _mm256_alignr_epi8( v1, v2, 16 ); \ v1 = _mm256_alignr_epi8( v2, v1, 16 ); \ v2 = t; \ } while(0) -#define mm256_rol512_1x128( v1, v2 ) \ +#define mm256_rol1x128_512( v1, v2 ) \ do { \ __m256i t = _mm256_alignr_epi8( v1, v2, 16 ); \ v2 = _mm256_alignr_epi8( v2, v1, 16 ); \ v1 = t; \ } while(0) -#define mm256_ror512_1x64( v1, v2 ) \ +#define mm256_ror1x64_512( v1, v2 ) \ do { \ __m256i t = _mm256_alignr_epi8( v1, v2, 8 ); \ v1 = _mm256_alignr_epi8( v2, v1, 8 ); \ v2 = t; \ } while(0) -#define mm256_rol512_1x64( v1, v2 ) \ +#define mm256_rol1x64_512( v1, v2 ) \ do { \ __m256i t = _mm256_alignr_epi8( v1, v2, 24 ); \ v2 = _mm256_alignr_epi8( v2, v1, 24 ); \ v1 = t; \ } while(0) -#define mm256_ror512_1x32( v1, v2 ) \ +#define mm256_ror1x32_512( v1, v2 ) \ do { \ __m256i t = _mm256_alignr_epi8( v1, v2, 4 ); \ v1 = _mm256_alignr_epi8( v2, v1, 4 ); \ v2 = t; \ } while(0) -#define mm256_rol512_1x32( v1, v2 ) \ +#define mm256_rol1x32_512( v1, v2 ) \ do { \ __m256i t = _mm256_alignr_epi8( v1, v2, 28 ); \ v2 = _mm256_alignr_epi8( v2, v1, 28 ); \ v1 = t; \ } while(0) -#define mm256_ror512_1x16( v1, v2 ) \ +#define mm256_ror1x16_512( v1, v2 ) \ do { \ __m256i t = _mm256_alignr_epi8( v1, v2, 2 ); \ v1 = _mm256_alignr_epi8( v2, v1, 2 ); \ v2 = t; \ } while(0) -#define mm256_rol512_1x16( v1, v2 ) \ +#define mm256_rol1x16_512( v1, v2 ) \ do { \ __m256i t = _mm256_alignr_epi8( v1, v2, 30 ); \ v2 = _mm256_alignr_epi8( v2, v1, 30 ); \ v1 = t; \ } while(0) -#define mm256_ror512_1x8( v1, v2 ) \ +#define mm256_ror1x8_512( v1, v2 ) \ do { \ __m256i t = _mm256_alignr_epi8( v1, v2, 1 ); \ v1 = 
_mm256_alignr_epi8( v2, v1, 1 ); \ v2 = t; \ } while(0) -#define mm256_rol512_1x8( v1, v2 ) \ +#define mm256_rol1x8_512( v1, v2 ) \ do { \ __m256i t = _mm256_alignr_epi8( v1, v2, 31 ); \ v2 = _mm256_alignr_epi8( v2, v1, 31 ); \ @@ -1449,117 +1601,117 @@ typedef union _m512_v8 m512_v8; #define mm512_invert_128( v ) _mm512_permute4f128_epi32( a, 0x1b ) #define mm512_invert_64( v ) \ - _mm512_permutex_epi64( v, _mm512_set_epi64( 0,1,2,3,4,5,6,7 ) ) + _mm512_permutex_epi64( v, _mm512_set_epi64( 0,1,2,3,4,5,6,7 ) ) #define mm512_invert_32( v ) \ - _mm512_permutexvar_epi32( v, _mm512_set_epi32( \ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15 ) ) + _mm512_permutexvar_epi32( v, _mm512_set_epi32( \ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15 ) ) #define mm512_invert_16( v ) \ - _mm512_permutexvar_epi16( v, _mm512_set_epi32( \ - 0x00000001, 0x00020003, 0x00040005, 0x00060007, \ - 0x00080009, 0x000A000B, 0x000C000D, 0x000E000F, \ - 0x00100011, 0x00120013, 0x00140015, 0x00160017, \ - 0x00180019, 0x001A001B, 0x001C001D, 0x001E001F ) ) + _mm512_permutexvar_epi16( v, _mm512_set_epi32( \ + 0x00000001, 0x00020003, 0x00040005, 0x00060007, \ + 0x00080009, 0x000A000B, 0x000C000D, 0x000E000F, \ + 0x00100011, 0x00120013, 0x00140015, 0x00160017, \ + 0x00180019, 0x001A001B, 0x001C001D, 0x001E001F ) ) #define mm512_invert_8( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ - 0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F, \ - 0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F, \ - 0x20212223, 0x24252627, 0x28292A2B, 0x2C2D2E2F, \ - 0x30313233, 0x34353637, 0x38393A3B, 0x3C3D3E3F ) ) + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ + 0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F, \ + 0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F, \ + 0x20212223, 0x24252627, 0x28292A2B, 0x2C2D2E2F, \ + 0x30313233, 0x34353637, 0x38393A3B, 0x3C3D3E3F ) ) // // Rotate elements within 256 bit lanes of 512 bit vector. 
// Swap hi & lo 128 bits in each 256 bit lane -#define mm512_swap256_128( v ) _mm512_permutex_epi64( v, 0x4e ) +#define mm512_swap128_256( v ) _mm512_permutex_epi64( v, 0x4e ) // Rotate 256 bit lanes by one 64 bit element -#define mm512_ror256_1x64( v ) _mm512_permutex_epi64( v, 0x39 ) -#define mm512_rol256_1x64( v ) _mm512_permutex_epi64( v, 0x93 ) +#define mm512_ror1x64_256( v ) _mm512_permutex_epi64( v, 0x39 ) +#define mm512_rol1x64_256( v ) _mm512_permutex_epi64( v, 0x93 ) // Rotate 256 bit lanes by one 32 bit element -#define mm512_ror256_1x32( v ) \ - _mm512_permutexvar_epi32( v, _mm512_set_epi32( \ - 8,15,14,13,12,11,10, 9, 0, 7, 6, 5, 4, 3, 2, 1 ) ) -#define mm512_rol256_1x32( v ) \ - _mm512_permutexvar_epi32( v, _mm512_set_epi32( \ - 14,13,12,11,10, 9, 8,15, 6, 5, 4, 3, 2, 1, 0, 7 ) ) +#define mm512_ror1x32_256( v ) \ + _mm512_permutexvar_epi32( v, _mm512_set_epi32( \ + 8,15,14,13,12,11,10, 9, 0, 7, 6, 5, 4, 3, 2, 1 ) ) +#define mm512_rol1x32_256( v ) \ + _mm512_permutexvar_epi32( v, _mm512_set_epi32( \ + 14,13,12,11,10, 9, 8,15, 6, 5, 4, 3, 2, 1, 0, 7 ) ) -#define mm512_ror256_1x16( v ) \ - _mm512_permutexvar_epi16( v, _mm512_set_epi32( \ - 0x0010001F, 0x001E001D, 0x001C001B, 0x001A0019, \ - 0x00180017, 0x00160015, 0x00140013, 0x00120011, \ - 0x0000000F, 0x000E000D, 0x000C000B, 0x000A0009, \ - 0x00080007, 0x00060005, 0x00040003, 0x00020001 ) ) +#define mm512_ror1x16_256( v ) \ + _mm512_permutexvar_epi16( v, _mm512_set_epi32( \ + 0x0010001F, 0x001E001D, 0x001C001B, 0x001A0019, \ + 0x00180017, 0x00160015, 0x00140013, 0x00120011, \ + 0x0000000F, 0x000E000D, 0x000C000B, 0x000A0009, \ + 0x00080007, 0x00060005, 0x00040003, 0x00020001 ) ) -#define mm512_rol256_1x16( v ) \ - _mm512_permutexvar_epi16( v, _mm512_set_epi32( \ - 0x001E001D, 0x001C001B, 0x001A0019, 0x00180017, \ - 0x00160015, 0x00140013, 0x00120011, 0x0000000F, \ - 0x000E000D, 0x000C000B, 0x000A0009, 0x00080007, \ - 0x00060005, 0x00040003, 0x00020001, 0x0000001F ) ) +#define mm512_rol1x16_256( v ) \ + _mm512_permutexvar_epi16( v, _mm512_set_epi32( \ + 0x001E001D, 0x001C001B, 0x001A0019, 0x00180017, \ + 0x00160015, 0x00140013, 0x00120011, 0x0000000F, \ + 0x000E000D, 0x000C000B, 0x000A0009, 0x00080007, \ + 0x00060005, 0x00040003, 0x00020001, 0x0000001F ) ) -#define mm512_ror256_1x8( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ - 0x203F3E3D, 0x3C3B3A39, 0x38373635, 0x34333231, \ - 0x302F2E2D, 0x2C2B2A29, 0x28272625, 0x24232221, \ - 0x001F1E1D, 0x1C1B1A19, 0x18171615, 0x14131211, \ - 0x100F0E0D, 0x0C0B0A09, 0x08070605, 0x04030201 ) ) +#define mm512_ror1x8_256( v ) \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ + 0x203F3E3D, 0x3C3B3A39, 0x38373635, 0x34333231, \ + 0x302F2E2D, 0x2C2B2A29, 0x28272625, 0x24232221, \ + 0x001F1E1D, 0x1C1B1A19, 0x18171615, 0x14131211, \ + 0x100F0E0D, 0x0C0B0A09, 0x08070605, 0x04030201 ) ) -#define mm512_rol256_1x8( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ - 0x3E3D3C3B, 0x3A393837, 0x36353433, 0x3231302F, \ - 0x2E2D2C2B, 0x2A292827, 0x26252423, 0x2221203F, \ - 0x1E1D1C1B, 0x1A191817, 0x16151413, 0x1211100F, \ - 0x0E0D0C0B, 0x0A090807, 0x06050403, 0x0201001F ) ) +#define mm512_rol1x8_256( v ) \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ + 0x3E3D3C3B, 0x3A393837, 0x36353433, 0x3231302F, \ + 0x2E2D2C2B, 0x2A292827, 0x26252423, 0x2221203F, \ + 0x1E1D1C1B, 0x1A191817, 0x16151413, 0x1211100F, \ + 0x0E0D0C0B, 0x0A090807, 0x06050403, 0x0201001F ) ) // // Rotate elements within 128 bit lanes of 512 bit vector. 
// Swap hi & lo 64 bits in each 128 bit lane -#define mm512_swap128_64( v ) _mm512_permutex_epi64( v, 0xb1 ) +#define mm512_swap64_128( v ) _mm512_permutex_epi64( v, 0xb1 ) // Rotate 128 bit lanes by one 32 bit element -#define mm512_ror128_1x32( v ) _mm512_shuffle_epi32( v, 0x39 ) -#define mm512_rol128_1x32( v ) _mm512_shuffle_epi32( v, 0x93 ) +#define mm512_ror1x32_128( v ) _mm512_shuffle_epi32( v, 0x39 ) +#define mm512_rol1x32_128( v ) _mm512_shuffle_epi32( v, 0x93 ) -#define mm512_ror128_1x16( v ) \ - _mm512_permutexvar_epi16( v, _mm512_set_epi32( \ - 0x0018001F, 0x001E001D, 0x001C001B, 0x001A0019, \ - 0x00100017, 0x00160015, 0x00140013, 0x00120011, \ - 0x0008000F, 0x000E000D, 0x000C000B, 0x000A0009, \ - 0x00000007, 0x00060005, 0x00040003, 0x00020001 ) ) +#define mm512_ror1x16_128( v ) \ + _mm512_permutexvar_epi16( v, _mm512_set_epi32( \ + 0x0018001F, 0x001E001D, 0x001C001B, 0x001A0019, \ + 0x00100017, 0x00160015, 0x00140013, 0x00120011, \ + 0x0008000F, 0x000E000D, 0x000C000B, 0x000A0009, \ + 0x00000007, 0x00060005, 0x00040003, 0x00020001 ) ) -#define mm512_rol128_1x16( v ) \ - _mm512_permutexvar_epi16( v, _mm512_set_epi32( \ - 0x001E001D, 0x001C001B, 0x001A0019, 0x0018001F, \ - 0x00160015, 0x00140013, 0x00120011, 0x00100017, \ - 0x000E000D, 0x000C000B, 0x000A0009, 0x0008000F, \ - 0x00060005, 0x00040003, 0x00020001, 0x00000007 ) ) +#define mm512_rol1x16_128( v ) \ + _mm512_permutexvar_epi16( v, _mm512_set_epi32( \ + 0x001E001D, 0x001C001B, 0x001A0019, 0x0018001F, \ + 0x00160015, 0x00140013, 0x00120011, 0x00100017, \ + 0x000E000D, 0x000C000B, 0x000A0009, 0x0008000F, \ + 0x00060005, 0x00040003, 0x00020001, 0x00000007 ) ) -#define mm512_ror128_1x8( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ - 0x303F3E3D, 0x3C3B3A39, 0x38373635, 0x34333231, \ - 0x202F2E2D, 0x2C2B2A29, 0x28272625, 0x24232221, \ - 0x101F1E1D, 0x1C1B1A19, 0x18171615, 0x14131211, \ - 0x000F0E0D, 0x0C0B0A09, 0x08070605, 0x04030201 ) ) +#define mm512_ror1x8_128( v ) \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ + 0x303F3E3D, 0x3C3B3A39, 0x38373635, 0x34333231, \ + 0x202F2E2D, 0x2C2B2A29, 0x28272625, 0x24232221, \ + 0x101F1E1D, 0x1C1B1A19, 0x18171615, 0x14131211, \ + 0x000F0E0D, 0x0C0B0A09, 0x08070605, 0x04030201 ) ) -#define mm512_rol128_1x8( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ - 0x3E3D3C3B, 0x3A393837, 0x36353433. 0x3231303F, \ - 0x2E2D2C2B, 0x2A292827, 0x26252423, 0x2221202F, \ - 0x1E1D1C1B, 0x1A191817, 0x16151413, 0x1211101F, \ - 0x0E0D0C0B, 0x0A090807, 0x06050403, 0x0201000F ) ) +#define mm512_rol1x8_128( v ) \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ + 0x3E3D3C3B, 0x3A393837, 0x36353433. 0x3231303F, \ + 0x2E2D2C2B, 0x2A292827, 0x26252423, 0x2221202F, \ + 0x1E1D1C1B, 0x1A191817, 0x16151413, 0x1211101F, \ + 0x0E0D0C0B, 0x0A090807, 0x06050403, 0x0201000F ) ) // Rotate 128 bit lanes by c bytes. -#define mm512_ror128_x8( v, c ) \ +#define mm512_bror_128( v, c ) \ _mm512_or_si512( _mm512_bsrli_epi128( v, c ), \ _mm512_bslli_epi128( v, 16-(c) ) ) -#define mm512_rol128_x8( v, c ) \ +#define mm512_brol_128( v, c ) \ _mm512_or_si512( _mm512_bslli_epi128( v, c ), \ _mm512_bsrli_epi128( v, 16-(c) ) ) @@ -1568,83 +1720,83 @@ typedef union _m512_v8 m512_v8; // Rotate elements within 64 bit lanes. // Swap 32 bit elements in each 64 bit lane -#define mm512_swap64_32( v ) _mm512_shuffle_epi32( v, 0xb1 ) +#define mm512_swap32_64( v ) _mm512_shuffle_epi32( v, 0xb1 ) // _mm512_set_epi8 doesn't seem to work // Rotate each 64 bit lane by one 16 bit element. 
-#define mm512_ror64_1x16( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ - 0x39383F3E, 0x3D3C3B3A, 0x31303736, 0x35343332, \ - 0x29282F2E, 0x2D2C2B2A, 0x21202726, 0x25242322, \ - 0x19181F1E, 0x1D1C1B1A, 0x11101716, 0x15141312, \ - 0x09080F0E, 0x0D0C0B0A, 0x01000706, 0x05040302 ) ) +#define mm512_ror1x16_64( v ) \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ + 0x39383F3E, 0x3D3C3B3A, 0x31303736, 0x35343332, \ + 0x29282F2E, 0x2D2C2B2A, 0x21202726, 0x25242322, \ + 0x19181F1E, 0x1D1C1B1A, 0x11101716, 0x15141312, \ + 0x09080F0E, 0x0D0C0B0A, 0x01000706, 0x05040302 ) ) -#define mm512_rol64_1x16( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ - 0x3D3C3B3A, 0x39383F3E, 0x35343332, 0x31303736 \ - 0x2D2C2B2A, 0x29282F2E, 0x25242322, 0x21202726 \ - 0x1D1C1B1A, 0x19181F1E, 0x15141312, 0x11101716 \ - 0x0D0C0B0A, 0x09080F0E, 0x05040302, 0x01000706 ) ) +#define mm512_rol1x16_64( v ) \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ + 0x3D3C3B3A, 0x39383F3E, 0x35343332, 0x31303736 \ + 0x2D2C2B2A, 0x29282F2E, 0x25242322, 0x21202726 \ + 0x1D1C1B1A, 0x19181F1E, 0x15141312, 0x11101716 \ + 0x0D0C0B0A, 0x09080F0E, 0x05040302, 0x01000706 ) ) // Rotate each 64 bit lane by one byte. -#define mm512_ror64_1x8( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ - 0x383F3E3D, 0x3C3B3A39, 0x30373635, 0x34333231, \ - 0x282F2E2D, 0x2C2B2A29, 0x20272625, 0x24232221, \ - 0x181F1E1D, 0x1C1B1A19, 0x10171615, 0x14131211, \ - 0x080F0E0D, 0x0C0B0A09, 0x00070605, 0x0403020 ) +#define mm512_ror1x8_64( v ) \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ + 0x383F3E3D, 0x3C3B3A39, 0x30373635, 0x34333231, \ + 0x282F2E2D, 0x2C2B2A29, 0x20272625, 0x24232221, \ + 0x181F1E1D, 0x1C1B1A19, 0x10171615, 0x14131211, \ + 0x080F0E0D, 0x0C0B0A09, 0x00070605, 0x0403020 ) -#define mm512_rol64_1x8( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ - 0x3E3D3C3B, 0x3A39383F, 0x36353433, 0x32313037, \ - 0x2E2D2C2B, 0x2A29282F, 0x26252423, 0x22212027, \ - 0x1E1D1C1B, 0x1A19181F, 0x16151413, 0x12111017, \ - 0x0E0D0C0B, 0x0A09080F, 0x06050403, 0x02010007 ) +#define mm512_rol1x8_64( v ) \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ + 0x3E3D3C3B, 0x3A39383F, 0x36353433, 0x32313037, \ + 0x2E2D2C2B, 0x2A29282F, 0x26252423, 0x22212027, \ + 0x1E1D1C1B, 0x1A19181F, 0x16151413, 0x12111017, \ + 0x0E0D0C0B, 0x0A09080F, 0x06050403, 0x02010007 ) // // Rotate elements within 32 bit lanes. 
-#define mm512_swap32_16( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ - 0x001D001C, 0x001F001E, 0x00190018, 0x001B001A, \ - 0x00150014, 0x00170016, 0x00110010, 0x00130012, \ - 0x000D000C, 0x000F000E, 0x00190008, 0x000B000A, \ - 0x00050004, 0x00070006, 0x00110000, 0x00030002 ) +#define mm512_swap16_32( v ) \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ + 0x001D001C, 0x001F001E, 0x00190018, 0x001B001A, \ + 0x00150014, 0x00170016, 0x00110010, 0x00130012, \ + 0x000D000C, 0x000F000E, 0x00190008, 0x000B000A, \ + 0x00050004, 0x00070006, 0x00110000, 0x00030002 ) -#define mm512_ror32_8( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ - 0x3C3F3E3D, 0x383B3A39, 0x34373635, 0x30333231, \ - 0x2C2F2E2D, 0x282B2A29, 0x24272625, 0x20232221, \ - 0x1C1F1E1D, 0x181B1A19, 0x14171615, 0x10131211, \ - 0x0C0F0E0D, 0x080B0A09, 0x04070605, 0x00030201 ) ) +#define mm512_ror1x8_32( v ) \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ + 0x3C3F3E3D, 0x383B3A39, 0x34373635, 0x30333231, \ + 0x2C2F2E2D, 0x282B2A29, 0x24272625, 0x20232221, \ + 0x1C1F1E1D, 0x181B1A19, 0x14171615, 0x10131211, \ + 0x0C0F0E0D, 0x080B0A09, 0x04070605, 0x00030201 ) ) -#define mm512_rol32_8( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ - 0x3E3D3C3F, 0x3A39383B, 0x36353437, 0x32313033, \ - 0x2E2D2C2F, 0x2A29282B, 0x26252427, 0x22212023, \ - 0x1E1D1C1F, 0x1A19181B, 0x16151417, 0x12111013, \ - 0x0E0D0C0F, 0x0A09080B, 0x06050407, 0x02010003 ) ) +#define mm512_rol1x8_32( v ) \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ + 0x3E3D3C3F, 0x3A39383B, 0x36353437, 0x32313033, \ + 0x2E2D2C2F, 0x2A29282B, 0x26252427, 0x22212023, \ + 0x1E1D1C1F, 0x1A19181B, 0x16151417, 0x12111013, \ + 0x0E0D0C0F, 0x0A09080B, 0x06050407, 0x02010003 ) ) // // Swap bytes in vector elements, vectorized bswap. #define mm512_bswap_64( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ 0x38393A3B, 0x3C3D3E3F, 0x20313233, 0x34353637, \ 0x28292A2B, 0x2C2D2E2F, 0x20212223, 0x34353637, \ 0x18191A1B, 0x1C1D1E1F, 0x10111213, 0x14151617, \ 0x08090A0B, 0x0C0D0E0F, 0x00010203, 0x04050607 ) ) #define mm512_bswap_32( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi832( \ + _mm512_permutexvar_epi8( v, _mm512_set_epi832( \ 0x3C3D3E3F, 0x38393A3B, 0x34353637, 0x30313233, \ 0x3C3D3E3F, 0x38393A3B, 0x34353637, 0x30313233, \ 0x3C3D3E3F, 0x38393A3B, 0x34353637, 0x30313233, \ 0x3C3D3E3F, 0x38393A3B, 0x34353637, 0x30313233 ) ) #define mm512_bswap_16( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ 0x3E3F3C3D, 0x3A3B3839, 0x36373435, 0x32333031, \ 0x2E2F2C2D, 0x2A2B2829, 0x26272425, 0x22232021, \ 0x1E1F1C1D, 0x1A1B1819, 0x16171415, 0x12131011, \ @@ -1656,89 +1808,89 @@ typedef union _m512_v8 m512_v8; // These can all be done with 2 permutex2var instructions but they are // slower than either xor or alignr. 
-#define mm512_swap1024_512(v1, v2) \ +#define mm512_swap512_1024(v1, v2) \ v1 = _mm512_xor_si512(v1, v2); \ v2 = _mm512_xor_si512(v1, v2); \ v1 = _mm512_xor_si512(v1, v2); -#define mm512_ror1024_1x256( v1, v2 ) \ +#define mm512_ror1x256_1024( v1, v2 ) \ do { \ __m512i t = _mm512_alignr_epi64( v1, v2, 4 ); \ v1 = _mm512_alignr_epi64( v2, v1, 4 ); \ v2 = t; \ } while(0) -#define mm512_rol1024_1x256( v1, v2 ) \ +#define mm512_rol1x256_1024( v1, v2 ) \ do { \ __m512i t = _mm512_alignr_epi64( v1, v2, 4 ); \ v2 = _mm512_alignr_epi64( v2, v1, 4 ); \ v1 = t; \ } while(0) -#define mm512_ror1024_1x128( v1, v2 ) \ +#define mm512_ror1x128_1024( v1, v2 ) \ do { \ __m512i t = _mm512_alignr_epi64( v1, v2, 2 ); \ v1 = _mm512_alignr_epi64( v2, v1, 2 ); \ v2 = t; \ } while(0) -#define mm512_rol1024_1x128i( v1, v2 ) \ +#define mm512_rol1x128_1024( v1, v2 ) \ do { \ __m512i t = _mm512_alignr_epi64( v1, v2, 6 ); \ v2 = _mm512_alignr_epi64( v2, v1, 6 ); \ v1 = t; \ } while(0) -#define mm512_ror1024_1x64( v1, v2 ) \ +#define mm512_ror1x64_1024( v1, v2 ) \ do { \ __m512i t = _mm512_alignr_epi64( v1, v2, 1 ); \ v1 = _mm512_alignr_epi64( v2, v1, 1 ); \ v2 = t; \ } while(0) -#define mm512_rol1024_1x64( v1, v2 ) \ +#define mm512_rol1x64_1024( v1, v2 ) \ do { \ __m512i t = _mm512_alignr_epi64( v1, v2, 7 ); \ v2 = _mm512_alignr_epi64( v2, v1, 7 ); \ v1 = t; \ } while(0) -#define mm512_ror1024_1x32( v1, v2 ) \ +#define mm512_ror1x32_1024( v1, v2 ) \ do { \ __m512i t = _mm512_alignr_epi32( v1, v2, 1 ); \ v1 = _mm512_alignr_epi32( v2, v1, 1 ); \ v2 = t; \ } while(0) -#define mm512_rol1024_1x32( v1, v2 ) \ +#define mm512_rol1x32_1024( v1, v2 ) \ do { \ __m512i t = _mm512_alignr_epi32( v1, v2, 15 ); \ v2 = _mm512_alignr_epi32( v2, v1, 15 ); \ v1 = t; \ } while(0) -#define mm512_ror1024_1x16( v1, v2 ) \ +#define mm512_ror1x16_1024( v1, v2 ) \ do { \ __m512i t = _mm512_alignr_epi8( v1, v2, 2 ); \ v1 = _mm512_alignr_epi8( v2, v1, 2 ); \ v2 = t; \ } while(0) -#define mm512_rol1024_1x16( v1, v2 ) \ +#define mm512_rol1x16_1024( v1, v2 ) \ do { \ __m512i t = _mm512_alignr_epi8( v1, v2, 62 ); \ v2 = _mm512_alignr_epi8( v2, v1, 62 ); \ v1 = t; \ } while(0) -#define mm512_ror1024_1x8( v1, v2 ) \ +#define mm512_ror1x8_1024( v1, v2 ) \ do { \ __m512i t = _mm512_alignr_epi8( v1, v2, 1 ); \ v1 = _mm512_alignr_epi8( v2, v1, 1 ); \ v2 = t; \ } while(0) -#define mm512_rol1024_1x8( v1, v2 ) \ +#define mm512_rol1x8_1024( v1, v2 ) \ do { \ __m512i t = _mm512_alignr_epi8( v1, v2, 63 ); \ v2 = _mm512_alignr_epi8( v2, v1, 63 ); \ @@ -1747,12 +1899,29 @@ do { \ #endif // AVX512F +#if 0 ////////////////////////////////////////////////// // // Compile test. // // Code to test that macros compile. 
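// Editorial illustration, not part of the original patch or its compile
// tests: the two vector rotate macros above operate in place and overwrite
// both source arguments. A 1024 bit state held in two __m512i registers
// (v1 = high half, v2 = low half) is rotated right by one 64 bit element as
// shown below. The wrapper function and its name are illustrative only;
// AVX512F is assumed for _mm512_alignr_epi64.
#if defined(__AVX512F__)
static inline void ror1x64_1024_example( __m512i *hi, __m512i *lo )
{
   __m512i v1 = *hi, v2 = *lo;
   mm512_ror1x64_1024( v1, v2 );  // both v1 and v2 are updated by the macro
   *hi = v1;
   *lo = v2;
}
#endif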
+static inline __m64 mmx_compile_test( __m64 a )
+{
+ __m64 n[4];
+ __m64 m = _mm_set_pi32( 1, 2 );
+ uint64_t i = 3;
+ m = mm64_ror_32( m, 2 );
+ m = _mm_shuffle_pi16( m, 0xaa );
+#if defined(__SSSE3__)
+ m = _mm_shuffle_pi8( m, _mm_set_pi8( 0,1,2,3,4,5,6,7 ) );
+#endif
+ m = _mm_shuffle_pi8( m, (__m64)0x0102030405060708 );
+ i = (uint64_t) mm64_ror_32( (__m64)i, 7 );
+ casti_m64( n, 2 ) = m;
+ return a;
+}
+
 // Don't use universal overlay for initialized globals
 static const m128_v64 m128_v64_ex[4] = { mm128_const1_64( 3),
 c128_zero, c128_neg1, c128_one_64 };
@@ -1769,7 +1938,7 @@ static inline __m128i sse2_compile_test( __m128i *a )
 w = mm128_bswap_64( *a ); // sse2 vs ssse3
 w = mm128_ror_1x32( x.v128 );
 #if defined(__SSE4_1__)
- mm128_ror256_1x64( w, x.v128 ); // sse4.1 only
+ mm128_ror1x64_256( w, x.v128 ); // sse4.1 only
 #endif
 return w;
 }
@@ -1794,9 +1963,9 @@ static inline __m256i avx2_compile_test( __m256i *a )
 w = mm256_invert_32( w );
 w = mm256_bswap_64( *a );
 w = mm256_ror_1x32( w );
- mm256_ror512_1x64( w, x.v256 );
+ mm256_ror1x64_512( w, x.v256 );
 w = mm256_rolv_64( w, 2 );
- w = mm256_ror128_x8( w, 5 );
+ w = mm256_bror_128( w, 5 );
 return w;
 }
@@ -1818,8 +1987,8 @@ static inline __m512i avx512_compile_test( __m512i *a )
 w = mm512_invert_32( w );
 w = mm512_bswap_64( *a );
 w = mm512_ror_1x32( w );
- mm512_ror1024_1x64( w, x.v512 );
- w = mm512_ror128_x8( w, 5 );
+ mm512_ror1x64_1024( w, x.v512 );
+ w = mm512_bror_128( w, 5 );
 __m256i y = m256_zero;
 y = mm256_rorv_16( y, 3 );
@@ -1829,5 +1998,7 @@ static inline __m512i avx512_compile_test( __m512i *a )
 #endif // AVX512
+#endif // 0
+
 #endif // AVXDEFS_H__
diff --git a/build-allarch.sh b/build-allarch.sh
index 3b5e2fe..9da2d5d 100755
--- a/build-allarch.sh
+++ b/build-allarch.sh
@@ -1,10 +1,14 @@
 #!/bin/bash
+#
+# This script is not intended for users; it is only used for compile testing
+# during development. However, the information it contains may provide
+# compilation tips to users.
make distclean || echo clean rm -f config.status ./autogen.sh || echo done CFLAGS="-O3 -march=core-avx2 -msha -Wall" ./configure --with-curl -make -j 4 +make -j 16 strip -s cpuminer.exe mv cpuminer.exe cpuminer-avx2-sha.exe strip -s cpuminer @@ -14,7 +18,7 @@ make clean || echo clean rm -f config.status ./autogen.sh || echo done CFLAGS="-O3 -march=skylake-avx512 -Wall" ./configure --with-curl -make -j 4 +make -j 16 strip -s cpuminer.exe mv cpuminer.exe cpuminer-avx512.exe strip -s cpuminer @@ -24,7 +28,7 @@ make clean || echo clean rm -f config.status ./autogen.sh || echo done CFLAGS="-O3 -march=core-avx2 -Wall" ./configure --with-curl -make -j 4 +make -j 16 strip -s cpuminer.exe mv cpuminer.exe cpuminer-avx2.exe strip -s cpuminer @@ -34,7 +38,7 @@ make clean || echo clean rm -f config.status ./autogen.sh || echo done CFLAGS="-O3 -march=corei7-avx -Wall" ./configure --with-curl -make -j 4 +make -j 16 strip -s cpuminer.exe mv cpuminer.exe cpuminer-aes-avx.exe strip -s cpuminer @@ -44,7 +48,7 @@ make clean || echo clean rm -f config.status ./autogen.sh || echo done CFLAGS="-O3 -maes -msse4.2 -Wall" ./configure --with-curl -make -j 4 +make -j 16 strip -s cpuminer.exe mv cpuminer.exe cpuminer-aes-sse42.exe strip -s cpuminer @@ -54,7 +58,7 @@ make clean || echo clean rm -f config.status ./autogen.sh || echo done CFLAGS="-O3 -march=corei7 -Wall" ./configure --with-curl -make -j 4 +make -j 16 strip -s cpuminer.exe mv cpuminer.exe cpuminer-sse42.exe strip -s cpuminer @@ -64,7 +68,7 @@ make clean || echo clean rm -f config.status ./autogen.sh || echo done CFLAGS="-O3 -march=core2 -Wall" ./configure --with-curl -make -j 4 +make -j 16 strip -s cpuminer.exe mv cpuminer.exe cpuminer-ssse3.exe strip -s cpuminer @@ -74,7 +78,7 @@ make clean || echo clean rm -f config.status ./autogen.sh || echo done CFLAGS="-O3 -msse2 -Wall" ./configure --with-curl -make -j 4 +make -j 16 strip -s cpuminer.exe mv cpuminer.exe cpuminer-sse2.exe strip -s cpuminer @@ -83,8 +87,8 @@ mv cpuminer cpuminer-sse2 make clean || echo done rm -f config.status ./autogen.sh || echo done -CFLAGS="-O3 -march=znver1 -Wall" ./configure --with-curl -make -j 8 +CFLAGS="-O3 -march=znver1 -DRYZEN_ -Wall" ./configure --with-curl +make -j 16 strip -s cpuminer.exe mv cpuminer.exe cpuminer-zen.exe strip -s cpuminer @@ -94,7 +98,7 @@ make clean || echo done rm -f config.status ./autogen.sh || echo done CFLAGS="-O3 -march=native -Wall" ./configure --with-curl -make -j 8 +make -j 16 strip -s cpuminer.exe mv cpuminer.exe cpuminer-native.exe strip -s cpuminer diff --git a/configure b/configure index e05d06a..a8ee020 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.0.1. +# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.1. # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. @@ -577,8 +577,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='cpuminer-opt' PACKAGE_TARNAME='cpuminer-opt' -PACKAGE_VERSION='3.9.0.1' -PACKAGE_STRING='cpuminer-opt 3.9.0.1' +PACKAGE_VERSION='3.9.1' +PACKAGE_STRING='cpuminer-opt 3.9.1' PACKAGE_BUGREPORT='' PACKAGE_URL='' @@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures cpuminer-opt 3.9.0.1 to adapt to many kinds of systems. 
+\`configure' configures cpuminer-opt 3.9.1 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1404,7 +1404,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of cpuminer-opt 3.9.0.1:";; + short | recursive ) echo "Configuration of cpuminer-opt 3.9.1:";; esac cat <<\_ACEOF @@ -1509,7 +1509,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -cpuminer-opt configure 3.9.0.1 +cpuminer-opt configure 3.9.1 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by cpuminer-opt $as_me 3.9.0.1, which was +It was created by cpuminer-opt $as_me 3.9.1, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -2993,7 +2993,7 @@ fi # Define the identity of the package. PACKAGE='cpuminer-opt' - VERSION='3.9.0.1' + VERSION='3.9.1' cat >>confdefs.h <<_ACEOF @@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by cpuminer-opt $as_me 3.9.0.1, which was +This file was extended by cpuminer-opt $as_me 3.9.1, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -6756,7 +6756,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -cpuminer-opt config.status 3.9.0.1 +cpuminer-opt config.status 3.9.1 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index 3469366..3e87edb 100644 --- a/configure.ac +++ b/configure.ac @@ -1,4 +1,4 @@ -AC_INIT([cpuminer-opt], [3.9.0.1]) +AC_INIT([cpuminer-opt], [3.9.1]) AC_PREREQ([2.59c]) AC_CANONICAL_SYSTEM diff --git a/cpu-miner.c b/cpu-miner.c index 436c4b6..edbd654 100644 --- a/cpu-miner.c +++ b/cpu-miner.c @@ -141,9 +141,9 @@ double opt_diff_factor = 1.0; uint32_t zr5_pok = 0; bool opt_stratum_stats = false; -uint32_t accepted_count = 0L; -uint32_t rejected_count = 0L; -uint32_t solved_count = 0L; +uint32_t accepted_share_count = 0ULL; +uint32_t rejected_share_count = 0ULL; +uint32_t solved_block_count = 0ULL; double *thr_hashrates; double *thr_hashcount; double global_hashcount = 0; @@ -857,24 +857,24 @@ static int share_result( int result, struct work *work, const char *reason ) hashcount += thr_hashcount[i]; hashrate += thr_hashrates[i]; } - result ? accepted_count++ : rejected_count++; + result ? accepted_share_count++ : rejected_share_count++; if ( solved ) { - solved_count++; + solved_block_count++; if ( use_colors ) - sprintf( sol, CL_GRN " Solved" CL_WHT " %d", solved_count ); + sprintf( sol, CL_GRN " Solved" CL_WHT " %d", solved_block_count ); else - sprintf( sol, " Solved %d", solved_count ); + sprintf( sol, " Solved %d", solved_block_count ); } pthread_mutex_unlock(&stats_lock); global_hashcount = hashcount; global_hashrate = hashrate; - total_submits = accepted_count + rejected_count; + total_submits = accepted_share_count + rejected_share_count; - rate = ( result ? ( 100. * accepted_count / total_submits ) - : ( 100. * rejected_count / total_submits ) ); + rate = ( result ? ( 100. * accepted_share_count / total_submits ) + : ( 100. 
* rejected_share_count / total_submits ) ); if (use_colors) { @@ -889,7 +889,7 @@ static int share_result( int result, struct work *work, const char *reason ) // Rates > 99% and < 100% (rejects>0) display 99.9%. if ( result ) { - rate = 100. * accepted_count / total_submits; + rate = 100. * accepted_share_count / total_submits; if ( rate == 100.0 ) sprintf( rate_s, "%.0f", rate ); else @@ -897,7 +897,7 @@ static int share_result( int result, struct work *work, const char *reason ) } else { - rate = 100. * rejected_count / total_submits; + rate = 100. * rejected_share_count / total_submits; if ( rate < 0.1 ) sprintf( rate_s, "%.1f", 0.10 ); else @@ -926,26 +926,26 @@ static int share_result( int result, struct work *work, const char *reason ) { #if ((defined(_WIN64) || defined(__WINDOWS__))) applog( LOG_NOTICE, "%s %lu/%lu (%s%%), %s %sH, %s %sH/s", - sres, ( result ? accepted_count : rejected_count ), - total_submits, rate_s, hc, hc_units, hr, hr_units ); + sres, ( result ? accepted_share_count : rejected_share_count ), + total_submits, rate_s, hc, hc_units, hr, hr_units ); #else applog( LOG_NOTICE, "%s %lu/%lu (%s%%), %s %sH, %s %sH/s, %dC", - sres, ( result ? accepted_count : rejected_count ), - total_submits, rate_s, hc, hc_units, hr, hr_units, - (uint32_t)cpu_temp(0) ); + sres, ( result ? accepted_share_count : rejected_share_count ), + total_submits, rate_s, hc, hc_units, hr, hr_units, + (uint32_t)cpu_temp(0) ); #endif } else { #if ((defined(_WIN64) || defined(__WINDOWS__))) applog( LOG_NOTICE, "%s %lu/%lu (%s%%), diff %.3g%s, %s %sH/s", - sres, ( result ? accepted_count : rejected_count ), - total_submits, rate_s, sharediff, sol, hr, hr_units ); + sres, ( result ? accepted_share_count : rejected_share_count ), + total_submits, rate_s, sharediff, sol, hr, hr_units ); #else applog( LOG_NOTICE, "%s %lu/%lu (%s%%), diff %.3g%s, %s %sH/s, %dC", - sres, ( result ? accepted_count : rejected_count ), - total_submits, rate_s, sharediff, sol, hr, hr_units, - (uint32_t)cpu_temp(0) ); + sres, ( result ? accepted_share_count : rejected_share_count ), + total_submits, rate_s, sharediff, sol, hr, hr_units, + (uint32_t)cpu_temp(0) ); #endif } @@ -1544,7 +1544,7 @@ static bool get_work(struct thr_info *thr, struct work *work) return true; } -static bool submit_work(struct thr_info *thr, const struct work *work_in) +bool submit_work(struct thr_info *thr, const struct work *work_in) { struct workio_cmd *wc; /* fill out work request message */ @@ -1969,7 +1969,7 @@ static void *miner_thread( void *userdata ) // Scan for nonce nonce_found = algo_gate.scanhash( thr_id, &work, max_nonce, - &hashes_done ); + &hashes_done, mythr ); // record scanhash elapsed time gettimeofday( &tv_end, NULL ); @@ -1998,7 +1998,7 @@ static void *miner_thread( void *userdata ) break; } } - else + else { // only 1 nonce, in work ready to submit. if ( !submit_work( mythr, &work ) ) @@ -2043,7 +2043,7 @@ static void *miner_thread( void *userdata ) } // Display benchmark total // Update hashrate for API if no shares accepted yet. - if ( ( opt_benchmark || !accepted_count ) + if ( ( opt_benchmark || !accepted_share_count ) && thr_id == opt_n_threads - 1 ) { double hashrate = 0.; @@ -2482,15 +2482,15 @@ static void *stratum_thread(void *userdata ) if ( last_bloc_height != stratum.bloc_height ) { last_bloc_height = stratum.bloc_height; - if ( !opt_quiet ) - { +// if ( !opt_quiet ) +// { if (net_diff > 0.) 
applog(LOG_BLUE, "%s block %d, network diff %.3f", algo_names[opt_algo], stratum.bloc_height, net_diff); else applog(LOG_BLUE, "%s %s block %d", short_url, algo_names[opt_algo], stratum.bloc_height); - } +// } } restart_threads(); } diff --git a/interleave.h b/interleave.h index 9e11b3f..9a65c12 100644 --- a/interleave.h +++ b/interleave.h @@ -70,6 +70,13 @@ _mm_set_epi32( ((const uint32_t*)(s))[i3], ((const uint32_t*)(s))[i2], \ ((const uint32_t*)(s))[i1], ((const uint32_t*)(s))[i0] ) +// blend 2 vectors while interleaving: { hi[n], lo[n-1], ... hi[1], lo[0] } +#define mm128_interleave_blend_64( hi, lo ) \ + _mm256_blend_epi16( hi, lo, 0x0f ) +#define mm128_interleave_blend_32( hi, lo ) \ + _mm6_blend_epi16( hi, lo, 0x33 ) + + // 1 sse2 block, 16 bytes * 4 lanes static inline void mm128_interleave_4x32x128( void *d, const void *s0, const void *s1, const void *s2, const void *s3 ) @@ -313,6 +320,18 @@ static inline void mm128_deinterleave_4x32x( void *dst0, void *dst1, void *dst2, ((const uint32_t*)(s))[i3], ((const uint32_t*)(s))[i2], \ ((const uint32_t*)(s))[i1], ((const uint32_t*)(s))[i0] ) + +// Blend 2 vectors alternating hi & lo: { hi[n], lo[n-1], ... hi[1], lo[0] } +#define mm256_interleave_blend_128( hi, lo ) \ + _mm256_blend_epi32( hi, lo, 0x0f ) + +#define mm256_interleave_blend_64( hi, lo ) \ + _mm256_blend_epi32( hi, lo, 0x33 ) + +#define mm256_interleave_blend_32( hi, lo ) \ + _mm256_blend_epi32( hi, lo, 0x55 ) + + // Used for AVX2 interleaving static inline void mm256_interleave_8x32x256( void *d, const void *s00, @@ -751,6 +770,78 @@ static inline void mm256_reinterleave_4x32( void *dst, void *src, int bit_len ) // bit_len == 1024 } +static inline void mm256_reinterleave_4x64_2x128( void *dst0, void *dst1, + const void *src, int bit_len ) +{ + __m256i* d0 = (__m256i*)dst0; + __m256i* d1 = (__m256i*)dst1; + uint64_t *s = (uint64_t*)src; + + d0[0] = _mm256_set_epi64x( s[ 5], s[ 1], s[ 4], s[ 0] ); + d1[0] = _mm256_set_epi64x( s[ 7], s[ 3], s[ 6], s[ 2] ); + + d0[1] = _mm256_set_epi64x( s[13], s[ 9], s[12], s[ 8] ); + d1[1] = _mm256_set_epi64x( s[15], s[11], s[14], s[10] ); + + if ( bit_len <= 256 ) return; + + d0[2] = _mm256_set_epi64x( s[21], s[17], s[20], s[16] ); + d1[2] = _mm256_set_epi64x( s[23], s[19], s[22], s[18] ); + + d0[3] = _mm256_set_epi64x( s[29], s[25], s[28], s[24] ); + d1[3] = _mm256_set_epi64x( s[31], s[27], s[30], s[26] ); + + if ( bit_len <= 512 ) return; + + d0[4] = _mm256_set_epi64x( s[37], s[33], s[36], s[32] ); + d1[4] = _mm256_set_epi64x( s[39], s[35], s[38], s[34] ); + + d0[5] = _mm256_set_epi64x( s[45], s[41], s[44], s[40] ); + d1[5] = _mm256_set_epi64x( s[47], s[43], s[46], s[42] ); + + d0[6] = _mm256_set_epi64x( s[53], s[49], s[52], s[48] ); + d1[6] = _mm256_set_epi64x( s[55], s[51], s[54], s[50] ); + + d0[7] = _mm256_set_epi64x( s[61], s[57], s[60], s[56] ); + d1[7] = _mm256_set_epi64x( s[63], s[59], s[62], s[58] ); +} + + +static inline void mm256_reinterleave_2x128_4x64( void *dst, const void *src0, + const void *src1, int bit_len ) +{ + __m256i* d = (__m256i*)dst; + uint64_t *s0 = (uint64_t*)src0; + uint64_t *s1 = (uint64_t*)src1; + + d[ 0] = _mm256_set_epi64x( s1[2], s1[0], s0[2], s0[0] ); + d[ 1] = _mm256_set_epi64x( s1[3], s1[1], s0[3], s0[1] ); + d[ 2] = _mm256_set_epi64x( s1[6], s1[4], s0[6], s0[4] ); + d[ 3] = _mm256_set_epi64x( s1[7], s1[5], s0[7], s0[5] ); + + if ( bit_len <= 256 ) return; + + d[ 4] = _mm256_set_epi64x( s1[10], s1[ 8], s0[10], s0[ 8] ); + d[ 5] = _mm256_set_epi64x( s1[11], s1[ 9], s0[11], s0[ 9] ); + d[ 6] = 
_mm256_set_epi64x( s1[14], s1[12], s0[14], s0[12] ); + d[ 7] = _mm256_set_epi64x( s1[15], s1[13], s0[15], s0[13] ); + + if ( bit_len <= 512 ) return; + + d[ 8] = _mm256_set_epi64x( s1[18], s1[16], s0[18], s0[16] ); + d[ 9] = _mm256_set_epi64x( s1[19], s1[17], s0[19], s0[17] ); + d[10] = _mm256_set_epi64x( s1[22], s1[20], s0[22], s0[20] ); + d[11] = _mm256_set_epi64x( s1[23], s1[21], s0[23], s0[21] ); + + d[12] = _mm256_set_epi64x( s1[26], s1[24], s0[26], s0[24] ); + d[13] = _mm256_set_epi64x( s1[27], s1[25], s0[27], s0[25] ); + d[14] = _mm256_set_epi64x( s1[30], s1[28], s0[30], s0[28] ); + d[15] = _mm256_set_epi64x( s1[31], s1[29], s0[31], s0[29] ); +} + + + + /* // not used static inline void mm_reinterleave_4x32( void *dst, void *src, int bit_len ) @@ -850,6 +941,24 @@ static inline void mm256_deinterleave_2x128( void *d0, void *d1, void *s, ((const uint32_t*)(s))[i03], ((const uint32_t*)(s))[i02], \ ((const uint32_t*)(s))[i01], ((const uint32_t*)(s))[i00] ) +// AVX512 has no blend, can be done with permute2xvar but at what cost? +// Can also be done with shifting and mask-or'ing for 3 instructins with +// 1 dependency. Finally it can be done with 1 _mm512_set but with 8 64 bit +// array index calculations and 8 pointer reads. + +// Blend 2 vectors alternating hi & lo: { hi[n], lo[n-1], ... hi[1]. lo[0] } +#define mm512_interleave_blend_128( hi, lo ) \ + _mm256_permute2xvar_epi64( hi, lo, _mm512_set_epi64( \ + 0x7, 0x6, 0x5, 0x4, 0xb, 0xa, 0x9, 0x8 ) + +#define mm512_interleave_blend_64( hi, lo ) \ + _mm256_permute2xvar_epi64( hi, lo, _mm512_set_epi64( \ + 0x7, 0x6, 0xd, 0xc, 0x3, 0x2, 0x9, 0x8 ) + +#define mm512_interleave_blend_32( hi, lo ) \ + _mm256_permute2xvar_epi32( hi, lo, _mm512_set_epi32( \ + 0x0f, 0x1e, 0x0d, 0x1c, 0x0b, 0x1a, 0x09, 0x18, \ + 0x07, 0x16, 0x05, 0x14, 0x03, 0x12, 0x01, 0x10 ) // static inline void mm512_interleave_16x32x512( void *d, const void *s00, diff --git a/miner.h b/miner.h index 6bb58eb..98fc082 100644 --- a/miner.h +++ b/miner.h @@ -164,6 +164,8 @@ static inline void be32enc(void *pp, uint32_t x) } #endif +// Deprecated in favour of mm64_bswap_32 +// // This is a poorman's SIMD instruction, use 64 bit instruction to encode 2 // uint32_t. This function flips endian on two adjacent 32 bit quantities // aligned to 64 bits. If source is LE output is BE, and vice versa. 
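The trick that comment describes can be sketched as follows. This is an illustrative reimplementation, not the actual miner.h function; the helper name and the use of a GCC builtin are assumptions.

#include <stdint.h>
#include <string.h>

// Hypothetical sketch of the idea described above: byte swap two adjacent
// 32 bit words with a single 64 bit byte swap. bswap64 reverses all eight
// bytes, which also swaps the two words, so a 32 bit rotate of the result
// puts the words back in their original order, each individually swapped.
static inline void swab32x2( void *dst, const void *src )
{
   uint64_t x;
   memcpy( &x, src, 8 );            // load both words as one 64 bit value
   x = __builtin_bswap64( x );      // reverse all 8 bytes
   x = ( x << 32 ) | ( x >> 32 );   // restore word order
   memcpy( dst, &x, 8 );            // store both byte swapped words
}

Byte swapping is its own inverse, so the same routine converts LE to BE and BE to LE, as the comment notes.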
@@ -540,7 +542,8 @@ enum algos { ALGO_SKEIN, ALGO_SKEIN2, ALGO_SKUNK, - ALGO_TIMETRAVEL, + ALGO_SONOA, + ALGO_TIMETRAVEL, ALGO_TIMETRAVEL10, ALGO_TRIBUS, ALGO_VANILLA, @@ -626,7 +629,8 @@ static const char* const algo_names[] = { "skein", "skein2", "skunk", - "timetravel", + "sonoa", + "timetravel", "timetravel10", "tribus", "vanilla", @@ -705,7 +709,9 @@ extern bool opt_stratum_stats; extern int num_cpus; extern int num_cpugroups; extern int opt_priority; - +extern uint32_t accepted_share_count; +extern uint32_t rejected_share_count; +extern uint32_t solved_block_count; extern pthread_mutex_t rpc2_job_lock; extern pthread_mutex_t rpc2_login_lock; extern pthread_mutex_t applog_lock; @@ -772,7 +778,8 @@ Options:\n\ skein Skein+Sha (Skeincoin)\n\ skein2 Double Skein (Woodcoin)\n\ skunk Signatum (SIGT)\n\ - timetravel timeravel8, Machinecoin (MAC)\n\ + sonoa Sono\n\ + timetravel timeravel8, Machinecoin (MAC)\n\ timetravel10 Bitcore (BTX)\n\ tribus Denarius (DNR)\n\ vanilla blake256r8vnl (VCash)\n\ diff --git a/winbuild-cross.sh b/winbuild-cross.sh index 8c7a3b4..dcfc17c 100755 --- a/winbuild-cross.sh +++ b/winbuild-cross.sh @@ -1,13 +1,27 @@ #!/bin/bash +# +# Script for building Windows binaries release package using mingw. +# Requires a custom mingw environment, not intended for users. +# +# Compiles Windows EXE files for selected CPU architectures, copies them +# as well as some DLLs that aren't available in most Windows environments +# into a release folder ready to be zipped and uploaded. -LOCAL_LIB="$HOME/usr/lib" +# define some local variables + +export LOCAL_LIB="$HOME/usr/lib" export LDFLAGS="-L$LOCAL_LIB/curl/lib/.libs -L$LOCAL_LIB/gmp/.libs -L$LOCAL_LIB/openssl" -F="--with-curl=$LOCAL_LIB/curl --with-crypto=$LOCAL_LIB/openssl --host=x86_64-w64-mingw32" +export CONFIGURE_ARGS="--with-curl=$LOCAL_LIB/curl --with-crypto=$LOCAL_LIB/openssl --host=x86_64-w64-mingw32" +# make link to local gmp header file. +ln -s $LOCAL_LIB/gmp/gmp.h ./gmp.h + +# edit configure to fix pthread lib name for Windows. sed -i 's/"-lpthread"/"-lpthreadGC2"/g' configure.ac +# make release directory and copy selected DLLs. 
mkdir release cp README.txt release/ cp /usr/x86_64-w64-mingw32/lib/zlib1.dll release/ @@ -20,27 +34,27 @@ cp $LOCAL_LIB/curl/lib/.libs/libcurl-4.dll release/ make distclean || echo clean rm -f config.status ./autogen.sh || echo done -CFLAGS="-O3 -march=core-avx2 -msha -Wall" ./configure $F +CFLAGS="-O3 -march=znver1 -DRYZEN_ -Wall" ./configure $CONFIGURE_ARGS make -j 16 strip -s cpuminer.exe -mv cpuminer.exe release/cpuminer-avx2-sha.exe +mv cpuminer.exe release/cpuminer-zen.exe #make clean || echo clean -#CFLAGS="-O3 -march=corei7-avx -msha -Wall" ./configure $F +#CFLAGS="-O3 -march=corei7-avx -msha -Wall" ./configure $CONFIGURE_ARGS #make #strip -s cpuminer.exe #mv cpuminer.exe release/cpuminer-avx-sha.exe make clean || echo clean rm -f config.status -CFLAGS="-O3 -march=core-avx2 -Wall" ./configure $F +CFLAGS="-O3 -march=core-avx2 -Wall" ./configure $CONFIGURE_ARGS make -j 16 strip -s cpuminer.exe mv cpuminer.exe release/cpuminer-avx2.exe #make clean || echo clean #rm -f config.status -#CFLAGS="-O3 -march=znver1 -Wall" ./configure $F +#CFLAGS="-O3 -march=znver1 -Wall" ./configure $CONFIGURE_ARGS #make -j #strip -s cpuminer.exe #mv cpuminer.exe release/cpuminer-aes-sha.exe @@ -48,7 +62,7 @@ mv cpuminer.exe release/cpuminer-avx2.exe make clean || echo clean rm -f config.status -CFLAGS="-O3 -march=corei7-avx -Wall" ./configure $F +CFLAGS="-O3 -march=corei7-avx -Wall" ./configure $CONFIGURE_ARGS make -j 16 strip -s cpuminer.exe mv cpuminer.exe release/cpuminer-avx.exe @@ -56,22 +70,22 @@ mv cpuminer.exe release/cpuminer-avx.exe # -march=westmere is supported in gcc5 make clean || echo clean rm -f config.status -CFLAGS="-O3 -march=westmere -Wall" ./configure $F -#CFLAGS="-O3 -maes -msse4.2 -Wall" ./configure $F +CFLAGS="-O3 -march=westmere -Wall" ./configure $CONFIGURE_ARGS +#CFLAGS="-O3 -maes -msse4.2 -Wall" ./configure $CONFIGURE_ARGS make -j 16 strip -s cpuminer.exe mv cpuminer.exe release/cpuminer-aes-sse42.exe #make clean || echo clean #rm -f config.status -#CFLAGS="-O3 -march=corei7 -Wall" ./configure $F +#CFLAGS="-O3 -march=corei7 -Wall" ./configure $CONFIGURE_ARGS #make #strip -s cpuminer.exe #mv cpuminer.exe release/cpuminer-sse42.exe #make clean || echo clean #rm -f config.status -#CFLAGS="-O3 -march=core2 -Wall" ./configure $F +#CFLAGS="-O3 -march=core2 -Wall" ./configure $CONFIGURE_ARGS #make #strip -s cpuminer.exe #mv cpuminer.exe release/cpuminer-ssse3.exe @@ -79,7 +93,7 @@ mv cpuminer.exe release/cpuminer-aes-sse42.exe make clean || echo clean rm -f config.status -CFLAGS="-O3 -msse2 -Wall" ./configure $F +CFLAGS="-O3 -msse2 -Wall" ./configure $CONFIGURE_ARGS make -j 16 strip -s cpuminer.exe mv cpuminer.exe release/cpuminer-sse2.exe