Compare commits

...

24 Commits

Author SHA1 Message Date
Jay D Dee  92b3733925  v3.17.0  2021-07-15 20:30:44 -04:00
Jay D Dee  19cc88d102  v3.16.5  2021-06-26 12:27:44 -04:00
Jay D Dee  a053690170  v3.16.4  2021-06-23 21:52:42 -04:00
Jay D Dee  3c5e8921b7  v3.16.3  2021-05-06 14:55:03 -04:00
Jay D Dee  f3333b0070  v3.16.2  2021-04-08 18:09:31 -04:00
Jay D Dee  902ec046dd  v3.16.1  2021-03-24 18:24:20 -04:00
Jay D Dee  d0b4941321  v3.16.0  2021-03-19 15:45:32 -04:00
Jay D Dee  40089428c5  v3.15.7  2021-03-08 22:44:44 -05:00
Jay D Dee  dc6b007a18  v3.15.6  2021-02-12 15:16:53 -05:00
Jay D Dee  06bfaa1249  v3.15.5  2020-12-21 13:25:33 -05:00
Jay D Dee  6566e99a13  v3.15.4  2020-12-15 13:15:02 -05:00
Jay D Dee  ccfccbadd5  v3.15.3  2020-12-10 18:23:49 -05:00
Jay D Dee  45ecd0de14  v3.15.2  2020-11-15 17:57:06 -05:00
Jay D Dee  4fa8fcea8b  v3.15.1  2020-11-09 13:19:05 -05:00
Jay D Dee  c85fb3842b  v3.15.0  2020-10-02 10:48:37 -04:00
Jay D Dee  cdd587537e  v3.14.3  2020-06-18 17:30:26 -04:00
Jay D Dee  51a1d91abd  v3.14.2  2020-05-30 21:20:44 -04:00
Jay D Dee  13563e2598  v3.14.1  2020-05-21 13:00:29 -04:00
Jay D Dee  9571f85d53  v3.14.0  2020-05-20 13:56:35 -04:00
Jay D Dee  0e69756634  v3.13.2-segwit-test  2020-05-18 18:17:27 -04:00
Jay D Dee  9653bca1e2  v3.13.1.1  2020-05-17 19:21:37 -04:00
Jay D Dee  1c0719e8a4  v3.13.1  2020-05-10 21:34:55 -04:00
Jay D Dee  8b4b4dc613  v3.13.0.1  2020-05-07 17:57:04 -04:00
Jay D Dee  e76feaced8  v3.13.0  2020-05-06 00:53:43 -04:00
154 changed files with 11618 additions and 6170 deletions


@@ -1,5 +1,9 @@
Instructions for compiling cpuminer-opt for Windows.
These instructions may be out of date. Please consult the wiki for
the latest:
https://github.com/JayDDee/cpuminer-opt/wiki/Compiling-from-source
Windows compilation using Visual Studio is not supported. Mingw64 is
used on a Linux system (bare metal or virtual machine) to cross-compile
@@ -24,79 +28,76 @@ Refer to Linux compile instructions and install required packages.
Additionally, install mingw-w64.
sudo apt-get install mingw-w64 libz-mingw-w64-dev
2. Create a local library directory for packages to be compiled in the next
step. Suggested location is $HOME/usr/lib/
$ mkdir $HOME/usr/lib
3. Download and build other packages for mingw that don't have a mingw64
version available in the repositories.
Download the following source code packages from their respective and
respected download locations, copy them to ~/usr/lib/ and uncompress them.
openssl: https://github.com/openssl/openssl/releases
curl: https://github.com/curl/curl/releases
gmp: https://gmplib.org/download/gmp/
In most cases the latest version is ok but it's safest to download the same major and minor version as included in your distribution. The following uses versions from Ubuntu 20.04. Change version numbers as required.
Run the following commands or follow the supplied instructions. Do not run "make install" unless you are using /usr/lib, which isn't recommended.
Some instructions insist on running "make check". If make check fails it may still work, YMMV.
You can speed up "make" by using all CPU cores available with "-j n" where n is the number of CPU threads you want to use.
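For example, on a machine with 8 CPU threads:
$ make -j 8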
openssl:
$ ./Configure mingw64 shared --cross-compile-prefix=x86_64-w64-mingw32-
$ make
Make may fail with an ld error, just ensure libcrypto-1_1-x64.dll is created.
curl:
$ ./configure --with-winssl --with-winidn --host=x86_64-w64-mingw32
$ make
gmp:
$ ./configure --host=x86_64-w64-mingw32
$ make
4. Tweak the environment.
This step is required every time you log in, or the commands can be added to .bashrc.
Define some local variables to point to the local library.
$ export LOCAL_LIB="$HOME/usr/lib"
$ export LDFLAGS="-L$LOCAL_LIB/curl/lib/.libs -L$LOCAL_LIB/gmp/.libs -L$LOCAL_LIB/openssl"
$ export CONFIGURE_ARGS="--with-curl=$LOCAL_LIB/curl --with-crypto=$LOCAL_LIB/openssl --host=x86_64-w64-mingw32"
Create a release directory and copy some dll files previously built. This can be done outside of cpuminer-opt and only needs to be done once. If the release directory is in the cpuminer-opt directory it needs to be recreated every time a source package is decompressed.
Adjust the gcc version in the following path as required:
$ export GCC_MINGW_LIB="/usr/lib/gcc/x86_64-w64-mingw32/9.3-win32"
$ mkdir release
$ cp /usr/x86_64-w64-mingw32/lib/zlib1.dll release/
$ cp /usr/x86_64-w64-mingw32/lib/libwinpthread-1.dll release/
$ cp $GCC_MINGW_LIB/libstdc++-6.dll release/
$ cp $GCC_MINGW_LIB/libgcc_s_seh-1.dll release/
$ cp $LOCAL_LIB/openssl/libcrypto-1_1-x64.dll release/
$ cp $LOCAL_LIB/curl/lib/.libs/libcurl-4.dll release/
The following steps need to be done every time a new source package is
opened.
@@ -110,13 +111,73 @@ https://github.com/JayDDee/cpuminer-opt/releases
Decompress and change to the cpuminer-opt directory.
6. Compile
Create a link to the locally compiled version of gmp.h
$ ln -s $LOCAL_LIB/gmp-version/gmp.h ./gmp.h
$ ./autogen.sh
Configure the compiler for the CPU architecture of the host machine:
CFLAGS="-O3 -march=native -Wall" ./configure $CONFIGURE_ARGS
or cross compile for a specific CPU architecture:
CFLAGS="-O3 -march=znver1 -Wall" ./configure $CONFIGURE_ARGS
This will compile for AMD Ryzen.
You can compile more generically for a set of specific CPU features if you know what features you want:
CFLAGS="-O3 -maes -msse4.2 -Wall" ./configure $CONFIGURE_ARGS
This will compile for an older CPU that does not have AVX.
You can find several examples in README.txt
If you have a CPU with more than 64 threads and Windows 7 or higher, you can enable the CPU Groups feature by adding the following to CFLAGS:
"-D_WIN32_WINNT=0x0601"
Once you have run configure successfully, run the compiler with n CPU threads:
$ make -j n
Copy cpuminer.exe to the release directory, compress and copy the release directory to a Windows system and run cpuminer.exe from the command line.
Run cpuminer
In a command window, change directories to the unzipped release folder. To get a list of all options:
cpuminer.exe --help
Command options are specific to where you mine. Refer to the pool's instructions on how to set them.
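For example, a typical stratum invocation looks something like this (the pool URL and credentials here are placeholders, use your pool's values):
cpuminer.exe -a x16rv2 -o stratum+tcp://pool.example.com:3636 -u YourWalletAddress -p x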


@@ -85,6 +85,7 @@ cpuminer_SOURCES = \
algo/groestl/aes_ni/hash-groestl.c \
algo/groestl/aes_ni/hash-groestl256.c \
algo/fugue/sph_fugue.c \
algo/fugue/fugue-aesni.c \
algo/hamsi/sph_hamsi.c \
algo/hamsi/hamsi-hash-4way.c \
algo/haval/haval.c \
@@ -128,7 +129,7 @@ cpuminer_SOURCES = \
algo/lyra2/allium.c \
algo/lyra2/phi2-4way.c \
algo/lyra2/phi2.c \
algo/m7m/m7m.c \
algo/m7m/magimath.cpp \
algo/nist5/nist5-gate.c \
algo/nist5/nist5-4way.c \
@@ -162,6 +163,8 @@ cpuminer_SOURCES = \
algo/sha/sph_sha2big.c \
algo/sha/sha256-hash-4way.c \
algo/sha/sha512-hash-4way.c \
algo/sha/sha256-hash-opt.c \
algo/sha/sha256-hash-2way-ni.c \
algo/sha/hmac-sha256-hash.c \
algo/sha/hmac-sha256-hash-4way.c \
algo/sha/sha2.c \
@@ -191,6 +194,11 @@ cpuminer_SOURCES = \
algo/sm3/sm3-hash-4way.c \
algo/swifftx/swifftx.c \
algo/tiger/sph_tiger.c \
algo/verthash/verthash-gate.c \
algo/verthash/Verthash.c \
algo/verthash/fopen_utf8.c \
algo/verthash/tiny_sha3/sha3.c \
algo/verthash/tiny_sha3/sha3-4way.c \
algo/whirlpool/sph_whirlpool.c \
algo/whirlpool/whirlpool-hash-4way.c \
algo/whirlpool/whirlpool-gate.c \
@@ -257,6 +265,7 @@ cpuminer_SOURCES = \
algo/x16/hex.c \
algo/x16/x21s-4way.c \
algo/x16/x21s.c \
algo/x16/minotaur.c \
algo/x17/x17-gate.c \
algo/x17/x17.c \
algo/x17/x17-4way.c \


@@ -89,10 +89,11 @@ Supported Algorithms
lyra2h Hppcoin
lyra2re lyra2
lyra2rev2 lyra2v2
lyra2rev3 lyrav2v3
lyra2z
lyra2z330 Lyra2 330 rows, Zoin (ZOI)
m7m Magi (XMG)
minotaur Ringcoin (RNG)
myr-gr Myriad-Groestl
neoscrypt NeoScrypt(128, 2, 1)
nist5 Nist5
@@ -121,6 +122,7 @@ Supported Algorithms
tribus Denarius (DNR)
vanilla blake256r8vnl (VCash)
veltor (VLT)
verthash Vertcoin
whirlpool
whirlpoolx
x11 Dash
@@ -133,7 +135,7 @@ Supported Algorithms
x14 X14
x15 X15
x16r
x16rv2
x16rt Gincoin (GIN)
x16rt-veil Veil (VEIL)
x16s Pigeoncoin (PGN)


@@ -1,8 +1,12 @@
This file is included in the Windows binary package. Compile instructions
for Linux and Windows can be found in RELEASE_NOTES.
This package is officially available only from:
https://github.com/JayDDee/cpuminer-opt
No other sources should be trusted.
cpuminer is a console program that is executed from a DOS or Powershell
prompt. There is no GUI and no mouse support.
Miner programs are often flagged as malware by antivirus programs. This is
a false positive; they are flagged simply because they are cryptocurrency
@@ -10,13 +14,13 @@ miners. The source code is open for anyone to inspect. If you don't trust
the software, don't use it.
Choose the exe that best matches your CPU's features or use trial and
error to find the fastest one that works. Pay attention to
the features listed at cpuminer startup to ensure you are mining at
optimum speed using the best available features.
Architecture names and compile options used are only provided for Intel
Core series. Budget CPUs like Pentium and Celeron are often missing some
features.
AMD CPUs older than Piledriver, including Athlon x2 and Phenom II x4, are not
supported by cpuminer-opt due to an incompatible implementation of SSE2 on
@@ -31,14 +35,39 @@ https://en.wikipedia.org/wiki/List_of_Intel_CPU_microarchitectures
https://en.wikipedia.org/wiki/List_of_AMD_CPU_microarchitectures
Exe file name                 Compile flags               Arch name
cpuminer-sse2.exe             "-msse2"                    Core2, Nehalem
cpuminer-aes-sse42.exe        "-march=westmere"           Westmere
cpuminer-avx.exe              "-march=corei7-avx"         Sandybridge, Ivybridge
cpuminer-avx2.exe             "-march=core-avx2 -maes"    Haswell(1)
cpuminer-avx512.exe           "-march=skylake-avx512"     Skylake-X, Cascadelake
cpuminer-avx512-sha.exe       "-march=cascadelake -msha"  Rocketlake(2)
cpuminer-avx512-sha-vaes.exe  "-march=icelake-client"     Icelake, Tigerlake(3)
cpuminer-zen.exe              "-march=znver1"             AMD Zen1, Zen2
cpuminer-zen3.exe             "-march=znver2 -mvaes"      Zen3(4)
(1) Haswell includes Broadwell, Skylake, Kabylake, Coffeelake & Cometlake.
(2) Rocketlake build uses cascadelake+sha as a workaround until Rocketlake
compiler support is available.
(3) Icelake & Tigerlake are only available on some laptops. Mining with a
laptop is not recommended.
(4) Zen3 build uses zen2+vaes as a workaround until Zen3 compiler support is
available. Zen2 CPUs should use Zen1 build.
Notes about included DLL files:
Downloading DLL files from alternative sources presents an inherent
security risk if their source is unknown. All DLL files included have
been copied from the Ubuntu-20.04 installation or compiled by me from
source code obtained from the author's official repository. The exact
procedure is documented in the build instructions for Windows:
https://github.com/JayDDee/cpuminer-opt/wiki/Compiling-from-source
Some DLL files may already be installed on the system by Windows or third
party packages. They often will work and may be used instead of the included
file. Without a compelling reason to do so, it's recommended to use the
included files as they are packaged.
If you like this software feel free to donate:


@@ -44,7 +44,7 @@ Please include the following information:
1. CPU model, operating system, cpuminer-opt version (must be latest),
binary file for Windows, changes to default build procedure for Linux.
2. Exact command line (except user and pw) and initial output showing
the above requested info.
3. Additional program output showing any error messages or other
@@ -65,6 +65,165 @@ If not what makes it happen or not happen?
Change Log
----------
v3.17.0
AVX512 optimized using ternary logic instructions.
Faster sha256t on all CPU architectures: AVX512 +30%, SHA +30%, AVX2 +9%.
Use SHA on supported CPUs to produce merkle hash.
Fixed byte order in Extranonce2 log & replaced Block height with Job ID.
v3.16.5
#329: Fixed GBT incorrect target diff in stats, second attempt.
Fixed formatting error in share result log when --no-color option is used.
v3.16.4
Faster sha512 and sha256 when not using SHA CPU extension.
#329: Fixed GBT incorrect target diff in stats.
v3.16.3
#313 Fix compile error with GCC 11.
Incremental improvements to verthash.
v3.16.2
Verthash: midstate prehash optimization for all architectures.
Verthash: AVX2 optimization.
GBT: added support for Bech32 addresses.
Linux: added CPU frequency to benchmark log.
Fixed integer overflow in time calculations.
v3.16.1
New options for verthash:
--data-file to specify the name, and optionally the path, of the verthash
data file, default is "verthash.dat" in the current directory.
--verify to perform the data file integrity check at startup, default is
not to verify data file integrity.
Support for creation of default verthash data file if:
1) --data-file option is not used,
2) no default data file is found in the current directory, and,
3) --verify option is used.
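For example, a verthash invocation using these options might look like this (hypothetical data file path, pool URL and credentials):
cpuminer -a verthash --data-file ~/verthash.dat --verify -o stratum+tcp://pool.example.com:3300 -u YourWalletAddress -p x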
More detailed logs related to verthash data file.
Small verthash performance improvement.
Fixed detection of corrupt stats caused by networking issues.
v3.16.0
Added verthash algo.
v3.15.7
Added accepted/stale/rejected percentage to summary log report.
Added warning if share counters mismatch which could corrupt stats.
Linux: CPU temperature reporting is more responsive to rising temperature.
A few AVX2 & AVX512 tweaks.
Removed some dead code and other cleanup.
v3.15.6
Implement keccak pre-hash optimization for x16* algos.
Move conditional mining test to before get_new_work in miner thread.
Add test for share reject reason when solo mining.
Add support for floating point, as well as integer, "networkhasps" in
RPC getmininginfo method.
v3.15.5
Fix stratum jobs lost if 2 jobs received in less than one second.
v3.15.4
Fixed yescryptr16 broken in v3.15.3.
v3.15.3
Yescrypt algos now use yespower v0.5, a little faster.
New implementation of sha256 using SHA CPU extension.
Replace Openssl with SPH for sha256 & sha512.
AVX512 optimization for sha256t & sha256q.
Faster sha256t, sha256q, x21s, x22i & x25x on CPUs with SHA without AVX512.
AVX512+SHA build for Intel Rocketlake added to Windows binary package.
v3.15.2
Zen3 AVX2+VAES optimization for x16*, x17, sonoa, xevan, x21s, x22i, x25x,
allium.
Zen3 (AVX2+SHA+VAES) build added to Windows binary package.
v3.15.1
Fix compile on AMD Zen3 CPUs with VAES.
Force new work immediately after solving a block solo.
v3.15.0
Fugue optimized with AES, improves many sha3 algos.
Minotaur algo optimized for all architectures.
Fixed neoscrypt BUG log.
v3.14.3
#265: more mutex changes to reduce blocking with high thread count.
#267: fixed hodl algo potential memory alignment issue,
add warning when thread count is not valid for mining hodl algo.
v3.14.2
The second line of the Share Accepted log is no longer displayed,
new Xnonce log is added and other small log tweaks.
#265: Cleanup use of mutex.
v3.14.1
GBT and getwork log changes:
fixed missing TTF in New Block log,
ntime no longer byte-swapped for display in New Work log,
fixed zero effective hash rate in Periodic Report log,
deleted "Current block is..." log.
Renamed stratum "New Job" log to "New Work" to be consistent with the solo
version of the log. Added more data to both versions.
v3.14.0
Changes to solo mining:
- segwit is supported by getblocktemplate,
- longpolling is not working and is disabled,
- Periodic Report log is output,
- New Block log includes TTF estimates,
- Stratum thread no longer created when using getwork or GBT.
Fixed BUG log mining sha256d.
v3.13.1.1
Fixed Windows crash mining minotaur algo.
Fixed GCC 10 compile again.
Added -fno-common to testing to be consistent with GCC 10 default.
v3.13.1
Added minotaur algo for Ringcoin.
v3.13.0.1
Issue #262: Fixed xevan AVX2 invalid shares.
v3.13.0
Updated Windows binaries compiled with GCC 9. Included DLLs also updated.
Icelake build (cpuminer-avx512-sha-vaes.exe) now included in Windows
binaries package.
No source code changes.
v3.12.8.2
Fixed x12 AVX2 rejects.

aclocal.m4 vendored

@@ -1,6 +1,6 @@
# generated automatically by aclocal 1.15.1 -*- Autoconf -*-
# generated automatically by aclocal 1.16.1 -*- Autoconf -*-
# Copyright (C) 1996-2017 Free Software Foundation, Inc.
# Copyright (C) 1996-2018 Free Software Foundation, Inc.
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -20,7 +20,7 @@ You have another version of autoconf. It may work, but is not guaranteed to.
If you have problems, you may need to regenerate the build system entirely.
To do so, use the procedure documented by the package, typically 'autoreconf'.])])
# Copyright (C) 2002-2017 Free Software Foundation, Inc.
# Copyright (C) 2002-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -32,10 +32,10 @@ To do so, use the procedure documented by the package, typically 'autoreconf'.])
# generated from the m4 files accompanying Automake X.Y.
# (This private macro should not be called outside this file.)
AC_DEFUN([AM_AUTOMAKE_VERSION],
[am__api_version='1.15'
[am__api_version='1.16'
dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
dnl require some minimum version. Point them to the right macro.
m4_if([$1], [1.15.1], [],
m4_if([$1], [1.16.1], [],
[AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
])
@@ -51,14 +51,14 @@ m4_define([_AM_AUTOCONF_VERSION], [])
# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced.
# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
[AM_AUTOMAKE_VERSION([1.15.1])dnl
[AM_AUTOMAKE_VERSION([1.16.1])dnl
m4_ifndef([AC_AUTOCONF_VERSION],
[m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
# Figure out how to run the assembler. -*- Autoconf -*-
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -78,7 +78,7 @@ _AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl
# AM_AUX_DIR_EXPAND -*- Autoconf -*-
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -130,7 +130,7 @@ am_aux_dir=`cd "$ac_aux_dir" && pwd`
# AM_CONDITIONAL -*- Autoconf -*-
# Copyright (C) 1997-2017 Free Software Foundation, Inc.
# Copyright (C) 1997-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -161,7 +161,7 @@ AC_CONFIG_COMMANDS_PRE(
Usually this means the macro was only invoked conditionally.]])
fi])])
# Copyright (C) 1999-2017 Free Software Foundation, Inc.
# Copyright (C) 1999-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -352,13 +352,12 @@ _AM_SUBST_NOTMAKE([am__nodep])dnl
# Generate code to set up dependency tracking. -*- Autoconf -*-
# Copyright (C) 1999-2017 Free Software Foundation, Inc.
# Copyright (C) 1999-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# _AM_OUTPUT_DEPENDENCY_COMMANDS
# ------------------------------
AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
@@ -366,49 +365,41 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
# Older Autoconf quotes --file arguments for eval, but not when files
# are listed without --file. Let's play safe and only enable the eval
# if we detect the quoting.
case $CONFIG_FILES in
*\'*) eval set x "$CONFIG_FILES" ;;
*) set x $CONFIG_FILES ;;
esac
# TODO: see whether this extra hack can be removed once we start
# requiring Autoconf 2.70 or later.
AS_CASE([$CONFIG_FILES],
[*\'*], [eval set x "$CONFIG_FILES"],
[*], [set x $CONFIG_FILES])
shift
for mf
# Used to flag and report bootstrapping failures.
am_rc=0
for am_mf
do
# Strip MF so we end up with the name of the file.
mf=`echo "$mf" | sed -e 's/:.*$//'`
# Check whether this is an Automake generated Makefile or not.
# We used to match only the files named 'Makefile.in', but
# some people rename them; so instead we look at the file content.
# Grep'ing the first line is not enough: some people post-process
# each Makefile.in and add a new line on top of each file to say so.
# Grep'ing the whole file is not good either: AIX grep has a line
am_mf=`AS_ECHO(["$am_mf"]) | sed -e 's/:.*$//'`
# Check whether this is an Automake generated Makefile which includes
# dependency-tracking related rules and includes.
# Grep'ing the whole file directly is not great: AIX grep has a line
# limit of 2048, but all sed's we know have understand at least 4000.
if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then
dirpart=`AS_DIRNAME("$mf")`
else
continue
fi
# Extract the definition of DEPDIR, am__include, and am__quote
# from the Makefile without running 'make'.
DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
test -z "$DEPDIR" && continue
am__include=`sed -n 's/^am__include = //p' < "$mf"`
test -z "$am__include" && continue
am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
# Find all dependency output files, they are included files with
# $(DEPDIR) in their names. We invoke sed twice because it is the
# simplest approach to changing $(DEPDIR) to its actual value in the
# expansion.
for file in `sed -n "
s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do
# Make sure the directory exists.
test -f "$dirpart/$file" && continue
fdir=`AS_DIRNAME(["$file"])`
AS_MKDIR_P([$dirpart/$fdir])
# echo "creating $dirpart/$file"
echo '# dummy' > "$dirpart/$file"
done
sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \
|| continue
am_dirpart=`AS_DIRNAME(["$am_mf"])`
am_filepart=`AS_BASENAME(["$am_mf"])`
AM_RUN_LOG([cd "$am_dirpart" \
&& sed -e '/# am--include-marker/d' "$am_filepart" \
| $MAKE -f - am--depfiles]) || am_rc=$?
done
if test $am_rc -ne 0; then
AC_MSG_FAILURE([Something went wrong bootstrapping makefile fragments
for automatic dependency tracking. Try re-running configure with the
'--disable-dependency-tracking' option to at least be able to build
the package (albeit without support for automatic dependency tracking).])
fi
AS_UNSET([am_dirpart])
AS_UNSET([am_filepart])
AS_UNSET([am_mf])
AS_UNSET([am_rc])
rm -f conftest-deps.mk
}
])# _AM_OUTPUT_DEPENDENCY_COMMANDS
@@ -417,18 +408,17 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
# -----------------------------
# This macro should only be invoked once -- use via AC_REQUIRE.
#
# This code is only required when automatic dependency tracking
# is enabled. FIXME. This creates each '.P' file that we will
# need in order to bootstrap the dependency handling code.
# This code is only required when automatic dependency tracking is enabled.
# This creates each '.Po' and '.Plo' makefile fragment that we'll need in
# order to bootstrap the dependency handling code.
AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
[AC_CONFIG_COMMANDS([depfiles],
[test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS],
[AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"])
])
[AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}"])])
# Do all the work for Automake. -*- Autoconf -*-
# Copyright (C) 1996-2017 Free Software Foundation, Inc.
# Copyright (C) 1996-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -515,8 +505,8 @@ AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl
AC_REQUIRE([AC_PROG_MKDIR_P])dnl
# For better backward compatibility. To be removed once Automake 1.9.x
# dies out for good. For more background, see:
# <http://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
# <http://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
# <https://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
# <https://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
AC_SUBST([mkdir_p], ['$(MKDIR_P)'])
# We need awk for the "check" target (and possibly the TAP driver). The
# system "awk" is bad on some platforms.
@@ -583,7 +573,7 @@ END
Aborting the configuration process, to ensure you take notice of the issue.
You can download and install GNU coreutils to get an 'rm' implementation
that behaves properly: <http://www.gnu.org/software/coreutils/>.
that behaves properly: <https://www.gnu.org/software/coreutils/>.
If you want to complete the configuration process using your problematic
'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
@@ -625,7 +615,7 @@ for _am_header in $config_headers :; do
done
echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -646,7 +636,7 @@ if test x"${install_sh+set}" != xset; then
fi
AC_SUBST([install_sh])])
# Copyright (C) 2003-2017 Free Software Foundation, Inc.
# Copyright (C) 2003-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -668,7 +658,7 @@ AC_SUBST([am__leading_dot])])
# Add --enable-maintainer-mode option to configure. -*- Autoconf -*-
# From Jim Meyering
# Copyright (C) 1996-2017 Free Software Foundation, Inc.
# Copyright (C) 1996-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -703,7 +693,7 @@ AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles])
# Check to see how 'make' treats includes. -*- Autoconf -*-
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -711,49 +701,42 @@ AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles])
# AM_MAKE_INCLUDE()
# -----------------
# Check to see how make treats includes.
# Check whether make has an 'include' directive that can support all
# the idioms we need for our automatic dependency tracking code.
AC_DEFUN([AM_MAKE_INCLUDE],
[am_make=${MAKE-make}
cat > confinc << 'END'
[AC_MSG_CHECKING([whether ${MAKE-make} supports the include directive])
cat > confinc.mk << 'END'
am__doit:
@echo this is the am__doit target
@echo this is the am__doit target >confinc.out
.PHONY: am__doit
END
# If we don't find an include directive, just comment out the code.
AC_MSG_CHECKING([for style of include used by $am_make])
am__include="#"
am__quote=
_am_result=none
# First try GNU make style include.
echo "include confinc" > confmf
# Ignore all kinds of additional output from 'make'.
case `$am_make -s -f confmf 2> /dev/null` in #(
*the\ am__doit\ target*)
am__include=include
am__quote=
_am_result=GNU
;;
esac
# Now try BSD make style include.
if test "$am__include" = "#"; then
echo '.include "confinc"' > confmf
case `$am_make -s -f confmf 2> /dev/null` in #(
*the\ am__doit\ target*)
am__include=.include
am__quote="\""
_am_result=BSD
;;
esac
fi
AC_SUBST([am__include])
AC_SUBST([am__quote])
AC_MSG_RESULT([$_am_result])
rm -f confinc confmf
])
# BSD make does it like this.
echo '.include "confinc.mk" # ignored' > confmf.BSD
# Other make implementations (GNU, Solaris 10, AIX) do it like this.
echo 'include confinc.mk # ignored' > confmf.GNU
_am_result=no
for s in GNU BSD; do
AM_RUN_LOG([${MAKE-make} -f confmf.$s && cat confinc.out])
AS_CASE([$?:`cat confinc.out 2>/dev/null`],
['0:this is the am__doit target'],
[AS_CASE([$s],
[BSD], [am__include='.include' am__quote='"'],
[am__include='include' am__quote=''])])
if test "$am__include" != "#"; then
_am_result="yes ($s style)"
break
fi
done
rm -f confinc.* confmf.*
AC_MSG_RESULT([${_am_result}])
AC_SUBST([am__include])])
AC_SUBST([am__quote])])
# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*-
# Copyright (C) 1997-2017 Free Software Foundation, Inc.
# Copyright (C) 1997-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -792,7 +775,7 @@ fi
# Helper functions for option handling. -*- Autoconf -*-
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -821,7 +804,7 @@ AC_DEFUN([_AM_SET_OPTIONS],
AC_DEFUN([_AM_IF_OPTION],
[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
# Copyright (C) 1999-2017 Free Software Foundation, Inc.
# Copyright (C) 1999-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -868,7 +851,7 @@ AC_LANG_POP([C])])
# For backward compatibility.
AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])])
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -887,7 +870,7 @@ AC_DEFUN([AM_RUN_LOG],
# Check to make sure that the build environment is sane. -*- Autoconf -*-
# Copyright (C) 1996-2017 Free Software Foundation, Inc.
# Copyright (C) 1996-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -968,7 +951,7 @@ AC_CONFIG_COMMANDS_PRE(
rm -f conftest.file
])
# Copyright (C) 2009-2017 Free Software Foundation, Inc.
# Copyright (C) 2009-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -1028,7 +1011,7 @@ AC_SUBST([AM_BACKSLASH])dnl
_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl
])
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -1056,7 +1039,7 @@ fi
INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
AC_SUBST([INSTALL_STRIP_PROGRAM])])
# Copyright (C) 2006-2017 Free Software Foundation, Inc.
# Copyright (C) 2006-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -1075,7 +1058,7 @@ AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
# Check how to create a tarball. -*- Autoconf -*-
# Copyright (C) 2004-2017 Free Software Foundation, Inc.
# Copyright (C) 2004-2018 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,


@@ -15,8 +15,6 @@
#include <stdbool.h>
#include <memory.h>
#include <unistd.h>
#include <openssl/sha.h>
//#include "miner.h"
#include "algo-gate-api.h"
// Define null and standard functions.
@@ -90,13 +88,157 @@ void algo_not_implemented()
}
// default null functions
// deprecated, use generic as default
int null_scanhash()
{
applog(LOG_WARNING,"SWERR: undefined scanhash function in algo_gate");
return 0;
}
// Default generic scanhash can be used in many cases.
int scanhash_generic( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t edata[20] __attribute__((aligned(64)));
uint32_t hash[8] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 1;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
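// Byte-swap the 80 byte block header once; only the nonce (word 19)
// is updated inside the search loop below.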
mm128_bswap32_80( edata, pdata );
do
{
edata[19] = n;
if ( likely( algo_gate.hash( hash, edata, thr_id ) ) )
if ( unlikely( valid_hash( hash, ptarget ) && !bench ) )
{
pdata[19] = bswap_32( n );
submit_solution( work, hash, mythr );
}
n++;
} while ( n < last_nonce && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce;
pdata[19] = n;
return 0;
}
#if defined(__AVX2__)
//int scanhash_4way_64_64( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr )
//int scanhash_4way_64_640( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr )
int scanhash_4way_64in_32out( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash32[8*4] __attribute__ ((aligned (64)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash32_d7 = &(hash32[ 7*4 ]);
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
__m256i *noncev = (__m256i*)vdata + 9;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const uint32_t targ32_d7 = ptarget[7];
const bool bench = opt_benchmark;
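// Byte-swap the header and interleave it into 4 parallel 64 bit lanes;
// the lane nonces are then blended into the high 32 bits of each lane.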
mm256_bswap32_intrlv80_4x64( vdata, pdata );
*noncev = mm256_intrlv_blend_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
if ( likely( algo_gate.hash( hash32, vdata, thr_id ) ) )
for ( int lane = 0; lane < 4; lane++ )
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 && !bench ) )
{
extr_lane_4x32( lane_hash, hash32, lane, 256 );
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
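// Advance all 4 lane nonces by 4; the nonce occupies the high 32 bits
// of each 64 bit lane, hence the 0x0000000400000000 constant.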
*noncev = _mm256_add_epi32( *noncev,
m256_const1_64( 0x0000000400000000 ) );
n += 4;
} while ( likely( ( n <= last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
//int scanhash_8way_32_32( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr )
#endif
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
//int scanhash_8way_64_64( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr )
//int scanhash_8way_64_640( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr )
int scanhash_8way_64in_32out( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash32[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash32_d7 = &(hash32[7*8]);
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
__m512i *noncev = (__m512i*)vdata + 9;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const uint32_t targ32_d7 = ptarget[7];
const bool bench = opt_benchmark;
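// Same pattern as the 4 way AVX2 version above, but with 8 interleaved lanes.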
mm512_bswap32_intrlv80_8x64( vdata, pdata );
*noncev = mm512_intrlv_blend_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
if ( likely( algo_gate.hash( hash32, vdata, thr_id ) ) )
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( ( hash32_d7[ lane ] <= targ32_d7 ) && !bench ) )
{
extr_lane_8x32( lane_hash, hash32, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) ) )
{
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm512_add_epi32( *noncev,
m512_const1_64( 0x0000000800000000 ) );
n += 8;
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
//int scanhash_16way_32_32( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr )
#endif
int null_hash()
{
applog(LOG_WARNING,"SWERR: null_hash unsafe null function");
@@ -106,7 +248,7 @@ int null_hash()
void init_algo_gate( algo_gate_t* gate )
{
gate->miner_thread_init = (void*)&return_true;
gate->scanhash = (void*)&null_scanhash;
gate->scanhash = (void*)&scanhash_generic;
gate->hash = (void*)&null_hash;
gate->get_new_work = (void*)&std_get_new_work;
gate->work_decode = (void*)&std_le_work_decode;
@@ -135,9 +277,11 @@ void init_algo_gate( algo_gate_t* gate )
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wimplicit-function-declaration"
// Called once by main
bool register_algo_gate( int algo, algo_gate_t *gate )
{
bool rc = false;
if ( NULL == gate )
{
applog(LOG_ERR,"FAIL: algo_gate registration failed, NULL gate\n");
@@ -146,103 +290,108 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
init_algo_gate( gate );
switch ( algo )
{
case ALGO_ALLIUM: rc = register_allium_algo ( gate ); break;
case ALGO_ANIME: rc = register_anime_algo ( gate ); break;
case ALGO_ARGON2: rc = register_argon2_algo ( gate ); break;
case ALGO_ARGON2D250: rc = register_argon2d_crds_algo ( gate ); break;
case ALGO_ARGON2D500: rc = register_argon2d_dyn_algo ( gate ); break;
case ALGO_ARGON2D4096: rc = register_argon2d4096_algo ( gate ); break;
case ALGO_AXIOM: rc = register_axiom_algo ( gate ); break;
case ALGO_BLAKE: rc = register_blake_algo ( gate ); break;
case ALGO_BLAKE2B: rc = register_blake2b_algo ( gate ); break;
case ALGO_BLAKE2S: rc = register_blake2s_algo ( gate ); break;
case ALGO_BLAKECOIN: rc = register_blakecoin_algo ( gate ); break;
case ALGO_BMW512: rc = register_bmw512_algo ( gate ); break;
case ALGO_C11: rc = register_c11_algo ( gate ); break;
case ALGO_DECRED: rc = register_decred_algo ( gate ); break;
case ALGO_DEEP: rc = register_deep_algo ( gate ); break;
case ALGO_DMD_GR: rc = register_dmd_gr_algo ( gate ); break;
case ALGO_GROESTL: rc = register_groestl_algo ( gate ); break;
case ALGO_HEX: rc = register_hex_algo ( gate ); break;
case ALGO_HMQ1725: rc = register_hmq1725_algo ( gate ); break;
case ALGO_HODL: rc = register_hodl_algo ( gate ); break;
case ALGO_JHA: rc = register_jha_algo ( gate ); break;
case ALGO_KECCAK: rc = register_keccak_algo ( gate ); break;
case ALGO_KECCAKC: rc = register_keccakc_algo ( gate ); break;
case ALGO_LBRY: rc = register_lbry_algo ( gate ); break;
case ALGO_LYRA2H: rc = register_lyra2h_algo ( gate ); break;
case ALGO_LYRA2RE: rc = register_lyra2re_algo ( gate ); break;
case ALGO_LYRA2REV2: rc = register_lyra2rev2_algo ( gate ); break;
case ALGO_LYRA2REV3: rc = register_lyra2rev3_algo ( gate ); break;
case ALGO_LYRA2Z: rc = register_lyra2z_algo ( gate ); break;
case ALGO_LYRA2Z330: rc = register_lyra2z330_algo ( gate ); break;
case ALGO_M7M: rc = register_m7m_algo ( gate ); break;
case ALGO_MINOTAUR: rc = register_minotaur_algo ( gate ); break;
case ALGO_MYR_GR: rc = register_myriad_algo ( gate ); break;
case ALGO_NEOSCRYPT: rc = register_neoscrypt_algo ( gate ); break;
case ALGO_NIST5: rc = register_nist5_algo ( gate ); break;
case ALGO_PENTABLAKE: rc = register_pentablake_algo ( gate ); break;
case ALGO_PHI1612: rc = register_phi1612_algo ( gate ); break;
case ALGO_PHI2: rc = register_phi2_algo ( gate ); break;
case ALGO_POLYTIMOS: rc = register_polytimos_algo ( gate ); break;
case ALGO_POWER2B: rc = register_power2b_algo ( gate ); break;
case ALGO_QUARK: rc = register_quark_algo ( gate ); break;
case ALGO_QUBIT: rc = register_qubit_algo ( gate ); break;
case ALGO_SCRYPT: rc = register_scrypt_algo ( gate ); break;
case ALGO_SHA256D: rc = register_sha256d_algo ( gate ); break;
case ALGO_SHA256Q: rc = register_sha256q_algo ( gate ); break;
case ALGO_SHA256T: rc = register_sha256t_algo ( gate ); break;
case ALGO_SHA3D: rc = register_sha3d_algo ( gate ); break;
case ALGO_SHAVITE3: rc = register_shavite_algo ( gate ); break;
case ALGO_SKEIN: rc = register_skein_algo ( gate ); break;
case ALGO_SKEIN2: rc = register_skein2_algo ( gate ); break;
case ALGO_SKUNK: rc = register_skunk_algo ( gate ); break;
case ALGO_SONOA: rc = register_sonoa_algo ( gate ); break;
case ALGO_TIMETRAVEL: rc = register_timetravel_algo ( gate ); break;
case ALGO_TIMETRAVEL10: rc = register_timetravel10_algo ( gate ); break;
case ALGO_TRIBUS: rc = register_tribus_algo ( gate ); break;
case ALGO_VANILLA: rc = register_vanilla_algo ( gate ); break;
case ALGO_VELTOR: rc = register_veltor_algo ( gate ); break;
case ALGO_VERTHASH: rc = register_verthash_algo ( gate ); break;
case ALGO_WHIRLPOOL: rc = register_whirlpool_algo ( gate ); break;
case ALGO_WHIRLPOOLX: rc = register_whirlpoolx_algo ( gate ); break;
case ALGO_X11: rc = register_x11_algo ( gate ); break;
case ALGO_X11EVO: rc = register_x11evo_algo ( gate ); break;
case ALGO_X11GOST: rc = register_x11gost_algo ( gate ); break;
case ALGO_X12: rc = register_x12_algo ( gate ); break;
case ALGO_X13: rc = register_x13_algo ( gate ); break;
case ALGO_X13BCD: rc = register_x13bcd_algo ( gate ); break;
case ALGO_X13SM3: rc = register_x13sm3_algo ( gate ); break;
case ALGO_X14: rc = register_x14_algo ( gate ); break;
case ALGO_X15: rc = register_x15_algo ( gate ); break;
case ALGO_X16R: rc = register_x16r_algo ( gate ); break;
case ALGO_X16RV2: rc = register_x16rv2_algo ( gate ); break;
case ALGO_X16RT: rc = register_x16rt_algo ( gate ); break;
case ALGO_X16RT_VEIL: rc = register_x16rt_veil_algo ( gate ); break;
case ALGO_X16S: rc = register_x16s_algo ( gate ); break;
case ALGO_X17: rc = register_x17_algo ( gate ); break;
case ALGO_X21S: rc = register_x21s_algo ( gate ); break;
case ALGO_X22I: rc = register_x22i_algo ( gate ); break;
case ALGO_X25X: rc = register_x25x_algo ( gate ); break;
case ALGO_XEVAN: rc = register_xevan_algo ( gate ); break;
case ALGO_YESCRYPT: rc = register_yescrypt_05_algo ( gate ); break;
// case ALGO_YESCRYPT: register_yescrypt_algo ( gate ); break;
case ALGO_YESCRYPTR8: rc = register_yescryptr8_05_algo ( gate ); break;
// case ALGO_YESCRYPTR8: register_yescryptr8_algo ( gate ); break;
case ALGO_YESCRYPTR8G: rc = register_yescryptr8g_algo ( gate ); break;
case ALGO_YESCRYPTR16: rc = register_yescryptr16_05_algo( gate ); break;
// case ALGO_YESCRYPTR16: register_yescryptr16_algo ( gate ); break;
case ALGO_YESCRYPTR32: rc = register_yescryptr32_05_algo( gate ); break;
// case ALGO_YESCRYPTR32: register_yescryptr32_algo ( gate ); break;
case ALGO_YESPOWER: rc = register_yespower_algo ( gate ); break;
case ALGO_YESPOWERR16: rc = register_yespowerr16_algo ( gate ); break;
case ALGO_YESPOWER_B2B: rc = register_yespower_b2b_algo ( gate ); break;
case ALGO_ZR5: rc = register_zr5_algo ( gate ); break;
default:
applog(LOG_ERR,"FAIL: algo_gate registration failed, unknown algo %s.\n", algo_names[opt_algo] );
applog(LOG_ERR,"BUG: unregistered algorithm %s.\n", algo_names[opt_algo] );
return false;
} // switch
if ( !rc )
{
applog(LOG_ERR, "FAIL: Required algo_gate functions undefined\n");
applog(LOG_ERR, "FAIL: %s algorithm failed to initialize\n", algo_names[opt_algo] );
return false;
}
return true;
@@ -251,7 +400,6 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
// restore warnings
#pragma GCC diagnostic pop
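As a sketch of how registration fits together: a register function assigns
the gate's function pointers and returns true on success. The names below
(register_myalgo_algo, myalgo_hash) are illustrative only, not an actual
algo in this file:

// Hypothetical example of an algo register function.
bool register_myalgo_algo( algo_gate_t *gate )
{
   gate->scanhash = (void*)&scanhash_generic;   // use the default scanhash
   gate->hash     = (void*)&myalgo_hash;        // algo specific target hash
   return true;
}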
// run the alternate hash function for a specific algo
void exec_hash_function( int algo, void *output, const void *pdata )
{
algo_gate_t gate;
@@ -271,7 +419,6 @@ void exec_hash_function( int algo, void *output, const void *pdata )
const char* const algo_alias_map[][2] =
{
// alias proper
{ "argon2d-crds", "argon2d250" },
{ "argon2d-dyn", "argon2d500" },
{ "argon2d-uis", "argon2d4096" },
{ "bcd", "x13bcd" },
@@ -286,7 +433,6 @@ const char* const algo_alias_map[][2] =
{ "flax", "c11" },
{ "hsr", "x13sm3" },
{ "jackpot", "jha" },
{ "jane", "scryptjane" },
{ "lyra2", "lyra2re" },
{ "lyra2v2", "lyra2rev2" },
{ "lyra2v3", "lyra2rev3" },


@@ -90,10 +90,11 @@ typedef uint32_t set_t;
#define AES_OPT 2
#define SSE42_OPT 4
#define AVX_OPT 8 // Sandybridge
#define AVX2_OPT 0x10 // Haswell, Zen1
#define SHA_OPT 0x20 // Zen1, Icelake (sha256)
#define AVX512_OPT 0x40 // Skylake-X (AVX512[F,VL,DQ,BW])
#define VAES_OPT 0x80 // Icelake (VAES & AVX512)
#define VAES256_OPT 0x100 // Zen3 (VAES without AVX512)
// return set containing all elements from sets a & b
@@ -110,23 +111,25 @@ inline bool set_excl ( set_t a, set_t b ) { return (a & b) == 0; }
typedef struct
{
// Mandatory functions: one of these must be implemented. If the generic
// scanhash is used a custom target hash function must be registered; with
// a custom scanhash the target hash function can be called directly and
// doesn't need to be registered with the gate.
int ( *scanhash ) ( struct work*, uint32_t, uint64_t*, struct thr_info* );
// Deprecated, will be removed
int ( *hash ) ( void*, const void*, int );
// optional, safe to use default in most cases
// Called once by each miner thread to allocate thread local buffers and
// other initialization specific to miner threads.
bool ( *miner_thread_init ) ( int );
// Get thread local copy of blockheader with unique nonce.
void ( *get_new_work ) ( struct work*, struct work*, int, uint32_t* );
// Decode getwork blockheader
bool ( *work_decode ) ( struct work* );
// Extra getwork data
void ( *decode_extra_data ) ( struct work*, uint64_t* );
@@ -147,7 +150,7 @@ void ( *build_stratum_request ) ( char*, struct work*, struct stratum_ctx* );
char* ( *malloc_txs_request ) ( struct work* );
// Big endian or little endian
void ( *set_work_data_endian ) ( struct work* );
double ( *calc_network_diff ) ( struct work* );
@@ -159,7 +162,7 @@ bool ( *ready_to_mine ) ( struct work*, struct stratum_ctx*, int );
bool ( *do_this_thread ) ( int );
// After do_this_thread
void ( *resync_threads ) ( int, struct work* );
// No longer needed
json_t* (*longpoll_rpc_call) ( CURL*, int*, char* );
@@ -201,17 +204,63 @@ void four_way_not_tested();
#define STD_WORK_DATA_SIZE 128
#define STD_WORK_CMP_SIZE 76
//#define JR2_NONCE_INDEX 39 // 8 bit offset
// These indexes are only used with JSON RPC2 and are not gated.
//#define JR2_WORK_CMP_INDEX_2 43
//#define JR2_WORK_CMP_SIZE_2 33
// always returns failure
// deprecated, use generic instead
int null_scanhash();
// Default generic, may be used in many cases.
// N-way is more complicated, requires many different implementations
// depending on architecture, input format, and output format.
// Naming convention is scanhash_[N]way_[input format]in_[output format]out
// N = number of lanes
// input/output format:
// 32: 32 bit interleaved parallel lanes
// 64: 64 bit interleaved parallel lanes
// 640: input only, not interleaved, contiguous serial 640 bit lanes.
// 256: output only, not interleaved, contiguous serial 256 bit lanes.
int scanhash_generic( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#if defined(__AVX2__)
//int scanhash_4way_64in_64out( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr );
//int scanhash_4way_64in_256out( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr );
int scanhash_4way_64in_32out( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
//int scanhash_8way_32in_32out( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr );
#endif
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
//int scanhash_8way_64in_64out( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr );
//int scanhash_8way_64in_256out( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr );
int scanhash_8way_64in_32out( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
//int scanhash_16way_32in_32out( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr );
#endif
// displays warning
int null_hash();
// optional safe targets, default listed first unless noted.
@@ -221,8 +270,8 @@ void std_get_new_work( struct work *work, struct work *g_work, int thr_id,
void sha256d_gen_merkle_root( char *merkle_root, struct stratum_ctx *sctx );
void SHA256_gen_merkle_root ( char *merkle_root, struct stratum_ctx *sctx );
bool std_le_work_decode( struct work *work );
bool std_be_work_decode( struct work *work );
bool std_le_submit_getwork_result( CURL *curl, struct work *work );
bool std_be_submit_getwork_result( CURL *curl, struct work *work );
@@ -232,7 +281,7 @@ void std_be_build_stratum_request( char *req, struct work *work );
char* std_malloc_txs_request( struct work *work );
// Default is do_nothing (assumed LE)
// Default is do_nothing, little endian is assumed
void set_work_data_big_endian( struct work *work );
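For reference, a minimal sketch of what the big-endian variant amounts to, assuming a 32-bit byte swap over the 80 byte header; the actual function may cover a different word range:

void set_work_data_big_endian_sketch( struct work* work )
{
   for ( int i = 0; i < 20; i++ )   // 80 byte header = 20 x 32 bit words
      work->data[i] = __builtin_bswap32( work->data[i] );
}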
double std_calc_network_diff( struct work *work );
@@ -257,7 +306,7 @@ int std_get_work_data_size();
// by calling the algo's register function.
bool register_algo_gate( int algo, algo_gate_t *gate );
// Called by algos toverride any default gate functions that are applicable
// Called by algos to override any default gate functions that are applicable
// and do any other algo-specific initialization.
// The register functions for all the algos can be declared here to reduce
// compiler warnings but that's just more work for devs adding new algos.


@@ -17,7 +17,7 @@
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
ALIGN(128) typedef struct {
typedef struct ALIGN( 64 ) {
__m512i b[16]; // input buffer
__m512i h[8]; // chained state
uint64_t t[2]; // total number of bytes
@@ -35,7 +35,7 @@ void blake2b_8way_final( blake2b_8way_ctx *ctx, void *out );
#if defined(__AVX2__)
// state context
ALIGN(128) typedef struct {
typedef struct ALIGN( 64 ) {
__m256i b[16]; // input buffer
__m256i h[8]; // chained state
uint64_t t[2]; // total number of bytes
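The ALIGN placement change above is more than cosmetic with GCC/Clang attribute syntax: written before typedef the attribute can bind to the declaration and be ignored, with or without a warning, while placing it between struct and the body attaches it to the type itself. A minimal sketch:

#define ALIGN(x) __attribute__ ((aligned(x)))
typedef struct ALIGN(64) { unsigned long long h[8]; } ok_t;
_Static_assert( _Alignof(ok_t) == 64, "alignment attached to the type" );
// ALIGN(64) typedef struct { unsigned long long h[8]; } risky_t;
//   ^ some compilers ignore the attribute in this position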


@@ -60,7 +60,7 @@ typedef struct __blake2s_nway_param
} blake2s_nway_param;
#pragma pack(pop)
ALIGN( 64 ) typedef struct __blake2s_4way_state
typedef struct ALIGN( 64 ) __blake2s_4way_state
{
__m128i h[8];
uint8_t buf[ BLAKE2S_BLOCKBYTES * 4 ];
@@ -80,7 +80,7 @@ int blake2s_4way_full_blocks( blake2s_4way_state *S, void *out,
#if defined(__AVX2__)
ALIGN( 64 ) typedef struct __blake2s_8way_state
typedef struct ALIGN( 64 ) __blake2s_8way_state
{
__m256i h[8];
uint8_t buf[ BLAKE2S_BLOCKBYTES * 8 ];
@@ -101,7 +101,7 @@ int blake2s_8way_full_blocks( blake2s_8way_state *S, void *out,
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
ALIGN( 128 ) typedef struct __blake2s_16way_state
typedef struct ALIGN( 64 ) __blake2s_16way_state
{
__m512i h[8];
uint8_t buf[ BLAKE2S_BLOCKBYTES * 16 ];


@@ -78,7 +78,6 @@ void decred_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
uint32_t extraheader[32] = { 0 };
int headersize = 0;
uint32_t* extradata = (uint32_t*) sctx->xnonce1;
size_t t;
int i;
// getwork over stratum, getwork merkle + header passed in coinb1
@@ -87,9 +86,6 @@ void decred_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
sizeof(extraheader) );
memcpy( extraheader, &sctx->job.coinbase[32], headersize );
// Increment extranonce2
for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ );
// Assemble block header
memset( g_work->data, 0, sizeof(g_work->data) );
g_work->data[0] = le32dec( sctx->job.version );


@@ -323,7 +323,7 @@ int blake2s_final( blake2s_state *S, uint8_t *out, uint8_t outlen )
int blake2s( uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen )
{
blake2s_state S[1];
blake2s_state S;
/* Verify parameters */
if ( NULL == in ) return -1;
@@ -334,15 +334,15 @@ int blake2s( uint8_t *out, const void *in, const void *key, const uint8_t outlen
if( keylen > 0 )
{
if( blake2s_init_key( S, outlen, key, keylen ) < 0 ) return -1;
if( blake2s_init_key( &S, outlen, key, keylen ) < 0 ) return -1;
}
else
{
if( blake2s_init( S, outlen ) < 0 ) return -1;
if( blake2s_init( &S, outlen ) < 0 ) return -1;
}
blake2s_update( S, ( uint8_t * )in, inlen );
blake2s_final( S, out, outlen );
blake2s_update( &S, ( uint8_t * )in, inlen );
blake2s_final( &S, out, outlen );
return 0;
}
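The S[1] change above swaps the array-of-one idiom for a plain object; the two are behaviorally equivalent, one passing the address implicitly and one explicitly:

void blake2s_state_idiom_sketch( void )
{
   blake2s_state A[1];      // array of one: A decays to &A[0],
   blake2s_init( A, 32 );   // so it can be passed as-is
   blake2s_state B;         // plain object: address-of is explicit
   blake2s_init( &B, 32 );
}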


@@ -116,7 +116,7 @@ extern "C" {
uint8_t personal[BLAKE2S_PERSONALBYTES]; // 32
} blake2s_param;
ALIGN( 64 ) typedef struct __blake2s_state
typedef struct ALIGN( 64 ) __blake2s_state
{
uint32_t h[8];
uint32_t t[2];


@@ -18,7 +18,7 @@
#endif
// state context
ALIGN(64) typedef struct {
typedef ALIGN(64) struct {
uint8_t b[128]; // input buffer
uint64_t h[8]; // chained state
uint64_t t[2]; // total number of bytes


@@ -1293,32 +1293,26 @@ void compress_big_8way( const __m512i *M, const __m512i H[16],
mm512_xor4( qt[28], qt[29], qt[30], qt[31] ) ) );
#define DH1L( m, sl, sr, a, b, c ) \
_mm512_add_epi64( \
_mm512_xor_si512( M[m], \
_mm512_xor_si512( _mm512_slli_epi64( xh, sl ), \
_mm512_srli_epi64( qt[a], sr ) ) ), \
_mm512_xor_si512( _mm512_xor_si512( xl, qt[b] ), qt[c] ) )
_mm512_add_epi64( mm512_xor3( M[m], _mm512_slli_epi64( xh, sl ), \
_mm512_srli_epi64( qt[a], sr ) ), \
mm512_xor3( xl, qt[b], qt[c] ) )
#define DH1R( m, sl, sr, a, b, c ) \
_mm512_add_epi64( \
_mm512_xor_si512( M[m], \
_mm512_xor_si512( _mm512_srli_epi64( xh, sl ), \
_mm512_slli_epi64( qt[a], sr ) ) ), \
_mm512_xor_si512( _mm512_xor_si512( xl, qt[b] ), qt[c] ) )
_mm512_add_epi64( mm512_xor3( M[m], _mm512_srli_epi64( xh, sl ), \
_mm512_slli_epi64( qt[a], sr ) ), \
mm512_xor3( xl, qt[b], qt[c] ) )
#define DH2L( m, rl, sl, h, a, b, c ) \
_mm512_add_epi64( _mm512_add_epi64( \
mm512_rol_64( dH[h], rl ), \
_mm512_xor_si512( _mm512_xor_si512( xh, qt[a] ), M[m] )), \
_mm512_xor_si512( _mm512_slli_epi64( xl, sl ), \
_mm512_xor_si512( qt[b], qt[c] ) ) );
mm512_rol_64( dH[h], rl ), \
mm512_xor3( xh, qt[a], M[m] ) ), \
mm512_xor3( _mm512_slli_epi64( xl, sl ), qt[b], qt[c] ) )
#define DH2R( m, rl, sr, h, a, b, c ) \
_mm512_add_epi64( _mm512_add_epi64( \
mm512_rol_64( dH[h], rl ), \
_mm512_xor_si512( _mm512_xor_si512( xh, qt[a] ), M[m] )), \
_mm512_xor_si512( _mm512_srli_epi64( xl, sr ), \
_mm512_xor_si512( qt[b], qt[c] ) ) );
mm512_rol_64( dH[h], rl ), \
mm512_xor3( xh, qt[a], M[m] ) ), \
mm512_xor3( _mm512_srli_epi64( xl, sr ), qt[b], qt[c] ) )
dH[ 0] = DH1L( 0, 5, 5, 16, 24, 0 );


@@ -55,8 +55,8 @@ MYALIGN const unsigned int mul2ipt[] = {0x728efc00, 0x6894e61a, 0x3fc3b14d, 0x2
#define ECHO_SUBBYTES(state, i, j) \
state[i][j] = _mm_aesenc_si128(state[i][j], k1);\
state[i][j] = _mm_aesenc_si128(state[i][j], M128(zero));\
k1 = _mm_add_epi32(k1, M128(const1))
k1 = _mm_add_epi32(k1, M128(const1));\
state[i][j] = _mm_aesenc_si128(state[i][j], M128(zero))
#define ECHO_MIXBYTES(state1, state2, j, t1, t2, s2) \
s2 = _mm_add_epi8(state1[0][j], state1[0][j]);\


@@ -1,5 +1,4 @@
//#if 0
#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__VAES__)
#include "simd-utils.h"
#include "echo-hash-4way.h"
@@ -11,18 +10,20 @@ static const unsigned int mul2ipt[] __attribute__ ((aligned (64))) =
0xfd5ba600, 0x2a8c71d7, 0x1eb845e3, 0xc96f9234
};
*/
// do these need to be reversed?
#define mul2mask \
_mm512_set4_epi32( 0, 0, 0, 0x00001b00 )
// _mm512_set4_epi32( 0x00001b00, 0, 0, 0 )
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define lsbmask m512_const1_32( 0x01010101 )
//#define mul2mask m512_const2_64( 0, 0x00001b00 )
//_mm512_set4_epi32( 0, 0, 0, 0x00001b00 )
//_mm512_set4_epi32( 0x00001b00, 0, 0, 0 )
//#define lsbmask m512_const1_32( 0x01010101 )
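mul2mask and lsbmask implement the byte-wise GF(2^8) doubling (AES xtime): shift left, then xor the reduction constant 0x1b into each byte whose top bit carried out. A scalar sketch of one byte:

static inline uint8_t gf256_mul2( uint8_t x )
{
   uint8_t s2  = (uint8_t)( x + x );    // _mm512_add_epi8( x, x )
   uint8_t msb = ( x >> 7 ) & 1;        // srli_epi16( x, 7 ) & lsbmask
   return s2 ^ ( msb ? 0x1b : 0x00 );   // shuffle_epi8( mul2mask, t1 )
}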
#define ECHO_SUBBYTES( state, i, j ) \
state[i][j] = _mm512_aesenc_epi128( state[i][j], k1 ); \
state[i][j] = _mm512_aesenc_epi128( state[i][j], m512_zero ); \
k1 = _mm512_add_epi32( k1, m512_one_128 );
k1 = _mm512_add_epi32( k1, one ); \
state[i][j] = _mm512_aesenc_epi128( state[i][j], m512_zero );
#define ECHO_MIXBYTES( state1, state2, j, t1, t2, s2 ) do \
{ \
@@ -30,87 +31,87 @@ static const unsigned int mul2ipt[] __attribute__ ((aligned (64))) =
const int j2 = ( (j)+2 ) & 3; \
const int j3 = ( (j)+3 ) & 3; \
s2 = _mm512_add_epi8( state1[ 0 ][ j ], state1[ 0 ][ j ] ); \
t1 = _mm512_srli_epi16( state1[ 0 ][ j ], 7 ); \
t1 = _mm512_and_si512( t1, lsbmask ); \
t2 = _mm512_shuffle_epi8( mul2mask, t1 ); \
s2 = _mm512_xor_si512( s2, t2 ); \
state2[ 0 ][ j ] = s2; \
state2[ 1 ][ j ] = state1[ 0 ][ j ]; \
state2[ 2 ][ j ] = state1[ 0 ][ j ]; \
state2[ 3 ][ j ] = _mm512_xor_si512( s2, state1[ 0 ][ j ] ); \
s2 = _mm512_add_epi8( state1[ 1 ][ j1 ], state1[ 1 ][ j1 ] ); \
t1 = _mm512_srli_epi16( state1[ 1 ][ j1 ], 7 ); \
t1 = _mm512_and_si512( t1, lsbmask ); \
t2 = _mm512_shuffle_epi8( mul2mask, t1 ); \
s2 = _mm512_xor_si512( s2, t2 ); \
state2[ 0 ][ j ] = _mm512_xor_si512( state2[ 0 ][ j ], \
                   _mm512_xor_si512( s2, state1[ 1 ][ j1 ] ) ); \
state2[ 1 ][ j ] = _mm512_xor_si512( state2[ 1 ][ j ], s2 ); \
state2[ 2 ][ j ] = _mm512_xor_si512( state2[ 2 ][ j ], state1[ 1 ][ j1 ] ); \
state2[ 3 ][ j ] = _mm512_xor_si512( state2[ 3 ][ j ], state1[ 1 ][ j1 ] ); \
s2 = _mm512_add_epi8( state1[ 2 ][ j2 ], state1[ 2 ][ j2 ] ); \
t1 = _mm512_srli_epi16( state1[ 2 ][ j2 ], 7 ); \
t1 = _mm512_and_si512( t1, lsbmask ); \
t2 = _mm512_shuffle_epi8( mul2mask, t1 ); \
s2 = _mm512_xor_si512( s2, t2 ); \
state2[ 0 ][ j ] = _mm512_xor_si512( state2[ 0 ][ j ], state1[ 2 ][ j2 ] ); \
state2[ 1 ][ j ] = _mm512_xor_si512( state2[ 1 ][ j ], \
                   _mm512_xor_si512( s2, state1[ 2 ][ j2 ] ) ); \
state2[ 2 ][ j ] = _mm512_xor_si512( state2[ 2 ][ j ], s2 ); \
state2[ 3 ][ j ] = _mm512_xor_si512( state2[ 3 ][ j ], state1[ 2 ][ j2 ] ); \
s2 = _mm512_add_epi8( state1[ 3 ][ j3 ], state1[ 3 ][ j3 ] ); \
t1 = _mm512_srli_epi16( state1[ 3 ][ j3 ], 7 ); \
t1 = _mm512_and_si512( t1, lsbmask ); \
t2 = _mm512_shuffle_epi8( mul2mask, t1 ); \
s2 = _mm512_xor_si512( s2, t2 ); \
state2[ 0 ][ j ] = _mm512_xor_si512( state2[ 0 ][ j ], state1[ 3 ][ j3 ] ); \
state2[ 1 ][ j ] = _mm512_xor_si512( state2[ 1 ][ j ], state1[ 3 ][ j3 ] ); \
state2[ 2 ][ j ] = _mm512_xor_si512( state2[ 2 ][ j ], \
                   _mm512_xor_si512( s2, state1[ 3 ][ j3 ] ) ); \
state2[ 3 ][ j ] = _mm512_xor_si512( state2[ 3 ][ j ], s2 ); \
} while(0)
#define ECHO_ROUND_UNROLL2 \
ECHO_SUBBYTES(_state, 0, 0);\
ECHO_SUBBYTES(_state, 1, 0);\
ECHO_SUBBYTES(_state, 2, 0);\
ECHO_SUBBYTES(_state, 3, 0);\
ECHO_SUBBYTES(_state, 0, 1);\
ECHO_SUBBYTES(_state, 1, 1);\
ECHO_SUBBYTES(_state, 2, 1);\
ECHO_SUBBYTES(_state, 3, 1);\
ECHO_SUBBYTES(_state, 0, 2);\
ECHO_SUBBYTES(_state, 1, 2);\
ECHO_SUBBYTES(_state, 2, 2);\
ECHO_SUBBYTES(_state, 3, 2);\
ECHO_SUBBYTES(_state, 0, 3);\
ECHO_SUBBYTES(_state, 1, 3);\
ECHO_SUBBYTES(_state, 2, 3);\
ECHO_SUBBYTES(_state, 3, 3);\
ECHO_MIXBYTES(_state, _state2, 0, t1, t2, s2);\
ECHO_MIXBYTES(_state, _state2, 1, t1, t2, s2);\
ECHO_MIXBYTES(_state, _state2, 2, t1, t2, s2);\
ECHO_MIXBYTES(_state, _state2, 3, t1, t2, s2);\
ECHO_SUBBYTES(_state2, 0, 0);\
ECHO_SUBBYTES(_state2, 1, 0);\
ECHO_SUBBYTES(_state2, 2, 0);\
ECHO_SUBBYTES(_state2, 3, 0);\
ECHO_SUBBYTES(_state2, 0, 1);\
ECHO_SUBBYTES(_state2, 1, 1);\
ECHO_SUBBYTES(_state2, 2, 1);\
ECHO_SUBBYTES(_state2, 3, 1);\
ECHO_SUBBYTES(_state2, 0, 2);\
ECHO_SUBBYTES(_state2, 1, 2);\
ECHO_SUBBYTES(_state2, 2, 2);\
ECHO_SUBBYTES(_state2, 3, 2);\
ECHO_SUBBYTES(_state2, 0, 3);\
ECHO_SUBBYTES(_state2, 1, 3);\
ECHO_SUBBYTES(_state2, 2, 3);\
ECHO_SUBBYTES(_state2, 3, 3);\
ECHO_MIXBYTES(_state2, _state, 0, t1, t2, s2);\
ECHO_MIXBYTES(_state2, _state, 1, t1, t2, s2);\
ECHO_MIXBYTES(_state2, _state, 2, t1, t2, s2);\
ECHO_MIXBYTES(_state2, _state, 3, t1, t2, s2)
#define SAVESTATE(dst, src)\
dst[0][0] = src[0][0];\
@@ -137,6 +138,9 @@ void echo_4way_compress( echo_4way_context *ctx, const __m512i *pmsg,
unsigned int r, b, i, j;
__m512i t1, t2, s2, k1;
__m512i _state[4][4], _state2[4][4], _statebackup[4][4];
__m512i one = m512_one_128;
__m512i mul2mask = m512_const2_64( 0, 0x00001b00 );
__m512i lsbmask = m512_const1_32( 0x01010101 );
_state[ 0 ][ 0 ] = ctx->state[ 0 ][ 0 ];
_state[ 0 ][ 1 ] = ctx->state[ 0 ][ 1 ];
@@ -224,43 +228,43 @@ void echo_4way_compress( echo_4way_context *ctx, const __m512i *pmsg,
int echo_4way_init( echo_4way_context *ctx, int nHashSize )
{
int i, j;
ctx->k = m512_zero;
ctx->processed_bits = 0;
ctx->uBufferBytes = 0;
switch( nHashSize )
{
case 256:
ctx->uHashSize = 256;
ctx->uBlockLength = 192;
ctx->uRounds = 8;
ctx->hashsize = _mm512_set4_epi32( 0, 0, 0, 0x100 );
ctx->const1536 = _mm512_set4_epi32( 0, 0, 0, 0x600 );
break;
switch( nHashSize )
{
case 256:
ctx->uHashSize = 256;
ctx->uBlockLength = 192;
ctx->uRounds = 8;
ctx->hashsize = m512_const2_64( 0, 0x100 );
ctx->const1536 = m512_const2_64( 0, 0x600 );
break;
case 512:
ctx->uHashSize = 512;
ctx->uBlockLength = 128;
ctx->uRounds = 10;
ctx->hashsize = _mm512_set4_epi32( 0, 0, 0, 0x200 );
ctx->const1536 = _mm512_set4_epi32( 0, 0, 0, 0x400);
break;
case 512:
ctx->uHashSize = 512;
ctx->uBlockLength = 128;
ctx->uRounds = 10;
ctx->hashsize = m512_const2_64( 0, 0x200 );
ctx->const1536 = m512_const2_64( 0, 0x400);
break;
default:
return 1;
}
default:
return 1;
}
for( i = 0; i < 4; i++ )
for( j = 0; j < nHashSize / 256; j++ )
ctx->state[ i ][ j ] = ctx->hashsize;
for( i = 0; i < 4; i++ )
for( j = nHashSize / 256; j < 4; j++ )
ctx->state[ i ][ j ] = m512_zero;
return 0;
}
int echo_4way_update_close( echo_4way_context *state, void *hashval,
@@ -285,17 +289,13 @@ int echo_4way_update_close( echo_4way_context *state, void *hashval,
vlen = databitlen / 128; // * 4 lanes / 128 bits per lane
memcpy_512( state->buffer, data, vlen );
state->processed_bits += (unsigned int)( databitlen );
remainingbits = _mm512_set4_epi32( 0, 0, 0, databitlen );
remainingbits = m512_const2_64( 0, (uint64_t)databitlen );
}
state->buffer[ vlen ] = _mm512_set4_epi32( 0, 0, 0, 0x80 );
state->buffer[ vlen ] = m512_const2_64( 0, 0x80 );
memset_zero_512( state->buffer + vlen + 1, vblen - vlen - 2 );
state->buffer[ vblen-2 ] =
_mm512_set4_epi32( (uint32_t)state->uHashSize << 16, 0, 0, 0 );
state->buffer[ vblen-1 ] =
_mm512_set4_epi64( 0, state->processed_bits,
0, state->processed_bits );
state->buffer[ vblen-2 ] = m512_const2_64( (uint64_t)state->uHashSize << 48, 0 );
state->buffer[ vblen-1 ] = m512_const2_64( 0, state->processed_bits);
state->k = _mm512_add_epi64( state->k, remainingbits );
state->k = _mm512_sub_epi64( state->k, state->const1536 );
@@ -328,16 +328,16 @@ int echo_4way_full( echo_4way_context *ctx, void *hashval, int nHashSize,
ctx->uHashSize = 256;
ctx->uBlockLength = 192;
ctx->uRounds = 8;
ctx->hashsize = _mm512_set4_epi32( 0, 0, 0, 0x100 );
ctx->const1536 = _mm512_set4_epi32( 0, 0, 0, 0x600 );
ctx->hashsize = m512_const2_64( 0, 0x100 );
ctx->const1536 = m512_const2_64( 0, 0x600 );
break;
case 512:
ctx->uHashSize = 512;
ctx->uBlockLength = 128;
ctx->uRounds = 10;
ctx->hashsize = _mm512_set4_epi32( 0, 0, 0, 0x200 );
ctx->const1536 = _mm512_set4_epi32( 0, 0, 0, 0x400);
ctx->hashsize = m512_const2_64( 0, 0x200 );
ctx->const1536 = m512_const2_64( 0, 0x400 );
break;
default:
@@ -372,17 +372,14 @@ int echo_4way_full( echo_4way_context *ctx, void *hashval, int nHashSize,
vlen = databitlen / 128; // * 4 lanes / 128 bits per lane
memcpy_512( ctx->buffer, data, vlen );
ctx->processed_bits += (unsigned int)( databitlen );
remainingbits = _mm512_set4_epi32( 0, 0, 0, databitlen );
remainingbits = m512_const2_64( 0, databitlen );
}
ctx->buffer[ vlen ] = _mm512_set4_epi32( 0, 0, 0, 0x80 );
ctx->buffer[ vlen ] = m512_const2_64( 0, 0x80 );
memset_zero_512( ctx->buffer + vlen + 1, vblen - vlen - 2 );
ctx->buffer[ vblen-2 ] =
_mm512_set4_epi32( (uint32_t)ctx->uHashSize << 16, 0, 0, 0 );
ctx->buffer[ vblen-1 ] =
_mm512_set4_epi64( 0, ctx->processed_bits,
0, ctx->processed_bits );
ctx->buffer[ vblen-2 ] = m512_const2_64( (uint64_t)ctx->uHashSize << 48, 0 );
ctx->buffer[ vblen-1 ] = m512_const2_64( 0, ctx->processed_bits);
ctx->k = _mm512_add_epi64( ctx->k, remainingbits );
ctx->k = _mm512_sub_epi64( ctx->k, ctx->const1536 );
@@ -400,5 +397,380 @@ int echo_4way_full( echo_4way_context *ctx, void *hashval, int nHashSize,
return 0;
}
#endif // AVX512
#endif
// AVX2 + VAES
#define mul2mask_2way m256_const2_64( 0, 0x0000000000001b00 )
#define lsbmask_2way m256_const1_32( 0x01010101 )
#define ECHO_SUBBYTES_2WAY( state, i, j ) \
state[i][j] = _mm256_aesenc_epi128( state[i][j], k1 ); \
k1 = _mm256_add_epi32( k1, m256_one_128 ); \
state[i][j] = _mm256_aesenc_epi128( state[i][j], m256_zero );
#define ECHO_MIXBYTES_2WAY( state1, state2, j, t1, t2, s2 ) do \
{ \
const int j1 = ( (j)+1 ) & 3; \
const int j2 = ( (j)+2 ) & 3; \
const int j3 = ( (j)+3 ) & 3; \
s2 = _mm256_add_epi8( state1[ 0 ] [j ], state1[ 0 ][ j ] ); \
t1 = _mm256_srli_epi16( state1[ 0 ][ j ], 7 ); \
t1 = _mm256_and_si256( t1, lsbmask_2way );\
t2 = _mm256_shuffle_epi8( mul2mask_2way, t1 ); \
s2 = _mm256_xor_si256( s2, t2 ); \
state2[ 0 ] [j ] = s2; \
state2[ 1 ] [j ] = state1[ 0 ][ j ]; \
state2[ 2 ] [j ] = state1[ 0 ][ j ]; \
state2[ 3 ] [j ] = _mm256_xor_si256( s2, state1[ 0 ][ j ] );\
s2 = _mm256_add_epi8( state1[ 1 ][ j1 ], state1[ 1 ][ j1 ] ); \
t1 = _mm256_srli_epi16( state1[ 1 ][ j1 ], 7 ); \
t1 = _mm256_and_si256( t1, lsbmask_2way ); \
t2 = _mm256_shuffle_epi8( mul2mask_2way, t1 ); \
s2 = _mm256_xor_si256( s2, t2 );\
state2[ 0 ][ j ] = _mm256_xor_si256( state2[ 0 ][ j ], \
_mm256_xor_si256( s2, state1[ 1 ][ j1 ] ) ); \
state2[ 1 ][ j ] = _mm256_xor_si256( state2[ 1 ][ j ], s2 ); \
state2[ 2 ][ j ] = _mm256_xor_si256( state2[ 2 ][ j ], state1[ 1 ][ j1 ] ); \
state2[ 3 ][ j ] = _mm256_xor_si256( state2[ 3 ][ j ], state1[ 1 ][ j1 ] ); \
s2 = _mm256_add_epi8( state1[ 2 ][ j2 ], state1[ 2 ][ j2 ] ); \
t1 = _mm256_srli_epi16( state1[ 2 ][ j2 ], 7 ); \
t1 = _mm256_and_si256( t1, lsbmask_2way ); \
t2 = _mm256_shuffle_epi8( mul2mask_2way, t1 ); \
s2 = _mm256_xor_si256( s2, t2 ); \
state2[ 0 ][ j ] = _mm256_xor_si256( state2[ 0 ][ j ], state1[ 2 ][ j2 ] ); \
state2[ 1 ][ j ] = _mm256_xor_si256( state2[ 1 ][ j ], \
_mm256_xor_si256( s2, state1[ 2 ][ j2 ] ) ); \
state2[ 2 ][ j ] = _mm256_xor_si256( state2[ 2 ][ j ], s2 ); \
state2[ 3 ][ j ] = _mm256_xor_si256( state2[ 3][ j ], state1[ 2 ][ j2 ] ); \
s2 = _mm256_add_epi8( state1[ 3 ][ j3 ], state1[ 3 ][ j3 ] ); \
t1 = _mm256_srli_epi16( state1[ 3 ][ j3 ], 7 ); \
t1 = _mm256_and_si256( t1, lsbmask_2way ); \
t2 = _mm256_shuffle_epi8( mul2mask_2way, t1 ); \
s2 = _mm256_xor_si256( s2, t2 ); \
state2[ 0 ][ j ] = _mm256_xor_si256( state2[ 0 ][ j ], state1[ 3 ][ j3 ] ); \
state2[ 1 ][ j ] = _mm256_xor_si256( state2[ 1 ][ j ], state1[ 3 ][ j3 ] ); \
state2[ 2 ][ j ] = _mm256_xor_si256( state2[ 2 ][ j ], \
_mm256_xor_si256( s2, state1[ 3 ][ j3] ) ); \
state2[ 3 ][ j ] = _mm256_xor_si256( state2[ 3 ][ j ], s2 ); \
} while(0)
#define ECHO_ROUND_UNROLL2_2WAY \
ECHO_SUBBYTES_2WAY(_state, 0, 0);\
ECHO_SUBBYTES_2WAY(_state, 1, 0);\
ECHO_SUBBYTES_2WAY(_state, 2, 0);\
ECHO_SUBBYTES_2WAY(_state, 3, 0);\
ECHO_SUBBYTES_2WAY(_state, 0, 1);\
ECHO_SUBBYTES_2WAY(_state, 1, 1);\
ECHO_SUBBYTES_2WAY(_state, 2, 1);\
ECHO_SUBBYTES_2WAY(_state, 3, 1);\
ECHO_SUBBYTES_2WAY(_state, 0, 2);\
ECHO_SUBBYTES_2WAY(_state, 1, 2);\
ECHO_SUBBYTES_2WAY(_state, 2, 2);\
ECHO_SUBBYTES_2WAY(_state, 3, 2);\
ECHO_SUBBYTES_2WAY(_state, 0, 3);\
ECHO_SUBBYTES_2WAY(_state, 1, 3);\
ECHO_SUBBYTES_2WAY(_state, 2, 3);\
ECHO_SUBBYTES_2WAY(_state, 3, 3);\
ECHO_MIXBYTES_2WAY(_state, _state2, 0, t1, t2, s2);\
ECHO_MIXBYTES_2WAY(_state, _state2, 1, t1, t2, s2);\
ECHO_MIXBYTES_2WAY(_state, _state2, 2, t1, t2, s2);\
ECHO_MIXBYTES_2WAY(_state, _state2, 3, t1, t2, s2);\
ECHO_SUBBYTES_2WAY(_state2, 0, 0);\
ECHO_SUBBYTES_2WAY(_state2, 1, 0);\
ECHO_SUBBYTES_2WAY(_state2, 2, 0);\
ECHO_SUBBYTES_2WAY(_state2, 3, 0);\
ECHO_SUBBYTES_2WAY(_state2, 0, 1);\
ECHO_SUBBYTES_2WAY(_state2, 1, 1);\
ECHO_SUBBYTES_2WAY(_state2, 2, 1);\
ECHO_SUBBYTES_2WAY(_state2, 3, 1);\
ECHO_SUBBYTES_2WAY(_state2, 0, 2);\
ECHO_SUBBYTES_2WAY(_state2, 1, 2);\
ECHO_SUBBYTES_2WAY(_state2, 2, 2);\
ECHO_SUBBYTES_2WAY(_state2, 3, 2);\
ECHO_SUBBYTES_2WAY(_state2, 0, 3);\
ECHO_SUBBYTES_2WAY(_state2, 1, 3);\
ECHO_SUBBYTES_2WAY(_state2, 2, 3);\
ECHO_SUBBYTES_2WAY(_state2, 3, 3);\
ECHO_MIXBYTES_2WAY(_state2, _state, 0, t1, t2, s2);\
ECHO_MIXBYTES_2WAY(_state2, _state, 1, t1, t2, s2);\
ECHO_MIXBYTES_2WAY(_state2, _state, 2, t1, t2, s2);\
ECHO_MIXBYTES_2WAY(_state2, _state, 3, t1, t2, s2)
#define SAVESTATE_2WAY(dst, src)\
dst[0][0] = src[0][0];\
dst[0][1] = src[0][1];\
dst[0][2] = src[0][2];\
dst[0][3] = src[0][3];\
dst[1][0] = src[1][0];\
dst[1][1] = src[1][1];\
dst[1][2] = src[1][2];\
dst[1][3] = src[1][3];\
dst[2][0] = src[2][0];\
dst[2][1] = src[2][1];\
dst[2][2] = src[2][2];\
dst[2][3] = src[2][3];\
dst[3][0] = src[3][0];\
dst[3][1] = src[3][1];\
dst[3][2] = src[3][2];\
dst[3][3] = src[3][3]
// blockcount always 1
void echo_2way_compress( echo_2way_context *ctx, const __m256i *pmsg,
unsigned int uBlockCount )
{
unsigned int r, b, i, j;
__m256i t1, t2, s2, k1;
__m256i _state[4][4], _state2[4][4], _statebackup[4][4];
_state[ 0 ][ 0 ] = ctx->state[ 0 ][ 0 ];
_state[ 0 ][ 1 ] = ctx->state[ 0 ][ 1 ];
_state[ 0 ][ 2 ] = ctx->state[ 0 ][ 2 ];
_state[ 0 ][ 3 ] = ctx->state[ 0 ][ 3 ];
_state[ 1 ][ 0 ] = ctx->state[ 1 ][ 0 ];
_state[ 1 ][ 1 ] = ctx->state[ 1 ][ 1 ];
_state[ 1 ][ 2 ] = ctx->state[ 1 ][ 2 ];
_state[ 1 ][ 3 ] = ctx->state[ 1 ][ 3 ];
_state[ 2 ][ 0 ] = ctx->state[ 2 ][ 0 ];
_state[ 2 ][ 1 ] = ctx->state[ 2 ][ 1 ];
_state[ 2 ][ 2 ] = ctx->state[ 2 ][ 2 ];
_state[ 2 ][ 3 ] = ctx->state[ 2 ][ 3 ];
_state[ 3 ][ 0 ] = ctx->state[ 3 ][ 0 ];
_state[ 3 ][ 1 ] = ctx->state[ 3 ][ 1 ];
_state[ 3 ][ 2 ] = ctx->state[ 3 ][ 2 ];
_state[ 3 ][ 3 ] = ctx->state[ 3 ][ 3 ];
for ( b = 0; b < uBlockCount; b++ )
{
ctx->k = _mm256_add_epi64( ctx->k, ctx->const1536 );
for( j = ctx->uHashSize / 256; j < 4; j++ )
{
for ( i = 0; i < 4; i++ )
{
_state[ i ][ j ] = _mm256_load_si256(
pmsg + 4 * (j - (ctx->uHashSize / 256)) + i );
}
}
// save state
SAVESTATE_2WAY( _statebackup, _state );
k1 = ctx->k;
for ( r = 0; r < ctx->uRounds / 2; r++ )
{
ECHO_ROUND_UNROLL2_2WAY;
}
if ( ctx->uHashSize == 256 )
{
for ( i = 0; i < 4; i++ )
{
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
_state[ i ][ 1 ] );
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
_state[ i ][ 2 ] );
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
_state[ i ][ 3 ] );
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
_statebackup[ i ][ 0 ] );
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
_statebackup[ i ][ 1 ] );
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
_statebackup[ i ][ 2 ] ) ;
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
_statebackup[ i ][ 3 ] );
}
}
else
{
for ( i = 0; i < 4; i++ )
{
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
_state[ i ][ 2 ] );
_state[ i ][ 1 ] = _mm256_xor_si256( _state[ i ][ 1 ],
_state[ i ][ 3 ] );
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ][ 0 ],
_statebackup[ i ][ 0 ] );
_state[ i ][ 0 ] = _mm256_xor_si256( _state[ i ] [0 ],
_statebackup[ i ][ 2 ] );
_state[ i ][ 1 ] = _mm256_xor_si256( _state[ i ][ 1 ],
_statebackup[ i ][ 1 ] );
_state[ i ][ 1 ] = _mm256_xor_si256( _state[ i ][ 1 ],
_statebackup[ i ][ 3 ] );
}
}
pmsg += ctx->uBlockLength;
}
SAVESTATE_2WAY(ctx->state, _state);
}
int echo_2way_init( echo_2way_context *ctx, int nHashSize )
{
int i, j;
ctx->k = m256_zero;
ctx->processed_bits = 0;
ctx->uBufferBytes = 0;
switch( nHashSize )
{
case 256:
ctx->uHashSize = 256;
ctx->uBlockLength = 192;
ctx->uRounds = 8;
ctx->hashsize = m256_const2_64( 0, 0x100 );
ctx->const1536 = m256_const2_64( 0, 0x600 );
break;
case 512:
ctx->uHashSize = 512;
ctx->uBlockLength = 128;
ctx->uRounds = 10;
ctx->hashsize = m256_const2_64( 0, 0x200 );
ctx->const1536 = m256_const2_64( 0, 0x400 );
break;
default:
return 1;
}
for( i = 0; i < 4; i++ )
for( j = 0; j < nHashSize / 256; j++ )
ctx->state[ i ][ j ] = ctx->hashsize;
for( i = 0; i < 4; i++ )
for( j = nHashSize / 256; j < 4; j++ )
ctx->state[ i ][ j ] = m256_zero;
return 0;
}
int echo_2way_update_close( echo_2way_context *state, void *hashval,
const void *data, int databitlen )
{
// bytelen is either 32 (maybe), 64, 80 or 128;
// all are less than a full block.
int vlen = databitlen / 128; // * 4 lanes / 128 bits per lane
const int vblen = state->uBlockLength / 16; // 16 bytes per lane
__m256i remainingbits;
if ( databitlen == 1024 )
{
echo_2way_compress( state, data, 1 );
state->processed_bits = 1024;
remainingbits = m256_const2_64( 0, -1024 );
vlen = 0;
}
else
{
memcpy_256( state->buffer, data, vlen );
state->processed_bits += (unsigned int)( databitlen );
remainingbits = m256_const2_64( 0, databitlen );
}
state->buffer[ vlen ] = m256_const2_64( 0, 0x80 );
memset_zero_256( state->buffer + vlen + 1, vblen - vlen - 2 );
state->buffer[ vblen-2 ] = m256_const2_64( (uint64_t)state->uHashSize << 48, 0 );
state->buffer[ vblen-1 ] = m256_const2_64( 0, state->processed_bits );
state->k = _mm256_add_epi64( state->k, remainingbits );
state->k = _mm256_sub_epi64( state->k, state->const1536 );
echo_2way_compress( state, state->buffer, 1 );
_mm256_store_si256( (__m256i*)hashval + 0, state->state[ 0 ][ 0] );
_mm256_store_si256( (__m256i*)hashval + 1, state->state[ 1 ][ 0] );
if ( state->uHashSize == 512 )
{
_mm256_store_si256( (__m256i*)hashval + 2, state->state[ 2 ][ 0 ] );
_mm256_store_si256( (__m256i*)hashval + 3, state->state[ 3 ][ 0 ] );
}
return 0;
}
int echo_2way_full( echo_2way_context *ctx, void *hashval, int nHashSize,
const void *data, int datalen )
{
int i, j;
int databitlen = datalen * 8;
ctx->k = m256_zero;
ctx->processed_bits = 0;
ctx->uBufferBytes = 0;
switch( nHashSize )
{
case 256:
ctx->uHashSize = 256;
ctx->uBlockLength = 192;
ctx->uRounds = 8;
ctx->hashsize = m256_const2_64( 0, 0x100 );
ctx->const1536 = m256_const2_64( 0, 0x600 );
break;
case 512:
ctx->uHashSize = 512;
ctx->uBlockLength = 128;
ctx->uRounds = 10;
ctx->hashsize = m256_const2_64( 0, 0x200 );
ctx->const1536 = m256_const2_64( 0, 0x400 );
break;
default:
return 1;
}
for( i = 0; i < 4; i++ )
for( j = 0; j < nHashSize / 256; j++ )
ctx->state[ i ][ j ] = ctx->hashsize;
for( i = 0; i < 4; i++ )
for( j = nHashSize / 256; j < 4; j++ )
ctx->state[ i ][ j ] = m256_zero;
int vlen = datalen / 32;
const int vblen = ctx->uBlockLength / 16; // 16 bytes per lane
__m256i remainingbits;
if ( databitlen == 1024 )
{
echo_2way_compress( ctx, data, 1 );
ctx->processed_bits = 1024;
remainingbits = m256_const2_64( 0, -1024 );
vlen = 0;
}
else
{
vlen = databitlen / 128; // * 4 lanes / 128 bits per lane
memcpy_256( ctx->buffer, data, vlen );
ctx->processed_bits += (unsigned int)( databitlen );
remainingbits = m256_const2_64( 0, databitlen );
}
ctx->buffer[ vlen ] = m256_const2_64( 0, 0x80 );
memset_zero_256( ctx->buffer + vlen + 1, vblen - vlen - 2 );
ctx->buffer[ vblen-2 ] = m256_const2_64( (uint64_t)ctx->uHashSize << 48, 0 );
ctx->buffer[ vblen-1 ] = m256_const2_64( 0, ctx->processed_bits );
ctx->k = _mm256_add_epi64( ctx->k, remainingbits );
ctx->k = _mm256_sub_epi64( ctx->k, ctx->const1536 );
echo_2way_compress( ctx, ctx->buffer, 1 );
_mm256_store_si256( (__m256i*)hashval + 0, ctx->state[ 0 ][ 0] );
_mm256_store_si256( (__m256i*)hashval + 1, ctx->state[ 1 ][ 0] );
if ( ctx->uHashSize == 512 )
{
_mm256_store_si256( (__m256i*)hashval + 2, ctx->state[ 2 ][ 0 ] );
_mm256_store_si256( (__m256i*)hashval + 3, ctx->state[ 3 ][ 0 ] );
}
return 0;
}
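A hypothetical call into the 2-way API above; vdata must hold two 80 byte inputs interleaved 128 bits at a time, and the names here are illustrative:

void echo512_2way_example( const __m256i* vdata )
{
   echo_2way_context ctx;
   __m256i vhash[4];   // two interleaved 512 bit digests
   echo512_2way_full( &ctx, vhash, vdata, 80 );   // datalen in bytes
}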
#endif // VAES


@@ -1,10 +1,12 @@
#if !defined(ECHO_HASH_4WAY_H__)
#define ECHO_HASH_4WAY_H__ 1
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__VAES__)
#include "simd-utils.h"
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
typedef struct
{
__m512i state[4][4];
@@ -20,6 +22,7 @@ typedef struct
unsigned int processed_bits;
} echo_4way_context __attribute__ ((aligned (64)));
#define echo512_4way_context echo_4way_context
int echo_4way_init( echo_4way_context *state, int hashbitlen );
#define echo512_4way_init( state ) echo_4way_init( state, 512 )
@@ -29,8 +32,8 @@ int echo_4way_update( echo_4way_context *state, const void *data,
unsigned int databitlen);
#define echo512_4way_update echo_4way_update
int echo_close( echo_4way_context *state, void *hashval );
#define echo512_4way_close echo_4way_close
// int echo_4way_close( echo_4way_context *state, void *hashval );
// #define echo512_4way_close echo_4way_close
int echo_4way_update_close( echo_4way_context *state, void *hashval,
const void *data, int databitlen );
@@ -43,5 +46,45 @@ int echo_4way_full( echo_4way_context *ctx, void *hashval, int nHashSize,
#define echo256_4way_full( state, hashval, data, datalen ) \
echo_4way_full( state, hashval, 256, data, datalen )
#endif
#endif
#endif // AVX512
typedef struct
{
__m256i state[4][4];
__m256i buffer[ 4 * 192 / 16 ]; // 4x128 interleaved 192 bytes
__m256i k;
__m256i hashsize;
__m256i const1536;
unsigned int uRounds;
unsigned int uHashSize;
unsigned int uBlockLength;
unsigned int uBufferBytes;
unsigned int processed_bits;
} echo_2way_context __attribute__ ((aligned (64)));
#define echo512_2way_context echo_2way_context
int echo_2way_init( echo_2way_context *state, int hashbitlen );
#define echo512_2way_init( state ) echo_2way_init( state, 512 )
#define echo256_2way_init( state ) echo_2way_init( state, 256 )
int echo_2way_update( echo_2way_context *state, const void *data,
unsigned int databitlen);
#define echo512_2way_update echo_2way_update
int echo_2way_update_close( echo_2way_context *state, void *hashval,
const void *data, int databitlen );
#define echo512_2way_update_close echo_2way_update_close
int echo_2way_full( echo_2way_context *ctx, void *hashval, int nHashSize,
const void *data, int datalen );
#define echo512_2way_full( state, hashval, data, datalen ) \
echo_2way_full( state, hashval, 512, data, datalen )
#define echo256_2way_full( state, hashval, data, datalen ) \
echo_2way_full( state, hashval, 256, data, datalen )
#endif // VAES
#endif // ECHO_HASH_4WAY_H__

algo/fugue/fugue-aesni.c Normal file

@@ -0,0 +1,565 @@
/*
* file : fugue_vperm.c
* version : 1.0.208
* date : 14.12.2010
*
* - vperm and aes_ni implementations of hash function Fugue
* - implements NIST hash api
* - assumes that message length is a multiple of 8 bits
* - _FUGUE_VPERM_ must be defined if compiling with ../main.c
* - default version is vperm, define AES_NI for aes_ni version
*
* Cagdas Calik
* ccalik@metu.edu.tr
* Institute of Applied Mathematics, Middle East Technical University, Turkey.
*
*/
#if defined(__AES__)
#include <x86intrin.h>
#include <memory.h>
#include "fugue-aesni.h"
MYALIGN const unsigned long long _supermix1a[] = {0x0202010807020100, 0x0a05000f06010c0b};
MYALIGN const unsigned long long _supermix1b[] = {0x0b0d080703060504, 0x0e0a090c050e0f0a};
MYALIGN const unsigned long long _supermix1c[] = {0x0402060c070d0003, 0x090a060580808080};
MYALIGN const unsigned long long _supermix1d[] = {0x808080800f0e0d0c, 0x0f0e0d0c80808080};
MYALIGN const unsigned long long _supermix2a[] = {0x07020d0880808080, 0x0b06010c050e0f0a};
MYALIGN const unsigned long long _supermix4a[] = {0x000f0a050c0b0601, 0x0302020404030e09};
MYALIGN const unsigned long long _supermix4b[] = {0x07020d08080e0d0d, 0x07070908050e0f0a};
MYALIGN const unsigned long long _supermix4c[] = {0x0706050403020000, 0x0302000007060504};
MYALIGN const unsigned long long _supermix7a[] = {0x010c0b060d080702, 0x0904030e03000104};
MYALIGN const unsigned long long _supermix7b[] = {0x8080808080808080, 0x0504070605040f06};
MYALIGN const unsigned long long _k_n[] = {0x4E4E4E4E4E4E4E4E, 0x1B1B1B1B0E0E0E0E};
MYALIGN const unsigned char _shift_one_mask[] = {7, 4, 5, 6, 11, 8, 9, 10, 15, 12, 13, 14, 3, 0, 1, 2};
MYALIGN const unsigned char _shift_four_mask[] = {13, 14, 15, 12, 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8};
MYALIGN const unsigned char _shift_seven_mask[] = {10, 11, 8, 9, 14, 15, 12, 13, 2, 3, 0, 1, 6, 7, 4, 5};
MYALIGN const unsigned char _aes_shift_rows[] = {0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11};
MYALIGN const unsigned int _inv_shift_rows[] = {0x070a0d00, 0x0b0e0104, 0x0f020508, 0x0306090c};
MYALIGN const unsigned int _mul2mask[] = {0x1b1b0000, 0x00000000, 0x00000000, 0x00000000};
MYALIGN const unsigned int _mul4mask[] = {0x2d361b00, 0x00000000, 0x00000000, 0x00000000};
MYALIGN const unsigned int _lsbmask2[] = {0x03030303, 0x03030303, 0x03030303, 0x03030303};
MYALIGN const unsigned int _IV512[] = {
0x00000000, 0x00000000, 0x7ea50788, 0x00000000,
0x75af16e6, 0xdbe4d3c5, 0x27b09aac, 0x00000000,
0x17f115d9, 0x54cceeb6, 0x0b02e806, 0x00000000,
0xd1ef924a, 0xc9e2c6aa, 0x9813b2dd, 0x00000000,
0x3858e6ca, 0x3f207f43, 0xe778ea25, 0x00000000,
0xd6dd1f95, 0x1dd16eda, 0x67353ee1, 0x00000000};
#if defined(__SSE4_1__)
#define PACK_S0(s0, s1, t1)\
s0 = _mm_castps_si128(_mm_insert_ps(_mm_castsi128_ps(s0), _mm_castsi128_ps(s1), 0x30))
#define UNPACK_S0(s0, s1, t1)\
s1 = _mm_castps_si128(_mm_insert_ps(_mm_castsi128_ps(s1), _mm_castsi128_ps(s0), 0xc0));\
s0 = mm128_mask_32( s0, 8 )
#define CMIX(s1, s2, r1, r2, t1, t2)\
t1 = s1;\
t1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(t1), _mm_castsi128_ps(s2), _MM_SHUFFLE(3, 0, 2, 1)));\
r1 = _mm_xor_si128(r1, t1);\
r2 = _mm_xor_si128(r2, t1);
#else // SSE2
#define PACK_S0(s0, s1, t1)\
t1 = _mm_shuffle_epi32(s1, _MM_SHUFFLE(0, 3, 3, 3));\
s0 = _mm_xor_si128(s0, t1);
#define UNPACK_S0(s0, s1, t1)\
t1 = _mm_shuffle_epi32(s0, _MM_SHUFFLE(3, 3, 3, 3));\
s1 = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(s1), _mm_castsi128_ps(t1)));\
s0 = mm128_mask_32( s0, 8 )
#define CMIX(s1, s2, r1, r2, t1, t2)\
t1 = _mm_shuffle_epi32(s1, 0xf9);\
t2 = _mm_shuffle_epi32(s2, 0xcf);\
t1 = _mm_xor_si128(t1, t2);\
r1 = _mm_xor_si128(r1, t1);\
r2 = _mm_xor_si128(r2, t1)
#endif
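Decoding the imm8 fields above (0x30 moves source word 0 into destination word 3; 0xc0 moves source word 3 into word 0; mm128_mask_32( s0, 8 ) zeroes word 3), the two macros reduce to these scalar lane moves, sketched on a 4x32 bit view:

typedef struct { uint32_t w[4]; } vec4_sketch;
static inline void pack_s0_sketch( vec4_sketch* s0, const vec4_sketch* s1 )
{  s0->w[3] = s1->w[0];  }
static inline void unpack_s0_sketch( vec4_sketch* s0, vec4_sketch* s1 )
{  s1->w[0] = s0->w[3];  s0->w[3] = 0;  }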
#define TIX256(msg, s10, s8, s24, s0, t1, t2, t3)\
t1 = _mm_shuffle_epi32(s0, _MM_SHUFFLE(3, 3, 0, 3));\
s10 = _mm_xor_si128(s10, t1);\
t1 = _mm_castps_si128(_mm_load_ss((float*)msg));\
s0 = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(s0), _mm_castsi128_ps(t1)));\
t1 = _mm_slli_si128(t1, 8);\
s8 = _mm_xor_si128(s8, t1);\
t1 = _mm_shuffle_epi32(s24, _MM_SHUFFLE(3, 3, 0, 3));\
s0 = _mm_xor_si128(s0, t1)
#define TIX384(msg, s16, s8, s27, s30, s0, s4, t1, t2, t3)\
t1 = _mm_shuffle_epi32(s0, _MM_SHUFFLE(3, 3, 0, 3));\
s16 = _mm_xor_si128(s16, t1);\
t1 = _mm_castps_si128(_mm_load_ss((float*)msg));\
s0 = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(s0), _mm_castsi128_ps(t1)));\
t1 = _mm_slli_si128(t1, 8);\
s8 = _mm_xor_si128(s8, t1);\
t1 = _mm_shuffle_epi32(s27, _MM_SHUFFLE(3, 3, 0, 3));\
s0 = _mm_xor_si128(s0, t1);\
t1 = _mm_shuffle_epi32(s30, _MM_SHUFFLE(3, 3, 0, 3));\
s4 = _mm_xor_si128(s4, t1)
#define TIX512(msg, s22, s8, s24, s27, s30, s0, s4, s7, t1, t2, t3)\
t1 = _mm_shuffle_epi32(s0, _MM_SHUFFLE(3, 3, 0, 3));\
s22 = _mm_xor_si128(s22, t1);\
t1 = _mm_castps_si128(_mm_load_ss((float*)msg));\
s0 = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(s0), _mm_castsi128_ps(t1)));\
t1 = _mm_slli_si128(t1, 8);\
s8 = _mm_xor_si128(s8, t1);\
t1 = _mm_shuffle_epi32(s24, _MM_SHUFFLE(3, 3, 0, 3));\
s0 = _mm_xor_si128(s0, t1);\
t1 = _mm_shuffle_epi32(s27, _MM_SHUFFLE(3, 3, 0, 3));\
s4 = _mm_xor_si128(s4, t1);\
t1 = _mm_shuffle_epi32(s30, _MM_SHUFFLE(3, 3, 0, 3));\
s7 = _mm_xor_si128(s7, t1)
#define PRESUPERMIX(x, t1, s1, s2, t2)\
s1 = x;\
s2 = _mm_add_epi8(x, x);\
t2 = _mm_add_epi8(s2, s2);\
t1 = _mm_srli_epi16(x, 6);\
t1 = _mm_and_si128(t1, M128(_lsbmask2));\
s2 = _mm_xor_si128(s2, _mm_shuffle_epi8(M128(_mul2mask), t1));\
x = _mm_xor_si128(t2, _mm_shuffle_epi8(M128(_mul4mask), t1))
#define SUBSTITUTE(r0, _t1, _t2, _t3, _t0)\
_t2 = _mm_shuffle_epi8(r0, M128(_inv_shift_rows));\
_t2 = _mm_aesenclast_si128( _t2, m128_zero )
#define SUPERMIX(t0, t1, t2, t3, t4)\
PRESUPERMIX(t0, t1, t2, t3, t4);\
POSTSUPERMIX(t0, t1, t2, t3, t4)
#define POSTSUPERMIX(t0, t1, t2, t3, t4)\
t1 = t2;\
t1 = _mm_shuffle_epi8(t1, M128(_supermix1b));\
t4 = t1;\
t1 = _mm_shuffle_epi8(t1, M128(_supermix1c));\
t4 = _mm_xor_si128(t4, t1);\
t1 = t4;\
t1 = _mm_shuffle_epi8(t1, M128(_supermix1d));\
t4 = _mm_xor_si128(t4, t1);\
t1 = t2;\
t1 = _mm_shuffle_epi8(t1, M128(_supermix1a));\
t4 = _mm_xor_si128(t4, t1);\
t2 = _mm_xor_si128(t2, t3);\
t2 = _mm_xor_si128(t2, t0);\
t2 = _mm_shuffle_epi8(t2, M128(_supermix7a));\
t4 = _mm_xor_si128(t4, t2);\
t2 = _mm_shuffle_epi8(t2, M128(_supermix7b));\
t4 = _mm_xor_si128(t4, t2);\
t3 = _mm_shuffle_epi8(t3, M128(_supermix2a));\
t1 = t0;\
t1 = _mm_shuffle_epi8(t1, M128(_supermix4a));\
t4 = _mm_xor_si128(t4, t1);\
t0 = _mm_shuffle_epi8(t0, M128(_supermix4b));\
t0 = _mm_xor_si128(t0, t3);\
t4 = _mm_xor_si128(t4, t0);\
t0 = _mm_shuffle_epi8(t0, M128(_supermix4c));\
t4 = _mm_xor_si128(t4, t0)
#define SUBROUND512_3(r1a, r1b, r1c, r1d, r2a, r2b, r2c, r2d, r3a, r3b, r3c, r3d)\
CMIX(r1a, r1b, r1c, r1d, _t0, _t1);\
PACK_S0(r1c, r1a, _t0);\
SUBSTITUTE(r1c, _t1, _t2, _t3, _t0);\
SUPERMIX(_t2, _t3, _t0, _t1, r1c);\
_t0 = _mm_shuffle_epi32(r1c, 0x39);\
r2c = _mm_xor_si128(r2c, _t0);\
_t0 = mm128_mask_32( _t0, 8 ); \
r2d = _mm_xor_si128(r2d, _t0);\
UNPACK_S0(r1c, r1a, _t3);\
SUBSTITUTE(r2c, _t1, _t2, _t3, _t0);\
SUPERMIX(_t2, _t3, _t0, _t1, r2c);\
_t0 = _mm_shuffle_epi32(r2c, 0x39);\
r3c = _mm_xor_si128(r3c, _t0);\
_t0 = mm128_mask_32( _t0, 8 ); \
r3d = _mm_xor_si128(r3d, _t0);\
UNPACK_S0(r2c, r2a, _t3);\
SUBSTITUTE(r3c, _t1, _t2, _t3, _t0);\
SUPERMIX(_t2, _t3, _t0, _t1, r3c);\
UNPACK_S0(r3c, r3a, _t3)
#define SUBROUND512_4(r1a, r1b, r1c, r1d, r2a, r2b, r2c, r2d, r3a, r3b, r3c, r3d, r4a, r4b, r4c, r4d)\
CMIX(r1a, r1b, r1c, r1d, _t0, _t1);\
PACK_S0(r1c, r1a, _t0);\
SUBSTITUTE(r1c, _t1, _t2, _t3, _t0);\
SUPERMIX(_t2, _t3, _t0, _t1, r1c);\
_t0 = _mm_shuffle_epi32(r1c, 0x39);\
r2c = _mm_xor_si128(r2c, _t0);\
_t0 = mm128_mask_32( _t0, 8 ); \
r2d = _mm_xor_si128(r2d, _t0);\
UNPACK_S0(r1c, r1a, _t3);\
SUBSTITUTE(r2c, _t1, _t2, _t3, _t0);\
SUPERMIX(_t2, _t3, _t0, _t1, r2c);\
_t0 = _mm_shuffle_epi32(r2c, 0x39);\
r3c = _mm_xor_si128(r3c, _t0);\
_t0 = mm128_mask_32( _t0, 8 ); \
r3d = _mm_xor_si128(r3d, _t0);\
UNPACK_S0(r2c, r2a, _t3);\
SUBSTITUTE(r3c, _t1, _t2, _t3, _t0);\
SUPERMIX(_t2, _t3, _t0, _t1, r3c);\
_t0 = _mm_shuffle_epi32(r3c, 0x39);\
r4c = _mm_xor_si128(r4c, _t0);\
_t0 = mm128_mask_32( _t0, 8 ); \
r4d = _mm_xor_si128(r4d, _t0);\
UNPACK_S0(r3c, r3a, _t3);\
SUBSTITUTE(r4c, _t1, _t2, _t3, _t0);\
SUPERMIX(_t2, _t3, _t0, _t1, r4c);\
UNPACK_S0(r4c, r4a, _t3)
#define LOADCOLUMN(x, s, a)\
block[0] = col[(base + a + 0) % s];\
block[1] = col[(base + a + 1) % s];\
block[2] = col[(base + a + 2) % s];\
block[3] = col[(base + a + 3) % s];\
x = _mm_load_si128((__m128i*)block)
#define STORECOLUMN(x, s)\
_mm_store_si128((__m128i*)block, x);\
col[(base + 0) % s] = block[0];\
col[(base + 1) % s] = block[1];\
col[(base + 2) % s] = block[2];\
col[(base + 3) % s] = block[3]
void Compress512(hashState_fugue *ctx, const unsigned char *pmsg, unsigned int uBlockCount)
{
__m128i _t0, _t1, _t2, _t3;
switch(ctx->base)
{
case 1:
TIX512( pmsg, ctx->state[3], ctx->state[10], ctx->state[4],
ctx->state[5], ctx->state[ 6], ctx->state[8],
ctx->state[9], ctx->state[10], _t0, _t1, _t2 );
SUBROUND512_4( ctx->state[8], ctx->state[9], ctx->state[7],
ctx->state[1], ctx->state[7], ctx->state[8],
ctx->state[6], ctx->state[0], ctx->state[6],
ctx->state[7], ctx->state[5], ctx->state[11],
ctx->state[5], ctx->state[6], ctx->state[4],
ctx->state[10] );
ctx->base++;
pmsg += 4;
uBlockCount--;
if( uBlockCount == 0 ) break;
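// deliberate fall through to case 2 when blocks remain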
case 2:
TIX512( pmsg, ctx->state[11], ctx->state[6], ctx->state[0],
ctx->state[ 1], ctx->state[2], ctx->state[4],
ctx->state[ 5], ctx->state[6], _t0, _t1, _t2);
SUBROUND512_4( ctx->state[4], ctx->state[5], ctx->state[3],
ctx->state[9], ctx->state[3], ctx->state[4],
ctx->state[2], ctx->state[8], ctx->state[2],
ctx->state[3], ctx->state[1], ctx->state[7],
ctx->state[1], ctx->state[2], ctx->state[0],
ctx->state[6]);
ctx->base = 0;
pmsg += 4;
uBlockCount--;
break;
}
while( uBlockCount > 0 )
{
TIX512( pmsg, ctx->state[ 7], ctx->state[2], ctx->state[8], ctx->state[9],
ctx->state[10], ctx->state[0], ctx->state[1], ctx->state[2],
_t0, _t1, _t2 );
SUBROUND512_4( ctx->state[0], ctx->state[1], ctx->state[11],
ctx->state[5], ctx->state[11], ctx->state[0],
ctx->state[10], ctx->state[4], ctx->state[10],
ctx->state[11], ctx->state[9], ctx->state[3],
ctx->state[9], ctx->state[10], ctx->state[8],
ctx->state[2] );
ctx->base++;
pmsg += 4;
uBlockCount--;
if( uBlockCount == 0 ) break;
TIX512( pmsg, ctx->state[3], ctx->state[10], ctx->state[4], ctx->state[5],
ctx->state[6], ctx->state[8], ctx->state[9], ctx->state[10],
_t0, _t1, _t2 );
SUBROUND512_4( ctx->state[8], ctx->state[9], ctx->state[7],
ctx->state[1], ctx->state[7], ctx->state[8],
ctx->state[6], ctx->state[0], ctx->state[6],
ctx->state[7], ctx->state[5], ctx->state[11],
ctx->state[5], ctx->state[6], ctx->state[4],
ctx->state[10] );
ctx->base++;
pmsg += 4;
uBlockCount--;
if( uBlockCount == 0 ) break;
TIX512( pmsg, ctx->state[11], ctx->state[6], ctx->state[0], ctx->state[1],
ctx->state[2], ctx->state[4], ctx->state[5], ctx->state[6],
_t0, _t1, _t2);
SUBROUND512_4( ctx->state[4], ctx->state[5], ctx->state[3], ctx->state[9],
ctx->state[3], ctx->state[4], ctx->state[2], ctx->state[8],
ctx->state[2], ctx->state[3], ctx->state[1], ctx->state[7],
ctx->state[1], ctx->state[2], ctx->state[0], ctx->state[6]);
ctx->base = 0;
pmsg += 4;
uBlockCount--;
}
}
void Final512(hashState_fugue *ctx, BitSequence *hashval)
{
unsigned int block[4] __attribute__ ((aligned (32)));
unsigned int col[36] __attribute__ ((aligned (16)));
unsigned int i, base;
__m128i r0, _t0, _t1, _t2, _t3;
for(i = 0; i < 12; i++)
{
_mm_store_si128((__m128i*)block, ctx->state[i]);
col[3 * i + 0] = block[0];
col[3 * i + 1] = block[1];
col[3 * i + 2] = block[2];
}
base = (36 - (12 * ctx->base)) % 36;
for(i = 0; i < 32; i++)
{
// ROR3
base = (base + 33) % 36;
// CMIX
col[(base + 0) % 36] ^= col[(base + 4) % 36];
col[(base + 1) % 36] ^= col[(base + 5) % 36];
col[(base + 2) % 36] ^= col[(base + 6) % 36];
col[(base + 18) % 36] ^= col[(base + 4) % 36];
col[(base + 19) % 36] ^= col[(base + 5) % 36];
col[(base + 20) % 36] ^= col[(base + 6) % 36];
// SMIX
LOADCOLUMN(r0, 36, 0);
SUBSTITUTE(r0, _t1, _t2, _t3, _t0);
SUPERMIX(_t2, _t3, _t0, _t1, r0);
STORECOLUMN(r0, 36);
}
for(i = 0; i < 13; i++)
{
// S4 += S0; S9 += S0; S18 += S0; S27 += S0;
col[(base + 4) % 36] ^= col[(base + 0) % 36];
col[(base + 9) % 36] ^= col[(base + 0) % 36];
col[(base + 18) % 36] ^= col[(base + 0) % 36];
col[(base + 27) % 36] ^= col[(base + 0) % 36];
// ROR9
base = (base + 27) % 36;
// SMIX
LOADCOLUMN(r0, 36, 0);
SUBSTITUTE(r0, _t1, _t2, _t3, _t0);
SUPERMIX(_t2, _t3, _t0, _t1, r0);
STORECOLUMN(r0, 36);
// S4 += S0; S10 += S0; S18 += S0; S27 += S0;
col[(base + 4) % 36] ^= col[(base + 0) % 36];
col[(base + 10) % 36] ^= col[(base + 0) % 36];
col[(base + 18) % 36] ^= col[(base + 0) % 36];
col[(base + 27) % 36] ^= col[(base + 0) % 36];
// ROR9
base = (base + 27) % 36;
// SMIX
LOADCOLUMN(r0, 36, 0);
SUBSTITUTE(r0, _t1, _t2, _t3, _t0);
SUPERMIX(_t2, _t3, _t0, _t1, r0);
STORECOLUMN(r0, 36);
// S4 += S0; S10 += S0; S19 += S0; S27 += S0;
col[(base + 4) % 36] ^= col[(base + 0) % 36];
col[(base + 10) % 36] ^= col[(base + 0) % 36];
col[(base + 19) % 36] ^= col[(base + 0) % 36];
col[(base + 27) % 36] ^= col[(base + 0) % 36];
// ROR9
base = (base + 27) % 36;
// SMIX
LOADCOLUMN(r0, 36, 0);
SUBSTITUTE(r0, _t1, _t2, _t3, _t0);
SUPERMIX(_t2, _t3, _t0, _t1, r0);
STORECOLUMN(r0, 36);
// S4 += S0; S10 += S0; S19 += S0; S28 += S0;
col[(base + 4) % 36] ^= col[(base + 0) % 36];
col[(base + 10) % 36] ^= col[(base + 0) % 36];
col[(base + 19) % 36] ^= col[(base + 0) % 36];
col[(base + 28) % 36] ^= col[(base + 0) % 36];
// ROR8
base = (base + 28) % 36;
// SMIX
LOADCOLUMN(r0, 36, 0);
SUBSTITUTE(r0, _t1, _t2, _t3, _t0);
SUPERMIX(_t2, _t3, _t0, _t1, r0);
STORECOLUMN(r0, 36);
}
// S4 += S0; S9 += S0; S18 += S0; S27 += S0;
col[(base + 4) % 36] ^= col[(base + 0) % 36];
col[(base + 9) % 36] ^= col[(base + 0) % 36];
col[(base + 18) % 36] ^= col[(base + 0) % 36];
col[(base + 27) % 36] ^= col[(base + 0) % 36];
// Transform to the standard basis and store output; S1 || S2 || S3 || S4
LOADCOLUMN(r0, 36, 1);
_mm_store_si128((__m128i*)hashval, r0);
// Transform to the standard basis and store output; S9 || S10 || S11 || S12
LOADCOLUMN(r0, 36, 9);
_mm_store_si128((__m128i*)hashval + 1, r0);
// Transform to the standard basis and store output; S18 || S19 || S20 || S21
LOADCOLUMN(r0, 36, 18);
_mm_store_si128((__m128i*)hashval + 2, r0);
// Transform to the standard basis and store output; S27 || S28 || S29 || S30
LOADCOLUMN(r0, 36, 27);
_mm_store_si128((__m128i*)hashval + 3, r0);
}
HashReturn fugue512_Init(hashState_fugue *ctx, int nHashSize)
{
int i;
ctx->processed_bits = 0;
ctx->uBufferBytes = 0;
ctx->base = 0;
ctx->uHashSize = 512;
ctx->uBlockLength = 4;
for(i = 0; i < 6; i++)
ctx->state[i] = m128_zero;
ctx->state[6] = _mm_load_si128((__m128i*)_IV512 + 0);
ctx->state[7] = _mm_load_si128((__m128i*)_IV512 + 1);
ctx->state[8] = _mm_load_si128((__m128i*)_IV512 + 2);
ctx->state[9] = _mm_load_si128((__m128i*)_IV512 + 3);
ctx->state[10] = _mm_load_si128((__m128i*)_IV512 + 4);
ctx->state[11] = _mm_load_si128((__m128i*)_IV512 + 5);
return SUCCESS;
}
HashReturn fugue512_Update(hashState_fugue *state, const void *data, DataLength databitlen)
{
unsigned int uByteLength, uBlockCount, uRemainingBytes;
uByteLength = (unsigned int)(databitlen / 8);
if(state->uBufferBytes + uByteLength >= state->uBlockLength)
{
if(state->uBufferBytes != 0)
{
// Fill the buffer
memcpy(state->buffer + state->uBufferBytes, (void*)data, state->uBlockLength - state->uBufferBytes);
// Process the buffer
Compress512(state, state->buffer, 1);
state->processed_bits += state->uBlockLength * 8;
data += state->uBlockLength - state->uBufferBytes;
uByteLength -= state->uBlockLength - state->uBufferBytes;
}
// buffer now does not contain any unprocessed bytes
uBlockCount = uByteLength / state->uBlockLength;
uRemainingBytes = uByteLength % state->uBlockLength;
if(uBlockCount > 0)
{
Compress512(state, data, uBlockCount);
state->processed_bits += uBlockCount * state->uBlockLength * 8;
data += uBlockCount * state->uBlockLength;
}
if(uRemainingBytes > 0)
{
memcpy(state->buffer, (void*)data, uRemainingBytes);
}
state->uBufferBytes = uRemainingBytes;
}
else
{
memcpy(state->buffer + state->uBufferBytes, (void*)data, uByteLength);
state->uBufferBytes += uByteLength;
}
return SUCCESS;
}
HashReturn fugue512_Final(hashState_fugue *state, void *hashval)
{
unsigned int i;
BitSequence lengthbuf[8] __attribute__((aligned(64)));
// Update message bit count
state->processed_bits += state->uBufferBytes * 8;
// Pad the remaining buffer bytes with zero
if(state->uBufferBytes != 0)
{
if ( state->uBufferBytes != state->uBlockLength)
memset(state->buffer + state->uBufferBytes, 0, state->uBlockLength - state->uBufferBytes);
Compress512(state, state->buffer, 1);
}
// Last two blocks are message length in bits
for(i = 0; i < 8; i++)
lengthbuf[i] = ((state->processed_bits) >> (8 * (7 - i))) & 0xff;
// Process the last two blocks
Compress512(state, lengthbuf, 2);
// Finalization
Final512(state, hashval);
return SUCCESS;
}
HashReturn fugue512_full(hashState_fugue *hs, void *hashval, const void *data, DataLength databitlen)
{
fugue512_Init(hs, 512);
fugue512_Update(hs, data, databitlen*8);
fugue512_Final(hs, hashval);
return SUCCESS;
}
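Note that despite the parameter name, fugue512_full takes a byte count: it multiplies by 8 before calling fugue512_Update, which expects bits. A hypothetical call for an 80 byte block header:

void fugue512_example( void* hash, const void* data )
{
   hashState_fugue ctx;
   fugue512_full( &ctx, hash, data, 80 );   // 80 bytes, not bits
}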
#endif // AES

algo/fugue/fugue-aesni.h Normal file

@@ -0,0 +1,50 @@
/*
* file : hash_api.h
* version : 1.0.208
* date : 14.12.2010
*
* Fugue vperm implementation Hash API
*
* Cagdas Calik
* ccalik@metu.edu.tr
* Institute of Applied Mathematics, Middle East Technical University, Turkey.
*
*/
#ifndef FUGUE_HASH_API_H
#define FUGUE_HASH_API_H
#if defined(__AES__)
#if !defined(__SSE4_1__)
#error "Unsupported configuration, AES needs SSE4.1. Compile without AES."
#endif
#include "algo/sha/sha3_common.h"
#include "simd-utils.h"
typedef struct
{
__m128i state[12];
unsigned int base;
unsigned int uHashSize;
unsigned int uBlockLength;
unsigned int uBufferBytes;
DataLength processed_bits;
BitSequence buffer[4];
} hashState_fugue __attribute__ ((aligned (64)));
HashReturn fugue512_Init(hashState_fugue *state, int hashbitlen);
HashReturn fugue512_Update(hashState_fugue *state, const void *data, DataLength databitlen);
HashReturn fugue512_Final(hashState_fugue *state, void *hashval);
HashReturn fugue512_full(hashState_fugue *hs, void *hashval, const void *data, DataLength databitlen);
#endif // AES
#endif // HASH_API_H


@@ -15,7 +15,9 @@
#include "miner.h"
#include "simd-utils.h"
#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX2__) && defined(__VAES__)
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
int groestl256_4way_init( groestl256_4way_context* ctx, uint64_t hashlen )
@@ -43,13 +45,13 @@ int groestl256_4way_init( groestl256_4way_context* ctx, uint64_t hashlen )
}
int groestl256_4way_full( groestl256_4way_context* ctx, void* output,
const void* input, uint64_t databitlen )
const void* input, uint64_t datalen )
{
const int len = (int)databitlen / 128;
const int hashlen_m128i = 32 / 16; // bytes to __m128i
const int len = (int)datalen >> 4;
const int hashlen_m128i = 32 >> 4; // bytes to __m128i
const int hash_offset = SIZE256 - hashlen_m128i;
int rem = ctx->rem_ptr;
int blocks = len / SIZE256;
uint64_t blocks = len / SIZE256;
__m512i* in = (__m512i*)input;
int i;
@@ -87,21 +89,21 @@ int groestl256_4way_full( groestl256_4way_context* ctx, void* output,
if ( i == SIZE256 - 1 )
{
// only 1 vector left in buffer, all padding at once
ctx->buffer[i] = m512_const2_64( (uint64_t)blocks << 56, 0x80 );
ctx->buffer[i] = m512_const2_64( blocks << 56, 0x80 );
}
else
{
// add first padding
ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 );
ctx->buffer[i] = m512_const2_64( 0, 0x80 );
// add zero padding
for ( i += 1; i < SIZE256 - 1; i++ )
ctx->buffer[i] = m512_zero;
// add length padding, second last byte is zero unless blocks > 255
ctx->buffer[i] = m512_const2_64( (uint64_t)blocks << 56, 0 );
ctx->buffer[i] = m512_const2_64( blocks << 56, 0 );
}
// digest final padding block and do output transform
// digest final padding block and do output transform
TF512_4way( ctx->chaining, ctx->buffer );
OF512_4way( ctx->chaining );
@@ -120,7 +122,7 @@ int groestl256_4way_update_close( groestl256_4way_context* ctx, void* output,
const int hashlen_m128i = ctx->hashlen / 16; // bytes to __m128i
const int hash_offset = SIZE256 - hashlen_m128i;
int rem = ctx->rem_ptr;
int blocks = len / SIZE256;
uint64_t blocks = len / SIZE256;
__m512i* in = (__m512i*)input;
int i;
@@ -144,20 +146,18 @@ int groestl256_4way_update_close( groestl256_4way_context* ctx, void* output,
if ( i == SIZE256 - 1 )
{
// only 1 vector left in buffer, all padding at once
ctx->buffer[i] = m512_const1_128( _mm_set_epi8(
blocks, blocks>>8,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x80 ) );
ctx->buffer[i] = m512_const2_64( blocks << 56, 0x80 );
}
else
{
// add first padding
ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 );
ctx->buffer[i] = m512_const2_64( 0, 0x80 );
// add zero padding
for ( i += 1; i < SIZE256 - 1; i++ )
ctx->buffer[i] = m512_zero;
// add length padding, second last byte is zero unless blocks > 255
ctx->buffer[i] = m512_const1_128( _mm_set_epi8(
blocks, blocks>>8, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 ) );
ctx->buffer[i] = m512_const2_64( blocks << 56, 0 );
}
// digest final padding block and do output transform
@@ -172,5 +172,159 @@ int groestl256_4way_update_close( groestl256_4way_context* ctx, void* output,
return 0;
}
#endif // VAES
#endif // AVX512
// AVX2 + VAES
int groestl256_2way_init( groestl256_2way_context* ctx, uint64_t hashlen )
{
int i;
ctx->hashlen = hashlen;
if (ctx->chaining == NULL || ctx->buffer == NULL)
return 1;
for ( i = 0; i < SIZE256; i++ )
{
ctx->chaining[i] = m256_zero;
ctx->buffer[i] = m256_zero;
}
// The only non-zero in the IV is len. It can be hard coded.
ctx->chaining[ 3 ] = m256_const2_64( 0, 0x0100000000000000 );
ctx->buf_ptr = 0;
ctx->rem_ptr = 0;
return 0;
}
int groestl256_2way_full( groestl256_2way_context* ctx, void* output,
const void* input, uint64_t datalen )
{
const int len = (int)datalen >> 4;
const int hashlen_m128i = 32 >> 4; // bytes to __m128i
const int hash_offset = SIZE256 - hashlen_m128i;
int rem = ctx->rem_ptr;
uint64_t blocks = len / SIZE256;
__m256i* in = (__m256i*)input;
int i;
if (ctx->chaining == NULL || ctx->buffer == NULL)
return 1;
for ( i = 0; i < SIZE256; i++ )
{
ctx->chaining[i] = m256_zero;
ctx->buffer[i] = m256_zero;
}
// The only non-zero in the IV is len. It can be hard coded.
ctx->chaining[ 3 ] = m256_const2_64( 0, 0x0100000000000000 );
ctx->buf_ptr = 0;
ctx->rem_ptr = 0;
// --- update ---
// digest any full blocks, process directly from input
for ( i = 0; i < blocks; i++ )
TF512_2way( ctx->chaining, &in[ i * SIZE256 ] );
ctx->buf_ptr = blocks * SIZE256;
// copy any remaining data to buffer, it may already contain data
// from a previous update for a midstate precalc
for ( i = 0; i < len % SIZE256; i++ )
ctx->buffer[ rem + i ] = in[ ctx->buf_ptr + i ];
i += rem; // use i as rem_ptr in final
//--- final ---
blocks++; // adjust for final block
if ( i == SIZE256 - 1 )
{
// only 1 vector left in buffer, all padding at once
ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 );
}
else
{
// add first padding
ctx->buffer[i] = m256_const2_64( 0, 0x80 );
// add zero padding
for ( i += 1; i < SIZE256 - 1; i++ )
ctx->buffer[i] = m256_zero;
// add length padding, second last byte is zero unless blocks > 255
ctx->buffer[i] = m256_const2_64( blocks << 56, 0 );
}
// digest final padding block and do output transform
TF512_2way( ctx->chaining, ctx->buffer );
OF512_2way( ctx->chaining );
// store hash result in output
for ( i = 0; i < hashlen_m128i; i++ )
casti_m256i( output, i ) = ctx->chaining[ hash_offset + i ];
return 0;
}
int groestl256_2way_update_close( groestl256_2way_context* ctx, void* output,
const void* input, uint64_t databitlen )
{
const int len = (int)databitlen / 128;
const int hashlen_m128i = ctx->hashlen / 16; // bytes to __m128i
const int hash_offset = SIZE256 - hashlen_m128i;
int rem = ctx->rem_ptr;
uint64_t blocks = len / SIZE256;
__m256i* in = (__m256i*)input;
int i;
// --- update ---
// digest any full blocks, process directly from input
for ( i = 0; i < blocks; i++ )
TF512_2way( ctx->chaining, &in[ i * SIZE256 ] );
ctx->buf_ptr = blocks * SIZE256;
// copy any remaining data to buffer, it may already contain data
// from a previous update for a midstate precalc
for ( i = 0; i < len % SIZE256; i++ )
ctx->buffer[ rem + i ] = in[ ctx->buf_ptr + i ];
i += rem; // use i as rem_ptr in final
//--- final ---
blocks++; // adjust for final block
if ( i == SIZE256 - 1 )
{
// only 1 vector left in buffer, all padding at once
ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 );
}
else
{
// add first padding
ctx->buffer[i] = m256_const2_64( 0, 0x80 );
// add zero padding
for ( i += 1; i < SIZE256 - 1; i++ )
ctx->buffer[i] = m256_zero;
// add length padding, second last byte is zero unless blocks > 255
ctx->buffer[i] = m256_const2_64( blocks << 56, 0 );
}
// digest final padding block and do output transform
TF512_2way( ctx->chaining, ctx->buffer );
OF512_2way( ctx->chaining );
// store hash result in output
for ( i = 0; i < hashlen_m128i; i++ )
casti_m256i( output, i ) = ctx->chaining[ hash_offset + i ];
return 0;
}
#endif // VAES

View File

@@ -18,8 +18,8 @@
#endif
#include <stdlib.h>
#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX2__) && defined(__VAES__)
#define LENGTH (256)
//#include "brg_endian.h"
@@ -48,6 +48,8 @@
#define SIZE256 (SIZE_512/16)
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
typedef struct {
__attribute__ ((aligned (128))) __m512i chaining[SIZE256];
__attribute__ ((aligned (64))) __m512i buffer[SIZE256];
@@ -55,7 +57,7 @@ typedef struct {
int blk_count; // SIZE_m128i
int buf_ptr; // __m128i offset
int rem_ptr;
int databitlen; // bits
// int databitlen; // bits
} groestl256_4way_context;
@@ -74,5 +76,25 @@ int groestl256_4way_update_close( groestl256_4way_context*, void*,
int groestl256_4way_full( groestl256_4way_context*, void*,
const void*, uint64_t );
#endif
#endif
#endif // AVX512
typedef struct {
__attribute__ ((aligned (128))) __m256i chaining[SIZE256];
__attribute__ ((aligned (64))) __m256i buffer[SIZE256];
int hashlen; // byte
int blk_count; // SIZE_m128i
int buf_ptr; // __m128i offset
int rem_ptr;
// int databitlen; // bits
} groestl256_2way_context;
int groestl256_2way_init( groestl256_2way_context*, uint64_t );
int groestl256_2way_update_close( groestl256_2way_context*, void*,
const void*, uint64_t );
int groestl256_2way_full( groestl256_2way_context*, void*,
const void*, uint64_t );
#endif // VAES
#endif // GROESTL256_HASH_4WAY_H__

View File

@@ -7,13 +7,13 @@
* This code is placed in the public domain
*/
#if !defined(GROESTL256_INTR_4WAY_H__)
#define GROESTL256_INTR_4WAY_H__ 1
#include "groestl256-hash-4way.h"
#if defined(__VAES__)
#if defined(__AVX2__) && defined(__VAES__)
static const __m128i round_const_l0[] __attribute__ ((aligned (64))) =
{
{ 0x7060504030201000, 0xffffffffffffffff },
@@ -42,6 +42,8 @@ static const __m128i round_const_l7[] __attribute__ ((aligned (64))) =
{ 0x0000000000000000, 0x8696a6b6c6d6e6f6 }
};
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
static const __m512i TRANSP_MASK = { 0x0d0509010c040800, 0x0f070b030e060a02,
0x1d1519111c141810, 0x1f171b131e161a12,
0x2d2529212c242820, 0x2f272b232e262a22,
@@ -499,5 +501,398 @@ void OF512_4way( __m512i* chaining )
chaining[3] = xmm11;
}
#endif // AVX512
static const __m256i TRANSP_MASK_2WAY =
{ 0x0d0509010c040800, 0x0f070b030e060a02,
0x1d1519111c141810, 0x1f171b131e161a12 };
static const __m256i SUBSH_MASK0_2WAY =
{ 0x0c0f0104070b0e00, 0x03060a0d08020509,
0x1c1f1114171b1e10, 0x13161a1d18121519 };
static const __m256i SUBSH_MASK1_2WAY =
{ 0x0e090205000d0801, 0x04070c0f0a03060b,
0x1e191215101d1811, 0x14171c1f1a13161b };
static const __m256i SUBSH_MASK2_2WAY =
{ 0x080b0306010f0a02, 0x05000e090c04070d,
0x181b1316111f1a12, 0x15101e191c14171d };
static const __m256i SUBSH_MASK3_2WAY =
{ 0x0a0d040702090c03, 0x0601080b0e05000f,
0x1a1d141712191c13, 0x1611181b1e15101f };
static const __m256i SUBSH_MASK4_2WAY =
{ 0x0b0e0500030a0d04, 0x0702090c0f060108,
0x1b1e1510131a1d14, 0x1712191c1f161118 };
static const __m256i SUBSH_MASK5_2WAY =
{ 0x0d080601040c0f05, 0x00030b0e0907020a,
0x1d181611141c1f15, 0x10131b1e1917121a };
static const __m256i SUBSH_MASK6_2WAY =
{ 0x0f0a0702050e0906, 0x01040d080b00030c,
0x1f1a1712151e1916, 0x11141d181b10131c };
static const __m256i SUBSH_MASK7_2WAY =
{ 0x090c000306080b07, 0x02050f0a0d01040e,
0x191c101316181b17, 0x12151f1a1d11141e, };
#define tos(a) #a
#define tostr(a) tos(a)
/* xmm[i] will be multiplied by 2
* xmm[j] will be lost
* xmm[k] has to be all 0x1b */
#define MUL2_2WAY(i, j, k){\
j = _mm256_xor_si256(j, j);\
j = _mm256_cmpgt_epi8(j, i );\
i = _mm256_add_epi8(i, i);\
j = _mm256_and_si256(j, k);\
i = _mm256_xor_si256(i, j);\
}
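MUL2_2WAY is byte-wise doubling in GF(2^8) with the AES reduction polynomial: the signed compare yields 0xFF in bytes whose top bit is set, selecting the 0x1b reduction after the left shift. A scalar model of one byte:

#include <stdint.h>

// xtime() from AES: multiply by 2 in GF(2^8) modulo x^8 + x^4 + x^3 + x + 1.
static inline uint8_t gf256_mul2( uint8_t x )
{
   uint8_t mask = (uint8_t)( (int8_t)x >> 7 );       // 0xff if MSB set, else 0
   return (uint8_t)( ( x << 1 ) ^ ( mask & 0x1b ) );
}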
#define MixBytes_2way(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
/* t_i = a_i + a_{i+1} */\
b6 = a0;\
b7 = a1;\
a0 = _mm256_xor_si256(a0, a1);\
b0 = a2;\
a1 = _mm256_xor_si256(a1, a2);\
b1 = a3;\
a2 = _mm256_xor_si256(a2, a3);\
b2 = a4;\
a3 = _mm256_xor_si256(a3, a4);\
b3 = a5;\
a4 = _mm256_xor_si256(a4, a5);\
b4 = a6;\
a5 = _mm256_xor_si256(a5, a6);\
b5 = a7;\
a6 = _mm256_xor_si256(a6, a7);\
a7 = _mm256_xor_si256(a7, b6);\
\
/* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
b0 = _mm256_xor_si256(b0, a4);\
b6 = _mm256_xor_si256(b6, a4);\
b1 = _mm256_xor_si256(b1, a5);\
b7 = _mm256_xor_si256(b7, a5);\
b2 = _mm256_xor_si256(b2, a6);\
b0 = _mm256_xor_si256(b0, a6);\
/* spill values y_4, y_5 to memory */\
TEMP0 = b0;\
b3 = _mm256_xor_si256(b3, a7);\
b1 = _mm256_xor_si256(b1, a7);\
TEMP1 = b1;\
b4 = _mm256_xor_si256(b4, a0);\
b2 = _mm256_xor_si256(b2, a0);\
/* save values t0, t1, t2 to xmm8, xmm9 and memory */\
b0 = a0;\
b5 = _mm256_xor_si256(b5, a1);\
b3 = _mm256_xor_si256(b3, a1);\
b1 = a1;\
b6 = _mm256_xor_si256(b6, a2);\
b4 = _mm256_xor_si256(b4, a2);\
TEMP2 = a2;\
b7 = _mm256_xor_si256(b7, a3);\
b5 = _mm256_xor_si256(b5, a3);\
\
/* compute x_i = t_i + t_{i+3} */\
a0 = _mm256_xor_si256(a0, a3);\
a1 = _mm256_xor_si256(a1, a4);\
a2 = _mm256_xor_si256(a2, a5);\
a3 = _mm256_xor_si256(a3, a6);\
a4 = _mm256_xor_si256(a4, a7);\
a5 = _mm256_xor_si256(a5, b0);\
a6 = _mm256_xor_si256(a6, b1);\
a7 = _mm256_xor_si256(a7, TEMP2);\
\
/* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
/* compute w_i : add y_{i+4} */\
b1 = m256_const1_64( 0x1b1b1b1b1b1b1b1b );\
MUL2_2WAY(a0, b0, b1);\
a0 = _mm256_xor_si256(a0, TEMP0);\
MUL2_2WAY(a1, b0, b1);\
a1 = _mm256_xor_si256(a1, TEMP1);\
MUL2_2WAY(a2, b0, b1);\
a2 = _mm256_xor_si256(a2, b2);\
MUL2_2WAY(a3, b0, b1);\
a3 = _mm256_xor_si256(a3, b3);\
MUL2_2WAY(a4, b0, b1);\
a4 = _mm256_xor_si256(a4, b4);\
MUL2_2WAY(a5, b0, b1);\
a5 = _mm256_xor_si256(a5, b5);\
MUL2_2WAY(a6, b0, b1);\
a6 = _mm256_xor_si256(a6, b6);\
MUL2_2WAY(a7, b0, b1);\
a7 = _mm256_xor_si256(a7, b7);\
\
/* compute v_i : double w_i */\
/* add to y_4 y_5 .. v3, v4, ... */\
MUL2_2WAY(a0, b0, b1);\
b5 = _mm256_xor_si256(b5, a0);\
MUL2_2WAY(a1, b0, b1);\
b6 = _mm256_xor_si256(b6, a1);\
MUL2_2WAY(a2, b0, b1);\
b7 = _mm256_xor_si256(b7, a2);\
MUL2_2WAY(a5, b0, b1);\
b2 = _mm256_xor_si256(b2, a5);\
MUL2_2WAY(a6, b0, b1);\
b3 = _mm256_xor_si256(b3, a6);\
MUL2_2WAY(a7, b0, b1);\
b4 = _mm256_xor_si256(b4, a7);\
MUL2_2WAY(a3, b0, b1);\
MUL2_2WAY(a4, b0, b1);\
b0 = TEMP0;\
b1 = TEMP1;\
b0 = _mm256_xor_si256(b0, a3);\
b1 = _mm256_xor_si256(b1, a4);\
}/*MixBytes*/
#define ROUND_2WAY(i, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
/* AddRoundConstant */\
b1 = m256_const2_64( 0xffffffffffffffff, 0 ); \
a0 = _mm256_xor_si256( a0, m256_const1_128( round_const_l0[i] ) );\
a1 = _mm256_xor_si256( a1, b1 );\
a2 = _mm256_xor_si256( a2, b1 );\
a3 = _mm256_xor_si256( a3, b1 );\
a4 = _mm256_xor_si256( a4, b1 );\
a5 = _mm256_xor_si256( a5, b1 );\
a6 = _mm256_xor_si256( a6, b1 );\
a7 = _mm256_xor_si256( a7, m256_const1_128( round_const_l7[i] ) );\
\
/* ShiftBytes + SubBytes (interleaved) */\
b0 = _mm256_xor_si256( b0, b0 );\
a0 = _mm256_shuffle_epi8( a0, SUBSH_MASK0_2WAY );\
a0 = _mm256_aesenclast_epi128(a0, b0 );\
a1 = _mm256_shuffle_epi8( a1, SUBSH_MASK1_2WAY );\
a1 = _mm256_aesenclast_epi128(a1, b0 );\
a2 = _mm256_shuffle_epi8( a2, SUBSH_MASK2_2WAY );\
a2 = _mm256_aesenclast_epi128(a2, b0 );\
a3 = _mm256_shuffle_epi8( a3, SUBSH_MASK3_2WAY );\
a3 = _mm256_aesenclast_epi128(a3, b0 );\
a4 = _mm256_shuffle_epi8( a4, SUBSH_MASK4_2WAY );\
a4 = _mm256_aesenclast_epi128(a4, b0 );\
a5 = _mm256_shuffle_epi8( a5, SUBSH_MASK5_2WAY );\
a5 = _mm256_aesenclast_epi128(a5, b0 );\
a6 = _mm256_shuffle_epi8( a6, SUBSH_MASK6_2WAY );\
a6 = _mm256_aesenclast_epi128(a6, b0 );\
a7 = _mm256_shuffle_epi8( a7, SUBSH_MASK7_2WAY );\
a7 = _mm256_aesenclast_epi128( a7, b0 );\
\
/* MixBytes */\
MixBytes_2way(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7);\
\
}
/* 10 rounds, P and Q in parallel */
#define ROUNDS_P_Q_2WAY(){\
ROUND_2WAY(0, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
ROUND_2WAY(1, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
ROUND_2WAY(2, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
ROUND_2WAY(3, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
ROUND_2WAY(4, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
ROUND_2WAY(5, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
ROUND_2WAY(6, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
ROUND_2WAY(7, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
ROUND_2WAY(8, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
ROUND_2WAY(9, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
}
#define Matrix_Transpose_A_2way(i0, i1, i2, i3, o1, o2, o3, t0){\
t0 = TRANSP_MASK_2WAY;\
\
i0 = _mm256_shuffle_epi8( i0, t0 );\
i1 = _mm256_shuffle_epi8( i1, t0 );\
i2 = _mm256_shuffle_epi8( i2, t0 );\
i3 = _mm256_shuffle_epi8( i3, t0 );\
\
o1 = i0;\
t0 = i2;\
\
i0 = _mm256_unpacklo_epi16( i0, i1 );\
o1 = _mm256_unpackhi_epi16( o1, i1 );\
i2 = _mm256_unpacklo_epi16( i2, i3 );\
t0 = _mm256_unpackhi_epi16( t0, i3 );\
\
i0 = _mm256_shuffle_epi32( i0, 216 );\
o1 = _mm256_shuffle_epi32( o1, 216 );\
i2 = _mm256_shuffle_epi32( i2, 216 );\
t0 = _mm256_shuffle_epi32( t0, 216 );\
\
o2 = i0;\
o3 = o1;\
\
i0 = _mm256_unpacklo_epi32( i0, i2 );\
o1 = _mm256_unpacklo_epi32( o1, t0 );\
o2 = _mm256_unpackhi_epi32( o2, i2 );\
o3 = _mm256_unpackhi_epi32( o3, t0 );\
}/**/
#define Matrix_Transpose_B_2way(i0, i1, i2, i3, i4, i5, i6, i7, o1, o2, o3, o4, o5, o6, o7){\
o1 = i0;\
o2 = i1;\
i0 = _mm256_unpacklo_epi64( i0, i4 );\
o1 = _mm256_unpackhi_epi64( o1, i4 );\
o3 = i1;\
o4 = i2;\
o2 = _mm256_unpacklo_epi64( o2, i5 );\
o3 = _mm256_unpackhi_epi64( o3, i5 );\
o5 = i2;\
o6 = i3;\
o4 = _mm256_unpacklo_epi64( o4, i6 );\
o5 = _mm256_unpackhi_epi64( o5, i6 );\
o7 = i3;\
o6 = _mm256_unpacklo_epi64( o6, i7 );\
o7 = _mm256_unpackhi_epi64( o7, i7 );\
}/**/
#define Matrix_Transpose_B_INV_2way(i0, i1, i2, i3, i4, i5, i6, i7, o0, o1, o2, o3){\
o0 = i0;\
i0 = _mm256_unpacklo_epi64( i0, i1 );\
o0 = _mm256_unpackhi_epi64( o0, i1 );\
o1 = i2;\
i2 = _mm256_unpacklo_epi64( i2, i3 );\
o1 = _mm256_unpackhi_epi64( o1, i3 );\
o2 = i4;\
i4 = _mm256_unpacklo_epi64( i4, i5 );\
o2 = _mm256_unpackhi_epi64( o2, i5 );\
o3 = i6;\
i6 = _mm256_unpacklo_epi64( i6, i7 );\
o3 = _mm256_unpackhi_epi64( o3, i7 );\
}/**/
#define Matrix_Transpose_O_B_2way(i0, i1, i2, i3, i4, i5, i6, i7, t0){\
t0 = _mm256_xor_si256( t0, t0 );\
i1 = i0;\
i3 = i2;\
i5 = i4;\
i7 = i6;\
i0 = _mm256_unpacklo_epi64( i0, t0 );\
i1 = _mm256_unpackhi_epi64( i1, t0 );\
i2 = _mm256_unpacklo_epi64( i2, t0 );\
i3 = _mm256_unpackhi_epi64( i3, t0 );\
i4 = _mm256_unpacklo_epi64( i4, t0 );\
i5 = _mm256_unpackhi_epi64( i5, t0 );\
i6 = _mm256_unpacklo_epi64( i6, t0 );\
i7 = _mm256_unpackhi_epi64( i7, t0 );\
}/**/
#define Matrix_Transpose_O_B_INV_2way(i0, i1, i2, i3, i4, i5, i6, i7){\
i0 = _mm256_unpacklo_epi64( i0, i1 );\
i2 = _mm256_unpacklo_epi64( i2, i3 );\
i4 = _mm256_unpacklo_epi64( i4, i5 );\
i6 = _mm256_unpacklo_epi64( i6, i7 );\
}/**/
void TF512_2way( __m256i* chaining, __m256i* message )
{
static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
static __m256i TEMP0;
static __m256i TEMP1;
static __m256i TEMP2;
/* load message into registers xmm12 - xmm15 */
xmm12 = message[0];
xmm13 = message[1];
xmm14 = message[2];
xmm15 = message[3];
/* transform message M from column ordering into row ordering */
/* we first put two rows (64 bit) of the message into one 128-bit xmm register */
Matrix_Transpose_A_2way(xmm12, xmm13, xmm14, xmm15, xmm2, xmm6, xmm7, xmm0);
/* load previous chaining value */
/* we first put two rows (64 bit) of the CV into one 128-bit xmm register */
xmm8 = chaining[0];
xmm0 = chaining[1];
xmm4 = chaining[2];
xmm5 = chaining[3];
/* xor message to CV get input of P */
/* result: CV+M in xmm8, xmm0, xmm4, xmm5 */
xmm8 = _mm256_xor_si256( xmm8, xmm12 );
xmm0 = _mm256_xor_si256( xmm0, xmm2 );
xmm4 = _mm256_xor_si256( xmm4, xmm6 );
xmm5 = _mm256_xor_si256( xmm5, xmm7 );
/* there are now 2 rows of the Groestl state (P and Q) in each xmm register */
/* unpack to get 1 row of P (64 bit) and Q (64 bit) into one xmm register */
/* result: the 8 rows of P and Q in xmm8 - xmm12 */
Matrix_Transpose_B_2way(xmm8, xmm0, xmm4, xmm5, xmm12, xmm2, xmm6, xmm7, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);
/* compute the two permutations P and Q in parallel */
ROUNDS_P_Q_2WAY();
/* unpack again to get two rows of P or two rows of Q in one xmm register */
Matrix_Transpose_B_INV_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3);
/* xor output of P and Q */
/* result: P(CV+M)+Q(M) in xmm0...xmm3 */
xmm0 = _mm256_xor_si256( xmm0, xmm8 );
xmm1 = _mm256_xor_si256( xmm1, xmm10 );
xmm2 = _mm256_xor_si256( xmm2, xmm12 );
xmm3 = _mm256_xor_si256( xmm3, xmm14 );
/* xor CV (feed-forward) */
/* result: P(CV+M)+Q(M)+CV in xmm0...xmm3 */
xmm0 = _mm256_xor_si256( xmm0, (chaining[0]) );
xmm1 = _mm256_xor_si256( xmm1, (chaining[1]) );
xmm2 = _mm256_xor_si256( xmm2, (chaining[2]) );
xmm3 = _mm256_xor_si256( xmm3, (chaining[3]) );
/* store CV */
chaining[0] = xmm0;
chaining[1] = xmm1;
chaining[2] = xmm2;
chaining[3] = xmm3;
return;
}
void OF512_2way( __m256i* chaining )
{
static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
static __m256i TEMP0;
static __m256i TEMP1;
static __m256i TEMP2;
/* load CV into registers xmm8, xmm10, xmm12, xmm14 */
xmm8 = chaining[0];
xmm10 = chaining[1];
xmm12 = chaining[2];
xmm14 = chaining[3];
/* there are now 2 rows of the CV in one xmm register */
/* unpack to get 1 row of P (64 bit) into one half of an xmm register */
/* result: the 8 input rows of P in xmm8 - xmm15 */
Matrix_Transpose_O_B_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0);
/* compute the permutation P */
/* result: the output of P(CV) in xmm8 - xmm15 */
ROUNDS_P_Q_2WAY();
/* unpack again to get two rows of P in one xmm register */
/* result: P(CV) in xmm8, xmm10, xmm12, xmm14 */
Matrix_Transpose_O_B_INV_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);
/* xor CV to P output (feed-forward) */
/* result: P(CV)+CV in xmm8, xmm10, xmm12, xmm14 */
xmm8 = _mm256_xor_si256( xmm8, (chaining[0]) );
xmm10 = _mm256_xor_si256( xmm10, (chaining[1]) );
xmm12 = _mm256_xor_si256( xmm12, (chaining[2]) );
xmm14 = _mm256_xor_si256( xmm14, (chaining[3]) );
/* transform state back from row ordering into column ordering */
/* result: final hash value in xmm9, xmm11 */
Matrix_Transpose_A_2way(xmm8, xmm10, xmm12, xmm14, xmm4, xmm9, xmm11, xmm0);
/* we only need to return the truncated half of the state */
chaining[2] = xmm9;
chaining[3] = xmm11;
}
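Together these two functions are the standard Groestl construction; in the specification's notation the compression function and output transform computed above are

f(h, m) = P(h \oplus m) \oplus Q(m) \oplus h
\Omega(x) = \mathrm{trunc}_{256}( P(x) \oplus x )

which matches the inline comments P(CV+M)+Q(M)+CV and P(CV)+CV with truncation to the last two rows of the state.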
#endif // VAES
#endif // GROESTL512_INTR_4WAY_H__
#endif // GROESTL256_INTR_4WAY_H__

View File

@@ -15,7 +15,9 @@
#include "miner.h"
#include "simd-utils.h"
#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX2__) && defined(__VAES__)
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
int groestl512_4way_init( groestl512_4way_context* ctx, uint64_t hashlen )
{
@@ -41,7 +43,7 @@ int groestl512_4way_update_close( groestl512_4way_context* ctx, void* output,
const int hashlen_m128i = 64 / 16; // bytes to __m128i
const int hash_offset = SIZE512 - hashlen_m128i;
int rem = ctx->rem_ptr;
int blocks = len / SIZE512;
uint64_t blocks = len / SIZE512;
__m512i* in = (__m512i*)input;
int i;
@@ -62,16 +64,14 @@ int groestl512_4way_update_close( groestl512_4way_context* ctx, void* output,
if ( i == SIZE512 - 1 )
{
// only 1 vector left in buffer, all padding at once
ctx->buffer[i] = m512_const1_128( _mm_set_epi8(
blocks, blocks>>8,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x80 ) );
ctx->buffer[i] = m512_const2_64( blocks << 56, 0x80 );
}
else
{
ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 );
ctx->buffer[i] = m512_const2_64( 0, 0x80 );
for ( i += 1; i < SIZE512 - 1; i++ )
ctx->buffer[i] = m512_zero;
ctx->buffer[i] = m512_const1_128( _mm_set_epi8(
blocks, blocks>>8, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 ) );
ctx->buffer[i] = m512_const2_64( blocks << 56, 0 );
}
TF1024_4way( ctx->chaining, ctx->buffer );
@@ -122,7 +122,7 @@ int groestl512_4way_full( groestl512_4way_context* ctx, void* output,
}
else
{
ctx->buffer[i] = m512_const4_64( 0, 0x80, 0, 0x80 );
ctx->buffer[i] = m512_const2_64( 0, 0x80 );
for ( i += 1; i < SIZE512 - 1; i++ )
ctx->buffer[i] = m512_zero;
ctx->buffer[i] = m512_const2_64( blocks << 56, 0 );
@@ -137,5 +137,128 @@ int groestl512_4way_full( groestl512_4way_context* ctx, void* output,
return 0;
}
#endif // AVX512
// AVX2 + VAES
int groestl512_2way_init( groestl512_2way_context* ctx, uint64_t hashlen )
{
if (ctx->chaining == NULL || ctx->buffer == NULL)
return 1;
memset_zero_256( ctx->chaining, SIZE512 );
memset_zero_256( ctx->buffer, SIZE512 );
// The only non-zero in the IV is len. It can be hard coded.
ctx->chaining[ 6 ] = m256_const2_64( 0x0200000000000000, 0 );
ctx->buf_ptr = 0;
ctx->rem_ptr = 0;
return 0;
}
int groestl512_2way_update_close( groestl512_2way_context* ctx, void* output,
const void* input, uint64_t databitlen )
{
const int len = (int)databitlen / 128;
const int hashlen_m128i = 64 / 16; // bytes to __m128i
const int hash_offset = SIZE512 - hashlen_m128i;
int rem = ctx->rem_ptr;
uint64_t blocks = len / SIZE512;
__m256i* in = (__m256i*)input;
int i;
// --- update ---
for ( i = 0; i < blocks; i++ )
TF1024_2way( ctx->chaining, &in[ i * SIZE512 ] );
ctx->buf_ptr = blocks * SIZE512;
for ( i = 0; i < len % SIZE512; i++ )
ctx->buffer[ rem + i ] = in[ ctx->buf_ptr + i ];
i += rem;
//--- final ---
blocks++; // adjust for final block
if ( i == SIZE512 - 1 )
{
// only 1 vector left in buffer, all padding at once
ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 );
}
else
{
ctx->buffer[i] = m256_const2_64( 0, 0x80 );
for ( i += 1; i < SIZE512 - 1; i++ )
ctx->buffer[i] = m256_zero;
ctx->buffer[i] = m256_const2_64( blocks << 56, 0 );
}
TF1024_2way( ctx->chaining, ctx->buffer );
OF1024_2way( ctx->chaining );
for ( i = 0; i < hashlen_m128i; i++ )
casti_m256i( output, i ) = ctx->chaining[ hash_offset + i ];
return 0;
}
int groestl512_2way_full( groestl512_2way_context* ctx, void* output,
const void* input, uint64_t datalen )
{
const int len = (int)datalen >> 4;
const int hashlen_m128i = 64 >> 4; // bytes to __m128i
const int hash_offset = SIZE512 - hashlen_m128i;
uint64_t blocks = len / SIZE512;
__m256i* in = (__m256i*)input;
int i;
// --- init ---
memset_zero_256( ctx->chaining, SIZE512 );
memset_zero_256( ctx->buffer, SIZE512 );
ctx->chaining[ 6 ] = m256_const2_64( 0x0200000000000000, 0 );
ctx->buf_ptr = 0;
ctx->rem_ptr = 0;
// --- update ---
for ( i = 0; i < blocks; i++ )
TF1024_2way( ctx->chaining, &in[ i * SIZE512 ] );
ctx->buf_ptr = blocks * SIZE512;
for ( i = 0; i < len % SIZE512; i++ )
ctx->buffer[ ctx->rem_ptr + i ] = in[ ctx->buf_ptr + i ];
i += ctx->rem_ptr;
// --- close ---
blocks++;
if ( i == SIZE512 - 1 )
{
// only 1 vector left in buffer, all padding at once
ctx->buffer[i] = m256_const2_64( blocks << 56, 0x80 );
}
else
{
ctx->buffer[i] = m256_const2_64( 0, 0x80 );
for ( i += 1; i < SIZE512 - 1; i++ )
ctx->buffer[i] = m256_zero;
ctx->buffer[i] = m256_const2_64( blocks << 56, 0 );
}
TF1024_2way( ctx->chaining, ctx->buffer );
OF1024_2way( ctx->chaining );
for ( i = 0; i < hashlen_m128i; i++ )
casti_m256i( output, i ) = ctx->chaining[ hash_offset + i ];
return 0;
}
#endif // VAES

View File

@@ -10,7 +10,7 @@
#endif
#include <stdlib.h>
#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX2__) && defined(__VAES__)
#define LENGTH (512)
@@ -36,20 +36,19 @@
#define SIZE512 (SIZE_1024/16)
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
typedef struct {
__attribute__ ((aligned (128))) __m512i chaining[SIZE512];
__attribute__ ((aligned (64))) __m512i buffer[SIZE512];
int blk_count; // SIZE_m128i
int buf_ptr; // __m128i offset
int rem_ptr;
int databitlen; // bits
} groestl512_4way_context;
int groestl512_4way_init( groestl512_4way_context*, uint64_t );
//int reinit_groestl( hashState_groestl* );
int groestl512_4way_update( groestl512_4way_context*, const void*,
uint64_t );
int groestl512_4way_close( groestl512_4way_context*, void* );
@@ -58,5 +57,29 @@ int groestl512_4way_update_close( groestl512_4way_context*, void*,
int groestl512_4way_full( groestl512_4way_context*, void*,
const void*, uint64_t );
#endif // AVX512
// AVX2 + VAES
typedef struct {
__attribute__ ((aligned (128))) __m256i chaining[SIZE512];
__attribute__ ((aligned (64))) __m256i buffer[SIZE512];
int blk_count; // SIZE_m128i
int buf_ptr; // __m128i offset
int rem_ptr;
} groestl512_2way_context;
int groestl512_2way_init( groestl512_2way_context*, uint64_t );
int groestl512_2way_update( groestl512_2way_context*, const void*,
uint64_t );
int groestl512_2way_close( groestl512_2way_context*, void* );
int groestl512_2way_update_close( groestl512_2way_context*, void*,
const void*, uint64_t );
int groestl512_2way_full( groestl512_2way_context*, void*,
const void*, uint64_t );
#endif // VAES
#endif // GROESTL512_HASH_4WAY_H__

View File

@@ -7,13 +7,12 @@
* This code is placed in the public domain
*/
#if !defined(GROESTL512_INTR_4WAY_H__)
#define GROESTL512_INTR_4WAY_H__ 1
#include "groestl512-hash-4way.h"
#if defined(__VAES__)
#if defined(__AVX2__) && defined(__VAES__)
static const __m128i round_const_p[] __attribute__ ((aligned (64))) =
{
@@ -51,6 +50,8 @@ static const __m128i round_const_q[] __attribute__ ((aligned (64))) =
{ 0x8292a2b2c2d2e2f2, 0x0212223242526272 }
};
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
static const __m512i TRANSP_MASK = { 0x0d0509010c040800, 0x0f070b030e060a02,
0x1d1519111c141810, 0x1f171b131e161a12,
0x2d2529212c242820, 0x2f272b232e262a22,
@@ -661,5 +662,578 @@ void OF1024_4way( __m512i* chaining )
return;
}
#endif // AVX512
// AVX2 + VAES
static const __m256i TRANSP_MASK_2WAY =
{ 0x0d0509010c040800, 0x0f070b030e060a02,
0x1d1519111c141810, 0x1f171b131e161a12 };
static const __m256i SUBSH_MASK0_2WAY =
{ 0x0b0e0104070a0d00, 0x0306090c0f020508,
0x1b1e1114171a1d10, 0x1316191c1f121518 };
static const __m256i SUBSH_MASK1_2WAY =
{ 0x0c0f0205080b0e01, 0x04070a0d00030609,
0x1c1f1215181b1e11, 0x14171a1d10131619 };
static const __m256i SUBSH_MASK2_2WAY =
{ 0x0d000306090c0f02, 0x05080b0e0104070a,
0x1d101316191c1f12, 0x15181b1e1114171a };
static const __m256i SUBSH_MASK3_2WAY =
{ 0x0e0104070a0d0003, 0x06090c0f0205080b,
0x1e1114171a1d1013, 0x16191c1f1215181b };
static const __m256i SUBSH_MASK4_2WAY =
{ 0x0f0205080b0e0104, 0x070a0d000306090c,
0x1f1215181b1e1114, 0x171a1d101316191c };
static const __m256i SUBSH_MASK5_2WAY =
{ 0x000306090c0f0205, 0x080b0e0104070a0d,
0x101316191c1f1215, 0x181b1e1114171a1d };
static const __m256i SUBSH_MASK6_2WAY =
{ 0x0104070a0d000306, 0x090c0f0205080b0e,
0x1114171a1d101316, 0x191c1f1215181b1e };
static const __m256i SUBSH_MASK7_2WAY =
{ 0x06090c0f0205080b, 0x0e0104070a0d0003,
0x16191c1f1215181b, 0x1e1114171a1d1013 };
#define tos(a) #a
#define tostr(a) tos(a)
/* xmm[i] will be multiplied by 2
* xmm[j] will be lost
* xmm[k] has to be all 0x1b */
#define MUL2_2WAY(i, j, k){\
j = _mm256_xor_si256(j, j);\
j = _mm256_cmpgt_epi8(j, i );\
i = _mm256_add_epi8(i, i);\
j = _mm256_and_si256(j, k);\
i = _mm256_xor_si256(i, j);\
}
#define MixBytes_2way(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
/* t_i = a_i + a_{i+1} */\
b6 = a0;\
b7 = a1;\
a0 = _mm256_xor_si256(a0, a1);\
b0 = a2;\
a1 = _mm256_xor_si256(a1, a2);\
b1 = a3;\
a2 = _mm256_xor_si256(a2, a3);\
b2 = a4;\
a3 = _mm256_xor_si256(a3, a4);\
b3 = a5;\
a4 = _mm256_xor_si256(a4, a5);\
b4 = a6;\
a5 = _mm256_xor_si256(a5, a6);\
b5 = a7;\
a6 = _mm256_xor_si256(a6, a7);\
a7 = _mm256_xor_si256(a7, b6);\
\
/* build y4 y5 y6 ... in regs xmm8, xmm9, xmm10 by adding t_i*/\
b0 = _mm256_xor_si256(b0, a4);\
b6 = _mm256_xor_si256(b6, a4);\
b1 = _mm256_xor_si256(b1, a5);\
b7 = _mm256_xor_si256(b7, a5);\
b2 = _mm256_xor_si256(b2, a6);\
b0 = _mm256_xor_si256(b0, a6);\
/* spill values y_4, y_5 to memory */\
TEMP0 = b0;\
b3 = _mm256_xor_si256(b3, a7);\
b1 = _mm256_xor_si256(b1, a7);\
TEMP1 = b1;\
b4 = _mm256_xor_si256(b4, a0);\
b2 = _mm256_xor_si256(b2, a0);\
/* save values t0, t1, t2 to xmm8, xmm9 and memory */\
b0 = a0;\
b5 = _mm256_xor_si256(b5, a1);\
b3 = _mm256_xor_si256(b3, a1);\
b1 = a1;\
b6 = _mm256_xor_si256(b6, a2);\
b4 = _mm256_xor_si256(b4, a2);\
TEMP2 = a2;\
b7 = _mm256_xor_si256(b7, a3);\
b5 = _mm256_xor_si256(b5, a3);\
\
/* compute x_i = t_i + t_{i+3} */\
a0 = _mm256_xor_si256(a0, a3);\
a1 = _mm256_xor_si256(a1, a4);\
a2 = _mm256_xor_si256(a2, a5);\
a3 = _mm256_xor_si256(a3, a6);\
a4 = _mm256_xor_si256(a4, a7);\
a5 = _mm256_xor_si256(a5, b0);\
a6 = _mm256_xor_si256(a6, b1);\
a7 = _mm256_xor_si256(a7, TEMP2);\
\
/* compute z_i : double x_i using temp xmm8 and 1B xmm9 */\
/* compute w_i : add y_{i+4} */\
b1 = m256_const1_64( 0x1b1b1b1b1b1b1b1b );\
MUL2_2WAY(a0, b0, b1);\
a0 = _mm256_xor_si256(a0, TEMP0);\
MUL2_2WAY(a1, b0, b1);\
a1 = _mm256_xor_si256(a1, TEMP1);\
MUL2_2WAY(a2, b0, b1);\
a2 = _mm256_xor_si256(a2, b2);\
MUL2_2WAY(a3, b0, b1);\
a3 = _mm256_xor_si256(a3, b3);\
MUL2_2WAY(a4, b0, b1);\
a4 = _mm256_xor_si256(a4, b4);\
MUL2_2WAY(a5, b0, b1);\
a5 = _mm256_xor_si256(a5, b5);\
MUL2_2WAY(a6, b0, b1);\
a6 = _mm256_xor_si256(a6, b6);\
MUL2_2WAY(a7, b0, b1);\
a7 = _mm256_xor_si256(a7, b7);\
\
/* compute v_i : double w_i */\
/* add to y_4 y_5 .. v3, v4, ... */\
MUL2_2WAY(a0, b0, b1);\
b5 = _mm256_xor_si256(b5, a0);\
MUL2_2WAY(a1, b0, b1);\
b6 = _mm256_xor_si256(b6, a1);\
MUL2_2WAY(a2, b0, b1);\
b7 = _mm256_xor_si256(b7, a2);\
MUL2_2WAY(a5, b0, b1);\
b2 = _mm256_xor_si256(b2, a5);\
MUL2_2WAY(a6, b0, b1);\
b3 = _mm256_xor_si256(b3, a6);\
MUL2_2WAY(a7, b0, b1);\
b4 = _mm256_xor_si256(b4, a7);\
MUL2_2WAY(a3, b0, b1);\
MUL2_2WAY(a4, b0, b1);\
b0 = TEMP0;\
b1 = TEMP1;\
b0 = _mm256_xor_si256(b0, a3);\
b1 = _mm256_xor_si256(b1, a4);\
}/*MixBytes*/
/* one round
* a0-a7 = input rows
* b0-b7 = output rows
*/
#define SUBMIX_2WAY(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7){\
/* SubBytes */\
b0 = _mm256_xor_si256( b0, b0 );\
a0 = _mm256_aesenclast_epi128( a0, b0 );\
a1 = _mm256_aesenclast_epi128( a1, b0 );\
a2 = _mm256_aesenclast_epi128( a2, b0 );\
a3 = _mm256_aesenclast_epi128( a3, b0 );\
a4 = _mm256_aesenclast_epi128( a4, b0 );\
a5 = _mm256_aesenclast_epi128( a5, b0 );\
a6 = _mm256_aesenclast_epi128( a6, b0 );\
a7 = _mm256_aesenclast_epi128( a7, b0 );\
/* MixBytes */\
MixBytes_2way(a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7);\
}
#define ROUNDS_P_2WAY(){\
uint8_t round_counter = 0;\
for ( round_counter = 0; round_counter < 14; round_counter += 2 ) \
{ \
/* AddRoundConstant P1024 */\
xmm8 = _mm256_xor_si256( xmm8, m256_const1_128( \
casti_m128i( round_const_p, round_counter ) ) ); \
/* ShiftBytes P1024 + pre-AESENCLAST */\
xmm8 = _mm256_shuffle_epi8( xmm8, SUBSH_MASK0_2WAY ); \
xmm9 = _mm256_shuffle_epi8( xmm9, SUBSH_MASK1_2WAY );\
xmm10 = _mm256_shuffle_epi8( xmm10, SUBSH_MASK2_2WAY );\
xmm11 = _mm256_shuffle_epi8( xmm11, SUBSH_MASK3_2WAY );\
xmm12 = _mm256_shuffle_epi8( xmm12, SUBSH_MASK4_2WAY );\
xmm13 = _mm256_shuffle_epi8( xmm13, SUBSH_MASK5_2WAY );\
xmm14 = _mm256_shuffle_epi8( xmm14, SUBSH_MASK6_2WAY );\
xmm15 = _mm256_shuffle_epi8( xmm15, SUBSH_MASK7_2WAY );\
/* SubBytes + MixBytes */\
SUBMIX_2WAY(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
\
/* AddRoundConstant P1024 */\
xmm0 = _mm256_xor_si256( xmm0, m256_const1_128( \
casti_m128i( round_const_p, round_counter+1 ) ) ); \
/* ShiftBytes P1024 + pre-AESENCLAST */\
xmm0 = _mm256_shuffle_epi8( xmm0, SUBSH_MASK0_2WAY );\
xmm1 = _mm256_shuffle_epi8( xmm1, SUBSH_MASK1_2WAY );\
xmm2 = _mm256_shuffle_epi8( xmm2, SUBSH_MASK2_2WAY );\
xmm3 = _mm256_shuffle_epi8( xmm3, SUBSH_MASK3_2WAY );\
xmm4 = _mm256_shuffle_epi8( xmm4, SUBSH_MASK4_2WAY );\
xmm5 = _mm256_shuffle_epi8( xmm5, SUBSH_MASK5_2WAY );\
xmm6 = _mm256_shuffle_epi8( xmm6, SUBSH_MASK6_2WAY );\
xmm7 = _mm256_shuffle_epi8( xmm7, SUBSH_MASK7_2WAY );\
/* SubBytes + MixBytes */\
SUBMIX_2WAY(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
}\
}
#define ROUNDS_Q_2WAY(){\
uint8_t round_counter = 0;\
for ( round_counter = 0; round_counter < 14; round_counter += 2) \
{ \
/* AddRoundConstant Q1024 */\
xmm1 = m256_neg1;\
xmm8 = _mm256_xor_si256( xmm8, xmm1 );\
xmm9 = _mm256_xor_si256( xmm9, xmm1 );\
xmm10 = _mm256_xor_si256( xmm10, xmm1 );\
xmm11 = _mm256_xor_si256( xmm11, xmm1 );\
xmm12 = _mm256_xor_si256( xmm12, xmm1 );\
xmm13 = _mm256_xor_si256( xmm13, xmm1 );\
xmm14 = _mm256_xor_si256( xmm14, xmm1 );\
xmm15 = _mm256_xor_si256( xmm15, m256_const1_128( \
casti_m128i( round_const_q, round_counter ) ) ); \
/* ShiftBytes Q1024 + pre-AESENCLAST */\
xmm8 = _mm256_shuffle_epi8( xmm8, SUBSH_MASK1_2WAY );\
xmm9 = _mm256_shuffle_epi8( xmm9, SUBSH_MASK3_2WAY );\
xmm10 = _mm256_shuffle_epi8( xmm10, SUBSH_MASK5_2WAY );\
xmm11 = _mm256_shuffle_epi8( xmm11, SUBSH_MASK7_2WAY );\
xmm12 = _mm256_shuffle_epi8( xmm12, SUBSH_MASK0_2WAY );\
xmm13 = _mm256_shuffle_epi8( xmm13, SUBSH_MASK2_2WAY );\
xmm14 = _mm256_shuffle_epi8( xmm14, SUBSH_MASK4_2WAY );\
xmm15 = _mm256_shuffle_epi8( xmm15, SUBSH_MASK6_2WAY );\
/* SubBytes + MixBytes */\
SUBMIX_2WAY(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);\
\
/* AddRoundConstant Q1024 */\
xmm9 = m256_neg1;\
xmm0 = _mm256_xor_si256( xmm0, xmm9 );\
xmm1 = _mm256_xor_si256( xmm1, xmm9 );\
xmm2 = _mm256_xor_si256( xmm2, xmm9 );\
xmm3 = _mm256_xor_si256( xmm3, xmm9 );\
xmm4 = _mm256_xor_si256( xmm4, xmm9 );\
xmm5 = _mm256_xor_si256( xmm5, xmm9 );\
xmm6 = _mm256_xor_si256( xmm6, xmm9 );\
xmm7 = _mm256_xor_si256( xmm7, m256_const1_128( \
casti_m128i( round_const_q, round_counter+1 ) ) ); \
/* ShiftBytes Q1024 + pre-AESENCLAST */\
xmm0 = _mm256_shuffle_epi8( xmm0, SUBSH_MASK1_2WAY );\
xmm1 = _mm256_shuffle_epi8( xmm1, SUBSH_MASK3_2WAY );\
xmm2 = _mm256_shuffle_epi8( xmm2, SUBSH_MASK5_2WAY );\
xmm3 = _mm256_shuffle_epi8( xmm3, SUBSH_MASK7_2WAY );\
xmm4 = _mm256_shuffle_epi8( xmm4, SUBSH_MASK0_2WAY );\
xmm5 = _mm256_shuffle_epi8( xmm5, SUBSH_MASK2_2WAY );\
xmm6 = _mm256_shuffle_epi8( xmm6, SUBSH_MASK4_2WAY );\
xmm7 = _mm256_shuffle_epi8( xmm7, SUBSH_MASK6_2WAY );\
/* SubBytes + MixBytes */\
SUBMIX_2WAY(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15);\
}\
}
#define Matrix_Transpose_2way(i0, i1, i2, i3, i4, i5, i6, i7, t0, t1, t2, t3, t4, t5, t6, t7){\
t0 = TRANSP_MASK_2WAY;\
\
i6 = _mm256_shuffle_epi8(i6, t0);\
i0 = _mm256_shuffle_epi8(i0, t0);\
i1 = _mm256_shuffle_epi8(i1, t0);\
i2 = _mm256_shuffle_epi8(i2, t0);\
i3 = _mm256_shuffle_epi8(i3, t0);\
t1 = i2;\
i4 = _mm256_shuffle_epi8(i4, t0);\
i5 = _mm256_shuffle_epi8(i5, t0);\
t2 = i4;\
t3 = i6;\
i7 = _mm256_shuffle_epi8(i7, t0);\
\
/* continue with unpack using 4 temp registers */\
t0 = i0;\
t2 = _mm256_unpackhi_epi16(t2, i5);\
i4 = _mm256_unpacklo_epi16(i4, i5);\
t3 = _mm256_unpackhi_epi16(t3, i7);\
i6 = _mm256_unpacklo_epi16(i6, i7);\
t0 = _mm256_unpackhi_epi16(t0, i1);\
t1 = _mm256_unpackhi_epi16(t1, i3);\
i2 = _mm256_unpacklo_epi16(i2, i3);\
i0 = _mm256_unpacklo_epi16(i0, i1);\
\
/* shuffle with immediate */\
t0 = _mm256_shuffle_epi32(t0, 216);\
t1 = _mm256_shuffle_epi32(t1, 216);\
t2 = _mm256_shuffle_epi32(t2, 216);\
t3 = _mm256_shuffle_epi32(t3, 216);\
i0 = _mm256_shuffle_epi32(i0, 216);\
i2 = _mm256_shuffle_epi32(i2, 216);\
i4 = _mm256_shuffle_epi32(i4, 216);\
i6 = _mm256_shuffle_epi32(i6, 216);\
\
/* continue with unpack */\
t4 = i0;\
i0 = _mm256_unpacklo_epi32(i0, i2);\
t4 = _mm256_unpackhi_epi32(t4, i2);\
t5 = t0;\
t0 = _mm256_unpacklo_epi32(t0, t1);\
t5 = _mm256_unpackhi_epi32(t5, t1);\
t6 = i4;\
i4 = _mm256_unpacklo_epi32(i4, i6);\
t7 = t2;\
t6 = _mm256_unpackhi_epi32(t6, i6);\
i2 = t0;\
t2 = _mm256_unpacklo_epi32(t2, t3);\
i3 = t0;\
t7 = _mm256_unpackhi_epi32(t7, t3);\
\
/* there are now 2 rows in each xmm */\
/* unpack to get 1 row of CV in each xmm */\
i1 = i0;\
i1 = _mm256_unpackhi_epi64(i1, i4);\
i0 = _mm256_unpacklo_epi64(i0, i4);\
i4 = t4;\
i3 = _mm256_unpackhi_epi64(i3, t2);\
i5 = t4;\
i2 = _mm256_unpacklo_epi64(i2, t2);\
i6 = t5;\
i5 = _mm256_unpackhi_epi64(i5, t6);\
i7 = t5;\
i4 = _mm256_unpacklo_epi64(i4, t6);\
i7 = _mm256_unpackhi_epi64(i7, t7);\
i6 = _mm256_unpacklo_epi64(i6, t7);\
/* transpose done */\
}/**/
#define Matrix_Transpose_INV_2way(i0, i1, i2, i3, i4, i5, i6, i7, o0, o1, o2, t0, t1, t2, t3, t4){\
/* transpose matrix to get output format */\
o1 = i0;\
i0 = _mm256_unpacklo_epi64(i0, i1);\
o1 = _mm256_unpackhi_epi64(o1, i1);\
t0 = i2;\
i2 = _mm256_unpacklo_epi64(i2, i3);\
t0 = _mm256_unpackhi_epi64(t0, i3);\
t1 = i4;\
i4 = _mm256_unpacklo_epi64(i4, i5);\
t1 = _mm256_unpackhi_epi64(t1, i5);\
t2 = i6;\
o0 = TRANSP_MASK_2WAY;\
i6 = _mm256_unpacklo_epi64(i6, i7);\
t2 = _mm256_unpackhi_epi64(t2, i7);\
/* load transpose mask into a register, because it will be used 8 times */\
i0 = _mm256_shuffle_epi8(i0, o0);\
i2 = _mm256_shuffle_epi8(i2, o0);\
i4 = _mm256_shuffle_epi8(i4, o0);\
i6 = _mm256_shuffle_epi8(i6, o0);\
o1 = _mm256_shuffle_epi8(o1, o0);\
t0 = _mm256_shuffle_epi8(t0, o0);\
t1 = _mm256_shuffle_epi8(t1, o0);\
t2 = _mm256_shuffle_epi8(t2, o0);\
/* continue with unpack using 4 temp registers */\
t3 = i4;\
o2 = o1;\
o0 = i0;\
t4 = t1;\
\
t3 = _mm256_unpackhi_epi16(t3, i6);\
i4 = _mm256_unpacklo_epi16(i4, i6);\
o0 = _mm256_unpackhi_epi16(o0, i2);\
i0 = _mm256_unpacklo_epi16(i0, i2);\
o2 = _mm256_unpackhi_epi16(o2, t0);\
o1 = _mm256_unpacklo_epi16(o1, t0);\
t4 = _mm256_unpackhi_epi16(t4, t2);\
t1 = _mm256_unpacklo_epi16(t1, t2);\
/* shuffle with immediate */\
i4 = _mm256_shuffle_epi32(i4, 216);\
t3 = _mm256_shuffle_epi32(t3, 216);\
o1 = _mm256_shuffle_epi32(o1, 216);\
o2 = _mm256_shuffle_epi32(o2, 216);\
i0 = _mm256_shuffle_epi32(i0, 216);\
o0 = _mm256_shuffle_epi32(o0, 216);\
t1 = _mm256_shuffle_epi32(t1, 216);\
t4 = _mm256_shuffle_epi32(t4, 216);\
/* continue with unpack */\
i1 = i0;\
i3 = o0;\
i5 = o1;\
i7 = o2;\
i0 = _mm256_unpacklo_epi32(i0, i4);\
i1 = _mm256_unpackhi_epi32(i1, i4);\
o0 = _mm256_unpacklo_epi32(o0, t3);\
i3 = _mm256_unpackhi_epi32(i3, t3);\
o1 = _mm256_unpacklo_epi32(o1, t1);\
i5 = _mm256_unpackhi_epi32(i5, t1);\
o2 = _mm256_unpacklo_epi32(o2, t4);\
i7 = _mm256_unpackhi_epi32(i7, t4);\
/* transpose done */\
}/**/
void INIT_2way( __m256i *chaining )
{
static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
/* load IV into registers xmm8 - xmm15 */
xmm8 = chaining[0];
xmm9 = chaining[1];
xmm10 = chaining[2];
xmm11 = chaining[3];
xmm12 = chaining[4];
xmm13 = chaining[5];
xmm14 = chaining[6];
xmm15 = chaining[7];
/* transform chaining value from column ordering into row ordering */
Matrix_Transpose_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);
/* store transposed IV */
chaining[0] = xmm8;
chaining[1] = xmm9;
chaining[2] = xmm10;
chaining[3] = xmm11;
chaining[4] = xmm12;
chaining[5] = xmm13;
chaining[6] = xmm14;
chaining[7] = xmm15;
}
void TF1024_2way( __m256i *chaining, const __m256i *message )
{
static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
static __m256i QTEMP[8];
static __m256i TEMP0;
static __m256i TEMP1;
static __m256i TEMP2;
/* load message into registers xmm8 - xmm15 (Q = message) */
xmm8 = message[0];
xmm9 = message[1];
xmm10 = message[2];
xmm11 = message[3];
xmm12 = message[4];
xmm13 = message[5];
xmm14 = message[6];
xmm15 = message[7];
/* transform message M from column ordering into row ordering */
Matrix_Transpose_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);
/* store message M (Q input) for later */
QTEMP[0] = xmm8;
QTEMP[1] = xmm9;
QTEMP[2] = xmm10;
QTEMP[3] = xmm11;
QTEMP[4] = xmm12;
QTEMP[5] = xmm13;
QTEMP[6] = xmm14;
QTEMP[7] = xmm15;
/* xor CV to message to get P input */
/* result: CV+M in xmm8...xmm15 */
xmm8 = _mm256_xor_si256( xmm8, (chaining[0]) );
xmm9 = _mm256_xor_si256( xmm9, (chaining[1]) );
xmm10 = _mm256_xor_si256( xmm10, (chaining[2]) );
xmm11 = _mm256_xor_si256( xmm11, (chaining[3]) );
xmm12 = _mm256_xor_si256( xmm12, (chaining[4]) );
xmm13 = _mm256_xor_si256( xmm13, (chaining[5]) );
xmm14 = _mm256_xor_si256( xmm14, (chaining[6]) );
xmm15 = _mm256_xor_si256( xmm15, (chaining[7]) );
/* compute permutation P */
/* result: P(CV+M) in xmm8...xmm15 */
ROUNDS_P_2WAY();
/* xor CV to P output (feed-forward) */
/* result: P(CV+M)+CV in xmm8...xmm15 */
xmm8 = _mm256_xor_si256( xmm8, (chaining[0]) );
xmm9 = _mm256_xor_si256( xmm9, (chaining[1]) );
xmm10 = _mm256_xor_si256( xmm10, (chaining[2]) );
xmm11 = _mm256_xor_si256( xmm11, (chaining[3]) );
xmm12 = _mm256_xor_si256( xmm12, (chaining[4]) );
xmm13 = _mm256_xor_si256( xmm13, (chaining[5]) );
xmm14 = _mm256_xor_si256( xmm14, (chaining[6]) );
xmm15 = _mm256_xor_si256( xmm15, (chaining[7]) );
/* store P(CV+M)+CV */
chaining[0] = xmm8;
chaining[1] = xmm9;
chaining[2] = xmm10;
chaining[3] = xmm11;
chaining[4] = xmm12;
chaining[5] = xmm13;
chaining[6] = xmm14;
chaining[7] = xmm15;
/* load message M (Q input) into xmm8-15 */
xmm8 = QTEMP[0];
xmm9 = QTEMP[1];
xmm10 = QTEMP[2];
xmm11 = QTEMP[3];
xmm12 = QTEMP[4];
xmm13 = QTEMP[5];
xmm14 = QTEMP[6];
xmm15 = QTEMP[7];
/* compute permutation Q */
/* result: Q(M) in xmm8...xmm15 */
ROUNDS_Q_2WAY();
/* xor Q output */
/* result: P(CV+M)+CV+Q(M) in xmm8...xmm15 */
xmm8 = _mm256_xor_si256( xmm8, (chaining[0]) );
xmm9 = _mm256_xor_si256( xmm9, (chaining[1]) );
xmm10 = _mm256_xor_si256( xmm10, (chaining[2]) );
xmm11 = _mm256_xor_si256( xmm11, (chaining[3]) );
xmm12 = _mm256_xor_si256( xmm12, (chaining[4]) );
xmm13 = _mm256_xor_si256( xmm13, (chaining[5]) );
xmm14 = _mm256_xor_si256( xmm14, (chaining[6]) );
xmm15 = _mm256_xor_si256( xmm15, (chaining[7]) );
/* store CV */
chaining[0] = xmm8;
chaining[1] = xmm9;
chaining[2] = xmm10;
chaining[3] = xmm11;
chaining[4] = xmm12;
chaining[5] = xmm13;
chaining[6] = xmm14;
chaining[7] = xmm15;
return;
}
void OF1024_2way( __m256i* chaining )
{
static __m256i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
static __m256i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15;
static __m256i TEMP0;
static __m256i TEMP1;
static __m256i TEMP2;
/* load CV into registers xmm8 - xmm15 */
xmm8 = chaining[0];
xmm9 = chaining[1];
xmm10 = chaining[2];
xmm11 = chaining[3];
xmm12 = chaining[4];
xmm13 = chaining[5];
xmm14 = chaining[6];
xmm15 = chaining[7];
/* compute permutation P */
/* result: P(CV) in xmm8...xmm15 */
ROUNDS_P_2WAY();
/* xor CV to P output (feed-forward) */
/* result: P(CV)+CV in xmm8...xmm15 */
xmm8 = _mm256_xor_si256( xmm8, (chaining[0]) );
xmm9 = _mm256_xor_si256( xmm9, (chaining[1]) );
xmm10 = _mm256_xor_si256( xmm10, (chaining[2]) );
xmm11 = _mm256_xor_si256( xmm11, (chaining[3]) );
xmm12 = _mm256_xor_si256( xmm12, (chaining[4]) );
xmm13 = _mm256_xor_si256( xmm13, (chaining[5]) );
xmm14 = _mm256_xor_si256( xmm14, (chaining[6]) );
xmm15 = _mm256_xor_si256( xmm15, (chaining[7]) );
/* transpose CV back from row ordering to column ordering */
/* result: final hash value in xmm0, xmm6, xmm13, xmm15 */
Matrix_Transpose_INV_2way(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm4, xmm0, xmm6, xmm1, xmm2, xmm3, xmm5, xmm7);
/* we only need to return the truncated half of the state */
chaining[4] = xmm0;
chaining[5] = xmm6;
chaining[6] = xmm13;
chaining[7] = xmm15;
return;
}
#endif // VAES
#endif // GROESTL512_INTR_4WAY_H__

View File

@@ -11,7 +11,7 @@
#else
#include "sph_groestl.h"
#endif
#include <openssl/sha.h>
#include "algo/sha/sph_sha2.h"
typedef struct {
#ifdef __AES__
@@ -19,7 +19,7 @@ typedef struct {
#else
sph_groestl512_context groestl;
#endif
SHA256_CTX sha;
sph_sha256_context sha;
} myrgr_ctx_holder;
myrgr_ctx_holder myrgr_ctx;
@@ -31,7 +31,7 @@ void init_myrgr_ctx()
#else
sph_groestl512_init( &myrgr_ctx.groestl );
#endif
SHA256_Init( &myrgr_ctx.sha );
sph_sha256_init( &myrgr_ctx.sha );
}
void myriad_hash(void *output, const void *input)
@@ -39,54 +39,55 @@ void myriad_hash(void *output, const void *input)
myrgr_ctx_holder ctx;
memcpy( &ctx, &myrgr_ctx, sizeof(myrgr_ctx) );
uint32_t _ALIGN(32) hash[16];
#ifdef __AES__
update_groestl( &ctx.groestl, (char*)input, 640 );
final_groestl( &ctx.groestl, (char*)hash);
#else
sph_groestl512(&ctx.groestl, input, 80);
sph_groestl512_close(&ctx.groestl, hash);
#endif
SHA256_Update( &ctx.sha, (unsigned char*)hash, 64 );
SHA256_Final( (unsigned char*)hash, &ctx.sha );
sph_sha256( &ctx.sha, hash, 64 );
sph_sha256_close( &ctx.sha, hash );
memcpy(output, hash, 32);
}
int scanhash_myriad( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(64) endiandata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t nonce = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
int thr_id = mythr->id;
if (opt_benchmark)
((uint32_t*)ptarget)[7] = 0x0000ff;
swab32_array( endiandata, pdata, 20 );
do {
const uint32_t Htarg = ptarget[7];
uint32_t hash[8];
be32enc(&endiandata[19], nonce);
myriad_hash(hash, endiandata);
if (hash[7] <= Htarg && fulltest(hash, ptarget))
{
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce;
return 1;
}
nonce++;
} while (nonce < max_nonce && !work_restart[thr_id].restart);
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
return 0;
}
#endif
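For reference, Myriad-Groestl is Groestl-512 over the 80-byte block header followed by SHA-256, truncated to 32 bytes; the rewrite above only swaps the OpenSSL SHA-256 calls for the bundled sph implementation. A minimal sketch of the chain using the sph code path from this file:

#include <string.h>
#include <stdint.h>
#include "sph_groestl.h"
#include "algo/sha/sph_sha2.h"

void myriad_hash_ref( void *output, const void *input )
{
   uint32_t hash[16];
   sph_groestl512_context groestl;
   sph_sha256_context sha;
   sph_groestl512_init( &groestl );
   sph_groestl512( &groestl, input, 80 );     // 80-byte block header
   sph_groestl512_close( &groestl, hash );    // 64-byte Groestl digest
   sph_sha256_init( &sha );
   sph_sha256( &sha, hash, 64 );
   sph_sha256_close( &sha, hash );            // 32-byte SHA-256 digest
   memcpy( output, hash, 32 );
}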

View File

@@ -548,7 +548,7 @@ static const sph_u32 T512[64][16] = {
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// Hamsi 8 way
// Hamsi 8 way AVX512
#define INPUT_BIG8 \
do { \
@@ -560,22 +560,14 @@ do { \
__m512i dm = _mm512_and_si512( db, m512_one_64 ) ; \
dm = mm512_negate_32( _mm512_or_si512( dm, \
_mm512_slli_epi64( dm, 32 ) ) ); \
m0 = _mm512_xor_si512( m0, _mm512_and_si512( dm, \
m512_const1_64( tp[0] ) ) ); \
m1 = _mm512_xor_si512( m1, _mm512_and_si512( dm, \
m512_const1_64( tp[1] ) ) ); \
m2 = _mm512_xor_si512( m2, _mm512_and_si512( dm, \
m512_const1_64( tp[2] ) ) ); \
m3 = _mm512_xor_si512( m3, _mm512_and_si512( dm, \
m512_const1_64( tp[3] ) ) ); \
m4 = _mm512_xor_si512( m4, _mm512_and_si512( dm, \
m512_const1_64( tp[4] ) ) ); \
m5 = _mm512_xor_si512( m5, _mm512_and_si512( dm, \
m512_const1_64( tp[5] ) ) ); \
m6 = _mm512_xor_si512( m6, _mm512_and_si512( dm, \
m512_const1_64( tp[6] ) ) ); \
m7 = _mm512_xor_si512( m7, _mm512_and_si512( dm, \
m512_const1_64( tp[7] ) ) ); \
m0 = mm512_xorand( m0, dm, m512_const1_64( tp[0] ) ); \
m1 = mm512_xorand( m1, dm, m512_const1_64( tp[1] ) ); \
m2 = mm512_xorand( m2, dm, m512_const1_64( tp[2] ) ); \
m3 = mm512_xorand( m3, dm, m512_const1_64( tp[3] ) ); \
m4 = mm512_xorand( m4, dm, m512_const1_64( tp[4] ) ); \
m5 = mm512_xorand( m5, dm, m512_const1_64( tp[5] ) ); \
m6 = mm512_xorand( m6, dm, m512_const1_64( tp[6] ) ); \
m7 = mm512_xorand( m7, dm, m512_const1_64( tp[7] ) ); \
tp += 8; \
db = _mm512_srli_epi64( db, 1 ); \
} \
@@ -585,20 +577,13 @@ do { \
do { \
__m512i t; \
t = a; \
a = _mm512_and_si512( a, c ); \
a = _mm512_xor_si512( a, d ); \
c = _mm512_xor_si512( c, b ); \
c = _mm512_xor_si512( c, a ); \
d = _mm512_or_si512( d, t ); \
d = _mm512_xor_si512( d, b ); \
a = mm512_xorand( d, a, c ); \
c = mm512_xor3( a, b, c ); \
b = mm512_xoror( b, d, t ); \
t = _mm512_xor_si512( t, c ); \
b = d; \
d = _mm512_or_si512( d, t ); \
d = _mm512_xor_si512( d, a ); \
a = _mm512_and_si512( a, b ); \
t = _mm512_xor_si512( t, a ); \
b = _mm512_xor_si512( b, d ); \
b = _mm512_xor_si512( b, t ); \
d = mm512_xoror( a, b, t ); \
t = mm512_xorand( t, a, b ); \
b = mm512_xor3( b, d, t ); \
a = c; \
c = b; \
b = d; \
@@ -609,14 +594,12 @@ do { \
do { \
a = mm512_rol_32( a, 13 ); \
c = mm512_rol_32( c, 3 ); \
b = _mm512_xor_si512( b, _mm512_xor_si512( a, c ) ); \
d = _mm512_xor_si512( d, _mm512_xor_si512( c, \
_mm512_slli_epi32( a, 3 ) ) ); \
b = mm512_xor3( a, b, c ); \
d = mm512_xor3( d, c, _mm512_slli_epi32( a, 3 ) ); \
b = mm512_rol_32( b, 1 ); \
d = mm512_rol_32( d, 7 ); \
a = _mm512_xor_si512( a, _mm512_xor_si512( b, d ) ); \
c = _mm512_xor_si512( c, _mm512_xor_si512( d, \
_mm512_slli_epi32( b, 7 ) ) ); \
a = mm512_xor3( a, b, d ); \
c = mm512_xor3( c, d, _mm512_slli_epi32( b, 7 ) ); \
a = mm512_rol_32( a, 5 ); \
c = mm512_rol_32( c, 22 ); \
} while (0)
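The rewritten macros lean on three-input helpers from the repo's simd-utils that compile to a single vpternlogd on AVX-512; as used in this hunk they stand for the following scalar identities (the scalar names are illustrative):

#include <stdint.h>

// Scalar meaning of the fused helpers; each replaces two chained intrinsics.
static inline uint64_t xor3  ( uint64_t a, uint64_t b, uint64_t c ) { return a ^ b ^ c; }      // mm512_xor3
static inline uint64_t xorand( uint64_t a, uint64_t b, uint64_t c ) { return a ^ ( b & c ); }  // mm512_xorand
static inline uint64_t xoror ( uint64_t a, uint64_t b, uint64_t c ) { return a ^ ( b | c ); }  // mm512_xoror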
@@ -849,13 +832,11 @@ void hamsi512_8way_update( hamsi_8way_big_context *sc, const void *data,
void hamsi512_8way_close( hamsi_8way_big_context *sc, void *dst )
{
__m512i pad[1];
int ch, cl;
uint32_t ch, cl;
sph_enc32be( &ch, sc->count_high );
sph_enc32be( &cl, sc->count_low + ( sc->partial_len << 3 ) );
pad[0] = _mm512_set_epi32( cl, ch, cl, ch, cl, ch, cl, ch,
cl, ch, cl, ch, cl, ch, cl, ch );
// pad[0] = m512_const2_32( cl, ch );
pad[0] = _mm512_set1_epi64( ((uint64_t)cl << 32 ) | (uint64_t)ch );
sc->buf[0] = m512_const1_64( 0x80 );
hamsi_8way_big( sc, sc->buf, 1 );
hamsi_8way_big_final( sc, pad );
@@ -863,11 +844,9 @@ void hamsi512_8way_close( hamsi_8way_big_context *sc, void *dst )
mm512_block_bswap_32( (__m512i*)dst, sc->h );
}
#endif // AVX512
// Hamsi 4 way
// Hamsi 4 way AVX2
#define INPUT_BIG \
do { \
@@ -1186,14 +1165,12 @@ void hamsi512_4way_update( hamsi_4way_big_context *sc, const void *data,
void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst )
{
__m256i pad[1];
int ch, cl;
uint32_t ch, cl;
sph_enc32be( &ch, sc->count_high );
sph_enc32be( &cl, sc->count_low + ( sc->partial_len << 3 ) );
pad[0] = _mm256_set_epi32( cl, ch, cl, ch, cl, ch, cl, ch );
pad[0] = _mm256_set1_epi64x( ((uint64_t)cl << 32 ) | (uint64_t)ch );
sc->buf[0] = m256_const1_64( 0x80 );
// sc->buf[0] = _mm256_set_epi32( 0UL, 0x80UL, 0UL, 0x80UL,
// 0UL, 0x80UL, 0UL, 0x80UL );
hamsi_big( sc, sc->buf, 1 );
hamsi_big_final( sc, pad );
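Both close functions now build the length pad by broadcasting one 64-bit word instead of enumerating (cl, ch) per 32-bit lane; the two forms are byte-identical because each little-endian 64-bit lane holds ch in its low half and cl in its high half. A quick scalar check:

#include <stdint.h>
#include <assert.h>

int main(void)
{
   uint32_t ch = 0x11223344, cl = 0x55667788;
   uint64_t lane = ( (uint64_t)cl << 32 ) | (uint64_t)ch;
   assert( (uint32_t)lane == ch );           // low 32 bits = ch
   assert( (uint32_t)(lane >> 32) == cl );   // high 32 bits = cl
   return 0;
}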

View File

@@ -522,50 +522,53 @@ do { \
// Haval-256 8 way 32 bit avx2
#if defined (__AVX512VL__)
// ( ~( a ^ b ) ) & c
#define mm256_andnotxor( a, b, c ) \
_mm256_ternarylogic_epi32( a, b, c, 0x82 )
#else
#define mm256_andnotxor( a, b, c ) \
_mm256_andnot_si256( _mm256_xor_si256( a, b ), c )
#endif
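The 0x82 immediate is the truth table of (~(a ^ b)) & c evaluated on the canonical ternarylogic inputs a = 0xF0, b = 0xCC, c = 0xAA; any of the fused helpers in this file can be checked the same way:

#include <stdint.h>
#include <assert.h>

int main(void)
{
   const uint8_t a = 0xF0, b = 0xCC, c = 0xAA;        // canonical truth-table inputs
   assert( (uint8_t)( ~( a ^ b ) & c ) == 0x82 );     // mm256_andnotxor immediate
   assert( (uint8_t)( a ^ b ^ c ) == 0x96 );          // xor3 immediate, for comparison
   return 0;
}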
#define F1_8W(x6, x5, x4, x3, x2, x1, x0) \
_mm256_xor_si256( x0, \
_mm256_xor_si256( _mm256_and_si256(_mm256_xor_si256( x0, x4 ), x1 ), \
_mm256_xor_si256( _mm256_and_si256( x2, x5 ), \
_mm256_and_si256( x3, x6 ) ) ) ) \
mm256_xor3( x0, mm256_andxor( x1, x0, x4 ), \
_mm256_xor_si256( _mm256_and_si256( x2, x5 ), \
_mm256_and_si256( x3, x6 ) ) ) \
#define F2_8W(x6, x5, x4, x3, x2, x1, x0) \
_mm256_xor_si256( \
_mm256_and_si256( x2, \
_mm256_xor_si256( _mm256_andnot_si256( x3, x1 ), \
_mm256_xor_si256( _mm256_and_si256( x4, x5 ), \
_mm256_xor_si256( x6, x0 ) ) ) ), \
_mm256_xor_si256( \
_mm256_and_si256( x4, _mm256_xor_si256( x1, x5 ) ), \
_mm256_xor_si256( _mm256_and_si256( x3, x5 ), x0 ) ) ) \
mm256_xor3( mm256_andxor( x2, _mm256_andnot_si256( x3, x1 ), \
mm256_xor3( _mm256_and_si256( x4, x5 ), x6, x0 ) ), \
mm256_andxor( x4, x1, x5 ), \
mm256_xorand( x0, x3, x5 ) ) \
#define F3_8W(x6, x5, x4, x3, x2, x1, x0) \
_mm256_xor_si256( \
_mm256_and_si256( x3, \
_mm256_xor_si256( _mm256_and_si256( x1, x2 ), \
_mm256_xor_si256( x6, x0 ) ) ), \
_mm256_xor_si256( _mm256_xor_si256(_mm256_and_si256( x1, x4 ), \
_mm256_and_si256( x2, x5 ) ), x0 ) )
mm256_xor3( x0, \
_mm256_and_si256( x3, \
mm256_xor3( _mm256_and_si256( x1, x2 ), x6, x0 ) ), \
_mm256_xor_si256( _mm256_and_si256( x1, x4 ), \
_mm256_and_si256( x2, x5 ) ) )
#define F4_8W(x6, x5, x4, x3, x2, x1, x0) \
_mm256_xor_si256( \
_mm256_xor_si256( \
_mm256_and_si256( x3, \
_mm256_xor_si256( _mm256_xor_si256( _mm256_and_si256( x1, x2 ), \
_mm256_or_si256( x4, x6 ) ), x5 ) ), \
_mm256_and_si256( x4, \
_mm256_xor_si256( _mm256_xor_si256( _mm256_and_si256( mm256_not(x2), x5 ), \
_mm256_xor_si256( x1, x6 ) ), x0 ) ) ), \
_mm256_xor_si256( _mm256_and_si256( x2, x6 ), x0 ) )
mm256_xor3( \
mm256_andxor( x3, x5, \
_mm256_xor_si256( _mm256_and_si256( x1, x2 ), \
_mm256_or_si256( x4, x6 ) ) ), \
_mm256_and_si256( x4, \
mm256_xor3( x0, _mm256_andnot_si256( x2, x5 ), \
_mm256_xor_si256( x1, x6 ) ) ), \
mm256_xorand( x0, x2, x6 ) )
#define F5_8W(x6, x5, x4, x3, x2, x1, x0) \
_mm256_xor_si256( \
_mm256_and_si256( x0, \
mm256_not( _mm256_xor_si256( \
_mm256_and_si256( _mm256_and_si256( x1, x2 ), x3 ), x5 ) ) ), \
_mm256_xor_si256( _mm256_xor_si256( _mm256_and_si256( x1, x4 ), \
_mm256_and_si256( x2, x5 ) ), \
_mm256_and_si256( x3, x6 ) ) )
mm256_andnotxor( mm256_and3( x1, x2, x3 ), x5, x0 ), \
mm256_xor3( _mm256_and_si256( x1, x4 ), \
_mm256_and_si256( x2, x5 ), \
_mm256_and_si256( x3, x6 ) ) )
#define FP3_1_8W(x6, x5, x4, x3, x2, x1, x0) \
F1_8W(x1, x0, x3, x5, x6, x2, x4)

View File

@@ -99,9 +99,13 @@ void hodl_build_block_header( struct work* g_work, uint32_t version,
// called only by thread 0, saves a backup of g_work
void hodl_get_new_work( struct work* work, struct work* g_work)
{
// pthread_rwlock_rdlock( &g_work_lock );
work_free( &hodl_work );
work_copy( &hodl_work, g_work );
hodl_work.data[ algo_gate.nonce_index ] = ( clock() + rand() ) % 9999;
// pthread_rwlock_unlock( &g_work_lock );
}
json_t *hodl_longpoll_rpc_call( CURL *curl, int *err, char* lp_url )
@@ -121,7 +125,7 @@ json_t *hodl_longpoll_rpc_call( CURL *curl, int *err, char* lp_url )
}
// called by every thread, copies the backup to each thread's work.
void hodl_resync_threads( struct work* work )
void hodl_resync_threads( int thr_id, struct work* work )
{
int nonce_index = algo_gate.nonce_index;
pthread_barrier_wait( &hodl_barrier );
@@ -131,6 +135,7 @@ void hodl_resync_threads( struct work* work )
work_copy( work, &hodl_work );
}
work->data[ nonce_index ] = swab32( hodl_work.data[ nonce_index ] );
work_restart[thr_id].restart = 0;
}
bool hodl_do_this_thread( int thr_id )
@@ -155,11 +160,10 @@ bool register_hodl_algo( algo_gate_t* gate )
applog( LOG_ERR, "Only CPUs with AES are supported, use legacy version.");
return false;
#endif
// if ( TOTAL_CHUNKS % opt_n_threads )
// {
// applog(LOG_ERR,"Thread count must be power of 2.");
// return false;
// }
if ( GARBAGE_SIZE % opt_n_threads )
applog( LOG_WARNING,"WARNING: Thread count must be power of 2. Miner may crash or produce invalid hash!" );
pthread_barrier_init( &hodl_barrier, NULL, opt_n_threads );
gate->optimizations = SSE42_OPT | AES_OPT | AVX2_OPT;
gate->scanhash = (void*)&hodl_scanhash;
@@ -171,7 +175,7 @@ bool register_hodl_algo( algo_gate_t* gate )
gate->resync_threads = (void*)&hodl_resync_threads;
gate->do_this_thread = (void*)&hodl_do_this_thread;
gate->work_cmp_size = 76;
hodl_scratchbuf = (unsigned char*)malloc( 1 << 30 );
hodl_scratchbuf = (unsigned char*)_mm_malloc( 1 << 30, 64 );
allow_getwork = false;
opt_target_factor = 8388608.0;
return ( hodl_scratchbuf != NULL );
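The allocation switch gives the 1 GiB scratchpad 64-byte (cache line) alignment for aligned vector loads; note that _mm_malloc'd memory must be released with _mm_free, not free(). A minimal sketch of the pairing:

#include <stdlib.h>
#include <mm_malloc.h>

static int alloc_scratch_example( void )
{
   unsigned char *buf = (unsigned char*)_mm_malloc( (size_t)1 << 30, 64 );
   if ( !buf ) return 0;
   /* ... cache-line aligned scratchpad use ... */
   _mm_free( buf );   // _mm_malloc pairs with _mm_free, not free()
   return 1;
}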

View File

@@ -70,7 +70,7 @@ int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce,
uint32_t *ptarget = work->target;
int threadNumber = mythr->id;
CacheEntry *Garbage = (CacheEntry*)hodl_scratchbuf;
CacheEntry Cache[AES_PARALLEL_N];
CacheEntry Cache[AES_PARALLEL_N] __attribute__ ((aligned (64)));
__m128i* data[AES_PARALLEL_N];
const __m128i* next[AES_PARALLEL_N];
uint32_t CollisionCount = 0;

View File

@@ -51,15 +51,15 @@ extern "C"{
do { \
__m512i cc = _mm512_set1_epi64( c ); \
x3 = mm512_not( x3 ); \
x0 = _mm512_xor_si512( x0, _mm512_andnot_si512( x2, cc ) ); \
tmp = _mm512_xor_si512( cc, _mm512_and_si512( x0, x1 ) ); \
x0 = _mm512_xor_si512( x0, _mm512_and_si512( x2, x3 ) ); \
x3 = _mm512_xor_si512( x3, _mm512_andnot_si512( x1, x2 ) ); \
x1 = _mm512_xor_si512( x1, _mm512_and_si512( x0, x2 ) ); \
x2 = _mm512_xor_si512( x2, _mm512_andnot_si512( x3, x0 ) ); \
x0 = _mm512_xor_si512( x0, _mm512_or_si512( x1, x3 ) ); \
x3 = _mm512_xor_si512( x3, _mm512_and_si512( x1, x2 ) ); \
x1 = _mm512_xor_si512( x1, _mm512_and_si512( tmp, x0 ) ); \
x0 = mm512_xorandnot( x0, x2, cc ); \
tmp = mm512_xorand( cc, x0, x1 ); \
x0 = mm512_xorand( x0, x2, x3 ); \
x3 = mm512_xorandnot( x3, x1, x2 ); \
x1 = mm512_xorand( x1, x0, x2 ); \
x2 = mm512_xorandnot( x2, x3, x0 ); \
x0 = mm512_xoror( x0, x1, x3 ); \
x3 = mm512_xorand( x3, x1, x2 ); \
x1 = mm512_xorand( x1, tmp, x0 ); \
x2 = _mm512_xor_si512( x2, tmp ); \
} while (0)
@@ -67,11 +67,11 @@ do { \
do { \
x4 = _mm512_xor_si512( x4, x1 ); \
x5 = _mm512_xor_si512( x5, x2 ); \
x6 = _mm512_xor_si512( x6, _mm512_xor_si512( x3, x0 ) ); \
x6 = mm512_xor3( x6, x3, x0 ); \
x7 = _mm512_xor_si512( x7, x0 ); \
x0 = _mm512_xor_si512( x0, x5 ); \
x1 = _mm512_xor_si512( x1, x6 ); \
x2 = _mm512_xor_si512( x2, _mm512_xor_si512( x7, x4 ) ); \
x2 = mm512_xor3( x2, x7, x4 ); \
x3 = _mm512_xor_si512( x3, x4 ); \
} while (0)
@@ -318,12 +318,12 @@ static const sph_u64 C[] = {
#define Wz_8W(x, c, n) \
do { \
__m512i t = _mm512_slli_epi64( _mm512_and_si512(x ## h, (c)), (n) ); \
x ## h = _mm512_or_si512( _mm512_and_si512( \
_mm512_srli_epi64(x ## h, (n)), (c)), t ); \
x ## h = mm512_orand( t, _mm512_srli_epi64( x ## h, (n) ), (c) ); \
t = _mm512_slli_epi64( _mm512_and_si512(x ## l, (c)), (n) ); \
x ## l = _mm512_or_si512( _mm512_and_si512((x ## l >> (n)), (c)), t ); \
x ## l = mm512_orand( t, (x ## l >> (n)), (c) ); \
} while (0)
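This hunk adds two more fused forms to the ones noted for Hamsi above; their scalar meanings (again with illustrative names):

#include <stdint.h>

static inline uint64_t xorandnot( uint64_t a, uint64_t b, uint64_t c ) { return a ^ ( ~b & c ); } // mm512_xorandnot
static inline uint64_t orand    ( uint64_t a, uint64_t b, uint64_t c ) { return a | ( b & c ); }  // mm512_orand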
#define W80(x) Wz_8W(x, m512_const1_64( 0x5555555555555555 ), 1 )
#define W81(x) Wz_8W(x, m512_const1_64( 0x3333333333333333 ), 2 )
#define W82(x) Wz_8W(x, m512_const1_64( 0x0F0F0F0F0F0F0F0F ), 4 )

View File

@@ -76,6 +76,9 @@ static const uint64_t RC[] = {
#define OR64(d, a, b) (d = _mm512_or_si512(a,b))
#define NOT64(d, s) (d = _mm512_xor_si512(s,m512_neg1))
#define ROL64(d, v, n) (d = mm512_rol_64(v, n))
#define XOROR(d, a, b, c) (d = mm512_xoror(a, b, c))
#define XORAND(d, a, b, c) (d = mm512_xorand(a, b, c))
#include "keccak-macros.c"
@@ -238,6 +241,8 @@ keccak512_8way_close(void *cc, void *dst)
#undef NOT64
#undef ROL64
#undef KECCAK_F_1600
#undef XOROR
#undef XORAND
#endif // AVX512
@@ -255,6 +260,8 @@ keccak512_8way_close(void *cc, void *dst)
#define OR64(d, a, b) (d = _mm256_or_si256(a,b))
#define NOT64(d, s) (d = _mm256_xor_si256(s,m256_neg1))
#define ROL64(d, v, n) (d = mm256_rol_64(v, n))
#define XOROR(d, a, b, c) (d = _mm256_xor_si256(a, _mm256_or_si256(b, c)))
#define XORAND(d, a, b, c) (d = _mm256_xor_si256(a, _mm256_and_si256(b, c)))
#include "keccak-macros.c"
@@ -419,5 +426,7 @@ keccak512_4way_close(void *cc, void *dst)
#undef NOT64
#undef ROL64
#undef KECCAK_F_1600
#undef XOROR
#undef XORAND
#endif // AVX2

View File

@@ -110,20 +110,34 @@
#ifdef KHI_XO
#undef KHI_XO
#endif
#define KHI_XO(d, a, b, c) do { \
XOROR(d, a, b, c); \
} while (0)
/*
#define KHI_XO(d, a, b, c) do { \
DECL64(kt); \
OR64(kt, b, c); \
XOR64(d, a, kt); \
} while (0)
*/
#ifdef KHI_XA
#undef KHI_XA
#endif
#define KHI_XA(d, a, b, c) do { \
XORAND(d, a, b, c); \
} while (0)
/*
#define KHI_XA(d, a, b, c) do { \
DECL64(kt); \
AND64(kt, b, c); \
XOR64(d, a, kt); \
} while (0)
*/
#ifdef KHI
#undef KHI
@@ -134,65 +148,47 @@
do { \
DECL64(c0); \
DECL64(c1); \
DECL64(c2); \
DECL64(c3); \
DECL64(c4); \
DECL64(bnn); \
NOT64(bnn, b20); \
KHI_XO(c0, b00, b10, b20); \
KHI_XO(c1, b10, bnn, b30); \
KHI_XA(c2, b20, b30, b40); \
KHI_XO(c3, b30, b40, b00); \
KHI_XA(c4, b40, b00, b10); \
KHI_XA(b20, b20, b30, b40); \
KHI_XO(b30, b30, b40, b00); \
KHI_XA(b40, b40, b00, b10); \
MOV64(b00, c0); \
MOV64(b10, c1); \
MOV64(b20, c2); \
MOV64(b30, c3); \
MOV64(b40, c4); \
NOT64(bnn, b41); \
KHI_XO(c0, b01, b11, b21); \
KHI_XA(c1, b11, b21, b31); \
KHI_XO(c2, b21, b31, bnn); \
KHI_XO(c3, b31, b41, b01); \
KHI_XA(c4, b41, b01, b11); \
KHI_XO(b21, b21, b31, bnn); \
KHI_XO(b31, b31, b41, b01); \
KHI_XA(b41, b41, b01, b11); \
MOV64(b01, c0); \
MOV64(b11, c1); \
MOV64(b21, c2); \
MOV64(b31, c3); \
MOV64(b41, c4); \
NOT64(bnn, b32); \
KHI_XO(c0, b02, b12, b22); \
KHI_XA(c1, b12, b22, b32); \
KHI_XA(c2, b22, bnn, b42); \
KHI_XO(c3, bnn, b42, b02); \
KHI_XA(c4, b42, b02, b12); \
KHI_XA(b22, b22, bnn, b42); \
KHI_XO(b32, bnn, b42, b02); \
KHI_XA(b42, b42, b02, b12); \
MOV64(b02, c0); \
MOV64(b12, c1); \
MOV64(b22, c2); \
MOV64(b32, c3); \
MOV64(b42, c4); \
NOT64(bnn, b33); \
KHI_XA(c0, b03, b13, b23); \
KHI_XO(c1, b13, b23, b33); \
KHI_XO(c2, b23, bnn, b43); \
KHI_XA(c3, bnn, b43, b03); \
KHI_XO(c4, b43, b03, b13); \
KHI_XO(b23, b23, bnn, b43); \
KHI_XA(b33, bnn, b43, b03); \
KHI_XO(b43, b43, b03, b13); \
MOV64(b03, c0); \
MOV64(b13, c1); \
MOV64(b23, c2); \
MOV64(b33, c3); \
MOV64(b43, c4); \
NOT64(bnn, b14); \
KHI_XA(c0, b04, bnn, b24); \
KHI_XO(c1, bnn, b24, b34); \
KHI_XA(c2, b24, b34, b44); \
KHI_XO(c3, b34, b44, b04); \
KHI_XA(c4, b44, b04, b14); \
KHI_XA(b24, b24, b34, b44); \
KHI_XO(b34, b34, b44, b04); \
KHI_XA(b44, b44, b04, b14); \
MOV64(b04, c0); \
MOV64(b14, c1); \
MOV64(b24, c2); \
MOV64(b34, c3); \
MOV64(b44, c4); \
} while (0)
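KHI is the vectorized Keccak chi step. Its scalar form is a[x][y] = b[x][y] ^ (~b[x+1][y] & b[x+2][y]) with x taken mod 5; the macro hoists one NOT64 per row into bnn and expresses the remaining combinations through the fused KHI_XO / KHI_XA forms. A scalar reference sketch:

   // chi: per 64-bit lane, column indices mod 5
   for ( int y = 0; y < 5; y++ )
      for ( int x = 0; x < 5; x++ )
         A[x][y] = B[x][y] ^ ( ~B[(x+1)%5][y] & B[(x+2)%5][y] );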
#ifdef IOTA
@@ -201,6 +197,7 @@
#define IOTA(r) XOR64_IOTA(a00, a00, r)
#ifdef P0
#undef P0
#undef P1
#undef P2
#undef P3

View File

@@ -66,6 +66,17 @@ static const uint32 CNS_INIT[128] __attribute((aligned(64))) = {
a = _mm512_xor_si512(a,c0);\
b = _mm512_xor_si512(b,c1);
#define MULT24W( a0, a1 ) \
do { \
__m512i b = _mm512_xor_si512( a0, \
_mm512_maskz_shuffle_epi32( 0xbbbb, a1, 16 ) ); \
a0 = _mm512_or_si512( _mm512_bsrli_epi128( b, 4 ), \
_mm512_bslli_epi128( a1,12 ) ); \
a1 = _mm512_or_si512( _mm512_bsrli_epi128( a1, 4 ), \
_mm512_bslli_epi128( b,12 ) ); \
} while(0)
/*
#define MULT24W( a0, a1, mask ) \
do { \
__m512i b = _mm512_xor_si512( a0, \
@@ -73,6 +84,7 @@ do { \
a0 = _mm512_or_si512( _mm512_bsrli_epi128(b,4), _mm512_bslli_epi128(a1,12) );\
a1 = _mm512_or_si512( _mm512_bsrli_epi128(a1,4), _mm512_bslli_epi128(b,12) );\
} while(0)
*/
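MULT24W is the 4-way form of Luffa's MULT2, the multiply-by-x tweak applied between message injections. The rewrite folds the old AND-with-MASK into a zero-masking shuffle: mask 0xbbbb clears dword 2 of each 128-bit lane, so the separate MASK constant and the AND disappear. For orientation, a scalar sketch of MULT2 on one 256-bit chaining word a[0..7], following the Luffa reference formulation:

   uint32_t tmp = a[7];
   a[7] = a[6];
   a[6] = a[5];
   a[5] = a[4];
   a[4] = a[3] ^ tmp;
   a[3] = a[2] ^ tmp;
   a[2] = a[1];
   a[1] = a[0] ^ tmp;
   a[0] = tmp;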
// confirm pointer arithmetic
// ok but use array indexes
@@ -85,6 +97,21 @@ do { \
MIXWORD4W(*(x+3),*(x+7),*t,*(t+1));\
ADD_CONSTANT4W(*x, *(x+4), c0, c1);
#define SUBCRUMB4W(a0,a1,a2,a3,t)\
t = a0;\
a0 = mm512_xoror( a3, a0, a1 ); \
a2 = _mm512_xor_si512(a2,a3);\
a1 = _mm512_ternarylogic_epi64( a1, a3, t, 0x87 ); /* a1 xnor (a3 & t) */ \
a3 = mm512_xorand( a2, a3, t ); \
a2 = mm512_xorand( a1, a2, a0);\
a1 = _mm512_or_si512(a1,a3);\
a3 = _mm512_xor_si512(a3,a2);\
t = _mm512_xor_si512(t,a1);\
a2 = _mm512_and_si512(a2,a1);\
a1 = mm512_xnor(a1,a0);\
a0 = t;
/*
#define SUBCRUMB4W(a0,a1,a2,a3,t)\
t = _mm512_load_si512(&a0);\
a0 = _mm512_or_si512(a0,a1);\
@@ -103,7 +130,25 @@ do { \
a2 = _mm512_and_si512(a2,a1);\
a1 = _mm512_xor_si512(a1,a0);\
a0 = _mm512_load_si512(&t);
*/
#define MIXWORD4W(a,b,t1,t2)\
b = _mm512_xor_si512(a,b);\
t1 = _mm512_slli_epi32(a,2);\
t2 = _mm512_srli_epi32(a,30);\
a = mm512_xoror( b, t1, t2 ); \
t1 = _mm512_slli_epi32(b,14);\
t2 = _mm512_srli_epi32(b,18);\
b = _mm512_or_si512(t1,t2);\
b = mm512_xoror( a, t1, t2 ); \
t1 = _mm512_slli_epi32(a,10);\
t2 = _mm512_srli_epi32(a,22);\
a = mm512_xoror( b, t1, t2 ); \
t1 = _mm512_slli_epi32(b,1);\
t2 = _mm512_srli_epi32(b,31);\
b = _mm512_or_si512(t1,t2);
/*
#define MIXWORD4W(a,b,t1,t2)\
b = _mm512_xor_si512(a,b);\
t1 = _mm512_slli_epi32(a,2);\
@@ -121,6 +166,7 @@ do { \
t1 = _mm512_slli_epi32(b,1);\
t2 = _mm512_srli_epi32(b,31);\
b = _mm512_or_si512(t1,t2);
*/
#define STEP_PART24W(a0,a1,t0,t1,c0,c1,tmp0,tmp1)\
a1 = _mm512_shuffle_epi32(a1,147);\
@@ -235,21 +281,13 @@ void rnd512_4way( luffa_4way_context *state, __m512i *msg )
__m512i msg0, msg1;
__m512i tmp[2];
__m512i x[8];
const __m512i MASK = m512_const2_64( 0, 0x00000000ffffffff );
t0 = chainv[0];
t1 = chainv[1];
t0 = mm512_xor3( chainv[0], chainv[2], chainv[4] );
t1 = mm512_xor3( chainv[1], chainv[3], chainv[5] );
t0 = mm512_xor3( t0, chainv[6], chainv[8] );
t1 = mm512_xor3( t1, chainv[7], chainv[9] );
t0 = _mm512_xor_si512( t0, chainv[2] );
t1 = _mm512_xor_si512( t1, chainv[3] );
t0 = _mm512_xor_si512( t0, chainv[4] );
t1 = _mm512_xor_si512( t1, chainv[5] );
t0 = _mm512_xor_si512( t0, chainv[6] );
t1 = _mm512_xor_si512( t1, chainv[7] );
t0 = _mm512_xor_si512( t0, chainv[8] );
t1 = _mm512_xor_si512( t1, chainv[9] );
MULT24W( t0, t1, MASK );
MULT24W( t0, t1 );
msg0 = _mm512_shuffle_epi32( msg[0], 27 );
msg1 = _mm512_shuffle_epi32( msg[1], 27 );
@@ -268,68 +306,67 @@ void rnd512_4way( luffa_4way_context *state, __m512i *msg )
t0 = chainv[0];
t1 = chainv[1];
MULT24W( chainv[0], chainv[1], MASK );
MULT24W( chainv[0], chainv[1] );
chainv[0] = _mm512_xor_si512( chainv[0], chainv[2] );
chainv[1] = _mm512_xor_si512( chainv[1], chainv[3] );
MULT24W( chainv[2], chainv[3], MASK );
MULT24W( chainv[2], chainv[3] );
chainv[2] = _mm512_xor_si512(chainv[2], chainv[4]);
chainv[3] = _mm512_xor_si512(chainv[3], chainv[5]);
MULT24W( chainv[4], chainv[5], MASK );
MULT24W( chainv[4], chainv[5] );
chainv[4] = _mm512_xor_si512(chainv[4], chainv[6]);
chainv[5] = _mm512_xor_si512(chainv[5], chainv[7]);
MULT24W( chainv[6], chainv[7], MASK );
MULT24W( chainv[6], chainv[7] );
chainv[6] = _mm512_xor_si512(chainv[6], chainv[8]);
chainv[7] = _mm512_xor_si512(chainv[7], chainv[9]);
MULT24W( chainv[8], chainv[9], MASK );
MULT24W( chainv[8], chainv[9] );
chainv[8] = _mm512_xor_si512( chainv[8], t0 );
chainv[9] = _mm512_xor_si512( chainv[9], t1 );
t0 = chainv[8];
t1 = chainv[9];
MULT24W( chainv[8], chainv[9], MASK );
MULT24W( chainv[8], chainv[9] );
chainv[8] = _mm512_xor_si512( chainv[8], chainv[6] );
chainv[9] = _mm512_xor_si512( chainv[9], chainv[7] );
MULT24W( chainv[6], chainv[7], MASK );
MULT24W( chainv[6], chainv[7] );
chainv[6] = _mm512_xor_si512( chainv[6], chainv[4] );
chainv[7] = _mm512_xor_si512( chainv[7], chainv[5] );
MULT24W( chainv[4], chainv[5], MASK );
MULT24W( chainv[4], chainv[5] );
chainv[4] = _mm512_xor_si512( chainv[4], chainv[2] );
chainv[5] = _mm512_xor_si512( chainv[5], chainv[3] );
MULT24W( chainv[2], chainv[3], MASK );
MULT24W( chainv[2], chainv[3] );
chainv[2] = _mm512_xor_si512( chainv[2], chainv[0] );
chainv[3] = _mm512_xor_si512( chainv[3], chainv[1] );
MULT24W( chainv[0], chainv[1], MASK );
chainv[0] = _mm512_xor_si512( _mm512_xor_si512( chainv[0], t0 ), msg0 );
chainv[1] = _mm512_xor_si512( _mm512_xor_si512( chainv[1], t1 ), msg1 );
MULT24W( chainv[0], chainv[1] );
chainv[0] = mm512_xor3( chainv[0], t0, msg0 );
chainv[1] = mm512_xor3( chainv[1], t1, msg1 );
MULT24W( msg0, msg1, MASK );
MULT24W( msg0, msg1 );
chainv[2] = _mm512_xor_si512( chainv[2], msg0 );
chainv[3] = _mm512_xor_si512( chainv[3], msg1 );
MULT24W( msg0, msg1, MASK );
MULT24W( msg0, msg1 );
chainv[4] = _mm512_xor_si512( chainv[4], msg0 );
chainv[5] = _mm512_xor_si512( chainv[5], msg1 );
MULT24W( msg0, msg1, MASK );
MULT24W( msg0, msg1 );
chainv[6] = _mm512_xor_si512( chainv[6], msg0 );
chainv[7] = _mm512_xor_si512( chainv[7], msg1 );
MULT24W( msg0, msg1, MASK );
MULT24W( msg0, msg1);
chainv[8] = _mm512_xor_si512( chainv[8], msg0 );
chainv[9] = _mm512_xor_si512( chainv[9], msg1 );
MULT24W( msg0, msg1, MASK );
MULT24W( msg0, msg1 );
// replace with ror
chainv[3] = _mm512_rol_epi32( chainv[3], 1 );
chainv[5] = _mm512_rol_epi32( chainv[5], 2 );
chainv[7] = _mm512_rol_epi32( chainv[7], 3 );
@@ -388,19 +425,11 @@ void finalization512_4way( luffa_4way_context *state, uint32 *b )
/*---- blank round with m=0 ----*/
rnd512_4way( state, zero );
t[0] = chainv[0];
t[1] = chainv[1];
t[0] = _mm512_xor_si512( t[0], chainv[2] );
t[1] = _mm512_xor_si512( t[1], chainv[3] );
t[0] = _mm512_xor_si512( t[0], chainv[4] );
t[1] = _mm512_xor_si512( t[1], chainv[5] );
t[0] = _mm512_xor_si512( t[0], chainv[6] );
t[1] = _mm512_xor_si512( t[1], chainv[7] );
t[0] = _mm512_xor_si512( t[0], chainv[8] );
t[1] = _mm512_xor_si512( t[1], chainv[9] );
t[0] = mm512_xor3( chainv[0], chainv[2], chainv[4] );
t[1] = mm512_xor3( chainv[1], chainv[3], chainv[5] );
t[0] = mm512_xor3( t[0], chainv[6], chainv[8] );
t[1] = mm512_xor3( t[1], chainv[7], chainv[9] );
t[0] = _mm512_shuffle_epi32( t[0], 27 );
t[1] = _mm512_shuffle_epi32( t[1], 27 );
@@ -496,7 +525,7 @@ int luffa_4way_update( luffa_4way_context *state, const void *data,
{
// remaining data bytes
buffer[0] = _mm512_shuffle_epi8( vdata[0], shuff_bswap32 );
buffer[1] = m512_const2_64( 0, 0x0000000080000000 );
buffer[1] = m512_const1_i128( 0x0000000080000000 );
}
return 0;
}
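The m512_const2_64( 0, x ) → m512_const1_i128( x ) substitutions here and in the following hunks spell the same constant more directly: one 128-bit value (high half zero) replicated into every lane. A hypothetical definition, assuming the helper broadcasts its argument into the low 64 bits of each 128-bit lane (the real one may construct the constant differently):

   static inline __m512i m512_const1_i128_sketch( uint64_t lo )
   {
      return _mm512_broadcast_i32x4( _mm_set_epi64x( 0, lo ) );
   }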
@@ -520,7 +549,7 @@ int luffa_4way_close( luffa_4way_context *state, void *hashval )
rnd512_4way( state, buffer );
else
{ // empty pad block, constant data
msg[0] = m512_const2_64( 0, 0x0000000080000000 );
msg[0] = m512_const1_i128( 0x0000000080000000 );
msg[1] = m512_zero;
rnd512_4way( state, msg );
}
@@ -583,13 +612,13 @@ int luffa512_4way_full( luffa_4way_context *state, void *output,
{
// padding of partial block
msg[0] = _mm512_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = m512_const2_64( 0, 0x0000000080000000 );
msg[1] = m512_const1_i128( 0x0000000080000000 );
rnd512_4way( state, msg );
}
else
{
// empty pad block
msg[0] = m512_const2_64( 0, 0x0000000080000000 );
msg[0] = m512_const1_i128( 0x0000000080000000 );
msg[1] = m512_zero;
rnd512_4way( state, msg );
}
@@ -631,13 +660,13 @@ int luffa_4way_update_close( luffa_4way_context *state,
{
// padding of partial block
msg[0] = _mm512_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = m512_const2_64( 0, 0x0000000080000000 );
msg[1] = m512_const1_i128( 0x0000000080000000 );
rnd512_4way( state, msg );
}
else
{
// empty pad block
msg[0] = m512_const2_64( 0, 0x0000000080000000 );
msg[0] = m512_const1_i128( 0x0000000080000000 );
msg[1] = m512_zero;
rnd512_4way( state, msg );
}
@@ -666,8 +695,6 @@ do { \
a1 = _mm256_or_si256( _mm256_srli_si256(a1,4), _mm256_slli_si256(b,12) ); \
} while(0)
// confirm pointer arithmetic
// ok but use array indexes
#define STEP_PART(x,c0,c1,t)\
SUBCRUMB(*x,*(x+1),*(x+2),*(x+3),*t);\
SUBCRUMB(*(x+5),*(x+6),*(x+7),*(x+4),*t);\
@@ -678,23 +705,23 @@ do { \
ADD_CONSTANT(*x, *(x+4), c0, c1);
#define SUBCRUMB(a0,a1,a2,a3,t)\
t = _mm256_load_si256(&a0);\
t = a0;\
a0 = _mm256_or_si256(a0,a1);\
a2 = _mm256_xor_si256(a2,a3);\
a1 = _mm256_andnot_si256(a1, m256_neg1 );\
a1 = mm256_not( a1 );\
a0 = _mm256_xor_si256(a0,a3);\
a3 = _mm256_and_si256(a3,t);\
a1 = _mm256_xor_si256(a1,a3);\
a3 = _mm256_xor_si256(a3,a2);\
a2 = _mm256_and_si256(a2,a0);\
a0 = _mm256_andnot_si256(a0, m256_neg1 );\
a0 = mm256_not( a0 );\
a2 = _mm256_xor_si256(a2,a1);\
a1 = _mm256_or_si256(a1,a3);\
t = _mm256_xor_si256(t,a1);\
a3 = _mm256_xor_si256(a3,a2);\
a2 = _mm256_and_si256(a2,a1);\
a1 = _mm256_xor_si256(a1,a0);\
a0 = _mm256_load_si256(&t);\
a0 = t;\
#define MIXWORD(a,b,t1,t2)\
b = _mm256_xor_si256(a,b);\
@@ -832,7 +859,7 @@ void rnd512_2way( luffa_2way_context *state, __m256i *msg )
__m256i msg0, msg1;
__m256i tmp[2];
__m256i x[8];
const __m256i MASK = m256_const2_64( 0, 0x00000000ffffffff );
const __m256i MASK = m256_const1_i128( 0x00000000ffffffff );
t0 = chainv[0];
t1 = chainv[1];
@@ -1088,7 +1115,7 @@ int luffa_2way_update( luffa_2way_context *state, const void *data,
{
// remaining data bytes
buffer[0] = _mm256_shuffle_epi8( vdata[0], shuff_bswap32 );
buffer[1] = m256_const2_64( 0, 0x0000000080000000 );
buffer[1] = m256_const1_i128( 0x0000000080000000 );
}
return 0;
}
@@ -1104,7 +1131,7 @@ int luffa_2way_close( luffa_2way_context *state, void *hashval )
rnd512_2way( state, buffer );
else
{ // empty pad block, constant data
msg[0] = m256_const2_64( 0, 0x0000000080000000 );
msg[0] = m256_const1_i128( 0x0000000080000000 );
msg[1] = m256_zero;
rnd512_2way( state, msg );
}
@@ -1159,13 +1186,13 @@ int luffa512_2way_full( luffa_2way_context *state, void *output,
{
// padding of partial block
msg[0] = _mm256_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = m256_const2_64( 0, 0x0000000080000000 );
msg[1] = m256_const1_i128( 0x0000000080000000 );
rnd512_2way( state, msg );
}
else
{
// empty pad block
msg[0] = m256_const2_64( 0, 0x0000000080000000 );
msg[0] = m256_const1_i128( 0x0000000080000000 );
msg[1] = m256_zero;
rnd512_2way( state, msg );
}
@@ -1206,13 +1233,13 @@ int luffa_2way_update_close( luffa_2way_context *state,
{
// padding of partial block
msg[0] = _mm256_shuffle_epi8( vdata[ 0 ], shuff_bswap32 );
msg[1] = m256_const2_64( 0, 0x0000000080000000 );
msg[1] = m256_const1_i128( 0x0000000080000000 );
rnd512_2way( state, msg );
}
else
{
// empty pad block
msg[0] = m256_const2_64( 0, 0x0000000080000000 );
msg[0] = m256_const1_i128( 0x0000000080000000 );
msg[1] = m256_zero;
rnd512_2way( state, msg );
}

View File

@@ -23,7 +23,7 @@
#include "simd-utils.h"
#include "luffa_for_sse2.h"
#define MULT2(a0,a1) do \
#define MULT2( a0, a1 ) do \
{ \
__m128i b = _mm_xor_si128( a0, _mm_shuffle_epi32( _mm_and_si128(a1,MASK), 16 ) ); \
a0 = _mm_or_si128( _mm_srli_si128(b,4), _mm_slli_si128(a1,12) ); \
@@ -345,11 +345,11 @@ HashReturn update_and_final_luffa( hashState_luffa *state, BitSequence* output,
// 16 byte partial block exists for 80 byte len
if ( state->rembytes )
// padding of partial block
rnd512( state, m128_const_64( 0, 0x80000000 ),
rnd512( state, m128_const_i128( 0x80000000 ),
mm128_bswap_32( cast_m128i( data ) ) );
else
// empty pad block
rnd512( state, m128_zero, m128_const_64( 0, 0x80000000 ) );
rnd512( state, m128_zero, m128_const_i128( 0x80000000 ) );
finalization512( state, (uint32*) output );
if ( state->hashbitlen > 512 )
@@ -394,11 +394,11 @@ int luffa_full( hashState_luffa *state, BitSequence* output, int hashbitlen,
// 16 byte partial block exists for 80 byte len
if ( state->rembytes )
// padding of partial block
rnd512( state, m128_const_64( 0, 0x80000000 ),
rnd512( state, m128_const_i128( 0x80000000 ),
mm128_bswap_32( cast_m128i( data ) ) );
else
// empty pad block
rnd512( state, m128_zero, m128_const_64( 0, 0x80000000 ) );
rnd512( state, m128_zero, m128_const_i128( 0x80000000 ) );
finalization512( state, (uint32*) output );
if ( state->hashbitlen > 512 )
@@ -606,7 +606,6 @@ static void finalization512( hashState_luffa *state, uint32 *b )
casti_m256i( b, 0 ) = _mm256_shuffle_epi8(
casti_m256i( hash, 0 ), shuff_bswap32 );
// casti_m256i( b, 0 ) = mm256_bswap_32( casti_m256i( hash, 0 ) );
rnd512( state, zero, zero );
@@ -621,7 +620,6 @@ static void finalization512( hashState_luffa *state, uint32 *b )
casti_m256i( b, 1 ) = _mm256_shuffle_epi8(
casti_m256i( hash, 0 ), shuff_bswap32 );
// casti_m256i( b, 1 ) = mm256_bswap_32( casti_m256i( hash, 0 ) );
}
#else

View File

@@ -174,24 +174,19 @@ void allium_16way_hash( void *state, const void *input )
#if defined(__VAES__)
intrlv_4x128( vhash, hash0, hash1, hash2, hash3, 256 );
groestl256_4way_full( &ctx.groestl, vhash, vhash, 256 );
groestl256_4way_full( &ctx.groestl, vhash, vhash, 32 );
dintrlv_4x128( state, state+32, state+64, state+96, vhash, 256 );
intrlv_4x128( vhash, hash4, hash5, hash6, hash7, 256 );
groestl256_4way_full( &ctx.groestl, vhash, vhash, 256 );
groestl256_4way_full( &ctx.groestl, vhash, vhash, 32 );
dintrlv_4x128( state+128, state+160, state+192, state+224, vhash, 256 );
intrlv_4x128( vhash, hash8, hash9, hash10, hash11, 256 );
groestl256_4way_full( &ctx.groestl, vhash, vhash, 256 );
groestl256_4way_full( &ctx.groestl, vhash, vhash, 32 );
dintrlv_4x128( state+256, state+288, state+320, state+352, vhash, 256 );
intrlv_4x128( vhash, hash12, hash13, hash14, hash15, 256 );
groestl256_4way_full( &ctx.groestl, vhash, vhash, 256 );
intrlv_4x128( vhash, hash12, hash13, hash14, hash15, 256 );
groestl256_4way_full( &ctx.groestl, vhash, vhash, 32 );
dintrlv_4x128( state+384, state+416, state+448, state+480, vhash, 256 );
#else
@@ -262,8 +257,11 @@ typedef struct {
keccak256_4way_context keccak;
cubehashParam cube;
skein256_4way_context skein;
#if defined(__VAES__)
groestl256_2way_context groestl;
#else
hashState_groestl256 groestl;
#endif
} allium_8way_ctx_holder;
static __thread allium_8way_ctx_holder allium_8way_ctx;
@@ -273,7 +271,11 @@ bool init_allium_8way_ctx()
keccak256_4way_init( &allium_8way_ctx.keccak );
cubehashInit( &allium_8way_ctx.cube, 256, 16, 32 );
skein256_4way_init( &allium_8way_ctx.skein );
#if defined(__VAES__)
groestl256_2way_init( &allium_8way_ctx.groestl, 32 );
#else
init_groestl256( &allium_8way_ctx.groestl, 32 );
#endif
return true;
}
@@ -352,9 +354,28 @@ void allium_8way_hash( void *hash, const void *input )
skein256_4way_update( &ctx.skein, vhashB, 32 );
skein256_4way_close( &ctx.skein, vhashB );
#if defined(__VAES__)
uint64_t vhashC[4*2] __attribute__ ((aligned (64)));
uint64_t vhashD[4*2] __attribute__ ((aligned (64)));
rintrlv_4x64_2x128( vhashC, vhashD, vhashA, 256 );
groestl256_2way_full( &ctx.groestl, vhashC, vhashC, 32 );
groestl256_2way_full( &ctx.groestl, vhashD, vhashD, 32 );
dintrlv_2x128( hash0, hash1, vhashC, 256 );
dintrlv_2x128( hash2, hash3, vhashD, 256 );
rintrlv_4x64_2x128( vhashC, vhashD, vhashB, 256 );
groestl256_2way_full( &ctx.groestl, vhashC, vhashC, 32 );
groestl256_2way_full( &ctx.groestl, vhashD, vhashD, 32 );
dintrlv_2x128( hash4, hash5, vhashC, 256 );
dintrlv_2x128( hash6, hash7, vhashD, 256 );
#else
dintrlv_4x64( hash0, hash1, hash2, hash3, vhashA, 256 );
dintrlv_4x64( hash4, hash5, hash6, hash7, vhashB, 256 );
groestl256_full( &ctx.groestl, hash0, hash0, 256 );
groestl256_full( &ctx.groestl, hash1, hash1, 256 );
groestl256_full( &ctx.groestl, hash2, hash2, 256 );
@@ -363,6 +384,8 @@ void allium_8way_hash( void *hash, const void *input )
groestl256_full( &ctx.groestl, hash5, hash5, 256 );
groestl256_full( &ctx.groestl, hash6, hash6, 256 );
groestl256_full( &ctx.groestl, hash7, hash7, 256 );
#endif
}
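rintrlv_4x64_2x128 re-interleaves the 4-lane 64-bit skein output into two 2-lane 128-bit buffers so groestl256_2way can pack two 128-bit AES states per VAES register. Conceptually, for lanes A..D of 64-bit words:

   // 4x64  : A0 B0 C0 D0 | A1 B1 C1 D1 | ...
   // 2x128 : vhashC = A0 A1 B0 B1 | A2 A3 B2 B3 | ...   (lanes A,B)
   //         vhashD = C0 C1 D0 D1 | C2 C3 D2 D3 | ...   (lanes C,D)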
int scanhash_allium_8way( struct work *work, uint32_t max_nonce,

View File

@@ -187,7 +187,8 @@ bool register_allium_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_allium;
gate->hash = (void*)&allium_hash;
#endif
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT | VAES_OPT;
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT
| VAES_OPT | VAES256_OPT;
opt_target_factor = 256.0;
return true;
};
@@ -215,9 +216,6 @@ void phi2_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
size_t t;
algo_gate.gen_merkle_root( merkle_tree, sctx );
// Increment extranonce2
for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ );
// Assemble block header
algo_gate.build_block_header( g_work, le32dec( sctx->job.version ),
(uint32_t*) sctx->job.prevhash, (uint32_t*) merkle_tree,
le32dec( sctx->job.ntime ), le32dec(sctx->job.nbits), NULL );
@@ -225,7 +223,6 @@ void phi2_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
g_work->data[ 20+t ] = ((uint32_t*)sctx->job.extra)[t];
}
bool register_phi2_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT | VAES_OPT;

View File

@@ -4,7 +4,7 @@
#include "algo/gost/sph_gost.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "lyra2.h"
#if defined(__VAES__)
#if defined(__VAES__) && defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#include "algo/echo/echo-hash-4way.h"
#elif defined(__AES__)
#include "algo/echo/aes_ni/hash_api.h"
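The same four-way AVX-512 feature test now gates every VAES path that also needs 512-bit registers. A hypothetical convenience macro (not in the source, sketched only to show the intent) would keep these guards readable:

   #if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
   #define HAVE_FULL_AVX512 1
   #endif
   // guards then become: #if defined(__VAES__) && defined(HAVE_FULL_AVX512)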

View File

@@ -12,8 +12,7 @@
#include "algo/tiger/sph_tiger.h"
#include "algo/whirlpool/sph_whirlpool.h"
#include "algo/ripemd/sph_ripemd.h"
#include <openssl/sha.h>
#include "algo/sha/sph_sha2.h"
#define EPSa DBL_EPSILON
#define EPS1 DBL_EPSILON
@@ -105,8 +104,8 @@ uint32_t sw2_( int nnounce )
}
typedef struct {
SHA256_CTX sha256;
SHA512_CTX sha512;
sph_sha256_context sha256;
sph_sha512_context sha512;
sph_keccak512_context keccak;
sph_whirlpool_context whirlpool;
sph_haval256_5_context haval;
@@ -118,8 +117,8 @@ m7m_ctx_holder m7m_ctx;
void init_m7m_ctx()
{
SHA256_Init( &m7m_ctx.sha256 );
SHA512_Init( &m7m_ctx.sha512 );
sph_sha256_init( &m7m_ctx.sha256 );
sph_sha512_init( &m7m_ctx.sha512 );
sph_keccak512_init( &m7m_ctx.keccak );
sph_whirlpool_init( &m7m_ctx.whirlpool );
sph_haval256_5_init( &m7m_ctx.haval );
@@ -143,11 +142,10 @@ int scanhash_m7m_hash( struct work* work, uint64_t max_nonce,
uint32_t hash[8] __attribute__((aligned(64)));
uint8_t bhash[7][64] __attribute__((aligned(64)));
uint32_t n = pdata[19] - 1;
int thr_id = mythr->id; // thr_id arg is deprecated
int thr_id = mythr->id;
uint32_t usw_, mpzscale;
const uint32_t first_nonce = pdata[19];
char data_str[161], hash_str[65], target_str[65];
//uint8_t *bdata = 0;
uint8_t bdata[8192] __attribute__ ((aligned (64)));
int i, digits;
int bytes;
@@ -155,12 +153,12 @@ int scanhash_m7m_hash( struct work* work, uint64_t max_nonce,
m7m_ctx_holder ctx1, ctx2 __attribute__ ((aligned (64)));
memcpy( &ctx1, &m7m_ctx, sizeof(m7m_ctx) );
SHA256_CTX ctxf_sha256;
sph_sha256_context ctxf_sha256;
memcpy(data, pdata, 80);
SHA256_Update( &ctx1.sha256, data, M7_MIDSTATE_LEN );
SHA512_Update( &ctx1.sha512, data, M7_MIDSTATE_LEN );
sph_sha256( &ctx1.sha256, data, M7_MIDSTATE_LEN );
sph_sha512( &ctx1.sha512, data, M7_MIDSTATE_LEN );
sph_keccak512( &ctx1.keccak, data, M7_MIDSTATE_LEN );
sph_whirlpool( &ctx1.whirlpool, data, M7_MIDSTATE_LEN );
sph_haval256_5( &ctx1.haval, data, M7_MIDSTATE_LEN );
@@ -191,11 +189,11 @@ int scanhash_m7m_hash( struct work* work, uint64_t max_nonce,
memcpy( &ctx2, &ctx1, sizeof(m7m_ctx) );
SHA256_Update( &ctx2.sha256, data_p64, 80 - M7_MIDSTATE_LEN );
SHA256_Final( (unsigned char*) (bhash[0]), &ctx2.sha256 );
sph_sha256( &ctx2.sha256, data_p64, 80 - M7_MIDSTATE_LEN );
sph_sha256_close( &ctx2.sha256, bhash[0] );
SHA512_Update( &ctx2.sha512, data_p64, 80 - M7_MIDSTATE_LEN );
SHA512_Final( (unsigned char*) (bhash[1]), &ctx2.sha512 );
sph_sha512( &ctx2.sha512, data_p64, 80 - M7_MIDSTATE_LEN );
sph_sha512_close( &ctx2.sha512, bhash[1] );
sph_keccak512( &ctx2.keccak, data_p64, 80 - M7_MIDSTATE_LEN );
sph_keccak512_close( &ctx2.keccak, (void*)(bhash[2]) );
@@ -227,9 +225,9 @@ int scanhash_m7m_hash( struct work* work, uint64_t max_nonce,
bytes = mpz_sizeinbase(product, 256);
mpz_export((void *)bdata, NULL, -1, 1, 0, 0, product);
SHA256_Init( &ctxf_sha256 );
SHA256_Update( &ctxf_sha256, bdata, bytes );
SHA256_Final( (unsigned char*) hash, &ctxf_sha256 );
sph_sha256_init( &ctxf_sha256 );
sph_sha256( &ctxf_sha256, bdata, bytes );
sph_sha256_close( &ctxf_sha256, hash );
digits=(int)((sqrt((double)(n/2))*(1.+EPS))/9000+75);
mp_bitcnt_t prec = (long int)(digits*BITS_PER_DIGIT+16);
@@ -262,18 +260,13 @@ int scanhash_m7m_hash( struct work* work, uint64_t max_nonce,
mpzscale=bytes;
mpz_export(bdata, NULL, -1, 1, 0, 0, product);
SHA256_Init( &ctxf_sha256 );
SHA256_Update( &ctxf_sha256, bdata, bytes );
SHA256_Final( (unsigned char*) hash, &ctxf_sha256 );
}
sph_sha256_init( &ctxf_sha256 );
sph_sha256( &ctxf_sha256, bdata, bytes );
sph_sha256_close( &ctxf_sha256, hash );
}
if ( unlikely( valid_hash( (uint64_t*)hash, (uint64_t*)ptarget )
&& !opt_benchmark ) )
// if ( unlikely( hash[7] <= ptarget[7] ) )
// if ( likely( fulltest( hash, ptarget ) && !opt_benchmark ) )
{
if ( opt_debug )
{

View File

@@ -156,6 +156,8 @@ int scanhash_zr5( struct work *work, uint32_t max_nonce,
void zr5_get_new_work( struct work* work, struct work* g_work, int thr_id,
uint32_t* end_nonce_ptr )
{
// pthread_rwlock_rdlock( &g_work_lock );
// ignore POK in first word
const int wkcmp_sz = 72; // (19-1) * sizeof(uint32_t)
uint32_t *nonceptr = work->data + algo_gate.nonce_index;
@@ -171,6 +173,8 @@ void zr5_get_new_work( struct work* work, struct work* g_work, int thr_id,
}
else
++(*nonceptr);
// pthread_rwlock_unlock( &g_work_lock );
}
void zr5_display_pok( struct work* work )

View File

@@ -312,10 +312,26 @@ do { \
BUPDATE1_8W( 7, 1 ); \
} while (0)
#if defined(__AVX512VL__)
#define GAMMA_8W(n0, n1, n2, n4) \
( g ## n0 = _mm256_ternarylogic_epi32( a ## n0, a ## n2, a ## n1, 0x4b ) )
#define THETA_8W(n0, n1, n2, n4) \
( g ## n0 = mm256_xor3( a ## n0, a ## n1, a ## n4 ) )
#else
#define GAMMA_8W(n0, n1, n2, n4) \
(g ## n0 = _mm256_xor_si256( a ## n0, \
_mm256_or_si256( a ## n1, mm256_not( a ## n2 ) ) ) )
#define THETA_8W(n0, n1, n2, n4) \
( g ## n0 = _mm256_xor_si256( a ## n0, _mm256_xor_si256( a ## n1, \
a ## n4 ) ) )
#endif
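The 0x4b immediate is just the truth table of the expression it replaces: with operand order ( a##n0, a##n2, a##n1 ) the required function is f(A,B,C) = A ^ (C | ~B), and tabulating f for (A,B,C) from 111 down to 000 gives 01001011 = 0x4b. The same check confirms the 0x87 immediate in the Luffa SUBCRUMB4W earlier: ~(a1 ^ (a3 & t)) is the bitwise complement of the 0x78 xor-and table.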
#define PI_ALL_8W do { \
a0 = g0; \
a1 = mm256_rol_32( g7, 1 ); \
@@ -336,9 +352,6 @@ do { \
a16 = mm256_rol_32( g10, 8 ); \
} while (0)
#define THETA_8W(n0, n1, n2, n4) \
( g ## n0 = _mm256_xor_si256( a ## n0, _mm256_xor_si256( a ## n1, \
a ## n4 ) ) )
#define SIGMA_ALL_8W do { \
a0 = _mm256_xor_si256( g0, m256_one_32 ); \

View File

@@ -16,7 +16,7 @@
#include "algo/simd/simd-hash-2way.h"
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/hamsi/hamsi-hash-4way.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/fugue/fugue-aesni.h"
#include "algo/shabal/shabal-hash-4way.h"
#include "algo/whirlpool/sph_whirlpool.h"
#include "algo/haval/haval-hash-4way.h"
@@ -40,7 +40,7 @@ union _hmq1725_8way_context_overlay
cube_4way_context cube;
simd_4way_context simd;
hamsi512_8way_context hamsi;
sph_fugue512_context fugue;
hashState_fugue fugue;
shabal512_8way_context shabal;
sph_whirlpool_context whirlpool;
sha512_8way_context sha512;
@@ -363,14 +363,14 @@ extern void hmq1725_8way_hash(void *state, const void *input)
dintrlv_8x64_512( hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7, vhash );
sph_fugue512_full( &ctx.fugue, hash0, hash0, 64 );
sph_fugue512_full( &ctx.fugue, hash1, hash1, 64 );
sph_fugue512_full( &ctx.fugue, hash2, hash2, 64 );
sph_fugue512_full( &ctx.fugue, hash3, hash3, 64 );
sph_fugue512_full( &ctx.fugue, hash4, hash4, 64 );
sph_fugue512_full( &ctx.fugue, hash5, hash5, 64 );
sph_fugue512_full( &ctx.fugue, hash6, hash6, 64 );
sph_fugue512_full( &ctx.fugue, hash7, hash7, 64 );
fugue512_full( &ctx.fugue, hash0, hash0, 64 );
fugue512_full( &ctx.fugue, hash1, hash1, 64 );
fugue512_full( &ctx.fugue, hash2, hash2, 64 );
fugue512_full( &ctx.fugue, hash3, hash3, 64 );
fugue512_full( &ctx.fugue, hash4, hash4, 64 );
fugue512_full( &ctx.fugue, hash5, hash5, 64 );
fugue512_full( &ctx.fugue, hash6, hash6, 64 );
fugue512_full( &ctx.fugue, hash7, hash7, 64 );
intrlv_8x64_512( vhash, hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7 );
@@ -459,21 +459,21 @@ extern void hmq1725_8way_hash(void *state, const void *input)
m512_zero );
if ( hash0[0] & mask )
sph_fugue512_full( &ctx.fugue, hash0, hash0, 64 );
fugue512_full( &ctx.fugue, hash0, hash0, 64 );
if ( hash1[0] & mask )
sph_fugue512_full( &ctx.fugue, hash1, hash1, 64 );
fugue512_full( &ctx.fugue, hash1, hash1, 64 );
if ( hash2[0] & mask )
sph_fugue512_full( &ctx.fugue, hash2, hash2, 64 );
fugue512_full( &ctx.fugue, hash2, hash2, 64 );
if ( hash3[0] & mask )
sph_fugue512_full( &ctx.fugue, hash3, hash3, 64 );
fugue512_full( &ctx.fugue, hash3, hash3, 64 );
if ( hash4[0] & mask )
sph_fugue512_full( &ctx.fugue, hash4, hash4, 64 );
fugue512_full( &ctx.fugue, hash4, hash4, 64 );
if ( hash5[0] & mask )
sph_fugue512_full( &ctx.fugue, hash5, hash5, 64 );
fugue512_full( &ctx.fugue, hash5, hash5, 64 );
if ( hash6[0] & mask )
sph_fugue512_full( &ctx.fugue, hash6, hash6, 64 );
fugue512_full( &ctx.fugue, hash6, hash6, 64 );
if ( hash7[0] & mask )
sph_fugue512_full( &ctx.fugue, hash7, hash7, 64 );
fugue512_full( &ctx.fugue, hash7, hash7, 64 );
intrlv_8x64_512( vhashA, hash0, hash1, hash2, hash3,
hash4, hash5, hash6, hash7 );
@@ -628,7 +628,7 @@ union _hmq1725_4way_context_overlay
simd_2way_context simd;
hashState_echo echo;
hamsi512_4way_context hamsi;
sph_fugue512_context fugue;
hashState_fugue fugue;
shabal512_4way_context shabal;
sph_whirlpool_context whirlpool;
sha512_4way_context sha512;
@@ -846,10 +846,10 @@ extern void hmq1725_4way_hash(void *state, const void *input)
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
sph_fugue512_full( &ctx.fugue, hash0, hash0, 64 );
sph_fugue512_full( &ctx.fugue, hash1, hash1, 64 );
sph_fugue512_full( &ctx.fugue, hash2, hash2, 64 );
sph_fugue512_full( &ctx.fugue, hash3, hash3, 64 );
fugue512_full( &ctx.fugue, hash0, hash0, 64 );
fugue512_full( &ctx.fugue, hash1, hash1, 64 );
fugue512_full( &ctx.fugue, hash2, hash2, 64 );
fugue512_full( &ctx.fugue, hash3, hash3, 64 );
// In this situation serial simd seems to be faster.
@@ -912,7 +912,7 @@ extern void hmq1725_4way_hash(void *state, const void *input)
sph_whirlpool512_full( &ctx.whirlpool, hash2, hash2, 64 );
sph_whirlpool512_full( &ctx.whirlpool, hash3, hash3, 64 );
// A = fugue serial, B = sha512 prarallel
// A = fugue serial, B = sha512 parallel
intrlv_4x64( vhash, hash0, hash1, hash2, hash3, 512 );
@@ -920,13 +920,13 @@ extern void hmq1725_4way_hash(void *state, const void *input)
h_mask = _mm256_movemask_epi8( vh_mask );
if ( hash0[0] & mask )
sph_fugue512_full( &ctx.fugue, hash0, hash0, 64 );
fugue512_full( &ctx.fugue, hash0, hash0, 64 );
if ( hash1[0] & mask )
sph_fugue512_full( &ctx.fugue, hash1, hash1, 64 );
fugue512_full( &ctx.fugue, hash1, hash1, 64 );
if ( hash2[0] & mask )
sph_fugue512_full( &ctx.fugue, hash2, hash2, 64 );
fugue512_full( &ctx.fugue, hash2, hash2, 64 );
if ( hash3[0] & mask )
sph_fugue512_full( &ctx.fugue, hash3, hash3, 64 );
fugue512_full( &ctx.fugue, hash3, hash3, 64 );
intrlv_4x64( vhashA, hash0, hash1, hash2, hash3, 512 );

View File

@@ -17,13 +17,15 @@
#include "algo/shabal/sph_shabal.h"
#include "algo/whirlpool/sph_whirlpool.h"
#include "algo/haval/sph-haval.h"
#include <openssl/sha.h>
#include "algo/sha/sph_sha2.h"
#if defined(__AES__)
#include "algo/groestl/aes_ni/hash-groestl.h"
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/fugue/fugue-aesni.h"
#else
#include "algo/groestl/sph_groestl.h"
#include "algo/echo/sph_echo.h"
#include "algo/fugue/sph_fugue.h"
#endif
#include "algo/luffa/luffa_for_sse2.h"
#include "algo/cubehash/cubehash_sse2.h"
@@ -40,17 +42,18 @@ typedef struct {
sph_shavite512_context shavite1, shavite2;
hashState_sd simd1, simd2;
sph_hamsi512_context hamsi1;
sph_fugue512_context fugue1, fugue2;
sph_shabal512_context shabal1;
sph_whirlpool_context whirlpool1, whirlpool2, whirlpool3, whirlpool4;
SHA512_CTX sha1, sha2;
sph_sha512_context sha1, sha2;
sph_haval256_5_context haval1, haval2;
#if defined(__AES__)
hashState_echo echo1, echo2;
hashState_groestl groestl1, groestl2;
hashState_fugue fugue1, fugue2;
#else
sph_groestl512_context groestl1, groestl2;
sph_echo512_context echo1, echo2;
sph_fugue512_context fugue1, fugue2;
#endif
} hmq1725_ctx_holder;
@@ -88,8 +91,13 @@ void init_hmq1725_ctx()
sph_hamsi512_init(&hmq1725_ctx.hamsi1);
#if defined(__AES__)
fugue512_Init( &hmq1725_ctx.fugue1, 512 );
fugue512_Init( &hmq1725_ctx.fugue2, 512 );
#else
sph_fugue512_init(&hmq1725_ctx.fugue1);
sph_fugue512_init(&hmq1725_ctx.fugue2);
#endif
sph_shabal512_init(&hmq1725_ctx.shabal1);
@@ -98,8 +106,8 @@ void init_hmq1725_ctx()
sph_whirlpool_init(&hmq1725_ctx.whirlpool3);
sph_whirlpool_init(&hmq1725_ctx.whirlpool4);
SHA512_Init( &hmq1725_ctx.sha1 );
SHA512_Init( &hmq1725_ctx.sha2 );
sph_sha512_init( &hmq1725_ctx.sha1 );
sph_sha512_init( &hmq1725_ctx.sha2 );
sph_haval256_5_init(&hmq1725_ctx.haval1);
sph_haval256_5_init(&hmq1725_ctx.haval2);
@@ -235,8 +243,13 @@ extern void hmq1725hash(void *state, const void *input)
sph_hamsi512 (&h_ctx.hamsi1, hashA, 64); //3
sph_hamsi512_close(&h_ctx.hamsi1, hashB); //4
#if defined(__AES__)
fugue512_Update( &h_ctx.fugue1, hashB, 512 ); //2 ////
fugue512_Final( &h_ctx.fugue1, hashA ); //3
#else
sph_fugue512 (&h_ctx.fugue1, hashB, 64); //2 ////
sph_fugue512_close(&h_ctx.fugue1, hashA); //3
#endif
if ( hashA[0] & mask ) //4
{
@@ -262,13 +275,18 @@ extern void hmq1725hash(void *state, const void *input)
if ( hashB[0] & mask ) //7
{
#if defined(__AES__)
fugue512_Update( &h_ctx.fugue2, hashB, 512 ); //
fugue512_Final( &h_ctx.fugue2, hashA ); //8
#else
sph_fugue512 (&h_ctx.fugue2, hashB, 64); //
sph_fugue512_close(&h_ctx.fugue2, hashA); //8
#endif
}
else
{
SHA512_Update( &h_ctx.sha1, hashB, 64 );
SHA512_Final( (unsigned char*) hashA, &h_ctx.sha1 );
sph_sha512( &h_ctx.sha1, hashB, 64 );
sph_sha512_close( &h_ctx.sha1, hashA );
}
#if defined(__AES__)
@@ -279,8 +297,8 @@ extern void hmq1725hash(void *state, const void *input)
sph_groestl512_close(&h_ctx.groestl2, hashB); //4
#endif
SHA512_Update( &h_ctx.sha2, hashB, 64 );
SHA512_Final( (unsigned char*) hashA, &h_ctx.sha2 );
sph_sha512( &h_ctx.sha2, hashB, 64 );
sph_sha512_close( &h_ctx.sha2, hashA );
if ( hashA[0] & mask ) //4
{

View File

@@ -127,10 +127,8 @@ void quark_8way_hash( void *state, const void *input )
rintrlv_8x64_4x128( vhashA, vhashB, vhash, 512 );
if ( ( vh_mask & 0x0f ) != 0x0f )
groestl512_4way_full( &ctx.groestl, vhashA, vhashA, 64 );
if ( ( vh_mask & 0xf0 ) != 0xf0 )
groestl512_4way_full( &ctx.groestl, vhashB, vhashB, 64 );
groestl512_4way_full( &ctx.groestl, vhashA, vhashA, 64 );
groestl512_4way_full( &ctx.groestl, vhashB, vhashB, 64 );
rintrlv_4x128_8x64( vhash, vhashA, vhashB, 512 );
@@ -139,22 +137,14 @@ void quark_8way_hash( void *state, const void *input )
dintrlv_8x64( hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
vhash, 512 );
if ( hash0[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
if ( hash1[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
if ( hash2[0] & 8)
groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
if ( hash3[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
if ( hash4[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash4, (char*)hash4, 512 );
if ( hash5[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash5, (char*)hash5, 512 );
if ( hash6[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash6, (char*)hash6, 512 );
if ( hash7[0] & 8 )
groestl512_full( &ctx.groestl, (char*)hash7, (char*)hash7, 512 );
groestl512_full( &ctx.groestl, (char*)hash0, (char*)hash0, 512 );
groestl512_full( &ctx.groestl, (char*)hash1, (char*)hash1, 512 );
groestl512_full( &ctx.groestl, (char*)hash2, (char*)hash2, 512 );
groestl512_full( &ctx.groestl, (char*)hash3, (char*)hash3, 512 );
groestl512_full( &ctx.groestl, (char*)hash4, (char*)hash4, 512 );
groestl512_full( &ctx.groestl, (char*)hash5, (char*)hash5, 512 );
groestl512_full( &ctx.groestl, (char*)hash6, (char*)hash6, 512 );
groestl512_full( &ctx.groestl, (char*)hash7, (char*)hash7, 512 );
intrlv_8x64( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6, hash7,
512 );
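With the per-lane tests removed, Groestl now runs unconditionally on all lanes; the lanes that logically took the other branch are presumably discarded afterwards by the existing mask-driven select, trading some wasted hashing for branch-free control flow. Conceptually:

   // per-lane select after computing both branches:
   // out[i] = ( vh_mask bit i set ) ? branchA[i] : branchB[i];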

View File

@@ -69,13 +69,9 @@ void lbry_build_block_header( struct work* g_work, uint32_t version,
void lbry_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
{
unsigned char merkle_root[64] = { 0 };
size_t t;
int i;
algo_gate.gen_merkle_root( merkle_root, sctx );
// Increment extranonce2
for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ );
// Assemble block header
memset( g_work->data, 0, sizeof(g_work->data) );
g_work->data[0] = le32dec( sctx->job.version );

View File

@@ -7,28 +7,28 @@
#include <string.h>
#include <stdio.h>
#include "sph_ripemd.h"
#include <openssl/sha.h>
#include "algo/sha/sph_sha2.h"
void lbry_hash(void* output, const void* input)
{
SHA256_CTX ctx_sha256 __attribute__ ((aligned (64)));
SHA512_CTX ctx_sha512 __attribute__ ((aligned (64)));
sph_ripemd160_context ctx_ripemd __attribute__ ((aligned (64)));
sph_sha256_context ctx_sha256 __attribute__ ((aligned (64)));
sph_sha512_context ctx_sha512 __attribute__ ((aligned (64)));
sph_ripemd160_context ctx_ripemd __attribute__ ((aligned (64)));
uint32_t _ALIGN(64) hashA[16];
uint32_t _ALIGN(64) hashB[16];
uint32_t _ALIGN(64) hashC[16];
SHA256_Init( &ctx_sha256 );
SHA256_Update( &ctx_sha256, input, 112 );
SHA256_Final( (unsigned char*) hashA, &ctx_sha256 );
sph_sha256_init( &ctx_sha256 );
sph_sha256( &ctx_sha256, input, 112 );
sph_sha256_close( &ctx_sha256, hashA );
SHA256_Init( &ctx_sha256 );
SHA256_Update( &ctx_sha256, hashA, 32 );
SHA256_Final( (unsigned char*) hashA, &ctx_sha256 );
sph_sha256_init( &ctx_sha256 );
sph_sha256( &ctx_sha256, hashA, 32 );
sph_sha256_close( &ctx_sha256, hashA );
SHA512_Init( &ctx_sha512 );
SHA512_Update( &ctx_sha512, hashA, 32 );
SHA512_Final( (unsigned char*) hashA, &ctx_sha512 );
sph_sha512_init( &ctx_sha512 );
sph_sha512( &ctx_sha512, hashA, 32 );
sph_sha512_close( &ctx_sha512, hashA );
sph_ripemd160_init( &ctx_ripemd );
sph_ripemd160 ( &ctx_ripemd, hashA, 32 );
@@ -38,14 +38,14 @@ void lbry_hash(void* output, const void* input)
sph_ripemd160 ( &ctx_ripemd, hashA+8, 32 );
sph_ripemd160_close( &ctx_ripemd, hashC );
SHA256_Init( &ctx_sha256 );
SHA256_Update( &ctx_sha256, hashB, 20 );
SHA256_Update( &ctx_sha256, hashC, 20 );
SHA256_Final( (unsigned char*) hashA, &ctx_sha256 );
sph_sha256_init( &ctx_sha256 );
sph_sha256( &ctx_sha256, hashB, 20 );
sph_sha256( &ctx_sha256, hashC, 20 );
sph_sha256_close( &ctx_sha256, hashA );
SHA256_Init( &ctx_sha256 );
SHA256_Update( &ctx_sha256, hashA, 32 );
SHA256_Final( (unsigned char*) hashA, &ctx_sha256 );
sph_sha256_init( &ctx_sha256 );
sph_sha256( &ctx_sha256, hashA, 32 );
sph_sha256_close( &ctx_sha256, hashA );
memcpy( output, hashA, 32 );
}

View File

@@ -1051,16 +1051,16 @@ int scanhash_neoscrypt( struct work *work,
uint32_t _ALIGN(64) hash[8];
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
int thr_id = mythr->id; // thr_id arg is deprecated
int thr_id = mythr->id;
while (pdata[19] < max_nonce && !work_restart[thr_id].restart)
{
neoscrypt((uint8_t *) hash, (uint8_t *) pdata );
/* Quick hash check */
if (hash[7] <= Htarg && fulltest_le(hash, ptarget)) {
*hashes_done = pdata[19] - first_nonce + 1;
return 1;
if (hash[7] <= Htarg && fulltest_le(hash, ptarget))
{
submit_solution( work, hash, mythr );
}
pdata[19]++;

View File

@@ -39,10 +39,17 @@
void
SHA256_Buf( const void * in, size_t len, uint8_t digest[32] )
{
SHA256_CTX ctx;
#if defined(HMAC_SPH_SHA)
sph_sha256_context ctx;
sph_sha256_init( &ctx );
sph_sha256( &ctx, in, len );
sph_sha256_close( &ctx, digest );
#else
SHA256_CTX ctx;
SHA256_Init( &ctx );
SHA256_Update( &ctx, in, len );
SHA256_Final( digest, &ctx );
#endif
}
/**
@@ -64,35 +71,59 @@ HMAC_SHA256_Buf( const void *K, size_t Klen, const void *in, size_t len,
void
HMAC_SHA256_Init( HMAC_SHA256_CTX *ctx, const void *_K, size_t Klen )
{
unsigned char pad[64];
unsigned char khash[32];
const unsigned char * K = _K;
size_t i;
unsigned char pad[64];
unsigned char khash[32];
const unsigned char * K = _K;
size_t i;
/* If Klen > 64, the key is really SHA256(K). */
if ( Klen > 64 )
/* If Klen > 64, the key is really SHA256(K). */
if ( Klen > 64 )
{
SHA256_Init( &ctx->ictx );
SHA256_Update( &ctx->ictx, K, Klen );
SHA256_Final( khash, &ctx->ictx );
K = khash;
Klen = 32;
}
#if defined(HMAC_SPH_SHA)
sph_sha256_init( &ctx->ictx );
sph_sha256( &ctx->ictx, K, Klen );
sph_sha256_close( &ctx->ictx, khash );
#else
SHA256_Init( &ctx->ictx );
SHA256_Update( &ctx->ictx, K, Klen );
SHA256_Final( khash, &ctx->ictx );
#endif
K = khash;
Klen = 32;
}
/* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
/* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
#if defined(HMAC_SPH_SHA)
sph_sha256_init( &ctx->ictx );
#else
SHA256_Init( &ctx->ictx );
#endif
for ( i = 0; i < Klen; i++ ) pad[i] = K[i] ^ 0x36;
memset( pad + Klen, 0x36, 64 - Klen );
SHA256_Update( &ctx->ictx, pad, 64 );
/* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
SHA256_Init( &ctx->octx );
memset( pad + Klen, 0x36, 64 - Klen );
#if defined(HMAC_SPH_SHA)
sph_sha256( &ctx->ictx, pad, 64 );
#else
SHA256_Update( &ctx->ictx, pad, 64 );
#endif
/* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
#if defined(HMAC_SPH_SHA)
sph_sha256_init( &ctx->octx );
#else
SHA256_Init( &ctx->octx );
#endif
for ( i = 0; i < Klen; i++ ) pad[i] = K[i] ^ 0x5c;
memset( pad + Klen, 0x5c, 64 - Klen );
SHA256_Update( &ctx->octx, pad, 64 );
#if defined(HMAC_SPH_SHA)
sph_sha256( &ctx->octx, pad, 64 );
#else
SHA256_Update( &ctx->octx, pad, 64 );
#endif
}
/* Add bytes to the HMAC-SHA256 operation. */
@@ -100,23 +131,33 @@ void
HMAC_SHA256_Update( HMAC_SHA256_CTX *ctx, const void *in, size_t len )
{
/* Feed data to the inner SHA256 operation. */
SHA256_Update( &ctx->ictx, in, len );
#if defined(HMAC_SPH_SHA)
sph_sha256( &ctx->ictx, in, len );
#else
SHA256_Update( &ctx->ictx, in, len );
#endif
}
/* Finish an HMAC-SHA256 operation. */
void
HMAC_SHA256_Final( unsigned char digest[32], HMAC_SHA256_CTX *ctx )
{
unsigned char ihash[32];
unsigned char ihash[32];
/* Finish the inner SHA256 operation. */
SHA256_Final( ihash, &ctx->ictx );
#if defined(HMAC_SPH_SHA)
sph_sha256_close( &ctx->ictx, ihash );
sph_sha256( &ctx->octx, ihash, 32 );
sph_sha256_close( &ctx->octx, digest );
#else
/* Finish the inner SHA256 operation. */
SHA256_Final( ihash, &ctx->ictx );
/* Feed the inner hash to the outer SHA256 operation. */
SHA256_Update( &ctx->octx, ihash, 32 );
/* Feed the inner hash to the outer SHA256 operation. */
SHA256_Update( &ctx->octx, ihash, 32 );
/* Finish the outer SHA256 operation. */
SHA256_Final( digest, &ctx->octx );
/* Finish the outer SHA256 operation. */
SHA256_Final( digest, &ctx->octx );
#endif
}
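Whichever backend is compiled in, the wrappers preserve the standard two-pass HMAC construction, HMAC(K, m) = H( (K' ^ opad) || H( (K' ^ ipad) || m ) ). A usage sketch with this file's declared API (key/msg buffers and lengths supplied by the caller):

   HMAC_SHA256_CTX ctx;
   uint8_t mac[32];
   HMAC_SHA256_Init( &ctx, key, key_len );
   HMAC_SHA256_Update( &ctx, msg, msg_len );
   HMAC_SHA256_Final( mac, &ctx );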
/**

View File

@@ -29,14 +29,24 @@
#ifndef HMAC_SHA256_H__
#define HMAC_SHA256_H__
//#define HMAC_SSL_SHA 1
#define HMAC_SPH_SHA 1
#include <sys/types.h>
#include <stdint.h>
#include "sph_sha2.h"
#include <openssl/sha.h>
typedef struct HMAC_SHA256Context
{
SHA256_CTX ictx;
SHA256_CTX octx;
#if defined(HMAC_SPH_SHA)
sph_sha256_context ictx;
sph_sha256_context octx;
#else
SHA256_CTX ictx;
SHA256_CTX octx;
#endif
} HMAC_SHA256_CTX;
void SHA256_Buf( const void *, size_t len, uint8_t digest[32] );

View File

@@ -59,6 +59,8 @@ void sha256_4way_update( sha256_4way_context *sc, const void *data,
size_t len );
void sha256_4way_close( sha256_4way_context *sc, void *dst );
void sha256_4way_full( void *dst, const void *data, size_t len );
void sha256_4way_transform( __m128i *state_out, const __m128i *data,
const __m128i *state_in );
#endif // SSE2
@@ -77,6 +79,8 @@ void sha256_8way_init( sha256_8way_context *sc );
void sha256_8way_update( sha256_8way_context *sc, const void *data, size_t len );
void sha256_8way_close( sha256_8way_context *sc, void *dst );
void sha256_8way_full( void *dst, const void *data, size_t len );
void sha256_8way_transform( __m256i *state_out, const __m256i *data,
const __m256i *state_in );
#endif // AVX2
@@ -95,6 +99,12 @@ void sha256_16way_init( sha256_16way_context *sc );
void sha256_16way_update( sha256_16way_context *sc, const void *data, size_t len );
void sha256_16way_close( sha256_16way_context *sc, void *dst );
void sha256_16way_full( void *dst, const void *data, size_t len );
void sha256_16way_transform( __m512i *state_out, const __m512i *data,
const __m512i *state_in );
void sha256_16way_prehash_3rounds( __m512i *state_mid, const __m512i *W,
const __m512i *state_in );
void sha256_16way_final_rounds( __m512i *state_out, const __m512i *data,
const __m512i *state_in, const __m512i *state_mid );
#endif // AVX512
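The new prehash/final split suggests the usual midstate trick: the first three rounds depend only on block data that is constant across nonces, so they can be computed once and replayed for every nonce batch. A sketch of the assumed calling pattern (W, state_in, first_nonce and max_nonce are caller-supplied placeholders; the loop body is illustrative):

   __m512i mid[8], out[8];
   sha256_16way_prehash_3rounds( mid, W, state_in ); // W: block, nonce lanes unset
   for ( uint32_t n = first_nonce; n < max_nonce; n += 16 )
   {
      // write the 16 nonces into their lanes of W, then finish the block
      sha256_16way_final_rounds( out, W, state_in, mid );
   }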

View File

@@ -12,7 +12,6 @@
#include <string.h>
#include <inttypes.h>
#include <openssl/sha.h>
#if defined(USE_ASM) && defined(__arm__) && defined(__APCS_32__)
#define EXTERN_SHA256
@@ -196,18 +195,28 @@ static void sha256d_80_swap(uint32_t *hash, const uint32_t *data)
hash[i] = swab32(hash[i]);
}
extern void sha256d(unsigned char *hash, const unsigned char *data, int len)
#if defined (__SHA__)
#include "algo/sha/sph_sha2.h"
void sha256d(unsigned char *hash, const unsigned char *data, int len)
{
#if defined(__SHA__)
SHA256_CTX ctx;
SHA256_Init( &ctx );
SHA256_Update( &ctx, data, len );
SHA256_Final( (unsigned char*)hash, &ctx );
SHA256_Init( &ctx );
SHA256_Update( &ctx, hash, 32 );
SHA256_Final( (unsigned char*)hash, &ctx );
sph_sha256_context ctx __attribute__ ((aligned (64)));
sph_sha256_init( &ctx );
sph_sha256( &ctx, data, len );
sph_sha256_close( &ctx, hash );
sph_sha256_init( &ctx );
sph_sha256( &ctx, hash, 32 );
sph_sha256_close( &ctx, hash );
}
#else
void sha256d(unsigned char *hash, const unsigned char *data, int len)
{
uint32_t S[16], T[16];
int i, r;
@@ -229,9 +238,10 @@ extern void sha256d(unsigned char *hash, const unsigned char *data, int len)
sha256_transform(T, S, 0);
for (i = 0; i < 8; i++)
be32enc((uint32_t *)hash + i, T[i]);
#endif
}
#endif
static inline void sha256d_preextend(uint32_t *W)
{
W[16] = s1(W[14]) + W[ 9] + s0(W[ 1]) + W[ 0];
@@ -479,8 +489,8 @@ static inline void sha256d_ms(uint32_t *hash, uint32_t *W,
void sha256d_ms_4way(uint32_t *hash, uint32_t *data,
const uint32_t *midstate, const uint32_t *prehash);
static inline int scanhash_sha256d_4way(int thr_id, struct work *work,
uint32_t max_nonce, uint64_t *hashes_done)
static inline int scanhash_sha256d_4way( struct work *work,
uint32_t max_nonce, uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
@@ -492,6 +502,7 @@ static inline int scanhash_sha256d_4way(int thr_id, struct work *work,
uint32_t n = pdata[19] - 1;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
int thr_id = mythr->id;
int i, j;
memcpy(data, pdata + 16, 64);
@@ -521,10 +532,8 @@ static inline int scanhash_sha256d_4way(int thr_id, struct work *work,
if (swab32(hash[4 * 7 + i]) <= Htarg) {
pdata[19] = data[4 * 3 + i];
sha256d_80_swap(hash, pdata);
if (fulltest(hash, ptarget)) {
*hashes_done = n - first_nonce + 1;
return 1;
}
if ( fulltest( hash, ptarget ) && !opt_benchmark )
submit_solution( work, hash, mythr );
}
}
} while (n < max_nonce && !work_restart[thr_id].restart);
@@ -541,8 +550,8 @@ static inline int scanhash_sha256d_4way(int thr_id, struct work *work,
void sha256d_ms_8way(uint32_t *hash, uint32_t *data,
const uint32_t *midstate, const uint32_t *prehash);
static inline int scanhash_sha256d_8way(int thr_id, struct work *work,
uint32_t max_nonce, uint64_t *hashes_done)
static inline int scanhash_sha256d_8way( struct work *work,
uint32_t max_nonce, uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
@@ -554,6 +563,7 @@ static inline int scanhash_sha256d_8way(int thr_id, struct work *work,
uint32_t n = pdata[19] - 1;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
int thr_id = mythr->id;
int i, j;
memcpy(data, pdata + 16, 64);
@@ -583,10 +593,8 @@ static inline int scanhash_sha256d_8way(int thr_id, struct work *work,
if (swab32(hash[8 * 7 + i]) <= Htarg) {
pdata[19] = data[8 * 3 + i];
sha256d_80_swap(hash, pdata);
if (fulltest(hash, ptarget)) {
*hashes_done = n - first_nonce + 1;
return 1;
}
if ( fulltest( hash, ptarget ) && !opt_benchmark )
submit_solution( work, hash, mythr );
}
}
} while (n < max_nonce && !work_restart[thr_id].restart);
@@ -614,13 +622,11 @@ int scanhash_sha256d( struct work *work,
#ifdef HAVE_SHA256_8WAY
if (sha256_use_8way())
return scanhash_sha256d_8way(thr_id, work,
max_nonce, hashes_done);
return scanhash_sha256d_8way( work, max_nonce, hashes_done, mythr );
#endif
#ifdef HAVE_SHA256_4WAY
if (sha256_use_4way())
return scanhash_sha256d_4way(thr_id, work,
max_nonce, hashes_done);
return scanhash_sha256d_4way( work, max_nonce, hashes_done, mythr );
#endif
memcpy(data, pdata + 16, 64);
@@ -657,7 +663,7 @@ int scanhash_SHA256d( struct work *work, const uint32_t max_nonce,
uint32_t n = pdata[19] - 1;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
int thr_id = mythr->id; // thr_id arg is deprecated
int thr_id = mythr->id;
memcpy( data, pdata, 80 );
@@ -680,14 +686,9 @@ int scanhash_SHA256d( struct work *work, const uint32_t max_nonce,
bool register_sha256d_algo( algo_gate_t* gate )
{
#if defined(__SHA__)
gate->optimizations = SHA_OPT;
gate->scanhash = (void*)&scanhash_SHA256d;
#else
gate->optimizations = SSE2_OPT | AVX2_OPT;
gate->scanhash = (void*)&scanhash_sha256d;
#endif
gate->hash = (void*)&sha256d;
return true;
gate->hash = (void*)&sha256d;
return true;
};

View File

@@ -0,0 +1,345 @@
/* Intel SHA extensions using C intrinsics */
/* Written and placed in public domain by Jeffrey Walton */
/* Based on code from Intel, and by Sean Gulley for */
/* the miTLS project. */
// A stripped-down version with byte swapping removed.
#if defined(__SHA__)
#include "sha256-hash-opt.h"
void sha256_ni2way_transform( uint32_t *out_X, uint32_t*out_Y,
const void *msg_X, const void *msg_Y,
const uint32_t *in_X, const uint32_t *in_Y )
{
__m128i STATE0_X, STATE1_X, STATE0_Y, STATE1_Y;
__m128i MSG_X, MSG_Y, TMP_X, TMP_Y;
__m128i TMSG0_X, TMSG1_X, TMSG2_X, TMSG3_X;
__m128i TMSG0_Y, TMSG1_Y, TMSG2_Y, TMSG3_Y;
__m128i ABEF_SAVE_X, CDGH_SAVE_X,ABEF_SAVE_Y, CDGH_SAVE_Y;
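// The two message streams (X and Y) are fully interleaved so the
// fixed-latency SHA-NI rounds of one stream can issue while the other's
// results are still in flight; each round constant is materialized once
// (in TMP_X) and added to both streams.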
// Load initial values
TMP_X = _mm_load_si128((__m128i*) &in_X[0]);
STATE1_X = _mm_load_si128((__m128i*) &in_X[4]);
TMP_Y = _mm_load_si128((__m128i*) &in_Y[0]);
STATE1_Y = _mm_load_si128((__m128i*) &in_Y[4]);
TMP_X = _mm_shuffle_epi32(TMP_X, 0xB1); // CDAB
TMP_Y = _mm_shuffle_epi32(TMP_Y, 0xB1); // CDAB
STATE1_X = _mm_shuffle_epi32(STATE1_X, 0x1B); // EFGH
STATE1_Y = _mm_shuffle_epi32(STATE1_Y, 0x1B); // EFGH
STATE0_X = _mm_alignr_epi8(TMP_X, STATE1_X, 8); // ABEF
STATE0_Y = _mm_alignr_epi8(TMP_Y, STATE1_Y, 8); // ABEF
STATE1_X = _mm_blend_epi16(STATE1_X, TMP_X, 0xF0); // CDGH
STATE1_Y = _mm_blend_epi16(STATE1_Y, TMP_Y, 0xF0); // CDGH
// Save current hash
ABEF_SAVE_X = STATE0_X;
ABEF_SAVE_Y = STATE0_Y;
CDGH_SAVE_X = STATE1_X;
CDGH_SAVE_Y = STATE1_Y;
// Rounds 0-3
TMSG0_X = _mm_load_si128((const __m128i*) (msg_X));
TMSG0_Y = _mm_load_si128((const __m128i*) (msg_Y));
TMP_X = _mm_set_epi64x(0xE9B5DBA5B5C0FBCFULL, 0x71374491428A2F98ULL);
MSG_X = _mm_add_epi32( TMSG0_X, TMP_X );
MSG_Y = _mm_add_epi32( TMSG0_Y, TMP_X );
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
// Rounds 4-7
TMSG1_X = _mm_load_si128((const __m128i*) (msg_X+16));
TMSG1_Y = _mm_load_si128((const __m128i*) (msg_Y+16));
TMP_X = _mm_set_epi64x(0xAB1C5ED5923F82A4ULL, 0x59F111F13956C25BULL);
MSG_X = _mm_add_epi32(TMSG1_X, TMP_X );
MSG_Y = _mm_add_epi32(TMSG1_Y, TMP_X );
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
TMSG0_X = _mm_sha256msg1_epu32(TMSG0_X, TMSG1_X);
TMSG0_Y = _mm_sha256msg1_epu32(TMSG0_Y, TMSG1_Y);
// Rounds 8-11
TMSG2_X = _mm_load_si128((const __m128i*) (msg_X+32));
TMSG2_Y = _mm_load_si128((const __m128i*) (msg_Y+32));
TMP_X = _mm_set_epi64x(0x550C7DC3243185BEULL, 0x12835B01D807AA98ULL);
MSG_X = _mm_add_epi32(TMSG2_X, TMP_X );
MSG_Y = _mm_add_epi32(TMSG2_Y, TMP_X );
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
TMSG1_X = _mm_sha256msg1_epu32(TMSG1_X, TMSG2_X);
TMSG1_Y = _mm_sha256msg1_epu32(TMSG1_Y, TMSG2_Y);
// Rounds 12-15
TMSG3_X = _mm_load_si128((const __m128i*) (msg_X+48));
TMSG3_Y = _mm_load_si128((const __m128i*) (msg_Y+48));
TMP_X = _mm_set_epi64x(0xC19BF1749BDC06A7ULL, 0x80DEB1FE72BE5D74ULL);
MSG_X = _mm_add_epi32(TMSG3_X, TMP_X );
MSG_Y = _mm_add_epi32(TMSG3_Y, TMP_X );
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
TMP_X = _mm_alignr_epi8(TMSG3_X, TMSG2_X, 4);
TMP_Y = _mm_alignr_epi8(TMSG3_Y, TMSG2_Y, 4);
TMSG0_X = _mm_add_epi32(TMSG0_X, TMP_X);
TMSG0_Y = _mm_add_epi32(TMSG0_Y, TMP_Y);
TMSG0_X = _mm_sha256msg2_epu32(TMSG0_X, TMSG3_X);
TMSG0_Y = _mm_sha256msg2_epu32(TMSG0_Y, TMSG3_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
TMSG2_X = _mm_sha256msg1_epu32(TMSG2_X, TMSG3_X);
TMSG2_Y = _mm_sha256msg1_epu32(TMSG2_Y, TMSG3_Y);
// Rounds 16-19
TMP_X = _mm_set_epi64x(0x240CA1CC0FC19DC6ULL, 0xEFBE4786E49B69C1ULL);
MSG_X = _mm_add_epi32(TMSG0_X, TMP_X );
MSG_Y = _mm_add_epi32(TMSG0_Y, TMP_X );
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
TMP_X = _mm_alignr_epi8(TMSG0_X, TMSG3_X, 4);
TMP_Y = _mm_alignr_epi8(TMSG0_Y, TMSG3_Y, 4);
TMSG1_X = _mm_add_epi32(TMSG1_X, TMP_X);
TMSG1_Y = _mm_add_epi32(TMSG1_Y, TMP_Y);
TMSG1_X = _mm_sha256msg2_epu32(TMSG1_X, TMSG0_X);
TMSG1_Y = _mm_sha256msg2_epu32(TMSG1_Y, TMSG0_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
TMSG3_X = _mm_sha256msg1_epu32(TMSG3_X, TMSG0_X);
TMSG3_Y = _mm_sha256msg1_epu32(TMSG3_Y, TMSG0_Y);
// Rounds 20-23
TMP_X = _mm_set_epi64x(0x76F988DA5CB0A9DCULL, 0x4A7484AA2DE92C6FULL);
MSG_X = _mm_add_epi32(TMSG1_X, TMP_X );
MSG_Y = _mm_add_epi32(TMSG1_Y, TMP_X );
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
TMP_X = _mm_alignr_epi8(TMSG1_X, TMSG0_X, 4);
TMP_Y = _mm_alignr_epi8(TMSG1_Y, TMSG0_Y, 4);
TMSG2_X = _mm_add_epi32(TMSG2_X, TMP_X);
TMSG2_Y = _mm_add_epi32(TMSG2_Y, TMP_Y);
TMSG2_X = _mm_sha256msg2_epu32(TMSG2_X, TMSG1_X);
TMSG2_Y = _mm_sha256msg2_epu32(TMSG2_Y, TMSG1_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
TMSG0_X = _mm_sha256msg1_epu32(TMSG0_X, TMSG1_X);
TMSG0_Y = _mm_sha256msg1_epu32(TMSG0_Y, TMSG1_Y);
// Rounds 24-27
TMP_X = _mm_set_epi64x(0xBF597FC7B00327C8ULL, 0xA831C66D983E5152ULL);
MSG_X = _mm_add_epi32(TMSG2_X, TMP_X );
MSG_Y = _mm_add_epi32(TMSG2_Y, TMP_X );
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
TMP_X = _mm_alignr_epi8(TMSG2_X, TMSG1_X, 4);
TMP_Y = _mm_alignr_epi8(TMSG2_Y, TMSG1_Y, 4);
TMSG3_X = _mm_add_epi32(TMSG3_X, TMP_X);
TMSG3_Y = _mm_add_epi32(TMSG3_Y, TMP_Y);
TMSG3_X = _mm_sha256msg2_epu32(TMSG3_X, TMSG2_X);
TMSG3_Y = _mm_sha256msg2_epu32(TMSG3_Y, TMSG2_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
TMSG1_X = _mm_sha256msg1_epu32(TMSG1_X, TMSG2_X);
TMSG1_Y = _mm_sha256msg1_epu32(TMSG1_Y, TMSG2_Y);
// Rounds 28-31
TMP_X = _mm_set_epi64x(0x1429296706CA6351ULL, 0xD5A79147C6E00BF3ULL);
MSG_X = _mm_add_epi32(TMSG3_X, TMP_X );
MSG_Y = _mm_add_epi32(TMSG3_Y, TMP_X );
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
TMP_X = _mm_alignr_epi8(TMSG3_X, TMSG2_X, 4);
TMP_Y = _mm_alignr_epi8(TMSG3_Y, TMSG2_Y, 4);
TMSG0_X = _mm_add_epi32(TMSG0_X, TMP_X);
TMSG0_Y = _mm_add_epi32(TMSG0_Y, TMP_Y);
TMSG0_X = _mm_sha256msg2_epu32(TMSG0_X, TMSG3_X);
TMSG0_Y = _mm_sha256msg2_epu32(TMSG0_Y, TMSG3_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
TMSG2_X = _mm_sha256msg1_epu32(TMSG2_X, TMSG3_X);
TMSG2_Y = _mm_sha256msg1_epu32(TMSG2_Y, TMSG3_Y);
// Rounds 32-35
TMP_X = _mm_set_epi64x(0x53380D134D2C6DFCULL, 0x2E1B213827B70A85ULL);
MSG_X = _mm_add_epi32(TMSG0_X, TMP_X );
MSG_Y = _mm_add_epi32(TMSG0_Y, TMP_X );
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
TMP_X = _mm_alignr_epi8(TMSG0_X, TMSG3_X, 4);
TMP_Y = _mm_alignr_epi8(TMSG0_Y, TMSG3_Y, 4);
TMSG1_X = _mm_add_epi32(TMSG1_X, TMP_X);
TMSG1_Y = _mm_add_epi32(TMSG1_Y, TMP_Y);
TMSG1_X = _mm_sha256msg2_epu32(TMSG1_X, TMSG0_X);
TMSG1_Y = _mm_sha256msg2_epu32(TMSG1_Y, TMSG0_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
TMSG3_X = _mm_sha256msg1_epu32(TMSG3_X, TMSG0_X);
TMSG3_Y = _mm_sha256msg1_epu32(TMSG3_Y, TMSG0_Y);
// Rounds 36-39
TMP_X = _mm_set_epi64x(0x92722C8581C2C92EULL, 0x766A0ABB650A7354ULL);
MSG_X = _mm_add_epi32(TMSG1_X, TMP_X);
MSG_Y = _mm_add_epi32(TMSG1_Y, TMP_X);
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
TMP_X = _mm_alignr_epi8(TMSG1_X, TMSG0_X, 4);
TMP_Y = _mm_alignr_epi8(TMSG1_Y, TMSG0_Y, 4);
TMSG2_X = _mm_add_epi32(TMSG2_X, TMP_X);
TMSG2_Y = _mm_add_epi32(TMSG2_Y, TMP_Y);
TMSG2_X = _mm_sha256msg2_epu32(TMSG2_X, TMSG1_X);
TMSG2_Y = _mm_sha256msg2_epu32(TMSG2_Y, TMSG1_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
TMSG0_X = _mm_sha256msg1_epu32(TMSG0_X, TMSG1_X);
TMSG0_Y = _mm_sha256msg1_epu32(TMSG0_Y, TMSG1_Y);
// Rounds 40-43
TMP_X = _mm_set_epi64x(0xC76C51A3C24B8B70ULL, 0xA81A664BA2BFE8A1ULL);
MSG_X = _mm_add_epi32(TMSG2_X, TMP_X);
MSG_Y = _mm_add_epi32(TMSG2_Y, TMP_X);
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
TMP_X = _mm_alignr_epi8(TMSG2_X, TMSG1_X, 4);
TMP_Y = _mm_alignr_epi8(TMSG2_Y, TMSG1_Y, 4);
TMSG3_X = _mm_add_epi32(TMSG3_X, TMP_X);
TMSG3_Y = _mm_add_epi32(TMSG3_Y, TMP_Y);
TMSG3_X = _mm_sha256msg2_epu32(TMSG3_X, TMSG2_X);
TMSG3_Y = _mm_sha256msg2_epu32(TMSG3_Y, TMSG2_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
TMSG1_X = _mm_sha256msg1_epu32(TMSG1_X, TMSG2_X);
TMSG1_Y = _mm_sha256msg1_epu32(TMSG1_Y, TMSG2_Y);
// Rounds 44-47
TMP_X = _mm_set_epi64x(0x106AA070F40E3585ULL, 0xD6990624D192E819ULL);
MSG_X = _mm_add_epi32(TMSG3_X, TMP_X);
MSG_Y = _mm_add_epi32(TMSG3_Y, TMP_X);
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
TMP_X = _mm_alignr_epi8(TMSG3_X, TMSG2_X, 4);
TMP_Y = _mm_alignr_epi8(TMSG3_Y, TMSG2_Y, 4);
TMSG0_X = _mm_add_epi32(TMSG0_X, TMP_X);
TMSG0_Y = _mm_add_epi32(TMSG0_Y, TMP_Y);
TMSG0_X = _mm_sha256msg2_epu32(TMSG0_X, TMSG3_X);
TMSG0_Y = _mm_sha256msg2_epu32(TMSG0_Y, TMSG3_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
TMSG2_X = _mm_sha256msg1_epu32(TMSG2_X, TMSG3_X);
TMSG2_Y = _mm_sha256msg1_epu32(TMSG2_Y, TMSG3_Y);
// Rounds 48-51
TMP_X = _mm_set_epi64x(0x34B0BCB52748774CULL, 0x1E376C0819A4C116ULL);
MSG_X = _mm_add_epi32(TMSG0_X, TMP_X );
MSG_Y = _mm_add_epi32(TMSG0_Y, TMP_X );
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
TMP_X = _mm_alignr_epi8(TMSG0_X, TMSG3_X, 4);
TMP_Y = _mm_alignr_epi8(TMSG0_Y, TMSG3_Y, 4);
TMSG1_X = _mm_add_epi32(TMSG1_X, TMP_X);
TMSG1_Y = _mm_add_epi32(TMSG1_Y, TMP_Y);
TMSG1_X = _mm_sha256msg2_epu32(TMSG1_X, TMSG0_X);
TMSG1_Y = _mm_sha256msg2_epu32(TMSG1_Y, TMSG0_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
TMSG3_X = _mm_sha256msg1_epu32(TMSG3_X, TMSG0_X);
TMSG3_Y = _mm_sha256msg1_epu32(TMSG3_Y, TMSG0_Y);
// Rounds 52-55
TMP_X = _mm_set_epi64x(0x682E6FF35B9CCA4FULL, 0x4ED8AA4A391C0CB3ULL);
MSG_X = _mm_add_epi32(TMSG1_X, TMP_X );
MSG_Y = _mm_add_epi32(TMSG1_Y, TMP_X );
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
TMP_X = _mm_alignr_epi8(TMSG1_X, TMSG0_X, 4);
TMP_Y = _mm_alignr_epi8(TMSG1_Y, TMSG0_Y, 4);
TMSG2_X = _mm_add_epi32(TMSG2_X, TMP_X);
TMSG2_Y = _mm_add_epi32(TMSG2_Y, TMP_Y);
TMSG2_X = _mm_sha256msg2_epu32(TMSG2_X, TMSG1_X);
TMSG2_Y = _mm_sha256msg2_epu32(TMSG2_Y, TMSG1_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
// Rounds 56-59
TMP_X = _mm_set_epi64x(0x8CC7020884C87814ULL, 0x78A5636F748F82EEULL);
MSG_X = _mm_add_epi32(TMSG2_X, TMP_X);
MSG_Y = _mm_add_epi32(TMSG2_Y, TMP_X);
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
TMP_X = _mm_alignr_epi8(TMSG2_X, TMSG1_X, 4);
TMP_Y = _mm_alignr_epi8(TMSG2_Y, TMSG1_Y, 4);
TMSG3_X = _mm_add_epi32(TMSG3_X, TMP_X);
TMSG3_Y = _mm_add_epi32(TMSG3_Y, TMP_Y);
TMSG3_X = _mm_sha256msg2_epu32(TMSG3_X, TMSG2_X);
TMSG3_Y = _mm_sha256msg2_epu32(TMSG3_Y, TMSG2_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
// Rounds 60-63
TMP_X = _mm_set_epi64x(0xC67178F2BEF9A3F7ULL, 0xA4506CEB90BEFFFAULL);
MSG_X = _mm_add_epi32(TMSG3_X, TMP_X);
MSG_Y = _mm_add_epi32(TMSG3_Y, TMP_X);
STATE1_X = _mm_sha256rnds2_epu32(STATE1_X, STATE0_X, MSG_X);
STATE1_Y = _mm_sha256rnds2_epu32(STATE1_Y, STATE0_Y, MSG_Y);
MSG_X = _mm_shuffle_epi32(MSG_X, 0x0E);
MSG_Y = _mm_shuffle_epi32(MSG_Y, 0x0E);
STATE0_X = _mm_sha256rnds2_epu32(STATE0_X, STATE1_X, MSG_X);
STATE0_Y = _mm_sha256rnds2_epu32(STATE0_Y, STATE1_Y, MSG_Y);
// Add values back to state
STATE0_X = _mm_add_epi32(STATE0_X, ABEF_SAVE_X);
STATE1_X = _mm_add_epi32(STATE1_X, CDGH_SAVE_X);
STATE0_Y = _mm_add_epi32(STATE0_Y, ABEF_SAVE_Y);
STATE1_Y = _mm_add_epi32(STATE1_Y, CDGH_SAVE_Y);
TMP_X = _mm_shuffle_epi32(STATE0_X, 0x1B); // FEBA
TMP_Y = _mm_shuffle_epi32(STATE0_Y, 0x1B); // FEBA
STATE1_X = _mm_shuffle_epi32(STATE1_X, 0xB1); // DCHG
STATE1_Y = _mm_shuffle_epi32(STATE1_Y, 0xB1); // DCHG
STATE0_X = _mm_blend_epi16(TMP_X, STATE1_X, 0xF0); // DCBA
STATE0_Y = _mm_blend_epi16(TMP_Y, STATE1_Y, 0xF0); // DCBA
STATE1_X = _mm_alignr_epi8(STATE1_X, TMP_X, 8); // ABEF
STATE1_Y = _mm_alignr_epi8(STATE1_Y, TMP_Y, 8); // ABEF
// Save state
_mm_store_si128((__m128i*) &out_X[0], STATE0_X);
_mm_store_si128((__m128i*) &out_X[4], STATE1_X);
_mm_store_si128((__m128i*) &out_Y[0], STATE0_Y);
_mm_store_si128((__m128i*) &out_Y[4], STATE1_Y);
}
#endif
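A minimal usage sketch for the interleaved 2-way transform above, assuming the sha256-hash-opt.h declarations further down and a build with -msha; the wrapper name is illustrative, not from the source. Per the stripped-down design, message words must already be big-endian, and the state and output arrays 16-byte aligned, since the transform uses aligned loads and stores.

#include "sha256-hash-opt.h"

// Hypothetical wrapper: compress the first 64 byte blocks of two independent
// work items in one interleaved pass, starting from the standard SHA-256 IV.
void sha256_first_block_x2( const uint32_t blk_a[16], const uint32_t blk_b[16],
                            uint32_t out_a[8], uint32_t out_b[8] )
{
   static const uint32_t iv[8] __attribute__ ((aligned (16))) =
   {
      0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
      0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
   };
   sha256_ni2way_transform( out_a, out_b, blk_a, blk_b, iv, iv );
}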


@@ -74,9 +74,20 @@ static const uint32_t K256[64] =
#define CHs(X, Y, Z) \
_mm_xor_si128( _mm_and_si128( _mm_xor_si128( Y, Z ), X ), Z )
/*
#define MAJs(X, Y, Z) \
_mm_or_si128( _mm_and_si128( X, Y ), \
_mm_and_si128( _mm_or_si128( X, Y ), Z ) )
*/
/*
#define MAJs(X, Y, Z) \
_mm_xor_si128( Y, _mm_and_si128( _mm_xor_si128( X, Y ), \
_mm_xor_si128( Y, Z ) ) )
*/
#define MAJs(X, Y, Z) \
_mm_xor_si128( Y, _mm_and_si128( X_xor_Y = _mm_xor_si128( X, Y ), \
Y_xor_Z ) )
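The cached MAJs above leans on the boolean identity MAJ(x,y,z) = y ^ ((x ^ y) & (y ^ z)), plus the fact that the round rotation makes the next round's y ^ z equal to this round's x ^ y, which is why each step saves it with Y_xor_Z = X_xor_Y. A self-contained scalar check of both facts (illustrative, not part of the source):

#include <stdint.h>
#include <assert.h>

static uint32_t maj_ref( uint32_t x, uint32_t y, uint32_t z )
{
   return ( x & y ) | ( x & z ) | ( y & z );
}

int main(void)
{
   uint32_t a = 0x6A09E667, b = 0xBB67AE85, c = 0x3C6EF372;
   // one XOR + one AND + one XOR replaces the 4-op reference form
   uint32_t x_xor_y = a ^ b, y_xor_z = b ^ c;
   assert( ( b ^ ( x_xor_y & y_xor_z ) ) == maj_ref( a, b, c ) );
   // after the round rotation the new (Y,Z) pair is the old (X,Y),
   // so next round's Y_xor_Z is exactly this round's X_xor_Y
   uint32_t next_y_xor_z = a ^ b;
   assert( next_y_xor_z == x_xor_y );
   return 0;
}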
#define BSG2_0(x) \
_mm_xor_si128( _mm_xor_si128( \
@@ -94,6 +105,7 @@ static const uint32_t K256[64] =
_mm_xor_si128( _mm_xor_si128( \
mm128_ror_32(x, 17), mm128_ror_32(x, 19) ), _mm_srli_epi32(x, 10) )
/*
#define SHA2s_4WAY_STEP(A, B, C, D, E, F, G, H, i, j) \
do { \
__m128i K = _mm_set1_epi32( K256[( (j)+(i) )] ); \
@@ -122,9 +134,9 @@ do { \
H = _mm_add_epi32( T1, T2 ); \
D = _mm_add_epi32( D, T1 ); \
} while (0)
*/
#define SHA2s_4WAY_STEP(A, B, C, D, E, F, G, H, i, j) \
do { \
__m128i T1, T2; \
@@ -132,16 +144,98 @@ do { \
T1 = _mm_add_epi32( H, mm128_add4_32( BSG2_1(E), CHs(E, F, G), \
K, W[i] ) ); \
T2 = _mm_add_epi32( BSG2_0(A), MAJs(A, B, C) ); \
Y_xor_Z = X_xor_Y; \
D = _mm_add_epi32( D, T1 ); \
H = _mm_add_epi32( T1, T2 ); \
} while (0)
void sha256_4way_transform( __m128i *state_out, const __m128i *data,
const __m128i *state_in )
{
__m128i A, B, C, D, E, F, G, H, X_xor_Y, Y_xor_Z;
__m128i W[16];
memcpy_128( W, data, 16 );
A = state_in[0];
B = state_in[1];
C = state_in[2];
D = state_in[3];
E = state_in[4];
F = state_in[5];
G = state_in[6];
H = state_in[7];
Y_xor_Z = _mm_xor_si128( B, C );
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 3, 0 );
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 4, 0 );
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 5, 0 );
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 6, 0 );
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 7, 0 );
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 8, 0 );
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 9, 0 );
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 10, 0 );
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 11, 0 );
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 12, 0 );
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 13, 0 );
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 14, 0 );
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 15, 0 );
for ( int j = 16; j < 64; j += 16 )
{
W[ 0] = SHA2s_MEXP( 14, 9, 1, 0 );
W[ 1] = SHA2s_MEXP( 15, 10, 2, 1 );
W[ 2] = SHA2s_MEXP( 0, 11, 3, 2 );
W[ 3] = SHA2s_MEXP( 1, 12, 4, 3 );
W[ 4] = SHA2s_MEXP( 2, 13, 5, 4 );
W[ 5] = SHA2s_MEXP( 3, 14, 6, 5 );
W[ 6] = SHA2s_MEXP( 4, 15, 7, 6 );
W[ 7] = SHA2s_MEXP( 5, 0, 8, 7 );
W[ 8] = SHA2s_MEXP( 6, 1, 9, 8 );
W[ 9] = SHA2s_MEXP( 7, 2, 10, 9 );
W[10] = SHA2s_MEXP( 8, 3, 11, 10 );
W[11] = SHA2s_MEXP( 9, 4, 12, 11 );
W[12] = SHA2s_MEXP( 10, 5, 13, 12 );
W[13] = SHA2s_MEXP( 11, 6, 14, 13 );
W[14] = SHA2s_MEXP( 12, 7, 15, 14 );
W[15] = SHA2s_MEXP( 13, 8, 0, 15 );
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, j );
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, j );
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, j );
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 3, j );
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 4, j );
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 5, j );
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 6, j );
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 7, j );
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 8, j );
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 9, j );
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 10, j );
SHA2s_4WAY_STEP( F, G, H, A, B, C, D, E, 11, j );
SHA2s_4WAY_STEP( E, F, G, H, A, B, C, D, 12, j );
SHA2s_4WAY_STEP( D, E, F, G, H, A, B, C, 13, j );
SHA2s_4WAY_STEP( C, D, E, F, G, H, A, B, 14, j );
SHA2s_4WAY_STEP( B, C, D, E, F, G, H, A, 15, j );
}
state_out[0] = _mm_add_epi32( state_in[0], A );
state_out[1] = _mm_add_epi32( state_in[1], B );
state_out[2] = _mm_add_epi32( state_in[2], C );
state_out[3] = _mm_add_epi32( state_in[3], D );
state_out[4] = _mm_add_epi32( state_in[4], E );
state_out[5] = _mm_add_epi32( state_in[5], F );
state_out[6] = _mm_add_epi32( state_in[6], G );
state_out[7] = _mm_add_epi32( state_in[7], H );
}
static void
sha256_4way_round( sha256_4way_context *ctx, __m128i *in, __m128i r[8] )
{
register __m128i A, B, C, D, E, F, G, H, X_xor_Y, Y_xor_Z;
__m128i W[16];
mm128_block_bswap_32( W, in );
@@ -170,6 +264,8 @@ sha256_4way_round( sha256_4way_context *ctx, __m128i *in, __m128i r[8] )
H = m128_const1_64( 0x5BE0CD195BE0CD19 );
}
Y_xor_Z = _mm_xor_si128( B, C );
SHA2s_4WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
SHA2s_4WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
SHA2s_4WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
@@ -321,10 +417,8 @@ void sha256_4way_close( sha256_4way_context *sc, void *dst )
high = (sc->count_high << 3) | (low >> 29);
low = low << 3;
sc->buf[ pad >> 2 ] = m128_const1_32( bswap_32( high ) );
sc->buf[( pad+4 ) >> 2 ] = m128_const1_32( bswap_32( low ) );
sha256_4way_round( sc, sc->buf, sc->val );
mm128_block_bswap_32( dst, sc->val );
@@ -342,12 +436,39 @@ void sha256_4way_full( void *dst, const void *data, size_t len )
// SHA-256 8 way
#if defined(__AVX512VL__)
#define CHx(X, Y, Z) \
_mm256_ternarylogic_epi32( X, Y, Z, 0xca )
#define MAJx(X, Y, Z) \
_mm256_ternarylogic_epi32( X, Y, Z, 0xe8 )
#define BSG2_0x(x) \
mm256_xor3( mm256_ror_32(x, 2), mm256_ror_32(x, 13), mm256_ror_32(x, 22) )
#define BSG2_1x(x) \
mm256_xor3( mm256_ror_32(x, 6), mm256_ror_32(x, 11), mm256_ror_32(x, 25) )
#define SSG2_0x(x) \
mm256_xor3( mm256_ror_32(x, 7), mm256_ror_32(x, 18), _mm256_srli_epi32(x, 3) )
#define SSG2_1x(x) \
mm256_xor3( mm256_ror_32(x, 17), mm256_ror_32(x, 19), _mm256_srli_epi32(x, 10) )
#else // AVX2
#define CHx(X, Y, Z) \
_mm256_xor_si256( _mm256_and_si256( _mm256_xor_si256( Y, Z ), X ), Z )
#define MAJx(X, Y, Z) \
_mm256_xor_si256( Y, _mm256_and_si256( _mm256_xor_si256( X, Y ), \
_mm256_xor_si256( Y, Z ) ) )
/*
#define MAJx(X, Y, Z) \
_mm256_xor_si256( Y, _mm256_and_si256( X_xor_Y = _mm256_xor_si256( X, Y ), \
Y_xor_Z ) )
*/
#define BSG2_0x(x) \
_mm256_xor_si256( _mm256_xor_si256( \
@@ -365,6 +486,8 @@ void sha256_4way_full( void *dst, const void *data, size_t len )
_mm256_xor_si256( _mm256_xor_si256( \
mm256_ror_32(x, 17), mm256_ror_32(x, 19) ), _mm256_srli_epi32(x, 10) )
#endif // AVX512 else AVX2
#define SHA2x_MEXP( a, b, c, d ) \
mm256_add4_32( SSG2_1x( W[a] ), W[b], SSG2_0x( W[c] ), W[d] );
@@ -379,8 +502,89 @@ do { \
H = _mm256_add_epi32( T1, T2 ); \
} while (0)
void sha256_8way_transform( __m256i *state_out, const __m256i *data,
const __m256i *state_in )
{
__m256i A, B, C, D, E, F, G, H;
__m256i W[16];
memcpy_256( W, data, 16 );
A = state_in[0];
B = state_in[1];
C = state_in[2];
D = state_in[3];
E = state_in[4];
F = state_in[5];
G = state_in[6];
H = state_in[7];
SHA2s_8WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
SHA2s_8WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
SHA2s_8WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
SHA2s_8WAY_STEP( F, G, H, A, B, C, D, E, 3, 0 );
SHA2s_8WAY_STEP( E, F, G, H, A, B, C, D, 4, 0 );
SHA2s_8WAY_STEP( D, E, F, G, H, A, B, C, 5, 0 );
SHA2s_8WAY_STEP( C, D, E, F, G, H, A, B, 6, 0 );
SHA2s_8WAY_STEP( B, C, D, E, F, G, H, A, 7, 0 );
SHA2s_8WAY_STEP( A, B, C, D, E, F, G, H, 8, 0 );
SHA2s_8WAY_STEP( H, A, B, C, D, E, F, G, 9, 0 );
SHA2s_8WAY_STEP( G, H, A, B, C, D, E, F, 10, 0 );
SHA2s_8WAY_STEP( F, G, H, A, B, C, D, E, 11, 0 );
SHA2s_8WAY_STEP( E, F, G, H, A, B, C, D, 12, 0 );
SHA2s_8WAY_STEP( D, E, F, G, H, A, B, C, 13, 0 );
SHA2s_8WAY_STEP( C, D, E, F, G, H, A, B, 14, 0 );
SHA2s_8WAY_STEP( B, C, D, E, F, G, H, A, 15, 0 );
for ( int j = 16; j < 64; j += 16 )
{
W[ 0] = SHA2x_MEXP( 14, 9, 1, 0 );
W[ 1] = SHA2x_MEXP( 15, 10, 2, 1 );
W[ 2] = SHA2x_MEXP( 0, 11, 3, 2 );
W[ 3] = SHA2x_MEXP( 1, 12, 4, 3 );
W[ 4] = SHA2x_MEXP( 2, 13, 5, 4 );
W[ 5] = SHA2x_MEXP( 3, 14, 6, 5 );
W[ 6] = SHA2x_MEXP( 4, 15, 7, 6 );
W[ 7] = SHA2x_MEXP( 5, 0, 8, 7 );
W[ 8] = SHA2x_MEXP( 6, 1, 9, 8 );
W[ 9] = SHA2x_MEXP( 7, 2, 10, 9 );
W[10] = SHA2x_MEXP( 8, 3, 11, 10 );
W[11] = SHA2x_MEXP( 9, 4, 12, 11 );
W[12] = SHA2x_MEXP( 10, 5, 13, 12 );
W[13] = SHA2x_MEXP( 11, 6, 14, 13 );
W[14] = SHA2x_MEXP( 12, 7, 15, 14 );
W[15] = SHA2x_MEXP( 13, 8, 0, 15 );
SHA2s_8WAY_STEP( A, B, C, D, E, F, G, H, 0, j );
SHA2s_8WAY_STEP( H, A, B, C, D, E, F, G, 1, j );
SHA2s_8WAY_STEP( G, H, A, B, C, D, E, F, 2, j );
SHA2s_8WAY_STEP( F, G, H, A, B, C, D, E, 3, j );
SHA2s_8WAY_STEP( E, F, G, H, A, B, C, D, 4, j );
SHA2s_8WAY_STEP( D, E, F, G, H, A, B, C, 5, j );
SHA2s_8WAY_STEP( C, D, E, F, G, H, A, B, 6, j );
SHA2s_8WAY_STEP( B, C, D, E, F, G, H, A, 7, j );
SHA2s_8WAY_STEP( A, B, C, D, E, F, G, H, 8, j );
SHA2s_8WAY_STEP( H, A, B, C, D, E, F, G, 9, j );
SHA2s_8WAY_STEP( G, H, A, B, C, D, E, F, 10, j );
SHA2s_8WAY_STEP( F, G, H, A, B, C, D, E, 11, j );
SHA2s_8WAY_STEP( E, F, G, H, A, B, C, D, 12, j );
SHA2s_8WAY_STEP( D, E, F, G, H, A, B, C, 13, j );
SHA2s_8WAY_STEP( C, D, E, F, G, H, A, B, 14, j );
SHA2s_8WAY_STEP( B, C, D, E, F, G, H, A, 15, j );
}
state_out[0] = _mm256_add_epi32( state_in[0], A );
state_out[1] = _mm256_add_epi32( state_in[1], B );
state_out[2] = _mm256_add_epi32( state_in[2], C );
state_out[3] = _mm256_add_epi32( state_in[3], D );
state_out[4] = _mm256_add_epi32( state_in[4], E );
state_out[5] = _mm256_add_epi32( state_in[5], F );
state_out[6] = _mm256_add_epi32( state_in[6], G );
state_out[7] = _mm256_add_epi32( state_in[7], H );
}
static void
sha256_8way_round( sha256_8way_context *ctx, __m256i *in, __m256i r[8] )
{
register __m256i A, B, C, D, E, F, G, H;
__m256i W[16];
@@ -566,10 +770,8 @@ void sha256_8way_close( sha256_8way_context *sc, void *dst )
high = (sc->count_high << 3) | (low >> 29);
low = low << 3;
sc->buf[ pad >> 2 ] = m256_const1_32( bswap_32( high ) );
sc->buf[ ( pad+4 ) >> 2 ] = m256_const1_32( bswap_32( low ) );
sha256_8way_round( sc, sc->buf, sc->val );
@@ -589,27 +791,22 @@ void sha256_8way_full( void *dst, const void *data, size_t len )
// SHA-256 16 way
#define CHx16(X, Y, Z) \
_mm512_ternarylogic_epi32( X, Y, Z, 0xca )
#define MAJx16(X, Y, Z) \
_mm512_ternarylogic_epi32( X, Y, Z, 0xe8 )
#define BSG2_0x16(x) \
mm512_xor3( mm512_ror_32(x, 2), mm512_ror_32(x, 13), mm512_ror_32(x, 22) )
#define BSG2_1x16(x) \
mm512_xor3( mm512_ror_32(x, 6), mm512_ror_32(x, 11), mm512_ror_32(x, 25) )
#define SSG2_0x16(x) \
mm512_xor3( mm512_ror_32(x, 7), mm512_ror_32(x, 18), _mm512_srli_epi32(x, 3) )
#define SSG2_1x16(x) \
mm512_xor3( mm512_ror_32(x, 17), mm512_ror_32(x, 19), _mm512_srli_epi32(x, 10) )
#define SHA2x16_MEXP( a, b, c, d ) \
mm512_add4_32( SSG2_1x16( W[a] ), W[b], SSG2_0x16( W[c] ), W[d] );
@@ -625,10 +822,216 @@ do { \
H = _mm512_add_epi32( T1, T2 ); \
} while (0)
// Transform one 64 byte message block across 16 lanes and update the state.
// The calling function is responsible for initializing the state, setting
// the correct byte order, counting bits, and padding the final block.
// This is faster for chained sha256 (sha256d/t/q) because it eliminates
// redundant byte swapping between rounds.
//
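// For an 80 byte message, for example, the caller transforms the first
// 64 byte block, then builds a final block from the last 16 bytes followed
// by the 0x80000000 end marker, zero padding, and the total bit length
// (80*8 = 640) in the last word, as the scanhash functions below do.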
void sha256_16way_transform( __m512i *state_out, const __m512i *data,
const __m512i *state_in )
{
__m512i A, B, C, D, E, F, G, H;
__m512i W[16];
memcpy_512( W, data, 16 );
A = state_in[0];
B = state_in[1];
C = state_in[2];
D = state_in[3];
E = state_in[4];
F = state_in[5];
G = state_in[6];
H = state_in[7];
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 3, 0 );
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 4, 0 );
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 5, 0 );
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 6, 0 );
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 7, 0 );
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 8, 0 );
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 9, 0 );
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 10, 0 );
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 11, 0 );
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 12, 0 );
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 13, 0 );
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 14, 0 );
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 15, 0 );
for ( int j = 16; j < 64; j += 16 )
{
W[ 0] = SHA2x16_MEXP( 14, 9, 1, 0 );
W[ 1] = SHA2x16_MEXP( 15, 10, 2, 1 );
W[ 2] = SHA2x16_MEXP( 0, 11, 3, 2 );
W[ 3] = SHA2x16_MEXP( 1, 12, 4, 3 );
W[ 4] = SHA2x16_MEXP( 2, 13, 5, 4 );
W[ 5] = SHA2x16_MEXP( 3, 14, 6, 5 );
W[ 6] = SHA2x16_MEXP( 4, 15, 7, 6 );
W[ 7] = SHA2x16_MEXP( 5, 0, 8, 7 );
W[ 8] = SHA2x16_MEXP( 6, 1, 9, 8 );
W[ 9] = SHA2x16_MEXP( 7, 2, 10, 9 );
W[10] = SHA2x16_MEXP( 8, 3, 11, 10 );
W[11] = SHA2x16_MEXP( 9, 4, 12, 11 );
W[12] = SHA2x16_MEXP( 10, 5, 13, 12 );
W[13] = SHA2x16_MEXP( 11, 6, 14, 13 );
W[14] = SHA2x16_MEXP( 12, 7, 15, 14 );
W[15] = SHA2x16_MEXP( 13, 8, 0, 15 );
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 0, j );
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 1, j );
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 2, j );
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 3, j );
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 4, j );
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 5, j );
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 6, j );
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 7, j );
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 8, j );
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 9, j );
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 10, j );
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 11, j );
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 12, j );
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 13, j );
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 14, j );
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 15, j );
}
state_out[0] = _mm512_add_epi32( state_in[0], A );
state_out[1] = _mm512_add_epi32( state_in[1], B );
state_out[2] = _mm512_add_epi32( state_in[2], C );
state_out[3] = _mm512_add_epi32( state_in[3], D );
state_out[4] = _mm512_add_epi32( state_in[4], E );
state_out[5] = _mm512_add_epi32( state_in[5], F );
state_out[6] = _mm512_add_epi32( state_in[6], G );
state_out[7] = _mm512_add_epi32( state_in[7], H );
}
// Aggressive prehashing: hoist nonce-independent rounds out of the scan loop.
void sha256_16way_prehash_3rounds( __m512i *state_mid, const __m512i *W,
const __m512i *state_in )
{
__m512i A, B, C, D, E, F, G, H;
A = _mm512_load_si512( state_in );
B = _mm512_load_si512( state_in + 1 );
C = _mm512_load_si512( state_in + 2 );
D = _mm512_load_si512( state_in + 3 );
E = _mm512_load_si512( state_in + 4 );
F = _mm512_load_si512( state_in + 5 );
G = _mm512_load_si512( state_in + 6 );
H = _mm512_load_si512( state_in + 7 );
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
_mm512_store_si512( state_mid , A );
_mm512_store_si512( state_mid + 1, B );
_mm512_store_si512( state_mid + 2, C );
_mm512_store_si512( state_mid + 3, D );
_mm512_store_si512( state_mid + 4, E );
_mm512_store_si512( state_mid + 5, F );
_mm512_store_si512( state_mid + 6, G );
_mm512_store_si512( state_mid + 7, H );
}
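// Three rounds is the safe bound: rounds 0-2 consume only W[0..2] of the
// final block, and in an 80 byte header the nonce lands in W[3], so the
// prehashed state_mid is constant across the entire nonce scan.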
void sha256_16way_final_rounds( __m512i *state_out, const __m512i *data,
const __m512i *state_in, const __m512i *state_mid )
{
__m512i A, B, C, D, E, F, G, H;
__m512i W[16];
memcpy_512( W, data, 16 );
A = _mm512_load_si512( state_mid );
B = _mm512_load_si512( state_mid + 1 );
C = _mm512_load_si512( state_mid + 2 );
D = _mm512_load_si512( state_mid + 3 );
E = _mm512_load_si512( state_mid + 4 );
F = _mm512_load_si512( state_mid + 5 );
G = _mm512_load_si512( state_mid + 6 );
H = _mm512_load_si512( state_mid + 7 );
// SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
// SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
// SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 3, 0 );
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 4, 0 );
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 5, 0 );
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 6, 0 );
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 7, 0 );
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 8, 0 );
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 9, 0 );
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 10, 0 );
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 11, 0 );
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 12, 0 );
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 13, 0 );
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 14, 0 );
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 15, 0 );
for ( int j = 16; j < 64; j += 16 )
{
W[ 0] = SHA2x16_MEXP( 14, 9, 1, 0 );
W[ 1] = SHA2x16_MEXP( 15, 10, 2, 1 );
W[ 2] = SHA2x16_MEXP( 0, 11, 3, 2 );
W[ 3] = SHA2x16_MEXP( 1, 12, 4, 3 );
W[ 4] = SHA2x16_MEXP( 2, 13, 5, 4 );
W[ 5] = SHA2x16_MEXP( 3, 14, 6, 5 );
W[ 6] = SHA2x16_MEXP( 4, 15, 7, 6 );
W[ 7] = SHA2x16_MEXP( 5, 0, 8, 7 );
W[ 8] = SHA2x16_MEXP( 6, 1, 9, 8 );
W[ 9] = SHA2x16_MEXP( 7, 2, 10, 9 );
W[10] = SHA2x16_MEXP( 8, 3, 11, 10 );
W[11] = SHA2x16_MEXP( 9, 4, 12, 11 );
W[12] = SHA2x16_MEXP( 10, 5, 13, 12 );
W[13] = SHA2x16_MEXP( 11, 6, 14, 13 );
W[14] = SHA2x16_MEXP( 12, 7, 15, 14 );
W[15] = SHA2x16_MEXP( 13, 8, 0, 15 );
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 0, j );
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 1, j );
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 2, j );
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 3, j );
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 4, j );
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 5, j );
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 6, j );
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 7, j );
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 8, j );
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 9, j );
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 10, j );
SHA2s_16WAY_STEP( F, G, H, A, B, C, D, E, 11, j );
SHA2s_16WAY_STEP( E, F, G, H, A, B, C, D, 12, j );
SHA2s_16WAY_STEP( D, E, F, G, H, A, B, C, 13, j );
SHA2s_16WAY_STEP( C, D, E, F, G, H, A, B, 14, j );
SHA2s_16WAY_STEP( B, C, D, E, F, G, H, A, 15, j );
}
A = _mm512_add_epi32( A, _mm512_load_si512( state_in ) );
B = _mm512_add_epi32( B, _mm512_load_si512( state_in + 1 ) );
C = _mm512_add_epi32( C, _mm512_load_si512( state_in + 2 ) );
D = _mm512_add_epi32( D, _mm512_load_si512( state_in + 3 ) );
E = _mm512_add_epi32( E, _mm512_load_si512( state_in + 4 ) );
F = _mm512_add_epi32( F, _mm512_load_si512( state_in + 5 ) );
G = _mm512_add_epi32( G, _mm512_load_si512( state_in + 6 ) );
H = _mm512_add_epi32( H, _mm512_load_si512( state_in + 7 ) );
_mm512_store_si512( state_out , A );
_mm512_store_si512( state_out + 1, B );
_mm512_store_si512( state_out + 2, C );
_mm512_store_si512( state_out + 3, D );
_mm512_store_si512( state_out + 4, E );
_mm512_store_si512( state_out + 5, F );
_mm512_store_si512( state_out + 6, G );
_mm512_store_si512( state_out + 7, H );
}
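// Used as a pair with prehash_3rounds above: scanhash_sha256t_16way computes
// the prehash once per midstate, then calls final_rounds for every nonce
// batch instead of a full sha256_16way_transform of the last block.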
static void
sha256_16way_round( sha256_16way_context *ctx, __m512i *in, __m512i r[8] )
{
register __m512i A, B, C, D, E, F, G, H;
__m512i W[16];
mm512_block_bswap_32( W , in );
@@ -657,6 +1060,7 @@ sha256_16way_round( sha256_16way_context *ctx, __m512i *in, __m512i r[8] )
H = m512_const1_64( 0x5BE0CD195BE0CD19 );
}
SHA2s_16WAY_STEP( A, B, C, D, E, F, G, H, 0, 0 );
SHA2s_16WAY_STEP( H, A, B, C, D, E, F, G, 1, 0 );
SHA2s_16WAY_STEP( G, H, A, B, C, D, E, F, 2, 0 );
@@ -800,10 +1204,8 @@ void sha256_16way_close( sha256_16way_context *sc, void *dst )
high = (sc->count_high << 3) | (low >> 29);
low = low << 3;
sc->buf[ pad >> 2 ] = m512_const1_32( bswap_32( high ) );
sc->buf[ ( pad+4 ) >> 2 ] = m512_const1_32( bswap_32( low ) );
sha256_16way_round( sc, sc->buf, sc->val );

algo/sha/sha256-hash-opt.c Normal file

@@ -0,0 +1,200 @@
/* Intel SHA extensions using C intrinsics */
/* Written and placed in public domain by Jeffrey Walton */
/* Based on code from Intel, and by Sean Gulley for */
/* the miTLS project. */
// A stripped down version with byte swapping removed.
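// Callers must supply message words already byte swapped to big endian,
// e.g. via the bswap helpers used in the scanhash paths.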
#if defined(__SHA__)
#include "sha256-hash-opt.h"
void sha256_opt_transform( uint32_t *state_out, const void *input,
const uint32_t *state_in )
{
__m128i STATE0, STATE1;
__m128i MSG, TMP;
__m128i TMSG0, TMSG1, TMSG2, TMSG3;
__m128i ABEF_SAVE, CDGH_SAVE;
// Load initial values
TMP = _mm_load_si128((__m128i*) &state_in[0]);
STATE1 = _mm_load_si128((__m128i*) &state_in[4]);
// MASK = _mm_set_epi64x(0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL);
TMP = _mm_shuffle_epi32(TMP, 0xB1); // CDAB
STATE1 = _mm_shuffle_epi32(STATE1, 0x1B); // EFGH
STATE0 = _mm_alignr_epi8(TMP, STATE1, 8); // ABEF
STATE1 = _mm_blend_epi16(STATE1, TMP, 0xF0); // CDGH
// Save current hash
ABEF_SAVE = STATE0;
CDGH_SAVE = STATE1;
// Rounds 0-3
TMSG0 = _mm_load_si128((const __m128i*) (input+0));
// TMSG0 = _mm_shuffle_epi8(MSG, MASK);
MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0xE9B5DBA5B5C0FBCFULL, 0x71374491428A2F98ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
// Rounds 4-7
TMSG1 = _mm_load_si128((const __m128i*) (input+16));
// TMSG1 = _mm_shuffle_epi8(TMSG1, MASK);
MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0xAB1C5ED5923F82A4ULL, 0x59F111F13956C25BULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
// Rounds 8-11
TMSG2 = _mm_load_si128((const __m128i*) (input+32));
// TMSG2 = _mm_shuffle_epi8(TMSG2, MASK);
MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0x550C7DC3243185BEULL, 0x12835B01D807AA98ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
// Rounds 12-15
TMSG3 = _mm_load_si128((const __m128i*) (input+48));
// TMSG3 = _mm_shuffle_epi8(TMSG3, MASK);
MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0xC19BF1749BDC06A7ULL, 0x80DEB1FE72BE5D74ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
TMSG0 = _mm_add_epi32(TMSG0, TMP);
TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
// Rounds 16-19
MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x240CA1CC0FC19DC6ULL, 0xEFBE4786E49B69C1ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
TMSG1 = _mm_add_epi32(TMSG1, TMP);
TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
// Rounds 20-23
MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x76F988DA5CB0A9DCULL, 0x4A7484AA2DE92C6FULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
TMSG2 = _mm_add_epi32(TMSG2, TMP);
TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
// Rounds 24-27
MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0xBF597FC7B00327C8ULL, 0xA831C66D983E5152ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
TMSG3 = _mm_add_epi32(TMSG3, TMP);
TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
// Rounds 28-31
MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0x1429296706CA6351ULL, 0xD5A79147C6E00BF3ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
TMSG0 = _mm_add_epi32(TMSG0, TMP);
TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
// Rounds 32-35
MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x53380D134D2C6DFCULL, 0x2E1B213827B70A85ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
TMSG1 = _mm_add_epi32(TMSG1, TMP);
TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
// Rounds 36-39
MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x92722C8581C2C92EULL, 0x766A0ABB650A7354ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
TMSG2 = _mm_add_epi32(TMSG2, TMP);
TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
// Rounds 40-43
MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0xC76C51A3C24B8B70ULL, 0xA81A664BA2BFE8A1ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
TMSG3 = _mm_add_epi32(TMSG3, TMP);
TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
// Rounds 44-47
MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0x106AA070F40E3585ULL, 0xD6990624D192E819ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
TMSG0 = _mm_add_epi32(TMSG0, TMP);
TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
// Rounds 48-51
MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x34B0BCB52748774CULL, 0x1E376C0819A4C116ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
TMSG1 = _mm_add_epi32(TMSG1, TMP);
TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
// Rounds 52-55
MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x682E6FF35B9CCA4FULL, 0x4ED8AA4A391C0CB3ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
TMSG2 = _mm_add_epi32(TMSG2, TMP);
TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
// Rounds 56-59
MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0x8CC7020884C87814ULL, 0x78A5636F748F82EEULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
TMSG3 = _mm_add_epi32(TMSG3, TMP);
TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
// Rounds 60-63
MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0xC67178F2BEF9A3F7ULL, 0xA4506CEB90BEFFFAULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
// Add values back to state
STATE0 = _mm_add_epi32(STATE0, ABEF_SAVE);
STATE1 = _mm_add_epi32(STATE1, CDGH_SAVE);
TMP = _mm_shuffle_epi32(STATE0, 0x1B); // FEBA
STATE1 = _mm_shuffle_epi32(STATE1, 0xB1); // DCHG
STATE0 = _mm_blend_epi16(TMP, STATE1, 0xF0); // DCBA
STATE1 = _mm_alignr_epi8(STATE1, TMP, 8); // ABEF
// Save state
_mm_store_si128((__m128i*) &state_out[0], STATE0);
_mm_store_si128((__m128i*) &state_out[4], STATE1);
}
#endif


@@ -0,0 +1,18 @@
#ifndef SHA2_HASH_OPT_H__
#define SHA2_HASH_OPT_H__ 1
#include <stddef.h>
#include "simd-utils.h"
#if defined(__SHA__)
void sha256_opt_transform( uint32_t *state_out, const void *input,
const uint32_t *state_in );
// 2 way with interleaved instructions
void sha256_ni2way_transform( uint32_t *out_X, uint32_t *out_Y,
const void *msg_X, const void *msg_Y,
const uint32_t *in_X, const uint32_t *in_Y );
#endif
#endif

algo/sha/sha256d-4way.c Normal file

@@ -0,0 +1,252 @@
#include "sha256t-gate.h"
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "sha-hash-4way.h"
#if defined(SHA256D_16WAY)
int scanhash_sha256d_16way( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
__m512i block[16] __attribute__ ((aligned (64)));
__m512i hash32[8] __attribute__ ((aligned (32)));
__m512i initstate[8] __attribute__ ((aligned (32)));
__m512i midstate[8] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
__m512i vdata[20] __attribute__ ((aligned (32)));
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t targ32_d7 = ptarget[7];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 16;
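// 16 below max_nonce so the final batch of lane nonces n .. n+15 stays in range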
uint32_t n = first_nonce;
__m512i *noncev = vdata + 19;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
const __m512i last_byte = m512_const1_32( 0x80000000 );
const __m512i sixteen = m512_const1_32( 16 );
for ( int i = 0; i < 19; i++ )
vdata[i] = m512_const1_32( pdata[i] );
*noncev = _mm512_set_epi32( n+15, n+14, n+13, n+12, n+11, n+10, n+9, n+8,
n+ 7, n+ 6, n+ 5, n+ 4, n+ 3, n+ 2, n+1, n );
// initialize state
initstate[0] = m512_const1_64( 0x6A09E6676A09E667 );
initstate[1] = m512_const1_64( 0xBB67AE85BB67AE85 );
initstate[2] = m512_const1_64( 0x3C6EF3723C6EF372 );
initstate[3] = m512_const1_64( 0xA54FF53AA54FF53A );
initstate[4] = m512_const1_64( 0x510E527F510E527F );
initstate[5] = m512_const1_64( 0x9B05688C9B05688C );
initstate[6] = m512_const1_64( 0x1F83D9AB1F83D9AB );
initstate[7] = m512_const1_64( 0x5BE0CD195BE0CD19 );
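// each 64 bit constant is the 32 bit IV word doubled, so m512_const1_64
// broadcasts the scalar SHA-256 initial state to all 16 lanes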
// hash first 64 bytes of data
sha256_16way_transform( midstate, vdata, initstate );
do
{
// 1. final 16 bytes of data, with padding
memcpy_512( block, vdata + 16, 4 );
block[ 4] = last_byte;
memset_zero_512( block + 5, 10 );
block[15] = m512_const1_32( 80*8 ); // bit count
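// the length field covers the whole 80 byte message, not just this block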
sha256_16way_transform( hash32, block, midstate );
// 2. 32 byte hash from 1.
memcpy_512( block, hash32, 8 );
block[ 8] = last_byte;
memset_zero_512( block + 9, 6 );
block[15] = m512_const1_32( 32*8 ); // bit count
sha256_16way_transform( hash32, block, initstate );
// byte swap final hash for testing
mm512_block_bswap_32( hash32, hash32 );
for ( int lane = 0; lane < 16; lane++ )
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
{
extr_lane_16x32( lane_hash, hash32, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
{
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm512_add_epi32( *noncev, sixteen );
n += 16;
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#endif
#if defined(SHA256D_8WAY)
int scanhash_sha256d_8way( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
__m256i block[16] __attribute__ ((aligned (64)));
__m256i hash32[8] __attribute__ ((aligned (32)));
__m256i initstate[8] __attribute__ ((aligned (32)));
__m256i midstate[8] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
__m256i vdata[20] __attribute__ ((aligned (32)));
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t targ32_d7 = ptarget[7];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
uint32_t n = first_nonce;
__m256i *noncev = vdata + 19;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
const __m256i last_byte = m256_const1_32( 0x80000000 );
const __m256i eight = m256_const1_32( 8 );
for ( int i = 0; i < 19; i++ )
vdata[i] = m256_const1_32( pdata[i] );
*noncev = _mm256_set_epi32( n+ 7, n+ 6, n+ 5, n+ 4, n+ 3, n+ 2, n+1, n );
// initialize state
initstate[0] = m256_const1_64( 0x6A09E6676A09E667 );
initstate[1] = m256_const1_64( 0xBB67AE85BB67AE85 );
initstate[2] = m256_const1_64( 0x3C6EF3723C6EF372 );
initstate[3] = m256_const1_64( 0xA54FF53AA54FF53A );
initstate[4] = m256_const1_64( 0x510E527F510E527F );
initstate[5] = m256_const1_64( 0x9B05688C9B05688C );
initstate[6] = m256_const1_64( 0x1F83D9AB1F83D9AB );
initstate[7] = m256_const1_64( 0x5BE0CD195BE0CD19 );
// hash first 64 bytes of data
sha256_8way_transform( midstate, vdata, initstate );
do
{
// 1. final 16 bytes of data, with padding
memcpy_256( block, vdata + 16, 4 );
block[ 4] = last_byte;
memset_zero_256( block + 5, 10 );
block[15] = m256_const1_32( 80*8 ); // bit count
sha256_8way_transform( hash32, block, midstate );
// 2. 32 byte hash from 1.
memcpy_256( block, hash32, 8 );
block[ 8] = last_byte;
memset_zero_256( block + 9, 6 );
block[15] = m256_const1_32( 32*8 ); // bit count
sha256_8way_transform( hash32, block, initstate );
// byte swap final hash for testing
mm256_block_bswap_32( hash32, hash32 );
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
{
extr_lane_8x32( lane_hash, hash32, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
{
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm256_add_epi32( *noncev, eight );
n += 8;
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#endif
#if defined(SHA256D_4WAY)
int scanhash_sha256d_4way( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
__m128i block[16] __attribute__ ((aligned (64)));
__m128i hash32[8] __attribute__ ((aligned (32)));
__m128i initstate[8] __attribute__ ((aligned (32)));
__m128i midstate[8] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
__m128i vdata[20] __attribute__ ((aligned (32)));
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t targ32_d7 = ptarget[7];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
uint32_t n = first_nonce;
__m128i *noncev = vdata + 19;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
const __m128i last_byte = m128_const1_32( 0x80000000 );
const __m128i four = m128_const1_32( 4 );
for ( int i = 0; i < 19; i++ )
vdata[i] = m128_const1_32( pdata[i] );
*noncev = _mm_set_epi32( n+ 3, n+ 2, n+1, n );
// initialize state
initstate[0] = m128_const1_64( 0x6A09E6676A09E667 );
initstate[1] = m128_const1_64( 0xBB67AE85BB67AE85 );
initstate[2] = m128_const1_64( 0x3C6EF3723C6EF372 );
initstate[3] = m128_const1_64( 0xA54FF53AA54FF53A );
initstate[4] = m128_const1_64( 0x510E527F510E527F );
initstate[5] = m128_const1_64( 0x9B05688C9B05688C );
initstate[6] = m128_const1_64( 0x1F83D9AB1F83D9AB );
initstate[7] = m128_const1_64( 0x5BE0CD195BE0CD19 );
// hash first 64 bytes of data
sha256_4way_transform( midstate, vdata, initstate );
do
{
// 1. final 16 bytes of data, with padding
memcpy_128( block, vdata + 16, 4 );
block[ 4] = last_byte;
memset_zero_128( block + 5, 10 );
block[15] = m128_const1_32( 80*8 ); // bit count
sha256_4way_transform( hash32, block, midstate );
// 2. 32 byte hash from 1.
memcpy_128( block, hash32, 8 );
block[ 8] = last_byte;
memset_zero_128( block + 9, 6 );
block[15] = m128_const1_32( 32*8 ); // bit count
sha256_4way_transform( hash32, block, initstate );
// byte swap final hash for testing
mm128_block_bswap_32( hash32, hash32 );
for ( int lane = 0; lane < 4; lane++ )
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
{
extr_lane_4x32( lane_hash, hash32, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
{
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm_add_epi32( *noncev, four );
n += 4;
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#endif


@@ -5,6 +5,79 @@
#include <stdio.h>
#include "sha-hash-4way.h"
#if defined(SHA256T_16WAY)
static __thread sha256_16way_context sha256_ctx16 __attribute__ ((aligned (64)));
void sha256q_16way_hash( void* output, const void* input )
{
uint32_t vhash[8*16] __attribute__ ((aligned (64)));
sha256_16way_context ctx;
memcpy( &ctx, &sha256_ctx16, sizeof ctx );
sha256_16way_update( &ctx, input + (64<<4), 16 );
sha256_16way_close( &ctx, vhash );
sha256_16way_init( &ctx );
sha256_16way_update( &ctx, vhash, 32 );
sha256_16way_close( &ctx, vhash );
sha256_16way_init( &ctx );
sha256_16way_update( &ctx, vhash, 32 );
sha256_16way_close( &ctx, vhash );
sha256_16way_init( &ctx );
sha256_16way_update( &ctx, vhash, 32 );
sha256_16way_close( &ctx, output );
}
int scanhash_sha256q_16way( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[20*16] __attribute__ ((aligned (64)));
uint32_t hash32[8*16] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash32_d7 = &(hash32[7<<4]);
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t targ32_d7 = ptarget[7];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 16;
uint32_t n = first_nonce;
__m512i *noncev = (__m512i*)vdata + 19; // aligned
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
mm512_bswap32_intrlv80_16x32( vdata, pdata );
*noncev = _mm512_set_epi32( n+15, n+14, n+13, n+12, n+11, n+10, n+9, n+8,
n+ 7, n+ 6, n+ 5, n+ 4, n+ 3, n+ 2, n+1, n );
sha256_16way_init( &sha256_ctx16 );
sha256_16way_update( &sha256_ctx16, vdata, 64 );
do
{
pdata[19] = n;
sha256q_16way_hash( hash32, vdata );
for ( int lane = 0; lane < 16; lane++ )
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
{
extr_lane_16x32( lane_hash, hash32, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
{
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm512_add_epi32( *noncev, m512_const1_32( 16 ) );
n += 16;
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#endif
#if defined(SHA256T_8WAY)
static __thread sha256_8way_context sha256_ctx8 __attribute__ ((aligned (64)));
@@ -31,68 +104,47 @@ void sha256q_8way_hash( void* output, const void* input )
sha256_8way_close( &ctx, output );
}
int scanhash_sha256q_8way( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t hash32[8*8] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash32_d7 = &(hash32[7<<3]);
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t targ32_d7 = ptarget[7];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
uint32_t n = first_nonce;
__m256i *noncev = (__m256i*)vdata + 19; // aligned
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
// Need big endian data
mm256_bswap32_intrlv80_8x32( vdata, pdata );
*noncev = _mm256_set_epi32( n+7, n+6, n+5, n+4, n+3, n+2, n+1, n );
sha256_8way_init( &sha256_ctx8 );
sha256_8way_update( &sha256_ctx8, vdata, 64 );
do
{
pdata[19] = n;
sha256q_8way_hash( hash32, vdata );
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
{
extr_lane_8x32( lane_hash, hash32, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
{
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm256_add_epi32( *noncev, m256_const1_32( 8 ) );
n += 8;
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}


@@ -1,108 +1,74 @@
#include "sha256t-gate.h"
#if !defined(SHA256T_16WAY) && !defined(SHA256T_8WAY) && !defined(SHA256T_4WAY)
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <openssl/sha.h>
#include "algo/sha/sph_sha2.h"
static __thread sph_sha256_context sha256q_ctx __attribute__ ((aligned (64)));
void sha256q_midstate( const void* input )
{
sph_sha256_init( &sha256q_ctx );
sph_sha256( &sha256q_ctx, input, 64 );
}
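// The first 64 of the 80 header bytes are constant during a scan, so this
// midstate is computed once and cloned into a fresh context for each nonce.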
int sha256q_hash( void* output, const void* input )
{
uint32_t _ALIGN(64) hash[16];
const int midlen = 64; // bytes
const int tail = 80 - midlen; // 16
sph_sha256_context ctx __attribute__ ((aligned (64)));
memcpy( &ctx, &sha256q_ctx, sizeof sha256q_ctx );
sph_sha256( &ctx, input + midlen, tail );
sph_sha256_close( &ctx, hash );
sph_sha256_init( &ctx );
sph_sha256( &ctx, hash, 32 );
sph_sha256_close( &ctx, hash );
sph_sha256_init( &ctx );
sph_sha256( &ctx, hash, 32 );
sph_sha256_close( &ctx, hash );
sph_sha256_init( &ctx );
sph_sha256( &ctx, hash, 32 );
sph_sha256_close( &ctx, output );
return 1;
}
int scanhash_sha256q( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t edata[20] __attribute__((aligned(64)));
uint32_t hash[8] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 1;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
mm128_bswap32_80( edata, pdata );
sha256q_midstate( edata );
do
{
edata[19] = n;
if ( likely( sha256q_hash( hash, edata ) ) )
if ( unlikely( valid_hash( hash, ptarget ) && !bench ) )
{
pdata[19] = bswap_32( n );
submit_solution( work, hash, mythr );
}
n++;
} while ( n < last_nonce && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce;
pdata[19] = n;
return 0;
}
#endif


@@ -5,87 +5,178 @@
#include <stdio.h>
#include "sha-hash-4way.h"
#if defined(SHA256T_16WAY)
int scanhash_sha256t_16way( struct work *work, const uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
__m512i block[16] __attribute__ ((aligned (64)));
__m512i hash32[8] __attribute__ ((aligned (32)));
__m512i initstate[8] __attribute__ ((aligned (32)));
__m512i midstate[8] __attribute__ ((aligned (32)));
__m512i midstate2[8] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
__m512i vdata[20] __attribute__ ((aligned (32)));
uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t targ32_d7 = ptarget[7];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 16;
uint32_t n = first_nonce;
__m512i *noncev = vdata + 19;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
const __m512i last_byte = m512_const1_32( 0x80000000 );
const __m512i sixteen = m512_const1_32( 16 );
for ( int i = 0; i < 19; i++ )
vdata[i] = m512_const1_32( pdata[i] );
*noncev = _mm512_set_epi32( n+15, n+14, n+13, n+12, n+11, n+10, n+9, n+8,
n+ 7, n+ 6, n+ 5, n+ 4, n+ 3, n+ 2, n+1, n );
// initialize state
initstate[0] = m512_const1_64( 0x6A09E6676A09E667 );
initstate[1] = m512_const1_64( 0xBB67AE85BB67AE85 );
initstate[2] = m512_const1_64( 0x3C6EF3723C6EF372 );
initstate[3] = m512_const1_64( 0xA54FF53AA54FF53A );
initstate[4] = m512_const1_64( 0x510E527F510E527F );
initstate[5] = m512_const1_64( 0x9B05688C9B05688C );
initstate[6] = m512_const1_64( 0x1F83D9AB1F83D9AB );
initstate[7] = m512_const1_64( 0x5BE0CD195BE0CD19 );
// hash first 64 byte block of data
sha256_16way_transform( midstate, vdata, initstate );
// Do 3 rounds on the first 12 bytes of the next block
sha256_16way_prehash_3rounds( midstate2, vdata + 16, midstate );
do
{
// 1. final 16 bytes of data, with padding
memcpy_512( block, vdata + 16, 4 );
block[ 4] = last_byte;
memset_zero_512( block + 5, 10 );
block[15] = m512_const1_32( 80*8 ); // bit count
sha256_16way_final_rounds( hash32, block, midstate, midstate2 );
// sha256_16way_transform( hash32, block, midstate );
// 2. 32 byte hash from 1.
memcpy_512( block, hash32, 8 );
block[ 8] = last_byte;
memset_zero_512( block + 9, 6 );
block[15] = m512_const1_32( 32*8 ); // bit count
sha256_16way_transform( hash32, block, initstate );
// 3. 32 byte hash from 2.
memcpy_512( block, hash32, 8 );
sha256_16way_transform( hash32, block, initstate );
// byte swap final hash for testing
mm512_block_bswap_32( hash32, hash32 );
for ( int lane = 0; lane < 16; lane++ )
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
{
extr_lane_16x32( lane_hash, hash32, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
{
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm512_add_epi32( *noncev, sixteen );
n += 16;
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#endif
#if defined(SHA256T_8WAY)

int scanhash_sha256t_8way( struct work *work, const uint32_t max_nonce,
                           uint64_t *hashes_done, struct thr_info *mythr )
{
   __m256i vdata[20]     __attribute__ ((aligned (32)));
   __m256i block[16]     __attribute__ ((aligned (64)));
   __m256i hash32[8]     __attribute__ ((aligned (32)));
   __m256i initstate[8]  __attribute__ ((aligned (32)));
   __m256i midstate[8]   __attribute__ ((aligned (32)));
   uint32_t lane_hash[8] __attribute__ ((aligned (32)));
   uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
   uint32_t *pdata = work->data;
   const uint32_t *ptarget = work->target;
   const uint32_t targ32_d7 = ptarget[7];
   const uint32_t first_nonce = pdata[19];
   const uint32_t last_nonce = max_nonce - 8;
   uint32_t n = first_nonce;
   __m256i *noncev = vdata + 19;
   const int thr_id = mythr->id;
   const bool bench = opt_benchmark;
   const __m256i last_byte = m256_const1_32( 0x80000000 );
   const __m256i eight = m256_const1_32( 8 );

   for ( int i = 0; i < 19; i++ )
      vdata[i] = m256_const1_32( pdata[i] );
   *noncev = _mm256_set_epi32( n+ 7, n+ 6, n+ 5, n+ 4, n+ 3, n+ 2, n+1, n );

   // initialize state
   initstate[0] = m256_const1_64( 0x6A09E6676A09E667 );
   initstate[1] = m256_const1_64( 0xBB67AE85BB67AE85 );
   initstate[2] = m256_const1_64( 0x3C6EF3723C6EF372 );
   initstate[3] = m256_const1_64( 0xA54FF53AA54FF53A );
   initstate[4] = m256_const1_64( 0x510E527F510E527F );
   initstate[5] = m256_const1_64( 0x9B05688C9B05688C );
   initstate[6] = m256_const1_64( 0x1F83D9AB1F83D9AB );
   initstate[7] = m256_const1_64( 0x5BE0CD195BE0CD19 );

   // hash first 64 bytes of data
   sha256_8way_transform( midstate, vdata, initstate );

   do
   {
      // 1. final 16 bytes of data, with padding
      memcpy_256( block, vdata + 16, 4 );
      block[ 4] = last_byte;
      memset_zero_256( block + 5, 10 );
      block[15] = m256_const1_32( 80*8 ); // bit count
      sha256_8way_transform( hash32, block, midstate );

      // 2. 32 byte hash from 1.
      memcpy_256( block, hash32, 8 );
      block[ 8] = last_byte;
      memset_zero_256( block + 9, 6 );
      block[15] = m256_const1_32( 32*8 ); // bit count
      sha256_8way_transform( hash32, block, initstate );

      // 3. 32 byte hash from 2.
      memcpy_256( block, hash32, 8 );
      sha256_8way_transform( hash32, block, initstate );

      // byte swap final hash for testing
      mm256_block_bswap_32( hash32, hash32 );

      for ( int lane = 0; lane < 8; lane++ )
      if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
      {
         extr_lane_8x32( lane_hash, hash32, lane, 256 );
         if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
         {
            pdata[19] = n + lane;
            submit_solution( work, lane_hash, mythr );
         }
      }
      *noncev = _mm256_add_epi32( *noncev, eight );
      n += 8;
   } while ( (n < last_nonce) && !work_restart[thr_id].restart );

   pdata[19] = n;
   *hashes_done = n - first_nonce;
   return 0;
}
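// Editor's note: the lane test above is a two-stage filter. Only the high
// word of each lane's hash (hash32_d7, word 7 after the byte swap) is
// compared against ptarget[7]; the full 256-bit comparison in valid_hash()
// runs only for the rare lanes that pass this cheap first test.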
@@ -93,82 +184,84 @@ int scanhash_sha256t_8way( struct work *work, const uint32_t max_nonce,
#if defined(SHA256T_4WAY)

int scanhash_sha256t_4way( struct work *work, const uint32_t max_nonce,
                           uint64_t *hashes_done, struct thr_info *mythr )
{
   __m128i vdata[20]     __attribute__ ((aligned (32)));
   __m128i block[16]     __attribute__ ((aligned (64)));
   __m128i hash32[8]     __attribute__ ((aligned (32)));
   __m128i initstate[8]  __attribute__ ((aligned (32)));
   __m128i midstate[8]   __attribute__ ((aligned (32)));
   uint32_t lane_hash[8] __attribute__ ((aligned (32)));
   uint32_t *hash32_d7 = (uint32_t*)&( hash32[7] );
   uint32_t *pdata = work->data;
   const uint32_t *ptarget = work->target;
   const uint32_t targ32_d7 = ptarget[7];
   const uint32_t first_nonce = pdata[19];
   const uint32_t last_nonce = max_nonce - 4;
   uint32_t n = first_nonce;
   __m128i *noncev = vdata + 19;
   const int thr_id = mythr->id;
   const bool bench = opt_benchmark;
   const __m128i last_byte = m128_const1_32( 0x80000000 );
   const __m128i four = m128_const1_32( 4 );

   for ( int i = 0; i < 19; i++ )
      vdata[i] = m128_const1_32( pdata[i] );
   *noncev = _mm_set_epi32( n+ 3, n+ 2, n+1, n );

   // initialize state
   initstate[0] = m128_const1_64( 0x6A09E6676A09E667 );
   initstate[1] = m128_const1_64( 0xBB67AE85BB67AE85 );
   initstate[2] = m128_const1_64( 0x3C6EF3723C6EF372 );
   initstate[3] = m128_const1_64( 0xA54FF53AA54FF53A );
   initstate[4] = m128_const1_64( 0x510E527F510E527F );
   initstate[5] = m128_const1_64( 0x9B05688C9B05688C );
   initstate[6] = m128_const1_64( 0x1F83D9AB1F83D9AB );
   initstate[7] = m128_const1_64( 0x5BE0CD195BE0CD19 );

   // hash first 64 bytes of data
   sha256_4way_transform( midstate, vdata, initstate );

   do
   {
      // 1. final 16 bytes of data, with padding
      memcpy_128( block, vdata + 16, 4 );
      block[ 4] = last_byte;
      memset_zero_128( block + 5, 10 );
      block[15] = m128_const1_32( 80*8 ); // bit count
      sha256_4way_transform( hash32, block, midstate );

      // 2. 32 byte hash from 1.
      memcpy_128( block, hash32, 8 );
      block[ 8] = last_byte;
      memset_zero_128( block + 9, 6 );
      block[15] = m128_const1_32( 32*8 ); // bit count
      sha256_4way_transform( hash32, block, initstate );

      // 3. 32 byte hash from 2.
      memcpy_128( block, hash32, 8 );
      sha256_4way_transform( hash32, block, initstate );

      // byte swap final hash for testing
      mm128_block_bswap_32( hash32, hash32 );

      for ( int lane = 0; lane < 4; lane++ )
      if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
      {
         extr_lane_4x32( lane_hash, hash32, lane, 256 );
         if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
         {
            pdata[19] = n + lane;
            submit_solution( work, lane_hash, mythr );
         }
      }
      *noncev = _mm_add_epi32( *noncev, four );
      n += 4;
   } while ( (n < last_nonce) && !work_restart[thr_id].restart );

   pdata[19] = n;
   *hashes_done = n - first_nonce;
   return 0;
}
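// Editor's note: a plain reference sketch of what every scanhash variant
// above computes per nonce. sha256t is simply SHA-256 applied three times
// to the 80-byte header; sph_sha256_full() is the helper added to
// sph_sha2.c later in this changeset.
//
//    static void sha256t_ref( void *out, const void *header80 )
//    {
//       uint8_t h[32];
//       sph_sha256_full( h, header80, 80 );  // 1st SHA-256
//       sph_sha256_full( h, h, 32 );         // 2nd
//       sph_sha256_full( out, h, 32 );       // 3rd
//    }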


@@ -2,38 +2,37 @@
bool register_sha256t_algo( algo_gate_t* gate )
{
    gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
#if defined(SHA256T_16WAY)
    gate->scanhash = (void*)&scanhash_sha256t_16way;
#elif defined(__SHA__)
    gate->scanhash = (void*)&scanhash_sha256t;
#elif defined(SHA256T_8WAY)
    gate->scanhash = (void*)&scanhash_sha256t_8way;
#else
    gate->scanhash = (void*)&scanhash_sha256t_4way;
#endif
    return true;
}

bool register_sha256q_algo( algo_gate_t* gate )
{
    gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
#if defined(SHA256T_16WAY)
    gate->scanhash = (void*)&scanhash_sha256q_16way;
    gate->hash = (void*)&sha256q_16way_hash;
#elif defined(__SHA__)
    gate->scanhash = (void*)&scanhash_sha256q;
    gate->hash = (void*)&sha256q_hash;
#elif defined(SHA256T_8WAY)
    gate->scanhash = (void*)&scanhash_sha256q_8way;
    gate->hash = (void*)&sha256q_8way_hash;
#else
    gate->scanhash = (void*)&scanhash_sha256q_4way;
    gate->hash = (void*)&sha256q_4way_hash;
#endif
    return true;
}
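// Editor's note: a hedged sketch of how a registered gate is consumed; the
// real cpuminer-opt thread loop differs, this only illustrates the
// function-pointer dispatch configured above.
//
//    algo_gate_t gate;
//    register_sha256t_algo( &gate );
//    rc = gate.scanhash( work, max_nonce, &hashes_done, mythr );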


@@ -4,21 +4,28 @@
#include <stdint.h>
#include "algo-gate-api.h"
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
  #define SHA256T_16WAY 1
#elif defined(__AVX2__)
  #define SHA256T_8WAY 1
#else
  #define SHA256T_4WAY 1
#endif
bool register_sha256t_algo( algo_gate_t* gate );
bool register_sha256q_algo( algo_gate_t* gate );
#if defined(SHA256T_16WAY)
int scanhash_sha256t_16way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
void sha256q_16way_hash( void *output, const void *input );
int scanhash_sha256q_16way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
#if defined(SHA256T_8WAY)
int scanhash_sha256t_8way( struct work *work, uint32_t max_nonce,
                           uint64_t *hashes_done, struct thr_info *mythr );
void sha256q_8way_hash( void *output, const void *input );
@@ -28,7 +35,6 @@ int scanhash_sha256q_8way( struct work *work, uint32_t max_nonce,
#if defined(SHA256T_4WAY)
int scanhash_sha256t_4way( struct work *work, uint32_t max_nonce,
                           uint64_t *hashes_done, struct thr_info *mythr );
void sha256q_4way_hash( void *output, const void *input );
@@ -36,10 +42,14 @@ int scanhash_sha256q_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
#if defined(__SHA__)
int scanhash_sha256t( struct work *work, uint32_t max_nonce,
                      uint64_t *hashes_done, struct thr_info *mythr );
#endif

int sha256q_hash( void *output, const void *input );
int scanhash_sha256q( struct work *work, uint32_t max_nonce,
                      uint64_t *hashes_done, struct thr_info *mythr );


@@ -1,105 +1,210 @@
#include "sha256t-gate.h"
#if !defined(SHA256T_16WAY) && !defined(SHA256T_8WAY) && !defined(SHA256T_4WAY)
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <openssl/sha.h>
//#include "algo/sha/sph_sha2.h"
#include "sha256-hash-opt.h"
static __thread SHA256_CTX sha256t_ctx __attribute__ ((aligned (64)));
#if defined(__SHA__)
// Only used on CPUs with SHA
/*
static __thread sph_sha256_context sha256t_ctx __attribute__ ((aligned (64)));
void sha256t_midstate( const void* input )
{
SHA256_Init( &sha256t_ctx );
SHA256_Update( &sha256t_ctx, input, 64 );
sph_sha256_init( &sha256t_ctx );
sph_sha256( &sha256t_ctx, input, 64 );
}
void sha256t_hash( void* output, const void* input )
int sha256t_hash( void* output, const void* input )
{
uint32_t _ALIGN(64) hash[16];
const int midlen = 64; // bytes
const int tail = 80 - midlen; // 16
SHA256_CTX ctx __attribute__ ((aligned (64)));
sph_sha256_context ctx __attribute__ ((aligned (64)));
memcpy( &ctx, &sha256t_ctx, sizeof sha256t_ctx );
SHA256_Update( &ctx, input + midlen, tail );
SHA256_Final( (unsigned char*)hash, &ctx );
sph_sha256( &ctx, input + midlen, tail );
sph_sha256_close( &ctx, hash );
SHA256_Init( &ctx );
SHA256_Update( &ctx, hash, 32 );
SHA256_Final( (unsigned char*)hash, &ctx );
sph_sha256_init( &ctx );
sph_sha256( &ctx, hash, 32 );
sph_sha256_close( &ctx, hash );
SHA256_Init( &ctx );
SHA256_Update( &ctx, hash, 32 );
SHA256_Final( (unsigned char*)hash, &ctx );
sph_sha256_init( &ctx );
sph_sha256( &ctx, hash, 32 );
sph_sha256_close( &ctx, output );
memcpy( output, hash, 32 );
return 1;
}
*/
/*
int scanhash_sha256t( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t block[16] __attribute__ ((aligned (64)));
uint32_t hash32[8] __attribute__ ((aligned (32)));
uint32_t initstate[8] __attribute__ ((aligned (32)));
uint32_t midstate[8] __attribute__ ((aligned (32)));
// uint32_t edata[20] __attribute__((aligned(64)));
// uint32_t hash[8] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 1;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
__m128i shuf_bswap32 =
_mm_set_epi64x( 0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL );
// mm128_bswap32_80( edata, pdata );
// sha256t_midstate( edata );
// initialize state
initstate[0] = 0x6A09E667;
initstate[1] = 0xBB67AE85;
initstate[2] = 0x3C6EF372;
initstate[3] = 0xA54FF53A;
initstate[4] = 0x510E527F;
initstate[5] = 0x9B05688C;
initstate[6] = 0x1F83D9AB;
initstate[7] = 0x5BE0CD19;
// hash first 64 bytes of data
sha256_opt_transform( midstate, pdata, initstate );
do
{
// 1. final 16 bytes of data, with padding
memcpy( block, pdata + 16, 16 );
block[ 4] = 0x80000000;
memset( block + 5, 0, 40 );
block[15] = 80*8; // bit count
sha256_opt_transform( hash32, block, midstate );
// 2. 32 byte hash from 1.
memcpy( block, hash32, 32 );
block[ 8] = 0x80000000;
memset( block + 9, 0, 24 );
block[15] = 32*8; // bit count
sha256_opt_transform( hash32, block, initstate );
// 3. 32 byte hash from 2.
memcpy( block, hash32, 32 );
sha256_opt_transform( hash32, block, initstate );
// byte swap final hash for testing
casti_m128i( hash32, 0 ) =
_mm_shuffle_epi8( casti_m128i( hash32, 0 ), shuf_bswap32 );
casti_m128i( hash32, 1 ) =
_mm_shuffle_epi8( casti_m128i( hash32, 1 ), shuf_bswap32 );
if ( unlikely( valid_hash( hash32, ptarget ) && !bench ) )
submit_solution( work, hash32, mythr );
n++;
pdata[19] = n;
} while ( (n < last_nonce) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce;
return 0;
}
*/
int scanhash_sha256t( struct work *work, uint32_t max_nonce,
                      uint64_t *hashes_done, struct thr_info *mythr )
{
   uint32_t block0[16]   __attribute__ ((aligned (64)));
   uint32_t block1[16]   __attribute__ ((aligned (64)));
   uint32_t hash0[8]     __attribute__ ((aligned (32)));
   uint32_t hash1[8]     __attribute__ ((aligned (32)));
   uint32_t initstate[8] __attribute__ ((aligned (32)));
   uint32_t midstate[8]  __attribute__ ((aligned (32)));
   uint32_t *pdata = work->data;
   uint32_t *ptarget = work->target;
   const uint32_t first_nonce = pdata[19];
   const uint32_t last_nonce = max_nonce - 1;
   uint32_t n = first_nonce;
   const int thr_id = mythr->id;
   const bool bench = opt_benchmark;
   __m128i shuf_bswap32 =
         _mm_set_epi64x( 0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL );

   // initialize state
   initstate[0] = 0x6A09E667;
   initstate[1] = 0xBB67AE85;
   initstate[2] = 0x3C6EF372;
   initstate[3] = 0xA54FF53A;
   initstate[4] = 0x510E527F;
   initstate[5] = 0x9B05688C;
   initstate[6] = 0x1F83D9AB;
   initstate[7] = 0x5BE0CD19;

   // hash first 64 bytes of data
   sha256_opt_transform( midstate, pdata, initstate );

   do
   {
      // 1. final 16 bytes of data, with padding
      memcpy( block0, pdata + 16, 16 );
      memcpy( block1, pdata + 16, 16 );
      block0[ 3] = n;
      block1[ 3] = n+1;
      block0[ 4] = block1[ 4] = 0x80000000;
      memset( block0 + 5, 0, 40 );
      memset( block1 + 5, 0, 40 );
      block0[15] = block1[15] = 80*8; // bit count
      sha256_ni2way_transform( hash0, hash1, block0, block1, midstate, midstate );

      // 2. 32 byte hash from 1.
      memcpy( block0, hash0, 32 );
      memcpy( block1, hash1, 32 );
      block0[ 8] = block1[ 8] = 0x80000000;
      memset( block0 + 9, 0, 24 );
      memset( block1 + 9, 0, 24 );
      block0[15] = block1[15] = 32*8; // bit count
      sha256_ni2way_transform( hash0, hash1, block0, block1, initstate, initstate );

      // 3. 32 byte hash from 2.
      memcpy( block0, hash0, 32 );
      memcpy( block1, hash1, 32 );
      sha256_ni2way_transform( hash0, hash1, block0, block1, initstate, initstate );

      // byte swap final hash for testing
      casti_m128i( hash0, 0 ) =
            _mm_shuffle_epi8( casti_m128i( hash0, 0 ), shuf_bswap32 );
      casti_m128i( hash0, 1 ) =
            _mm_shuffle_epi8( casti_m128i( hash0, 1 ), shuf_bswap32 );
      casti_m128i( hash1, 0 ) =
            _mm_shuffle_epi8( casti_m128i( hash1, 0 ), shuf_bswap32 );
      casti_m128i( hash1, 1 ) =
            _mm_shuffle_epi8( casti_m128i( hash1, 1 ), shuf_bswap32 );

      if ( unlikely( valid_hash( hash0, ptarget ) && !bench ) )
      {
         pdata[19] = n;
         submit_solution( work, hash0, mythr );
      }
      if ( unlikely( valid_hash( hash1, ptarget ) && !bench ) )
      {
         pdata[19] = n+1;
         submit_solution( work, hash1, mythr );
      }
      n += 2;
   } while ( (n < last_nonce) && !work_restart[thr_id].restart );

   pdata[19] = n;
   *hashes_done = n - first_nonce;
   return 0;
}
#endif
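// Editor's note: scanhash_sha256t() above pairs nonces n and n+1 into each
// sha256_ni2way_transform() call. The two message streams are independent,
// so interleaving them helps hide the multi-cycle latency of
// _mm_sha256rnds2_epu32 and raises throughput per core.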


@@ -96,74 +96,22 @@ static const uint64_t K512[80] =
// SHA-512 8 way 64 bit
#define CH8W(X, Y, Z) \
   _mm512_ternarylogic_epi64( X, Y, Z, 0xca )

#define MAJ8W(X, Y, Z) \
   _mm512_ternarylogic_epi64( X, Y, Z, 0xe8 )

#define BSG8W_5_0(x) \
   mm512_xor3( mm512_ror_64(x, 28), mm512_ror_64(x, 34), mm512_ror_64(x, 39) )

#define BSG8W_5_1(x) \
   mm512_xor3( mm512_ror_64(x, 14), mm512_ror_64(x, 18), mm512_ror_64(x, 41) )

#define SSG8W_5_0(x) \
   mm512_xor3( mm512_ror_64(x, 1), mm512_ror_64(x, 8), _mm512_srli_epi64(x, 7) )

#define SSG8W_5_1(x) \
   mm512_xor3( mm512_ror_64(x, 19), mm512_ror_64(x, 61), _mm512_srli_epi64(x, 6) )
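// Editor's note (not part of the diff): the 0xca and 0xe8 immediates used
// by CH8W and MAJ8W above are truth tables for _mm512_ternarylogic_epi64:
// bit i of the immediate is the output for input bits (a,b,c) =
// (bit2,bit1,bit0) of i. A plain-C check, no AVX-512 required:
//
//    static uint8_t ternlog_imm8( uint8_t (*f)(uint8_t,uint8_t,uint8_t) )
//    {
//       uint8_t imm = 0;
//       for ( int i = 0; i < 8; i++ )
//          imm |= (uint8_t)( ( f( (i>>2)&1, (i>>1)&1, i&1 ) & 1 ) << i );
//       return imm;
//    }
//    static uint8_t ch ( uint8_t x, uint8_t y, uint8_t z )
//    {  return ((y ^ z) & x) ^ z;  }          // ternlog_imm8(ch)  == 0xca
//    static uint8_t maj( uint8_t x, uint8_t y, uint8_t z )
//    {  return (x & y) | ((x | y) & z);  }    // ternlog_imm8(maj) == 0xe8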
#define SHA3_8WAY_STEP(A, B, C, D, E, F, G, H, i) \
do { \
@@ -187,8 +135,8 @@ sha512_8way_round( sha512_8way_context *ctx, __m512i *in, __m512i r[8] )
mm512_block_bswap_64( W+8, in+8 );
for ( i = 16; i < 80; i++ )
      W[i] = mm512_add4_64( SSG8W_5_0( W[i-15] ), SSG8W_5_1( W[i-2] ),
                            W[ i- 7 ], W[ i-16 ] );
if ( ctx->initialized )
{
@@ -319,14 +267,20 @@ void sha512_8way_close( sha512_8way_context *sc, void *dst )
// SHA-512 4 way 64 bit
#define CH(X, Y, Z) \
   _mm256_xor_si256( _mm256_and_si256( _mm256_xor_si256( Y, Z ), X ), Z )

/*
#define MAJ(X, Y, Z) \
  _mm256_or_si256( _mm256_and_si256( X, Y ), \
                   _mm256_and_si256( _mm256_or_si256( X, Y ), Z ) )
*/

#define MAJ(X, Y, Z) \
  _mm256_xor_si256( Y, _mm256_and_si256( X_xor_Y = _mm256_xor_si256( X, Y ), \
                                         Y_xor_Z ) )
#define BSG5_0(x) \
mm256_ror_64( _mm256_xor_si256( mm256_ror_64( \
_mm256_xor_si256( mm256_ror_64( x, 5 ), x ), 6 ), x ), 28 )
@@ -334,7 +288,7 @@ void sha512_8way_close( sha512_8way_context *sc, void *dst )
#define BSG5_1(x) \
mm256_ror_64( _mm256_xor_si256( mm256_ror_64( \
_mm256_xor_si256( mm256_ror_64( x, 23 ), x ), 4 ), x ), 14 )
*/
/*
#define BSG5_0(x) \
_mm256_xor_si256( _mm256_xor_si256( \
@@ -402,7 +356,7 @@ static inline __m256i ssg512_add( __m256i w0, __m256i w1 )
w1 = _mm256_xor_si256( X1a, X1b ); \
} while(0)
*/
/*
#define SHA3_4WAY_STEP(A, B, C, D, E, F, G, H, i) \
do { \
__m256i K = _mm256_set1_epi64x( K512[ i ] ); \
@@ -431,7 +385,7 @@ do { \
H = _mm256_add_epi64( T1, T2 ); \
D = _mm256_add_epi64( D, T1 ); \
} while (0)
*/
/*
#define SHA3_4WAY_STEP(A, B, C, D, E, F, G, H, i) \
do { \
@@ -445,7 +399,7 @@ do { \
} while (0)
*/
/*
#define SHA3_4WAY_STEP(A, B, C, D, E, F, G, H, i) \
do { \
__m256i T1, T2; \
@@ -453,16 +407,17 @@ do { \
T1 = _mm256_add_epi64( H, mm256_add4_64( BSG5_1(E), CH(E, F, G), \
K, W[i] ) ); \
T2 = _mm256_add_epi64( BSG5_0(A), MAJ(A, B, C) ); \
Y_xor_Z = X_xor_Y; \
D = _mm256_add_epi64( D, T1 ); \
H = _mm256_add_epi64( T1, T2 ); \
} while (0)
*/
static void
sha512_4way_round( sha512_4way_context *ctx, __m256i *in, __m256i r[8] )
{
int i;
   register __m256i A, B, C, D, E, F, G, H, X_xor_Y, Y_xor_Z;
__m256i W[80];
mm256_block_bswap_64( W , in );
@@ -495,6 +450,8 @@ sha512_4way_round( sha512_4way_context *ctx, __m256i *in, __m256i r[8] )
H = m256_const1_64( 0x5BE0CD19137E2179 );
}
Y_xor_Z = _mm256_xor_si256( B, C );
for ( i = 0; i < 80; i += 8 )
{
SHA3_4WAY_STEP( A, B, C, D, E, F, G, H, i + 0 );

View File

@@ -40,8 +40,8 @@
#endif
#define CH(X, Y, Z) ((((Y) ^ (Z)) & (X)) ^ (Z))
//#define MAJ(X, Y, Z)   (((Y) & (Z)) | (((Y) | (Z)) & (X)))
#define MAJ( X, Y, Z )   ( Y ^ ( ( X ^ Y ) & ( Y ^ Z ) ) )
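// Editor's note: the active MAJ above is the standard xor form of
// majority: Y ^ ((X ^ Y) & (Y ^ Z)) equals (X&Y)|(X&Z)|(Y&Z) for all
// inputs, easily checked over the 8 bit patterns. Because consecutive SHA
// rounds rotate (A,B,C) to (new,A,B), the X^Y term of one round can be
// reused as the Y^Z term of the next, which is what the 4-way SHA-512
// code in this changeset does with X_xor_Y / Y_xor_Z.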
#define ROTR SPH_ROTR32
#define BSG2_0(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
@@ -71,8 +71,200 @@ static const sph_u32 H256[8] = {
* of the compression function.
*/
#if defined(__SHA__)

#include "simd-utils.h"
static void sha2_round( const uint8_t input[], uint32_t state[8] )
{
__m128i STATE0, STATE1;
__m128i MSG, TMP, MASK;
__m128i TMSG0, TMSG1, TMSG2, TMSG3;
__m128i ABEF_SAVE, CDGH_SAVE;
// Load initial values
TMP = _mm_load_si128((__m128i*) &state[0]);
STATE1 = _mm_load_si128((__m128i*) &state[4]);
MASK = _mm_set_epi64x(0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL);
TMP = _mm_shuffle_epi32(TMP, 0xB1); // CDAB
STATE1 = _mm_shuffle_epi32(STATE1, 0x1B); // EFGH
STATE0 = _mm_alignr_epi8(TMP, STATE1, 8); // ABEF
STATE1 = _mm_blend_epi16(STATE1, TMP, 0xF0); // CDGH
// Save current hash
ABEF_SAVE = STATE0;
CDGH_SAVE = STATE1;
// Rounds 0-3
MSG = _mm_load_si128((const __m128i*) (input+0));
TMSG0 = _mm_shuffle_epi8(MSG, MASK);
MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0xE9B5DBA5B5C0FBCFULL, 0x71374491428A2F98ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
// Rounds 4-7
TMSG1 = _mm_load_si128((const __m128i*) (input+16));
TMSG1 = _mm_shuffle_epi8(TMSG1, MASK);
MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0xAB1C5ED5923F82A4ULL, 0x59F111F13956C25BULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
// Rounds 8-11
TMSG2 = _mm_load_si128((const __m128i*) (input+32));
TMSG2 = _mm_shuffle_epi8(TMSG2, MASK);
MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0x550C7DC3243185BEULL, 0x12835B01D807AA98ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
// Rounds 12-15
TMSG3 = _mm_load_si128((const __m128i*) (input+48));
TMSG3 = _mm_shuffle_epi8(TMSG3, MASK);
MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0xC19BF1749BDC06A7ULL, 0x80DEB1FE72BE5D74ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
TMSG0 = _mm_add_epi32(TMSG0, TMP);
TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
// Rounds 16-19
MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x240CA1CC0FC19DC6ULL, 0xEFBE4786E49B69C1ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
TMSG1 = _mm_add_epi32(TMSG1, TMP);
TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
// Rounds 20-23
MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x76F988DA5CB0A9DCULL, 0x4A7484AA2DE92C6FULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
TMSG2 = _mm_add_epi32(TMSG2, TMP);
TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
// Rounds 24-27
MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0xBF597FC7B00327C8ULL, 0xA831C66D983E5152ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
TMSG3 = _mm_add_epi32(TMSG3, TMP);
TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
// Rounds 28-31
MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0x1429296706CA6351ULL, 0xD5A79147C6E00BF3ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
TMSG0 = _mm_add_epi32(TMSG0, TMP);
TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
// Rounds 32-35
MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x53380D134D2C6DFCULL, 0x2E1B213827B70A85ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
TMSG1 = _mm_add_epi32(TMSG1, TMP);
TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
// Rounds 36-39
MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x92722C8581C2C92EULL, 0x766A0ABB650A7354ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
TMSG2 = _mm_add_epi32(TMSG2, TMP);
TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
// Rounds 40-43
MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0xC76C51A3C24B8B70ULL, 0xA81A664BA2BFE8A1ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
TMSG3 = _mm_add_epi32(TMSG3, TMP);
TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
// Rounds 44-47
MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0x106AA070F40E3585ULL, 0xD6990624D192E819ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
TMSG0 = _mm_add_epi32(TMSG0, TMP);
TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
// Rounds 48-51
MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x34B0BCB52748774CULL, 0x1E376C0819A4C116ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
TMSG1 = _mm_add_epi32(TMSG1, TMP);
TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
// Rounds 52-55
MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x682E6FF35B9CCA4FULL, 0x4ED8AA4A391C0CB3ULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
TMSG2 = _mm_add_epi32(TMSG2, TMP);
TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
// Rounds 56-59
MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0x8CC7020884C87814ULL, 0x78A5636F748F82EEULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
TMSG3 = _mm_add_epi32(TMSG3, TMP);
TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
// Rounds 60-63
MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0xC67178F2BEF9A3F7ULL, 0xA4506CEB90BEFFFAULL));
STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
MSG = _mm_shuffle_epi32(MSG, 0x0E);
STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
// Add values back to state
STATE0 = _mm_add_epi32(STATE0, ABEF_SAVE);
STATE1 = _mm_add_epi32(STATE1, CDGH_SAVE);
TMP = _mm_shuffle_epi32(STATE0, 0x1B); // FEBA
STATE1 = _mm_shuffle_epi32(STATE1, 0xB1); // DCHG
STATE0 = _mm_blend_epi16(TMP, STATE1, 0xF0); // DCBA
STATE1 = _mm_alignr_epi8(STATE1, TMP, 8); // ABEF
// Save state
_mm_store_si128((__m128i*) &state[0], STATE0);
_mm_store_si128((__m128i*) &state[4], STATE1);
}
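// Editor's note (not part of the diff): a minimal usage sketch for the
// SHA-NI sha2_round() above. The state array should be 16-byte aligned for
// the _mm_load_si128/_mm_store_si128 calls; H256 is the initial state
// table defined earlier in this file.
//
//    uint32_t state[8] __attribute__ ((aligned (16)));
//    memcpy( state, H256, sizeof state );   // A..H initial values
//    sha2_round( block64, state );          // compress one 64-byte block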
#else // no SHA
/*
static const sph_u32 K[64] = {
SPH_C32(0x428A2F98), SPH_C32(0x71374491),
SPH_C32(0xB5C0FBCF), SPH_C32(0xE9B5DBA5),
@@ -107,6 +299,9 @@ static const sph_u32 K[64] = {
SPH_C32(0x90BEFFFA), SPH_C32(0xA4506CEB),
SPH_C32(0xBEF9A3F7), SPH_C32(0xC67178F2)
};
*/
#if SPH_SMALL_FOOTPRINT_SHA2
#define SHA2_MEXP1(in, pc) do { \
W[pc] = in(pc); \
@@ -191,7 +386,7 @@ static const sph_u32 K[64] = {
(r)[7] = SPH_T32((r)[7] + H); \
} while (0)
#else  // large footprint (default)
#define SHA2_ROUND_BODY(in, r) do { \
sph_u32 A, B, C, D, E, F, G, H, T1, T2; \
@@ -600,7 +795,7 @@ static const sph_u32 K[64] = {
(r)[7] = SPH_T32((r)[7] + H); \
} while (0)
#endif // small footprint else
/*
* One round of SHA-224 / SHA-256. The data must be aligned for 32-bit access.
@@ -613,6 +808,9 @@ sha2_round(const unsigned char *data, sph_u32 r[8])
#undef SHA2_IN
}
#endif // SHA else
/* see sph_sha2.h */
void
sph_sha224_init(void *cc)
@@ -653,7 +851,7 @@ void
sph_sha224_close(void *cc, void *dst)
{
sha224_close(cc, dst, 7);
// sph_sha224_init(cc);
}
/* see sph_sha2.h */
@@ -661,7 +859,7 @@ void
sph_sha224_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
{
sha224_addbits_and_close(cc, ub, n, dst, 7);
// sph_sha224_init(cc);
}
/* see sph_sha2.h */
@@ -677,14 +875,22 @@ void
sph_sha256_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
{
sha224_addbits_and_close(cc, ub, n, dst, 8);
// sph_sha256_init(cc);
}
/* see sph_sha2.h */
void sph_sha256_full( void *dst, const void *data, size_t len )
{
   sph_sha256_context cc;
   sph_sha256_init( &cc );
   sph_sha256( &cc, data, len );
   sph_sha256_close( &cc, dst );
}

/* see sph_sha2.h */
//void
//sph_sha224_comp(const sph_u32 msg[16], sph_u32 val[8])
//{
//#define SHA2_IN(x)   msg[x]
//   SHA2_ROUND_BODY(SHA2_IN, val);
//#undef SHA2_IN
//}


@@ -73,7 +73,7 @@ typedef struct {
sph_u32 count_high, count_low;
#endif
#endif
} sph_sha224_context __attribute__((aligned(64)));
/**
* This structure is a context for SHA-256 computations. It is identical
@@ -205,6 +205,10 @@ void sph_sha256_comp(const sph_u32 msg[16], sph_u32 val[8]);
#define sph_sha256_comp sph_sha224_comp
#endif
void sph_sha256_full( void *dst, const void *data, size_t len );
#if SPH_64
/**


@@ -38,7 +38,8 @@
#if SPH_64
#define CH(X, Y, Z) ((((Y) ^ (Z)) & (X)) ^ (Z))
//#define MAJ(X, Y, Z)   (((X) & (Y)) | (((X) | (Y)) & (Z)))
#define MAJ( X, Y, Z )  ( Y ^ ( ( X ^ Y ) & ( Y ^ Z ) ) )
#define ROTR64 SPH_ROTR64


@@ -310,12 +310,13 @@ do { \
#define PERM_ELT8(xa0, xa1, xb0, xb1, xb2, xb3, xc, xm) \
do { \
   xa0 = mm256_xor3( xm, xb1, _mm256_xor_si256( \
                   _mm256_andnot_si256( xb3, xb2 ), \
            _mm256_mullo_epi32( mm256_xor3( xa0, xc, \
                        _mm256_mullo_epi32( mm256_rol_32( xa1, 15 ), \
                                            _mm256_set1_epi32(5UL) ) ), \
                                _mm256_set1_epi32(3UL) ) ) ); \
   xb0 = mm256_xnor( xa0, mm256_rol_32( xb0, 1 ) ); \
} while (0)
#define PERM_STEP_0_8 do { \


@@ -23,6 +23,19 @@ static const uint32_t IV512[] =
_mm256_blend_epi32( mm256_ror128_32( a ), \
mm256_ror128_32( b ), 0x88 )
#if defined(__VAES__)
#define mm256_aesenc_2x128( x, k ) \
_mm256_aesenc_epi128( x, _mm256_castsi128_si256( k ) )
#else
#define mm256_aesenc_2x128( x, k ) \
mm256_concat_128( _mm_aesenc_si128( mm128_extr_hi128_256( x ), k ), \
_mm_aesenc_si128( mm128_extr_lo128_256( x ), k ) )
#endif
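// Editor's note: with VAES a single instruction applies an AES round to
// both 128-bit lanes of the 256-bit vector; the fallback above splits the
// vector and issues two AES-NI rounds with the same 128-bit round key k,
// then reassembles the halves.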
static void
c512_2way( shavite512_2way_context *ctx, const void *msg )
{
@@ -304,7 +317,7 @@ void shavite512_2way_close( shavite512_2way_context *ctx, void *dst )
uint32_t vp = ctx->ptr>>5;
// Terminating byte then zero pad
   casti_m256i( buf, vp++ ) = m256_const1_i128( 0x0000000000000080 );
// Zero pad full vectors up to count
for ( ; vp < 6; vp++ )
@@ -384,13 +397,13 @@ void shavite512_2way_update_close( shavite512_2way_context *ctx, void *dst,
if ( vp == 0 ) // empty buf, xevan.
{
      casti_m256i( buf, 0 ) = m256_const1_i128( 0x0000000000000080 );
memset_zero_256( (__m256i*)buf + 1, 5 );
ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0;
}
else // half full buf, everyone else.
{
      casti_m256i( buf, vp++ ) = m256_const1_i128( 0x0000000000000080 );
memset_zero_256( (__m256i*)buf + vp, 6 - vp );
}
@@ -474,13 +487,13 @@ void shavite512_2way_full( shavite512_2way_context *ctx, void *dst,
if ( vp == 0 ) // empty buf, xevan.
{
      casti_m256i( buf, 0 ) = m256_const1_i128( 0x0000000000000080 );
memset_zero_256( (__m256i*)buf + 1, 5 );
ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0;
}
else // half full buf, everyone else.
{
      casti_m256i( buf, vp++ ) = m256_const1_i128( 0x0000000000000080 );
memset_zero_256( (__m256i*)buf + vp, 6 - vp );
}


@@ -292,7 +292,7 @@ void shavite512_4way_close( shavite512_4way_context *ctx, void *dst )
uint32_t vp = ctx->ptr>>6;
// Terminating byte then zero pad
   casti_m512i( buf, vp++ ) = m512_const1_i128( 0x0000000000000080 );
// Zero pad full vectors up to count
for ( ; vp < 6; vp++ )
@@ -372,13 +372,13 @@ void shavite512_4way_update_close( shavite512_4way_context *ctx, void *dst,
if ( vp == 0 ) // empty buf, xevan.
{
      casti_m512i( buf, 0 ) = m512_const1_i128( 0x0000000000000080 );
memset_zero_512( (__m512i*)buf + 1, 5 );
ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0;
}
else // half full buf, everyone else.
{
      casti_m512i( buf, vp++ ) = m512_const1_i128( 0x0000000000000080 );
memset_zero_512( (__m512i*)buf + vp, 6 - vp );
}
@@ -463,13 +463,13 @@ void shavite512_4way_full( shavite512_4way_context *ctx, void *dst,
if ( vp == 0 ) // empty buf, xevan.
{
      casti_m512i( buf, 0 ) = m512_const1_i128( 0x0000000000000080 );
memset_zero_512( (__m512i*)buf + 1, 5 );
ctx->count0 = ctx->count1 = ctx->count2 = ctx->count3 = 0;
}
else // half full buf, everyone else.
{
      casti_m512i( buf, vp++ ) = m512_const1_i128( 0x0000000000000080 );
memset_zero_512( (__m512i*)buf + vp, 6 - vp );
}


@@ -2,14 +2,8 @@
#include <string.h>
#include <stdint.h>
#include "skein-hash-4way.h"
#include "algo/sha/sha-hash-4way.h"
#include "algo/sha/sph_sha2.h"
#if defined (SKEIN_8WAY)
@@ -93,7 +87,7 @@ void skeinhash_4way( void *state, const void *input )
uint32_t hash1[16] __attribute__ ((aligned (64)));
uint32_t hash2[16] __attribute__ ((aligned (64)));
uint32_t hash3[16] __attribute__ ((aligned (64)));
     sph_sha256_context ctx_sha256;
#else
uint32_t vhash32[16*4] __attribute__ ((aligned (64)));
sha256_4way_context ctx_sha256;
@@ -102,31 +96,29 @@ void skeinhash_4way( void *state, const void *input )
skein512_4way_final16( &ctx_skein, vhash64, input + (64*4) );
#if defined(__SHA__)
     dintrlv_4x64( hash0, hash1, hash2, hash3, vhash64, 512 );

     sph_sha256_init( &ctx_sha256 );
     sph_sha256( &ctx_sha256, hash0, 64 );
     sph_sha256_close( &ctx_sha256, hash0 );

     sph_sha256_init( &ctx_sha256 );
     sph_sha256( &ctx_sha256, hash1, 64 );
     sph_sha256_close( &ctx_sha256, hash1 );

     sph_sha256_init( &ctx_sha256 );
     sph_sha256( &ctx_sha256, hash2, 64 );
     sph_sha256_close( &ctx_sha256, hash2 );

     sph_sha256_init( &ctx_sha256 );
     sph_sha256( &ctx_sha256, hash3, 64 );
     sph_sha256_close( &ctx_sha256, hash3 );

     intrlv_4x32( state, hash0, hash1, hash2, hash3, 256 );
#else
     rintrlv_4x64_4x32( vhash32, vhash64, 512 );

     sha256_4way_init( &ctx_sha256 );
     sha256_4way_update( &ctx_sha256, vhash32, 64 );
     sha256_4way_close( &ctx_sha256, state );
#endif
}


@@ -309,22 +309,16 @@ static const uint64_t IV512[] = {
sc->bcount = bcount; \
} while (0)
// AVX2 all scalar vars are now vectors representing 4 nonces in parallel
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define TFBIG_KINIT_8WAY( k0, k1, k2, k3, k4, k5, k6, k7, k8, t0, t1, t2 ) \
do { \
  k8 = mm512_xor3( mm512_xor3( k0, k1, k2 ), mm512_xor3( k3, k4, k5 ), \
                   mm512_xor3( k6, k7, m512_const1_64( 0x1BD11BDAA9FC1A22) ));\
  t2 = t0 ^ t1; \
} while (0)
#define TFBIG_ADDKEY_8WAY(w0, w1, w2, w3, w4, w5, w6, w7, k, t, s) \
do { \
w0 = _mm512_add_epi64( w0, SKBI(k,s,0) ); \
@@ -340,7 +334,6 @@ do { \
m512_const1_64( s ) ) ); \
} while (0)
#define TFBIG_MIX_8WAY(x0, x1, rc) \
do { \
x0 = _mm512_add_epi64( x0, x1 ); \
@@ -731,7 +724,7 @@ void skein512_8way_full( skein512_8way_context *sc, void *out, const void *data,
void
skein512_8way_prehash64( skein512_8way_context *sc, const void *data )
{
    __m512i *vdata = (__m512i*)data;
__m512i *buf = sc->buf;
buf[0] = vdata[0];
buf[1] = vdata[1];


@@ -5,21 +5,21 @@
#include <string.h>
#include <stdint.h>
#include "sph_skein.h"
#include "algo/sha/sph_sha2.h"
void skeinhash(void *state, const void *input)
{
     uint32_t hash[16] __attribute__ ((aligned (64)));
     sph_skein512_context ctx_skein;
     sph_sha256_context ctx_sha256;

     sph_skein512_init( &ctx_skein );
     sph_skein512( &ctx_skein, input, 80 );
     sph_skein512_close( &ctx_skein, hash );

     sph_sha256_init( &ctx_sha256 );
     sph_sha256( &ctx_sha256, hash, 64 );
     sph_sha256_close( &ctx_sha256, hash );

     memcpy(state, hash, 32);
}


@@ -1,47 +0,0 @@
/*
* Copyright (c) 2000 Jeroen Ruigrok van der Werven <asmodai@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: src/include/stdbool.h,v 1.6 2002/08/16 07:33:14 alfred Exp $
*/
#ifndef _STDBOOL_H_
#define _STDBOOL_H_
#define __bool_true_false_are_defined 1
#ifndef __cplusplus
#define false 0
#define true 1
//#define bool _Bool
//#if __STDC_VERSION__ < 199901L && __GNUC__ < 3
//typedef int _Bool;
//#endif
typedef int bool;
#endif /* !__cplusplus */
#endif /* !_STDBOOL_H_ */

(File diff suppressed because it is too large.)

algo/verthash/Verthash.c (new file, 741 lines)

@@ -0,0 +1,741 @@
/*
* Copyright 2018-2021 CryptoGraphics
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version. See LICENSE for more details.
*/
#include "algo-gate-api.h"
#include "Verthash.h"
#include "mm_malloc.h"
//-----------------------------------------------------------------------------
// Verthash info management
int verthash_info_init(verthash_info_t* info, const char* file_name)
{
// init fields to 0
info->fileName = NULL;
info->data = NULL;
info->dataSize = 0;
info->bitmask = 0;
size_t fileNameLen;
if ( !file_name || !( fileNameLen = strlen( file_name ) ) )
{
applog( LOG_ERR, "Invalid file specification" );
return -1;
}
info->fileName = (char*)malloc( fileNameLen + 1 );
if ( !info->fileName )
{
applog( LOG_ERR, "Failed to allocate memory for Verthash data" );
return -1;
}
memset( info->fileName, 0, fileNameLen + 1 );
memcpy( info->fileName, file_name, fileNameLen );
FILE *fileMiningData = fopen_utf8( info->fileName, "rb" );
if ( !fileMiningData )
{
if ( opt_data_file || !opt_verify )
{
if ( opt_data_file )
applog( LOG_ERR, "Verthash data file not found or invalid: %s",
info->fileName );
else
{
applog( LOG_ERR,
"No Verthash data file specified and default not found");
applog( LOG_NOTICE,
"Add '--verify' to create default 'verthash.dat'");
}
return -1;
}
else
{
applog( LOG_NOTICE, "Creating default 'verthash.dat' in current directory, this will take several minutes");
if ( verthash_generate_data_file( info->fileName ) )
return -1;
fileMiningData = fopen_utf8( info->fileName, "rb" );
if ( !fileMiningData )
{
applog( LOG_ERR, "File system error opening %s", info->fileName );
return -1;
}
applog( LOG_NOTICE, "Verthash data file created successfully" );
}
}
// Get file size
fseek(fileMiningData, 0, SEEK_END);
int fileSize = ftell(fileMiningData);
fseek(fileMiningData, 0, SEEK_SET);
if ( fileSize < 0 )
{
fclose(fileMiningData);
return 1;
}
// Allocate data
info->data = (uint8_t *)_mm_malloc( fileSize, 64 );
if (!info->data)
{
fclose(fileMiningData);
// Memory allocation fatal error.
return 2;
}
// Load data
if ( !fread( info->data, fileSize, 1, fileMiningData ) )
{
applog( LOG_ERR, "File system error reading %s", info->fileName );
fclose(fileMiningData);
return -1;
}
fclose(fileMiningData);
// Update fields
info->bitmask = ((fileSize - VH_HASH_OUT_SIZE)/VH_BYTE_ALIGNMENT) + 1;
info->dataSize = fileSize;
applog( LOG_NOTICE, "Using Verthash data file '%s'", info->fileName );
return 0;
}
//-----------------------------------------------------------------------------
void verthash_info_free(verthash_info_t* info)
{
free(info->fileName);
free(info->data);
info->dataSize = 0;
info->bitmask = 0;
}
//-----------------------------------------------------------------------------
// Verthash hash
#define VH_P0_SIZE 64
#define VH_N_ITER 8
#define VH_N_SUBSET        (VH_P0_SIZE*VH_N_ITER)
#define VH_N_ROT 32
#define VH_N_INDEXES 4096
#define VH_BYTE_ALIGNMENT 16
static inline uint32_t fnv1a(const uint32_t a, const uint32_t b)
{
return (a ^ b) * 0x1000193;
}
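// Editor's note: this is one step of 32-bit FNV-1a with the standard prime
// 0x1000193; verthash_hash() below seeds its accumulator with the standard
// FNV offset basis 0x811c9dc5, so e.g. folding in a single zero word gives
// fnv1a( 0x811c9dc5, 0 ) == 0x811c9dc5 * 0x1000193 mod 2^32.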
#if 0
static void rotate_indexes( uint32_t *p )
{
#if defined(__AVX2__)
for ( size_t x = 0; x < VH_N_SUBSET / sizeof(__m256i); x += 8 )
{
__m256i *px = (__m256i*)p + x;
px[0] = mm256_rol_32( px[0], 1 );
px[1] = mm256_rol_32( px[1], 1 );
px[2] = mm256_rol_32( px[2], 1 );
px[3] = mm256_rol_32( px[3], 1 );
px[4] = mm256_rol_32( px[4], 1 );
px[5] = mm256_rol_32( px[5], 1 );
px[6] = mm256_rol_32( px[6], 1 );
px[7] = mm256_rol_32( px[7], 1 );
}
#else
for ( size_t x = 0; x < VH_N_SUBSET / sizeof(__m128i); x += 8 )
{
      __m128i *px = (__m128i*)p + x;
px[0] = mm128_rol_32( px[0], 1 );
px[1] = mm128_rol_32( px[1], 1 );
px[2] = mm128_rol_32( px[2], 1 );
px[3] = mm128_rol_32( px[3], 1 );
px[4] = mm128_rol_32( px[4], 1 );
px[5] = mm128_rol_32( px[5], 1 );
px[6] = mm128_rol_32( px[6], 1 );
px[7] = mm128_rol_32( px[7], 1 );
}
#endif
/*
for ( size_t x = 0; x < VH_N_SUBSET / sizeof(uint32_t); ++x )
p[x] = ( p[x] << 1 ) | ( p[x] >> 31 );
*/
}
#endif
static inline uint32_t rotl32( uint32_t a, size_t r )
{
return ( a << r ) | ( a >> (32-r) );
}
// Vectorized and targeted version of fnv1a
#if defined (__AVX2__)
#define MULXOR \
*(__m256i*)hash = _mm256_mullo_epi32( _mm256_xor_si256( \
*(__m256i*)hash, *(__m256i*)blob_off ), k );
#elif defined(__SSE4_1__)
#define MULXOR \
casti_m128i( hash, 0 ) = _mm_mullo_epi32( _mm_xor_si128( \
casti_m128i( hash, 0 ), casti_m128i( blob_off, 0 ) ), k ); \
casti_m128i( hash, 1 ) = _mm_mullo_epi32( _mm_xor_si128( \
casti_m128i( hash, 1 ), casti_m128i( blob_off, 1 ) ), k );
#else
#define MULXOR \
for ( size_t j = 0; j < VH_HASH_OUT_SIZE / sizeof(uint32_t); j++ ) \
      hash[j] = fnv1a( hash[j], blob_off[j] );
#endif
#define UPDATE_ACCUMULATOR \
accumulator = fnv1a( accumulator, blob_off[0] ); \
accumulator = fnv1a( accumulator, blob_off[1] ); \
accumulator = fnv1a( accumulator, blob_off[2] ); \
accumulator = fnv1a( accumulator, blob_off[3] ); \
accumulator = fnv1a( accumulator, blob_off[4] ); \
accumulator = fnv1a( accumulator, blob_off[5] ); \
accumulator = fnv1a( accumulator, blob_off[6] ); \
accumulator = fnv1a( accumulator, blob_off[7] )
// first pass no rotate
#define ROUND_0 \
for ( size_t i = 0; i < VH_N_SUBSET / sizeof(uint32_t); i++ ) \
{ \
const uint32_t *blob_off = blob + \
( ( fnv1a( subset[i], accumulator ) % mdiv ) \
* ( VH_BYTE_ALIGNMENT / sizeof(uint32_t) ) ); \
UPDATE_ACCUMULATOR; \
MULXOR; \
}
// subsequent passes rotate by r on demand, no need for mass rotate
#define ROUND_r( r ) \
for ( size_t i = 0; i < VH_N_SUBSET / sizeof(uint32_t); i++ ) \
{ \
const uint32_t *blob_off = blob + \
( ( fnv1a( rotl32( subset[i], r ), accumulator ) % mdiv ) \
* ( VH_BYTE_ALIGNMENT / sizeof(uint32_t) ) ); \
UPDATE_ACCUMULATOR; \
MULXOR; \
}
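// Editor's note (not part of the original file): rotating each subset word
// on demand, as ROUND_r does with rotl32, is equivalent to the bulk
// per-pass rotation of the disabled rotate_indexes() above: pass r sees
// subset[i] rotated by exactly r bits either way.
static inline int rot_equivalence_check( uint32_t w )
{
   uint32_t bulk = w;
   for ( size_t r = 1; r < VH_N_ROT; r++ )
   {
      bulk = rotl32( bulk, 1 );         // mass rotate, once per pass
      if ( bulk != rotl32( w, r ) )     // rotate on demand
         return 0;
   }
   return 1;   // the two schemes agree for every pass
}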
void verthash_hash( const void *blob_bytes, const size_t blob_size,
const void *input, void *output )
{
uint32_t hash[ VH_HASH_OUT_SIZE / 4 ] __attribute__ ((aligned (64)));
uint32_t subset[ VH_N_SUBSET / 4 ] __attribute__ ((aligned (64)));
const uint32_t *blob = (const uint32_t*)blob_bytes;
uint32_t accumulator = 0x811c9dc5;
const uint32_t mdiv = ( ( blob_size - VH_HASH_OUT_SIZE )
/ VH_BYTE_ALIGNMENT ) + 1;
#if defined (__AVX2__)
const __m256i k = _mm256_set1_epi32( 0x1000193 );
#elif defined(__SSE4_1__)
const __m128i k = _mm_set1_epi32( 0x1000193 );
#endif
sha3( input, VH_HEADER_SIZE, hash, VH_HASH_OUT_SIZE );
verthash_sha3_512_final_8( subset, ( (uint64_t*)input )[ 9 ] );
ROUND_0;
for ( size_t r = 1; r < VH_N_ROT; ++r )
ROUND_r( r );
memcpy( output, hash, VH_HASH_OUT_SIZE );
}
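// Editor's note (not part of the original file): a hedged end-to-end
// sketch of the API above. It assumes VH_HEADER_SIZE is 80 (a block
// header) and that the declarations are visible via Verthash.h.
static int verthash_example( const char *dat_path,
                             const uint8_t header[80], uint8_t out[32] )
{
   verthash_info_t info;
   if ( verthash_info_init( &info, dat_path ) != 0 )
      return -1;                                  // no usable data file
   verthash_hash( info.data, info.dataSize, header, out );
   verthash_info_free( &info );
   return 0;
}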
//-----------------------------------------------------------------------------
// Verthash data file generator
#define NODE_SIZE 32
struct Graph
{
FILE *db;
int64_t log2;
int64_t pow2;
uint8_t *pk;
int64_t index;
};
int64_t Log2(int64_t x)
{
int64_t r = 0;
for (; x > 1; x >>= 1)
{
r++;
}
return r;
}
int64_t bfsToPost(struct Graph *g, const int64_t node)
{
return node & ~g->pow2;
}
int64_t numXi(int64_t index)
{
return (1 << ((uint64_t)index)) * (index + 1) * index;
}
void WriteId(struct Graph *g, uint8_t *Node, const int64_t id)
{
fseek(g->db, id * NODE_SIZE, SEEK_SET);
fwrite(Node, 1, NODE_SIZE, g->db);
}
void WriteNode(struct Graph *g, uint8_t *Node, const int64_t id)
{
const int64_t idx = bfsToPost(g, id);
WriteId(g, Node, idx);
}
void NewNode(struct Graph *g, const int64_t id, uint8_t *hash)
{
WriteNode(g, hash, id);
}
uint8_t *GetId(struct Graph *g, const int64_t id)
{
fseek(g->db, id * NODE_SIZE, SEEK_SET);
uint8_t *node = (uint8_t *)malloc(NODE_SIZE);
const size_t bytes_read = fread(node, 1, NODE_SIZE, g->db);
if(bytes_read != NODE_SIZE) {
return NULL;
}
return node;
}
uint8_t *GetNode(struct Graph *g, const int64_t id)
{
const int64_t idx = bfsToPost(g, id);
return GetId(g, idx);
}
uint32_t WriteVarInt(uint8_t *buffer, int64_t val)
{
memset(buffer, 0, NODE_SIZE);
uint64_t uval = ((uint64_t)(val)) << 1;
if (val < 0)
{
uval = ~uval;
}
uint32_t i = 0;
while (uval >= 0x80)
{
buffer[i] = (uint8_t)uval | 0x80;
uval >>= 7;
i++;
}
buffer[i] = (uint8_t)uval;
return i;
}
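// Editor's note (not part of the original file): WriteVarInt() above is
// zigzag-plus-LEB128: the value is mapped to unsigned (even codes for
// v >= 0, odd for v < 0) and emitted 7 bits per byte, low bits first, with
// the top bit flagging continuation. A matching decoder, shown only for
// illustration:
static int64_t ReadVarInt( const uint8_t *buffer )
{
   uint64_t uval = 0;
   uint32_t shift = 0, i = 0;
   while ( buffer[i] & 0x80 )
   {
      uval |= (uint64_t)( buffer[i] & 0x7F ) << shift;
      shift += 7;
      i++;
   }
   uval |= (uint64_t)buffer[i] << shift;
   // undo zigzag: even -> non-negative, odd -> negative
   return ( uval & 1 ) ? ~(int64_t)( uval >> 1 ) : (int64_t)( uval >> 1 );
}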
void ButterflyGraph(struct Graph *g, int64_t index, int64_t *count)
{
if (index == 0)
{
index = 1;
}
int64_t numLevel = 2 * index;
int64_t perLevel = (int64_t)(1 << (uint64_t)index);
int64_t begin = *count - perLevel;
int64_t level, i;
for (level = 1; level < numLevel; level++)
{
for (i = 0; i < perLevel; i++)
{
int64_t prev;
int64_t shift = index - level;
if (level > numLevel / 2)
{
shift = level - numLevel / 2;
}
if (((i >> (uint64_t)shift) & 1) == 0)
{
prev = i + (1 << (uint64_t)shift);
}
else
{
prev = i - (1 << (uint64_t)shift);
}
uint8_t *parent0 = GetNode(g, begin + (level - 1) * perLevel + prev);
uint8_t *parent1 = GetNode(g, *count - perLevel);
uint8_t *buf = (uint8_t *)malloc(NODE_SIZE);
WriteVarInt(buf, *count);
uint8_t *hashInput = (uint8_t *)malloc(NODE_SIZE * 4);
memcpy(hashInput, g->pk, NODE_SIZE);
memcpy(hashInput + NODE_SIZE, buf, NODE_SIZE);
memcpy(hashInput + (NODE_SIZE * 2), parent0, NODE_SIZE);
memcpy(hashInput + (NODE_SIZE * 3), parent1, NODE_SIZE);
uint8_t *hashOutput = (uint8_t *)malloc(NODE_SIZE);
sha3(hashInput, NODE_SIZE * 4, hashOutput, NODE_SIZE);
NewNode(g, *count, hashOutput);
(*count)++;
free(hashOutput);
free(hashInput);
free(parent0);
free(parent1);
free(buf);
}
}
}
void XiGraphIter(struct Graph *g, int64_t index)
{
int64_t count = g->pow2;
int8_t stackSize = 5;
int64_t *stack = (int64_t *)malloc(sizeof(int64_t) * stackSize);
for (int i = 0; i < 5; i++)
stack[i] = index;
int8_t graphStackSize = 5;
int32_t *graphStack = (int32_t *)malloc(sizeof(int32_t) * graphStackSize);
for (int i = 0; i < 5; i++)
graphStack[i] = graphStackSize - i - 1;
int64_t i = 0;
int64_t graph = 0;
int64_t pow2index = 1 << ((uint64_t)index);
for (i = 0; i < pow2index; i++)
{
uint8_t *buf = (uint8_t *)malloc(NODE_SIZE);
WriteVarInt(buf, count);
uint8_t *hashInput = (uint8_t *)malloc(NODE_SIZE * 2);
memcpy(hashInput, g->pk, NODE_SIZE);
memcpy(hashInput + NODE_SIZE, buf, NODE_SIZE);
uint8_t *hashOutput = (uint8_t *)malloc(NODE_SIZE);
sha3(hashInput, NODE_SIZE * 2, hashOutput, NODE_SIZE);
NewNode(g, count, hashOutput);
count++;
free(hashOutput);
free(hashInput);
free(buf);
}
if (index == 1)
{
ButterflyGraph(g, index, &count);
return;
}
while (stackSize != 0 && graphStackSize != 0)
{
index = stack[stackSize - 1];
graph = graphStack[graphStackSize - 1];
stackSize--;
if (stackSize > 0)
{
int64_t *tempStack = (int64_t *)malloc(sizeof(int64_t) * (stackSize));
memcpy(tempStack, stack, sizeof(int64_t) * (stackSize));
free(stack);
stack = tempStack;
}
graphStackSize--;
if (graphStackSize > 0)
{
int32_t *tempGraphStack = (int32_t *)malloc(sizeof(int32_t) * (graphStackSize));
memcpy(tempGraphStack, graphStack, sizeof(int32_t) * (graphStackSize));
free(graphStack);
graphStack = tempGraphStack;
}
int8_t indicesSize = 5;
int64_t *indices = (int64_t *)malloc(sizeof(int64_t) * indicesSize);
for (int i = 0; i < indicesSize; i++)
indices[i] = index - 1;
int8_t graphsSize = 5;
int32_t *graphs = (int32_t *)malloc(sizeof(int32_t) * graphsSize);
for (int i = 0; i < graphsSize; i++)
graphs[i] = graphsSize - i - 1;
int64_t pow2indexInner = 1 << ((uint64_t)index);
int64_t pow2indexInner_1 = 1 << ((uint64_t)index - 1);
if (graph == 0)
{
uint64_t sources = count - pow2indexInner;
for (i = 0; i < pow2indexInner_1; i++)
{
uint8_t *parent0 = GetNode(g, sources + i);
uint8_t *parent1 = GetNode(g, sources + i + pow2indexInner_1);
uint8_t *buf = (uint8_t *)malloc(NODE_SIZE);
WriteVarInt(buf, count);
uint8_t *hashInput = (uint8_t *)malloc(NODE_SIZE * 4);
memcpy(hashInput, g->pk, NODE_SIZE);
memcpy(hashInput + NODE_SIZE, buf, NODE_SIZE);
memcpy(hashInput + (NODE_SIZE * 2), parent0, NODE_SIZE);
memcpy(hashInput + (NODE_SIZE * 3), parent1, NODE_SIZE);
uint8_t *hashOutput = (uint8_t *)malloc(NODE_SIZE);
sha3(hashInput, NODE_SIZE * 4, hashOutput, NODE_SIZE);
NewNode(g, count, hashOutput);
count++;
free(hashOutput);
free(hashInput);
free(parent0);
free(parent1);
free(buf);
}
}
else if (graph == 1)
{
uint64_t firstXi = count;
for (i = 0; i < pow2indexInner_1; i++)
{
uint64_t nodeId = firstXi + i;
uint8_t *parent = GetNode(g, firstXi - pow2indexInner_1 + i);
uint8_t *buf = (uint8_t *)malloc(NODE_SIZE);
WriteVarInt(buf, nodeId);
uint8_t *hashInput = (uint8_t *)malloc(NODE_SIZE * 3);
memcpy(hashInput, g->pk, NODE_SIZE);
memcpy(hashInput + NODE_SIZE, buf, NODE_SIZE);
memcpy(hashInput + (NODE_SIZE * 2), parent, NODE_SIZE);
uint8_t *hashOutput = (uint8_t *)malloc(NODE_SIZE);
sha3(hashInput, NODE_SIZE * 3, hashOutput, NODE_SIZE);
NewNode(g, count, hashOutput);
count++;
free(hashOutput);
free(hashInput);
free(parent);
free(buf);
}
}
else if (graph == 2)
{
uint64_t secondXi = count;
for (i = 0; i < pow2indexInner_1; i++)
{
uint64_t nodeId = secondXi + i;
uint8_t *parent = GetNode(g, secondXi - pow2indexInner_1 + i);
uint8_t *buf = (uint8_t *)malloc(NODE_SIZE);
WriteVarInt(buf, nodeId);
uint8_t *hashInput = (uint8_t *)malloc(NODE_SIZE * 3);
memcpy(hashInput, g->pk, NODE_SIZE);
memcpy(hashInput + NODE_SIZE, buf, NODE_SIZE);
memcpy(hashInput + (NODE_SIZE * 2), parent, NODE_SIZE);
uint8_t *hashOutput = (uint8_t *)malloc(NODE_SIZE);
sha3(hashInput, NODE_SIZE * 3, hashOutput, NODE_SIZE);
NewNode(g, count, hashOutput);
count++;
free(hashOutput);
free(hashInput);
free(parent);
free(buf);
}
}
else if (graph == 3)
{
uint64_t secondButter = count;
for (i = 0; i < pow2indexInner_1; i++)
{
uint64_t nodeId = secondButter + i;
uint8_t *parent = GetNode(g, secondButter - pow2indexInner_1 + i);
uint8_t *buf = (uint8_t *)malloc(NODE_SIZE);
WriteVarInt(buf, nodeId);
uint8_t *hashInput = (uint8_t *)malloc(NODE_SIZE * 3);
memcpy(hashInput, g->pk, NODE_SIZE);
memcpy(hashInput + NODE_SIZE, buf, NODE_SIZE);
memcpy(hashInput + (NODE_SIZE * 2), parent, NODE_SIZE);
uint8_t *hashOutput = (uint8_t *)malloc(NODE_SIZE);
sha3(hashInput, NODE_SIZE * 3, hashOutput, NODE_SIZE);
NewNode(g, count, hashOutput);
count++;
free(hashOutput);
free(hashInput);
free(parent);
free(buf);
}
}
else
{
uint64_t sinks = count;
uint64_t sources = sinks + pow2indexInner - numXi(index);
for (i = 0; i < pow2indexInner_1; i++)
{
uint64_t nodeId0 = sinks + i;
uint64_t nodeId1 = sinks + i + pow2indexInner_1;
uint8_t *parent0 = GetNode(g, sinks - pow2indexInner_1 + i);
uint8_t *parent1_0 = GetNode(g, sources + i);
uint8_t *parent1_1 = GetNode(g, sources + i + pow2indexInner_1);
uint8_t *buf = (uint8_t *)malloc(NODE_SIZE);
WriteVarInt(buf, nodeId0);
uint8_t *hashInput = (uint8_t *)malloc(NODE_SIZE * 4);
memcpy(hashInput, g->pk, NODE_SIZE);
memcpy(hashInput + NODE_SIZE, buf, NODE_SIZE);
memcpy(hashInput + (NODE_SIZE * 2), parent0, NODE_SIZE);
memcpy(hashInput + (NODE_SIZE * 3), parent1_0, NODE_SIZE);
uint8_t *hashOutput0 = (uint8_t *)malloc(NODE_SIZE);
sha3(hashInput, NODE_SIZE * 4, hashOutput0, NODE_SIZE);
WriteVarInt(buf, nodeId1);
memcpy(hashInput, g->pk, NODE_SIZE);
memcpy(hashInput + NODE_SIZE, buf, NODE_SIZE);
memcpy(hashInput + (NODE_SIZE * 2), parent0, NODE_SIZE);
memcpy(hashInput + (NODE_SIZE * 3), parent1_1, NODE_SIZE);
uint8_t *hashOutput1 = (uint8_t *)malloc(NODE_SIZE);
sha3(hashInput, NODE_SIZE * 4, hashOutput1, NODE_SIZE);
NewNode(g, nodeId0, hashOutput0);
NewNode(g, nodeId1, hashOutput1);
count += 2;
free(parent0);
free(parent1_0);
free(parent1_1);
free(buf);
free(hashInput);
free(hashOutput0);
free(hashOutput1);
}
}
if ((graph == 0 || graph == 3) ||
((graph == 1 || graph == 2) && index == 2))
{
ButterflyGraph(g, index - 1, &count);
}
else if (graph == 1 || graph == 2)
{
int64_t *tempStack = (int64_t *)malloc(sizeof(int64_t) * (stackSize + indicesSize));
memcpy(tempStack, stack, stackSize * sizeof(int64_t));
memcpy(tempStack + stackSize, indices, indicesSize * sizeof(int64_t));
stackSize += indicesSize;
free(stack);
stack = tempStack;
int32_t *tempGraphStack = (int32_t *)malloc(sizeof(int32_t) * (graphStackSize + graphsSize));
memcpy(tempGraphStack, graphStack, graphStackSize * sizeof(int32_t));
memcpy(tempGraphStack + graphStackSize, graphs, graphsSize * sizeof(int32_t));
graphStackSize += graphsSize;
free(graphStack);
graphStack = tempGraphStack;
}
free(indices);
free(graphs);
}
free(stack);
free(graphStack);
}
struct Graph *NewGraph(int64_t index, const char* targetFile, uint8_t *pk)
{
uint8_t exists = 0;
FILE *db;
if ((db = fopen_utf8(targetFile, "r")) != NULL)
{
fclose(db);
exists = 1;
}
db = fopen_utf8(targetFile, "wb+");
int64_t size = numXi(index);
int64_t log2 = Log2(size) + 1;
int64_t pow2 = 1 << ((uint64_t)log2);
struct Graph *g = (struct Graph *)malloc(sizeof(struct Graph));
if ( !g ) return NULL;
g->db = db;
g->log2 = log2;
g->pow2 = pow2;
g->pk = pk;
g->index = index;
if (exists == 0)
{
XiGraphIter(g, index);
}
fclose(db);
return g;
}
//-----------------------------------------------------------------------------
// use info for _mm_malloc, then verify file
int verthash_generate_data_file(const char* output_file_name)
{
const char *hashInput = "Verthash Proof-of-Space Datafile";
uint8_t *pk = (uint8_t*)malloc( NODE_SIZE );
if ( !pk )
{
applog( LOG_ERR, "Verthash data memory allocation failed");
return -1;
}
sha3( hashInput, 32, pk, NODE_SIZE );
int64_t index = 17;
if ( !NewGraph( index, output_file_name, pk ) )
{
applog( LOG_ERR, "Verthash file creation failed");
return -1;
}
return 0;
}

57
algo/verthash/Verthash.h Normal file
View File

@@ -0,0 +1,57 @@
/*
* Copyright 2018-2021 CryptoGraphics
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version. See LICENSE for more details.
*/
#ifndef Verthash_INCLUDE_ONCE
#define Verthash_INCLUDE_ONCE
#include "tiny_sha3/sha3.h"
#include "fopen_utf8.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
// Verthash constants used to compute bitmask, used inside kernel during IO pass
#define VH_HASH_OUT_SIZE 32
#define VH_BYTE_ALIGNMENT 16
#define VH_HEADER_SIZE 80
//-----------------------------------------------------------------------------
// Verthash data
//! Verthash C API for data manipulation.
typedef struct VerthashInfo
{
char* fileName;
uint8_t* data;
uint64_t dataSize;
uint32_t bitmask;
} verthash_info_t;
//! Must be called before usage. Reset all fields and set a mining data file name.
//! Error codes:
//! 0 - Success (no error).
//! 1 - File name is invalid.
//! 2 - Memory allocation error.
int verthash_info_init(verthash_info_t* info, const char* file_name);
//! Reset all fields and free allocated data.
void verthash_info_free(verthash_info_t* info);
//! Generate the verthash data file and save it to the specified location.
int verthash_generate_data_file(const char* output_file_name);
void verthash_hash( const void *blob_bytes, const size_t blob_size,
const void *input, void *output );
void verthash_sha3_512_prehash_72( const void *input );
void verthash_sha3_512_final_8( void *hash, const uint64_t nonce );
#endif // !Verthash_INCLUDE_ONCE
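For orientation, a minimal usage sketch of this API. The helper name and
file name are illustrative; treating error code 1 as "file not found"
matches how the verthash gate later in this diff reacts to it.

#include "Verthash.h"
// Hypothetical helper: load the data file, generating it first if missing.
static int load_or_create( verthash_info_t *info, const char *path )
{
    int rc = verthash_info_init( info, path );
    if ( rc == 1 )                          // invalid name / file not found
    {
        if ( verthash_generate_data_file( path ) != 0 )
            return -1;                      // generation failed
        rc = verthash_info_init( info, path );
    }
    return ( rc == 0 ) ? 0 : -1;            // 2 = memory allocation error
}

A successful init is paired with verthash_info_free() once the data is no
longer needed.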

181
algo/verthash/fopen_utf8.c Normal file
View File

@@ -0,0 +1,181 @@
#ifndef H_FOPEN_UTF8
#define H_FOPEN_UTF8
#include "fopen_utf8.h"
#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
int utf8_char_size(const uint8_t *c)
{
const uint8_t m0x = 0x80, c0x = 0x00,
m10x = 0xC0, c10x = 0x80,
m110x = 0xE0, c110x = 0xC0,
m1110x = 0xF0, c1110x = 0xE0,
m11110x = 0xF8, c11110x = 0xF0;
if ((c[0] & m0x) == c0x)
return 1;
if ((c[0] & m110x) == c110x)
if ((c[1] & m10x) == c10x)
return 2;
if ((c[0] & m1110x) == c1110x)
if ((c[1] & m10x) == c10x)
if ((c[2] & m10x) == c10x)
return 3;
if ((c[0] & m11110x) == c11110x)
if ((c[1] & m10x) == c10x)
if ((c[2] & m10x) == c10x)
if ((c[3] & m10x) == c10x)
return 4;
if ((c[0] & m10x) == c10x) // not a first UTF-8 byte
return 0;
return -1; // if c[0] is a first byte but the other bytes don't match
}
uint32_t utf8_to_unicode32(const uint8_t *c, size_t *index)
{
uint32_t v;
int size;
const uint8_t m6 = 63, m5 = 31, m4 = 15, m3 = 7;
if (c==NULL)
return 0;
size = utf8_char_size(c);
if (size > 0 && index)
*index += size-1;
switch (size)
{
case 1:
v = c[0];
break;
case 2:
v = c[0] & m5;
v = v << 6 | (c[1] & m6);
break;
case 3:
v = c[0] & m4;
v = v << 6 | (c[1] & m6);
v = v << 6 | (c[2] & m6);
break;
case 4:
v = c[0] & m3;
v = v << 6 | (c[1] & m6);
v = v << 6 | (c[2] & m6);
v = v << 6 | (c[3] & m6);
break;
case 0: // not a first UTF-8 byte
case -1: // corrupt UTF-8 letter
default:
v = -1;
break;
}
return v;
}
int codepoint_utf16_size(uint32_t c)
{
if (c < 0x10000) return 1;
if (c < 0x110000) return 2;
return 0;
}
uint16_t *sprint_utf16(uint16_t *str, uint32_t c) // str must be able to hold 1 to 3 entries and will be null-terminated by this function
{
int c_size;
if (str==NULL)
return NULL;
c_size = codepoint_utf16_size(c);
switch (c_size)
{
case 1:
str[0] = c;
if (c > 0)
str[1] = '\0';
break;
case 2:
c -= 0x10000;
str[0] = 0xD800 + (c >> 10);
str[1] = 0xDC00 + (c & 0x3FF);
str[2] = '\0';
break;
default:
str[0] = '\0';
}
return str;
}
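// Worked example: for U+1F600, c - 0x10000 = 0xF600, giving the surrogate
// pair str[0] = 0xD800 + 0x3D = 0xD83D and str[1] = 0xDC00 + 0x200 = 0xDE00.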
size_t strlen_utf8_to_utf16(const uint8_t *str)
{
size_t i, count;
uint32_t c;
for (i=0, count=0; ; i++)
{
if (str[i]==0)
return count;
c = utf8_to_unicode32(&str[i], &i);
count += codepoint_utf16_size(c);
}
}
uint16_t *utf8_to_utf16(const uint8_t *utf8, uint16_t *utf16)
{
size_t i, j;
uint32_t c;
if (utf8==NULL)
return NULL;
if (utf16==NULL)
utf16 = (uint16_t *) calloc(strlen_utf8_to_utf16(utf8) + 1, sizeof(uint16_t));
for (i=0, j=0, c=1; c; i++)
{
c = utf8_to_unicode32(&utf8[i], &i);
sprint_utf16(&utf16[j], c);
j += codepoint_utf16_size(c);
}
return utf16;
}
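// Note: when utf16 is NULL, the buffer is calloc'd here and ownership
// passes to the caller, which must free() it (as fopen_utf8 does with wpath).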
FILE *fopen_utf8(const char *path, const char *mode)
{
#ifdef _WIN32
wchar_t *wpath, wmode[8];
FILE *file;
if (utf8_to_utf16((const uint8_t *) mode, (uint16_t *) wmode)==NULL)
return NULL;
wpath = (wchar_t *) utf8_to_utf16((const uint8_t *) path, NULL);
if (wpath==NULL)
return NULL;
file = _wfopen(wpath, wmode);
free(wpath);
return file;
#else
return fopen(path, mode);
#endif
}
#endif

View File

@@ -0,0 +1,25 @@
#ifndef H_FOPEN_UTF8
#define H_FOPEN_UTF8
#ifdef __cplusplus
extern "C" {
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
int utf8_char_size(const uint8_t *c);
uint32_t utf8_to_unicode32(const uint8_t *c, size_t *index);
int codepoint_utf16_size(uint32_t c);
uint16_t *sprint_utf16(uint16_t *str, uint32_t c);
size_t strlen_utf8_to_utf16(const uint8_t *str);
uint16_t *utf8_to_utf16(const uint8_t *utf8, uint16_t *utf16);
FILE *fopen_utf8(const char *path, const char *mode);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,301 @@
#if defined(__AVX2__)
// sha3-4way.c
// 19-Nov-11 Markku-Juhani O. Saarinen <mjos@iki.fi>
// vectorization by JayDDee 2021-03-27
//
// Revised 07-Aug-15 to match with official release of FIPS PUB 202 "SHA3"
// Revised 03-Sep-15 for portability + OpenSSL - style API
#include "sha3-4way.h"
// constants
static const uint64_t keccakf_rndc[24] = {
0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
0x8000000000008080, 0x0000000080000001, 0x8000000080008008
};
void sha3_4way_keccakf( __m256i st[25] )
{
int i, j, r;
__m256i t, bc[5];
for ( r = 0; r < KECCAKF_ROUNDS; r++ )
{
// Theta
bc[0] = _mm256_xor_si256( st[0],
mm256_xor4( st[5], st[10], st[15], st[20] ) );
bc[1] = _mm256_xor_si256( st[1],
mm256_xor4( st[6], st[11], st[16], st[21] ) );
bc[2] = _mm256_xor_si256( st[2],
mm256_xor4( st[7], st[12], st[17], st[22] ) );
bc[3] = _mm256_xor_si256( st[3],
mm256_xor4( st[8], st[13], st[18], st[23] ) );
bc[4] = _mm256_xor_si256( st[4],
mm256_xor4( st[9], st[14], st[19], st[24] ) );
for ( i = 0; i < 5; i++ )
{
t = _mm256_xor_si256( bc[ (i+4) % 5 ],
mm256_rol_64( bc[ (i+1) % 5 ], 1 ) );
st[ i ] = _mm256_xor_si256( st[ i ], t );
st[ i+5 ] = _mm256_xor_si256( st[ i+5 ], t );
st[ i+10 ] = _mm256_xor_si256( st[ i+10 ], t );
st[ i+15 ] = _mm256_xor_si256( st[ i+15 ], t );
st[ i+20 ] = _mm256_xor_si256( st[ i+20 ], t );
}
// Rho Pi
#define RHO_PI( i, c ) \
bc[0] = st[ i ]; \
st[ i ] = mm256_rol_64( t, c ); \
t = bc[0]
t = st[1];
RHO_PI( 10, 1 );
RHO_PI( 7, 3 );
RHO_PI( 11, 6 );
RHO_PI( 17, 10 );
RHO_PI( 18, 15 );
RHO_PI( 3, 21 );
RHO_PI( 5, 28 );
RHO_PI( 16, 36 );
RHO_PI( 8, 45 );
RHO_PI( 21, 55 );
RHO_PI( 24, 2 );
RHO_PI( 4, 14 );
RHO_PI( 15, 27 );
RHO_PI( 23, 41 );
RHO_PI( 19, 56 );
RHO_PI( 13, 8 );
RHO_PI( 12, 25 );
RHO_PI( 2, 43 );
RHO_PI( 20, 62 );
RHO_PI( 14, 18 );
RHO_PI( 22, 39 );
RHO_PI( 9, 61 );
RHO_PI( 6, 20 );
RHO_PI( 1, 44 );
#undef RHO_PI
// Chi
for ( j = 0; j < 25; j += 5 )
{
memcpy( bc, &st[ j ], 5*32 );
st[ j ] = _mm256_xor_si256( st[ j ],
_mm256_andnot_si256( bc[1], bc[2] ) );
st[ j+1 ] = _mm256_xor_si256( st[ j+1 ],
_mm256_andnot_si256( bc[2], bc[3] ) );
st[ j+2 ] = _mm256_xor_si256( st[ j+2 ],
_mm256_andnot_si256( bc[3], bc[4] ) );
st[ j+3 ] = _mm256_xor_si256( st[ j+3 ],
_mm256_andnot_si256( bc[4], bc[0] ) );
st[ j+4 ] = _mm256_xor_si256( st[ j+4 ],
_mm256_andnot_si256( bc[0], bc[1] ) );
}
// Iota
st[0] = _mm256_xor_si256( st[0],
_mm256_set1_epi64x( keccakf_rndc[ r ] ) );
}
}
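// Each st[i] holds word i of four independent Keccak-f[1600] states, one
// per 64-bit lane, so one call to this function advances four hashes at once.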
int sha3_4way_init( sha3_4way_ctx_t *c, int mdlen )
{
for ( int i = 0; i < 25; i++ ) c->st[ i ] = m256_zero;
c->mdlen = mdlen;
c->rsiz = 200 - 2 * mdlen;
c->pt = 0;
return 1;
}
int sha3_4way_update( sha3_4way_ctx_t *c, const void *data, size_t len )
{
size_t i;
int j = c->pt;
const int rsiz = c->rsiz / 8;
const int l = len / 8;
for ( i = 0; i < l; i++ )
{
c->st[ j ] = _mm256_xor_si256( c->st[ j ],
( (const __m256i*)data )[i] );
j++;
if ( j >= rsiz )
{
sha3_4way_keccakf( c->st );
j = 0;
}
}
c->pt = j;
return 1;
}
int sha3_4way_final( void *md, sha3_4way_ctx_t *c )
{
c->st[ c->pt ] = _mm256_xor_si256( c->st[ c->pt ],
m256_const1_64( 6 ) );
c->st[ c->rsiz / 8 - 1 ] =
_mm256_xor_si256( c->st[ c->rsiz / 8 - 1 ],
m256_const1_64( 0x8000000000000000 ) );
sha3_4way_keccakf( c->st );
memcpy( md, c->st, c->mdlen * 4 );
return 1;
}
void *sha3_4way( const void *in, size_t inlen, void *md, int mdlen )
{
sha3_4way_ctx_t ctx;
sha3_4way_init( &ctx, mdlen);
sha3_4way_update( &ctx, in, inlen );
sha3_4way_final( md, &ctx );
return md;
}
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
void sha3_8way_keccakf( __m512i st[25] )
{
int i, j, r;
__m512i t, bc[5];
// actual iteration
for ( r = 0; r < KECCAKF_ROUNDS; r++ )
{
// Theta
for ( i = 0; i < 5; i++ )
bc[i] = _mm512_xor_si512( st[i],
mm512_xor4( st[ i+5 ], st[ i+10 ], st[ i+15 ], st[ i+20 ] ) );
for ( i = 0; i < 5; i++ )
{
t = _mm512_xor_si512( bc[(i + 4) % 5],
_mm512_rol_epi64( bc[(i + 1) % 5], 1 ) );
for ( j = 0; j < 25; j += 5 )
st[j + i] = _mm512_xor_si512( st[j + i], t );
}
// Rho Pi
#define RHO_PI( i, c ) \
bc[0] = st[ i ]; \
st[ i ] = _mm512_rol_epi64( t, c ); \
t = bc[0]
t = st[1];
RHO_PI( 10, 1 );
RHO_PI( 7, 3 );
RHO_PI( 11, 6 );
RHO_PI( 17, 10 );
RHO_PI( 18, 15 );
RHO_PI( 3, 21 );
RHO_PI( 5, 28 );
RHO_PI( 16, 36 );
RHO_PI( 8, 45 );
RHO_PI( 21, 55 );
RHO_PI( 24, 2 );
RHO_PI( 4, 14 );
RHO_PI( 15, 27 );
RHO_PI( 23, 41 );
RHO_PI( 19, 56 );
RHO_PI( 13, 8 );
RHO_PI( 12, 25 );
RHO_PI( 2, 43 );
RHO_PI( 20, 62 );
RHO_PI( 14, 18 );
RHO_PI( 22, 39 );
RHO_PI( 9, 61 );
RHO_PI( 6, 20 );
RHO_PI( 1, 44 );
#undef RHO_PI
// Chi
for ( j = 0; j < 25; j += 5 )
{
for ( i = 0; i < 5; i++ )
bc[i] = st[j + i];
for ( i = 0; i < 5; i++ )
st[ j+i ] = _mm512_xor_si512( st[ j+i ], _mm512_andnot_si512(
bc[ (i+1) % 5 ], bc[ (i+2) % 5 ] ) );
}
// Iota
st[0] = _mm512_xor_si512( st[0], _mm512_set1_epi64( keccakf_rndc[r] ) );
}
}
// Initialize the context for SHA3
int sha3_8way_init( sha3_8way_ctx_t *c, int mdlen )
{
for ( int i = 0; i < 25; i++ ) c->st[ i ] = m512_zero;
c->mdlen = mdlen;
c->rsiz = 200 - 2 * mdlen;
c->pt = 0;
return 1;
}
// update state with more data
int sha3_8way_update( sha3_8way_ctx_t *c, const void *data, size_t len )
{
size_t i;
int j = c->pt;
const int rsiz = c->rsiz / 8;
const int l = len / 8;
for ( i = 0; i < l; i++ )
{
c->st[ j ] = _mm512_xor_si512( c->st[ j ],
( (const __m512i*)data )[i] );
j++;
if ( j >= rsiz )
{
sha3_8way_keccakf( c->st );
j = 0;
}
}
c->pt = j;
return 1;
}
// finalize and output a hash
int sha3_8way_final( void *md, sha3_8way_ctx_t *c )
{
c->st[ c->pt ] =
_mm512_xor_si512( c->st[ c->pt ],
m512_const1_64( 6 ) );
c->st[ c->rsiz / 8 - 1 ] =
_mm512_xor_si512( c->st[ c->rsiz / 8 - 1 ],
m512_const1_64( 0x8000000000000000 ) );
sha3_8way_keccakf( c->st );
memcpy( md, c->st, c->mdlen * 8 );
return 1;
}
// compute a SHA-3 hash (md) of given byte length from "in"
void *sha3_8way( const void *in, size_t inlen, void *md, int mdlen )
{
sha3_8way_ctx_t sha3;
sha3_8way_init( &sha3, mdlen);
sha3_8way_update( &sha3, in, inlen );
sha3_8way_final( md, &sha3 );
return md;
}
#endif // AVX512
#endif // AVX2
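As a reference for the lane layout, a minimal sketch under stated
assumptions (AVX2 build of this tree, sha3-4way.h on the include path,
four 8-byte messages so no partial words arise) of hashing four inputs at
once and de-interleaving the digests:

#include <stdint.h>
#include <stdio.h>
#include <immintrin.h>
#include "sha3-4way.h"

int main(void)
{
    // One 64-bit word per message; 64-bit lane k belongs to message k.
    uint64_t msg[4] = { 1, 2, 3, 4 };
    __m256i in = _mm256_set_epi64x( msg[3], msg[2], msg[1], msg[0] );
    __m256i md[4];                  // 4 x 32-byte digests, interleaved
    sha3_4way( &in, 8, md, 32 );    // mdlen 32: copies st[0..3] to md
    // Digest word i of message k is 64-bit element k of md[i].
    for ( int k = 0; k < 4; k++ )
        printf( "msg %d digest word 0: %016llx\n", k,
                (unsigned long long)( (uint64_t*)&md[0] )[k] );
    return 0;
}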

View File

@@ -0,0 +1,67 @@
// sha3-4way.h
// 19-Nov-11 Markku-Juhani O. Saarinen <mjos@iki.fi>
// 2021-03-27 JayDDee
//
#ifndef SHA3_4WAY_H
#define SHA3_4WAY_H
#include <stddef.h>
#include <stdint.h>
#include "simd-utils.h"
#if defined(__cplusplus)
extern "C" {
#endif
#ifndef KECCAKF_ROUNDS
#define KECCAKF_ROUNDS 24
#endif
#if defined(__AVX2__)
typedef struct
{
__m256i st[25]; // 64-bit words * 4 lanes
int pt, rsiz, mdlen; // these don't overflow
} sha3_4way_ctx_t __attribute__ ((aligned (64)));
// Compression function.
void sha3_4way_keccakf( __m256i st[25] );
// OpenSSL-like interface
int sha3_4way_init( sha3_4way_ctx_t *c, int mdlen ); // mdlen = hash output in bytes
int sha3_4way_update( sha3_4way_ctx_t *c, const void *data, size_t len );
int sha3_4way_final( void *md, sha3_4way_ctx_t *c ); // digest goes to md
// compute a sha3 hash (md) of given byte length from "in"
void *sha3_4way( const void *in, size_t inlen, void *md, int mdlen );
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// state context
typedef struct
{
__m512i st[25]; // 64-bit words * 8 lanes
int pt, rsiz, mdlen; // these don't overflow
} sha3_8way_ctx_t __attribute__ ((aligned (64)));
// Compression function.
void sha3_8way_keccakf( __m512i st[25] );
// OpenSSL-like interface
int sha3_8way_init( sha3_8way_ctx_t *c, int mdlen ); // mdlen = hash output in bytes
int sha3_8way_update( sha3_8way_ctx_t *c, const void *data, size_t len );
int sha3_8way_final( void *md, sha3_8way_ctx_t *c ); // digest goes to md
// compute a sha3 hash (md) of given byte length from "in"
void *sha3_8way( const void *in, size_t inlen, void *md, int mdlen );
#endif // AVX512
#endif // AVX2
#if defined(__cplusplus)
}
#endif
#endif

View File

@@ -0,0 +1,226 @@
// sha3.c
// 19-Nov-11 Markku-Juhani O. Saarinen <mjos@iki.fi>
// Revised 07-Aug-15 to match with official release of FIPS PUB 202 "SHA3"
// Revised 03-Sep-15 for portability + OpenSSL - style API
#include "sha3.h"
#include <string.h>
// update the state with given number of rounds
void sha3_keccakf(uint64_t st[25])
{
// constants
const uint64_t keccakf_rndc[24] = {
0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
0x8000000000008080, 0x0000000080000001, 0x8000000080008008
};
/*
const int keccakf_rotc[24] = {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
};
const int keccakf_piln[24] = {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
};
*/
// variables
int i, j, r;
uint64_t t, bc[5];
#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
uint8_t *v;
// endianness conversion; this is redundant on little-endian targets
for (i = 0; i < 25; i++) {
v = (uint8_t *) &st[i];
st[i] = ((uint64_t) v[0]) | (((uint64_t) v[1]) << 8) |
(((uint64_t) v[2]) << 16) | (((uint64_t) v[3]) << 24) |
(((uint64_t) v[4]) << 32) | (((uint64_t) v[5]) << 40) |
(((uint64_t) v[6]) << 48) | (((uint64_t) v[7]) << 56);
}
#endif
// actual iteration
for (r = 0; r < KECCAKF_ROUNDS; r++) {
// Theta
for (i = 0; i < 5; i++)
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20];
for (i = 0; i < 5; i++) {
t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
for (j = 0; j < 25; j += 5)
st[j + i] ^= t;
}
// Rho Pi
#define RHO_PI( i, c ) \
bc[0] = st[ i ]; \
st[ i ] = ROTL64( t, c ); \
t = bc[0]
t = st[1];
RHO_PI( 10, 1 );
RHO_PI( 7, 3 );
RHO_PI( 11, 6 );
RHO_PI( 17, 10 );
RHO_PI( 18, 15 );
RHO_PI( 3, 21 );
RHO_PI( 5, 28 );
RHO_PI( 16, 36 );
RHO_PI( 8, 45 );
RHO_PI( 21, 55 );
RHO_PI( 24, 2 );
RHO_PI( 4, 14 );
RHO_PI( 15, 27 );
RHO_PI( 23, 41 );
RHO_PI( 19, 56 );
RHO_PI( 13, 8 );
RHO_PI( 12, 25 );
RHO_PI( 2, 43 );
RHO_PI( 20, 62 );
RHO_PI( 14, 18 );
RHO_PI( 22, 39 );
RHO_PI( 9, 61 );
RHO_PI( 6, 20 );
RHO_PI( 1, 44 );
#undef RHO_PI
/*
for (i = 0; i < 24; i++) {
j = keccakf_piln[i];
bc[0] = st[j];
st[j] = ROTL64(t, keccakf_rotc[i]);
t = bc[0];
}
*/
// Chi
for (j = 0; j < 25; j += 5) {
for (i = 0; i < 5; i++)
bc[i] = st[j + i];
for (i = 0; i < 5; i++)
st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5];
}
// Iota
st[0] ^= keccakf_rndc[r];
}
#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
// endianness conversion; this is redundant on little-endian targets
for (i = 0; i < 25; i++) {
v = (uint8_t *) &st[i];
t = st[i];
v[0] = t & 0xFF;
v[1] = (t >> 8) & 0xFF;
v[2] = (t >> 16) & 0xFF;
v[3] = (t >> 24) & 0xFF;
v[4] = (t >> 32) & 0xFF;
v[5] = (t >> 40) & 0xFF;
v[6] = (t >> 48) & 0xFF;
v[7] = (t >> 56) & 0xFF;
}
#endif
}
// Initialize the context for SHA3
int sha3_init(sha3_ctx_t *c, int mdlen)
{
int i;
for (i = 0; i < 25; i++)
c->st.q[i] = 0;
c->mdlen = mdlen;
c->rsiz = 200 - 2 * mdlen;
c->pt = 0;
return 1;
}
// update state with more data
int sha3_update(sha3_ctx_t *c, const void *data, size_t len)
{
size_t i;
int j = c->pt / 8;
const int rsiz = c->rsiz / 8;
const int l = len / 8;
for ( i = 0; i < l; i++ )
{
c->st.q[ j++ ] ^= ( ((const uint64_t *) data) [i] );
if ( j >= rsiz )
{
sha3_keccakf( c->st.q );
j = 0;
}
}
c->pt = j*8;
return 1;
}
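// Note: this variant absorbs whole 64-bit words only (len / 8); trailing
// bytes would be dropped, but every caller in this diff passes a multiple
// of 8 bytes (e.g. the 72-byte prehash and the 8-byte nonce).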
// finalize and output a hash
int sha3_final(void *md, sha3_ctx_t *c)
{
c->st.q[ c->pt / 8 ] ^= 6;
c->st.q[ c->rsiz / 8 - 1 ] ^= 0x8000000000000000;
sha3_keccakf(c->st.q);
memcpy( md, c->st.q, c->mdlen );
return 1;
}
// compute a SHA-3 hash (md) of given byte length from "in"
void *sha3(const void *in, size_t inlen, void *md, int mdlen)
{
sha3_ctx_t sha3;
sha3_init(&sha3, mdlen);
sha3_update(&sha3, in, inlen);
sha3_final(md, &sha3);
return md;
}
// SHAKE128 and SHAKE256 extensible-output functionality
void shake_xof(sha3_ctx_t *c)
{
c->st.b[c->pt] ^= 0x1F;
c->st.b[c->rsiz - 1] ^= 0x80;
sha3_keccakf(c->st.q);
c->pt = 0;
}
void shake_out(sha3_ctx_t *c, void *out, size_t len)
{
size_t i;
int j;
j = c->pt;
for (i = 0; i < len; i++) {
if (j >= c->rsiz) {
sha3_keccakf(c->st.q);
j = 0;
}
((uint8_t *) out)[i] = c->st.b[j++];
}
c->pt = j;
}
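A short sketch of the XOF flow. Note that the sha3_update above absorbs
whole 64-bit words only, so the input length is kept a multiple of 8 here;
shake256_init and shake_update are macros from the header that follows.

sha3_ctx_t c;
uint8_t msg[16] = { 0 }, out[64];
shake256_init( &c );            // sha3_init( &c, 32 ): rsiz = 200 - 64
shake_update( &c, msg, 16 );    // absorb (length must be a multiple of 8)
shake_xof( &c );                // pad with 0x1F / 0x80, start squeezing
shake_out( &c, out, 32 );       // output can be squeezed...
shake_out( &c, out + 32, 32 );  // ...in pieces of any size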

View File

@@ -0,0 +1,55 @@
// sha3.h
// 19-Nov-11 Markku-Juhani O. Saarinen <mjos@iki.fi>
#ifndef SHA3_H
#define SHA3_H
#include <stddef.h>
#include <stdint.h>
#if defined(__cplusplus)
extern "C" {
#endif
#ifndef KECCAKF_ROUNDS
#define KECCAKF_ROUNDS 24
#endif
#ifndef ROTL64
#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
#endif
// state context
typedef struct {
union { // state:
uint8_t b[200]; // 8-bit bytes
uint64_t q[25]; // 64-bit words
} st;
int pt, rsiz, mdlen; // these don't overflow
} sha3_ctx_t;
// Compression function.
void sha3_keccakf(uint64_t st[25]);
// OpenSSL-like interface
int sha3_init(sha3_ctx_t *c, int mdlen); // mdlen = hash output in bytes
int sha3_update(sha3_ctx_t *c, const void *data, size_t len);
int sha3_final(void *md, sha3_ctx_t *c); // digest goes to md
// compute a sha3 hash (md) of given byte length from "in"
void *sha3(const void *in, size_t inlen, void *md, int mdlen);
// SHAKE128 and SHAKE256 extensible-output functions
#define shake128_init(c) sha3_init(c, 16)
#define shake256_init(c) sha3_init(c, 32)
#define shake_update sha3_update
void shake_xof(sha3_ctx_t *c);
void shake_out(sha3_ctx_t *c, void *out, size_t len);
#if defined(__cplusplus)
}
#endif
#endif

View File

@@ -0,0 +1,176 @@
#include "algo-gate-api.h"
#include "algo/sha/sph_sha2.h"
#include "Verthash.h"
#include "tiny_sha3/sha3-4way.h"
static verthash_info_t verthashInfo;
// Verthash data file hash in bytes for verification
// 0x48aa21d7afededb63976d48a8ff8ec29d5b02563af4a1110b056cd43e83155a5
static const uint8_t verthashDatFileHash_bytes[32] =
{ 0xa5, 0x55, 0x31, 0xe8, 0x43, 0xcd, 0x56, 0xb0,
0x10, 0x11, 0x4a, 0xaf, 0x63, 0x25, 0xb0, 0xd5,
0x29, 0xec, 0xf8, 0x8f, 0x8a, 0xd4, 0x76, 0x39,
0xb6, 0xed, 0xed, 0xaf, 0xd7, 0x21, 0xaa, 0x48 };
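// (bytes are stored in reverse order relative to the hex string above,
// i.e. as a little-endian 256-bit integer)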
#if defined(__AVX2__)
static __thread sha3_4way_ctx_t sha3_mid_ctxA;
static __thread sha3_4way_ctx_t sha3_mid_ctxB;
#else
static __thread sha3_ctx_t sha3_mid_ctx[8];
#endif
void verthash_sha3_512_prehash_72( const void *input )
{
#if defined(__AVX2__)
__m256i vin[10];
mm256_intrlv80_4x64( vin, input );
sha3_4way_init( &sha3_mid_ctxA, 64 );
sha3_4way_init( &sha3_mid_ctxB, 64 );
vin[0] = _mm256_add_epi8( vin[0], _mm256_set_epi64x( 4,3,2,1 ) );
sha3_4way_update( &sha3_mid_ctxA, vin, 72 );
vin[0] = _mm256_add_epi8( vin[0], _mm256_set1_epi64x( 4 ) );
sha3_4way_update( &sha3_mid_ctxB, vin, 72 );
#else
char in[80] __attribute__ ((aligned (64)));
memcpy( in, input, 80 );
for ( int i = 0; i < 8; i++ )
{
in[0] += 1;
sha3_init( &sha3_mid_ctx[i], 64 );
sha3_update( &sha3_mid_ctx[i], in, 72 );
}
#endif
}
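// Midstate optimization: SHA3-512 has rate 200 - 2*64 = 72 bytes, so the
// 72-byte header prefix fills exactly one Keccak block. It is absorbed once
// per work unit here; verthash_sha3_512_final_8 below then only has to
// absorb the 8-byte nonce and finalize for each of the 8 lane hashes.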
void verthash_sha3_512_final_8( void *hash, const uint64_t nonce )
{
#if defined(__AVX2__)
__m256i vhashA[ 10 ] __attribute__ ((aligned (64)));
__m256i vhashB[ 10 ] __attribute__ ((aligned (64)));
sha3_4way_ctx_t ctx;
const __m256i vnonce = _mm256_set1_epi64x( nonce );
memcpy( &ctx, &sha3_mid_ctxA, sizeof ctx );
sha3_4way_update( &ctx, &vnonce, 8 );
sha3_4way_final( vhashA, &ctx );
memcpy( &ctx, &sha3_mid_ctxB, sizeof ctx );
sha3_4way_update( &ctx, &vnonce, 8 );
sha3_4way_final( vhashB, &ctx );
dintrlv_4x64( hash, hash+64, hash+128, hash+192, vhashA, 512 );
dintrlv_4x64( hash+256, hash+320, hash+384, hash+448, vhashB, 512 );
#else
for ( int i = 0; i < 8; i++ )
{
sha3_ctx_t ctx;
memcpy( &ctx, &sha3_mid_ctx[i], sizeof ctx );
sha3_update( &ctx, &nonce, 8 );
sha3_final( hash + i*64, &ctx );
}
#endif
}
int scanhash_verthash( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t edata[20] __attribute__((aligned(64)));
uint32_t hash[8] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 1;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
mm128_bswap32_80( edata, pdata );
verthash_sha3_512_prehash_72( edata );
do
{
edata[19] = n;
verthash_hash( verthashInfo.data, verthashInfo.dataSize,
edata, hash );
if ( valid_hash( hash, ptarget ) && !bench )
{
pdata[19] = bswap_32( n );
submit_solution( work, hash, mythr );
}
n++;
} while ( n < last_nonce && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce;
pdata[19] = n;
return 0;
}
static const char *default_verthash_data_file = "verthash.dat";
bool register_verthash_algo( algo_gate_t* gate )
{
opt_target_factor = 256.0;
gate->scanhash = (void*)&scanhash_verthash;
gate->optimizations = AVX2_OPT;
const char *verthash_data_file = opt_data_file ? opt_data_file
: default_verthash_data_file;
int vhLoadResult = verthash_info_init( &verthashInfo, verthash_data_file );
if (vhLoadResult == 0) // No Error
{
if ( opt_verify )
{
uint8_t vhDataFileHash[32] = { 0 };
applog( LOG_NOTICE, "Verifying Verthash data" );
sph_sha256_full( vhDataFileHash, verthashInfo.data,
verthashInfo.dataSize );
if ( memcmp( vhDataFileHash, verthashDatFileHash_bytes,
sizeof(verthashDatFileHash_bytes) ) == 0 )
applog( LOG_NOTICE, "Verthash data has been verified" );
else
{
applog( LOG_ERR, "Verthash data verification has failed" );
return false;
}
}
}
else
{
// Handle Verthash error codes
if ( vhLoadResult == 1 )
{
applog( LOG_ERR, "Verthash data file not found: %s",
verthash_data_file );
if ( !opt_data_file )
applog( LOG_NOTICE, "Add '--verify' to create verthash.dat");
}
else if ( vhLoadResult == 2 )
applog( LOG_ERR, "Failed to allocate memory for Verthash data" );
// else // for debugging purposes
// applog( LOG_ERR, "Verthash data initialization unknown error code: %d",
// vhLoadResult );
return false;
}
printf("\n");
return true;
}

View File

@@ -7,7 +7,7 @@
#include "algo/jh/jh-hash-4way.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/cubehash/cube-hash-2way.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/fugue/fugue-aesni.h"
#include "algo/gost/sph_gost.h"
#include "algo/echo/aes_ni/hash_api.h"
#if defined(__VAES__)
@@ -20,7 +20,7 @@ typedef struct {
skein512_8way_context skein;
jh512_8way_context jh;
cube_4way_context cube;
sph_fugue512_context fugue;
hashState_fugue fugue;
sph_gost512_context gost;
#if defined(__VAES__)
echo_4way_context echo;
@@ -36,7 +36,7 @@ void init_phi1612_8way_ctx()
skein512_8way_init( &phi1612_8way_ctx.skein );
jh512_8way_init( &phi1612_8way_ctx.jh );
cube_4way_init( &phi1612_8way_ctx.cube, 512, 16, 32 );
sph_fugue512_init( &phi1612_8way_ctx.fugue );
fugue512_Init( &phi1612_8way_ctx.fugue, 512 );
sph_gost512_init( &phi1612_8way_ctx.gost );
#if defined(__VAES__)
echo_4way_init( &phi1612_8way_ctx.echo, 512 );
@@ -79,29 +79,14 @@ void phi1612_8way_hash( void *state, const void *input )
dintrlv_4x128_512( hash4, hash5, hash6, hash7, vhash );
// Fugue
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash4, 64 );
sph_fugue512_close( &ctx.fugue, hash4 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash5, 64 );
sph_fugue512_close( &ctx.fugue, hash5 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash6, 64 );
sph_fugue512_close( &ctx.fugue, hash6 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash7, 64 );
sph_fugue512_close( &ctx.fugue, hash7 );
fugue512_full( &ctx.fugue, hash0, hash0, 64 );
fugue512_full( &ctx.fugue, hash1, hash1, 64 );
fugue512_full( &ctx.fugue, hash2, hash2, 64 );
fugue512_full( &ctx.fugue, hash3, hash3, 64 );
fugue512_full( &ctx.fugue, hash4, hash4, 64 );
fugue512_full( &ctx.fugue, hash5, hash5, 64 );
fugue512_full( &ctx.fugue, hash6, hash6, 64 );
fugue512_full( &ctx.fugue, hash7, hash7, 64 );
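// fugue512_full is a one-shot init + update + final, so the per-lane
// sph_fugue512_init / memcpy reset between calls is no longer needed.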
// Gost
sph_gost512( &ctx.gost, hash0, 64 );
@@ -223,7 +208,7 @@ typedef struct {
skein512_4way_context skein;
jh512_4way_context jh;
cubehashParam cube;
sph_fugue512_context fugue;
hashState_fugue fugue;
sph_gost512_context gost;
hashState_echo echo;
} phi1612_4way_ctx_holder;
@@ -235,7 +220,6 @@ void init_phi1612_4way_ctx()
skein512_4way_init( &phi1612_4way_ctx.skein );
jh512_4way_init( &phi1612_4way_ctx.jh );
cubehashInit( &phi1612_4way_ctx.cube, 512, 16, 32 );
sph_fugue512_init( &phi1612_4way_ctx.fugue );
sph_gost512_init( &phi1612_4way_ctx.gost );
init_echo( &phi1612_4way_ctx.echo, 512 );
};
@@ -275,17 +259,10 @@ void phi1612_4way_hash( void *state, const void *input )
cubehashUpdateDigest( &ctx.cube, (byte*)hash3, (const byte*) hash3, 64 );
// Fugue
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
fugue512_full( &ctx.fugue, hash0, hash0, 64 );
fugue512_full( &ctx.fugue, hash1, hash1, 64 );
fugue512_full( &ctx.fugue, hash2, hash2, 64 );
fugue512_full( &ctx.fugue, hash3, hash3, 64 );
// Gost
sph_gost512( &ctx.gost, hash0, 64 );

View File

@@ -8,24 +8,28 @@
#include <stdio.h>
#include "algo/gost/sph_gost.h"
#include "algo/echo/sph_echo.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/skein/sph_skein.h"
#include "algo/jh/sph_jh.h"
#ifdef __AES__
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/fugue/fugue-aesni.h"
#else
#include "algo/echo/sph_echo.h"
#include "algo/fugue/sph_fugue.h"
#endif
typedef struct {
sph_skein512_context skein;
sph_jh512_context jh;
cubehashParam cube;
sph_fugue512_context fugue;
sph_gost512_context gost;
#ifdef __AES__
hashState_echo echo;
hashState_fugue fugue;
#else
sph_echo512_context echo;
sph_fugue512_context fugue;
#endif
} phi_ctx_holder;
@@ -38,12 +42,13 @@ void init_phi1612_ctx()
sph_skein512_init( &phi_ctx.skein );
sph_jh512_init( &phi_ctx.jh );
cubehashInit( &phi_ctx.cube, 512, 16, 32 );
sph_fugue512_init( &phi_ctx.fugue );
sph_gost512_init( &phi_ctx.gost );
#ifdef __AES__
init_echo( &phi_ctx.echo, 512 );
fugue512_Init( &phi_ctx.fugue, 512 );
#else
sph_echo512_init( &phi_ctx.echo );
sph_fugue512_init( &phi_ctx.fugue );
#endif
}
@@ -69,8 +74,13 @@ void phi1612_hash(void *output, const void *input)
cubehashUpdateDigest( &ctx.cube, (byte*) hash, (const byte*)hash, 64 );
#if defined(__AES__)
fugue512_Update( &ctx.fugue, hash, 512 );
fugue512_Final( &ctx.fugue, hash );
#else
sph_fugue512( &ctx.fugue, (const void*)hash, 64 );
sph_fugue512_close( &ctx.fugue, (void*)hash );
#endif
sph_gost512( &ctx.gost, hash, 64 );
sph_gost512_close( &ctx.gost, hash );

View File

@@ -5,7 +5,7 @@
#include <stdio.h>
#include "algo/skein/skein-hash-4way.h"
#include "algo/gost/sph_gost.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/fugue/fugue-aesni.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/cubehash/cube-hash-2way.h"
@@ -14,7 +14,7 @@
typedef struct {
skein512_8way_context skein;
cube_4way_context cube;
sph_fugue512_context fugue;
hashState_fugue fugue;
sph_gost512_context gost;
} skunk_8way_ctx_holder;
@@ -46,29 +46,15 @@ void skunk_8way_hash( void *output, const void *input )
cube_4way_init( &ctx.cube, 512, 16, 32 );
cube_4way_update_close( &ctx.cube, vhash, vhash, 64 );
dintrlv_4x128_512( hash4, hash5, hash6, hash7, vhash );
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
sph_fugue512( &ctx.fugue, hash4, 64 );
sph_fugue512_close( &ctx.fugue, hash4 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash5, 64 );
sph_fugue512_close( &ctx.fugue, hash5 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash6, 64 );
sph_fugue512_close( &ctx.fugue, hash6 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash7, 64 );
sph_fugue512_close( &ctx.fugue, hash7 );
fugue512_full( &ctx.fugue, hash0, hash0, 64 );
fugue512_full( &ctx.fugue, hash1, hash1, 64 );
fugue512_full( &ctx.fugue, hash2, hash2, 64 );
fugue512_full( &ctx.fugue, hash3, hash3, 64 );
fugue512_full( &ctx.fugue, hash4, hash4, 64 );
fugue512_full( &ctx.fugue, hash5, hash5, 64 );
fugue512_full( &ctx.fugue, hash6, hash6, 64 );
fugue512_full( &ctx.fugue, hash7, hash7, 64 );
sph_gost512( &ctx.gost, hash0, 64 );
sph_gost512_close( &ctx.gost, output );
@@ -140,7 +126,6 @@ bool skunk_8way_thread_init()
{
skein512_8way_init( &skunk_8way_ctx.skein );
cube_4way_init( &skunk_8way_ctx.cube, 512, 16, 32 );
sph_fugue512_init( &skunk_8way_ctx.fugue );
sph_gost512_init( &skunk_8way_ctx.gost );
return true;
}
@@ -150,7 +135,7 @@ bool skunk_8way_thread_init()
typedef struct {
skein512_4way_context skein;
cubehashParam cube;
sph_fugue512_context fugue;
hashState_fugue fugue;
sph_gost512_context gost;
} skunk_4way_ctx_holder;
@@ -178,17 +163,10 @@ void skunk_4way_hash( void *output, const void *input )
memcpy( &ctx.cube, &skunk_4way_ctx.cube, sizeof(cubehashParam) );
cubehashUpdateDigest( &ctx.cube, (byte*)hash3, (const byte*) hash3, 64 );
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
sph_fugue512_init( &ctx.fugue );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
fugue512_full( &ctx.fugue, hash0, hash0, 64 );
fugue512_full( &ctx.fugue, hash1, hash1, 64 );
fugue512_full( &ctx.fugue, hash2, hash2, 64 );
fugue512_full( &ctx.fugue, hash3, hash3, 64 );
sph_gost512( &ctx.gost, hash0, 64 );
sph_gost512_close( &ctx.gost, hash0 );
@@ -252,7 +230,6 @@ bool skunk_4way_thread_init()
{
skein512_4way_init( &skunk_4way_ctx.skein );
cubehashInit( &skunk_4way_ctx.cube, 512, 16, 32 );
sph_fugue512_init( &skunk_4way_ctx.fugue );
sph_gost512_init( &skunk_4way_ctx.gost );
return true;
}

View File

@@ -2,7 +2,7 @@
bool register_skunk_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT | AES_OPT;
#if defined (SKUNK_8WAY)
gate->miner_thread_init = (void*)&skunk_8way_thread_init;
gate->scanhash = (void*)&scanhash_skunk_8way;

View File

@@ -8,13 +8,21 @@
#include <stdio.h>
#include "algo/gost/sph_gost.h"
#include "algo/skein/sph_skein.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/cubehash/cubehash_sse2.h"
#if defined(__AES__)
#include "algo/fugue/fugue-aesni.h"
#else
#include "algo/fugue/sph_fugue.h"
#endif
typedef struct {
sph_skein512_context skein;
cubehashParam cube;
#if defined(__AES__)
hashState_fugue fugue;
#else
sph_fugue512_context fugue;
#endif
sph_gost512_context gost;
} skunk_ctx_holder;
@@ -32,8 +40,13 @@ void skunkhash( void *output, const void *input )
cubehashUpdateDigest( &ctx.cube, (byte*) hash, (const byte*)hash, 64 );
#if defined(__AES__)
fugue512_Update( &ctx.fugue, hash, 512 );
fugue512_Final( &ctx.fugue, hash );
#else
sph_fugue512( &ctx.fugue, hash, 64 );
sph_fugue512_close( &ctx.fugue, hash );
#endif
sph_gost512( &ctx.gost, hash, 64 );
sph_gost512_close( &ctx.gost, hash );
@@ -87,8 +100,12 @@ bool skunk_thread_init()
{
sph_skein512_init( &skunk_ctx.skein );
cubehashInit( &skunk_ctx.cube, 512, 16, 32 );
sph_fugue512_init( &skunk_ctx.fugue );
sph_gost512_init( &skunk_ctx.gost );
#if defined(__AES__)
fugue512_Init( &skunk_ctx.fugue, 512 );
#else
sph_fugue512_init( &skunk_ctx.fugue );
#endif
sph_gost512_init( &skunk_ctx.gost );
return true;
}
#endif

View File

@@ -16,7 +16,7 @@
#include "algo/simd/simd-hash-2way.h"
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/hamsi/hamsi-hash-4way.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/fugue/fugue-aesni.h"
#if defined(__VAES__)
#include "algo/groestl/groestl512-hash-4way.h"
#include "algo/shavite/shavite-hash-4way.h"
@@ -35,7 +35,7 @@ typedef struct {
cube_4way_context cube;
simd_4way_context simd;
hamsi512_8way_context hamsi;
sph_fugue512_context fugue;
hashState_fugue fugue;
#if defined(__VAES__)
groestl512_4way_context groestl;
shavite512_4way_context shavite;
@@ -60,7 +60,7 @@ void init_x13_8way_ctx()
cube_4way_init( &x13_8way_ctx.cube, 512, 16, 32 );
simd_4way_init( &x13_8way_ctx.simd, 512 );
hamsi512_8way_init( &x13_8way_ctx.hamsi );
sph_fugue512_init( &x13_8way_ctx.fugue );
fugue512_Init( &x13_8way_ctx.fugue, 512 );
#if defined(__VAES__)
groestl512_4way_init( &x13_8way_ctx.groestl, 64 );
shavite512_4way_init( &x13_8way_ctx.shavite );
@@ -255,29 +255,29 @@ void x13_8way_hash( void *state, const void *input )
vhash );
// 13 Fugue serial
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x13_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x13_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x13_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
memcpy( &ctx.fugue, &x13_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash4, 64 );
sph_fugue512_close( &ctx.fugue, hash4 );
memcpy( &ctx.fugue, &x13_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash5, 64 );
sph_fugue512_close( &ctx.fugue, hash5 );
memcpy( &ctx.fugue, &x13_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash6, 64 );
sph_fugue512_close( &ctx.fugue, hash6 );
memcpy( &ctx.fugue, &x13_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash7, 64 );
sph_fugue512_close( &ctx.fugue, hash7 );
fugue512_Update( &ctx.fugue, hash0, 512 );
fugue512_Final( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x13_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash1, 512 );
fugue512_Final( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x13_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash2, 512 );
fugue512_Final( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x13_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash3, 512 );
fugue512_Final( &ctx.fugue, hash3 );
memcpy( &ctx.fugue, &x13_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash4, 512 );
fugue512_Final( &ctx.fugue, hash4 );
memcpy( &ctx.fugue, &x13_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash5, 512 );
fugue512_Final( &ctx.fugue, hash5 );
memcpy( &ctx.fugue, &x13_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash6, 512 );
fugue512_Final( &ctx.fugue, hash6 );
memcpy( &ctx.fugue, &x13_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash7, 512 );
fugue512_Final( &ctx.fugue, hash7 );
memcpy( state, hash0, 32 );
memcpy( state+ 32, hash1, 32 );
@@ -344,7 +344,7 @@ typedef struct {
simd_2way_context simd;
hashState_echo echo;
hamsi512_4way_context hamsi;
sph_fugue512_context fugue;
hashState_fugue fugue;
} x13_4way_ctx_holder;
x13_4way_ctx_holder x13_4way_ctx __attribute__ ((aligned (64)));
@@ -363,7 +363,7 @@ void init_x13_4way_ctx()
simd_2way_init( &x13_4way_ctx.simd, 512 );
init_echo( &x13_4way_ctx.echo, 512 );
hamsi512_4way_init( &x13_4way_ctx.hamsi );
sph_fugue512_init( &x13_4way_ctx.fugue );
fugue512_Init( &x13_4way_ctx.fugue, 512 );
};
void x13_4way_hash( void *state, const void *input )
@@ -477,17 +477,17 @@ void x13_4way_hash( void *state, const void *input )
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 13 Fugue serial
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x13_4way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x13_4way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x13_4way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
fugue512_Update( &ctx.fugue, hash0, 512 );
fugue512_Final( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x13_4way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash1, 512 );
fugue512_Final( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x13_4way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash2, 512 );
fugue512_Final( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x13_4way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash3, 512 );
fugue512_Final( &ctx.fugue, hash3 );
memcpy( state, hash0, 32 );
memcpy( state+32, hash1, 32 );

View File

@@ -13,7 +13,6 @@
#include "algo/skein/sph_skein.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/hamsi/sph_hamsi.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/luffa/luffa_for_sse2.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/simd/nist.h"
@@ -21,9 +20,11 @@
#if defined(__AES__)
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#include "algo/fugue/fugue-aesni.h"
#else
#include "algo/groestl/sph_groestl.h"
#include "algo/echo/sph_echo.h"
#include "algo/fugue/sph_fugue.h"
#endif
typedef struct {
@@ -32,9 +33,11 @@ typedef struct {
#if defined(__AES__)
hashState_echo echo;
hashState_groestl groestl;
hashState_fugue fugue;
#else
sph_groestl512_context groestl;
sph_echo512_context echo;
sph_fugue512_context fugue;
#endif
sph_jh512_context jh;
sph_keccak512_context keccak;
@@ -44,7 +47,6 @@ typedef struct {
sph_shavite512_context shavite;
hashState_sd simd;
sph_hamsi512_context hamsi;
sph_fugue512_context fugue;
} x13_ctx_holder;
x13_ctx_holder x13_ctx;
@@ -56,9 +58,11 @@ void init_x13_ctx()
#if defined(__AES__)
init_groestl( &x13_ctx.groestl, 64 );
init_echo( &x13_ctx.echo, 512 );
fugue512_Init( &x13_ctx.fugue, 512 );
#else
sph_groestl512_init( &x13_ctx.groestl );
sph_echo512_init( &x13_ctx.echo );
sph_fugue512_init( &x13_ctx.fugue );
#endif
sph_skein512_init( &x13_ctx.skein );
sph_jh512_init( &x13_ctx.jh );
@@ -68,7 +72,6 @@ void init_x13_ctx()
sph_shavite512_init( &x13_ctx.shavite );
init_sd( &x13_ctx.simd, 512 );
sph_hamsi512_init( &x13_ctx.hamsi );
sph_fugue512_init( &x13_ctx.fugue );
};
void x13hash(void *output, const void *input)
@@ -84,11 +87,9 @@ void x13hash(void *output, const void *input)
sph_bmw512_close( &ctx.bmw, hash );
#if defined(__AES__)
init_groestl( &ctx.groestl, 64 );
update_and_final_groestl( &ctx.groestl, (char*)hash,
(const char*)hash, 512 );
#else
sph_groestl512_init( &ctx.groestl );
sph_groestl512( &ctx.groestl, hash, 64 );
sph_groestl512_close( &ctx.groestl, hash );
#endif
@@ -125,8 +126,13 @@ void x13hash(void *output, const void *input)
sph_hamsi512( &ctx.hamsi, hash, 64 );
sph_hamsi512_close( &ctx.hamsi, hash );
#if defined(__AES__)
fugue512_Update( &ctx.fugue, hash, 512 );
fugue512_Final( &ctx.fugue, hash );
#else
sph_fugue512( &ctx.fugue, hash, 64 );
sph_fugue512_close( &ctx.fugue, hash );
#endif
memcpy( output, hash, 32 );
}

View File

@@ -16,7 +16,7 @@
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/sm3/sm3-hash-4way.h"
#include "algo/hamsi/hamsi-hash-4way.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/fugue/fugue-aesni.h"
#if defined(__VAES__)
#include "algo/groestl/groestl512-hash-4way.h"
#include "algo/shavite/shavite-hash-4way.h"
@@ -35,7 +35,7 @@ typedef struct {
simd_4way_context simd;
sm3_8way_ctx_t sm3;
hamsi512_8way_context hamsi;
sph_fugue512_context fugue;
hashState_fugue fugue;
#if defined(__VAES__)
groestl512_4way_context groestl;
shavite512_4way_context shavite;
@@ -61,7 +61,7 @@ void init_x13bcd_8way_ctx()
simd_4way_init( &x13bcd_8way_ctx.simd, 512 );
sm3_8way_init( &x13bcd_8way_ctx.sm3 );
hamsi512_8way_init( &x13bcd_8way_ctx.hamsi );
sph_fugue512_init( &x13bcd_8way_ctx.fugue );
fugue512_Init( &x13bcd_8way_ctx.fugue, 512 );
#if defined(__VAES__)
groestl512_4way_init( &x13bcd_8way_ctx.groestl, 64 );
shavite512_4way_init( &x13bcd_8way_ctx.shavite );
@@ -257,36 +257,30 @@ void x13bcd_8way_hash( void *state, const void *input )
hash4, hash5, hash6, hash7, vhash );
// Fugue serial
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, state );
memcpy( &ctx.fugue, &x13bcd_8way_ctx.fugue,
sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, state+32 );
memcpy( &ctx.fugue, &x13bcd_8way_ctx.fugue,
sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, state+64 );
memcpy( &ctx.fugue, &x13bcd_8way_ctx.fugue,
sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, state+96 );
memcpy( &ctx.fugue, &x13bcd_8way_ctx.fugue,
sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash4, 64 );
sph_fugue512_close( &ctx.fugue, state+128 );
memcpy( &ctx.fugue, &x13bcd_8way_ctx.fugue,
sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash5, 64 );
sph_fugue512_close( &ctx.fugue, state+160 );
memcpy( &ctx.fugue, &x13bcd_8way_ctx.fugue,
sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash6, 64 );
sph_fugue512_close( &ctx.fugue, state+192 );
memcpy( &ctx.fugue, &x13bcd_8way_ctx.fugue,
sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash7, 64 );
sph_fugue512_close( &ctx.fugue, state+224 );
fugue512_Update( &ctx.fugue, hash0, 512 );
fugue512_Final( &ctx.fugue, state );
memcpy( &ctx.fugue, &x13bcd_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash1, 512 );
fugue512_Final( &ctx.fugue, state+32 );
memcpy( &ctx.fugue, &x13bcd_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash2, 512 );
fugue512_Final( &ctx.fugue, state+64 );
memcpy( &ctx.fugue, &x13bcd_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash3, 512 );
fugue512_Final( &ctx.fugue, state+96 );
memcpy( &ctx.fugue, &x13bcd_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash4, 512 );
fugue512_Final( &ctx.fugue, state+128 );
memcpy( &ctx.fugue, &x13bcd_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash5, 512 );
fugue512_Final( &ctx.fugue, state+160 );
memcpy( &ctx.fugue, &x13bcd_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash6, 512 );
fugue512_Final( &ctx.fugue, state+192 );
memcpy( &ctx.fugue, &x13bcd_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash7, 512 );
fugue512_Final( &ctx.fugue, state+224 );
}
int scanhash_x13bcd_8way( struct work *work, uint32_t max_nonce,
@@ -346,7 +340,7 @@ typedef struct {
hashState_echo echo;
sm3_4way_ctx_t sm3;
hamsi512_4way_context hamsi;
sph_fugue512_context fugue;
hashState_fugue fugue;
} x13bcd_4way_ctx_holder;
x13bcd_4way_ctx_holder x13bcd_4way_ctx __attribute__ ((aligned (64)));
@@ -366,7 +360,7 @@ void init_x13bcd_4way_ctx()
init_echo( &x13bcd_4way_ctx.echo, 512 );
sm3_4way_init( &x13bcd_4way_ctx.sm3 );
hamsi512_4way_init( &x13bcd_4way_ctx.hamsi );
sph_fugue512_init( &x13bcd_4way_ctx.fugue );
fugue512_Init( &x13bcd_4way_ctx.fugue, 512 );
};
void x13bcd_4way_hash( void *state, const void *input )
@@ -489,20 +483,17 @@ void x13bcd_4way_hash( void *state, const void *input )
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// Fugue serial
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x13bcd_4way_ctx.fugue,
sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x13bcd_4way_ctx.fugue,
sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x13bcd_4way_ctx.fugue,
sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
fugue512_Update( &ctx.fugue, hash0, 512 );
fugue512_Final( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x13bcd_4way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash1, 512 );
fugue512_Final( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x13bcd_4way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash2, 512 );
fugue512_Final( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x13bcd_4way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash3, 512 );
fugue512_Final( &ctx.fugue, hash3 );
memcpy( state, hash0, 32 );
memcpy( state+32, hash1, 32 );

View File

@@ -14,16 +14,17 @@
#include "algo/skein/sph_skein.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/hamsi/sph_hamsi.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/simd/nist.h"
#if defined(__AES__)
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#include "algo/fugue/fugue-aesni.h"
#else
#include "algo/groestl/sph_groestl.h"
#include "algo/echo/sph_echo.h"
#include "algo/fugue/sph_fugue.h"
#endif
typedef struct {
@@ -32,9 +33,11 @@ typedef struct {
#if defined(__AES__)
hashState_echo echo;
hashState_groestl groestl;
hashState_fugue fugue;
#else
sph_groestl512_context groestl;
sph_echo512_context echo;
sph_fugue512_context fugue;
#endif
sph_jh512_context jh;
sph_keccak512_context keccak;
@@ -43,7 +46,6 @@ typedef struct {
sph_shavite512_context shavite;
hashState_sd simd;
sph_hamsi512_context hamsi;
sph_fugue512_context fugue;
sm3_ctx_t sm3;
} x13bcd_ctx_holder;
@@ -56,9 +58,11 @@ void init_x13bcd_ctx()
#if defined(__AES__)
init_groestl( &x13bcd_ctx.groestl, 64 );
init_echo( &x13bcd_ctx.echo, 512 );
fugue512_Init( &x13bcd_ctx.fugue, 512 );
#else
sph_groestl512_init( &x13bcd_ctx.groestl );
sph_echo512_init( &x13bcd_ctx.echo );
sph_fugue512_init( &x13bcd_ctx.fugue );
#endif
sph_skein512_init( &x13bcd_ctx.skein );
sph_jh512_init( &x13bcd_ctx.jh );
@@ -68,7 +72,6 @@ void init_x13bcd_ctx()
init_sd( &x13bcd_ctx.simd,512 );
sm3_init( &x13bcd_ctx.sm3 );
sph_hamsi512_init( &x13bcd_ctx.hamsi );
sph_fugue512_init( &x13bcd_ctx.fugue );
};
void x13bcd_hash(void *output, const void *input)
@@ -129,8 +132,13 @@ void x13bcd_hash(void *output, const void *input)
sph_hamsi512( &ctx.hamsi, hash, 64 );
sph_hamsi512_close( &ctx.hamsi, hash );
#if defined(__AES__)
fugue512_Update( &ctx.fugue, hash, 512 );
fugue512_Final( &ctx.fugue, hash );
#else
sph_fugue512( &ctx.fugue, hash, 64 );
sph_fugue512_close( &ctx.fugue, hash );
#endif
memcpy( output, hash, 32 );
}

View File

@@ -17,7 +17,7 @@
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/echo/sph_echo.h"
#include "algo/hamsi/hamsi-hash-4way.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/fugue/fugue-aesni.h"
#include "algo/shabal/shabal-hash-4way.h"
#if defined(__VAES__)
#include "algo/groestl/groestl512-hash-4way.h"
@@ -37,7 +37,7 @@ typedef struct {
cube_4way_context cube;
simd_4way_context simd;
hamsi512_8way_context hamsi;
sph_fugue512_context fugue;
hashState_fugue fugue;
shabal512_8way_context shabal;
#if defined(__VAES__)
groestl512_4way_context groestl;
@@ -63,7 +63,7 @@ void init_x14_8way_ctx()
cube_4way_init( &x14_8way_ctx.cube, 512, 16, 32 );
simd_4way_init( &x14_8way_ctx.simd, 512 );
hamsi512_8way_init( &x14_8way_ctx.hamsi );
sph_fugue512_init( &x14_8way_ctx.fugue );
fugue512_Init( &x14_8way_ctx.fugue, 512 );
shabal512_8way_init( &x14_8way_ctx.shabal );
#if defined(__VAES__)
groestl512_4way_init( &x14_8way_ctx.groestl, 64 );
@@ -259,29 +259,29 @@ void x14_8way_hash( void *state, const void *input )
vhash );
// 13 Fugue serial
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x14_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x14_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x14_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
memcpy( &ctx.fugue, &x14_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash4, 64 );
sph_fugue512_close( &ctx.fugue, hash4 );
memcpy( &ctx.fugue, &x14_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash5, 64 );
sph_fugue512_close( &ctx.fugue, hash5 );
memcpy( &ctx.fugue, &x14_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash6, 64 );
sph_fugue512_close( &ctx.fugue, hash6 );
memcpy( &ctx.fugue, &x14_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash7, 64 );
sph_fugue512_close( &ctx.fugue, hash7 );
fugue512_Update( &ctx.fugue, hash0, 512 );
fugue512_Final( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x14_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash1, 512 );
fugue512_Final( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x14_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash2, 512 );
fugue512_Final( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x14_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash3, 512 );
fugue512_Final( &ctx.fugue, hash3 );
memcpy( &ctx.fugue, &x14_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash4, 512 );
fugue512_Final( &ctx.fugue, hash4 );
memcpy( &ctx.fugue, &x14_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash5, 512 );
fugue512_Final( &ctx.fugue, hash5 );
memcpy( &ctx.fugue, &x14_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash6, 512 );
fugue512_Final( &ctx.fugue, hash6 );
memcpy( &ctx.fugue, &x14_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash7, 512 );
fugue512_Final( &ctx.fugue, hash7 );
// 14 Shabal, parallel 32 bit
intrlv_8x32_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
@@ -348,7 +348,7 @@ typedef struct {
simd_2way_context simd;
hashState_echo echo;
hamsi512_4way_context hamsi;
sph_fugue512_context fugue;
hashState_fugue fugue;
shabal512_4way_context shabal;
} x14_4way_ctx_holder;
@@ -368,7 +368,7 @@ void init_x14_4way_ctx()
simd_2way_init( &x14_4way_ctx.simd, 512 );
init_echo( &x14_4way_ctx.echo, 512 );
hamsi512_4way_init( &x14_4way_ctx.hamsi );
sph_fugue512_init( &x14_4way_ctx.fugue );
fugue512_Init( &x14_4way_ctx.fugue, 512 );
shabal512_4way_init( &x14_4way_ctx.shabal );
};
@@ -483,17 +483,17 @@ void x14_4way_hash( void *state, const void *input )
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 13 Fugue serial
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x14_4way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x14_4way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x14_4way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
fugue512_Update( &ctx.fugue, hash0, 512 );
fugue512_Final( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x14_4way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash1, 512 );
fugue512_Final( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x14_4way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash2, 512 );
fugue512_Final( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x14_4way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash3, 512 );
fugue512_Final( &ctx.fugue, hash3 );
// 14 Shabal, parallel 32 bit
intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );
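Note the reset idiom in both the 8-way and 4-way Fugue lanes above: instead of re-running fugue512_Init per lane, each lane restores a pre-initialized per-thread template with memcpy, which is cheaper. A sketch of that pattern, assuming fugue-aesni.h as in these hunks (the helper and template names are hypothetical):

#include <string.h>
#include "algo/fugue/fugue-aesni.h"

static hashState_fugue fugue_template;    // fugue512_Init'd once at startup

static void fugue512_lane( void *hash )
{
    hashState_fugue ctx;
    memcpy( &ctx, &fugue_template, sizeof(hashState_fugue) );  // cheap reset
    fugue512_Update( &ctx, hash, 512 );                        // 512 bits = 64 bytes
    fugue512_Final( &ctx, hash );
}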

View File

@@ -13,7 +13,6 @@
#include "algo/skein/sph_skein.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/hamsi/sph_hamsi.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/shabal/sph_shabal.h"
#include "algo/luffa/luffa_for_sse2.h"
#include "algo/cubehash/cubehash_sse2.h"
@@ -21,9 +20,11 @@
#if defined(__AES__)
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#include "algo/fugue/fugue-aesni.h"
#else
#include "algo/groestl/sph_groestl.h"
#include "algo/echo/sph_echo.h"
#include "algo/fugue/sph_fugue.h"
#endif
typedef struct {
@@ -32,9 +33,11 @@ typedef struct {
#if defined(__AES__)
hashState_groestl groestl;
hashState_echo echo;
hashState_fugue fugue;
#else
sph_groestl512_context groestl;
sph_echo512_context echo;
sph_fugue512_context fugue;
#endif
sph_jh512_context jh;
sph_keccak512_context keccak;
@@ -44,7 +47,6 @@ typedef struct {
sph_shavite512_context shavite;
hashState_sd simd;
sph_hamsi512_context hamsi;
sph_fugue512_context fugue;
sph_shabal512_context shabal;
} x14_ctx_holder;
@@ -57,9 +59,11 @@ void init_x14_ctx()
#if defined(__AES__)
init_groestl( &x14_ctx.groestl, 64 );
init_echo( &x14_ctx.echo, 512 );
fugue512_Init( &x14_ctx.fugue, 512 );
#else
sph_groestl512_init( &x14_ctx.groestl );
sph_echo512_init( &x14_ctx.echo );
sph_fugue512_init( &x14_ctx.fugue );
#endif
sph_skein512_init( &x14_ctx.skein );
sph_jh512_init( &x14_ctx.jh );
@@ -69,7 +73,6 @@ void init_x14_ctx()
sph_shavite512_init( &x14_ctx.shavite );
init_sd( &x14_ctx.simd,512 );
sph_hamsi512_init( &x14_ctx.hamsi );
sph_fugue512_init( &x14_ctx.fugue );
sph_shabal512_init( &x14_ctx.shabal );
};
@@ -125,8 +128,13 @@ void x14hash(void *output, const void *input)
sph_hamsi512(&ctx.hamsi, hash, 64);
sph_hamsi512_close(&ctx.hamsi, hash);
#if defined(__AES__)
fugue512_Update( &ctx.fugue, hash, 512 );
fugue512_Final( &ctx.fugue, hash );
#else
sph_fugue512(&ctx.fugue, hash, 64);
sph_fugue512_close(&ctx.fugue, hash);
#endif
sph_shabal512( &ctx.shabal, hash, 64 );
sph_shabal512_close( &ctx.shabal, hash );

View File

@@ -17,7 +17,7 @@
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/echo/sph_echo.h"
#include "algo/hamsi/hamsi-hash-4way.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/fugue/fugue-aesni.h"
#include "algo/shabal/shabal-hash-4way.h"
#include "algo/whirlpool/sph_whirlpool.h"
#if defined(__VAES__)
@@ -38,7 +38,7 @@ typedef struct {
cube_4way_context cube;
simd_4way_context simd;
hamsi512_8way_context hamsi;
sph_fugue512_context fugue;
hashState_fugue fugue;
shabal512_8way_context shabal;
sph_whirlpool_context whirlpool;
#if defined(__VAES__)
@@ -65,7 +65,7 @@ void init_x15_8way_ctx()
cube_4way_init( &x15_8way_ctx.cube, 512, 16, 32 );
simd_4way_init( &x15_8way_ctx.simd, 512 );
hamsi512_8way_init( &x15_8way_ctx.hamsi );
sph_fugue512_init( &x15_8way_ctx.fugue );
fugue512_Init( &x15_8way_ctx.fugue, 512 );
shabal512_8way_init( &x15_8way_ctx.shabal );
sph_whirlpool_init( &x15_8way_ctx.whirlpool );
#if defined(__VAES__)
@@ -260,30 +260,29 @@ void x15_8way_hash( void *state, const void *input )
vhash );
// 13 Fugue
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x15_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x15_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x15_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
memcpy( &ctx.fugue, &x15_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash4, 64 );
sph_fugue512_close( &ctx.fugue, hash4 );
memcpy( &ctx.fugue, &x15_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash5, 64 );
sph_fugue512_close( &ctx.fugue, hash5 );
memcpy( &ctx.fugue, &x15_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash6, 64 );
sph_fugue512_close( &ctx.fugue, hash6 );
memcpy( &ctx.fugue, &x15_8way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash7, 64 );
sph_fugue512_close( &ctx.fugue, hash7 );
fugue512_Update( &ctx.fugue, hash0, 512 );
fugue512_Final( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x15_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash1, 512 );
fugue512_Final( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x15_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash2, 512 );
fugue512_Final( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x15_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash3, 512 );
fugue512_Final( &ctx.fugue, hash3 );
memcpy( &ctx.fugue, &x15_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash4, 512 );
fugue512_Final( &ctx.fugue, hash4 );
memcpy( &ctx.fugue, &x15_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash5, 512 );
fugue512_Final( &ctx.fugue, hash5 );
memcpy( &ctx.fugue, &x15_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash6, 512 );
fugue512_Final( &ctx.fugue, hash6 );
memcpy( &ctx.fugue, &x15_8way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash7, 512 );
fugue512_Final( &ctx.fugue, hash7 );
// 14 Shabal, parallel 32 bit
intrlv_8x32_512( vhash, hash0, hash1, hash2, hash3, hash4, hash5, hash6,
@@ -387,7 +386,7 @@ typedef struct {
simd_2way_context simd;
hashState_echo echo;
hamsi512_4way_context hamsi;
sph_fugue512_context fugue;
hashState_fugue fugue;
shabal512_4way_context shabal;
sph_whirlpool_context whirlpool;
} x15_4way_ctx_holder;
@@ -408,7 +407,7 @@ void init_x15_4way_ctx()
simd_2way_init( &x15_4way_ctx.simd, 512 );
init_echo( &x15_4way_ctx.echo, 512 );
hamsi512_4way_init( &x15_4way_ctx.hamsi );
sph_fugue512_init( &x15_4way_ctx.fugue );
fugue512_Init( &x15_4way_ctx.fugue, 512 );
shabal512_4way_init( &x15_4way_ctx.shabal );
sph_whirlpool_init( &x15_4way_ctx.whirlpool );
};
@@ -524,17 +523,17 @@ void x15_4way_hash( void *state, const void *input )
dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 );
// 13 Fugue
sph_fugue512( &ctx.fugue, hash0, 64 );
sph_fugue512_close( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x15_4way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash1, 64 );
sph_fugue512_close( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x15_4way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash2, 64 );
sph_fugue512_close( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x15_4way_ctx.fugue, sizeof(sph_fugue512_context) );
sph_fugue512( &ctx.fugue, hash3, 64 );
sph_fugue512_close( &ctx.fugue, hash3 );
fugue512_Update( &ctx.fugue, hash0, 512 );
fugue512_Final( &ctx.fugue, hash0 );
memcpy( &ctx.fugue, &x15_4way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash1, 512 );
fugue512_Final( &ctx.fugue, hash1 );
memcpy( &ctx.fugue, &x15_4way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash2, 512 );
fugue512_Final( &ctx.fugue, hash2 );
memcpy( &ctx.fugue, &x15_4way_ctx.fugue, sizeof(hashState_fugue) );
fugue512_Update( &ctx.fugue, hash3, 512 );
fugue512_Final( &ctx.fugue, hash3 );
// 14 Shabal, parallel 32 bit
intrlv_4x32( vhash, hash0, hash1, hash2, hash3, 512 );

View File

@@ -23,9 +23,11 @@
#if defined(__AES__)
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#include "algo/fugue/fugue-aesni.h"
#else
#include "algo/groestl/sph_groestl.h"
#include "algo/echo/sph_echo.h"
#include "algo/fugue/sph_fugue.h"
#endif
typedef struct {
@@ -34,9 +36,11 @@ typedef struct {
#if defined(__AES__)
hashState_echo echo;
hashState_groestl groestl;
hashState_fugue fugue;
#else
sph_groestl512_context groestl;
sph_echo512_context echo;
sph_fugue512_context fugue;
#endif
sph_jh512_context jh;
sph_keccak512_context keccak;
@@ -46,7 +50,6 @@ typedef struct {
sph_shavite512_context shavite;
hashState_sd simd;
sph_hamsi512_context hamsi;
sph_fugue512_context fugue;
sph_shabal512_context shabal;
sph_whirlpool_context whirlpool;
} x15_ctx_holder;
@@ -60,9 +63,11 @@ void init_x15_ctx()
#if defined(__AES__)
init_groestl( &x15_ctx.groestl, 64 );
init_echo( &x15_ctx.echo, 512 );
fugue512_Init( &x15_ctx.fugue, 512 );
#else
sph_groestl512_init( &x15_ctx.groestl );
sph_echo512_init( &x15_ctx.echo );
sph_fugue512_init( &x15_ctx.fugue );
#endif
sph_skein512_init( &x15_ctx.skein );
sph_jh512_init( &x15_ctx.jh );
@@ -72,7 +77,6 @@ void init_x15_ctx()
sph_shavite512_init( &x15_ctx.shavite );
init_sd( &x15_ctx.simd, 512 );
sph_hamsi512_init( &x15_ctx.hamsi );
sph_fugue512_init( &x15_ctx.fugue );
sph_shabal512_init( &x15_ctx.shabal );
sph_whirlpool_init( &x15_ctx.whirlpool );
};
@@ -131,8 +135,13 @@ void x15hash(void *output, const void *input)
sph_hamsi512( &ctx.hamsi, hash, 64 );
sph_hamsi512_close( &ctx.hamsi, hash );
#if defined(__AES__)
fugue512_Update( &ctx.fugue, hash, 512 );
fugue512_Final( &ctx.fugue, hash );
#else
sph_fugue512( &ctx.fugue, hash, 64 );
sph_fugue512_close( &ctx.fugue, hash );
#endif
sph_shabal512( &ctx.shabal, hash, 64 );
sph_shabal512_close( &ctx.shabal, hash );

View File

@@ -6,30 +6,6 @@
*/
#include "x16r-gate.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "algo/blake/sph_blake.h"
#include "algo/bmw/sph_bmw.h"
#include "algo/groestl/sph_groestl.h"
#include "algo/jh/sph_jh.h"
#include "algo/keccak/sph_keccak.h"
#include "algo/skein/sph_skein.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/luffa/luffa_for_sse2.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/simd/nist.h"
#include "algo/echo/sph_echo.h"
#include "algo/hamsi/sph_hamsi.h"
#include "algo/fugue/sph_fugue.h"
#include "algo/shabal/sph_shabal.h"
#include "algo/whirlpool/sph_whirlpool.h"
#include <openssl/sha.h>
#if defined(__AES__)
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#endif
static void hex_getAlgoString(const uint32_t* prevblock, char *output)
{
char *sptr = output;
@@ -47,34 +23,6 @@ static void hex_getAlgoString(const uint32_t* prevblock, char *output)
*sptr = '\0';
}
/*
union _hex_context_overlay
{
#if defined(__AES__)
hashState_echo echo;
hashState_groestl groestl;
#else
sph_groestl512_context groestl;
sph_echo512_context echo;
#endif
sph_blake512_context blake;
sph_bmw512_context bmw;
sph_skein512_context skein;
sph_jh512_context jh;
sph_keccak512_context keccak;
hashState_luffa luffa;
cubehashParam cube;
shavite512_context shavite;
hashState_sd simd;
sph_hamsi512_context hamsi;
sph_fugue512_context fugue;
sph_shabal512_context shabal;
sph_whirlpool_context whirlpool;
SHA512_CTX sha512;
};
typedef union _hex_context_overlay hex_context_overlay;
*/
static __thread x16r_context_overlay hex_ctx;
int hex_hash( void* output, const void* input, int thrid )
@@ -187,8 +135,12 @@ int hex_hash( void* output, const void* input, int thrid )
sph_hamsi512_close( &ctx.hamsi, hash );
break;
case FUGUE:
#if defined(__AES__)
fugue512_full( &ctx.fugue, hash, in, size );
#else
sph_fugue512_full( &ctx.fugue, hash, in, size );
break;
#endif
break;
case SHABAL:
if ( i == 0 )
sph_shabal512( &ctx.shabal, in+64, 16 );
@@ -209,9 +161,9 @@ int hex_hash( void* output, const void* input, int thrid )
sph_whirlpool512_full( &ctx.whirlpool, hash, in, size );
break;
case SHA_512:
SHA512_Init( &ctx.sha512 );
SHA512_Update( &ctx.sha512, in, size );
SHA512_Final( (unsigned char*) hash, &ctx.sha512 );
sph_sha512_init( &ctx.sha512 );
sph_sha512( &ctx.sha512, in, size );
sph_sha512_close( &ctx.sha512, hash );
break;
}
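The SHA_512 case above swaps OpenSSL's SHA512 for the bundled sph implementation, matching the removal of <openssl/sha.h> from the includes. The two call sequences are drop-in equivalents; a side-by-side sketch (function names are hypothetical):

#include <stddef.h>
#include <openssl/sha.h>            // old path
#include "algo/sha/sph_sha2.h"      // new path

static void sha512_openssl( void *out, const void *in, size_t len )
{
    SHA512_CTX ctx;
    SHA512_Init( &ctx );
    SHA512_Update( &ctx, in, len );
    SHA512_Final( (unsigned char*)out, &ctx );
}

static void sha512_sph( void *out, const void *in, size_t len )
{
    sph_sha512_context ctx;
    sph_sha512_init( &ctx );
    sph_sha512( &ctx, in, len );
    sph_sha512_close( &ctx, out );
}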

algo/x16/minotaur.c (new file, 302 lines)
View File

@@ -0,0 +1,302 @@
// Minotaur hash
#include "algo-gate-api.h"
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "algo/blake/sph_blake.h"
#include "algo/bmw/sph_bmw.h"
#include "algo/jh/sph_jh.h"
#include "algo/keccak/sph_keccak.h"
#include "algo/skein/sph_skein.h"
#include "algo/shavite/sph_shavite.h"
#include "algo/luffa/luffa_for_sse2.h"
#include "algo/cubehash/cubehash_sse2.h"
#include "algo/simd/nist.h"
#include "algo/hamsi/sph_hamsi.h"
#include "algo/shabal/sph_shabal.h"
#include "algo/whirlpool/sph_whirlpool.h"
#include "algo/sha/sph_sha2.h"
#if defined(__AES__)
#include "algo/echo/aes_ni/hash_api.h"
#include "algo/groestl/aes_ni/hash-groestl.h"
#include "algo/fugue/fugue-aesni.h"
#else
#include "algo/echo/sph_echo.h"
#include "algo/groestl/sph_groestl.h"
#include "algo/fugue/sph_fugue.h"
#endif
// Config
#define MINOTAUR_ALGO_COUNT 16
typedef struct TortureNode TortureNode;
typedef struct TortureGarden TortureGarden;
// Graph of hash algos plus SPH contexts
struct TortureGarden
{
#if defined(__AES__)
hashState_echo echo;
hashState_groestl groestl;
hashState_fugue fugue;
#else
sph_echo512_context echo;
sph_groestl512_context groestl;
sph_fugue512_context fugue;
#endif
sph_blake512_context blake;
sph_bmw512_context bmw;
sph_skein512_context skein;
sph_jh512_context jh;
sph_keccak512_context keccak;
hashState_luffa luffa;
cubehashParam cube;
shavite512_context shavite;
hashState_sd simd;
sph_hamsi512_context hamsi;
sph_shabal512_context shabal;
sph_whirlpool_context whirlpool;
sph_sha512_context sha512;
struct TortureNode {
unsigned int algo;
TortureNode *child[2];
} nodes[22];
} __attribute__ ((aligned (64)));
// Produce a 64-byte hash of a 64-byte input, using the given TortureGarden's contexts and the selected algo index
static void get_hash( void *output, const void *input, TortureGarden *garden,
unsigned int algo )
{
unsigned char hash[64] __attribute__ ((aligned (64)));
switch (algo) {
case 0:
sph_blake512_init(&garden->blake);
sph_blake512(&garden->blake, input, 64);
sph_blake512_close(&garden->blake, hash);
break;
case 1:
sph_bmw512_init(&garden->bmw);
sph_bmw512(&garden->bmw, input, 64);
sph_bmw512_close(&garden->bmw, hash);
break;
case 2:
cubehashInit( &garden->cube, 512, 16, 32 );
cubehashUpdateDigest( &garden->cube, (byte*)hash,
(const byte*)input, 64 );
break;
case 3:
#if defined(__AES__)
echo_full( &garden->echo, (BitSequence *)hash, 512,
(const BitSequence *)input, 64 );
#else
sph_echo512_init(&garden->echo);
sph_echo512(&garden->echo, input, 64);
sph_echo512_close(&garden->echo, hash);
#endif
break;
case 4:
#if defined(__AES__)
fugue512_full( &garden->fugue, hash, input, 64 );
#else
sph_fugue512_full( &garden->fugue, hash, input, 64 );
#endif
break;
case 5:
#if defined(__AES__)
groestl512_full( &garden->groestl, (char*)hash, (char*)input, 512 );
#else
sph_groestl512_init(&garden->groestl);
sph_groestl512(&garden->groestl, input, 64);
sph_groestl512_close(&garden->groestl, hash);
#endif
break;
case 6:
sph_hamsi512_init(&garden->hamsi);
sph_hamsi512(&garden->hamsi, input, 64);
sph_hamsi512_close(&garden->hamsi, hash);
break;
case 7:
sph_sha512_init( &garden->sha512 );
sph_sha512( &garden->sha512, input, 64 );
sph_sha512_close( &garden->sha512, hash );
break;
case 8:
sph_jh512_init(&garden->jh);
sph_jh512(&garden->jh, input, 64);
sph_jh512_close(&garden->jh, hash);
break;
case 9:
sph_keccak512_init(&garden->keccak);
sph_keccak512(&garden->keccak, input, 64);
sph_keccak512_close(&garden->keccak, hash);
break;
case 10:
init_luffa( &garden->luffa, 512 );
update_and_final_luffa( &garden->luffa, (BitSequence*)hash,
(const BitSequence*)input, 64 );
break;
case 11:
sph_shabal512_init(&garden->shabal);
sph_shabal512(&garden->shabal, input, 64);
sph_shabal512_close(&garden->shabal, hash);
break;
case 12:
sph_shavite512_init(&garden->shavite);
sph_shavite512(&garden->shavite, input, 64);
sph_shavite512_close(&garden->shavite, hash);
break;
case 13:
init_sd( &garden->simd, 512 );
update_final_sd( &garden->simd, (BitSequence *)hash,
(const BitSequence*)input, 512 );
break;
case 14:
sph_skein512_init(&garden->skein);
sph_skein512(&garden->skein, input, 64);
sph_skein512_close(&garden->skein, hash);
break;
case 15:
sph_whirlpool_init(&garden->whirlpool);
sph_whirlpool(&garden->whirlpool, input, 64);
sph_whirlpool_close(&garden->whirlpool, hash);
break;
}
memcpy(output, hash, 64);
}
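Since every case re-initializes its context before use, get_hash calls chain freely; for example, running a buffer through Blake then Keccak (indexes 0 and 9 in the switch above):

unsigned char buf[64] __attribute__ ((aligned (64))) = {0};
get_hash( buf, buf, &garden, 0 );   // Blake-512 of the 64-byte buffer
get_hash( buf, buf, &garden, 9 );   // Keccak-512 of the Blake digest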
static __thread TortureGarden garden;
bool initialize_torture_garden()
{
// Create torture garden nodes. Note that both sides of 19 and 20 lead to 21, and 21 has no children (to make traversal complete).
garden.nodes[ 0].child[0] = &garden.nodes[ 1];
garden.nodes[ 0].child[1] = &garden.nodes[ 2];
garden.nodes[ 1].child[0] = &garden.nodes[ 3];
garden.nodes[ 1].child[1] = &garden.nodes[ 4];
garden.nodes[ 2].child[0] = &garden.nodes[ 5];
garden.nodes[ 2].child[1] = &garden.nodes[ 6];
garden.nodes[ 3].child[0] = &garden.nodes[ 7];
garden.nodes[ 3].child[1] = &garden.nodes[ 8];
garden.nodes[ 4].child[0] = &garden.nodes[ 9];
garden.nodes[ 4].child[1] = &garden.nodes[10];
garden.nodes[ 5].child[0] = &garden.nodes[11];
garden.nodes[ 5].child[1] = &garden.nodes[12];
garden.nodes[ 6].child[0] = &garden.nodes[13];
garden.nodes[ 6].child[1] = &garden.nodes[14];
garden.nodes[ 7].child[0] = &garden.nodes[15];
garden.nodes[ 7].child[1] = &garden.nodes[16];
garden.nodes[ 8].child[0] = &garden.nodes[15];
garden.nodes[ 8].child[1] = &garden.nodes[16];
garden.nodes[ 9].child[0] = &garden.nodes[15];
garden.nodes[ 9].child[1] = &garden.nodes[16];
garden.nodes[10].child[0] = &garden.nodes[15];
garden.nodes[10].child[1] = &garden.nodes[16];
garden.nodes[11].child[0] = &garden.nodes[17];
garden.nodes[11].child[1] = &garden.nodes[18];
garden.nodes[12].child[0] = &garden.nodes[17];
garden.nodes[12].child[1] = &garden.nodes[18];
garden.nodes[13].child[0] = &garden.nodes[17];
garden.nodes[13].child[1] = &garden.nodes[18];
garden.nodes[14].child[0] = &garden.nodes[17];
garden.nodes[14].child[1] = &garden.nodes[18];
garden.nodes[15].child[0] = &garden.nodes[19];
garden.nodes[15].child[1] = &garden.nodes[20];
garden.nodes[16].child[0] = &garden.nodes[19];
garden.nodes[16].child[1] = &garden.nodes[20];
garden.nodes[17].child[0] = &garden.nodes[19];
garden.nodes[17].child[1] = &garden.nodes[20];
garden.nodes[18].child[0] = &garden.nodes[19];
garden.nodes[18].child[1] = &garden.nodes[20];
garden.nodes[19].child[0] = &garden.nodes[21];
garden.nodes[19].child[1] = &garden.nodes[21];
garden.nodes[20].child[0] = &garden.nodes[21];
garden.nodes[20].child[1] = &garden.nodes[21];
garden.nodes[21].child[0] = NULL;
garden.nodes[21].child[1] = NULL;
return true;
}
// Produce a 32-byte hash from 80-byte input data
int minotaur_hash( void *output, const void *input, int thr_id )
{
unsigned char hash[64] __attribute__ ((aligned (64)));
// Find initial sha512 hash
sph_sha512_init( &garden.sha512 );
sph_sha512( &garden.sha512, input, 80 );
sph_sha512_close( &garden.sha512, hash );
// Algo 6 (Hamsi) is very slow, so it's faster to skip any nonce that needs
// it. Only the first and last functions in the chain are known at this
// point, so abort if either of those is Hamsi; assuming a uniform hash,
// that skips roughly 1 - (15/16)^2, about 12% of nonces.
if ( ( ( hash[ 0] % MINOTAUR_ALGO_COUNT ) == 6 )
|| ( ( hash[21] % MINOTAUR_ALGO_COUNT ) == 6 ) )
return 0;
// Assign algos to torture garden nodes based on initial hash
for ( int i = 0; i < 22; i++ )
garden.nodes[i].algo = hash[i] % MINOTAUR_ALGO_COUNT;
// Send the initial hash through the torture garden
TortureNode *node = &garden.nodes[0];
while ( node )
{
get_hash( hash, hash, &garden, node->algo );
node = node->child[ hash[63] & 1 ];
}
memcpy( output, hash, 32 );
return 1;
}
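The graph built above is strictly layered: {0}, {1,2}, {3..6}, {7..14}, {15..18}, {19,20}, {21}. Every traversal therefore visits exactly 7 nodes regardless of the branch bits, so each non-skipped nonce costs one SHA-512 plus 7 algorithm invocations. A quick way to check the depth (hypothetical helper; following child[0] suffices because both children always sit on the same layer):

static int garden_depth( const TortureNode *node )
{
    int depth = 0;
    while ( node )
    {
        depth++;
        node = node->child[0];
    }
    return depth;   // 7 for the graph built in initialize_torture_garden()
}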
int scanhash_minotaur( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t edata[20] __attribute__((aligned(64)));
uint32_t hash[8] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 1;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
uint64_t skipped = 0;
mm128_bswap32_80( edata, pdata );
do
{
edata[19] = n;
if ( likely( algo_gate.hash( hash, edata, thr_id ) ) )
{
if ( unlikely( valid_hash( hash, ptarget ) && !bench ) )
{
pdata[19] = bswap_32( n );
submit_solution( work, hash, mythr );
}
}
else skipped++;
n++;
} while ( n < last_nonce && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce - skipped;
pdata[19] = n;
return 0;
}
bool register_minotaur_algo( algo_gate_t* gate )
{
gate->scanhash = (void*)&scanhash_minotaur;
gate->hash = (void*)&minotaur_hash;
gate->optimizations = SSE2_OPT | AES_OPT | AVX2_OPT | AVX512_OPT;
gate->miner_thread_init = (void*)&initialize_torture_garden;
return true;
};

View File

@@ -16,8 +16,7 @@
#if defined (X16R_8WAY)
// Perform midstate prehash of hash functions with block size <= 64 bytes
// and interleave 4x64 before nonce insertion for final hash.
// Perform midstate prehash of hash functions with block size <= 72 bytes.
void x16r_8way_prehash( void *vdata, void *pdata )
{
@@ -34,6 +33,11 @@ void x16r_8way_prehash( void *vdata, void *pdata )
jh512_8way_init( &x16r_ctx.jh );
jh512_8way_update( &x16r_ctx.jh, vdata, 64 );
break;
case KECCAK:
mm512_bswap32_intrlv80_8x64( vdata, pdata );
keccak512_8way_init( &x16r_ctx.keccak );
keccak512_8way_update( &x16r_ctx.keccak, vdata, 72 );
break;
case SKEIN:
mm512_bswap32_intrlv80_8x64( vdata, pdata );
skein512_8way_init( &x16r_ctx.skein );
@@ -173,13 +177,13 @@ int x16r_8way_hash_generic( void* output, const void* input, int thrid )
hash7, vhash );
break;
case KECCAK:
keccak512_8way_init( &ctx.keccak );
if ( i == 0 )
keccak512_8way_update( &ctx.keccak, input, size );
if ( i == 0 )
keccak512_8way_update( &ctx.keccak, input + (72<<3), 8 );
else
{
intrlv_8x64( vhash, in0, in1, in2, in3, in4, in5, in6, in7,
size<<3 );
keccak512_8way_init( &ctx.keccak );
keccak512_8way_update( &ctx.keccak, vhash, size );
}
keccak512_8way_close( &ctx.keccak, vhash );
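The Keccak hunks implement the midstate prehash described by the updated comment: Keccak-512 absorbs input in 72-byte (576-bit) rate blocks, so the first 72 bytes of the 80-byte header are nonce-independent and can be absorbed once per job in the prehash; per nonce only the last 8 bytes are fed in, with input + (72<<3) being the 72-byte offset scaled for the 8-way interleaved layout. A one-lane sketch of the same split using the sph API (the helper and midstate names are hypothetical):

#include "algo/keccak/sph_keccak.h"

static sph_keccak512_context keccak_mid;                 // per-job midstate

static void keccak_job_prehash( const void *header )     // once per job
{
    sph_keccak512_init( &keccak_mid );
    sph_keccak512( &keccak_mid, header, 72 );            // one full rate block
}

static void keccak_nonce_hash( void *hash, const void *header )  // per nonce
{
    sph_keccak512_context ctx = keccak_mid;              // restore midstate
    sph_keccak512( &ctx, (const char*)header + 72, 8 );  // nonce-bearing tail
    sph_keccak512_close( &ctx, hash );
}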
@@ -347,14 +351,14 @@ int x16r_8way_hash_generic( void* output, const void* input, int thrid )
hash7, vhash );
break;
case FUGUE:
sph_fugue512_full( &ctx.fugue, hash0, in0, size );
sph_fugue512_full( &ctx.fugue, hash1, in1, size );
sph_fugue512_full( &ctx.fugue, hash2, in2, size );
sph_fugue512_full( &ctx.fugue, hash3, in3, size );
sph_fugue512_full( &ctx.fugue, hash4, in4, size );
sph_fugue512_full( &ctx.fugue, hash5, in5, size );
sph_fugue512_full( &ctx.fugue, hash6, in6, size );
sph_fugue512_full( &ctx.fugue, hash7, in7, size );
fugue512_full( &ctx.fugue, hash0, in0, size );
fugue512_full( &ctx.fugue, hash1, in1, size );
fugue512_full( &ctx.fugue, hash2, in2, size );
fugue512_full( &ctx.fugue, hash3, in3, size );
fugue512_full( &ctx.fugue, hash4, in4, size );
fugue512_full( &ctx.fugue, hash5, in5, size );
fugue512_full( &ctx.fugue, hash6, in6, size );
fugue512_full( &ctx.fugue, hash7, in7, size );
break;
case SHABAL:
intrlv_8x32( vhash, in0, in1, in2, in3, in4, in5, in6, in7,
@@ -490,6 +494,7 @@ int scanhash_x16r_8way( struct work *work, uint32_t max_nonce,
{
x16_r_s_getAlgoString( (const uint8_t*)bedata1, x16r_hash_order );
s_ntime = ntime;
if ( opt_debug && !thr_id )
applog( LOG_INFO, "hash order %s (%08x)", x16r_hash_order, ntime );
}
@@ -533,6 +538,11 @@ void x16r_4way_prehash( void *vdata, void *pdata )
jh512_4way_init( &x16r_ctx.jh );
jh512_4way_update( &x16r_ctx.jh, vdata, 64 );
break;
case KECCAK:
mm256_bswap32_intrlv80_4x64( vdata, pdata );
keccak512_4way_init( &x16r_ctx.keccak );
keccak512_4way_update( &x16r_ctx.keccak, vdata, 72 );
break;
case SKEIN:
mm256_bswap32_intrlv80_4x64( vdata, pdata );
skein512_4way_prehash64( &x16r_ctx.skein, vdata );
@@ -619,11 +629,20 @@ int x16r_4way_hash_generic( void* output, const void* input, int thrid )
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
break;
case GROESTL:
#if defined(__VAES__)
intrlv_2x128( vhash, in0, in1, size<<3 );
groestl512_2way_full( &ctx.groestl, vhash, vhash, size );
dintrlv_2x128_512( hash0, hash1, vhash );
intrlv_2x128( vhash, in2, in3, size<<3 );
groestl512_2way_full( &ctx.groestl, vhash, vhash, size );
dintrlv_2x128_512( hash2, hash3, vhash );
#else
groestl512_full( &ctx.groestl, (char*)hash0, (char*)in0, size<<3 );
groestl512_full( &ctx.groestl, (char*)hash1, (char*)in1, size<<3 );
groestl512_full( &ctx.groestl, (char*)hash2, (char*)in2, size<<3 );
groestl512_full( &ctx.groestl, (char*)hash3, (char*)in3, size<<3 );
break;
#endif
break;
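The new __VAES__ path here, like the SHAVITE and ECHO cases below, follows one pattern: interleave two lanes at 128-bit granularity, run a 2-way kernel over the packed buffer, then de-interleave. A minimal sketch of the lane layout those calls imply (an assumption about intrlv_2x128's semantics, not the tree's implementation):

#include <stdint.h>

// Assumed layout: 128-bit words from lanes a and b alternate in dst;
// bitlen is the per-lane input length in bits.
static void intrlv_2x128_sketch( void *dst, const void *a, const void *b,
                                 int bitlen )
{
    uint64_t *d = (uint64_t*)dst;
    const uint64_t *s0 = (const uint64_t*)a;
    const uint64_t *s1 = (const uint64_t*)b;
    for ( int i = 0; i < bitlen/128; i++ )
    {
        d[4*i+0] = s0[2*i];   d[4*i+1] = s0[2*i+1];   // lane 0, word i
        d[4*i+2] = s1[2*i];   d[4*i+3] = s1[2*i+1];   // lane 1, word i
    }
}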
case JH:
if ( i == 0 )
jh512_4way_update( &ctx.jh, input + (64<<2), 16 );
@@ -637,12 +656,12 @@ int x16r_4way_hash_generic( void* output, const void* input, int thrid )
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
break;
case KECCAK:
keccak512_4way_init( &ctx.keccak );
if ( i == 0 )
keccak512_4way_update( &ctx.keccak, input, size );
if ( i == 0 )
keccak512_4way_update( &ctx.keccak, input + (72<<2), 8 );
else
{
intrlv_4x64( vhash, in0, in1, in2, in3, size<<3 );
keccak512_4way_init( &ctx.keccak );
keccak512_4way_update( &ctx.keccak, vhash, size );
}
keccak512_4way_close( &ctx.keccak, vhash );
@@ -711,11 +730,20 @@ int x16r_4way_hash_generic( void* output, const void* input, int thrid )
}
break;
case SHAVITE:
#if defined(__VAES__)
intrlv_2x128( vhash, in0, in1, size<<3 );
shavite512_2way_full( &ctx.shavite, vhash, vhash, size );
dintrlv_2x128_512( hash0, hash1, vhash );
intrlv_2x128( vhash, in2, in3, size<<3 );
shavite512_2way_full( &ctx.shavite, vhash, vhash, size );
dintrlv_2x128_512( hash2, hash3, vhash );
#else
shavite512_full( &ctx.shavite, hash0, in0, size );
shavite512_full( &ctx.shavite, hash1, in1, size );
shavite512_full( &ctx.shavite, hash2, in2, size );
shavite512_full( &ctx.shavite, hash3, in3, size );
break;
#endif
break;
case SIMD:
intrlv_2x128( vhash, in0, in1, size<<3 );
simd512_2way_full( &ctx.simd, vhash, vhash, size );
@@ -725,6 +753,14 @@ int x16r_4way_hash_generic( void* output, const void* input, int thrid )
dintrlv_2x128_512( hash2, hash3, vhash );
break;
case ECHO:
#if defined(__VAES__)
intrlv_2x128( vhash, in0, in1, size<<3 );
echo_2way_full( &ctx.echo, vhash, 512, vhash, size );
dintrlv_2x128_512( hash0, hash1, vhash );
intrlv_2x128( vhash, in2, in3, size<<3 );
echo_2way_full( &ctx.echo, vhash, 512, vhash, size );
dintrlv_2x128_512( hash2, hash3, vhash );
#else
echo_full( &ctx.echo, (BitSequence *)hash0, 512,
(const BitSequence *)in0, size );
echo_full( &ctx.echo, (BitSequence *)hash1, 512,
@@ -733,7 +769,8 @@ int x16r_4way_hash_generic( void* output, const void* input, int thrid )
(const BitSequence *)in2, size );
echo_full( &ctx.echo, (BitSequence *)hash3, 512,
(const BitSequence *)in3, size );
break;
#endif
break;
case HAMSI:
if ( i == 0 )
hamsi512_4way_update( &ctx.hamsi, input + (64<<2), 16 );
@@ -747,10 +784,10 @@ int x16r_4way_hash_generic( void* output, const void* input, int thrid )
dintrlv_4x64_512( hash0, hash1, hash2, hash3, vhash );
break;
case FUGUE:
sph_fugue512_full( &ctx.fugue, hash0, in0, size );
sph_fugue512_full( &ctx.fugue, hash1, in1, size );
sph_fugue512_full( &ctx.fugue, hash2, in2, size );
sph_fugue512_full( &ctx.fugue, hash3, in3, size );
fugue512_full( &ctx.fugue, hash0, in0, size );
fugue512_full( &ctx.fugue, hash1, in1, size );
fugue512_full( &ctx.fugue, hash2, in2, size );
fugue512_full( &ctx.fugue, hash3, in3, size );
break;
case SHABAL:
intrlv_4x32( vhash, in0, in1, in2, in3, size<<3 );
@@ -856,7 +893,7 @@ int scanhash_x16r_4way( struct work *work, uint32_t max_nonce,
x16_r_s_getAlgoString( (const uint8_t*)bedata1, x16r_hash_order );
s_ntime = ntime;
if ( opt_debug && !thr_id )
applog( LOG_INFO, "hash order %s (%08x)", x16r_hash_order, ntime );
applog( LOG_INFO, "hash order %s (%08x)", x16r_hash_order, ntime );
}
x16r_4way_prehash( vdata, pdata );

Some files were not shown because too many files have changed in this diff.