Compare commits

...

114 Commits

Author     SHA1        Message              Date
Jay D Dee  47e24b50e8  v24.4                2024-07-01 00:33:19 -04:00
Jay D Dee  c47c4a8885  v24.3                2024-05-28 18:20:19 -04:00
Jay D Dee  042d13d1e1  v24.2                2024-05-20 23:08:50 -04:00
Jay D Dee  4f930574cc  v24.1                2024-04-16 21:31:35 -04:00
Jay D Dee  9d3a46c355  v23.15               2023-11-30 14:36:47 -05:00
Jay D Dee  4e3f1b926f  v23.14               2023-11-28 00:58:43 -05:00
Jay D Dee  045b42babf  v23.13               2023-11-21 14:18:15 -05:00
Jay D Dee  fc696dbbe5  v23.12               2023-11-20 11:51:57 -05:00
Jay D Dee  f3fde95f27  v23.10               2023-11-15 11:05:41 -05:00
Jay D Dee  0a78013cbe  v23.9                2023-11-12 18:48:50 -05:00
Jay D Dee  26b9429589  v23.8                2023-11-11 16:48:57 -05:00
Jay D Dee  e043698442  v23.7                2023-11-07 04:59:44 -05:00
Jay D Dee  46dca7a493  v23.6                2023-10-28 16:22:14 -04:00
Jay D Dee  160608cce5  v23.5                2023-10-25 20:36:20 -04:00
Jay D Dee  31c4dedf59  v3.23.4              2023-10-06 22:18:09 -04:00
Jay D Dee  bc5a5c6df8  v3.23.3              2023-09-28 18:43:18 -04:00
Jay D Dee  be88afc349  v3.23.2              2023-09-21 12:34:06 -04:00
Jay D Dee  d6b5750362  v3.23.1              2023-09-13 11:48:52 -04:00
Jay D Dee  4378d2f841  v3.23.0              2023-08-30 20:15:48 -04:00
Jay D Dee  57a6b7b58b  v3.22.3              2023-06-14 11:07:40 -04:00
Jay D Dee  de564ccbde  v3.22.2              2023-04-06 13:38:37 -04:00
Jay D Dee  fcd7727b0d  v3.22.1              2023-03-24 18:29:42 -04:00
Jay D Dee  3dd6787531  v3.22.0              2023-03-21 17:12:51 -04:00
Jay D Dee  cae1ce2ab7  v3.21.5              2023-03-15 12:27:04 -04:00
Jay D Dee  7a91c41d74  v3.21.4              2023-03-13 14:54:38 -04:00
Jay D Dee  c6bc9d67fb  v3.21.3 Unreleased   2023-03-13 03:20:13 -04:00
Jay D Dee  b339450898  v3.21.3              2023-03-11 14:54:49 -05:00
Jay D Dee  fb93160641  v3.21.2              2023-03-03 12:38:31 -05:00
Jay D Dee  520d4d5384  v3.21.1              2023-02-08 22:11:05 -05:00
Jay D Dee  da7030faa8  v3.21.0              2022-12-21 13:09:14 -05:00
Jay D Dee  bd84f199fe  v3.20.3              2022-10-21 23:12:18 -04:00
Jay D Dee  58030e2788  v3.20.2              2022-08-01 20:21:05 -04:00
Jay D Dee  1321ac474c  v3.20.1              2022-07-26 18:36:40 -04:00
Jay D Dee  40d07c0097  v3.20.0              2022-07-17 13:30:50 -04:00
Jay D Dee  f552f2b1e8  v3.19.9              2022-07-10 11:04:00 -04:00
Jay D Dee  26b8927632  v3.19.8              2022-05-27 18:12:30 -04:00
Jay D Dee  db76d3865f  v3.19.7              2022-04-02 12:44:57 -04:00
Jay D Dee  5b678d2481  v3.19.6              2022-02-21 23:14:24 -05:00
Jay D Dee  90137b391e  v3.19.5              2022-01-30 20:59:54 -05:00
Jay D Dee  8727d79182  v3.19.4              2022-01-12 21:08:25 -05:00
Jay D Dee  17ccbc328f  v3.19.3              2022-01-07 12:07:38 -05:00
Jay D Dee  0e3945ddb5  v3.19.2              2021-12-30 16:28:24 -05:00
Jay D Dee  7d2ef7973d  v3.19.1              2021-11-20 00:46:01 -05:00
Jay D Dee  e6fd9b1d69  v3.19.0              2021-11-10 21:33:44 -05:00
Jay D Dee  1a234cbe53  v3.18.2              2021-10-19 22:35:36 -04:00
Jay D Dee  47cc5dcff5  v3.18.1              2021-10-10 22:50:19 -04:00
Jay D Dee  2cd1507c2e  v3.7.4               2021-09-29 17:31:16 -04:00
Jay D Dee  9b905fccc8  v3.17.1              2021-07-26 15:01:37 -04:00
Jay D Dee  92b3733925  v3.17.0              2021-07-15 20:30:44 -04:00
Jay D Dee  19cc88d102  v3.16.5              2021-06-26 12:27:44 -04:00
Jay D Dee  a053690170  v3.16.4              2021-06-23 21:52:42 -04:00
Jay D Dee  3c5e8921b7  v3.16.3              2021-05-06 14:55:03 -04:00
Jay D Dee  f3333b0070  v3.16.2              2021-04-08 18:09:31 -04:00
Jay D Dee  902ec046dd  v3.16.1              2021-03-24 18:24:20 -04:00
Jay D Dee  d0b4941321  v3.16.0              2021-03-19 15:45:32 -04:00
Jay D Dee  40089428c5  v3.15.7              2021-03-08 22:44:44 -05:00
Jay D Dee  dc6b007a18  v3.15.6              2021-02-12 15:16:53 -05:00
Jay D Dee  06bfaa1249  v3.15.5              2020-12-21 13:25:33 -05:00
Jay D Dee  6566e99a13  v3.15.4              2020-12-15 13:15:02 -05:00
Jay D Dee  ccfccbadd5  v3.15.3              2020-12-10 18:23:49 -05:00
Jay D Dee  45ecd0de14  v3.15.2              2020-11-15 17:57:06 -05:00
Jay D Dee  4fa8fcea8b  v3.15.1              2020-11-09 13:19:05 -05:00
Jay D Dee  c85fb3842b  v3.15.0              2020-10-02 10:48:37 -04:00
Jay D Dee  cdd587537e  v3.14.3              2020-06-18 17:30:26 -04:00
Jay D Dee  51a1d91abd  v3.14.2              2020-05-30 21:20:44 -04:00
Jay D Dee  13563e2598  v3.14.1              2020-05-21 13:00:29 -04:00
Jay D Dee  9571f85d53  v3.14.0              2020-05-20 13:56:35 -04:00
Jay D Dee  0e69756634  v3.13.2-segwit-test  2020-05-18 18:17:27 -04:00
Jay D Dee  9653bca1e2  v3.13.1.1            2020-05-17 19:21:37 -04:00
Jay D Dee  1c0719e8a4  v3.13.1              2020-05-10 21:34:55 -04:00
Jay D Dee  8b4b4dc613  v3.13.0.1            2020-05-07 17:57:04 -04:00
Jay D Dee  e76feaced8  v3.13.0              2020-05-06 00:53:43 -04:00
Jay D Dee  5e088d00d0  v3.12.8.2            2020-04-24 21:18:56 -04:00
Jay D Dee  972d4d70db  v3.12.8.1            2020-04-17 16:12:45 -04:00
Jay D Dee  e96a6bd699  v3.12.8              2020-04-09 12:56:18 -04:00
Jay D Dee  fb9163185a  v3.12.7              2020-03-20 16:30:12 -04:00
Jay D Dee  6e8b8ed34f  v3.12.6.1            2020-03-07 14:11:06 -05:00
Jay D Dee  c0aadbcc99  v3.12.6              2020-03-05 18:43:20 -05:00
Jay D Dee  3da149418a  v3.12.5              2020-03-01 13:18:17 -05:00
Jay D Dee  720610cce5  v3.12.4.6            2020-02-28 18:20:32 -05:00
Jay D Dee  cedcf4d070  v3.12.4.5            2020-02-28 02:42:22 -05:00
Jay D Dee  81b50c3c71  v3.12.4.4            2020-02-25 14:07:32 -05:00
Jay D Dee  0e1e88f53e  v3.12.4.3            2020-02-24 21:35:19 -05:00
Jay D Dee  45c77a5c81  v3.12.4.2            2020-02-23 15:31:06 -05:00
Jay D Dee  dbce7e0721  v3.12.4.1            2020-02-22 18:06:39 -05:00
Jay D Dee  6d66051de6  v3.12.4              2020-02-21 16:34:53 -05:00
Jay D Dee  b93be8816a  v3.12.3.1            2020-02-18 12:05:47 -05:00
Jay D Dee  19b0ac6d5c  v3.12.3              2020-02-13 04:25:33 -05:00
Jay D Dee  3da2b958cf  v3.12.2              2020-02-09 13:30:40 -05:00
Jay D Dee  dc2f8d81d3  v3.12.1              2020-02-07 20:18:20 -05:00
Jay D Dee  fc97ef174a  v3.12.0.1            2020-02-06 22:50:20 -05:00
Jay D Dee  13523a12f9  v3.12.0              2020-02-05 22:50:58 -05:00
Jay D Dee  1b76cee239  v3.11.9              2020-02-04 01:31:59 -05:00
Jay D Dee  0681ca996d  v3.11.8              2020-01-30 03:47:11 -05:00
Jay D Dee  88f81fda0b  v3.11.7              2020-01-26 04:33:39 -05:00
Jay D Dee  103e6ad36c  v3.11.6              2020-01-23 00:11:08 -05:00
Jay D Dee  1a7a573675  v3.11.5              2020-01-18 15:14:27 -05:00
Jay D Dee  70089d1224  v3.11.2              2020-01-08 14:44:47 -05:00
Jay D Dee  3572cb53c4  v3.11.0              2020-01-02 23:54:08 -05:00
Jay D Dee  241bc26767  v3.10.6              2019-12-25 01:26:26 -05:00
Jay D Dee  c65b0ff7a6  v3.10.5              2019-12-21 13:19:29 -05:00
Jay D Dee  a17ff6f189  v3.10.2              2019-12-09 15:59:02 -05:00
Jay D Dee  73430b13b1  v3.10.1              2019-12-05 19:09:23 -05:00
Jay D Dee  40039386a0  v3.10.0              2019-12-03 12:26:11 -05:00
Jay D Dee  91ec6f1771  v3.9.11              2019-11-26 09:22:03 -05:00
Jay D Dee  a52c5eccf7  v3.9.10              2019-11-22 20:29:18 -05:00
Jay D Dee  86b889e1b0  v3.9.9.1             2019-10-24 14:11:26 -04:00
Jay D Dee  72330eb5a7  v3.9.9               2019-10-10 19:58:34 -04:00
Jay D Dee  789c8b70bc  v3.9.8.1             2019-10-01 14:17:36 -04:00
Jay D Dee  01550d94a2  v3.9.8               2019-09-26 22:37:26 -04:00
Jay D Dee  a042fb7612  v3.9.7               2019-08-03 10:39:54 -04:00
Jay D Dee  9d49e0be7a  v3.9.6.2             2019-07-30 10:16:43 -04:00
Jay D Dee  a51f59086b  v3.9.6.1             2019-07-18 19:46:57 -04:00
Jay D Dee  6f49ba09b7  v3.9.6               2019-07-17 17:54:38 -04:00
608 changed files with 101349 additions and 91164 deletions

AUTHORS

@@ -33,3 +33,6 @@ Jay D Dee
xcouiz@gmail.com
Cryply
Colin Percival
Alexander Peslyak

INSTALL_LINUX

@@ -1,12 +1,16 @@
These instructions may be out of date, see the Wiki for the latest...
https://github.com/JayDDee/cpuminer-opt/wiki/Compiling-from-source
Requirements:
1. Requirements:
---------------
Intel Core2 or newer, or AMD Steamroller or newer CPU. ARM CPUs are not
supported.
64 bit Linux operating system. Apple is not supported.
Building on linux prerequisites:
2. Building on linux prerequisites:
-----------------------------------
It is assumed users know how to install packages on their system and
are able to compile standard source packages. This is basic Linux and
@@ -20,49 +24,86 @@ http://askubuntu.com/questions/457526/how-to-install-cpuminer-in-ubuntu
Install any additional dependencies needed by cpuminer-opt. The list below
are some of the ones that may not be in the default install and need to
be installed manually. There may be others, read the error messages they
will give a clue as to the missing package.
be installed manually. There may be others, read the compiler error messages,
they will give a clue as to the missing package.
The following command should install everything you need on Debian based
distributions such as Ubuntu:
distributions such as Ubuntu. Fedora and other distributions may have similar
but different package names.
sudo apt-get install build-essential libssl-dev libcurl4-openssl-dev libjansson-dev libgmp-dev automake zlib1g-dev
build-essential (Development Tools package group on Fedora)
automake
libjansson-dev
libgmp-dev
libcurl4-openssl-dev
libssl-dev
lib-thread
zlib1g-dev
$ sudo apt-get install build-essential automake libssl-dev libcurl4-openssl-dev libjansson-dev libgmp-dev zlib1g-dev git
SHA support on AMD Ryzen CPUs requires gcc version 5 or higher and
openssl 1.1.0e or higher. Add one of the following, depending on the
compiler version, to CFLAGS:
"-march=native" or "-march=znver1" or "-msha".
openssl 1.1.0e or higher.
znver1 and znver2 should be recognized on most recent versions of GCC and
znver3 is available with GCC 11. GCC 11 also includes rocketlake support.
In the meantime here are some suggestions to compile with new CPUs:
"-march=native" is usually the best choice, used by build.sh.
"-march=znver2 -mvaes" can be used for Ryzen 5000 if znver3 is not recongized.
"-mcascadelake -msha" or
"-mcometlake -mavx512 -msha" can be used for Rocket Lake.
Features can also be added individually:
"-msha" adds support for HW accelerated sha256.
"-mavx512" adds support for 512 bit vectors
"-mvaes" add support for parallel AES
Additional instructions for static compilation can be found here:
https://lxadm.com/Static_compilation_of_cpuminer
Static builds should only be considered in a homogeneous HW and SW environment.
Local builds will always have the best performance and compatibility.
Extract cpuminer source.
3. Download cpuminer-opt
------------------------
tar xvzf cpuminer-opt-x.y.z.tar.gz
cd cpuminer-opt-x.y.z
Download the source code for the latest release from the official repository.
Run ./build.sh to build on Linux or execute the following commands.
https://github.com/JayDDee/cpuminer-opt/releases
./autogen.sh
CFLAGS="-O3 -march=native -Wall" ./configure --with-curl
make
Extract the source code.
Start mining.
$ tar xvzf cpuminer-opt-x.y.z.tar.gz
Alternatively it can be cloned from git.
$ git clone https://github.com/JayDDee/cpuminer-opt.git
4. Build cpuminer-opt
---------------------
It is recommended to build with default options; this will usually
produce the best results.
$ ./build.sh
or
$ ./autogen.sh
$ CFLAGS="-O3 -march=native -Wall" ./configure --with-curl
$ make -j n
n is the number of threads.
5. Start mining.
----------------
$ ./cpuminer -a algo -o url -u username -p password
./cpuminer -a algo -o url -u username -p password
Windows
-------
See also INSTALL_WINDOWS
The following procedure is obsolete and uses an old compiler.
Precompiled Windows binaries are built on a Linux host using Mingw
with a more recent compiler than the following Windows hosted procedure.

INSTALL_WINDOWS

@@ -1,173 +1,4 @@
Instructions for compiling cpuminer-opt for Windows.
Please consult the wiki for Windows compile instructions.
https://github.com/JayDDee/cpuminer-opt/wiki/Compiling-from-source
Windows compilation using Visual Studio is not supported. Mingw64 is
used on a Linux system (bare metal or virtual machine) to cross-compile
cpuminer-opt executable binaries for Windows.
These instructions were written for Debian and Ubuntu compatible distributions
but should work on other major distributions as well. However some of the
package names or file paths may be different.
It is assumed a Linux system is already available and running, and that the
user has enough Linux knowledge to find and install packages and follow these
instructions.
First it is a good idea to create a new user specifically for cross compiling.
It keeps all mingw stuff contained and isolated from the rest of the system.
Step by step...
1. Install necessary packages from the distribution's repositories.
Refer to Linux compile instructions and install required packages.
Additionally, install mingw-w64.
sudo apt-get install mingw-w64
2. Create a local library directory for packages to be compiled in the next
step. Recommended location is $HOME/usr/lib/
3. Download and build other packages for mingw that don't have a mingw64
version available in the repositories.
Download the following source code packages from their respective and
respected download locations, copy them to ~/usr/lib/ and uncompress them.
openssl
curl
gmp
In most cases the latest version is ok but it's safest to download
the same major and minor version as included in your distribution.
Run the following commands or follow the supplied instructions.
Do not run "make install" unless you are using ~/usr/lib, which isn't
recommended.
Some instructions insist on running "make check". If make check fails
it may still work, YMMV.
You can speed up "make" by using all CPU cores available with "-j n" where
n is the number of CPU threads you want to use.
openssl:
./Configure mingw64 shared --cross-compile-prefix=x86_64-w64-mingw32
make
curl:
./configure --with-winssl --with-winidn --host=x86_64-w64-mingw32
make
gmp:
./configure --host=x86_64-w64-mingw32
make
4. Tweak the environment.
This step is required every time you log in, or the commands can be added to
.bashrc.
Define some local variables to point to the local library.
export LOCAL_LIB="$HOME/usr/lib"
export LDFLAGS="-L$LOCAL_LIB/curl/lib/.libs -L$LOCAL_LIB/gmp/.libs -L$LOCAL_LIB/openssl"
export CONFIGURE_ARGS="--with-curl=$LOCAL_LIB/curl --with-crypto=$LOCAL_LIB/openssl --host=x86_64-w64-mingw32"
Create a release directory and copy some dll files previously built.
This can be done outside of cpuminer-opt and only needs to be done once.
If the release directory is in the cpuminer-opt directory it needs to be
recreated every time a source package is decompressed.
mkdir release
cp /usr/x86_64-w64-mingw32/lib/zlib1.dll release/
cp /usr/x86_64-w64-mingw32/lib/libwinpthread-1.dll release/
cp /usr/lib/gcc/x86_64-w64-mingw32/7.3-win32/libstdc++-6.dll release/
cp /usr/lib/gcc/x86_64-w64-mingw32/7.3-win32/libgcc_s_seh-1.dll release/
cp $LOCAL_LIB/openssl/libcrypto-1_1-x64.dll release/
cp $LOCAL_LIB/curl/lib/.libs/libcurl-4.dll release/
The following steps need to be done every time a new source package is
opened.
5. Download cpuminer-opt
Download the latest source code package of cpuminer-opt to your desired
location. .zip or .tar.gz, your choice.
https://github.com/JayDDee/cpuminer-opt/releases
Decompress and change to the cpuminer-opt directory.
6. Prepare to compile
Create a link to the locally compiled version of gmp.h
ln -s $LOCAL_LIB/gmp-version/gmp.h ./gmp.h
Edit configure.ac to fix the libpthread package name.
sed -i 's/"-lpthread"/"-lpthreadGC2"/g' configure.ac
7. Compile
You can use the default compile if you intend to use cpuminer-opt on the
same CPU and the virtual machine supports that architecture.
./build.sh
Otherwise you can compile manually while setting options in CFLAGS.
Some common options:
To compile for a specific CPU architecture:
CFLAGS="-O3 -march=znver1 -Wall" ./configure --with-curl
This will compile for AMD Ryzen.
You can compile more generically for a set of specific CPU features
if you know what features you want:
CFLAGS="-O3 -maes -msse4.2 -Wall" ./configure --with-curl
This will compile for an older CPU that does not have AVX.
You can find several examples in build-allarch.sh
If you have a CPU with more than 64 threads and Windows 7 or higher you
can enable the CPU Groups feature:
-D_WIN32_WINNT=0x0601
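A minimal sketch (not from the original instructions) of what that define
enables: with _WIN32_WINNT at 0x0601 the Windows headers expose the
processor-group APIs that let a process address more than 64 logical CPUs.

#define _WIN32_WINNT 0x0601
#include <windows.h>
#include <stdio.h>

int main(void)
{
   /* Number of processor groups and total logical CPUs across all groups. */
   WORD  groups  = GetActiveProcessorGroupCount();
   DWORD threads = GetActiveProcessorCount( ALL_PROCESSOR_GROUPS );
   printf( "%u processor groups, %lu logical CPUs\n",
           (unsigned)groups, (unsigned long)threads );
   return 0;
}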
Once you have run configure successfully run make with n CPU threads:
make -j n
Copy cpuminer.exe to the release directory, compress and copy the release
directory to a Windows system and run cpuminer.exe from the command line.
Run cpuminer
In a command window, change directories to the unzipped release folder.
To get a list of all options:
cpuminer.exe --help
Command options are specific to where you mine. Refer to the pool's
instructions on how to set them.

Makefile.am

@@ -16,54 +16,36 @@ bin_PROGRAMS = cpuminer
dist_man_MANS = cpuminer.1
cpuminer_SOURCES = \
dummy.cpp \
cpu-miner.c \
util.c \
uint256.cpp \
api.c \
sysinfos.c \
algo-gate-api.c\
crypto/oaes_lib.c \
crypto/c_keccak.c \
crypto/c_groestl.c \
crypto/c_blake256.c \
crypto/c_jh.c \
crypto/c_skein.c \
crypto/hash.c \
crypto/aesb.c \
crypto/magimath.cpp \
algo/argon2/argon2a/argon2a.c \
algo/argon2/argon2a/ar2/argon2.c \
algo/argon2/argon2a/ar2/opt.c \
algo/argon2/argon2a/ar2/cores.c \
algo/argon2/argon2a/ar2/ar2-scrypt-jane.c \
algo/argon2/argon2a/ar2/blake2b.c \
algo/argon2/argon2d/argon2d-gate.c \
algo/argon2/argon2d/blake2/blake2b.c \
algo/argon2/argon2d/argon2d/argon2.c \
algo/argon2/argon2d/argon2d/core.c \
algo/argon2/argon2d/argon2d/opt.c \
algo/argon2/argon2d/argon2d/argon2d_thread.c \
algo/argon2/argon2d/argon2d/encoding.c \
malloc-huge.c \
algo/argon2d/argon2d-gate.c \
algo/argon2d/blake2/blake2b.c \
algo/argon2d/argon2d/argon2.c \
algo/argon2d/argon2d/core.c \
algo/argon2d/argon2d/opt.c \
algo/argon2d/argon2d/argon2d_thread.c \
algo/argon2d/argon2d/encoding.c \
algo/blake/sph_blake.c \
algo/blake/blake256-hash-4way.c \
algo/blake/blake512-hash-4way.c \
algo/blake/blake256-hash.c \
algo/blake/blake512-hash.c \
algo/blake/blake-gate.c \
algo/blake/blake.c \
algo/blake/blake-4way.c \
algo/blake/sph_blake2b.c \
algo/blake/blake2b.c \
algo/blake/sph-blake2s.c \
algo/blake/blake2s-hash-4way.c \
algo/blake/blake2s-hash.c \
algo/blake/blake2s.c \
algo/blake/blake2s-gate.c \
algo/blake/blake2s-4way.c \
algo/blake/blake2b-hash.c \
algo/blake/blake2b.c \
algo/blake/blakecoin-gate.c \
algo/blake/mod_blakecoin.c \
algo/blake/blakecoin.c \
algo/blake/blakecoin-4way.c \
algo/blake/decred-gate.c \
algo/blake/decred.c \
algo/blake/decred-4way.c \
algo/blake/pentablake-gate.c \
algo/blake/pentablake-4way.c \
algo/blake/pentablake.c \
@@ -71,36 +53,33 @@ cpuminer_SOURCES = \
algo/bmw/bmw256-hash-4way.c \
algo/bmw/bmw512-hash-4way.c \
algo/bmw/bmw256.c \
algo/cryptonight/cryptolight.c \
algo/cryptonight/cryptonight-common.c\
algo/cryptonight/cryptonight-aesni.c\
algo/cryptonight/cryptonight.c\
algo/cubehash/sph_cubehash.c \
algo/bmw/bmw512-gate.c \
algo/bmw/bmw512.c \
algo/bmw/bmw512-4way.c \
algo/cubehash/cubehash_sse2.c\
algo/cubehash/cube-hash-2way.c \
algo/cubehash/sph_cubehash.c \
algo/echo/sph_echo.c \
algo/echo/echo-hash-4way.c \
algo/echo/aes_ni/hash.c\
algo/gost/sph_gost.c \
algo/groestl/groestl-gate.c \
algo/groestl/groestl512-hash-4way.c \
algo/groestl/groestl256-hash-4way.c \
algo/groestl/sph_groestl.c \
algo/groestl/groestl.c \
algo/groestl/groestl-4way.c \
algo/groestl/myrgr-gate.c \
algo/groestl/myrgr-4way.c \
algo/groestl/myr-groestl.c \
algo/groestl/aes_ni/hash-groestl.c \
algo/groestl/aes_ni/hash-groestl256.c \
algo/fugue/sph_fugue.c \
algo/fugue/fugue-aesni.c \
algo/hamsi/sph_hamsi.c \
algo/hamsi/hamsi-hash-4way.c \
algo/haval/haval.c \
algo/haval/haval-hash-4way.c \
algo/heavy/sph_hefty1.c \
algo/heavy/heavy.c \
algo/heavy/bastion.c \
algo/hodl/aes.c \
algo/hodl/hodl-gate.c \
algo/hodl/hodl-wolf.c \
algo/hodl/sha512_avx.c \
algo/hodl/sha512_avx2.c \
algo/jh/sph_jh.c \
algo/jh/jh-hash-4way.c \
algo/jh/jha-gate.c \
@@ -111,35 +90,36 @@ cpuminer_SOURCES = \
algo/keccak/keccak-hash-4way.c \
algo/keccak/keccak-4way.c\
algo/keccak/keccak-gate.c \
algo/keccak/sse2/keccak.c \
algo/luffa/sph_luffa.c \
algo/luffa/luffa.c \
algo/keccak/sha3d-4way.c \
algo/keccak/sha3d.c \
algo/lanehash/lane.c \
algo/luffa/luffa_for_sse2.c \
algo/luffa/luffa-hash-2way.c \
algo/luffa/sph_luffa.c \
algo/lyra2/lyra2.c \
algo/lyra2/sponge.c \
algo/lyra2/sponge-2way.c \
algo/lyra2/lyra2-hash-2way.c \
algo/lyra2/lyra2-gate.c \
algo/lyra2/lyra2rev2.c \
algo/lyra2/lyra2rev2-4way.c \
algo/lyra2/lyra2rev3.c \
algo/lyra2/lyra2rev3-4way.c \
algo/lyra2/lyra2re.c \
algo/lyra2/lyra2z.c \
algo/lyra2/lyra2z-4way.c \
algo/lyra2/lyra2z330.c \
algo/lyra2/lyra2h.c \
algo/lyra2/lyra2h-4way.c \
algo/lyra2/allium-4way.c \
algo/lyra2/allium.c \
algo/lyra2/phi2-4way.c \
algo/lyra2/phi2.c \
algo/m7m.c \
algo/m7m/m7m.c \
algo/nist5/nist5-gate.c \
algo/nist5/nist5-4way.c \
algo/nist5/nist5.c \
algo/nist5/zr5.c \
algo/panama/panama-hash-4way.c \
algo/panama/sph_panama.c \
algo/radiogatun/sph_radiogatun.c \
algo/quark/quark-gate.c \
algo/quark/quark.c \
algo/quark/quark-4way.c \
@@ -161,28 +141,35 @@ cpuminer_SOURCES = \
algo/ripemd/lbry.c \
algo/ripemd/lbry-4way.c \
algo/scrypt/scrypt.c \
algo/scrypt/scrypt-core-4way.c \
algo/scrypt/neoscrypt.c \
algo/scrypt/pluck.c \
algo/scryptjane/scrypt-jane.c \
algo/sha/sha1.c \
algo/sha/sha1-hash.c \
algo/sha/sha256-hash.c \
algo/sha/sph_sha2.c \
algo/sha/sph_sha2big.c \
algo/sha/sha2-hash-4way.c \
algo/sha/sha256_hash_11way.c \
algo/sha/sha2.c \
algo/sha/sha256-hash-4way.c \
algo/sha/sha512-hash-4way.c \
algo/sha/hmac-sha256-hash.c \
algo/sha/hmac-sha256-hash-4way.c \
algo/sha/sha256d.c \
algo/sha/sha256d-4way.c \
algo/sha/sha256t-gate.c \
algo/sha/sha256t-4way.c \
algo/sha/sha256t.c \
algo/sha/sha256q-4way.c \
algo/sha/sha256q.c \
algo/sha/sha512256d-4way.c \
algo/sha/sha256dt.c \
algo/shabal/sph_shabal.c \
algo/shabal/shabal-hash-4way.c \
algo/shavite/sph_shavite.c \
algo/shavite/sph-shavite-aesni.c \
algo/shavite/shavite-hash-2way.c \
algo/shavite/shavite-hash-4way.c \
algo/shavite/shavite.c \
algo/simd/sph_simd.c \
algo/simd/nist.c \
algo/simd/vector.c \
algo/simd/sph_simd.c \
algo/simd/simd-hash-2way.c \
algo/skein/sph_skein.c \
algo/skein/skein-hash-4way.c \
@@ -191,12 +178,16 @@ cpuminer_SOURCES = \
algo/skein/skein-gate.c \
algo/skein/skein2.c \
algo/skein/skein2-4way.c \
algo/skein/skein2-gate.c \
algo/sm3/sm3.c \
algo/sm3/sm3-hash-4way.c \
algo/swifftx/swifftx.c \
algo/tiger/sph_tiger.c \
algo/verthash/verthash-gate.c \
algo/verthash/Verthash.c \
algo/verthash/fopen_utf8.c \
algo/verthash/tiny_sha3/sha3.c \
algo/verthash/tiny_sha3/sha3-4way.c \
algo/whirlpool/sph_whirlpool.c \
algo/whirlpool/whirlpool-hash-4way.c \
algo/whirlpool/whirlpool-gate.c \
algo/whirlpool/whirlpool.c \
algo/whirlpool/whirlpoolx.c \
@@ -218,7 +209,6 @@ cpuminer_SOURCES = \
algo/x11/timetravel10-gate.c \
algo/x11/timetravel10.c \
algo/x11/timetravel10-4way.c \
algo/x11/fresh.c \
algo/x11/x11evo.c \
algo/x11/x11evo-4way.c \
algo/x11/x11evo-gate.c \
@@ -237,7 +227,8 @@ cpuminer_SOURCES = \
algo/x13/skunk-gate.c \
algo/x13/skunk-4way.c \
algo/x13/skunk.c \
algo/x13/drop.c \
algo/x13/x13bcd-4way.c \
algo/x13/x13bcd.c \
algo/x14/x14-gate.c \
algo/x14/x14.c \
algo/x14/x14-4way.c \
@@ -254,6 +245,15 @@ cpuminer_SOURCES = \
algo/x16/x16r-gate.c \
algo/x16/x16r.c \
algo/x16/x16r-4way.c \
algo/x16/x16rv2.c \
algo/x16/x16rv2-4way.c \
algo/x16/x16rt.c \
algo/x16/x16rt-4way.c \
algo/x16/hex.c \
algo/x16/x20r.c \
algo/x16/x21s-4way.c \
algo/x16/x21s.c \
algo/x16/minotaur.c \
algo/x17/x17-gate.c \
algo/x17/x17.c \
algo/x17/x17-4way.c \
@@ -263,27 +263,23 @@ cpuminer_SOURCES = \
algo/x17/sonoa-gate.c \
algo/x17/sonoa-4way.c \
algo/x17/sonoa.c \
algo/x20/x20r.c \
algo/yescrypt/yescrypt.c \
algo/yescrypt/sha256_Y.c \
algo/yescrypt/yescrypt-best.c \
algo/yespower/yespower.c \
algo/yespower/sha256_p.c \
algo/yespower/yespower-opt.c
algo/x22/x22i-4way.c \
algo/x22/x22i.c \
algo/x22/x22i-gate.c \
algo/x22/x25x.c \
algo/x22/x25x-4way.c \
algo/yespower/yespower-gate.c \
algo/yespower/yespower-blake2b.c \
algo/yespower/crypto/hmac-blake2b.c \
algo/yespower/yescrypt-r8g.c \
algo/yespower/yespower-opt.c \
algo/yespower/yespower-ref.c \
algo/yespower/yespower-blake2b-ref.c
disable_flags =
if USE_ASM
cpuminer_SOURCES += asm/neoscrypt_asm.S
if ARCH_x86
cpuminer_SOURCES += asm/sha2-x86.S asm/scrypt-x86.S asm/aesb-x86.S
endif
if ARCH_x86_64
cpuminer_SOURCES += asm/sha2-x64.S asm/scrypt-x64.S asm/aesb-x64.S
endif
if ARCH_ARM
cpuminer_SOURCES += asm/sha2-arm.S asm/scrypt-arm.S
endif
else
disable_flags += -DNOASM
endif
@@ -293,7 +289,7 @@ if HAVE_WINDOWS
endif
cpuminer_LDFLAGS = @LDFLAGS@
cpuminer_LDADD = @LIBCURL@ @JANSSON_LIBS@ @PTHREAD_LIBS@ @WS2_LIBS@ -lssl -lcrypto -lgmp
cpuminer_LDADD = @LIBCURL@ @JANSSON_LIBS@ @PTHREAD_LIBS@ @WS2_LIBS@ -lgmp
cpuminer_CPPFLAGS = @LIBCURL_CPPFLAGS@ $(ALL_INCLUDES)
cpuminer_CFLAGS = -Wno-pointer-sign -Wno-pointer-to-int-cast $(disable_flags)

README.md

@@ -12,10 +12,24 @@ a false positive, they are flagged simply because they are cryptocurrency
miners. The source code is open for anyone to inspect. If you don't trust
the software, don't use it.
New thread:
https://bitcointalk.org/index.php?topic=5226770.msg53865575#msg53865575
Old thread:
https://bitcointalk.org/index.php?topic=1326803.0
mailto://jayddee246@gmail.com
This note is to confirm that bitcointalk users JayDDee and joblo are the
same person.
I created a new BCT user JayDDee to match my github user id.
The old thread has been locked but still contains useful information for
reading.
See file RELEASE_NOTES for change log and INSTALL_LINUX or INSTALL_WINDOWS
for compile instructions.
@@ -23,25 +37,33 @@ Requirements
------------
1. An x86_64 architecture CPU with a minimum of SSE2 support. This includes
Intel Core2 and newer and AMD equivalents. In order to take advantage of AES_NI
optimizations a CPU with AES_NI is required. This includes Intel Westmere
and newer and AMD equivalents. Further optimizations are available on some
algorithms for CPUs with AVX and AVX2, Sandybridge and Haswell respectively.
Intel Core2 and newer and AMD equivalents. Further optimizations are available
on some algorithms for CPUs with AES, AVX, AVX2, SHA, AVX512 and VAES.
Older CPUs are supported by cpuminer-multi by TPruvot but at reduced
performance.
32 bit CPUs are not supported.
Other CPU architectures such as ARM, Raspberry Pi, RISC-V, Xeon Phi, etc,
are not supported.
ARM CPUs are not supported.
Mobile CPUs, such as those in laptop computers, are not recommended because
they aren't designed for the extreme heat of operating at full load for
extended periods of time.
2. 64 bit Linux OS. Ubuntu and Fedora based distributions, including Mint and
Centos, are known to work and have all dependencies in their repositories.
Others may work but may require more effort. Older versions such as Centos 6
don't work due to missing features.
64 bit Windows OS is supported with mingw_w64 and msys or pre-built binaries.
Older CPUs and ARM architecture may be supported by cpuminer-multi by TPruvot.
2. 64 bit Linux or Windows OS. Ubuntu and Fedora based distributions,
including Mint and Centos, are known to work and have all dependencies
in their repositories. Others may work but may require more effort. Older
versions such as Centos 6 don't work due to missing features.
Windows 7 or newer is supported with mingw_w64 and msys or using the pre-built
binaries. WindowsXP 64 bit is YMMV.
FreeBSD is not actively tested but should work, YMMV.
MacOS, OSx and Android are not supported.
3. Stratum pool. Some algos may work wallet mining using getwork or GBT. YMMV.
3. Stratum pool supporting stratum+tcp:// or stratum+ssl:// protocols or
RPC getwork using http:// or https://.
GBT is YMMV.
Supported Algorithms
--------------------
@@ -52,51 +74,51 @@ Supported Algorithms
argon2d250 argon2d-crds, Credits (CRDS)
argon2d500 argon2d-dyn, Dynamic (DYN)
argon2d4096 argon2d-uis, Unitus, (UIS)
axiom Shabal-256 MemoHash
bastion
blake Blake-256 (SFR)
blake Blake-256
blake2b Blake2-512
blake2s Blake2-256
blakecoin blake256r8
blake2s Blake-2 S
bmw BMW 256
c11 Chaincoin
bmw512 BMW 512
c11
decred
deep Deepcoin (DCN)
dmd-gr Diamond-Groestl
drop Dropcoin
fresh Fresh
groestl Groestl coin
heavy Heavy
hmq1725 Espers
hodl Hodlcoin
hex x16r-hex
hmq1725
jha Jackpotcoin
keccak Maxcoin
keccakc Creative coin
lbry LBC, LBRY Credits
luffa Luffa
lyra2h Hppcoin
lyra2h
lyra2re lyra2
lyra2rev2 lyra2v2
lyra2rev3 lyrav2v3, Vertcoin
lyra2rev3 lyrav2v3
lyra2z
lyra2z330 Lyra2 330 rows, Zoin (ZOI)
m7m Magi (XMG)
lyra2z330
m7m
minotaur
minotaurx
myr-gr Myriad-Groestl
neoscrypt NeoScrypt(128, 2, 1)
nist5 Nist5
pentablake Pentablake
phi1612 phi, LUX coin (original algo)
phi2 LUX coin (new algo)
pluck Pluck:128 (Supcoin)
phi1612 phi
phi2
polytimos Ninja
power2b MicroBitcoin (MBC)
quark Quark
qubit Qubit
scrypt scrypt(1024, 1, 1) (default)
scrypt:N scrypt(N, 1, 1)
scryptjane:nf
scryptn2 scrypt(1048576, 1, 1)
sha256d Double SHA-256
sha256q Quad SHA-256, Pyrite (PYE)
sha256t Triple SHA-256, Onecoin (OC)
shavite3 Shavite3
sha256dt
sha256q Quad SHA-256
sha256t Triple SHA-256
sha3d Double keccak256 (BSHA3)
sha512256d
skein Skein+Sha (Skeincoin)
skein2 Double Skein (Woodcoin)
skunk Signatum (SIGT)
@@ -106,31 +128,66 @@ Supported Algorithms
tribus Denarius (DNR)
vanilla blake256r8vnl (VCash)
veltor (VLT)
verthash Vertcoin
whirlpool
whirlpoolx
x11 Dash
x11evo Revolvercoin
x11gost sib (SibCoin)
x12 Galaxie Cash (GCH)
x13 X13
x12
x13
x13bcd bcd
x13sm3 hsr (Hshare)
x14 X14
x15 X15
x16r Ravencoin (RVN)
x16s pigeoncoin (PGN)
x14
x15
x16r
x16rv2
x16rt
x16rt-veil veil
x16s
x17
x20r
x21s
x22i
x25x
xevan Bitsend (BSD)
yescrypt Globalboost-Y (BSTY)
yescryptr8 BitZeny (ZNY)
yescryptr8g Koto (KOTO)
yescryptr16 Eli
yescryptr32 WAVI
yespower Cryply
yespowerr16 Yenten (YTN)
yespower-b2b generic yespower + blake2b
zr5 Ziftr
Many variations of scrypt based algos can be mined by specifying their
parameters:
scryptn2: --algo scrypt --param-n 1048576
cpupower: --algo yespower --param-key "CPUpower: The number of CPU working or available for proof-of-work mining"
power2b: --algo yespower-b2b --param-n 2048 --param-r 32 --param-key "Now I am become Death, the destroyer of worlds"
sugarchain: --algo yespower --param-n 2048 --param-r 32 --param-key "Satoshi Nakamoto 31/Oct/2008 Proof-of-work is essentially one-CPU-one-vote"
yespoweriots: --algo yespower --param-n 2048 --param-key "Iots is committed to the development of IOT"
yespowerlitb: --algo yespower --param-n 2048 --param-r 32 --param-key "LITBpower: The number of LITB working or available for proof-of-work mini"
yespoweric: --algo yespower --param-n 2048 --param-r 32 --param-key "IsotopeC"
yespowerurx: --algo yespower --param-n 2048 --param-r 32 --param-key "UraniumX"
yespowerltncg: --algo yespower --param-n 2048 --param-r 32 --param-key "LTNCGYES"
Errata
------
Old algorithms that are no longer used frequently will not have the latest
optimizations.
Cryptonight and variants are no longer supported, use another miner.
Neoscrypt crashes on Windows, use legacy version.
@@ -148,14 +205,17 @@ Benchmark testing does not work for x11evo.
Bugs
----
Users are encouraged to post their bug reports on the Bitcoin Talk
forum at:
Users are encouraged to post their bug reports using git issues or on the
Bitcoin Talk forum:
https://bitcointalk.org/index.php?topic=1326803.0
All problem reports must be accompanied by a proper definition.
https://github.com/JayDDee/cpuminer-opt/issues
All problem reports must be accompanied by a proper problem definition.
This should include how the problem occurred, the command line and
output from the miner showing the startup and any errors.
output from the miner showing the startup messages and any errors.
A history is also useful, i.e. did it work before.
Donations
---------
@@ -163,10 +223,6 @@ Donations
cpuminer-opt has no fees of any kind but donations are accepted.
BTC: 12tdvfF7KmAsihBXQXynT6E6th2c2pByTT
ETH: 0x72122edabcae9d3f57eab0729305a425f6fef6d0
LTC: LdUwoHJnux9r9EKqFWNvAi45kQompHk6e8
BCH: 1QKYkB6atn4P7RFozyziAXLEnurwnUM1cQ
BTG: GVUyECtRHeC5D58z9F3nGGfVQndwnsPnHQ
Happy mining!

README.txt

@@ -1,8 +1,22 @@
This file is included in the Windows binary package. Compile instructions
for Linux and Windows can be found in RELEASE_NOTES.
cpuminer is a console program that is executed from a DOS command prompt.
There is no GUI and no mouse support.
cpuminer-opt is open source and free of any fees. Many forks exist that are
closed source and contain usage fees. Support open source free software.
This package is officially available only from:
https://github.com/JayDDee/cpuminer-opt
No other sources should be trusted.
cpuminer is a console program that is executed from a DOS or PowerShell
command prompt. There is no GUI and no mouse support.
New users are encouraged to consult the cpuminer-opt Wiki for detailed
information on usage:
https://github.com/JayDDee/cpuminer-opt/wiki
Miner programs are often flagged as malware by antivirus programs. This is
a false positive, they are flagged simply because they are cryptocurrency
@@ -10,26 +24,54 @@ miners. The source code is open for anyone to inspect. If you don't trust
the software, don't use it.
Choose the exe that best matches your CPU's features or use trial and
error to find the fastest one that doesn't crash. Pay attention to
error to find the fastest one that works. Pay attention to
the features listed at cpuminer startup to ensure you are mining at
optimum speed using the best available features.
Architecture names and compile options used are only provided for Intel
Core series. Even the newest Pentium and Celeron CPUs are often missing
features.
Architecture names and compile options used are only provided for
mainstream desktop CPUs. Budget CPUs like Pentium and Celeron are often
missing some features. Check your CPU.
AMD CPUs older than Piledriver, including Athlon x2 and Phenom II x4, are not
supported by cpuminer-opt due to an incompatible implementation of SSE2 on
these CPUs. Some algos may crash the miner with an invalid instruction.
Users are recommended to use an unoptimized miner such as cpuminer-multi.
Support for AMD CPUs older than Ryzen is incomplete and without specific
recommendations. Find the best fit. CPUs older than Piledriver, including
Athlon x2 and Phenom II x4, are not supported by cpuminer-opt due to an
incompatible implementation of SSE2 on these CPUs.
Exe name Compile flags Arch name
More information for Intel and AMD CPU architectures and their features
can be found on Wikipedia.
https://en.wikipedia.org/wiki/List_of_Intel_CPU_microarchitectures
https://en.wikipedia.org/wiki/List_of_AMD_CPU_microarchitectures
File name Architecture name
cpuminer-sse2.exe Core2, Nehalem, generic x86_64 with SSE2
cpuminer-aes-sse42.exe Westmere
cpuminer-avx.exe Sandybridge, Ivybridge
cpuminer-avx2.exe Haswell, Skylake, Kabylake, Coffeelake, Cometlake
cpuminer-avx2-sha.exe AMD Zen1, Zen2
cpuminer-avx2-sha-vaes.exe Intel Alderlake*, AMD Zen3
cpuminer-avx512.exe Intel HEDT Skylake-X, Cascadelake
cpuminer-avx512-sha-vaes.exe AMD Zen4, Intel Rocketlake, Icelake
* Alderlake is a hybrid architecture with a mix of E-cores & P-cores. Although
the P-cores can support AVX512 the E-cores can't, so Intel decided to disable
AVX512 on the P-cores.
Notes about included DLL files:
Downloading DLL files from alternative sources presents an inherent
security risk if their source is unknown. All DLL files included have
been copied from the Ubuntu-20.04 installation or compiled by me from
source code obtained from the author's official repository. The exact
procedure is documented in the build instructions for Windows:
https://github.com/JayDDee/cpuminer-opt/wiki/Compiling-from-source
Some included DLL files may already be installed on the system by Windows or
third party packages. They often will work and may be used instead of the
included version of the files.
cpuminer-sse2.exe "-msse2" Core2, Nehalem
cpuminer-aes-sse42.exe "-march=westmere" Westmere
cpuminer-avx.exe "-march=corei7-avx" Sandy-Ivybridge
cpuminer-avx2.exe "-march=core-avx2" Haswell, Sky-Kaby-Coffeelake
cpuminer-zen "-march=znver1" AMD Ryzen, Threadripper
If you like this software feel free to donate:

File diff suppressed because it is too large

aclocal.m4 (vendored)

@@ -1,6 +1,6 @@
# generated automatically by aclocal 1.15.1 -*- Autoconf -*-
# generated automatically by aclocal 1.16.5 -*- Autoconf -*-
# Copyright (C) 1996-2017 Free Software Foundation, Inc.
# Copyright (C) 1996-2021 Free Software Foundation, Inc.
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -14,13 +14,13 @@
m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])])
m4_ifndef([AC_AUTOCONF_VERSION],
[m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],,
[m4_warning([this file was generated for autoconf 2.69.
m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.71],,
[m4_warning([this file was generated for autoconf 2.71.
You have another version of autoconf. It may work, but is not guaranteed to.
If you have problems, you may need to regenerate the build system entirely.
To do so, use the procedure documented by the package, typically 'autoreconf'.])])
# Copyright (C) 2002-2017 Free Software Foundation, Inc.
# Copyright (C) 2002-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -32,10 +32,10 @@ To do so, use the procedure documented by the package, typically 'autoreconf'.])
# generated from the m4 files accompanying Automake X.Y.
# (This private macro should not be called outside this file.)
AC_DEFUN([AM_AUTOMAKE_VERSION],
[am__api_version='1.15'
[am__api_version='1.16'
dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
dnl require some minimum version. Point them to the right macro.
m4_if([$1], [1.15.1], [],
m4_if([$1], [1.16.5], [],
[AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
])
@@ -51,14 +51,14 @@ m4_define([_AM_AUTOCONF_VERSION], [])
# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced.
# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
[AM_AUTOMAKE_VERSION([1.15.1])dnl
[AM_AUTOMAKE_VERSION([1.16.5])dnl
m4_ifndef([AC_AUTOCONF_VERSION],
[m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
# Figure out how to run the assembler. -*- Autoconf -*-
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -78,7 +78,7 @@ _AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl
# AM_AUX_DIR_EXPAND -*- Autoconf -*-
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -130,7 +130,7 @@ am_aux_dir=`cd "$ac_aux_dir" && pwd`
# AM_CONDITIONAL -*- Autoconf -*-
# Copyright (C) 1997-2017 Free Software Foundation, Inc.
# Copyright (C) 1997-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -161,7 +161,7 @@ AC_CONFIG_COMMANDS_PRE(
Usually this means the macro was only invoked conditionally.]])
fi])])
# Copyright (C) 1999-2017 Free Software Foundation, Inc.
# Copyright (C) 1999-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -352,13 +352,12 @@ _AM_SUBST_NOTMAKE([am__nodep])dnl
# Generate code to set up dependency tracking. -*- Autoconf -*-
# Copyright (C) 1999-2017 Free Software Foundation, Inc.
# Copyright (C) 1999-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# _AM_OUTPUT_DEPENDENCY_COMMANDS
# ------------------------------
AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
@@ -366,49 +365,43 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
# Older Autoconf quotes --file arguments for eval, but not when files
# are listed without --file. Let's play safe and only enable the eval
# if we detect the quoting.
case $CONFIG_FILES in
*\'*) eval set x "$CONFIG_FILES" ;;
*) set x $CONFIG_FILES ;;
esac
# TODO: see whether this extra hack can be removed once we start
# requiring Autoconf 2.70 or later.
AS_CASE([$CONFIG_FILES],
[*\'*], [eval set x "$CONFIG_FILES"],
[*], [set x $CONFIG_FILES])
shift
for mf
# Used to flag and report bootstrapping failures.
am_rc=0
for am_mf
do
# Strip MF so we end up with the name of the file.
mf=`echo "$mf" | sed -e 's/:.*$//'`
# Check whether this is an Automake generated Makefile or not.
# We used to match only the files named 'Makefile.in', but
# some people rename them; so instead we look at the file content.
# Grep'ing the first line is not enough: some people post-process
# each Makefile.in and add a new line on top of each file to say so.
# Grep'ing the whole file is not good either: AIX grep has a line
am_mf=`AS_ECHO(["$am_mf"]) | sed -e 's/:.*$//'`
# Check whether this is an Automake generated Makefile which includes
# dependency-tracking related rules and includes.
# Grep'ing the whole file directly is not great: AIX grep has a line
# limit of 2048, but all sed's we know have understand at least 4000.
if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then
dirpart=`AS_DIRNAME("$mf")`
else
continue
fi
# Extract the definition of DEPDIR, am__include, and am__quote
# from the Makefile without running 'make'.
DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
test -z "$DEPDIR" && continue
am__include=`sed -n 's/^am__include = //p' < "$mf"`
test -z "$am__include" && continue
am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
# Find all dependency output files, they are included files with
# $(DEPDIR) in their names. We invoke sed twice because it is the
# simplest approach to changing $(DEPDIR) to its actual value in the
# expansion.
for file in `sed -n "
s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do
# Make sure the directory exists.
test -f "$dirpart/$file" && continue
fdir=`AS_DIRNAME(["$file"])`
AS_MKDIR_P([$dirpart/$fdir])
# echo "creating $dirpart/$file"
echo '# dummy' > "$dirpart/$file"
done
sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \
|| continue
am_dirpart=`AS_DIRNAME(["$am_mf"])`
am_filepart=`AS_BASENAME(["$am_mf"])`
AM_RUN_LOG([cd "$am_dirpart" \
&& sed -e '/# am--include-marker/d' "$am_filepart" \
| $MAKE -f - am--depfiles]) || am_rc=$?
done
if test $am_rc -ne 0; then
AC_MSG_FAILURE([Something went wrong bootstrapping makefile fragments
for automatic dependency tracking. If GNU make was not used, consider
re-running the configure script with MAKE="gmake" (or whatever is
necessary). You can also try re-running configure with the
'--disable-dependency-tracking' option to at least be able to build
the package (albeit without support for automatic dependency tracking).])
fi
AS_UNSET([am_dirpart])
AS_UNSET([am_filepart])
AS_UNSET([am_mf])
AS_UNSET([am_rc])
rm -f conftest-deps.mk
}
])# _AM_OUTPUT_DEPENDENCY_COMMANDS
@@ -417,18 +410,17 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
# -----------------------------
# This macro should only be invoked once -- use via AC_REQUIRE.
#
# This code is only required when automatic dependency tracking
# is enabled. FIXME. This creates each '.P' file that we will
# need in order to bootstrap the dependency handling code.
# This code is only required when automatic dependency tracking is enabled.
# This creates each '.Po' and '.Plo' makefile fragment that we'll need in
# order to bootstrap the dependency handling code.
AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
[AC_CONFIG_COMMANDS([depfiles],
[test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS],
[AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"])
])
[AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}"])])
# Do all the work for Automake. -*- Autoconf -*-
# Copyright (C) 1996-2017 Free Software Foundation, Inc.
# Copyright (C) 1996-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -456,6 +448,10 @@ m4_defn([AC_PROG_CC])
# release and drop the old call support.
AC_DEFUN([AM_INIT_AUTOMAKE],
[AC_PREREQ([2.65])dnl
m4_ifdef([_$0_ALREADY_INIT],
[m4_fatal([$0 expanded multiple times
]m4_defn([_$0_ALREADY_INIT]))],
[m4_define([_$0_ALREADY_INIT], m4_expansion_stack)])dnl
dnl Autoconf wants to disallow AM_ names. We explicitly allow
dnl the ones we care about.
m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl
@@ -492,7 +488,7 @@ m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
[_AM_SET_OPTIONS([$1])dnl
dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT.
m4_if(
m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]),
m4_ifset([AC_PACKAGE_NAME], [ok]):m4_ifset([AC_PACKAGE_VERSION], [ok]),
[ok:ok],,
[m4_fatal([AC_INIT should be called with package and version arguments])])dnl
AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl
@@ -515,8 +511,8 @@ AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl
AC_REQUIRE([AC_PROG_MKDIR_P])dnl
# For better backward compatibility. To be removed once Automake 1.9.x
# dies out for good. For more background, see:
# <http://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
# <http://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
# <https://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
# <https://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
AC_SUBST([mkdir_p], ['$(MKDIR_P)'])
# We need awk for the "check" target (and possibly the TAP driver). The
# system "awk" is bad on some platforms.
@@ -544,6 +540,20 @@ AC_PROVIDE_IFELSE([AC_PROG_OBJCXX],
[m4_define([AC_PROG_OBJCXX],
m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl
])
# Variables for tags utilities; see am/tags.am
if test -z "$CTAGS"; then
CTAGS=ctags
fi
AC_SUBST([CTAGS])
if test -z "$ETAGS"; then
ETAGS=etags
fi
AC_SUBST([ETAGS])
if test -z "$CSCOPE"; then
CSCOPE=cscope
fi
AC_SUBST([CSCOPE])
AC_REQUIRE([AM_SILENT_RULES])dnl
dnl The testsuite driver may need to know about EXEEXT, so add the
dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This
@@ -583,7 +593,7 @@ END
Aborting the configuration process, to ensure you take notice of the issue.
You can download and install GNU coreutils to get an 'rm' implementation
that behaves properly: <http://www.gnu.org/software/coreutils/>.
that behaves properly: <https://www.gnu.org/software/coreutils/>.
If you want to complete the configuration process using your problematic
'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
@@ -625,7 +635,7 @@ for _am_header in $config_headers :; do
done
echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -646,7 +656,7 @@ if test x"${install_sh+set}" != xset; then
fi
AC_SUBST([install_sh])])
# Copyright (C) 2003-2017 Free Software Foundation, Inc.
# Copyright (C) 2003-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -668,7 +678,7 @@ AC_SUBST([am__leading_dot])])
# Add --enable-maintainer-mode option to configure. -*- Autoconf -*-
# From Jim Meyering
# Copyright (C) 1996-2017 Free Software Foundation, Inc.
# Copyright (C) 1996-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -703,7 +713,7 @@ AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles])
# Check to see how 'make' treats includes. -*- Autoconf -*-
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -711,49 +721,42 @@ AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles])
# AM_MAKE_INCLUDE()
# -----------------
# Check to see how make treats includes.
# Check whether make has an 'include' directive that can support all
# the idioms we need for our automatic dependency tracking code.
AC_DEFUN([AM_MAKE_INCLUDE],
[am_make=${MAKE-make}
cat > confinc << 'END'
[AC_MSG_CHECKING([whether ${MAKE-make} supports the include directive])
cat > confinc.mk << 'END'
am__doit:
@echo this is the am__doit target
@echo this is the am__doit target >confinc.out
.PHONY: am__doit
END
# If we don't find an include directive, just comment out the code.
AC_MSG_CHECKING([for style of include used by $am_make])
am__include="#"
am__quote=
_am_result=none
# First try GNU make style include.
echo "include confinc" > confmf
# Ignore all kinds of additional output from 'make'.
case `$am_make -s -f confmf 2> /dev/null` in #(
*the\ am__doit\ target*)
am__include=include
am__quote=
_am_result=GNU
;;
esac
# Now try BSD make style include.
if test "$am__include" = "#"; then
echo '.include "confinc"' > confmf
case `$am_make -s -f confmf 2> /dev/null` in #(
*the\ am__doit\ target*)
am__include=.include
am__quote="\""
_am_result=BSD
;;
esac
fi
AC_SUBST([am__include])
AC_SUBST([am__quote])
AC_MSG_RESULT([$_am_result])
rm -f confinc confmf
])
# BSD make does it like this.
echo '.include "confinc.mk" # ignored' > confmf.BSD
# Other make implementations (GNU, Solaris 10, AIX) do it like this.
echo 'include confinc.mk # ignored' > confmf.GNU
_am_result=no
for s in GNU BSD; do
AM_RUN_LOG([${MAKE-make} -f confmf.$s && cat confinc.out])
AS_CASE([$?:`cat confinc.out 2>/dev/null`],
['0:this is the am__doit target'],
[AS_CASE([$s],
[BSD], [am__include='.include' am__quote='"'],
[am__include='include' am__quote=''])])
if test "$am__include" != "#"; then
_am_result="yes ($s style)"
break
fi
done
rm -f confinc.* confmf.*
AC_MSG_RESULT([${_am_result}])
AC_SUBST([am__include])])
AC_SUBST([am__quote])])
# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*-
# Copyright (C) 1997-2017 Free Software Foundation, Inc.
# Copyright (C) 1997-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -774,12 +777,7 @@ AC_DEFUN([AM_MISSING_HAS_RUN],
[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
AC_REQUIRE_AUX_FILE([missing])dnl
if test x"${MISSING+set}" != xset; then
case $am_aux_dir in
*\ * | *\ *)
MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
*)
MISSING="\${SHELL} $am_aux_dir/missing" ;;
esac
MISSING="\${SHELL} '$am_aux_dir/missing'"
fi
# Use eval to expand $SHELL
if eval "$MISSING --is-lightweight"; then
@@ -792,7 +790,7 @@ fi
# Helper functions for option handling. -*- Autoconf -*-
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -821,7 +819,7 @@ AC_DEFUN([_AM_SET_OPTIONS],
AC_DEFUN([_AM_IF_OPTION],
[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
# Copyright (C) 1999-2017 Free Software Foundation, Inc.
# Copyright (C) 1999-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -868,7 +866,7 @@ AC_LANG_POP([C])])
# For backward compatibility.
AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])])
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -887,7 +885,7 @@ AC_DEFUN([AM_RUN_LOG],
# Check to make sure that the build environment is sane. -*- Autoconf -*-
# Copyright (C) 1996-2017 Free Software Foundation, Inc.
# Copyright (C) 1996-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -968,7 +966,7 @@ AC_CONFIG_COMMANDS_PRE(
rm -f conftest.file
])
# Copyright (C) 2009-2017 Free Software Foundation, Inc.
# Copyright (C) 2009-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -1028,7 +1026,7 @@ AC_SUBST([AM_BACKSLASH])dnl
_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl
])
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
# Copyright (C) 2001-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -1056,7 +1054,7 @@ fi
INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
AC_SUBST([INSTALL_STRIP_PROGRAM])])
# Copyright (C) 2006-2017 Free Software Foundation, Inc.
# Copyright (C) 2006-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -1075,7 +1073,7 @@ AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
# Check how to create a tarball. -*- Autoconf -*-
# Copyright (C) 2004-2017 Free Software Foundation, Inc.
# Copyright (C) 2004-2021 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,

algo-gate-api.c

@@ -15,8 +15,6 @@
#include <stdbool.h>
#include <memory.h>
#include <unistd.h>
#include <openssl/sha.h>
//#include "miner.h"
#include "algo-gate-api.h"
// Define null and standard functions.
@@ -69,7 +67,6 @@ void do_nothing () {}
bool return_true () { return true; }
bool return_false () { return false; }
void *return_null () { return NULL; }
void call_error () { printf("ERR: Uninitialized function pointer\n"); }
void algo_not_tested()
{
@@ -90,49 +87,185 @@ void algo_not_implemented()
}
// default null functions
// deprecated, use generic as default
int null_scanhash()
{
applog(LOG_WARNING,"SWERR: undefined scanhash function in algo_gate");
return 0;
}
void null_hash()
// Default generic scanhash can be used in many cases. Not to be used when
// prehashing can be done or when byte swapping the data can be avoided.
int scanhash_generic( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t edata[20] __attribute__((aligned(64)));
uint32_t hash[8] __attribute__((aligned(64)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 1;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
v128_bswap32_80( edata, pdata );
do
{
edata[19] = n;
if ( likely( algo_gate.hash( hash, edata, thr_id ) ) )
if ( unlikely( valid_hash( hash, ptarget ) && !bench ) )
{
pdata[19] = bswap_32( n );
submit_solution( work, hash, mythr );
}
n++;
} while ( n < last_nonce && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce;
pdata[19] = n;
return 0;
}
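// For context, a hypothetical sketch (not part of this commit) of the
// contract scanhash_generic expects from algo_gate.hash: the gate's hash
// function receives the 80 byte (20 x uint32_t) byte swapped header in
// edata and must write a 32 byte hash, returning nonzero on success.
// sha256d() stands in for a real algorithm here and is assumed to be the
// double SHA-256 helper from the miner's utility code.
int example_hash( void *hash, const void *edata, int thr_id )
{
   sha256d( (unsigned char*)hash, (const unsigned char*)edata, 80 );
   return 1;
}

// Its register function would override only what differs from the
// init_algo_gate defaults, keeping scanhash_generic as the scanhash.
bool register_example_algo( algo_gate_t *gate )
{
   gate->hash = (void*)&example_hash;
   return true;
}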
#if defined(__AVX2__)
//int scanhash_4way_64_64( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr )
//int scanhash_4way_64_640( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr )
int scanhash_4way_64in_32out( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash32[8*4] __attribute__ ((aligned (64)));
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash32_d7 = &(hash32[ 7*4 ]);
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 4;
__m256i *noncev = (__m256i*)vdata + 9;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const uint32_t targ32_d7 = ptarget[7];
const bool bench = opt_benchmark;
mm256_bswap32_intrlv80_4x64( vdata, pdata );
// Overwrite the byte swapped nonce with original byte order for proper
// incrementing. The nonce only needs to be byte swapped if it is to be
// submitted.
*noncev = mm256_intrlv_blend_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
if ( likely( algo_gate.hash( hash32, vdata, thr_id ) ) )
for ( int lane = 0; lane < 4; lane++ )
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 && !bench ) )
{
extr_lane_4x32( lane_hash, hash32, lane, 256 );
if ( valid_hash( lane_hash, ptarget ) )
{
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm256_add_epi32( *noncev,
_mm256_set1_epi64x( 0x0000000400000000 ) );
n += 4;
} while ( likely( ( n <= last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
//int scanhash_8way_32_32( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr )
#endif
#if defined(SIMD512)
//int scanhash_8way_64_64( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr )
//int scanhash_8way_64_640( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr )
int scanhash_8way_64in_32out( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash32[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash32_d7 = &(hash32[7*8]);
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = max_nonce - 8;
__m512i *noncev = (__m512i*)vdata + 9;
uint32_t n = first_nonce;
const int thr_id = mythr->id;
const uint32_t targ32_d7 = ptarget[7];
const bool bench = opt_benchmark;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
*noncev = mm512_intrlv_blend_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n, 0 ), *noncev );
do
{
if ( likely( algo_gate.hash( hash32, vdata, thr_id ) ) )
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( ( hash32_d7[ lane ] <= targ32_d7 ) && !bench ) )
{
extr_lane_8x32( lane_hash, hash32, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) ) )
{
pdata[19] = bswap_32( n + lane );
submit_solution( work, lane_hash, mythr );
}
}
*noncev = _mm512_add_epi32( *noncev,
_mm512_set1_epi64( 0x0000000800000000 ) );
n += 8;
} while ( likely( ( n < last_nonce ) && !work_restart[thr_id].restart ) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
//int scanhash_16way_32_32( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr )
#endif
int null_hash()
{
   applog(LOG_WARNING,"SWERR: null_hash unsafe null function");
   return 0;
};
void null_hash_suw()
{
   applog(LOG_WARNING,"SWERR: null_hash_suw unsafe null function");
};
void init_algo_gate( algo_gate_t* gate )
static void init_algo_gate( algo_gate_t* gate )
{
gate->miner_thread_init = (void*)&return_true;
gate->scanhash = (void*)&null_scanhash;
gate->scanhash = (void*)&scanhash_generic;
gate->hash = (void*)&null_hash;
gate->hash_suw = (void*)&null_hash_suw;
gate->get_new_work = (void*)&std_get_new_work;
gate->get_nonceptr = (void*)&std_get_nonceptr;
gate->work_decode = (void*)&std_le_work_decode;
gate->decode_extra_data = (void*)&do_nothing;
gate->wait_for_diff = (void*)&std_wait_for_diff;
gate->get_max64 = (void*)&get_max64_0x1fffffLL;
gate->gen_merkle_root = (void*)&sha256d_gen_merkle_root;
gate->stratum_gen_work = (void*)&std_stratum_gen_work;
gate->build_stratum_request = (void*)&std_le_build_stratum_request;
gate->malloc_txs_request = (void*)&std_malloc_txs_request;
gate->set_target = (void*)&std_set_target;
gate->submit_getwork_result = (void*)&std_le_submit_getwork_result;
gate->build_block_header = (void*)&std_build_block_header;
gate->build_extraheader = (void*)&std_build_extraheader;
gate->set_work_data_endian = (void*)&do_nothing;
gate->calc_network_diff = (void*)&std_calc_network_diff;
gate->ready_to_mine = (void*)&std_ready_to_mine;
gate->resync_threads = (void*)&do_nothing;
gate->do_this_thread = (void*)&return_true;
gate->longpoll_rpc_call = (void*)&std_longpoll_rpc_call;
gate->stratum_handle_response = (void*)&std_stratum_handle_response;
gate->get_work_data_size = (void*)&std_get_work_data_size;
gate->optimizations = EMPTY_SET;
gate->ntime_index = STD_NTIME_INDEX;
@@ -145,9 +278,11 @@ void init_algo_gate( algo_gate_t* gate )
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wimplicit-function-declaration"
// called by each thread that uses the gate
// Called once by main
bool register_algo_gate( int algo, algo_gate_t *gate )
{
bool rc = false;
if ( NULL == gate )
{
applog(LOG_ERR,"FAIL: algo_gate registration failed, NULL gate\n");
@@ -156,105 +291,105 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
init_algo_gate( gate );
switch (algo)
switch ( algo )
{
case ALGO_ALLIUM: register_allium_algo ( gate ); break;
case ALGO_ANIME: register_anime_algo ( gate ); break;
case ALGO_ARGON2: register_argon2_algo ( gate ); break;
case ALGO_ARGON2D250: register_argon2d_crds_algo ( gate ); break;
case ALGO_ARGON2D500: register_argon2d_dyn_algo ( gate ); break;
case ALGO_ARGON2D4096: register_argon2d4096_algo ( gate ); break;
case ALGO_AXIOM: register_axiom_algo ( gate ); break;
case ALGO_BASTION: register_bastion_algo ( gate ); break;
case ALGO_BLAKE: register_blake_algo ( gate ); break;
case ALGO_BLAKECOIN: register_blakecoin_algo ( gate ); break;
// case ALGO_BLAKE2B: register_blake2b_algo ( gate ); break;
case ALGO_BLAKE2S: register_blake2s_algo ( gate ); break;
case ALGO_C11: register_c11_algo ( gate ); break;
case ALGO_CRYPTOLIGHT: register_cryptolight_algo ( gate ); break;
case ALGO_CRYPTONIGHT: register_cryptonight_algo ( gate ); break;
case ALGO_CRYPTONIGHTV7: register_cryptonightv7_algo ( gate ); break;
case ALGO_DECRED: register_decred_algo ( gate ); break;
case ALGO_DEEP: register_deep_algo ( gate ); break;
case ALGO_DMD_GR: register_dmd_gr_algo ( gate ); break;
case ALGO_DROP: register_drop_algo ( gate ); break;
case ALGO_FRESH: register_fresh_algo ( gate ); break;
case ALGO_GROESTL: register_groestl_algo ( gate ); break;
case ALGO_HEAVY: register_heavy_algo ( gate ); break;
case ALGO_HMQ1725: register_hmq1725_algo ( gate ); break;
case ALGO_HODL: register_hodl_algo ( gate ); break;
case ALGO_JHA: register_jha_algo ( gate ); break;
case ALGO_KECCAK: register_keccak_algo ( gate ); break;
case ALGO_KECCAKC: register_keccakc_algo ( gate ); break;
case ALGO_LBRY: register_lbry_algo ( gate ); break;
case ALGO_LUFFA: register_luffa_algo ( gate ); break;
case ALGO_LYRA2H: register_lyra2h_algo ( gate ); break;
case ALGO_LYRA2RE: register_lyra2re_algo ( gate ); break;
case ALGO_LYRA2REV2: register_lyra2rev2_algo ( gate ); break;
case ALGO_LYRA2REV3: register_lyra2rev3_algo ( gate ); break;
case ALGO_LYRA2Z: register_lyra2z_algo ( gate ); break;
case ALGO_LYRA2Z330: register_lyra2z330_algo ( gate ); break;
case ALGO_M7M: register_m7m_algo ( gate ); break;
case ALGO_MYR_GR: register_myriad_algo ( gate ); break;
case ALGO_NEOSCRYPT: register_neoscrypt_algo ( gate ); break;
case ALGO_NIST5: register_nist5_algo ( gate ); break;
case ALGO_PENTABLAKE: register_pentablake_algo ( gate ); break;
case ALGO_PHI1612: register_phi1612_algo ( gate ); break;
case ALGO_PHI2: register_phi2_algo ( gate ); break;
case ALGO_PLUCK: register_pluck_algo ( gate ); break;
case ALGO_POLYTIMOS: register_polytimos_algo ( gate ); break;
case ALGO_QUARK: register_quark_algo ( gate ); break;
case ALGO_QUBIT: register_qubit_algo ( gate ); break;
case ALGO_SCRYPT: register_scrypt_algo ( gate ); break;
case ALGO_SCRYPTJANE: register_scryptjane_algo ( gate ); break;
case ALGO_SHA256D: register_sha256d_algo ( gate ); break;
case ALGO_SHA256Q: register_sha256q_algo ( gate ); break;
case ALGO_SHA256T: register_sha256t_algo ( gate ); break;
case ALGO_SHAVITE3: register_shavite_algo ( gate ); break;
case ALGO_SKEIN: register_skein_algo ( gate ); break;
case ALGO_SKEIN2: register_skein2_algo ( gate ); break;
case ALGO_SKUNK: register_skunk_algo ( gate ); break;
case ALGO_SONOA: register_sonoa_algo ( gate ); break;
case ALGO_TIMETRAVEL: register_timetravel_algo ( gate ); break;
case ALGO_TIMETRAVEL10: register_timetravel10_algo ( gate ); break;
case ALGO_TRIBUS: register_tribus_algo ( gate ); break;
case ALGO_VANILLA: register_vanilla_algo ( gate ); break;
case ALGO_VELTOR: register_veltor_algo ( gate ); break;
case ALGO_WHIRLPOOL: register_whirlpool_algo ( gate ); break;
case ALGO_WHIRLPOOLX: register_whirlpoolx_algo ( gate ); break;
case ALGO_X11: register_x11_algo ( gate ); break;
case ALGO_X11EVO: register_x11evo_algo ( gate ); break;
case ALGO_X11GOST: register_x11gost_algo ( gate ); break;
case ALGO_X12: register_x12_algo ( gate ); break;
case ALGO_X13: register_x13_algo ( gate ); break;
case ALGO_X13SM3: register_x13sm3_algo ( gate ); break;
case ALGO_X14: register_x14_algo ( gate ); break;
case ALGO_X15: register_x15_algo ( gate ); break;
case ALGO_X16R: register_x16r_algo ( gate ); break;
case ALGO_X16S: register_x16s_algo ( gate ); break;
case ALGO_X17: register_x17_algo ( gate ); break;
case ALGO_XEVAN: register_xevan_algo ( gate ); break;
/* case ALGO_YESCRYPT: register_yescrypt_05_algo ( gate ); break;
case ALGO_YESCRYPTR8: register_yescryptr8_05_algo ( gate ); break;
case ALGO_YESCRYPTR16: register_yescryptr16_05_algo ( gate ); break;
case ALGO_YESCRYPTR32: register_yescryptr32_05_algo ( gate ); break;
*/
case ALGO_YESCRYPT: register_yescrypt_algo ( gate ); break;
case ALGO_YESCRYPTR8: register_yescryptr8_algo ( gate ); break;
case ALGO_YESCRYPTR16: register_yescryptr16_algo ( gate ); break;
case ALGO_YESCRYPTR32: register_yescryptr32_algo ( gate ); break;
case ALGO_YESPOWER: register_yespower_algo ( gate ); break;
case ALGO_YESPOWERR16: register_yespowerr16_algo ( gate ); break;
case ALGO_ZR5: register_zr5_algo ( gate ); break;
case ALGO_ALLIUM: rc = register_allium_algo ( gate ); break;
case ALGO_ANIME: rc = register_anime_algo ( gate ); break;
case ALGO_ARGON2D250: rc = register_argon2d_crds_algo ( gate ); break;
case ALGO_ARGON2D500: rc = register_argon2d_dyn_algo ( gate ); break;
case ALGO_ARGON2D4096: rc = register_argon2d4096_algo ( gate ); break;
case ALGO_AXIOM: rc = register_axiom_algo ( gate ); break;
case ALGO_BLAKE: rc = register_blake_algo ( gate ); break;
case ALGO_BLAKE2B: rc = register_blake2b_algo ( gate ); break;
case ALGO_BLAKE2S: rc = register_blake2s_algo ( gate ); break;
case ALGO_BLAKECOIN: rc = register_blakecoin_algo ( gate ); break;
case ALGO_BMW512: rc = register_bmw512_algo ( gate ); break;
case ALGO_C11: rc = register_c11_algo ( gate ); break;
case ALGO_DEEP: rc = register_deep_algo ( gate ); break;
case ALGO_DMD_GR: rc = register_dmd_gr_algo ( gate ); break;
case ALGO_GROESTL: rc = register_groestl_algo ( gate ); break;
case ALGO_HEX: rc = register_hex_algo ( gate ); break;
case ALGO_HMQ1725: rc = register_hmq1725_algo ( gate ); break;
case ALGO_JHA: rc = register_jha_algo ( gate ); break;
case ALGO_KECCAK: rc = register_keccak_algo ( gate ); break;
case ALGO_KECCAKC: rc = register_keccakc_algo ( gate ); break;
case ALGO_LBRY: rc = register_lbry_algo ( gate ); break;
case ALGO_LYRA2H: rc = register_lyra2h_algo ( gate ); break;
case ALGO_LYRA2RE: rc = register_lyra2re_algo ( gate ); break;
case ALGO_LYRA2REV2: rc = register_lyra2rev2_algo ( gate ); break;
case ALGO_LYRA2REV3: rc = register_lyra2rev3_algo ( gate ); break;
case ALGO_LYRA2Z: rc = register_lyra2z_algo ( gate ); break;
case ALGO_LYRA2Z330: rc = register_lyra2z330_algo ( gate ); break;
case ALGO_M7M: rc = register_m7m_algo ( gate ); break;
case ALGO_MINOTAUR: rc = register_minotaur_algo ( gate ); break;
case ALGO_MINOTAURX: rc = register_minotaur_algo ( gate ); break;
case ALGO_MYR_GR: rc = register_myriad_algo ( gate ); break;
case ALGO_NEOSCRYPT: rc = register_neoscrypt_algo ( gate ); break;
case ALGO_NIST5: rc = register_nist5_algo ( gate ); break;
case ALGO_PENTABLAKE: rc = register_pentablake_algo ( gate ); break;
case ALGO_PHI1612: rc = register_phi1612_algo ( gate ); break;
case ALGO_PHI2: rc = register_phi2_algo ( gate ); break;
case ALGO_POLYTIMOS: rc = register_polytimos_algo ( gate ); break;
case ALGO_POWER2B: rc = register_power2b_algo ( gate ); break;
case ALGO_QUARK: rc = register_quark_algo ( gate ); break;
case ALGO_QUBIT: rc = register_qubit_algo ( gate ); break;
case ALGO_SCRYPT: rc = register_scrypt_algo ( gate ); break;
case ALGO_SHA256D: rc = register_sha256d_algo ( gate ); break;
case ALGO_SHA256DT: rc = register_sha256dt_algo ( gate ); break;
case ALGO_SHA256Q: rc = register_sha256q_algo ( gate ); break;
case ALGO_SHA256T: rc = register_sha256t_algo ( gate ); break;
case ALGO_SHA3D: rc = register_sha3d_algo ( gate ); break;
case ALGO_SHA512256D: rc = register_sha512256d_algo ( gate ); break;
case ALGO_SHAVITE3: rc = register_shavite_algo ( gate ); break;
case ALGO_SKEIN: rc = register_skein_algo ( gate ); break;
case ALGO_SKEIN2: rc = register_skein2_algo ( gate ); break;
case ALGO_SKUNK: rc = register_skunk_algo ( gate ); break;
case ALGO_SONOA: rc = register_sonoa_algo ( gate ); break;
case ALGO_TIMETRAVEL: rc = register_timetravel_algo ( gate ); break;
case ALGO_TIMETRAVEL10: rc = register_timetravel10_algo ( gate ); break;
case ALGO_TRIBUS: rc = register_tribus_algo ( gate ); break;
case ALGO_VANILLA: rc = register_vanilla_algo ( gate ); break;
case ALGO_VELTOR: rc = register_veltor_algo ( gate ); break;
case ALGO_VERTHASH: rc = register_verthash_algo ( gate ); break;
case ALGO_WHIRLPOOL: rc = register_whirlpool_algo ( gate ); break;
case ALGO_WHIRLPOOLX: rc = register_whirlpoolx_algo ( gate ); break;
case ALGO_X11: rc = register_x11_algo ( gate ); break;
case ALGO_X11EVO: rc = register_x11evo_algo ( gate ); break;
case ALGO_X11GOST: rc = register_x11gost_algo ( gate ); break;
case ALGO_X12: rc = register_x12_algo ( gate ); break;
case ALGO_X13: rc = register_x13_algo ( gate ); break;
case ALGO_X13BCD: rc = register_x13bcd_algo ( gate ); break;
case ALGO_X13SM3: rc = register_x13sm3_algo ( gate ); break;
case ALGO_X14: rc = register_x14_algo ( gate ); break;
case ALGO_X15: rc = register_x15_algo ( gate ); break;
case ALGO_X16R: rc = register_x16r_algo ( gate ); break;
case ALGO_X16RV2: rc = register_x16rv2_algo ( gate ); break;
case ALGO_X16RT: rc = register_x16rt_algo ( gate ); break;
case ALGO_X16RT_VEIL: rc = register_x16rt_veil_algo ( gate ); break;
case ALGO_X16S: rc = register_x16s_algo ( gate ); break;
case ALGO_X17: rc = register_x17_algo ( gate ); break;
case ALGO_X20R: rc = register_x20r_algo ( gate ); break;
case ALGO_X21S: rc = register_x21s_algo ( gate ); break;
case ALGO_X22I: rc = register_x22i_algo ( gate ); break;
case ALGO_X25X: rc = register_x25x_algo ( gate ); break;
case ALGO_XEVAN: rc = register_xevan_algo ( gate ); break;
case ALGO_YESCRYPT: rc = register_yescrypt_algo ( gate ); break;
case ALGO_YESCRYPTR8: rc = register_yescryptr8_algo ( gate ); break;
case ALGO_YESCRYPTR8G: rc = register_yescryptr8g_algo ( gate ); break;
case ALGO_YESCRYPTR16: rc = register_yescryptr16_algo ( gate ); break;
case ALGO_YESCRYPTR32: rc = register_yescryptr32_algo ( gate ); break;
case ALGO_YESPOWER: rc = register_yespower_algo ( gate ); break;
case ALGO_YESPOWERR16: rc = register_yespowerr16_algo ( gate ); break;
case ALGO_YESPOWER_B2B: rc = register_yespower_b2b_algo ( gate ); break;
case ALGO_ZR5: rc = register_zr5_algo ( gate ); break;
default:
applog(LOG_ERR,"FAIL: algo_gate registration failed, unknown algo %s.\n", algo_names[opt_algo] );
applog(LOG_ERR,"BUG: unregistered algorithm %s.\n", algo_names[opt_algo] );
return false;
} // switch
// ensure required functions were defined.
if ( gate->scanhash == (void*)&null_scanhash )
if ( !rc )
{
applog(LOG_ERR, "FAIL: Required algo_gate functions undefined\n");
applog(LOG_ERR, "FAIL: %s algorithm failed to initialize\n", algo_names[opt_algo] );
return false;
}
return true;
@@ -263,30 +398,6 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
// restore warnings
#pragma GCC diagnostic pop
// override std defaults with jr2 defaults
bool register_json_rpc2( algo_gate_t *gate )
{
applog(LOG_WARNING,"\nCryptonight algorithm and variants are no longer");
applog(LOG_WARNING,"supported by cpuminer-opt. Shares submitted will");
applog(LOG_WARNING,"likely be rejected. Proceed at your own risk.\n");
gate->wait_for_diff = (void*)&do_nothing;
gate->get_new_work = (void*)&jr2_get_new_work;
gate->get_nonceptr = (void*)&jr2_get_nonceptr;
gate->stratum_gen_work = (void*)&jr2_stratum_gen_work;
gate->build_stratum_request = (void*)&jr2_build_stratum_request;
gate->submit_getwork_result = (void*)&jr2_submit_getwork_result;
gate->longpoll_rpc_call = (void*)&jr2_longpoll_rpc_call;
gate->work_decode = (void*)&jr2_work_decode;
gate->stratum_handle_response = (void*)&jr2_stratum_handle_response;
gate->nonce_index = JR2_NONCE_INDEX;
jsonrpc_2 = true; // still needed
opt_extranonce = false;
// have_gbt = false;
return true;
}
// run the alternate hash function for a specific algo
void exec_hash_function( int algo, void *output, const void *pdata )
{
algo_gate_t gate;
@@ -306,41 +417,34 @@ void exec_hash_function( int algo, void *output, const void *pdata )
const char* const algo_alias_map[][2] =
{
// alias proper
{ "argon2d-crds", "argon2d250" },
{ "argon2d-dyn", "argon2d500" },
{ "argon2d-uis", "argon2d4096" },
{ "bitcore", "timetravel10" },
{ "bitzeny", "yescryptr8" },
{ "blake256r8", "blakecoin" },
{ "blake256r8vnl", "vanilla" },
{ "blake256r14", "blake" },
{ "blake256r14dcr", "decred" },
{ "cryptonote", "cryptonight" },
{ "cryptonight-light", "cryptolight" },
{ "diamond", "dmd-gr" },
{ "droplp", "drop" },
{ "espers", "hmq1725" },
{ "flax", "c11" },
{ "hsr", "x13sm3" },
{ "jackpot", "jha" },
{ "jane", "scryptjane" },
{ "lyra2", "lyra2re" },
{ "lyra2v2", "lyra2rev2" },
{ "lyra2v3", "lyra2rev3" },
{ "lyra2zoin", "lyra2z330" },
{ "myrgr", "myr-gr" },
{ "myriad", "myr-gr" },
{ "neo", "neoscrypt" },
{ "phi", "phi1612" },
// { "sia", "blake2b" },
{ "sib", "x11gost" },
{ "timetravel8", "timetravel" },
{ "ziftr", "zr5" },
{ "yenten", "yescryptr16" },
{ "yescryptr8k", "yescrypt" },
{ "zcoin", "lyra2z" },
{ "zoin", "lyra2z330" },
{ NULL, NULL }
{ "argon2d-dyn", "argon2d500" },
{ "argon2d-uis", "argon2d4096" },
{ "bcd", "x13bcd" },
{ "bitcore", "timetravel10" },
{ "bitzeny", "yescryptr8" },
{ "blake256r8", "blakecoin" },
{ "blake256r8vnl", "vanilla" },
{ "blake256r14", "blake" },
{ "diamond", "dmd-gr" },
{ "espers", "hmq1725" },
{ "flax", "c11" },
{ "hsr", "x13sm3" },
{ "jackpot", "jha" },
{ "lyra2", "lyra2re" },
{ "lyra2v2", "lyra2rev2" },
{ "lyra2v3", "lyra2rev3" },
{ "myrgr", "myr-gr" },
{ "myriad", "myr-gr" },
{ "neo", "neoscrypt" },
{ "phi", "phi1612" },
{ "scryptn2", "scrypt:1048576" },
{ "sib", "x11gost" },
{ "timetravel8", "timetravel" },
{ "veil", "x16rt-veil" },
{ "x16r-hex", "hex" },
{ "yenten", "yescryptr16" },
{ "ziftr", "zr5" },
{ NULL, NULL }
};
// if arg is a valid alias for a known algo it is updated with the proper
@@ -353,7 +457,7 @@ void get_algo_alias( char** algo_or_alias )
if ( !strcasecmp( *algo_or_alias, algo_alias_map[i][ ALIAS ] ) )
{
// found valid alias, return proper name
*algo_or_alias = (char* const)( algo_alias_map[i][ PROPER ] );
*algo_or_alias = (char*)( algo_alias_map[i][ PROPER ] );
return;
}
}
@@ -361,40 +465,3 @@ void get_algo_alias( char** algo_or_alias )
#undef ALIAS
#undef PROPER
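// Hypothetical usage sketch of the alias lookup above:
// char *arg = "lyra2v3";     // user supplied alias from the table
// get_algo_alias( &arg );    // arg now points at the proper name "lyra2rev3"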
bool submit_solution( struct work *work, void *hash,
struct thr_info *thr )
{
work_set_target_ratio( work, hash );
if ( submit_work( thr, work ) )
{
if ( !opt_quiet )
applog( LOG_BLUE, "Share %d submitted by thread %d, job %s.",
accepted_share_count + rejected_share_count + 1,
thr->id, work->job_id );
return true;
}
else
applog( LOG_WARNING, "Failed to submit share." );
return false;
}
bool submit_lane_solution( struct work *work, void *hash,
struct thr_info *thr, int lane )
{
work_set_target_ratio( work, hash );
if ( submit_work( thr, work ) )
{
if ( !opt_quiet )
// applog( LOG_BLUE, "Share %d submitted by thread %d, lane %d.",
// accepted_share_count + rejected_share_count + 1,
// thr->id, lane );
applog( LOG_BLUE, "Share %d submitted by thread %d, lane %d, job %s.",
accepted_share_count + rejected_share_count + 1, thr->id,
lane, work->job_id );
return true;
}
else
applog( LOG_WARNING, "Failed to submit share." );
return false;
}

View File

@@ -1,3 +1,6 @@
#ifndef __ALGO_GATE_API_H__
#define __ALGO_GATE_API_H__ 1
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
@@ -35,7 +38,7 @@
// 6. Determine if other non-existent functions are required.
// That is determined by the need to add code in cpu-miner.c
// that applies only to the new algo. That is forbidden. All
// algo specific code must be in theh algo's file.
// algo specific code must be in the algo's file.
//
// 7. If new functions need to be added to the gate add the type
// to the structure, declare a null instance in this file and define
@@ -48,10 +51,10 @@
// instances as they are defined by default, or unsafe functions that
// are not needed by the algo.
//
// 9. Add an case entry to the switch/case in function register_gate
// 9. Add a case entry to the switch/case in function register_gate
// in file algo-gate-api.c for the new algo.
//
// 10 If a new function type was defined add an entry to ini talgo_gate
// 10 If a new function type was defined add an entry to init algo_gate
// to initialize the new function to its null instance described in step 7.
//
// 11. If the new algo has aliases add them to the alias array in
@@ -75,7 +78,7 @@
// my hack at creating a set data type using bit masks. Set inclusion,
// exclusion, union and intersection operations are provided for convenience.
// In some cases it may be desirable to use boolean algebra directly on the
// data to perfomr set operations. Sets can be represented as single
// data to perform set operations. Sets can be represented as single
// elements, a bitwise OR of multiple elements, a bitwise OR of multiple
// set variables or constants, or combinations of the above.
// Examples:
@@ -85,68 +88,90 @@
typedef uint32_t set_t;
#define EMPTY_SET 0
#define SSE2_OPT 1
#define AES_OPT 2
#define SSE42_OPT 4
#define AVX_OPT 8
#define AVX2_OPT 0x10
#define SHA_OPT 0x20
#define AVX512_OPT 0x40
#define EMPTY_SET 0
#define SSE2_OPT 1 // parity with NEON
#define SSSE3_OPT (1 << 1) // Intel Core2
#define SSE41_OPT (1 << 2)
#define SSE42_OPT (1 << 3)
#define AVX_OPT (1 << 4) // Intel Sandybridge
#define AVX2_OPT (1 << 5) // Intel Haswell, AMD Zen1
#define AVX512_OPT (1 << 6) // Skylake-X, Zen4 (AVX512[F,VL,DQ,BW])
#define AES_OPT (1 << 7) // Intel Westmere, AArch64
#define VAES_OPT (1 << 8) // Icelake, Zen3
#define SHA256_OPT (1 << 9) // Zen1, Icelake, AArch64
#define SHA512_OPT (1 << 10) // Intel Arrow Lake, AArch64
#define NEON_OPT (1 << 11) // AArch64
#define AVX10_256 (1 << 12)
#define AVX10_512 (1 << 13)
// AVX10 does not have explicit algo features:
// AVX10_512 is compatible with AVX512 + VAES
// AVX10_256 is compatible with AVX2 + VAES
// return set containing all elements from sets a & b
inline set_t set_union ( set_t a, set_t b ) { return a | b; }
static inline set_t set_union ( set_t a, set_t b ) { return a | b; }
// return set contained common elements from sets a & b
inline set_t set_intsec ( set_t a, set_t b) { return a & b; }
static inline set_t set_intsec ( set_t a, set_t b) { return a & b; }
// all elements in set a are included in set b
inline bool set_incl ( set_t a, set_t b ) { return (a & b) == a; }
static inline bool set_incl ( set_t a, set_t b ) { return (a & b) == a; }
// no elements in set a are included in set b
inline bool set_excl ( set_t a, set_t b ) { return (a & b) == 0; }
static inline bool set_excl ( set_t a, set_t b ) { return (a & b) == 0; }
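// Minimal usage sketch with the feature flags above (values illustrative):
// set_t cpu  = AVX2_OPT | AES_OPT | SHA256_OPT; // what the CPU supports
// set_t algo = AVX2_OPT | AES_OPT;              // what the algo can use
// set_incl( algo, cpu );     // true, all the algo's features are present
// set_excl( NEON_OPT, cpu ); // true, no overlap with NEON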
typedef struct
{
// special case, only one target, provides a callback for scanhash to
// submit work with less overhead.
// bool (*submit_work ) ( struct thr_info*, const struct work* );
// mandatory functions, must be overwritten
// Added a 5th arg for the thread_info structure to replace the int thr id
// in the first arg. Both will co-exist during the transition.
//int ( *scanhash ) ( int, struct work*, uint32_t, uint64_t* );
// Mandatory functions: at least one of these must be overwritten. If the
// generic scanhash is used a custom target hash function must be registered;
// with a custom scanhash the target hash function can be called directly and
// doesn't need to be registered with the gate.
int ( *scanhash ) ( struct work*, uint32_t, uint64_t*, struct thr_info* );
// optional unsafe, must be overwritten if algo uses function
void ( *hash ) ( void*, const void*, uint32_t ) ;
void ( *hash_suw ) ( void*, const void* );
int ( *hash ) ( void*, const void*, int );
//optional, safe to use default in most cases
bool ( *miner_thread_init ) ( int );
void ( *stratum_gen_work ) ( struct stratum_ctx*, struct work* );
void ( *get_new_work ) ( struct work*, struct work*, int, uint32_t*,
bool );
uint32_t *( *get_nonceptr ) ( uint32_t* );
void ( *decode_extra_data ) ( struct work*, uint64_t* );
void ( *wait_for_diff ) ( struct stratum_ctx* );
int64_t ( *get_max64 ) ();
bool ( *work_decode ) ( const json_t*, struct work* );
void ( *set_target) ( struct work*, double );
bool ( *submit_getwork_result ) ( CURL*, struct work* );
void ( *gen_merkle_root ) ( char*, struct stratum_ctx* );
void ( *build_extraheader ) ( struct work*, struct stratum_ctx* );
void ( *build_block_header ) ( struct work*, uint32_t, uint32_t*,
uint32_t*, uint32_t, uint32_t );
void ( *build_stratum_request ) ( char*, struct work*, struct stratum_ctx* );
char* ( *malloc_txs_request ) ( struct work* );
void ( *set_work_data_endian ) ( struct work* );
double ( *calc_network_diff ) ( struct work* );
bool ( *ready_to_mine ) ( struct work*, struct stratum_ctx*, int );
void ( *resync_threads ) ( struct work* );
bool ( *do_this_thread ) ( int );
json_t* (*longpoll_rpc_call) ( CURL*, int*, char* );
bool ( *stratum_handle_response )( json_t* );
// Called once by each miner thread to allocate thread local buffers and
// other initialization specific to miner threads.
bool ( *miner_thread_init ) ( int );
// Get thread local copy of blockheader with unique nonce.
void ( *get_new_work ) ( struct work*, struct work*, int, uint32_t* );
// Decode getwork blockheader
bool ( *work_decode ) ( struct work* );
// Extra getwork data
void ( *decode_extra_data ) ( struct work*, uint64_t* );
bool ( *submit_getwork_result ) ( CURL*, struct work* );
void ( *gen_merkle_root ) ( char*, struct stratum_ctx* );
// Increment extranonce
void ( *build_extraheader ) ( struct work*, struct stratum_ctx* );
void ( *build_block_header ) ( struct work*, uint32_t, uint32_t*,
uint32_t*, uint32_t, uint32_t,
unsigned char* );
// Build mining.submit message
void ( *build_stratum_request ) ( char*, struct work*, struct stratum_ctx* );
char* ( *malloc_txs_request ) ( struct work* );
// Big endian or little endian
void ( *set_work_data_endian ) ( struct work* );
// Diverge mining threads
bool ( *do_this_thread ) ( int );
// After do_this_thread
void ( *resync_threads ) ( int, struct work* );
json_t* ( *longpoll_rpc_call ) ( CURL*, int*, char* );
set_t optimizations;
int ( *get_work_data_size ) ();
int ntime_index;
@@ -184,91 +209,96 @@ void four_way_not_tested();
#define STD_WORK_DATA_SIZE 128
#define STD_WORK_CMP_SIZE 76
#define JR2_NONCE_INDEX 39 // 8 bit offset
//#define JR2_NONCE_INDEX 39 // 8 bit offset
// These indexes are only used with JSON RPC2 and are not gated.
#define JR2_WORK_CMP_INDEX_2 43
#define JR2_WORK_CMP_SIZE_2 33
//#define JR2_WORK_CMP_INDEX_2 43
//#define JR2_WORK_CMP_SIZE_2 33
// always returns failure
// deprecated, use generic instead
int null_scanhash();
// Allow algos to submit from scanhash loop.
bool submit_solution( struct work *work, void *hash,
struct thr_info *thr );
bool submit_lane_solution( struct work *work, void *hash,
struct thr_info *thr, int lane );
// Default generic, may be used in many cases.
// N-way is more complicated, requires many different implementations
// depending on architecture, input format, and output format.
// Naming convention is scanhash_[N]way_[input format]in_[output format]out
// N = number of lanes
// input/output format:
// 32: 32 bit interleaved parallel lanes
// 64: 64 bit interleaved parallel lanes
// 640: input only, not interleaved, contiguous serial 640 bit lanes.
// 256: output only, not interleaved, contiguous serial 256 bit lanes.
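// For example, reading the convention above: scanhash_4way_64in_32out is
// 4 lanes with 64 bit interleaved input and 32 bit interleaved output,
// while a hypothetical scanhash_8way_64in_256out would be 8 lanes of 64 bit
// interleaved input producing contiguous serial 256 bit output lanes.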
bool submit_work( struct thr_info *thr, const struct work *work_in );
int scanhash_generic( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#if defined(__AVX2__)
//int scanhash_4way_64in_64out( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr );
//int scanhash_4way_64in_256out( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr );
int scanhash_4way_64in_32out( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
//int scanhash_8way_32in_32out( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr );
#endif
#if defined(SIMD512)
//int scanhash_8way_64in_64out( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr );
//int scanhash_8way_64in_256out( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr );
int scanhash_8way_64in_32out( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
//int scanhash_16way_32in_32out( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done, struct thr_info *mythr );
#endif
// displays warning
void null_hash ();
void null_hash_suw();
int null_hash();
// optional safe targets, default listed first unless noted.
void std_wait_for_diff();
uint32_t *std_get_nonceptr( uint32_t *work_data );
uint32_t *jr2_get_nonceptr( uint32_t *work_data );
void std_get_new_work( struct work *work, struct work *g_work, int thr_id,
uint32_t* end_nonce_ptr, bool clean_job );
void jr2_get_new_work( struct work *work, struct work *g_work, int thr_id,
uint32_t* end_nonce_ptr );
void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *work );
void jr2_stratum_gen_work( struct stratum_ctx *sctx, struct work *work );
void sha256d_gen_merkle_root( char *merkle_root, struct stratum_ctx *sctx );
void SHA256_gen_merkle_root ( char *merkle_root, struct stratum_ctx *sctx );
void sha256_gen_merkle_root ( char *merkle_root, struct stratum_ctx *sctx );
// OpenSSL sha256 deprecated
//void SHA256_gen_merkle_root ( char *merkle_root, struct stratum_ctx *sctx );
// pick your favorite or define your own
int64_t get_max64_0x1fffffLL(); // default
int64_t get_max64_0x40LL();
int64_t get_max64_0x3ffff();
int64_t get_max64_0x3fffffLL();
int64_t get_max64_0x1ffff();
int64_t get_max64_0xffffLL();
void std_set_target( struct work *work, double job_diff );
void alt_set_target( struct work* work, double job_diff );
void scrypt_set_target( struct work *work, double job_diff );
bool std_le_work_decode( const json_t *val, struct work *work );
bool std_be_work_decode( const json_t *val, struct work *work );
bool jr2_work_decode( const json_t *val, struct work *work );
bool std_le_work_decode( struct work *work );
bool std_be_work_decode( struct work *work );
bool std_le_submit_getwork_result( CURL *curl, struct work *work );
bool std_be_submit_getwork_result( CURL *curl, struct work *work );
bool jr2_submit_getwork_result( CURL *curl, struct work *work );
void std_le_build_stratum_request( char *req, struct work *work );
void std_be_build_stratum_request( char *req, struct work *work );
void jr2_build_stratum_request ( char *req, struct work *work );
char* std_malloc_txs_request( struct work *work );
// Default is do_nothing (assumed LE)
// Default is do_nothing, little endian is assumed
void set_work_data_big_endian( struct work *work );
double std_calc_network_diff( struct work *work );
void std_build_block_header( struct work* g_work, uint32_t version,
uint32_t *prevhash, uint32_t *merkle_root,
uint32_t ntime, uint32_t nbits );
uint32_t *prevhash, uint32_t *merkle_root,
uint32_t ntime, uint32_t nbits,
unsigned char *final_sapling_hash );
void std_build_extraheader( struct work *work, struct stratum_ctx *sctx );
json_t* std_longpoll_rpc_call( CURL *curl, int *err, char *lp_url );
json_t* jr2_longpoll_rpc_call( CURL *curl, int *err );
bool std_stratum_handle_response( json_t *val );
bool jr2_stratum_handle_response( json_t *val );
bool std_ready_to_mine( struct work* work, struct stratum_ctx* stratum,
int thr_id );
int std_get_work_data_size();
@@ -278,19 +308,17 @@ int std_get_work_data_size();
// by calling the algo's register function.
bool register_algo_gate( int algo, algo_gate_t *gate );
// Override any default gate functions that are applicable and do any other
// algo-specific initialization.
// Called by algos to override any default gate functions that are applicable
// and do any other algo-specific initialization.
// The register functions for all the algos can be declared here to reduce
// compiler warnings but that's just more work for devs adding new algos.
bool register_algo( algo_gate_t *gate );
// Overrides a common set of functions used by RPC2 and other RPC2-specific
// init. Called by algo's register function before initializing algo-specific
// functions and data.
bool register_json_rpc2( algo_gate_t *gate );
// use this to call the hash function of an algo directly, ie util.c test.
void exec_hash_function( int algo, void *output, const void *pdata );
void get_algo_alias( char** algo_or_alias );
// Validate a string as a known algo or alias; updates the arg to the proper
// algo name if it is a valid alias, otherwise leaves it unchanged.
void get_algo_alias( char **algo_or_alias );
#endif

View File

@@ -1,249 +0,0 @@
/*
scrypt-jane by Andrew M, https://github.com/floodyberry/scrypt-jane
Public Domain or MIT License, whichever is easier
*/
#include <string.h>
#if defined( _WINDOWS )
#if !defined( QT_GUI )
extern "C" {
#endif
#endif
#include "ar2-scrypt-jane.h"
#include "sj/scrypt-jane-portable.h"
#include "sj/scrypt-jane-hash.h"
#include "sj/scrypt-jane-romix.h"
#include "sj/scrypt-jane-test-vectors.h"
#define scrypt_maxNfactor 30 /* (1 << (30 + 1)) = ~2 billion */
#if (SCRYPT_BLOCK_BYTES == 64)
#define scrypt_r_32kb 8 /* (1 << 8) = 256 * 2 blocks in a chunk * 64 bytes = Max of 32kb in a chunk */
#elif (SCRYPT_BLOCK_BYTES == 128)
#define scrypt_r_32kb 7 /* (1 << 7) = 128 * 2 blocks in a chunk * 128 bytes = Max of 32kb in a chunk */
#elif (SCRYPT_BLOCK_BYTES == 256)
#define scrypt_r_32kb 6 /* (1 << 6) = 64 * 2 blocks in a chunk * 256 bytes = Max of 32kb in a chunk */
#elif (SCRYPT_BLOCK_BYTES == 512)
#define scrypt_r_32kb 5 /* (1 << 5) = 32 * 2 blocks in a chunk * 512 bytes = Max of 32kb in a chunk */
#endif
#define scrypt_maxrfactor scrypt_r_32kb /* 32kb */
#define scrypt_maxpfactor 25 /* (1 << 25) = ~33 million */
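/* Worked example of the sizing above (illustrative): with
   SCRYPT_BLOCK_BYTES == 64, scrypt_maxrfactor == 8 gives r = (1 << 8) = 256
   and a chunk of 2 * r blocks = 512 blocks * 64 bytes = 32768 bytes, 32kb. */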
#include <stdio.h>
//#include <malloc.h>
static void NORETURN
scrypt_fatal_error_default(const char *msg) {
fprintf(stderr, "%s\n", msg);
exit(1);
}
static scrypt_fatal_errorfn scrypt_fatal_error = scrypt_fatal_error_default;
void scrypt_set_fatal_error(scrypt_fatal_errorfn fn) {
scrypt_fatal_error = fn;
}
static int scrypt_power_on_self_test(void)
{
const scrypt_test_setting *t;
uint8_t test_digest[64];
uint32_t i;
int res = 7, scrypt_valid;
if (!scrypt_test_mix()) {
#if !defined(SCRYPT_TEST)
scrypt_fatal_error("scrypt: mix function power-on-self-test failed");
#endif
res &= ~1;
}
if (!scrypt_test_hash()) {
#if !defined(SCRYPT_TEST)
scrypt_fatal_error("scrypt: hash function power-on-self-test failed");
#endif
res &= ~2;
}
for (i = 0, scrypt_valid = 1; post_settings[i].pw; i++) {
t = post_settings + i;
scrypt((uint8_t *)t->pw, strlen(t->pw), (uint8_t *)t->salt, strlen(t->salt), t->Nfactor, t->rfactor, t->pfactor, test_digest, sizeof(test_digest));
scrypt_valid &= scrypt_verify(post_vectors[i], test_digest, sizeof(test_digest));
}
if (!scrypt_valid) {
#if !defined(SCRYPT_TEST)
scrypt_fatal_error("scrypt: scrypt power-on-self-test failed");
#endif
res &= ~4;
}
return res;
}
typedef struct scrypt_aligned_alloc_t {
uint8_t *mem, *ptr;
} scrypt_aligned_alloc;
#ifdef SCRYPT_TEST_SPEED
static uint8_t *mem_base = (uint8_t *)0;
static size_t mem_bump = 0;
/* allocations are assumed to be multiples of 64 bytes and total allocations not to exceed ~1.01gb */
static scrypt_aligned_alloc scrypt_alloc(uint64_t size)
{
scrypt_aligned_alloc aa;
if (!mem_base) {
mem_base = (uint8_t *)malloc((1024 * 1024 * 1024) + (1024 * 1024) + (SCRYPT_BLOCK_BYTES - 1));
if (!mem_base)
scrypt_fatal_error("scrypt: out of memory");
mem_base = (uint8_t *)(((size_t)mem_base + (SCRYPT_BLOCK_BYTES - 1)) & ~(SCRYPT_BLOCK_BYTES - 1));
}
aa.mem = mem_base + mem_bump;
aa.ptr = aa.mem;
mem_bump += (size_t)size;
return aa;
}
static void scrypt_free(scrypt_aligned_alloc *aa) {
mem_bump = 0;
}
#else
static scrypt_aligned_alloc scrypt_alloc(uint64_t size)
{
static const size_t max_alloc = (size_t)-1;
scrypt_aligned_alloc aa;
size += (SCRYPT_BLOCK_BYTES - 1);
if (size > max_alloc)
scrypt_fatal_error("scrypt: not enough address space on this CPU to allocate required memory");
aa.mem = (uint8_t *)malloc((size_t)size);
aa.ptr = (uint8_t *)(((size_t)aa.mem + (SCRYPT_BLOCK_BYTES - 1)) & ~(SCRYPT_BLOCK_BYTES - 1));
if (!aa.mem)
scrypt_fatal_error("scrypt: out of memory");
return aa;
}
static void scrypt_free(scrypt_aligned_alloc *aa)
{
free(aa->mem);
}
#endif /* SCRYPT_TEST_SPEED */
void scrypt(const uint8_t *password, size_t password_len, const uint8_t *salt, size_t salt_len,
uint8_t Nfactor, uint8_t rfactor, uint8_t pfactor, uint8_t *out, size_t bytes)
{
scrypt_aligned_alloc YX, V;
uint8_t *X, *Y;
uint32_t N, r, p, chunk_bytes, i;
#if !defined(SCRYPT_CHOOSE_COMPILETIME)
scrypt_ROMixfn scrypt_ROMix = scrypt_getROMix();
#endif
#if !defined(SCRYPT_TEST)
static int power_on_self_test = 0;
if (!power_on_self_test) {
power_on_self_test = 1;
if (!scrypt_power_on_self_test())
scrypt_fatal_error("scrypt: power on self test failed");
}
#endif
if (Nfactor > scrypt_maxNfactor)
scrypt_fatal_error("scrypt: N out of range");
if (rfactor > scrypt_maxrfactor)
scrypt_fatal_error("scrypt: r out of range");
if (pfactor > scrypt_maxpfactor)
scrypt_fatal_error("scrypt: p out of range");
N = (1 << (Nfactor + 1));
r = (1 << rfactor);
p = (1 << pfactor);
chunk_bytes = SCRYPT_BLOCK_BYTES * r * 2;
V = scrypt_alloc((uint64_t)N * chunk_bytes);
YX = scrypt_alloc((p + 1) * chunk_bytes);
/* 1: X = PBKDF2(password, salt) */
Y = YX.ptr;
X = Y + chunk_bytes;
scrypt_pbkdf2(password, password_len, salt, salt_len, 1, X, chunk_bytes * p);
/* 2: X = ROMix(X) */
for (i = 0; i < p; i++)
scrypt_ROMix((scrypt_mix_word_t *)(X + (chunk_bytes * i)), (scrypt_mix_word_t *)Y, (scrypt_mix_word_t *)V.ptr, N, r);
/* 3: Out = PBKDF2(password, X) */
scrypt_pbkdf2(password, password_len, X, chunk_bytes * p, 1, out, bytes);
scrypt_ensure_zero(YX.ptr, (p + 1) * chunk_bytes);
scrypt_free(&V);
scrypt_free(&YX);
}
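/* The three numbered steps above are the standard scrypt construction:
   step 1 expands password and salt into p chunks with PBKDF2, step 2 mixes
   each chunk through N sequential memory-hard ROMix iterations, and step 3
   compresses the mixed chunks back into the requested output with PBKDF2. */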
#define Nfactor 8
#define rfactor 0
#define pfactor 0
#if (SCRYPT_BLOCK_BYTES == 64)
#define chunk_bytes 128
#elif (SCRYPT_BLOCK_BYTES == 128)
#define chunk_bytes 256
#elif (SCRYPT_BLOCK_BYTES == 256)
#define chunk_bytes 512
#elif (SCRYPT_BLOCK_BYTES == 512)
#define chunk_bytes 1024
#endif
void my_scrypt(const uint8_t *password, size_t password_len, const uint8_t *salt, size_t salt_len, uint8_t *out)
{
scrypt_aligned_alloc YX, V;
uint8_t *X, *Y;
#if !defined(SCRYPT_CHOOSE_COMPILETIME)
scrypt_ROMixfn scrypt_ROMix = scrypt_getROMix();
#endif
/*
#if !defined(SCRYPT_TEST)
static int power_on_self_test = 0;
if (!power_on_self_test) {
power_on_self_test = 1;
if (!scrypt_power_on_self_test())
scrypt_fatal_error("scrypt: power on self test failed");
}
#endif
*/
V = scrypt_alloc((uint64_t)512 * chunk_bytes);
YX = scrypt_alloc(2 * chunk_bytes);
/* 1: X = PBKDF2(password, salt) */
Y = YX.ptr;
X = Y + chunk_bytes;
scrypt_pbkdf2(password, password_len, salt, salt_len, 1, X, chunk_bytes);
/* 2: X = ROMix(X) */
scrypt_ROMix((scrypt_mix_word_t *)X, (scrypt_mix_word_t *)Y, (scrypt_mix_word_t *)V.ptr, 512, 1);
/* 3: Out = PBKDF2(password, X) */
scrypt_pbkdf2(password, password_len, X, chunk_bytes, 1, out, 32);
scrypt_ensure_zero(YX.ptr, 2 * chunk_bytes);
scrypt_free(&V);
scrypt_free(&YX);
}
#if defined( _WINDOWS )
#if !defined( QT_GUI )
} /* extern "C" */
#endif
#endif

View File

@@ -1,35 +0,0 @@
#ifndef AR2_SCRYPT_JANE_H
#define AR2_SCRYPT_JANE_H
#ifdef _MSC_VER
#undef SCRYPT_CHOOSE_COMPILETIME
#endif
//#define SCRYPT_TEST
#define SCRYPT_SKEIN512
#define SCRYPT_SALSA64
/*
Nfactor: Increases CPU & Memory Hardness
N = (1 << (Nfactor + 1)): How many times to mix a chunk and how many temporary chunks are used
rfactor: Increases Memory Hardness
r = (1 << rfactor): How large a chunk is
pfactor: Increases CPU Hardness
p = (1 << pfactor): Number of times to mix the main chunk
A block is the basic mixing unit (salsa/chacha block = 64 bytes)
A chunk is (2 * r) blocks
~Memory used = (N + 2) * ((2 * r) * block size)
*/
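/* Worked example (illustrative, using the formula above): ar2-scrypt-jane.c
   hard codes Nfactor 8, rfactor 0 and pfactor 0, so N = (1 << 9) = 512,
   r = p = 1, a chunk is 2 * 64 = 128 bytes, and memory used is roughly
   (512 + 2) * 128 bytes, about 64kb per hash. */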
#include <stdlib.h>
#include <stdint.h>
typedef void (*scrypt_fatal_errorfn)(const char *msg);
void scrypt_set_fatal_error(scrypt_fatal_errorfn fn);
void scrypt(const unsigned char *password, size_t password_len, const unsigned char *salt, size_t salt_len, unsigned char Nfactor, unsigned char rfactor, unsigned char pfactor, unsigned char *out, size_t bytes);
void my_scrypt(const uint8_t *password, size_t password_len, const uint8_t *salt, size_t salt_len, uint8_t *out);
#endif /* AR2_SCRYPT_JANE_H */

View File

@@ -1,284 +0,0 @@
/*
* Argon2 source code package
*
* Written by Daniel Dinu and Dmitry Khovratovich, 2015
*
* This work is licensed under a Creative Commons CC0 1.0 License/Waiver.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with
* this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <limits.h>
#include "argon2.h"
#include "cores.h"
/* Error messages */
static const char *Argon2_ErrorMessage[] = {
/*{ARGON2_OK, */ "OK",
/*},
{ARGON2_OUTPUT_PTR_NULL, */ "Output pointer is NULL",
/*},
{ARGON2_OUTPUT_TOO_SHORT, */ "Output is too short",
/*},
{ARGON2_OUTPUT_TOO_LONG, */ "Output is too long",
/*},
{ARGON2_PWD_TOO_SHORT, */ "Password is too short",
/*},
{ARGON2_PWD_TOO_LONG, */ "Password is too long",
/*},
{ARGON2_SALT_TOO_SHORT, */ "Salt is too short",
/*},
{ARGON2_SALT_TOO_LONG, */ "Salt is too long",
/*},
{ARGON2_AD_TOO_SHORT, */ "Associated data is too short",
/*},
{ARGON2_AD_TOO_LONG, */ "Associated data is too long",
/*},
{ARGON2_SECRET_TOO_SHORT, */ "Secret is too short",
/*},
{ARGON2_SECRET_TOO_LONG, */ "Secret is too long",
/*},
{ARGON2_TIME_TOO_SMALL, */ "Time cost is too small",
/*},
{ARGON2_TIME_TOO_LARGE, */ "Time cost is too large",
/*},
{ARGON2_MEMORY_TOO_LITTLE, */ "Memory cost is too small",
/*},
{ARGON2_MEMORY_TOO_MUCH, */ "Memory cost is too large",
/*},
{ARGON2_LANES_TOO_FEW, */ "Too few lanes",
/*},
{ARGON2_LANES_TOO_MANY, */ "Too many lanes",
/*},
{ARGON2_PWD_PTR_MISMATCH, */ "Password pointer is NULL, but password length is not 0",
/*},
{ARGON2_SALT_PTR_MISMATCH, */ "Salt pointer is NULL, but salt length is not 0",
/*},
{ARGON2_SECRET_PTR_MISMATCH, */ "Secret pointer is NULL, but secret length is not 0",
/*},
{ARGON2_AD_PTR_MISMATCH, */ "Associated data pointer is NULL, but ad length is not 0",
/*},
{ARGON2_MEMORY_ALLOCATION_ERROR, */ "Memory allocation error",
/*},
{ARGON2_FREE_MEMORY_CBK_NULL, */ "The free memory callback is NULL",
/*},
{ARGON2_ALLOCATE_MEMORY_CBK_NULL, */ "The allocate memory callback is NULL",
/*},
{ARGON2_INCORRECT_PARAMETER, */ "Argon2_Context context is NULL",
/*},
{ARGON2_INCORRECT_TYPE, */ "There is no such version of Argon2",
/*},
{ARGON2_OUT_PTR_MISMATCH, */ "Output pointer mismatch",
/*},
{ARGON2_THREADS_TOO_FEW, */ "Not enough threads",
/*},
{ARGON2_THREADS_TOO_MANY, */ "Too many threads",
/*},
{ARGON2_MISSING_ARGS, */ "Missing arguments", /*},*/
};
int argon2d(argon2_context *context) { return ar2_argon2_core(context, Argon2_d); }
int argon2i(argon2_context *context) { return ar2_argon2_core(context, Argon2_i); }
int ar2_verify_d(argon2_context *context, const char *hash)
{
int result;
/*if (0 == context->outlen || NULL == hash) {
return ARGON2_OUT_PTR_MISMATCH;
}*/
result = ar2_argon2_core(context, Argon2_d);
if (ARGON2_OK != result) {
return result;
}
return 0 == memcmp(hash, context->out, 32);
}
const char *error_message(int error_code)
{
enum {
/* Make sure---at compile time---that the enum size matches the array
size */
ERROR_STRING_CHECK =
1 /
!!((sizeof(Argon2_ErrorMessage) / sizeof(Argon2_ErrorMessage[0])) ==
ARGON2_ERROR_CODES_LENGTH)
};
if (error_code < ARGON2_ERROR_CODES_LENGTH) {
return Argon2_ErrorMessage[(argon2_error_codes)error_code];
}
return "Unknown error code.";
}
/* encoding/decoding helpers */
/*
* Some macros for constant-time comparisons. These work over values in
* the 0..255 range. Returned value is 0x00 on "false", 0xFF on "true".
*/
#define EQ(x, y) ((((0U - ((unsigned)(x) ^ (unsigned)(y))) >> 8) & 0xFF) ^ 0xFF)
#define GT(x, y) ((((unsigned)(y) - (unsigned)(x)) >> 8) & 0xFF)
#define GE(x, y) (GT(y, x) ^ 0xFF)
#define LT(x, y) GT(y, x)
#define LE(x, y) GE(y, x)
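/* Sanity sketch of the macros above (illustrative): x ^ y is zero only when
   x == y, so the subtraction 0U - (x ^ y) borrows into bits 8..15 only on
   inequality.
   EQ(5, 5) = ((0U >> 8) & 0xFF) ^ 0xFF = 0xFF (true);
   EQ(5, 6) = (((0U - 3) >> 8) & 0xFF) ^ 0xFF = 0xFF ^ 0xFF = 0x00 (false). */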
/*
* Convert value x (0..63) to corresponding Base64 character.
*/
static int b64_byte_to_char(unsigned x) {
//static inline int b64_byte_to_char(unsigned x) {
return (LT(x, 26) & (x + 'A')) |
(GE(x, 26) & LT(x, 52) & (x + ('a' - 26))) |
(GE(x, 52) & LT(x, 62) & (x + ('0' - 52))) | (EQ(x, 62) & '+') |
(EQ(x, 63) & '/');
}
/*
* Convert some bytes to Base64. 'dst_len' is the length (in characters)
* of the output buffer 'dst'; if that buffer is not large enough to
* receive the result (including the terminating 0), then (size_t)-1
* is returned. Otherwise, the zero-terminated Base64 string is written
* in the buffer, and the output length (counted WITHOUT the terminating
* zero) is returned.
*/
static size_t to_base64(char *dst, size_t dst_len, const void *src)
{
size_t olen;
const unsigned char *buf;
unsigned acc, acc_len;
olen = 43;
/*switch (32 % 3) {
case 2:
olen++;*/
/* fall through */
/*case 1:
olen += 2;
break;
}*/
if (dst_len <= olen) {
return (size_t)-1;
}
acc = 0;
acc_len = 0;
buf = (const unsigned char *)src;
size_t src_len = 32;
while (src_len-- > 0) {
acc = (acc << 8) + (*buf++);
acc_len += 8;
while (acc_len >= 6) {
acc_len -= 6;
*dst++ = b64_byte_to_char((acc >> acc_len) & 0x3F);
}
}
if (acc_len > 0) {
*dst++ = b64_byte_to_char((acc << (6 - acc_len)) & 0x3F);
}
*dst++ = 0;
return olen;
}
/* ==================================================================== */
/*
* Code specific to Argon2i.
*
* The code below applies the following format:
*
* $argon2i$m=<num>,t=<num>,p=<num>[,keyid=<bin>][,data=<bin>][$<bin>[$<bin>]]
*
* where <num> is a decimal integer (positive, fits in an 'unsigned long')
* and <bin> is Base64-encoded data (no '=' padding characters, no newline
* or whitespace). The "keyid" is a binary identifier for a key (up to 8
* bytes); "data" is associated data (up to 32 bytes). When the 'keyid'
* (resp. the 'data') is empty, then it is omitted from the output.
*
* The last two binary chunks (encoded in Base64) are, in that order,
* the salt and the output. Both are optional, but you cannot have an
* output without a salt. The binary salt length is between 8 and 48 bytes.
* The output length is always exactly 32 bytes.
*/
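/* An illustrative encoded string (Base64 values hypothetical): with the
   fixed m=16, t=2, p=1 written by ar2_encode_string below, it looks like
   $argon2i$m=16,t=2,p=1$<43-char salt>$<43-char output>
   since to_base64 above always encodes 32 bytes as 43 unpadded characters. */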
int ar2_encode_string(char *dst, size_t dst_len, argon2_context *ctx)
{
#define SS(str) \
do { \
size_t pp_len = strlen(str); \
if (pp_len >= dst_len) { \
return 0; \
} \
memcpy(dst, str, pp_len + 1); \
dst += pp_len; \
dst_len -= pp_len; \
} while (0)
#define SX(x) \
do { \
char tmp[30]; \
sprintf(tmp, "%lu", (unsigned long)(x)); \
SS(tmp); \
} while (0);
#define SB(buf) \
do { \
size_t sb_len = to_base64(dst, dst_len, buf); \
if (sb_len == (size_t)-1) { \
return 0; \
} \
dst += sb_len; \
dst_len -= sb_len; \
} while (0);
SS("$argon2i$m=");
SX(16);
SS(",t=");
SX(2);
SS(",p=");
SX(1);
/*if (ctx->adlen > 0) {
SS(",data=");
SB(ctx->ad, ctx->adlen);
}*/
/*if (ctx->saltlen == 0)
return 1;*/
SS("$");
SB(ctx->salt);
/*if (ctx->outlen32 == 0)
return 1;*/
SS("$");
SB(ctx->out);
return 1;
#undef SS
#undef SX
#undef SB
}

View File

@@ -1,292 +0,0 @@
/*
* Argon2 source code package
*
* Written by Daniel Dinu and Dmitry Khovratovich, 2015
*
* This work is licensed under a Creative Commons CC0 1.0 License/Waiver.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with
* this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#ifndef ARGON2_H
#define ARGON2_H
#include <stdint.h>
#include <stddef.h>
#include <limits.h>
#if defined(__cplusplus)
extern "C" {
#endif
/*************************Argon2 input parameter
* restrictions**************************************************/
/* Minimum and maximum number of lanes (degree of parallelism) */
#define ARGON2_MIN_LANES UINT32_C(1)
#define ARGON2_MAX_LANES UINT32_C(0xFFFFFF)
/* Minimum and maximum number of threads */
#define ARGON2_MIN_THREADS UINT32_C(1)
#define ARGON2_MAX_THREADS UINT32_C(0xFFFFFF)
/* Number of synchronization points between lanes per pass */
#define ARGON2_SYNC_POINTS UINT32_C(4)
/* Minimum and maximum digest size in bytes */
#define ARGON2_MIN_OUTLEN UINT32_C(4)
#define ARGON2_MAX_OUTLEN UINT32_C(0xFFFFFFFF)
/* Minimum and maximum number of memory blocks (each of BLOCK_SIZE bytes) */
#define ARGON2_MIN_MEMORY (2 * ARGON2_SYNC_POINTS) /* 2 blocks per slice */
#define ARGON2_MIN(a, b) ((a) < (b) ? (a) : (b))
/* Max memory size is half the addressing space, topping at 2^32 blocks (4 TB)
*/
#define ARGON2_MAX_MEMORY_BITS \
ARGON2_MIN(UINT32_C(32), (sizeof(void *) * CHAR_BIT - 10 - 1))
#define ARGON2_MAX_MEMORY \
ARGON2_MIN(UINT32_C(0xFFFFFFFF), UINT64_C(1) << ARGON2_MAX_MEMORY_BITS)
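/* Worked example (illustrative): on a typical 64 bit build
   sizeof(void *) * CHAR_BIT - 10 - 1 = 53, so ARGON2_MAX_MEMORY_BITS =
   min(32, 53) = 32 and ARGON2_MAX_MEMORY =
   min(0xFFFFFFFF, 1 << 32) = 0xFFFFFFFF blocks. */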
/* Minimum and maximum number of passes */
#define ARGON2_MIN_TIME UINT32_C(1)
#define ARGON2_MAX_TIME UINT32_C(0xFFFFFFFF)
/* Minimum and maximum password length in bytes */
#define ARGON2_MIN_PWD_LENGTH UINT32_C(0)
#define ARGON2_MAX_PWD_LENGTH UINT32_C(0xFFFFFFFF)
/* Minimum and maximum associated data length in bytes */
#define ARGON2_MIN_AD_LENGTH UINT32_C(0)
#define ARGON2_MAX_AD_LENGTH UINT32_C(0xFFFFFFFF)
/* Minimum and maximum salt length in bytes */
#define ARGON2_MIN_SALT_LENGTH UINT32_C(8)
#define ARGON2_MAX_SALT_LENGTH UINT32_C(0xFFFFFFFF)
/* Minimum and maximum key length in bytes */
#define ARGON2_MIN_SECRET UINT32_C(0)
#define ARGON2_MAX_SECRET UINT32_C(0xFFFFFFFF)
#define ARGON2_FLAG_CLEAR_PASSWORD (UINT32_C(1) << 0)
#define ARGON2_FLAG_CLEAR_SECRET (UINT32_C(1) << 1)
#define ARGON2_FLAG_CLEAR_MEMORY (UINT32_C(1) << 2)
#define ARGON2_DEFAULT_FLAGS \
(ARGON2_FLAG_CLEAR_PASSWORD | ARGON2_FLAG_CLEAR_MEMORY)
/* Error codes */
typedef enum Argon2_ErrorCodes {
ARGON2_OK = 0,
ARGON2_OUTPUT_PTR_NULL = 1,
ARGON2_OUTPUT_TOO_SHORT = 2,
ARGON2_OUTPUT_TOO_LONG = 3,
ARGON2_PWD_TOO_SHORT = 4,
ARGON2_PWD_TOO_LONG = 5,
ARGON2_SALT_TOO_SHORT = 6,
ARGON2_SALT_TOO_LONG = 7,
ARGON2_AD_TOO_SHORT = 8,
ARGON2_AD_TOO_LONG = 9,
ARGON2_SECRET_TOO_SHORT = 10,
ARGON2_SECRET_TOO_LONG = 11,
ARGON2_TIME_TOO_SMALL = 12,
ARGON2_TIME_TOO_LARGE = 13,
ARGON2_MEMORY_TOO_LITTLE = 14,
ARGON2_MEMORY_TOO_MUCH = 15,
ARGON2_LANES_TOO_FEW = 16,
ARGON2_LANES_TOO_MANY = 17,
ARGON2_PWD_PTR_MISMATCH = 18, /* NULL ptr with non-zero length */
ARGON2_SALT_PTR_MISMATCH = 19, /* NULL ptr with non-zero length */
ARGON2_SECRET_PTR_MISMATCH = 20, /* NULL ptr with non-zero length */
ARGON2_AD_PTR_MISMATCH = 21, /* NULL ptr with non-zero length */
ARGON2_MEMORY_ALLOCATION_ERROR = 22,
ARGON2_FREE_MEMORY_CBK_NULL = 23,
ARGON2_ALLOCATE_MEMORY_CBK_NULL = 24,
ARGON2_INCORRECT_PARAMETER = 25,
ARGON2_INCORRECT_TYPE = 26,
ARGON2_OUT_PTR_MISMATCH = 27,
ARGON2_THREADS_TOO_FEW = 28,
ARGON2_THREADS_TOO_MANY = 29,
ARGON2_MISSING_ARGS = 30,
ARGON2_ERROR_CODES_LENGTH /* Do NOT remove; Do NOT add error codes after
this error code */
} argon2_error_codes;
/* Memory allocator types --- for external allocation */
typedef int (*allocate_fptr)(uint8_t **memory, size_t bytes_to_allocate);
typedef void (*deallocate_fptr)(uint8_t *memory, size_t bytes_to_allocate);
/* Argon2 external data structures */
/*
*****Context: structure to hold Argon2 inputs:
* output array and its length,
* password and its length,
* salt and its length,
* secret and its length,
* associated data and its length,
* number of passes, amount of used memory (in KBytes, can be rounded up a bit)
* number of parallel threads that will be run.
* All the parameters above affect the output hash value.
* Additionally, two function pointers can be provided to allocate and
deallocate the memory (if NULL, memory will be allocated internally).
* Also, three flags indicate whether to erase the password and the secret as
soon as they are pre-hashed (and thus not needed anymore), and whether to
erase the entire memory.
****************************
Simplest situation: you have output array out[8], password is stored in
pwd[32], salt is stored in salt[16], you do not have keys nor associated data.
You need to spend 1 GB of RAM and you run 5 passes of Argon2d with 4 parallel
lanes.
You want to erase the password, but you're OK with last pass not being erased.
You want to use the default memory allocator.
*/
typedef struct Argon2_Context {
uint8_t *out; /* output array */
uint8_t *pwd; /* password array */
uint8_t *salt; /* salt array */
/*uint8_t *secret;*/ /* key array */
/*uint8_t *ad;*/ /* associated data array */
allocate_fptr allocate_cbk; /* pointer to memory allocator */
deallocate_fptr free_cbk; /* pointer to memory deallocator */
/*uint32_t outlen;*/ /* digest length */
uint32_t pwdlen; /* password length */
/*uint32_t saltlen;*/ /* salt length */
/*uint32_t secretlen;*/ /* key length */
/*uint32_t adlen;*/ /* associated data length */
/*uint32_t t_cost;*/ /* number of passes */
/*uint32_t m_cost;*/ /* amount of memory requested (KB) */
/*uint32_t lanes;*/ /* number of lanes */
/*uint32_t threads;*/ /* maximum number of threads */
/*uint32_t flags;*/ /* array of bool options */
} argon2_context;
/**
* Function to hash the inputs in the memory-hard fashion (uses Argon2i)
* @param out Pointer to the memory where the hash digest will be written
* @param outlen Digest length in bytes
* @param in Pointer to the input (password)
* @param inlen Input length in bytes
* @param salt Pointer to the salt
* @param saltlen Salt length in bytes
* @pre @a out must have at least @a outlen bytes allocated
* @pre @a in must be at least @a inlen bytes long
* @pre @a salt must be at least @a saltlen bytes long
* @return Zero if successful, 1 otherwise.
*/
/*int hash_argon2i(void *out, size_t outlen, const void *in, size_t inlen,
const void *salt, size_t saltlen, unsigned int t_cost,
unsigned int m_cost);*/
/* same for argon2d */
/*int hash_argon2d(void *out, size_t outlen, const void *in, size_t inlen,
const void *salt, size_t saltlen, unsigned int t_cost,
unsigned int m_cost);*/
/*
* **************Argon2d: Version of Argon2 that picks memory blocks depending
* on the password and salt. Only for side-channel-free
* environment!!***************
* @param context Pointer to current Argon2 context
* @return Zero if successful, a non zero error code otherwise
*/
int argon2d(argon2_context *context);
/*
* * **************Argon2i: Version of Argon2 that picks memory blocks
*independently of the password and salt. Good against side-channel attacks,
******************* but worse w.r.t. tradeoff attacks if
*******************only one pass is used***************
* @param context Pointer to current Argon2 context
* @return Zero if successful, a non zero error code otherwise
*/
int argon2i(argon2_context *context);
/*
* * **************Argon2di: Reserved name***************
* @param context Pointer to current Argon2 context
* @return Zero if successful, a non zero error code otherwise
*/
int argon2di(argon2_context *context);
/*
* * **************Argon2ds: Argon2d hardened against GPU attacks, 20%
* slower***************
* @param context Pointer to current Argon2 context
* @return Zero if successful, a non zero error code otherwise
*/
int argon2ds(argon2_context *context);
/*
* * **************Argon2id: First half-pass over memory is
*password-independent, the rest are password-dependent
********************OK against side channels: they reduce to 1/2-pass
*Argon2i***************
* @param context Pointer to current Argon2 context
* @return Zero if successful, a non zero error code otherwise
*/
int argon2id(argon2_context *context);
/*
* Verify if a given password is correct for Argon2d hashing
* @param context Pointer to current Argon2 context
* @param hash The password hash to verify. The length of the hash is
* specified by the context outlen member
* @return Zero if successful, a non zero error code otherwise
*/
int ar2_verify_d(argon2_context *context, const char *hash);
/*
* Get the associated error message for given error code
* @return The error message associated with the given error code
*/
const char *error_message(int error_code);
/* ==================================================================== */
/*
* Code specific to Argon2i.
*
* The code below applies the following format:
*
* $argon2i$m=<num>,t=<num>,p=<num>[,keyid=<bin>][,data=<bin>][$<bin>[$<bin>]]
*
* where <num> is a decimal integer (positive, fits in an 'unsigned long')
* and <bin> is Base64-encoded data (no '=' padding characters, no newline
* or whitespace). The "keyid" is a binary identifier for a key (up to 8
* bytes); "data" is associated data (up to 32 bytes). When the 'keyid'
* (resp. the 'data') is empty, then it is omitted from the output.
*
* The last two binary chunks (encoded in Base64) are, in that order,
* the salt and the output. Both are optional, but you cannot have an
* output without a salt. The binary salt length is between 8 and 48 bytes.
* The output length is always exactly 32 bytes.
*/
int ar2_encode_string(char *dst, size_t dst_len, argon2_context *ctx);
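/*
 * For illustration only, a full encoded string under this format (with
 * empty keyid/data, hence omitted) would look like:
 *
 *   $argon2i$m=65536,t=2,p=1$<base64-salt>$<base64-output>
 *
 * where the two placeholders stand for the Base64-encoded salt and the
 * 32-byte output described above.
 */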
#if defined(__cplusplus)
}
#endif
#endif

View File

@@ -1,114 +0,0 @@
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#ifdef _MSC_VER
#include <intrin.h>
#endif
#include "argon2.h"
static uint64_t rdtsc(void)
{
#ifdef _MSC_VER
return __rdtsc();
#else
#if defined(__amd64__) || defined(__x86_64__)
uint64_t rax, rdx;
__asm__ __volatile__("rdtsc" : "=a"(rax), "=d"(rdx) : :);
return (rdx << 32) | rax;
#elif defined(__i386__) || defined(__i386) || defined(__X86__)
uint64_t rax;
__asm__ __volatile__("rdtsc" : "=A"(rax) : :);
return rax;
#else
#error "Not implemented!"
#endif
#endif
}
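/*
 * Note: rdtsc is not a serializing instruction, so out-of-order execution
 * can skew short measurements; the cycle counts below are approximate.
 */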
/*
 * Benchmarks Argon2 with salt length 16, password length 16, t_cost 1,
 * and different m_cost and threads
 */
static void benchmark(void)
{
#define BENCH_OUTLEN 16
#define BENCH_INLEN 16
const uint32_t inlen = BENCH_INLEN;
const unsigned outlen = BENCH_OUTLEN;
unsigned char out[BENCH_OUTLEN];
unsigned char pwd_array[BENCH_INLEN];
unsigned char salt_array[BENCH_INLEN];
#undef BENCH_INLEN
#undef BENCH_OUTLEN
uint32_t t_cost = 1;
uint32_t m_cost;
uint32_t thread_test[6] = {1, 2, 4, 6, 8, 16};
memset(pwd_array, 0, inlen);
memset(salt_array, 1, inlen);
for (m_cost = (uint32_t)1 << 10; m_cost <= (uint32_t)1 << 22; m_cost *= 2) {
unsigned i;
for (i = 0; i < 6; ++i) {
argon2_context context;
uint32_t thread_n = thread_test[i];
uint64_t stop_cycles, stop_cycles_i;
clock_t stop_time;
uint64_t delta_d, delta_i;
double mcycles_d, mcycles_i, run_time;
clock_t start_time = clock();
uint64_t start_cycles = rdtsc();
context.out = out;
context.outlen = outlen;
context.pwd = pwd_array;
context.pwdlen = inlen;
context.salt = salt_array;
context.saltlen = inlen;
context.secret = NULL;
context.secretlen = 0;
context.ad = NULL;
context.adlen = 0;
context.t_cost = t_cost;
context.m_cost = m_cost;
context.lanes = thread_n;
context.threads = thread_n;
context.allocate_cbk = NULL;
context.free_cbk = NULL;
context.flags = 0;
argon2d(&context);
stop_cycles = rdtsc();
argon2i(&context);
stop_cycles_i = rdtsc();
stop_time = clock();
delta_d = (stop_cycles - start_cycles) / (m_cost);
delta_i = (stop_cycles_i - stop_cycles) / (m_cost);
mcycles_d = (double)(stop_cycles - start_cycles) / (1UL << 20);
mcycles_i = (double)(stop_cycles_i - stop_cycles) / (1UL << 20);
printf("Argon2d %d iterations %d MiB %d threads: %2.2f cpb %2.2f "
"Mcycles \n",
t_cost, m_cost >> 10, thread_n, (float)delta_d / 1024,
mcycles_d);
printf("Argon2i %d iterations %d MiB %d threads: %2.2f cpb %2.2f "
"Mcycles \n",
t_cost, m_cost >> 10, thread_n, (float)delta_i / 1024,
mcycles_i);
run_time = ((double)stop_time - start_time) / (CLOCKS_PER_SEC);
printf("%2.4f seconds\n\n", run_time);
}
}
}
int main(void)
{
benchmark();
return ARGON2_OK;
}

View File

@@ -1,143 +0,0 @@
#ifndef PORTABLE_BLAKE2_IMPL_H
#define PORTABLE_BLAKE2_IMPL_H
#include <stdint.h>
#include <string.h>
#if defined(_MSC_VER)
#define BLAKE2_INLINE __inline
#elif defined(__GNUC__) || defined(__clang__)
#define BLAKE2_INLINE __inline__
#else
#define BLAKE2_INLINE
#endif
/* Argon2 Team - Begin Code */
/*
Not an exhaustive list, but should cover the majority of modern platforms
Additionally, the code will always be correct---this is only a performance
tweak.
*/
#if (defined(__BYTE_ORDER__) && \
(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || \
defined(__LITTLE_ENDIAN__) || defined(__ARMEL__) || defined(__MIPSEL__) || \
defined(__AARCH64EL__) || defined(__amd64__) || defined(__i386__) || \
defined(_M_IX86) || defined(_M_X64) || defined(_M_AMD64) || \
defined(_M_ARM)
#define NATIVE_LITTLE_ENDIAN
#endif
/* Argon2 Team - End Code */
static BLAKE2_INLINE uint32_t load32(const void *src) {
#if defined(NATIVE_LITTLE_ENDIAN)
uint32_t w;
memcpy(&w, src, sizeof w);
return w;
#else
const uint8_t *p = (const uint8_t *)src;
uint32_t w = *p++;
w |= (uint32_t)(*p++) << 8;
w |= (uint32_t)(*p++) << 16;
w |= (uint32_t)(*p++) << 24;
return w;
#endif
}
static BLAKE2_INLINE uint64_t load64(const void *src) {
#if defined(NATIVE_LITTLE_ENDIAN)
uint64_t w;
memcpy(&w, src, sizeof w);
return w;
#else
const uint8_t *p = (const uint8_t *)src;
uint64_t w = *p++;
w |= (uint64_t)(*p++) << 8;
w |= (uint64_t)(*p++) << 16;
w |= (uint64_t)(*p++) << 24;
w |= (uint64_t)(*p++) << 32;
w |= (uint64_t)(*p++) << 40;
w |= (uint64_t)(*p++) << 48;
w |= (uint64_t)(*p++) << 56;
return w;
#endif
}
static BLAKE2_INLINE void store32(void *dst, uint32_t w) {
#if defined(NATIVE_LITTLE_ENDIAN)
memcpy(dst, &w, sizeof w);
#else
uint8_t *p = (uint8_t *)dst;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
#endif
}
static BLAKE2_INLINE void store64(void *dst, uint64_t w) {
#if defined(NATIVE_LITTLE_ENDIAN)
memcpy(dst, &w, sizeof w);
#else
uint8_t *p = (uint8_t *)dst;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
#endif
}
static BLAKE2_INLINE uint64_t load48(const void *src) {
const uint8_t *p = (const uint8_t *)src;
uint64_t w = *p++;
w |= (uint64_t)(*p++) << 8;
w |= (uint64_t)(*p++) << 16;
w |= (uint64_t)(*p++) << 24;
w |= (uint64_t)(*p++) << 32;
w |= (uint64_t)(*p++) << 40;
return w;
}
static BLAKE2_INLINE void store48(void *dst, uint64_t w) {
uint8_t *p = (uint8_t *)dst;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
w >>= 8;
*p++ = (uint8_t)w;
}
static BLAKE2_INLINE uint32_t rotr32(const uint32_t w, const unsigned c) {
return (w >> c) | (w << (32 - c));
}
static BLAKE2_INLINE uint64_t rotr64(const uint64_t w, const unsigned c) {
return (w >> c) | (w << (64 - c));
}
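/*
 * Sanity sketch (illustrative, not part of the original sources): the
 * load/store helpers are exact inverses on every platform, and the
 * rotations expect 0 < c < width:
 *
 *   uint8_t buf[8];
 *   store64(buf, UINT64_C(0x0123456789abcdef));
 *   assert(load64(buf) == UINT64_C(0x0123456789abcdef));
 *   assert(rotr32(0x80000000u, 31) == 1u);
 */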
/* prevents compiler optimizing out memset() */
static BLAKE2_INLINE void burn(void *v, size_t n) {
static void *(*const volatile memset_v)(void *, int, size_t) = &memset;
memset_v(v, 0, n);
}
#endif

View File

@@ -1,76 +0,0 @@
#ifndef PORTABLE_BLAKE2_H
#define PORTABLE_BLAKE2_H
#include <stddef.h>
#include <stdint.h>
#include <limits.h>
#if defined(__cplusplus)
extern "C" {
#endif
enum blake2b_constant {
BLAKE2B_BLOCKBYTES = 128,
BLAKE2B_OUTBYTES = 64,
BLAKE2B_KEYBYTES = 64,
BLAKE2B_SALTBYTES = 16,
BLAKE2B_PERSONALBYTES = 16
};
#pragma pack(push, 1)
typedef struct __blake2b_param {
uint8_t digest_length; /* 1 */
uint8_t key_length; /* 2 */
uint8_t fanout; /* 3 */
uint8_t depth; /* 4 */
uint32_t leaf_length; /* 8 */
uint64_t node_offset; /* 16 */
uint8_t node_depth; /* 17 */
uint8_t inner_length; /* 18 */
uint8_t reserved[14]; /* 32 */
uint8_t salt[BLAKE2B_SALTBYTES]; /* 48 */
uint8_t personal[BLAKE2B_PERSONALBYTES]; /* 64 */
} blake2b_param;
#pragma pack(pop)
typedef struct __blake2b_state {
uint64_t h[8];
uint64_t t[2];
uint64_t f[2];
unsigned buflen;
unsigned outlen;
uint8_t last_node;
uint8_t buf[BLAKE2B_BLOCKBYTES];
} blake2b_state;
/* Ensure param structs have not been wrongly padded */
/* Poor man's static_assert */
enum {
blake2_size_check_0 = 1 / !!(CHAR_BIT == 8),
blake2_size_check_2 =
1 / !!(sizeof(blake2b_param) == sizeof(uint64_t) * CHAR_BIT)
};
/* Streaming API */
int ar2_blake2b_init(blake2b_state *S, size_t outlen);
int ar2_blake2b_init_key(blake2b_state *S, size_t outlen, const void *key,
size_t keylen);
int ar2_blake2b_init_param(blake2b_state *S, const blake2b_param *P);
int ar2_blake2b_update(blake2b_state *S, const void *in, size_t inlen);
void my_blake2b_update(blake2b_state *S, const void *in, size_t inlen);
int ar2_blake2b_final(blake2b_state *S, void *out, size_t outlen);
/* Simple API */
int ar2_blake2b(void *out, const void *in, const void *key, size_t keylen);
/* Argon2 Team - Begin Code */
int ar2_blake2b_long(void *out, const void *in);
/* Argon2 Team - End Code */
/* Miouyouyou */
void ar2_blake2b_too(void *out, const void *in);
#if defined(__cplusplus)
}
#endif
#endif

View File

@@ -1,162 +0,0 @@
#ifndef BLAKE_ROUND_MKA_OPT_H
#define BLAKE_ROUND_MKA_OPT_H
#include "blake2-impl.h"
#if defined(_MSC_VER)
#include <intrin.h>
#endif
#include <immintrin.h>
#if defined(__XOP__) && (defined(__GNUC__) || defined(__clang__))
#include <x86intrin.h>
#endif
#if !defined(__XOP__)
#if defined(__SSSE3__)
#define r16 \
(_mm_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9))
#define r24 \
(_mm_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10))
#define _mm_roti_epi64(x, c) \
(-(c) == 32) \
? _mm_shuffle_epi32((x), _MM_SHUFFLE(2, 3, 0, 1)) \
: (-(c) == 24) \
? _mm_shuffle_epi8((x), r24) \
: (-(c) == 16) \
? _mm_shuffle_epi8((x), r16) \
: (-(c) == 63) \
? _mm_xor_si128(_mm_srli_epi64((x), -(c)), \
_mm_add_epi64((x), (x))) \
: _mm_xor_si128(_mm_srli_epi64((x), -(c)), \
_mm_slli_epi64((x), 64 - (-(c))))
#else /* defined(__SSE2__) */
#define _mm_roti_epi64(r, c) \
_mm_xor_si128(_mm_srli_epi64((r), -(c)), _mm_slli_epi64((r), 64 - (-(c))))
#endif
#else
#endif
static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
const __m128i z = _mm_mul_epu32(x, y);
return _mm_add_epi64(_mm_add_epi64(x, y), _mm_add_epi64(z, z));
}
#define G1(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = fBlaMka(A0, B0); \
A1 = fBlaMka(A1, B1); \
\
D0 = _mm_xor_si128(D0, A0); \
D1 = _mm_xor_si128(D1, A1); \
\
D0 = _mm_roti_epi64(D0, -32); \
D1 = _mm_roti_epi64(D1, -32); \
\
C0 = fBlaMka(C0, D0); \
C1 = fBlaMka(C1, D1); \
\
B0 = _mm_xor_si128(B0, C0); \
B1 = _mm_xor_si128(B1, C1); \
\
B0 = _mm_roti_epi64(B0, -24); \
B1 = _mm_roti_epi64(B1, -24); \
} while ((void)0, 0)
#define G2(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = fBlaMka(A0, B0); \
A1 = fBlaMka(A1, B1); \
\
D0 = _mm_xor_si128(D0, A0); \
D1 = _mm_xor_si128(D1, A1); \
\
D0 = _mm_roti_epi64(D0, -16); \
D1 = _mm_roti_epi64(D1, -16); \
\
C0 = fBlaMka(C0, D0); \
C1 = fBlaMka(C1, D1); \
\
B0 = _mm_xor_si128(B0, C0); \
B1 = _mm_xor_si128(B1, C1); \
\
B0 = _mm_roti_epi64(B0, -63); \
B1 = _mm_roti_epi64(B1, -63); \
} while ((void)0, 0)
#if defined(__SSSE3__)
#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m128i t0 = _mm_alignr_epi8(B1, B0, 8); \
__m128i t1 = _mm_alignr_epi8(B0, B1, 8); \
B0 = t0; \
B1 = t1; \
\
t0 = C0; \
C0 = C1; \
C1 = t0; \
\
t0 = _mm_alignr_epi8(D1, D0, 8); \
t1 = _mm_alignr_epi8(D0, D1, 8); \
D0 = t1; \
D1 = t0; \
} while ((void)0, 0)
#define UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m128i t0 = _mm_alignr_epi8(B0, B1, 8); \
__m128i t1 = _mm_alignr_epi8(B1, B0, 8); \
B0 = t0; \
B1 = t1; \
\
t0 = C0; \
C0 = C1; \
C1 = t0; \
\
t0 = _mm_alignr_epi8(D0, D1, 8); \
t1 = _mm_alignr_epi8(D1, D0, 8); \
D0 = t1; \
D1 = t0; \
} while ((void)0, 0)
#else /* SSE2 */
#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m128i t0 = D0; \
__m128i t1 = B0; \
D0 = C0; \
C0 = C1; \
C1 = D0; \
D0 = _mm_unpackhi_epi64(D1, _mm_unpacklo_epi64(t0, t0)); \
D1 = _mm_unpackhi_epi64(t0, _mm_unpacklo_epi64(D1, D1)); \
B0 = _mm_unpackhi_epi64(B0, _mm_unpacklo_epi64(B1, B1)); \
B1 = _mm_unpackhi_epi64(B1, _mm_unpacklo_epi64(t1, t1)); \
} while ((void)0, 0)
#define UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m128i t0 = C0; \
C0 = C1; \
C1 = t0; \
t0 = B0; \
__m128i t1 = D0; \
B0 = _mm_unpackhi_epi64(B1, _mm_unpacklo_epi64(B0, B0)); \
B1 = _mm_unpackhi_epi64(t0, _mm_unpacklo_epi64(B1, B1)); \
D0 = _mm_unpackhi_epi64(D0, _mm_unpacklo_epi64(D1, D1)); \
D1 = _mm_unpackhi_epi64(D1, _mm_unpacklo_epi64(t1, t1)); \
} while ((void)0, 0)
#endif
#define BLAKE2_ROUND(A0, A1, B0, B1, C0, C1, D0, D1) \
do { \
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
\
G1(A0, B0, C0, D0, A1, B1, C1, D1); \
G2(A0, B0, C0, D0, A1, B1, C1, D1); \
\
UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1); \
} while ((void)0, 0)
#endif

View File

@@ -1,39 +0,0 @@
#ifndef BLAKE_ROUND_MKA_H
#define BLAKE_ROUND_MKA_H
#include "blake2.h"
#include "blake2-impl.h"
/* Designed by the Lyra PHC team */
static BLAKE2_INLINE uint64_t fBlaMka(uint64_t x, uint64_t y) {
const uint64_t m = UINT64_C(0xFFFFFFFF);
const uint64_t xy = (x & m) * (y & m);
return x + y + 2 * xy;
}
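/*
 * fBlaMka computes x + y + 2 * lo32(x) * lo32(y). For example, x = 2 and
 * y = 3 give 2 + 3 + 2 * 6 = 17.
 */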
#define G(a, b, c, d) \
do { \
a = fBlaMka(a, b); \
d = rotr64(d ^ a, 32); \
c = fBlaMka(c, d); \
b = rotr64(b ^ c, 24); \
a = fBlaMka(a, b); \
d = rotr64(d ^ a, 16); \
c = fBlaMka(c, d); \
b = rotr64(b ^ c, 63); \
} while ((void)0, 0)
#define BLAKE2_ROUND_NOMSG(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, \
v12, v13, v14, v15) \
do { \
G(v0, v4, v8, v12); \
G(v1, v5, v9, v13); \
G(v2, v6, v10, v14); \
G(v3, v7, v11, v15); \
G(v0, v5, v10, v15); \
G(v1, v6, v11, v12); \
G(v2, v7, v8, v13); \
G(v3, v4, v9, v14); \
} while ((void)0, 0)
#endif

View File

@@ -1,316 +0,0 @@
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <inttypes.h>
#include "blake2/blake2.h"
#include "blake2/blake2-impl.h"
#if defined(_MSC_VER) && !defined(PRIu64)
/* Fallback for MSVC toolchains lacking the <inttypes.h> format macros; note
   these must not include the leading '%', which the format strings supply */
#define PRIu64 "llu"
#define PRIx64 "llx"
#endif
static const uint64_t blake2b_IV[8] = {
UINT64_C(0x6a09e667f3bcc908), UINT64_C(0xbb67ae8584caa73b),
UINT64_C(0x3c6ef372fe94f82b), UINT64_C(0xa54ff53a5f1d36f1),
UINT64_C(0x510e527fade682d1), UINT64_C(0x9b05688c2b3e6c1f),
UINT64_C(0x1f83d9abfb41bd6b), UINT64_C(0x5be0cd19137e2179)
};
static const unsigned int blake2b_sigma[12][16] = {
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
{14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3},
{11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4},
{7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8},
{9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13},
{2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9},
{12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11},
{13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10},
{6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5},
{10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
{14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3},
};
static BLAKE2_INLINE void blake2b_set_lastnode(blake2b_state *S) {
S->f[1] = (uint64_t)-1;
}
static BLAKE2_INLINE void blake2b_set_lastblock(blake2b_state *S) {
if (S->last_node) {
blake2b_set_lastnode(S);
}
S->f[0] = (uint64_t)-1;
}
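/* 128-bit message counter: the carry into t[1] fires when t[0] wraps */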
static BLAKE2_INLINE void blake2b_increment_counter(blake2b_state *S, uint64_t inc) {
S->t[0] += inc;
S->t[1] += (S->t[0] < inc);
}
static BLAKE2_INLINE void blake2b_invalidate_state(blake2b_state *S) {
burn(S, sizeof(*S)); /* wipe */
blake2b_set_lastblock(S); /* invalidate for further use */
}
static BLAKE2_INLINE void blake2b_init0(blake2b_state *S) {
memset(S, 0, sizeof(*S));
memcpy(S->h, blake2b_IV, sizeof(S->h));
}
/*
void print_state(blake2b_state BlakeHash)
{
printf(".h = {UINT64_C(%" PRIu64 "), UINT64_C(%" PRIu64 "),\n"
"UINT64_C(%" PRIu64 "), UINT64_C(%" PRIu64 "),\n"
"UINT64_C(%" PRIu64 "), UINT64_C(%" PRIu64 "),\n"
"UINT64_C(%" PRIu64 "), UINT64_C(%" PRIu64 ")},\n"
".t = {UINT64_C(%" PRIu64 "), UINT64_C(%" PRIu64 ")},\n"
".f = {UINT64_C(%" PRIu64 "), UINT64_C(%" PRIu64 ")}\n",
BlakeHash.h[0], BlakeHash.h[1], BlakeHash.h[2], BlakeHash.h[3],
BlakeHash.h[4], BlakeHash.h[5], BlakeHash.h[6], BlakeHash.h[7],
BlakeHash.t[0], BlakeHash.t[1],
BlakeHash.f[0], BlakeHash.f[1]);
printf(".buf = {");
for (register uint8_t i = 0; i < BLAKE2B_BLOCKBYTES; i++)
printf("%" PRIu8 ", ", BlakeHash.buf[i]);
puts("\n");
printf("}\n.buflen = %d\n.outlen = %d\n",
BlakeHash.buflen, BlakeHash.outlen);
printf(".last_node = %" PRIu8 "\n", BlakeHash.last_node);
fflush(stdout);
}
*/
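/*
 * Precomputed blake2b init state, apparently dumped via print_state()
 * above: the IV with the parameter block (fanout = depth = 1, no key)
 * XORed into h[0], but with the digest-length byte still zero, so
 * ar2_blake2b_init() only has to add the output length.
 */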
static const blake2b_state miou = {
.h = {
UINT64_C(7640891576939301128), UINT64_C(13503953896175478587),
UINT64_C(4354685564936845355), UINT64_C(11912009170470909681),
UINT64_C(5840696475078001361), UINT64_C(11170449401992604703),
UINT64_C(2270897969802886507), UINT64_C(6620516959819538809)
},
.t = {UINT64_C(0), UINT64_C(0)},
.f = {UINT64_C(0), UINT64_C(0)},
.buf = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
},
.buflen = 0,
.outlen = 64,
.last_node = 0
};
int ar2_blake2b_init_param(blake2b_state *S, const blake2b_param *P)
{
const unsigned char *p = (const unsigned char *)P;
unsigned int i;
if (NULL == P || NULL == S) {
return -1;
}
blake2b_init0(S);
/* IV XOR Parameter Block */
for (i = 0; i < 8; ++i) {
S->h[i] ^= load64(&p[i * sizeof(S->h[i])]);
}
S->outlen = P->digest_length;
return 0;
}
void compare_buffs(uint64_t *h, size_t outlen)
{
// printf("CMP : %d", memcmp(h, miou.h, 8*(sizeof(uint64_t))));
printf("miou : %" PRIu64 " - h : %" PRIu64 " - outlen : %ld\n", miou.h[0], h[0], outlen);
fflush(stdout);
}
/* Sequential blake2b initialization */
int ar2_blake2b_init(blake2b_state *S, size_t outlen)
{
memcpy(S, &miou, sizeof(*S));
S->h[0] += outlen;
return 0;
}
void print64(const char *name, const uint64_t *array, uint16_t size)
{
printf("%s = {", name);
for (uint16_t i = 0; i < size; i++) printf("UINT64_C(%" PRIu64 "), ", array[i]);
printf("};\n");
}
int ar2_blake2b_init_key(blake2b_state *S, size_t outlen, const void *key, size_t keylen)
{
/* Keyed initialization is unused by this Argon2 port; intentional no-op. */
(void)S; (void)outlen; (void)key; (void)keylen;
return 0;
}
static void blake2b_compress(blake2b_state *S, const uint8_t *block)
{
uint64_t m[16];
uint64_t v[16];
unsigned int i, r;
for (i = 0; i < 16; ++i) {
m[i] = load64(block + i * 8);
}
for (i = 0; i < 8; ++i) {
v[i] = S->h[i];
}
v[8] = blake2b_IV[0];
v[9] = blake2b_IV[1];
v[10] = blake2b_IV[2];
v[11] = blake2b_IV[3];
v[12] = blake2b_IV[4] ^ S->t[0];
v[13] = blake2b_IV[5]/* ^ S->t[1]*/;
v[14] = blake2b_IV[6] ^ S->f[0];
v[15] = blake2b_IV[7]/* ^ S->f[1]*/;
#define G(r, i, a, b, c, d) \
do { \
a = a + b + m[blake2b_sigma[r][2 * i + 0]]; \
d = rotr64(d ^ a, 32); \
c = c + d; \
b = rotr64(b ^ c, 24); \
a = a + b + m[blake2b_sigma[r][2 * i + 1]]; \
d = rotr64(d ^ a, 16); \
c = c + d; \
b = rotr64(b ^ c, 63); \
} while ((void)0, 0)
#define ROUND(r) \
do { \
G(r, 0, v[0], v[4], v[8], v[12]); \
G(r, 1, v[1], v[5], v[9], v[13]); \
G(r, 2, v[2], v[6], v[10], v[14]); \
G(r, 3, v[3], v[7], v[11], v[15]); \
G(r, 4, v[0], v[5], v[10], v[15]); \
G(r, 5, v[1], v[6], v[11], v[12]); \
G(r, 6, v[2], v[7], v[8], v[13]); \
G(r, 7, v[3], v[4], v[9], v[14]); \
} while ((void)0, 0)
for (r = 0; r < 12; ++r) ROUND(r);
for (i = 0; i < 8; ++i) S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
#undef G
#undef ROUND
}
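/*
 * Specialized update for this port: it assumes exactly 1024 input bytes
 * with 4 bytes already buffered (buflen == 4), so inlen is ignored:
 * 124 bytes complete the first block, seven full blocks follow, and the
 * final 4 bytes stay buffered for ar2_blake2b_final().
 */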
int ar2_blake2b_update(blake2b_state *S, const void *in, size_t inlen)
{
const uint8_t *pin = (const uint8_t *)in;
/* Complete current block */
memcpy(&S->buf[4], pin, 124);
blake2b_increment_counter(S, BLAKE2B_BLOCKBYTES);
blake2b_compress(S, S->buf);
S->buflen = 0;
pin += 124;
register int8_t i = 7;
/* Avoid buffer copies when possible */
while (i--) {
blake2b_increment_counter(S, BLAKE2B_BLOCKBYTES);
blake2b_compress(S, pin);
pin += BLAKE2B_BLOCKBYTES;
}
memcpy(&S->buf[S->buflen], pin, 4);
S->buflen += 4;
return 0;
}
void my_blake2b_update(blake2b_state *S, const void *in, size_t inlen)
{
memcpy(&S->buf[S->buflen], in, inlen);
S->buflen += (unsigned int)inlen;
}
int ar2_blake2b_final(blake2b_state *S, void *out, size_t outlen)
{
uint8_t buffer[BLAKE2B_OUTBYTES] = {0};
unsigned int i;
blake2b_increment_counter(S, S->buflen);
blake2b_set_lastblock(S);
memset(&S->buf[S->buflen], 0, BLAKE2B_BLOCKBYTES - S->buflen); /* Padding */
blake2b_compress(S, S->buf);
for (i = 0; i < 8; ++i) { /* Output full hash to temp buffer */
store64(buffer + sizeof(S->h[i]) * i, S->h[i]);
}
memcpy(out, buffer, S->outlen);
burn(buffer, sizeof(buffer));
burn(S->buf, sizeof(S->buf));
burn(S->h, sizeof(S->h));
return 0;
}
int ar2_blake2b(void *out, const void *in, const void *key, size_t keylen)
{
blake2b_state S;
ar2_blake2b_init(&S, 64);
my_blake2b_update(&S, in, 64);
ar2_blake2b_final(&S, out, 64);
burn(&S, sizeof(S));
return 0;
}
void ar2_blake2b_too(void *pout, const void *in)
{
uint8_t *out = (uint8_t *)pout;
uint8_t out_buffer[64];
uint8_t in_buffer[64];
blake2b_state blake_state;
ar2_blake2b_init(&blake_state, 64);
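/* buf[0..3] = {0, 4, 0, 0} encodes 1024 (the total output length) in
little-endian; buflen = 4 accounts for those buffered bytes */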
blake_state.buflen = blake_state.buf[1] = 4;
my_blake2b_update(&blake_state, in, 72);
ar2_blake2b_final(&blake_state, out_buffer, 64);
memcpy(out, out_buffer, 32);
out += 32;
register uint8_t i = 29;
while (i--) {
memcpy(in_buffer, out_buffer, 64);
ar2_blake2b(out_buffer, in_buffer, NULL, 0);
memcpy(out, out_buffer, 32);
out += 32;
}
memcpy(in_buffer, out_buffer, 64);
ar2_blake2b(out_buffer, in_buffer, NULL, 0);
memcpy(out, out_buffer, 64);
burn(&blake_state, sizeof(blake_state));
}
/* Argon2 Team - Begin Code */
int ar2_blake2b_long(void *pout, const void *in)
{
uint8_t *out = (uint8_t *)pout;
blake2b_state blake_state;
uint8_t outlen_bytes[sizeof(uint32_t)] = {0};
store32(outlen_bytes, 32);
ar2_blake2b_init(&blake_state, 32);
my_blake2b_update(&blake_state, outlen_bytes, sizeof(outlen_bytes));
ar2_blake2b_update(&blake_state, in, 1024);
ar2_blake2b_final(&blake_state, out, 32);
burn(&blake_state, sizeof(blake_state));
return 0;
}
/* Argon2 Team - End Code */

View File

@@ -1,349 +0,0 @@
/*
* Argon2 source code package
*
* Written by Daniel Dinu and Dmitry Khovratovich, 2015
*
* This work is licensed under a Creative Commons CC0 1.0 License/Waiver.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with
* this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
/*For memory wiping*/
#ifdef _MSC_VER
#include <windows.h>
#include <winbase.h> /* For SecureZeroMemory */
#endif
#if defined __STDC_LIB_EXT1__
#define __STDC_WANT_LIB_EXT1__ 1
#endif
#define VC_GE_2005(version) (version >= 1400)
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "argon2.h"
#include "cores.h"
#include "blake2/blake2.h"
#include "blake2/blake2-impl.h"
#ifdef GENKAT
#include "genkat.h"
#endif
#if defined(__clang__)
#if __has_attribute(optnone)
#define NOT_OPTIMIZED __attribute__((optnone))
#endif
#elif defined(__GNUC__)
#define GCC_VERSION \
(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 40400
#define NOT_OPTIMIZED __attribute__((optimize("O0")))
#endif
#endif
#ifndef NOT_OPTIMIZED
#define NOT_OPTIMIZED
#endif
/***************Instance and Position constructors**********/
void ar2_init_block_value(block *b, uint8_t in) { memset(b->v, in, sizeof(b->v)); }
//inline void init_block_value(block *b, uint8_t in) { memset(b->v, in, sizeof(b->v)); }
void ar2_copy_block(block *dst, const block *src) {
//inline void copy_block(block *dst, const block *src) {
memcpy(dst->v, src->v, sizeof(uint64_t) * ARGON2_WORDS_IN_BLOCK);
}
void ar2_xor_block(block *dst, const block *src) {
//inline void xor_block(block *dst, const block *src) {
int i;
for (i = 0; i < ARGON2_WORDS_IN_BLOCK; ++i) {
dst->v[i] ^= src->v[i];
}
}
static void ar2_load_block(block *dst, const void *input) {
//static inline void load_block(block *dst, const void *input) {
unsigned i;
for (i = 0; i < ARGON2_WORDS_IN_BLOCK; ++i) {
dst->v[i] = load64((const uint8_t *)input + i * sizeof(dst->v[i]));
}
}
static void ar2_store_block(void *output, const block *src) {
//static inline void store_block(void *output, const block *src) {
unsigned i;
for (i = 0; i < ARGON2_WORDS_IN_BLOCK; ++i) {
store64((uint8_t *)output + i * sizeof(src->v[i]), src->v[i]);
}
}
/***************Memory allocators*****************/
int ar2_allocate_memory(block **memory, uint32_t m_cost) {
if (memory != NULL) {
size_t memory_size = sizeof(block) * m_cost;
if (m_cost != 0 &&
memory_size / m_cost !=
sizeof(block)) { /*1. Check for multiplication overflow*/
return ARGON2_MEMORY_ALLOCATION_ERROR;
}
*memory = (block *)malloc(memory_size); /*2. Try to allocate*/
if (!*memory) {
return ARGON2_MEMORY_ALLOCATION_ERROR;
}
return ARGON2_OK;
} else {
return ARGON2_MEMORY_ALLOCATION_ERROR;
}
}
void ar2_secure_wipe_memory(void *v, size_t n) { memset(v, 0, n); }
//inline void secure_wipe_memory(void *v, size_t n) { memset(v, 0, n); }
/*********Memory functions*/
void ar2_clear_memory(argon2_instance_t *instance, int clear) {
//inline void clear_memory(argon2_instance_t *instance, int clear) {
if (instance->memory != NULL && clear) {
ar2_secure_wipe_memory(instance->memory,
sizeof(block) * /*instance->memory_blocks*/16);
}
}
void ar2_free_memory(block *memory) { free(memory); }
//inline void free_memory(block *memory) { free(memory); }
void ar2_finalize(const argon2_context *context, argon2_instance_t *instance) {
if (context != NULL && instance != NULL) {
block blockhash;
ar2_copy_block(&blockhash, instance->memory + 15);
/* Hash the result */
{
uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
ar2_store_block(blockhash_bytes, &blockhash);
ar2_blake2b_long(context->out, blockhash_bytes);
ar2_secure_wipe_memory(blockhash.v, ARGON2_BLOCK_SIZE);
ar2_secure_wipe_memory(blockhash_bytes, ARGON2_BLOCK_SIZE); /* clear blockhash_bytes */
}
#ifdef GENKAT
print_tag(context->out, context->outlen);
#endif
/* Clear memory */
// clear_memory(instance, 1);
ar2_free_memory(instance->memory);
}
}
uint32_t ar2_index_alpha(const argon2_instance_t *instance,
const argon2_position_t *position, uint32_t pseudo_rand,
int same_lane) {
/*
* Pass 0:
* This lane : all already finished segments plus already constructed
* blocks in this segment
* Other lanes : all already finished segments
* Pass 1+:
* This lane : (SYNC_POINTS - 1) last segments plus already constructed
* blocks in this segment
* Other lanes : (SYNC_POINTS - 1) last segments
*/
uint32_t reference_area_size;
uint64_t relative_position;
uint32_t start_position, absolute_position;
if (0 == position->pass) {
/* First pass */
if (0 == position->slice) {
/* First slice */
reference_area_size =
position->index - 1; /* all but the previous */
} else {
if (same_lane) {
/* The same lane => add current segment */
reference_area_size =
position->slice * 4 +
position->index - 1;
} else {
reference_area_size =
position->slice * 4 +
((position->index == 0) ? (-1) : 0);
}
}
} else {
/* Second pass */
if (same_lane) {
reference_area_size = 11 + position->index;
} else {
reference_area_size = 12 - (position->index == 0);
}
}
/* 1.2.4. Mapping pseudo_rand to 0..<reference_area_size-1> and produce
* relative position */
relative_position = pseudo_rand;
relative_position = relative_position * relative_position >> 32;
relative_position = reference_area_size - 1 -
(reference_area_size * relative_position >> 32);
/* 1.2.5 Computing starting position */
start_position = 0;
if (0 != position->pass) {
start_position = (position->slice == ARGON2_SYNC_POINTS - 1)
? 0 : (position->slice + 1) * 4;
}
/* 1.2.6. Computing absolute position */
absolute_position = (start_position + relative_position) % 16;
return absolute_position;
}
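/*
 * Worked example (illustrative): with reference_area_size = 12 and
 * pseudo_rand = 0x80000000, relative_position = (2^31 * 2^31) >> 32 = 2^30
 * and the result is 12 - 1 - ((12 * 2^30) >> 32) = 11 - 3 = 8.
 */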
void ar2_fill_memory_blocks(argon2_instance_t *instance) {
uint32_t r, s;
for (r = 0; r < 2; ++r) {
for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
argon2_position_t position;
position.pass = r;
position.lane = 0;
position.slice = (uint8_t)s;
position.index = 0;
ar2_fill_segment(instance, position);
}
#ifdef GENKAT
internal_kat(instance, r); /* Print all memory blocks */
#endif
}
}
void ar2_fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance) {
/* Make the first and second block in each lane as G(H0||i||0) or
G(H0||i||1) */
uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 0);
store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH + 4, 0);
ar2_blake2b_too(blockhash_bytes, blockhash);
ar2_load_block(&instance->memory[0], blockhash_bytes);
store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 1);
ar2_blake2b_too(blockhash_bytes, blockhash);
ar2_load_block(&instance->memory[1], blockhash_bytes);
ar2_secure_wipe_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
}
static const blake2b_state base_hash = {
.h = {
UINT64_C(7640891576939301192), UINT64_C(13503953896175478587),
UINT64_C(4354685564936845355), UINT64_C(11912009170470909681),
UINT64_C(5840696475078001361), UINT64_C(11170449401992604703),
UINT64_C(2270897969802886507), UINT64_C(6620516959819538809)
},
.t = {UINT64_C(0),UINT64_C(0)},
.f = {UINT64_C(0),UINT64_C(0)},
.buf = {
1, 0, 0, 0, 32, 0, 0, 0, 16, 0, 0, 0, 2, 0, 0, 0, 16, 0, 0, 0, 1, 0,
0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
.buflen = 28,
.outlen = 64,
.last_node = 0
};
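/*
 * The 28 buffered bytes above appear to be the seven little-endian 32-bit
 * prefix fields of the Argon2 initial hash: lanes = 1, outlen = 32,
 * m_cost = 16, t_cost = 2, version = 0x10, type (patched in at buf[20]
 * below) and pwdlen = 32.
 */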
#define PWDLEN 32
#define SALTLEN 32
#define SECRETLEN 0
#define ADLEN 0
void ar2_initial_hash(uint8_t *blockhash, argon2_context *context,
argon2_type type) {
uint8_t value[sizeof(uint32_t)];
/* Does copying this shared template state cause cache invalidation between cores? */
blake2b_state BlakeHash = base_hash;
BlakeHash.buf[20] = (uint8_t) type;
my_blake2b_update(&BlakeHash, (const uint8_t *)context->pwd,
PWDLEN);
ar2_secure_wipe_memory(context->pwd, PWDLEN);
context->pwdlen = 0;
store32(&value, SALTLEN);
my_blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
my_blake2b_update(&BlakeHash, (const uint8_t *)context->salt,
SALTLEN);
store32(&value, SECRETLEN);
my_blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
store32(&value, ADLEN);
my_blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
ar2_blake2b_final(&BlakeHash, blockhash, ARGON2_PREHASH_DIGEST_LENGTH);
}
int ar2_initialize(argon2_instance_t *instance, argon2_context *context) {
/* 1. Memory allocation */
ar2_allocate_memory(&(instance->memory), 16);
/* 2. Initial hashing */
/* H_0 + 8 extra bytes to produce the first blocks */
/* Hashing all inputs */
uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH];
ar2_initial_hash(blockhash, context, instance->type);
/* Zeroing 8 extra bytes */
ar2_secure_wipe_memory(blockhash + ARGON2_PREHASH_DIGEST_LENGTH,
ARGON2_PREHASH_SEED_LENGTH -
ARGON2_PREHASH_DIGEST_LENGTH);
#ifdef GENKAT
initial_kat(blockhash, context, instance->type);
#endif
/* 3. Creating first blocks, we always have at least two blocks in a slice
*/
ar2_fill_first_blocks(blockhash, instance);
/* Clearing the hash */
ar2_secure_wipe_memory(blockhash, ARGON2_PREHASH_SEED_LENGTH);
return ARGON2_OK;
}
int ar2_argon2_core(argon2_context *context, argon2_type type) {
argon2_instance_t instance;
instance.memory = NULL;
instance.type = type;
/* 3. Initialization: Hashing inputs, allocating memory, filling first
* blocks
*/
int result = ar2_initialize(&instance, context);
if (ARGON2_OK != result) return result;
/* 4. Filling memory */
ar2_fill_memory_blocks(&instance);
/* 5. Finalization */
ar2_finalize(context, &instance);
return ARGON2_OK;
}

View File

@@ -1,216 +0,0 @@
/*
* Argon2 source code package
*
* Written by Daniel Dinu and Dmitry Khovratovich, 2015
*
* This work is licensed under a Creative Commons CC0 1.0 License/Waiver.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with
* this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#ifndef ARGON2_CORES_H
#define ARGON2_CORES_H
#if defined(_MSC_VER)
#include <Windows.h>
#include <process.h>
#define ALIGN(n) __declspec(align(n))
#elif defined(__GNUC__) || defined(__clang__)
#define ALIGN(x) __attribute__((__aligned__(x)))
#else
#define ALIGN(x)
#endif
/************************* Argon2 internal constants *************************/
enum argon2_core_constants {
/* Version of the algorithm */
ARGON2_VERSION_NUMBER = 0x10,
/* Memory block size in bytes */
ARGON2_BLOCK_SIZE = 1024,
ARGON2_WORDS_IN_BLOCK = ARGON2_BLOCK_SIZE / 8,
ARGON2_QWORDS_IN_BLOCK = 64,
/* Number of pseudo-random values generated by one call to Blake in Argon2i
to generate reference block positions */
/* Pre-hashing digest length and its extension*/
ARGON2_PREHASH_DIGEST_LENGTH = 64,
ARGON2_PREHASH_SEED_LENGTH = 72
};
/* Argon2 primitive type */
typedef enum Argon2_type { Argon2_d = 0, Argon2_i = 1 } argon2_type;
/************************* Argon2 internal data types *************************/
/*
* Structure for the (1KB) memory block implemented as 128 64-bit words.
* Memory blocks can be copied, XORed. Internal words can be accessed by [] (no
* bounds checking).
*/
typedef struct _block { uint64_t v[ARGON2_WORDS_IN_BLOCK]; } ALIGN(16) block;
/*****************Functions that work with the block******************/
/* Initialize each byte of the block with @in */
void ar2_init_block_value(block *b, uint8_t in);
/* Copy block @src to block @dst */
void ar2_copy_block(block *dst, const block *src);
/* XOR @src onto @dst bytewise */
void ar2_xor_block(block *dst, const block *src);
/*
* Argon2 instance: memory pointer, number of passes, amount of memory, type,
* and derived values.
* Used to evaluate the number and location of blocks to construct in each
* thread
*/
typedef struct Argon2_instance_t {
block *memory; /* Memory pointer */
argon2_type type;
int print_internals; /* whether to print the memory blocks */
} argon2_instance_t;
/*
* Argon2 position: where we construct the block right now. Used to distribute
* work between threads.
*/
typedef struct Argon2_position_t {
uint32_t pass;
uint32_t lane;
uint8_t slice;
uint32_t index;
} argon2_position_t;
/************************* Argon2 core functions *************************/
/* Allocates memory to the given pointer
* @param memory pointer to the pointer to the memory
* @param m_cost number of blocks to allocate in the memory
* @return ARGON2_OK if @memory is a valid pointer and memory is allocated
*/
int ar2_allocate_memory(block **memory, uint32_t m_cost);
/* Function that securely cleans the memory
* @param mem Pointer to the memory
* @param s Memory size in bytes
*/
void ar2_secure_wipe_memory(void *v, size_t n);
/* Clears memory
* @param instance pointer to the current instance
* @param clear_memory indicates if we clear the memory with zeros.
*/
void ar2_clear_memory(argon2_instance_t *instance, int clear);
/* Deallocates memory
* @param memory pointer to the blocks
*/
void ar2_free_memory(block *memory);
/*
* Computes absolute position of reference block in the lane following a skewed
* distribution and using a pseudo-random value as input
* @param instance Pointer to the current instance
* @param position Pointer to the current position
* @param pseudo_rand 32-bit pseudo-random value used to determine the position
* @param same_lane Indicates if the block will be taken from the current lane.
* If so we can reference the current segment
* @pre All pointers must be valid
*/
uint32_t ar2_index_alpha(const argon2_instance_t *instance,
const argon2_position_t *position, uint32_t pseudo_rand,
int same_lane);
/*
 * Function that validates all inputs against predefined restrictions and
 * returns an error code
 * @param context Pointer to current Argon2 context
 * @return ARGON2_OK if everything is all right, otherwise one of the error
 * codes (all defined in <argon2.h>)
*/
int ar2_validate_inputs(const argon2_context *context);
/*
* Hashes all the inputs into @a blockhash[PREHASH_DIGEST_LENGTH], clears
* password and secret if needed
* @param context Pointer to the Argon2 internal structure containing memory
* pointer, and parameters for time and space requirements.
* @param blockhash Buffer for pre-hashing digest
* @param type Argon2 type
* @pre @a blockhash must have at least @a PREHASH_DIGEST_LENGTH bytes
* allocated
*/
void ar2_initial_hash(uint8_t *blockhash, argon2_context *context,
argon2_type type);
/*
* Function creates first 2 blocks per lane
* @param instance Pointer to the current instance
* @param blockhash Pointer to the pre-hashing digest
* @pre blockhash must point to @a PREHASH_SEED_LENGTH allocated values
*/
void ar2_fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance);
/*
* Function allocates memory, hashes the inputs with Blake, and creates first
* two blocks. Returns the pointer to the main memory with 2 blocks per lane
* initialized
* @param context Pointer to the Argon2 internal structure containing memory
* pointer, and parameters for time and space requirements.
* @param instance Current Argon2 instance
 * @return Zero if successful, -1 if memory failed to allocate.
 * @a context->state will be modified if successful.
*/
int ar2_initialize(argon2_instance_t *instance, argon2_context *context);
/*
* XORing the last block of each lane, hashing it, making the tag. Deallocates
* the memory.
* @param context Pointer to current Argon2 context (use only the out parameters
* from it)
* @param instance Pointer to current instance of Argon2
* @pre instance->state must point to necessary amount of memory
* @pre context->out must point to outlen bytes of memory
* @pre if context->free_cbk is not NULL, it should point to a function that
* deallocates memory
*/
void ar2_finalize(const argon2_context *context, argon2_instance_t *instance);
/*
* Function that fills the segment using previous segments also from other
* threads
* @param instance Pointer to the current instance
* @param position Current position
* @pre all block pointers must be valid
*/
void ar2_fill_segment(const argon2_instance_t *instance,
argon2_position_t position);
/*
* Function that fills the entire memory t_cost times based on the first two
* blocks in each lane
* @param instance Pointer to the current instance
*/
void ar2_fill_memory_blocks(argon2_instance_t *instance);
/*
* Function that performs memory-hard hashing with certain degree of parallelism
* @param context Pointer to the Argon2 internal structure
 * @return Error code if something is wrong, ARGON2_OK otherwise
*/
int ar2_argon2_core(argon2_context *context, argon2_type type);
#endif

View File

@@ -1,186 +0,0 @@
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "argon2.h"
#include "cores.h"
void initial_kat(const uint8_t *blockhash, const argon2_context *context,
argon2_type type)
{
unsigned i;
if (blockhash != NULL && context != NULL) {
printf("=======================================");
switch (type) {
case Argon2_d:
printf("Argon2d\n");
break;
case Argon2_i:
printf("Argon2i\n");
break;
default:
break;
}
printf("Memory: %u KiB, Iterations: %u, Parallelism: %u lanes, Tag "
"length: %u bytes\n",
context->m_cost, context->t_cost, context->lanes,
context->outlen);
printf("Password[%u]: ", context->pwdlen);
if (context->flags & ARGON2_FLAG_CLEAR_PASSWORD) {
printf("CLEARED\n");
} else {
for (i = 0; i < context->pwdlen; ++i) {
printf("%2.2x ", ((unsigned char *)context->pwd)[i]);
}
printf("\n");
}
printf("Salt[%u]: ", context->saltlen);
for (i = 0; i < context->saltlen; ++i) {
printf("%2.2x ", ((unsigned char *)context->salt)[i]);
}
printf("\n");
printf("Secret[%u]: ", context->secretlen);
if (context->flags & ARGON2_FLAG_CLEAR_SECRET) {
printf("CLEARED\n");
} else {
for (i = 0; i < context->secretlen; ++i) {
printf("%2.2x ", ((unsigned char *)context->secret)[i]);
}
printf("\n");
}
printf("Associated data[%u]: ", context->adlen);
for (i = 0; i < context->adlen; ++i) {
printf("%2.2x ", ((unsigned char *)context->ad)[i]);
}
printf("\n");
printf("Pre-hashing digest: ");
for (i = 0; i < ARGON2_PREHASH_DIGEST_LENGTH; ++i) {
printf("%2.2x ", ((unsigned char *)blockhash)[i]);
}
printf("\n");
}
}
void print_tag(const void *out, uint32_t outlen)
{
unsigned i;
if (out != NULL) {
printf("Tag: ");
for (i = 0; i < outlen; ++i) {
printf("%2.2x ", ((uint8_t *)out)[i]);
}
printf("\n");
}
}
void internal_kat(const argon2_instance_t *instance, uint32_t pass)
{
if (instance != NULL) {
uint32_t i, j;
printf("\n After pass %u:\n", pass);
for (i = 0; i < instance->memory_blocks; ++i) {
uint32_t how_many_words =
(instance->memory_blocks > ARGON2_WORDS_IN_BLOCK)
? 1
: ARGON2_WORDS_IN_BLOCK;
for (j = 0; j < how_many_words; ++j)
printf("Block %.4u [%3u]: %016" PRIx64 "\n", i, j,
instance->memory[i].v[j]);
}
}
}
static void fatal(const char *error) {
fprintf(stderr, "Error: %s\n", error);
exit(1);
}
static void generate_testvectors(const char *type)
{
#define TEST_OUTLEN 32
#define TEST_PWDLEN 32
#define TEST_SALTLEN 16
#define TEST_SECRETLEN 8
#define TEST_ADLEN 12
argon2_context context;
unsigned char out[TEST_OUTLEN];
unsigned char pwd[TEST_PWDLEN];
unsigned char salt[TEST_SALTLEN];
unsigned char secret[TEST_SECRETLEN];
unsigned char ad[TEST_ADLEN];
const allocate_fptr myown_allocator = NULL;
const deallocate_fptr myown_deallocator = NULL;
unsigned t_cost = 3;
unsigned m_cost = 16;
unsigned lanes = 4;
memset(pwd, 1, TEST_PWDLEN);
memset(salt, 2, TEST_SALTLEN);
memset(secret, 3, TEST_SECRETLEN);
memset(ad, 4, TEST_ADLEN);
context.out = out;
context.outlen = TEST_OUTLEN;
context.pwd = pwd;
context.pwdlen = TEST_PWDLEN;
context.salt = salt;
context.saltlen = TEST_SALTLEN;
context.secret = secret;
context.secretlen = TEST_SECRETLEN;
context.ad = ad;
context.adlen = TEST_ADLEN;
context.t_cost = t_cost;
context.m_cost = m_cost;
context.lanes = lanes;
context.threads = lanes;
context.allocate_cbk = myown_allocator;
context.free_cbk = myown_deallocator;
context.flags = 0;
#undef TEST_OUTLEN
#undef TEST_PWDLEN
#undef TEST_SALTLEN
#undef TEST_SECRETLEN
#undef TEST_ADLEN
if (!strcmp(type, "d")) {
argon2d(&context);
} else if (!strcmp(type, "i")) {
argon2i(&context);
} else
fatal("wrong Argon2 type");
}
int main(int argc, char *argv[])
{
const char *type = (argc > 1) ? argv[1] : "i";
generate_testvectors(type);
return ARGON2_OK;
}

View File

@@ -1,45 +0,0 @@
/*
* Argon2 source code package
*
* Written by Daniel Dinu and Dmitry Khovratovich, 2015
*
* This work is licensed under a Creative Commons CC0 1.0 License/Waiver.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with
* this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#ifndef ARGON2_KAT_H
#define ARGON2_KAT_H
/*
* Initial KAT function that prints the inputs to the file
* @param blockhash Array that contains pre-hashing digest
* @param context Holds inputs
* @param type Argon2 type
* @pre blockhash must point to INPUT_INITIAL_HASH_LENGTH bytes
* @pre context member pointers must point to allocated memory of size according
* to the length values
*/
void initial_kat(const uint8_t *blockhash, const argon2_context *context,
argon2_type type);
/*
* Function that prints the output tag
* @param out output array pointer
* @param outlen digest length
* @pre out must point to @a outlen bytes
**/
void print_tag(const void *out, uint32_t outlen);
/*
* Function that prints the internal state at given moment
* @param instance pointer to the current instance
* @param pass current pass number
* @pre instance must have necessary memory allocated
**/
void internal_kat(const argon2_instance_t *instance, uint32_t pass);
#endif

View File

@@ -1,185 +0,0 @@
/*
* Argon2 source code package
*
* Written by Daniel Dinu and Dmitry Khovratovich, 2015
*
* This work is licensed under a Creative Commons CC0 1.0 License/Waiver.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with
* this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>
#include <immintrin.h>
#include "argon2.h"
#include "cores.h"
#include "opt.h"
#include "blake2/blake2.h"
#include "blake2/blamka-round-opt.h"
void ar2_fill_block(__m128i *state, __m128i const *ref_block, __m128i *next_block)
{
__m128i ALIGN(16) block_XY[ARGON2_QWORDS_IN_BLOCK];
uint32_t i;
for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; i++) {
block_XY[i] = state[i] = _mm_xor_si128(
state[i], _mm_load_si128(&ref_block[i]));
}
BLAKE2_ROUND(state[0], state[1], state[2], state[3], state[4], state[5], state[6], state[7]);
BLAKE2_ROUND(state[8], state[9], state[10], state[11], state[12], state[13], state[14], state[15]);
BLAKE2_ROUND(state[16], state[17], state[18], state[19], state[20], state[21], state[22], state[23]);
BLAKE2_ROUND(state[24], state[25], state[26], state[27], state[28], state[29], state[30], state[31]);
BLAKE2_ROUND(state[32], state[33], state[34], state[35], state[36], state[37], state[38], state[39]);
BLAKE2_ROUND(state[40], state[41], state[42], state[43], state[44], state[45], state[46], state[47]);
BLAKE2_ROUND(state[48], state[49], state[50], state[51], state[52], state[53], state[54], state[55]);
BLAKE2_ROUND(state[56], state[57], state[58], state[59], state[60], state[61], state[62], state[63]);
/*for (i = 0; i < 8; ++i) {
BLAKE2_ROUND(state[8 * i + 0], state[8 * i + 1], state[8 * i + 2],
state[8 * i + 3], state[8 * i + 4], state[8 * i + 5],
state[8 * i + 6], state[8 * i + 7]);
}*/
BLAKE2_ROUND(state[0], state[8], state[16], state[24], state[32], state[40], state[48], state[56]);
BLAKE2_ROUND(state[1], state[9], state[17], state[25], state[33], state[41], state[49], state[57]);
BLAKE2_ROUND(state[2], state[10], state[18], state[26], state[34], state[42], state[50], state[58]);
BLAKE2_ROUND(state[3], state[11], state[19], state[27], state[35], state[43], state[51], state[59]);
BLAKE2_ROUND(state[4], state[12], state[20], state[28], state[36], state[44], state[52], state[60]);
BLAKE2_ROUND(state[5], state[13], state[21], state[29], state[37], state[45], state[53], state[61]);
BLAKE2_ROUND(state[6], state[14], state[22], state[30], state[38], state[46], state[54], state[62]);
BLAKE2_ROUND(state[7], state[15], state[23], state[31], state[39], state[47], state[55], state[63]);
/*for (i = 0; i < 8; ++i) {
BLAKE2_ROUND(state[8 * 0 + i], state[8 * 1 + i], state[8 * 2 + i],
state[8 * 3 + i], state[8 * 4 + i], state[8 * 5 + i],
state[8 * 6 + i], state[8 * 7 + i]);
}*/
for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; i++) {
state[i] = _mm_xor_si128(state[i], block_XY[i]);
_mm_storeu_si128(&next_block[i], state[i]);
}
}
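/*
 * These constants appear to be precomputed Argon2i address values for the
 * fixed configuration of this port (2 passes, 4 slices, 4 blocks per
 * segment): one 64-bit value per block position, indexed by pass and slice
 * in ar2_generate_addresses() below.
 */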
static const uint64_t bad_rands[32] = {
UINT64_C(17023632018251376180), UINT64_C(4911461131397773491),
UINT64_C(15927076453364631751), UINT64_C(7860239898779391109),
UINT64_C(11820267568857244377), UINT64_C(12188179869468676617),
UINT64_C(3732913385414474778), UINT64_C(7651458777762572084),
UINT64_C(3062274162574341415), UINT64_C(17922653540258786897),
UINT64_C(17393848266100524980), UINT64_C(8539695715554563839),
UINT64_C(13824538050656654359), UINT64_C(12078939433126460936),
UINT64_C(15331979418564540430), UINT64_C(12058346794217174273),
UINT64_C(13593922096015221049), UINT64_C(18356682276374416500),
UINT64_C(4968040514092703824), UINT64_C(11202790346130235567),
UINT64_C(2276229735041314644), UINT64_C(220837743321691382),
UINT64_C(4861211596230784273), UINT64_C(6330592584132590331),
UINT64_C(3515580430960296763), UINT64_C(9869356316971855173),
UINT64_C(485533243489193056), UINT64_C(14596447761048148032),
UINT64_C(16531790085730132900), UINT64_C(17328824500878824371),
UINT64_C(8548260058287621283), UINT64_C(8641748798041936364)
};
void ar2_generate_addresses(const argon2_instance_t *instance,
const argon2_position_t *position,
uint64_t *pseudo_rands)
{
uint8_t offset = position->pass * 16 + position->slice * 4;
pseudo_rands[0] = bad_rands[offset++];
pseudo_rands[1] = bad_rands[offset++];
pseudo_rands[2] = bad_rands[offset++];
pseudo_rands[3] = bad_rands[offset++];
/*if ((position->pass == 1 && position->slice == 3))
print64("pseudo_rands", pseudo_rands, 4);*/
}
#define SEGMENT_LENGTH 4
#define LANE_LENGTH 16
#define POS_LANE 0
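/* Hardcoded geometry for this port: a single lane of 16 blocks (m_cost 16)
split into 4 segments of SEGMENT_LENGTH = 4 blocks each */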
void ar2_fill_segment(const argon2_instance_t *instance,
argon2_position_t position)
{
block *ref_block = NULL, *curr_block = NULL;
uint64_t pseudo_rand, ref_index;
uint32_t prev_offset, curr_offset;
uint8_t i;
__m128i state[64];
int data_independent_addressing = (instance->type == Argon2_i);
/* Pseudo-random values that determine the reference block position */
uint64_t *pseudo_rands = NULL;
pseudo_rands = (uint64_t *)malloc(/*sizeof(uint64_t) * 4*/32);
if (data_independent_addressing) {
ar2_generate_addresses(instance, &position, pseudo_rands);
}
i = 0;
if ((0 == position.pass) && (0 == position.slice)) {
i = 2; /* we have already generated the first two blocks */
}
/*printf("Position.lane = %d\nPosition.slice = %d\nStarting index : %d\n", position.lane, position.slice, starting_index);*/
/* Offset of the current block */
curr_offset = position.slice * 4 + i;
if (0 == curr_offset % 16) {
/* Last block in this lane */
prev_offset = curr_offset + /*instance->lane_length - 1*/15;
} else {
/* Previous block */
prev_offset = curr_offset - 1;
}
memcpy(state, ((instance->memory + prev_offset)->v), ARGON2_BLOCK_SIZE);
for (; i < SEGMENT_LENGTH;
++i, ++curr_offset, ++prev_offset) {
/*1.1 Rotating prev_offset if needed */
if (curr_offset % LANE_LENGTH == 1) {
prev_offset = curr_offset - 1;
}
/* 1.2 Computing the index of the reference block */
/* 1.2.1 Taking pseudo-random value from the previous block */
if (data_independent_addressing) {
pseudo_rand = pseudo_rands[i];
} else {
pseudo_rand = instance->memory[prev_offset].v[0];
}
/* 1.2.2 Computing the lane of the reference block */
/* 1.2.3 Computing the number of possible reference block within the
* lane.
*/
position.index = i;
ref_index = ar2_index_alpha(instance, &position, pseudo_rand & 0xFFFFFFFF,1);
/* 2 Creating a new block */
ref_block = instance->memory + ref_index;
curr_block = instance->memory + curr_offset;
ar2_fill_block(state, (__m128i const *)ref_block->v, (__m128i *)curr_block->v);
}
free(pseudo_rands);
}

View File

@@ -1,49 +0,0 @@
/*
* Argon2 source code package
*
* Written by Daniel Dinu and Dmitry Khovratovich, 2015
*
* This work is licensed under a Creative Commons CC0 1.0 License/Waiver.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with
* this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#ifndef ARGON2_OPT_H
#define ARGON2_OPT_H
/*
* Function fills a new memory block. Differs from the
* @param state Pointer to the just produced block. Content will be updated(!)
* @param ref_block Pointer to the reference block
* @param next_block Pointer to the block to be constructed
* @pre all block pointers must be valid
*/
void ar2_fill_block(__m128i *state, __m128i const *ref_block, __m128i *next_block);
/*
* Generate pseudo-random values to reference blocks in the segment and puts
* them into the array
* @param instance Pointer to the current instance
* @param position Pointer to the current position
* @param pseudo_rands Pointer to the array of 64-bit values
* @pre pseudo_rands must point to @a instance->segment_length allocated values
*/
void ar2_generate_addresses(const argon2_instance_t *instance,
const argon2_position_t *position,
uint64_t *pseudo_rands);
/*
* Function that fills the segment using previous segments also from other
* threads.
* Identical to the reference code except that it calls optimized FillBlock()
* @param instance Pointer to the current instance
* @param position Current position
* @pre all block pointers must be valid
*/
void ar2_fill_segment(const argon2_instance_t *instance,
argon2_position_t position);
#endif /* ARGON2_OPT_H */

View File

@@ -1,174 +0,0 @@
/*
* Argon2 source code package
*
* Written by Daniel Dinu and Dmitry Khovratovich, 2015
*
* This work is licensed under a Creative Commons CC0 1.0 License/Waiver.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with
* this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include "argon2.h"
#include "cores.h"
#include "ref.h"
#include "blake2/blamka-round-ref.h"
#include "blake2/blake2-impl.h"
#include "blake2/blake2.h"
void fill_block(const block *prev_block, const block *ref_block,
block *next_block) {
block blockR, block_tmp;
unsigned i;
ar2_copy_block(&blockR, ref_block);
ar2_xor_block(&blockR, prev_block);
ar2_copy_block(&block_tmp, &blockR);
/* Apply Blake2 on columns of 64-bit words: (0,1,...,15) , then
(16,17,..31)... finally (112,113,...127) */
for (i = 0; i < 8; ++i) {
BLAKE2_ROUND_NOMSG(
blockR.v[16 * i], blockR.v[16 * i + 1], blockR.v[16 * i + 2],
blockR.v[16 * i + 3], blockR.v[16 * i + 4], blockR.v[16 * i + 5],
blockR.v[16 * i + 6], blockR.v[16 * i + 7], blockR.v[16 * i + 8],
blockR.v[16 * i + 9], blockR.v[16 * i + 10], blockR.v[16 * i + 11],
blockR.v[16 * i + 12], blockR.v[16 * i + 13], blockR.v[16 * i + 14],
blockR.v[16 * i + 15]);
}
/* Apply Blake2 on rows of 64-bit words: (0,1,16,17,...112,113), then
(2,3,18,19,...,114,115).. finally (14,15,30,31,...,126,127) */
for (i = 0; i < 8; i++) {
BLAKE2_ROUND_NOMSG(
blockR.v[2 * i], blockR.v[2 * i + 1], blockR.v[2 * i + 16],
blockR.v[2 * i + 17], blockR.v[2 * i + 32], blockR.v[2 * i + 33],
blockR.v[2 * i + 48], blockR.v[2 * i + 49], blockR.v[2 * i + 64],
blockR.v[2 * i + 65], blockR.v[2 * i + 80], blockR.v[2 * i + 81],
blockR.v[2 * i + 96], blockR.v[2 * i + 97], blockR.v[2 * i + 112],
blockR.v[2 * i + 113]);
}
ar2_copy_block(next_block, &block_tmp);
ar2_xor_block(next_block, &blockR);
}
void generate_addresses(const argon2_instance_t *instance,
const argon2_position_t *position,
uint64_t *pseudo_rands) {
block zero_block, input_block, address_block;
uint32_t i;
ar2_init_block_value(&zero_block, 0);
ar2_init_block_value(&input_block, 0);
ar2_init_block_value(&address_block, 0);
if (instance != NULL && position != NULL) {
input_block.v[0] = position->pass;
input_block.v[1] = position->lane;
input_block.v[2] = position->slice;
input_block.v[3] = 16;
input_block.v[4] = 2;
input_block.v[5] = instance->type;
for (i = 0; i < 4; ++i) {
if (i % ARGON2_ADDRESSES_IN_BLOCK == 0) {
input_block.v[6]++;
fill_block(&zero_block, &input_block, &address_block);
fill_block(&zero_block, &address_block, &address_block);
}
pseudo_rands[i] = address_block.v[i % ARGON2_ADDRESSES_IN_BLOCK];
}
}
}
void fill_segment(const argon2_instance_t *instance,
argon2_position_t position) {
block *ref_block = NULL, *curr_block = NULL;
uint64_t pseudo_rand, ref_index, ref_lane;
uint32_t prev_offset, curr_offset;
uint32_t starting_index;
uint32_t i;
int data_independent_addressing = (instance->type == Argon2_i);
/* Pseudo-random values that determine the reference block position */
uint64_t *pseudo_rands = NULL;
if (instance == NULL) {
return;
}
pseudo_rands =
(uint64_t *)malloc(sizeof(uint64_t) * 4);
if (pseudo_rands == NULL) {
return;
}
if (data_independent_addressing) {
generate_addresses(instance, &position, pseudo_rands);
}
starting_index = 0;
if ((0 == position.pass) && (0 == position.slice)) {
starting_index = 2; /* we have already generated the first two blocks */
}
/* Offset of the current block */
curr_offset = position.lane * 16 +
position.slice * 4 + starting_index;
if (0 == curr_offset % 16) {
/* Last block in this lane */
prev_offset = curr_offset + 16 - 1;
} else {
/* Previous block */
prev_offset = curr_offset - 1;
}
for (i = starting_index; i < 4; ++i, ++curr_offset, ++prev_offset) {
/*1.1 Rotating prev_offset if needed */
if (curr_offset % 16 == 1) {
prev_offset = curr_offset - 1;
}
/* 1.2 Computing the index of the reference block */
/* 1.2.1 Taking pseudo-random value from the previous block */
if (data_independent_addressing) {
pseudo_rand = pseudo_rands[i];
} else {
pseudo_rand = instance->memory[prev_offset].v[0];
}
/* 1.2.2 Computing the lane of the reference block */
ref_lane = ((pseudo_rand >> 32)) % 1; /* single lane, so always 0 */
if ((position.pass == 0) && (position.slice == 0)) {
/* Can not reference other lanes yet */
ref_lane = position.lane;
}
/* 1.2.3 Computing the number of possible reference block within the
* lane.
*/
position.index = i;
ref_index = ar2_index_alpha(instance, &position, pseudo_rand & 0xFFFFFFFF,
ref_lane == position.lane);
/* 2 Creating a new block */
ref_block =
instance->memory + 16 * ref_lane + ref_index;
curr_block = instance->memory + curr_offset;
fill_block(instance->memory + prev_offset, ref_block, curr_block);
}
free(pseudo_rands);
}

View File

@@ -1,49 +0,0 @@
/*
* Argon2 source code package
*
* Written by Daniel Dinu and Dmitry Khovratovich, 2015
*
* This work is licensed under a Creative Commons CC0 1.0 License/Waiver.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with
* this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#ifndef ARGON2_REF_H
#define ARGON2_REF_H
/*
* Function fills a new memory block
* @param prev_block Pointer to the previous block
* @param ref_block Pointer to the reference block
* @param next_block Pointer to the block to be constructed
* @pre all block pointers must be valid
*/
void fill_block(const block *prev_block, const block *ref_block,
block *next_block);
/*
* Generate pseudo-random values to reference blocks in the segment and puts
* them into the array
* @param instance Pointer to the current instance
* @param position Pointer to the current position
* @param pseudo_rands Pointer to the array of 64-bit values
* @pre pseudo_rands must point to @a instance->segment_length allocated values
*/
void generate_addresses(const argon2_instance_t *instance,
const argon2_position_t *position,
uint64_t *pseudo_rands);
/*
* Function that fills the segment using previous segments, possibly from
* other threads
* @param instance Pointer to the current instance
* @param position Current position
* @pre all block pointers must be valid
*/
void fill_segment(const argon2_instance_t *instance,
argon2_position_t position);
#endif /* ARGON2_REF_H */

View File

@@ -1,223 +0,0 @@
/*
* Argon2 source code package
*
* Written by Daniel Dinu and Dmitry Khovratovich, 2015
*
* This work is licensed under a Creative Commons CC0 1.0 License/Waiver.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with
* this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <limits.h> /* ULONG_MAX, used below when validating numeric options */
#include "argon2.h"
#include "cores.h"
#define T_COST_DEF 3
#define LOG_M_COST_DEF 12 /* 2^12 = 4 MiB */
#define LANES_DEF 1
#define THREADS_DEF 1
#define OUT_LEN 32
#define SALT_LEN 16
#define UNUSED_PARAMETER(x) (void)(x)
static void usage(const char *cmd) {
printf("Usage: %s pwd salt [-y version] [-t iterations] [-m memory] [-p "
"parallelism]\n",
cmd);
printf("Parameters:\n");
printf("\tpwd\t\tThe password to hash\n");
printf("\tsalt\t\tThe salt to use, at most 16 characters\n");
printf("\t-d\t\tUse Argon2d instead of Argon2i (which is the default)\n");
printf("\t-t N\t\tSets the number of iterations to N (default = %d)\n",
T_COST_DEF);
printf("\t-m N\t\tSets the memory usage of 2^N KiB (default %d)\n",
LOG_M_COST_DEF);
printf("\t-p N\t\tSets parallelism to N threads (default %d)\n",
THREADS_DEF);
}
static void fatal(const char *error) {
fprintf(stderr, "Error: %s\n", error);
exit(1);
}
/*
Runs Argon2 with certain inputs and parameters, inputs not cleared. Prints the
hash in hexadecimal (encoded-string output is disabled pending proper decoding)
@out output array with at least 32 bytes allocated
@pwd NULL-terminated string, presumably from argv[]
@salt salt array with at least SALT_LEN bytes allocated
@t_cost number of iterations
@m_cost amount of requested memory in KiB
@lanes amount of requested parallelism
@threads actual parallelism
@type String, only "d" and "i" are accepted
*/
static void run(uint8_t *out, char *pwd, uint8_t *salt, uint32_t t_cost,
uint32_t m_cost, uint32_t lanes, uint32_t threads,
const char *type) {
clock_t start_time, stop_time;
unsigned pwd_length;
argon2_context context;
int i;
start_time = clock();
if (!pwd) {
fatal("password missing");
}
if (!salt) {
secure_wipe_memory(pwd, strlen(pwd));
fatal("salt missing");
}
pwd_length = strlen(pwd);
UNUSED_PARAMETER(threads);
context.out = out;
context.outlen = OUT_LEN;
context.pwd = (uint8_t *)pwd;
context.pwdlen = pwd_length;
context.salt = salt;
context.saltlen = SALT_LEN;
context.secret = NULL;
context.secretlen = 0;
context.ad = NULL;
context.adlen = 0;
context.t_cost = t_cost;
context.m_cost = m_cost;
context.lanes = lanes;
context.threads = lanes; /* the separate threads argument is ignored */
context.allocate_cbk = NULL;
context.free_cbk = NULL;
context.flags = ARGON2_FLAG_CLEAR_PASSWORD;
if (!strcmp(type, "d")) {
int result = argon2d(&context);
if (result != ARGON2_OK)
fatal(error_message(result));
} else if (!strcmp(type, "i")) {
int result = argon2i(&context);
if (result != ARGON2_OK)
fatal(error_message(result));
} else {
secure_wipe_memory(pwd, strlen(pwd));
fatal("wrong Argon2 type");
}
stop_time = clock();
/* add back when proper decoding */
/*
char encoded[300];
encode_string(encoded, sizeof encoded, &context);
printf("%s\n", encoded);
*/
printf("Hash:\t\t");
for (i = 0; i < context.outlen; ++i) {
printf("%02x", context.out[i]);
}
printf("\n");
printf("%2.3f seconds\n",
((double)stop_time - start_time) / (CLOCKS_PER_SEC));
}
int main(int argc, char *argv[]) {
unsigned char out[OUT_LEN];
uint32_t m_cost = 1 << LOG_M_COST_DEF;
uint32_t t_cost = T_COST_DEF;
uint32_t lanes = LANES_DEF;
uint32_t threads = THREADS_DEF;
char *pwd = NULL;
uint8_t salt[SALT_LEN];
const char *type = "i";
int i;
if (argc < 3) {
usage(argv[0]);
return ARGON2_MISSING_ARGS;
}
/* get password and salt from command line */
pwd = argv[1];
if (strlen(argv[2]) > SALT_LEN) {
fatal("salt too long");
}
memset(salt, 0x00, SALT_LEN); /* pad with null bytes */
memcpy(salt, argv[2], strlen(argv[2]));
/* parse options */
for (i = 3; i < argc; i++) {
const char *a = argv[i];
unsigned long input = 0;
if (!strcmp(a, "-m")) {
if (i < argc - 1) {
i++;
input = strtoul(argv[i], NULL, 10);
if (input == 0 || input == ULONG_MAX ||
input > ARGON2_MAX_MEMORY_BITS) {
fatal("bad numeric input for -m");
}
m_cost = ARGON2_MIN(UINT64_C(1) << input, UINT32_C(0xFFFFFFFF));
if (m_cost > ARGON2_MAX_MEMORY) {
fatal("m_cost overflow");
}
continue;
} else {
fatal("missing -m argument");
}
} else if (!strcmp(a, "-t")) {
if (i < argc - 1) {
i++;
input = strtoul(argv[i], NULL, 10);
if (input == 0 || input == ULONG_MAX ||
input > ARGON2_MAX_TIME) {
fatal("bad numeric input for -t");
}
t_cost = input;
continue;
} else {
fatal("missing -t argument");
}
} else if (!strcmp(a, "-p")) {
if (i < argc - 1) {
i++;
input = strtoul(argv[i], NULL, 10);
if (input == 0 || input == ULONG_MAX ||
input > ARGON2_MAX_THREADS || input > ARGON2_MAX_LANES) {
fatal("bad numeric input for -p");
}
threads = input;
lanes = threads;
continue;
} else {
fatal("missing -p argument");
}
} else if (!strcmp(a, "-d")) {
type = "d";
} else {
fatal("unknown argument");
}
}
printf("Type:\t\tArgon2%c\n", type[0]);
printf("Iterations:\t%" PRIu32 " \n", t_cost);
printf("Memory:\t\t%" PRIu32 " KiB\n", m_cost);
printf("Parallelism:\t%" PRIu32 " \n", lanes);
run(out, pwd, salt, t_cost, m_cost, lanes, threads, type);
return ARGON2_OK;
}
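For reference, a typical invocation of this tester (binary name assumed) and the parameter echo it prints; the hash and timing lines depend on the inputs and the machine:

./argon2-test password somesalt -t 3 -m 12 -p 1
Type:		Argon2i
Iterations:	3
Memory:		4096 KiB
Parallelism:	1
Hash:		<64 hex digits>
<elapsed> seconds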

View File

@@ -1,38 +0,0 @@
#if defined(SCRYPT_SKEIN512)
#include "scrypt-jane-hash_skein512.h"
#else
#define SCRYPT_HASH "ERROR"
#define SCRYPT_HASH_BLOCK_SIZE 64
#define SCRYPT_HASH_DIGEST_SIZE 64
typedef struct scrypt_hash_state_t { size_t dummy; } scrypt_hash_state;
typedef uint8_t scrypt_hash_digest[SCRYPT_HASH_DIGEST_SIZE];
static void scrypt_hash_init(scrypt_hash_state *S) {}
static void scrypt_hash_update(scrypt_hash_state *S, const uint8_t *in, size_t inlen) {}
static void scrypt_hash_finish(scrypt_hash_state *S, uint8_t *hash) {}
static const uint8_t scrypt_test_hash_expected[SCRYPT_HASH_DIGEST_SIZE] = {0};
#error must define a hash function!
#endif
#include "scrypt-jane-pbkdf2.h"
#define SCRYPT_TEST_HASH_LEN 257 /* (2 * largest block size) + 1 */
static int
scrypt_test_hash(void) {
scrypt_hash_state st;
scrypt_hash_digest hash, final;
uint8_t msg[SCRYPT_TEST_HASH_LEN];
size_t i;
for (i = 0; i < SCRYPT_TEST_HASH_LEN; i++)
msg[i] = (uint8_t)i;
scrypt_hash_init(&st);
for (i = 0; i < SCRYPT_TEST_HASH_LEN + 1; i++) {
scrypt_hash(hash, msg, i);
scrypt_hash_update(&st, hash, sizeof(hash));
}
scrypt_hash_finish(&st, final);
return scrypt_verify(final, scrypt_test_hash_expected, SCRYPT_HASH_DIGEST_SIZE);
}

View File

@@ -1,188 +0,0 @@
#define SCRYPT_HASH "Skein-512"
#define SCRYPT_HASH_BLOCK_SIZE 64
#define SCRYPT_HASH_DIGEST_SIZE 64
typedef uint8_t scrypt_hash_digest[SCRYPT_HASH_DIGEST_SIZE];
typedef struct scrypt_hash_state_t {
uint64_t X[8], T[2];
uint32_t leftover;
uint8_t buffer[SCRYPT_HASH_BLOCK_SIZE];
} scrypt_hash_state;
#include <stdio.h>
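This file leans on helper macros from scrypt-jane's portable headers, which are not part of this diff. Minimal equivalents are sketched below for reference; the real headers may use faster unaligned loads on little-endian targets:

/* rotate left on 64-bit words; n must be in 1..63 here */
#define ROTL64(x,n) (((x) << (n)) | ((x) >> (64 - (n))))
/* read/write a 64-bit word as little-endian bytes */
#define U8TO64_LE(p) \
	(((uint64_t)(p)[0]      ) | ((uint64_t)(p)[1] <<  8) | \
	 ((uint64_t)(p)[2] << 16) | ((uint64_t)(p)[3] << 24) | \
	 ((uint64_t)(p)[4] << 32) | ((uint64_t)(p)[5] << 40) | \
	 ((uint64_t)(p)[6] << 48) | ((uint64_t)(p)[7] << 56))
#define U64TO8_LE(p, v) do { \
	(p)[0] = (uint8_t)((v)      ); (p)[1] = (uint8_t)((v) >>  8); \
	(p)[2] = (uint8_t)((v) >> 16); (p)[3] = (uint8_t)((v) >> 24); \
	(p)[4] = (uint8_t)((v) >> 32); (p)[5] = (uint8_t)((v) >> 40); \
	(p)[6] = (uint8_t)((v) >> 48); (p)[7] = (uint8_t)((v) >> 56); \
	} while (0)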
static void
skein512_blocks(scrypt_hash_state *S, const uint8_t *in, size_t blocks, size_t add) {
uint64_t X[8], key[8], Xt[9+18], T[3+1];
size_t r;
while (blocks--) {
T[0] = S->T[0] + add;
T[1] = S->T[1];
T[2] = T[0] ^ T[1];
key[0] = U8TO64_LE(in + 0); Xt[0] = S->X[0]; X[0] = key[0] + Xt[0];
key[1] = U8TO64_LE(in + 8); Xt[1] = S->X[1]; X[1] = key[1] + Xt[1];
key[2] = U8TO64_LE(in + 16); Xt[2] = S->X[2]; X[2] = key[2] + Xt[2];
key[3] = U8TO64_LE(in + 24); Xt[3] = S->X[3]; X[3] = key[3] + Xt[3];
key[4] = U8TO64_LE(in + 32); Xt[4] = S->X[4]; X[4] = key[4] + Xt[4];
key[5] = U8TO64_LE(in + 40); Xt[5] = S->X[5]; X[5] = key[5] + Xt[5] + T[0];
key[6] = U8TO64_LE(in + 48); Xt[6] = S->X[6]; X[6] = key[6] + Xt[6] + T[1];
key[7] = U8TO64_LE(in + 56); Xt[7] = S->X[7]; X[7] = key[7] + Xt[7];
Xt[8] = 0x1BD11BDAA9FC1A22ull ^ Xt[0] ^ Xt[1] ^ Xt[2] ^ Xt[3] ^ Xt[4] ^ Xt[5] ^ Xt[6] ^ Xt[7];
in += SCRYPT_HASH_BLOCK_SIZE;
for (r = 0; r < 18; r++)
Xt[r + 9] = Xt[r + 0];
for (r = 0; r < 18; r += 2) {
X[0] += X[1]; X[1] = ROTL64(X[1], 46) ^ X[0];
X[2] += X[3]; X[3] = ROTL64(X[3], 36) ^ X[2];
X[4] += X[5]; X[5] = ROTL64(X[5], 19) ^ X[4];
X[6] += X[7]; X[7] = ROTL64(X[7], 37) ^ X[6];
X[2] += X[1]; X[1] = ROTL64(X[1], 33) ^ X[2];
X[0] += X[3]; X[3] = ROTL64(X[3], 42) ^ X[0];
X[6] += X[5]; X[5] = ROTL64(X[5], 14) ^ X[6];
X[4] += X[7]; X[7] = ROTL64(X[7], 27) ^ X[4];
X[4] += X[1]; X[1] = ROTL64(X[1], 17) ^ X[4];
X[6] += X[3]; X[3] = ROTL64(X[3], 49) ^ X[6];
X[0] += X[5]; X[5] = ROTL64(X[5], 36) ^ X[0];
X[2] += X[7]; X[7] = ROTL64(X[7], 39) ^ X[2];
X[6] += X[1]; X[1] = ROTL64(X[1], 44) ^ X[6];
X[4] += X[3]; X[3] = ROTL64(X[3], 56) ^ X[4];
X[2] += X[5]; X[5] = ROTL64(X[5], 54) ^ X[2];
X[0] += X[7]; X[7] = ROTL64(X[7], 9) ^ X[0];
X[0] += Xt[r + 1];
X[1] += Xt[r + 2];
X[2] += Xt[r + 3];
X[3] += Xt[r + 4];
X[4] += Xt[r + 5];
X[5] += Xt[r + 6] + T[1];
X[6] += Xt[r + 7] + T[2];
X[7] += Xt[r + 8] + r + 1;
T[3] = T[0];
T[0] = T[1];
T[1] = T[2];
T[2] = T[3];
X[0] += X[1]; X[1] = ROTL64(X[1], 39) ^ X[0];
X[2] += X[3]; X[3] = ROTL64(X[3], 30) ^ X[2];
X[4] += X[5]; X[5] = ROTL64(X[5], 34) ^ X[4];
X[6] += X[7]; X[7] = ROTL64(X[7], 24) ^ X[6];
X[2] += X[1]; X[1] = ROTL64(X[1], 13) ^ X[2];
X[0] += X[3]; X[3] = ROTL64(X[3], 17) ^ X[0];
X[6] += X[5]; X[5] = ROTL64(X[5], 10) ^ X[6];
X[4] += X[7]; X[7] = ROTL64(X[7], 50) ^ X[4];
X[4] += X[1]; X[1] = ROTL64(X[1], 25) ^ X[4];
X[6] += X[3]; X[3] = ROTL64(X[3], 29) ^ X[6];
X[0] += X[5]; X[5] = ROTL64(X[5], 39) ^ X[0];
X[2] += X[7]; X[7] = ROTL64(X[7], 43) ^ X[2];
X[6] += X[1]; X[1] = ROTL64(X[1], 8) ^ X[6];
X[4] += X[3]; X[3] = ROTL64(X[3], 22) ^ X[4];
X[2] += X[5]; X[5] = ROTL64(X[5], 56) ^ X[2];
X[0] += X[7]; X[7] = ROTL64(X[7], 35) ^ X[0];
X[0] += Xt[r + 2];
X[1] += Xt[r + 3];
X[2] += Xt[r + 4];
X[3] += Xt[r + 5];
X[4] += Xt[r + 6];
X[5] += Xt[r + 7] + T[1];
X[6] += Xt[r + 8] + T[2];
X[7] += Xt[r + 9] + r + 2;
T[3] = T[0];
T[0] = T[1];
T[1] = T[2];
T[2] = T[3];
}
S->X[0] = key[0] ^ X[0];
S->X[1] = key[1] ^ X[1];
S->X[2] = key[2] ^ X[2];
S->X[3] = key[3] ^ X[3];
S->X[4] = key[4] ^ X[4];
S->X[5] = key[5] ^ X[5];
S->X[6] = key[6] ^ X[6];
S->X[7] = key[7] ^ X[7];
S->T[0] = T[0];
S->T[1] = T[1] & ~0x4000000000000000ull;
}
}
static void
scrypt_hash_init(scrypt_hash_state *S) {
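/* chaining state preloaded with the precomputed Skein-512-512 IV */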
S->X[0] = 0x4903ADFF749C51CEull;
S->X[1] = 0x0D95DE399746DF03ull;
S->X[2] = 0x8FD1934127C79BCEull;
S->X[3] = 0x9A255629FF352CB1ull;
S->X[4] = 0x5DB62599DF6CA7B0ull;
S->X[5] = 0xEABE394CA9D5C3F4ull;
S->X[6] = 0x991112C71A75B523ull;
S->X[7] = 0xAE18A40B660FCC33ull;
S->T[0] = 0x0000000000000000ull;
S->T[1] = 0x7000000000000000ull; /* tweak: "first block" flag | message type */
S->leftover = 0;
}
static void
scrypt_hash_update(scrypt_hash_state *S, const uint8_t *in, size_t inlen) {
size_t blocks, want;
/* skein processes the final <=64 bytes raw, so we can only update if there are at least 64+1 bytes available */
if ((S->leftover + inlen) > SCRYPT_HASH_BLOCK_SIZE) {
/* handle the previous data, we know there is enough for at least one block */
if (S->leftover) {
want = (SCRYPT_HASH_BLOCK_SIZE - S->leftover);
memcpy(S->buffer + S->leftover, in, want);
in += want;
inlen -= want;
S->leftover = 0;
skein512_blocks(S, S->buffer, 1, SCRYPT_HASH_BLOCK_SIZE);
}
/* handle the current data if there's more than one block */
if (inlen > SCRYPT_HASH_BLOCK_SIZE) {
blocks = ((inlen - 1) & ~(SCRYPT_HASH_BLOCK_SIZE - 1));
skein512_blocks(S, in, blocks / SCRYPT_HASH_BLOCK_SIZE, SCRYPT_HASH_BLOCK_SIZE);
inlen -= blocks;
in += blocks;
}
}
/* handle leftover data */
memcpy(S->buffer + S->leftover, in, inlen);
S->leftover += (uint32_t) inlen;
}
static void
scrypt_hash_finish(scrypt_hash_state *S, uint8_t *hash) {
memset(S->buffer + S->leftover, 0, SCRYPT_HASH_BLOCK_SIZE - S->leftover);
S->T[1] |= 0x8000000000000000ull; /* set the "final block" tweak flag */
skein512_blocks(S, S->buffer, 1, S->leftover);
/* output transform: one zero block with tweak first|final|type OUT */
memset(S->buffer, 0, SCRYPT_HASH_BLOCK_SIZE);
S->T[0] = 0;
S->T[1] = 0xff00000000000000ull;
skein512_blocks(S, S->buffer, 1, 8);
U64TO8_LE(&hash[ 0], S->X[0]);
U64TO8_LE(&hash[ 8], S->X[1]);
U64TO8_LE(&hash[16], S->X[2]);
U64TO8_LE(&hash[24], S->X[3]);
U64TO8_LE(&hash[32], S->X[4]);
U64TO8_LE(&hash[40], S->X[5]);
U64TO8_LE(&hash[48], S->X[6]);
U64TO8_LE(&hash[56], S->X[7]);
}
static const uint8_t scrypt_test_hash_expected[SCRYPT_HASH_DIGEST_SIZE] = {
0x4d,0x52,0x29,0xff,0x10,0xbc,0xd2,0x62,0xd1,0x61,0x83,0xc8,0xe6,0xf0,0x83,0xc4,
0x9f,0xf5,0x6a,0x42,0x75,0x2a,0x26,0x4e,0xf0,0x28,0x72,0x28,0x47,0xe8,0x23,0xdf,
0x1e,0x64,0xf1,0x51,0x38,0x35,0x9d,0xc2,0x83,0xfc,0x35,0x4e,0xc0,0x52,0x5f,0x41,
0x6a,0x0b,0x7d,0xf5,0xce,0x98,0xde,0x6f,0x36,0xd8,0x51,0x15,0x78,0x78,0x93,0x67,
};

View File

@@ -1,367 +0,0 @@
/* x64 */
#if defined(X86_64ASM_AVX) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA64_AVX
asm_naked_fn_proto(void, scrypt_ChunkMix_avx)(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_avx)
a1(push rbp)
a2(mov rbp, rsp)
a2(and rsp, ~63)
a2(sub rsp, 128)
a2(lea rcx,[ecx*2]) /* zero extend uint32_t by using ecx, win64 can leave garbage in the top half */
a2(shl rcx,7)
a2(lea r9,[rcx-128])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(vmovdqa xmm0,[rax+0])
a2(vmovdqa xmm1,[rax+16])
a2(vmovdqa xmm2,[rax+32])
a2(vmovdqa xmm3,[rax+48])
a2(vmovdqa xmm4,[rax+64])
a2(vmovdqa xmm5,[rax+80])
a2(vmovdqa xmm6,[rax+96])
a2(vmovdqa xmm7,[rax+112])
aj(jz scrypt_ChunkMix_avx_no_xor1)
a3(vpxor xmm0,xmm0,[r9+0])
a3(vpxor xmm1,xmm1,[r9+16])
a3(vpxor xmm2,xmm2,[r9+32])
a3(vpxor xmm3,xmm3,[r9+48])
a3(vpxor xmm4,xmm4,[r9+64])
a3(vpxor xmm5,xmm5,[r9+80])
a3(vpxor xmm6,xmm6,[r9+96])
a3(vpxor xmm7,xmm7,[r9+112])
a1(scrypt_ChunkMix_avx_no_xor1:)
a2(xor r9,r9)
a2(xor r8,r8)
a1(scrypt_ChunkMix_avx_loop:)
a2(and rdx, rdx)
a3(vpxor xmm0,xmm0,[rsi+r9+0])
a3(vpxor xmm1,xmm1,[rsi+r9+16])
a3(vpxor xmm2,xmm2,[rsi+r9+32])
a3(vpxor xmm3,xmm3,[rsi+r9+48])
a3(vpxor xmm4,xmm4,[rsi+r9+64])
a3(vpxor xmm5,xmm5,[rsi+r9+80])
a3(vpxor xmm6,xmm6,[rsi+r9+96])
a3(vpxor xmm7,xmm7,[rsi+r9+112])
aj(jz scrypt_ChunkMix_avx_no_xor2)
a3(vpxor xmm0,xmm0,[rdx+r9+0])
a3(vpxor xmm1,xmm1,[rdx+r9+16])
a3(vpxor xmm2,xmm2,[rdx+r9+32])
a3(vpxor xmm3,xmm3,[rdx+r9+48])
a3(vpxor xmm4,xmm4,[rdx+r9+64])
a3(vpxor xmm5,xmm5,[rdx+r9+80])
a3(vpxor xmm6,xmm6,[rdx+r9+96])
a3(vpxor xmm7,xmm7,[rdx+r9+112])
a1(scrypt_ChunkMix_avx_no_xor2:)
a2(vmovdqa [rsp+0],xmm0)
a2(vmovdqa [rsp+16],xmm1)
a2(vmovdqa [rsp+32],xmm2)
a2(vmovdqa [rsp+48],xmm3)
a2(vmovdqa [rsp+64],xmm4)
a2(vmovdqa [rsp+80],xmm5)
a2(vmovdqa [rsp+96],xmm6)
a2(vmovdqa [rsp+112],xmm7)
a2(mov rax,8)
a1(scrypt_salsa64_avx_loop: )
a3(vpaddq xmm8, xmm0, xmm2)
a3(vpaddq xmm9, xmm1, xmm3)
a3(vpshufd xmm8, xmm8, 0xb1)
a3(vpshufd xmm9, xmm9, 0xb1)
a3(vpxor xmm6, xmm6, xmm8)
a3(vpxor xmm7, xmm7, xmm9)
a3(vpaddq xmm10, xmm0, xmm6)
a3(vpaddq xmm11, xmm1, xmm7)
a3(vpsrlq xmm8, xmm10, 51)
a3(vpsrlq xmm9, xmm11, 51)
a3(vpsllq xmm10, xmm10, 13)
a3(vpsllq xmm11, xmm11, 13)
a3(vpxor xmm4, xmm4, xmm8)
a3(vpxor xmm5, xmm5, xmm9)
a3(vpxor xmm4, xmm4, xmm10)
a3(vpxor xmm5, xmm5, xmm11)
a3(vpaddq xmm8, xmm6, xmm4)
a3(vpaddq xmm9, xmm7, xmm5)
a3(vpsrlq xmm10, xmm8, 25)
a3(vpsrlq xmm11, xmm9, 25)
a3(vpsllq xmm8, xmm8, 39)
a3(vpsllq xmm9, xmm9, 39)
a3(vpxor xmm2, xmm2, xmm10)
a3(vpxor xmm3, xmm3, xmm11)
a3(vpxor xmm2, xmm2, xmm8)
a3(vpxor xmm3, xmm3, xmm9)
a3(vpaddq xmm10, xmm4, xmm2)
a3(vpaddq xmm11, xmm5, xmm3)
a3(vpshufd xmm10, xmm10, 0xb1)
a3(vpshufd xmm11, xmm11, 0xb1)
a3(vpxor xmm0, xmm0, xmm10)
a3(vpxor xmm1, xmm1, xmm11)
a2(vmovdqa xmm8, xmm2)
a2(vmovdqa xmm9, xmm3)
a4(vpalignr xmm2, xmm6, xmm7, 8)
a4(vpalignr xmm3, xmm7, xmm6, 8)
a4(vpalignr xmm6, xmm9, xmm8, 8)
a4(vpalignr xmm7, xmm8, xmm9, 8)
a3(vpaddq xmm10, xmm0, xmm2)
a3(vpaddq xmm11, xmm1, xmm3)
a3(vpshufd xmm10, xmm10, 0xb1)
a3(vpshufd xmm11, xmm11, 0xb1)
a3(vpxor xmm6, xmm6, xmm10)
a3(vpxor xmm7, xmm7, xmm11)
a3(vpaddq xmm8, xmm0, xmm6)
a3(vpaddq xmm9, xmm1, xmm7)
a3(vpsrlq xmm10, xmm8, 51)
a3(vpsrlq xmm11, xmm9, 51)
a3(vpsllq xmm8, xmm8, 13)
a3(vpsllq xmm9, xmm9, 13)
a3(vpxor xmm5, xmm5, xmm10)
a3(vpxor xmm4, xmm4, xmm11)
a3(vpxor xmm5, xmm5, xmm8)
a3(vpxor xmm4, xmm4, xmm9)
a3(vpaddq xmm10, xmm6, xmm5)
a3(vpaddq xmm11, xmm7, xmm4)
a3(vpsrlq xmm8, xmm10, 25)
a3(vpsrlq xmm9, xmm11, 25)
a3(vpsllq xmm10, xmm10, 39)
a3(vpsllq xmm11, xmm11, 39)
a3(vpxor xmm2, xmm2, xmm8)
a3(vpxor xmm3, xmm3, xmm9)
a3(vpxor xmm2, xmm2, xmm10)
a3(vpxor xmm3, xmm3, xmm11)
a3(vpaddq xmm8, xmm5, xmm2)
a3(vpaddq xmm9, xmm4, xmm3)
a3(vpshufd xmm8, xmm8, 0xb1)
a3(vpshufd xmm9, xmm9, 0xb1)
a3(vpxor xmm0, xmm0, xmm8)
a3(vpxor xmm1, xmm1, xmm9)
a2(vmovdqa xmm10, xmm2)
a2(vmovdqa xmm11, xmm3)
a4(vpalignr xmm2, xmm6, xmm7, 8)
a4(vpalignr xmm3, xmm7, xmm6, 8)
a4(vpalignr xmm6, xmm11, xmm10, 8)
a4(vpalignr xmm7, xmm10, xmm11, 8)
a2(sub rax, 2)
aj(ja scrypt_salsa64_avx_loop)
a3(vpaddq xmm0,xmm0,[rsp+0])
a3(vpaddq xmm1,xmm1,[rsp+16])
a3(vpaddq xmm2,xmm2,[rsp+32])
a3(vpaddq xmm3,xmm3,[rsp+48])
a3(vpaddq xmm4,xmm4,[rsp+64])
a3(vpaddq xmm5,xmm5,[rsp+80])
a3(vpaddq xmm6,xmm6,[rsp+96])
a3(vpaddq xmm7,xmm7,[rsp+112])
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0xff)
a2(add r9,128)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(vmovdqa [rax+0],xmm0)
a2(vmovdqa [rax+16],xmm1)
a2(vmovdqa [rax+32],xmm2)
a2(vmovdqa [rax+48],xmm3)
a2(vmovdqa [rax+64],xmm4)
a2(vmovdqa [rax+80],xmm5)
a2(vmovdqa [rax+96],xmm6)
a2(vmovdqa [rax+112],xmm7)
aj(jne scrypt_ChunkMix_avx_loop)
a2(mov rsp, rbp)
a1(pop rbp)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_avx)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_AVX) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED))
#define SCRYPT_SALSA64_AVX
static void asm_calling_convention
scrypt_ChunkMix_avx(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x4,x5,x6,x7,t0,t1,t2,t3,t4,t5,t6,t7,z0,z1,z2,z3;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
x4 = xmmp[4];
x5 = xmmp[5];
x6 = xmmp[6];
x7 = xmmp[7];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
t4 = x4;
t5 = x5;
t6 = x6;
t7 = x7;
for (rounds = 8; rounds; rounds -= 2) {
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z2 = _mm_srli_epi64(z0, 64-13);
z3 = _mm_srli_epi64(z1, 64-13);
z0 = _mm_slli_epi64(z0, 13);
z1 = _mm_slli_epi64(z1, 13);
x4 = _mm_xor_si128(x4, z2);
x5 = _mm_xor_si128(x5, z3);
x4 = _mm_xor_si128(x4, z0);
x5 = _mm_xor_si128(x5, z1);
z0 = _mm_add_epi64(x4, x6);
z1 = _mm_add_epi64(x5, x7);
z2 = _mm_srli_epi64(z0, 64-39);
z3 = _mm_srli_epi64(z1, 64-39);
z0 = _mm_slli_epi64(z0, 39);
z1 = _mm_slli_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z2);
x3 = _mm_xor_si128(x3, z3);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x4);
z1 = _mm_add_epi64(x3, x5);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x2;
z1 = x3;
x2 = _mm_alignr_epi8(x6, x7, 8);
x3 = _mm_alignr_epi8(x7, x6, 8);
x6 = _mm_alignr_epi8(z1, z0, 8);
x7 = _mm_alignr_epi8(z0, z1, 8);
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z2 = _mm_srli_epi64(z0, 64-13);
z3 = _mm_srli_epi64(z1, 64-13);
z0 = _mm_slli_epi64(z0, 13);
z1 = _mm_slli_epi64(z1, 13);
x5 = _mm_xor_si128(x5, z2);
x4 = _mm_xor_si128(x4, z3);
x5 = _mm_xor_si128(x5, z0);
x4 = _mm_xor_si128(x4, z1);
z0 = _mm_add_epi64(x5, x6);
z1 = _mm_add_epi64(x4, x7);
z2 = _mm_srli_epi64(z0, 64-39);
z3 = _mm_srli_epi64(z1, 64-39);
z0 = _mm_slli_epi64(z0, 39);
z1 = _mm_slli_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z2);
x3 = _mm_xor_si128(x3, z3);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x5);
z1 = _mm_add_epi64(x3, x4);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x2;
z1 = x3;
x2 = _mm_alignr_epi8(x6, x7, 8);
x3 = _mm_alignr_epi8(x7, x6, 8);
x6 = _mm_alignr_epi8(z1, z0, 8);
x7 = _mm_alignr_epi8(z0, z1, 8);
}
x0 = _mm_add_epi64(x0, t0);
x1 = _mm_add_epi64(x1, t1);
x2 = _mm_add_epi64(x2, t2);
x3 = _mm_add_epi64(x3, t3);
x4 = _mm_add_epi64(x4, t4);
x5 = _mm_add_epi64(x5, t5);
x6 = _mm_add_epi64(x6, t6);
x7 = _mm_add_epi64(x7, t7);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
xmmp[4] = x4;
xmmp[5] = x5;
xmmp[6] = x6;
xmmp[7] = x7;
}
}
#endif
#if defined(SCRYPT_SALSA64_AVX)
/* uses salsa64_core_tangle_sse2 */
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa64/8-AVX"
#undef SCRYPT_SALSA64_INCLUDED
#define SCRYPT_SALSA64_INCLUDED
#endif
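Every ChunkMix variant in these files computes the same function: scrypt's BlockMix over 128-byte Salsa64/8 blocks of sixteen uint64_t words, matching the numbered step comments in the intrinsic versions. A plain-C sketch of that contract, assuming salsa64_core_basic from the reference mix file later in this diff and memcpy from string.h:

static void
scrypt_ChunkMix_ref(uint64_t *Bout, uint64_t *Bin, uint64_t *Bxor, uint32_t r) {
    uint64_t X[16];
    uint32_t i, j, blocksPerChunk = r * 2, half = 0;
    /* 1: X = B_{2r - 1}, optionally xored with the second chunk */
    memcpy(X, Bin + (blocksPerChunk - 1) * 16, sizeof(X));
    if (Bxor)
        for (j = 0; j < 16; j++) X[j] ^= Bxor[(blocksPerChunk - 1) * 16 + j];
    /* 2: for i = 0 to 2r - 1 do */
    for (i = 0; i < blocksPerChunk; i++, half ^= r) {
        /* 3: X = H(X ^ B_i) */
        for (j = 0; j < 16; j++) X[j] ^= Bin[i * 16 + j];
        if (Bxor)
            for (j = 0; j < 16; j++) X[j] ^= Bxor[i * 16 + j];
        salsa64_core_basic(X); /* 8 rounds plus feed-forward */
        /* 6: even blocks fill B'[0..r-1], odd blocks fill B'[r..2r-1] */
        memcpy(Bout + ((i / 2) + half) * 16, X, sizeof(X));
    }
}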

View File

@@ -1,221 +0,0 @@
/* x64 */
#if defined(X86_64ASM_AVX2) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA64_AVX2
asm_naked_fn_proto(void, scrypt_ChunkMix_avx2)(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_avx2)
a2(lea rcx,[ecx*2]) /* zero extend uint32_t by using ecx, win64 can leave garbage in the top half */
a2(shl rcx,7)
a2(lea r9,[rcx-128])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(vmovdqa ymm0,[rax+0])
a2(vmovdqa ymm1,[rax+32])
a2(vmovdqa ymm2,[rax+64])
a2(vmovdqa ymm3,[rax+96])
aj(jz scrypt_ChunkMix_avx2_no_xor1)
a3(vpxor ymm0,ymm0,[r9+0])
a3(vpxor ymm1,ymm1,[r9+32])
a3(vpxor ymm2,ymm2,[r9+64])
a3(vpxor ymm3,ymm3,[r9+96])
a1(scrypt_ChunkMix_avx2_no_xor1:)
a2(xor r9,r9)
a2(xor r8,r8)
a1(scrypt_ChunkMix_avx2_loop:)
a2(and rdx, rdx)
a3(vpxor ymm0,ymm0,[rsi+r9+0])
a3(vpxor ymm1,ymm1,[rsi+r9+32])
a3(vpxor ymm2,ymm2,[rsi+r9+64])
a3(vpxor ymm3,ymm3,[rsi+r9+96])
aj(jz scrypt_ChunkMix_avx2_no_xor2)
a3(vpxor ymm0,ymm0,[rdx+r9+0])
a3(vpxor ymm1,ymm1,[rdx+r9+32])
a3(vpxor ymm2,ymm2,[rdx+r9+64])
a3(vpxor ymm3,ymm3,[rdx+r9+96])
a1(scrypt_ChunkMix_avx2_no_xor2:)
a2(vmovdqa ymm6,ymm0)
a2(vmovdqa ymm7,ymm1)
a2(vmovdqa ymm8,ymm2)
a2(vmovdqa ymm9,ymm3)
a2(mov rax,4)
a1(scrypt_salsa64_avx2_loop: )
a3(vpaddq ymm4, ymm1, ymm0)
a3(vpshufd ymm4, ymm4, 0xb1)
a3(vpxor ymm3, ymm3, ymm4)
a3(vpaddq ymm4, ymm0, ymm3)
a3(vpsrlq ymm5, ymm4, 51)
a3(vpxor ymm2, ymm2, ymm5)
a3(vpsllq ymm4, ymm4, 13)
a3(vpxor ymm2, ymm2, ymm4)
a3(vpaddq ymm4, ymm3, ymm2)
a3(vpsrlq ymm5, ymm4, 25)
a3(vpxor ymm1, ymm1, ymm5)
a3(vpsllq ymm4, ymm4, 39)
a3(vpxor ymm1, ymm1, ymm4)
a3(vpaddq ymm4, ymm2, ymm1)
a3(vpshufd ymm4, ymm4, 0xb1)
a3(vpermq ymm1, ymm1, 0x39)
a3(vpermq ymm10, ymm2, 0x4e)
a3(vpxor ymm0, ymm0, ymm4)
a3(vpermq ymm3, ymm3, 0x93)
a3(vpaddq ymm4, ymm3, ymm0)
a3(vpshufd ymm4, ymm4, 0xb1)
a3(vpxor ymm1, ymm1, ymm4)
a3(vpaddq ymm4, ymm0, ymm1)
a3(vpsrlq ymm5, ymm4, 51)
a3(vpxor ymm10, ymm10, ymm5)
a3(vpsllq ymm4, ymm4, 13)
a3(vpxor ymm10, ymm10, ymm4)
a3(vpaddq ymm4, ymm1, ymm10)
a3(vpsrlq ymm5, ymm4, 25)
a3(vpxor ymm3, ymm3, ymm5)
a3(vpsllq ymm4, ymm4, 39)
a3(vpermq ymm1, ymm1, 0x93)
a3(vpxor ymm3, ymm3, ymm4)
a3(vpermq ymm2, ymm10, 0x4e)
a3(vpaddq ymm4, ymm10, ymm3)
a3(vpshufd ymm4, ymm4, 0xb1)
a3(vpermq ymm3, ymm3, 0x39)
a3(vpxor ymm0, ymm0, ymm4)
a1(dec rax)
aj(jnz scrypt_salsa64_avx2_loop)
a3(vpaddq ymm0,ymm0,ymm6)
a3(vpaddq ymm1,ymm1,ymm7)
a3(vpaddq ymm2,ymm2,ymm8)
a3(vpaddq ymm3,ymm3,ymm9)
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0xff)
a2(add r9,128)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(vmovdqa [rax+0],ymm0)
a2(vmovdqa [rax+32],ymm1)
a2(vmovdqa [rax+64],ymm2)
a2(vmovdqa [rax+96],ymm3)
aj(jne scrypt_ChunkMix_avx2_loop)
a1(vzeroupper)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_avx2)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_AVX2) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED))
#define SCRYPT_SALSA64_AVX2
static void asm_calling_convention
scrypt_ChunkMix_avx2(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
ymmi *ymmp,y0,y1,y2,y3,t0,t1,t2,t3,z0,z1;
size_t rounds;
/* 1: X = B_{2r - 1} */
ymmp = (ymmi *)scrypt_block(Bin, blocksPerChunk - 1);
y0 = ymmp[0];
y1 = ymmp[1];
y2 = ymmp[2];
y3 = ymmp[3];
if (Bxor) {
ymmp = (ymmi *)scrypt_block(Bxor, blocksPerChunk - 1);
y0 = _mm256_xor_si256(y0, ymmp[0]);
y1 = _mm256_xor_si256(y1, ymmp[1]);
y2 = _mm256_xor_si256(y2, ymmp[2]);
y3 = _mm256_xor_si256(y3, ymmp[3]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
ymmp = (ymmi *)scrypt_block(Bin, i);
y0 = _mm256_xor_si256(y0, ymmp[0]);
y1 = _mm256_xor_si256(y1, ymmp[1]);
y2 = _mm256_xor_si256(y2, ymmp[2]);
y3 = _mm256_xor_si256(y3, ymmp[3]);
if (Bxor) {
ymmp = (ymmi *)scrypt_block(Bxor, i);
y0 = _mm256_xor_si256(y0, ymmp[0]);
y1 = _mm256_xor_si256(y1, ymmp[1]);
y2 = _mm256_xor_si256(y2, ymmp[2]);
y3 = _mm256_xor_si256(y3, ymmp[3]);
}
t0 = y0;
t1 = y1;
t2 = y2;
t3 = y3;
for (rounds = 8; rounds; rounds -= 2) {
z0 = _mm256_add_epi64(y0, y1);
z0 = _mm256_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
y3 = _mm256_xor_si256(y3, z0);
z0 = _mm256_add_epi64(y3, y0);
z1 = _mm256_srli_epi64(z0, 64-13);
y2 = _mm256_xor_si256(y2, z1);
z0 = _mm256_slli_epi64(z0, 13);
y2 = _mm256_xor_si256(y2, z0);
z0 = _mm256_add_epi64(y2, y3);
z1 = _mm256_srli_epi64(z0, 64-39);
y1 = _mm256_xor_si256(y1, z1);
z0 = _mm256_slli_epi64(z0, 39);
y1 = _mm256_xor_si256(y1, z0);
y1 = _mm256_permute4x64_epi64(y1, _MM_SHUFFLE(0,3,2,1));
y2 = _mm256_permute4x64_epi64(y2, _MM_SHUFFLE(1,0,3,2));
y3 = _mm256_permute4x64_epi64(y3, _MM_SHUFFLE(2,1,0,3));
z0 = _mm256_add_epi64(y1, y2);
z0 = _mm256_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
y0 = _mm256_xor_si256(y0, z0);
z0 = _mm256_add_epi64(y0, y3);
z0 = _mm256_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
y1 = _mm256_xor_si256(y1, z0);
z0 = _mm256_add_epi64(y1, y0);
z1 = _mm256_srli_epi64(z0, 64-13);
y2 = _mm256_xor_si256(y2, z1);
z0 = _mm256_slli_epi64(z0, 13);
y2 = _mm256_xor_si256(y2, z0);
z0 = _mm256_add_epi64(y2, y1);
z1 = _mm256_srli_epi64(z0, 64-39);
y3 = _mm256_xor_si256(y3, z1);
z0 = _mm256_slli_epi64(z0, 39);
y3 = _mm256_xor_si256(y3, z0);
z0 = _mm256_add_epi64(y3, y2);
z0 = _mm256_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
y0 = _mm256_xor_si256(y0, z0);
y1 = _mm256_permute4x64_epi64(y1, _MM_SHUFFLE(2,1,0,3));
y2 = _mm256_permute4x64_epi64(y2, _MM_SHUFFLE(1,0,3,2));
y3 = _mm256_permute4x64_epi64(y3, _MM_SHUFFLE(0,3,2,1));
}
y0 = _mm256_add_epi64(y0, t0);
y1 = _mm256_add_epi64(y1, t1);
y2 = _mm256_add_epi64(y2, t2);
y3 = _mm256_add_epi64(y3, t3);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
ymmp = (ymmi *)scrypt_block(Bout, (i / 2) + half);
ymmp[0] = y0;
ymmp[1] = y1;
ymmp[2] = y2;
ymmp[3] = y3;
}
}
#endif
#if defined(SCRYPT_SALSA64_AVX2)
/* uses salsa64_core_tangle_sse2 */
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa64/8-AVX2"
#undef SCRYPT_SALSA64_INCLUDED
#define SCRYPT_SALSA64_INCLUDED
#endif

View File

@@ -1,449 +0,0 @@
/* x64 */
#if defined(X86_64ASM_SSE2) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA64_SSE2
asm_naked_fn_proto(void, scrypt_ChunkMix_sse2)(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_sse2)
a1(push rbp)
a2(mov rbp, rsp)
a2(and rsp, ~63)
a2(sub rsp, 128)
a2(lea rcx,[ecx*2]) /* zero extend uint32_t by using ecx, win64 can leave garbage in the top half */
a2(shl rcx,7)
a2(lea r9,[rcx-128])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(movdqa xmm0,[rax+0])
a2(movdqa xmm1,[rax+16])
a2(movdqa xmm2,[rax+32])
a2(movdqa xmm3,[rax+48])
a2(movdqa xmm4,[rax+64])
a2(movdqa xmm5,[rax+80])
a2(movdqa xmm6,[rax+96])
a2(movdqa xmm7,[rax+112])
aj(jz scrypt_ChunkMix_sse2_no_xor1)
a2(pxor xmm0,[r9+0])
a2(pxor xmm1,[r9+16])
a2(pxor xmm2,[r9+32])
a2(pxor xmm3,[r9+48])
a2(pxor xmm4,[r9+64])
a2(pxor xmm5,[r9+80])
a2(pxor xmm6,[r9+96])
a2(pxor xmm7,[r9+112])
a1(scrypt_ChunkMix_sse2_no_xor1:)
a2(xor r9,r9)
a2(xor r8,r8)
a1(scrypt_ChunkMix_sse2_loop:)
a2(and rdx, rdx)
a2(pxor xmm0,[rsi+r9+0])
a2(pxor xmm1,[rsi+r9+16])
a2(pxor xmm2,[rsi+r9+32])
a2(pxor xmm3,[rsi+r9+48])
a2(pxor xmm4,[rsi+r9+64])
a2(pxor xmm5,[rsi+r9+80])
a2(pxor xmm6,[rsi+r9+96])
a2(pxor xmm7,[rsi+r9+112])
aj(jz scrypt_ChunkMix_sse2_no_xor2)
a2(pxor xmm0,[rdx+r9+0])
a2(pxor xmm1,[rdx+r9+16])
a2(pxor xmm2,[rdx+r9+32])
a2(pxor xmm3,[rdx+r9+48])
a2(pxor xmm4,[rdx+r9+64])
a2(pxor xmm5,[rdx+r9+80])
a2(pxor xmm6,[rdx+r9+96])
a2(pxor xmm7,[rdx+r9+112])
a1(scrypt_ChunkMix_sse2_no_xor2:)
a2(movdqa [rsp+0],xmm0)
a2(movdqa [rsp+16],xmm1)
a2(movdqa [rsp+32],xmm2)
a2(movdqa [rsp+48],xmm3)
a2(movdqa [rsp+64],xmm4)
a2(movdqa [rsp+80],xmm5)
a2(movdqa [rsp+96],xmm6)
a2(movdqa [rsp+112],xmm7)
a2(mov rax,8)
a1(scrypt_salsa64_sse2_loop: )
a2(movdqa xmm8, xmm0)
a2(movdqa xmm9, xmm1)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm6, xmm8)
a2(pxor xmm7, xmm9)
a2(movdqa xmm10, xmm0)
a2(movdqa xmm11, xmm1)
a2(paddq xmm10, xmm6)
a2(paddq xmm11, xmm7)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 51)
a2(psrlq xmm11, 51)
a2(psllq xmm8, 13)
a2(psllq xmm9, 13)
a2(pxor xmm4, xmm10)
a2(pxor xmm5, xmm11)
a2(pxor xmm4, xmm8)
a2(pxor xmm5, xmm9)
a2(movdqa xmm10, xmm6)
a2(movdqa xmm11, xmm7)
a2(paddq xmm10, xmm4)
a2(paddq xmm11, xmm5)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 25)
a2(psrlq xmm11, 25)
a2(psllq xmm8, 39)
a2(psllq xmm9, 39)
a2(pxor xmm2, xmm10)
a2(pxor xmm3, xmm11)
a2(pxor xmm2, xmm8)
a2(pxor xmm3, xmm9)
a2(movdqa xmm8, xmm4)
a2(movdqa xmm9, xmm5)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm0, xmm8)
a2(pxor xmm1, xmm9)
a2(movdqa xmm8, xmm2)
a2(movdqa xmm9, xmm3)
a2(movdqa xmm10, xmm6)
a2(movdqa xmm11, xmm7)
a2(movdqa xmm2, xmm7)
a2(movdqa xmm3, xmm6)
a2(punpcklqdq xmm10, xmm6)
a2(punpcklqdq xmm11, xmm7)
a2(movdqa xmm6, xmm8)
a2(movdqa xmm7, xmm9)
a2(punpcklqdq xmm9, xmm9)
a2(punpcklqdq xmm8, xmm8)
a2(punpckhqdq xmm2, xmm10)
a2(punpckhqdq xmm3, xmm11)
a2(punpckhqdq xmm6, xmm9)
a2(punpckhqdq xmm7, xmm8)
a2(sub rax, 2)
a2(movdqa xmm8, xmm0)
a2(movdqa xmm9, xmm1)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm6, xmm8)
a2(pxor xmm7, xmm9)
a2(movdqa xmm10, xmm0)
a2(movdqa xmm11, xmm1)
a2(paddq xmm10, xmm6)
a2(paddq xmm11, xmm7)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 51)
a2(psrlq xmm11, 51)
a2(psllq xmm8, 13)
a2(psllq xmm9, 13)
a2(pxor xmm5, xmm10)
a2(pxor xmm4, xmm11)
a2(pxor xmm5, xmm8)
a2(pxor xmm4, xmm9)
a2(movdqa xmm10, xmm6)
a2(movdqa xmm11, xmm7)
a2(paddq xmm10, xmm5)
a2(paddq xmm11, xmm4)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 25)
a2(psrlq xmm11, 25)
a2(psllq xmm8, 39)
a2(psllq xmm9, 39)
a2(pxor xmm2, xmm10)
a2(pxor xmm3, xmm11)
a2(pxor xmm2, xmm8)
a2(pxor xmm3, xmm9)
a2(movdqa xmm8, xmm5)
a2(movdqa xmm9, xmm4)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm0, xmm8)
a2(pxor xmm1, xmm9)
a2(movdqa xmm8, xmm2)
a2(movdqa xmm9, xmm3)
a2(movdqa xmm10, xmm6)
a2(movdqa xmm11, xmm7)
a2(movdqa xmm2, xmm7)
a2(movdqa xmm3, xmm6)
a2(punpcklqdq xmm10, xmm6)
a2(punpcklqdq xmm11, xmm7)
a2(movdqa xmm6, xmm8)
a2(movdqa xmm7, xmm9)
a2(punpcklqdq xmm9, xmm9)
a2(punpcklqdq xmm8, xmm8)
a2(punpckhqdq xmm2, xmm10)
a2(punpckhqdq xmm3, xmm11)
a2(punpckhqdq xmm6, xmm9)
a2(punpckhqdq xmm7, xmm8)
aj(ja scrypt_salsa64_sse2_loop)
a2(paddq xmm0,[rsp+0])
a2(paddq xmm1,[rsp+16])
a2(paddq xmm2,[rsp+32])
a2(paddq xmm3,[rsp+48])
a2(paddq xmm4,[rsp+64])
a2(paddq xmm5,[rsp+80])
a2(paddq xmm6,[rsp+96])
a2(paddq xmm7,[rsp+112])
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0xff)
a2(add r9,128)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(movdqa [rax+0],xmm0)
a2(movdqa [rax+16],xmm1)
a2(movdqa [rax+32],xmm2)
a2(movdqa [rax+48],xmm3)
a2(movdqa [rax+64],xmm4)
a2(movdqa [rax+80],xmm5)
a2(movdqa [rax+96],xmm6)
a2(movdqa [rax+112],xmm7)
aj(jne scrypt_ChunkMix_sse2_loop)
a2(mov rsp, rbp)
a1(pop rbp)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_sse2)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_SSE2) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED))
#define SCRYPT_SALSA64_SSE2
static void asm_calling_convention
scrypt_ChunkMix_sse2(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x4,x5,x6,x7,t0,t1,t2,t3,t4,t5,t6,t7,z0,z1,z2,z3;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
x4 = xmmp[4];
x5 = xmmp[5];
x6 = xmmp[6];
x7 = xmmp[7];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
t4 = x4;
t5 = x5;
t6 = x6;
t7 = x7;
for (rounds = 8; rounds; rounds -= 2) {
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z2 = _mm_srli_epi64(z0, 64-13);
z3 = _mm_srli_epi64(z1, 64-13);
z0 = _mm_slli_epi64(z0, 13);
z1 = _mm_slli_epi64(z1, 13);
x4 = _mm_xor_si128(x4, z2);
x5 = _mm_xor_si128(x5, z3);
x4 = _mm_xor_si128(x4, z0);
x5 = _mm_xor_si128(x5, z1);
z0 = _mm_add_epi64(x4, x6);
z1 = _mm_add_epi64(x5, x7);
z2 = _mm_srli_epi64(z0, 64-39);
z3 = _mm_srli_epi64(z1, 64-39);
z0 = _mm_slli_epi64(z0, 39);
z1 = _mm_slli_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z2);
x3 = _mm_xor_si128(x3, z3);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x4);
z1 = _mm_add_epi64(x3, x5);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x4;
z1 = x5;
z2 = x2;
z3 = x3;
x4 = z1;
x5 = z0;
x2 = _mm_unpackhi_epi64(x7, _mm_unpacklo_epi64(x6, x6));
x3 = _mm_unpackhi_epi64(x6, _mm_unpacklo_epi64(x7, x7));
x6 = _mm_unpackhi_epi64(z2, _mm_unpacklo_epi64(z3, z3));
x7 = _mm_unpackhi_epi64(z3, _mm_unpacklo_epi64(z2, z2));
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z2 = _mm_srli_epi64(z0, 64-13);
z3 = _mm_srli_epi64(z1, 64-13);
z0 = _mm_slli_epi64(z0, 13);
z1 = _mm_slli_epi64(z1, 13);
x4 = _mm_xor_si128(x4, z2);
x5 = _mm_xor_si128(x5, z3);
x4 = _mm_xor_si128(x4, z0);
x5 = _mm_xor_si128(x5, z1);
z0 = _mm_add_epi64(x4, x6);
z1 = _mm_add_epi64(x5, x7);
z2 = _mm_srli_epi64(z0, 64-39);
z3 = _mm_srli_epi64(z1, 64-39);
z0 = _mm_slli_epi64(z0, 39);
z1 = _mm_slli_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z2);
x3 = _mm_xor_si128(x3, z3);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x4);
z1 = _mm_add_epi64(x3, x5);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x4;
z1 = x5;
z2 = x2;
z3 = x3;
x4 = z1;
x5 = z0;
x2 = _mm_unpackhi_epi64(x7, _mm_unpacklo_epi64(x6, x6));
x3 = _mm_unpackhi_epi64(x6, _mm_unpacklo_epi64(x7, x7));
x6 = _mm_unpackhi_epi64(z2, _mm_unpacklo_epi64(z3, z3));
x7 = _mm_unpackhi_epi64(z3, _mm_unpacklo_epi64(z2, z2));
}
x0 = _mm_add_epi64(x0, t0);
x1 = _mm_add_epi64(x1, t1);
x2 = _mm_add_epi64(x2, t2);
x3 = _mm_add_epi64(x3, t3);
x4 = _mm_add_epi64(x4, t4);
x5 = _mm_add_epi64(x5, t5);
x6 = _mm_add_epi64(x6, t6);
x7 = _mm_add_epi64(x7, t7);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
xmmp[4] = x4;
xmmp[5] = x5;
xmmp[6] = x6;
xmmp[7] = x7;
}
}
#endif
#if defined(SCRYPT_SALSA64_SSE2)
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa64/8-SSE2"
#undef SCRYPT_SALSA64_INCLUDED
#define SCRYPT_SALSA64_INCLUDED
#endif
/* sse3/avx use this as well */
#if defined(SCRYPT_SALSA64_INCLUDED)
/*
Default layout:
0 1 2 3
4 5 6 7
8 9 10 11
12 13 14 15
SSE2 layout:
0 5 10 15
12 1 6 11
8 13 2 7
4 9 14 3
*/
static void asm_calling_convention
salsa64_core_tangle_sse2(uint64_t *blocks, size_t count) {
uint64_t t;
while (count--) {
t = blocks[1]; blocks[1] = blocks[5]; blocks[5] = t;
t = blocks[2]; blocks[2] = blocks[10]; blocks[10] = t;
t = blocks[3]; blocks[3] = blocks[15]; blocks[15] = t;
t = blocks[4]; blocks[4] = blocks[12]; blocks[12] = t;
t = blocks[7]; blocks[7] = blocks[11]; blocks[11] = t;
t = blocks[9]; blocks[9] = blocks[13]; blocks[13] = t;
blocks += 16;
}
}
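The six exchanges above are disjoint swaps, so the permutation is an involution: running the tangle twice restores the default layout. A small self-check sketch, assuming assert.h is available:

#include <assert.h>
static void
salsa64_tangle_selfcheck(void) {
	uint64_t b[16];
	size_t i;
	for (i = 0; i < 16; i++)
		b[i] = (uint64_t)i;
	salsa64_core_tangle_sse2(b, 1); /* default -> SSE2 layout */
	salsa64_core_tangle_sse2(b, 1); /* SSE2 -> default layout */
	for (i = 0; i < 16; i++)
		assert(b[i] == (uint64_t)i);
}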
#endif

View File

@@ -1,399 +0,0 @@
/* x64 */
#if defined(X86_64ASM_SSSE3) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA64_SSSE3
asm_naked_fn_proto(void, scrypt_ChunkMix_ssse3)(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_ssse3)
a1(push rbp)
a2(mov rbp, rsp)
a2(and rsp, ~63)
a2(sub rsp, 128)
a2(lea rcx,[ecx*2]) /* zero extend uint32_t by using ecx, win64 can leave garbage in the top half */
a2(shl rcx,7)
a2(lea r9,[rcx-128])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(movdqa xmm0,[rax+0])
a2(movdqa xmm1,[rax+16])
a2(movdqa xmm2,[rax+32])
a2(movdqa xmm3,[rax+48])
a2(movdqa xmm4,[rax+64])
a2(movdqa xmm5,[rax+80])
a2(movdqa xmm6,[rax+96])
a2(movdqa xmm7,[rax+112])
aj(jz scrypt_ChunkMix_ssse3_no_xor1)
a2(pxor xmm0,[r9+0])
a2(pxor xmm1,[r9+16])
a2(pxor xmm2,[r9+32])
a2(pxor xmm3,[r9+48])
a2(pxor xmm4,[r9+64])
a2(pxor xmm5,[r9+80])
a2(pxor xmm6,[r9+96])
a2(pxor xmm7,[r9+112])
a1(scrypt_ChunkMix_ssse3_no_xor1:)
a2(xor r9,r9)
a2(xor r8,r8)
a1(scrypt_ChunkMix_ssse3_loop:)
a2(and rdx, rdx)
a2(pxor xmm0,[rsi+r9+0])
a2(pxor xmm1,[rsi+r9+16])
a2(pxor xmm2,[rsi+r9+32])
a2(pxor xmm3,[rsi+r9+48])
a2(pxor xmm4,[rsi+r9+64])
a2(pxor xmm5,[rsi+r9+80])
a2(pxor xmm6,[rsi+r9+96])
a2(pxor xmm7,[rsi+r9+112])
aj(jz scrypt_ChunkMix_ssse3_no_xor2)
a2(pxor xmm0,[rdx+r9+0])
a2(pxor xmm1,[rdx+r9+16])
a2(pxor xmm2,[rdx+r9+32])
a2(pxor xmm3,[rdx+r9+48])
a2(pxor xmm4,[rdx+r9+64])
a2(pxor xmm5,[rdx+r9+80])
a2(pxor xmm6,[rdx+r9+96])
a2(pxor xmm7,[rdx+r9+112])
a1(scrypt_ChunkMix_ssse3_no_xor2:)
a2(movdqa [rsp+0],xmm0)
a2(movdqa [rsp+16],xmm1)
a2(movdqa [rsp+32],xmm2)
a2(movdqa [rsp+48],xmm3)
a2(movdqa [rsp+64],xmm4)
a2(movdqa [rsp+80],xmm5)
a2(movdqa [rsp+96],xmm6)
a2(movdqa [rsp+112],xmm7)
a2(mov rax,8)
a1(scrypt_salsa64_ssse3_loop: )
a2(movdqa xmm8, xmm0)
a2(movdqa xmm9, xmm1)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm6, xmm8)
a2(pxor xmm7, xmm9)
a2(movdqa xmm10, xmm0)
a2(movdqa xmm11, xmm1)
a2(paddq xmm10, xmm6)
a2(paddq xmm11, xmm7)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 51)
a2(psrlq xmm11, 51)
a2(psllq xmm8, 13)
a2(psllq xmm9, 13)
a2(pxor xmm4, xmm10)
a2(pxor xmm5, xmm11)
a2(pxor xmm4, xmm8)
a2(pxor xmm5, xmm9)
a2(movdqa xmm10, xmm6)
a2(movdqa xmm11, xmm7)
a2(paddq xmm10, xmm4)
a2(paddq xmm11, xmm5)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 25)
a2(psrlq xmm11, 25)
a2(psllq xmm8, 39)
a2(psllq xmm9, 39)
a2(pxor xmm2, xmm10)
a2(pxor xmm3, xmm11)
a2(pxor xmm2, xmm8)
a2(pxor xmm3, xmm9)
a2(movdqa xmm8, xmm4)
a2(movdqa xmm9, xmm5)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm0, xmm8)
a2(pxor xmm1, xmm9)
a2(movdqa xmm10, xmm2)
a2(movdqa xmm11, xmm3)
a2(movdqa xmm2, xmm6)
a2(movdqa xmm3, xmm7)
a3(palignr xmm2, xmm7, 8)
a3(palignr xmm3, xmm6, 8)
a2(movdqa xmm6, xmm11)
a2(movdqa xmm7, xmm10)
a3(palignr xmm6, xmm10, 8)
a3(palignr xmm7, xmm11, 8)
a2(sub rax, 2)
a2(movdqa xmm8, xmm0)
a2(movdqa xmm9, xmm1)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm6, xmm8)
a2(pxor xmm7, xmm9)
a2(movdqa xmm10, xmm0)
a2(movdqa xmm11, xmm1)
a2(paddq xmm10, xmm6)
a2(paddq xmm11, xmm7)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 51)
a2(psrlq xmm11, 51)
a2(psllq xmm8, 13)
a2(psllq xmm9, 13)
a2(pxor xmm5, xmm10)
a2(pxor xmm4, xmm11)
a2(pxor xmm5, xmm8)
a2(pxor xmm4, xmm9)
a2(movdqa xmm10, xmm6)
a2(movdqa xmm11, xmm7)
a2(paddq xmm10, xmm5)
a2(paddq xmm11, xmm4)
a2(movdqa xmm8, xmm10)
a2(movdqa xmm9, xmm11)
a2(psrlq xmm10, 25)
a2(psrlq xmm11, 25)
a2(psllq xmm8, 39)
a2(psllq xmm9, 39)
a2(pxor xmm2, xmm10)
a2(pxor xmm3, xmm11)
a2(pxor xmm2, xmm8)
a2(pxor xmm3, xmm9)
a2(movdqa xmm8, xmm5)
a2(movdqa xmm9, xmm4)
a2(paddq xmm8, xmm2)
a2(paddq xmm9, xmm3)
a3(pshufd xmm8, xmm8, 0xb1)
a3(pshufd xmm9, xmm9, 0xb1)
a2(pxor xmm0, xmm8)
a2(pxor xmm1, xmm9)
a2(movdqa xmm10, xmm2)
a2(movdqa xmm11, xmm3)
a2(movdqa xmm2, xmm6)
a2(movdqa xmm3, xmm7)
a3(palignr xmm2, xmm7, 8)
a3(palignr xmm3, xmm6, 8)
a2(movdqa xmm6, xmm11)
a2(movdqa xmm7, xmm10)
a3(palignr xmm6, xmm10, 8)
a3(palignr xmm7, xmm11, 8)
aj(ja scrypt_salsa64_ssse3_loop)
a2(paddq xmm0,[rsp+0])
a2(paddq xmm1,[rsp+16])
a2(paddq xmm2,[rsp+32])
a2(paddq xmm3,[rsp+48])
a2(paddq xmm4,[rsp+64])
a2(paddq xmm5,[rsp+80])
a2(paddq xmm6,[rsp+96])
a2(paddq xmm7,[rsp+112])
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0xff)
a2(add r9,128)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(movdqa [rax+0],xmm0)
a2(movdqa [rax+16],xmm1)
a2(movdqa [rax+32],xmm2)
a2(movdqa [rax+48],xmm3)
a2(movdqa [rax+64],xmm4)
a2(movdqa [rax+80],xmm5)
a2(movdqa [rax+96],xmm6)
a2(movdqa [rax+112],xmm7)
aj(jne scrypt_ChunkMix_ssse3_loop)
a2(mov rsp, rbp)
a1(pop rbp)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_ssse3)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_SSSE3) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED))
#define SCRYPT_SALSA64_SSSE3
static void asm_calling_convention
scrypt_ChunkMix_ssse3(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x4,x5,x6,x7,t0,t1,t2,t3,t4,t5,t6,t7,z0,z1,z2,z3;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
x4 = xmmp[4];
x5 = xmmp[5];
x6 = xmmp[6];
x7 = xmmp[7];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
t4 = x4;
t5 = x5;
t6 = x6;
t7 = x7;
for (rounds = 8; rounds; rounds -= 2) {
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z2 = _mm_srli_epi64(z0, 64-13);
z3 = _mm_srli_epi64(z1, 64-13);
z0 = _mm_slli_epi64(z0, 13);
z1 = _mm_slli_epi64(z1, 13);
x4 = _mm_xor_si128(x4, z2);
x5 = _mm_xor_si128(x5, z3);
x4 = _mm_xor_si128(x4, z0);
x5 = _mm_xor_si128(x5, z1);
z0 = _mm_add_epi64(x4, x6);
z1 = _mm_add_epi64(x5, x7);
z2 = _mm_srli_epi64(z0, 64-39);
z3 = _mm_srli_epi64(z1, 64-39);
z0 = _mm_slli_epi64(z0, 39);
z1 = _mm_slli_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z2);
x3 = _mm_xor_si128(x3, z3);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x4);
z1 = _mm_add_epi64(x3, x5);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x2;
z1 = x3;
x2 = _mm_alignr_epi8(x6, x7, 8);
x3 = _mm_alignr_epi8(x7, x6, 8);
x6 = _mm_alignr_epi8(z1, z0, 8);
x7 = _mm_alignr_epi8(z0, z1, 8);
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z2 = _mm_srli_epi64(z0, 64-13);
z3 = _mm_srli_epi64(z1, 64-13);
z0 = _mm_slli_epi64(z0, 13);
z1 = _mm_slli_epi64(z1, 13);
x5 = _mm_xor_si128(x5, z2);
x4 = _mm_xor_si128(x4, z3);
x5 = _mm_xor_si128(x5, z0);
x4 = _mm_xor_si128(x4, z1);
z0 = _mm_add_epi64(x5, x6);
z1 = _mm_add_epi64(x4, x7);
z2 = _mm_srli_epi64(z0, 64-39);
z3 = _mm_srli_epi64(z1, 64-39);
z0 = _mm_slli_epi64(z0, 39);
z1 = _mm_slli_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z2);
x3 = _mm_xor_si128(x3, z3);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x5);
z1 = _mm_add_epi64(x3, x4);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x2;
z1 = x3;
x2 = _mm_alignr_epi8(x6, x7, 8);
x3 = _mm_alignr_epi8(x7, x6, 8);
x6 = _mm_alignr_epi8(z1, z0, 8);
x7 = _mm_alignr_epi8(z0, z1, 8);
}
x0 = _mm_add_epi64(x0, t0);
x1 = _mm_add_epi64(x1, t1);
x2 = _mm_add_epi64(x2, t2);
x3 = _mm_add_epi64(x3, t3);
x4 = _mm_add_epi64(x4, t4);
x5 = _mm_add_epi64(x5, t5);
x6 = _mm_add_epi64(x6, t6);
x7 = _mm_add_epi64(x7, t7);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
xmmp[4] = x4;
xmmp[5] = x5;
xmmp[6] = x6;
xmmp[7] = x7;
}
}
#endif
#if defined(SCRYPT_SALSA64_SSSE3)
/* uses salsa64_core_tangle_sse2 */
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa64/8-SSSE3"
#undef SCRYPT_SALSA64_INCLUDED
#define SCRYPT_SALSA64_INCLUDED
#endif

View File

@@ -1,335 +0,0 @@
/* x64 */
#if defined(X86_64ASM_XOP) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED)) && !defined(CPU_X86_FORCE_INTRINSICS)
#define SCRYPT_SALSA64_XOP
asm_naked_fn_proto(void, scrypt_ChunkMix_xop)(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r)
asm_naked_fn(scrypt_ChunkMix_xop)
a1(push rbp)
a2(mov rbp, rsp)
a2(and rsp, ~63)
a2(sub rsp, 128)
a2(lea rcx,[ecx*2]) /* zero extend uint32_t by using ecx, win64 can leave garbage in the top half */
a2(shl rcx,7)
a2(lea r9,[rcx-128])
a2(lea rax,[rsi+r9])
a2(lea r9,[rdx+r9])
a2(and rdx, rdx)
a2(vmovdqa xmm0,[rax+0])
a2(vmovdqa xmm1,[rax+16])
a2(vmovdqa xmm2,[rax+32])
a2(vmovdqa xmm3,[rax+48])
a2(vmovdqa xmm4,[rax+64])
a2(vmovdqa xmm5,[rax+80])
a2(vmovdqa xmm6,[rax+96])
a2(vmovdqa xmm7,[rax+112])
aj(jz scrypt_ChunkMix_xop_no_xor1)
a3(vpxor xmm0,xmm0,[r9+0])
a3(vpxor xmm1,xmm1,[r9+16])
a3(vpxor xmm2,xmm2,[r9+32])
a3(vpxor xmm3,xmm3,[r9+48])
a3(vpxor xmm4,xmm4,[r9+64])
a3(vpxor xmm5,xmm5,[r9+80])
a3(vpxor xmm6,xmm6,[r9+96])
a3(vpxor xmm7,xmm7,[r9+112])
a1(scrypt_ChunkMix_xop_no_xor1:)
a2(xor r9,r9)
a2(xor r8,r8)
a1(scrypt_ChunkMix_xop_loop:)
a2(and rdx, rdx)
a3(vpxor xmm0,xmm0,[rsi+r9+0])
a3(vpxor xmm1,xmm1,[rsi+r9+16])
a3(vpxor xmm2,xmm2,[rsi+r9+32])
a3(vpxor xmm3,xmm3,[rsi+r9+48])
a3(vpxor xmm4,xmm4,[rsi+r9+64])
a3(vpxor xmm5,xmm5,[rsi+r9+80])
a3(vpxor xmm6,xmm6,[rsi+r9+96])
a3(vpxor xmm7,xmm7,[rsi+r9+112])
aj(jz scrypt_ChunkMix_xop_no_xor2)
a3(vpxor xmm0,xmm0,[rdx+r9+0])
a3(vpxor xmm1,xmm1,[rdx+r9+16])
a3(vpxor xmm2,xmm2,[rdx+r9+32])
a3(vpxor xmm3,xmm3,[rdx+r9+48])
a3(vpxor xmm4,xmm4,[rdx+r9+64])
a3(vpxor xmm5,xmm5,[rdx+r9+80])
a3(vpxor xmm6,xmm6,[rdx+r9+96])
a3(vpxor xmm7,xmm7,[rdx+r9+112])
a1(scrypt_ChunkMix_xop_no_xor2:)
a2(vmovdqa [rsp+0],xmm0)
a2(vmovdqa [rsp+16],xmm1)
a2(vmovdqa [rsp+32],xmm2)
a2(vmovdqa [rsp+48],xmm3)
a2(vmovdqa [rsp+64],xmm4)
a2(vmovdqa [rsp+80],xmm5)
a2(vmovdqa [rsp+96],xmm6)
a2(vmovdqa [rsp+112],xmm7)
a2(mov rax,8)
a1(scrypt_salsa64_xop_loop: )
a3(vpaddq xmm8, xmm0, xmm2)
a3(vpaddq xmm9, xmm1, xmm3)
a3(vpshufd xmm8, xmm8, 0xb1)
a3(vpshufd xmm9, xmm9, 0xb1)
a3(vpxor xmm6, xmm6, xmm8)
a3(vpxor xmm7, xmm7, xmm9)
a3(vpaddq xmm10, xmm0, xmm6)
a3(vpaddq xmm11, xmm1, xmm7)
a3(vprotq xmm10, xmm10, 13)
a3(vprotq xmm11, xmm11, 13)
a3(vpxor xmm4, xmm4, xmm10)
a3(vpxor xmm5, xmm5, xmm11)
a3(vpaddq xmm8, xmm6, xmm4)
a3(vpaddq xmm9, xmm7, xmm5)
a3(vprotq xmm8, xmm8, 39)
a3(vprotq xmm9, xmm9, 39)
a3(vpxor xmm2, xmm2, xmm8)
a3(vpxor xmm3, xmm3, xmm9)
a3(vpaddq xmm10, xmm4, xmm2)
a3(vpaddq xmm11, xmm5, xmm3)
a3(vpshufd xmm10, xmm10, 0xb1)
a3(vpshufd xmm11, xmm11, 0xb1)
a3(vpxor xmm0, xmm0, xmm10)
a3(vpxor xmm1, xmm1, xmm11)
a2(vmovdqa xmm8, xmm2)
a2(vmovdqa xmm9, xmm3)
a4(vpalignr xmm2, xmm6, xmm7, 8)
a4(vpalignr xmm3, xmm7, xmm6, 8)
a4(vpalignr xmm6, xmm9, xmm8, 8)
a4(vpalignr xmm7, xmm8, xmm9, 8)
a3(vpaddq xmm10, xmm0, xmm2)
a3(vpaddq xmm11, xmm1, xmm3)
a3(vpshufd xmm10, xmm10, 0xb1)
a3(vpshufd xmm11, xmm11, 0xb1)
a3(vpxor xmm6, xmm6, xmm10)
a3(vpxor xmm7, xmm7, xmm11)
a3(vpaddq xmm8, xmm0, xmm6)
a3(vpaddq xmm9, xmm1, xmm7)
a3(vprotq xmm8, xmm8, 13)
a3(vprotq xmm9, xmm9, 13)
a3(vpxor xmm5, xmm5, xmm8)
a3(vpxor xmm4, xmm4, xmm9)
a3(vpaddq xmm10, xmm6, xmm5)
a3(vpaddq xmm11, xmm7, xmm4)
a3(vprotq xmm10, xmm10, 39)
a3(vprotq xmm11, xmm11, 39)
a3(vpxor xmm2, xmm2, xmm10)
a3(vpxor xmm3, xmm3, xmm11)
a3(vpaddq xmm8, xmm5, xmm2)
a3(vpaddq xmm9, xmm4, xmm3)
a3(vpshufd xmm8, xmm8, 0xb1)
a3(vpshufd xmm9, xmm9, 0xb1)
a3(vpxor xmm0, xmm0, xmm8)
a3(vpxor xmm1, xmm1, xmm9)
a2(vmovdqa xmm10, xmm2)
a2(vmovdqa xmm11, xmm3)
a4(vpalignr xmm2, xmm6, xmm7, 8)
a4(vpalignr xmm3, xmm7, xmm6, 8)
a4(vpalignr xmm6, xmm11, xmm10, 8)
a4(vpalignr xmm7, xmm10, xmm11, 8)
a2(sub rax, 2)
aj(ja scrypt_salsa64_xop_loop)
a3(vpaddq xmm0,xmm0,[rsp+0])
a3(vpaddq xmm1,xmm1,[rsp+16])
a3(vpaddq xmm2,xmm2,[rsp+32])
a3(vpaddq xmm3,xmm3,[rsp+48])
a3(vpaddq xmm4,xmm4,[rsp+64])
a3(vpaddq xmm5,xmm5,[rsp+80])
a3(vpaddq xmm6,xmm6,[rsp+96])
a3(vpaddq xmm7,xmm7,[rsp+112])
a2(lea rax,[r8+r9])
a2(xor r8,rcx)
a2(and rax,~0xff)
a2(add r9,128)
a2(shr rax,1)
a2(add rax, rdi)
a2(cmp r9,rcx)
a2(vmovdqa [rax+0],xmm0)
a2(vmovdqa [rax+16],xmm1)
a2(vmovdqa [rax+32],xmm2)
a2(vmovdqa [rax+48],xmm3)
a2(vmovdqa [rax+64],xmm4)
a2(vmovdqa [rax+80],xmm5)
a2(vmovdqa [rax+96],xmm6)
a2(vmovdqa [rax+112],xmm7)
aj(jne scrypt_ChunkMix_xop_loop)
a2(mov rsp, rbp)
a1(pop rbp)
a1(ret)
asm_naked_fn_end(scrypt_ChunkMix_xop)
#endif
/* intrinsic */
#if defined(X86_INTRINSIC_XOP) && (!defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED))
#define SCRYPT_SALSA64_XOP
static void asm_calling_convention
scrypt_ChunkMix_xop(uint64_t *Bout/*[chunkBytes]*/, uint64_t *Bin/*[chunkBytes]*/, uint64_t *Bxor/*[chunkBytes]*/, uint32_t r) {
uint32_t i, blocksPerChunk = r * 2, half = 0;
xmmi *xmmp,x0,x1,x2,x3,x4,x5,x6,x7,t0,t1,t2,t3,t4,t5,t6,t7,z0,z1;
size_t rounds;
/* 1: X = B_{2r - 1} */
xmmp = (xmmi *)scrypt_block(Bin, blocksPerChunk - 1);
x0 = xmmp[0];
x1 = xmmp[1];
x2 = xmmp[2];
x3 = xmmp[3];
x4 = xmmp[4];
x5 = xmmp[5];
x6 = xmmp[6];
x7 = xmmp[7];
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, blocksPerChunk - 1);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= r) {
/* 3: X = H(X ^ B_i) */
xmmp = (xmmi *)scrypt_block(Bin, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
if (Bxor) {
xmmp = (xmmi *)scrypt_block(Bxor, i);
x0 = _mm_xor_si128(x0, xmmp[0]);
x1 = _mm_xor_si128(x1, xmmp[1]);
x2 = _mm_xor_si128(x2, xmmp[2]);
x3 = _mm_xor_si128(x3, xmmp[3]);
x4 = _mm_xor_si128(x4, xmmp[4]);
x5 = _mm_xor_si128(x5, xmmp[5]);
x6 = _mm_xor_si128(x6, xmmp[6]);
x7 = _mm_xor_si128(x7, xmmp[7]);
}
t0 = x0;
t1 = x1;
t2 = x2;
t3 = x3;
t4 = x4;
t5 = x5;
t6 = x6;
t7 = x7;
for (rounds = 8; rounds; rounds -= 2) {
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z0 = _mm_roti_epi64(z0, 13);
z1 = _mm_roti_epi64(z1, 13);
x4 = _mm_xor_si128(x4, z0);
x5 = _mm_xor_si128(x5, z1);
z0 = _mm_add_epi64(x4, x6);
z1 = _mm_add_epi64(x5, x7);
z0 = _mm_roti_epi64(z0, 39);
z1 = _mm_roti_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x4);
z1 = _mm_add_epi64(x3, x5);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x2;
z1 = x3;
x2 = _mm_alignr_epi8(x6, x7, 8);
x3 = _mm_alignr_epi8(x7, x6, 8);
x6 = _mm_alignr_epi8(z1, z0, 8);
x7 = _mm_alignr_epi8(z0, z1, 8);
z0 = _mm_add_epi64(x0, x2);
z1 = _mm_add_epi64(x1, x3);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x6 = _mm_xor_si128(x6, z0);
x7 = _mm_xor_si128(x7, z1);
z0 = _mm_add_epi64(x6, x0);
z1 = _mm_add_epi64(x7, x1);
z0 = _mm_roti_epi64(z0, 13);
z1 = _mm_roti_epi64(z1, 13);
x5 = _mm_xor_si128(x5, z0);
x4 = _mm_xor_si128(x4, z1);
z0 = _mm_add_epi64(x5, x6);
z1 = _mm_add_epi64(x4, x7);
z0 = _mm_roti_epi64(z0, 39);
z1 = _mm_roti_epi64(z1, 39);
x2 = _mm_xor_si128(x2, z0);
x3 = _mm_xor_si128(x3, z1);
z0 = _mm_add_epi64(x2, x5);
z1 = _mm_add_epi64(x3, x4);
z0 = _mm_shuffle_epi32(z0, _MM_SHUFFLE(2,3,0,1));
z1 = _mm_shuffle_epi32(z1, _MM_SHUFFLE(2,3,0,1));
x0 = _mm_xor_si128(x0, z0);
x1 = _mm_xor_si128(x1, z1);
z0 = x2;
z1 = x3;
x2 = _mm_alignr_epi8(x6, x7, 8);
x3 = _mm_alignr_epi8(x7, x6, 8);
x6 = _mm_alignr_epi8(z1, z0, 8);
x7 = _mm_alignr_epi8(z0, z1, 8);
}
x0 = _mm_add_epi64(x0, t0);
x1 = _mm_add_epi64(x1, t1);
x2 = _mm_add_epi64(x2, t2);
x3 = _mm_add_epi64(x3, t3);
x4 = _mm_add_epi64(x4, t4);
x5 = _mm_add_epi64(x5, t5);
x6 = _mm_add_epi64(x6, t6);
x7 = _mm_add_epi64(x7, t7);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
xmmp = (xmmi *)scrypt_block(Bout, (i / 2) + half);
xmmp[0] = x0;
xmmp[1] = x1;
xmmp[2] = x2;
xmmp[3] = x3;
xmmp[4] = x4;
xmmp[5] = x5;
xmmp[6] = x6;
xmmp[7] = x7;
}
}
#endif
#if defined(SCRYPT_SALSA64_XOP)
/* uses salsa64_core_tangle_sse2 */
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa64/8-XOP"
#undef SCRYPT_SALSA64_INCLUDED
#define SCRYPT_SALSA64_INCLUDED
#endif
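A note on the output indexing shared by all of these ChunkMix variants: writing block i to `(i / 2) + half`, with `half` toggling between 0 and r each iteration, is what implements step 6 of the scrypt paper (even-numbered blocks land in B'[0..r-1], odd-numbered blocks in B'[r..2r-1]). A minimal standalone sketch of the index pattern (illustrative only, not part of this tree):

#include <stdio.h>

int main(void) {
    unsigned r = 2, half = 0;                 /* 2*r = 4 blocks per chunk */
    for (unsigned i = 0; i < 2 * r; i++, half ^= r)
        printf("block %u -> B'[%u]\n", i, (i / 2) + half);
    /* prints 0->0, 1->2, 2->1, 3->3: evens fill the first half, odds the second */
    return 0;
}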

View File

@@ -1,41 +0,0 @@
#if !defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_SALSA64_INCLUDED)
#undef SCRYPT_MIX
#define SCRYPT_MIX "Salsa64/8 Ref"
#undef SCRYPT_SALSA64_INCLUDED
#define SCRYPT_SALSA64_INCLUDED
#define SCRYPT_SALSA64_BASIC
static void
salsa64_core_basic(uint64_t state[16]) {
const size_t rounds = 8;
uint64_t v[16], t;
size_t i;
for (i = 0; i < 16; i++) v[i] = state[i];
#define G(a,b,c,d) \
t = v[a]+v[d]; t = ROTL64(t, 32); v[b] ^= t; \
t = v[b]+v[a]; t = ROTL64(t, 13); v[c] ^= t; \
t = v[c]+v[b]; t = ROTL64(t, 39); v[d] ^= t; \
t = v[d]+v[c]; t = ROTL64(t, 32); v[a] ^= t;
for (i = 0; i < rounds; i += 2) {
G( 0, 4, 8,12);
G( 5, 9,13, 1);
G(10,14, 2, 6);
G(15, 3, 7,11);
G( 0, 1, 2, 3);
G( 5, 6, 7, 4);
G(10,11, 8, 9);
G(15,12,13,14);
}
for (i = 0; i < 16; i++) state[i] += v[i];
#undef G
}
#endif
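Usage of the reference core is straightforward: it permutes a 16-word state in place over eight rounds, then feed-forward-adds the original state. A minimal sketch (illustrative only; assumes ROTL64 and the salsa64_core_basic above are in scope):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t state[16];
    for (int i = 0; i < 16; i++)
        state[i] = (uint64_t)i;      /* arbitrary test input */
    salsa64_core_basic(state);       /* 8 rounds + feed-forward add */
    printf("%016llx\n", (unsigned long long)state[0]);
    return 0;
}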

View File

@@ -1,112 +0,0 @@
typedef struct scrypt_hmac_state_t {
scrypt_hash_state inner, outer;
} scrypt_hmac_state;
static void
scrypt_hash(scrypt_hash_digest hash, const uint8_t *m, size_t mlen) {
scrypt_hash_state st;
scrypt_hash_init(&st);
scrypt_hash_update(&st, m, mlen);
scrypt_hash_finish(&st, hash);
}
/* hmac */
static void
scrypt_hmac_init(scrypt_hmac_state *st, const uint8_t *key, size_t keylen) {
uint8_t pad[SCRYPT_HASH_BLOCK_SIZE] = {0};
size_t i;
scrypt_hash_init(&st->inner);
scrypt_hash_init(&st->outer);
if (keylen <= SCRYPT_HASH_BLOCK_SIZE) {
/* use the key directly if it's <= blocksize bytes */
memcpy(pad, key, keylen);
} else {
/* if it's > blocksize bytes, hash it */
scrypt_hash(pad, key, keylen);
}
/* inner = (key ^ 0x36) */
/* h(inner || ...) */
for (i = 0; i < SCRYPT_HASH_BLOCK_SIZE; i++)
pad[i] ^= 0x36;
scrypt_hash_update(&st->inner, pad, SCRYPT_HASH_BLOCK_SIZE);
/* outer = (key ^ 0x5c) */
/* h(outer || ...) */
for (i = 0; i < SCRYPT_HASH_BLOCK_SIZE; i++)
pad[i] ^= (0x5c ^ 0x36);
scrypt_hash_update(&st->outer, pad, SCRYPT_HASH_BLOCK_SIZE);
scrypt_ensure_zero(pad, sizeof(pad));
}
static void
scrypt_hmac_update(scrypt_hmac_state *st, const uint8_t *m, size_t mlen) {
/* h(inner || m...) */
scrypt_hash_update(&st->inner, m, mlen);
}
static void
scrypt_hmac_finish(scrypt_hmac_state *st, scrypt_hash_digest mac) {
/* h(inner || m) */
scrypt_hash_digest innerhash;
scrypt_hash_finish(&st->inner, innerhash);
/* h(outer || h(inner || m)) */
scrypt_hash_update(&st->outer, innerhash, sizeof(innerhash));
scrypt_hash_finish(&st->outer, mac);
scrypt_ensure_zero(st, sizeof(*st));
}
static void
scrypt_pbkdf2(const uint8_t *password, size_t password_len, const uint8_t *salt, size_t salt_len, uint64_t N, uint8_t *out, size_t bytes) {
scrypt_hmac_state hmac_pw, hmac_pw_salt, work;
scrypt_hash_digest ti, u;
uint8_t be[4];
uint32_t i, j, blocks;
uint64_t c;
/* bytes must be <= (0xffffffff - (SCRYPT_HASH_DIGEST_SIZE - 1)), which they will always be under scrypt */
/* hmac(password, ...) */
scrypt_hmac_init(&hmac_pw, password, password_len);
/* hmac(password, salt...) */
hmac_pw_salt = hmac_pw;
scrypt_hmac_update(&hmac_pw_salt, salt, salt_len);
blocks = ((uint32_t)bytes + (SCRYPT_HASH_DIGEST_SIZE - 1)) / SCRYPT_HASH_DIGEST_SIZE;
for (i = 1; i <= blocks; i++) {
/* U1 = hmac(password, salt || be(i)) */
U32TO8_BE(be, i);
work = hmac_pw_salt;
scrypt_hmac_update(&work, be, 4);
scrypt_hmac_finish(&work, ti);
memcpy(u, ti, sizeof(u));
/* T[i] = U1 ^ U2 ^ U3... */
for (c = 0; c < N - 1; c++) {
/* UX = hmac(password, U{X-1}) */
work = hmac_pw;
scrypt_hmac_update(&work, u, SCRYPT_HASH_DIGEST_SIZE);
scrypt_hmac_finish(&work, u);
/* T[i] ^= UX */
for (j = 0; j < sizeof(u); j++)
ti[j] ^= u[j];
}
memcpy(out, ti, (bytes > SCRYPT_HASH_DIGEST_SIZE) ? SCRYPT_HASH_DIGEST_SIZE : bytes);
out += SCRYPT_HASH_DIGEST_SIZE;
bytes -= SCRYPT_HASH_DIGEST_SIZE;
}
scrypt_ensure_zero(ti, sizeof(ti));
scrypt_ensure_zero(u, sizeof(u));
scrypt_ensure_zero(&hmac_pw, sizeof(hmac_pw));
scrypt_ensure_zero(&hmac_pw_salt, sizeof(hmac_pw_salt));
}
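The loop above is textbook PBKDF2: T[i] = U1 ^ U2 ^ ... ^ Uc with U1 = HMAC(password, salt || BE32(i)). Note that scrypt itself always calls this with an iteration count of 1; the work factor lives in ROMix, not here. A usage sketch (illustrative only; `derive_seed` is a hypothetical wrapper and assumes the functions above plus a hash backend are compiled in):

#include <stdint.h>
#include <stddef.h>

static void derive_seed(uint8_t *seed, size_t seed_len) {
    const uint8_t pw[]   = "password";
    const uint8_t salt[] = "NaCl";
    /* one iteration: memory hardness comes from ROMix, not PBKDF2 */
    scrypt_pbkdf2(pw, sizeof(pw) - 1, salt, sizeof(salt) - 1,
                  1, seed, seed_len);
}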

View File

@@ -1,463 +0,0 @@
#if defined(CPU_X86) && (defined(COMPILER_MSVC) || defined(COMPILER_GCC))
#define X86ASM
/* gcc 2.95 royally screws up stack alignments on variables */
#if ((defined(COMPILER_MSVC) && (COMPILER_MSVC >= COMPILER_MSVC_VS6PP)) || (defined(COMPILER_GCC) && (COMPILER_GCC >= 30000)))
#define X86ASM_SSE
#define X86ASM_SSE2
#endif
#if ((defined(COMPILER_MSVC) && (COMPILER_MSVC >= COMPILER_MSVC_VS2005)) || (defined(COMPILER_GCC) && (COMPILER_GCC >= 40102)))
#define X86ASM_SSSE3
#endif
#if ((defined(COMPILER_MSVC) && (COMPILER_MSVC >= COMPILER_MSVC_VS2010SP1)) || (defined(COMPILER_GCC) && (COMPILER_GCC >= 40400)))
#define X86ASM_AVX
#define X86ASM_XOP
#endif
#if ((defined(COMPILER_MSVC) && (COMPILER_MSVC >= COMPILER_MSVC_VS2012)) || (defined(COMPILER_GCC) && (COMPILER_GCC >= 40700)))
#define X86ASM_AVX2
#endif
#endif
#if defined(CPU_X86_64) && defined(COMPILER_GCC)
#define X86_64ASM
#define X86_64ASM_SSE2
#if (COMPILER_GCC >= 40102)
#define X86_64ASM_SSSE3
#endif
#if (COMPILER_GCC >= 40400)
#define X86_64ASM_AVX
#define X86_64ASM_XOP
#endif
#if (COMPILER_GCC >= 40700)
#define X86_64ASM_AVX2
#endif
#endif
#if defined(COMPILER_MSVC) && (defined(CPU_X86_FORCE_INTRINSICS) || defined(CPU_X86_64))
#define X86_INTRINSIC
#if defined(CPU_X86_64) || defined(X86ASM_SSE)
#define X86_INTRINSIC_SSE
#endif
#if defined(CPU_X86_64) || defined(X86ASM_SSE2)
#define X86_INTRINSIC_SSE2
#endif
#if (COMPILER_MSVC >= COMPILER_MSVC_VS2005)
#define X86_INTRINSIC_SSSE3
#endif
#if (COMPILER_MSVC >= COMPILER_MSVC_VS2010SP1)
#define X86_INTRINSIC_AVX
#define X86_INTRINSIC_XOP
#endif
#if (COMPILER_MSVC >= COMPILER_MSVC_VS2012)
#define X86_INTRINSIC_AVX2
#endif
#endif
#if defined(COMPILER_GCC) && defined(CPU_X86_FORCE_INTRINSICS)
#define X86_INTRINSIC
#if defined(__SSE__)
#define X86_INTRINSIC_SSE
#endif
#if defined(__SSE2__)
#define X86_INTRINSIC_SSE2
#endif
#if defined(__SSSE3__)
#define X86_INTRINSIC_SSSE3
#endif
#if defined(__AVX__)
#define X86_INTRINSIC_AVX
#endif
#if defined(__XOP__)
#define X86_INTRINSIC_XOP
#endif
#if defined(__AVX2__)
#define X86_INTRINSIC_AVX2
#endif
#endif
/* only use simd on windows (or SSE2 on gcc)! */
#if defined(CPU_X86_FORCE_INTRINSICS) || defined(X86_INTRINSIC)
#if defined(X86_INTRINSIC_SSE)
#include <mmintrin.h>
#include <xmmintrin.h>
typedef __m64 qmm;
typedef __m128 xmm;
typedef __m128d xmmd;
#endif
#if defined(X86_INTRINSIC_SSE2)
#include <emmintrin.h>
typedef __m128i xmmi;
#endif
#if defined(X86_INTRINSIC_SSSE3)
#include <tmmintrin.h>
#endif
#if defined(X86_INTRINSIC_AVX)
#include <immintrin.h>
#endif
#if defined(X86_INTRINSIC_XOP)
#if defined(COMPILER_MSVC)
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#endif
#if defined(X86_INTRINSIC_AVX2)
typedef __m256i ymmi;
#endif
#endif
#if defined(X86_INTRINSIC_SSE2)
typedef union packedelem8_t {
uint8_t u[16];
xmmi v;
} packedelem8;
typedef union packedelem32_t {
uint32_t u[4];
xmmi v;
} packedelem32;
typedef union packedelem64_t {
uint64_t u[2];
xmmi v;
} packedelem64;
#else
typedef union packedelem8_t {
uint8_t u[16];
uint32_t dw[4];
} packedelem8;
typedef union packedelem32_t {
uint32_t u[4];
uint8_t b[16];
} packedelem32;
typedef union packedelem64_t {
uint64_t u[2];
uint8_t b[16];
} packedelem64;
#endif
#if defined(X86_INTRINSIC_SSSE3)
static const packedelem8 ALIGN(16) ssse3_rotl16_32bit = {{2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13}};
static const packedelem8 ALIGN(16) ssse3_rotl8_32bit = {{3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14}};
#endif
/*
x86 inline asm for gcc/msvc. usage:
asm_naked_fn_proto(return_type, name) (type parm1, type parm2..)
asm_naked_fn(name)
a1(..)
a2(.., ..)
a3(.., .., ..)
64bit OR 0 parameters: a1(ret)
32bit AND n parameters: aret(4n), e.g. aret(16) for 4 parameters
asm_naked_fn_end(name)
*/
#if defined(X86ASM) || defined(X86_64ASM)
#if defined(COMPILER_MSVC)
#pragma warning(disable : 4731) /* frame pointer modified by inline assembly */
#define a1(x) __asm {x}
#define a2(x, y) __asm {x, y}
#define a3(x, y, z) __asm {x, y, z}
#define a4(x, y, z, w) __asm {x, y, z, w}
#define aj(x) __asm {x}
#define asm_align8 a1(ALIGN 8)
#define asm_align16 a1(ALIGN 16)
#define asm_calling_convention STDCALL
#define aret(n) a1(ret n)
#define asm_naked_fn_proto(type, fn) static NAKED type asm_calling_convention fn
#define asm_naked_fn(fn) {
#define asm_naked_fn_end(fn) }
#elif defined(COMPILER_GCC)
#define GNU_AS1(x) #x ";\n"
#define GNU_AS2(x, y) #x ", " #y ";\n"
#define GNU_AS3(x, y, z) #x ", " #y ", " #z ";\n"
#define GNU_AS4(x, y, z, w) #x ", " #y ", " #z ", " #w ";\n"
#define GNU_ASFN(x) "\n_" #x ":\n" #x ":\n"
#define GNU_ASJ(x) ".att_syntax prefix\n" #x "\n.intel_syntax noprefix\n"
#define a1(x) GNU_AS1(x)
#define a2(x, y) GNU_AS2(x, y)
#define a3(x, y, z) GNU_AS3(x, y, z)
#define a4(x, y, z, w) GNU_AS4(x, y, z, w)
#define aj(x) GNU_ASJ(x)
#define asm_align8 ".p2align 3,,7"
#define asm_align16 ".p2align 4,,15"
#if defined(OS_WINDOWS)
#define asm_calling_convention CDECL
#define aret(n) a1(ret)
#if defined(X86_64ASM)
#define asm_naked_fn(fn) ; __asm__ ( \
".text\n" \
asm_align16 GNU_ASFN(fn) \
"subq $136, %rsp;" \
"movdqa %xmm6, 0(%rsp);" \
"movdqa %xmm7, 16(%rsp);" \
"movdqa %xmm8, 32(%rsp);" \
"movdqa %xmm9, 48(%rsp);" \
"movdqa %xmm10, 64(%rsp);" \
"movdqa %xmm11, 80(%rsp);" \
"movdqa %xmm12, 96(%rsp);" \
"movq %rdi, 112(%rsp);" \
"movq %rsi, 120(%rsp);" \
"movq %rcx, %rdi;" \
"movq %rdx, %rsi;" \
"movq %r8, %rdx;" \
"movq %r9, %rcx;" \
"call 1f;" \
"movdqa 0(%rsp), %xmm6;" \
"movdqa 16(%rsp), %xmm7;" \
"movdqa 32(%rsp), %xmm8;" \
"movdqa 48(%rsp), %xmm9;" \
"movdqa 64(%rsp), %xmm10;" \
"movdqa 80(%rsp), %xmm11;" \
"movdqa 96(%rsp), %xmm12;" \
"movq 112(%rsp), %rdi;" \
"movq 120(%rsp), %rsi;" \
"addq $136, %rsp;" \
"ret;" \
".intel_syntax noprefix;" \
".p2align 4,,15;" \
"1:;"
#else
#define asm_naked_fn(fn) ; __asm__ (".intel_syntax noprefix;\n.text\n" asm_align16 GNU_ASFN(fn)
#endif
#else
#define asm_calling_convention STDCALL
#define aret(n) a1(ret n)
#define asm_naked_fn(fn) ; __asm__ (".intel_syntax noprefix;\n.text\n" asm_align16 GNU_ASFN(fn)
#endif
#define asm_naked_fn_proto(type, fn) extern type asm_calling_convention fn
#define asm_naked_fn_end(fn) ".att_syntax prefix;\n" );
#define asm_gcc() __asm__ __volatile__(".intel_syntax noprefix;\n"
#define asm_gcc_parms() ".att_syntax prefix;"
#define asm_gcc_trashed() __asm__ __volatile__("" :::
#define asm_gcc_end() );
#else
need x86 asm
#endif
#endif /* X86ASM || X86_64ASM */
#if defined(CPU_X86) || defined(CPU_X86_64)
typedef enum cpu_flags_x86_t {
cpu_mmx = 1 << 0,
cpu_sse = 1 << 1,
cpu_sse2 = 1 << 2,
cpu_sse3 = 1 << 3,
cpu_ssse3 = 1 << 4,
cpu_sse4_1 = 1 << 5,
cpu_sse4_2 = 1 << 6,
cpu_avx = 1 << 7,
cpu_xop = 1 << 8,
cpu_avx2 = 1 << 9
} cpu_flags_x86;
typedef enum cpu_vendors_x86_t {
cpu_nobody,
cpu_intel,
cpu_amd
} cpu_vendors_x86;
typedef struct x86_regs_t {
uint32_t eax, ebx, ecx, edx;
} x86_regs;
#if defined(X86ASM)
asm_naked_fn_proto(int, has_cpuid)(void)
asm_naked_fn(has_cpuid)
a1(pushfd)
a1(pop eax)
a2(mov ecx, eax)
a2(xor eax, 0x200000)
a1(push eax)
a1(popfd)
a1(pushfd)
a1(pop eax)
a2(xor eax, ecx)
a2(shr eax, 21)
a2(and eax, 1)
a1(push ecx)
a1(popfd)
a1(ret)
asm_naked_fn_end(has_cpuid)
#endif /* X86ASM */
static void NOINLINE
get_cpuid(x86_regs *regs, uint32_t flags) {
#if defined(COMPILER_MSVC)
__cpuid((int *)regs, (int)flags);
#else
#if defined(CPU_X86_64)
#define cpuid_bx rbx
#else
#define cpuid_bx ebx
#endif
asm_gcc()
a1(push cpuid_bx)
a2(xor ecx, ecx)
a1(cpuid)
a2(mov [%1 + 0], eax)
a2(mov [%1 + 4], ebx)
a2(mov [%1 + 8], ecx)
a2(mov [%1 + 12], edx)
a1(pop cpuid_bx)
asm_gcc_parms() : "+a"(flags) : "S"(regs) : "%ecx", "%edx", "cc"
asm_gcc_end()
#endif
}
#if defined(X86ASM_AVX) || defined(X86_64ASM_AVX)
static uint64_t NOINLINE
get_xgetbv(uint32_t flags) {
#if defined(COMPILER_MSVC)
return _xgetbv(flags);
#else
uint32_t lo, hi;
asm_gcc()
a1(xgetbv)
asm_gcc_parms() : "+c"(flags), "=a" (lo), "=d" (hi)
asm_gcc_end()
return ((uint64_t)lo | ((uint64_t)hi << 32));
#endif
}
#endif // AVX support
#if defined(SCRYPT_TEST_SPEED)
size_t cpu_detect_mask = (size_t)-1;
#endif
static size_t
detect_cpu(void) {
//union { uint8_t s[12]; uint32_t i[3]; } vendor_string;
//cpu_vendors_x86 vendor = cpu_nobody;
x86_regs regs;
uint32_t max_level, max_ext_level;
size_t cpu_flags = 0;
#if defined(X86ASM_AVX) || defined(X86_64ASM_AVX)
uint64_t xgetbv_flags;
#endif
#if defined(CPU_X86)
if (!has_cpuid())
return cpu_flags;
#endif
get_cpuid(&regs, 0);
max_level = regs.eax;
#if 0
vendor_string.i[0] = regs.ebx;
vendor_string.i[1] = regs.edx;
vendor_string.i[2] = regs.ecx;
if (scrypt_verify(vendor_string.s, (const uint8_t *)"GenuineIntel", 12))
vendor = cpu_intel;
else if (scrypt_verify(vendor_string.s, (const uint8_t *)"AuthenticAMD", 12))
vendor = cpu_amd;
#endif
if (max_level & 0x00000500) {
/* "Intel P5 pre-B0" */
cpu_flags |= cpu_mmx;
return cpu_flags;
}
if (max_level < 1)
return cpu_flags;
get_cpuid(&regs, 1);
#if defined(X86ASM_AVX) || defined(X86_64ASM_AVX)
/* xsave/xrestore */
if (regs.ecx & (1 << 27)) {
xgetbv_flags = get_xgetbv(0);
if ((regs.ecx & (1 << 28)) && (xgetbv_flags & 0x6)) cpu_flags |= cpu_avx;
}
#endif
if (regs.ecx & (1 << 20)) cpu_flags |= cpu_sse4_2;
if (regs.ecx & (1 << 19)) cpu_flags |= cpu_sse4_1;
if (regs.ecx & (1 << 9)) cpu_flags |= cpu_ssse3;
if (regs.ecx & (1 )) cpu_flags |= cpu_sse3;
if (regs.edx & (1 << 26)) cpu_flags |= cpu_sse2;
if (regs.edx & (1 << 25)) cpu_flags |= cpu_sse;
if (regs.edx & (1 << 23)) cpu_flags |= cpu_mmx;
if (cpu_flags & cpu_avx) {
if (max_level >= 7) {
get_cpuid(&regs, 7);
if (regs.ebx & (1 << 5)) cpu_flags |= cpu_avx2;
}
get_cpuid(&regs, 0x80000000);
max_ext_level = regs.eax;
if (max_ext_level >= 0x80000001) {
get_cpuid(&regs, 0x80000001);
if (regs.ecx & (1 << 11)) cpu_flags |= cpu_xop;
}
}
#if defined(SCRYPT_TEST_SPEED)
cpu_flags &= cpu_detect_mask;
#endif
return cpu_flags;
}
#if defined(SCRYPT_TEST_SPEED)
static const char *
get_top_cpuflag_desc(size_t flag) {
if (flag & cpu_avx2) return "AVX2";
else if (flag & cpu_xop) return "XOP";
else if (flag & cpu_avx) return "AVX";
else if (flag & cpu_sse4_2) return "SSE4.2";
else if (flag & cpu_sse4_1) return "SSE4.1";
else if (flag & cpu_ssse3) return "SSSE3";
else if (flag & cpu_sse2) return "SSE2";
else if (flag & cpu_sse) return "SSE";
else if (flag & cpu_mmx) return "MMX";
else return "Basic";
}
#endif
/* enable the highest system-wide option */
#if defined(SCRYPT_CHOOSE_COMPILETIME)
#if !defined(__AVX2__)
#undef X86_64ASM_AVX2
#undef X86ASM_AVX2
#undef X86_INTRINSIC_AVX2
#endif
#if !defined(__XOP__)
#undef X86_64ASM_XOP
#undef X86ASM_XOP
#undef X86_INTRINSIC_XOP
#endif
#if !defined(__AVX__)
#undef X86_64ASM_AVX
#undef X86ASM_AVX
#undef X86_INTRINSIC_AVX
#endif
#if !defined(__SSSE3__)
#undef X86_64ASM_SSSE3
#undef X86ASM_SSSE3
#undef X86_INTRINSIC_SSSE3
#endif
#if !defined(__SSE2__)
#undef X86_64ASM_SSE2
#undef X86ASM_SSE2
#undef X86_INTRINSIC_SSE2
#endif
#endif
#endif /* defined(CPU_X86) || defined(CPU_X86_64) */
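Callers typically invoke `detect_cpu()` once and walk the flags from strongest to weakest, exactly as `scrypt_getROMix()` does further down. A minimal sketch (illustrative only; `pick_kernel` is a hypothetical helper, not part of this tree):

#include <stddef.h>

static const char *pick_kernel(void) {
    size_t flags = detect_cpu();
    if (flags & cpu_avx2) return "AVX2";
    if (flags & cpu_xop)  return "XOP";
    if (flags & cpu_avx)  return "AVX";
    if (flags & cpu_sse2) return "SSE2";
    return "Basic";                      /* portable fallback */
}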

View File

@@ -1,310 +0,0 @@
/* determine os */
#if defined(_WIN32) || defined(_WIN64) || defined(__TOS_WIN__) || defined(__WINDOWS__)
#include <windows.h>
#include <wincrypt.h>
#define OS_WINDOWS
#elif defined(sun) || defined(__sun) || defined(__SVR4) || defined(__svr4__)
#include <sys/mman.h>
#include <sys/time.h>
#include <fcntl.h>
#define OS_SOLARIS
#else
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/param.h> /* need this to define BSD */
#include <unistd.h>
#include <fcntl.h>
#define OS_NIX
#if defined(__linux__)
#include <endian.h>
#define OS_LINUX
#elif defined(BSD)
#define OS_BSD
#if defined(MACOS_X) || (defined(__APPLE__) && defined(__MACH__))
#define OS_OSX
#elif defined(macintosh) || defined(Macintosh)
#define OS_MAC
#elif defined(__OpenBSD__)
#define OS_OPENBSD
#endif
#endif
#endif
/* determine compiler */
#if defined(_MSC_VER)
#define COMPILER_MSVC_VS6 120000000
#define COMPILER_MSVC_VS6PP 121000000
#define COMPILER_MSVC_VS2002 130000000
#define COMPILER_MSVC_VS2003 131000000
#define COMPILER_MSVC_VS2005 140050727
#define COMPILER_MSVC_VS2008 150000000
#define COMPILER_MSVC_VS2008SP1 150030729
#define COMPILER_MSVC_VS2010 160000000
#define COMPILER_MSVC_VS2010SP1 160040219
#define COMPILER_MSVC_VS2012RC 170000000
#define COMPILER_MSVC_VS2012 170050727
#if _MSC_FULL_VER > 100000000
#define COMPILER_MSVC (_MSC_FULL_VER)
#else
#define COMPILER_MSVC (_MSC_FULL_VER * 10)
#endif
#if ((_MSC_VER == 1200) && defined(_mm_free))
#undef COMPILER_MSVC
#define COMPILER_MSVC COMPILER_MSVC_VS6PP
#endif
#pragma warning(disable : 4127) /* conditional expression is constant */
#pragma warning(disable : 4100) /* unreferenced formal parameter */
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#include <float.h>
#include <stdlib.h> /* _rotl */
#include <intrin.h>
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef signed int int32_t;
typedef unsigned __int64 uint64_t;
typedef signed __int64 int64_t;
#define ROTL32(a,b) _rotl(a,b)
#define ROTR32(a,b) _rotr(a,b)
#define ROTL64(a,b) _rotl64(a,b)
#define ROTR64(a,b) _rotr64(a,b)
#undef NOINLINE
#define NOINLINE __declspec(noinline)
#undef NORETURN
#define NORETURN
#undef INLINE
#define INLINE __forceinline
#undef FASTCALL
#define FASTCALL __fastcall
#undef CDECL
#define CDECL __cdecl
#undef STDCALL
#define STDCALL __stdcall
#undef NAKED
#define NAKED __declspec(naked)
#define ALIGN(n) __declspec(align(n))
#endif
#if defined(__ICC)
#define COMPILER_INTEL
#endif
#if defined(__GNUC__)
#if (__GNUC__ >= 3)
#define COMPILER_GCC_PATCHLEVEL __GNUC_PATCHLEVEL__
#else
#define COMPILER_GCC_PATCHLEVEL 0
#endif
#define COMPILER_GCC (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + COMPILER_GCC_PATCHLEVEL)
#define ROTL32(a,b) (((a) << (b)) | ((a) >> (32 - b)))
#define ROTR32(a,b) (((a) >> (b)) | ((a) << (32 - b)))
#define ROTL64(a,b) (((a) << (b)) | ((a) >> (64 - b)))
#define ROTR64(a,b) (((a) >> (b)) | ((a) << (64 - b)))
#undef NOINLINE
#if (COMPILER_GCC >= 30000)
#define NOINLINE __attribute__((noinline))
#else
#define NOINLINE
#endif
#undef NORETURN
#if (COMPILER_GCC >= 30000)
#define NORETURN __attribute__((noreturn))
#else
#define NORETURN
#endif
#undef INLINE
#if (COMPILER_GCC >= 30000)
#define INLINE __attribute__((always_inline))
#else
#define INLINE inline
#endif
#undef FASTCALL
#if (COMPILER_GCC >= 30400)
#define FASTCALL __attribute__((fastcall))
#else
#define FASTCALL
#endif
#undef CDECL
#define CDECL __attribute__((cdecl))
#undef STDCALL
#define STDCALL __attribute__((stdcall))
#define ALIGN(n) __attribute__((aligned(n)))
#include <stdint.h>
#endif
#if defined(__MINGW32__) || defined(__MINGW64__)
#define COMPILER_MINGW
#endif
#if defined(__PATHCC__)
#define COMPILER_PATHCC
#endif
#define OPTIONAL_INLINE
#if defined(OPTIONAL_INLINE)
#undef OPTIONAL_INLINE
#define OPTIONAL_INLINE INLINE
#else
#define OPTIONAL_INLINE
#endif
#define CRYPTO_FN NOINLINE STDCALL
/* determine cpu */
#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__ ) || defined(_M_X64)
#define CPU_X86_64
#elif defined(__i586__) || defined(__i686__) || (defined(_M_IX86) && (_M_IX86 >= 500))
#define CPU_X86 500
#elif defined(__i486__) || (defined(_M_IX86) && (_M_IX86 >= 400))
#define CPU_X86 400
#elif defined(__i386__) || (defined(_M_IX86) && (_M_IX86 >= 300)) || defined(__X86__) || defined(_X86_) || defined(__I86__)
#define CPU_X86 300
#elif defined(__ia64__) || defined(_IA64) || defined(__IA64__) || defined(_M_IA64) || defined(__ia64)
#define CPU_IA64
#endif
#if defined(__sparc__) || defined(__sparc) || defined(__sparcv9)
#define CPU_SPARC
#if defined(__sparcv9)
#define CPU_SPARC64
#endif
#endif
#if defined(CPU_X86_64) || defined(CPU_IA64) || defined(CPU_SPARC64) || defined(__64BIT__) || defined(__LP64__) || defined(_LP64) || (defined(_MIPS_SZLONG) && (_MIPS_SZLONG == 64))
#define CPU_64BITS
#undef FASTCALL
#define FASTCALL
#undef CDECL
#define CDECL
#undef STDCALL
#define STDCALL
#endif
#if defined(powerpc) || defined(__PPC__) || defined(__ppc__) || defined(_ARCH_PPC) || defined(__powerpc__) || defined(__powerpc) || defined(POWERPC) || defined(_M_PPC)
#define CPU_PPC
#if defined(_ARCH_PWR7)
#define CPU_POWER7
#elif defined(__64BIT__)
#define CPU_PPC64
#else
#define CPU_PPC32
#endif
#endif
#if defined(__hppa__) || defined(__hppa)
#define CPU_HPPA
#endif
#if defined(__alpha__) || defined(__alpha) || defined(_M_ALPHA)
#define CPU_ALPHA
#endif
/* endian */
#if ((defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && (__BYTE_ORDER == __LITTLE_ENDIAN)) || \
(defined(BYTE_ORDER) && defined(LITTLE_ENDIAN) && (BYTE_ORDER == LITTLE_ENDIAN)) || \
(defined(CPU_X86) || defined(CPU_X86_64)) || \
(defined(vax) || defined(MIPSEL) || defined(_MIPSEL)))
#define CPU_LE
#elif ((defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && (__BYTE_ORDER == __BIG_ENDIAN)) || \
(defined(BYTE_ORDER) && defined(BIG_ENDIAN) && (BYTE_ORDER == BIG_ENDIAN)) || \
(defined(CPU_SPARC) || defined(CPU_PPC) || defined(mc68000) || defined(sel)) || defined(_MIPSEB))
#define CPU_BE
#else
/* unknown endian! */
#endif
#define U8TO32_BE(p) \
(((uint32_t)((p)[0]) << 24) | ((uint32_t)((p)[1]) << 16) | \
((uint32_t)((p)[2]) << 8) | ((uint32_t)((p)[3]) ))
#define U8TO32_LE(p) \
(((uint32_t)((p)[0]) ) | ((uint32_t)((p)[1]) << 8) | \
((uint32_t)((p)[2]) << 16) | ((uint32_t)((p)[3]) << 24))
#define U32TO8_BE(p, v) \
(p)[0] = (uint8_t)((v) >> 24); (p)[1] = (uint8_t)((v) >> 16); \
(p)[2] = (uint8_t)((v) >> 8); (p)[3] = (uint8_t)((v) );
#define U32TO8_LE(p, v) \
(p)[0] = (uint8_t)((v) ); (p)[1] = (uint8_t)((v) >> 8); \
(p)[2] = (uint8_t)((v) >> 16); (p)[3] = (uint8_t)((v) >> 24);
#define U8TO64_BE(p) \
(((uint64_t)U8TO32_BE(p) << 32) | (uint64_t)U8TO32_BE((p) + 4))
#define U8TO64_LE(p) \
(((uint64_t)U8TO32_LE(p)) | ((uint64_t)U8TO32_LE((p) + 4) << 32))
#define U64TO8_BE(p, v) \
U32TO8_BE((p), (uint32_t)((v) >> 32)); \
U32TO8_BE((p) + 4, (uint32_t)((v) ));
#define U64TO8_LE(p, v) \
U32TO8_LE((p), (uint32_t)((v) )); \
U32TO8_LE((p) + 4, (uint32_t)((v) >> 32));
#define U32_SWAP(v) { \
(v) = (((v) << 8) & 0xFF00FF00 ) | (((v) >> 8) & 0xFF00FF ); \
(v) = ((v) << 16) | ((v) >> 16); \
}
#define U64_SWAP(v) { \
(v) = (((v) << 8) & 0xFF00FF00FF00FF00ull ) | (((v) >> 8) & 0x00FF00FF00FF00FFull ); \
(v) = (((v) << 16) & 0xFFFF0000FFFF0000ull ) | (((v) >> 16) & 0x0000FFFF0000FFFFull ); \
(v) = ((v) << 32) | ((v) >> 32); \
}
static int
scrypt_verify(const uint8_t *x, const uint8_t *y, size_t len) {
uint32_t differentbits = 0;
while (len--)
differentbits |= (*x++ ^ *y++);
return (1 & ((differentbits - 1) >> 8));
}
static void
scrypt_ensure_zero(void *p, size_t len) {
#if ((defined(CPU_X86) || defined(CPU_X86_64)) && defined(COMPILER_MSVC))
__stosb((unsigned char *)p, 0, len);
#elif (defined(CPU_X86) && defined(COMPILER_GCC))
__asm__ __volatile__(
"pushl %%edi;\n"
"pushl %%ecx;\n"
"rep stosb;\n"
"popl %%ecx;\n"
"popl %%edi;\n"
:: "a"(0), "D"(p), "c"(len) : "cc", "memory"
);
#elif (defined(CPU_X86_64) && defined(COMPILER_GCC))
__asm__ __volatile__(
"pushq %%rdi;\n"
"pushq %%rcx;\n"
"rep stosb;\n"
"popq %%rcx;\n"
"popq %%rdi;\n"
:: "a"(0), "D"(p), "c"(len) : "cc", "memory"
);
#else
volatile uint8_t *b = (volatile uint8_t *)p;
size_t i;
for (i = 0; i < len; i++)
b[i] = 0;
#endif
}
#include "scrypt-jane-portable-x86.h"
#if !defined(asm_calling_convention)
#define asm_calling_convention
#endif
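Two details worth calling out: `scrypt_verify` returns 1 on a match and 0 on a mismatch, and its running time is independent of where the inputs differ (every byte is always compared); `scrypt_ensure_zero` wipes secrets in a way the optimizer cannot elide. A usage sketch (illustrative only; `check_and_wipe` is a hypothetical helper):

#include <stdint.h>

static int check_and_wipe(uint8_t secret[32], const uint8_t expected[32]) {
    int ok = scrypt_verify(secret, expected, 32); /* 1 = equal, constant time */
    scrypt_ensure_zero(secret, 32);               /* wipe regardless of result */
    return ok;
}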

View File

@@ -1,74 +0,0 @@
#if !defined(SCRYPT_CHOOSE_COMPILETIME)
/* function type returned by scrypt_getROMix, used with cpu detection */
typedef void (FASTCALL *scrypt_ROMixfn)(scrypt_mix_word_t *X/*[chunkWords]*/, scrypt_mix_word_t *Y/*[chunkWords]*/, scrypt_mix_word_t *V/*[chunkWords * N]*/, uint32_t N, uint32_t r);
#endif
/* romix pre/post nop function */
static void asm_calling_convention
scrypt_romix_nop(scrypt_mix_word_t *blocks, size_t nblocks) {
(void)blocks; (void)nblocks;
}
/* romix pre/post endian conversion function */
static void asm_calling_convention
scrypt_romix_convert_endian(scrypt_mix_word_t *blocks, size_t nblocks) {
#if !defined(CPU_LE)
static const union { uint8_t b[2]; uint16_t w; } endian_test = {{1,0}};
size_t i;
if (endian_test.w == 0x100) {
nblocks *= SCRYPT_BLOCK_WORDS;
for (i = 0; i < nblocks; i++) {
SCRYPT_WORD_ENDIAN_SWAP(blocks[i]);
}
}
#else
(void)blocks; (void)nblocks;
#endif
}
/* chunkmix test function */
typedef void (asm_calling_convention *chunkmixfn)(scrypt_mix_word_t *Bout/*[chunkWords]*/, scrypt_mix_word_t *Bin/*[chunkWords]*/, scrypt_mix_word_t *Bxor/*[chunkWords]*/, uint32_t r);
typedef void (asm_calling_convention *blockfixfn)(scrypt_mix_word_t *blocks, size_t nblocks);
static int
scrypt_test_mix_instance(chunkmixfn mixfn, blockfixfn prefn, blockfixfn postfn, const uint8_t expected[16]) {
/* r = 2, (2 * r) = 4 blocks in a chunk, 4 * SCRYPT_BLOCK_WORDS total */
const uint32_t r = 2, blocks = 2 * r, words = blocks * SCRYPT_BLOCK_WORDS;
#if (defined(X86ASM_AVX2) || defined(X86_64ASM_AVX2) || defined(X86_INTRINSIC_AVX2))
scrypt_mix_word_t ALIGN(32) chunk[2][4 * SCRYPT_BLOCK_WORDS], v;
#else
scrypt_mix_word_t ALIGN(16) chunk[2][4 * SCRYPT_BLOCK_WORDS], v;
#endif
uint8_t final[16];
size_t i;
for (i = 0; i < words; i++) {
v = (scrypt_mix_word_t)i;
v = (v << 8) | v;
v = (v << 16) | v;
chunk[0][i] = v;
}
prefn(chunk[0], blocks);
mixfn(chunk[1], chunk[0], NULL, r);
postfn(chunk[1], blocks);
/* grab the last 16 bytes of the final block */
for (i = 0; i < 16; i += sizeof(scrypt_mix_word_t)) {
SCRYPT_WORDTO8_LE(final + i, chunk[1][words - (16 / sizeof(scrypt_mix_word_t)) + (i / sizeof(scrypt_mix_word_t))]);
}
return scrypt_verify(expected, final, 16);
}
/* returns a pointer to item i, where item is len scrypt_mix_word_t's long */
static scrypt_mix_word_t *
scrypt_item(scrypt_mix_word_t *base, scrypt_mix_word_t i, scrypt_mix_word_t len) {
return base + (i * len);
}
/* returns a pointer to block i */
static scrypt_mix_word_t *
scrypt_block(scrypt_mix_word_t *base, scrypt_mix_word_t i) {
return base + (i * SCRYPT_BLOCK_WORDS);
}
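The two pointer helpers compose naturally: chunk j of V starts at `V + j * chunkWords`, and block i of a chunk starts `i * SCRYPT_BLOCK_WORDS` words in. A sketch (illustrative only; `chunk_block` is a hypothetical helper):

static scrypt_mix_word_t *
chunk_block(scrypt_mix_word_t *V, scrypt_mix_word_t j,
            scrypt_mix_word_t chunkWords, scrypt_mix_word_t i) {
    /* block i of chunk j inside the big V array */
    return scrypt_block(scrypt_item(V, j, chunkWords), i);
}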

View File

@@ -1,122 +0,0 @@
#if !defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_HAVE_ROMIX)
#if defined(SCRYPT_CHOOSE_COMPILETIME)
#undef SCRYPT_ROMIX_FN
#define SCRYPT_ROMIX_FN scrypt_ROMix
#endif
#undef SCRYPT_HAVE_ROMIX
#define SCRYPT_HAVE_ROMIX
#if !defined(SCRYPT_CHUNKMIX_FN)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_basic
/*
Bout = ChunkMix(Bin)
2*r: number of blocks in the chunk
*/
static void asm_calling_convention
SCRYPT_CHUNKMIX_FN(scrypt_mix_word_t *Bout/*[chunkWords]*/, scrypt_mix_word_t *Bin/*[chunkWords]*/, scrypt_mix_word_t *Bxor/*[chunkWords]*/, uint32_t r) {
#if (defined(X86ASM_AVX2) || defined(X86_64ASM_AVX2) || defined(X86_INTRINSIC_AVX2))
scrypt_mix_word_t ALIGN(32) X[SCRYPT_BLOCK_WORDS], *block;
#else
scrypt_mix_word_t ALIGN(16) X[SCRYPT_BLOCK_WORDS], *block;
#endif
uint32_t i, j, blocksPerChunk = /*r * 2*/2, half = 0;
/* 1: X = B_{2r - 1} */
block = scrypt_block(Bin, blocksPerChunk - 1);
for (i = 0; i < SCRYPT_BLOCK_WORDS; i++)
X[i] = block[i];
if (Bxor) {
block = scrypt_block(Bxor, blocksPerChunk - 1);
for (i = 0; i < SCRYPT_BLOCK_WORDS; i++)
X[i] ^= block[i];
}
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < blocksPerChunk; i++, half ^= /*r*/1) {
/* 3: X = H(X ^ B_i) */
block = scrypt_block(Bin, i);
for (j = 0; j < SCRYPT_BLOCK_WORDS; j++)
X[j] ^= block[j];
if (Bxor) {
block = scrypt_block(Bxor, i);
for (j = 0; j < SCRYPT_BLOCK_WORDS; j++)
X[j] ^= block[j];
}
SCRYPT_MIX_FN(X);
/* 4: Y_i = X */
/* 6: B'[0..r-1] = Y_even */
/* 6: B'[r..2r-1] = Y_odd */
block = scrypt_block(Bout, (i / 2) + half);
for (j = 0; j < SCRYPT_BLOCK_WORDS; j++)
block[j] = X[j];
}
}
#endif
/*
X = ROMix(X)
X: chunk to mix
Y: scratch chunk
N: number of rounds
V[N]: array of chunks to randomly index into
2*r: number of blocks in a chunk
*/
static void NOINLINE FASTCALL
SCRYPT_ROMIX_FN(scrypt_mix_word_t *X/*[chunkWords]*/, scrypt_mix_word_t *Y/*[chunkWords]*/, scrypt_mix_word_t *V/*[N * chunkWords]*/, uint32_t N, uint32_t r) {
uint32_t i, j, chunkWords = (uint32_t)(SCRYPT_BLOCK_WORDS * 2);
scrypt_mix_word_t *block = V;
SCRYPT_ROMIX_TANGLE_FN(X, 2);
/* 1: X = B */
/* implicit */
/* 2: for i = 0 to N - 1 do */
memcpy(block, X, chunkWords * sizeof(scrypt_mix_word_t));
for (i = 0; i < /*N - 1*/511; i++, block += chunkWords) {
/* 3: V_i = X */
/* 4: X = H(X) */
SCRYPT_CHUNKMIX_FN(block + chunkWords, block, NULL, /*r*/1);
}
SCRYPT_CHUNKMIX_FN(X, block, NULL, 1);
/* 6: for i = 0 to N - 1 do */
for (i = 0; i < /*N*/512; i += 2) {
/* 7: j = Integerify(X) % N */
j = X[chunkWords - SCRYPT_BLOCK_WORDS] & /*(N - 1)*/511;
/* 8: X = H(Y ^ V_j) */
SCRYPT_CHUNKMIX_FN(Y, X, scrypt_item(V, j, chunkWords), 1);
/* 7: j = Integerify(Y) % N */
j = Y[chunkWords - SCRYPT_BLOCK_WORDS] & /*(N - 1)*/511;
/* 8: X = H(Y ^ V_j) */
SCRYPT_CHUNKMIX_FN(X, Y, scrypt_item(V, j, chunkWords), 1);
}
/* 10: B' = X */
/* implicit */
SCRYPT_ROMIX_UNTANGLE_FN(X, 2);
}
#endif /* !defined(SCRYPT_CHOOSE_COMPILETIME) || !defined(SCRYPT_HAVE_ROMIX) */
#undef SCRYPT_CHUNKMIX_FN
#undef SCRYPT_ROMIX_FN
#undef SCRYPT_MIX_FN
#undef SCRYPT_ROMIX_TANGLE_FN
#undef SCRYPT_ROMIX_UNTANGLE_FN
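The `/*N - 1*/511` and `/*r*/1` comments show that this copy of the template has been specialized for N = 512, r = 1. The general loop the numbered comments describe looks like the following (illustrative only, written against the macro names before the trailing #undefs; requires <string.h> for memcpy):

static void
romix_generic(scrypt_mix_word_t *X, scrypt_mix_word_t *Y,
              scrypt_mix_word_t *V, uint32_t N, uint32_t r) {
    uint32_t i, j, chunkWords = (uint32_t)(SCRYPT_BLOCK_WORDS * 2 * r);
    scrypt_mix_word_t *block = V;

    SCRYPT_ROMIX_TANGLE_FN(X, 2 * r);
    memcpy(block, X, chunkWords * sizeof(scrypt_mix_word_t));
    for (i = 0; i < N - 1; i++, block += chunkWords)     /* V_i = X; X = H(X) */
        SCRYPT_CHUNKMIX_FN(block + chunkWords, block, NULL, r);
    SCRYPT_CHUNKMIX_FN(X, block, NULL, r);

    for (i = 0; i < N; i += 2) {
        j = X[chunkWords - SCRYPT_BLOCK_WORDS] & (N - 1); /* Integerify(X) % N */
        SCRYPT_CHUNKMIX_FN(Y, X, scrypt_item(V, j, chunkWords), r);
        j = Y[chunkWords - SCRYPT_BLOCK_WORDS] & (N - 1); /* Integerify(Y) % N */
        SCRYPT_CHUNKMIX_FN(X, Y, scrypt_item(V, j, chunkWords), r);
    }
    SCRYPT_ROMIX_UNTANGLE_FN(X, 2 * r);
}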

View File

@@ -1,23 +0,0 @@
#if defined(SCRYPT_SALSA64)
#include "scrypt-jane-salsa64.h"
#else
#define SCRYPT_MIX_BASE "ERROR"
typedef uint32_t scrypt_mix_word_t;
#define SCRYPT_WORDTO8_LE U32TO8_LE
#define SCRYPT_WORD_ENDIAN_SWAP U32_SWAP
#define SCRYPT_BLOCK_BYTES 64
#define SCRYPT_BLOCK_WORDS (SCRYPT_BLOCK_BYTES / sizeof(scrypt_mix_word_t))
#if !defined(SCRYPT_CHOOSE_COMPILETIME)
static void FASTCALL scrypt_ROMix_error(scrypt_mix_word_t *X/*[chunkWords]*/, scrypt_mix_word_t *Y/*[chunkWords]*/, scrypt_mix_word_t *V/*[chunkWords * N]*/, uint32_t N, uint32_t r) {}
static scrypt_ROMixfn scrypt_getROMix(void) { return scrypt_ROMix_error; }
#else
static void FASTCALL scrypt_ROMix(scrypt_mix_word_t *X, scrypt_mix_word_t *Y, scrypt_mix_word_t *V, uint32_t N, uint32_t r) {}
#endif
static int scrypt_test_mix(void) { return 0; }
#error must define a mix function!
#endif
#if !defined(SCRYPT_CHOOSE_COMPILETIME)
#undef SCRYPT_MIX
#define SCRYPT_MIX SCRYPT_MIX_BASE
#endif

View File

@@ -1,183 +0,0 @@
#define SCRYPT_MIX_BASE "Salsa64/8"
typedef uint64_t scrypt_mix_word_t;
#define SCRYPT_WORDTO8_LE U64TO8_LE
#define SCRYPT_WORD_ENDIAN_SWAP U64_SWAP
#define SCRYPT_BLOCK_BYTES 128
#define SCRYPT_BLOCK_WORDS (SCRYPT_BLOCK_BYTES / sizeof(scrypt_mix_word_t))
/* must have these here in case block bytes is ever != 64 */
#include "scrypt-jane-romix-basic.h"
#include "scrypt-jane-mix_salsa64-avx2.h"
#include "scrypt-jane-mix_salsa64-xop.h"
#include "scrypt-jane-mix_salsa64-avx.h"
#include "scrypt-jane-mix_salsa64-ssse3.h"
#include "scrypt-jane-mix_salsa64-sse2.h"
#include "scrypt-jane-mix_salsa64.h"
#if defined(SCRYPT_SALSA64_AVX2)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_avx2
#define SCRYPT_ROMIX_FN scrypt_ROMix_avx2
#define SCRYPT_ROMIX_TANGLE_FN salsa64_core_tangle_sse2
#define SCRYPT_ROMIX_UNTANGLE_FN salsa64_core_tangle_sse2
#include "scrypt-jane-romix-template.h"
#endif
#if defined(SCRYPT_SALSA64_XOP)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_xop
#define SCRYPT_ROMIX_FN scrypt_ROMix_xop
#define SCRYPT_ROMIX_TANGLE_FN salsa64_core_tangle_sse2
#define SCRYPT_ROMIX_UNTANGLE_FN salsa64_core_tangle_sse2
#include "scrypt-jane-romix-template.h"
#endif
#if defined(SCRYPT_SALSA64_AVX)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_avx
#define SCRYPT_ROMIX_FN scrypt_ROMix_avx
#define SCRYPT_ROMIX_TANGLE_FN salsa64_core_tangle_sse2
#define SCRYPT_ROMIX_UNTANGLE_FN salsa64_core_tangle_sse2
#include "scrypt-jane-romix-template.h"
#endif
#if defined(SCRYPT_SALSA64_SSSE3)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_ssse3
#define SCRYPT_ROMIX_FN scrypt_ROMix_ssse3
#define SCRYPT_ROMIX_TANGLE_FN salsa64_core_tangle_sse2
#define SCRYPT_ROMIX_UNTANGLE_FN salsa64_core_tangle_sse2
#include "scrypt-jane-romix-template.h"
#endif
#if defined(SCRYPT_SALSA64_SSE2)
#define SCRYPT_CHUNKMIX_FN scrypt_ChunkMix_sse2
#define SCRYPT_ROMIX_FN scrypt_ROMix_sse2
#define SCRYPT_ROMIX_TANGLE_FN salsa64_core_tangle_sse2
#define SCRYPT_ROMIX_UNTANGLE_FN salsa64_core_tangle_sse2
#include "scrypt-jane-romix-template.h"
#endif
/* cpu agnostic */
#define SCRYPT_ROMIX_FN scrypt_ROMix_basic
#define SCRYPT_MIX_FN salsa64_core_basic
#define SCRYPT_ROMIX_TANGLE_FN scrypt_romix_convert_endian
#define SCRYPT_ROMIX_UNTANGLE_FN scrypt_romix_convert_endian
#include "scrypt-jane-romix-template.h"
#if !defined(SCRYPT_CHOOSE_COMPILETIME)
static scrypt_ROMixfn
scrypt_getROMix(void) {
size_t cpuflags = detect_cpu();
#if defined(SCRYPT_SALSA64_AVX2)
if (cpuflags & cpu_avx2)
return scrypt_ROMix_avx2;
else
#endif
#if defined(SCRYPT_SALSA64_XOP)
if (cpuflags & cpu_xop)
return scrypt_ROMix_xop;
else
#endif
#if defined(SCRYPT_SALSA64_AVX)
if (cpuflags & cpu_avx)
return scrypt_ROMix_avx;
else
#endif
#if defined(SCRYPT_SALSA64_SSSE3)
if (cpuflags & cpu_ssse3)
return scrypt_ROMix_ssse3;
else
#endif
#if defined(SCRYPT_SALSA64_SSE2)
if (cpuflags & cpu_sse2)
return scrypt_ROMix_sse2;
else
#endif
return scrypt_ROMix_basic;
}
#endif
#if defined(SCRYPT_TEST_SPEED)
static size_t
available_implementations(void) {
size_t cpuflags = detect_cpu();
size_t flags = 0;
#if defined(SCRYPT_SALSA64_AVX2)
if (cpuflags & cpu_avx2)
flags |= cpu_avx2;
#endif
#if defined(SCRYPT_SALSA64_XOP)
if (cpuflags & cpu_xop)
flags |= cpu_xop;
#endif
#if defined(SCRYPT_SALSA64_AVX)
if (cpuflags & cpu_avx)
flags |= cpu_avx;
#endif
#if defined(SCRYPT_SALSA64_SSSE3)
if (cpuflags & cpu_ssse3)
flags |= cpu_ssse3;
#endif
#if defined(SCRYPT_SALSA64_SSE2)
if (cpuflags & cpu_sse2)
flags |= cpu_sse2;
#endif
return flags;
}
#endif
static int
scrypt_test_mix(void) {
static const uint8_t expected[16] = {
0xf8,0x92,0x9b,0xf8,0xcc,0x1d,0xce,0x2e,0x13,0x82,0xac,0x96,0xb2,0x6c,0xee,0x2c,
};
int ret = 1;
size_t cpuflags = detect_cpu();
#if defined(SCRYPT_SALSA64_AVX2)
if (cpuflags & cpu_avx2)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_avx2, salsa64_core_tangle_sse2, salsa64_core_tangle_sse2, expected);
#endif
#if defined(SCRYPT_SALSA64_XOP)
if (cpuflags & cpu_xop)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_xop, salsa64_core_tangle_sse2, salsa64_core_tangle_sse2, expected);
#endif
#if defined(SCRYPT_SALSA64_AVX)
if (cpuflags & cpu_avx)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_avx, salsa64_core_tangle_sse2, salsa64_core_tangle_sse2, expected);
#endif
#if defined(SCRYPT_SALSA64_SSSE3)
if (cpuflags & cpu_ssse3)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_ssse3, salsa64_core_tangle_sse2, salsa64_core_tangle_sse2, expected);
#endif
#if defined(SCRYPT_SALSA64_SSE2)
if (cpuflags & cpu_sse2)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_sse2, salsa64_core_tangle_sse2, salsa64_core_tangle_sse2, expected);
#endif
#if defined(SCRYPT_SALSA64_BASIC)
ret &= scrypt_test_mix_instance(scrypt_ChunkMix_basic, scrypt_romix_convert_endian, scrypt_romix_convert_endian, expected);
#endif
return ret;
}
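Putting the dispatch and the known-answer test together, a caller would typically do something like the following at startup (illustrative only; `init_scrypt` is a hypothetical wrapper):

static int init_scrypt(void) {
    if (!scrypt_test_mix())
        return 0;                        /* a kernel failed its self-test */
#if !defined(SCRYPT_CHOOSE_COMPILETIME)
    scrypt_ROMixfn romix = scrypt_getROMix();
    (void)romix;                         /* the real caller stores this */
#endif
    return 1;
}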

View File

@@ -1,28 +0,0 @@
typedef struct scrypt_test_setting_t {
const char *pw, *salt;
uint8_t Nfactor, rfactor, pfactor;
} scrypt_test_setting;
static const scrypt_test_setting post_settings[] = {
{"", "", 3, 0, 0},
{"password", "NaCl", 9, 3, 4},
{0, 0, 0, 0, 0}
};
#if defined(SCRYPT_SKEIN512)
#if defined(SCRYPT_SALSA64)
static const uint8_t post_vectors[][64] = {
{0xd2,0xad,0x32,0x05,0xee,0x80,0xe3,0x44,0x70,0xc6,0x34,0xde,0x05,0xb6,0xcf,0x60,
0x89,0x98,0x70,0xc0,0xb8,0xf5,0x54,0xf1,0xa6,0xb2,0xc8,0x76,0x34,0xec,0xc4,0x59,
0x8e,0x64,0x42,0xd0,0xa9,0xed,0xe7,0x19,0xb2,0x8a,0x11,0xc6,0xa6,0xbf,0xa7,0xa9,
0x4e,0x44,0x32,0x7e,0x12,0x91,0x9d,0xfe,0x52,0x48,0xa8,0x27,0xb3,0xfc,0xb1,0x89},
{0xd6,0x67,0xd2,0x3e,0x30,0x1e,0x9d,0xe2,0x55,0x68,0x17,0x3d,0x2b,0x75,0x5a,0xe5,
0x04,0xfb,0x3d,0x0e,0x86,0xe0,0xaa,0x1d,0xd4,0x72,0xda,0xb0,0x79,0x41,0xb7,0x99,
0x68,0xe5,0xd9,0x55,0x79,0x7d,0xc3,0xd1,0xa6,0x56,0xc1,0xbe,0x0b,0x6c,0x62,0x23,
0x66,0x67,0x91,0x47,0x99,0x13,0x6b,0xe3,0xda,0x59,0x55,0x18,0x67,0x8f,0x2e,0x3b}
};
#endif
#else
static const uint8_t post_vectors[][64] = {{0}};
#endif

View File

@@ -1,92 +0,0 @@
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <openssl/sha.h>
#include "ar2/argon2.h"
#include "ar2/cores.h"
#include "ar2/ar2-scrypt-jane.h"
#include "algo-gate-api.h"
#define T_COSTS 2
#define M_COSTS 16
#define MASK 8
#define ZERO 0
inline void argon_call(void *out, void *in, void *salt, int type)
{
argon2_context context;
context.out = (uint8_t *)out;
context.pwd = (uint8_t *)in;
context.salt = (uint8_t*)salt;
context.pwdlen = 0;
context.allocate_cbk = NULL;
context.free_cbk = NULL;
ar2_argon2_core(&context, type);
}
void argon2hash(void *output, const void *input)
{
uint32_t _ALIGN(64) hashA[8], hashB[8];
my_scrypt((const unsigned char *)input, 80,
(const unsigned char *)input, 80,
(unsigned char *)hashA);
argon_call(hashB, hashA, hashA, (hashA[0] & MASK) == ZERO);
my_scrypt((const unsigned char *)hashB, 32,
(const unsigned char *)hashB, 32,
(unsigned char *)output);
}
int scanhash_argon2( struct work* work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(64) endiandata[20];
uint32_t _ALIGN(64) hash[8];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
uint32_t nonce = first_nonce;
swab32_array( endiandata, pdata, 20 );
do {
be32enc(&endiandata[19], nonce);
argon2hash(hash, endiandata);
if (hash[7] <= Htarg && fulltest(hash, ptarget)) {
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce;
work_set_target_ratio(work, hash);
return 1;
}
nonce++;
} while (nonce < max_nonce && !work_restart[thr_id].restart);
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
return 0;
}
int64_t argon2_get_max64 ()
{
return 0x1ffLL;
}
bool register_argon2_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT | AVX_OPT | AVX2_OPT;
gate->scanhash = (void*)&scanhash_argon2;
gate->hash = (void*)&argon2hash;
gate->gen_merkle_root = (void*)&SHA256_gen_merkle_root;
gate->set_target = (void*)&scrypt_set_target;
gate->get_max64 = (void*)&argon2_get_max64;
return true;
}

View File

@@ -1,4 +1,5 @@
#include "argon2d-gate.h"
#include "simd-utils.h"
#include "argon2d/argon2.h"
static const size_t INPUT_BYTES = 80; // Length of a block header in bytes. Input Length = Salt Length (salt = input)
@@ -36,43 +37,39 @@ void argon2d_crds_hash( void *output, const void *input )
int scanhash_argon2d_crds( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(64) endiandata[20];
uint32_t _ALIGN(64) hash[8];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id; // thr_id arg is deprecated
uint32_t _ALIGN(64) edata[20];
uint32_t _ALIGN(64) hash[8];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
uint32_t nonce = first_nonce;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
swab32_array( edata, pdata, 20 );
uint32_t nonce = first_nonce;
do {
be32enc(&edata[19], nonce);
argon2d_crds_hash( hash, edata );
if ( hash[7] <= Htarg && fulltest( hash, ptarget ) && !opt_benchmark )
{
pdata[19] = nonce;
submit_solution( work, hash, mythr );
}
nonce++;
} while (nonce < max_nonce && !work_restart[thr_id].restart);
swab32_array( endiandata, pdata, 20 );
do {
be32enc(&endiandata[19], nonce);
argon2d_crds_hash( hash, endiandata );
if ( hash[7] <= Htarg && fulltest( hash, ptarget ) )
{
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce;
work_set_target_ratio(work, hash);
return 1;
}
nonce++;
} while (nonce < max_nonce && !work_restart[thr_id].restart);
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
return 0;
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
return 0;
}
bool register_argon2d_crds_algo( algo_gate_t* gate )
{
gate->scanhash = (void*)&scanhash_argon2d_crds;
gate->hash = (void*)&argon2d_crds_hash;
gate->set_target = (void*)&scrypt_set_target;
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT | NEON_OPT;
opt_target_factor = 65536.0;
return true;
}
@@ -107,43 +104,41 @@ void argon2d_dyn_hash( void *output, const void *input )
int scanhash_argon2d_dyn( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(64) endiandata[20];
uint32_t _ALIGN(64) hash[8];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id; // thr_id arg is deprecated
uint32_t _ALIGN(64) edata[20];
uint32_t _ALIGN(64) hash[8];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const int thr_id = mythr->id;
const uint32_t first_nonce = (const uint32_t)pdata[19];
const uint32_t last_nonce = (const uint32_t)max_nonce;
uint32_t nonce = first_nonce;
const bool bench = opt_benchmark;
const uint32_t first_nonce = pdata[19];
const uint32_t Htarg = ptarget[7];
v128_bswap32_80( edata, pdata );
do
{
edata[19] = nonce;
argon2d_dyn_hash( hash, edata );
if ( unlikely( valid_hash( (uint64_t*)hash, (uint64_t*)ptarget )
&& !bench ) )
{
pdata[19] = bswap_32( nonce );
submit_solution( work, hash, mythr );
}
nonce++;
} while ( likely( nonce < last_nonce && !work_restart[thr_id].restart ) );
uint32_t nonce = first_nonce;
swab32_array( endiandata, pdata, 20 );
do {
be32enc(&endiandata[19], nonce);
argon2d_dyn_hash( hash, endiandata );
if ( hash[7] <= Htarg && fulltest( hash, ptarget ) )
{
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce;
work_set_target_ratio(work, hash);
return 1;
}
nonce++;
} while (nonce < max_nonce && !work_restart[thr_id].restart);
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce + 1;
return 0;
pdata[19] = nonce;
*hashes_done = pdata[19] - first_nonce;
return 0;
}
bool register_argon2d_dyn_algo( algo_gate_t* gate )
{
gate->scanhash = (void*)&scanhash_argon2d_dyn;
gate->hash = (void*)&argon2d_dyn_hash;
gate->set_target = (void*)&scrypt_set_target;
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT | NEON_OPT;
opt_target_factor = 65536.0;
return true;
}
@@ -153,48 +148,42 @@ int scanhash_argon2d4096( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(64) vhash[8];
uint32_t _ALIGN(64) endiandata[20];
uint32_t _ALIGN(64) edata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
const uint32_t last_nonce = (const uint32_t)max_nonce;
uint32_t n = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
const int thr_id = mythr->id; // thr_id arg is deprecated
uint32_t t_cost = 1; // 1 iteration
uint32_t m_cost = 4096; // use 4MB
uint32_t parallelism = 1; // 1 thread, 2 lanes
const bool bench = opt_benchmark;
for ( int i = 0; i < 19; i++ )
be32enc( &endiandata[i], pdata[i] );
v128_bswap32_80( edata, pdata );
do {
be32enc( &endiandata[19], n );
argon2d_hash_raw( t_cost, m_cost, parallelism, (char*) endiandata, 80,
(char*) endiandata, 80, (char*) vhash, 32, ARGON2_VERSION_13 );
if ( vhash[7] < Htarg && fulltest( vhash, ptarget ) )
edata[19] = n;
argon2d_hash_raw( t_cost, m_cost, parallelism, (char*) edata, 80,
(char*) edata, 80, (char*) vhash, 32, ARGON2_VERSION_13 );
if ( unlikely( valid_hash( vhash, ptarget ) && !bench ) )
{
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
return true;
be32enc( &pdata[19], n );
submit_solution( work, vhash, mythr );
}
n++;
} while ( likely( n < last_nonce && !work_restart[thr_id].restart ) );
} while (n < max_nonce && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
*hashes_done = n - first_nonce;
pdata[19] = n;
return 0;
}
int64_t get_max64_0x1ff() { return 0x1ff; }
bool register_argon2d4096_algo( algo_gate_t* gate )
{
gate->scanhash = (void*)&scanhash_argon2d4096;
gate->set_target = (void*)&scrypt_set_target;
gate->get_max64 = (void*)&get_max64_0x1ff;
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT | NEON_OPT;
opt_target_factor = 65536.0;
return true;
}

View File

@@ -28,6 +28,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//#include <mm_malloc.h>
#include "core.h"
#include "argon2d_thread.h"
@@ -99,7 +100,8 @@ int allocate_memory(const argon2_context *context, uint8_t **memory,
if (context->allocate_cbk) {
(context->allocate_cbk)(memory, memory_size);
} else {
*memory = malloc(memory_size);
*memory = mm_malloc( memory_size, 64 );
// *memory = malloc(memory_size);
}
if (*memory == NULL) {
@@ -116,7 +118,8 @@ void free_memory(const argon2_context *context, uint8_t *memory,
if (context->free_cbk) {
(context->free_cbk)(memory, memory_size);
} else {
free(memory);
// free(memory);
mm_free( memory );
}
}

View File

@@ -18,6 +18,7 @@
#ifndef ARGON2_CORE_H
#define ARGON2_CORE_H
#include "miner.h"
#include "argon2.h"
#define CONST_CAST(x) (x)(uintptr_t)

View File

@@ -21,7 +21,7 @@
#include "argon2.h"
#include "core.h"
#include "simd-utils.h"
#include "../blake2/blake2.h"
#include "../blake2/blamka-round-opt.h"
@@ -35,26 +35,37 @@
* @pre all block pointers must be valid
*/
#if defined(__AVX512F__)
#if defined(SIMD512)
static void fill_block(__m512i *state, const block *ref_block,
block *next_block, int with_xor) {
static inline __m512i blamka( __m512i x, __m512i y )
{
__m512i xy = _mm512_mul_epu32( x, y );
return _mm512_add_epi64( _mm512_add_epi64( x, y ),
_mm512_add_epi64( xy, xy ) );
}
static void fill_block( __m512i *state, const block *ref_block,
block *next_block, int with_xor )
{
__m512i block_XY[ARGON2_512BIT_WORDS_IN_BLOCK];
unsigned int i;
if (with_xor) {
for (i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++) {
state[i] = _mm512_xor_si512(
state[i], _mm512_loadu_si512((const __m512i *)ref_block->v + i));
block_XY[i] = _mm512_xor_si512(
state[i], _mm512_loadu_si512((const __m512i *)next_block->v + i));
}
} else {
for (i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++) {
block_XY[i] = state[i] = _mm512_xor_si512(
state[i], _mm512_loadu_si512((const __m512i *)ref_block->v + i));
if ( with_xor )
{
for ( i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++ )
{
state[i] = _mm512_xor_si512( state[i],
_mm512_load_si512( (const __m512i*)ref_block->v + i ) );
block_XY[i] = _mm512_xor_si512( state[i],
_mm512_load_si512( (const __m512i*)next_block->v + i ) );
}
}
else
{
for ( i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++ )
block_XY[i] = state[i] = _mm512_xor_si512( state[i],
_mm512_load_si512( (const __m512i*)ref_block->v + i ) );
}
BLAKE2_ROUND_1( state[ 0], state[ 1], state[ 2], state[ 3],
state[ 4], state[ 5], state[ 6], state[ 7] );
@@ -66,46 +77,37 @@ static void fill_block(__m512i *state, const block *ref_block,
BLAKE2_ROUND_2( state[ 1], state[ 3], state[ 5], state[ 7],
state[ 9], state[11], state[13], state[15] );
/*
for (i = 0; i < 2; ++i) {
BLAKE2_ROUND_1(
state[8 * i + 0], state[8 * i + 1], state[8 * i + 2], state[8 * i + 3],
state[8 * i + 4], state[8 * i + 5], state[8 * i + 6], state[8 * i + 7]);
}
for (i = 0; i < 2; ++i) {
BLAKE2_ROUND_2(
state[2 * 0 + i], state[2 * 1 + i], state[2 * 2 + i], state[2 * 3 + i],
state[2 * 4 + i], state[2 * 5 + i], state[2 * 6 + i], state[2 * 7 + i]);
}
*/
for (i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++) {
state[i] = _mm512_xor_si512(state[i], block_XY[i]);
_mm512_storeu_si512((__m512i *)next_block->v + i, state[i]);
for ( i = 0; i < ARGON2_512BIT_WORDS_IN_BLOCK; i++ )
{
state[i] = _mm512_xor_si512( state[i], block_XY[i] );
_mm512_store_si512( (__m512i*)next_block->v + i, state[i] );
}
}
#elif defined(__AVX2__)
static void fill_block(__m256i *state, const block *ref_block,
block *next_block, int with_xor) {
static void fill_block( __m256i *state, const block *ref_block,
block *next_block, int with_xor )
{
__m256i block_XY[ARGON2_HWORDS_IN_BLOCK];
unsigned int i;
if (with_xor) {
for (i = 0; i < ARGON2_HWORDS_IN_BLOCK; i++) {
state[i] = _mm256_xor_si256(
state[i], _mm256_loadu_si256((const __m256i *)ref_block->v + i));
block_XY[i] = _mm256_xor_si256(
state[i], _mm256_loadu_si256((const __m256i *)next_block->v + i));
}
} else {
for (i = 0; i < ARGON2_HWORDS_IN_BLOCK; i++) {
block_XY[i] = state[i] = _mm256_xor_si256(
state[i], _mm256_loadu_si256((const __m256i *)ref_block->v + i));
if ( with_xor )
{
for ( i = 0; i < ARGON2_HWORDS_IN_BLOCK; i++ )
{
state[i] = _mm256_xor_si256( state[i],
_mm256_load_si256( (const __m256i*)ref_block->v + i) );
block_XY[i] = _mm256_xor_si256( state[i],
_mm256_load_si256( (const __m256i*)next_block->v + i) );
}
}
else
{
for ( i = 0; i < ARGON2_HWORDS_IN_BLOCK; i++ )
block_XY[i] = state[i] = _mm256_xor_si256( state[i],
_mm256_load_si256( (const __m256i*)ref_block->v + i) );
}
BLAKE2_ROUND_1( state[ 0], state[ 4], state[ 1], state[ 5],
state[ 2], state[ 6], state[ 3], state[ 7] );
@@ -125,44 +127,37 @@ static void fill_block(__m256i *state, const block *ref_block,
BLAKE2_ROUND_2( state[ 3], state[ 7], state[11], state[15],
state[19], state[23], state[27], state[31] );
/*
for (i = 0; i < 4; ++i) {
BLAKE2_ROUND_1(state[8 * i + 0], state[8 * i + 4], state[8 * i + 1], state[8 * i + 5],
state[8 * i + 2], state[8 * i + 6], state[8 * i + 3], state[8 * i + 7]);
}
for (i = 0; i < 4; ++i) {
BLAKE2_ROUND_2(state[ 0 + i], state[ 4 + i], state[ 8 + i], state[12 + i],
state[16 + i], state[20 + i], state[24 + i], state[28 + i]);
}
*/
for (i = 0; i < ARGON2_HWORDS_IN_BLOCK; i++) {
state[i] = _mm256_xor_si256(state[i], block_XY[i]);
_mm256_storeu_si256((__m256i *)next_block->v + i, state[i]);
for ( i = 0; i < ARGON2_HWORDS_IN_BLOCK; i++ )
{
state[i] = _mm256_xor_si256( state[i], block_XY[i] );
_mm256_store_si256( (__m256i*)next_block->v + i, state[i] );
}
}
#else // SSE2
static void fill_block(__m128i *state, const block *ref_block,
block *next_block, int with_xor) {
__m128i block_XY[ARGON2_OWORDS_IN_BLOCK];
static void fill_block( v128u64_t *state, const block *ref_block,
block *next_block, int with_xor )
{
v128u64_t block_XY[ARGON2_OWORDS_IN_BLOCK];
unsigned int i;
if (with_xor) {
for (i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++) {
state[i] = _mm_xor_si128(
state[i], _mm_loadu_si128((const __m128i *)ref_block->v + i));
block_XY[i] = _mm_xor_si128(
state[i], _mm_loadu_si128((const __m128i *)next_block->v + i));
}
} else {
for (i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++) {
block_XY[i] = state[i] = _mm_xor_si128(
state[i], _mm_loadu_si128((const __m128i *)ref_block->v + i));
if ( with_xor )
{
for ( i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++ )
{
state[i] = v128_xor( state[i],
v128_load( (const v128_t*)ref_block->v + i) );
block_XY[i] = v128_xor( state[i],
v128_load( (const v128_t*)next_block->v + i) );
}
}
else
{
for ( i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++ )
block_XY[i] = state[i] = v128_xor( state[i],
v128_load( (const v128_t*)ref_block->v + i) );
}
BLAKE2_ROUND( state[ 0], state[ 1], state[ 2], state[ 3],
state[ 4], state[ 5], state[ 6], state[ 7] );
@@ -198,22 +193,10 @@ static void fill_block(__m128i *state, const block *ref_block,
BLAKE2_ROUND( state[ 7], state[15], state[23], state[31],
state[39], state[47], state[55], state[63] );
/*
for (i = 0; i < 8; ++i) {
BLAKE2_ROUND(state[8 * i + 0], state[8 * i + 1], state[8 * i + 2],
state[8 * i + 3], state[8 * i + 4], state[8 * i + 5],
state[8 * i + 6], state[8 * i + 7]);
}
for (i = 0; i < 8; ++i) {
BLAKE2_ROUND(state[8 * 0 + i], state[8 * 1 + i], state[8 * 2 + i],
state[8 * 3 + i], state[8 * 4 + i], state[8 * 5 + i],
state[8 * 6 + i], state[8 * 7 + i]);
}
*/
for (i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++) {
state[i] = _mm_xor_si128(state[i], block_XY[i]);
_mm_storeu_si128((__m128i *)next_block->v + i, state[i]);
for ( i = 0; i < ARGON2_OWORDS_IN_BLOCK; i++ )
{
state[i] = v128_xor( state[i], block_XY[i] );
v128_store( (v128_t*)next_block->v + i, state[i] );
}
}
@@ -229,8 +212,8 @@ static void next_addresses(block *address_block, block *input_block) {
__m256i zero_block[ARGON2_HWORDS_IN_BLOCK];
__m256i zero2_block[ARGON2_HWORDS_IN_BLOCK];
#else
__m128i zero_block[ARGON2_OWORDS_IN_BLOCK];
__m128i zero2_block[ARGON2_OWORDS_IN_BLOCK];
v128_t zero_block[ARGON2_OWORDS_IN_BLOCK];
v128_t zero2_block[ARGON2_OWORDS_IN_BLOCK];
#endif
memset(zero_block, 0, sizeof(zero_block));
@@ -254,12 +237,12 @@ void fill_segment(const argon2_instance_t *instance,
uint64_t pseudo_rand, ref_index, ref_lane;
uint32_t prev_offset, curr_offset;
uint32_t starting_index, i;
#if defined(__AVX512F__)
#if defined(SIMD512)
__m512i state[ARGON2_512BIT_WORDS_IN_BLOCK];
#elif defined(__AVX2__)
__m256i state[ARGON2_HWORDS_IN_BLOCK];
#else
__m128i state[ARGON2_OWORDS_IN_BLOCK];
v128u64_t state[ARGON2_OWORDS_IN_BLOCK];
#endif
// int data_independent_addressing;

View File

@@ -19,95 +19,57 @@
#define BLAKE_ROUND_MKA_OPT_H
#include "blake2-impl.h"
#include "simd-utils.h"
#include <emmintrin.h>
#if defined(__SSSE3__)
#include <tmmintrin.h> /* for _mm_shuffle_epi8 and _mm_alignr_epi8 */
#endif
#if !defined(SIMD512)
#if defined(__XOP__) && (defined(__GNUC__) || defined(__clang__))
#include <x86intrin.h>
#endif
#if !defined(__AVX512F__)
#if !defined(__AVX2__)
#if !defined(__XOP__)
#if defined(__SSSE3__)
#define r16 \
(_mm_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9))
#define r24 \
(_mm_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10))
#define _mm_roti_epi64(x, c) \
(-(c) == 32) \
? _mm_shuffle_epi32((x), _MM_SHUFFLE(2, 3, 0, 1)) \
: (-(c) == 24) \
? _mm_shuffle_epi8((x), r24) \
: (-(c) == 16) \
? _mm_shuffle_epi8((x), r16) \
: (-(c) == 63) \
? _mm_xor_si128(_mm_srli_epi64((x), -(c)), \
_mm_add_epi64((x), (x))) \
: _mm_xor_si128(_mm_srli_epi64((x), -(c)), \
_mm_slli_epi64((x), 64 - (-(c))))
#else /* defined(__SSE2__) */
#define _mm_roti_epi64(r, c) \
_mm_xor_si128(_mm_srli_epi64((r), -(c)), _mm_slli_epi64((r), 64 - (-(c))))
#endif
#else
#endif
static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
const __m128i z = _mm_mul_epu32(x, y);
return _mm_add_epi64(_mm_add_epi64(x, y), _mm_add_epi64(z, z));
static BLAKE2_INLINE v128_t fBlaMka(v128_t x, v128_t y)
{
const v128u64_t z = v128_mulw32( x, y );
return (v128u32_t)v128_add64( v128_add64( (v128u64_t)x, (v128u64_t)y ),
v128_add64( z, z ) );
}
#define G1(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = fBlaMka(A0, B0); \
A1 = fBlaMka(A1, B1); \
\
D0 = _mm_xor_si128(D0, A0); \
D1 = _mm_xor_si128(D1, A1); \
\
D0 = _mm_roti_epi64(D0, -32); \
D1 = _mm_roti_epi64(D1, -32); \
\
C0 = fBlaMka(C0, D0); \
C1 = fBlaMka(C1, D1); \
\
B0 = _mm_xor_si128(B0, C0); \
B1 = _mm_xor_si128(B1, C1); \
\
B0 = _mm_roti_epi64(B0, -24); \
B1 = _mm_roti_epi64(B1, -24); \
} while ((void)0, 0)
#define G1( A0, B0, C0, D0, A1, B1, C1, D1 ) \
{ \
A0 = fBlaMka( A0, B0 ); \
A1 = fBlaMka( A1, B1 ); \
D0 = v128_xor( D0, A0 ); \
D1 = v128_xor( D1, A1 ); \
D0 = v128_ror64( D0, 32 ); \
D1 = v128_ror64( D1, 32 ); \
C0 = fBlaMka( C0, D0 ); \
C1 = fBlaMka( C1, D1 ); \
B0 = v128_xor( B0, C0 ); \
B1 = v128_xor( B1, C1 ); \
B0 = v128_ror64( B0, 24 ); \
B1 = v128_ror64( B1, 24 ); \
}
#define G2(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
A0 = fBlaMka(A0, B0); \
A1 = fBlaMka(A1, B1); \
\
D0 = _mm_xor_si128(D0, A0); \
D1 = _mm_xor_si128(D1, A1); \
\
D0 = _mm_roti_epi64(D0, -16); \
D1 = _mm_roti_epi64(D1, -16); \
\
C0 = fBlaMka(C0, D0); \
C1 = fBlaMka(C1, D1); \
\
B0 = _mm_xor_si128(B0, C0); \
B1 = _mm_xor_si128(B1, C1); \
\
B0 = _mm_roti_epi64(B0, -63); \
B1 = _mm_roti_epi64(B1, -63); \
} while ((void)0, 0)
#define G2( A0, B0, C0, D0, A1, B1, C1, D1 ) \
{ \
A0 = fBlaMka( A0, B0 ); \
A1 = fBlaMka( A1, B1 ); \
D0 = v128_xor( D0, A0 ); \
D1 = v128_xor( D1, A1 ); \
D0 = v128_ror64( D0, 16 ); \
D1 = v128_ror64( D1, 16 ); \
C0 = fBlaMka( C0, D0 ); \
C1 = fBlaMka( C1, D1 ); \
B0 = v128_xor( B0, C0 ); \
B1 = v128_xor( B1, C1 ); \
B0 = v128_ror64( B0, 63 ); \
B1 = v128_ror64( B1, 63 ); \
}
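// Taken together, G1 and G2 perform one Blake2b G step with every 64-bit
// addition replaced by the BlaMka multiply-add above; the rotation amounts
// (32, 24, 16, 63) are unchanged from Blake2b.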
#if defined(__SSSE3__) || defined(__ARM_NEON)
#if defined(__SSSE3__)
#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m128i t0 = _mm_alignr_epi8(B1, B0, 8); \
__m128i t1 = _mm_alignr_epi8(B0, B1, 8); \
v128_t t0 = v128_alignr8(B1, B0, 8); \
v128_t t1 = v128_alignr8(B0, B1, 8); \
B0 = t0; \
B1 = t1; \
\
@@ -115,16 +77,16 @@ static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
C0 = C1; \
C1 = t0; \
\
t0 = _mm_alignr_epi8(D1, D0, 8); \
t1 = _mm_alignr_epi8(D0, D1, 8); \
t0 = v128_alignr8(D1, D0, 8); \
t1 = v128_alignr8(D0, D1, 8); \
D0 = t1; \
D1 = t0; \
} while ((void)0, 0)
#define UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m128i t0 = _mm_alignr_epi8(B0, B1, 8); \
__m128i t1 = _mm_alignr_epi8(B1, B0, 8); \
v128_t t0 = v128_alignr8(B0, B1, 8); \
v128_t t1 = v128_alignr8(B1, B0, 8); \
B0 = t0; \
B1 = t1; \
\
@@ -132,37 +94,39 @@ static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
C0 = C1; \
C1 = t0; \
\
t0 = _mm_alignr_epi8(D0, D1, 8); \
t1 = _mm_alignr_epi8(D1, D0, 8); \
t0 = v128_alignr8(D0, D1, 8); \
t1 = v128_alignr8(D1, D0, 8); \
D0 = t1; \
D1 = t0; \
} while ((void)0, 0)
#else /* SSE2 */
#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m128i t0 = D0; \
__m128i t1 = B0; \
v128_t t0 = D0; \
v128_t t1 = B0; \
D0 = C0; \
C0 = C1; \
C1 = D0; \
D0 = _mm_unpackhi_epi64(D1, _mm_unpacklo_epi64(t0, t0)); \
D1 = _mm_unpackhi_epi64(t0, _mm_unpacklo_epi64(D1, D1)); \
B0 = _mm_unpackhi_epi64(B0, _mm_unpacklo_epi64(B1, B1)); \
B1 = _mm_unpackhi_epi64(B1, _mm_unpacklo_epi64(t1, t1)); \
D0 = v128_unpackhi64(D1, v128_unpacklo64(t0, t0)); \
D1 = v128_unpackhi64(t0, v128_unpacklo64(D1, D1)); \
B0 = v128_unpackhi64(B0, v128_unpacklo64(B1, B1)); \
B1 = v128_unpackhi64(B1, v128_unpacklo64(t1, t1)); \
} while ((void)0, 0)
#define UNDIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
do { \
__m128i t0, t1; \
v128_t t0, t1; \
t0 = C0; \
C0 = C1; \
C1 = t0; \
t0 = B0; \
t1 = D0; \
B0 = _mm_unpackhi_epi64(B1, _mm_unpacklo_epi64(B0, B0)); \
B1 = _mm_unpackhi_epi64(t0, _mm_unpacklo_epi64(B1, B1)); \
D0 = _mm_unpackhi_epi64(D0, _mm_unpacklo_epi64(D1, D1)); \
D1 = _mm_unpackhi_epi64(D1, _mm_unpacklo_epi64(t1, t1)); \
B0 = v128_unpackhi64(B1, v128_unpacklo64(B0, B0)); \
B1 = v128_unpackhi64(t0, v128_unpacklo64(B1, B1)); \
D0 = v128_unpackhi64(D0, v128_unpacklo64(D1, D1)); \
D1 = v128_unpackhi64(D1, v128_unpacklo64(t1, t1)); \
} while ((void)0, 0)
#endif
@@ -182,64 +146,63 @@ static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
#include <immintrin.h>
#define rotr32(x) _mm256_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1))
#define rotr24(x) _mm256_shuffle_epi8(x, _mm256_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10, 3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10))
#define rotr16(x) _mm256_shuffle_epi8(x, _mm256_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9, 2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9))
#define rotr63(x) _mm256_xor_si256(_mm256_srli_epi64((x), 63), _mm256_add_epi64((x), (x)))
#define rotr32( x ) mm256_ror_64( x, 32 )
#define rotr24( x ) mm256_ror_64( x, 24 )
#define rotr16( x ) mm256_ror_64( x, 16 )
#define rotr63( x ) mm256_rol_64( x, 1 )
//#define rotr32(x) _mm256_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1))
//#define rotr24(x) _mm256_shuffle_epi8(x, _mm256_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10, 3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10))
//#define rotr16(x) _mm256_shuffle_epi8(x, _mm256_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9, 2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9))
//#define rotr63(x) _mm256_xor_si256(_mm256_srli_epi64((x), 63), _mm256_add_epi64((x), (x)))
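// The rewrite works because rotr63 is a rotate-left by 1: for any 64-bit x,
//    ror64( x, 63 ) == rol64( x, 1 ) == ( x << 1 ) | ( x >> 63 ).
// The retired macro computed the same value, since x + x == x << 1 and the
// two shifted halves occupy disjoint bits, so the xor behaves as an or:
//    _mm256_xor_si256( _mm256_srli_epi64( x, 63 ), _mm256_add_epi64( x, x ) )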
#define G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
do { \
__m256i ml = _mm256_mul_epu32(A0, B0); \
ml = _mm256_add_epi64(ml, ml); \
A0 = _mm256_add_epi64(A0, _mm256_add_epi64(B0, ml)); \
__m256i ml0, ml1; \
ml0 = _mm256_mul_epu32(A0, B0); \
ml1 = _mm256_mul_epu32(A1, B1); \
ml0 = _mm256_add_epi64(ml0, ml0); \
ml1 = _mm256_add_epi64(ml1, ml1); \
A0 = _mm256_add_epi64(A0, _mm256_add_epi64(B0, ml0)); \
A1 = _mm256_add_epi64(A1, _mm256_add_epi64(B1, ml1)); \
D0 = _mm256_xor_si256(D0, A0); \
D0 = rotr32(D0); \
\
ml = _mm256_mul_epu32(C0, D0); \
ml = _mm256_add_epi64(ml, ml); \
C0 = _mm256_add_epi64(C0, _mm256_add_epi64(D0, ml)); \
\
B0 = _mm256_xor_si256(B0, C0); \
B0 = rotr24(B0); \
\
ml = _mm256_mul_epu32(A1, B1); \
ml = _mm256_add_epi64(ml, ml); \
A1 = _mm256_add_epi64(A1, _mm256_add_epi64(B1, ml)); \
D1 = _mm256_xor_si256(D1, A1); \
D0 = rotr32(D0); \
D1 = rotr32(D1); \
\
ml = _mm256_mul_epu32(C1, D1); \
ml = _mm256_add_epi64(ml, ml); \
C1 = _mm256_add_epi64(C1, _mm256_add_epi64(D1, ml)); \
\
ml0 = _mm256_mul_epu32(C0, D0); \
ml1 = _mm256_mul_epu32(C1, D1); \
ml0 = _mm256_add_epi64(ml0, ml0); \
ml1 = _mm256_add_epi64(ml1, ml1); \
C0 = _mm256_add_epi64(C0, _mm256_add_epi64(D0, ml0)); \
C1 = _mm256_add_epi64(C1, _mm256_add_epi64(D1, ml1)); \
B0 = _mm256_xor_si256(B0, C0); \
B1 = _mm256_xor_si256(B1, C1); \
B0 = rotr24(B0); \
B1 = rotr24(B1); \
} while((void)0, 0);
#define G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
do { \
__m256i ml = _mm256_mul_epu32(A0, B0); \
ml = _mm256_add_epi64(ml, ml); \
A0 = _mm256_add_epi64(A0, _mm256_add_epi64(B0, ml)); \
__m256i ml0, ml1; \
ml0 = _mm256_mul_epu32(A0, B0); \
ml1 = _mm256_mul_epu32(A1, B1); \
ml0 = _mm256_add_epi64(ml0, ml0); \
ml1 = _mm256_add_epi64(ml1, ml1); \
A0 = _mm256_add_epi64(A0, _mm256_add_epi64(B0, ml0)); \
A1 = _mm256_add_epi64(A1, _mm256_add_epi64(B1, ml1)); \
D0 = _mm256_xor_si256(D0, A0); \
D0 = rotr16(D0); \
\
ml = _mm256_mul_epu32(C0, D0); \
ml = _mm256_add_epi64(ml, ml); \
C0 = _mm256_add_epi64(C0, _mm256_add_epi64(D0, ml)); \
B0 = _mm256_xor_si256(B0, C0); \
B0 = rotr63(B0); \
\
ml = _mm256_mul_epu32(A1, B1); \
ml = _mm256_add_epi64(ml, ml); \
A1 = _mm256_add_epi64(A1, _mm256_add_epi64(B1, ml)); \
D1 = _mm256_xor_si256(D1, A1); \
D0 = rotr16(D0); \
D1 = rotr16(D1); \
\
ml = _mm256_mul_epu32(C1, D1); \
ml = _mm256_add_epi64(ml, ml); \
C1 = _mm256_add_epi64(C1, _mm256_add_epi64(D1, ml)); \
ml0 = _mm256_mul_epu32(C0, D0); \
ml1 = _mm256_mul_epu32(C1, D1); \
ml0 = _mm256_add_epi64(ml0, ml0); \
ml1 = _mm256_add_epi64(ml1, ml1); \
C0 = _mm256_add_epi64(C0, _mm256_add_epi64(D0, ml0)); \
C1 = _mm256_add_epi64(C1, _mm256_add_epi64(D1, ml1)); \
B0 = _mm256_xor_si256(B0, C0); \
B1 = _mm256_xor_si256(B1, C1); \
B0 = rotr63(B0); \
B1 = rotr63(B1); \
} while((void)0, 0);
@@ -259,16 +222,14 @@ static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
__m256i tmp1 = _mm256_blend_epi32(B0, B1, 0xCC); \
__m256i tmp2 = _mm256_blend_epi32(B0, B1, 0x33); \
B1 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
B0 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
\
tmp1 = C0; \
B0 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
C0 = C1; \
C1 = tmp1; \
\
tmp1 = _mm256_blend_epi32(D0, D1, 0xCC); \
tmp2 = _mm256_blend_epi32(D0, D1, 0x33); \
D0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
C1 = tmp1; \
tmp1 = _mm256_blend_epi32(D0, D1, 0xCC); \
D1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
D0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
} while(0);
#define UNDIAGONALIZE_1(A0, B0, C0, D0, A1, B1, C1, D1) \
@@ -287,16 +248,14 @@ static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
__m256i tmp1 = _mm256_blend_epi32(B0, B1, 0xCC); \
__m256i tmp2 = _mm256_blend_epi32(B0, B1, 0x33); \
B0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
B1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
\
tmp1 = C0; \
B1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
C0 = C1; \
C1 = tmp1; \
\
tmp1 = _mm256_blend_epi32(D0, D1, 0x33); \
tmp2 = _mm256_blend_epi32(D0, D1, 0xCC); \
D0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
C1 = tmp1; \
tmp1 = _mm256_blend_epi32(D0, D1, 0x33); \
D1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2,3,0,1)); \
D0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2,3,0,1)); \
} while((void)0, 0);
#define BLAKE2_ROUND_1(A0, A1, B0, B1, C0, C1, D0, D1) \
@@ -331,9 +290,7 @@ static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
#include <immintrin.h>
#define ror64(x, n) _mm512_ror_epi64((x), (n))
static __m512i muladd(__m512i x, __m512i y)
static inline __m512i muladd(__m512i x, __m512i y)
{
__m512i z = _mm512_mul_epu32(x, y);
return _mm512_add_epi64(_mm512_add_epi64(x, y), _mm512_add_epi64(z, z));
@@ -347,8 +304,8 @@ static __m512i muladd(__m512i x, __m512i y)
D0 = _mm512_xor_si512(D0, A0); \
D1 = _mm512_xor_si512(D1, A1); \
\
D0 = ror64(D0, 32); \
D1 = ror64(D1, 32); \
D0 = _mm512_ror_epi64(D0, 32); \
D1 = _mm512_ror_epi64(D1, 32); \
\
C0 = muladd(C0, D0); \
C1 = muladd(C1, D1); \
@@ -356,8 +313,8 @@ static __m512i muladd(__m512i x, __m512i y)
B0 = _mm512_xor_si512(B0, C0); \
B1 = _mm512_xor_si512(B1, C1); \
\
B0 = ror64(B0, 24); \
B1 = ror64(B1, 24); \
B0 = _mm512_ror_epi64(B0, 24); \
B1 = _mm512_ror_epi64(B1, 24); \
} while ((void)0, 0)
#define G2(A0, B0, C0, D0, A1, B1, C1, D1) \
@@ -368,8 +325,8 @@ static __m512i muladd(__m512i x, __m512i y)
D0 = _mm512_xor_si512(D0, A0); \
D1 = _mm512_xor_si512(D1, A1); \
\
D0 = ror64(D0, 16); \
D1 = ror64(D1, 16); \
D0 = _mm512_ror_epi64(D0, 16); \
D1 = _mm512_ror_epi64(D1, 16); \
\
C0 = muladd(C0, D0); \
C1 = muladd(C1, D1); \
@@ -377,8 +334,8 @@ static __m512i muladd(__m512i x, __m512i y)
B0 = _mm512_xor_si512(B0, C0); \
B1 = _mm512_xor_si512(B1, C1); \
\
B0 = ror64(B0, 63); \
B1 = ror64(B1, 63); \
B0 = _mm512_ror_epi64(B0, 63); \
B1 = _mm512_ror_epi64(B1, 63); \
} while ((void)0, 0)
#define DIAGONALIZE(A0, B0, C0, D0, A1, B1, C1, D1) \
@@ -420,24 +377,23 @@ static __m512i muladd(__m512i x, __m512i y)
#define SWAP_HALVES(A0, A1) \
do { \
__m512i t0, t1; \
t0 = _mm512_shuffle_i64x2(A0, A1, _MM_SHUFFLE(1, 0, 1, 0)); \
t1 = _mm512_shuffle_i64x2(A0, A1, _MM_SHUFFLE(3, 2, 3, 2)); \
A0 = t0; \
A1 = t1; \
__m512i t; \
t = _mm512_shuffle_i64x2(A0, A1, _MM_SHUFFLE(1, 0, 1, 0)); \
A1 = _mm512_shuffle_i64x2(A0, A1, _MM_SHUFFLE(3, 2, 3, 2)); \
A0 = t; \
} while((void)0, 0)
#define SWAP_QUARTERS(A0, A1) \
do { \
SWAP_HALVES(A0, A1); \
A0 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A0); \
A1 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A1); \
A0 = _mm512_shuffle_i64x2( A0, A0, 0xd8 ); \
A1 = _mm512_shuffle_i64x2( A1, A1, 0xd8 ); \
} while((void)0, 0)
#define UNSWAP_QUARTERS(A0, A1) \
do { \
A0 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A0); \
A1 = _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 1, 4, 5, 2, 3, 6, 7), A1); \
A0 = _mm512_shuffle_i64x2( A0, A0, 0xd8 ); \
A1 = _mm512_shuffle_i64x2( A1, A1, 0xd8 ); \
SWAP_HALVES(A0, A1); \
} while((void)0, 0)
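// The immediate 0xd8 == _MM_SHUFFLE(3,1,2,0) swaps the two middle 128-bit
// lanes, reordering the 64-bit words to { 0, 1, 4, 5, 2, 3, 6, 7 } -- the
// same permutation the old permutexvar form produced, without needing a
// 64-byte index constant. Illustrative equivalence (a sketch):
//    __m512i p = _mm512_permutexvar_epi64(
//                    _mm512_setr_epi64( 0, 1, 4, 5, 2, 3, 6, 7 ), a );
//    __m512i s = _mm512_shuffle_i64x2( a, a, 0xd8 );   // p == s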
@@ -468,4 +424,5 @@ static __m512i muladd(__m512i x, __m512i y)
} while ((void)0, 0)
#endif /* __AVX512F__ */
#endif /* BLAKE_ROUND_MKA_OPT_H */


@@ -1,5 +1,5 @@
#include "blake-gate.h"
#include "blake-hash-4way.h"
#include "blake256-hash.h"
#include <string.h>
#include <stdint.h>
#include <memory.h>
@@ -13,7 +13,7 @@ void blakehash_4way(void *state, const void *input)
uint32_t vhash[8*4] __attribute__ ((aligned (64)));
blake256r14_4way_context ctx;
memcpy( &ctx, &blake_4w_ctx, sizeof ctx );
blake256r14_4way( &ctx, input + (64<<2), 16 );
blake256r14_4way_update( &ctx, input + (64<<2), 16 );
blake256r14_4way_close( &ctx, vhash );
dintrlv_4x32( state, state+32, state+64, state+96, vhash, 256 );
}
@@ -34,12 +34,12 @@ int scanhash_blake_4way( struct work *work, uint32_t max_nonce,
if (opt_benchmark)
HTarget = 0x7f;
mm128_bswap32_intrlv80_4x32( vdata, pdata );
v128_bswap32_intrlv80_4x32( vdata, pdata );
blake256r14_4way_init( &blake_4w_ctx );
blake256r14_4way( &blake_4w_ctx, vdata, 64 );
blake256r14_4way_update( &blake_4w_ctx, vdata, 64 );
do {
*noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) );
*noncev = v128_bswap32( _mm_set_epi32( n+3, n+2, n+1, n ) );
blakehash_4way( hash, vdata );
@@ -48,7 +48,7 @@ int scanhash_blake_4way( struct work *work, uint32_t max_nonce,
if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
submit_solution( work, hash+(i<<3), mythr );
}
n += 4;
@@ -107,7 +107,7 @@ int scanhash_blake_8way( struct work *work, uint32_t max_nonce,
if ( (hash+i)[7] <= HTarget && fulltest( hash+i, ptarget ) )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
submit_solution( work, hash+(i<<3), mythr );
}
n += 8;


@@ -1,18 +1,8 @@
#include "blake-gate.h"
int64_t blake_get_max64 ()
{
return 0x7ffffLL;
}
bool register_blake_algo( algo_gate_t* gate )
{
gate->optimizations = AVX2_OPT;
gate->get_max64 = (void*)&blake_get_max64;
//#if defined (__AVX2__) && defined (FOUR_WAY)
// gate->optimizations = SSE2_OPT | AVX2_OPT;
// gate->scanhash = (void*)&scanhash_blake_8way;
// gate->hash = (void*)&blakehash_8way;
#if defined(BLAKE_4WAY)
four_way_not_tested();
gate->scanhash = (void*)&scanhash_blake_4way;


@@ -1,144 +0,0 @@
/* $Id: sph_blake.h 252 2011-06-07 17:55:14Z tp $ */
/**
* BLAKE interface. BLAKE is a family of functions which differ by their
* output size; this implementation defines BLAKE for output sizes 224,
* 256, 384 and 512 bits. This implementation conforms to the "third
* round" specification.
*
* ==========================(LICENSE BEGIN)============================
*
* Copyright (c) 2007-2010 Projet RNRT SAPHIR
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* ===========================(LICENSE END)=============================
*
* @file sph_blake.h
* @author Thomas Pornin <thomas.pornin@cryptolog.com>
*/
#ifndef __BLAKE_HASH_4WAY__
#define __BLAKE_HASH_4WAY__ 1
//#ifdef __SSE4_2__
#ifdef __cplusplus
extern "C"{
#endif
#include <stddef.h>
#include "algo/sha/sph_types.h"
#include "simd-utils.h"
#define SPH_SIZE_blake256 256
#define SPH_SIZE_blake512 512
// With SSE4.2 only Blake-256 4 way is available.
// With AVX2 Blake-256 8way & Blake-512 4 way are also available.
// Blake-256 4 way
typedef struct {
unsigned char buf[64<<2];
uint32_t H[8<<2];
uint32_t S[4<<2];
// __m128i buf[16] __attribute__ ((aligned (64)));
// __m128i H[8];
// __m128i S[4];
size_t ptr;
uint32_t T0, T1;
int rounds; // 14 for blake, 8 for blakecoin & vanilla
} blake_4way_small_context __attribute__ ((aligned (64)));
// Default 14 rounds
typedef blake_4way_small_context blake256_4way_context;
void blake256_4way_init(void *ctx);
void blake256_4way(void *ctx, const void *data, size_t len);
void blake256_4way_close(void *ctx, void *dst);
// 14 rounds, blake, decred
typedef blake_4way_small_context blake256r14_4way_context;
void blake256r14_4way_init(void *cc);
void blake256r14_4way(void *cc, const void *data, size_t len);
void blake256r14_4way_close(void *cc, void *dst);
// 8 rounds, blakecoin, vanilla
typedef blake_4way_small_context blake256r8_4way_context;
void blake256r8_4way_init(void *cc);
void blake256r8_4way(void *cc, const void *data, size_t len);
void blake256r8_4way_close(void *cc, void *dst);
#ifdef __AVX2__
// Blake-256 8 way
typedef struct {
__m256i buf[16] __attribute__ ((aligned (64)));
__m256i H[8];
__m256i S[4];
size_t ptr;
sph_u32 T0, T1;
int rounds; // 14 for blake, 8 for blakecoin & vanilla
} blake_8way_small_context;
// Default 14 rounds
typedef blake_8way_small_context blake256_8way_context;
void blake256_8way_init(void *cc);
void blake256_8way(void *cc, const void *data, size_t len);
void blake256_8way_close(void *cc, void *dst);
// 14 rounds, blake, decred
typedef blake_8way_small_context blake256r14_8way_context;
void blake256r14_8way_init(void *cc);
void blake256r14_8way(void *cc, const void *data, size_t len);
void blake256r14_8way_close(void *cc, void *dst);
// 8 rounds, blakecoin, vanilla
typedef blake_8way_small_context blake256r8_8way_context;
void blake256r8_8way_init(void *cc);
void blake256r8_8way(void *cc, const void *data, size_t len);
void blake256r8_8way_close(void *cc, void *dst);
// Blake-512 4 way
typedef struct {
__m256i buf[16] __attribute__ ((aligned (64)));
__m256i H[8];
__m256i S[4];
size_t ptr;
sph_u64 T0, T1;
} blake_4way_big_context;
typedef blake_4way_big_context blake512_4way_context;
void blake512_4way_init(void *cc);
void blake512_4way(void *cc, const void *data, size_t len);
void blake512_4way_close(void *cc, void *dst);
void blake512_4way_addbits_and_close(
void *cc, unsigned ub, unsigned n, void *dst);
#endif // AVX2
#ifdef __cplusplus
}
#endif
#endif // __BLAKE_HASH_4WAY__

File diff suppressed because it is too large.


@@ -1,322 +0,0 @@
// convert blake256 32 bit to use 64 bit with serial vectoring
//
// cut calls to GS in half
//
// combine V
// v0 = {V0,V1}
// v1 = {V2,V3}
// v2 = {V4,V5}
// v3 = {V6,V7}
// v4 = {V8,V9}
// v5 = {VA,VB}
// v6 = {VC,VD}
// v7 = {VE,VF}
//
// v6x = {VD,VC} swap(VC,VD) swap(v6)
// v7x = {VF,VE} swap(VE,VF) swap(v7)
//
// V0 = v1v0
// V1 = v3v2
// V2 = v5v4
// V3 = v7v6
// V4 = v9v8
// V5 = vbva
// V6 = vdvc
// V7 = vfve
//
// The rotate in ROUND is to effect straddle and unstraddle for the third
// and fourth iterations of GS.
// It concatenates 2 contiguous 256 bit vectors and extracts the middle
// 256 bits. After the transform they must be restored with only the
// chosen bits modified in the original 2 vectors.
// ror1x128 achieves this by putting the chosen bits in arg1, the "low"
// 256 bit vector, and saving the untouched bits temporarily in arg0, the
// "high" 256 bit vector. Simply reverse the process to restore the data
// to its original positions.
// Use standard 4way when AVX2 is not available; use x2 mode with AVX2.
//
// Data is organised the same as 32 bit 4 way, in effect serial vectoring
// on top of parallel vectoring. Same data in the same place just taking
// two chunks at a time.
//
// Transparent to the user: x2 mode is used when AVX2 is detected.
// Use the existing 4way context but revert to scalar types.
// Same interleave function (128 bit) or x2 with 256 bit?
// User transparency would have to apply to interleave as well.
//
// Use common 4way update and close
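// A hedged sketch of the 128-bit rotate described above, treating hi:lo as
// one 512-bit value rotated right by 128 bits (argument order and naming
// are guesses; the real mm256_ror1x128_512 macro may differ):
//    static inline void ror1x128_512( __m256i *hi, __m256i *lo )
//    {
//       __m256i t = _mm256_permute2x128_si256( *hi, *lo, 0x21 );
//       *lo       = _mm256_permute2x128_si256( *hi, *lo, 0x03 );
//       *hi       = t;
//    }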
/*
typedef struct {
unsigned char buf[64<<2];
uint32_t H[8<<2];
uint32_t S[4<<2];
size_t ptr;
uint32_t T0, T1;
int rounds; // 14 for blake, 8 for blakecoin & vanilla
} blakex2_4way_small_context __attribute__ ((aligned (64)));
*/
static void
blake32x2_4way_init( blake_4way_small_context *ctx, const uint32_t *iv,
const uint32_t *salt, int rounds )
{
casti_m128i( ctx->H, 0 ) = _mm_set1_epi32( iv[0] );
casti_m128i( ctx->H, 1 ) = _mm_set1_epi32( iv[1] );
casti_m128i( ctx->H, 2 ) = _mm_set1_epi32( iv[2] );
casti_m128i( ctx->H, 3 ) = _mm_set1_epi32( iv[3] );
casti_m128i( ctx->H, 4 ) = _mm_set1_epi32( iv[4] );
casti_m128i( ctx->H, 5 ) = _mm_set1_epi32( iv[5] );
casti_m128i( ctx->H, 6 ) = _mm_set1_epi32( iv[6] );
casti_m128i( ctx->H, 7 ) = _mm_set1_epi32( iv[7] );
casti_m128i( ctx->S, 0 ) = m128_zero;
casti_m128i( ctx->S, 1 ) = m128_zero;
casti_m128i( ctx->S, 2 ) = m128_zero;
casti_m128i( ctx->S, 3 ) = m128_zero;
/*
sc->S[0] = _mm_set1_epi32( salt[0] );
sc->S[1] = _mm_set1_epi32( salt[1] );
sc->S[2] = _mm_set1_epi32( salt[2] );
sc->S[3] = _mm_set1_epi32( salt[3] );
*/
ctx->T0 = ctx->T1 = 0;
ctx->ptr = 0;
ctx->rounds = rounds;
}
static void
blake32x2_4way( blake_4way_small_context *ctx, const void *data, size_t len )
{
__m256i *buf = (__m256i*)ctx->buf;
size_t bptr = ctx->ptr << 2;
size_t vptr = ctx->ptr >> 3;
size_t blen = len << 2;
// unsigned char *buf = ctx->buf;
// size_t ptr = ctx->ptr<<4; // repurposed
DECL_STATE32x2_4WAY
// buf = sc->buf;
// ptr = sc->ptr;
// adjust len for use with ptr, clen, all absolute bytes.
// int blen = len<<2;
if ( blen < (sizeof ctx->buf) - bptr )
{
memcpy( buf + vptr, data, blen );
bptr += blen;
ctx->ptr = bptr >> 2;
return;
}
READ_STATE32x2_4WAY( ctx );
while ( blen > 0 )
{
size_t clen;
clen = ( sizeof ctx->buf ) - bptr;
if ( clen > blen )
clen = blen;
memcpy( buf + vptr, data, clen );
bptr += clen;
vptr = bptr >> 5;
data = (const unsigned char *)data + clen;
blen -= clen;
if ( bptr == sizeof ctx->buf )
{
if ( ( T0 = T0 + 512 ) < 512 ) // not needed, will never rollover
T1 += 1;
COMPRESS32x2_4WAY( ctx->rounds );
bptr = vptr = 0;
}
}
WRITE_STATE32x2_4WAY( ctx );
ctx->ptr = bptr >> 2;
}
static void
blake32x2_4way_close( blake_4way_small_context *ctx, void *dst )
{
__m256i buf[8] __attribute__ ((aligned (64)));
size_t ptr = ctx->ptr;
size_t vptr = ctx->ptr>>2;
unsigned bit_len = ( (unsigned)ptr << 3 ); // one lane
uint32_t th = ctx->T1;
uint32_t tl = ctx->T0 + bit_len;
if ( ptr == 0 )
{
ctx->T0 = 0xFFFFFE00UL;
ctx->T1 = 0xFFFFFFFFUL;
}
else if ( ctx->T0 == 0 )
{
ctx->T0 = 0xFFFFFE00UL + bit_len;
ctx->T1 -= 1;
}
else
ctx->T0 -= 512 - bit_len;
// memset doesn't do ints
buf[ vptr ] = _mm256_set_epi32( 0,0,0,0, 0x80, 0x80, 0x80, 0x80 );
if ( vptr < 5 )
{
memset_zero_256( buf + vptr + 1, 6 - vptr );
buf[ 6 ] = _mm256_or_si256( buf[ 6 ], _mm256_set_epi32(
0x01000000UL,0x01000000UL,0x01000000UL,0x01000000UL, 0,0,0,0 ) );
buf[ 7 ] = mm256_bswap_32( _mm256_set_epi32( tl,tl,tl,tl,
th,th,th,th ) );
blake32x2_4way( ctx, buf + vptr, 64 - ptr );
}
else
{
memset_zero_256( buf + vptr + 1, 7 - vptr );
blake32x2_4way( ctx, buf + vptr, 64 - ptr );
ctx->T0 = 0xFFFFFE00UL;
ctx->T1 = 0xFFFFFFFFUL;
buf[ 6 ] = _mm256_set_epi32( 0,0,0,0,
0x01000000UL,0x01000000UL,0x01000000UL,0x01000000UL );
buf[ 7 ] = mm256_bswap_32( _mm256_set_epi32( tl, tl, tl, tl,
th, th, th, th ) );
blake32x2_4way( ctx, buf, 64 );
}
casti_m256i( dst, 0 ) = mm256_bswap_32( casti_m256i( ctx->H, 0 ) );
casti_m256i( dst, 1 ) = mm256_bswap_32( casti_m256i( ctx->H, 1 ) );
casti_m256i( dst, 2 ) = mm256_bswap_32( casti_m256i( ctx->H, 2 ) );
casti_m256i( dst, 3 ) = mm256_bswap_32( casti_m256i( ctx->H, 3 ) );
}
#define DECL_STATE32x2_4WAY \
__m256i H0, H1, H2, H3; \
__m256i S0, S1; \
uint32_t T0, T1;
#define READ_STATE32x2_4WAY(state) do \
{ \
H0 = casti_m256i( state->H, 0 ); \
H1 = casti_m256i( state->H, 1 ); \
H2 = casti_m256i( state->H, 2 ); \
H3 = casti_m256i( state->H, 3 ); \
S0 = casti_m256i( state->S, 0 ); \
S1 = casti_m256i( state->S, 1 ); \
T0 = state->T0; \
T1 = state->T1; \
} while (0)
#define WRITE_STATE32x2_4WAY(state) do { \
casti_m256i( state->H, 0 ) = H0; \
casti_m256i( state->H, 1 ) = H1; \
casti_m256i( state->H, 2 ) = H2; \
casti_m256i( state->H, 3 ) = H3; \
casti_m256i( state->S, 0 ) = S0; \
casti_m256i( state->S, 1 ) = S1; \
state->T0 = T0; \
state->T1 = T1; \
} while (0)
#define GSx2_4WAY( m0, m1, m2, m3, c0, c1, c2, c3, a, b, c, d ) do \
{ \
a = _mm256_add_epi32( _mm256_add_epi32( _mm256_xor_si256( \
_mm256_set_epi32( c1,c3, c1,c3, c1,c3, c1,c3 ), \
_mm256_set_epi32( m0,m2, m0,m2, m0,m2, m0,m2 ) ), b ), a ); \
d = mm256_ror_32( _mm256_xor_si256( d, a ), 16 ); \
c = _mm256_add_epi32( c, d ); \
b = mm256_ror_32( _mm256_xor_si256( b, c ), 12 ); \
a = _mm256_add_epi32( _mm256_add_epi32( _mm256_xor_si256( \
_mm256_set_epi32( c0,c2, c0,c2, c0,c2, c0,c2 ), \
_mm256_set_epi32( m1,m3, m1,m3, m1,m3, m1,m3 ) ), b ), a ); \
d = mm256_ror_32( _mm256_xor_si256( d, a ), 8 ); \
c = _mm256_add_epi32( c, d ); \
b = mm256_ror_32( _mm256_xor_si256( b, c ), 7 ); \
} while (0)
#define ROUND_Sx2_4WAY(r) do \
{ \
GSx2_4WAY( Mx(r, 0), Mx(r, 1), Mx(r, 2), Mx(r, 3), \
CSx(r, 0), CSx(r, 1), CSx(r, 2), CSx(r, 3), V0, V2, V4, V6 ); \
GSx2_4WAY( Mx(r, 4), Mx(r, 5), Mx(r, 6), Mx(r, 7), \
CSx(r, 4), CSx(r, 5), CSx(r, 6), CSx(r, 7), V1, V3, V5, V7 ); \
mm256_ror1x128_512( V3, V2 ); \
mm256_ror1x128_512( V6, V7 ); \
GSx2_4WAY( Mx(r, 8), Mx(r, 9), Mx(r, A), Mx(r, B), \
CSx(r, 8), CSx(r, 9), CSx(r, A), CSx(r, B), V0, V2, V5, V7 ); \
GSx2_4WAY( Mx(r, C), Mx(r, D), Mx(r, E), Mx(r, F), \
CSx(r, C), CSx(r, D), CSx(r, E), CSx(r, F), V1, V3, V4, V6 ); \
mm256_rol1x128_512( V2, V3 ); \
mm256_rol1x128_512( V7, V6 ); \
} while (0)
#define COMPRESS32x2_4WAY( rounds ) do \
{ \
__m256i M0, M1, M2, M3, M4, M5, M6, M7; \
__m256i V0, V1, V2, V3, V4, V5, V6, V7; \
unsigned r; \
V0 = H0; \
V1 = H1; \
V2 = H2; \
V3 = H3; \
V4 = _mm256_xor_si256( S0, _mm256_set_epi32( CS1, CS1, CS1, CS1, \
CS0, CS0, CS0, CS0 ) ); \
V5 = _mm256_xor_si256( S1, _mm256_set_epi32( CS3, CS3, CS3, CS3, \
CS2, CS2, CS2, CS2 ) ); \
V6 = _mm256_xor_si256( _mm256_set1_epi32( T0 ), \
_mm256_set_epi32( CS5, CS5, CS5, CS5, \
CS4, CS4, CS4, CS4 ) ); \
V7 = _mm256_xor_si256( _mm256_set1_epi32( T1 ), \
_mm256_set_epi32( CS7, CS7, CS7, CS7, \
CS6, CS6, CS6, CS6 ) ); \
M0 = mm256_bswap_32( buf[ 0] ); \
M1 = mm256_bswap_32( buf[ 1] ); \
M2 = mm256_bswap_32( buf[ 2] ); \
M3 = mm256_bswap_32( buf[ 3] ); \
M4 = mm256_bswap_32( buf[ 4] ); \
M5 = mm256_bswap_32( buf[ 5] ); \
M6 = mm256_bswap_32( buf[ 6] ); \
M7 = mm256_bswap_32( buf[ 7] ); \
ROUND_Sx2_4WAY(0); \
ROUND_Sx2_4WAY(1); \
ROUND_Sx2_4WAY(2); \
ROUND_Sx2_4WAY(3); \
ROUND_Sx2_4WAY(4); \
ROUND_Sx2_4WAY(5); \
ROUND_Sx2_4WAY(6); \
ROUND_Sx2_4WAY(7); \
if (rounds == 14) \
{ \
ROUND_Sx2_4WAY(8); \
ROUND_Sx2_4WAY(9); \
ROUND_Sx2_4WAY(0); \
ROUND_Sx2_4WAY(1); \
ROUND_Sx2_4WAY(2); \
ROUND_Sx2_4WAY(3); \
} \
H0 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( V4, V0 ), S0 ), H0 ); \
H1 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( V5, V1 ), S1 ), H1 ); \
H2 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( V6, V2 ), S0 ), H2 ); \
H3 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( V7, V3 ), S1 ), H3 ); \
} while (0)

algo/blake/blake256-hash.c  (new file, 3028 lines)

File diff suppressed because it is too large.

algo/blake/blake256-hash.h  (new file, 210 lines)

@@ -0,0 +1,210 @@
#ifndef BLAKE256_HASH__
#define BLAKE256_HASH__ 1
#include <stddef.h>
#include "simd-utils.h"
#include "sph_blake.h"
////////////////////////////
//
// Blake-256 1 way SSE2
//
//#define blake256_context sph_blake256_context
#define blake256_init sph_blake256_init
#define blake256_update sph_blake256
#define blake256_update_le sph_blake256_update_le
#define blake256_close sph_blake256_close
//TODO decouple from SPH
typedef struct
{
unsigned char buf[64];
size_t ptr;
uint32_t H[8];
uint32_t S[4];
uint32_t T0, T1;
} blake256_context __attribute__ ((aligned (32)));
void blake256_transform_le( uint32_t *H, const uint32_t *buf,
const uint32_t T0, const uint32_t T1, int rounds );
/*
void blake256_init( blake256_context *sc );
void blake256_update( blake256_context *sc, const void *data, size_t len );
void blake256_close( blake256_context *sc, void *dst );
void blake256_full( blake256_context *sc, void *dst, const void *data,
size_t len );
*/
//////////////////////////////////
//
// Blake-256 4 way SSE2, NEON
typedef struct
{
unsigned char buf[64<<2];
uint32_t H[8<<2];
size_t ptr;
uint32_t T0, T1;
int rounds; // 14 for blake, 8 for blakecoin & vanilla
} blake_4x32_small_context __attribute__ ((aligned (64)));
// Default, 14 rounds
typedef blake_4x32_small_context blake256_4x32_context;
void blake256_4x32_init(void *ctx);
void blake256_4x32_update(void *ctx, const void *data, size_t len);
void blake256_4x32_close(void *ctx, void *dst);
// 14 rounds
typedef blake_4x32_small_context blake256r14_4x32_context;
void blake256r14_4x32_init(void *cc);
void blake256r14_4x32_update(void *cc, const void *data, size_t len);
void blake256r14_4x32_close(void *cc, void *dst);
// 8 rounds, blakecoin, vanilla
typedef blake_4x32_small_context blake256r8_4x32_context;
void blake256r8_4x32_init(void *cc);
void blake256r8_4x32_update(void *cc, const void *data, size_t len);
void blake256r8_4x32_close(void *cc, void *dst);
void blake256_4x32_round0_prehash_le( void *midstate, const void *midhash,
void *data );
void blake256_4x32_final_rounds_le( void *final_hash, const void *midstate,
const void *midhash, const void *data, const int rounds );
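// A hedged sketch of how a scanhash loop might use this split (buffer names
// are illustrative; the exact contract of the prehash/final-rounds pair is
// an assumption): round 0 is computed once per work unit, then only the
// remaining rounds run per nonce.
//    blake256_4x32_round0_prehash_le( midstate, midhash, vdata );
//    blake256_4x32_final_rounds_le( hash, midstate, midhash, vdata, 14 );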
#define blake_4way_small_context blake256_4x32_context
#define blake256_4way_context blake256_4x32_context
#define blake256_4way_init blake256_4x32_init
#define blake256_4way_update blake256_4x32_update
#define blake256_4way_close blake256_4x32_close
#define blake256_4way_update_le blake256_4x32_update_le
#define blake256_4way_close_le blake256_4x32_close_le
#define blake256_4way_round0_prehash_le blake256_4x32_round0_prehash_le
#define blake256_4way_final_rounds_le blake256_4x32_final_rounds_le
#define blake256r14_4way_context blake256r14_4x32_context
#define blake256r14_4way_init blake256r14_4x32_init
#define blake256r14_4way_update blake256r14_4x32_update
#define blake256r14_4way_close blake256r14_4x32_close
#define blake256r8_4way_context blake256r8_4x32_context
#define blake256r8_4way_init blake256r8_4x32_init
#define blake256r8_4way_update blake256r8_4x32_update
#define blake256r8_4way_close blake256r8_4x32_close
#ifdef __AVX2__
//////////////////////////////
//
// Blake-256 8 way AVX2
typedef struct
{
__m256i buf[16] __attribute__ ((aligned (64)));
__m256i H[8];
size_t ptr;
uint32_t T0, T1;
int rounds; // 14 for blake, 8 for blakecoin & vanilla
} blake_8way_small_context;
// Default 14 rounds
typedef blake_8way_small_context blake256_8way_context;
void blake256_8way_init(void *cc);
void blake256_8way_update(void *cc, const void *data, size_t len);
void blake256_8way_close(void *cc, void *dst);
void blake256_8way_update_le(void *cc, const void *data, size_t len);
void blake256_8way_close_le(void *cc, void *dst);
void blake256_8way_round0_prehash_le( void *midstate, const void *midhash,
void *data );
void blake256_8way_final_rounds_le( void *final_hash, const void *midstate,
const void *midhash, const void *data, const int rounds );
// 14 rounds, blake, decred
typedef blake_8way_small_context blake256r14_8way_context;
void blake256r14_8way_init(void *cc);
void blake256r14_8way_update(void *cc, const void *data, size_t len);
void blake256r14_8way_close(void *cc, void *dst);
// 8 rounds, blakecoin, vanilla
typedef blake_8way_small_context blake256r8_8way_context;
void blake256r8_8way_init(void *cc);
void blake256r8_8way_update(void *cc, const void *data, size_t len);
void blake256r8_8way_close(void *cc, void *dst);
#define blake_8x32_small_context blake256_8way_context
#define blake_8x32_init blake256_8way_init
#define blake_8x32_update blake256_8way_update
#define blake_8x32_close blake256_8way_close
#define blake_8x32_update_le blake256_8way_update_le
#define blake_8x32_close_le blake256_8way_close_le
#define blake_8x32_round0_prehash_le blake256_8way_round0_prehash_le
#define blake_8x32_final_rounds_le blake256_8way_final_rounds_le
#define blake256r14_8x32_context blake256r14_8way_context
#define blake256r14_8x32_init blake256r14_8way_init
#define blake256r14_8x32_update blake256r14_8way_update
#define blake256r14_8x32_close blake256r14_8way_close
#define blake256r8_8x32_context blake256r8_8way_context
#define blake256r8_8x32_init blake256r8_8way_init
#define blake256r8_8x32_update blake256r8_8way_update
#define blake256r8_8x32_close blake256r8_8way_close
#if defined(SIMD512)
///////////////////////////////////
//
// Blake-256 16 way AVX512
typedef struct
{
__m512i buf[16];
__m512i H[8];
size_t ptr;
uint32_t T0, T1;
int rounds; // 14 for blake, 8 for blakecoin & vanilla
} blake_16way_small_context __attribute__ ((aligned (128)));
// Default 14 rounds
typedef blake_16way_small_context blake256_16way_context;
void blake256_16way_init(void *cc);
void blake256_16way_update(void *cc, const void *data, size_t len);
void blake256_16way_close(void *cc, void *dst);
// Expects data in little endian order, no byte swap needed
void blake256_16way_update_le(void *cc, const void *data, size_t len);
void blake256_16way_close_le(void *cc, void *dst);
void blake256_16way_round0_prehash_le( void *midstate, const void *midhash,
void *data );
void blake256_16way_final_rounds_le( void *final_hash, const void *midstate,
const void *midhash, const void *data, const int rounds );
// 14 rounds, blake, decred
typedef blake_16way_small_context blake256r14_16way_context;
void blake256r14_16way_init(void *cc);
void blake256r14_16way_update(void *cc, const void *data, size_t len);
void blake256r14_16way_close(void *cc, void *dst);
// 8 rounds, blakecoin, vanilla
typedef blake_16way_small_context blake256r8_16way_context;
void blake256r8_16way_init(void *cc);
void blake256r8_16way_update(void *cc, const void *data, size_t len);
void blake256r8_16way_close(void *cc, void *dst);
#define blake_16x32_small_context blake256_16way_context
#define blake_16x32_init blake256_16way_init
#define blake_16x32_update blake256_16way_update
#define blake_16x32_close blake256_16way_close
#define blake_16x32_update_le blake256_16way_update_le
#define blake_16x32_close_le blake256_16way_close_le
#define blake_16x32_round0_prehash_le blake256_16way_round0_prehash_le
#define blake_16x32_final_rounds_le blake256_16way_final_rounds_le
#define blake256r14_16x32_context blake256r14_16way_context
#define blake256r14_16x32_init blake256r14_16way_init
#define blake256r14_16x32_update blake256r14_16way_update
#define blake256r14_16x32_close blake256r14_16way_close
#define blake256r8_16x32_context blake256r8_16way_context
#define blake256r8_16x32_init blake256r8_16way_init
#define blake256r8_16x32_update blake256r8_16way_update
#define blake256r8_16x32_close blake256r8_16way_close
#endif // AVX512
#endif // AVX2
#endif // BLAKE256_HASH__

algo/blake/blake2b-hash.c  (new file, 548 lines)

@@ -0,0 +1,548 @@
/*
* Copyright 2009 Colin Percival, 2014 savale
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*/
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include "blake2b-hash.h"
#if defined(__AVX2__)
static const uint8_t sigma[12][16] =
{
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
};
#define Z00 0
#define Z01 1
#define Z02 2
#define Z03 3
#define Z04 4
#define Z05 5
#define Z06 6
#define Z07 7
#define Z08 8
#define Z09 9
#define Z0A A
#define Z0B B
#define Z0C C
#define Z0D D
#define Z0E E
#define Z0F F
#define Z10 E
#define Z11 A
#define Z12 4
#define Z13 8
#define Z14 9
#define Z15 F
#define Z16 D
#define Z17 6
#define Z18 1
#define Z19 C
#define Z1A 0
#define Z1B 2
#define Z1C B
#define Z1D 7
#define Z1E 5
#define Z1F 3
#define Z20 B
#define Z21 8
#define Z22 C
#define Z23 0
#define Z24 5
#define Z25 2
#define Z26 F
#define Z27 D
#define Z28 A
#define Z29 E
#define Z2A 3
#define Z2B 6
#define Z2C 7
#define Z2D 1
#define Z2E 9
#define Z2F 4
#define Z30 7
#define Z31 9
#define Z32 3
#define Z33 1
#define Z34 D
#define Z35 C
#define Z36 B
#define Z37 E
#define Z38 2
#define Z39 6
#define Z3A 5
#define Z3B A
#define Z3C 4
#define Z3D 0
#define Z3E F
#define Z3F 8
#define Z40 9
#define Z41 0
#define Z42 5
#define Z43 7
#define Z44 2
#define Z45 4
#define Z46 A
#define Z47 F
#define Z48 E
#define Z49 1
#define Z4A B
#define Z4B C
#define Z4C 6
#define Z4D 8
#define Z4E 3
#define Z4F D
#define Z50 2
#define Z51 C
#define Z52 6
#define Z53 A
#define Z54 0
#define Z55 B
#define Z56 8
#define Z57 3
#define Z58 4
#define Z59 D
#define Z5A 7
#define Z5B 5
#define Z5C F
#define Z5D E
#define Z5E 1
#define Z5F 9
#define Z60 C
#define Z61 5
#define Z62 1
#define Z63 F
#define Z64 E
#define Z65 D
#define Z66 4
#define Z67 A
#define Z68 0
#define Z69 7
#define Z6A 6
#define Z6B 3
#define Z6C 9
#define Z6D 2
#define Z6E 8
#define Z6F B
#define Z70 D
#define Z71 B
#define Z72 7
#define Z73 E
#define Z74 C
#define Z75 1
#define Z76 3
#define Z77 9
#define Z78 5
#define Z79 0
#define Z7A F
#define Z7B 4
#define Z7C 8
#define Z7D 6
#define Z7E 2
#define Z7F A
#define Z80 6
#define Z81 F
#define Z82 E
#define Z83 9
#define Z84 B
#define Z85 3
#define Z86 0
#define Z87 8
#define Z88 C
#define Z89 2
#define Z8A D
#define Z8B 7
#define Z8C 1
#define Z8D 4
#define Z8E A
#define Z8F 5
#define Z90 A
#define Z91 2
#define Z92 8
#define Z93 4
#define Z94 7
#define Z95 6
#define Z96 1
#define Z97 5
#define Z98 F
#define Z99 B
#define Z9A 9
#define Z9B E
#define Z9C 3
#define Z9D C
#define Z9E D
#define Z9F 0
#define Mx(r, i) Mx_(Z ## r ## i)
#define Mx_(n) Mx__(n)
#define Mx__(n) M ## n
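// Token-pasting example: Mx(1, 2) -> Mx_(Z12) -> Mx__(4) -> M4, i.e. the
// name of the message word for round 1, slot 2, matching sigma[1][2] == 4.
// The Z tables above encode the same sigma schedule as single hex digits.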
#if defined(SIMD512)
#define B2B8W_G(a, b, c, d, x, y) \
{ \
v[a] = _mm512_add_epi64( _mm512_add_epi64( v[a], v[b] ), x ); \
v[d] = mm512_ror_64( _mm512_xor_si512( v[d], v[a] ), 32 ); \
v[c] = _mm512_add_epi64( v[c], v[d] ); \
v[b] = mm512_ror_64( _mm512_xor_si512( v[b], v[c] ), 24 ); \
v[a] = _mm512_add_epi64( _mm512_add_epi64( v[a], v[b] ), y ); \
v[d] = mm512_ror_64( _mm512_xor_si512( v[d], v[a] ), 16 ); \
v[c] = _mm512_add_epi64( v[c], v[d] ); \
v[b] = mm512_ror_64( _mm512_xor_si512( v[b], v[c] ), 63 ); \
}
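// For reference, the scalar Blake2b G function that each 64-bit lane of
// B2B8W_G reproduces (a sketch, not part of this file):
//    static inline uint64_t ror64s( uint64_t x, int n )
//    {  return ( x >> n ) | ( x << ( 64 - n ) );  }
//    static inline void b2b_g( uint64_t v[16], int a, int b, int c, int d,
//                              uint64_t x, uint64_t y )
//    {
//       v[a] = v[a] + v[b] + x;   v[d] = ror64s( v[d] ^ v[a], 32 );
//       v[c] = v[c] + v[d];       v[b] = ror64s( v[b] ^ v[c], 24 );
//       v[a] = v[a] + v[b] + y;   v[d] = ror64s( v[d] ^ v[a], 16 );
//       v[c] = v[c] + v[d];       v[b] = ror64s( v[b] ^ v[c], 63 );
//    }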
static void blake2b_8x64_compress( blake2b_8x64_ctx *ctx, int last )
{
__m512i v[16], m[16];
v[ 0] = ctx->h[0];
v[ 1] = ctx->h[1];
v[ 2] = ctx->h[2];
v[ 3] = ctx->h[3];
v[ 4] = ctx->h[4];
v[ 5] = ctx->h[5];
v[ 6] = ctx->h[6];
v[ 7] = ctx->h[7];
v[ 8] = v512_64( 0x6A09E667F3BCC908 );
v[ 9] = v512_64( 0xBB67AE8584CAA73B );
v[10] = v512_64( 0x3C6EF372FE94F82B );
v[11] = v512_64( 0xA54FF53A5F1D36F1 );
v[12] = v512_64( 0x510E527FADE682D1 );
v[13] = v512_64( 0x9B05688C2B3E6C1F );
v[14] = v512_64( 0x1F83D9ABFB41BD6B );
v[15] = v512_64( 0x5BE0CD19137E2179 );
v[12] = _mm512_xor_si512( v[12], v512_64( ctx->t[0] ) );
v[13] = _mm512_xor_si512( v[13], v512_64( ctx->t[1] ) );
if ( last )
v[14] = mm512_not( v[14] );
m[ 0] = ctx->b[ 0];
m[ 1] = ctx->b[ 1];
m[ 2] = ctx->b[ 2];
m[ 3] = ctx->b[ 3];
m[ 4] = ctx->b[ 4];
m[ 5] = ctx->b[ 5];
m[ 6] = ctx->b[ 6];
m[ 7] = ctx->b[ 7];
m[ 8] = ctx->b[ 8];
m[ 9] = ctx->b[ 9];
m[10] = ctx->b[10];
m[11] = ctx->b[11];
m[12] = ctx->b[12];
m[13] = ctx->b[13];
m[14] = ctx->b[14];
m[15] = ctx->b[15];
for ( int i = 0; i < 12; i++ )
{
B2B8W_G( 0, 4, 8, 12, m[ sigma[i][ 0] ], m[ sigma[i][ 1] ] );
B2B8W_G( 1, 5, 9, 13, m[ sigma[i][ 2] ], m[ sigma[i][ 3] ] );
B2B8W_G( 2, 6, 10, 14, m[ sigma[i][ 4] ], m[ sigma[i][ 5] ] );
B2B8W_G( 3, 7, 11, 15, m[ sigma[i][ 6] ], m[ sigma[i][ 7] ] );
B2B8W_G( 0, 5, 10, 15, m[ sigma[i][ 8] ], m[ sigma[i][ 9] ] );
B2B8W_G( 1, 6, 11, 12, m[ sigma[i][10] ], m[ sigma[i][11] ] );
B2B8W_G( 2, 7, 8, 13, m[ sigma[i][12] ], m[ sigma[i][13] ] );
B2B8W_G( 3, 4, 9, 14, m[ sigma[i][14] ], m[ sigma[i][15] ] );
}
ctx->h[0] = mm512_xor3( ctx->h[0], v[0], v[ 8] );
ctx->h[1] = mm512_xor3( ctx->h[1], v[1], v[ 9] );
ctx->h[2] = mm512_xor3( ctx->h[2], v[2], v[10] );
ctx->h[3] = mm512_xor3( ctx->h[3], v[3], v[11] );
ctx->h[4] = mm512_xor3( ctx->h[4], v[4], v[12] );
ctx->h[5] = mm512_xor3( ctx->h[5], v[5], v[13] );
ctx->h[6] = mm512_xor3( ctx->h[6], v[6], v[14] );
ctx->h[7] = mm512_xor3( ctx->h[7], v[7], v[15] );
}
int blake2b_8x64_init( blake2b_8x64_ctx *ctx )
{
size_t i;
ctx->h[0] = v512_64( 0x6A09E667F3BCC908 );
ctx->h[1] = v512_64( 0xBB67AE8584CAA73B );
ctx->h[2] = v512_64( 0x3C6EF372FE94F82B );
ctx->h[3] = v512_64( 0xA54FF53A5F1D36F1 );
ctx->h[4] = v512_64( 0x510E527FADE682D1 );
ctx->h[5] = v512_64( 0x9B05688C2B3E6C1F );
ctx->h[6] = v512_64( 0x1F83D9ABFB41BD6B );
ctx->h[7] = v512_64( 0x5BE0CD19137E2179 );
ctx->h[0] = _mm512_xor_si512( ctx->h[0], v512_64( 0x01010020 ) );
ctx->t[0] = 0;
ctx->t[1] = 0;
ctx->c = 0;
ctx->outlen = 32;
for ( i = 0; i < 16; i++ )
ctx->b[i] = m512_zero;
return 0;
}
void blake2b_8x64_update( blake2b_8x64_ctx *ctx, const void *input,
size_t inlen )
{
__m512i* in =(__m512i*)input;
size_t i, c;
c = ctx->c >> 3;
for ( i = 0; i < (inlen >> 3); i++ )
{
if ( ctx->c == 128 )
{
ctx->t[0] += ctx->c;
if ( ctx->t[0] < ctx->c )
ctx->t[1]++;
blake2b_8x64_compress( ctx, 0 );
ctx->c = 0;
}
ctx->b[ c++ ] = in[i];
ctx->c += 8;
}
}
void blake2b_8x64_final( blake2b_8x64_ctx *ctx, void *out )
{
size_t c;
c = ctx->c >> 3;
ctx->t[0] += ctx->c;
if ( ctx->t[0] < ctx->c )
ctx->t[1]++;
while ( ctx->c < 128 )
{
ctx->b[c++] = m512_zero;
ctx->c += 8;
}
blake2b_8x64_compress( ctx, 1 ); // final block flag = 1
casti_m512i( out, 0 ) = ctx->h[0];
casti_m512i( out, 1 ) = ctx->h[1];
casti_m512i( out, 2 ) = ctx->h[2];
casti_m512i( out, 3 ) = ctx->h[3];
}
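// Hypothetical usage, hashing eight 80-byte inputs at once (the interleave
// step and buffer names are assumptions, not part of this file):
//    blake2b_8x64_ctx ctx;
//    blake2b_8x64_init( &ctx );
//    blake2b_8x64_update( &ctx, vdata, 80 );  // vdata: 8-way interleaved
//    blake2b_8x64_final( &ctx, vhash );       // vhash: 8 interleaved digests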
#endif // AVX512
// AVX2
// G Mixing function.
#define B2B_G(a, b, c, d, x, y) \
{ \
v[a] = _mm256_add_epi64( _mm256_add_epi64( v[a], v[b] ), x ); \
v[d] = mm256_ror_64( _mm256_xor_si256( v[d], v[a] ), 32 ); \
v[c] = _mm256_add_epi64( v[c], v[d] ); \
v[b] = mm256_ror_64( _mm256_xor_si256( v[b], v[c] ), 24 ); \
v[a] = _mm256_add_epi64( _mm256_add_epi64( v[a], v[b] ), y ); \
v[d] = mm256_ror_64( _mm256_xor_si256( v[d], v[a] ), 16 ); \
v[c] = _mm256_add_epi64( v[c], v[d] ); \
v[b] = mm256_ror_64( _mm256_xor_si256( v[b], v[c] ), 63 ); \
}
// Initialization Vector.
/*
static const uint64_t blake2b_iv[8] = {
0x6A09E667F3BCC908, 0xBB67AE8584CAA73B,
0x3C6EF372FE94F82B, 0xA54FF53A5F1D36F1,
0x510E527FADE682D1, 0x9B05688C2B3E6C1F,
0x1F83D9ABFB41BD6B, 0x5BE0CD19137E2179
};
*/
static void blake2b_4x64_compress( blake2b_4x64_ctx *ctx, int last )
{
__m256i v[16], m[16];
v[ 0] = ctx->h[0];
v[ 1] = ctx->h[1];
v[ 2] = ctx->h[2];
v[ 3] = ctx->h[3];
v[ 4] = ctx->h[4];
v[ 5] = ctx->h[5];
v[ 6] = ctx->h[6];
v[ 7] = ctx->h[7];
v[ 8] = v256_64( 0x6A09E667F3BCC908 );
v[ 9] = v256_64( 0xBB67AE8584CAA73B );
v[10] = v256_64( 0x3C6EF372FE94F82B );
v[11] = v256_64( 0xA54FF53A5F1D36F1 );
v[12] = v256_64( 0x510E527FADE682D1 );
v[13] = v256_64( 0x9B05688C2B3E6C1F );
v[14] = v256_64( 0x1F83D9ABFB41BD6B );
v[15] = v256_64( 0x5BE0CD19137E2179 );
v[12] = _mm256_xor_si256( v[12], v256_64( ctx->t[0] ) );
v[13] = _mm256_xor_si256( v[13], v256_64( ctx->t[1] ) );
if ( last )
v[14] = mm256_not( v[14] );
m[ 0] = ctx->b[ 0];
m[ 1] = ctx->b[ 1];
m[ 2] = ctx->b[ 2];
m[ 3] = ctx->b[ 3];
m[ 4] = ctx->b[ 4];
m[ 5] = ctx->b[ 5];
m[ 6] = ctx->b[ 6];
m[ 7] = ctx->b[ 7];
m[ 8] = ctx->b[ 8];
m[ 9] = ctx->b[ 9];
m[10] = ctx->b[10];
m[11] = ctx->b[11];
m[12] = ctx->b[12];
m[13] = ctx->b[13];
m[14] = ctx->b[14];
m[15] = ctx->b[15];
for ( int i = 0; i < 12; i++ )
{
B2B_G( 0, 4, 8, 12, m[ sigma[i][ 0] ], m[ sigma[i][ 1] ] );
B2B_G( 1, 5, 9, 13, m[ sigma[i][ 2] ], m[ sigma[i][ 3] ] );
B2B_G( 2, 6, 10, 14, m[ sigma[i][ 4] ], m[ sigma[i][ 5] ] );
B2B_G( 3, 7, 11, 15, m[ sigma[i][ 6] ], m[ sigma[i][ 7] ] );
B2B_G( 0, 5, 10, 15, m[ sigma[i][ 8] ], m[ sigma[i][ 9] ] );
B2B_G( 1, 6, 11, 12, m[ sigma[i][10] ], m[ sigma[i][11] ] );
B2B_G( 2, 7, 8, 13, m[ sigma[i][12] ], m[ sigma[i][13] ] );
B2B_G( 3, 4, 9, 14, m[ sigma[i][14] ], m[ sigma[i][15] ] );
}
ctx->h[0] = _mm256_xor_si256( _mm256_xor_si256( ctx->h[0], v[0] ), v[ 8] );
ctx->h[1] = _mm256_xor_si256( _mm256_xor_si256( ctx->h[1], v[1] ), v[ 9] );
ctx->h[2] = _mm256_xor_si256( _mm256_xor_si256( ctx->h[2], v[2] ), v[10] );
ctx->h[3] = _mm256_xor_si256( _mm256_xor_si256( ctx->h[3], v[3] ), v[11] );
ctx->h[4] = _mm256_xor_si256( _mm256_xor_si256( ctx->h[4], v[4] ), v[12] );
ctx->h[5] = _mm256_xor_si256( _mm256_xor_si256( ctx->h[5], v[5] ), v[13] );
ctx->h[6] = _mm256_xor_si256( _mm256_xor_si256( ctx->h[6], v[6] ), v[14] );
ctx->h[7] = _mm256_xor_si256( _mm256_xor_si256( ctx->h[7], v[7] ), v[15] );
}
int blake2b_4x64_init( blake2b_4x64_ctx *ctx )
{
size_t i;
ctx->h[0] = v256_64( 0x6A09E667F3BCC908 );
ctx->h[1] = v256_64( 0xBB67AE8584CAA73B );
ctx->h[2] = v256_64( 0x3C6EF372FE94F82B );
ctx->h[3] = v256_64( 0xA54FF53A5F1D36F1 );
ctx->h[4] = v256_64( 0x510E527FADE682D1 );
ctx->h[5] = v256_64( 0x9B05688C2B3E6C1F );
ctx->h[6] = v256_64( 0x1F83D9ABFB41BD6B );
ctx->h[7] = v256_64( 0x5BE0CD19137E2179 );
ctx->h[0] = _mm256_xor_si256( ctx->h[0], v256_64( 0x01010020 ) );
ctx->t[0] = 0;
ctx->t[1] = 0;
ctx->c = 0;
ctx->outlen = 32;
for ( i = 0; i < 16; i++ )
ctx->b[i] = m256_zero;
return 0;
}
void blake2b_4x64_update( blake2b_4x64_ctx *ctx, const void *input,
size_t inlen )
{
__m256i* in =(__m256i*)input;
size_t i, c;
c = ctx->c >> 3;
for ( i = 0; i < (inlen >> 3); i++ )
{
if ( ctx->c == 128 )
{
ctx->t[0] += ctx->c;
if ( ctx->t[0] < ctx->c )
ctx->t[1]++;
blake2b_4x64_compress( ctx, 0 );
ctx->c = 0;
}
ctx->b[ c++ ] = in[i];
ctx->c += 8;
}
}
void blake2b_4x64_final( blake2b_4x64_ctx *ctx, void *out )
{
size_t c;
c = ctx->c >> 3;
ctx->t[0] += ctx->c;
if ( ctx->t[0] < ctx->c )
ctx->t[1]++;
while ( ctx->c < 128 )
{
ctx->b[c++] = m256_zero;
ctx->c += 8;
}
blake2b_4x64_compress( ctx, 1 ); // final block flag = 1
casti_m256i( out, 0 ) = ctx->h[0];
casti_m256i( out, 1 ) = ctx->h[1];
casti_m256i( out, 2 ) = ctx->h[2];
casti_m256i( out, 3 ) = ctx->h[3];
}
#endif // AVX2

algo/blake/blake2b-hash.h  (new file, 63 lines)

@@ -0,0 +1,63 @@
#pragma once
#ifndef BLAKE2B_HASH_4WAY_H__
#define BLAKE2B_HASH_4WAY_H__
#include "simd-utils.h"
#include <stddef.h>
#include <stdint.h>
#if defined(_MSC_VER)
#include <inttypes.h>
#define inline __inline
#define ALIGN(x) __declspec(align(x))
#else
#define ALIGN(x) __attribute__((aligned(x)))
#endif
#if defined(SIMD512)
typedef struct ALIGN( 64 ) {
__m512i b[16]; // input buffer
__m512i h[8]; // chained state
uint64_t t[2]; // total number of bytes
size_t c; // pointer for b[]
size_t outlen; // digest size
} blake2b_8x64_ctx;
int blake2b_8x64_init( blake2b_8x64_ctx *ctx );
void blake2b_8x64_update( blake2b_8x64_ctx *ctx, const void *input,
size_t inlen );
void blake2b_8x64_final( blake2b_8x64_ctx *ctx, void *out );
#define blake2b_8way_ctx blake2b_8x64_ctx
#define blake2b_8way_init blake2b_8x64_init
#define blake2b_8way_update blake2b_8x64_update
#define blake2b_8way_final blake2b_8x64_final
#endif
#if defined(__AVX2__)
// state context
typedef struct ALIGN( 64 ) {
__m256i b[16]; // input buffer
__m256i h[8]; // chained state
uint64_t t[2]; // total number of bytes
size_t c; // pointer for b[]
size_t outlen; // digest size
} blake2b_4x64_ctx;
int blake2b_4x64_init( blake2b_4x64_ctx *ctx );
void blake2b_4x64_update( blake2b_4x64_ctx *ctx, const void *input,
size_t inlen );
void blake2b_4x64_final( blake2b_4x64_ctx *ctx, void *out );
#define blake2b_4way_ctx blake2b_4x64_ctx
#define blake2b_4way_init blake2b_4x64_init
#define blake2b_4way_update blake2b_4x64_update
#define blake2b_4way_final blake2b_4x64_final
#endif
#endif


@@ -1,231 +1,175 @@
/**
* Blake2-B Implementation
* tpruvot@github 2015-2016
*/
#include "algo-gate-api.h"
#include <string.h>
#include <stdint.h>
#include "algo/blake/sph_blake2b.h"
#include "blake2b-hash.h"
//static __thread sph_blake2b_ctx s_midstate;
//static __thread sph_blake2b_ctx s_ctx;
#define MIDLEN 76
#define A 64
#if defined(SIMD512)
#define BLAKE2B_8WAY
#elif defined(__AVX2__)
#define BLAKE2B_4WAY
#endif
#if defined(BLAKE2B_8WAY)
int scanhash_blake2b_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*8] __attribute__ ((aligned (128)));
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
blake2b_8way_ctx ctx __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[49]); // 3*16+1
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id;
__m512i *noncev = (__m512i*)vdata + 9; // aligned
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
mm512_bswap32_intrlv80_8x64( vdata, pdata );
do {
*noncev = mm512_intrlv_blend_32( mm512_bswap_32(
_mm512_set_epi32( n+7, 0, n+6, 0, n+5, 0, n+4, 0,
n+3, 0, n+2, 0, n+1, 0, n , 0 ) ), *noncev );
blake2b_8way_init( &ctx );
blake2b_8way_update( &ctx, vdata, 80 );
blake2b_8way_final( &ctx, hash );
for ( int lane = 0; lane < 8; lane++ )
if ( hash7[ lane<<1 ] <= Htarg )
{
extr_lane_8x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
}
}
n += 8;
} while ( (n < max_nonce-8) && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
return 0;
}
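// Why hash7[ lane<<1 ] reads word 7 of each lane: with 8x64 interleaving,
// 64-bit word w of lane l sits at 64-bit index w*8 + l, so 32-bit word 7
// of a 256-bit digest is the high half of 64-bit word 3, at 32-bit index
// (3*8 + l)*2 + 1 = 49 + 2*l -- hence hash7 = &hash[49] ("3*16+1").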
#elif defined(BLAKE2B_4WAY)
// Function not used, code inlined.
void blake2b_4way_hash(void *output, const void *input)
{
blake2b_4way_ctx ctx;
blake2b_4way_init( &ctx );
blake2b_4way_update( &ctx, input, 80 );
blake2b_4way_final( &ctx, output );
}
int scanhash_blake2b_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash[8*4] __attribute__ ((aligned (64)));
uint32_t vdata[20*4] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
blake2b_4way_ctx ctx __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[25]); // 3*8+1
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id;
__m256i *noncev = (__m256i*)vdata + 9; // aligned
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
mm256_bswap32_intrlv80_4x64( vdata, pdata );
do {
*noncev = mm256_intrlv_blend_32( mm256_bswap_32(
_mm256_set_epi32( n+3, 0, n+2, 0, n+1, 0, n, 0 ) ), *noncev );
blake2b_4way_init( &ctx );
blake2b_4way_update( &ctx, vdata, 80 );
blake2b_4way_final( &ctx, hash );
for ( int lane = 0; lane < 4; lane++ )
if ( hash7[ lane<<1 ] <= Htarg )
{
extr_lane_4x64( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
}
}
n += 4;
} while ( (n < max_nonce-4) && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
return 0;
}
#else
#include "algo/blake/sph_blake2b.h"
void blake2b_hash(void *output, const void *input)
{
uint8_t _ALIGN(A) hash[32];
sph_blake2b_ctx ctx __attribute__ ((aligned (64)));
uint8_t _ALIGN(32) hash[32];
sph_blake2b_ctx ctx __attribute__ ((aligned (32)));
sph_blake2b_init(&ctx, 32, NULL, 0);
sph_blake2b_update(&ctx, input, 80);
sph_blake2b_final(&ctx, hash);
sph_blake2b_init(&ctx, 32, NULL, 0);
sph_blake2b_update(&ctx, input, 80);
sph_blake2b_final(&ctx, hash);
memcpy(output, hash, 32);
memcpy(output, hash, 32);
}
/*
static void blake2b_hash_end(uint32_t *output, const uint32_t *input)
{
s_ctx.outlen = MIDLEN;
memcpy(&s_ctx, &s_midstate, 32 + 16 + MIDLEN);
sph_blake2b_update(&s_ctx, (uint8_t*) &input[MIDLEN/4], 80 - MIDLEN);
sph_blake2b_final(&s_ctx, (uint8_t*) output);
}
*/
int scanhash_blake2b( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(A) vhashcpu[8];
uint32_t _ALIGN(A) endiandata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id; // thr_id arg is deprecated
uint32_t _ALIGN(32) hash64[8];
uint32_t _ALIGN(32) endiandata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id;
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[8];
v128_bswap32_80( endiandata, pdata );
uint32_t n = first_nonce;
do {
endiandata[19] = n;
blake2b_hash( hash64, endiandata );
if ( unlikely( valid_hash( hash64, ptarget ) ) && !opt_benchmark )
{
pdata[19] = bswap_32( n );
submit_solution( work, hash64, mythr );
}
n++;
} while (n < max_nonce && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
for (int i=0; i < 19; i++) {
be32enc(&endiandata[i], pdata[i]);
}
// midstate (untested yet)
//blake2b_init(&s_midstate, 32, NULL, 0);
//blake2b_update(&s_midstate, (uint8_t*) endiandata, MIDLEN);
//memcpy(&s_ctx, &s_midstate, sizeof(blake2b_ctx));
do {
be32enc(&endiandata[8], n);
//blake2b_hash_end(vhashcpu, endiandata);
blake2b_hash(vhashcpu, endiandata);
if (vhashcpu[7] < Htarg && fulltest(vhashcpu, ptarget)) {
work_set_target_ratio(work, vhashcpu);
*hashes_done = n - first_nonce + 1;
pdata[8] = n;
return 1;
}
n++;
} while (n < max_nonce && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
pdata[8] = n;
return 0;
return 0;
}
static inline void swab256(void *dest_p, const void *src_p)
{
uint32_t *dest = (uint32_t *)dest_p;
const uint32_t *src = (uint32_t *)src_p;
dest[0] = swab32(src[7]);
dest[1] = swab32(src[6]);
dest[2] = swab32(src[5]);
dest[3] = swab32(src[4]);
dest[4] = swab32(src[3]);
dest[5] = swab32(src[2]);
dest[6] = swab32(src[1]);
dest[7] = swab32(src[0]);
}
/* compute nbits to get the network diff */
void blake2b_calc_network_diff(struct work *work)
{
// sample for diff 43.281 : 1c05ea29
uint32_t nbits = work->data[11]; // unsure if correct
uint32_t bits = (nbits & 0xffffff);
int16_t shift = (swab32(nbits) & 0xff); // 0x1c = 28
double d = (double)0x0000ffff / (double)bits;
for (int m=shift; m < 29; m++) d *= 256.0;
for (int m=29; m < shift; m++) d /= 256.0;
if (opt_debug_diff)
applog(LOG_DEBUG, "net diff: %f -> shift %u, bits %08x", d, shift, bits);
net_diff = d;
}
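// Worked check against the sample in the comment, nbits = 0x1c05ea29:
//    bits  = 0x05ea29 = 387625, shift = 0x1c = 28
//    d     = 65535.0 / 387625 ~ 0.16907
//    the first loop runs once (m = 28): d *= 256 -> d ~ 43.281,
// matching the sample difficulty.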
void blake2b_be_build_stratum_request( char *req, struct work *work )
{
unsigned char *xnonce2str;
uint32_t ntime, nonce;
char ntimestr[9], noncestr[9];
be32enc( &ntime, work->data[ algo_gate.ntime_index ] );
be32enc( &nonce, work->data[ algo_gate.nonce_index ] );
bin2hex( ntimestr, (char*)(&ntime), sizeof(uint32_t) );
bin2hex( noncestr, (char*)(&nonce), sizeof(uint32_t) );
uint16_t high_nonce = swab32(work->data[9]) >> 16;
xnonce2str = abin2hex((unsigned char*)(&high_nonce), 2);
snprintf( req, JSON_BUF_LEN,
"{\"method\": \"mining.submit\", \"params\": [\"%s\", \"%s\", \"%s\", \"%s\", \"%s\"], \"id\":4}",
rpc_user, work->job_id, xnonce2str, ntimestr, noncestr );
free( xnonce2str );
}
#define min(a,b) ((a) > (b) ? (b) : (a))
// merkle root handled here, no need for gen_merkle_root gate target
void blake2b_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
{
uchar merkle_root[64] = { 0 };
uint32_t extraheader[32] = { 0 };
int headersize = 0;
size_t t;
int i;
// merkle root
memcpy( merkle_root, sctx->job.coinbase, 32 );
headersize = min( (int)sctx->job.coinbase_size - 32, sizeof(extraheader) );
memcpy( extraheader, &sctx->job.coinbase[32], headersize );
// Increment extranonce2
for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ );
// Assemble block header
memset( g_work->data, 0, sizeof(g_work->data) );
// g_work->data[0] = le32dec( sctx->job.version );
// for ( i = 0; i < 8; i++ )
// g_work->data[1 + i] = le32dec( (uint32_t *) sctx->job.prevhash + i );
for ( i = 0; i < 8; i++ )
g_work->data[i] = ((uint32_t*)sctx->job.prevhash)[7-i];
// for ( i = 0; i < 8; i++ )
// g_work->data[9 + i] = be32dec( (uint32_t *) merkle_root + i );
g_work->data[8] = 0; // nonce
g_work->data[9] = swab32( extraheader[0] ) | ( rand() & 0xf0 );
g_work->data[10] = be32dec( sctx->job.ntime );
g_work->data[11] = be32dec( sctx->job.nbits );
for ( i = 0; i < 8; i++ )
g_work->data[12+i] = ( (uint32_t*)merkle_root )[i];
}
#undef min
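// Resulting layout of g_work->data[], a sketch inferred from the
// assignments above:
//   data[ 0.. 7]  previous block hash, word-reversed
//   data[ 8]      nonce (zeroed here, set by the scan loop)
//   data[ 9]      swab32( extraheader[0] ) with four random bits (mask 0xf0)
//   data[10]      ntime
//   data[11]      nbits
//   data[12..19]  merkle root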
void blake2b_get_new_work( struct work* work, struct work* g_work, int thr_id,
uint32_t* end_nonce_ptr, bool clean_job )
{
const int wkcmp_sz = 32; // bytes
const int wkcmp_off = 32 + 16;
uint32_t *nonceptr = algo_gate.get_nonceptr( work->data );
if ( memcmp( &work->data[ wkcmp_off ], &g_work->data[ wkcmp_off ], wkcmp_sz )
&& ( clean_job || ( *nonceptr >= *end_nonce_ptr )
|| strcmp( work->job_id, g_work->job_id ) ) )
{
work_free( work );
work_copy( work, g_work );
*nonceptr = ( 0xffffffffU / opt_n_threads ) * thr_id;
if ( opt_randomize )
*nonceptr += ( (rand() *4 ) & UINT32_MAX ) / opt_n_threads;
*end_nonce_ptr = ( 0xffffffffU / opt_n_threads ) * (thr_id+1) - 0x20;
}
else
++(*nonceptr);
// suprnova job_id check without data/target/height change...
// we may have just copied new g_work to work, so why is this test here?
// if ( have_stratum && strcmp( work->job_id, g_work->job_id ) )
// exit thread loop
// continue;
// else
// {
// nonceptr[1] += 0x10;
// nonceptr[1] |= thr_id;
// }
}
bool blake2b_ready_to_mine( struct work* work, struct stratum_ctx* stratum,
int thr_id )
{
if ( have_stratum && strcmp( stratum->job.job_id, work->job_id ) )
// need to regen g_work..
return false;
// extradata: prevent duplicates
work->data[ 8 ] += 0x10;
work->data[ 8 + 1 ] |= thr_id;
return true;
}
int64_t blake2b_get_max64() { return 0x1fffffLL; }
#endif
bool register_blake2b_algo( algo_gate_t* gate )
{
algo_not_tested();
gate->ntime_index = 10;
gate->nbits_index = 11;
gate->nonce_index = 8;
gate->work_cmp_size = 32;
gate->scanhash = (void*)&scanhash_blake2b;
gate->hash = (void*)&blake2b_hash;
gate->calc_network_diff = (void*)&blake2b_calc_network_diff;
gate->build_stratum_request = (void*)&blake2b_be_build_stratum_request;
gate->work_decode = (void*)&std_be_work_decode;
gate->submit_getwork_result = (void*)&std_be_submit_getwork_result;
gate->build_extraheader = (void*)&blake2b_build_extraheader;
gate->get_new_work = (void*)&blake2b_get_new_work;
gate->get_max64 = (void*)&blake2b_get_max64;
gate->ready_to_mine = (void*)&blake2b_ready_to_mine;
have_gbt = false;
#if defined(BLAKE2B_8WAY)
gate->scanhash = (void*)&scanhash_blake2b_8way;
#elif defined(BLAKE2B_4WAY)
gate->scanhash = (void*)&scanhash_blake2b_4way;
gate->hash = (void*)&blake2b_4way_hash;
#else
gate->scanhash = (void*)&scanhash_blake2b;
gate->hash = (void*)&blake2b_hash;
#endif
gate->optimizations = AVX2_OPT | AVX512_OPT;
return true;
};


@@ -1,119 +0,0 @@
#include "blake2s-gate.h"
#include "blake2s-hash-4way.h"
#include <string.h>
#include <stdint.h>
#if defined(BLAKE2S_8WAY)
static __thread blake2s_8way_state blake2s_8w_ctx;
void blake2s_8way_hash( void *output, const void *input )
{
uint32_t vhash[8*8] __attribute__ ((aligned (64)));
blake2s_8way_state ctx;
memcpy( &ctx, &blake2s_8w_ctx, sizeof ctx );
blake2s_8way_update( &ctx, input + (64<<3), 16 );
blake2s_8way_final( &ctx, vhash, BLAKE2S_OUTBYTES );
dintrlv_8x32( output, output+ 32, output+ 64, output+ 96,
output+128, output+160, output+192, output+224,
vhash, 256 );
}
int scanhash_blake2s_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t hash[8*8] __attribute__ ((aligned (32)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 19; // aligned
uint32_t n = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
mm256_bswap32_intrlv80_8x32( vdata, pdata );
blake2s_8way_init( &blake2s_8w_ctx, BLAKE2S_OUTBYTES );
blake2s_8way_update( &blake2s_8w_ctx, vdata, 64 );
do {
*noncev = mm256_bswap_32( _mm256_set_epi32( n+7, n+6, n+5, n+4,
n+3, n+2, n+1, n ) );
pdata[19] = n;
blake2s_8way_hash( hash, vdata );
for ( int i = 0; i < 8; i++ )
if ( (hash+(i<<3))[7] <= Htarg )
if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
}
n += 8;
} while ( (n < max_nonce) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;
return 0;
}
#elif defined(BLAKE2S_4WAY)
static __thread blake2s_4way_state blake2s_4w_ctx;
void blake2s_4way_hash( void *output, const void *input )
{
uint32_t vhash[8*4] __attribute__ ((aligned (64)));
blake2s_4way_state ctx;
memcpy( &ctx, &blake2s_4w_ctx, sizeof ctx );
blake2s_4way_update( &ctx, input + (64<<2), 16 );
blake2s_4way_final( &ctx, vhash, BLAKE2S_OUTBYTES );
dintrlv_4x32( output, output+32, output+64, output+96,
vhash, 256 );
}
int scanhash_blake2s_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t hash[8*4] __attribute__ ((aligned (32)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
__m128i *noncev = (__m128i*)vdata + 19; // aligned
uint32_t n = first_nonce;
int thr_id = mythr->id; // thr_id arg is deprecated
mm128_bswap32_intrlv80_4x32( vdata, pdata );
blake2s_4way_init( &blake2s_4w_ctx, BLAKE2S_OUTBYTES );
blake2s_4way_update( &blake2s_4w_ctx, vdata, 64 );
do {
*noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) );
pdata[19] = n;
blake2s_4way_hash( hash, vdata );
for ( int i = 0; i < 4; i++ )
if ( (hash+(i<<3))[7] <= Htarg )
if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
}
n += 4;
} while ( (n < max_nonce) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;
return 0;
}
#endif


@@ -1,27 +0,0 @@
#include "blake2s-gate.h"
// changed to get_max64_0x3fffffLL in cpuminer-multi-decred
int64_t blake2s_get_max64 ()
{
return 0x7ffffLL;
}
bool register_blake2s_algo( algo_gate_t* gate )
{
#if defined(BLAKE2S_8WAY)
gate->scanhash = (void*)&scanhash_blake2s_8way;
gate->hash = (void*)&blake2s_8way_hash;
#elif defined(BLAKE2S_4WAY)
gate->scanhash = (void*)&scanhash_blake2s_4way;
gate->hash = (void*)&blake2s_4way_hash;
#else
gate->scanhash = (void*)&scanhash_blake2s;
gate->hash = (void*)&blake2s_hash;
#endif
gate->get_max64 = (void*)&blake2s_get_max64;
gate->optimizations = SSE42_OPT | AVX2_OPT;
return true;
};


@@ -1,35 +0,0 @@
#ifndef __BLAKE2S_GATE_H__
#define __BLAKE2S_GATE_H__ 1
#include <stdint.h>
#include "algo-gate-api.h"
#if defined(__SSE4_2__)
#define BLAKE2S_4WAY
#endif
#if defined(__AVX2__)
#define BLAKE2S_8WAY
#endif
bool register_blake2s_algo( algo_gate_t* gate );
#if defined(BLAKE2S_8WAY)
void blake2s_8way_hash( void *state, const void *input );
int scanhash_blake2s_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined (BLAKE2S_4WAY)
void blake2s_4way_hash( void *state, const void *input );
int scanhash_blake2s_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#else
void blake2s_hash( void *state, const void *input );
int scanhash_blake2s( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
#endif


@@ -1,362 +0,0 @@
/**
* BLAKE2 reference source code package - reference C implementations
*
* Written in 2012 by Samuel Neves <sneves@dei.uc.pt>
*
* To the extent possible under law, the author(s) have dedicated all copyright
* and related and neighboring rights to this software to the public domain
* worldwide. This software is distributed without any warranty.
*
* You should have received a copy of the CC0 Public Domain Dedication along with
* this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#include "blake2s-hash-4way.h"
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#if defined(__SSE4_2__)
static const uint32_t blake2s_IV[8] =
{
0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL,
0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL
};
static const uint8_t blake2s_sigma[10][16] =
{
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } ,
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } ,
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } ,
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } ,
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } ,
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } ,
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } ,
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } ,
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 } ,
};
// define a constant for initial param.
int blake2s_4way_init( blake2s_4way_state *S, const uint8_t outlen )
{
blake2s_nway_param P[1];
P->digest_length = outlen;
P->key_length = 0;
P->fanout = 1;
P->depth = 1;
P->leaf_length = 0;
*((uint64_t*)(P->node_offset)) = 0;
P->node_depth = 0;
P->inner_length = 0;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
memset( S, 0, sizeof( blake2s_4way_state ) );
for( int i = 0; i < 8; ++i )
S->h[i] = _mm_set1_epi32( blake2s_IV[i] );
uint32_t *p = ( uint32_t * )( P );
/* IV XOR ParamBlock */
for ( size_t i = 0; i < 8; ++i )
S->h[i] = _mm_xor_si128( S->h[i], _mm_set1_epi32( p[i] ) );
return 0;
}
int blake2s_4way_compress( blake2s_4way_state *S, const __m128i* block )
{
__m128i m[16];
__m128i v[16];
memcpy_128( m, block, 16 );
memcpy_128( v, S->h, 8 );
v[ 8] = _mm_set1_epi32( blake2s_IV[0] );
v[ 9] = _mm_set1_epi32( blake2s_IV[1] );
v[10] = _mm_set1_epi32( blake2s_IV[2] );
v[11] = _mm_set1_epi32( blake2s_IV[3] );
v[12] = _mm_xor_si128( _mm_set1_epi32( S->t[0] ),
_mm_set1_epi32( blake2s_IV[4] ) );
v[13] = _mm_xor_si128( _mm_set1_epi32( S->t[1] ),
_mm_set1_epi32( blake2s_IV[5] ) );
v[14] = _mm_xor_si128( _mm_set1_epi32( S->f[0] ),
_mm_set1_epi32( blake2s_IV[6] ) );
v[15] = _mm_xor_si128( _mm_set1_epi32( S->f[1] ),
_mm_set1_epi32( blake2s_IV[7] ) );
#define G4W(r,i,a,b,c,d) \
do { \
a = _mm_add_epi32( _mm_add_epi32( a, b ), m[ blake2s_sigma[r][2*i+0] ] ); \
d = mm128_ror_32( _mm_xor_si128( d, a ), 16 ); \
c = _mm_add_epi32( c, d ); \
b = mm128_ror_32( _mm_xor_si128( b, c ), 12 ); \
a = _mm_add_epi32( _mm_add_epi32( a, b ), m[ blake2s_sigma[r][2*i+1] ] ); \
d = mm128_ror_32( _mm_xor_si128( d, a ), 8 ); \
c = _mm_add_epi32( c, d ); \
b = mm128_ror_32( _mm_xor_si128( b, c ), 7 ); \
} while(0)
#define ROUND4W(r) \
do { \
G4W( r, 0, v[ 0], v[ 4], v[ 8], v[12] ); \
G4W( r, 1, v[ 1], v[ 5], v[ 9], v[13] ); \
G4W( r, 2, v[ 2], v[ 6], v[10], v[14] ); \
G4W( r, 3, v[ 3], v[ 7], v[11], v[15] ); \
G4W( r, 4, v[ 0], v[ 5], v[10], v[15] ); \
G4W( r, 5, v[ 1], v[ 6], v[11], v[12] ); \
G4W( r, 6, v[ 2], v[ 7], v[ 8], v[13] ); \
G4W( r, 7, v[ 3], v[ 4], v[ 9], v[14] ); \
} while(0)
ROUND4W( 0 );
ROUND4W( 1 );
ROUND4W( 2 );
ROUND4W( 3 );
ROUND4W( 4 );
ROUND4W( 5 );
ROUND4W( 6 );
ROUND4W( 7 );
ROUND4W( 8 );
ROUND4W( 9 );
for( size_t i = 0; i < 8; ++i )
S->h[i] = _mm_xor_si128( _mm_xor_si128( S->h[i], v[i] ), v[i + 8] );
#undef G4W
#undef ROUND4W
return 0;
}
int blake2s_4way_update( blake2s_4way_state *S, const void *in,
uint64_t inlen )
{
__m128i *input = (__m128i*)in;
__m128i *buf = (__m128i*)S->buf;
const int bsize = BLAKE2S_BLOCKBYTES;
while( inlen > 0 )
{
size_t left = S->buflen;
if( inlen >= bsize - left )
{
memcpy_128( buf + (left>>2), input, (bsize - left) >> 2 );
S->buflen += bsize - left;
S->t[0] += BLAKE2S_BLOCKBYTES;
S->t[1] += ( S->t[0] < BLAKE2S_BLOCKBYTES );
blake2s_4way_compress( S, buf );
S->buflen = 0;
input += ( bsize >> 2 );
inlen -= bsize;
}
else
{
memcpy_128( buf + ( left>>2 ), input, inlen>>2 );
S->buflen += (size_t) inlen;
input += ( inlen>>2 );
inlen -= inlen;
}
}
return 0;
}
int blake2s_4way_final( blake2s_4way_state *S, void *out, uint8_t outlen )
{
__m128i *buf = (__m128i*)S->buf;
S->t[0] += S->buflen;
S->t[1] += ( S->t[0] < S->buflen );
if ( S->last_node )
S->f[1] = ~0U;
S->f[0] = ~0U;
memset_zero_128( buf + ( S->buflen>>2 ),
( BLAKE2S_BLOCKBYTES - S->buflen ) >> 2 );
blake2s_4way_compress( S, buf );
for ( int i = 0; i < 8; ++i )
casti_m128i( out, i ) = S->h[ i ];
return 0;
}
#if defined(__AVX2__)
int blake2s_8way_compress( blake2s_8way_state *S, const __m256i *block )
{
__m256i m[16];
__m256i v[16];
memcpy_256( m, block, 16 );
memcpy_256( v, S->h, 8 );
v[ 8] = _mm256_set1_epi32( blake2s_IV[0] );
v[ 9] = _mm256_set1_epi32( blake2s_IV[1] );
v[10] = _mm256_set1_epi32( blake2s_IV[2] );
v[11] = _mm256_set1_epi32( blake2s_IV[3] );
v[12] = _mm256_xor_si256( _mm256_set1_epi32( S->t[0] ),
_mm256_set1_epi32( blake2s_IV[4] ) );
v[13] = _mm256_xor_si256( _mm256_set1_epi32( S->t[1] ),
_mm256_set1_epi32( blake2s_IV[5] ) );
v[14] = _mm256_xor_si256( _mm256_set1_epi32( S->f[0] ),
_mm256_set1_epi32( blake2s_IV[6] ) );
v[15] = _mm256_xor_si256( _mm256_set1_epi32( S->f[1] ),
_mm256_set1_epi32( blake2s_IV[7] ) );
#define G8W(r,i,a,b,c,d) \
do { \
a = _mm256_add_epi32( _mm256_add_epi32( a, b ), \
m[ blake2s_sigma[r][2*i+0] ] ); \
d = mm256_ror_32( _mm256_xor_si256( d, a ), 16 ); \
c = _mm256_add_epi32( c, d ); \
b = mm256_ror_32( _mm256_xor_si256( b, c ), 12 ); \
a = _mm256_add_epi32( _mm256_add_epi32( a, b ), \
m[ blake2s_sigma[r][2*i+1] ] ); \
d = mm256_ror_32( _mm256_xor_si256( d, a ), 8 ); \
c = _mm256_add_epi32( c, d ); \
b = mm256_ror_32( _mm256_xor_si256( b, c ), 7 ); \
} while(0)
#define ROUND8W(r) \
do { \
G8W( r, 0, v[ 0], v[ 4], v[ 8], v[12] ); \
G8W( r, 1, v[ 1], v[ 5], v[ 9], v[13] ); \
G8W( r, 2, v[ 2], v[ 6], v[10], v[14] ); \
G8W( r, 3, v[ 3], v[ 7], v[11], v[15] ); \
G8W( r, 4, v[ 0], v[ 5], v[10], v[15] ); \
G8W( r, 5, v[ 1], v[ 6], v[11], v[12] ); \
G8W( r, 6, v[ 2], v[ 7], v[ 8], v[13] ); \
G8W( r, 7, v[ 3], v[ 4], v[ 9], v[14] ); \
} while(0)
ROUND8W( 0 );
ROUND8W( 1 );
ROUND8W( 2 );
ROUND8W( 3 );
ROUND8W( 4 );
ROUND8W( 5 );
ROUND8W( 6 );
ROUND8W( 7 );
ROUND8W( 8 );
ROUND8W( 9 );
for( size_t i = 0; i < 8; ++i )
S->h[i] = _mm256_xor_si256( _mm256_xor_si256( S->h[i], v[i] ), v[i + 8] );
#undef G8W
#undef ROUND8W
return 0;
}
int blake2s_8way_init( blake2s_8way_state *S, const uint8_t outlen )
{
blake2s_nway_param P[1];
P->digest_length = outlen;
P->key_length = 0;
P->fanout = 1;
P->depth = 1;
P->leaf_length = 0;
*((uint64_t*)(P->node_offset)) = 0;
P->node_depth = 0;
P->inner_length = 0;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
memset( S, 0, sizeof( blake2s_8way_state ) );
for( int i = 0; i < 8; ++i )
S->h[i] = _mm256_set1_epi32( blake2s_IV[i] );
uint32_t *p = ( uint32_t * )( P );
/* IV XOR ParamBlock */
for ( size_t i = 0; i < 8; ++i )
S->h[i] = _mm256_xor_si256( S->h[i], _mm256_set1_epi32( p[i] ) );
return 0;
}
int blake2s_8way_update( blake2s_8way_state *S, const void *in,
uint64_t inlen )
{
__m256i *input = (__m256i*)in;
__m256i *buf = (__m256i*)S->buf;
const int bsize = BLAKE2S_BLOCKBYTES;
while( inlen > 0 )
{
size_t left = S->buflen;
if( inlen >= bsize - left )
{
memcpy_256( buf + (left>>2), input, (bsize - left) >> 2 );
S->buflen += bsize - left;
S->t[0] += BLAKE2S_BLOCKBYTES;
S->t[1] += ( S->t[0] < BLAKE2S_BLOCKBYTES );
blake2s_8way_compress( S, buf );
S->buflen = 0;
input += ( bsize >> 2 );
inlen -= bsize;
}
else
{
memcpy_256( buf + ( left>>2 ), input, inlen>>2 );
S->buflen += (size_t) inlen;
input += ( inlen>>2 );
inlen -= inlen;
}
}
return 0;
}
int blake2s_8way_final( blake2s_8way_state *S, void *out, uint8_t outlen )
{
__m256i *buf = (__m256i*)S->buf;
S->t[0] += S->buflen;
S->t[1] += ( S->t[0] < S->buflen );
if ( S->last_node )
S->f[1] = ~0U;
S->f[0] = ~0U;
memset_zero_256( buf + ( S->buflen>>2 ),
( BLAKE2S_BLOCKBYTES - S->buflen ) >> 2 );
blake2s_8way_compress( S, buf );
for ( int i = 0; i < 8; ++i )
casti_m256i( out, i ) = S->h[ i ];
return 0;
}
#endif // __AVX2__
#if 0
int blake2s( uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen )
{
blake2s_state S[1];
/* Verify parameters */
if ( NULL == in ) return -1;
if ( NULL == out ) return -1;
if ( NULL == key ) keylen = 0; /* Fail here instead if keylen != 0 and key == NULL? */
if( keylen > 0 )
{
if( blake2s_init_key( S, outlen, key, keylen ) < 0 ) return -1;
}
else
{
if( blake2s_init( S, outlen ) < 0 ) return -1;
}
blake2s_update( S, ( uint8_t * )in, inlen );
blake2s_final( S, out, outlen );
return 0;
}
#endif
#endif // __SSE4_2__

algo/blake/blake2s-hash.c Normal file

@@ -0,0 +1,689 @@
/**
* BLAKE2 reference source code package - reference C implementations
*
* Written in 2012 by Samuel Neves <sneves@dei.uc.pt>
*
* To the extent possible under law, the author(s) have dedicated all copyright
* and related and neighboring rights to this software to the public domain
* worldwide. This software is distributed without any warranty.
*
* You should have received a copy of the CC0 Public Domain Dedication along with
* this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#include "blake2s-hash.h"
#include "simd-utils.h"
#include <stdint.h>
#include <string.h>
#include <stdio.h>
//#if defined(__SSE4_2__)
#if defined(__SSE2__) || defined(__ARM_NEON)
/*
static const uint32_t blake2s_IV[8] =
{
0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL,
0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL
};
*/
static const uint8_t blake2s_sigma[10][16] =
{
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } ,
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } ,
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } ,
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } ,
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } ,
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } ,
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } ,
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } ,
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } ,
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 } ,
};
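// sigma[r][i] gives the index of the message word used at position i of
// round r; BLAKE2s runs 10 rounds, one row of the schedule per round.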
// define a constant for initial param.
int blake2s_4way_init( blake2s_4way_state *S, const uint8_t outlen )
{
blake2s_nway_param P[1];
P->digest_length = outlen;
P->key_length = 0;
P->fanout = 1;
P->depth = 1;
P->leaf_length = 0;
*((uint64_t*)(P->node_offset)) = 0;
P->node_depth = 0;
P->inner_length = 0;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
memset( S, 0, sizeof( blake2s_4way_state ) );
S->h[0] = v128_64( 0x6A09E6676A09E667ULL );
S->h[1] = v128_64( 0xBB67AE85BB67AE85ULL );
S->h[2] = v128_64( 0x3C6EF3723C6EF372ULL );
S->h[3] = v128_64( 0xA54FF53AA54FF53AULL );
S->h[4] = v128_64( 0x510E527F510E527FULL );
S->h[5] = v128_64( 0x9B05688C9B05688CULL );
S->h[6] = v128_64( 0x1F83D9AB1F83D9ABULL );
S->h[7] = v128_64( 0x5BE0CD195BE0CD19ULL );
// for( int i = 0; i < 8; ++i )
// S->h[i] = v128_32( blake2s_IV[i] );
uint32_t *p = ( uint32_t * )( P );
/* IV XOR ParamBlock */
for ( size_t i = 0; i < 8; ++i )
S->h[i] = v128_xor( S->h[i], v128_32( p[i] ) );
return 0;
}
int blake2s_4way_compress( blake2s_4way_state *S, const v128_t* block )
{
v128_t m[16];
v128_t v[16];
v128_memcpy( m, block, 16 );
v128_memcpy( v, S->h, 8 );
v[ 8] = v128_64( 0x6A09E6676A09E667ULL );
v[ 9] = v128_64( 0xBB67AE85BB67AE85ULL );
v[10] = v128_64( 0x3C6EF3723C6EF372ULL );
v[11] = v128_64( 0xA54FF53AA54FF53AULL );
v[12] = v128_xor( v128_32( S->t[0] ),
v128_64( 0x510E527F510E527FULL ) );
v[13] = v128_xor( v128_32( S->t[1] ),
v128_64( 0x9B05688C9B05688CULL ) );
v[14] = v128_xor( v128_32( S->f[0] ),
v128_64( 0x1F83D9AB1F83D9ABULL ) );
v[15] = v128_xor( v128_32( S->f[1] ),
v128_64( 0x5BE0CD195BE0CD19ULL ) );
#define G4W( sigma0, sigma1, a, b, c, d ) \
do { \
uint8_t s0 = sigma0; \
uint8_t s1 = sigma1; \
a = v128_add32( v128_add32( a, b ), m[ s0 ] ); \
d = v128_ror32( v128_xor( d, a ), 16 ); \
c = v128_add32( c, d ); \
b = v128_ror32( v128_xor( b, c ), 12 ); \
a = v128_add32( v128_add32( a, b ), m[ s1 ] ); \
d = v128_ror32( v128_xor( d, a ), 8 ); \
c = v128_add32( c, d ); \
b = v128_ror32( v128_xor( b, c ), 7 ); \
} while(0)
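// G4W is one BLAKE2s quarter-round (the G function) applied to all four
// lanes at once: 16, 12, 8 and 7 are the standard BLAKE2s rotation
// constants, and s0/s1 are the two message-word indices supplied by the
// sigma schedule for this position in the round.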
#define ROUND4W(r) \
do { \
uint8_t *sigma = (uint8_t*)&blake2s_sigma[r]; \
G4W( sigma[ 0], sigma[ 1], v[ 0], v[ 4], v[ 8], v[12] ); \
G4W( sigma[ 2], sigma[ 3], v[ 1], v[ 5], v[ 9], v[13] ); \
G4W( sigma[ 4], sigma[ 5], v[ 2], v[ 6], v[10], v[14] ); \
G4W( sigma[ 6], sigma[ 7], v[ 3], v[ 7], v[11], v[15] ); \
G4W( sigma[ 8], sigma[ 9], v[ 0], v[ 5], v[10], v[15] ); \
G4W( sigma[10], sigma[11], v[ 1], v[ 6], v[11], v[12] ); \
G4W( sigma[12], sigma[13], v[ 2], v[ 7], v[ 8], v[13] ); \
G4W( sigma[14], sigma[15], v[ 3], v[ 4], v[ 9], v[14] ); \
} while(0)
ROUND4W( 0 );
ROUND4W( 1 );
ROUND4W( 2 );
ROUND4W( 3 );
ROUND4W( 4 );
ROUND4W( 5 );
ROUND4W( 6 );
ROUND4W( 7 );
ROUND4W( 8 );
ROUND4W( 9 );
for( size_t i = 0; i < 8; ++i )
S->h[i] = v128_xor( v128_xor( S->h[i], v[i] ), v[i + 8] );
#undef G4W
#undef ROUND4W
return 0;
}
// There is a problem that can't be resolved internally.
// If the last block is a full 64 bytes it should not be compressed in
// update but left for final. However, when streaming, it isn't known
// which block is last. There may be a subsequent call to update to add
// more data.
//
// The reference code handled this by juggling 2 blocks at a time at
// a significant performance penalty.
//
// Instead a new function is introduced called full_blocks which combines
// update and final and is to be used in non-streaming mode where the data
// is a multiple of 64 bytes.
//
// Supported:
// 64 + 16 bytes (blake2s with midstate optimization)
// 80 bytes (blake2s without midstate optimization)
// Any multiple of 64 bytes in one shot (x25x)
//
// Unsupported:
// A stream of full 64-byte blocks fed one block at a time; use update
// only when more data will follow or the final block is not full.
// (See the usage sketch after blake2s_4way_full_blocks below.)
int blake2s_4way_update( blake2s_4way_state *S, const void *in,
uint64_t inlen )
{
v128_t *input = (v128_t*)in;
v128_t *buf = (v128_t*)S->buf;
while( inlen > 0 )
{
size_t left = S->buflen;
if( inlen >= 64 - left )
{
v128_memcpy( buf + (left>>2), input, (64 - left) >> 2 );
S->buflen += 64 - left;
S->t[0] += 64;
S->t[1] += ( S->t[0] < 64 );
blake2s_4way_compress( S, buf );
S->buflen = 0;
input += ( 64 >> 2 );
inlen -= 64;
}
else
{
v128_memcpy( buf + ( left>>2 ), input, inlen>>2 );
S->buflen += (size_t) inlen;
input += ( inlen>>2 );
inlen -= inlen;
}
}
return 0;
}
int blake2s_4way_final( blake2s_4way_state *S, void *out, uint8_t outlen )
{
v128_t *buf = (v128_t*)S->buf;
S->t[0] += S->buflen;
S->t[1] += ( S->t[0] < S->buflen );
if ( S->last_node )
S->f[1] = ~0U;
S->f[0] = ~0U;
v128_memset_zero( buf + ( S->buflen>>2 ),
( 64 - S->buflen ) >> 2 );
blake2s_4way_compress( S, buf );
for ( int i = 0; i < 8; ++i )
casti_v128( out, i ) = S->h[ i ];
return 0;
}
// Update and final when inlen is a multiple of 64 bytes
int blake2s_4way_full_blocks( blake2s_4way_state *S, void *out,
const void *input, uint64_t inlen )
{
v128_t *in = (v128_t*)input;
v128_t *buf = (v128_t*)S->buf;
while( inlen > 64 )
{
v128_memcpy( buf, in, 64 >> 2 );
S->buflen = 64;
inlen -= 64;
S->t[0] += 64;
S->t[1] += ( S->t[0] < 64 );
blake2s_4way_compress( S, buf );
S->buflen = 0;
in += ( 64 >> 2 );
}
// last block
v128_memcpy( buf, in, 64 >> 2 );
S->buflen = 64;
S->t[0] += S->buflen;
S->t[1] += ( S->t[0] < S->buflen );
if ( S->last_node ) S->f[1] = ~0U;
S->f[0] = ~0U;
blake2s_4way_compress( S, buf );
for ( int i = 0; i < 8; ++i )
casti_v128( out, i ) = S->h[ i ];
return 0;
}
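// Illustrative usage sketch, not part of the original source: the scanhash
// code elsewhere hashes the constant first 64 bytes of the interleaved
// 80-byte headers once per job and clones that midstate per nonce, while
// full_blocks is the one-shot path for exact multiples of 64 bytes.
// The names below are hypothetical.
#if 0
static __thread blake2s_4way_state example_midstate;

static void example_set_job( const void *vdata80x4 )
{
   blake2s_4way_init( &example_midstate, 32 );
   blake2s_4way_update( &example_midstate, vdata80x4, 64 );
}

static void example_hash_nonce( void *out32x4, const void *vdata80x4 )
{
   blake2s_4way_state ctx;
   memcpy( &ctx, &example_midstate, sizeof ctx );       // resume midstate
   blake2s_4way_update( &ctx, vdata80x4 + (64<<2), 16 ); // final 16 bytes
   blake2s_4way_final( &ctx, out32x4, 32 );
}
#endif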
#if defined(__AVX2__)
// The commented code below is slower on Intel but faster on Zen1 AVX2;
// it is also faster than the Zen1 AVX build. Ryzen gen 2 is untested at
// this time.
int blake2s_8way_compress( blake2s_8way_state *S, const __m256i *block )
{
__m256i m[16];
__m256i v[16];
memcpy_256( m, block, 16 );
memcpy_256( v, S->h, 8 );
v[ 8] = v256_64( 0x6A09E6676A09E667ULL );
v[ 9] = v256_64( 0xBB67AE85BB67AE85ULL );
v[10] = v256_64( 0x3C6EF3723C6EF372ULL );
v[11] = v256_64( 0xA54FF53AA54FF53AULL );
v[12] = _mm256_xor_si256( v256_32( S->t[0] ),
v256_64( 0x510E527F510E527FULL ) );
v[13] = _mm256_xor_si256( v256_32( S->t[1] ),
v256_64( 0x9B05688C9B05688CULL ) );
v[14] = _mm256_xor_si256( v256_32( S->f[0] ),
v256_64( 0x1F83D9AB1F83D9ABULL ) );
v[15] = _mm256_xor_si256( v256_32( S->f[1] ),
v256_64( 0x5BE0CD195BE0CD19ULL ) );
/*
v[ 8] = v256_32( blake2s_IV[0] );
v[ 9] = v256_32( blake2s_IV[1] );
v[10] = v256_32( blake2s_IV[2] );
v[11] = v256_32( blake2s_IV[3] );
v[12] = _mm256_xor_si256( v256_32( S->t[0] ),
v256_32( blake2s_IV[4] ) );
v[13] = _mm256_xor_si256( v256_32( S->t[1] ),
v256_32( blake2s_IV[5] ) );
v[14] = _mm256_xor_si256( v256_32( S->f[0] ),
v256_32( blake2s_IV[6] ) );
v[15] = _mm256_xor_si256( v256_32( S->f[1] ),
v256_32( blake2s_IV[7] ) );
#define G8W(r,i,a,b,c,d) \
do { \
a = _mm256_add_epi32( _mm256_add_epi32( a, b ), \
m[ blake2s_sigma[r][2*i+0] ] ); \
d = mm256_ror_32( _mm256_xor_si256( d, a ), 16 ); \
c = _mm256_add_epi32( c, d ); \
b = mm256_ror_32( _mm256_xor_si256( b, c ), 12 ); \
a = _mm256_add_epi32( _mm256_add_epi32( a, b ), \
m[ blake2s_sigma[r][2*i+1] ] ); \
d = mm256_ror_32( _mm256_xor_si256( d, a ), 8 ); \
c = _mm256_add_epi32( c, d ); \
b = mm256_ror_32( _mm256_xor_si256( b, c ), 7 ); \
} while(0)
*/
#define G8W( sigma0, sigma1, a, b, c, d) \
do { \
uint8_t s0 = sigma0; \
uint8_t s1 = sigma1; \
a = _mm256_add_epi32( _mm256_add_epi32( a, b ), m[ s0 ] ); \
d = mm256_ror_32( _mm256_xor_si256( d, a ), 16 ); \
c = _mm256_add_epi32( c, d ); \
b = mm256_ror_32( _mm256_xor_si256( b, c ), 12 ); \
a = _mm256_add_epi32( _mm256_add_epi32( a, b ), m[ s1 ] ); \
d = mm256_ror_32( _mm256_xor_si256( d, a ), 8 ); \
c = _mm256_add_epi32( c, d ); \
b = mm256_ror_32( _mm256_xor_si256( b, c ), 7 ); \
} while(0)
#define ROUND8W(r) \
do { \
uint8_t *sigma = (uint8_t*)&blake2s_sigma[r]; \
G8W( sigma[ 0], sigma[ 1], v[ 0], v[ 4], v[ 8], v[12] ); \
G8W( sigma[ 2], sigma[ 3], v[ 1], v[ 5], v[ 9], v[13] ); \
G8W( sigma[ 4], sigma[ 5], v[ 2], v[ 6], v[10], v[14] ); \
G8W( sigma[ 6], sigma[ 7], v[ 3], v[ 7], v[11], v[15] ); \
G8W( sigma[ 8], sigma[ 9], v[ 0], v[ 5], v[10], v[15] ); \
G8W( sigma[10], sigma[11], v[ 1], v[ 6], v[11], v[12] ); \
G8W( sigma[12], sigma[13], v[ 2], v[ 7], v[ 8], v[13] ); \
G8W( sigma[14], sigma[15], v[ 3], v[ 4], v[ 9], v[14] ); \
} while(0)
/*
#define ROUND8W(r) \
do { \
G8W( r, 0, v[ 0], v[ 4], v[ 8], v[12] ); \
G8W( r, 1, v[ 1], v[ 5], v[ 9], v[13] ); \
G8W( r, 2, v[ 2], v[ 6], v[10], v[14] ); \
G8W( r, 3, v[ 3], v[ 7], v[11], v[15] ); \
G8W( r, 4, v[ 0], v[ 5], v[10], v[15] ); \
G8W( r, 5, v[ 1], v[ 6], v[11], v[12] ); \
G8W( r, 6, v[ 2], v[ 7], v[ 8], v[13] ); \
G8W( r, 7, v[ 3], v[ 4], v[ 9], v[14] ); \
} while(0)
*/
ROUND8W( 0 );
ROUND8W( 1 );
ROUND8W( 2 );
ROUND8W( 3 );
ROUND8W( 4 );
ROUND8W( 5 );
ROUND8W( 6 );
ROUND8W( 7 );
ROUND8W( 8 );
ROUND8W( 9 );
for( size_t i = 0; i < 8; ++i )
S->h[i] = mm256_xor3( S->h[i], v[i], v[i + 8] );
#undef G8W
#undef ROUND8W
return 0;
}
int blake2s_8way_init( blake2s_8way_state *S, const uint8_t outlen )
{
blake2s_nway_param P[1];
P->digest_length = outlen;
P->key_length = 0;
P->fanout = 1;
P->depth = 1;
P->leaf_length = 0;
*((uint64_t*)(P->node_offset)) = 0;
P->node_depth = 0;
P->inner_length = 0;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
memset( S, 0, sizeof( blake2s_8way_state ) );
S->h[0] = v256_64( 0x6A09E6676A09E667ULL );
S->h[1] = v256_64( 0xBB67AE85BB67AE85ULL );
S->h[2] = v256_64( 0x3C6EF3723C6EF372ULL );
S->h[3] = v256_64( 0xA54FF53AA54FF53AULL );
S->h[4] = v256_64( 0x510E527F510E527FULL );
S->h[5] = v256_64( 0x9B05688C9B05688CULL );
S->h[6] = v256_64( 0x1F83D9AB1F83D9ABULL );
S->h[7] = v256_64( 0x5BE0CD195BE0CD19ULL );
// for( int i = 0; i < 8; ++i )
// S->h[i] = v256_32( blake2s_IV[i] );
uint32_t *p = ( uint32_t * )( P );
/* IV XOR ParamBlock */
for ( size_t i = 0; i < 8; ++i )
S->h[i] = _mm256_xor_si256( S->h[i], v256_32( p[i] ) );
return 0;
}
int blake2s_8way_update( blake2s_8way_state *S, const void *in,
uint64_t inlen )
{
__m256i *input = (__m256i*)in;
__m256i *buf = (__m256i*)S->buf;
const int bsize = 64;
while( inlen > 0 )
{
size_t left = S->buflen;
if( inlen >= bsize - left )
{
memcpy_256( buf + (left>>2), input, (bsize - left) >> 2 );
S->buflen += bsize - left;
S->t[0] += 64;
S->t[1] += ( S->t[0] < 64 );
blake2s_8way_compress( S, buf );
S->buflen = 0;
input += ( bsize >> 2 );
inlen -= bsize;
}
else
{
memcpy_256( buf + ( left>>2 ), input, inlen>>2 );
S->buflen += (size_t) inlen;
input += ( inlen>>2 );
inlen -= inlen;
}
}
return 0;
}
int blake2s_8way_final( blake2s_8way_state *S, void *out, uint8_t outlen )
{
__m256i *buf = (__m256i*)S->buf;
S->t[0] += S->buflen;
S->t[1] += ( S->t[0] < S->buflen );
if ( S->last_node )
S->f[1] = ~0U;
S->f[0] = ~0U;
memset_zero_256( buf + ( S->buflen>>2 ),( 64 - S->buflen ) >> 2 );
blake2s_8way_compress( S, buf );
for ( int i = 0; i < 8; ++i )
casti_m256i( out, i ) = S->h[ i ];
return 0;
}
// Update and final when inlen is a multiple of 64 bytes
int blake2s_8way_full_blocks( blake2s_8way_state *S, void *out,
const void *input, uint64_t inlen )
{
__m256i *in = (__m256i*)input;
__m256i *buf = (__m256i*)S->buf;
while( inlen > 64 )
{
memcpy_256( buf, in, 64 >> 2 );
S->buflen = 64;
inlen -= 64;
S->t[0] += 64;
S->t[1] += ( S->t[0] < 64 );
blake2s_8way_compress( S, buf );
S->buflen = 0;
in += ( 64 >> 2 );
}
// last block
memcpy_256( buf, in, 64 >> 2 );
S->buflen = 64;
S->t[0] += S->buflen;
S->t[1] += ( S->t[0] < S->buflen );
if ( S->last_node ) S->f[1] = ~0U;
S->f[0] = ~0U;
blake2s_8way_compress( S, buf );
for ( int i = 0; i < 8; ++i )
casti_m256i( out, i ) = S->h[ i ];
return 0;
}
#endif // __AVX2__
#if defined(SIMD512)
// Blake2s-256 16 way
int blake2s_16way_compress( blake2s_16way_state *S, const __m512i *block )
{
__m512i m[16];
__m512i v[16];
memcpy_512( m, block, 16 );
memcpy_512( v, S->h, 8 );
v[ 8] = v512_64( 0x6A09E6676A09E667ULL );
v[ 9] = v512_64( 0xBB67AE85BB67AE85ULL );
v[10] = v512_64( 0x3C6EF3723C6EF372ULL );
v[11] = v512_64( 0xA54FF53AA54FF53AULL );
v[12] = _mm512_xor_si512( v512_32( S->t[0] ),
v512_64( 0x510E527F510E527FULL ) );
v[13] = _mm512_xor_si512( v512_32( S->t[1] ),
v512_64( 0x9B05688C9B05688CULL ) );
v[14] = _mm512_xor_si512( v512_32( S->f[0] ),
v512_64( 0x1F83D9AB1F83D9ABULL ) );
v[15] = _mm512_xor_si512( v512_32( S->f[1] ),
v512_64( 0x5BE0CD195BE0CD19ULL ) );
#define G16W( sigma0, sigma1, a, b, c, d) \
do { \
uint8_t s0 = sigma0; \
uint8_t s1 = sigma1; \
a = _mm512_add_epi32( _mm512_add_epi32( a, b ), m[ s0 ] ); \
d = mm512_ror_32( _mm512_xor_si512( d, a ), 16 ); \
c = _mm512_add_epi32( c, d ); \
b = mm512_ror_32( _mm512_xor_si512( b, c ), 12 ); \
a = _mm512_add_epi32( _mm512_add_epi32( a, b ), m[ s1 ] ); \
d = mm512_ror_32( _mm512_xor_si512( d, a ), 8 ); \
c = _mm512_add_epi32( c, d ); \
b = mm512_ror_32( _mm512_xor_si512( b, c ), 7 ); \
} while(0)
#define ROUND16W(r) \
do { \
uint8_t *sigma = (uint8_t*)&blake2s_sigma[r]; \
G16W( sigma[ 0], sigma[ 1], v[ 0], v[ 4], v[ 8], v[12] ); \
G16W( sigma[ 2], sigma[ 3], v[ 1], v[ 5], v[ 9], v[13] ); \
G16W( sigma[ 4], sigma[ 5], v[ 2], v[ 6], v[10], v[14] ); \
G16W( sigma[ 6], sigma[ 7], v[ 3], v[ 7], v[11], v[15] ); \
G16W( sigma[ 8], sigma[ 9], v[ 0], v[ 5], v[10], v[15] ); \
G16W( sigma[10], sigma[11], v[ 1], v[ 6], v[11], v[12] ); \
G16W( sigma[12], sigma[13], v[ 2], v[ 7], v[ 8], v[13] ); \
G16W( sigma[14], sigma[15], v[ 3], v[ 4], v[ 9], v[14] ); \
} while(0)
ROUND16W( 0 );
ROUND16W( 1 );
ROUND16W( 2 );
ROUND16W( 3 );
ROUND16W( 4 );
ROUND16W( 5 );
ROUND16W( 6 );
ROUND16W( 7 );
ROUND16W( 8 );
ROUND16W( 9 );
for( size_t i = 0; i < 8; ++i )
S->h[i] = mm512_xor3( S->h[i], v[i], v[i + 8] );
#undef G16W
#undef ROUND16W
return 0;
}
int blake2s_16way_init( blake2s_16way_state *S, const uint8_t outlen )
{
blake2s_nway_param P[1];
P->digest_length = outlen;
P->key_length = 0;
P->fanout = 1;
P->depth = 1;
P->leaf_length = 0;
*((uint64_t*)(P->node_offset)) = 0;
P->node_depth = 0;
P->inner_length = 0;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
memset( S, 0, sizeof( blake2s_16way_state ) );
S->h[0] = v512_64( 0x6A09E6676A09E667ULL );
S->h[1] = v512_64( 0xBB67AE85BB67AE85ULL );
S->h[2] = v512_64( 0x3C6EF3723C6EF372ULL );
S->h[3] = v512_64( 0xA54FF53AA54FF53AULL );
S->h[4] = v512_64( 0x510E527F510E527FULL );
S->h[5] = v512_64( 0x9B05688C9B05688CULL );
S->h[6] = v512_64( 0x1F83D9AB1F83D9ABULL );
S->h[7] = v512_64( 0x5BE0CD195BE0CD19ULL );
uint32_t *p = ( uint32_t * )( P );
/* IV XOR ParamBlock */
for ( size_t i = 0; i < 8; ++i )
S->h[i] = _mm512_xor_si512( S->h[i], v512_32( p[i] ) );
return 0;
}
int blake2s_16way_update( blake2s_16way_state *S, const void *in,
uint64_t inlen )
{
__m512i *input = (__m512i*)in;
__m512i *buf = (__m512i*)S->buf;
const int bsize = 64;
while( inlen > 0 )
{
size_t left = S->buflen;
if( inlen >= bsize - left )
{
memcpy_512( buf + (left>>2), input, (bsize - left) >> 2 );
S->buflen += bsize - left;
S->t[0] += 64;
S->t[1] += ( S->t[0] < 64 );
blake2s_16way_compress( S, buf );
S->buflen = 0;
input += ( bsize >> 2 );
inlen -= bsize;
}
else
{
memcpy_512( buf + ( left>>2 ), input, inlen>>2 );
S->buflen += (size_t) inlen;
input += ( inlen>>2 );
inlen -= inlen;
}
}
return 0;
}
int blake2s_16way_final( blake2s_16way_state *S, void *out, uint8_t outlen )
{
__m512i *buf = (__m512i*)S->buf;
S->t[0] += S->buflen;
S->t[1] += ( S->t[0] < S->buflen );
if ( S->last_node )
S->f[1] = ~0U;
S->f[0] = ~0U;
memset_zero_512( buf + ( S->buflen>>2 ),
( 64 - S->buflen ) >> 2 );
blake2s_16way_compress( S, buf );
for ( int i = 0; i < 8; ++i )
casti_m512i( out, i ) = S->h[ i ];
return 0;
}
#endif // AVX512
#if 0
int blake2s( uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen )
{
blake2s_state S[1];
/* Verify parameters */
if ( NULL == in ) return -1;
if ( NULL == out ) return -1;
if ( NULL == key ) keylen = 0; /* Fail here instead if keylen != 0 and key == NULL? */
if( keylen > 0 )
{
if( blake2s_init_key( S, outlen, key, keylen ) < 0 ) return -1;
}
else
{
if( blake2s_init( S, outlen ) < 0 ) return -1;
}
blake2s_update( S, ( uint8_t * )in, inlen );
blake2s_final( S, out, outlen );
return 0;
}
#endif
#endif // __SSE2__ || __ARM_NEON


@@ -11,10 +11,10 @@
* this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
//#pragma once
#ifndef __BLAKE2S_HASH_4WAY_H__
#define __BLAKE2S_HASH_4WAY_H__ 1
#ifndef BLAKE2S_HASH_4WAY_H__
#define BLAKE2S_HASH_4WAY_H__ 1
#if defined(__SSE4_2__)
#if defined(__SSE2__) || defined(__ARM_NEON)
#include "simd-utils.h"
@@ -29,21 +29,6 @@
#define ALIGN(x) __attribute__((aligned(x)))
#endif
#if defined(__cplusplus)
extern "C" {
#endif
enum blake2s_constant
{
BLAKE2S_BLOCKBYTES = 64,
BLAKE2S_OUTBYTES = 32,
BLAKE2S_KEYBYTES = 32,
BLAKE2S_SALTBYTES = 8,
BLAKE2S_PERSONALBYTES = 8
};
#pragma pack(push, 1)
typedef struct __blake2s_nway_param
{
uint8_t digest_length; // 1
@@ -55,15 +40,14 @@ typedef struct __blake2s_nway_param
uint8_t node_depth; // 15
uint8_t inner_length; // 16
// uint8_t reserved[0];
uint8_t salt[BLAKE2S_SALTBYTES]; // 24
uint8_t personal[BLAKE2S_PERSONALBYTES]; // 32
uint8_t salt[8]; // 24
uint8_t personal[8]; // 32
} blake2s_nway_param;
#pragma pack(pop)
ALIGN( 64 ) typedef struct __blake2s_4way_state
typedef struct ALIGN( 64 ) __blake2s_4way_state
{
__m128i h[8];
uint8_t buf[ BLAKE2S_BLOCKBYTES * 4 ];
v128_t h[8];
uint8_t buf[ 64 * 4 ];
uint32_t t[2];
uint32_t f[2];
size_t buflen;
@@ -74,13 +58,16 @@ int blake2s_4way_init( blake2s_4way_state *S, const uint8_t outlen );
int blake2s_4way_update( blake2s_4way_state *S, const void *in,
uint64_t inlen );
int blake2s_4way_final( blake2s_4way_state *S, void *out, uint8_t outlen );
int blake2s_4way_full_blocks( blake2s_4way_state *S, void *out,
const void *input, uint64_t inlen );
#if defined(__AVX2__)
ALIGN( 64 ) typedef struct __blake2s_8way_state
typedef struct ALIGN( 64 ) __blake2s_8way_state
{
__m256i h[8];
uint8_t buf[ BLAKE2S_BLOCKBYTES * 8 ];
uint8_t buf[ 64 * 8 ];
uint32_t t[2];
uint32_t f[2];
size_t buflen;
@@ -91,6 +78,27 @@ int blake2s_8way_init( blake2s_8way_state *S, const uint8_t outlen );
int blake2s_8way_update( blake2s_8way_state *S, const void *in,
uint64_t inlen );
int blake2s_8way_final( blake2s_8way_state *S, void *out, uint8_t outlen );
int blake2s_8way_full_blocks( blake2s_8way_state *S, void *out,
const void *input, uint64_t inlen );
#endif
#if defined(SIMD512)
typedef struct ALIGN( 64 ) __blake2s_16way_state
{
__m512i h[8];
uint8_t buf[ 64 * 16 ];
uint32_t t[2];
uint32_t f[2];
size_t buflen;
uint8_t last_node;
} blake2s_16way_state ;
int blake2s_16way_init( blake2s_16way_state *S, const uint8_t outlen );
int blake2s_16way_update( blake2s_16way_state *S, const void *in,
uint64_t inlen );
int blake2s_16way_final( blake2s_16way_state *S, void *out, uint8_t outlen );
#endif
@@ -103,10 +111,6 @@ int blake2s_8way_final( blake2s_8way_state *S, void *out, uint8_t outlen );
#define blake2s_simple(out, in, inlen) blake2s(out, in, NULL, 32, inlen, 0)
#endif
#if defined(__cplusplus)
}
#endif
#endif // __SSE4_2__
#endif // __SSE2__
#endif


@@ -1,87 +1,251 @@
#include "blake2s-gate.h"
#include "algo-gate-api.h"
#include "blake2s-hash.h"
#include <string.h>
#include <stdint.h>
#if defined(SIMD512)
#define BLAKE2S_16WAY
#elif defined(__AVX2__)
#define BLAKE2S_8WAY
#elif defined(__SSE2__) || defined(__ARM_NEON)
// #define BLAKE2S_4WAY
#endif
#if defined(BLAKE2S_16WAY)
static __thread blake2s_16way_state blake2s_16w_ctx;
void blake2s_16way_hash( void *output, const void *input )
{
blake2s_16way_state ctx;
memcpy( &ctx, &blake2s_16w_ctx, sizeof ctx );
blake2s_16way_update( &ctx, input + (64<<4), 16 );
blake2s_16way_final( &ctx, output, 32 );
}
int scanhash_blake2s_16way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[20*16] __attribute__ ((aligned (128)));
uint32_t hash[8*16] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (64)));
uint32_t *hash7 = &(hash[7<<4]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
__m512i *noncev = (__m512i*)vdata + 19; // aligned
uint32_t n = first_nonce;
int thr_id = mythr->id;
mm512_bswap32_intrlv80_16x32( vdata, pdata );
blake2s_16way_init( &blake2s_16w_ctx, 32 );
blake2s_16way_update( &blake2s_16w_ctx, vdata, 64 );
do {
*noncev = mm512_bswap_32( _mm512_set_epi32(
n+15, n+14, n+13, n+12, n+11, n+10, n+ 9, n+ 8,
n+ 7, n+ 6, n+ 5, n+ 4, n+ 3, n+ 2, n+ 1, n ) );
pdata[19] = n;
blake2s_16way_hash( hash, vdata );
for ( int lane = 0; lane < 16; lane++ )
if ( unlikely( hash7[lane] <= Htarg ) )
{
extr_lane_16x32( lane_hash, hash, lane, 256 );
if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
{
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
}
}
n += 16;
} while ( (n < max_nonce-16) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;
return 0;
}
#elif defined(BLAKE2S_8WAY)
static __thread blake2s_8way_state blake2s_8w_ctx;
void blake2s_8way_hash( void *output, const void *input )
{
blake2s_8way_state ctx;
memcpy( &ctx, &blake2s_8w_ctx, sizeof ctx );
blake2s_8way_update( &ctx, input + (64<<3), 16 );
blake2s_8way_final( &ctx, output, 32 );
}
int scanhash_blake2s_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t hash[8*8] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[7<<3]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
__m256i *noncev = (__m256i*)vdata + 19; // aligned
uint32_t n = first_nonce;
int thr_id = mythr->id;
mm256_bswap32_intrlv80_8x32( vdata, pdata );
blake2s_8way_init( &blake2s_8w_ctx, 32 );
blake2s_8way_update( &blake2s_8w_ctx, vdata, 64 );
do {
*noncev = mm256_bswap_32( _mm256_set_epi32( n+7, n+6, n+5, n+4,
n+3, n+2, n+1, n ) );
pdata[19] = n;
blake2s_8way_hash( hash, vdata );
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( hash7[lane] <= Htarg ) )
{
extr_lane_8x32( lane_hash, hash, lane, 256 );
if ( likely( fulltest( lane_hash, ptarget ) && !opt_benchmark ) )
{
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
}
}
n += 8;
} while ( (n < max_nonce) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;
return 0;
}
#elif defined(BLAKE2S_4WAY)
static __thread blake2s_4way_state blake2s_4w_ctx;
void blake2s_4way_hash( void *output, const void *input )
{
blake2s_4way_state ctx;
memcpy( &ctx, &blake2s_4w_ctx, sizeof ctx );
blake2s_4way_update( &ctx, input + (64<<2), 16 );
blake2s_4way_final( &ctx, output, 32 );
}
int scanhash_blake2s_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[20*4] __attribute__ ((aligned (64)));
uint32_t hash[8*4] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash7 = &(hash[7<<2]);
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
v128_t *noncev = (v128_t*)vdata + 19; // aligned
uint32_t n = first_nonce;
int thr_id = mythr->id;
v128_bswap32_intrlv80_4x32( vdata, pdata );
blake2s_4way_init( &blake2s_4w_ctx, 32 );
blake2s_4way_update( &blake2s_4w_ctx, vdata, 64 );
do {
*noncev = v128_bswap32( v128_set32( n+3, n+2, n+1, n ) );
pdata[19] = n;
blake2s_4way_hash( hash, vdata );
for ( int lane = 0; lane < 4; lane++ ) if ( hash7[lane] <= Htarg )
{
extr_lane_4x32( lane_hash, hash, lane, 256 );
if ( fulltest( lane_hash, ptarget ) && !opt_benchmark )
{
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
}
}
n += 4;
} while ( (n < max_nonce) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;
return 0;
}
#else
#include "sph-blake2s.h"
static __thread blake2s_state blake2s_ctx;
//static __thread blake2s_state s_ctx;
#define MIDLEN 76
void blake2s_hash( void *output, const void *input )
{
unsigned char _ALIGN(64) hash[BLAKE2S_OUTBYTES];
blake2s_state ctx __attribute__ ((aligned (64)));
unsigned char _ALIGN(32) hash[32];
blake2s_state ctx __attribute__ ((aligned (32)));
memcpy( &ctx, &blake2s_ctx, sizeof ctx );
blake2s_update( &ctx, input+64, 16 );
// blake2s_init(&ctx, BLAKE2S_OUTBYTES);
// blake2s_update(&ctx, input, 80);
blake2s_final( &ctx, hash, BLAKE2S_OUTBYTES );
blake2s_final( &ctx, hash, 32 );
memcpy(output, hash, 32);
memcpy(output, hash, 32);
}
/*
static void blake2s_hash_end(uint32_t *output, const uint32_t *input)
int scanhash_blake2s( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
s_ctx.buflen = MIDLEN;
memcpy(&s_ctx, &s_midstate, 32 + 16 + MIDLEN);
blake2s_update(&s_ctx, (uint8_t*) &input[MIDLEN/4], 80 - MIDLEN);
blake2s_final(&s_ctx, (uint8_t*) output, BLAKE2S_OUTBYTES);
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
uint32_t _ALIGN(32) hash32[8];
uint32_t _ALIGN(32) endiandata[20];
const int thr_id = mythr->id;
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
v128_bswap32_80( endiandata, pdata );
// midstate
blake2s_init( &blake2s_ctx, 32 );
blake2s_update( &blake2s_ctx, (uint8_t*) endiandata, 64 );
do
{
endiandata[19] = n;
blake2s_hash( hash32, endiandata );
if ( unlikely( valid_hash( hash32, ptarget ) ) && !opt_benchmark )
{
pdata[19] = bswap_32( n );
submit_solution( work, hash32, mythr );
}
n++;
} while (n < max_nonce && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
return 0;
}
*/
int scanhash_blake2s( struct work *work,
uint32_t max_nonce, uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t _ALIGN(64) hash64[8];
uint32_t _ALIGN(64) endiandata[20];
int thr_id = mythr->id; // thr_id arg is deprecated
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[19];
uint32_t n = first_nonce;
swab32_array( endiandata, pdata, 20 );
// midstate
blake2s_init( &blake2s_ctx, BLAKE2S_OUTBYTES );
blake2s_update( &blake2s_ctx, (uint8_t*) endiandata, 64 );
do {
be32enc(&endiandata[19], n);
blake2s_hash( hash64, endiandata );
if (hash64[7] < Htarg && fulltest(hash64, ptarget)) {
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
return true;
}
n++;
} while (n < max_nonce && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
pdata[19] = n;
return 0;
}
/*
// changed to get_max64_0x3fffffLL in cpuminer-multi-decred
int64_t blake2s_get_max64 ()
{
return 0x7ffffLL;
}
#endif
bool register_blake2s_algo( algo_gate_t* gate )
{
#if defined(BLAKE2S_16WAY)
gate->scanhash = (void*)&scanhash_blake2s_16way;
gate->hash = (void*)&blake2s_16way_hash;
#elif defined(BLAKE2S_8WAY)
gate->scanhash = (void*)&scanhash_blake2s_8way;
gate->hash = (void*)&blake2s_8way_hash;
#elif defined(BLAKE2S_4WAY)
gate->scanhash = (void*)&scanhash_blake2s_4way;
gate->hash = (void*)&blake2s_4way_hash;
#else
gate->scanhash = (void*)&scanhash_blake2s;
gate->hash = (void*)&blake2s_hash;
gate->get_max64 = (void*)&blake2s_get_max64;
#endif
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT | NEON_OPT;
return true;
};
*/


@@ -1,701 +0,0 @@
/* $Id: blake.c 252 2011-06-07 17:55:14Z tp $ */
/*
* BLAKE implementation.
*
* ==========================(LICENSE BEGIN)============================
*
* Copyright (c) 2007-2010 Projet RNRT SAPHIR
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* ===========================(LICENSE END)=============================
*
* @author Thomas Pornin <thomas.pornin@cryptolog.com>
*/
#if defined (__AVX2__)
#include <stddef.h>
#include <string.h>
#include <limits.h>
#include "blake-hash-4way.h"
#ifdef __cplusplus
extern "C"{
#endif
#if SPH_SMALL_FOOTPRINT && !defined SPH_SMALL_FOOTPRINT_BLAKE
#define SPH_SMALL_FOOTPRINT_BLAKE 1
#endif
#if SPH_64 && (SPH_SMALL_FOOTPRINT_BLAKE || !SPH_64_TRUE)
#define SPH_COMPACT_BLAKE_64 1
#endif
#ifdef _MSC_VER
#pragma warning (disable: 4146)
#endif
// Blake-512
static const sph_u64 IV512[8] = {
SPH_C64(0x6A09E667F3BCC908), SPH_C64(0xBB67AE8584CAA73B),
SPH_C64(0x3C6EF372FE94F82B), SPH_C64(0xA54FF53A5F1D36F1),
SPH_C64(0x510E527FADE682D1), SPH_C64(0x9B05688C2B3E6C1F),
SPH_C64(0x1F83D9ABFB41BD6B), SPH_C64(0x5BE0CD19137E2179)
};
#if SPH_COMPACT_BLAKE_32 || SPH_COMPACT_BLAKE_64
// Blake-256 4 & 8 way, Blake-512 4 way
static const unsigned sigma[16][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }
};
#endif
#define Z00 0
#define Z01 1
#define Z02 2
#define Z03 3
#define Z04 4
#define Z05 5
#define Z06 6
#define Z07 7
#define Z08 8
#define Z09 9
#define Z0A A
#define Z0B B
#define Z0C C
#define Z0D D
#define Z0E E
#define Z0F F
#define Z10 E
#define Z11 A
#define Z12 4
#define Z13 8
#define Z14 9
#define Z15 F
#define Z16 D
#define Z17 6
#define Z18 1
#define Z19 C
#define Z1A 0
#define Z1B 2
#define Z1C B
#define Z1D 7
#define Z1E 5
#define Z1F 3
#define Z20 B
#define Z21 8
#define Z22 C
#define Z23 0
#define Z24 5
#define Z25 2
#define Z26 F
#define Z27 D
#define Z28 A
#define Z29 E
#define Z2A 3
#define Z2B 6
#define Z2C 7
#define Z2D 1
#define Z2E 9
#define Z2F 4
#define Z30 7
#define Z31 9
#define Z32 3
#define Z33 1
#define Z34 D
#define Z35 C
#define Z36 B
#define Z37 E
#define Z38 2
#define Z39 6
#define Z3A 5
#define Z3B A
#define Z3C 4
#define Z3D 0
#define Z3E F
#define Z3F 8
#define Z40 9
#define Z41 0
#define Z42 5
#define Z43 7
#define Z44 2
#define Z45 4
#define Z46 A
#define Z47 F
#define Z48 E
#define Z49 1
#define Z4A B
#define Z4B C
#define Z4C 6
#define Z4D 8
#define Z4E 3
#define Z4F D
#define Z50 2
#define Z51 C
#define Z52 6
#define Z53 A
#define Z54 0
#define Z55 B
#define Z56 8
#define Z57 3
#define Z58 4
#define Z59 D
#define Z5A 7
#define Z5B 5
#define Z5C F
#define Z5D E
#define Z5E 1
#define Z5F 9
#define Z60 C
#define Z61 5
#define Z62 1
#define Z63 F
#define Z64 E
#define Z65 D
#define Z66 4
#define Z67 A
#define Z68 0
#define Z69 7
#define Z6A 6
#define Z6B 3
#define Z6C 9
#define Z6D 2
#define Z6E 8
#define Z6F B
#define Z70 D
#define Z71 B
#define Z72 7
#define Z73 E
#define Z74 C
#define Z75 1
#define Z76 3
#define Z77 9
#define Z78 5
#define Z79 0
#define Z7A F
#define Z7B 4
#define Z7C 8
#define Z7D 6
#define Z7E 2
#define Z7F A
#define Z80 6
#define Z81 F
#define Z82 E
#define Z83 9
#define Z84 B
#define Z85 3
#define Z86 0
#define Z87 8
#define Z88 C
#define Z89 2
#define Z8A D
#define Z8B 7
#define Z8C 1
#define Z8D 4
#define Z8E A
#define Z8F 5
#define Z90 A
#define Z91 2
#define Z92 8
#define Z93 4
#define Z94 7
#define Z95 6
#define Z96 1
#define Z97 5
#define Z98 F
#define Z99 B
#define Z9A 9
#define Z9B E
#define Z9C 3
#define Z9D C
#define Z9E D
#define Z9F 0
#define Mx(r, i) Mx_(Z ## r ## i)
#define Mx_(n) Mx__(n)
#define Mx__(n) M ## n
// Blake-512 4 way
#define CBx(r, i) CBx_(Z ## r ## i)
#define CBx_(n) CBx__(n)
#define CBx__(n) CB ## n
#define CB0 SPH_C64(0x243F6A8885A308D3)
#define CB1 SPH_C64(0x13198A2E03707344)
#define CB2 SPH_C64(0xA4093822299F31D0)
#define CB3 SPH_C64(0x082EFA98EC4E6C89)
#define CB4 SPH_C64(0x452821E638D01377)
#define CB5 SPH_C64(0xBE5466CF34E90C6C)
#define CB6 SPH_C64(0xC0AC29B7C97C50DD)
#define CB7 SPH_C64(0x3F84D5B5B5470917)
#define CB8 SPH_C64(0x9216D5D98979FB1B)
#define CB9 SPH_C64(0xD1310BA698DFB5AC)
#define CBA SPH_C64(0x2FFD72DBD01ADFB7)
#define CBB SPH_C64(0xB8E1AFED6A267E96)
#define CBC SPH_C64(0xBA7C9045F12C7F99)
#define CBD SPH_C64(0x24A19947B3916CF7)
#define CBE SPH_C64(0x0801F2E2858EFC16)
#define CBF SPH_C64(0x636920D871574E69)
#if SPH_COMPACT_BLAKE_64
// not used
static const sph_u64 CB[16] = {
SPH_C64(0x243F6A8885A308D3), SPH_C64(0x13198A2E03707344),
SPH_C64(0xA4093822299F31D0), SPH_C64(0x082EFA98EC4E6C89),
SPH_C64(0x452821E638D01377), SPH_C64(0xBE5466CF34E90C6C),
SPH_C64(0xC0AC29B7C97C50DD), SPH_C64(0x3F84D5B5B5470917),
SPH_C64(0x9216D5D98979FB1B), SPH_C64(0xD1310BA698DFB5AC),
SPH_C64(0x2FFD72DBD01ADFB7), SPH_C64(0xB8E1AFED6A267E96),
SPH_C64(0xBA7C9045F12C7F99), SPH_C64(0x24A19947B3916CF7),
SPH_C64(0x0801F2E2858EFC16), SPH_C64(0x636920D871574E69)
};
#endif
// Blake-512 4 way
#define GB_4WAY(m0, m1, c0, c1, a, b, c, d) do { \
a = _mm256_add_epi64( _mm256_add_epi64( _mm256_xor_si256( \
_mm256_set_epi64x( c1, c1, c1, c1 ), m0 ), b ), a ); \
d = mm256_ror_64( _mm256_xor_si256( d, a ), 32 ); \
c = _mm256_add_epi64( c, d ); \
b = mm256_ror_64( _mm256_xor_si256( b, c ), 25 ); \
a = _mm256_add_epi64( _mm256_add_epi64( _mm256_xor_si256( \
_mm256_set_epi64x( c0, c0, c0, c0 ), m1 ), b ), a ); \
d = mm256_ror_64( _mm256_xor_si256( d, a ), 16 ); \
c = _mm256_add_epi64( c, d ); \
b = mm256_ror_64( _mm256_xor_si256( b, c ), 11 ); \
} while (0)
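// GB_4WAY is the BLAKE-512 G function across four lanes: each half adds a
// message word XORed with the round constant of its sibling word (m0^c1,
// then m1^c0), and 32, 25, 16, 11 are the BLAKE-512 rotation distances.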
#if SPH_COMPACT_BLAKE_64
// not used
#define ROUND_B_4WAY(r) do { \
GB_4WAY(M[sigma[r][0x0]], M[sigma[r][0x1]], \
CB[sigma[r][0x0]], CB[sigma[r][0x1]], V0, V4, V8, VC); \
GB_4WAY(M[sigma[r][0x2]], M[sigma[r][0x3]], \
CB[sigma[r][0x2]], CB[sigma[r][0x3]], V1, V5, V9, VD); \
GB_4WAY(M[sigma[r][0x4]], M[sigma[r][0x5]], \
CB[sigma[r][0x4]], CB[sigma[r][0x5]], V2, V6, VA, VE); \
GB_4WAY(M[sigma[r][0x6]], M[sigma[r][0x7]], \
CB[sigma[r][0x6]], CB[sigma[r][0x7]], V3, V7, VB, VF); \
GB_4WAY(M[sigma[r][0x8]], M[sigma[r][0x9]], \
CB[sigma[r][0x8]], CB[sigma[r][0x9]], V0, V5, VA, VF); \
GB_4WAY(M[sigma[r][0xA]], M[sigma[r][0xB]], \
CB[sigma[r][0xA]], CB[sigma[r][0xB]], V1, V6, VB, VC); \
GB_4WAY(M[sigma[r][0xC]], M[sigma[r][0xD]], \
CB[sigma[r][0xC]], CB[sigma[r][0xD]], V2, V7, V8, VD); \
GB_4WAY(M[sigma[r][0xE]], M[sigma[r][0xF]], \
CB[sigma[r][0xE]], CB[sigma[r][0xF]], V3, V4, V9, VE); \
} while (0)
#else
// current implementation
#define ROUND_B_4WAY(r) do { \
GB_4WAY(Mx(r, 0), Mx(r, 1), CBx(r, 0), CBx(r, 1), V0, V4, V8, VC); \
GB_4WAY(Mx(r, 2), Mx(r, 3), CBx(r, 2), CBx(r, 3), V1, V5, V9, VD); \
GB_4WAY(Mx(r, 4), Mx(r, 5), CBx(r, 4), CBx(r, 5), V2, V6, VA, VE); \
GB_4WAY(Mx(r, 6), Mx(r, 7), CBx(r, 6), CBx(r, 7), V3, V7, VB, VF); \
GB_4WAY(Mx(r, 8), Mx(r, 9), CBx(r, 8), CBx(r, 9), V0, V5, VA, VF); \
GB_4WAY(Mx(r, A), Mx(r, B), CBx(r, A), CBx(r, B), V1, V6, VB, VC); \
GB_4WAY(Mx(r, C), Mx(r, D), CBx(r, C), CBx(r, D), V2, V7, V8, VD); \
GB_4WAY(Mx(r, E), Mx(r, F), CBx(r, E), CBx(r, F), V3, V4, V9, VE); \
} while (0)
#endif
// Blake-512 4 way
#define DECL_STATE64_4WAY \
__m256i H0, H1, H2, H3, H4, H5, H6, H7; \
__m256i S0, S1, S2, S3; \
sph_u64 T0, T1;
#define READ_STATE64_4WAY(state) do { \
H0 = (state)->H[0]; \
H1 = (state)->H[1]; \
H2 = (state)->H[2]; \
H3 = (state)->H[3]; \
H4 = (state)->H[4]; \
H5 = (state)->H[5]; \
H6 = (state)->H[6]; \
H7 = (state)->H[7]; \
S0 = (state)->S[0]; \
S1 = (state)->S[1]; \
S2 = (state)->S[2]; \
S3 = (state)->S[3]; \
T0 = (state)->T0; \
T1 = (state)->T1; \
} while (0)
#define WRITE_STATE64_4WAY(state) do { \
(state)->H[0] = H0; \
(state)->H[1] = H1; \
(state)->H[2] = H2; \
(state)->H[3] = H3; \
(state)->H[4] = H4; \
(state)->H[5] = H5; \
(state)->H[6] = H6; \
(state)->H[7] = H7; \
(state)->S[0] = S0; \
(state)->S[1] = S1; \
(state)->S[2] = S2; \
(state)->S[3] = S3; \
(state)->T0 = T0; \
(state)->T1 = T1; \
} while (0)
#if SPH_COMPACT_BLAKE_64
// not used
#define COMPRESS64_4WAY do { \
__m256i M[16]; \
__m256i V0, V1, V2, V3, V4, V5, V6, V7; \
__m256i V8, V9, VA, VB, VC, VD, VE, VF; \
unsigned r; \
V0 = H0; \
V1 = H1; \
V2 = H2; \
V3 = H3; \
V4 = H4; \
V5 = H5; \
V6 = H6; \
V7 = H7; \
V8 = _mm256_xor_si256( S0, _mm256_set_epi64x( CB0, CB0, CB0, CB0 ) ); \
V9 = _mm256_xor_si256( S1, _mm256_set_epi64x( CB1, CB1, CB1, CB1 ) ); \
VA = _mm256_xor_si256( S2, _mm256_set_epi64x( CB2, CB2, CB2, CB2 ) ); \
VB = _mm256_xor_si256( S3, _mm256_set_epi64x( CB3, CB3, CB3, CB3 ) ); \
VC = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \
_mm256_set_epi64x( CB4, CB4, CB4, CB4 ) ); \
VD = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \
_mm256_set_epi64x( CB5, CB5, CB5, CB5 ) ); \
VE = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \
_mm256_set_epi64x( CB6, CB6, CB6, CB6 ) ); \
VF = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \
_mm256_set_epi64x( CB7, CB7, CB7, CB7 ) ); \
M[0x0] = mm256_bswap_64( *(buf+0) ); \
M[0x1] = mm256_bswap_64( *(buf+1) ); \
M[0x2] = mm256_bswap_64( *(buf+2) ); \
M[0x3] = mm256_bswap_64( *(buf+3) ); \
M[0x4] = mm256_bswap_64( *(buf+4) ); \
M[0x5] = mm256_bswap_64( *(buf+5) ); \
M[0x6] = mm256_bswap_64( *(buf+6) ); \
M[0x7] = mm256_bswap_64( *(buf+7) ); \
M[0x8] = mm256_bswap_64( *(buf+8) ); \
M[0x9] = mm256_bswap_64( *(buf+9) ); \
M[0xA] = mm256_bswap_64( *(buf+10) ); \
M[0xB] = mm256_bswap_64( *(buf+11) ); \
M[0xC] = mm256_bswap_64( *(buf+12) ); \
M[0xD] = mm256_bswap_64( *(buf+13) ); \
M[0xE] = mm256_bswap_64( *(buf+14) ); \
M[0xF] = mm256_bswap_64( *(buf+15) ); \
for (r = 0; r < 16; r ++) \
ROUND_B_4WAY(r); \
H0 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S0, V0 ), V8 ), H0 ); \
H1 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S1, V1 ), V9 ), H1 ); \
H2 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S2, V2 ), VA ), H2 ); \
H3 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S3, V3 ), VB ), H3 ); \
H4 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S0, V4 ), VC ), H4 ); \
H5 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S1, V5 ), VD ), H5 ); \
H6 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S2, V6 ), VE ), H6 ); \
H7 = _mm256_xor_si256( _mm256_xor_si256( \
_mm256_xor_si256( S3, V7 ), VF ), H7 ); \
} while (0)
#else
// current implementation
#define COMPRESS64_4WAY do \
{ \
__m256i M0, M1, M2, M3, M4, M5, M6, M7; \
__m256i M8, M9, MA, MB, MC, MD, ME, MF; \
__m256i V0, V1, V2, V3, V4, V5, V6, V7; \
__m256i V8, V9, VA, VB, VC, VD, VE, VF; \
__m256i shuf_bswap64; \
V0 = H0; \
V1 = H1; \
V2 = H2; \
V3 = H3; \
V4 = H4; \
V5 = H5; \
V6 = H6; \
V7 = H7; \
V8 = _mm256_xor_si256( S0, _mm256_set1_epi64x( CB0 ) ); \
V9 = _mm256_xor_si256( S1, _mm256_set1_epi64x( CB1 ) ); \
VA = _mm256_xor_si256( S2, _mm256_set1_epi64x( CB2 ) ); \
VB = _mm256_xor_si256( S3, _mm256_set1_epi64x( CB3 ) ); \
VC = _mm256_xor_si256( _mm256_set1_epi64x( T0 ), \
_mm256_set1_epi64x( CB4 ) ); \
VD = _mm256_xor_si256( _mm256_set1_epi64x( T0 ), \
_mm256_set1_epi64x( CB5 ) ); \
VE = _mm256_xor_si256( _mm256_set1_epi64x( T1 ), \
_mm256_set1_epi64x( CB6 ) ); \
VF = _mm256_xor_si256( _mm256_set1_epi64x( T1 ), \
_mm256_set1_epi64x( CB7 ) ); \
shuf_bswap64 = _mm256_set_epi64x( 0x08090a0b0c0d0e0f, 0x0001020304050607, \
0x08090a0b0c0d0e0f, 0x0001020304050607 ); \
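   /* shuf_bswap64 reverses the 8 bytes within each 64-bit lane, giving a big-endian load via vpshufb */ \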
M0 = _mm256_shuffle_epi8( *(buf+ 0), shuf_bswap64 ); \
M1 = _mm256_shuffle_epi8( *(buf+ 1), shuf_bswap64 ); \
M2 = _mm256_shuffle_epi8( *(buf+ 2), shuf_bswap64 ); \
M3 = _mm256_shuffle_epi8( *(buf+ 3), shuf_bswap64 ); \
M4 = _mm256_shuffle_epi8( *(buf+ 4), shuf_bswap64 ); \
M5 = _mm256_shuffle_epi8( *(buf+ 5), shuf_bswap64 ); \
M6 = _mm256_shuffle_epi8( *(buf+ 6), shuf_bswap64 ); \
M7 = _mm256_shuffle_epi8( *(buf+ 7), shuf_bswap64 ); \
M8 = _mm256_shuffle_epi8( *(buf+ 8), shuf_bswap64 ); \
M9 = _mm256_shuffle_epi8( *(buf+ 9), shuf_bswap64 ); \
MA = _mm256_shuffle_epi8( *(buf+10), shuf_bswap64 ); \
MB = _mm256_shuffle_epi8( *(buf+11), shuf_bswap64 ); \
MC = _mm256_shuffle_epi8( *(buf+12), shuf_bswap64 ); \
MD = _mm256_shuffle_epi8( *(buf+13), shuf_bswap64 ); \
ME = _mm256_shuffle_epi8( *(buf+14), shuf_bswap64 ); \
MF = _mm256_shuffle_epi8( *(buf+15), shuf_bswap64 ); \
ROUND_B_4WAY(0); \
ROUND_B_4WAY(1); \
ROUND_B_4WAY(2); \
ROUND_B_4WAY(3); \
ROUND_B_4WAY(4); \
ROUND_B_4WAY(5); \
ROUND_B_4WAY(6); \
ROUND_B_4WAY(7); \
ROUND_B_4WAY(8); \
ROUND_B_4WAY(9); \
ROUND_B_4WAY(0); \
ROUND_B_4WAY(1); \
ROUND_B_4WAY(2); \
ROUND_B_4WAY(3); \
ROUND_B_4WAY(4); \
ROUND_B_4WAY(5); \
H0 = mm256_xor4( V8, V0, S0, H0 ); \
H1 = mm256_xor4( V9, V1, S1, H1 ); \
H2 = mm256_xor4( VA, V2, S2, H2 ); \
H3 = mm256_xor4( VB, V3, S3, H3 ); \
H4 = mm256_xor4( VC, V4, S0, H4 ); \
H5 = mm256_xor4( VD, V5, S1, H5 ); \
H6 = mm256_xor4( VE, V6, S2, H6 ); \
H7 = mm256_xor4( VF, V7, S3, H7 ); \
} while (0)
#endif
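// The finalisation above uses mm256_xor4 from simd-utils. A plausible
// minimal definition (an assumption, not necessarily the project's exact
// code), pairing the XORs so the two halves can issue in parallel:
static inline __m256i mm256_xor4_sketch( __m256i a, __m256i b,
                                         __m256i c, __m256i d )
{
   return _mm256_xor_si256( _mm256_xor_si256( a, b ),
                            _mm256_xor_si256( c, d ) );
}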
static const sph_u64 salt_zero_big[4] = { 0, 0, 0, 0 };
static void
blake64_4way_init( blake_4way_big_context *sc, const sph_u64 *iv,
const sph_u64 *salt )
{
__m256i zero = m256_zero;
casti_m256i( sc->H, 0 ) = _mm256_set1_epi64x( iv[0] );
casti_m256i( sc->H, 1 ) = _mm256_set1_epi64x( iv[1] );
casti_m256i( sc->H, 2 ) = _mm256_set1_epi64x( iv[2] );
casti_m256i( sc->H, 3 ) = _mm256_set1_epi64x( iv[3] );
casti_m256i( sc->H, 4 ) = _mm256_set1_epi64x( iv[4] );
casti_m256i( sc->H, 5 ) = _mm256_set1_epi64x( iv[5] );
casti_m256i( sc->H, 6 ) = _mm256_set1_epi64x( iv[6] );
casti_m256i( sc->H, 7 ) = _mm256_set1_epi64x( iv[7] );
casti_m256i( sc->S, 0 ) = zero;
casti_m256i( sc->S, 1 ) = zero;
casti_m256i( sc->S, 2 ) = zero;
casti_m256i( sc->S, 3 ) = zero;
sc->T0 = sc->T1 = 0;
sc->ptr = 0;
}
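// Hedged: casti_m256i is assumed to index a pointer as an array of __m256i,
// along the lines of this hypothetical equivalent, so each 64-bit IV word
// is broadcast into one vector slot of H:
// #define casti_m256i( p, i )  ( ((__m256i*)(p))[(i)] )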
static void
blake64_4way( blake_4way_big_context *sc, const void *data, size_t len)
{
__m256i *vdata = (__m256i*)data;
__m256i *buf;
size_t ptr;
DECL_STATE64_4WAY
const int buf_size = 128;   // bytes per lane: 16 vectors of 8 bytes each
buf = sc->buf;
ptr = sc->ptr;
if ( len < (buf_size - ptr) )
{
memcpy_256( buf + (ptr>>3), vdata, len>>3 );
ptr += len;
sc->ptr = ptr;
return;
}
READ_STATE64_4WAY(sc);
while ( len > 0 )
{
size_t clen;
clen = buf_size - ptr;
if ( clen > len )
clen = len;
memcpy_256( buf + (ptr>>3), vdata, clen>>3 );
ptr += clen;
vdata = vdata + (clen>>3);
len -= clen;
if (ptr == buf_size )
{
if ((T0 = SPH_T64(T0 + 1024)) < 1024)
T1 = SPH_T64(T1 + 1);
COMPRESS64_4WAY;
ptr = 0;
}
}
WRITE_STATE64_4WAY(sc);
sc->ptr = ptr;
}
static void
blake64_4way_close( blake_4way_big_context *sc,
unsigned ub, unsigned n, void *dst, size_t out_size_w64)
{
__m256i buf[16];
size_t ptr;
unsigned bit_len;
uint64_t z, zz;
sph_u64 th, tl;
ptr = sc->ptr;
bit_len = ((unsigned)ptr << 3);
z = 0x80 >> n;
zz = ((ub & -z) | z) & 0xFF;
buf[ptr>>3] = _mm256_set_epi64x( zz, zz, zz, zz );
tl = sc->T0 + bit_len;
th = sc->T1;
if (ptr == 0 )
{
sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00ULL);
sc->T1 = SPH_C64(0xFFFFFFFFFFFFFFFFULL);
}
else if ( sc->T0 == 0 )
{
sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00ULL) + bit_len;
sc->T1 = SPH_T64(sc->T1 - 1);
}
else
{
sc->T0 -= 1024 - bit_len;
}
if ( ptr <= 104 )
{
memset_zero_256( buf + (ptr>>3) + 1, (104-ptr) >> 3 );
if ( out_size_w64 == 8 )
buf[(104>>3)] = _mm256_or_si256( buf[(104>>3)],
_mm256_set1_epi64x( 0x0100000000000000ULL ) );
*(buf+(112>>3)) = mm256_bswap_64(
_mm256_set_epi64x( th, th, th, th ) );
*(buf+(120>>3)) = mm256_bswap_64(
_mm256_set_epi64x( tl, tl, tl, tl ) );
blake64_4way( sc, buf + (ptr>>3), 128 - ptr );
}
else
{
memset_zero_256( buf + (ptr>>3) + 1, (120 - ptr) >> 3 );
blake64_4way( sc, buf + (ptr>>3), 128 - ptr );
sc->T0 = SPH_C64(0xFFFFFFFFFFFFFC00ULL);
sc->T1 = SPH_C64(0xFFFFFFFFFFFFFFFFULL);
memset_zero_256( buf, 112>>3 );
if ( out_size_w64 == 8 )
buf[104>>3] = _mm256_set1_epi64x( 0x0100000000000000ULL );
*(buf+(112>>3)) = mm256_bswap_64(
_mm256_set_epi64x( th, th, th, th ) );
*(buf+(120>>3)) = mm256_bswap_64(
_mm256_set_epi64x( tl, tl, tl, tl ) );
blake64_4way( sc, buf, 128 );
}
mm256_block_bswap_64( (__m256i*)dst, sc->H );
}
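// For reference, the close logic above implements standard BLAKE-512
// padding. Per 64-bit lane the final 128-byte block is laid out as:
//
//   bytes 0..ptr-1 : remaining message tail
//   byte  ptr      : 0x80 padding marker (merged with the end bits ub/n)
//   ...zeros...
//   byte  111      : low bit set when out_size_w64 == 8 (full BLAKE-512)
//   bytes 112..119 : T1, big-endian
//   bytes 120..127 : T0 + bit_len, big-endian
//
// When fewer than 17 free bytes remain (ptr > 104) a second, all-padding
// block is processed, with the counters forced to the spec's "no message
// bits in this block" values.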
void
blake512_4way_init(void *cc)
{
blake64_4way_init(cc, IV512, salt_zero_big);
}
void
blake512_4way(void *cc, const void *data, size_t len)
{
blake64_4way(cc, data, len);
}
void
blake512_4way_close(void *cc, void *dst)
{
blake512_4way_addbits_and_close(cc, 0, 0, dst);
}
void
blake512_4way_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
{
blake64_4way_close(cc, ub, n, dst, 8);
}
#ifdef __cplusplus
}
#endif
#endif

algo/blake/blake512-hash.c (new file, 2399 lines; diff suppressed because it is too large)
algo/blake/blake512-hash.h (new file, 138 lines)

@@ -0,0 +1,138 @@
#ifndef BLAKE512_HASH__
#define BLAKE512_HASH__ 1
#include <stddef.h>
#include "simd-utils.h"
#if defined(__SSE2__) || defined(__ARM_NEON)
/////////////////////////
//
// Blake-512 1 way SSE2, AVX2, NEON
typedef struct
{
unsigned char buf[128]; /* first field, for alignment */
uint64_t H[8];
uint64_t T0, T1;
size_t ptr;
} blake512_context __attribute__ ((aligned (32)));
void blake512_transform( uint64_t *H, const uint64_t *buf,
const uint64_t T0, const uint64_t T1 );
void blake512_init( blake512_context *sc );
void blake512_update( blake512_context *sc, const void *data, size_t len );
void blake512_close( blake512_context *sc, void *dst );
void blake512_full( blake512_context *sc, void *dst, const void *data,
size_t len );
/////////////////////////
//
// Blake-512 2 way SSE2 & NEON
typedef struct
{
v128u64_t buf[16];
v128u64_t H[8];
v128u64_t S[4];
size_t ptr;
uint64_t T0, T1;
} blake_2x64_big_context __attribute__ ((aligned (32)));
typedef blake_2x64_big_context blake512_2x64_context;
void blake512_2x64_init( blake_2x64_big_context *sc );
void blake512_2x64_update( void *cc, const void *data, size_t len );
void blake512_2x64_close( void *cc, void *dst );
void blake512_2x64_full( blake_2x64_big_context *sc, void * dst,
const void *data, size_t len );
void blake512_2x64_full_le( blake_2x64_big_context *sc, void * dst,
const void *data, size_t len );
void blake512_2x64_prehash_part1_le( blake_2x64_big_context *sc,
v128u64_t *midstate, const void *data );
void blake512_2x64_prehash_part2_le( blake_2x64_big_context *sc,
void *hash, const v128u64_t nonce, const v128u64_t *midstate );
#ifdef __AVX2__
/////////////////////////
//
// Blake-512 4 way AVX2
typedef struct
{
__m256i buf[16];
__m256i H[8];
__m256i S[4];
size_t ptr;
uint64_t T0, T1;
} blake_4x64_big_context __attribute__ ((aligned (64)));
typedef blake_4x64_big_context blake512_4x64_context;
void blake512_4x64_init( blake_4x64_big_context *sc );
void blake512_4x64_update( void *cc, const void *data, size_t len );
void blake512_4x64_close( void *cc, void *dst );
void blake512_4x64_full( blake_4x64_big_context *sc, void * dst,
const void *data, size_t len );
void blake512_4x64_full_le( blake_4x64_big_context *sc, void * dst,
const void *data, size_t len );
void blake512_4x64_prehash_le( blake_4x64_big_context *sc, __m256i *midstate,
const void *data );
void blake512_4x64_final_le( blake_4x64_big_context *sc, void *hash,
const __m256i nonce, const __m256i *midstate );
#define blake_4way_big_context blake_4x64_big_context
#define blake512_4way_context blake512_4x64_context
#define blake512_4way_init blake512_4x64_init
#define blake512_4way_update blake512_4x64_update
#define blake512_4way_close blake512_4x64_close
#define blake512_4way_full blake512_4x64_full
#define blake512_4way_full_le blake512_4x64_full_le
#define blake512_4way_prehash_le blake512_4x64_prehash_le
#define blake512_4way_final_le blake512_4x64_final_le
#if defined(SIMD512)
////////////////////////////
//
// Blake-512 8 way AVX512
typedef struct
{
__m512i buf[16];
__m512i H[8];
__m512i S[4];
size_t ptr;
uint64_t T0, T1;
} blake_8x64_big_context __attribute__ ((aligned (128)));
typedef blake_8x64_big_context blake512_8x64_context;
void blake512_8x64_init( blake_8x64_big_context *sc );
void blake512_8x64_update( void *cc, const void *data, size_t len );
void blake512_8x64_close( void *cc, void *dst );
void blake512_8x64_full( blake_8x64_big_context *sc, void * dst,
const void *data, size_t len );
void blake512_8x64_full_le( blake_8x64_big_context *sc, void * dst,
const void *data, size_t len );
void blake512_8x64_prehash_le( blake_8x64_big_context *sc, __m512i *midstate,
const void *data );
void blake512_8x64_final_le( blake_8x64_big_context *sc, void *hash,
const __m512i nonce, const __m512i *midstate );
#define blake_8way_big_context blake_8x64_big_context
#define blake512_8way_context blake512_8x64_context
#define blake512_8way_init blake512_8x64_init
#define blake512_8way_update blake512_8x64_update
#define blake512_8way_close blake512_8x64_close
#define blake512_8way_full blake512_8x64_full
#define blake512_8way_full_le blake512_8x64_full_le
#define blake512_8way_prehash_le blake512_8x64_prehash_le
#define blake512_8way_final_le blake512_8x64_final_le
#endif // AVX512
#endif // AVX2
#endif // SSE2 or NEON
#endif // BLAKE512_HASH__
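// Hedged usage sketch for the 4-lane API declared above: hash four
// independent, already 4x64-interleaved inputs in one pass. hash4x64_sketch
// is a hypothetical wrapper, not part of this header.
static inline void hash4x64_sketch( void *vhash, const void *vdata,
                                    size_t len )
{
   blake512_4x64_context ctx;
   blake512_4x64_init( &ctx );
   blake512_4x64_update( &ctx, vdata, len );   // len bytes per lane
   blake512_4x64_close( &ctx, vhash );         // 64 bytes out per lane
}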


@@ -1,10 +1,152 @@
#include "blakecoin-gate.h"
#include "blake-hash-4way.h"
#include "blake256-hash.h"
#include <string.h>
#include <stdint.h>
#include <memory.h>
#if defined (BLAKECOIN_4WAY)
#define rounds 8
#if defined (BLAKECOIN_16WAY)
int scanhash_blakecoin_16way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash32[8*16] __attribute__ ((aligned (64)));
uint32_t midstate_vars[16*16] __attribute__ ((aligned (64)));
__m512i block0_hash[8] __attribute__ ((aligned (64)));
__m512i block_buf[16] __attribute__ ((aligned (64)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash32_d7 = (uint32_t*)&( ((__m512i*)hash32)[7] );
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t targ32_d7 = ptarget[7];
uint32_t phash[8] __attribute__ ((aligned (64))) =
{
0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};
uint32_t n = pdata[19];
const uint32_t first_nonce = (const uint32_t) n;
const uint32_t last_nonce = max_nonce - 16;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
const __m512i sixteen = v512_32( 16 );
// Prehash first block
blake256_transform_le( phash, pdata, 512, 0, rounds );
block0_hash[0] = v512_32( phash[0] );
block0_hash[1] = v512_32( phash[1] );
block0_hash[2] = v512_32( phash[2] );
block0_hash[3] = v512_32( phash[3] );
block0_hash[4] = v512_32( phash[4] );
block0_hash[5] = v512_32( phash[5] );
block0_hash[6] = v512_32( phash[6] );
block0_hash[7] = v512_32( phash[7] );
// Build vectored second block, interleave last 16 bytes of data using
// unique nonces.
block_buf[0] = v512_32( pdata[16] );
block_buf[1] = v512_32( pdata[17] );
block_buf[2] = v512_32( pdata[18] );
block_buf[3] =
_mm512_set_epi32( n+15, n+14, n+13, n+12, n+11, n+10, n+ 9, n+ 8,
n+ 7, n+ 6, n+ 5, n+ 4, n+ 3, n+ 2, n+ 1, n ); \
// Partially prehash second block without touching nonces in block_buf[3].
blake256_16way_round0_prehash_le( midstate_vars, block0_hash, block_buf );
do {
blake256_16way_final_rounds_le( hash32, midstate_vars, block0_hash,
block_buf, rounds );
for ( int lane = 0; lane < 16; lane++ )
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
{
extr_lane_16x32( lane_hash, hash32, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
{
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
}
}
block_buf[3] = _mm512_add_epi32( block_buf[3], sixteen );
n += 16;
} while ( likely( (n < last_nonce) && !work_restart[thr_id].restart) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
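// Note on the nonce handling above: block_buf[3] carries sixteen consecutive
// nonces, one per 32-bit lane (n .. n+15). Adding the 'sixteen' vector each
// iteration advances all lanes at once, so the scalar counter n only tracks
// the base nonce of the batch.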
#elif defined (BLAKECOIN_8WAY)
int scanhash_blakecoin_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t hash32[8*8] __attribute__ ((aligned (64)));
uint32_t midstate_vars[16*8] __attribute__ ((aligned (32)));
__m256i block0_hash[8] __attribute__ ((aligned (32)));
__m256i block_buf[16] __attribute__ ((aligned (32)));
uint32_t lane_hash[8] __attribute__ ((aligned (32)));
uint32_t *hash32_d7 = (uint32_t*)&( ((__m256i*)hash32)[7] );
uint32_t *pdata = work->data;
const uint32_t *ptarget = work->target;
const uint32_t targ32_d7 = ptarget[7];
uint32_t phash[8] __attribute__ ((aligned (32))) =
{
0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};
uint32_t n = pdata[19];
const uint32_t first_nonce = (const uint32_t) n;
const uint32_t last_nonce = max_nonce - 8;
const int thr_id = mythr->id;
const bool bench = opt_benchmark;
const __m256i eight = v256_32( 8 );
// Prehash first block
blake256_transform_le( phash, pdata, 512, 0, rounds );
block0_hash[0] = v256_32( phash[0] );
block0_hash[1] = v256_32( phash[1] );
block0_hash[2] = v256_32( phash[2] );
block0_hash[3] = v256_32( phash[3] );
block0_hash[4] = v256_32( phash[4] );
block0_hash[5] = v256_32( phash[5] );
block0_hash[6] = v256_32( phash[6] );
block0_hash[7] = v256_32( phash[7] );
// Build vectored second block, interleave last 16 bytes of data using
// unique nonces.
block_buf[0] = v256_32( pdata[16] );
block_buf[1] = v256_32( pdata[17] );
block_buf[2] = v256_32( pdata[18] );
block_buf[3] = _mm256_set_epi32( n+7, n+6, n+5, n+4, n+3, n+2, n+1, n );
// Partially prehash second block without touching nonces in block_buf[3].
blake256_8way_round0_prehash_le( midstate_vars, block0_hash, block_buf );
do {
blake256_8way_final_rounds_le( hash32, midstate_vars, block0_hash,
block_buf, rounds );
for ( int lane = 0; lane < 8; lane++ )
if ( unlikely( hash32_d7[ lane ] <= targ32_d7 ) )
{
extr_lane_8x32( lane_hash, hash32, lane, 256 );
if ( likely( valid_hash( lane_hash, ptarget ) && !bench ) )
{
pdata[19] = n + lane;
submit_solution( work, lane_hash, mythr );
}
}
block_buf[3] = _mm256_add_epi32( block_buf[3], eight );
n += 8;
} while ( likely( (n < last_nonce) && !work_restart[thr_id].restart) );
pdata[19] = n;
*hashes_done = n - first_nonce;
return 0;
}
#elif defined (BLAKECOIN_4WAY)
blake256r8_4way_context blakecoin_4w_ctx;
@@ -14,7 +156,7 @@ void blakecoin_4way_hash(void *state, const void *input)
blake256r8_4way_context ctx;
memcpy( &ctx, &blakecoin_4w_ctx, sizeof ctx );
blake256r8_4way( &ctx, input + (64<<2), 16 );
blake256r8_4way_update( &ctx, input + (64<<2), 16 );
blake256r8_4way_close( &ctx, vhash );
dintrlv_4x32( state, state+32, state+64, state+96, vhash, 256 );
@@ -35,12 +177,12 @@ int scanhash_blakecoin_4way( struct work *work, uint32_t max_nonce,
if ( opt_benchmark )
HTarget = 0x7f;
mm128_bswap32_intrlv80_4x32( vdata, pdata );
v128_bswap32_intrlv80_4x32( vdata, pdata );
blake256r8_4way_init( &blakecoin_4w_ctx );
blake256r8_4way( &blakecoin_4w_ctx, vdata, 64 );
blake256r8_4way_update( &blakecoin_4w_ctx, vdata, 64 );
do {
*noncev = mm128_bswap_32( _mm_set_epi32( n+3, n+2, n+1, n ) );
*noncev = v128_bswap32( _mm_set_epi32( n+3, n+2, n+1, n ) );
pdata[19] = n;
blakecoin_4way_hash( hash, vdata );
@@ -49,7 +191,7 @@ int scanhash_blakecoin_4way( struct work *work, uint32_t max_nonce,
&& !opt_benchmark )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
submit_solution( work, hash+(i<<3), mythr );
}
n += 4;
@@ -61,61 +203,3 @@ int scanhash_blakecoin_4way( struct work *work, uint32_t max_nonce,
#endif
#if defined(BLAKECOIN_8WAY)
blake256r8_8way_context blakecoin_8w_ctx;
void blakecoin_8way_hash( void *state, const void *input )
{
uint32_t vhash[8*8] __attribute__ ((aligned (64)));
blake256r8_8way_context ctx;
memcpy( &ctx, &blakecoin_8w_ctx, sizeof ctx );
blake256r8_8way( &ctx, input + (64<<3), 16 );
blake256r8_8way_close( &ctx, vhash );
dintrlv_8x32( state, state+ 32, state+ 64, state+ 96, state+128,
state+160, state+192, state+224, vhash, 256 );
}
int scanhash_blakecoin_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[20*8] __attribute__ ((aligned (64)));
uint32_t hash[8*8] __attribute__ ((aligned (32)));
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t HTarget = ptarget[7];
uint32_t n = first_nonce;
__m256i *noncev = (__m256i*)vdata + 19; // aligned
int thr_id = mythr->id; // thr_id arg is deprecated
if ( opt_benchmark )
HTarget = 0x7f;
mm256_bswap32_intrlv80_8x32( vdata, pdata );
blake256r8_8way_init( &blakecoin_8w_ctx );
blake256r8_8way( &blakecoin_8w_ctx, vdata, 64 );
do {
*noncev = mm256_bswap_32( _mm256_set_epi32( n+7, n+6, n+5, n+4,
n+3, n+2, n+1, n ) );
pdata[19] = n;
blakecoin_8way_hash( hash, vdata );
for ( int i = 0; i < 8; i++ )
if ( (hash+(i<<3))[7] <= HTarget && fulltest( hash+(i<<3), ptarget )
&& !opt_benchmark )
{
pdata[19] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
}
n += 8;
} while ( (n < max_nonce) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;
return 0;
}
#endif


@@ -1,20 +1,13 @@
#include "blakecoin-gate.h"
#include <memory.h>
// changed to get_max64_0x3fffffLL in cpuminer-multi-decred
int64_t blakecoin_get_max64 ()
{
return 0x7ffffLL;
// return 0x3fffffLL;
}
// vanilla uses default gen merkle root, otherwise identical to blakecoin
bool register_vanilla_algo( algo_gate_t* gate )
{
#if defined(BLAKECOIN_8WAY)
#if defined(BLAKECOIN_16WAY)
gate->scanhash = (void*)&scanhash_blakecoin_16way;
#elif defined(BLAKECOIN_8WAY)
gate->scanhash = (void*)&scanhash_blakecoin_8way;
gate->hash = (void*)&blakecoin_8way_hash;
#elif defined(BLAKECOIN_4WAY)
gate->scanhash = (void*)&scanhash_blakecoin_4way;
gate->hash = (void*)&blakecoin_4way_hash;
@@ -22,15 +15,14 @@ bool register_vanilla_algo( algo_gate_t* gate )
gate->scanhash = (void*)&scanhash_blakecoin;
gate->hash = (void*)&blakecoinhash;
#endif
gate->optimizations = SSE42_OPT | AVX2_OPT;
gate->get_max64 = (void*)&blakecoin_get_max64;
gate->optimizations = SSE2_OPT | AVX2_OPT | AVX512_OPT;
return true;
}
bool register_blakecoin_algo( algo_gate_t* gate )
{
register_vanilla_algo( gate );
gate->gen_merkle_root = (void*)&SHA256_gen_merkle_root;
gate->gen_merkle_root = (void*)&sha256_gen_merkle_root;
return true;
}


@@ -1,30 +1,36 @@
#ifndef __BLAKECOIN_GATE_H__
#define __BLAKECOIN_GATE_H__ 1
#ifndef BLAKECOIN_GATE_H__
#define BLAKECOIN_GATE_H__ 1
#include "algo-gate-api.h"
#include <stdint.h>
#if defined(__SSE4_2__)
#if defined(SIMD512)
#define BLAKECOIN_16WAY
#elif defined(__AVX2__)
#define BLAKECOIN_8WAY
#elif defined(__SSE2__) // always true
#define BLAKECOIN_4WAY
#endif
#if defined(__AVX2__)
#define BLAKECOIN_8WAY
#endif
#if defined (BLAKECOIN_8WAY)
void blakecoin_8way_hash(void *state, const void *input);
#if defined (BLAKECOIN_16WAY)
int scanhash_blakecoin_16way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#elif defined (BLAKECOIN_8WAY)
//void blakecoin_8way_hash(void *state, const void *input);
int scanhash_blakecoin_8way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
#if defined (BLAKECOIN_4WAY)
#elif defined (BLAKECOIN_4WAY)
void blakecoin_4way_hash(void *state, const void *input);
int scanhash_blakecoin_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
#else // never used
void blakecoinhash( void *state, const void *input );
int scanhash_blakecoin( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
#endif


@@ -1,4 +1,7 @@
#include "blakecoin-gate.h"
#if !defined(BLAKECOIN_16WAY) && !defined(BLAKECOIN_8WAY) && !defined(BLAKECOIN_4WAY)
#define BLAKE32_ROUNDS 8
#include "sph_blake.h"
@@ -9,7 +12,6 @@ void blakecoin_close(void *cc, void *dst);
#include <string.h>
#include <stdint.h>
#include <memory.h>
#include <openssl/sha.h>
// context management is staged for efficiency.
// 1. global initial ctx cached on startup
@@ -32,8 +34,8 @@ void blakecoinhash( void *state, const void *input )
uint8_t hash[64] __attribute__ ((aligned (32)));
uint8_t *ending = (uint8_t*) input + 64;
// copy cached midstate
memcpy( &ctx, &blake_mid_ctx, sizeof ctx );
// copy cached midstate
memcpy( &ctx, &blake_mid_ctx, sizeof ctx );
blakecoin( &ctx, ending, 16 );
blakecoin_close( &ctx, hash );
memcpy( state, hash, 32 );
@@ -42,8 +44,8 @@ void blakecoinhash( void *state, const void *input )
int scanhash_blakecoin( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
uint32_t HTarget = ptarget[7];
int thr_id = mythr->id; // thr_id arg is deprecated
@@ -57,10 +59,10 @@ int scanhash_blakecoin( struct work *work, uint32_t max_nonce,
HTarget = 0x7f;
// we need big endian data...
for (int kk=0; kk < 19; kk++)
be32enc(&endiandata[kk], ((uint32_t*)pdata)[kk]);
for (int kk=0; kk < 19; kk++)
be32enc(&endiandata[kk], ((uint32_t*)pdata)[kk]);
blake_midstate_init( endiandata );
blake_midstate_init( endiandata );
#ifdef DEBUG_ALGO
applog(LOG_DEBUG,"[%d] Target=%08x %08x", thr_id, ptarget[6], ptarget[7]);
@@ -93,33 +95,4 @@ int scanhash_blakecoin( struct work *work, uint32_t max_nonce,
return 0;
}
/*
void blakecoin_gen_merkle_root ( char* merkle_root, struct stratum_ctx* sctx )
{
SHA256( sctx->job.coinbase, (int)sctx->job.coinbase_size, merkle_root );
}
*/
/*
// changed to get_max64_0x3fffffLL in cpuminer-multi-decred
int64_t blakecoin_get_max64 ()
{
return 0x7ffffLL;
}
// vanilla uses default gen merkle root, otherwise identical to blakecoin
bool register_vanilla_algo( algo_gate_t* gate )
{
gate->scanhash = (void*)&scanhash_blakecoin;
gate->hash = (void*)&blakecoinhash;
gate->get_max64 = (void*)&blakecoin_get_max64;
blakecoin_init( &blake_init_ctx );
return true;
}
bool register_blakecoin_algo( algo_gate_t* gate )
{
register_vanilla_algo( gate );
gate->gen_merkle_root = (void*)&SHA256_gen_merkle_root;
return true;
}
*/
#endif


@@ -1,74 +0,0 @@
#include "decred-gate.h"
#include "blake-hash-4way.h"
#include <string.h>
#include <stdint.h>
#include <memory.h>
#include <unistd.h>
#if defined (DECRED_4WAY)
static __thread blake256_4way_context blake_mid;
void decred_hash_4way( void *state, const void *input )
{
uint32_t vhash[8*4] __attribute__ ((aligned (64)));
// uint32_t hash0[8] __attribute__ ((aligned (32)));
// uint32_t hash1[8] __attribute__ ((aligned (32)));
// uint32_t hash2[8] __attribute__ ((aligned (32)));
// uint32_t hash3[8] __attribute__ ((aligned (32)));
const void *tail = input + ( DECRED_MIDSTATE_LEN << 2 );
int tail_len = 180 - DECRED_MIDSTATE_LEN;
blake256_4way_context ctx __attribute__ ((aligned (64)));
memcpy( &ctx, &blake_mid, sizeof(blake_mid) );
blake256_4way( &ctx, tail, tail_len );
blake256_4way_close( &ctx, vhash );
dintrlv_4x32( state, state+32, state+64, state+96, vhash, 256 );
}
int scanhash_decred_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t vdata[48*4] __attribute__ ((aligned (64)));
uint32_t hash[8*4] __attribute__ ((aligned (32)));
uint32_t _ALIGN(64) edata[48];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[DECRED_NONCE_INDEX];
uint32_t n = first_nonce;
const uint32_t HTarget = opt_benchmark ? 0x7f : ptarget[7];
int thr_id = mythr->id; // thr_id arg is deprecated
// copy to buffer guaranteed to be aligned.
memcpy( edata, pdata, 180 );
// use the old way until new way updated for size.
mm128_intrlv_4x32x( vdata, edata, edata, edata, edata, 180*8 );
blake256_4way_init( &blake_mid );
blake256_4way( &blake_mid, vdata, DECRED_MIDSTATE_LEN );
uint32_t *noncep = vdata + DECRED_NONCE_INDEX * 4;
do {
* noncep = n;
*(noncep+1) = n+1;
*(noncep+2) = n+2;
*(noncep+3) = n+3;
decred_hash_4way( hash, vdata );
for ( int i = 0; i < 4; i++ )
if ( (hash+(i<<3))[7] <= HTarget )
if ( fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[DECRED_NONCE_INDEX] = n+i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
}
n += 4;
} while ( (n < max_nonce) && !work_restart[thr_id].restart );
*hashes_done = n - first_nonce + 1;
return 0;
}
#endif


@@ -1,173 +0,0 @@
#include "decred-gate.h"
#include <unistd.h>
#include <memory.h>
#include <string.h>
uint32_t *decred_get_nonceptr( uint32_t *work_data )
{
return &work_data[ DECRED_NONCE_INDEX ];
}
double decred_calc_network_diff( struct work* work )
{
// sample for diff 43.281 : 1c05ea29
// todo: endian reversed on longpoll could be zr5 specific...
uint32_t nbits = work->data[ DECRED_NBITS_INDEX ];
uint32_t bits = ( nbits & 0xffffff );
int16_t shift = ( swab32(nbits) & 0xff ); // 0x1c = 28
int m;
double d = (double)0x0000ffff / (double)bits;
for ( m = shift; m < 29; m++ )
d *= 256.0;
for ( m = 29; m < shift; m++ )
d /= 256.0;
if ( shift == 28 )
d *= 256.0; // testnet
if ( opt_debug_diff )
applog( LOG_DEBUG, "net diff: %f -> shift %u, bits %08x", d,
shift, bits );
return net_diff;
}
void decred_decode_extradata( struct work* work, uint64_t* net_blocks )
{
// some random extradata to make the work unique
work->data[ DECRED_XNONCE_INDEX ] = (rand()*4);
work->height = work->data[32];
if (!have_longpoll && work->height > *net_blocks + 1)
{
char netinfo[64] = { 0 };
if (opt_showdiff && net_diff > 0.)
{
if (net_diff != work->targetdiff)
sprintf(netinfo, ", diff %.3f, target %.1f", net_diff,
work->targetdiff);
else
sprintf(netinfo, ", diff %.3f", net_diff);
}
applog(LOG_BLUE, "%s block %d%s", algo_names[opt_algo], work->height,
netinfo);
*net_blocks = work->height - 1;
}
}
void decred_be_build_stratum_request( char *req, struct work *work,
struct stratum_ctx *sctx )
{
unsigned char *xnonce2str;
uint32_t ntime, nonce;
char ntimestr[9], noncestr[9];
be32enc( &ntime, work->data[ DECRED_NTIME_INDEX ] );
be32enc( &nonce, work->data[ DECRED_NONCE_INDEX ] );
bin2hex( ntimestr, (char*)(&ntime), sizeof(uint32_t) );
bin2hex( noncestr, (char*)(&nonce), sizeof(uint32_t) );
xnonce2str = abin2hex( (char*)( &work->data[ DECRED_XNONCE_INDEX ] ),
sctx->xnonce1_size );
snprintf( req, JSON_BUF_LEN,
"{\"method\": \"mining.submit\", \"params\": [\"%s\", \"%s\", \"%s\", \"%s\", \"%s\"], \"id\":4}",
rpc_user, work->job_id, xnonce2str, ntimestr, noncestr );
free(xnonce2str);
}
#define min(a,b) (a>b ? (b) :(a))
void decred_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
{
uchar merkle_root[64] = { 0 };
uint32_t extraheader[32] = { 0 };
int headersize = 0;
uint32_t* extradata = (uint32_t*) sctx->xnonce1;
size_t t;
int i;
// getwork over stratum, getwork merkle + header passed in coinb1
memcpy(merkle_root, sctx->job.coinbase, 32);
headersize = min((int)sctx->job.coinbase_size - 32,
sizeof(extraheader) );
memcpy( extraheader, &sctx->job.coinbase[32], headersize );
// Increment extranonce2
for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ );
// Assemble block header
memset( g_work->data, 0, sizeof(g_work->data) );
g_work->data[0] = le32dec( sctx->job.version );
for ( i = 0; i < 8; i++ )
g_work->data[1 + i] = swab32(
le32dec( (uint32_t *) sctx->job.prevhash + i ) );
for ( i = 0; i < 8; i++ )
g_work->data[9 + i] = swab32( be32dec( (uint32_t *) merkle_root + i ) );
// for ( i = 0; i < 8; i++ ) // prevhash
// g_work->data[1 + i] = swab32( g_work->data[1 + i] );
// for ( i = 0; i < 8; i++ ) // merkle
// g_work->data[9 + i] = swab32( g_work->data[9 + i] );
for ( i = 0; i < headersize/4; i++ ) // header
g_work->data[17 + i] = extraheader[i];
// extradata
for ( i = 0; i < sctx->xnonce1_size/4; i++ )
g_work->data[ DECRED_XNONCE_INDEX + i ] = extradata[i];
for ( i = DECRED_XNONCE_INDEX + sctx->xnonce1_size/4; i < 45; i++ )
g_work->data[i] = 0;
g_work->data[37] = (rand()*4) << 8;
// block header suffix from coinb2 (stake version)
memcpy( &g_work->data[44],
&sctx->job.coinbase[ sctx->job.coinbase_size-4 ], 4 );
sctx->bloc_height = g_work->data[32];
//applog_hex(work->data, 180);
//applog_hex(&work->data[36], 36);
}
#undef min
bool decred_ready_to_mine( struct work* work, struct stratum_ctx* stratum,
int thr_id )
{
if ( have_stratum && strcmp(stratum->job.job_id, work->job_id) )
// need to regen g_work..
return false;
if ( have_stratum && !work->data[0] && !opt_benchmark )
{
sleep(1);
return false;
}
// extradata: prevent duplicates
work->data[ DECRED_XNONCE_INDEX ] += 1;
work->data[ DECRED_XNONCE_INDEX + 1 ] |= thr_id;
return true;
}
int decred_get_work_data_size() { return DECRED_DATA_SIZE; }
bool register_decred_algo( algo_gate_t* gate )
{
#if defined(DECRED_4WAY)
four_way_not_tested();
gate->scanhash = (void*)&scanhash_decred_4way;
gate->hash = (void*)&decred_hash_4way;
#else
gate->scanhash = (void*)&scanhash_decred;
gate->hash = (void*)&decred_hash;
#endif
gate->optimizations = AVX2_OPT;
gate->get_nonceptr = (void*)&decred_get_nonceptr;
gate->get_max64 = (void*)&get_max64_0x3fffffLL;
gate->decode_extra_data = (void*)&decred_decode_extradata;
gate->build_stratum_request = (void*)&decred_be_build_stratum_request;
gate->work_decode = (void*)&std_be_work_decode;
gate->submit_getwork_result = (void*)&std_be_submit_getwork_result;
gate->build_extraheader = (void*)&decred_build_extraheader;
gate->ready_to_mine = (void*)&decred_ready_to_mine;
gate->nbits_index = DECRED_NBITS_INDEX;
gate->ntime_index = DECRED_NTIME_INDEX;
gate->nonce_index = DECRED_NONCE_INDEX;
gate->get_work_data_size = (void*)&decred_get_work_data_size;
gate->work_cmp_size = DECRED_WORK_COMPARE_SIZE;
allow_mininginfo = false;
have_gbt = false;
return true;
}


@@ -1,36 +0,0 @@
#ifndef __DECRED_GATE_H__
#define __DECRED_GATE_H__
#include "algo-gate-api.h"
#include <stdint.h>
#define DECRED_NBITS_INDEX 29
#define DECRED_NTIME_INDEX 34
#define DECRED_NONCE_INDEX 35
#define DECRED_XNONCE_INDEX 36
#define DECRED_DATA_SIZE 192
#define DECRED_WORK_COMPARE_SIZE 140
#define DECRED_MIDSTATE_LEN 128
#if defined (__AVX2__)
//void blakehash_84way(void *state, const void *input);
//int scanhash_blake_8way( struct work *work, uint32_t max_nonce,
// uint64_t *hashes_done );
#endif
#if defined(__SSE4_2__)
#define DECRED_4WAY
#endif
#if defined (DECRED_4WAY)
void decred_hash_4way(void *state, const void *input);
int scanhash_decred_4way( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif
void decred_hash( void *state, const void *input );
int scanhash_decred( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr );
#endif


@@ -1,288 +0,0 @@
#include "decred-gate.h"
#include "sph_blake.h"
#include <string.h>
#include <stdint.h>
#include <memory.h>
#include <unistd.h>
/*
#ifndef min
#define min(a,b) (a>b ? b : a)
#endif
#ifndef max
#define max(a,b) (a<b ? b : a)
#endif
*/
/*
#define DECRED_NBITS_INDEX 29
#define DECRED_NTIME_INDEX 34
#define DECRED_NONCE_INDEX 35
#define DECRED_XNONCE_INDEX 36
#define DECRED_DATA_SIZE 192
#define DECRED_WORK_COMPARE_SIZE 140
*/
static __thread sph_blake256_context blake_mid;
static __thread bool ctx_midstate_done = false;
void decred_hash(void *state, const void *input)
{
// #define MIDSTATE_LEN 128
sph_blake256_context ctx __attribute__ ((aligned (64)));
uint8_t *ending = (uint8_t*) input;
ending += DECRED_MIDSTATE_LEN;
if (!ctx_midstate_done) {
sph_blake256_init(&blake_mid);
sph_blake256(&blake_mid, input, DECRED_MIDSTATE_LEN);
ctx_midstate_done = true;
}
memcpy(&ctx, &blake_mid, sizeof(blake_mid));
sph_blake256(&ctx, ending, (180 - DECRED_MIDSTATE_LEN));
sph_blake256_close(&ctx, state);
}
void decred_hash_simple(void *state, const void *input)
{
sph_blake256_context ctx;
sph_blake256_init(&ctx);
sph_blake256(&ctx, input, 180);
sph_blake256_close(&ctx, state);
}
int scanhash_decred( struct work *work, uint32_t max_nonce,
uint64_t *hashes_done, struct thr_info *mythr )
{
uint32_t _ALIGN(64) endiandata[48];
uint32_t _ALIGN(64) hash32[8];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
int thr_id = mythr->id; // thr_id arg is deprecated
// #define DCR_NONCE_OFT32 35
const uint32_t first_nonce = pdata[DECRED_NONCE_INDEX];
const uint32_t HTarget = opt_benchmark ? 0x7f : ptarget[7];
uint32_t n = first_nonce;
ctx_midstate_done = false;
#if 1
memcpy(endiandata, pdata, 180);
#else
for (int k=0; k < (180/4); k++)
be32enc(&endiandata[k], pdata[k]);
#endif
#ifdef DEBUG_ALGO
if (!thr_id) applog(LOG_DEBUG,"[%d] Target=%08x %08x", thr_id, ptarget[6], ptarget[7]);
#endif
do {
//be32enc(&endiandata[DCR_NONCE_OFT32], n);
endiandata[DECRED_NONCE_INDEX] = n;
decred_hash(hash32, endiandata);
if (hash32[7] <= HTarget && fulltest(hash32, ptarget)) {
work_set_target_ratio(work, hash32);
*hashes_done = n - first_nonce + 1;
#ifdef DEBUG_ALGO
applog(LOG_BLUE, "Nonce : %08x %08x", n, swab32(n));
applog_hash(ptarget);
applog_compare_hash(hash32, ptarget);
#endif
pdata[DECRED_NONCE_INDEX] = n;
return 1;
}
n++;
} while (n < max_nonce && !work_restart[thr_id].restart);
*hashes_done = n - first_nonce + 1;
pdata[DECRED_NONCE_INDEX] = n;
return 0;
}
/*
uint32_t *decred_get_nonceptr( uint32_t *work_data )
{
return &work_data[ DECRED_NONCE_INDEX ];
}
double decred_calc_network_diff( struct work* work )
{
// sample for diff 43.281 : 1c05ea29
// todo: endian reversed on longpoll could be zr5 specific...
uint32_t nbits = work->data[ DECRED_NBITS_INDEX ];
uint32_t bits = ( nbits & 0xffffff );
int16_t shift = ( swab32(nbits) & 0xff ); // 0x1c = 28
int m;
double d = (double)0x0000ffff / (double)bits;
for ( m = shift; m < 29; m++ )
d *= 256.0;
for ( m = 29; m < shift; m++ )
d /= 256.0;
if ( shift == 28 )
d *= 256.0; // testnet
if ( opt_debug_diff )
applog( LOG_DEBUG, "net diff: %f -> shift %u, bits %08x", d,
shift, bits );
return net_diff;
}
void decred_decode_extradata( struct work* work, uint64_t* net_blocks )
{
// some random extradata to make the work unique
work->data[ DECRED_XNONCE_INDEX ] = (rand()*4);
work->height = work->data[32];
if (!have_longpoll && work->height > *net_blocks + 1)
{
char netinfo[64] = { 0 };
if (opt_showdiff && net_diff > 0.)
{
if (net_diff != work->targetdiff)
sprintf(netinfo, ", diff %.3f, target %.1f", net_diff,
work->targetdiff);
else
sprintf(netinfo, ", diff %.3f", net_diff);
}
applog(LOG_BLUE, "%s block %d%s", algo_names[opt_algo], work->height,
netinfo);
*net_blocks = work->height - 1;
}
}
void decred_be_build_stratum_request( char *req, struct work *work,
struct stratum_ctx *sctx )
{
unsigned char *xnonce2str;
uint32_t ntime, nonce;
char ntimestr[9], noncestr[9];
be32enc( &ntime, work->data[ DECRED_NTIME_INDEX ] );
be32enc( &nonce, work->data[ DECRED_NONCE_INDEX ] );
bin2hex( ntimestr, (char*)(&ntime), sizeof(uint32_t) );
bin2hex( noncestr, (char*)(&nonce), sizeof(uint32_t) );
xnonce2str = abin2hex( (char*)( &work->data[ DECRED_XNONCE_INDEX ] ),
sctx->xnonce1_size );
snprintf( req, JSON_BUF_LEN,
"{\"method\": \"mining.submit\", \"params\": [\"%s\", \"%s\", \"%s\", \"%s\", \"%s\"], \"id\":4}",
rpc_user, work->job_id, xnonce2str, ntimestr, noncestr );
free(xnonce2str);
}
*/
/*
// data shared between gen_merkle_root and build_extraheader.
__thread uint32_t decred_extraheader[32] = { 0 };
__thread int decred_headersize = 0;
void decred_gen_merkle_root( char* merkle_root, struct stratum_ctx* sctx )
{
// getwork over stratum, getwork merkle + header passed in coinb1
memcpy(merkle_root, sctx->job.coinbase, 32);
decred_headersize = min((int)sctx->job.coinbase_size - 32,
sizeof(decred_extraheader) );
memcpy( decred_extraheader, &sctx->job.coinbase[32], decred_headersize);
}
*/
/*
#define min(a,b) (a>b ? (b) :(a))
void decred_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
{
uchar merkle_root[64] = { 0 };
uint32_t extraheader[32] = { 0 };
int headersize = 0;
uint32_t* extradata = (uint32_t*) sctx->xnonce1;
size_t t;
int i;
// getwork over stratum, getwork merkle + header passed in coinb1
memcpy(merkle_root, sctx->job.coinbase, 32);
headersize = min((int)sctx->job.coinbase_size - 32,
sizeof(extraheader) );
memcpy( extraheader, &sctx->job.coinbase[32], headersize );
// Increment extranonce2
for ( t = 0; t < sctx->xnonce2_size && !( ++sctx->job.xnonce2[t] ); t++ );
// Assemble block header
memset( g_work->data, 0, sizeof(g_work->data) );
g_work->data[0] = le32dec( sctx->job.version );
for ( i = 0; i < 8; i++ )
g_work->data[1 + i] = swab32(
le32dec( (uint32_t *) sctx->job.prevhash + i ) );
for ( i = 0; i < 8; i++ )
g_work->data[9 + i] = swab32( be32dec( (uint32_t *) merkle_root + i ) );
// for ( i = 0; i < 8; i++ ) // prevhash
// g_work->data[1 + i] = swab32( g_work->data[1 + i] );
// for ( i = 0; i < 8; i++ ) // merkle
// g_work->data[9 + i] = swab32( g_work->data[9 + i] );
for ( i = 0; i < headersize/4; i++ ) // header
g_work->data[17 + i] = extraheader[i];
// extradata
for ( i = 0; i < sctx->xnonce1_size/4; i++ )
g_work->data[ DECRED_XNONCE_INDEX + i ] = extradata[i];
for ( i = DECRED_XNONCE_INDEX + sctx->xnonce1_size/4; i < 45; i++ )
g_work->data[i] = 0;
g_work->data[37] = (rand()*4) << 8;
// block header suffix from coinb2 (stake version)
memcpy( &g_work->data[44],
&sctx->job.coinbase[ sctx->job.coinbase_size-4 ], 4 );
sctx->bloc_height = g_work->data[32];
//applog_hex(work->data, 180);
//applog_hex(&work->data[36], 36);
}
#undef min
bool decred_ready_to_mine( struct work* work, struct stratum_ctx* stratum,
int thr_id )
{
if ( have_stratum && strcmp(stratum->job.job_id, work->job_id) )
// need to regen g_work..
return false;
if ( have_stratum && !work->data[0] && !opt_benchmark )
{
sleep(1);
return false;
}
// extradata: prevent duplicates
work->data[ DECRED_XNONCE_INDEX ] += 1;
work->data[ DECRED_XNONCE_INDEX + 1 ] |= thr_id;
return true;
}
bool register_decred_algo( algo_gate_t* gate )
{
gate->optimizations = SSE2_OPT;
gate->scanhash = (void*)&scanhash_decred;
gate->hash = (void*)&decred_hash;
gate->get_nonceptr = (void*)&decred_get_nonceptr;
gate->get_max64 = (void*)&get_max64_0x3fffffLL;
gate->decode_extra_data = (void*)&decred_decode_extradata;
gate->build_stratum_request = (void*)&decred_be_build_stratum_request;
gate->work_decode = (void*)&std_be_work_decode;
gate->submit_getwork_result = (void*)&std_be_submit_getwork_result;
gate->build_extraheader = (void*)&decred_build_extraheader;
gate->ready_to_mine = (void*)&decred_ready_to_mine;
gate->nbits_index = DECRED_NBITS_INDEX;
gate->ntime_index = DECRED_NTIME_INDEX;
gate->nonce_index = DECRED_NONCE_INDEX;
gate->work_data_size = DECRED_DATA_SIZE;
gate->work_cmp_size = DECRED_WORK_COMPARE_SIZE;
allow_mininginfo = false;
have_gbt = false;
return true;
}
*/


@@ -1,14 +1,12 @@
#include "pentablake-gate.h"
#if defined (__AVX2__)
#if defined(PENTABLAKE_4WAY)
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "blake-hash-4way.h"
#include "sph_blake.h"
#include "blake512-hash.h"
extern void pentablakehash_4way( void *output, const void *input )
{
@@ -22,23 +20,23 @@ extern void pentablakehash_4way( void *output, const void *input )
blake512_4way_init( &ctx );
blake512_4way( &ctx, input, 80 );
blake512_4way_update( &ctx, input, 80 );
blake512_4way_close( &ctx, vhash );
blake512_4way_init( &ctx );
blake512_4way( &ctx, vhash, 64 );
blake512_4way_update( &ctx, vhash, 64 );
blake512_4way_close( &ctx, vhash );
blake512_4way_init( &ctx );
blake512_4way( &ctx, vhash, 64 );
blake512_4way_update( &ctx, vhash, 64 );
blake512_4way_close( &ctx, vhash );
blake512_4way_init( &ctx );
blake512_4way( &ctx, vhash, 64 );
blake512_4way_update( &ctx, vhash, 64 );
blake512_4way_close( &ctx, vhash );
blake512_4way_init( &ctx );
blake512_4way( &ctx, vhash, 64 );
blake512_4way_update( &ctx, vhash, 64 );
blake512_4way_close( &ctx, vhash );
memcpy( output, hash0, 32 );
@@ -105,7 +103,7 @@ int scanhash_pentablake_4way( struct work *work,
&& fulltest( hash+(i<<3), ptarget ) && !opt_benchmark )
{
pdata[19] = n + i;
submit_lane_solution( work, hash+(i<<3), mythr, i );
submit_solution( work, hash+(i<<3), mythr );
}
n += 4;


@@ -10,7 +10,6 @@ bool register_pentablake_algo( algo_gate_t* gate )
gate->hash = (void*)&pentablakehash;
#endif
gate->optimizations = AVX2_OPT;
gate->get_max64 = (void*)&get_max64_0x3ffff;
return true;
};


@@ -4,9 +4,10 @@
#include "algo-gate-api.h"
#include <stdint.h>
#if defined(__AVX2__)
#define PENTABLAKE_4WAY
#endif
// 4way is broken
//#if defined(__AVX2__)
// #define PENTABLAKE_4WAY
//#endif
#if defined(PENTABLAKE_4WAY)
void pentablakehash_4way( void *state, const void *input );


@@ -1,4 +1,7 @@
#include "pentablake-gate.h"
#if !defined(PENTABLAKE_8WAY) && !defined(PENTABLAKE_4WAY)
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
@@ -111,3 +114,4 @@ int scanhash_pentablake( struct work *work, uint32_t max_nonce,
return 0;
}
#endif


@@ -14,8 +14,9 @@
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "algo/sha/sph_types.h"
#include "simd-utils.h"
#include "compat/sph_types.h"
#include "compat.h"
#include "sph-blake2s.h"
static const uint32_t blake2s_IV[8] =
@@ -117,15 +118,15 @@ static inline int blake2s_param_set_inner_length( blake2s_param *P, const uint8_
return 0;
}
static inline int blake2s_param_set_salt( blake2s_param *P, const uint8_t salt[BLAKE2S_SALTBYTES] )
static inline int blake2s_param_set_salt( blake2s_param *P, const uint8_t salt[8] )
{
memcpy( P->salt, salt, BLAKE2S_SALTBYTES );
memcpy( P->salt, salt, 8 );
return 0;
}
static inline int blake2s_param_set_personal( blake2s_param *P, const uint8_t personal[BLAKE2S_PERSONALBYTES] )
static inline int blake2s_param_set_personal( blake2s_param *P, const uint8_t personal[8] )
{
memcpy( P->personal, personal, BLAKE2S_PERSONALBYTES );
memcpy( P->personal, personal, 8 );
return 0;
}
@@ -158,7 +159,7 @@ int blake2s_init( blake2s_state *S, const uint8_t outlen )
blake2s_param P[1];
/* Move interval verification here? */
if ( ( !outlen ) || ( outlen > BLAKE2S_OUTBYTES ) ) return -1;
if ( ( !outlen ) || ( outlen > 32 ) ) return -1;
P->digest_length = outlen;
P->key_length = 0;
@@ -178,9 +179,9 @@ int blake2s_init_key( blake2s_state *S, const uint8_t outlen, const void *key, c
{
blake2s_param P[1];
if ( ( !outlen ) || ( outlen > BLAKE2S_OUTBYTES ) ) return -1;
if ( ( !outlen ) || ( outlen > 32 ) ) return -1;
if ( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1;
if ( !key || !keylen || keylen > 32 ) return -1;
P->digest_length = outlen;
P->key_length = keylen;
@@ -197,19 +198,19 @@ int blake2s_init_key( blake2s_state *S, const uint8_t outlen, const void *key, c
if( blake2s_init_param( S, P ) < 0 ) return -1;
{
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
uint8_t block[64];
memset( block, 0, 64 );
memcpy( block, key, keylen );
blake2s_update( S, block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
blake2s_update( S, block, 64 );
secure_zero_memory( block, 64 ); /* Burn the key from stack */
}
return 0;
}
int blake2s_compress( blake2s_state *S, const uint8_t block[BLAKE2S_BLOCKBYTES] )
int blake2s_compress( blake2s_state *S, const uint8_t block[64] )
{
uint32_t m[16];
uint32_t v[16];
uint32_t _ALIGN(32) m[16];
uint32_t _ALIGN(32) v[16];
for( size_t i = 0; i < 16; ++i )
m[i] = load32( block + i * sizeof( m[i] ) );
@@ -225,6 +226,58 @@ int blake2s_compress( blake2s_state *S, const uint8_t block[BLAKE2S_BLOCKBYTES]
v[13] = S->t[1] ^ blake2s_IV[5];
v[14] = S->f[0] ^ blake2s_IV[6];
v[15] = S->f[1] ^ blake2s_IV[7];
#if defined(__SSE2__)
v128_t *V = (v128_t*)v;
#define BLAKE2S_ROUND( r ) \
V[0] = v128_add32( V[0], v128_add32( V[1], v128_set32( \
m[blake2s_sigma[r][ 6]], m[blake2s_sigma[r][ 4]], \
m[blake2s_sigma[r][ 2]], m[blake2s_sigma[r][ 0]] ) ) ); \
V[3] = v128_ror32( v128_xor( V[3], V[0] ), 16 ); \
V[2] = v128_add32( V[2], V[3] ); \
V[1] = v128_ror32( v128_xor( V[1], V[2] ), 12 ); \
V[0] = v128_add32( V[0], v128_add32( V[1], v128_set32( \
m[blake2s_sigma[r][ 7]], m[blake2s_sigma[r][ 5]], \
m[blake2s_sigma[r][ 3]], m[blake2s_sigma[r][ 1]] ) ) ); \
V[3] = v128_ror32( v128_xor( V[3], V[0] ), 8 ); \
V[2] = v128_add32( V[2], V[3] ); \
V[1] = v128_ror32( v128_xor( V[1], V[2] ), 7 ); \
V[0] = v128_shufll32( V[0] ); \
V[3] = v128_swap64( V[3] ); \
V[2] = v128_shuflr32( V[2] ); \
V[0] = v128_add32( V[0], v128_add32( V[1], v128_set32( \
m[blake2s_sigma[r][12]], m[blake2s_sigma[r][10]], \
m[blake2s_sigma[r][ 8]], m[blake2s_sigma[r][14]] ) ) ); \
V[3] = v128_ror32( v128_xor( V[3], V[0] ), 16 ); \
V[2] = v128_add32( V[2], V[3] ); \
V[1] = v128_ror32( v128_xor( V[1], V[2] ), 12 ); \
V[0] = v128_add32( V[0], v128_add32( V[1], v128_set32( \
m[blake2s_sigma[r][13]], m[blake2s_sigma[r][11]], \
m[blake2s_sigma[r][ 9]], m[blake2s_sigma[r][15]] ) ) ); \
V[3] = v128_ror32( v128_xor( V[3], V[0] ), 8 ); \
V[2] = v128_add32( V[2], V[3] ); \
V[1] = v128_ror32( v128_xor( V[1], V[2] ), 7 ); \
V[0] = v128_shuflr32( V[0] ); \
V[3] = v128_swap64( V[3] ); \
V[2] = v128_shufll32( V[2] )
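// The mid-round shuffles (shufll32 / swap64 / shuflr32) rotate the state
// lanes so the diagonal G steps can reuse the column-step code; the mirrored
// shuffles at the end of the round restore the original lane order.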
BLAKE2S_ROUND(0);
BLAKE2S_ROUND(1);
BLAKE2S_ROUND(2);
BLAKE2S_ROUND(3);
BLAKE2S_ROUND(4);
BLAKE2S_ROUND(5);
BLAKE2S_ROUND(6);
BLAKE2S_ROUND(7);
BLAKE2S_ROUND(8);
BLAKE2S_ROUND(9);
#undef BLAKE2S_ROUND
#else
#define G(r,i,a,b,c,d) \
do { \
a = a + b + m[blake2s_sigma[r][2*i+0]]; \
@@ -236,6 +289,7 @@ int blake2s_compress( blake2s_state *S, const uint8_t block[BLAKE2S_BLOCKBYTES]
c = c + d; \
b = SPH_ROTR32(b ^ c, 7); \
} while(0)
#define ROUND(r) \
do { \
G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
@@ -247,7 +301,8 @@ int blake2s_compress( blake2s_state *S, const uint8_t block[BLAKE2S_BLOCKBYTES]
G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
} while(0)
ROUND( 0 );
ROUND( 0 );
ROUND( 1 );
ROUND( 2 );
ROUND( 3 );
@@ -258,6 +313,8 @@ int blake2s_compress( blake2s_state *S, const uint8_t block[BLAKE2S_BLOCKBYTES]
ROUND( 8 );
ROUND( 9 );
#endif
for( size_t i = 0; i < 8; ++i )
S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
@@ -272,16 +329,16 @@ int blake2s_update( blake2s_state *S, const uint8_t *in, uint64_t inlen )
while( inlen > 0 )
{
size_t left = S->buflen;
size_t fill = 2 * BLAKE2S_BLOCKBYTES - left;
size_t fill = 2 * 64 - left;
if( inlen > fill )
{
memcpy( S->buf + left, in, fill ); // Fill buffer
S->buflen += fill;
blake2s_increment_counter( S, BLAKE2S_BLOCKBYTES );
blake2s_increment_counter( S, 64 );
blake2s_compress( S, S->buf ); // Compress
memcpy( S->buf, S->buf + BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES ); // Shift buffer left
S->buflen -= BLAKE2S_BLOCKBYTES;
memcpy( S->buf, S->buf + 64, 64 ); // Shift buffer left
S->buflen -= 64;
in += fill;
inlen -= fill;
}
@@ -299,19 +356,19 @@ int blake2s_update( blake2s_state *S, const uint8_t *in, uint64_t inlen )
int blake2s_final( blake2s_state *S, uint8_t *out, uint8_t outlen )
{
uint8_t buffer[BLAKE2S_OUTBYTES];
uint8_t buffer[32];
if( S->buflen > BLAKE2S_BLOCKBYTES )
if( S->buflen > 64 )
{
blake2s_increment_counter( S, BLAKE2S_BLOCKBYTES );
blake2s_increment_counter( S, 64 );
blake2s_compress( S, S->buf );
S->buflen -= BLAKE2S_BLOCKBYTES;
memcpy( S->buf, S->buf + BLAKE2S_BLOCKBYTES, S->buflen );
S->buflen -= 64;
memcpy( S->buf, S->buf + 64, S->buflen );
}
blake2s_increment_counter( S, ( uint32_t )S->buflen );
blake2s_set_lastblock( S );
memset( S->buf + S->buflen, 0, 2 * BLAKE2S_BLOCKBYTES - S->buflen ); /* Padding */
memset( S->buf + S->buflen, 0, 2 * 64 - S->buflen ); /* Padding */
blake2s_compress( S, S->buf );
for( int i = 0; i < 8; ++i ) /* Output full hash to temp buffer */
@@ -323,7 +380,7 @@ int blake2s_final( blake2s_state *S, uint8_t *out, uint8_t outlen )
int blake2s( uint8_t *out, const void *in, const void *key, const uint8_t outlen, const uint64_t inlen, uint8_t keylen )
{
blake2s_state S[1];
blake2s_state S;
/* Verify parameters */
if ( NULL == in ) return -1;
@@ -334,15 +391,15 @@ int blake2s( uint8_t *out, const void *in, const void *key, const uint8_t outlen
if( keylen > 0 )
{
if( blake2s_init_key( S, outlen, key, keylen ) < 0 ) return -1;
if( blake2s_init_key( &S, outlen, key, keylen ) < 0 ) return -1;
}
else
{
if( blake2s_init( S, outlen ) < 0 ) return -1;
if( blake2s_init( &S, outlen ) < 0 ) return -1;
}
blake2s_update( S, ( uint8_t * )in, inlen );
blake2s_final( S, out, outlen );
blake2s_update( &S, ( uint8_t * )in, inlen );
blake2s_final( &S, out, outlen );
return 0;
}
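// Hedged usage sketch of the one-shot wrapper above: a 32-byte unkeyed
// BLAKE2s digest of a short message (example_blake2s is hypothetical).
static int example_blake2s( uint8_t digest[32] )
{
   static const char msg[3] = { 'a', 'b', 'c' };
   // keylen 0 selects the unkeyed init path in blake2s()
   return blake2s( digest, msg, NULL, 32, sizeof msg, 0 );
}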
@@ -351,10 +408,10 @@ int blake2s( uint8_t *out, const void *in, const void *key, const uint8_t outlen
#include "blake2-kat.h" /* test data not included */
int main( int argc, char **argv )
{
uint8_t key[BLAKE2S_KEYBYTES];
uint8_t key[32];
uint8_t buf[KAT_LENGTH];
for( size_t i = 0; i < BLAKE2S_KEYBYTES; ++i )
for( size_t i = 0; i < 32; ++i )
key[i] = ( uint8_t )i;
for( size_t i = 0; i < KAT_LENGTH; ++i )
@@ -362,10 +419,10 @@ int main( int argc, char **argv )
for( size_t i = 0; i < KAT_LENGTH; ++i )
{
uint8_t hash[BLAKE2S_OUTBYTES];
blake2s( hash, buf, key, BLAKE2S_OUTBYTES, i, BLAKE2S_KEYBYTES );
uint8_t hash[32];
blake2s( hash, buf, key, 32, i, 32 );
if( 0 != memcmp( hash, blake2s_keyed_kat[i], BLAKE2S_OUTBYTES ) )
if( 0 != memcmp( hash, blake2s_keyed_kat[i], 32 ) )
{
puts( "error" );
return -1;


@@ -87,19 +87,6 @@ static inline void secure_zero_memory(void *v, size_t n)
/* blake2.h */
#if defined(__cplusplus)
extern "C" {
#endif
enum blake2s_constant
{
BLAKE2S_BLOCKBYTES = 64,
BLAKE2S_OUTBYTES = 32,
BLAKE2S_KEYBYTES = 32,
BLAKE2S_SALTBYTES = 8,
BLAKE2S_PERSONALBYTES = 8
};
#pragma pack(push, 1)
typedef struct __blake2s_param
{
@@ -112,22 +99,22 @@ extern "C" {
uint8_t node_depth; // 15
uint8_t inner_length; // 16
// uint8_t reserved[0];
uint8_t salt[BLAKE2S_SALTBYTES]; // 24
uint8_t personal[BLAKE2S_PERSONALBYTES]; // 32
uint8_t salt[8]; // 24
uint8_t personal[8]; // 32
} blake2s_param;
ALIGN( 64 ) typedef struct __blake2s_state
typedef struct ALIGN( 64 ) __blake2s_state
{
uint32_t h[8];
uint32_t t[2];
uint32_t f[2];
uint8_t buf[2 * BLAKE2S_BLOCKBYTES];
uint8_t buf[2 * 64];
size_t buflen;
uint8_t last_node;
} blake2s_state ;
#pragma pack(pop)
int blake2s_compress( blake2s_state *S, const uint8_t block[BLAKE2S_BLOCKBYTES] );
int blake2s_compress( blake2s_state *S, const uint8_t block[64] );
// Streaming API
int blake2s_init( blake2s_state *S, const uint8_t outlen );


@@ -630,6 +630,69 @@ static const sph_u64 CB[16] = {
H7 ^= S3 ^ V7 ^ VF; \
} while (0)
#define COMPRESS32_LE do { \
sph_u32 M0, M1, M2, M3, M4, M5, M6, M7; \
sph_u32 M8, M9, MA, MB, MC, MD, ME, MF; \
sph_u32 V0, V1, V2, V3, V4, V5, V6, V7; \
sph_u32 V8, V9, VA, VB, VC, VD, VE, VF; \
V0 = H0; \
V1 = H1; \
V2 = H2; \
V3 = H3; \
V4 = H4; \
V5 = H5; \
V6 = H6; \
V7 = H7; \
V8 = S0 ^ CS0; \
V9 = S1 ^ CS1; \
VA = S2 ^ CS2; \
VB = S3 ^ CS3; \
VC = T0 ^ CS4; \
VD = T0 ^ CS5; \
VE = T1 ^ CS6; \
VF = T1 ^ CS7; \
M0 = *((uint32_t*)(buf + 0)); \
M1 = *((uint32_t*)(buf + 4)); \
M2 = *((uint32_t*)(buf + 8)); \
M3 = *((uint32_t*)(buf + 12)); \
M4 = *((uint32_t*)(buf + 16)); \
M5 = *((uint32_t*)(buf + 20)); \
M6 = *((uint32_t*)(buf + 24)); \
M7 = *((uint32_t*)(buf + 28)); \
M8 = *((uint32_t*)(buf + 32)); \
M9 = *((uint32_t*)(buf + 36)); \
MA = *((uint32_t*)(buf + 40)); \
MB = *((uint32_t*)(buf + 44)); \
MC = *((uint32_t*)(buf + 48)); \
MD = *((uint32_t*)(buf + 52)); \
ME = *((uint32_t*)(buf + 56)); \
MF = *((uint32_t*)(buf + 60)); \
ROUND_S(0); \
ROUND_S(1); \
ROUND_S(2); \
ROUND_S(3); \
ROUND_S(4); \
ROUND_S(5); \
ROUND_S(6); \
ROUND_S(7); \
if (BLAKE32_ROUNDS == 14) { \
ROUND_S(8); \
ROUND_S(9); \
ROUND_S(0); \
ROUND_S(1); \
ROUND_S(2); \
ROUND_S(3); \
} \
H0 ^= S0 ^ V0 ^ V8; \
H1 ^= S1 ^ V1 ^ V9; \
H2 ^= S2 ^ V2 ^ VA; \
H3 ^= S3 ^ V3 ^ VB; \
H4 ^= S0 ^ V4 ^ VC; \
H5 ^= S1 ^ V5 ^ VD; \
H6 ^= S2 ^ V6 ^ VE; \
H7 ^= S3 ^ V7 ^ VF; \
} while (0)
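// COMPRESS32_LE mirrors COMPRESS32 exactly except that the message words
// are loaded little-endian (plain 32-bit loads, no byte swap); it exists
// for callers such as sph_blake256_update_le below that feed pre-swapped
// data.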
#endif
#if SPH_64
@@ -843,6 +906,45 @@ blake32(sph_blake_small_context *sc, const void *data, size_t len)
sc->ptr = ptr;
}
static void
blake32_le(sph_blake_small_context *sc, const void *data, size_t len)
{
unsigned char *buf;
size_t ptr;
DECL_STATE32
buf = sc->buf;
ptr = sc->ptr;
if (len < (sizeof sc->buf) - ptr) {
memcpy(buf + ptr, data, len);
ptr += len;
sc->ptr = ptr;
return;
}
READ_STATE32(sc);
while (len > 0) {
size_t clen;
clen = (sizeof sc->buf) - ptr;
if (clen > len)
clen = len;
memcpy(buf + ptr, data, clen);
ptr += clen;
data = (const unsigned char *)data + clen;
len -= clen;
if (ptr == sizeof sc->buf) {
if ((T0 = SPH_T32(T0 + 512)) < 512)
T1 = SPH_T32(T1 + 1);
COMPRESS32_LE;
ptr = 0;
}
}
WRITE_STATE32(sc);
sc->ptr = ptr;
}
static void
blake32_close(sph_blake_small_context *sc,
unsigned ub, unsigned n, void *dst, size_t out_size_w32)
@@ -1050,6 +1152,12 @@ sph_blake256(void *cc, const void *data, size_t len)
blake32(cc, data, len);
}
void
sph_blake256_update_le(void *cc, const void *data, size_t len)
{
blake32_le(cc, data, len);
}
/* see sph_blake.h */
void
sph_blake256_close(void *cc, void *dst)


@@ -42,7 +42,7 @@ extern "C"{
#endif
#include <stddef.h>
#include "algo/sha/sph_types.h"
#include "compat/sph_types.h"
/**
* Output size (in bits) for BLAKE-224.
@@ -82,9 +82,9 @@ typedef struct {
#ifndef DOXYGEN_IGNORE
unsigned char buf[64]; /* first field, for alignment */
size_t ptr;
sph_u32 H[8];
sph_u32 S[4];
sph_u32 T0, T1;
uint32_t H[8];
uint32_t S[4];
uint32_t T0, T1;
#endif
} sph_blake_small_context;
@@ -198,6 +198,7 @@ void sph_blake256_init(void *cc);
* @param len the input data length (in bytes)
*/
void sph_blake256(void *cc, const void *data, size_t len);
void sph_blake256_update_le(void *cc, const void *data, size_t len);
/**
* Terminate the current BLAKE-256 computation and output the result into


@@ -30,18 +30,11 @@
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include "algo/sha/sph_types.h"
#include "simd-utils.h"
#include "compat/sph_types.h"
#include "sph_blake2b.h"
// Cyclic right rotation.
#ifndef ROTR64
#define ROTR64(x, y) (((x) >> (y)) ^ ((x) << (64 - (y))))
#endif
// Little-endian byte access.
#define B2B_GET64(p) \
(((uint64_t) ((uint8_t *) (p))[0]) ^ \
(((uint64_t) ((uint8_t *) (p))[1]) << 8) ^ \
@@ -52,47 +45,158 @@
(((uint64_t) ((uint8_t *) (p))[6]) << 48) ^ \
(((uint64_t) ((uint8_t *) (p))[7]) << 56))
// G Mixing function.
#if defined(__AVX2__)
#define B2B_G(a, b, c, d, x, y) { \
v[a] = v[a] + v[b] + x; \
v[d] = ROTR64(v[d] ^ v[a], 32); \
v[c] = v[c] + v[d]; \
v[b] = ROTR64(v[b] ^ v[c], 24); \
v[a] = v[a] + v[b] + y; \
v[d] = ROTR64(v[d] ^ v[a], 16); \
v[c] = v[c] + v[d]; \
v[b] = ROTR64(v[b] ^ v[c], 63); }
#define BLAKE2B_G( Sa, Sb, Sc, Sd, Se, Sf, Sg, Sh ) \
{ \
V[0] = _mm256_add_epi64( V[0], _mm256_add_epi64( V[1], \
_mm256_set_epi64x( m[ sigmaR[ Sg ] ], m[ sigmaR[ Se ] ], \
m[ sigmaR[ Sc ] ], m[ sigmaR[ Sa ] ] ) ) ); \
V[3] = mm256_ror_64( _mm256_xor_si256( V[3], V[0] ), 32 ); \
V[2] = _mm256_add_epi64( V[2], V[3] ); \
V[1] = mm256_ror_64( _mm256_xor_si256( V[1], V[2] ), 24 ); \
\
V[0] = _mm256_add_epi64( V[0], _mm256_add_epi64( V[1], \
_mm256_set_epi64x( m[ sigmaR[ Sh ] ], m[ sigmaR[ Sf ] ], \
m[ sigmaR[ Sd ] ], m[ sigmaR[ Sb ] ] ) ) ); \
V[3] = mm256_ror_64( _mm256_xor_si256( V[3], V[0] ), 16 ); \
V[2] = _mm256_add_epi64( V[2], V[3] ); \
V[1] = mm256_ror_64( _mm256_xor_si256( V[1], V[2] ), 63 ); \
}
// Pivot about V[1] instead of V[0] reduces latency.
#define BLAKE2B_ROUND( R ) \
{ \
__m256i *V = (__m256i*)v; \
const uint8_t *sigmaR = sigma[R]; \
BLAKE2B_G( 0, 1, 2, 3, 4, 5, 6, 7 ); \
V[0] = mm256_shufll_64( V[0] ); \
V[3] = mm256_swap_128( V[3] ); \
V[2] = mm256_shuflr_64( V[2] ); \
BLAKE2B_G( 14, 15, 8, 9, 10, 11, 12, 13 ); \
V[0] = mm256_shuflr_64( V[0] ); \
V[3] = mm256_swap_128( V[3] ); \
V[2] = mm256_shufll_64( V[2] ); \
}
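The lane shuffles between the two BLAKE2B_G calls re-index the 4x4 state so that the second pair of G calls operates on diagonals instead of columns; the closing shuffles restore the original layout. A scalar model of a one-lane row rotation, which is the effect of mm256_shuflr_64 (mm256_shufll_64 being its inverse; the directions are an assumption, sketch only):

static inline void row_ror1( uint64_t r[4] )
{
   uint64_t t = r[0];
   r[0] = r[1]; r[1] = r[2]; r[2] = r[3]; r[3] = t;
}

Pivoting about V[1] rather than V[0] plausibly helps because V[1] holds the b row, the last value each G call writes (the final rotate by 63); leaving it in place keeps one shuffle off the critical path.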
/*
#define BLAKE2B_ROUND( R ) \
{ \
__m256i *V = (__m256i*)v; \
const uint8_t *sigmaR = sigma[R]; \
BLAKE2B_G( 0, 1, 2, 3, 4, 5, 6, 7 ); \
V[3] = mm256_shufll_64( V[3] ); \
V[2] = mm256_swap_128( V[2] ); \
V[1] = mm256_shuflr_64( V[1] ); \
BLAKE2B_G( 8, 9, 10, 11, 12, 13, 14, 15 ); \
V[3] = mm256_shuflr_64( V[3] ); \
V[2] = mm256_swap_128( V[2] ); \
V[1] = mm256_shufll_64( V[1] ); \
}
*/
#elif defined(__SSE2__) || defined(__ARM_NEON)
#define BLAKE2B_G( Va, Vb, Vc, Vd, Sa, Sb, Sc, Sd ) \
{ \
Va = v128_add64( Va, v128_add64( Vb, \
v128_set64( m[ sigmaR[ Sc ] ], m[ sigmaR[ Sa ] ] ) ) ); \
Vd = v128_ror64xor( Vd, Va, 32 ); \
Vc = v128_add64( Vc, Vd ); \
Vb = v128_ror64xor( Vb, Vc, 24 ); \
\
Va = v128_add64( Va, v128_add64( Vb, \
v128_set64( m[ sigmaR[ Sd ] ], m[ sigmaR[ Sb ] ] ) ) ); \
Vd = v128_ror64xor( Vd, Va, 16 ); \
Vc = v128_add64( Vc, Vd ); \
Vb = v128_ror64xor( Vb, Vc, 63 ); \
}
#define BLAKE2B_ROUND( R ) \
{ \
v128_t *V = (v128_t*)v; \
v128_t V2, V3, V6, V7; \
const uint8_t *sigmaR = sigma[R]; \
BLAKE2B_G( V[0], V[2], V[4], V[6], 0, 1, 2, 3 ); \
BLAKE2B_G( V[1], V[3], V[5], V[7], 4, 5, 6, 7 ); \
V2 = v128_alignr64( V[3], V[2], 1 ); \
V3 = v128_alignr64( V[2], V[3], 1 ); \
V6 = v128_alignr64( V[6], V[7], 1 ); \
V7 = v128_alignr64( V[7], V[6], 1 ); \
BLAKE2B_G( V[0], V2, V[5], V6, 8, 9, 10, 11 ); \
BLAKE2B_G( V[1], V3, V[4], V7, 12, 13, 14, 15 ); \
V[2] = v128_alignr64( V2, V3, 1 ); \
V[3] = v128_alignr64( V3, V2, 1 ); \
V[6] = v128_alignr64( V7, V6, 1 ); \
V[7] = v128_alignr64( V6, V7, 1 ); \
}
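With 128-bit registers only two lanes fit per register, so the diagonal re-indexing is done with v128_alignr64 across register pairs rather than by in-register rotation. A scalar model of v128_alignr64( hi, lo, 1 ) as used above (the lane convention, result = { lo[1], hi[0] }, matching SSSE3 _mm_alignr_epi8( hi, lo, 8 ), is an assumption; sketch only):

static inline void alignr64_1( uint64_t r[2],
                               const uint64_t hi[2],
                               const uint64_t lo[2] )
{
   r[0] = lo[1];
   r[1] = hi[0];
}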
#else
#ifndef ROTR64
#define ROTR64(x, y) (((x) >> (y)) ^ ((x) << (64 - (y))))
#endif
#define BLAKE2B_G( R, Va, Vb, Vc, Vd, Sa, Sb ) \
{ \
Va = Va + Vb + m[ sigma[R][Sa] ]; \
Vd = ROTR64( Vd ^ Va, 32 ); \
Vc = Vc + Vd; \
Vb = ROTR64( Vb ^ Vc, 24 ); \
\
Va = Va + Vb + m[ sigma[R][Sb] ]; \
Vd = ROTR64( Vd ^ Va, 16 ); \
Vc = Vc + Vd; \
Vb = ROTR64( Vb ^ Vc, 63 ); \
}
#define BLAKE2B_ROUND( R ) \
{ \
BLAKE2B_G( R, v[ 0], v[ 4], v[ 8], v[12], 0, 1 ); \
BLAKE2B_G( R, v[ 1], v[ 5], v[ 9], v[13], 2, 3 ); \
BLAKE2B_G( R, v[ 2], v[ 6], v[10], v[14], 4, 5 ); \
BLAKE2B_G( R, v[ 3], v[ 7], v[11], v[15], 6, 7 ); \
BLAKE2B_G( R, v[ 0], v[ 5], v[10], v[15], 8, 9 ); \
BLAKE2B_G( R, v[ 1], v[ 6], v[11], v[12], 10, 11 ); \
BLAKE2B_G( R, v[ 2], v[ 7], v[ 8], v[13], 12, 13 ); \
BLAKE2B_G( R, v[ 3], v[ 4], v[ 9], v[14], 14, 15 ); \
}
#endif
// Initialization Vector.
static const uint64_t blake2b_iv[8] = {
static const uint64_t blake2b_iv[8] __attribute__ ((aligned (32))) =
{
0x6A09E667F3BCC908, 0xBB67AE8584CAA73B,
0x3C6EF372FE94F82B, 0xA54FF53A5F1D36F1,
0x510E527FADE682D1, 0x9B05688C2B3E6C1F,
0x1F83D9ABFB41BD6B, 0x5BE0CD19137E2179
};
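(For context: these are the standard BLAKE2b IV words, identical to the SHA-512 initial values, i.e. the first 64 bits of the fractional parts of the square roots of the first eight primes.)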
static const uint8_t sigma[12][16] __attribute__ ((aligned (32))) =
{
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
};
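(Note that rows 10 and 11 repeat rows 0 and 1: BLAKE2b's twelve rounds reuse the ten message permutations as sigma[round mod 10].)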
// Compression function. "last" flag indicates last block.
static void blake2b_compress( sph_blake2b_ctx *ctx, int last )
{
const uint8_t sigma[12][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
};
int i;
uint64_t v[16], m[16];
uint64_t v[16] __attribute__ ((aligned (32)));
uint64_t m[16] __attribute__ ((aligned (32)));
int i;
for (i = 0; i < 8; i++) { // init work variables
v[i] = ctx->h[i];
@@ -103,20 +207,11 @@ static void blake2b_compress( sph_blake2b_ctx *ctx, int last )
v[13] ^= ctx->t[1]; // high 64 bits
if (last) // last block flag set ?
v[14] = ~v[14];
for (i = 0; i < 16; i++) // get little-endian words
m[i] = B2B_GET64(&ctx->b[8 * i]);
for (i = 0; i < 12; i++) { // twelve rounds
B2B_G( 0, 4, 8, 12, m[sigma[i][ 0]], m[sigma[i][ 1]]);
B2B_G( 1, 5, 9, 13, m[sigma[i][ 2]], m[sigma[i][ 3]]);
B2B_G( 2, 6, 10, 14, m[sigma[i][ 4]], m[sigma[i][ 5]]);
B2B_G( 3, 7, 11, 15, m[sigma[i][ 6]], m[sigma[i][ 7]]);
B2B_G( 0, 5, 10, 15, m[sigma[i][ 8]], m[sigma[i][ 9]]);
B2B_G( 1, 6, 11, 12, m[sigma[i][10]], m[sigma[i][11]]);
B2B_G( 2, 7, 8, 13, m[sigma[i][12]], m[sigma[i][13]]);
B2B_G( 3, 4, 9, 14, m[sigma[i][14]], m[sigma[i][15]]);
}
for (i = 0; i < 12; i++)
BLAKE2B_ROUND( i );
for( i = 0; i < 8; ++i )
ctx->h[i] ^= v[i] ^ v[i + 8];
@@ -184,7 +279,8 @@ void sph_blake2b_final( sph_blake2b_ctx *ctx, void *out )
while (ctx->c < 128) // fill up with zeros
ctx->b[ctx->c++] = 0;
blake2b_compress(ctx, 1); // final block flag = 1
// little endian convert and store
for (i = 0; i < ctx->outlen; i++) {

View File

@@ -18,7 +18,7 @@
#endif
// state context
ALIGN(64) typedef struct {
typedef ALIGN(64) struct {
uint8_t b[128]; // input buffer
uint64_t h[8]; // chained state
uint64_t t[2]; // total number of bytes
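A likely motivation for moving the attribute, not stated in the diff: some compilers ignore, or warn about, an alignment attribute placed before the typedef keyword, whereas "typedef ALIGN(64) struct { ... }" reliably applies the alignment to the defined type.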

Some files were not shown because too many files have changed in this diff.