This commit is contained in:
Jay D Dee
2019-10-10 19:58:34 -04:00
parent 789c8b70bc
commit 72330eb5a7
30 changed files with 4534 additions and 858 deletions

View File

@@ -282,7 +282,9 @@ cpuminer_SOURCES = \
algo/yescrypt/yescrypt.c \
algo/yescrypt/sha256_Y.c \
algo/yescrypt/yescrypt-best.c \
algo/yespower/yespower.c \
algo/yespower/yespower-gate.c \
algo/yespower/yespower-blake2b.c \
algo/yespower/crypto/blake2b-yp.c \
algo/yespower/sha256_p.c \
algo/yespower/yespower-opt.c

View File

@@ -92,6 +92,7 @@ Supported Algorithms
phi2-lux identical to phi2
pluck Pluck:128 (Supcoin)
polytimos Ninja
power2b MicroBitcoin (MBC)
quark Quark
qubit Qubit
scrypt scrypt(1024, 1, 1) (default)
@@ -135,6 +136,7 @@ Supported Algorithms
yescryptr32 WAVI
yespower Cryply
yespowerr16 Yenten (YTN)
yespower-b2b generic yespower + blake2b
zr5 Ziftr
Errata

View File

@@ -6,9 +6,6 @@ This feature requires recent SW including GCC version 5 or higher and
openssl version 1.1 or higher. It may also require using "-march=znver1"
compile flag.
cpuminer-opt is a console program, if you're using a mouse you're doing it
wrong.
Security warning
----------------
@@ -34,10 +31,24 @@ Intel Core2 or newer, or AMD Steamroller or newer CPU. ARM CPUs are not
supported.
64 bit Linux or Windows operating system. Apple and Android are not supported.
FreeBSD YMMV.
Change Log
----------
v3.9.9
Added power2b algo for MicroBitcoin.
Added generic yespower-b2b (yespower + blake2b) algo to be used with
the parameters introduced in v3.9.7 for yespower & yescrypt.
Display additional info when a share is rejected.
Some low level enhancements and minor tweaking of log output.
RELEASE_NOTES (this file) and README.md added to Windows release package.
v3.9.8.1
Summary log report will be generated on stratum diff change or after 5 minutes,

View File

@@ -204,6 +204,7 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
case ALGO_PHI2: register_phi2_algo ( gate ); break;
case ALGO_PLUCK: register_pluck_algo ( gate ); break;
case ALGO_POLYTIMOS: register_polytimos_algo ( gate ); break;
case ALGO_POWER2B: register_power2b_algo ( gate ); break;
case ALGO_QUARK: register_quark_algo ( gate ); break;
case ALGO_QUBIT: register_qubit_algo ( gate ); break;
case ALGO_SCRYPT: register_scrypt_algo ( gate ); break;
@@ -251,6 +252,7 @@ bool register_algo_gate( int algo, algo_gate_t *gate )
case ALGO_YESCRYPTR32: register_yescryptr32_algo ( gate ); break;
case ALGO_YESPOWER: register_yespower_algo ( gate ); break;
case ALGO_YESPOWERR16: register_yespowerr16_algo ( gate ); break;
case ALGO_YESPOWER_B2B: register_yespower_b2b_algo ( gate ); break;
case ALGO_ZR5: register_zr5_algo ( gate ); break;
default:
applog(LOG_ERR,"FAIL: algo_gate registration failed, unknown algo %s.\n", algo_names[opt_algo] );

View File

@@ -85,14 +85,16 @@
typedef uint32_t set_t;
#define EMPTY_SET 0
#define SSE2_OPT 1
#define AES_OPT 2
#define SSE42_OPT 4
#define AVX_OPT 8
#define AVX2_OPT 0x10
#define SHA_OPT 0x20
#define AVX512_OPT 0x40
#define EMPTY_SET 0
#define SSE2_OPT 1
#define AES_OPT 2
#define SSE42_OPT 4
#define AVX_OPT 8 // Sandybridge
#define AVX2_OPT 0x10 // Haswell
#define SHA_OPT 0x20 // sha256 (Ryzen, Ice Lake)
#define AVX512_OPT 0x40 // AVX512- F, VL, DQ, BW (Skylake-X)
#define VAES_OPT 0x80 // VAES (Ice Lake)
// return set containing all elements from sets a & b
inline set_t set_union ( set_t a, set_t b ) { return a | b; }

View File

@@ -116,7 +116,7 @@ void decred_build_extraheader( struct work* g_work, struct stratum_ctx* sctx )
// block header suffix from coinb2 (stake version)
memcpy( &g_work->data[44],
&sctx->job.coinbase[ sctx->job.coinbase_size-4 ], 4 );
sctx->bloc_height = g_work->data[32];
sctx->block_height = g_work->data[32];
//applog_hex(work->data, 180);
//applog_hex(&work->data[36], 36);
}

View File

@@ -263,10 +263,9 @@ inline void absorbBlockBlake2Safe( uint64_t *State, const uint64_t *In,
#if defined (__AVX2__)
register __m256i state0, state1, state2, state3;
const __m256i zero = m256_zero;
state0 = zero;
state1 = zero;
state0 =
state1 = m256_zero;
state2 = m256_const_64( 0xa54ff53a5f1d36f1ULL, 0x3c6ef372fe94f82bULL,
0xbb67ae8584caa73bULL, 0x6a09e667f3bcc908ULL );
state3 = m256_const_64( 0x5be0cd19137e2179ULL, 0x1f83d9abfb41bd6bULL,
@@ -290,12 +289,11 @@ inline void absorbBlockBlake2Safe( uint64_t *State, const uint64_t *In,
#elif defined (__SSE2__)
__m128i state0, state1, state2, state3, state4, state5, state6, state7;
const __m128i zero = m128_zero;
state0 = zero;
state1 = zero;
state2 = zero;
state3 = zero;
state0 =
state1 =
state2 =
state3 = m128_zero;
state4 = m128_const_64( 0xbb67ae8584caa73bULL, 0x6a09e667f3bcc908ULL );
state5 = m128_const_64( 0xa54ff53a5f1d36f1ULL, 0x3c6ef372fe94f82bULL );
state6 = m128_const_64( 0x9b05688c2b3e6c1fULL, 0x510e527fade682d1ULL );

View File

@@ -200,6 +200,7 @@ void sm3_4way_compress( __m128i *digest, __m128i *block )
T = _mm_set1_epi32( 0x7A879D8AUL );
for( j =16; j < 64; j++ )
{
// AVX512 _mm_rol_epi32 doesn't like using a variable for the second arg.
SS1 = mm128_rol_32( _mm_add_epi32( _mm_add_epi32( mm128_rol_32(A,12), E ),
mm128_rol_32( T, j&31 ) ), 7 );
SS2 = _mm_xor_si128( SS1, mm128_rol_32( A, 12 ) );

Binary file not shown.

View File

@@ -0,0 +1,322 @@
/*
* Copyright 2009 Colin Percival, 2014 savale
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*/
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <algo/yespower/crypto/sph_types.h>
#include <algo/yespower/utils/sysendian.h>
#include "blake2b-yp.h"
// Cyclic right rotation.
#ifndef ROTR64
#define ROTR64(x, y) (((x) >> (y)) ^ ((x) << (64 - (y))))
#endif
// Little-endian byte access.
#define B2B_GET64(p) \
(((uint64_t) ((uint8_t *) (p))[0]) ^ \
(((uint64_t) ((uint8_t *) (p))[1]) << 8) ^ \
(((uint64_t) ((uint8_t *) (p))[2]) << 16) ^ \
(((uint64_t) ((uint8_t *) (p))[3]) << 24) ^ \
(((uint64_t) ((uint8_t *) (p))[4]) << 32) ^ \
(((uint64_t) ((uint8_t *) (p))[5]) << 40) ^ \
(((uint64_t) ((uint8_t *) (p))[6]) << 48) ^ \
(((uint64_t) ((uint8_t *) (p))[7]) << 56))
// G Mixing function.
#define B2B_G(a, b, c, d, x, y) { \
v[a] = v[a] + v[b] + x; \
v[d] = ROTR64(v[d] ^ v[a], 32); \
v[c] = v[c] + v[d]; \
v[b] = ROTR64(v[b] ^ v[c], 24); \
v[a] = v[a] + v[b] + y; \
v[d] = ROTR64(v[d] ^ v[a], 16); \
v[c] = v[c] + v[d]; \
v[b] = ROTR64(v[b] ^ v[c], 63); }
// Initialization Vector.
static const uint64_t blake2b_iv[8] = {
0x6A09E667F3BCC908, 0xBB67AE8584CAA73B,
0x3C6EF372FE94F82B, 0xA54FF53A5F1D36F1,
0x510E527FADE682D1, 0x9B05688C2B3E6C1F,
0x1F83D9ABFB41BD6B, 0x5BE0CD19137E2179
};
// Compression function. "last" flag indicates last block.
static void blake2b_compress(blake2b_yp_ctx *ctx, int last)
{
	// Message-word permutation schedule for the 12 rounds (rounds 10 and
	// 11 repeat rounds 0 and 1, per the BLAKE2b specification).
	// static const: the table is read-only, so build it once instead of
	// re-initializing 192 bytes on the stack on every call.
	static const uint8_t sigma[12][16] = {
		{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
		{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
		{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
		{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
		{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
		{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
		{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
		{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
		{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
		{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
		{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
		{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
	};
	int i;
	uint64_t v[16], m[16];

	// init work variables: chained state in v[0..7], IV in v[8..15]
	for (i = 0; i < 8; i++) {
		v[i] = ctx->h[i];
		v[i + 8] = blake2b_iv[i];
	}

	v[12] ^= ctx->t[0];		// low 64 bits of offset
	v[13] ^= ctx->t[1];		// high 64 bits

	// last block flag set ?
	if (last) {
		v[14] = ~v[14];
	}

	// get little-endian words from the 128-byte input buffer
	for (i = 0; i < 16; i++) {
		m[i] = B2B_GET64(&ctx->b[8 * i]);
	}

	// twelve rounds of mixing
	for (i = 0; i < 12; i++) {
		B2B_G( 0, 4,  8, 12, m[sigma[i][ 0]], m[sigma[i][ 1]]);
		B2B_G( 1, 5,  9, 13, m[sigma[i][ 2]], m[sigma[i][ 3]]);
		B2B_G( 2, 6, 10, 14, m[sigma[i][ 4]], m[sigma[i][ 5]]);
		B2B_G( 3, 7, 11, 15, m[sigma[i][ 6]], m[sigma[i][ 7]]);
		B2B_G( 0, 5, 10, 15, m[sigma[i][ 8]], m[sigma[i][ 9]]);
		B2B_G( 1, 6, 11, 12, m[sigma[i][10]], m[sigma[i][11]]);
		B2B_G( 2, 7,  8, 13, m[sigma[i][12]], m[sigma[i][13]]);
		B2B_G( 3, 4,  9, 14, m[sigma[i][14]], m[sigma[i][15]]);
	}

	// fold both halves of v back into the chained state
	for(i = 0; i < 8; ++i) {
		ctx->h[i] ^= v[i] ^ v[i + 8];
	}
}
// Initialize the hashing context "ctx" with optional key "key".
// 1 <= outlen <= 64 gives the digest size in bytes.
// Secret key (also <= 64 bytes) is optional (keylen = 0).
// Returns 0 on success, -1 on illegal parameters.
int blake2b_yp_init(blake2b_yp_ctx *ctx, size_t outlen,
	const void *key, size_t keylen)		// (keylen=0: no key)
{
	size_t i;

	// illegal parameters
	if (outlen == 0 || outlen > 64 || keylen > 64) {
		return -1;
	}

	// state, "param block": IV xor'ed with the parameter word below
	for (i = 0; i < 8; i++) {
		ctx->h[i] = blake2b_iv[i];
	}
	// parameter word: fanout/depth 0x0101, key length, digest length
	ctx->h[0] ^= 0x01010000 ^ (keylen << 8) ^ outlen;

	ctx->t[0] = 0;		// input count low word
	ctx->t[1] = 0;		// input count high word
	ctx->c = 0;		// pointer within buffer
	ctx->outlen = outlen;

	// zero input block (key bytes, if any, are copied in by update below)
	for (i = keylen; i < 128; i++) {
		ctx->b[i] = 0;
	}
	// an optional key is absorbed as a full first block
	if (keylen > 0) {
		blake2b_yp_update(ctx, key, keylen);
		ctx->c = 128;	// at the end
	}

	return 0;
}
// Add "inlen" bytes from "in" into the hash.
// Buffers input bytewise; compresses each time the 128-byte block fills.
void blake2b_yp_update(blake2b_yp_ctx *ctx,
	const void *in, size_t inlen)		// data bytes
{
	size_t i;

	for (i = 0; i < inlen; i++) {
		if (ctx->c == 128) {		// buffer full ?
			ctx->t[0] += ctx->c;	// add counters
			if (ctx->t[0] < ctx->c) // carry overflow ?
				ctx->t[1]++;	// high word
			blake2b_compress(ctx, 0); // compress (not last)
			ctx->c = 0;		// counter to zero
		}
		ctx->b[ctx->c++] = ((const uint8_t *) in)[i];
	}
}
// Generate the message digest (size given in init).
// Result placed in "out"; ctx is consumed (no further updates allowed).
void blake2b_yp_final(blake2b_yp_ctx *ctx, void *out)
{
	size_t i;

	ctx->t[0] += ctx->c;		// mark last block offset
	// carry overflow
	if (ctx->t[0] < ctx->c) {
		ctx->t[1]++;		// high word
	}

	// fill up with zeros
	while (ctx->c < 128) {
		ctx->b[ctx->c++] = 0;
	}
	blake2b_compress(ctx, 1);	// final block flag = 1

	// little endian convert and store the first outlen bytes of h[]
	for (i = 0; i < ctx->outlen; i++) {
		((uint8_t *) out)[i] =
			(ctx->h[i >> 3] >> (8 * (i & 7))) & 0xFF;
	}
}
// One-shot convenience wrapper: 32-byte unkeyed BLAKE2b of "in".
// inlen = number of bytes
void blake2b_yp_hash(void *out, const void *in, size_t inlen) {
	blake2b_yp_ctx ctx;
	blake2b_yp_init(&ctx, 32, NULL, 0);
	blake2b_yp_update(&ctx, in, inlen);
	blake2b_yp_final(&ctx, out);
}
// Initialize HMAC-BLAKE2b: absorb the ipad/opad-masked key into the
// inner and outer hash states. keylen = number of bytes; keys longer
// than 64 bytes are first reduced to their 32-byte BLAKE2b hash.
void hmac_blake2b_yp_init(hmac_yp_ctx *hctx, const void *_key, size_t keylen) {
	const uint8_t *key = _key;
	uint8_t keyhash[32];
	uint8_t pad[64];
	uint64_t i;

	if (keylen > 64) {
		blake2b_yp_hash(keyhash, key, keylen);
		key = keyhash;
		keylen = 32;
	}

	// inner state absorbs key ^ ipad (0x36)
	blake2b_yp_init(&hctx->inner, 32, NULL, 0);
	memset(pad, 0x36, 64);
	for (i = 0; i < keylen; ++i) {
		pad[i] ^= key[i];
	}
	blake2b_yp_update(&hctx->inner, pad, 64);

	// outer state absorbs key ^ opad (0x5c)
	blake2b_yp_init(&hctx->outer, 32, NULL, 0);
	memset(pad, 0x5c, 64);
	for (i = 0; i < keylen; ++i) {
		pad[i] ^= key[i];
	}
	blake2b_yp_update(&hctx->outer, pad, 64);

	// NOTE(review): plain memset of key material may be optimized away;
	// consider explicit_bzero / memset_explicit.
	memset(keyhash, 0, 32);
}
// datalen = number of bytes (passed straight through to blake2b_yp_update,
// which consumes bytes; the original "bits" comment was wrong)
void hmac_blake2b_yp_update(hmac_yp_ctx *hctx, const void *data, size_t datalen) {
	// update the inner state
	blake2b_yp_update(&hctx->inner, data, datalen);
}
// Finish HMAC: digest = outer( inner(message) ). Writes 32 bytes.
void hmac_blake2b_yp_final(hmac_yp_ctx *hctx, uint8_t *digest) {
	uint8_t ihash[32];
	blake2b_yp_final(&hctx->inner, ihash);
	blake2b_yp_update(&hctx->outer, ihash, 32);
	blake2b_yp_final(&hctx->outer, digest);
	// scrub the intermediate inner hash
	memset(ihash, 0, 32);
}
// One-shot HMAC-BLAKE2b. keylen = number of bytes; inlen = number of bytes.
// Writes the 32-byte MAC to "out".
void hmac_blake2b_yp_hash(void *out, const void *key, size_t keylen, const void *in, size_t inlen) {
	hmac_yp_ctx hctx;
	hmac_blake2b_yp_init(&hctx, key, keylen);
	hmac_blake2b_yp_update(&hctx, in, inlen);
	hmac_blake2b_yp_final(&hctx, out);
}
// PBKDF2 with HMAC-BLAKE2b as the PRF (32-byte PRF output).
// Derives dkLen bytes from (passwd, salt) using c iterations into buf.
void pbkdf2_blake2b_yp(const uint8_t * passwd, size_t passwdlen, const uint8_t * salt,
	size_t saltlen, uint64_t c, uint8_t * buf, size_t dkLen)
{
	hmac_yp_ctx PShctx, hctx;
	size_t i;
	uint8_t ivec[4];
	uint8_t U[32];
	uint8_t T[32];
	uint64_t j;
	int k;
	size_t clen;

	/* Compute HMAC state after processing P and S. */
	hmac_blake2b_yp_init(&PShctx, passwd, passwdlen);
	hmac_blake2b_yp_update(&PShctx, salt, saltlen);

	/* Iterate through the blocks (32 output bytes per block). */
	for (i = 0; i * 32 < dkLen; i++) {
		/* Generate INT(i + 1). */
		be32enc(ivec, (uint32_t)(i + 1));

		/* Compute U_1 = PRF(P, S || INT(i)), reusing the saved state. */
		memcpy(&hctx, &PShctx, sizeof(hmac_yp_ctx));
		hmac_blake2b_yp_update(&hctx, ivec, 4);
		hmac_blake2b_yp_final(&hctx, U);

		/* T_i = U_1 ... */
		memcpy(T, U, 32);

		for (j = 2; j <= c; j++) {
			/* Compute U_j = PRF(P, U_{j-1}). */
			hmac_blake2b_yp_init(&hctx, passwd, passwdlen);
			hmac_blake2b_yp_update(&hctx, U, 32);
			hmac_blake2b_yp_final(&hctx, U);

			/* ... xor U_j ... */
			for (k = 0; k < 32; k++) {
				T[k] ^= U[k];
			}
		}

		/* Copy as many bytes as necessary into buf (last block may
		 * be partial). */
		clen = dkLen - i * 32;
		if (clen > 32) {
			clen = 32;
		}
		memcpy(&buf[i * 32], T, clen);
	}

	/* Clean PShctx, since we never called _Final on it. */
	// NOTE(review): plain memset may be optimized away for dead stores;
	// consider explicit_bzero / memset_explicit for key material.
	memset(&PShctx, 0, sizeof(hmac_yp_ctx));
}

View File

@@ -0,0 +1,42 @@
#pragma once
// Minimal BLAKE2b / HMAC-BLAKE2b / PBKDF2 interface used by yespower-b2b.
// NOTE(review): "__BLAKE2B_H__" is a reserved identifier (leading double
// underscore); a project-prefixed guard would be safer. Redundant with
// #pragma once anyway.
#ifndef __BLAKE2B_H__
#define __BLAKE2B_H__

#include <stddef.h>
#include <stdint.h>

// NOTE(review): this only detects MSVC/x86 targets; other little-endian
// platforms won't define NATIVE_LITTLE_ENDIAN — confirm intended scope.
#if defined(_MSC_VER) || defined(__x86_64__) || defined(__x86__)
#define NATIVE_LITTLE_ENDIAN
#endif

// state context for an incremental BLAKE2b hash
typedef struct {
	uint8_t b[128];		// input buffer
	uint64_t h[8];		// chained state
	uint64_t t[2];		// total number of bytes
	size_t c;		// pointer for b[]
	size_t outlen;		// digest size
} blake2b_yp_ctx;

// HMAC context: independent inner and outer hash states
typedef struct {
	blake2b_yp_ctx inner;
	blake2b_yp_ctx outer;
} hmac_yp_ctx;

#if defined(__cplusplus)
extern "C" {
#endif

int blake2b_yp_init(blake2b_yp_ctx *ctx, size_t outlen, const void *key, size_t keylen);
void blake2b_yp_update(blake2b_yp_ctx *ctx, const void *in, size_t inlen);
void blake2b_yp_final(blake2b_yp_ctx *ctx, void *out);
void blake2b_yp_hash(void *out, const void *in, size_t inlen);
void hmac_blake2b_yp_hash(void *out, const void *key, size_t keylen, const void *in, size_t inlen);
void pbkdf2_blake2b_yp(const uint8_t * passwd, size_t passwdlen, const uint8_t * salt,
	size_t saltlen, uint64_t c, uint8_t * buf, size_t dkLen);

#if defined(__cplusplus)
}
#endif

#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1 @@
#define insecure_memzero(buf, len) /* empty */

View File

@@ -0,0 +1,94 @@
/*-
* Copyright 2007-2014 Colin Percival
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _SYSENDIAN_H_
#define _SYSENDIAN_H_
#include <stdint.h>
/* Avoid namespace collisions with BSD <sys/endian.h>. */
#define be32dec libcperciva_be32dec
#define be32enc libcperciva_be32enc
#define be64enc libcperciva_be64enc
#define le32dec libcperciva_le32dec
#define le32enc libcperciva_le32enc
/* Decode a 32-bit big-endian value from the 4 bytes at pp. */
static inline uint32_t
be32dec(const void * pp)
{
	const uint8_t * in = (uint8_t const *)pp;
	uint32_t x = 0;

	for (int i = 0; i < 4; i++)
		x = (x << 8) | (uint32_t)in[i];
	return x;
}
/* Encode the 32-bit value x into 4 big-endian bytes at pp. */
static inline void
be32enc(void * pp, uint32_t x)
{
	uint8_t * out = (uint8_t *)pp;

	for (int i = 3; i >= 0; i--) {
		out[i] = (uint8_t)(x & 0xff);
		x >>= 8;
	}
}
/* Encode the 64-bit value x into 8 big-endian bytes at pp. */
static inline void
be64enc(void * pp, uint64_t x)
{
	uint8_t * out = (uint8_t *)pp;

	for (int i = 7; i >= 0; i--) {
		out[i] = (uint8_t)(x & 0xff);
		x >>= 8;
	}
}
/* Decode a 32-bit little-endian value from the 4 bytes at pp. */
static inline uint32_t
le32dec(const void * pp)
{
	const uint8_t * in = (uint8_t const *)pp;
	uint32_t x = 0;

	for (int i = 3; i >= 0; i--)
		x = (x << 8) | (uint32_t)in[i];
	return x;
}
/* Encode the 32-bit value x into 4 little-endian bytes at pp. */
static inline void
le32enc(void * pp, uint32_t x)
{
	uint8_t * out = (uint8_t *)pp;

	for (int i = 0; i < 4; i++) {
		out[i] = (uint8_t)(x & 0xff);
		x >>= 8;
	}
}
#endif /* !_SYSENDIAN_H_ */

File diff suppressed because it is too large Load Diff

View File

@@ -70,6 +70,45 @@ int scanhash_yespower( struct work *work, uint32_t max_nonce,
return 0;
}
// Hash "len" bytes of "input" with yespower-b2b (thread-local state)
// using the globally configured yespower_params; result goes to "output".
void yespower_b2b_hash( const char *input, char *output, uint32_t len )
{
	yespower_b2b_tls( input, len, &yespower_params, (yespower_binary_t*)output );
}
// Scan nonces starting at pdata[19] up to max_nonce, hashing the 80-byte
// big-endian header with yespower-b2b and submitting any hash that meets
// the target. Updates *hashes_done and pdata[19]; always returns 0.
int scanhash_yespower_b2b( struct work *work, uint32_t max_nonce,
                           uint64_t *hashes_done, struct thr_info *mythr )
{
	uint32_t _ALIGN(64) vhash[8];
	uint32_t _ALIGN(64) endiandata[20];
	uint32_t *pdata = work->data;
	uint32_t *ptarget = work->target;
	const uint32_t Htarg = ptarget[7];
	const uint32_t first_nonce = pdata[19];
	uint32_t n = first_nonce;
	int thr_id = mythr->id;  // thr_id arg is deprecated

	// byte-swap the first 19 header words once; the nonce (word 19)
	// is re-encoded each iteration
	for (int k = 0; k < 19; k++)
		be32enc(&endiandata[k], pdata[k]);

	do {
		be32enc(&endiandata[19], n);
		yespower_b2b_hash((char*) endiandata, (char*) vhash, 80);
		// quick check on the high word before the full target test
		if ( vhash[7] < Htarg && fulltest( vhash, ptarget )
		     && !opt_benchmark )
		{
			pdata[19] = n;
			submit_solution( work, vhash, mythr );
		}
		n++;
	} while (n < max_nonce && !work_restart[thr_id].restart);

	*hashes_done = n - first_nonce + 1;
	pdata[19] = n;
	return 0;
}
int64_t yespower_get_max64()
{
return 0xfffLL;
@@ -191,3 +230,66 @@ bool register_yescryptr32_05_algo( algo_gate_t* gate )
return true;
}
// Register the power2b algo (MicroBitcoin): yespower-b2b with fixed
// parameters N=2048, r=32 and the MBC personalization string.
// Returns true on success.
bool register_power2b_algo( algo_gate_t* gate )
{
   yespower_params.version = YESPOWER_1_0;
   yespower_params.N = 2048;
   yespower_params.r = 32;
   yespower_params.pers = "Now I am become Death, the destroyer of worlds";
   yespower_params.perslen = 46;

   // Log the algo's fixed parameters. (Previously mislabelled as
   // "yespower-b2b parameters", a copy-paste from the generic algo.)
   applog( LOG_NOTICE,"power2b parameters: N= %d, R= %d.", yespower_params.N,
           yespower_params.r );
   applog( LOG_NOTICE,"Key= \"%s\"", yespower_params.pers );
   applog( LOG_NOTICE,"Key length= %d\n", yespower_params.perslen );

   gate->optimizations = SSE2_OPT;
   gate->get_max64     = (void*)&yespower_get_max64;
   gate->scanhash      = (void*)&scanhash_yespower_b2b;
   gate->hash          = (void*)&yespower_b2b_hash;
   opt_target_factor = 65536.0;
   return true;
}
// Generic yespower + blake2b. N and R must be supplied on the command
// line (-N/-R); the personalization key (--param-key) is optional.
// Returns false (registration fails) if N or R is missing.
// (Stray ';' after the function's closing brace removed.)
bool register_yespower_b2b_algo( algo_gate_t* gate )
{
   yespower_params.version = YESPOWER_1_0;

   // N & R have no sane defaults for the generic algo; require them.
   if ( !( opt_param_n && opt_param_r ) )
   {
      applog(LOG_ERR,"Yespower-b2b N & R parameters are required");
      return false;
   }
   yespower_params.N = opt_param_n;
   yespower_params.r = opt_param_r;

   if ( opt_param_key )
   {
      yespower_params.pers    = opt_param_key;
      yespower_params.perslen = strlen( opt_param_key );
   }
   else
   {
      yespower_params.pers    = NULL;
      yespower_params.perslen = 0;
   }

   applog( LOG_NOTICE,"Yespower-b2b parameters: N= %d, R= %d",
           yespower_params.N, yespower_params.r );
   if ( yespower_params.pers )
   {
      applog( LOG_NOTICE,"Key= \"%s\"", yespower_params.pers );
      applog( LOG_NOTICE,"Key length= %d\n", yespower_params.perslen );
   }

   gate->optimizations = SSE2_OPT;
   gate->get_max64     = (void*)&yespower_get_max64;
   gate->scanhash      = (void*)&scanhash_yespower_b2b;
   gate->hash          = (void*)&yespower_b2b_hash;
   opt_target_factor = 65536.0;
   return true;
}

View File

@@ -111,6 +111,10 @@ extern int yespower(yespower_local_t *local,
const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
extern int yespower_b2b(yespower_local_t *local,
const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
/**
* yespower_tls(src, srclen, params, dst):
* Compute yespower(src[0 .. srclen - 1], N, r), to be checked for "< target".
@@ -123,6 +127,9 @@ extern int yespower(yespower_local_t *local,
extern int yespower_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
extern int yespower_b2b_tls(const uint8_t *src, size_t srclen,
const yespower_params_t *params, yespower_binary_t *dst);
#ifdef __cplusplus
}
#endif

20
configure vendored
View File

@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.8.1.
# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.9.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -577,8 +577,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='cpuminer-opt'
PACKAGE_TARNAME='cpuminer-opt'
PACKAGE_VERSION='3.9.8.1'
PACKAGE_STRING='cpuminer-opt 3.9.8.1'
PACKAGE_VERSION='3.9.9'
PACKAGE_STRING='cpuminer-opt 3.9.9'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''
@@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures cpuminer-opt 3.9.8.1 to adapt to many kinds of systems.
\`configure' configures cpuminer-opt 3.9.9 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1404,7 +1404,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of cpuminer-opt 3.9.8.1:";;
short | recursive ) echo "Configuration of cpuminer-opt 3.9.9:";;
esac
cat <<\_ACEOF
@@ -1509,7 +1509,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
cpuminer-opt configure 3.9.8.1
cpuminer-opt configure 3.9.9
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by cpuminer-opt $as_me 3.9.8.1, which was
It was created by cpuminer-opt $as_me 3.9.9, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2993,7 +2993,7 @@ fi
# Define the identity of the package.
PACKAGE='cpuminer-opt'
VERSION='3.9.8.1'
VERSION='3.9.9'
cat >>confdefs.h <<_ACEOF
@@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by cpuminer-opt $as_me 3.9.8.1, which was
This file was extended by cpuminer-opt $as_me 3.9.9, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -6756,7 +6756,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
cpuminer-opt config.status 3.9.8.1
cpuminer-opt config.status 3.9.9
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"

View File

@@ -1,4 +1,4 @@
AC_INIT([cpuminer-opt], [3.9.8.1])
AC_INIT([cpuminer-opt], [3.9.9])
AC_PREREQ([2.59c])
AC_CANONICAL_SYSTEM

View File

@@ -107,6 +107,8 @@ int opt_param_n = 0;
int opt_param_r = 0;
int opt_pluck_n = 128;
int opt_n_threads = 0;
bool opt_reset_on_stale = false;
// Windows doesn't support 128 bit affinity mask.
#if defined(__linux) && defined(GCC_INT128)
#define AFFINITY_USES_UINT128 1
@@ -810,33 +812,42 @@ out:
return rc;
}
void scale_hash_for_display ( double* hashrate, char* units )
// returns the unit prefix and the hashrate appropriately scaled.
void scale_hash_for_display ( double* hashrate, char* prefix )
{
if ( *hashrate < 1e4 ) // 0 H/s to 9999 H/s
*units = 0;
else if ( *hashrate < 1e7 ) // 10 kH/s to 9999 kH/s
{ *units = 'k'; *hashrate /= 1e3; }
if ( *hashrate < 1e4 ) // 0 H/s to 9999 h/s
*prefix = 0;
else if ( *hashrate < 1e7 ) // 10 kH/s to 9999 kh/s
{ *prefix = 'k'; *hashrate /= 1e3; }
else if ( *hashrate < 1e10 ) // 10 Mh/s to 9999 Mh/s
{ *units = 'M'; *hashrate /= 1e6; }
{ *prefix = 'M'; *hashrate /= 1e6; }
else if ( *hashrate < 1e13 ) // 10 Gh/s to 9999 Gh/s
{ *units = 'G'; *hashrate /= 1e9; }
{ *prefix = 'G'; *hashrate /= 1e9; }
else if ( *hashrate < 1e16 ) // 10 Th/s to 9999 Th/s
{ *units = 'T'; *hashrate /= 1e12; }
else // 10 Ph/s and higher
{ *units = 'P'; *hashrate /= 1e15; }
{ *prefix = 'T'; *hashrate /= 1e12; }
else if ( *hashrate < 1e19 ) // 10 Ph/s to 9999 Ph
{ *prefix = 'P'; *hashrate /= 1e15; }
else // 10 Eh/s and higher
{ *prefix = 'E'; *hashrate /= 1e18; }
}
void format_hms( char *s, uint64_t t )
static inline void sprintf_et( char *str, uint64_t seconds )
{
// 00h00m00s
uint64_t rem;
uint64_t sec = t % 60;
rem = t / 60;
uint64_t min = rem % 60;
uint64_t hrs = rem / 60;
sprintf( s, "%luh%02lum%02lus", hrs, min, sec );
uint64_t min = seconds / 60;
uint64_t sec = seconds % 60;
uint64_t hrs = min / 60;
if ( hrs )
{
uint64_t days = hrs / 24;
if ( days ) //0d00h
sprintf( str, "%llud%02lluh", days, hrs % 24 );
else // 0h00m
sprintf( str, "%lluh%02llum", hrs, min % 60 );
}
else // 0m00s
sprintf( str, "%llum%02llus", min, sec );
}
// Bitcoin formula for converting difficulty to an equivalent
// number of hashes.
//
@@ -853,7 +864,7 @@ static double time_sum = 0.;
static double latency_sum = 0.;
static uint64_t submit_sum = 0;
static uint64_t reject_sum = 0;
static uint32_t last_bloc_height = 0;
static uint32_t last_block_height = 0;
static double last_targetdiff = 0.;
struct share_stats_t
@@ -903,10 +914,11 @@ void report_summary_log( bool force )
* (double)(submits - rejects) / time;
double scaled_shrate = shrate;
int avg_latency = 0;
double latency_pc = 0.;
double latency_pc = 0.;
double submit_rate = 0.;
char shr_units[4] = {0};
char ghr_units[4] = {0};
char et_str[24];
if ( submits )
avg_latency = latency / submits;
@@ -919,10 +931,10 @@ void report_summary_log( bool force )
scale_hash_for_display( &scaled_shrate, shr_units );
scale_hash_for_display( &scaled_ghrate, ghr_units );
sprintf_et( et_str, et.tv_sec );
applog( LOG_NOTICE,
"Submitted %d shares in %dm%02ds (%.2f /min), %ld rejected",
submits, et.tv_sec / 60, et.tv_sec % 60, submit_rate, rejects );
applog( LOG_NOTICE, "Submitted %d shares in %s, %.2f /min, %ld rejected",
submits, et_str, submit_rate, rejects );
applog2( LOG_INFO, "Share eqv: %.2f %sh/s, miner ref: %.2f %sh/s",
scaled_shrate, shr_units, scaled_ghrate, ghr_units );
@@ -1025,12 +1037,36 @@ static int share_result( int result, struct work *null_work,
sres, share_time, latency, accepted_share_count,
rejected_share_count, solved_block_count );
if ( have_stratum && result && !opt_quiet )
if ( have_stratum && !opt_quiet )
applog2( LOG_INFO, "Share diff %.3g (%5f%%), block %d",
my_stats.share_diff, share_ratio, stratum.bloc_height );
my_stats.share_diff, share_ratio, stratum.block_height );
if ( reason )
applog( LOG_WARNING, "reject reason: %s.", reason );
{
applog( LOG_WARNING, "Reject reason: %s", reason );
if ( opt_debug )
{
uint32_t str1[8], str2[8];
char str3[65];
// display share hash and target for troubleshooting
diff_to_target( str1, my_stats.share_diff );
for ( int i = 0; i < 8; i++ )
be32enc( str2 + i, str1[7 - i] );
bin2hex( str3, (unsigned char*)str2, 12 );
applog2( LOG_INFO, "Hash: %s...", str3 );
diff_to_target( str1, last_targetdiff );
for ( int i = 0; i < 8; i++ )
be32enc( str2 + i, str1[7 - i] );
bin2hex( str3, (unsigned char*)str2, 12 );
applog2( LOG_INFO, "Target: %s...", str3 );
}
if ( opt_reset_on_stale && strstr( reason, "Invalid job id" ) )
stratum_need_reset = true;
}
return 1;
}
@@ -2538,7 +2574,7 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
// Log new block and/or stratum difficulty change.
if ( ( stratum_diff != sctx->job.diff )
|| ( last_bloc_height != sctx->bloc_height ) )
|| ( last_block_height != sctx->block_height ) )
{
double hr = global_hashrate;
char hr_units[4] = {0};
@@ -2551,20 +2587,20 @@ void std_stratum_gen_work( struct stratum_ctx *sctx, struct work *g_work )
report_summary_log( stratum_diff != 0. );
applog( LOG_BLUE, "New stratum difficulty" );
}
if ( last_bloc_height != sctx->bloc_height )
if ( last_block_height != sctx->block_height )
applog( LOG_BLUE, "New block" );
// Update data and calculate new estimates.
stratum_diff = sctx->job.diff;
last_bloc_height = stratum.bloc_height;
last_block_height = stratum.block_height;
last_targetdiff = g_work->targetdiff;
format_hms( block_ttf, net_diff * diff_to_hash / hr );
format_hms( share_ttf, last_targetdiff * diff_to_hash / hr );
sprintf_et( block_ttf, net_diff * diff_to_hash / hr );
sprintf_et( share_ttf, last_targetdiff * diff_to_hash / hr );
scale_hash_for_display ( &hr, hr_units );
applog2( LOG_INFO, "%s %s block %d", short_url,
algo_names[opt_algo], stratum.bloc_height );
algo_names[opt_algo], stratum.block_height );
applog2( LOG_INFO, "Diff: net %g, stratum %g, target %g",
net_diff, stratum_diff, last_targetdiff );
applog2( LOG_INFO, "TTF @ %.2f %sh/s: block %s, share %s",
@@ -2604,8 +2640,8 @@ static void *stratum_thread(void *userdata )
stratum.url = strdup( rpc_url );
applog(LOG_BLUE, "Connection changed to %s", short_url);
}
else if ( !opt_quiet )
applog(LOG_DEBUG, "Stratum connection reset");
else // if ( !opt_quiet )
applog(LOG_WARNING, "Stratum connection reset");
}
while ( !stratum.curl )
@@ -2648,10 +2684,10 @@ static void *stratum_thread(void *userdata )
if ( stratum.job.clean || jsonrpc_2 )
{
static uint32_t last_bloc_height;
if ( last_bloc_height != stratum.bloc_height )
static uint32_t last_block_height;
if ( last_block_height != stratum.block_height )
{
last_bloc_height = stratum.bloc_height;
last_block_height = stratum.block_height;
/*
if ( !opt_quiet )
{
@@ -2674,7 +2710,7 @@ static void *stratum_thread(void *userdata )
else if (opt_debug && !opt_quiet)
{
applog( LOG_BLUE, "%s asks job %d for block %d", short_url,
strtoul( stratum.job.job_id, NULL, 16 ), stratum.bloc_height );
strtoul( stratum.job.job_id, NULL, 16 ), stratum.block_height );
}
} // stratum.job.job_id
@@ -3132,6 +3168,9 @@ void parse_arg(int key, char *arg )
case 1024:
opt_randomize = true;
break;
case 1026:
opt_reset_on_stale = true;
break;
case 'V':
show_version_and_exit();
case 'h':

13
miner.h
View File

@@ -439,7 +439,7 @@ struct stratum_ctx {
struct work work __attribute__ ((aligned (64)));
pthread_mutex_t work_lock;
int bloc_height;
int block_height;
} __attribute__ ((aligned (64)));
bool stratum_socket_full(struct stratum_ctx *sctx, int timeout);
@@ -572,6 +572,7 @@ enum algos {
ALGO_PHI2,
ALGO_PLUCK,
ALGO_POLYTIMOS,
ALGO_POWER2B,
ALGO_QUARK,
ALGO_QUBIT,
ALGO_SCRYPT,
@@ -614,6 +615,7 @@ enum algos {
ALGO_YESCRYPTR32,
ALGO_YESPOWER,
ALGO_YESPOWERR16,
ALGO_YESPOWER_B2B,
ALGO_ZR5,
ALGO_COUNT
};
@@ -667,6 +669,7 @@ static const char* const algo_names[] = {
"phi2",
"pluck",
"polytimos",
"power2b",
"quark",
"qubit",
"scrypt",
@@ -709,6 +712,7 @@ static const char* const algo_names[] = {
"yescryptr32",
"yespower",
"yespowerr16",
"yespower-b2b",
"zr5",
"\0"
};
@@ -751,6 +755,7 @@ extern uint32_t opt_work_size;
extern double *thr_hashrates;
extern double global_hashrate;
extern double stratum_diff;
extern bool opt_reset_on_stale;
extern double net_diff;
extern double net_hashrate;
extern int opt_pluck_n;
@@ -828,6 +833,7 @@ Options:\n\
phi2\n\
pluck Pluck:128 (Supcoin)\n\
polytimos\n\
power2b MicroBitcoin (MBC)\n\
quark Quark\n\
qubit Qubit\n\
scrypt scrypt(1024, 1, 1) (default)\n\
@@ -871,6 +877,7 @@ Options:\n\
yescryptr32 WAVI\n\
yespower Cryply\n\
yespowerr16 Yenten (YTN)\n\
yespower-b2b generic yespower + blake2b\n\
zr5 Ziftr\n\
-N, --param-n N parameter for scrypt based algos\n\
-R, --patam-r R parameter for scrypt based algos\n\
@@ -890,7 +897,8 @@ Options:\n\
-s, --scantime=N upper bound on time spent scanning current work when\n\
long polling is unavailable, in seconds (default: 5)\n\
--randomize Randomize scan range start to reduce duplicates\n\
-f, --diff-factor Divide req. difficulty by this factor (std is 1.0)\n\
--reset-on-stale Workaround reset stratum if too many stale shares\n\
-f, --diff-factor Divide req. difficulty by this factor (std is 1.0)\n\
-m, --diff-multiplier Multiply difficulty by this factor (std is 1.0)\n\
--hash-meter Display thread hash rates\n\
--hide-diff Do not display changes in difficulty\n\
@@ -980,6 +988,7 @@ static struct option const options[] = {
{ "retries", 1, NULL, 'r' },
{ "retry-pause", 1, NULL, 1025 },
{ "randomize", 0, NULL, 1024 },
{ "reset-on-stale", 0, NULL, 1026 },
{ "scantime", 1, NULL, 's' },
#ifdef HAVE_SYSLOG_H
{ "syslog", 0, NULL, 'S' },

View File

@@ -36,7 +36,7 @@
// MMX: 64 bit vectors
// SSE2: 128 bit vectors (64 bit CPUs only, such as Intel Core2.
// AVX2: 256 bit vectors (Starting with Intel Haswell and AMD Ryzen)
// AVX512: 512 bit vectors (still under development)
// AVX512: 512 bit vectors (Starting with SkylakeX)
//
// Most functions are avalaible at the stated levels but in rare cases
// a higher level feature may be required with no compatible alternative.
@@ -138,24 +138,17 @@
// improve high level code readability without the penalty of function
// overhead.
//
// A major restructuring is taking place shifting the focus from pointers
// to registers. Previously pointer casting used memory to provide transparency
// leaving it up to the compiler to manage everything and it does a very good
// job. The focus has shifted to register arguments for more control
// over the actual instructions assuming the data is in a register and the
// the compiler just needs to manage the registers.
//
// Rather than use pointers to provide type transparency
// specific instructions are used to access specific data as specific types.
// Previously pointers were cast and the compiler was left to find a way
// to get the data from wherever it happened to be to the correct registers.
// These utilities avoid memory accesses and assume data is in a register
// argument. Vector constants, in particular are generated with opcodes instead
// of being read from memory.
//
// The utilities defined here make use features like register aliasing
// to optimize operations. Many operations have specialized versions as
// well as more generic versions. It is preferable to use a specialized
// version whenever possible a sthey can take advantage of certain
// optimizations not available to the generic version. Specically the generic
// version usually has a second argument used is some extra calculations.
// version whenever possible as they can take advantage of certain
// optimizations not available to the generic version. The generic
// version will often have an additional argument used is some extra
// calculations.
//
///////////////////////////////////////////////////////
@@ -165,9 +158,6 @@
#include <stdlib.h>
#include <stdbool.h>
// Various types and overlays
#include "simd-utils/simd-types.h"
// 64 and 128 bit integers.
#include "simd-utils/simd-int.h"
@@ -191,16 +181,16 @@
// Utilities that require AVX2 are defined in simd-256.h.
// Skylake-X has all these
#if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// 512 bit vectors
#include "simd-utils/simd-512.h"
#endif // MMX
#endif // SSE2
#endif // AVX
#endif // AVX2
#endif // AVX512
#endif // AVX2
#endif // AVX
#endif // SSE2
#endif // MMX
#include "simd-utils/intrlv.h"

View File

@@ -1,45 +1,11 @@
#if !defined(INTERLEAVE_H__)
#define INTERLEAVE_H__ 1
// philosophical discussion
//
// transitions:
//
// int32 <-> int64
// uint64_t = (uint64_t)int32_lo | ( (uint64_t)int32_hi << 32 )
// Efficient transition and post processing, 32 bit granularity is lost.
// Not pratical.
//
// int32 <-> m64
// More complex, 32 bit granularity maintained, limited number of mmx regs.
// int32 <-> int64 <-> m64 might be more efficient.
//
// int32 <-> m128
// Expensive, current implementation.
//
// int32 <-> m256
// Very expensive multi stage, current implementation.
//
// int64/m64 <-> m128
// Efficient, agnostic to native element size. Common.
//
// m128 <-> m256
// Expensive for a single instruction, unavoidable. Common.
//
// Multi stage options
//
// int32 <-> int64 -> m128
// More efficient than insert32, granularity maintained. Common.
//
// int64 <-> m128 -> m256
// Unavoidable, reasonably efficient. Common
//
// int32 <-> int64 -> m128 -> m256
// Seems inevitable, most efficient despite number of stages. Common.
//
// It seems the best approach is to avoid transitions and use the native type
// of the data: 64 & 32 bit use integer, 128 bit use m128i.
//////////////////////////////////////////////////////////////////////////
//
// Utilities to interleave and deinterleave multiple data for parallel
// processing using SIMD. Utilities are grouped by data size.
//
////////////////////////////////
//
@@ -262,8 +228,6 @@ static inline void dintrlv_4x32_512( void *dst0, void *dst1, void *dst2,
d0[15] = s[ 60]; d1[15] = s[ 61]; d2[15] = s[ 62]; d3[15] = s[ 63];
}
#undef DLEAVE_4x32
static inline void extr_lane_4x32( void *d, const void *s,
const int lane, const int bit_len )
{
@@ -308,6 +272,7 @@ static inline void mm128_intrlv_4x32x( void *dst, void *src0, void *src1,
}
}
// Double buffered source to reduce latency
static inline void mm128_bswap32_intrlv80_4x32( void *d, void *src )
{
__m128i sx = mm128_bswap_32( casti_m128i( src,0 ) );
@@ -469,15 +434,11 @@ static inline void extr_lane_8x32( void *d, const void *s,
#if defined(__AVX2__)
// There a alignment problems with the source buffer on Wwindows,
// can't use 256 bit bswap.
static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src )
{
__m256i s0 = mm256_bswap_32( casti_m256i( src,0 ) );
__m256i s1 = mm256_bswap_32( casti_m256i( src,1 ) );
__m128i s2 = mm128_bswap_32( casti_m128i( src,4 ) );
// const __m256i zero = m256_zero;
const __m256i one = m256_one_32;
const __m256i two = _mm256_add_epi32( one, one );
const __m256i three = _mm256_add_epi32( two, one );
@@ -485,7 +446,6 @@ static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src )
casti_m256i( d, 0 ) = _mm256_broadcastd_epi32(
_mm256_castsi256_si128( s0 ) );
// casti_m256i( d, 0 ) = _mm256_permutevar8x32_epi32( s0, m256_zero );
casti_m256i( d, 1 ) = _mm256_permutevar8x32_epi32( s0, one );
casti_m256i( d, 2 ) = _mm256_permutevar8x32_epi32( s0, two );
casti_m256i( d, 3 ) = _mm256_permutevar8x32_epi32( s0, three );
@@ -498,7 +458,6 @@ static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src )
_mm256_add_epi32( four, three ) );
casti_m256i( d, 8 ) = _mm256_broadcastd_epi32(
_mm256_castsi256_si128( s1 ) );
// casti_m256i( d, 8 ) = _mm256_permutevar8x32_epi32( s1, m256_zero );
casti_m256i( d, 9 ) = _mm256_permutevar8x32_epi32( s1, one );
casti_m256i( d,10 ) = _mm256_permutevar8x32_epi32( s1, two );
casti_m256i( d,11 ) = _mm256_permutevar8x32_epi32( s1, three );
@@ -510,8 +469,6 @@ static inline void mm256_bswap32_intrlv80_8x32( void *d, void *src )
casti_m256i( d,15 ) = _mm256_permutevar8x32_epi32( s1,
_mm256_add_epi32( four, three ) );
casti_m256i( d,16 ) = _mm256_broadcastd_epi32( s2 );
// casti_m256i( d,16 ) = _mm256_permutevar8x32_epi32(
// _mm256_castsi128_si256( s2 ), m256_zero );
casti_m256i( d,17 ) = _mm256_permutevar8x32_epi32(
_mm256_castsi128_si256( s2 ), one );
casti_m256i( d,18 ) = _mm256_permutevar8x32_epi32(
@@ -655,7 +612,7 @@ static inline void dintrlv_16x32_512( void *d00, void *d01, void *d02,
#undef DLEAVE_16x32
static inline void extr_lane_16x32( void *d, const void *s,
const int lane, const int bit_len )
const int lane, const int bit_len )
{
((uint32_t*)d)[ 0] = ((uint32_t*)s)[ lane ];
((uint32_t*)d)[ 1] = ((uint32_t*)s)[ lane+16 ];
@@ -689,42 +646,39 @@ static inline void mm512_bswap32_intrlv80_16x32( void *d, void *src )
casti_m512i( d, 0 ) = _mm512_broadcastd_epi32(
_mm512_castsi512_si128( s0 ) );
// casti_m512i( d, 0 ) = _mm512_permutexvar_epi32( s0, m512_zero );
casti_m512i( d, 1 ) = _mm512_permutexvar_epi32( s0, one );
casti_m512i( d, 2 ) = _mm512_permutexvar_epi32( s0, two );
casti_m512i( d, 3 ) = _mm512_permutexvar_epi32( s0, three );
casti_m512i( d, 4 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( two, two ) );
casti_m512i( d, 5 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( three, two ) );
casti_m512i( d, 6 ) = _mm512_permutexvar_epi32( s0, x );
casti_m512i( d, 7 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, one ) );
casti_m512i( d, 8 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, two ) );
casti_m512i( d, 1 ) = _mm512_permutexvar_epi32( one, s0 );
casti_m512i( d, 2 ) = _mm512_permutexvar_epi32( two, s0 );
casti_m512i( d, 3 ) = _mm512_permutexvar_epi32( three, s0 );
casti_m512i( d, 4 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( two, two ), s0 );
casti_m512i( d, 5 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( three, two ), s0 );
casti_m512i( d, 6 ) = _mm512_permutexvar_epi32( x, s0 );
casti_m512i( d, 7 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, one ), s0 );
casti_m512i( d, 8 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, two ), s0 );
x = _mm512_add_epi32( x, three );
casti_m512i( d, 9 ) = _mm512_permutexvar_epi32( s0, x );
casti_m512i( d,10 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, one ) );
casti_m512i( d,11 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, two ) );
casti_m512i( d, 9 ) = _mm512_permutexvar_epi32( x, s0 );
casti_m512i( d,10 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, one ), s0 );
casti_m512i( d,11 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, two ), s0 );
x = _mm512_add_epi32( x, three );
casti_m512i( d,12 ) = _mm512_permutexvar_epi32( s0, x );
casti_m512i( d,13 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, one ) );
casti_m512i( d,14 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, two ) );
casti_m512i( d,15 ) = _mm512_permutexvar_epi32( s0,
_mm512_add_epi32( x, three ) );
casti_m512i( d,12 ) = _mm512_permutexvar_epi32( x, s0 );
casti_m512i( d,13 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, one ), s0 );
casti_m512i( d,14 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, two ), s0 );
casti_m512i( d,15 ) = _mm512_permutexvar_epi32(
_mm512_add_epi32( x, three ), s0 );
casti_m512i( d,16 ) = _mm512_broadcastd_epi32( s1 );
// casti_m512i( d,16 ) = _mm512_permutexvar_epi32(
// _mm512_castsi128_si512( s1 ), m512_zero );
casti_m512i( d,17 ) = _mm512_permutexvar_epi32(
_mm512_castsi128_si512( s1 ), one );
casti_m512i( d,18 ) = _mm512_permutexvar_epi32(
_mm512_castsi128_si512( s1 ), two );
casti_m512i( d,19 ) = _mm512_permutexvar_epi32(
_mm512_castsi128_si512( s1 ), three );
casti_m512i( d,17 ) = _mm512_permutexvar_epi32( one,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d,18 ) = _mm512_permutexvar_epi32( two,
_mm512_castsi128_si512( s1 ) );
casti_m512i( d,19 ) = _mm512_permutexvar_epi32( three,
_mm512_castsi128_si512( s1 ) );
}
#endif // AVX512
@@ -997,27 +951,21 @@ static inline void mm512_bswap32_intrlv80_8x64( void *dst, void *src )
__m512i *d = (__m512i*)dst;
__m512i s0 = mm512_bswap_32( casti_m512i( src, 0 ) );
__m128i s1 = mm128_bswap_32( casti_m128i( src, 4 ) );
// const __m512i zero = m512_zero;
const __m512i one = m512_one_64;
const __m512i two = _mm512_add_epi64( one, one );
const __m512i three = _mm512_add_epi64( two, one );
const __m512i four = _mm512_add_epi64( two, two );
d[0] = _mm512_broadcastq_epi64(
_mm512_castsi512_si128( s0 ) );
// d[0] = _mm512_permutexvar_epi64( s0, m512_zero );
d[1] = _mm512_permutexvar_epi64( s0, one );
d[2] = _mm512_permutexvar_epi64( s0, two );
d[3] = _mm512_permutexvar_epi64( s0, three );
d[4] = _mm512_permutexvar_epi64( s0, four );
d[5] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, one ) );
d[6] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, two ) );
d[7] = _mm512_permutexvar_epi64( s0, _mm512_add_epi64( four, three ) );
d[8] = _mm512_broadcastq_epi64( s1 );
// d[8] = _mm512_permutexvar_epi64(
// _mm512_castsi128_si512( s1 ), m512_zero );
d[9] = _mm512_permutexvar_epi64(
_mm512_castsi128_si512( s1 ), one );
d[0] = _mm512_broadcastq_epi64( _mm512_castsi512_si128( s0 ) );
d[1] = _mm512_permutexvar_epi64( one, s0 );
d[2] = _mm512_permutexvar_epi64( two, s0 );
d[3] = _mm512_permutexvar_epi64( three, s0 );
d[4] = _mm512_permutexvar_epi64( four, s0 );
d[5] = _mm512_permutexvar_epi64( _mm512_add_epi64( four, one ), s0 );
d[6] = _mm512_permutexvar_epi64( _mm512_add_epi64( four, two ), s0 );
d[7] = _mm512_permutexvar_epi64( _mm512_add_epi64( four, three ), s0 );
d[8] = _mm512_broadcastq_epi64( s1 );
d[9] = _mm512_permutexvar_epi64( one, _mm512_castsi128_si512( s1 ) );
}
#endif // AVX512
@@ -1164,6 +1112,44 @@ static inline void dintrlv_4x128_512( void *dst0, void *dst1, void *dst2,
}
// 2x256 (AVX512)
#if defined (__AVX__)
static inline void intrlv_2x256( void *dst, const void *src0,
const void *src1, int bit_len )
{
__m256i *d = (__m256i*)dst;
const __m256i *s0 = (const __m256i*)src0;
const __m256i *s1 = (const __m256i*)src1;
d[ 0] = s0[0]; d[ 1] = s1[0];
if ( bit_len <= 256 ) return;
d[ 2] = s0[1]; d[ 3] = s1[1];
if ( bit_len <= 512 ) return;
d[ 4] = s0[2];
if ( bit_len <= 640 ) return;
d[ 5] = s1[2];
d[ 6] = s0[3]; d[ 7] = s1[3];
}
// No 80 byte dintrlv
static inline void dintrlv_2x256( void *dst0, void *dst1,
const void *src, int bit_len )
{
__m256i *d0 = (__m256i*)dst0;
__m256i *d1 = (__m256i*)dst1;
const __m256i *s = (const __m256i*)src;
d0[0] = s[ 0]; d1[0] = s[ 1];
if ( bit_len <= 256 ) return;
d0[1] = s[ 2]; d1[1] = s[ 3];
if ( bit_len <= 512 ) return;
d0[2] = s[ 4]; d1[2] = s[ 5];
d0[3] = s[ 6]; d1[3] = s[ 7];
}
#endif // AVX
///////////////////////////
//
// Re-intereleaving

View File

@@ -19,18 +19,19 @@
//
// Constants are an issue with simd. Simply put, immediate constants don't
// exist. All simd constants either reside in memory or a register and
// must be loaded or generated at run time.
// must be loaded from memory or generated using instructions at run time.
//
// Due to the cost of generating constants it is often more efficient to
// define a local const for repeated references to the same constant.
//
// Some constant values can be generated using shortcuts. Zero for example
// is as simple as XORing any register with itself, and is implemented
// in the setzero instrinsic. These shortcuts must be implemented using ASM
// iby the setzero instrinsic. These shortcuts must be implemented using ASM
// due to doing things the compiler would complain about. Another single
// instruction constant is -1, defined below. Others may be added as the need
// arises. Even single instruction constants are less efficient than local
// register variables so the advice above stands.
// register variables so the advice above stands. These pseudo-constants
// do not perform any memory accesses
//
// One common use for simd constants is as a control index for some simd
// instructions like blend and shuffle. The utilities below do not take this
@@ -40,74 +41,74 @@
#define m128_zero _mm_setzero_si128()
static inline __m128i m128_one_128_fn()
static inline __m128i mm128_one_128_fn()
{
register uint64_t one = 1;
register __m128i a;
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return a;
}
#define m128_one_128 m128_one_128_fn()
#define m128_one_128 mm128_one_128_fn()
static inline __m128i m128_one_64_fn()
static inline __m128i mm128_one_64_fn()
{
register uint64_t one = 1;
register __m128i a;
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x44 );
}
#define m128_one_64 m128_one_64_fn()
#define m128_one_64 mm128_one_64_fn()
static inline __m128i m128_one_32_fn()
static inline __m128i mm128_one_32_fn()
{
register uint32_t one = 1;
register __m128i a;
__m128i a;
const uint32_t one = 1;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_32 m128_one_32_fn()
#define m128_one_32 mm128_one_32_fn()
static inline __m128i m128_one_16_fn()
static inline __m128i mm128_one_16_fn()
{
register uint32_t one = 0x00010001;
register __m128i a;
__m128i a;
const uint32_t one = 0x00010001;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_16 m128_one_16_fn()
#define m128_one_16 mm128_one_16_fn()
static inline __m128i m128_one_8_fn()
static inline __m128i mm128_one_8_fn()
{
register uint32_t one = 0x01010101;
register __m128i a;
__m128i a;
const uint32_t one = 0x01010101;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm_shuffle_epi32( a, 0x00 );
}
#define m128_one_8 m128_one_8_fn()
#define m128_one_8 mm128_one_8_fn()
static inline __m128i m128_neg1_fn()
static inline __m128i mm128_neg1_fn()
{
__m128i a;
asm( "pcmpeqd %0, %0\n\t"
: "=x" (a) );
return a;
}
#define m128_neg1 m128_neg1_fn()
#define m128_neg1 mm128_neg1_fn()
// move uint64_t to low bits of __m128i, zeros the rest
static inline __m128i mm128_mov64_128( uint64_t n )
{
register __m128i a;
__m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (n) );
@@ -116,7 +117,7 @@ static inline __m128i mm128_mov64_128( uint64_t n )
static inline __m128i mm128_mov32_128( uint32_t n )
{
register __m128i a;
__m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (n) );
@@ -125,7 +126,7 @@ static inline __m128i mm128_mov32_128( uint32_t n )
static inline uint64_t mm128_mov128_64( __m128i a )
{
register uint64_t n;
uint64_t n;
asm( "movq %1, %0\n\t"
: "=x" (n)
: "r" (a) );
@@ -134,7 +135,7 @@ static inline uint64_t mm128_mov128_64( __m128i a )
static inline uint32_t mm128_mov128_32( __m128i a )
{
register uint32_t n;
uint32_t n;
asm( "movd %1, %0\n\t"
: "=x" (n)
: "r" (a) );
@@ -143,7 +144,7 @@ static inline uint32_t mm128_mov128_32( __m128i a )
static inline __m128i m128_const1_64( const uint64_t n )
{
register __m128i a;
__m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (n) );
@@ -152,7 +153,7 @@ static inline __m128i m128_const1_64( const uint64_t n )
static inline __m128i m128_const1_32( const uint32_t n )
{
register __m128i a;
__m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (n) );
@@ -165,7 +166,7 @@ static inline __m128i m128_const1_32( const uint32_t n )
static inline __m128i m128_const_64( const uint64_t hi, const uint64_t lo )
{
register __m128i a;
__m128i a;
asm( "movq %2, %0\n\t"
"pinsrq $1, %1, %0\n\t"
: "=x" (a)
@@ -173,23 +174,9 @@ static inline __m128i m128_const_64( const uint64_t hi, const uint64_t lo )
return a;
}
/*
static inline __m128i m128_const1_64( const uint64_t n )
{
register __m128i a;
asm( "movq %1, %0\n\t"
"pinsrq $1, %1, %0\n\t"
: "=x"(a)
: "r"(n) );
return a;
}
*/
#else
// #define m128_one_128 _mm_set_epi64x( 0ULL, 1ULL )
#define m128_const_64 _mm_set_epi64x
// #define m128_const1_64 _mm_set1_epi64x
#endif
@@ -310,8 +297,19 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
// AVX512 has implemented bit rotation for 128 bit vectors with
// 64 and 32 bit elements.
//
// Rotate each element of v by c bits
// compiler doesn't like when a variable is used for the last arg of
// _mm_rol_epi32, must be "8 bit immediate".
// sm3-hash-4way.c fails to compile.
/*
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm128_ror_64( v, c ) _mm_ror_epi64( v, c )
#define mm128_rol_64( v, c ) _mm_rol_epi64( v, c )
#define mm128_ror_32( v, c ) _mm_ror_epi32( v, c )
#define mm128_rol_32( v, c ) _mm_rol_epi32( v, c )
#else
*/
#define mm128_ror_64( v, c ) \
_mm_or_si128( _mm_srli_epi64( v, c ), _mm_slli_epi64( v, 64-(c) ) )
@@ -325,6 +323,8 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
#define mm128_rol_32( v, c ) \
_mm_or_si128( _mm_slli_epi32( v, c ), _mm_srli_epi32( v, 32-(c) ) )
//#endif // AVX512 else
#define mm128_ror_16( v, c ) \
_mm_or_si128( _mm_srli_epi16( v, c ), _mm_slli_epi16( v, 16-(c) ) )
@@ -365,6 +365,22 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
#define mm128_brol( v, c ) \
_mm_or_si128( _mm_slli_si128( v, c ), _mm_srli_si128( v, 16-(c) ) )
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm128_invert_32( v ) _mm_shuffle_epi32( v, 0x1b )
#if defined(__SSSE3__)
#define mm128_invert_16( v ) \
_mm_shuffle_epi8( v, mm128_const_64( 0x0100030205040706, \
0x09080b0a0d0c0f0e )
#define mm128_invert_8( v ) \
_mm_shuffle_epi8( v, mm128_const_64( 0x0001020304050607, \
0x08090a0b0c0d0e0f )
#endif // SSSE3
//
// Rotate elements within lanes.

View File

@@ -14,30 +14,32 @@
// is limited because 256 bit vectors are less likely to be used when 512
// is available.
//
// All SIMD constant macros are actually functions containing executable
// code and therefore can't be used as compile time initializers.
#define m256_zero _mm256_setzero_si256()
#define m256_one_256 \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_zero, 1 )
#define m256_one_128 \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_one_128, 1 )
// set instructions load memory resident constants, this avoids mem.
// cost 4 pinsert + 1 vinsert, estimate 8 clocks latency.
#if defined(__AVX2__)
#define m256_const_128( hi, lo ) \
_mm256_inserti128_si256( _mm256_castsi128_si256( lo ), hi, 1 )
#define m256_const_64( i3, i2, i1, i0 ) \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_const_64( i1, i0 ) ), \
m128_const_64( i3, i2 ), 1 )
m256_const_128( m128_const_64( i3, i2 ), m128_const_64( i1, i0 ) )
/*
#define m256_const_64( i3, i2, i1, i0 ) \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_const_64( i1, i0 ) ), \
m128_const_64( i3, i2 ), 1 )
*/
#else // AVX
#define m256_const_64( i3, i2, i1, i0 ) _mm256_set_epi64x( i3, i2, i1, i0 )
#endif
static inline __m256i m256_const1_64( uint64_t i )
{
register __m128i a;
__m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (i) );
@@ -46,86 +48,140 @@ static inline __m256i m256_const1_64( uint64_t i )
static inline __m256i m256_const1_32( uint32_t i )
{
register __m128i a;
__m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (i) );
return _mm256_broadcastd_epi32( a );
}
static inline __m256i m256_const1_16( uint16_t i )
{
__m128i a;
asm( "movw %1, %0\n\t"
: "=x" (a)
: "r" (i) );
return _mm256_broadcastw_epi16( a );
}
static inline __m256i m256_const1_8( uint8_t i )
{
__m128i a;
asm( "movb %1, %0\n\t"
: "=x" (a)
: "r" (i) );
return _mm256_broadcastb_epi8( a );
}
//
// All SIMD constant macros are actually functions containing executable
// code and therefore can't be used as compile time initializers.
#define m256_zero _mm256_setzero_si256()
#if defined(__AVX2__)
// Don't call the frunction directly, use the macro to make appear like
// a constant identifier instead of a function.
// __m256i foo = m256_one_64;
static inline __m256i m256_one_64_fn()
static inline __m256i mm256_one_256_fn()
{
register uint64_t one = 1;
register __m128i a;
__m256i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return a;
}
#define m256_one_256 mm256_one_256_fn()
static inline __m256i mm256_one_128_fn()
{
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastsi128_si256( a );
}
#define m256_one_128 mm256_one_128_fn()
static inline __m256i mm256_one_64_fn()
{
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_64 m256_one_64_fn()
#define m256_one_64 mm256_one_64_fn()
static inline __m256i m256_one_32_fn()
static inline __m256i mm256_one_32_fn()
{
register uint64_t one = 0x0000000100000001;
register __m128i a;
__m128i a;
const uint64_t one = 0x0000000100000001;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_32 m256_one_32_fn()
#define m256_one_32 mm256_one_32_fn()
static inline __m256i m256_one_16_fn()
static inline __m256i mm256_one_16_fn()
{
register uint64_t one = 0x0001000100010001;
register __m128i a;
__m128i a;
const uint64_t one = 0x0001000100010001;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_16 m256_one_16_fn()
#define m256_one_16 mm256_one_16_fn()
static inline __m256i m256_one_8_fn()
static inline __m256i mm256_one_8_fn()
{
register uint64_t one = 0x0101010101010101;
register __m128i a;
__m128i a;
const uint64_t one = 0x0101010101010101;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return _mm256_broadcastq_epi64( a );
}
#define m256_one_8 m256_one_8_fn()
#define m256_one_8 mm256_one_8_fn()
static inline __m256i m256_neg1_fn()
static inline __m256i mm256_neg1_fn()
{
register __m256i a;
__m256i a;
asm( "vpcmpeqq %0, %0, %0\n\t"
: "=x"(a) );
return a;
}
#define m256_neg1 m256_neg1_fn()
#define m256_neg1 mm256_neg1_fn()
#else // AVX
#define m256_one_256 m256_const_64( m128_zero, m128_one ) \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_zero, 1 )
#define m256_one_128 \
_mm256_inserti128_si256( _mm256_castsi128_si256( m128_one_128 ), \
m128_one_128, 1 )
#define m256_one_64 _mm256_set1_epi64x( 1ULL )
#define m256_one_32 _mm256_set1_epi64x( 0x0000000100000001ULL )
#define m256_one_16 _mm256_set1_epi64x( 0x0001000100010001ULL )
#define m256_one_8 _mm256_set1_epi64x( 0x0101010101010101ULL )
// AVX doesn't have inserti128 but insertf128 will do.
static inline __m256i m256_neg1_fn()
static inline __m256i mm256_neg1_fn()
{
__m128i a = m128_neg1;
return _mm256_insertf128_si256( _mm256_castsi128_si256( a ), a, 1 );
}
#define m256_neg1 m256_neg1_fn()
#define m256_neg1 mm256_neg1_fn()
#endif // AVX2 else AVX
@@ -175,7 +231,7 @@ do { \
// Move integer to lower bits of vector, upper bits set to zero.
static inline __m256i mm256_mov64_256( uint64_t n )
{
register __m128i a;
__m128i a;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (n) );
@@ -184,14 +240,14 @@ static inline __m256i mm256_mov64_256( uint64_t n )
static inline __m256i mm256_mov32_256( uint32_t n )
{
register __m128i a;
__m128i a;
asm( "movd %1, %0\n\t"
: "=x" (a)
: "r" (n) );
return _mm256_castsi128_si256( a );
}
// Move lo bits of vector to integer, hi bits are truncated.
// Return lo bits of vector as integer.
#define mm256_mov256_64( a ) mm128_mov128_64( _mm256_castsi256_si128( a ) )
#define mm256_mov256_32( a ) mm128_mov128_32( _mm256_castsi256_si128( a ) )
@@ -310,10 +366,20 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
// The only bit shift for more than 64 bits is with __int128.
//
// AVX512 has bit rotate for 256 bit vectors with 64 or 32 bit elements
// but is of little value
//
// Rotate each element of v by c bits
// compiler doesn't like when a variable is used for the last arg of
// _mm_rol_epi32, must be "8 bit immediate".
/*
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm256_ror_64( v, c ) _mm256_ror_epi64( v, c )
#define mm256_rol_64( v, c ) _mm256_rol_epi64( v, c )
#define mm256_ror_32( v, c ) _mm256_ror_epi32( v, c )
#define mm256_rol_32( v, c ) _mm256_rol_epi32( v, c )
#else
*/
#define mm256_ror_64( v, c ) \
_mm256_or_si256( _mm256_srli_epi64( v, c ), \
_mm256_slli_epi64( v, 64-(c) ) )
@@ -330,6 +396,9 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
_mm256_or_si256( _mm256_slli_epi32( v, c ), \
_mm256_srli_epi32( v, 32-(c) ) )
// #endif // AVX512 else
#define mm256_ror_16( v, c ) \
_mm256_or_si256( _mm256_srli_epi16( v, c ), \
_mm256_slli_epi16( v, 16-(c) ) )
@@ -365,6 +434,19 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
_mm256_set1_epi32( 32 ), c ) ) )
// AVX512 can do 16 bit elements.
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
#define mm256_rorv_16( v, c ) \
_mm256_or_si256( \
_mm256_srlv_epi16( v, _mm256_set1_epi16( c ) ), \
_mm256_sllv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )
#define mm256_rolv_16( v, c ) \
_mm256_or_si256( \
_mm256_sllv_epi16( v, _mm256_set1_epi16( c ) ), \
_mm256_srlv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )
#endif // AVX512
//
// Rotate elements accross all lanes.
@@ -403,7 +485,7 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
0x0000000000000007, 0x0000000600000005 )
// AVX512 can do 16 & 8 bit elements.
#if defined(__AVX512VL__)
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// Rotate 256 bit vector by one 16 bit element.
#define mm256_ror_1x16( v ) \
@@ -416,17 +498,50 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
0x000e000d000c000b, 0x000a000900080007, \
0x0006000500040003, 0x000200010000000f ), v )
// Rotate 256 bit vector by one byte.
#define mm256_ror_1x8( v ) m256_const_64( \
0x001f1e1d1c1b1a19, 0x1817161514131211, \
0x100f0e0d0c0b0a09, 0x0807060504030201 )
#if defined (__AVX512VBMI__)
#define mm256_rol_1x8( v ) m256_const_64( \
// Rotate 256 bit vector by one byte.
#define mm256_ror_1x8( v ) _mm256_permutexvar_epi8( m256_const_64( \
0x001f1e1d1c1b1a19, 0x1817161514131211, \
0x100f0e0d0c0b0a09, 0x0807060504030201 ), v )
#define mm256_rol_1x8( v ) _mm256_permutexvar_epi16( m256_const_64( \
0x1e1d1c1b1a191817, 0x161514131211100f, \
0x0e0d0c0b0a090807, 0x060504030201001f )
0x0e0d0c0b0a090807, 0x060504030201001f ), v )
#endif // VBMI
#endif // AVX512
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm256_invert_64 ( v ) _mm256_permute4x64_epi64( v, 0x1b )
#define mm256_invert_32 ( v ) _mm256_permutevar8x32_epi32( v, \
m256_const_64( 0x0000000000000001, 0x0000000200000003 \
0x0000000400000005, 0x0000000600000007 )
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// Invert vector: {7,6,5,4,3,2,1,0} -> {0,1,2,3,4,5,6,7}
#define mm256_invert_16 ( v ) \
_mm256_permutexvar_epi16( m256_const_64( 0x0000000100020003, \
0x0004000500060007, \
0x00080009000a000b, \
0x000c000d000e000f ), v )
#if defined(__AVX512VBMI__)
#define mm256_invert_8( v ) \
_mm256_permutexvar_epi8( m256_const_64( 0x0001020304050607, \
0x08090a0b0c0d0e0f, \
0x1011121314151617, \
0x18191a1b1c1d1e1f ), v )
#endif // VBMI
#endif // AVX512
//
// Rotate elements within lanes of 256 bit vector.

View File

@@ -1,35 +1,32 @@
#if !defined(SIMD_512_H__)
#define SIMD_512_H__ 1
#if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
//
// Some extentsions in AVX512 supporting operations on
// smaller elements in 256 bit vectors.
// AVX-512
//
// The baseline for these utilities is AVX512F, AVX512DQ, AVX512BW
// and AVX512VL, first available in quantity in Skylake-X.
// Some utilities may require additional features available in subsequent
// architectures and are noted.
// Variable rotate, each element rotates by corresponding index.
#define mm256_rorv_16( v, c ) \
_mm256_or_si256( \
_mm256_srlv_epi16( v, _mm256_set1_epi16( c ) ), \
_mm256_sllv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )
#define mm256_rolv_16( v, c ) \
_mm256_or_si256( \
_mm256_sllv_epi16( v, _mm256_set1_epi16( c ) ), \
_mm256_srlv_epi16( v, _mm256_set1_epi16( 16-(c) ) ) )
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
// Invert vector: {7,6,5,4,3,2,1,0} -> {0,1,2,3,4,5,6,7}
#define mm256_invert_16 ( v ) \
_mm256_permutex_epi16( v, _mm256_set_epi16( 0, 1, 2, 3, 4, 5, 6, 7, \
8, 9,10,11,12,13,14,15 ) )
#define mm256_invert_8( v ) \
_mm256_permutex_epi8( v, _mm256_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, \
8, 9,10,11,12,13,14,15, \
16,17,18,19,20,21,22,23, \
24,25,26,27,28,29,30,31 ) )
// AVX512 intrinsics have a few peculiarities with permutes and shuffles
// that are inconsistent with previous AVX2 implementations.
//
// _mm512_permutex_epi64 only shuffles within 256 bit lanes. Permute
// usually shuffles accross all lanes.
//
// permutexvar has args reversed, index is first arg. Previously all
// permutes and shuffles have the source vector first.
//
// _mm512_permutexvar_epi8 requires AVX512-VBMI, larger elements don't.
// It also performs the same op as _mm512_shuffle_epi8.
//
// _mm512_shuffle_epi8 shuffles accross entire 512 bits. Shuffle usually
// doesn't cross 128 bit lane boundaries.
//////////////////////////////////////////////////////////////
//
@@ -40,6 +37,74 @@
//
// Experimental, not fully tested.
//
// Pseudo constants.
//
// Vector constants are not really constants and can't be used as compile time
// initializers. They contain executable instructions to generate values at
// run time. They are very slow. If the same constant will be used repeatedly
// in a function it's better to define it once in a local register variable
// and use the variable for references.
// Tthe simpler the constant, the more efficienct it's generation. Zero is
// the fastest, then all elements set the same, different 64 bit elements,
// and different smaller elements is the slowest. Caching multiple uses us
// always faster.
#define m512_const_256( hi, lo ) \
_mm512_inserti64x4( _mm512_castsi256_si512( lo ), hi, 1 )
#define m512_const_128( i3, i2, i1, i0 ) \
_mm512_inserti64x4( _mm512_castsi256_si512( m256_const_128( i1, i0 ) ), \
m256_const_128( i3,i2 ), 1 )
#define m512_const_64( i7, i6, i5, i4, i3, i2, i1, i0 ) \
m512_const_256( m256_const_64( i7,i6,i5,i4 ), \
m256_const_64( i3,i2,i1,i0 ) )
static inline __m512i m512_const1_256( __m256i v )
{
return _mm512_broadcast_i64x4( v );
}
static inline __m512i m512_const1_128( __m128i v )
{
return _mm512_broadcast_i64x2( v );
}
static inline __m512i m512_const1_64( uint64_t i )
{
__m128i a;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r"(i) );
return _mm512_broadcastq_epi64( a );
}
static inline __m512i m512_const1_32( uint32_t i )
{
__m128i a;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r"(i) );
return _mm512_broadcastd_epi32( a );
}
static inline __m512i m512_const1_16( uint16_t i )
{
__m128i a;
asm( "movw %1, %0\n\t"
: "=x"(a)
: "r"(i) );
return _mm512_broadcastw_epi16( a );
}
static inline __m512i m512_const1_8( uint8_t i )
{
__m128i a;
asm( "movb %1, %0\n\t"
: "=x"(a)
: "r"(i) );
return _mm512_broadcastb_epi8( a );
}
//
// Pseudo constants.
@@ -49,89 +114,104 @@
// initialized to zero.
#define m512_zero _mm512_setzero_si512()
/*
#define m512_one_512 _mm512_set_epi64( 0ULL, 0ULL, 0ULL, 0ULL, \
0ULL, 0ULL, 0ULL, 1ULL )
#define m512_one_256 _mm512_set4_epi64( 0ULL, 0ULL, 0ULL, 1ULL )
#define m512_one_128 _mm512_set4_epi64( 0ULL, 1ULL, 0ULL, 1ULL )
//#define m512_one_64 _mm512_set1_epi64( 1ULL )
//#define m512_one_32 _mm512_set1_epi32( 1UL )
//#define m512_one_16 _mm512_set1_epi16( 1U )
//#define m512_one_8 _mm512_set1_epi8( 1U )
//#define m512_neg1 _mm512_set1_epi64( 0xFFFFFFFFFFFFFFFFULL )
#define m512_one_64 _mm512_set1_epi64( 1ULL )
#define m512_one_32 _mm512_set1_epi32( 1UL )
#define m512_one_16 _mm512_set1_epi16( 1U )
#define m512_one_8 _mm512_set1_epi8( 1U )
#define m512_neg1 _mm512_set1_epi64( 0xFFFFFFFFFFFFFFFFULL )
*/
#define m512_const_64( i7, i6, i5, i4, i3, i2, i1, i0 ) \
_mm512_inserti64x4( _mm512_castsi256_si512( m256_const_64( i3,i2,i1,i0 ) ), \
m256_const_64( i7,i6,i5,i4 ), 1 )
static inline __m512i m512_const1_64( uint64_t i )
static inline __m512i mm512_one_512_fn()
{
register __m128i a;
__m512i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x" (a)
: "r" (one) );
return a;
}
#define m512_one_512 mm512_one_512_fn()
static inline __m512i mm512_one_256_fn()
{
__m256i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r"(i) );
: "r" (one) );
return _mm512_broadcast_i64x4( a );
}
#define m512_one_256 mm512_one_256_fn()
static inline __m512i mm512_one_128_fn()
{
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcast_i64x2( a );
}
#define m512_one_128 mm512_one_128_fn()
static inline __m512i mm512_one_64_fn()
{
__m128i a;
const uint64_t one = 1;
asm( "movq %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcastq_epi64( a );
}
#define m512_one_64 mm512_one_64_fn()
static inline __m512i m512_one_64_fn()
static inline __m512i mm512_one_32_fn()
{
__m512i a;
asm( "vpxorq %0, %0, %0\n\t"
"vpcmpeqd %%zmm1, %%zmm1, %%zmm1\n\t"
"vpsubq %%zmm1, %0, %0\n\t"
:"=x"(a)
:
: "zmm1" );
return a;
__m128i a;
const uint64_t one = 0x0000000100000001;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcastq_epi64( a );
}
#define m512_one_64 m512_one_64_fn()
#define m512_one_32 mm512_one_32_fn()
static inline __m512i m512_one_32_fn()
static inline __m512i mm512_one_16_fn()
{
__m512i a;
asm( "vpxord %0, %0, %0\n\t"
"vpcmpeqd %%zmm1, %%zmm1, %%zmm1\n\t"
"vpsubd %%zmm1, %0, %0\n\t"
:"=x"(a)
:
: "zmm1" );
return a;
__m128i a;
const uint64_t one = 0x0001000100010001;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcastq_epi64( a );
}
#define m512_one_32 m512_one_32_fn()
#define m512_one_16 mm512_one_16_fn()
static inline __m512i m512_one_16_fn()
static inline __m512i mm512_one_8_fn()
{
__m512i a;
asm( "vpxord %0, %0, %0\n\t"
"vpcmpeqd %%zmm1, %%zmm1, %%zmm1\n\t"
"vpsubw %%zmm1, %0, %0\n\t"
:"=x"(a)
:
: "zmm1" );
return a;
__m128i a;
const uint64_t one = 0x0101010101010101;
asm( "movd %1, %0\n\t"
: "=x"(a)
: "r" (one) );
return _mm512_broadcastq_epi64( a );
}
#define m512_one_16 m512_one_16_fn()
#define m512_one_8 mm512_one_8_fn()
static inline __m512i m512_one_8_fn()
{
__m512i a;
asm( "vpxord %0, %0, %0\n\t"
"vpcmpeqd %%zmm1, %%zmm1, %%zmm1\n\t"
"vpsubb %%zmm1, %0, %0\n\t"
:"=x"(a)
:
: "zmm1" );
return a;
}
#define m512_one_8 m512_one_8_fn()
static inline __m512i m512_neg1_fn()
static inline __m512i mm512_neg1_fn()
{
__m512i a;
asm( "vpcmpeqq %0, %0, %0\n\t"
:"=x"(a) );
return a;
}
#define m512_neg1 m512_neg1_fn()
#define m512_neg1 mm512_neg1_fn()
//
@@ -142,6 +222,7 @@ static inline __m512i m512_neg1_fn()
#define mm512_negate_32( x ) _mm512_sub_epi32( m512_zero, x )
#define mm512_negate_16( x ) _mm512_sub_epi16( m512_zero, x )
// More efficient to use cast to extract low lanes, it's free.
#define mm256_extr_lo256_512( a ) _mm512_castsi512_si256( a )
#define mm256_extr_hi256_512( a ) _mm512_extracti64x4_epi64( a, 1 )
@@ -168,7 +249,7 @@ static inline __m512i m512_neg1_fn()
#define casto_m512i(p,o) (((__m512i*)(p))+(o))
// Add 4 values, fewer dependencies than sequential addition.
// Sum 4 values, fewer dependencies than sequential addition.
#define mm512_add4_64( a, b, c, d ) \
_mm512_add_epi64( _mm512_add_epi64( a, b ), _mm512_add_epi64( c, d ) )
@@ -186,16 +267,32 @@ static inline __m512i m512_neg1_fn()
_mm512_xor_si512( _mm512_xor_si256( a, b ), _mm512_xor_si256( c, d ) )
// Vector size conversion
#define mm256_extr_lo256_512( a ) _mm512_castsi512_si256( a )
#define mm256_extr_hi256_512( a ) _mm512_extracti64x4_epi64( a, 1 )
#define mm512_concat_256( hi, lo ) \
_mm512_inserti164x4( _mm512_castsi256_si512( lo ), hi, 1 )
// Horizontal vector testing
#define mm512_allbits0( a ) _mm512_cmpeq_epi64_mask( a, m512_zero )
#define mm256_allbits1( a ) _mm512_cmpeq_epi64_mask( a, m512_neg1 )
#define mm512_anybits0( a ) _mm512_cmpneq_epi64_mask( a, m512_neg1 )
#define mm512_anybits1( a ) _mm512_cmpneq_epi64_mask( a, m512_zero )
//
// Bit rotations.
// AVX512F has built-in bit fixed and variable rotation for 64 & 32 bit
// elements. There is no bit rotation or shift for larger elements.
// AVX512F has built-in fixed and variable bit rotation for 64 & 32 bit
// elements and can be called directly.
//
// _mm512_rol_epi64, _mm512_ror_epi64, _mm512_rol_epi32, _mm512_ror_epi32
// _mm512_rolv_epi64, _mm512_rorv_epi64, _mm512_rolv_epi32, _mm512_rorv_epi32
//
// Here is a bit rotate for 16 bit elements:
// Here is a fixed bit rotate for 16 bit elements:
#define mm512_ror_16( v, c ) \
_mm512_or_si512( _mm512_srli_epi16( v, c ), \
_mm512_slli_epi16( v, 16-(c) )
@@ -203,6 +300,36 @@ static inline __m512i m512_neg1_fn()
_mm512_or_si512( _mm512_slli_epi16( v, c ), \
_mm512_srli_epi16( v, 16-(c) )
// Rotations using a vector control index are very slow due to overhead
// to generate the index vector. Repeated rotations using the same index
// are better handled by the calling function where the index only needs
// to be generated once then reused very efficiently.
// Permutes and shuffles using an immediate index are significantly faster.
//
// Swap bytes in vector elements, vectorized endian conversion.
#define mm512_bswap_64( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x38393A3B3C3D3E3F, 0x3031323334353637, \
0x28292A2B2C2D2E2F, 0x2021222324252627, \
0x18191A1B1C1D1E1F, 0x1011121314151617, \
0x08090A0B0C0D0E0F, 0x0001020304050607 ) )
#define mm512_bswap_32( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233 ) )
#define mm512_bswap_16( v ) \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3F3C3D3A3B3839, 0x3637343532333031, \
0x2E2F2C2D2A2B2829, 0x2627242522232021, \
0x1E1F1C1D1A1B1819, 0x1617141512131011, \
0x0E0F0C0D0A0B0809, 0x0607040502030001 ) )
//
// Rotate elements in 512 bit vector.
@@ -222,60 +349,57 @@ static inline __m512i m512_neg1_fn()
#define mm512_ror_x32( v, n ) _mm512_alignr_epi32( v, v, n )
#define mm512_ror_1x16( v ) \
_mm512_permutexvar_epi16( v, m512_const_64( \
_mm512_permutexvar_epi16( m512_const_64( \
0x0000001F001E001D, 0x001C001B001A0019, \
0X0018001700160015, 0X0014001300120011, \
0X0010000F000E000D, 0X000C000B000A0009, \
0X0008000700060005, 0X0004000300020001 ) )
0X0008000700060005, 0X0004000300020001 ), v )
#define mm512_rol_1x16( v ) \
_mm512_permutexvar_epi16( v, m512_const_64( \
_mm512_permutexvar_epi16( m512_const_64( \
0x001E001D001C001B, 0x001A001900180017, \
0X0016001500140013, 0X001200110010000F, \
0X000E000D000C000B, 0X000A000900080007, \
0X0006000500040003, 0X000200010000001F ) )
0X0006000500040003, 0X000200010000001F ), v )
#define mm512_ror_1x8( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x003F3E3D3C3B3A39, 0x3837363534333231, \
0x302F2E2D2C2B2A29, 0x2827262524232221, \
0x201F1E1D1C1B1A19. 0x1817161514131211, \
0x100F0E0D0C0B0A09, 0x0807060504030201 ) )
#define mm512_rol_1x8( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3B3A393837, 0x363534333231302F. \
0x2E2D2C2B2A292827, 0x262524232221201F, \
0x1E1D1C1B1A191817, 0x161514131211100F, \
0x0E0D0C0B0A090807, 0x060504030201003F ) )
// Invert vector: {3,2,1,0} -> {0,1,2,3}
#define mm512_invert_128( v ) _mm512_permute4f128_epi32( a, 0x1b )
#define mm512_invert_128( v ) _mm512_shuffle_i64x2( v, v, 0x1b )
#define mm512_invert_64( v ) \
_mm512_permutex_epi64( v, m512_const_64( 0,1,2,3,4,5,6,7 ) )
_mm512_permutexvar_epi64( v, m512_const_64( 0,1,2,3,4,5,6,7 ) )
#define mm512_invert_32( v ) \
_mm512_permutexvar_epi32( v, m512_const_64( \
_mm512_permutexvar_epi32( m512_const_64( \
0x0000000000000001,0x0000000200000003, \
0x0000000400000005,0x0000000600000007, \
0x0000000800000009,0x0000000a0000000b, \
0x0000000c0000000d,0x0000000e0000000f ) )
0x0000000c0000000d,0x0000000e0000000f ), v )
#define mm512_invert_16( v ) \
_mm512_permutexvar_epi16( v, m512_const_64( \
_mm512_permutexvar_epi16( m512_const_64( \
0x0000000100020003, 0x0004000500060007, \
0x00080009000A000B, 0x000C000D000E000F, \
0x0010001100120013, 0x0014001500160017, \
0x00180019001A001B, 0x001C001D001E001F ) )
0x00180019001A001B, 0x001C001D001E001F ), v )
#define mm512_invert_8( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x0001020304050607, 0x08090A0B0C0D0E0F, \
0x1011121314151617, 0x18191A1B1C1D1E1F, \
0x2021222324252627, 0x28292A2B2C2D2E2F, \
@@ -293,46 +417,46 @@ static inline __m512i m512_neg1_fn()
// Rotate 256 bit lanes by one 32 bit element
#define mm512_ror1x32_256( v ) \
_mm512_permutexvar_epi32( v, m512_const_64( \
_mm512_permutexvar_epi32( m512_const_64( \
0x000000080000000f, 0x0000000e0000000d, \
0x0000000c0000000b, 0x0000000a00000009, \
0x0000000000000007, 0x0000000600000005, \
0x0000000400000003, 0x0000000200000001 ) )
0x0000000400000003, 0x0000000200000001, v ) )
#define mm512_rol1x32_256( v ) \
_mm512_permutexvar_epi32( v, m512_const_64( \
_mm512_permutexvar_epi32( m512_const_64( \
0x0000000e0000000d, 0x0000000c0000000b, \
0x0000000a00000009, 0x000000080000000f, \
0x0000000600000005, 0x0000000400000003, \
0x0000000200000001, 0x0000000000000007 ) )
0x0000000200000001, 0x0000000000000007 ), v )
#define mm512_ror1x16_256( v ) \
_mm512_permutexvar_epi16( v, m512_const_64( \
_mm512_permutexvar_epi16( m512_const_64( \
0x0010001F001E001D, 0x001C001B001A0019, \
0x0018001700160015, 0x0014001300120011, \
0x0000000F000E000D, 0x000C000B000A0009, \
0x0008000700060005, 0x0004000300020001 ) )
0x0008000700060005, 0x0004000300020001 ), v )
#define mm512_rol1x16_256( v ) \
_mm512_permutexvar_epi16( v, m512_const_64( \
_mm512_permutexvar_epi16( m512_const_64( \
0x001E001D001C001B, 0x001A001900180017, \
0x0016001500140013, 0x001200110000000F, \
0x000E000D000C000B, 0x000A000900080007, \
0x0006000500040003, 0x000200010000001F ) )
0x0006000500040003, 0x000200010000001F ), v )
#define mm512_ror1x8_256( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x203F3E3D3C3B3A39, 0x3837363534333231, \
0x302F2E2D2C2B2A29, 0x2827262524232221, \
0x001F1E1D1C1B1A19, 0x1817161514131211, \
0x100F0E0D0C0B0A09, 0x0807060504030201 ) )
#define mm512_rol1x8_256( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3B3A393837, 0x363534333231302F, \
0x2E2D2C2B2A292827, 0x262524232221203F, \
0x1E1D1C1B1A191817, 0x161514131211100F, \
0x0E0D0C0B0A090807, 0x060504030201001F ) )
0x0E0D0C0B0A090807, 0x060504030201001F ))
//
// Rotate elements within 128 bit lanes of 512 bit vector.
@@ -345,28 +469,28 @@ static inline __m512i m512_neg1_fn()
#define mm512_rol1x32_128( v ) _mm512_shuffle_epi32( v, 0x93 )
#define mm512_ror1x16_128( v ) \
_mm512_permutexvar_epi16( v, m512_const_64( \
_mm512_permutexvar_epi16( m512_const_64( \
0x0018001F001E001D, 0x001C001B001A0019, \
0x0010001700160015, 0x0014001300120011, \
0x0008000F000E000D, 0x000C000B000A0009, \
0x0000000700060005, 0x0004000300020001 ) )
0x0000000700060005, 0x0004000300020001 ), v )
#define mm512_rol1x16_128( v ) \
_mm512_permutexvar_epi16( v, m512_const_64( \
_mm512_permutexvar_epi16( m512_const_64( \
0x001E001D001C001B, 0x001A00190018001F, \
0x0016001500140013, 0x0012001100100017, \
0x000E000D000C000B, 0x000A00090008000F, \
0x0006000500040003, 0x0002000100000007 ) )
0x0006000500040003, 0x0002000100000007, v ) )
#define mm512_ror1x8_128( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x303F3E3D3C3B3A39, 0x3837363534333231, \
0x202F2E2D2C2B2A29, 0x2827262524232221, \
0x101F1E1D1C1B1A19, 0x1817161514131211, \
0x000F0E0D0C0B0A09, 0x0807060504030201 ) )
#define mm512_rol1x8_128( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3B3A393837, 0x363534333231303F, \
0x2E2D2C2B2A292827, 0x262524232221202F, \
0x1E1D1C1B1A191817, 0x161514131211101F, \
@@ -387,32 +511,30 @@ static inline __m512i m512_neg1_fn()
// Swap 32 bit elements in each 64 bit lane
#define mm512_swap32_64( v ) _mm512_shuffle_epi32( v, 0xb1 )
// _mm512_set_epi8 doesn't seem to work
// Rotate each 64 bit lane by one 16 bit element.
#define mm512_ror1x16_64( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
0x39383F3E3D3C3B3A, 0x3130373635343332, \
0x29282F2E2D2C2B2A, 0x2120272625242322, \
0x19181F1E1D1C1B1A, 0x1110171615141312, \
0x09080F0E0D0C0B0A, 0x0100070605040302 ) )
_mm512_permutexvar_epi16( m512_const_64( \
0x001c001f001e001d, 0x0018001b001a0019, \
0x0014001700160015, 0x0010001300120011, \
0x000c000f000e000d, 0x0008000b000a0009, \
0x0004000700060005, 0x0000000300020001, v )
#define mm512_rol1x16_64( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
0x3D3C3B3A39383F3E, 0x3534333231303736 \
0x2D2C2B2A29282F2E, 0x2524232221202726 \
0x1D1C1B1A19181F1E, 0x1514131211101716 \
0x0D0C0B0A09080F0E, 0x0504030201000706 ) )
_mm512_permutexvar_epi16( m512_const_64( \
0x001e001d001c001f, 0x001a00190018001b, \
0x0016001500140017, 0x0012001100100013, \
0x000e000d000c000f, 0x000a00090008000b, \
0x0006000500040007, 0x0002000100000003, v )
// Rotate each 64 bit lane by one byte.
#define mm512_ror1x8_64( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x383F3E3D3C3B3A39, 0x3037363534333231, \
0x282F2E2D2C2B2A29, 0x2027262524232221, \
0x181F1E1D1C1B1A19, 0x1017161514131211, \
0x080F0E0D0C0B0A09, 0x0007060504030201 ) )
#define mm512_rol1x8_64( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle( v, m512_const_64( \
0x3E3D3C3B3A39383F, 0x3635343332313037, \
0x2E2D2C2B2A29282F, 0x2625242322212027, \
0x1E1D1C1B1A19181F, 0x1615141312111017, \
@@ -422,55 +544,31 @@ static inline __m512i m512_neg1_fn()
// Rotate elements within 32 bit lanes.
#define mm512_swap16_32( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
0x001D001C001F001E, 0x00190018001B001A, \
0x0015001400170016, 0x0011001000130012, \
0x000D000C000F000E, 0x00190008000B000A, \
0x0005000400070006, 0x0011000000030002 ) )
_mm512_permutexvar_epi16( m512_const_64( \
0x001e001f001c001d, 0x001a001b00180019, \
0x0016001700140015, 0x0012001300100011, \
0x000e000f000c000d, 0x000a000b00080009, \
0x0006000700040005, 0x0002000300000001 ), v )
#define mm512_ror1x8_32( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3C3F3E3D383B3A39, 0x3437363530333231, \
0x2C2F2E2D282B2A29, 0x2427262520232221, \
0x1C1F1E1D181B1A19, 0x1417161510131211, \
0x0C0F0E0D080B0A09, 0x0407060500030201 ) )
0x0C0F0E0D080B0A09, 0x0407060500030201 ))
#define mm512_rol1x8_32( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
_mm512_shuffle_epi8( v, m512_const_64( \
0x3E3D3C3F3A39383B, 0x3635343732313033, \
0x2E2D2C2F2A29282B, 0x2625242722212023, \
0x1E1D1C1F1A19181B, 0x1615141712111013, \
0x0E0D0C0F0A09080B, 0x0605040702010003 ) )
//
// Swap bytes in vector elements, vectorized bswap.
#define mm512_bswap_64( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
0x38393A3B3C3D3E3F, 0x2031323334353637, \
0x28292A2B2C2D2E2F, 0x2021222334353637, \
0x18191A1B1C1D1E1F, 0x1011121314151617, \
0x08090A0B0C0D0E0F, 0x0001020304050607 ) )
#define mm512_bswap_32( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233, \
0x3C3D3E3F38393A3B, 0x3435363730313233 ) )
#define mm512_bswap_16( v ) \
_mm512_permutexvar_epi8( v, m512_const_64( \
0x3E3F3C3D3A3B3839, 0x3637343532333031, \
0x2E2F2C2D2A2B2829, 0x2627242522232021, \
0x1E1F1C1D1A1B1819, 0x1617141512131011, \
0x0E0F0C0D0A0B0809, 0x0607040502030001 ) )
//
// Rotate elements from 2 512 bit vectors in place, source arguments
// are overwritten.
// These can all be done with 2 permutex2var instructions but they are
// slower than either xor or alignr.
// slower than either xor or alignr and require AVX512VBMI.
#define mm512_swap512_1024(v1, v2) \
v1 = _mm512_xor_si512(v1, v2); \

View File

@@ -1,5 +1,5 @@
#if !defined(SIMD_SCALAR_H__)
#define SIMD_SCALAR_H__ 1
#if !defined(SIMD_INT_H__)
#define SIMD_INT_H__ 1
///////////////////////////////////
//
@@ -13,6 +13,8 @@
// Some utilities are also provided for smaller integers, most notably
// bit rotation.
// MMX has no extract instruction for 32 bit elements so this:
// Lo is trivial, high is a simple shift.
// Input may be uint64_t or __m64, returns uint32_t.
@@ -56,18 +58,45 @@ static inline void memset_zero_64( uint64_t *src, int n )
static inline void memset_64( uint64_t *dst, const uint64_t a, int n )
{ for ( int i = 0; i < n; i++ ) dst[i] = a; }
#if defined (GCC_INT128)
///////////////////////////////////////
//
// 128 bit integers
//
// 128 bit integers are inneficient and not a shortcut for __m128i.
// Native type __int128 supported starting with GCC-4.8.
//
// __int128 uses two 64 bit GPRs to hold the data. The main benefits are
// for 128 bit arithmetic. Vectors are preferred when 128 bit arith
// is not required. int128 also works better with other integer sizes.
// Vectors benefit from wider registers.
//
// For safety use typecasting on all numeric arguments.
//
// Use typecasting for conversion to/from 128 bit vector:
// __m128i v128 = (__m128i)my_int128l
// __m256i v256 = _mm256_set_m128i( (__m128i)my_int128, (__m128i)my_int128 );
// my_int128 = (uint128_t)_mm256_extracti128_si256( v256, 1 );
// No real need or use.
//#define u128_neg1 ((uint128_t)(-1))
// Compiler check for __int128 support
// Configure also has a test for int128.
#if ( __GNUC__ > 4 ) || ( ( __GNUC__ == 4 ) && ( __GNUC_MINOR__ >= 8 ) )
#define GCC_INT128 1
#endif
// usefull for making constants.
#if !defined(GCC_INT128)
#warning "__int128 not supported, requires GCC-4.8 or newer."
#endif
#if defined(GCC_INT128)
// Familiar looking type names
typedef __int128 int128_t;
typedef unsigned __int128 uint128_t;
// Maybe usefull for making constants.
#define mk_uint128( hi, lo ) \
( ( (uint128_t)(hi) << 64 ) | ( (uint128_t)(lo) ) )
@@ -92,6 +121,6 @@ static inline void memset_64( uint64_t *dst, const uint64_t a, int n )
#endif // GCC_INT128
#endif // SIMD_SCALAR_H__
#endif // SIMD_INT_H__

View File

@@ -1,389 +0,0 @@
//////////////////////////////////////
//
// Type abstraction overlays designed for use in highly optimized
// straight line code operating on array structures. It uses direct
// struct member access instead of indexing to access array elements.
// Ex: array.u32_3 instead of array[3].
//
// Vector types are used to represent asrrays. 64 and 128 bit vectors have
// corresponding 64 and 128 bit integer types.
//
// Data accesses are not tied to memory as arrays are. Thes structures
// can operate comfortably as reguietr variables.
//
// Although the abstraction makes for transparent usage there is overhead.
// Extra move instructins are required when an operation requires a
// different register type. Additionaly 128 bit operations, uint128_t
// and AES, can't be done in parallel with a 256 bit or lager vector.
// The require additionalmove instructions in addition to the lack of
// improvement from parallelism.
//
// Move instruction overhead is required when moving among gpr, mmx
// and xmm registers. The number of extra moves is usually the number
// of elements inthe vector. If bothe are the same size onlu one move
// is required. The number is doubled if the data is moved back.
//
// xmm and ymm resgisters are special, they are aliased. xmm registers
// overlay the lower 128 bits of the ymm registers. Accessing the data
// in the lower half of a ymm register by an xmm argument is free.
// The upper 128 bits need to be extracted and inserted like with other
// different sized data types.
//
// Integer types can be converted to differently sized integers without
// penalty.
//
// Conversions with penalty should be avoided as much possible by grouping
// operations requiring the same register set.
//
// There are two algorithms for extracting and inserting data.
//
// There isthe straightforward iterative meathod wher each element is
// extracted or inserted in turn. The compiler evidently take a different
// aproach based on assembly code generated by a set intrinsic.
// To extract 64 bit or smaller elements from a 256 bit vector the
// first extracts the upper 128 bit into a second xmm register. This
// eliminates a dependency between the upper and lower elements allowing
// the CPU more opportunity at multiple operations per clock.
// This adds one additional instruction to the process. With AVX512 an
// another stege is added by first splitting up the 512 bit vector into
// 2 256 bit vectors,
//
// xmm/ymm aliasing makes accessing low half trivial and without cost.
// Accessing the upper half requires a move from the upper half of
// the source register to the lower half of the destination.
// It's a bigger issue with GPRs as there is no aliasing.
//
// Theoretically memory resident data could bypass the move and load
// the data directly into the desired register type. However this
// ignores the overhead to ensure coherency between register and memory
// wich is significantly more.
//
// Overlay avoids pointer dereferences and favours register move over
// memory load, notwistanding compiler optimization.
//
// The syntax is ugly but can be abstracted with macros.
// Universal 64 bit overlay
// Avoids arrays and pointers, suitable as register variable.
// Conversions are transparent but not free, cost is one MOV instruction.
// Facilitates manipulating 32 bit data in 64 bit pairs.
// Allows full use of 64 bit registers for 32 bit data, effectively doubling
// the size of the register set.
// Potentially up to 50% reduction in instructions depending on rate of
// conversion.
///////////////////////////////////////////////////////
//
// 128 bit integer
//
// Native type __int128 supported starting with GCC-4.8.
//
// __int128 uses two 64 bit GPRs to hold the data. The main benefits are
// for 128 bit arithmetic. Vectors are preferred when 128 bit arith
// is not required. int128 also works better with other integer sizes.
// Vectors benefit from wider registers.
//
// For safety use typecasting on all numeric arguments.
//
// Use typecasting for conversion to/from 128 bit vector:
// __m128i v128 = (__m128i)my_int128l
// __m256i v256 = _mm256_set_m128i( (__m128i)my_int128, (__m128i)my_int128 );
// my_int128 = (uint128_t)_mm256_extracti128_si256( v256, 1 );
// Compiler check for __int128 support
// Configure also has a test for int128.
#if ( __GNUC__ > 4 ) || ( ( __GNUC__ == 4 ) && ( __GNUC_MINOR__ >= 8 ) )
#define GCC_INT128 1
#endif
#if !defined(GCC_INT128)
#warning "__int128 not supported, requires GCC-4.8 or newer."
#endif
#if defined(GCC_INT128)
// Familiar looking type names
typedef __int128 int128_t;
typedef unsigned __int128 uint128_t;
#endif
/////////////////////////////////////
//
// MMX 64 bit vector
//
// Emulates uint32_t[2]
struct _regarray_u32x2
{
uint32_t _0; uint32_t _1;
};
typedef struct _regarray_u32x2 regarray_u32x2;
// Emulates uint16_t[4]
struct _regarray_u16x4
{
uint16_t _0; uint16_t _1; uint16_t _2; uint16_t _3;
};
typedef struct _regarray_u16x4 regarray_u16x4;
// Emulates uint8_t[8]
struct _regarray_u8x8
{
uint8_t _0; uint8_t _1; uint8_t _2; uint8_t _3;
uint8_t _4; uint8_t _5; uint8_t _6; uint8_t _7;
};
typedef struct _regarray_u8x8 regarray_u8x8;
// universal 64 bit overlay
union _regarray_64
{
regarray_u32x2 u32_; // uint32_t[2]
regarray_u16x4 u16_; // uint16_t[4]
regarray_u8x8 u8_; // uint8_t[8]
uint64_t u64;
__m64 v64;
};
typedef union _regarray_64 regarray_64;
/////
//
// SSE2
// Universal 128 bit overlay
//
// Avoids arrays and pointers, suitable as register variable.
// Designed for speed in straight line code with no loops.
//
// Conversions are transparent but not free, cost is one MOV instruction
// in each direction, except for lower half of ymm to/from xmm which are
// free.
//
// Facilitates two dimensional vectoring.
//
// 128 bit integer and AES can't be done in parallel. AES suffers extraction
// and insertion of the upper 128 bits. uint128_t suffers 4 times the cost
// with 2 64 bit extractions and 2 insertions for each 128 bit lane with
// single stage ymm <--> gpr for a total of 8 moves.
//
// Two stage conversion is possible which helps CPU instruction scheduling
// by removing a register dependency between the upper and lower 128 at the
// cost of two extra instructions (128 bit extract and insert. The compiler
// seems to prefer the 2 staged approach when using the set intrinsic.
// Use macros to simplify array access emulation.
// emulated array type: uint64_t a[4];
// array indexing: a[0], a[1]
// overlay emulation: a.u64_0, a.u64_1
// without macro: a.u64_._0, a.u64_._1
struct _regarray_u64x2
{
uint64_t _0; uint64_t _1;
};
typedef struct _regarray_u64x2 regarray_u64x2;
struct _regarray_v64x2
{
__m64 _0; __m64 _1;
};
typedef struct _regarray_v64x2 regarray_v64x2;
struct _regarray_u32x4
{
uint32_t _0; uint32_t _1; uint32_t _2; uint32_t _3;
};
typedef struct _regarray_u32x2 regarray_u32x4;
struct _regarray_u16x8
{
uint16_t _0; uint16_t _1; uint16_t _2; uint16_t _3;
uint16_t _4; uint16_t _5; uint16_t _6; uint16_t _7;
};
typedef struct _regarray_u16x4 regarray_u16x4;
struct _regarray_u8x16
{
uint8_t _0; uint8_t _1; uint8_t _2; uint8_t _3;
uint8_t _4; uint8_t _5; uint8_t _6; uint8_t _7;
uint8_t _8; uint8_t _9; uint8_t _a; uint8_t _b;
uint8_t _c; uint8_t _d; uint8_t _e; uint8_t _f;
};
typedef struct _regarray_u8x16 regarray_u8x16;
union _register_array_m128v
{
#if defined(GCC_INT128)
uint128_t u128;
#endif
__m128i v128;
regarray_u64x2 u64_; // uint64_t[2]
regarray_v64x2 v64_; // __m64[2]
regarray_u32x4 u32_; // uint32_t[4]
regarray_u16x4 u16_; // uint16_t[8]
regarray_u8x16 u8_; // uint8_t[16]
};
typedef union _register_array_m128v register_array_m128v;
///////////////////
//
// AVX2
//
struct _regarray_v128x2
{
__m128i _0; __m128i _1;
};
typedef struct _regarray_v128x2 regarray_v128x2;
struct _regarray_u128x2
{
uint128_t _0; uint128_t _1;
};
typedef struct _regarray_u128x2 regarray_u128x2;
struct _regarray_u64x4
{
uint64_t _0; uint64_t _1; uint64_t _2; uint64_t _3;
};
typedef struct _regarray_u64x4 regarray_u64x4;
struct _regarray_v64x4
{
__m64 _0; __m64 _1; __m64 _2; __m64 _3;
};
typedef struct _regarray_v64x4 regarray_v64x4;
struct _regarray_u32x8
{
uint32_t _0; uint32_t _1; uint32_t _2; uint32_t _3;
uint32_t _4; uint32_t _5; uint32_t _6; uint32_t _7;
};
typedef struct _regarray_u32x8 regarray_u32x8;
struct _regarray_u16x16
{
uint16_t _0; uint16_t _1; uint16_t _2; uint16_t _3;
uint16_t _4; uint16_t _5; uint16_t _6; uint16_t _7;
uint16_t _8; uint16_t _9; uint16_t _a; uint16_t _b;
uint16_t _c; uint16_t _d; uint16_t _e; uint16_t _f;
};
typedef struct _regarray_u16x16 regarray_u16x16;
struct _regarray_u8x32
{
uint8_t _00; uint8_t _01; uint8_t _02; uint8_t _03;
uint8_t _04; uint8_t _05; uint8_t _06; uint8_t _07;
uint8_t _08; uint8_t _09; uint8_t _0a; uint8_t _0b;
uint8_t _0c; uint8_t _0d; uint8_t _0e; uint8_t _0f;
uint8_t _10; uint8_t _11; uint8_t _12; uint8_t _13;
uint8_t _14; uint8_t _15; uint8_t _16; uint8_t _17;
uint8_t _18; uint8_t _19; uint8_t _1a; uint8_t _1b;
uint8_t _1c; uint8_t _1d; uint8_t _1e; uint8_t _1f;
};
typedef struct _regarray_u8x32 regarray_u8x32;
// Overlay of one 256-bit AVX2 register with every integer lane width.
// A value stored through any member can be read back lane-by-lane through
// the regarray_* struct members (union-based type punning, permitted in C11).
// Member names feed the accessor macros defined below; do not rename them.
union _regarray_v256
{
__m256i v256;   // whole register as an AVX2 vector
#if defined(GCC_INT128)
regarray_u128x2 u128_; // uint128_t[2] (GCC __int128 builds only)
#endif
regarray_v128x2 v128_; // __m128i[2]
regarray_v64x4 v64_; // __m64[4]
regarray_u64x4 u64_; // uint64_t[4]
regarray_u32x8 u32_; // uint32_t[8]
regarray_u16x16 u16_; // uint16_t[16]
regarray_u8x32 u8_; // uint8_t[32]
};
typedef union _regarray_v256 regarray_v256;
////////////
//
// Abstraction macros to allow easy readability.
// Users may define their own list to suit their preferences
// such as, upper case hex, leading zeros, multidimensional,
// alphabetic, day of week, etc..
// Lane accessor shorthand for the register-overlay unions above.
// Each macro expands to the matching union member access, e.g.
//   r.u64_1  ->  r.u64_._1
// so callers can address individual lanes without spelling out the
// intermediate struct member. Names must track the union members exactly.

// 128-bit lanes (regarray_v256 only)
#define v128_0 v128_._0
#define v128_1 v128_._1
#define u128_0 u128_._0
#define u128_1 u128_._1
// 64-bit lanes
#define v64_0 v64_._0
#define v64_1 v64_._1
#define v64_2 v64_._2
#define v64_3 v64_._3
#define u64_0 u64_._0
#define u64_1 u64_._1
#define u64_2 u64_._2
#define u64_3 u64_._3
// 32-bit lanes
#define u32_0 u32_._0
#define u32_1 u32_._1
#define u32_2 u32_._2
#define u32_3 u32_._3
#define u32_4 u32_._4
#define u32_5 u32_._5
#define u32_6 u32_._6
#define u32_7 u32_._7
// 16-bit lanes (single hex digit suffix)
#define u16_0 u16_._0
#define u16_1 u16_._1
#define u16_2 u16_._2
#define u16_3 u16_._3
#define u16_4 u16_._4
#define u16_5 u16_._5
#define u16_6 u16_._6
#define u16_7 u16_._7
#define u16_8 u16_._8
#define u16_9 u16_._9
#define u16_a u16_._a
#define u16_b u16_._b
#define u16_c u16_._c
#define u16_d u16_._d
#define u16_e u16_._e
#define u16_f u16_._f
// 8-bit lanes (two hex digit suffix)
#define u8_00 u8_._00
#define u8_01 u8_._01
#define u8_02 u8_._02
#define u8_03 u8_._03
#define u8_04 u8_._04
#define u8_05 u8_._05
#define u8_06 u8_._06
#define u8_07 u8_._07
#define u8_08 u8_._08
#define u8_09 u8_._09
#define u8_0a u8_._0a
#define u8_0b u8_._0b
#define u8_0c u8_._0c
#define u8_0d u8_._0d
#define u8_0e u8_._0e
#define u8_0f u8_._0f
#define u8_10 u8_._10
#define u8_11 u8_._11
#define u8_12 u8_._12
#define u8_13 u8_._13
#define u8_14 u8_._14
#define u8_15 u8_._15
#define u8_16 u8_._16
#define u8_17 u8_._17
#define u8_18 u8_._18
#define u8_19 u8_._19
#define u8_1a u8_._1a
#define u8_1b u8_._1b
#define u8_1c u8_._1c
#define u8_1d u8_._1d
#define u8_1e u8_._1e
#define u8_1f u8_._1f

2
util.c
View File

@@ -1898,7 +1898,7 @@ static bool stratum_notify(struct stratum_ctx *sctx, json_t *params)
hex2bin(sctx->job.proofoffullnode, prooffullnode, 32);
}
sctx->bloc_height = getblocheight(sctx);
sctx->block_height = getblocheight(sctx);
for (i = 0; i < sctx->job.merkle_count; i++)
free(sctx->job.merkle[i]);

View File

@@ -24,6 +24,8 @@ ln -s $LOCAL_LIB/gmp/gmp.h ./gmp.h
# make release directory and copy selected DLLs.
mkdir release
cp README.txt release/
cp README.md release/
cp RELEASE_NOTES release/
cp /usr/x86_64-w64-mingw32/lib/zlib1.dll release/
cp /usr/x86_64-w64-mingw32/lib/libwinpthread-1.dll release/
cp /usr/lib/gcc/x86_64-w64-mingw32/7.3-win32/libstdc++-6.dll release/