diff --git a/RELEASE_NOTES b/RELEASE_NOTES index 03e684c..2ca3180 100644 --- a/RELEASE_NOTES +++ b/RELEASE_NOTES @@ -38,6 +38,15 @@ supported. Change Log ---------- +v3.9.5.3 + +Fix crash when mining hodl with aes-sse42. +More restructuring and share report tweaks. + +v3.9.5.2 + +Revert bswap-interleave optimization which caused crashes on Windows. + v3.9.5.1 Fixed skein2 crash on Windows. diff --git a/algo/blake/blake256-hash-4way.c b/algo/blake/blake256-hash-4way.c index 180b040..b228e07 100644 --- a/algo/blake/blake256-hash-4way.c +++ b/algo/blake/blake256-hash-4way.c @@ -412,34 +412,16 @@ do { \ V5 = H5; \ V6 = H6; \ V7 = H7; \ - V8 = _mm_xor_si128( S0, _mm_set_epi32( CS0, CS0, CS0, CS0 ) ); \ - V9 = _mm_xor_si128( S1, _mm_set_epi32( CS1, CS1, CS1, CS1 ) ); \ - VA = _mm_xor_si128( S2, _mm_set_epi32( CS2, CS2, CS2, CS2 ) ); \ - VB = _mm_xor_si128( S3, _mm_set_epi32( CS3, CS3, CS3, CS3 ) ); \ - VC = _mm_xor_si128( _mm_set_epi32( T0, T0, T0, T0 ), \ - _mm_set_epi32( CS4, CS4, CS4, CS4 ) ); \ - VD = _mm_xor_si128( _mm_set_epi32( T0, T0, T0, T0 ), \ - _mm_set_epi32( CS5, CS5, CS5, CS5 ) ); \ - VE = _mm_xor_si128( _mm_set_epi32( T1, T1, T1, T1 ) \ , _mm_set_epi32( CS6, CS6, CS6, CS6 ) ); \ - VF = _mm_xor_si128( _mm_set_epi32( T1, T1, T1, T1 ), \ - _mm_set_epi32( CS7, CS7, CS7, CS7 ) ); \ - M[0x0] = mm128_bswap_32( *(buf + 0) ); \ - M[0x1] = mm128_bswap_32( *(buf + 1) ); \ - M[0x2] = mm128_bswap_32( *(buf + 2) ); \ - M[0x3] = mm128_bswap_32( *(buf + 3) ); \ - M[0x4] = mm128_bswap_32( *(buf + 4) ); \ - M[0x5] = mm128_bswap_32( *(buf + 5) ); \ - M[0x6] = mm128_bswap_32( *(buf + 6) ); \ - M[0x7] = mm128_bswap_32( *(buf + 7) ); \ - M[0x8] = mm128_bswap_32( *(buf + 8) ); \ - M[0x9] = mm128_bswap_32( *(buf + 9) ); \ - M[0xA] = mm128_bswap_32( *(buf + 10) ); \ - M[0xB] = mm128_bswap_32( *(buf + 11) ); \ - M[0xC] = mm128_bswap_32( *(buf + 12) ); \ - M[0xD] = mm128_bswap_32( *(buf + 13) ); \ - M[0xE] = mm128_bswap_32( *(buf + 14) ); \ - M[0xF] = mm128_bswap_32( *(buf + 15) ); \ + V8 = _mm_xor_si128( S0, _mm_set1_epi32( CS0 ) ); \ + V9 = _mm_xor_si128( S1, _mm_set1_epi32( CS1 ) ); \ + VA = _mm_xor_si128( S2, _mm_set1_epi32( CS2 ) ); \ + VB = _mm_xor_si128( S3, _mm_set1_epi32( CS3 ) ); \ + VC = _mm_xor_si128( _mm_set1_epi32( T0 ), _mm_set1_epi32( CS4 ) ); \ + VD = _mm_xor_si128( _mm_set1_epi32( T0 ), _mm_set1_epi32( CS5 ) ); \ + VE = _mm_xor_si128( _mm_set1_epi32( T1 ), _mm_set1_epi32( CS6 ) ); \ + VF = _mm_xor_si128( _mm_set1_epi32( T1 ), _mm_set1_epi32( CS7 ) ); \ + mm128_block_bswap_32( M, buf ); \ + mm128_block_bswap_32( M+8, buf+8 ); \ for (r = 0; r < rounds; r ++) \ ROUND_S_4WAY(r); \ H0 = _mm_xor_si128( _mm_xor_si128( \ @@ -464,6 +446,54 @@ do { \ // current impl +#if defined(__SSSE3__) + +#define BLAKE256_4WAY_BLOCK_BSWAP32 do \ +{ \ + __m128i shuf_bswap32 = _mm_set_epi64x( 0x0c0d0e0f08090a0b, \ + 0x0405060700010203 ); \ + M0 = _mm_shuffle_epi8( buf[ 0], shuf_bswap32 ); \ + M1 = _mm_shuffle_epi8( buf[ 1], shuf_bswap32 ); \ + M2 = _mm_shuffle_epi8( buf[ 2], shuf_bswap32 ); \ + M3 = _mm_shuffle_epi8( buf[ 3], shuf_bswap32 ); \ + M4 = _mm_shuffle_epi8( buf[ 4], shuf_bswap32 ); \ + M5 = _mm_shuffle_epi8( buf[ 5], shuf_bswap32 ); \ + M6 = _mm_shuffle_epi8( buf[ 6], shuf_bswap32 ); \ + M7 = _mm_shuffle_epi8( buf[ 7], shuf_bswap32 ); \ + M8 = _mm_shuffle_epi8( buf[ 8], shuf_bswap32 ); \ + M9 = _mm_shuffle_epi8( buf[ 9], shuf_bswap32 ); \ + MA = _mm_shuffle_epi8( buf[10], shuf_bswap32 ); \ + MB = _mm_shuffle_epi8( buf[11], shuf_bswap32 ); \ + MC = _mm_shuffle_epi8( buf[12], shuf_bswap32 ); \ + MD = 
_mm_shuffle_epi8( buf[13], shuf_bswap32 ); \ + ME = _mm_shuffle_epi8( buf[14], shuf_bswap32 ); \ + MF = _mm_shuffle_epi8( buf[15], shuf_bswap32 ); \ +} while(0) + +#else // SSE2 + +#define BLAKE256_4WAY_BLOCK_BSWAP32 do \ +{ \ + M0 = mm128_bswap_32( buf[0] ); \ + M1 = mm128_bswap_32( buf[1] ); \ + M2 = mm128_bswap_32( buf[2] ); \ + M3 = mm128_bswap_32( buf[3] ); \ + M4 = mm128_bswap_32( buf[4] ); \ + M5 = mm128_bswap_32( buf[5] ); \ + M6 = mm128_bswap_32( buf[6] ); \ + M7 = mm128_bswap_32( buf[7] ); \ + M8 = mm128_bswap_32( buf[8] ); \ + M9 = mm128_bswap_32( buf[9] ); \ + MA = mm128_bswap_32( buf[10] ); \ + MB = mm128_bswap_32( buf[11] ); \ + MC = mm128_bswap_32( buf[12] ); \ + MD = mm128_bswap_32( buf[13] ); \ + ME = mm128_bswap_32( buf[14] ); \ + MF = mm128_bswap_32( buf[15] ); \ +} while(0) + +#endif // SSSE3 else SSE2 + #define COMPRESS32_4WAY( rounds ) \ do { \ __m128i M0, M1, M2, M3, M4, M5, M6, M7; \ @@ -486,22 +516,7 @@ do { \ VD = _mm_xor_si128( _mm_set1_epi32( T0 ), _mm_set1_epi32( CS5 ) ); \ VE = _mm_xor_si128( _mm_set1_epi32( T1 ), _mm_set1_epi32( CS6 ) ); \ VF = _mm_xor_si128( _mm_set1_epi32( T1 ), _mm_set1_epi32( CS7 ) ); \ - M0 = mm128_bswap_32( buf[ 0] ); \ - M1 = mm128_bswap_32( buf[ 1] ); \ - M2 = mm128_bswap_32( buf[ 2] ); \ - M3 = mm128_bswap_32( buf[ 3] ); \ - M4 = mm128_bswap_32( buf[ 4] ); \ - M5 = mm128_bswap_32( buf[ 5] ); \ - M6 = mm128_bswap_32( buf[ 6] ); \ - M7 = mm128_bswap_32( buf[ 7] ); \ - M8 = mm128_bswap_32( buf[ 8] ); \ - M9 = mm128_bswap_32( buf[ 9] ); \ - MA = mm128_bswap_32( buf[10] ); \ - MB = mm128_bswap_32( buf[11] ); \ - MC = mm128_bswap_32( buf[12] ); \ - MD = mm128_bswap_32( buf[13] ); \ - ME = mm128_bswap_32( buf[14] ); \ - MF = mm128_bswap_32( buf[15] ); \ + BLAKE256_4WAY_BLOCK_BSWAP32; \ ROUND_S_4WAY(0); \ ROUND_S_4WAY(1); \ ROUND_S_4WAY(2); \ @@ -519,14 +534,14 @@ do { \ ROUND_S_4WAY(2); \ ROUND_S_4WAY(3); \ } \ - H0 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( V8, V0 ), S0 ), H0 ); \ - H1 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( V9, V1 ), S1 ), H1 ); \ - H2 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( VA, V2 ), S2 ), H2 ); \ - H3 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( VB, V3 ), S3 ), H3 ); \ - H4 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( VC, V4 ), S0 ), H4 ); \ - H5 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( VD, V5 ), S1 ), H5 ); \ - H6 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( VE, V6 ), S2 ), H6 ); \ - H7 = _mm_xor_si128( _mm_xor_si128( _mm_xor_si128( VF, V7 ), S3 ), H7 ); \ + H0 = mm128_xor4( V8, V0, S0, H0 ); \ + H1 = mm128_xor4( V9, V1, S1, H1 ); \ + H2 = mm128_xor4( VA, V2, S2, H2 ); \ + H3 = mm128_xor4( VB, V3, S3, H3 ); \ + H4 = mm128_xor4( VC, V4, S0, H4 ); \ + H5 = mm128_xor4( VD, V5, S1, H5 ); \ + H6 = mm128_xor4( VE, V6, S2, H6 ); \ + H7 = mm128_xor4( VF, V7, S3, H7 ); \ } while (0) #endif @@ -607,6 +622,7 @@ do { \ __m256i M8, M9, MA, MB, MC, MD, ME, MF; \ __m256i V0, V1, V2, V3, V4, V5, V6, V7; \ __m256i V8, V9, VA, VB, VC, VD, VE, VF; \ + __m256i shuf_bswap32; \ V0 = H0; \ V1 = H1; \ V2 = H2; \ @@ -623,22 +639,24 @@ do { \ VD = _mm256_xor_si256( _mm256_set1_epi32( T0 ), _mm256_set1_epi32( CS5 ) ); \ VE = _mm256_xor_si256( _mm256_set1_epi32( T1 ), _mm256_set1_epi32( CS6 ) ); \ VF = _mm256_xor_si256( _mm256_set1_epi32( T1 ), _mm256_set1_epi32( CS7 ) ); \ - M0 = mm256_bswap_32( * buf ); \ - M1 = mm256_bswap_32( *(buf+1) ); \ - M2 = mm256_bswap_32( *(buf+2) ); \ - M3 = mm256_bswap_32( *(buf+3) ); \ - M4 = mm256_bswap_32( *(buf+4) ); \ - M5 = mm256_bswap_32( *(buf+5) ); \ - M6 = mm256_bswap_32( 
*(buf+6) ); \ - M7 = mm256_bswap_32( *(buf+7) ); \ - M8 = mm256_bswap_32( *(buf+8) ); \ - M9 = mm256_bswap_32( *(buf+9) ); \ - MA = mm256_bswap_32( *(buf+10) ); \ - MB = mm256_bswap_32( *(buf+11) ); \ - MC = mm256_bswap_32( *(buf+12) ); \ - MD = mm256_bswap_32( *(buf+13) ); \ - ME = mm256_bswap_32( *(buf+14) ); \ - MF = mm256_bswap_32( *(buf+15) ); \ + shuf_bswap32 = _mm256_set_epi64x( 0x0c0d0e0f08090a0b, 0x0405060700010203, \ + 0x0c0d0e0f08090a0b, 0x0405060700010203 ); \ + M0 = _mm256_shuffle_epi8( * buf , shuf_bswap32 ); \ + M1 = _mm256_shuffle_epi8( *(buf+ 1), shuf_bswap32 ); \ + M2 = _mm256_shuffle_epi8( *(buf+ 2), shuf_bswap32 ); \ + M3 = _mm256_shuffle_epi8( *(buf+ 3), shuf_bswap32 ); \ + M4 = _mm256_shuffle_epi8( *(buf+ 4), shuf_bswap32 ); \ + M5 = _mm256_shuffle_epi8( *(buf+ 5), shuf_bswap32 ); \ + M6 = _mm256_shuffle_epi8( *(buf+ 6), shuf_bswap32 ); \ + M7 = _mm256_shuffle_epi8( *(buf+ 7), shuf_bswap32 ); \ + M8 = _mm256_shuffle_epi8( *(buf+ 8), shuf_bswap32 ); \ + M9 = _mm256_shuffle_epi8( *(buf+ 9), shuf_bswap32 ); \ + MA = _mm256_shuffle_epi8( *(buf+10), shuf_bswap32 ); \ + MB = _mm256_shuffle_epi8( *(buf+11), shuf_bswap32 ); \ + MC = _mm256_shuffle_epi8( *(buf+12), shuf_bswap32 ); \ + MD = _mm256_shuffle_epi8( *(buf+13), shuf_bswap32 ); \ + ME = _mm256_shuffle_epi8( *(buf+14), shuf_bswap32 ); \ + MF = _mm256_shuffle_epi8( *(buf+15), shuf_bswap32 ); \ ROUND_S_8WAY(0); \ ROUND_S_8WAY(1); \ ROUND_S_8WAY(2); \ @@ -656,22 +674,14 @@ do { \ ROUND_S_8WAY(2); \ ROUND_S_8WAY(3); \ } \ - H0 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( V8, V0 ), \ - S0 ), H0 ); \ - H1 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( V9, V1 ), \ - S1 ), H1 ); \ - H2 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( VA, V2 ), \ - S2 ), H2 ); \ - H3 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( VB, V3 ), \ - S3 ), H3 ); \ - H4 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( VC, V4 ), \ - S0 ), H4 ); \ - H5 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( VD, V5 ), \ - S1 ), H5 ); \ - H6 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( VE, V6 ), \ - S2 ), H6 ); \ - H7 = _mm256_xor_si256( _mm256_xor_si256( _mm256_xor_si256( VF, V7 ), \ - S3 ), H7 ); \ + H0 = mm256_xor4( V8, V0, S0, H0 ); \ + H1 = mm256_xor4( V9, V1, S1, H1 ); \ + H2 = mm256_xor4( VA, V2, S2, H2 ); \ + H3 = mm256_xor4( VB, V3, S3, H3 ); \ + H4 = mm256_xor4( VC, V4, S0, H4 ); \ + H5 = mm256_xor4( VD, V5, S1, H5 ); \ + H6 = mm256_xor4( VE, V6, S2, H6 ); \ + H7 = mm256_xor4( VF, V7, S3, H7 ); \ } while (0) @@ -685,6 +695,7 @@ static void blake32_4way_init( blake_4way_small_context *ctx, const uint32_t *iv, const uint32_t *salt, int rounds ) { + __m128i zero = m128_zero; casti_m128i( ctx->H, 0 ) = _mm_set1_epi32( iv[0] ); casti_m128i( ctx->H, 1 ) = _mm_set1_epi32( iv[1] ); casti_m128i( ctx->H, 2 ) = _mm_set1_epi32( iv[2] ); @@ -694,16 +705,10 @@ blake32_4way_init( blake_4way_small_context *ctx, const uint32_t *iv, casti_m128i( ctx->H, 6 ) = _mm_set1_epi32( iv[6] ); casti_m128i( ctx->H, 7 ) = _mm_set1_epi32( iv[7] ); - casti_m128i( ctx->S, 0 ) = m128_zero; - casti_m128i( ctx->S, 1 ) = m128_zero; - casti_m128i( ctx->S, 2 ) = m128_zero; - casti_m128i( ctx->S, 3 ) = m128_zero; -/* - sc->S[0] = _mm_set1_epi32( salt[0] ); - sc->S[1] = _mm_set1_epi32( salt[1] ); - sc->S[2] = _mm_set1_epi32( salt[2] ); - sc->S[3] = _mm_set1_epi32( salt[3] ); -*/ + casti_m128i( ctx->S, 0 ) = zero; + casti_m128i( ctx->S, 1 ) = zero; + casti_m128i( ctx->S, 2 ) = zero; + casti_m128i( ctx->S, 3 ) = zero; ctx->T0 = ctx->T1 
= 0; ctx->ptr = 0; ctx->rounds = rounds; @@ -796,14 +801,7 @@ blake32_4way_close( blake_4way_small_context *ctx, unsigned ub, unsigned n, blake32_4way( ctx, buf, 64 ); } - casti_m128i( dst, 0 ) = mm128_bswap_32( casti_m128i( ctx->H, 0 ) ); - casti_m128i( dst, 1 ) = mm128_bswap_32( casti_m128i( ctx->H, 1 ) ); - casti_m128i( dst, 2 ) = mm128_bswap_32( casti_m128i( ctx->H, 2 ) ); - casti_m128i( dst, 3 ) = mm128_bswap_32( casti_m128i( ctx->H, 3 ) ); - casti_m128i( dst, 4 ) = mm128_bswap_32( casti_m128i( ctx->H, 4 ) ); - casti_m128i( dst, 5 ) = mm128_bswap_32( casti_m128i( ctx->H, 5 ) ); - casti_m128i( dst, 6 ) = mm128_bswap_32( casti_m128i( ctx->H, 6 ) ); - casti_m128i( dst, 7 ) = mm128_bswap_32( casti_m128i( ctx->H, 7 ) ); + mm128_block_bswap_32( (__m128i*)dst, (__m128i*)ctx->H ); } #if defined (__AVX2__) @@ -816,11 +814,21 @@ static void blake32_8way_init( blake_8way_small_context *sc, const sph_u32 *iv, const sph_u32 *salt, int rounds ) { - int i; - for ( i = 0; i < 8; i++ ) - sc->H[i] = _mm256_set1_epi32( iv[i] ); - for ( i = 0; i < 4; i++ ) - sc->S[i] = _mm256_set1_epi32( salt[i] ); + __m256i zero = m256_zero; + casti_m256i( sc->H, 0 ) = _mm256_set1_epi32( iv[0] ); + casti_m256i( sc->H, 1 ) = _mm256_set1_epi32( iv[1] ); + casti_m256i( sc->H, 2 ) = _mm256_set1_epi32( iv[2] ); + casti_m256i( sc->H, 3 ) = _mm256_set1_epi32( iv[3] ); + casti_m256i( sc->H, 4 ) = _mm256_set1_epi32( iv[4] ); + casti_m256i( sc->H, 5 ) = _mm256_set1_epi32( iv[5] ); + casti_m256i( sc->H, 6 ) = _mm256_set1_epi32( iv[6] ); + casti_m256i( sc->H, 7 ) = _mm256_set1_epi32( iv[7] ); + + casti_m256i( sc->S, 0 ) = zero; + casti_m256i( sc->S, 1 ) = zero; + casti_m256i( sc->S, 2 ) = zero; + casti_m256i( sc->S, 3 ) = zero; + sc->T0 = sc->T1 = 0; sc->ptr = 0; sc->rounds = rounds; @@ -872,14 +880,10 @@ static void blake32_8way_close( blake_8way_small_context *sc, unsigned ub, unsigned n, void *dst, size_t out_size_w32 ) { -// union { - __m256i buf[16]; -// sph_u32 dummy; -// } u; - size_t ptr, k; + __m256i buf[16]; + size_t ptr; unsigned bit_len; sph_u32 th, tl; - __m256i *out; ptr = sc->ptr; bit_len = ((unsigned)ptr << 3); @@ -923,9 +927,7 @@ blake32_8way_close( blake_8way_small_context *sc, unsigned ub, unsigned n, *(buf+(60>>2)) = mm256_bswap_32( _mm256_set1_epi32( tl ) ); blake32_8way( sc, buf, 64 ); } - out = (__m256i*)dst; - for ( k = 0; k < out_size_w32; k++ ) - out[k] = mm256_bswap_32( sc->H[k] ); + mm256_block_bswap_32( (__m256i*)dst, (__m256i*)sc->H ); } #endif diff --git a/algo/blake/blake512-hash-4way.c b/algo/blake/blake512-hash-4way.c index 0063e4d..b57f712 100644 --- a/algo/blake/blake512-hash-4way.c +++ b/algo/blake/blake512-hash-4way.c @@ -412,18 +412,18 @@ static const sph_u64 CB[16] = { V5 = H5; \ V6 = H6; \ V7 = H7; \ - V8 = _mm256_xor_si256( S0, _mm256_set_epi64x( CB0, CB0, CB0, CB0 ) ); \ - V9 = _mm256_xor_si256( S1, _mm256_set_epi64x( CB1, CB1, CB1, CB1 ) ); \ - VA = _mm256_xor_si256( S2, _mm256_set_epi64x( CB2, CB2, CB2, CB2 ) ); \ - VB = _mm256_xor_si256( S3, _mm256_set_epi64x( CB3, CB3, CB3, CB3 ) ); \ - VC = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \ - _mm256_set_epi64x( CB4, CB4, CB4, CB4 ) ); \ - VD = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \ - _mm256_set_epi64x( CB5, CB5, CB5, CB5 ) ); \ - VE = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \ - _mm256_set_epi64x( CB6, CB6, CB6, CB6 ) ); \ - VF = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \ - _mm256_set_epi64x( CB7, CB7, CB7, CB7 ) ); \ + V8 = _mm256_xor_si256( S0, _mm256_set_epi64x( CB0, CB0, CB0, CB0 
) ); \ + V9 = _mm256_xor_si256( S1, _mm256_set_epi64x( CB1, CB1, CB1, CB1 ) ); \ + VA = _mm256_xor_si256( S2, _mm256_set_epi64x( CB2, CB2, CB2, CB2 ) ); \ + VB = _mm256_xor_si256( S3, _mm256_set_epi64x( CB3, CB3, CB3, CB3 ) ); \ + VC = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \ + _mm256_set_epi64x( CB4, CB4, CB4, CB4 ) ); \ + VD = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \ + _mm256_set_epi64x( CB5, CB5, CB5, CB5 ) ); \ + VE = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \ + _mm256_set_epi64x( CB6, CB6, CB6, CB6 ) ); \ + VF = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \ + _mm256_set_epi64x( CB7, CB7, CB7, CB7 ) ); \ M[0x0] = mm256_bswap_64( *(buf+0) ); \ M[0x1] = mm256_bswap_64( *(buf+1) ); \ M[0x2] = mm256_bswap_64( *(buf+2) ); \ @@ -464,80 +464,76 @@ static const sph_u64 CB[16] = { //current impl -#define COMPRESS64_4WAY do { \ - __m256i M0, M1, M2, M3, M4, M5, M6, M7; \ - __m256i M8, M9, MA, MB, MC, MD, ME, MF; \ - __m256i V0, V1, V2, V3, V4, V5, V6, V7; \ - __m256i V8, V9, VA, VB, VC, VD, VE, VF; \ - V0 = H0; \ - V1 = H1; \ - V2 = H2; \ - V3 = H3; \ - V4 = H4; \ - V5 = H5; \ - V6 = H6; \ - V7 = H7; \ - V8 = _mm256_xor_si256( S0, _mm256_set_epi64x( CB0, CB0, CB0, CB0 ) ); \ - V9 = _mm256_xor_si256( S1, _mm256_set_epi64x( CB1, CB1, CB1, CB1 ) ); \ - VA = _mm256_xor_si256( S2, _mm256_set_epi64x( CB2, CB2, CB2, CB2 ) ); \ - VB = _mm256_xor_si256( S3, _mm256_set_epi64x( CB3, CB3, CB3, CB3 ) ); \ - VC = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \ - _mm256_set_epi64x( CB4, CB4, CB4, CB4 ) ); \ - VD = _mm256_xor_si256( _mm256_set_epi64x( T0, T0, T0, T0 ), \ - _mm256_set_epi64x( CB5, CB5, CB5, CB5 ) ); \ - VE = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \ - _mm256_set_epi64x( CB6, CB6, CB6, CB6 ) ); \ - VF = _mm256_xor_si256( _mm256_set_epi64x( T1, T1, T1, T1 ), \ - _mm256_set_epi64x( CB7, CB7, CB7, CB7 ) ); \ - M0 = mm256_bswap_64( *(buf + 0) ); \ - M1 = mm256_bswap_64( *(buf + 1) ); \ - M2 = mm256_bswap_64( *(buf + 2) ); \ - M3 = mm256_bswap_64( *(buf + 3) ); \ - M4 = mm256_bswap_64( *(buf + 4) ); \ - M5 = mm256_bswap_64( *(buf + 5) ); \ - M6 = mm256_bswap_64( *(buf + 6) ); \ - M7 = mm256_bswap_64( *(buf + 7) ); \ - M8 = mm256_bswap_64( *(buf + 8) ); \ - M9 = mm256_bswap_64( *(buf + 9) ); \ - MA = mm256_bswap_64( *(buf + 10) ); \ - MB = mm256_bswap_64( *(buf + 11) ); \ - MC = mm256_bswap_64( *(buf + 12) ); \ - MD = mm256_bswap_64( *(buf + 13) ); \ - ME = mm256_bswap_64( *(buf + 14) ); \ - MF = mm256_bswap_64( *(buf + 15) ); \ - ROUND_B_4WAY(0); \ - ROUND_B_4WAY(1); \ - ROUND_B_4WAY(2); \ - ROUND_B_4WAY(3); \ - ROUND_B_4WAY(4); \ - ROUND_B_4WAY(5); \ - ROUND_B_4WAY(6); \ - ROUND_B_4WAY(7); \ - ROUND_B_4WAY(8); \ - ROUND_B_4WAY(9); \ - ROUND_B_4WAY(0); \ - ROUND_B_4WAY(1); \ - ROUND_B_4WAY(2); \ - ROUND_B_4WAY(3); \ - ROUND_B_4WAY(4); \ - ROUND_B_4WAY(5); \ - H0 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S0, V0 ), V8 ), H0 ); \ - H1 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S1, V1 ), V9 ), H1 ); \ - H2 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S2, V2 ), VA ), H2 ); \ - H3 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S3, V3 ), VB ), H3 ); \ - H4 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S0, V4 ), VC ), H4 ); \ - H5 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S1, V5 ), VD ), H5 ); \ - H6 = _mm256_xor_si256( _mm256_xor_si256( \ - _mm256_xor_si256( S2, V6 ), VE ), H6 ); \ - H7 = _mm256_xor_si256( _mm256_xor_si256( \ - 
_mm256_xor_si256( S3, V7 ), VF ), H7 ); \ - } while (0) +#define COMPRESS64_4WAY do \ +{ \ + __m256i M0, M1, M2, M3, M4, M5, M6, M7; \ + __m256i M8, M9, MA, MB, MC, MD, ME, MF; \ + __m256i V0, V1, V2, V3, V4, V5, V6, V7; \ + __m256i V8, V9, VA, VB, VC, VD, VE, VF; \ + __m256i shuf_bswap64; \ + V0 = H0; \ + V1 = H1; \ + V2 = H2; \ + V3 = H3; \ + V4 = H4; \ + V5 = H5; \ + V6 = H6; \ + V7 = H7; \ + V8 = _mm256_xor_si256( S0, _mm256_set1_epi64x( CB0 ) ); \ + V9 = _mm256_xor_si256( S1, _mm256_set1_epi64x( CB1 ) ); \ + VA = _mm256_xor_si256( S2, _mm256_set1_epi64x( CB2 ) ); \ + VB = _mm256_xor_si256( S3, _mm256_set1_epi64x( CB3 ) ); \ + VC = _mm256_xor_si256( _mm256_set1_epi64x( T0 ), \ + _mm256_set1_epi64x( CB4 ) ); \ + VD = _mm256_xor_si256( _mm256_set1_epi64x( T0 ), \ + _mm256_set1_epi64x( CB5 ) ); \ + VE = _mm256_xor_si256( _mm256_set1_epi64x( T1 ), \ + _mm256_set1_epi64x( CB6 ) ); \ + VF = _mm256_xor_si256( _mm256_set1_epi64x( T1 ), \ + _mm256_set1_epi64x( CB7 ) ); \ + shuf_bswap64 = _mm256_set_epi64x( 0x08090a0b0c0d0e0f, 0x0001020304050607, \ + 0x08090a0b0c0d0e0f, 0x0001020304050607 ); \ + M0 = _mm256_shuffle_epi8( *(buf+ 0), shuf_bswap64 ); \ + M1 = _mm256_shuffle_epi8( *(buf+ 1), shuf_bswap64 ); \ + M2 = _mm256_shuffle_epi8( *(buf+ 2), shuf_bswap64 ); \ + M3 = _mm256_shuffle_epi8( *(buf+ 3), shuf_bswap64 ); \ + M4 = _mm256_shuffle_epi8( *(buf+ 4), shuf_bswap64 ); \ + M5 = _mm256_shuffle_epi8( *(buf+ 5), shuf_bswap64 ); \ + M6 = _mm256_shuffle_epi8( *(buf+ 6), shuf_bswap64 ); \ + M7 = _mm256_shuffle_epi8( *(buf+ 7), shuf_bswap64 ); \ + M8 = _mm256_shuffle_epi8( *(buf+ 8), shuf_bswap64 ); \ + M9 = _mm256_shuffle_epi8( *(buf+ 9), shuf_bswap64 ); \ + MA = _mm256_shuffle_epi8( *(buf+10), shuf_bswap64 ); \ + MB = _mm256_shuffle_epi8( *(buf+11), shuf_bswap64 ); \ + MC = _mm256_shuffle_epi8( *(buf+12), shuf_bswap64 ); \ + MD = _mm256_shuffle_epi8( *(buf+13), shuf_bswap64 ); \ + ME = _mm256_shuffle_epi8( *(buf+14), shuf_bswap64 ); \ + MF = _mm256_shuffle_epi8( *(buf+15), shuf_bswap64 ); \ + ROUND_B_4WAY(0); \ + ROUND_B_4WAY(1); \ + ROUND_B_4WAY(2); \ + ROUND_B_4WAY(3); \ + ROUND_B_4WAY(4); \ + ROUND_B_4WAY(5); \ + ROUND_B_4WAY(6); \ + ROUND_B_4WAY(7); \ + ROUND_B_4WAY(8); \ + ROUND_B_4WAY(9); \ + ROUND_B_4WAY(0); \ + ROUND_B_4WAY(1); \ + ROUND_B_4WAY(2); \ + ROUND_B_4WAY(3); \ + ROUND_B_4WAY(4); \ + ROUND_B_4WAY(5); \ + H0 = mm256_xor4( V8, V0, S0, H0 ); \ + H1 = mm256_xor4( V9, V1, S1, H1 ); \ + H2 = mm256_xor4( VA, V2, S2, H2 ); \ + H3 = mm256_xor4( VB, V3, S3, H3 ); \ + H4 = mm256_xor4( VC, V4, S0, H4 ); \ + H5 = mm256_xor4( VD, V5, S1, H5 ); \ + H6 = mm256_xor4( VE, V6, S2, H6 ); \ + H7 = mm256_xor4( VF, V7, S3, H7 ); \ +} while (0) #endif @@ -547,13 +543,23 @@ static void blake64_4way_init( blake_4way_big_context *sc, const sph_u64 *iv, const sph_u64 *salt ) { - int i; - for ( i = 0; i < 8; i++ ) - sc->H[i] = _mm256_set1_epi64x( iv[i] ); - for ( i = 0; i < 4; i++ ) - sc->S[i] = _mm256_set1_epi64x( salt[i] ); - sc->T0 = sc->T1 = 0; - sc->ptr = 0; + __m256i zero = m256_zero; + casti_m256i( sc->H, 0 ) = _mm256_set1_epi64x( iv[0] ); + casti_m256i( sc->H, 1 ) = _mm256_set1_epi64x( iv[1] ); + casti_m256i( sc->H, 2 ) = _mm256_set1_epi64x( iv[2] ); + casti_m256i( sc->H, 3 ) = _mm256_set1_epi64x( iv[3] ); + casti_m256i( sc->H, 4 ) = _mm256_set1_epi64x( iv[4] ); + casti_m256i( sc->H, 5 ) = _mm256_set1_epi64x( iv[5] ); + casti_m256i( sc->H, 6 ) = _mm256_set1_epi64x( iv[6] ); + casti_m256i( sc->H, 7 ) = _mm256_set1_epi64x( iv[7] ); + + casti_m256i( sc->S, 0 ) = zero; + casti_m256i( sc->S, 1 ) = zero; + 
casti_m256i( sc->S, 2 ) = zero; + casti_m256i( sc->S, 3 ) = zero; + + sc->T0 = sc->T1 = 0; + sc->ptr = 0; } static void @@ -604,15 +610,11 @@ static void blake64_4way_close( blake_4way_big_context *sc, unsigned ub, unsigned n, void *dst, size_t out_size_w64) { -// union { - __m256i buf[16]; -// sph_u64 dummy; -// } u; - size_t ptr, k; + __m256i buf[16]; + size_t ptr; unsigned bit_len; uint64_t z, zz; sph_u64 th, tl; - __m256i *out; ptr = sc->ptr; bit_len = ((unsigned)ptr << 3); @@ -665,9 +667,7 @@ blake64_4way_close( blake_4way_big_context *sc, blake64_4way( sc, buf, 128 ); } - out = (__m256i*)dst; - for ( k = 0; k < out_size_w64; k++ ) - out[k] = mm256_bswap_64( sc->H[k] ); + mm256_block_bswap_64( (__m256i*)dst, sc->H ); } void diff --git a/algo/bmw/bmw256-hash-4way.c b/algo/bmw/bmw256-hash-4way.c index efcb5d2..8f785e3 100644 --- a/algo/bmw/bmw256-hash-4way.c +++ b/algo/bmw/bmw256-hash-4way.c @@ -113,50 +113,27 @@ static const uint32_t IV256[] = { #define expand1s( qt, M, H, i ) \ - _mm_add_epi32( \ - _mm_add_epi32( \ - _mm_add_epi32( \ - _mm_add_epi32( \ - _mm_add_epi32( ss1( qt[ (i)-16 ] ), \ - ss2( qt[ (i)-15 ] ) ), \ - _mm_add_epi32( ss3( qt[ (i)-14 ] ), \ - ss0( qt[ (i)-13 ] ) ) ), \ - _mm_add_epi32( \ - _mm_add_epi32( ss1( qt[ (i)-12 ] ), \ - ss2( qt[ (i)-11 ] ) ), \ - _mm_add_epi32( ss3( qt[ (i)-10 ] ), \ - ss0( qt[ (i)- 9 ] ) ) ) ), \ - _mm_add_epi32( \ - _mm_add_epi32( \ - _mm_add_epi32( ss1( qt[ (i)- 8 ] ), \ - ss2( qt[ (i)- 7 ] ) ), \ - _mm_add_epi32( ss3( qt[ (i)- 6 ] ), \ - ss0( qt[ (i)- 5 ] ) ) ), \ - _mm_add_epi32( \ - _mm_add_epi32( ss1( qt[ (i)- 4 ] ), \ - ss2( qt[ (i)- 3 ] ) ), \ - _mm_add_epi32( ss3( qt[ (i)- 2 ] ), \ - ss0( qt[ (i)- 1 ] ) ) ) ) ), \ + _mm_add_epi32( mm128_add4_32( \ + mm128_add4_32( ss1( qt[ (i)-16 ] ), ss2( qt[ (i)-15 ] ), \ + ss3( qt[ (i)-14 ] ), ss0( qt[ (i)-13 ] ) ), \ + mm128_add4_32( ss1( qt[ (i)-12 ] ), ss2( qt[ (i)-11 ] ), \ + ss3( qt[ (i)-10 ] ), ss0( qt[ (i)- 9 ] ) ), \ + mm128_add4_32( ss1( qt[ (i)- 8 ] ), ss2( qt[ (i)- 7 ] ), \ + ss3( qt[ (i)- 6 ] ), ss0( qt[ (i)- 5 ] ) ), \ + mm128_add4_32( ss1( qt[ (i)- 4 ] ), ss2( qt[ (i)- 3 ] ), \ + ss3( qt[ (i)- 2 ] ), ss0( qt[ (i)- 1 ] ) ) ), \ add_elt_s( M, H, (i)-16 ) ) #define expand2s( qt, M, H, i) \ - _mm_add_epi32( \ - _mm_add_epi32( \ - _mm_add_epi32( \ - _mm_add_epi32( \ - _mm_add_epi32( qt[ (i)-16 ], rs1( qt[ (i)-15 ] ) ), \ - _mm_add_epi32( qt[ (i)-14 ], rs2( qt[ (i)-13 ] ) ) ), \ - _mm_add_epi32( \ - _mm_add_epi32( qt[ (i)-12 ], rs3( qt[ (i)-11 ] ) ), \ - _mm_add_epi32( qt[ (i)-10 ], rs4( qt[ (i)- 9 ] ) ) ) ), \ - _mm_add_epi32( \ - _mm_add_epi32( \ - _mm_add_epi32( qt[ (i)- 8 ], rs5( qt[ (i)- 7 ] ) ), \ - _mm_add_epi32( qt[ (i)- 6 ], rs6( qt[ (i)- 5 ] ) ) ), \ - _mm_add_epi32( \ - _mm_add_epi32( qt[ (i)- 4 ], rs7( qt[ (i)- 3 ] ) ), \ - _mm_add_epi32( ss4( qt[ (i)- 2 ] ), \ - ss5( qt[ (i)- 1 ] ) ) ) ) ), \ + _mm_add_epi32( mm128_add4_32( \ + mm128_add4_32( qt[ (i)-16 ], rs1( qt[ (i)-15 ] ), \ + qt[ (i)-14 ], rs2( qt[ (i)-13 ] ) ), \ + mm128_add4_32( qt[ (i)-12 ], rs3( qt[ (i)-11 ] ), \ + qt[ (i)-10 ], rs4( qt[ (i)- 9 ] ) ), \ + mm128_add4_32( qt[ (i)- 8 ], rs5( qt[ (i)- 7 ] ), \ + qt[ (i)- 6 ], rs6( qt[ (i)- 5 ] ) ), \ + mm128_add4_32( qt[ (i)- 4 ], rs7( qt[ (i)- 3 ] ), \ + ss4( qt[ (i)- 2 ] ), ss5( qt[ (i)- 1 ] ) ) ), \ add_elt_s( M, H, (i)-16 ) ) #define Ws0 \ @@ -357,17 +334,11 @@ void compress_small( const __m128i *M, const __m128i H[16], __m128i dH[16] ) qt[30] = expand2s( qt, M, H, 30 ); qt[31] = expand2s( qt, M, H, 31 ); - xl = _mm_xor_si128( - _mm_xor_si128( _mm_xor_si128( qt[16], 
qt[17] ), - _mm_xor_si128( qt[18], qt[19] ) ), - _mm_xor_si128( _mm_xor_si128( qt[20], qt[21] ), - _mm_xor_si128( qt[22], qt[23] ) ) ); - xh = _mm_xor_si128( xl, - _mm_xor_si128( - _mm_xor_si128( _mm_xor_si128( qt[24], qt[25] ), - _mm_xor_si128( qt[26], qt[27] ) ), - _mm_xor_si128( _mm_xor_si128( qt[28], qt[29] ), - _mm_xor_si128( qt[30], qt[31] ) ))); + xl = _mm_xor_si128( mm128_xor4( qt[16], qt[17], qt[18], qt[19] ), + mm128_xor4( qt[20], qt[21], qt[22], qt[23] ) ); + xh = _mm_xor_si128( xl, _mm_xor_si128( + mm128_xor4( qt[24], qt[25], qt[26], qt[27] ), + mm128_xor4( qt[28], qt[29], qt[30], qt[31] ) ) ); dH[ 0] = _mm_add_epi32( _mm_xor_si128( M[0], @@ -695,22 +666,15 @@ bmw256_4way_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst) #define expand2s8( qt, M, H, i) \ _mm256_add_epi32( \ - _mm256_add_epi32( \ - _mm256_add_epi32( \ - _mm256_add_epi32( \ - _mm256_add_epi32( qt[ (i)-16 ], r8s1( qt[ (i)-15 ] ) ), \ - _mm256_add_epi32( qt[ (i)-14 ], r8s2( qt[ (i)-13 ] ) ) ), \ - _mm256_add_epi32( \ - _mm256_add_epi32( qt[ (i)-12 ], r8s3( qt[ (i)-11 ] ) ), \ - _mm256_add_epi32( qt[ (i)-10 ], r8s4( qt[ (i)- 9 ] ) ) ) ), \ - _mm256_add_epi32( \ - _mm256_add_epi32( \ - _mm256_add_epi32( qt[ (i)- 8 ], r8s5( qt[ (i)- 7 ] ) ), \ - _mm256_add_epi32( qt[ (i)- 6 ], r8s6( qt[ (i)- 5 ] ) ) ), \ - _mm256_add_epi32( \ - _mm256_add_epi32( qt[ (i)- 4 ], r8s7( qt[ (i)- 3 ] ) ), \ - _mm256_add_epi32( s8s4( qt[ (i)- 2 ] ), \ - s8s5( qt[ (i)- 1 ] ) ) ) ) ), \ + mm256_add4_32( \ + mm256_add4_32( qt[ (i)-16 ], r8s1( qt[ (i)-15 ] ), \ + qt[ (i)-14 ], r8s2( qt[ (i)-13 ] ) ), \ + mm256_add4_32( qt[ (i)-12 ], r8s3( qt[ (i)-11 ] ), \ + qt[ (i)-10 ], r8s4( qt[ (i)- 9 ] ) ), \ + mm256_add4_32( qt[ (i)- 8 ], r8s5( qt[ (i)- 7 ] ), \ + qt[ (i)- 6 ], r8s6( qt[ (i)- 5 ] ) ), \ + mm256_add4_32( qt[ (i)- 4 ], r8s7( qt[ (i)- 3 ] ), \ + s8s4( qt[ (i)- 2 ] ), s8s5( qt[ (i)- 1 ] ) ) ), \ add_elt_s8( M, H, (i)-16 ) ) @@ -913,16 +877,11 @@ void compress_small_8way( const __m256i *M, const __m256i H[16], qt[31] = expand2s8( qt, M, H, 31 ); xl = _mm256_xor_si256( - _mm256_xor_si256( _mm256_xor_si256( qt[16], qt[17] ), - _mm256_xor_si256( qt[18], qt[19] ) ), - _mm256_xor_si256( _mm256_xor_si256( qt[20], qt[21] ), - _mm256_xor_si256( qt[22], qt[23] ) ) ); - xh = _mm256_xor_si256( xl, - _mm256_xor_si256( - _mm256_xor_si256( _mm256_xor_si256( qt[24], qt[25] ), - _mm256_xor_si256( qt[26], qt[27] ) ), - _mm256_xor_si256( _mm256_xor_si256( qt[28], qt[29] ), - _mm256_xor_si256( qt[30], qt[31] ) ))); + mm256_xor4( qt[16], qt[17], qt[18], qt[19] ), + mm256_xor4( qt[20], qt[21], qt[22], qt[23] ) ); + xh = _mm256_xor_si256( xl, _mm256_xor_si256( + mm256_xor4( qt[24], qt[25], qt[26], qt[27] ), + mm256_xor4( qt[28], qt[29], qt[30], qt[31] ) ) ); dH[ 0] = _mm256_add_epi32( _mm256_xor_si256( M[0], diff --git a/algo/bmw/bmw512-hash-4way.c b/algo/bmw/bmw512-hash-4way.c index c272e4a..7c58003 100644 --- a/algo/bmw/bmw512-hash-4way.c +++ b/algo/bmw/bmw512-hash-4way.c @@ -569,28 +569,20 @@ void bmw512_2way_close( bmw_2way_big_context *ctx, void *dst ) #define sb0(x) \ - _mm256_xor_si256( _mm256_xor_si256( _mm256_srli_epi64( (x), 1), \ - _mm256_slli_epi64( (x), 3) ), \ - _mm256_xor_si256( mm256_rol_64( (x), 4), \ - mm256_rol_64( (x), 37) ) ) + mm256_xor4( _mm256_srli_epi64( (x), 1), _mm256_slli_epi64( (x), 3), \ + mm256_rol_64( (x), 4), mm256_rol_64( (x),37) ) #define sb1(x) \ - _mm256_xor_si256( _mm256_xor_si256( _mm256_srli_epi64( (x), 1), \ - _mm256_slli_epi64( (x), 2) ), \ - _mm256_xor_si256( mm256_rol_64( (x), 13), \ - mm256_rol_64( (x), 43) 
) ) + mm256_xor4( _mm256_srli_epi64( (x), 1), _mm256_slli_epi64( (x), 2), \ + mm256_rol_64( (x),13), mm256_rol_64( (x),43) ) #define sb2(x) \ - _mm256_xor_si256( _mm256_xor_si256( _mm256_srli_epi64( (x), 2), \ - _mm256_slli_epi64( (x), 1) ), \ - _mm256_xor_si256( mm256_rol_64( (x), 19), \ - mm256_rol_64( (x), 53) ) ) + mm256_xor4( _mm256_srli_epi64( (x), 2), _mm256_slli_epi64( (x), 1), \ + mm256_rol_64( (x),19), mm256_rol_64( (x),53) ) #define sb3(x) \ - _mm256_xor_si256( _mm256_xor_si256( _mm256_srli_epi64( (x), 2), \ - _mm256_slli_epi64( (x), 2) ), \ - _mm256_xor_si256( mm256_rol_64( (x), 28), \ - mm256_rol_64( (x), 59) ) ) + mm256_xor4( _mm256_srli_epi64( (x), 2), _mm256_slli_epi64( (x), 2), \ + mm256_rol_64( (x),28), mm256_rol_64( (x),59) ) #define sb4(x) \ _mm256_xor_si256( (x), _mm256_srli_epi64( (x), 1 ) ) @@ -618,55 +610,32 @@ void bmw512_2way_close( bmw_2way_big_context *ctx, void *dst ) rol_off_64( M, j, 10 ) ), \ _mm256_set1_epi64x( ( (j) + 16 ) * 0x0555555555555555ULL ) ), \ H[ ( (j)+7 ) & 0xF ] ) - + + #define expand1b( qt, M, H, i ) \ - _mm256_add_epi64( \ - _mm256_add_epi64( \ - _mm256_add_epi64( \ - _mm256_add_epi64( \ - _mm256_add_epi64( sb1( qt[ (i)-16 ] ), \ - sb2( qt[ (i)-15 ] ) ), \ - _mm256_add_epi64( sb3( qt[ (i)-14 ] ), \ - sb0( qt[ (i)-13 ] ) ) ), \ - _mm256_add_epi64( \ - _mm256_add_epi64( sb1( qt[ (i)-12 ] ), \ - sb2( qt[ (i)-11 ] ) ), \ - _mm256_add_epi64( sb3( qt[ (i)-10 ] ), \ - sb0( qt[ (i)- 9 ] ) ) ) ), \ - _mm256_add_epi64( \ - _mm256_add_epi64( \ - _mm256_add_epi64( sb1( qt[ (i)- 8 ] ), \ - sb2( qt[ (i)- 7 ] ) ), \ - _mm256_add_epi64( sb3( qt[ (i)- 6 ] ), \ - sb0( qt[ (i)- 5 ] ) ) ), \ - _mm256_add_epi64( \ - _mm256_add_epi64( sb1( qt[ (i)- 4 ] ), \ - sb2( qt[ (i)- 3 ] ) ), \ - _mm256_add_epi64( sb3( qt[ (i)- 2 ] ), \ - sb0( qt[ (i)- 1 ] ) ) ) ) ), \ + _mm256_add_epi64( mm256_add4_64( \ + mm256_add4_64( sb1( qt[ (i)-16 ] ), sb2( qt[ (i)-15 ] ), \ + sb3( qt[ (i)-14 ] ), sb0( qt[ (i)-13 ] )), \ + mm256_add4_64( sb1( qt[ (i)-12 ] ), sb2( qt[ (i)-11 ] ), \ + sb3( qt[ (i)-10 ] ), sb0( qt[ (i)- 9 ] )), \ + mm256_add4_64( sb1( qt[ (i)- 8 ] ), sb2( qt[ (i)- 7 ] ), \ + sb3( qt[ (i)- 6 ] ), sb0( qt[ (i)- 5 ] )), \ + mm256_add4_64( sb1( qt[ (i)- 4 ] ), sb2( qt[ (i)- 3 ] ), \ + sb3( qt[ (i)- 2 ] ), sb0( qt[ (i)- 1 ] ) ) ), \ add_elt_b( M, H, (i)-16 ) ) #define expand2b( qt, M, H, i) \ - _mm256_add_epi64( \ - _mm256_add_epi64( \ - _mm256_add_epi64( \ - _mm256_add_epi64( \ - _mm256_add_epi64( qt[ (i)-16 ], rb1( qt[ (i)-15 ] ) ), \ - _mm256_add_epi64( qt[ (i)-14 ], rb2( qt[ (i)-13 ] ) ) ), \ - _mm256_add_epi64( \ - _mm256_add_epi64( qt[ (i)-12 ], rb3( qt[ (i)-11 ] ) ), \ - _mm256_add_epi64( qt[ (i)-10 ], rb4( qt[ (i)- 9 ] ) ) ) ), \ - _mm256_add_epi64( \ - _mm256_add_epi64( \ - _mm256_add_epi64( qt[ (i)- 8 ], rb5( qt[ (i)- 7 ] ) ), \ - _mm256_add_epi64( qt[ (i)- 6 ], rb6( qt[ (i)- 5 ] ) ) ), \ - _mm256_add_epi64( \ - _mm256_add_epi64( qt[ (i)- 4 ], rb7( qt[ (i)- 3 ] ) ), \ - _mm256_add_epi64( sb4( qt[ (i)- 2 ] ), \ - sb5( qt[ (i)- 1 ] ) ) ) ) ), \ + _mm256_add_epi64( mm256_add4_64( \ + mm256_add4_64( qt[ (i)-16 ], rb1( qt[ (i)-15 ] ), \ + qt[ (i)-14 ], rb2( qt[ (i)-13 ] ) ), \ + mm256_add4_64( qt[ (i)-12 ], rb3( qt[ (i)-11 ] ), \ + qt[ (i)-10 ], rb4( qt[ (i)- 9 ] ) ), \ + mm256_add4_64( qt[ (i)- 8 ], rb5( qt[ (i)- 7 ] ), \ + qt[ (i)- 6 ], rb6( qt[ (i)- 5 ] ) ), \ + mm256_add4_64( qt[ (i)- 4 ], rb7( qt[ (i)- 3 ] ), \ + sb4( qt[ (i)- 2 ] ), sb5( qt[ (i)- 1 ] ) ) ), \ add_elt_b( M, H, (i)-16 ) ) - #define Wb0 \ _mm256_add_epi64( \ _mm256_add_epi64( \ @@ -864,95 +833,90 @@ 
void compress_big( const __m256i *M, const __m256i H[16], __m256i dH[16] ) qt[30] = expand2b( qt, M, H, 30 ); qt[31] = expand2b( qt, M, H, 31 ); - xl = _mm256_xor_si256( - _mm256_xor_si256( _mm256_xor_si256( qt[16], qt[17] ), - _mm256_xor_si256( qt[18], qt[19] ) ), - _mm256_xor_si256( _mm256_xor_si256( qt[20], qt[21] ), - _mm256_xor_si256( qt[22], qt[23] ) ) ); - xh = _mm256_xor_si256( xl, - _mm256_xor_si256( - _mm256_xor_si256( _mm256_xor_si256( qt[24], qt[25] ), - _mm256_xor_si256( qt[26], qt[27] ) ), - _mm256_xor_si256( _mm256_xor_si256( qt[28], qt[29] ), - _mm256_xor_si256( qt[30], qt[31] ) ))); + xl = _mm256_xor_si256( + mm256_xor4( qt[16], qt[17], qt[18], qt[19] ), + mm256_xor4( qt[20], qt[21], qt[22], qt[23] ) ); + xh = _mm256_xor_si256( xl, _mm256_xor_si256( + mm256_xor4( qt[24], qt[25], qt[26], qt[27] ), + mm256_xor4( qt[28], qt[29], qt[30], qt[31] ) ) ); dH[ 0] = _mm256_add_epi64( - _mm256_xor_si256( M[0], - _mm256_xor_si256( _mm256_slli_epi64( xh, 5 ), - _mm256_srli_epi64( qt[16], 5 ) ) ), - _mm256_xor_si256( _mm256_xor_si256( xl, qt[24] ), qt[ 0] )); + _mm256_xor_si256( M[0], + _mm256_xor_si256( _mm256_slli_epi64( xh, 5 ), + _mm256_srli_epi64( qt[16], 5 ) ) ), + _mm256_xor_si256( _mm256_xor_si256( xl, qt[24] ), qt[ 0] ) ); dH[ 1] = _mm256_add_epi64( - _mm256_xor_si256( M[1], - _mm256_xor_si256( _mm256_srli_epi64( xh, 7 ), - _mm256_slli_epi64( qt[17], 8 ) ) ), - _mm256_xor_si256( _mm256_xor_si256( xl, qt[25] ), qt[ 1] )); + _mm256_xor_si256( M[1], + _mm256_xor_si256( _mm256_srli_epi64( xh, 7 ), + _mm256_slli_epi64( qt[17], 8 ) ) ), + _mm256_xor_si256( _mm256_xor_si256( xl, qt[25] ), qt[ 1] ) ); dH[ 2] = _mm256_add_epi64( - _mm256_xor_si256( M[2], - _mm256_xor_si256( _mm256_srli_epi64( xh, 5 ), - _mm256_slli_epi64( qt[18], 5 ) ) ), - _mm256_xor_si256( _mm256_xor_si256( xl, qt[26] ), qt[ 2] )); + _mm256_xor_si256( M[2], + _mm256_xor_si256( _mm256_srli_epi64( xh, 5 ), + _mm256_slli_epi64( qt[18], 5 ) ) ), + _mm256_xor_si256( _mm256_xor_si256( xl, qt[26] ), qt[ 2] ) ); dH[ 3] = _mm256_add_epi64( - _mm256_xor_si256( M[3], - _mm256_xor_si256( _mm256_srli_epi64( xh, 1 ), - _mm256_slli_epi64( qt[19], 5 ) ) ), - _mm256_xor_si256( _mm256_xor_si256( xl, qt[27] ), qt[ 3] )); + _mm256_xor_si256( M[3], + _mm256_xor_si256( _mm256_srli_epi64( xh, 1 ), + _mm256_slli_epi64( qt[19], 5 ) ) ), + _mm256_xor_si256( _mm256_xor_si256( xl, qt[27] ), qt[ 3] ) ); dH[ 4] = _mm256_add_epi64( - _mm256_xor_si256( M[4], - _mm256_xor_si256( _mm256_srli_epi64( xh, 3 ), - _mm256_slli_epi64( qt[20], 0 ) ) ), - _mm256_xor_si256( _mm256_xor_si256( xl, qt[28] ), qt[ 4] )); + _mm256_xor_si256( M[4], + _mm256_xor_si256( _mm256_srli_epi64( xh, 3 ), + _mm256_slli_epi64( qt[20], 0 ) ) ), + _mm256_xor_si256( _mm256_xor_si256( xl, qt[28] ), qt[ 4] ) ); dH[ 5] = _mm256_add_epi64( - _mm256_xor_si256( M[5], - _mm256_xor_si256( _mm256_slli_epi64( xh, 6 ), - _mm256_srli_epi64( qt[21], 6 ) ) ), - _mm256_xor_si256( _mm256_xor_si256( xl, qt[29] ), qt[ 5] )); + _mm256_xor_si256( M[5], + _mm256_xor_si256( _mm256_slli_epi64( xh, 6 ), + _mm256_srli_epi64( qt[21], 6 ) ) ), + _mm256_xor_si256( _mm256_xor_si256( xl, qt[29] ), qt[ 5] ) ); dH[ 6] = _mm256_add_epi64( - _mm256_xor_si256( M[6], - _mm256_xor_si256( _mm256_srli_epi64( xh, 4 ), - _mm256_slli_epi64( qt[22], 6 ) ) ), - _mm256_xor_si256( _mm256_xor_si256( xl, qt[30] ), qt[ 6] )); + _mm256_xor_si256( M[6], + _mm256_xor_si256( _mm256_srli_epi64( xh, 4 ), + _mm256_slli_epi64( qt[22], 6 ) ) ), + _mm256_xor_si256( _mm256_xor_si256( xl, qt[30] ), qt[ 6] ) ); dH[ 7] = _mm256_add_epi64( - 
_mm256_xor_si256( M[7], - _mm256_xor_si256( _mm256_srli_epi64( xh, 11 ), - _mm256_slli_epi64( qt[23], 2 ) ) ), - _mm256_xor_si256( _mm256_xor_si256( xl, qt[31] ), qt[ 7] )); + _mm256_xor_si256( M[7], + _mm256_xor_si256( _mm256_srli_epi64( xh, 11 ), + _mm256_slli_epi64( qt[23], 2 ) ) ), + _mm256_xor_si256( _mm256_xor_si256( xl, qt[31] ), qt[ 7] ) ); dH[ 8] = _mm256_add_epi64( _mm256_add_epi64( - mm256_rol_64( dH[4], 9 ), + mm256_rol_64( dH[4], 9 ), _mm256_xor_si256( _mm256_xor_si256( xh, qt[24] ), M[ 8] )), _mm256_xor_si256( _mm256_slli_epi64( xl, 8 ), _mm256_xor_si256( qt[23], qt[ 8] ) ) ); dH[ 9] = _mm256_add_epi64( _mm256_add_epi64( - mm256_rol_64( dH[5], 10 ), + mm256_rol_64( dH[5], 10 ), _mm256_xor_si256( _mm256_xor_si256( xh, qt[25] ), M[ 9] )), _mm256_xor_si256( _mm256_srli_epi64( xl, 6 ), _mm256_xor_si256( qt[16], qt[ 9] ) ) ); dH[10] = _mm256_add_epi64( _mm256_add_epi64( - mm256_rol_64( dH[6], 11 ), + mm256_rol_64( dH[6], 11 ), _mm256_xor_si256( _mm256_xor_si256( xh, qt[26] ), M[10] )), _mm256_xor_si256( _mm256_slli_epi64( xl, 6 ), _mm256_xor_si256( qt[17], qt[10] ) ) ); dH[11] = _mm256_add_epi64( _mm256_add_epi64( - mm256_rol_64( dH[7], 12 ), + mm256_rol_64( dH[7], 12 ), _mm256_xor_si256( _mm256_xor_si256( xh, qt[27] ), M[11] )), _mm256_xor_si256( _mm256_slli_epi64( xl, 4 ), _mm256_xor_si256( qt[18], qt[11] ) ) ); dH[12] = _mm256_add_epi64( _mm256_add_epi64( - mm256_rol_64( dH[0], 13 ), + mm256_rol_64( dH[0], 13 ), _mm256_xor_si256( _mm256_xor_si256( xh, qt[28] ), M[12] )), _mm256_xor_si256( _mm256_srli_epi64( xl, 3 ), _mm256_xor_si256( qt[19], qt[12] ) ) ); dH[13] = _mm256_add_epi64( _mm256_add_epi64( - mm256_rol_64( dH[1], 14 ), + mm256_rol_64( dH[1], 14 ), _mm256_xor_si256( _mm256_xor_si256( xh, qt[29] ), M[13] )), _mm256_xor_si256( _mm256_srli_epi64( xl, 4 ), _mm256_xor_si256( qt[20], qt[13] ) ) ); dH[14] = _mm256_add_epi64( _mm256_add_epi64( - mm256_rol_64( dH[2], 15 ), + mm256_rol_64( dH[2], 15 ), _mm256_xor_si256( _mm256_xor_si256( xh, qt[30] ), M[14] )), _mm256_xor_si256( _mm256_srli_epi64( xl, 7 ), _mm256_xor_si256( qt[21], qt[14] ) ) ); dH[15] = _mm256_add_epi64( _mm256_add_epi64( - mm256_rol_64( dH[3], 16 ), + mm256_rol_64( dH[3], 16 ), _mm256_xor_si256( _mm256_xor_si256( xh, qt[31] ), M[15] )), _mm256_xor_si256( _mm256_srli_epi64( xl, 2 ), _mm256_xor_si256( qt[22], qt[15] ) ) ); diff --git a/algo/hamsi/hamsi-hash-4way.c b/algo/hamsi/hamsi-hash-4way.c index b415db5..53ba5e3 100644 --- a/algo/hamsi/hamsi-hash-4way.c +++ b/algo/hamsi/hamsi-hash-4way.c @@ -531,16 +531,17 @@ static const sph_u32 T512[64][16] = { #define INPUT_BIG \ do { \ + const __m256i zero = _mm256_setzero_si256(); \ __m256i db = *buf; \ const sph_u32 *tp = &T512[0][0]; \ - m0 = m256_zero; \ - m1 = m256_zero; \ - m2 = m256_zero; \ - m3 = m256_zero; \ - m4 = m256_zero; \ - m5 = m256_zero; \ - m6 = m256_zero; \ - m7 = m256_zero; \ + m0 = zero; \ + m1 = zero; \ + m2 = zero; \ + m3 = zero; \ + m4 = zero; \ + m5 = zero; \ + m6 = zero; \ + m7 = zero; \ for ( int u = 0; u < 64; u++ ) \ { \ __m256i dm = _mm256_and_si256( db, m256_one_64 ) ; \ @@ -913,9 +914,7 @@ void hamsi512_4way( hamsi_4way_big_context *sc, const void *data, size_t len ) void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst ) { - __m256i *out = (__m256i*)dst; __m256i pad[1]; - size_t u; int ch, cl; sph_enc32be( &ch, sc->count_high ); @@ -925,8 +924,8 @@ void hamsi512_4way_close( hamsi_4way_big_context *sc, void *dst ) 0UL, 0x80UL, 0UL, 0x80UL ); hamsi_big( sc, sc->buf, 1 ); hamsi_big_final( sc, pad ); - for ( u = 0; u < 8; u ++ ) - 
out[u] = mm256_bswap_32( sc->h[u] ); + + mm256_block_bswap_32( (__m256i*)dst, sc->h ); } #ifdef __cplusplus diff --git a/algo/hodl/aes.c b/algo/hodl/aes.c index 4ea054f..5be2af3 100644 --- a/algo/hodl/aes.c +++ b/algo/hodl/aes.c @@ -83,7 +83,7 @@ void ExpandAESKey256(__m128i *keys, const __m128i *KeyBuf) keys[14] = tmp1; } -#ifdef __SSE4_2__ +#if defined(__SSE4_2__) //#ifdef __AVX__ #define AESENC(i,j) \ @@ -151,7 +151,7 @@ void AES256CBC(__m128i** data, const __m128i** next, __m128i ExpandedKey[][16], } } -#else // NO SSE4.2 +#else // NO AVX static inline __m128i AES256Core(__m128i State, const __m128i *ExpandedKey) { diff --git a/algo/hodl/hodl-gate.c b/algo/hodl/hodl-gate.c index 07fd0fa..e3df7d1 100644 --- a/algo/hodl/hodl-gate.c +++ b/algo/hodl/hodl-gate.c @@ -166,7 +166,7 @@ bool register_hodl_algo( algo_gate_t* gate ) // return false; // } pthread_barrier_init( &hodl_barrier, NULL, opt_n_threads ); - gate->optimizations = AES_OPT | SSE42_OPT | AVX2_OPT; + gate->optimizations = AES_OPT | AVX_OPT | AVX2_OPT; gate->scanhash = (void*)&hodl_scanhash; gate->get_new_work = (void*)&hodl_get_new_work; gate->longpoll_rpc_call = (void*)&hodl_longpoll_rpc_call; diff --git a/algo/hodl/hodl-wolf.c b/algo/hodl/hodl-wolf.c index f8a15c0..d84dfb9 100644 --- a/algo/hodl/hodl-wolf.c +++ b/algo/hodl/hodl-wolf.c @@ -17,7 +17,7 @@ void GenerateGarbageCore( CacheEntry *Garbage, int ThreadID, int ThreadCount, const uint32_t StartChunk = ThreadID * Chunk; const uint32_t EndChunk = StartChunk + Chunk; -#ifdef __SSE4_2__ +#if defined(__SSE4_2__) //#ifdef __AVX__ uint64_t* TempBufs[ SHA512_PARALLEL_N ] ; uint64_t* desination[ SHA512_PARALLEL_N ]; @@ -64,7 +64,7 @@ void Rev256(uint32_t *Dest, const uint32_t *Src) int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce, uint64_t *hashes_done, struct thr_info *mythr ) { -#ifdef __SSE4_2__ +#if defined(__SSE4_2__) //#ifdef __AVX__ uint32_t *pdata = work->data; uint32_t *ptarget = work->target; @@ -140,7 +140,7 @@ int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce, return(0); -#else // no SSE4.2 +#else // no AVX uint32_t *pdata = work->data; uint32_t *ptarget = work->target; @@ -148,6 +148,7 @@ int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce, CacheEntry *Garbage = (CacheEntry*)hodl_scratchbuf; CacheEntry Cache; uint32_t CollisionCount = 0; + int threadNumber = mythr->id; swab32_array( BlockHdr, pdata, 20 ); // Search for pattern in psuedorandom data @@ -205,7 +206,7 @@ int scanhash_hodl_wolf( struct work* work, uint32_t max_nonce, *hashes_done = CollisionCount; return(0); -#endif // SSE4.2 else +#endif // AVX else } diff --git a/algo/hodl/sha512-avx.h b/algo/hodl/sha512-avx.h index db7cca5..eb7f094 100644 --- a/algo/hodl/sha512-avx.h +++ b/algo/hodl/sha512-avx.h @@ -23,6 +23,7 @@ typedef struct __m256i h[8]; __m256i w[80]; #elif defined(__SSE4_2__) +//#elif defined(__AVX__) __m128i h[8]; __m128i w[80]; #else @@ -32,7 +33,8 @@ typedef struct #ifdef __AVX2__ #define SHA512_PARALLEL_N 8 -#elif defined(__SSE$_2__) +#elif defined(__SSE4_2__) +//#elif defined(__AVX__) #define SHA512_PARALLEL_N 4 #else #define SHA512_PARALLEL_N 1 // dummy value diff --git a/algo/hodl/sha512_avx.c b/algo/hodl/sha512_avx.c index 1615712..1c7c089 100644 --- a/algo/hodl/sha512_avx.c +++ b/algo/hodl/sha512_avx.c @@ -1,6 +1,6 @@ #ifndef __AVX2__ -#ifdef __SSE4_2__ +#if defined(__SSE4_2__) //#ifdef __AVX__ //Dependencies diff --git a/algo/hodl/wolf-aes.h b/algo/hodl/wolf-aes.h index dcca9ae..b33407f 100644 --- a/algo/hodl/wolf-aes.h +++ b/algo/hodl/wolf-aes.h @@ 
-6,7 +6,7 @@ void ExpandAESKey256(__m128i *keys, const __m128i *KeyBuf); -#ifdef __SSE4_2__ +#if defined(__SSE4_2__) //#ifdef __AVX__ #define AES_PARALLEL_N 8 diff --git a/algo/luffa/sph_luffa.c b/algo/luffa/sph_luffa.c index 299231a..349741a 100644 --- a/algo/luffa/sph_luffa.c +++ b/algo/luffa/sph_luffa.c @@ -77,6 +77,24 @@ static const sph_u32 V_INIT[5][8] = { } }; +#if SPH_LUFFA_PARALLEL + +static const sph_u64 RCW010[8] = { + SPH_C64(0xb6de10ed303994a6), SPH_C64(0x70f47aaec0e65299), + SPH_C64(0x0707a3d46cc33a12), SPH_C64(0x1c1e8f51dc56983e), + SPH_C64(0x707a3d451e00108f), SPH_C64(0xaeb285627800423d), + SPH_C64(0xbaca15898f5b7882), SPH_C64(0x40a46f3e96e1db12) +}; + +static const sph_u64 RCW014[8] = { + SPH_C64(0x01685f3de0337818), SPH_C64(0x05a17cf4441ba90d), + SPH_C64(0xbd09caca7f34d442), SPH_C64(0xf4272b289389217f), + SPH_C64(0x144ae5cce5a8bce6), SPH_C64(0xfaa7ae2b5274baf4), + SPH_C64(0x2e48f1c126889ba7), SPH_C64(0xb923c7049a226e9d) +}; + +#else + static const sph_u32 RC00[8] = { SPH_C32(0x303994a6), SPH_C32(0xc0e65299), SPH_C32(0x6cc33a12), SPH_C32(0xdc56983e), @@ -105,20 +123,18 @@ static const sph_u32 RC14[8] = { SPH_C32(0x2e48f1c1), SPH_C32(0xb923c704) }; -#if SPH_LUFFA_PARALLEL - -static const sph_u64 RCW010[8] = { - SPH_C64(0xb6de10ed303994a6), SPH_C64(0x70f47aaec0e65299), - SPH_C64(0x0707a3d46cc33a12), SPH_C64(0x1c1e8f51dc56983e), - SPH_C64(0x707a3d451e00108f), SPH_C64(0xaeb285627800423d), - SPH_C64(0xbaca15898f5b7882), SPH_C64(0x40a46f3e96e1db12) +static const sph_u32 RC30[8] = { + SPH_C32(0xb213afa5), SPH_C32(0xc84ebe95), + SPH_C32(0x4e608a22), SPH_C32(0x56d858fe), + SPH_C32(0x343b138f), SPH_C32(0xd0ec4e3d), + SPH_C32(0x2ceb4882), SPH_C32(0xb3ad2208) }; -static const sph_u64 RCW014[8] = { - SPH_C64(0x01685f3de0337818), SPH_C64(0x05a17cf4441ba90d), - SPH_C64(0xbd09caca7f34d442), SPH_C64(0xf4272b289389217f), - SPH_C64(0x144ae5cce5a8bce6), SPH_C64(0xfaa7ae2b5274baf4), - SPH_C64(0x2e48f1c126889ba7), SPH_C64(0xb923c7049a226e9d) +static const sph_u32 RC34[8] = { + SPH_C32(0xe028c9bf), SPH_C32(0x44756f91), + SPH_C32(0x7e8fce32), SPH_C32(0x956548be), + SPH_C32(0xfe191be2), SPH_C32(0x3cb226e5), + SPH_C32(0x5944a28e), SPH_C32(0xa1c4c355) }; #endif @@ -137,19 +153,6 @@ static const sph_u32 RC24[8] = { SPH_C32(0x36eda57f), SPH_C32(0x703aace7) }; -static const sph_u32 RC30[8] = { - SPH_C32(0xb213afa5), SPH_C32(0xc84ebe95), - SPH_C32(0x4e608a22), SPH_C32(0x56d858fe), - SPH_C32(0x343b138f), SPH_C32(0xd0ec4e3d), - SPH_C32(0x2ceb4882), SPH_C32(0xb3ad2208) -}; - -static const sph_u32 RC34[8] = { - SPH_C32(0xe028c9bf), SPH_C32(0x44756f91), - SPH_C32(0x7e8fce32), SPH_C32(0x956548be), - SPH_C32(0xfe191be2), SPH_C32(0x3cb226e5), - SPH_C32(0x5944a28e), SPH_C32(0xa1c4c355) -}; #if SPH_LUFFA_PARALLEL diff --git a/algo/lyra2/lyra2h-4way.c b/algo/lyra2/lyra2h-4way.c index 9ebd015..137b614 100644 --- a/algo/lyra2/lyra2h-4way.c +++ b/algo/lyra2/lyra2h-4way.c @@ -5,7 +5,7 @@ #include #include #include "lyra2.h" -#include "algo/blake/sph_blake.h" +//#include "algo/blake/sph_blake.h" #include "algo/blake/blake-hash-4way.h" __thread uint64_t* lyra2h_4way_matrix; diff --git a/algo/quark/anime-4way.c b/algo/quark/anime-4way.c index 5a76e9a..b71398b 100644 --- a/algo/quark/anime-4way.c +++ b/algo/quark/anime-4way.c @@ -50,6 +50,7 @@ void anime_4way_hash( void *state, const void *input ) __m256i vh_mask; const uint32_t mask = 8; const __m256i bit3_mask = _mm256_set1_epi64x( 8 ); + const __m256i zero = _mm256_setzero_si256(); anime_4way_ctx_holder ctx; memcpy( &ctx, &anime_4way_ctx, sizeof(anime_4way_ctx) ); @@ 
-59,8 +60,7 @@ void anime_4way_hash( void *state, const void *input ) blake512_4way( &ctx.blake, vhash, 64 ); blake512_4way_close( &ctx.blake, vhash ); - vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), - m256_zero ); + vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero ); mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); @@ -114,8 +114,7 @@ void anime_4way_hash( void *state, const void *input ) jh512_4way( &ctx.jh, vhash, 64 ); jh512_4way_close( &ctx.jh, vhash ); - vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), - m256_zero ); + vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero ); if ( mm256_anybits1( vh_mask ) ) { @@ -139,8 +138,7 @@ void anime_4way_hash( void *state, const void *input ) skein512_4way( &ctx.skein, vhash, 64 ); skein512_4way_close( &ctx.skein, vhash ); - vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), - m256_zero ); + vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero ); if ( mm256_anybits1( vh_mask ) ) { diff --git a/algo/quark/quark-4way.c b/algo/quark/quark-4way.c index e9581de..5e22c67 100644 --- a/algo/quark/quark-4way.c +++ b/algo/quark/quark-4way.c @@ -51,6 +51,7 @@ void quark_4way_hash( void *state, const void *input ) quark_4way_ctx_holder ctx; const __m256i bit3_mask = _mm256_set1_epi64x( 8 ); const uint32_t mask = 8; + const __m256i zero = _mm256_setzero_si256(); memcpy( &ctx, &quark_4way_ctx, sizeof(quark_4way_ctx) ); @@ -60,8 +61,7 @@ void quark_4way_hash( void *state, const void *input ) bmw512_4way( &ctx.bmw, vhash, 64 ); bmw512_4way_close( &ctx.bmw, vhash ); - vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), - m256_zero ); + vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero ); mm256_dintrlv_4x64( hash0, hash1, hash2, hash3, vhash, 512 ); @@ -115,8 +115,7 @@ void quark_4way_hash( void *state, const void *input ) jh512_4way( &ctx.jh, vhash, 64 ); jh512_4way_close( &ctx.jh, vhash ); - vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), - m256_zero ); + vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero ); if ( mm256_anybits1( vh_mask ) ) { @@ -141,8 +140,7 @@ void quark_4way_hash( void *state, const void *input ) skein512_4way( &ctx.skein, vhash, 64 ); skein512_4way_close( &ctx.skein, vhash ); - vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), - m256_zero ); + vh_mask = _mm256_cmpeq_epi64( _mm256_and_si256( vh[0], bit3_mask ), zero ); if ( mm256_anybits1( vh_mask ) ) { diff --git a/algo/sha/sha2-hash-4way.c b/algo/sha/sha2-hash-4way.c index 92de422..d3e5150 100644 --- a/algo/sha/sha2-hash-4way.c +++ b/algo/sha/sha2-hash-4way.c @@ -86,8 +86,7 @@ static const sph_u32 K256[64] = { // SHA-256 4 way #define SHA2s_MEXP( a, b, c, d ) \ - _mm_add_epi32( _mm_add_epi32( _mm_add_epi32( \ - SSG2_1( W[a] ), W[b] ), SSG2_0( W[c] ) ), W[d] ); + mm128_add4_32( SSG2_1( W[a] ), W[b], SSG2_0( W[c] ), W[d] ); #define CHs(X, Y, Z) \ _mm_xor_si128( _mm_and_si128( _mm_xor_si128( Y, Z ), X ), Z ) @@ -115,9 +114,8 @@ static const sph_u32 K256[64] = { #define SHA2s_4WAY_STEP(A, B, C, D, E, F, G, H, i, j) \ do { \ register __m128i T1, T2; \ - T1 = _mm_add_epi32( _mm_add_epi32( _mm_add_epi32( \ - _mm_add_epi32( H, BSG2_1(E) ), CHs(E, F, G) ), \ - _mm_set1_epi32( K256[( (j)+(i) )] ) ), W[i] ); \ + T1 = _mm_add_epi32( H, mm128_add4_32( BSG2_1(E), CHs(E, F, G), \ + _mm_set1_epi32( K256[( (j)+(i) )] ), W[i] ) ); \ T2 = _mm_add_epi32( BSG2_0(A), MAJs(A, B, C) 
); \ D = _mm_add_epi32( D, T1 ); \ H = _mm_add_epi32( T1, T2 ); \ @@ -129,22 +127,8 @@ sha256_4way_round( __m128i *in, __m128i r[8] ) register __m128i A, B, C, D, E, F, G, H; __m128i W[16]; - W[ 0] = mm128_bswap_32( in[ 0] ); - W[ 1] = mm128_bswap_32( in[ 1] ); - W[ 2] = mm128_bswap_32( in[ 2] ); - W[ 3] = mm128_bswap_32( in[ 3] ); - W[ 4] = mm128_bswap_32( in[ 4] ); - W[ 5] = mm128_bswap_32( in[ 5] ); - W[ 6] = mm128_bswap_32( in[ 6] ); - W[ 7] = mm128_bswap_32( in[ 7] ); - W[ 8] = mm128_bswap_32( in[ 8] ); - W[ 9] = mm128_bswap_32( in[ 9] ); - W[10] = mm128_bswap_32( in[10] ); - W[11] = mm128_bswap_32( in[11] ); - W[12] = mm128_bswap_32( in[12] ); - W[13] = mm128_bswap_32( in[13] ); - W[14] = mm128_bswap_32( in[14] ); - W[15] = mm128_bswap_32( in[15] ); + mm128_block_bswap_32( W, in ); + mm128_block_bswap_32( W+8, in+8 ); A = r[0]; B = r[1]; @@ -266,7 +250,7 @@ void sha256_4way( sha256_4way_context *sc, const void *data, size_t len ) void sha256_4way_close( sha256_4way_context *sc, void *dst ) { - unsigned ptr, u; + unsigned ptr; uint32_t low, high; const int buf_size = 64; const int pad = buf_size - 8; @@ -294,8 +278,7 @@ void sha256_4way_close( sha256_4way_context *sc, void *dst ) mm128_bswap_32( _mm_set1_epi32( low ) ); sha256_4way_round( sc->buf, sc->val ); - for ( u = 0; u < 8; u ++ ) - ((__m128i*)dst)[u] = mm128_bswap_32( sc->val[u] ); + mm128_block_bswap_32( dst, sc->val ); } #if defined(__AVX2__) @@ -326,15 +309,13 @@ void sha256_4way_close( sha256_4way_context *sc, void *dst ) mm256_ror_32(x, 17), mm256_ror_32(x, 19) ), _mm256_srli_epi32(x, 10) ) #define SHA2x_MEXP( a, b, c, d ) \ - _mm256_add_epi32( _mm256_add_epi32( _mm256_add_epi32( \ - SSG2_1x( W[a] ), W[b] ), SSG2_0x( W[c] ) ), W[d] ); + mm256_add4_32( SSG2_1x( W[a] ), W[b], SSG2_0x( W[c] ), W[d] ); #define SHA2s_8WAY_STEP(A, B, C, D, E, F, G, H, i, j) \ do { \ register __m256i T1, T2; \ - T1 = _mm256_add_epi32( _mm256_add_epi32( _mm256_add_epi32( \ - _mm256_add_epi32( H, BSG2_1x(E) ), CHx(E, F, G) ), \ - _mm256_set1_epi32( K256[( (j)+(i) )] ) ), W[i] ); \ + T1 = _mm256_add_epi32( H, mm256_add4_32( BSG2_1x(E), CHx(E, F, G), \ + _mm256_set1_epi32( K256[( (j)+(i) )] ), W[i] ) ); \ T2 = _mm256_add_epi32( BSG2_0x(A), MAJx(A, B, C) ); \ D = _mm256_add_epi32( D, T1 ); \ H = _mm256_add_epi32( T1, T2 ); \ @@ -346,22 +327,8 @@ sha256_8way_round( __m256i *in, __m256i r[8] ) register __m256i A, B, C, D, E, F, G, H; __m256i W[16]; - W[ 0] = mm256_bswap_32( in[ 0] ); - W[ 1] = mm256_bswap_32( in[ 1] ); - W[ 2] = mm256_bswap_32( in[ 2] ); - W[ 3] = mm256_bswap_32( in[ 3] ); - W[ 4] = mm256_bswap_32( in[ 4] ); - W[ 5] = mm256_bswap_32( in[ 5] ); - W[ 6] = mm256_bswap_32( in[ 6] ); - W[ 7] = mm256_bswap_32( in[ 7] ); - W[ 8] = mm256_bswap_32( in[ 8] ); - W[ 9] = mm256_bswap_32( in[ 9] ); - W[10] = mm256_bswap_32( in[10] ); - W[11] = mm256_bswap_32( in[11] ); - W[12] = mm256_bswap_32( in[12] ); - W[13] = mm256_bswap_32( in[13] ); - W[14] = mm256_bswap_32( in[14] ); - W[15] = mm256_bswap_32( in[15] ); + mm256_block_bswap_32( W , in ); + mm256_block_bswap_32( W+8, in+8 ); A = r[0]; B = r[1]; @@ -484,7 +451,7 @@ void sha256_8way( sha256_8way_context *sc, const void *data, size_t len ) void sha256_8way_close( sha256_8way_context *sc, void *dst ) { - unsigned ptr, u; + unsigned ptr; uint32_t low, high; const int buf_size = 64; const int pad = buf_size - 8; @@ -513,8 +480,7 @@ void sha256_8way_close( sha256_8way_context *sc, void *dst ) sha256_8way_round( sc->buf, sc->val ); - for ( u = 0; u < 8; u ++ ) - ((__m256i*)dst)[u] = mm256_bswap_32( 
sc->val[u] ); + mm256_block_bswap_32( dst, sc->val ); } @@ -596,9 +562,8 @@ static const sph_u64 K512[80] = { #define SHA3_4WAY_STEP(A, B, C, D, E, F, G, H, i) \ do { \ register __m256i T1, T2; \ - T1 = _mm256_add_epi64( _mm256_add_epi64( _mm256_add_epi64( \ - _mm256_add_epi64( H, BSG5_1(E) ), CH(E, F, G) ), \ - _mm256_set1_epi64x( K512[i] ) ), W[i] ); \ + T1 = _mm256_add_epi64( H, mm256_add4_64( BSG5_1(E), CH(E, F, G), \ + _mm256_set1_epi64x( K512[i] ), W[i] ) ); \ T2 = _mm256_add_epi64( BSG5_0(A), MAJ(A, B, C) ); \ D = _mm256_add_epi64( D, T1 ); \ H = _mm256_add_epi64( T1, T2 ); \ @@ -611,11 +576,12 @@ sha512_4way_round( __m256i *in, __m256i r[8] ) register __m256i A, B, C, D, E, F, G, H; __m256i W[80]; - for ( i = 0; i < 16; i++ ) - W[i] = mm256_bswap_64( in[i] ); + mm256_block_bswap_64( W , in ); + mm256_block_bswap_64( W+8, in+8 ); + for ( i = 16; i < 80; i++ ) - W[i] = _mm256_add_epi64( _mm256_add_epi64( _mm256_add_epi64( - SSG5_1( W[ i-2 ] ), W[ i-7 ] ), SSG5_0( W[ i-15 ] ) ), W[ i-16 ] ); + W[i] = mm256_add4_64( SSG5_1( W[ i- 2 ] ), W[ i- 7 ], + SSG5_0( W[ i-15 ] ), W[ i-16 ] ); A = r[0]; B = r[1]; @@ -689,7 +655,7 @@ void sha512_4way( sha512_4way_context *sc, const void *data, size_t len ) void sha512_4way_close( sha512_4way_context *sc, void *dst ) { - unsigned ptr, u; + unsigned ptr; const int buf_size = 128; const int pad = buf_size - 16; @@ -711,8 +677,7 @@ void sha512_4way_close( sha512_4way_context *sc, void *dst ) mm256_bswap_64( _mm256_set1_epi64x( sc->count << 3 ) ); sha512_4way_round( sc->buf, sc->val ); - for ( u = 0; u < 8; u ++ ) - ((__m256i*)dst)[u] = mm256_bswap_64( sc->val[u] ); + mm256_block_bswap_64( dst, sc->val ); } #endif // __AVX2__ diff --git a/algo/shavite/shavite-hash-2way.c b/algo/shavite/shavite-hash-2way.c index f29260e..1a4a685 100644 --- a/algo/shavite/shavite-hash-2way.c +++ b/algo/shavite/shavite-hash-2way.c @@ -20,6 +20,7 @@ static const uint32_t IV512[] = static void c512_2way( shavite512_2way_context *ctx, const void *msg ) { + const __m128i zero = _mm_setzero_si128(); __m256i p0, p1, p2, p3, x; __m256i k00, k01, k02, k03, k10, k11, k12, k13; __m256i *m = (__m256i*)msg; @@ -33,24 +34,24 @@ c512_2way( shavite512_2way_context *ctx, const void *msg ) // round k00 = m[0]; - x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k00 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k00 ), zero ); k01 = m[1]; - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ), zero ); k02 = m[2]; - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ), zero ); k03 = m[3]; - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ), zero ); p0 = _mm256_xor_si256( p0, x ); k10 = m[4]; - x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k10 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k10 ), zero ); k11 = m[5]; - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ), zero ); k12 = m[6]; - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ), zero ); k13 = m[7]; - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ), zero ); p2 = _mm256_xor_si256( p2, x ); @@ -59,129 +60,129 @@ c512_2way( shavite512_2way_context *ctx, const void *msg ) // round 1, 5, 9 k00 = _mm256_xor_si256( k13, mm256_ror1x32_128( - mm256_aesenc_2x128( k00 ) ) ); + mm256_aesenc_2x128( k00, 
zero ) ) ); if ( r == 0 ) k00 = _mm256_xor_si256( k00, _mm256_set_epi32( ~ctx->count3, ctx->count2, ctx->count1, ctx->count0, ~ctx->count3, ctx->count2, ctx->count1, ctx->count0 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k00 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k00 ), zero ); k01 = _mm256_xor_si256( k00, - mm256_ror1x32_128( mm256_aesenc_2x128( k01 ) ) ); + mm256_ror1x32_128( mm256_aesenc_2x128( k01, zero ) ) ); if ( r == 1 ) k01 = _mm256_xor_si256( k01, _mm256_set_epi32( ~ctx->count0, ctx->count1, ctx->count2, ctx->count3, ~ctx->count0, ctx->count1, ctx->count2, ctx->count3 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ), zero ); k02 = _mm256_xor_si256( k01, - mm256_ror1x32_128( mm256_aesenc_2x128( k02 ) ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) ); + mm256_ror1x32_128( mm256_aesenc_2x128( k02, zero ) ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ), zero ); k03 = _mm256_xor_si256( k02, - mm256_ror1x32_128( mm256_aesenc_2x128( k03 ) ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) ); + mm256_ror1x32_128( mm256_aesenc_2x128( k03, zero ) ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ), zero ); p3 = _mm256_xor_si256( p3, x ); k10 = _mm256_xor_si256( k03, - mm256_ror1x32_128( mm256_aesenc_2x128( k10 ) ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k10 ) ); + mm256_ror1x32_128( mm256_aesenc_2x128( k10, zero ) ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k10 ), zero ); k11 = _mm256_xor_si256( k10, - mm256_ror1x32_128( mm256_aesenc_2x128( k11 ) ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) ); + mm256_ror1x32_128( mm256_aesenc_2x128( k11, zero ) ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ), zero ); k12 = _mm256_xor_si256( k11, - mm256_ror1x32_128( mm256_aesenc_2x128( k12 ) ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) ); + mm256_ror1x32_128( mm256_aesenc_2x128( k12, zero ) ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ), zero ); k13 = _mm256_xor_si256( k12, - mm256_ror1x32_128( mm256_aesenc_2x128( k13 ) ) ); + mm256_ror1x32_128( mm256_aesenc_2x128( k13, zero ) ) ); if ( r == 2 ) k13 = _mm256_xor_si256( k13, _mm256_set_epi32( ~ctx->count1, ctx->count0, ctx->count3, ctx->count2, ~ctx->count1, ctx->count0, ctx->count3, ctx->count2 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ), zero ); p1 = _mm256_xor_si256( p1, x ); // round 2, 6, 10 k00 = _mm256_xor_si256( k00, mm256_ror2x256hi_1x32( k12, k13 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k00 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k00 ), zero ); k01 = _mm256_xor_si256( k01, mm256_ror2x256hi_1x32( k13, k00 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ), zero ); k02 = _mm256_xor_si256( k02, mm256_ror2x256hi_1x32( k00, k01 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ), zero ); k03 = _mm256_xor_si256( k03, mm256_ror2x256hi_1x32( k01, k02 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ), zero ); p2 = _mm256_xor_si256( p2, x ); k10 = _mm256_xor_si256( k10, mm256_ror2x256hi_1x32( k02, k03 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k10 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k10 ), zero ); k11 = _mm256_xor_si256( k11, mm256_ror2x256hi_1x32( k03, k10 ) ); - x 
= mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ), zero ); k12 = _mm256_xor_si256( k12, mm256_ror2x256hi_1x32( k10, k11 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ), zero ); k13 = _mm256_xor_si256( k13, mm256_ror2x256hi_1x32( k11, k12 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ), zero ); p0 = _mm256_xor_si256( p0, x ); // round 3, 7, 11 k00 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k00 ) ), k13 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k00 ) ); + mm256_aesenc_2x128( k00, zero ) ), k13 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k00 ), zero ); k01 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k01 ) ), k00 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) ); + mm256_aesenc_2x128( k01, zero ) ), k00 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ), zero ); k02 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k02 ) ), k01 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) ); + mm256_aesenc_2x128( k02, zero ) ), k01 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ), zero ); k03 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k03 ) ), k02 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) ); + mm256_aesenc_2x128( k03, zero ) ), k02 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ), zero ); p1 = _mm256_xor_si256( p1, x ); k10 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k10 ) ), k03 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k10 ) ); + mm256_aesenc_2x128( k10, zero ) ), k03 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k10 ), zero ); k11 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k11 ) ), k10 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) ); + mm256_aesenc_2x128( k11, zero ) ), k10 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ), zero ); k12 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k12 ) ), k11 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) ); + mm256_aesenc_2x128( k12, zero ) ), k11 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ), zero ); k13 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k13 ) ), k12 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) ); + mm256_aesenc_2x128( k13, zero ) ), k12 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ), zero ); p3 = _mm256_xor_si256( p3, x ); // round 4, 8, 12 k00 = _mm256_xor_si256( k00, mm256_ror2x256hi_1x32( k12, k13 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k00 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p1, k00 ), zero ); k01 = _mm256_xor_si256( k01, mm256_ror2x256hi_1x32( k13, k00 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ), zero ); k02 = _mm256_xor_si256( k02, mm256_ror2x256hi_1x32( k00, k01 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ), zero ); k03 = _mm256_xor_si256( k03, mm256_ror2x256hi_1x32( k01, k02 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ), zero ); p0 = _mm256_xor_si256( p0, x ); k10 = _mm256_xor_si256( k10, mm256_ror2x256hi_1x32( k02, k03 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k10 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p3, k10 ), zero ); k11 = _mm256_xor_si256( k11, 
mm256_ror2x256hi_1x32( k03, k10 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ), zero ); k12 = _mm256_xor_si256( k12, mm256_ror2x256hi_1x32( k10, k11 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ), zero ); k13 = _mm256_xor_si256( k13, mm256_ror2x256hi_1x32( k11, k12 ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ), zero ); p2 = _mm256_xor_si256( p2, x ); @@ -190,36 +191,36 @@ c512_2way( shavite512_2way_context *ctx, const void *msg ) // round 13 k00 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k00 ) ), k13 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k00 ) ); + mm256_aesenc_2x128( k00, zero ) ), k13 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p0, k00 ), zero ); k01 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k01 ) ), k00 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ) ); + mm256_aesenc_2x128( k01, zero ) ), k00 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k01 ), zero ); k02 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k02 ) ), k01 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ) ); + mm256_aesenc_2x128( k02, zero ) ), k01 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k02 ), zero ); k03 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k03 ) ), k02 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ) ); + mm256_aesenc_2x128( k03, zero ) ), k02 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k03 ), zero ); p3 = _mm256_xor_si256( p3, x ); k10 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k10 ) ), k03 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k10 ) ); + mm256_aesenc_2x128( k10, zero ) ), k03 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( p2, k10 ), zero ); k11 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k11 ) ), k10 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ) ); + mm256_aesenc_2x128( k11, zero ) ), k10 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k11 ), zero ); - k12 = mm256_ror1x32_128( mm256_aesenc_2x128( k12 ) ); + k12 = mm256_ror1x32_128( mm256_aesenc_2x128( k12, zero ) ); k12 = _mm256_xor_si256( k12, _mm256_xor_si256( k11, _mm256_set_epi32( ~ctx->count2, ctx->count3, ctx->count0, ctx->count1, ~ctx->count2, ctx->count3, ctx->count0, ctx->count1 ) ) ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ) ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k12 ), zero ); k13 = _mm256_xor_si256( mm256_ror1x32_128( - mm256_aesenc_2x128( k13 ) ), k12 ); - x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ) ); + mm256_aesenc_2x128( k13, zero ) ), k12 ); + x = mm256_aesenc_2x128( _mm256_xor_si256( x, k13 ), zero ); p1 = _mm256_xor_si256( p1, x ); diff --git a/algo/shavite/sph-shavite-aesni.c b/algo/shavite/sph-shavite-aesni.c index 303c201..b60a3b8 100644 --- a/algo/shavite/sph-shavite-aesni.c +++ b/algo/shavite/sph-shavite-aesni.c @@ -87,6 +87,7 @@ static const sph_u32 IV512[] = { static void c512( sph_shavite_big_context *sc, const void *msg ) { + const __m128i zero = _mm_setzero_si128(); __m128i p0, p1, p2, p3, x; __m128i k00, k01, k02, k03, k10, k11, k12, k13; __m128i *m = (__m128i*)msg; @@ -101,38 +102,38 @@ c512( sph_shavite_big_context *sc, const void *msg ) // round k00 = m[0]; x = _mm_xor_si128( p1, k00 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k01 = m[1]; x = _mm_xor_si128( x, k01 ); - x = _mm_aesenc_si128( 
x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k02 = m[2]; x = _mm_xor_si128( x, k02 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k03 = m[3]; x = _mm_xor_si128( x, k03 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); p0 = _mm_xor_si128( p0, x ); k10 = m[4]; x = _mm_xor_si128( p3, k10 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k11 = m[5]; x = _mm_xor_si128( x, k11 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k12 = m[6]; x = _mm_xor_si128( x, k12 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k13 = m[7]; x = _mm_xor_si128( x, k13 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); p2 = _mm_xor_si128( p2, x ); for ( r = 0; r < 3; r ++ ) { // round 1, 5, 9 - k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) ); + k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, zero ) ); k00 = _mm_xor_si128( k00, k13 ); if ( r == 0 ) @@ -140,8 +141,8 @@ c512( sph_shavite_big_context *sc, const void *msg ) ~sc->count3, sc->count2, sc->count1, sc->count0 ) ); x = _mm_xor_si128( p0, k00 ); - x = _mm_aesenc_si128( x, m128_zero ); - k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, zero ) ); k01 = _mm_xor_si128( k01, k00 ); if ( r == 1 ) @@ -149,32 +150,32 @@ c512( sph_shavite_big_context *sc, const void *msg ) ~sc->count0, sc->count1, sc->count2, sc->count3 ) ); x = _mm_xor_si128( x, k01 ); - x = _mm_aesenc_si128( x, m128_zero ); - k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, zero ) ); k02 = _mm_xor_si128( k02, k01 ); x = _mm_xor_si128( x, k02 ); - x = _mm_aesenc_si128( x, m128_zero ); - k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, zero ) ); k03 = _mm_xor_si128( k03, k02 ); x = _mm_xor_si128( x, k03 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); p3 = _mm_xor_si128( p3, x ); - k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) ); + k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, zero ) ); k10 = _mm_xor_si128( k10, k03 ); x = _mm_xor_si128( p2, k10 ); - x = _mm_aesenc_si128( x, m128_zero ); - k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, zero ) ); k11 = _mm_xor_si128( k11, k10 ); x = _mm_xor_si128( x, k11 ); - x = _mm_aesenc_si128( x, m128_zero ); - k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, zero ) ); k12 = _mm_xor_si128( k12, k11 ); x = _mm_xor_si128( x, k12 ); - x = _mm_aesenc_si128( x, m128_zero ); - k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, zero ) ); k13 = _mm_xor_si128( k13, k12 ); if ( r == 2 ) @@ -182,78 +183,78 @@ c512( sph_shavite_big_context *sc, const void *msg ) ~sc->count1, sc->count0, sc->count3, sc->count2 ) ); x = _mm_xor_si128( x, k13 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); p1 = _mm_xor_si128( p1, x ); // round 2, 6, 10 k00 = _mm_xor_si128( k00, mm128_ror256hi_1x32( k12, k13 ) ); x = _mm_xor_si128( p3, k00 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = 
_mm_aesenc_si128( x, zero ); k01 = _mm_xor_si128( k01, mm128_ror256hi_1x32( k13, k00 ) ); x = _mm_xor_si128( x, k01 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k02 = _mm_xor_si128( k02, mm128_ror256hi_1x32( k00, k01 ) ); x = _mm_xor_si128( x, k02 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k03 = _mm_xor_si128( k03, mm128_ror256hi_1x32( k01, k02 ) ); x = _mm_xor_si128( x, k03 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); p2 = _mm_xor_si128( p2, x ); k10 = _mm_xor_si128( k10, mm128_ror256hi_1x32( k02, k03 ) ); x = _mm_xor_si128( p1, k10 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k11 = _mm_xor_si128( k11, mm128_ror256hi_1x32( k03, k10 ) ); x = _mm_xor_si128( x, k11 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k12 = _mm_xor_si128( k12, mm128_ror256hi_1x32( k10, k11 ) ); x = _mm_xor_si128( x, k12 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k13 = _mm_xor_si128( k13, mm128_ror256hi_1x32( k11, k12 ) ); x = _mm_xor_si128( x, k13 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); p0 = _mm_xor_si128( p0, x ); // round 3, 7, 11 - k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) ); + k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, zero ) ); k00 = _mm_xor_si128( k00, k13 ); x = _mm_xor_si128( p2, k00 ); - x = _mm_aesenc_si128( x, m128_zero ); - k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, zero ) ); k01 = _mm_xor_si128( k01, k00 ); x = _mm_xor_si128( x, k01 ); - x = _mm_aesenc_si128( x, m128_zero ); - k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, zero ) ); k02 = _mm_xor_si128( k02, k01 ); x = _mm_xor_si128( x, k02 ); - x = _mm_aesenc_si128( x, m128_zero ); - k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, zero ) ); k03 = _mm_xor_si128( k03, k02 ); x = _mm_xor_si128( x, k03 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); p1 = _mm_xor_si128( p1, x ); - k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) ); + k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, zero ) ); k10 = _mm_xor_si128( k10, k03 ); x = _mm_xor_si128( p0, k10 ); - x = _mm_aesenc_si128( x, m128_zero ); - k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, zero ) ); k11 = _mm_xor_si128( k11, k10 ); x = _mm_xor_si128( x, k11 ); - x = _mm_aesenc_si128( x, m128_zero ); - k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, zero ) ); k12 = _mm_xor_si128( k12, k11 ); x = _mm_xor_si128( x, k12 ); - x = _mm_aesenc_si128( x, m128_zero ); - k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, zero ) ); k13 = _mm_xor_si128( k13, k12 ); x = _mm_xor_si128( x, k13 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); p3 = _mm_xor_si128( p3, x ); @@ -261,73 +262,73 @@ c512( sph_shavite_big_context *sc, const void *msg ) k00 = _mm_xor_si128( k00, mm128_ror256hi_1x32( k12, k13 ) ); x = _mm_xor_si128( p1, k00 ); - x = 
_mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k01 = _mm_xor_si128( k01, mm128_ror256hi_1x32( k13, k00 ) ); x = _mm_xor_si128( x, k01 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k02 = _mm_xor_si128( k02, mm128_ror256hi_1x32( k00, k01 ) ); x = _mm_xor_si128( x, k02 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k03 = _mm_xor_si128( k03, mm128_ror256hi_1x32( k01, k02 ) ); x = _mm_xor_si128( x, k03 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); p0 = _mm_xor_si128( p0, x ); k10 = _mm_xor_si128( k10, mm128_ror256hi_1x32( k02, k03 ) ); x = _mm_xor_si128( p3, k10 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k11 = _mm_xor_si128( k11, mm128_ror256hi_1x32( k03, k10 ) ); x = _mm_xor_si128( x, k11 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k12 = _mm_xor_si128( k12, mm128_ror256hi_1x32( k10, k11 ) ); x = _mm_xor_si128( x, k12 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); k13 = _mm_xor_si128( k13, mm128_ror256hi_1x32( k11, k12 ) ); x = _mm_xor_si128( x, k13 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); p2 = _mm_xor_si128( p2, x ); } // round 13 - k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, m128_zero ) ); + k00 = mm128_ror_1x32( _mm_aesenc_si128( k00, zero ) ); k00 = _mm_xor_si128( k00, k13 ); x = _mm_xor_si128( p0, k00 ); - x = _mm_aesenc_si128( x, m128_zero ); - k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k01 = mm128_ror_1x32( _mm_aesenc_si128( k01, zero ) ); k01 = _mm_xor_si128( k01, k00 ); x = _mm_xor_si128( x, k01 ); - x = _mm_aesenc_si128( x, m128_zero ); - k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k02 = mm128_ror_1x32( _mm_aesenc_si128( k02, zero ) ); k02 = _mm_xor_si128( k02, k01 ); x = _mm_xor_si128( x, k02 ); - x = _mm_aesenc_si128( x, m128_zero ); - k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k03 = mm128_ror_1x32( _mm_aesenc_si128( k03, zero ) ); k03 = _mm_xor_si128( k03, k02 ); x = _mm_xor_si128( x, k03 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); p3 = _mm_xor_si128( p3, x ); - k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, m128_zero ) ); + k10 = mm128_ror_1x32( _mm_aesenc_si128( k10, zero ) ); k10 = _mm_xor_si128( k10, k03 ); x = _mm_xor_si128( p2, k10 ); - x = _mm_aesenc_si128( x, m128_zero ); - k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k11 = mm128_ror_1x32( _mm_aesenc_si128( k11, zero ) ); k11 = _mm_xor_si128( k11, k10 ); x = _mm_xor_si128( x, k11 ); - x = _mm_aesenc_si128( x, m128_zero ); - k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k12 = mm128_ror_1x32( _mm_aesenc_si128( k12, zero ) ); k12 = _mm_xor_si128( k12, _mm_xor_si128( k11, _mm_set_epi32( ~sc->count2, sc->count3, sc->count0, sc->count1 ) ) ); x = _mm_xor_si128( x, k12 ); - x = _mm_aesenc_si128( x, m128_zero ); - k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, m128_zero ) ); + x = _mm_aesenc_si128( x, zero ); + k13 = mm128_ror_1x32( _mm_aesenc_si128( k13, zero ) ); k13 = _mm_xor_si128( k13, k12 ); x = _mm_xor_si128( x, k13 ); - x = _mm_aesenc_si128( x, m128_zero ); + x = _mm_aesenc_si128( x, zero ); p1 = _mm_xor_si128( p1, x ); diff --git a/algo/simd/simd-hash-2way.c 
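// A minimal sketch of the pattern applied throughout c512() above: the null
// AES round key is hoisted into a local register constant once per call and
// passed explicitly, instead of re-reading a memory-resident m128_zero for
// every _mm_aesenc_si128. The helper name is hypothetical, for illustration.
#include <immintrin.h>   // _mm_xor_si128, _mm_aesenc_si128 (compile with -maes)

static inline __m128i sketch_shavite_step( __m128i x, __m128i k, const __m128i zero )
{
    x = _mm_xor_si128( x, k );           // mix in the round key from the schedule
    return _mm_aesenc_si128( x, zero );  // one AES round with a null key
}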
b/algo/simd/simd-hash-2way.c index eb42f49..41f4723 100644 --- a/algo/simd/simd-hash-2way.c +++ b/algo/simd/simd-hash-2way.c @@ -342,6 +342,7 @@ void fft128_2way( void *a ) void fft128_2way_msg( uint16_t *a, const uint8_t *x, int final ) { + const __m256i zero = _mm256_setzero_si256(); static const m256_v16 Tweak = {{ 0,0,0,0,0,0,0,1, 0,0,0,0,0,0,0,1, }}; static const m256_v16 FinalTweak = {{ 0,0,0,0,0,1,0,1, 0,0,0,0,0,1,0,1, }}; @@ -352,10 +353,10 @@ void fft128_2way_msg( uint16_t *a, const uint8_t *x, int final ) #define UNPACK( i ) \ do { \ __m256i t = X[i]; \ - A[2*i] = _mm256_unpacklo_epi8( t, m256_zero ); \ + A[2*i] = _mm256_unpacklo_epi8( t, zero ); \ A[2*i+8] = _mm256_mullo_epi16( A[2*i], FFT128_Twiddle[2*i].v256 ); \ A[2*i+8] = REDUCE(A[2*i+8]); \ - A[2*i+1] = _mm256_unpackhi_epi8( t, m256_zero ); \ + A[2*i+1] = _mm256_unpackhi_epi8( t, zero ); \ A[2*i+9] = _mm256_mullo_epi16(A[2*i+1], FFT128_Twiddle[2*i+1].v256 ); \ A[2*i+9] = REDUCE(A[2*i+9]); \ } while(0) @@ -365,10 +366,10 @@ do { \ do { \ __m256i t = X[i]; \ __m256i tmp; \ - A[2*i] = _mm256_unpacklo_epi8( t, m256_zero ); \ + A[2*i] = _mm256_unpacklo_epi8( t, zero ); \ A[2*i+8] = _mm256_mullo_epi16( A[ 2*i ], FFT128_Twiddle[ 2*i ].v256 ); \ A[2*i+8] = REDUCE( A[ 2*i+8 ] ); \ - tmp = _mm256_unpackhi_epi8( t, m256_zero ); \ + tmp = _mm256_unpackhi_epi8( t, zero ); \ A[2*i+1] = _mm256_add_epi16( tmp, tw ); \ A[2*i+9] = _mm256_mullo_epi16( _mm256_sub_epi16( tmp, tw ), \ FFT128_Twiddle[ 2*i+1 ].v256 );\ @@ -392,6 +393,7 @@ do { \ void fft256_2way_msg( uint16_t *a, const uint8_t *x, int final ) { + const __m256i zero = _mm256_setzero_si256(); static const m256_v16 Tweak = {{ 0,0,0,0,0,0,0,1, 0,0,0,0,0,0,0,1, }}; static const m256_v16 FinalTweak = {{ 0,0,0,0,0,1,0,1, 0,0,0,0,0,1,0,1, }}; @@ -402,11 +404,11 @@ void fft256_2way_msg( uint16_t *a, const uint8_t *x, int final ) #define UNPACK( i ) \ do { \ __m256i t = X[i]; \ - A[ 2*i ] = _mm256_unpacklo_epi8( t, m256_zero ); \ + A[ 2*i ] = _mm256_unpacklo_epi8( t, zero ); \ A[ 2*i + 16 ] = _mm256_mullo_epi16( A[ 2*i ], \ FFT256_Twiddle[ 2*i ].v256 ); \ A[ 2*i + 16 ] = REDUCE( A[ 2*i + 16 ] ); \ - A[ 2*i + 1 ] = _mm256_unpackhi_epi8( t, m256_zero ); \ + A[ 2*i + 1 ] = _mm256_unpackhi_epi8( t, zero ); \ A[ 2*i + 17 ] = _mm256_mullo_epi16( A[ 2*i + 1 ], \ FFT256_Twiddle[ 2*i + 1 ].v256 ); \ A[ 2*i + 17 ] = REDUCE( A[ 2*i + 17 ] ); \ @@ -417,11 +419,11 @@ do { \ do { \ __m256i t = X[i]; \ __m256i tmp; \ - A[ 2*i ] = _mm256_unpacklo_epi8( t, m256_zero ); \ + A[ 2*i ] = _mm256_unpacklo_epi8( t, zero ); \ A[ 2*i + 16 ] = _mm256_mullo_epi16( A[ 2*i ], \ FFT256_Twiddle[ 2*i ].v256 ); \ A[ 2*i + 16 ] = REDUCE( A[ 2*i + 16 ] ); \ - tmp = _mm256_unpackhi_epi8( t, m256_zero ); \ + tmp = _mm256_unpackhi_epi8( t, zero ); \ A[ 2*i + 1 ] = _mm256_add_epi16( tmp, tw ); \ A[ 2*i + 17 ] = _mm256_mullo_epi16( _mm256_sub_epi16( tmp, tw ), \ FFT256_Twiddle[ 2*i + 1 ].v256 ); \ @@ -446,6 +448,8 @@ do { \ fft128_2way( a+256 ); } +#define c1_16( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }} + void rounds512_2way( uint32_t *state, const uint8_t *msg, uint16_t *fft ) { register __m256i S0l, S1l, S2l, S3l; @@ -453,7 +457,8 @@ void rounds512_2way( uint32_t *state, const uint8_t *msg, uint16_t *fft ) __m256i *S = (__m256i*) state; __m256i *M = (__m256i*) msg; __m256i *W = (__m256i*) fft; - static const m256_v16 code[] = { mm256_const1_16(185), mm256_const1_16(233) }; + static const m256_v16 code[] = { c1_16(185), c1_16(233) }; + S0l = _mm256_xor_si256( S[0], M[0] ); S0h = _mm256_xor_si256( S[1], M[1] ); diff --git 
a/configure b/configure index 11145bf..6dae213 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.5.2. +# Generated by GNU Autoconf 2.69 for cpuminer-opt 3.9.5.3. # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. @@ -577,8 +577,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='cpuminer-opt' PACKAGE_TARNAME='cpuminer-opt' -PACKAGE_VERSION='3.9.5.2' -PACKAGE_STRING='cpuminer-opt 3.9.5.2' +PACKAGE_VERSION='3.9.5.3' +PACKAGE_STRING='cpuminer-opt 3.9.5.3' PACKAGE_BUGREPORT='' PACKAGE_URL='' @@ -1332,7 +1332,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures cpuminer-opt 3.9.5.2 to adapt to many kinds of systems. +\`configure' configures cpuminer-opt 3.9.5.3 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1404,7 +1404,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of cpuminer-opt 3.9.5.2:";; + short | recursive ) echo "Configuration of cpuminer-opt 3.9.5.3:";; esac cat <<\_ACEOF @@ -1509,7 +1509,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -cpuminer-opt configure 3.9.5.2 +cpuminer-opt configure 3.9.5.3 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -2012,7 +2012,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by cpuminer-opt $as_me 3.9.5.2, which was +It was created by cpuminer-opt $as_me 3.9.5.3, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -2993,7 +2993,7 @@ fi # Define the identity of the package. PACKAGE='cpuminer-opt' - VERSION='3.9.5.2' + VERSION='3.9.5.3' cat >>confdefs.h <<_ACEOF @@ -6690,7 +6690,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by cpuminer-opt $as_me 3.9.5.2, which was +This file was extended by cpuminer-opt $as_me 3.9.5.3, which was generated by GNU Autoconf 2.69. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -6756,7 +6756,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -cpuminer-opt config.status 3.9.5.2 +cpuminer-opt config.status 3.9.5.3 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index 3ea0031..c1b4825 100644 --- a/configure.ac +++ b/configure.ac @@ -1,4 +1,4 @@ -AC_INIT([cpuminer-opt], [3.9.5.2]) +AC_INIT([cpuminer-opt], [3.9.5.3]) AC_PREREQ([2.59c]) AC_CANONICAL_SYSTEM diff --git a/cpu-miner.c b/cpu-miner.c index b2a9e6f..cf9fcd6 100644 --- a/cpu-miner.c +++ b/cpu-miner.c @@ -848,7 +848,8 @@ static double shash_sum = 0.; static double bhash_sum = 0.; static double time_sum = 0.; static double latency_sum = 0.; -static uint64_t submits_sum = 0; +static uint64_t submit_sum = 0; +static uint64_t reject_sum = 0; struct share_stats_t { @@ -943,7 +944,8 @@ static int share_result( int result, struct work *null_work, shash_sum += share_hash; bhash_sum += block_hash; time_sum += share_time; - submits_sum ++; + submit_sum ++; + reject_sum += (uint64_t)!result; latency_sum += latency; pthread_mutex_unlock( &stats_lock ); @@ -2118,30 +2120,49 @@ static void *miner_thread( void *userdata ) double hash = shash_sum; shash_sum = 0.; double bhash = bhash_sum; bhash_sum = 0.; double time = time_sum; time_sum = 0.; - uint64_t submits = submits_sum; submits_sum = 0; + uint64_t submits = submit_sum; submit_sum = 0; + uint64_t rejects = reject_sum; reject_sum = 0; uint64_t latency = latency_sum; latency_sum = 0; memcpy( &five_min_start, &time_now, sizeof time_now ); pthread_mutex_unlock( &stats_lock ); - double ghrate = global_hashrate; - double shrate = time == 0. ? 0. : hash / time; - double scaled_shrate = shrate; - double avg_share = bhash == 0. ? 0. : hash / bhash * 100.; + double ghrate = global_hashrate; + double scaled_ghrate = ghrate; + double shrate = time == 0. ? 0. : hash / time; + double scaled_shrate = shrate; + double avg_share = bhash == 0. ? 0. : hash / bhash * 100.; + uint64_t avg_latency = 0; + double latency_pc = 0.; + double rejects_pc = 0.; + double submit_rate = 0.; char shr[32]; char shr_units[4] = {0}; + char ghr[32]; + char ghr_units[4] = {0}; int temp = cpu_temp(0); - char timestr[32]; + char tempstr[32]; - latency = submits ? latency / submits : 0; + if ( submits ) + avg_latency = latency / submits; + + if ( time != 0. ) + { + submit_rate = (double)submits*60. 
/ time; + rejects_pc = (double)rejects / (time*10.); + latency_pc = (double)latency / ( time*10.); + } + scale_hash_for_display( &scaled_shrate, shr_units ); + scale_hash_for_display( &scaled_ghrate, ghr_units ); + sprintf( ghr, "%.2f %sH/s", scaled_ghrate, ghr_units ); if ( use_colors ) { - if ( shrate > (32.*ghrate) ) + if ( shrate > (128.*ghrate) ) sprintf( shr, "%s%.2f %sH/s%s", CL_MAG, scaled_shrate, shr_units, CL_WHT ); - else if ( shrate > (8.*ghrate) ) + else if ( shrate > (16.*ghrate) ) sprintf( shr, "%s%.2f %sH/s%s", CL_GRN, scaled_shrate, shr_units, CL_WHT ); else if ( shrate > 2.0*ghrate ) @@ -2153,53 +2174,99 @@ static void *miner_thread( void *userdata ) sprintf( shr, "%s%.2f %sH/s%s", CL_YLW, scaled_shrate, shr_units, CL_WHT ); - if ( temp >= 80 ) sprintf( timestr, "%s%d C%s", + if ( temp >= 80 ) sprintf( tempstr, "%s%d C%s", CL_RED, temp, CL_WHT ); - else if (temp >=70 ) sprintf( timestr, "%s%d C%s", + else if (temp >=70 ) sprintf( tempstr, "%s%d C%s", CL_YLW, temp, CL_WHT ); - else sprintf( timestr, "%d C", temp ); + else sprintf( tempstr, "%d C", temp ); } else { sprintf( shr, "%.2f %sH/s", scaled_shrate, shr_units ); - sprintf( timestr, "%d C", temp ); + sprintf( tempstr, "%d C", temp ); } + + applog(LOG_NOTICE,"Submitted %d shares in %dm%02ds.", + (uint64_t)submits, et.tv_sec / 60, et.tv_sec % 60 ); + applog(LOG_NOTICE,"%d rejects (%.2f%%), %.5f%% block share.", + rejects, rejects_pc, avg_share ); + applog(LOG_NOTICE,"Avg hashrate: Miner %s, Share %s.", ghr, shr ); + +#if ((defined(_WIN64) || defined(__WINDOWS__))) + applog(LOG_NOTICE,"Shares/min: %.2f, latency %d ms (%.2f%%).", + submit_rate, avg_latency, latency_pc ); + +#else + applog(LOG_NOTICE,"Shares/min: %.2f, latency %d ms (%.2f%%), temp: %s.", + submit_rate, avg_latency, latency_pc, tempstr ); +#endif + +/* applog(LOG_NOTICE,"Submitted %d shares in %dm%02ds, %.5f%% block share.", (uint64_t)submits, et.tv_sec / 60, et.tv_sec % 60, avg_share ); #if ((defined(_WIN64) || defined(__WINDOWS__))) - applog(LOG_NOTICE,"Share hashrate %s, latency %d ms.", - shr, latency ); + applog(LOG_NOTICE,"Share hashrate %s, latency %d ms (%.2f%%).", + shr, avg_latency, latency_pc ); #else - applog(LOG_NOTICE,"Share hashrate %s, latency %d ms, temp %s.", - shr, latency, timestr ); + applog(LOG_NOTICE,"Share hashrate %s, latency %d ms (%.2f%%), temp %s.", + shr, avg_latency, latency_pc, tempstr ); #endif +*/ applog(LOG_INFO,"- - - - - - - - - - - - - - - - - - - - - - - - - - -"); } // display hashrate - if ( opt_hash_meter ) + if ( !opt_quiet ) { char hc[16]; char hr[16]; char hc_units[2] = {0,0}; char hr_units[2] = {0,0}; - double hashcount = thr_hashcount[thr_id]; - double hashrate = thr_hashrates[thr_id]; - if ( hashcount ) + double hashcount; + double hashrate; + if ( opt_hash_meter ) { - scale_hash_for_display( &hashcount, hc_units ); - scale_hash_for_display( &hashrate, hr_units ); - if ( hc_units[0] ) - sprintf( hc, "%.2f", hashcount ); - else // no fractions of a hash - sprintf( hc, "%.0f", hashcount ); - sprintf( hr, "%.2f", hashrate ); - applog( LOG_INFO, "CPU #%d: %s %sH, %s %sH/s", - thr_id, hc, hc_units, hr, hr_units ); + hashcount = thr_hashcount[thr_id]; + hashrate = thr_hashrates[thr_id]; + if ( hashcount != 0. 
) + { + scale_hash_for_display( &hashcount, hc_units ); + scale_hash_for_display( &hashrate, hr_units ); + if ( hc_units[0] ) + sprintf( hc, "%.2f", hashcount ); + else // no fractions of a hash + sprintf( hc, "%.0f", hashcount ); + sprintf( hr, "%.2f", hashrate ); + applog( LOG_INFO, "CPU #%d: %s %sH, %s %sH/s", + thr_id, hc, hc_units, hr, hr_units ); + } + } + if ( thr_id == 0 ) + { + hashcount = 0.; + hashrate = 0.; + for ( i = 0; i < opt_n_threads; i++ ) + { + hashrate += thr_hashrates[i]; + hashcount += thr_hashcount[i]; + } + if ( hashcount != 0. ) + { + scale_hash_for_display( &hashcount, hc_units ); + scale_hash_for_display( &hashrate, hr_units ); + if ( hc_units[0] ) + sprintf( hc, "%.2f", hashcount ); + else // no fractions of a hash + sprintf( hc, "%.0f", hashcount ); + sprintf( hr, "%.2f", hashrate ); + applog( LOG_NOTICE, "Miner perf: %s %sH, %s %sH/s.", + hc, hc_units, hr, hr_units ); + } } } + // Display benchmark total // Update hashrate for API if no shares accepted yet. if ( ( opt_benchmark || !accepted_share_count ) @@ -2212,7 +2279,7 @@ static void *miner_thread( void *userdata ) hashrate += thr_hashrates[i]; hashcount += thr_hashcount[i]; } - if ( hashcount ) + if ( hashcount != 0. ) { global_hashcount = hashcount; global_hashrate = hashrate; diff --git a/simd-utils.h b/simd-utils.h index 71cedee..3bc0743 100644 --- a/simd-utils.h +++ b/simd-utils.h @@ -174,32 +174,32 @@ #if defined(__MMX__) // 64 bit vectors -#include "simd-utils/simd-mmx.h" +#include "simd-utils/simd-64.h" #include "simd-utils/intrlv-mmx.h" #if defined(__SSE2__) // 128 bit vectors -#include "simd-utils/simd-sse2.h" +#include "simd-utils/simd-128.h" #include "simd-utils/intrlv-sse2.h" #if defined(__AVX__) // 256 bit vector basics -#include "simd-utils/simd-avx.h" +#include "simd-utils/simd-256.h" #include "simd-utils/intrlv-avx.h" #if defined(__AVX2__) // 256 bit everything else -#include "simd-utils/simd-avx2.h" +//#include "simd-utils/simd-avx2.h" #include "simd-utils/intrlv-avx2.h" // Skylake-X has all these #if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) // 512 bit vectors -#include "simd-utils/simd-avx512.h" +#include "simd-utils/simd-512.h" #include "simd-utils/intrlv-avx512.h" #endif // MMX diff --git a/simd-utils/intrlv-avx.h b/simd-utils/intrlv-avx.h index 76673ec..400f060 100644 --- a/simd-utils/intrlv-avx.h +++ b/simd-utils/intrlv-avx.h @@ -1,6 +1,50 @@ #if !defined(INTRLV_AVX_H__) #define INTRLV_AVX_H__ 1 +// philosophical discussion +// +// transitions: +// +// int32 <-> int64 +// uint64_t = (uint64_t)int32_lo | ( (uint64_t)int32_hi << 32 ) +// Efficient transition and post processing, 32 bit granularity is lost. +// +// int32 <-> m64 +// More complex, 32 bit granularity maintained, limited number of mmx regs. +// int32 <-> int64 <-> m64 might be more efficient. +// +// int32 <-> m128 +// Expensive, current implementation. +// +// int32 <-> m256 +// Very expensive multi stage, current implementation. +// +// int64/m64 <-> m128 +// Efficient, agnostic to native element size. Common. +// +// m128 <-> m256 +// Expensive for a single instruction, unavoidable. Common. +// +// Multi stage options +// +// int32 <-> int64 -> m128 +// More efficient than insert32, granularity maintained. Common. +// +// int64 <-> m128 -> m256 +// Unavoidable, reasonably efficient. Common +// +// int32 <-> int64 -> m128 -> m256 +// Seems inevitable, most efficient despite number of stages. Common. +// +// Implementation plan. +// +// 1. Complete m128 <-> m256 +// 2. 
Implement int64 <-> m128 +// 3. Combine int64 <-> m128 <-> m256 +// 4. Implement int32 <-> int64 <-> m128 +// 5. Combine int32 <-> int64 <-> m128 <-> m256 +// + #if defined(__AVX__) // Convenient short cuts for local use only diff --git a/simd-utils/simd-sse2.h b/simd-utils/simd-128.h similarity index 60% rename from simd-utils/simd-sse2.h rename to simd-utils/simd-128.h index 78594d7..a1d3cd4 100644 --- a/simd-utils/simd-sse2.h +++ b/simd-utils/simd-128.h @@ -1,5 +1,5 @@ -#if !defined(SIMD_SSE2_H__) -#define SIMD_SSE2_H__ 1 +#if !defined(SIMD_128_H__) +#define SIMD_128_H__ 1 #if defined(__SSE2__) @@ -15,69 +15,148 @@ // // 128 bit operations are enhanced with uint128 which adds 128 bit integer // support for arithmetic and other operations. Casting to uint128_t is not -// free, it requires a move from mmx to gpr but is often the only way or -// the more efficient way for certain operations. - -// Compile time constant initializers are type agnostic and can have -// a pointer handle of almost any type. All arguments must be scalar constants. -// up to 64 bits. These iniitializers should only be used at compile time -// to initialize vector arrays. All data reside in memory. +// efficient but is sometimes the only way for certain operations. +// +// Constants are an issue with simd. Simply put, immediate constants don't +// exist. All simd constants either reside in memory or a register. +// The distibction is made below with c128 being memory resident defined +// at compile time and m128 being register defined at run time. +// +// All run time constants must be generated using their components elements +// incurring significant overhead. The more elements the more overhead +// both in instructions and in GP register usage. Whenever possible use +// 64 bit constant elements regardless of the actual element size. +// +// Due to the cost of generating constants they should not be regenerated +// in the same function. Instead, define a local const. +// +// Some constant values can be generated using shortcuts. Zero for example +// is as simple as XORing any register with itself, and is implemented +// in the setzero instrinsic. These shortcuts must be implemented is asm +// due to doing things the compiler would complain about. Another single +// instruction constant is -1, defined below. Others may be added as the need +// arises. Even single instruction constants are less efficient than local +// register variables so the advice above stands. +// +// One common use for simd constants is as a control index for some simd +// instructions like blend and shuffle. The utilities below do not take this +// into account. Those that generate a simd constant should not be used +// repeatedly. It may be better for the application to reimplement the +// utility to better suit its usage. // -// These are of limited use, it is often simpler to use uint64_t arrays -// and cast as required. 
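// Illustrative sketch of the "int32 <-> int64 -> m128" path from the plan
// above: pack pairs of 32-bit lane values into 64-bit words, then combine two
// 64-bit words into one vector. The helper name is hypothetical.
#include <stdint.h>
#include <immintrin.h>

static inline __m128i sketch_pack_4x32( uint32_t hi1, uint32_t lo1,
                                        uint32_t hi0, uint32_t lo0 )
{
    uint64_t w1 = (uint64_t)lo1 | ( (uint64_t)hi1 << 32 );   // int32 -> int64
    uint64_t w0 = (uint64_t)lo0 | ( (uint64_t)hi0 << 32 );
    return _mm_set_epi64x( (int64_t)w1, (int64_t)w0 );       // int64 -> m128
}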
- -#define mm128_const_64( x1, x0 ) {{ x1, x0 }} -#define mm128_const1_64( x ) {{ x, x }} - -#define mm128_const_32( x3, x2, x1, x0 ) {{ x3, x2, x1, x0 }} -#define mm128_const1_32( x ) {{ x,x,x,x }} - -#define mm128_const_16( x7, x6, x5, x4, x3, x2, x1, x0 ) \ - {{ x7, x6, x5, x4, x3, x2, x1, x0 }} -#define mm128_const1_16( x ) {{ x,x,x,x, x,x,x,x }} - -#define mm128_const_8( x15, x14, x13, x12, x11, x10, x09, x08, \ - x07, x06, x05, x04, x03, x02, x01, x00 ) \ - {{ x15, x14, x13, x12, x11, x10, x09, x08, \ - x07, x06, x05, x04, x03, x02, x01, x00 }} -#define mm128_const1_8( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }} - -// Compile time constants, use only for compile time initializing. -#define c128_zero mm128_const1_64( 0ULL ) -#define c128_one_128 mm128_const_64( 0ULL, 1ULL ) -#define c128_one_64 mm128_const1_64( 1ULL ) -#define c128_one_32 mm128_const1_32( 1UL ) -#define c128_one_16 mm128_const1_16( 1U ) -#define c128_one_8 mm128_const1_8( 1U ) -#define c128_neg1 mm128_const1_64( 0xFFFFFFFFFFFFFFFFULL ) -#define c128_neg1_64 mm128_const1_64( 0xFFFFFFFFFFFFFFFFULL ) -#define c128_neg1_32 mm128_const1_32( 0xFFFFFFFFUL ) -#define c128_neg1_16 mm128_const1_32( 0xFFFFU ) -#define c128_neg1_8 mm128_const1_32( 0xFFU ) // // Pseudo constants. // // These can't be used for compile time initialization. // These should be used for all simple vectors. -// -// _mm_setzero_si128 uses pxor instruction, it's unclear what _mm_set_epi does. -// Clearly it's faster than reading a memory resident constant. Assume set -// is also faster. -// If a pseudo constant is used often in a function it may be preferable -// to define a register variable to represent that constant. -// register __m128i zero = mm_setzero_si128(). -// This reduces any references to a move instruction. +// Repeated usage of any simd pseudo-constant should use a locally defined +// const rather than recomputing it for every reference. #define m128_zero _mm_setzero_si128() -#define m128_one_128 _mm_set_epi64x( 0ULL, 1ULL ) -#define m128_one_64 _mm_set1_epi64x( 1ULL ) -#define m128_one_32 _mm_set1_epi32( 1UL ) -#define m128_one_16 _mm_set1_epi16( 1U ) -#define m128_one_8 _mm_set1_epi8( 1U ) +// As suggested by Intel... +// Arg passing for simd registers is assumed to be first output arg, +// then input args, then locals. This is probably wrong, gcc likely picks +// whichever register is currently holding the variable, or whichever +// register is available to hold it. Nevertheless, all args are specified +// by their arg number and local variables use registers starting at +// last arg + 1, by type. +// Output args don't need to be listed as clobbered. 
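// Intrinsics-only sketch of the single-instruction shortcuts described above:
// zero from xor-with-self and all-ones (-1) from compare-equal-with-self.
// Compilers generally emit pxor / pcmpeqd for these; the names are illustrative.
#include <immintrin.h>

static inline __m128i sketch_zero128( void ) { return _mm_setzero_si128(); }

static inline __m128i sketch_neg1_128( void )
{
    __m128i a = _mm_setzero_si128();
    return _mm_cmpeq_epi32( a, a );   // x == x in every lane -> all bits set
}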
-#define m128_neg1 _mm_set1_epi64x( 0xFFFFFFFFFFFFFFFFULL ) + +static inline __m128i m128_one_64_fn() +{ + __m128i a; + asm( "pxor %0, %0\n\t" + "pcmpeqd %%xmm1, %%xmm1\n\t" + "psubq %%xmm1, %0\n\t" + :"=x"(a) + : + : "xmm1" ); + return a; +} +#define m128_one_64 m128_one_64_fn() + +static inline __m128i m128_one_32_fn() +{ + __m128i a; + asm( "pxor %0, %0\n\t" + "pcmpeqd %%xmm1, %%xmm1\n\t" + "psubd %%xmm1, %0\n\t" + :"=x"(a) + : + : "xmm1" ); + return a; +} +#define m128_one_32 m128_one_32_fn() + +static inline __m128i m128_one_16_fn() +{ + __m128i a; + asm( "pxor %0, %0\n\t" + "pcmpeqd %%xmm1, %%xmm1\n\t" + "psubw %%xmm1, %0\n\t" + :"=x"(a) + : + : "xmm1" ); + return a; +} +#define m128_one_16 m128_one_16_fn() + +static inline __m128i m128_one_8_fn() +{ + __m128i a; + asm( "pxor %0, %0\n\t" + "pcmpeqd %%xmm1, %%xmm1\n\t" + "psubb %%xmm1, %0\n\t" + :"=x"(a) + : + : "xmm1" ); + return a; +} +#define m128_one_8 m128_one_8_fn() + +static inline __m128i m128_neg1_fn() +{ + __m128i a; + asm( "pcmpeqd %0, %0\n\t" + :"=x"(a) ); + return a; +} +#define m128_neg1 m128_neg1_fn() + +#if defined(__SSE41__) + +static inline __m128i m128_one_128_fn() +{ + __m128i a; + asm( "pinsrq $0, $1, %0\n\t" + "pinsrq $1, $0, %0\n\t" + :"=x"(a) ); + return a; +} +#define m128_one_128 m128_one_128_fn() + +// alternative to _mm_set_epi64x, doesn't use mem, +// cost = 2 pinsrt, estimate 4 clocks. +static inline __m128i m128_const_64( uint64_t hi, uint64_t lo ) +{ + __m128i a; + asm( "pinsrq $0, %2, %0\n\t" + "pinsrq $1, %1, %0\n\t" + :"=x"(a) + :"r"(hi),"r"(lo) ); + return a; +} + +#else + +#define m128_one_128 _mm_set_epi64x( 0ULL, 1ULL ) + +#define m128_const_64 _mm_set_epi64x + +#endif // // Basic operations without equivalent SIMD intrinsic @@ -90,9 +169,21 @@ #define mm128_negate_32( v ) _mm_sub_epi32( m128_zero, v ) #define mm128_negate_16( v ) _mm_sub_epi16( m128_zero, v ) -// Use uint128_t for most arithmetic, bit shift, comparison operations -// spanning all 128 bits. Some extractions are also more efficient -// casting __m128i as uint128_t and usingstandard operators. +// Add 4 values, fewer dependencies than sequential addition. +#define mm128_add4_64( a, b, c, d ) \ + _mm_add_epi64( _mm_add_epi64( a, b ), _mm_add_epi64( c, d ) ) + +#define mm128_add4_32( a, b, c, d ) \ + _mm_add_epi32( _mm_add_epi32( a, b ), _mm_add_epi32( c, d ) ) + +#define mm128_add4_16( a, b, c, d ) \ + _mm_add_epi16( _mm_add_epi16( a, b ), _mm_add_epi16( c, d ) ) + +#define mm128_add4_8( a, b, c, d ) \ + _mm_add_epi8( _mm_add_epi8( a, b ), _mm_add_epi8( c, d ) ) + +#define mm128_xor4( a, b, c, d ) \ + _mm_xor_si128( _mm_xor_si128( a, b ), _mm_xor_si128( c, d ) ) // This isn't cheap, not suitable for bulk usage. #define mm128_extr_4x32( a0, a1, a2, a3, src ) \ @@ -105,6 +196,16 @@ do { \ // Horizontal vector testing +#if defined(__SSE41__) + +#define mm128_allbits0( a ) _mm_testz_si128( a, a ) +#define mm128_allbits1( a ) _mm_testc_si128( a, m128_neg1 ) +#define mm128_allbitsne( a ) _mm_testnzc_si128( a, m128_neg1 ) +#define mm128_anybits0 mm128_allbitsne +#define mm128_anybits1 mm128_allbitsne + +#else // SSE2 + // Bit-wise test of entire vector, useful to test results of cmp. #define mm128_anybits0( a ) (uint128_t)(a) #define mm128_anybits1( a ) (((uint128_t)(a))+1) @@ -112,6 +213,8 @@ do { \ #define mm128_allbits0( a ) ( !mm128_anybits1(a) ) #define mm128_allbits1( a ) ( !mm128_anybits0(a) ) +#endif // SSE41 else SSE2 + // // Vector pointer cast @@ -139,6 +242,7 @@ do { \ #else +// Doesn't work with register variables. 
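// Usage sketch of the guidance above, relying on the m128_one_64 and
// mm128_xor4 definitions earlier in this header: a pseudo-constant used more
// than once is hoisted into a local const, and the 4-input XOR shortens the
// dependency chain versus three chained xors. Names are hypothetical.
static inline void sketch_mix_128( __m128i *d, const __m128i *s )
{
    const __m128i one = m128_one_64;              // generated once per call
    d[0] = mm128_xor4( s[0], s[1], s[2], s[3] );  // tree of xors, depth 2
    d[1] = _mm_add_epi64( d[0], one );
}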
#define mm128_extr_64(a,n) (((uint64_t*)&a)[n]) #define mm128_extr_32(a,n) (((uint32_t*)&a)[n]) @@ -209,7 +313,7 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n ) // Bit rotations // AVX512 has implemented bit rotation for 128 bit vectors with -// 64 and 32 bit elements. Not really useful. +// 64 and 32 bit elements. // // Rotate each element of v by c bits @@ -233,13 +337,16 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n ) _mm_or_si128( _mm_slli_epi16( v, c ), _mm_srli_epi16( v, 16-(c) ) ) // -// Rotate elements accross all lanes +// Rotate vector elements accross all lanes #define mm128_swap_64( v ) _mm_shuffle_epi32( v, 0x4e ) #define mm128_ror_1x32( v ) _mm_shuffle_epi32( v, 0x39 ) #define mm128_rol_1x32( v ) _mm_shuffle_epi32( v, 0x93 ) +#if defined (__SSE3__) +// no SSE2 implementation, no current users + #define mm128_ror_1x16( v ) \ _mm_shuffle_epi8( v, _mm_set_epi8( 1, 0,15,14,13,12,11,10 \ 9, 8, 7, 6, 5, 4, 3, 2 ) ) @@ -252,6 +359,7 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n ) #define mm128_rol_1x8( v ) \ _mm_shuffle_epi8( v, _mm_set_epi8( 14,13,12,11,10, 9, 8, 7, \ 6, 5, 4, 3, 2, 1, 0,15 ) ) +#endif // SSE3 // Rotate 16 byte (128 bit) vector by c bytes. // Less efficient using shift but more versatile. Use only for odd number @@ -262,17 +370,6 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n ) #define mm128_brol( v, c ) \ _mm_or_si128( _mm_slli_si128( v, c ), _mm_srli_si128( v, 16-(c) ) ) -// Invert vector: {3,2,1,0} -> {0,1,2,3} -#define mm128_invert_32( v ) _mm_shuffle_epi32( a, 0x1b ) - -#define mm128_invert_16( v ) \ - _mm_shuffle_epi8( v, _mm_set_epi8( 1, 0, 3, 2, 5, 4, 7, 6, \ - 9, 8, 11,10, 13,12, 15,14 ) ) - -#define mm128_invert_8( v ) \ - _mm_shuffle_epi8( v, _mm_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, \ - 8, 9,10,11,12,13,14,15 ) ) - // // Rotate elements within lanes. 
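// Sketch of what the 0x39 and 0x93 shuffle controls above do to the four
// 32-bit lanes (shown high lane to low lane); values are illustrative only.
#include <immintrin.h>

static inline void sketch_lane_rotates( void )
{
    __m128i v = _mm_set_epi32( 3, 2, 1, 0 );     // lanes { 3, 2, 1, 0 }
    __m128i r = _mm_shuffle_epi32( v, 0x39 );    // mm128_ror_1x32: { 0, 3, 2, 1 }
    __m128i l = _mm_shuffle_epi32( v, 0x93 );    // mm128_rol_1x32: { 2, 1, 0, 3 }
    (void)r; (void)l;
}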
@@ -283,7 +380,6 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n ) #define mm128_rol16_64( v ) _mm_shuffle_epi8( v, \ _mm_set_epi8( 13,12,11,10, 9, 8,15,14, 5, 4, 3, 2, 1, 0, 7, 6 ) - #define mm128_swap16_32( v ) _mm_shuffle_epi8( v, \ _mm_set_epi8( 13,12,15,14, 9,8,11,10, 5,4,7,6, 1,0,3,2 ) @@ -293,17 +389,45 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, int n ) #if defined(__SSSE3__) #define mm128_bswap_64( v ) \ - _mm_shuffle_epi8( v, _mm_set_epi8( 8, 9,10,11,12,13,14,15, \ - 0, 1, 2, 3, 4, 5, 6, 7 ) ) + _mm_shuffle_epi8( v, m128_const64( 0x08090a0b0c0d0e0f, \ + 0x0001020304050607 ) ) #define mm128_bswap_32( v ) \ - _mm_shuffle_epi8( v, _mm_set_epi8( 12,13,14,15, 8, 9,10,11, \ - 4, 5, 6, 7, 0, 1, 2, 3 ) ) + _mm_shuffle_epi8( v, m128_const_64( 0x0c0d0e0f08090a0b, \ + 0x0405060700010203 ) ) #define mm128_bswap_16( v ) \ _mm_shuffle_epi8( v, _mm_set_epi8( 14,15, 12,13, 10,11, 8, 9, \ 6, 7, 4, 5, 2, 3, 0, 1 ) ) +// 8 byte qword * 8 qwords * 2 lanes = 128 bytes +#define mm128_block_bswap_64( d, s ) do \ +{ \ + __m128i ctl = m128_const_64( 0x08090a0b0c0d0e0f, 0x0001020304050607 ); \ + casti_m128i( d, 0 ) = _mm_shuffle_epi8( casti_m128i( s, 0 ), ctl ); \ + casti_m128i( d, 1 ) = _mm_shuffle_epi8( casti_m128i( s, 1 ), ctl ); \ + casti_m128i( d, 2 ) = _mm_shuffle_epi8( casti_m128i( s, 2 ), ctl ); \ + casti_m128i( d, 3 ) = _mm_shuffle_epi8( casti_m128i( s, 3 ), ctl ); \ + casti_m128i( d, 4 ) = _mm_shuffle_epi8( casti_m128i( s, 4 ), ctl ); \ + casti_m128i( d, 5 ) = _mm_shuffle_epi8( casti_m128i( s, 5 ), ctl ); \ + casti_m128i( d, 6 ) = _mm_shuffle_epi8( casti_m128i( s, 6 ), ctl ); \ + casti_m128i( d, 7 ) = _mm_shuffle_epi8( casti_m128i( s, 7 ), ctl ); \ +} while(0) + +// 4 byte dword * 8 dwords * 4 lanes = 128 bytes +#define mm128_block_bswap_32( d, s ) do \ +{ \ + __m128i ctl = m128_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203 ); \ + casti_m128i( d, 0 ) = _mm_shuffle_epi8( casti_m128i( s, 0 ), ctl ); \ + casti_m128i( d, 1 ) = _mm_shuffle_epi8( casti_m128i( s, 1 ), ctl ); \ + casti_m128i( d, 2 ) = _mm_shuffle_epi8( casti_m128i( s, 2 ), ctl ); \ + casti_m128i( d, 3 ) = _mm_shuffle_epi8( casti_m128i( s, 3 ), ctl ); \ + casti_m128i( d, 4 ) = _mm_shuffle_epi8( casti_m128i( s, 4 ), ctl ); \ + casti_m128i( d, 5 ) = _mm_shuffle_epi8( casti_m128i( s, 5 ), ctl ); \ + casti_m128i( d, 6 ) = _mm_shuffle_epi8( casti_m128i( s, 6 ), ctl ); \ + casti_m128i( d, 7 ) = _mm_shuffle_epi8( casti_m128i( s, 7 ), ctl ); \ +} while(0) + #else // SSE2 // Use inline function instead of macro due to multiple statements. @@ -326,16 +450,41 @@ static inline __m128i mm128_bswap_16( __m128i v ) return _mm_or_si128( _mm_slli_epi16( v, 8 ), _mm_srli_epi16( v, 8 ) ); } +static inline void mm128_block_bswap_64( __m128i *d, __m128i *s ) +{ + d[0] = mm128_bswap_32( s[0] ); + d[1] = mm128_bswap_32( s[1] ); + d[2] = mm128_bswap_32( s[2] ); + d[3] = mm128_bswap_32( s[3] ); + d[4] = mm128_bswap_32( s[4] ); + d[5] = mm128_bswap_32( s[5] ); + d[6] = mm128_bswap_32( s[6] ); + d[7] = mm128_bswap_32( s[7] ); +} + +static inline void mm128_block_bswap_32( __m128i *d, __m128i *s ) +{ + d[0] = mm128_bswap_32( s[0] ); + d[1] = mm128_bswap_32( s[1] ); + d[2] = mm128_bswap_32( s[2] ); + d[3] = mm128_bswap_32( s[3] ); + d[4] = mm128_bswap_32( s[4] ); + d[5] = mm128_bswap_32( s[5] ); + d[6] = mm128_bswap_32( s[6] ); + d[7] = mm128_bswap_32( s[7] ); +} + #endif // SSSE3 else SSE2 + // // Rotate in place concatenated 128 bit vectors as one 256 bit vector. // Swap 128 bit vectorse. 
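// Usage sketch for the block byte-swap helpers above: one call endian-swaps a
// full 128 byte block (8 x __m128i) of 32-bit words, e.g. when importing
// big-endian work data. Assumes 16-byte aligned buffers; names are hypothetical.
static inline void sketch_import_be32( uint32_t *dst, uint32_t *src )
{
    mm128_block_bswap_32( (__m128i*)dst, (__m128i*)src );   // 32 dwords swapped
}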
-#define mm128_swap128_256(v1, v2) \ - v1 = _mm_xor_si128(v1, v2); \ - v2 = _mm_xor_si128(v1, v2); \ - v1 = _mm_xor_si128(v1, v2); +#define mm128_swap128_256( v1, v2 ) \ + v1 = _mm_xor_si128( v1, v2 ); \ + v2 = _mm_xor_si128( v1, v2 ); \ + v1 = _mm_xor_si128( v1, v2 ); // Concatenate v1 & v2 and rotate as one 256 bit vector. #if defined(__SSE4_1__) @@ -457,4 +606,4 @@ do { \ #endif // SSE4.1 else SSE2 #endif // __SSE2__ -#endif // SIMD_SSE2_H__ +#endif // SIMD_128_H__ diff --git a/simd-utils/simd-avx2.h b/simd-utils/simd-256.h similarity index 55% rename from simd-utils/simd-avx2.h rename to simd-utils/simd-256.h index c5016c4..ad86df7 100644 --- a/simd-utils/simd-avx2.h +++ b/simd-utils/simd-256.h @@ -1,44 +1,134 @@ -#if !defined(SIMD_AVX2_H__) -#define SIMD_AVX2_H__ 1 +#if !defined(SIMD_256_H__) +#define SIMD_256_H__ 1 -#if defined(__AVX2__) +#if defined(__AVX__) ///////////////////////////////////////////////////////////////////// // // AVX2 256 bit vectors // -// AVX2 is required for integer support of 256 bit vectors. +// Basic support for 256 bit vectors is available with AVX but integer +// support requires AVX2. // Some 256 bit vector utilities require AVX512 or have more efficient // AVX512 implementations. They will be selected automatically but their use // is limited because 256 bit vectors are less likely to be used when 512 // is available. -// Vector type overlays used by compile time vector constants. -// Constants of these types reside in memory. - - // -// Basic operations without SIMD equivalent +// Pseudo constants. +// These can't be used for compile time initialization but are preferable +// for simple constant vectors at run time. For repeated use define a local +// constant to avoid multiple calls to the same macro. -// Bitwise not ( ~x ) -#define mm256_not( x ) _mm256_xor_si256( (x), m256_neg1 ) \ +#define m256_zero _mm256_setzero_si256() -// Unary negation of each element ( -a ) -#define mm256_negate_64( a ) _mm256_sub_epi64( m256_zero, a ) -#define mm256_negate_32( a ) _mm256_sub_epi32( m256_zero, a ) -#define mm256_negate_16( a ) _mm256_sub_epi16( m256_zero, a ) +#define m256_one_256 \ + _mm256_insertf128_si256( _mm256_castsi128_si256( m128_one_128 ), \ + m128_zero, 1 ) -/*************************** - * - * extracti128 (AVX2) vs extractf128 (AVX)??? - - +#define m256_one_128 \ + _mm256_insertf128_si256( _mm256_castsi128_si256( m128_one_128 ), \ + m128_one_128, 1 ) + +// set instructions load memory resident constants, this avoids mem. +// cost 4 pinsert + 1 vinsert, estimate 7 clocks. +#define m256_const_64( i3, i2, i1, i0 ) \ + _mm256_insertf128_si256( _mm256_castsi128_si256( m128_const_64( i1, i0 ) ), \ + m128_const_64( i3, i2 ), 1 ) +#define m256_const1_64( i ) m256_const_64( i, i, i, i ) + +#if defined(__AVX2__) + +// These look like a lot of overhead but the compiler optimizes nicely +// and puts the asm inline in the calling function. Usage is like any +// variable expression. 
+// __m256i foo = m256_one_64; + +static inline __m256i m256_one_64_fn() +{ + __m256i a; + asm( "vpxor %0, %0, %0\n\t" + "vpcmpeqd %%ymm1, %%ymm1, %%ymm1\n\t" + "vpsubq %%ymm1, %0, %0\n\t" + :"=x"(a) + : + : "ymm1" ); + return a; +} +#define m256_one_64 m256_one_64_fn() + +static inline __m256i m256_one_32_fn() +{ + __m256i a; + asm( "vpxor %0, %0, %0\n\t" + "vpcmpeqd %%ymm1, %%ymm1, %%ymm1\n\t" + "vpsubd %%ymm1, %0, %0\n\t" + :"=x"(a) + : + : "ymm1" ); + return a; +} +#define m256_one_32 m256_one_32_fn() + +static inline __m256i m256_one_16_fn() +{ + __m256i a; + asm( "vpxor %0, %0, %0\n\t" + "vpcmpeqd %%ymm1, %%ymm1, %%ymm1\n\t" + "vpsubw %%ymm1, %0, %0\n\t" + :"=x"(a) + : + : "ymm1" ); + return a; +} +#define m256_one_16 m256_one_16_fn() + +static inline __m256i m256_one_8_fn() +{ + __m256i a; + asm( "vpxor %0, %0, %0\n\t" + "vpcmpeqd %%ymm1, %%ymm1, %%ymm1\n\t" + "vpsubb %%ymm1, %0, %0\n\t" + :"=x"(a) + : + : "ymm1" ); + return a; +} +#define m256_one_8 m256_one_8_fn() + +static inline __m256i m256_neg1_fn() +{ + __m256i a; + asm( "vpcmpeqq %0, %0, %0\n\t" + :"=x"(a) ); + return a; +} +#define m256_neg1 m256_neg1_fn() + +#else // AVX + +#define m256_one_64 _mm256_set1_epi64x( 1ULL ) +#define m256_one_32 _mm256_set1_epi64x( 0x0000000100000001ULL ) +#define m256_one_16 _mm256_set1_epi64x( 0x0001000100010001ULL ) +#define m256_one_8 _mm256_set1_epi64x( 0x0101010101010101ULL ) + +// AVX doesn't have inserti128 but insertf128 will do. +// Ideally this can be done with 2 instructions and no temporary variables. +static inline __m256i m256_neg1_fn() +{ + __m128i a = m128_neg1; + return _mm256_insertf128_si256( _mm256_castsi128_si256( a ), a, 1 ); +} +#define m256_neg1 m256_neg1_fn() +//#define m256_neg1 _mm256_set1_epi64x( 0xFFFFFFFFFFFFFFFFULL ) + +#endif // AVX2 else AVX // // Vector size conversion. // // Allows operations on either or both halves of a 256 bit vector serially. // Handy for parallel AES. -// Caveats: +// Caveats when writing: // _mm256_castsi256_si128 is free and without side effects. // _mm256_castsi128_si256 is also free but leaves the high half // undefined. That's ok if the hi half will be subseqnently assigned. @@ -78,14 +168,22 @@ do { \ // Insert b into specified half of a leaving other half of a unchanged. #define mm256_ins_lo128_256( a, b ) _mm256_inserti128_si256( a, b, 0 ) #define mm256_ins_hi128_256( a, b ) _mm256_inserti128_si256( a, b, 1 ) -*/ -/* + // concatenate two 128 bit vectors into one 256 bit vector: { hi, lo } #define mm256_concat_128( hi, lo ) \ mm256_ins_hi128_256( _mm256_castsi128_si256( lo ), hi ) // Horizontal vector testing +#if defined(__AVX2__) + +#define mm256_allbits0( a ) _mm256_testz_si256( a, a ) +#define mm256_allbits1( a ) _mm256_testc_si256( a, m256_neg1 ) +#define mm256_allbitsne( a ) _mm256_testnzc_si256( a, m256_neg1 ) +#define mm256_anybits0 mm256_allbitsne +#define mm256_anybits1 mm256_allbitsne + +#else // AVX // Bit-wise test of entire vector, useful to test results of cmp. #define mm256_anybits0( a ) \ @@ -99,35 +197,20 @@ do { \ #define mm256_allbits0_256( a ) ( !mm256_anybits1(a) ) #define mm256_allbits1_256( a ) ( !mm256_anybits0(a) ) +#endif // AVX2 else AVX + // Parallel AES, for when x is expected to be in a 256 bit register. -#define mm256_aesenc_2x128( x ) \ - mm256_concat_128( \ - _mm_aesenc_si128( mm128_extr_hi128_256( x ), m128_zero ), \ - _mm_aesenc_si128( mm128_extr_lo128_256( x ), m128_zero ) ) +// Use same 128 bit key. 
+#define mm256_aesenc_2x128( x, k ) \ + mm256_concat_128( _mm_aesenc_si128( mm128_extr_hi128_256( x ), k ), \ + _mm_aesenc_si128( mm128_extr_lo128_256( x ), k ) ) -#define mm256_aesenckey_2x128( x, k ) \ - mm256_concat_128( \ - _mm_aesenc_si128( mm128_extr_hi128_256( x ), \ - mm128_extr_lo128_256( k ) ), \ - _mm_aesenc_si128( mm128_extr_hi128_256( x ), \ - mm128_extr_lo128_256( k ) ) ) - -#define mm256_paesenc_2x128( y, x ) do \ +#define mm256_paesenc_2x128( y, x, k ) do \ { \ - __m256i *X = (__m256i*)x; \ - __m256i *Y = (__m256i*)y; \ - y[0] = _mm_aesenc_si128( x[0], m128_zero ); \ - y[1] = _mm_aesenc_si128( x[1], m128_zero ); \ -} while(0); - -// With pointers. -#define mm256_paesenckey_2x128( y, x, k ) do \ -{ \ - __m256i *X = (__m256i*)x; \ - __m256i *Y = (__m256i*)y; \ - __m256i *K = (__m256i*)ky; \ - y[0] = _mm_aesenc_si128( x[0], K[0] ); \ - y[1] = _mm_aesenc_si128( x[1], K[1] ); \ + __m128i *X = (__m128i*)x; \ + __m128i *Y = (__m128i*)y; \ + Y[0] = _mm_aesenc_si128( X[0], k ); \ + Y[1] = _mm_aesenc_si128( X[1], k ); \ } while(0); // @@ -201,7 +284,41 @@ static inline void memset_256( __m256i *dst, const __m256i a, int n ) static inline void memcpy_256( __m256i *dst, const __m256i *src, int n ) { for ( int i = 0; i < n; i ++ ) dst[i] = src[i]; } -*************************************/ +/////////////////////////////// +// +// AVX2 needed from now on. +// + +#if defined(__AVX2__) + +// +// Basic operations without SIMD equivalent + +// Bitwise not ( ~x ) +#define mm256_not( x ) _mm256_xor_si256( (x), m256_neg1 ) \ + +// Unary negation of each element ( -a ) +#define mm256_negate_64( a ) _mm256_sub_epi64( m256_zero, a ) +#define mm256_negate_32( a ) _mm256_sub_epi32( m256_zero, a ) +#define mm256_negate_16( a ) _mm256_sub_epi16( m256_zero, a ) + + +// Add 4 values, fewer dependencies than sequential addition. + +#define mm256_add4_64( a, b, c, d ) \ + _mm256_add_epi64( _mm256_add_epi64( a, b ), _mm256_add_epi64( c, d ) ) + +#define mm256_add4_32( a, b, c, d ) \ + _mm256_add_epi32( _mm256_add_epi32( a, b ), _mm256_add_epi32( c, d ) ) + +#define mm256_add4_16( a, b, c, d ) \ + _mm256_add_epi16( _mm256_add_epi16( a, b ), _mm256_add_epi16( c, d ) ) + +#define mm256_add4_8( a, b, c, d ) \ + _mm256_add_epi8( _mm256_add_epi8( a, b ), _mm256_add_epi8( c, d ) ) + +#define mm256_xor4( a, b, c, d ) \ + _mm256_xor_si256( _mm256_xor_si256( a, b ), _mm256_xor_si256( c, d ) ) // // Bit rotations. 
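// Usage sketch tying the two-argument parallel AES helper above to the SHAvite
// changes earlier in this patch: the caller hoists a null round key once and
// reuses it for every call. The function name is hypothetical.
static inline __m256i sketch_aes_mix_2x128( __m256i p, __m256i k )
{
    const __m128i zero = _mm_setzero_si128();   // hoisted null round key
    return mm256_aesenc_2x128( _mm256_xor_si256( p, k ), zero );
}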
@@ -241,24 +358,27 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n ) // index vector c #define mm256_rorv_64( v, c ) \ _mm256_or_si256( \ - _mm256_srlv_epi64( v, _mm256_set1_epi64x( c ) ), \ - _mm256_sllv_epi64( v, _mm256_set1_epi64x( 64-(c) ) ) ) + _mm256_srlv_epi64( v, c ), \ + _mm256_sllv_epi64( v, _mm256_sub_epi64( \ + _mm256_set1_epi64x( 64 ), c ) ) ) #define mm256_rolv_64( v, c ) \ _mm256_or_si256( \ - _mm256_sllv_epi64( v, _mm256_set1_epi64x( c ) ), \ - _mm256_srlv_epi64( v, _mm256_set1_epi64x( 64-(c) ) ) ) - + _mm256_sllv_epi64( v, c ), \ + _mm256_srlv_epi64( v, _mm256_sub_epi64( \ + _mm256_set1_epi64x( 64 ), c ) ) ) #define mm256_rorv_32( v, c ) \ _mm256_or_si256( \ - _mm256_srlv_epi32( v, _mm256_set1_epi32( c ) ), \ - _mm256_sllv_epi32( v, _mm256_set1_epi32( 32-(c) ) ) ) + _mm256_srlv_epi32( v, c ), \ + _mm256_sllv_epi32( v, _mm256_sub_epi32( \ + _mm256_set1_epi32( 32 ), c ) ) ) #define mm256_rolv_32( v, c ) \ _mm256_or_si256( \ - _mm256_sllv_epi32( v, _mm256_set1_epi32( c ) ), \ - _mm256_srlv_epi32( v, _mm256_set1_epi32( 32-(c) ) ) ) + _mm256_sllv_epi32( v, c ), \ + _mm256_srlv_epi32( v, _mm256_sub_epi32( \ + _mm256_set1_epi32( 32 ), c ) ) ) // AVX512 can do 16 bit elements. @@ -275,17 +395,28 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n ) #define mm256_ror_1x64( v ) _mm256_permute4x64_epi64( v, 0x39 ) #define mm256_rol_1x64( v ) _mm256_permute4x64_epi64( v, 0x93 ) -// Rotate 256 bit vector by one 32 bit element. +// A little faster with avx512 +// Rotate 256 bit vector by one 32 bit element. Use 64 bit set, it's faster. #define mm256_ror_1x32( v ) \ - _mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 0,7,6,5, 4,3,2,1 ) ) + _mm256_permutevar8x32_epi32( v, \ + m256_const_64( 0x0000000000000007, 0x0000000600000005, \ + 0x0000000400000003, 0x0000000200000001 ) + #define mm256_rol_1x32( v ) \ - _mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 6,5,4,3, 2,1,0,7 ) ) + _mm256_permutevar8x32_epi32( v, \ + m256_const_64( 0x0000000600000005, 0x0000000400000003, \ + 0x0000000200000001, 0x0000000000000007 ) // Rotate 256 bit vector by three 32 bit elements (96 bits). #define mm256_ror_3x32( v ) \ - _mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 2,1,0,7, 6,5,4,3 ) ) + _mm256_permutevar8x32_epi32( v, \ + m256_const_64( 0x0000000200000001, 0x0000000000000007, \ + 0x0000000600000005, 0x0000000400000003 ) + #define mm256_rol_3x32( v ) \ - _mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 4,3,2,1, 0,7,6,5 ) ) + _mm256_permutevar8x32_epi32( v, \ + m256_const_64( 0x0000000400000003, 0x0000000200000001, \ + 0x0000000000000007, 0x0000000600000005 ) // AVX512 can do 16 & 8 bit elements. #if defined(__AVX512VL__) @@ -293,7 +424,7 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n ) // Rotate 256 bit vector by one 16 bit element. 
#define mm256_ror_1x16( v ) \ _mm256_permutexvar_epi16( _mm256_set_epi16( \ - 0,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ), v ) + 0,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ), v ) #define mm256_rol_1x16( v ) \ _mm256_permutexvar_epi16( _mm256_set_epi16( \ @@ -303,7 +434,7 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n ) #define mm256_ror_1x8( v ) \ _mm256_permutexvar_epi8( _mm256_set_epi8( \ 0,31,30,29,28,27,26,25, 24,23,22,21,20,19,18,17, \ - 16,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ), v ) + 16,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ), v ) #define mm256_rol_1x8( v ) \ _mm256_permutexvar_epi8( _mm256_set_epi8( \ @@ -312,14 +443,6 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n ) #endif // AVX512 -// Invert vector: {3,2,1,0} -> {0,1,2,3} -#define mm256_invert_64( v ) _mm256_permute4x64_epi64( a, 0x1b ) - -#define mm256_invert_32( v ) \ - _mm256_permutevar8x32_epi32( v, _mm256_set_epi32( 0,1,2,3,4,5,6,7 ) ) - -// AVX512 can do 16 & 8 bit elements. - // // Rotate elements within lanes of 256 bit vector. @@ -332,15 +455,23 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n ) // Rotate each 128 bit lane by one 16 bit element. #define mm256_rol1x16_128( v ) \ - _mm256_shuffle_epi8( 13,12,11,10, 9,8,7,6, 5,4,3,2, 1,0,15,14 ) + _mm256_shuffle_epi8( v, _mm256_set_epi16( 6,5,4,3,2,1,0,7, \ + 6,5,4,3,2,1,0,7 ) ) #define mm256_ror1x16_128( v ) \ - _mm256_shuffle_epi8( 1,0,15,14, 13,12,11,10, 9,8,7,6, 5,4,3,2 ) + _mm256_shuffle_epi8( v, _mm256_set_epi16( 0,7,6,5,4,3,2,1, \ + 0,7,6,5,4,3,2,1 ) ) // Rotate each 128 bit lane by one byte #define mm256_rol1x8_128( v ) \ - _mm256_shuffle_epi8( 14, 13,12,11, 10,9,8,7, 6,5,4,3, 2,1,0,15 ) + _mm256_shuffle_epi8( v, _mm256_set_epi8(14,13,12,11,10, 9, 8, 7, \ + 6, 5, 4, 3, 2, 1, 0,15, \ + 14,13,12,11,10, 9, 8, 7, \ + 6, 5, 4, 3, 2, 1, 0,15 ) ) #define mm256_ror1x8_128( v ) \ - _mm256_shuffle_epi8( 0,15,14,13, 12,11,10,9, 8,7,6,5, 4,3,2,1 ) + _mm256_shuffle_epi8( v, _mm256_set_epi8( 0,15,14,13,12,11,10, 9, \ + 8, 7, 6, 5, 4, 3, 2, 1, \ + 0,15,14,13,12,11,10, 9, \ + 8, 7, 6, 5, 4, 3, 2, 1 ) ) // Rotate each 128 bit lane by c bytes. #define mm256_bror_128( v, c ) \ @@ -354,28 +485,27 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n ) #define mm256_swap32_64( v ) _mm256_shuffle_epi32( v, 0xb1 ) #define mm256_ror16_64( v ) \ - _mm256_shuffle_epi8( 9, 8,15,14,13,12,11,10, 1, 0, 7, 6, 5, 4, 3, 2 ); + _mm256_shuffle_epi8( v, _mm256_set_epi16( 4,7,6,5,0,3,2,1, \ + 4,7,6,5,0,3,2,1 ) ) #define mm256_rol16_64( v ) \ - _mm256_shuffle_epi8( 13,12,11,10, 9, 8,15,14, 5, 4, 3, 2, 1, 0, 7, 6 ); + _mm256_shuffle_epi8( v, _mm256_set_epi16( 6,5,4,7,2,1,0,3, \ + 6,5,4,7,2,1,0,3 ) ) // Swap 16 bit elements in each 32 bit lane -#define mm256_swap16_32( v ) _mm256_shuffle_epi8( v, \ - _mm_set_epi8( 13,12,15,14, 9,8,11,10, 5,4,7,6, 1,0,3,2 ) +#define mm256_swap16_32( v ) \ + _mm256_shuffle_epi8( v, _mm256_set_epi16( 6,7,4,5,2,3,0,1, \ + 6,7,4,5,2,3,0,1 ) ) // // Swap bytes in vector elements, endian bswap. 
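// Illustrative usage sketch, not part of the patch: _mm256_shuffle_epi8 indexes
// bytes, and only within each 128 bit lane, so the lane-local rotates above are
// just byte-granular shuffle controls. The control below is the same one
// mm256_ror1x8_128 uses; assumes an AVX2 target (gcc/clang, -mavx2).
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
   uint8_t in[32], out[32];
   for ( int i = 0; i < 32; i++ ) in[i] = (uint8_t)i;

   __m256i v   = _mm256_loadu_si256( (const __m256i*)in );
   __m256i ctl = _mm256_set_epi8( 0,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1,
                                  0,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1 );
   _mm256_storeu_si256( (__m256i*)out, _mm256_shuffle_epi8( v, ctl ) );

   // each 128 bit lane rotated right one byte: 1..15,0 then 17..31,16
   for ( int i = 0; i < 32; i++ ) printf( "%d ", out[i] );
   printf( "\n" );
   return 0;
}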
#define mm256_bswap_64( v ) \ - _mm256_shuffle_epi8( v, _mm256_set_epi8( 8, 9,10,11,12,13,14,15, \ - 0, 1, 2, 3, 4, 5, 6, 7, \ - 8, 9,10,11,12,13,14,15, \ - 0, 1, 2, 3, 4, 5, 6, 7 ) ) + _mm256_shuffle_epi8( v, m256_const_64( 0x08090a0b0c0d0e0f, \ + 0x0001020304050607, 0x08090a0b0c0d0e0f, 0x0001020304050607 ) ) #define mm256_bswap_32( v ) \ - _mm256_shuffle_epi8( v, _mm256_set_epi8( 12,13,14,15, 8, 9,10,11, \ - 4, 5, 6, 7, 0, 1, 2, 3, \ - 12,13,14,15, 8, 9,10,11, \ - 4, 5, 6, 7, 0, 1, 2, 3 ) ) + _mm256_shuffle_epi8( v, m256_const_64( 0x0c0d0e0f08090a0b, \ + 0x0405060700010203, 0x0c0d0e0f08090a0b, 0x0405060700010203 ) ) #define mm256_bswap_16( v ) \ _mm256_shuffle_epi8( v, _mm256_set_epi8( 14,15, 12,13, 10,11, 8, 9, \ @@ -383,6 +513,36 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, int n ) 14,15, 12,13, 10,11, 8, 9, \ 6, 7, 4, 5, 2, 3, 0, 1 ) ) +// 8 byte qword * 8 qwords * 4 lanes = 256 bytes +#define mm256_block_bswap_64( d, s ) do \ +{ \ + __m256i ctl = m256_const_64( 0x08090a0b0c0d0e0f, 0x0001020304050607, \ + 0x08090a0b0c0d0e0f, 0x0001020304050607 ); \ + casti_m256i( d, 0 ) = _mm256_shuffle_epi8( casti_m256i( s, 0 ), ctl ); \ + casti_m256i( d, 1 ) = _mm256_shuffle_epi8( casti_m256i( s, 1 ), ctl ); \ + casti_m256i( d, 2 ) = _mm256_shuffle_epi8( casti_m256i( s, 2 ), ctl ); \ + casti_m256i( d, 3 ) = _mm256_shuffle_epi8( casti_m256i( s, 3 ), ctl ); \ + casti_m256i( d, 4 ) = _mm256_shuffle_epi8( casti_m256i( s, 4 ), ctl ); \ + casti_m256i( d, 5 ) = _mm256_shuffle_epi8( casti_m256i( s, 5 ), ctl ); \ + casti_m256i( d, 6 ) = _mm256_shuffle_epi8( casti_m256i( s, 6 ), ctl ); \ + casti_m256i( d, 7 ) = _mm256_shuffle_epi8( casti_m256i( s, 7 ), ctl ); \ +} while(0) + +// 4 byte dword * 8 dwords * 8 lanes = 256 bytes +#define mm256_block_bswap_32( d, s ) do \ +{ \ + __m256i ctl = m256_const_64( 0x0c0d0e0f08090a0b, 0x0405060700010203, \ + 0x0c0d0e0f08090a0b, 0x0405060700010203 ); \ + casti_m256i( d, 0 ) = _mm256_shuffle_epi8( casti_m256i( s, 0 ), ctl ); \ + casti_m256i( d, 1 ) = _mm256_shuffle_epi8( casti_m256i( s, 1 ), ctl ); \ + casti_m256i( d, 2 ) = _mm256_shuffle_epi8( casti_m256i( s, 2 ), ctl ); \ + casti_m256i( d, 3 ) = _mm256_shuffle_epi8( casti_m256i( s, 3 ), ctl ); \ + casti_m256i( d, 4 ) = _mm256_shuffle_epi8( casti_m256i( s, 4 ), ctl ); \ + casti_m256i( d, 5 ) = _mm256_shuffle_epi8( casti_m256i( s, 5 ), ctl ); \ + casti_m256i( d, 6 ) = _mm256_shuffle_epi8( casti_m256i( s, 6 ), ctl ); \ + casti_m256i( d, 7 ) = _mm256_shuffle_epi8( casti_m256i( s, 7 ), ctl ); \ +} while(0) + // // Rotate two concatenated 256 bit vectors as one 512 bit vector by specified // number of elements. Rotate is done in place, source arguments are @@ -466,5 +626,6 @@ do { \ } while(0) #endif // __AVX2__ -#endif // SIMD_AVX2_H__ +#endif // __AVX__ +#endif // SIMD_256_H__ diff --git a/simd-utils/simd-avx512.h b/simd-utils/simd-512.h similarity index 95% rename from simd-utils/simd-avx512.h rename to simd-utils/simd-512.h index b904cc2..41c9e3f 100644 --- a/simd-utils/simd-avx512.h +++ b/simd-utils/simd-512.h @@ -1,5 +1,5 @@ -#if !defined(SIMD_AVX512_H__) -#define SIMD_AVX512_H__ 1 +#if !defined(SIMD_512_H__) +#define SIMD_512_H__ 1 #if defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__) @@ -246,28 +246,22 @@ // // Rotate elements in 512 bit vector. 
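// Illustrative usage sketch, not part of the patch: the same one-control,
// eight-shuffle pattern as mm256_block_bswap_32 above, written with plain AVX2
// intrinsics and unaligned loads/stores so it compiles standalone (-mavx2);
// __builtin_bswap32 assumes gcc or clang. All names here are local.
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

// byte swap 64 32-bit words (256 bytes) using one shuffle control
static void block_bswap_32( uint32_t *d, const uint32_t *s )
{
   const __m256i ctl = _mm256_set_epi64x( 0x0c0d0e0f08090a0b, 0x0405060700010203,
                                          0x0c0d0e0f08090a0b, 0x0405060700010203 );
   for ( int i = 0; i < 8; i++ )
      _mm256_storeu_si256( (__m256i*)d + i,
          _mm256_shuffle_epi8( _mm256_loadu_si256( (const __m256i*)s + i ), ctl ) );
}

int main(void)
{
   uint32_t src[64], dst[64];
   for ( int i = 0; i < 64; i++ ) src[i] = 0x01020304u * (uint32_t)(i + 1);
   block_bswap_32( dst, src );
   printf( "%08x -> %08x (builtin: %08x)\n",
           src[5], dst[5], __builtin_bswap32( src[5] ) );
   return 0;
}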
-#define mm512_swap_256( v ) \ - _mm512_permutexvar_epi64( v, _mm512_set_epi64( 3,2,1,0, 7,6,5,4 ) ) +#define mm512_swap_256( v ) _mm512_alignr_epi64( v, v, 4 ) -#define mm512_ror_1x128( v ) \ - _mm512_permutexvar_epi64( v, _mm512_set_epi64( 1,0, 7,6, 5,4, 3,2 ) ) +#define mm512_ror_1x128( v ) _mm512_alignr_epi64( v, v, 2 ) +#define mm512_rol_1x128( v ) _mm512_alignr_epi64( v, v, 6 ) -#define mm512_rol_1x128( v ) \ - _mm512_permutexvar_epi64( v, _mm512_set_epi64( 5,4, 3,2, 1,0, 7,6 ) ) +#define mm512_ror_1x64( v ) _mm512_alignr_epi64( v, v, 1 ) +#define mm512_rol_1x64( v ) _mm512_alignr_epi64( v, v, 7 ) -#define mm512_ror_1x64( v ) \ - _mm512_permutexvar_epi64( v, _mm512_set_epi64( 0,7,6,5,4,3,2,1 ) ) +#define mm512_ror_1x32( v ) _mm512_alignr_epi32( v, v, 1 ) +#define mm512_rol_1x32( v ) _mm512_alignr_epi32( v, v, 15 ) -#define mm512_rol_1x64( v ) \ - _mm512_permutexvar_epi64( v, _mm512_set_epi64( 6,5,4,3,2,1,0,7 ) ) +// Generic for odd rotations +#define mm512_ror_x64( v, n ) _mm512_alignr_epi64( v, v, n ) -#define mm512_ror_1x32( v ) \ - _mm512_permutexvar_epi32( v, _mm512_set_epi32( \ - 0,15,14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ) ) +#define mm512_ror_x32( v, n ) _mm512_alignr_epi32( v, v, n ) -#define mm512_rol_1x32( v ) \ - _mm512_permutexvar_epi32( v, _mm512_set_epi32( \ - 14,13,12,11,10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 15 ) ) // Although documented to exist in AVX512F the _mm512_set_epi8 & // _mm512_set_epi16 intrinsics fail to compile. Seems useful to have @@ -282,7 +276,7 @@ 0X00080007, 0X00060005, 0X00040003, 0X00020001 ) ) #define mm512_rol_1x16( v ) \ - _mm512_permutexvar_epi16( v, _mm512_set_epi16( \ + _mm512_permutexvar_epi16( v, _mm512_set_epi32( \ 0x001E001D, 0x001C001B, 0x001A0019, 0x00180017, \ 0X00160015, 0X00140013, 0X00120011, 0x0010000F, \ 0X000E000D, 0X000C000B, 0X000A0009, 0X00080007, \ @@ -290,14 +284,14 @@ #define mm512_ror_1x8( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi8( \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ 0x003F3E3D, 0x3C3B3A39, 0x38373635, 0x34333231, \ 0x302F2E2D, 0x2C2B2A29, 0x28272625, 0x24232221, \ 0x201F1E1D, 0x1C1B1A19, 0x18171615, 0x14131211, \ 0x100F0E0D, 0x0C0B0A09, 0x08070605, 0x04030201 ) ) #define mm512_rol_1x8( v ) \ - _mm512_permutexvar_epi8( v, _mm512_set_epi8( \ + _mm512_permutexvar_epi8( v, _mm512_set_epi32( \ 0x3E3D3C3B, 0x3A393837, 0x36353433, 0x3231302F, \ 0x2E2D2C2B, 0x2A292827, 0x26252423, 0x2221201F, \ 0x1E1D1C1B, 0x1A191817, 0x16151413, 0x1211100F, \ @@ -601,4 +595,4 @@ do { \ } while(0) #endif // AVX512 -#endif // SIMD_AVX512_H__ +#endif // SIMD_512_H__ diff --git a/simd-utils/simd-mmx.h b/simd-utils/simd-64.h similarity index 80% rename from simd-utils/simd-mmx.h rename to simd-utils/simd-64.h index ca08039..3add748 100644 --- a/simd-utils/simd-mmx.h +++ b/simd-utils/simd-64.h @@ -1,5 +1,5 @@ -#if !defined(SIMD_MMX_H__) -#define SIMD_MMX_H__ 1 +#if !defined(SIMD_64_H__) +#define SIMD_64_H__ 1 #if defined(__MMX__) @@ -13,21 +13,20 @@ // Pseudo constants +/* #define m64_zero _mm_setzero_si64() #define m64_one_64 _mm_set_pi32( 0UL, 1UL ) #define m64_one_32 _mm_set1_pi32( 1UL ) #define m64_one_16 _mm_set1_pi16( 1U ) #define m64_one_8 _mm_set1_pi8( 1U ); #define m64_neg1 _mm_set1_pi32( 0xFFFFFFFFUL ) -/* cast also works, which is better?
+*/ #define m64_zero ( (__m64)0ULL ) #define m64_one_64 ( (__m64)1ULL ) #define m64_one_32 ( (__m64)0x0000000100000001ULL ) #define m64_one_16 ( (__m64)0x0001000100010001ULL ) #define m64_one_8 ( (__m64)0x0101010101010101ULL ) #define m64_neg1 ( (__m64)0xFFFFFFFFFFFFFFFFULL ) -*/ - #define casti_m64(p,i) (((__m64*)(p))[(i)]) @@ -42,6 +41,14 @@ #define mm64_negate_8( v ) _mm_sub_pi8( m64_zero, (__m64)v ) // Rotate bits in packed elements of 64 bit vector +#define mm64_rol_64( a, n ) \ + _mm_or_si64( _mm_slli_si64( (__m64)(a), n ), \ + _mm_srli_si64( (__m64)(a), 64-(n) ) ) + +#define mm64_ror_64( a, n ) \ + _mm_or_si64( _mm_srli_si64( (__m64)(a), n ), \ + _mm_slli_si64( (__m64)(a), 64-(n) ) ) + #define mm64_rol_32( a, n ) \ _mm_or_si64( _mm_slli_pi32( (__m64)(a), n ), \ _mm_srli_pi32( (__m64)(a), 32-(n) ) ) @@ -78,22 +85,20 @@ // Endian byte swap packed elements // A vectorized version of the u64 bswap, use when data already in MMX reg. #define mm64_bswap_64( v ) \ - _mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 0,1,2,3,4,5,6,7 ) ) + _mm_shuffle_pi8( (__m64)v, (__m64)0x0001020304050607 ) #define mm64_bswap_32( v ) \ - _mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 4,5,6,7, 0,1,2,3 ) ) + _mm_shuffle_pi8( (__m64)v, (__m64)0x0405060700010203 ) -/* #define mm64_bswap_16( v ) \ - _mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 6,7, 4,5, 2,3, 0,1 ) ); -*/ + _mm_shuffle_pi8( (__m64)v, (__m64)0x0607040502030001 ); #else #define mm64_bswap_64( v ) \ (__m64)__builtin_bswap64( (uint64_t)v ) -// This exists only for compatibility with CPUs without SSSE3. MMX doesn't +// These exist only for compatibility with CPUs without SSSE3. MMX doesn't // have extract 32 instruction so pointers are needed to access elements. // It' more efficient for the caller to use scalar variables and call // bswap_32 directly. @@ -101,20 +106,11 @@ _mm_set_pi32( __builtin_bswap32( ((uint32_t*)&v)[1] ), \ __builtin_bswap32( ((uint32_t*)&v)[0] ) ) -#endif - -// Invert vector: {3,2,1,0} -> {0,1,2,3} -// Invert_64 is the same as bswap64 -// Invert_32 is the same as swap32 - -#define mm64_invert_16( v ) _mm_shuffle_pi16( (__m64)v, 0x1b ) - -#if defined(__SSSE3__) - -// An SSE2 or MMX version of this would be monstrous, shifting, masking and -// oring each byte individually. -#define mm64_invert_8( v ) \ - _mm_shuffle_pi8( (__m64)v, _mm_set_pi8( 0,1,2,3,4,5,6,7 ) ); +#define mm64_bswap_16( v ) \ + _mm_set_pi16( __builtin_bswap16( ((uint16_t*)&v)[3] ), \ + __builtin_bswap16( ((uint16_t*)&v)[2] ), \ + __builtin_bswap16( ((uint16_t*)&v)[1] ), \ + __builtin_bswap16( ((uint16_t*)&v)[0] ) ) #endif @@ -131,5 +127,5 @@ static inline void memset_m64( __m64 *dst, const __m64 a, int n ) #endif // MMX -#endif // SIMD_MMX_H__ +#endif // SIMD_64_H__ diff --git a/simd-utils/simd-avx.h b/simd-utils/simd-avx.h deleted file mode 100644 index 36bebc3..0000000 --- a/simd-utils/simd-avx.h +++ /dev/null @@ -1,243 +0,0 @@ -#if !defined(SIMD_AVX_H__) -#define SIMD_AVX_H__ 1 - -#if defined(__AVX__) - -///////////////////////////////////////////////////////////////////// -// -// AVX 256 bit vectors -// -// Basic support for 256 bit vectors. Most of the good stuff needs AVX2. - -// Compile time vector constants and initializers. -// -// The following macro constants and functions should only be used -// for compile time initialization of constant and variable vector -// arrays. These constants use memory, use _mm256_set at run time to -// avoid using memory. 
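// Illustrative usage sketch, not part of the patch: the m64 constants above are
// now plain 64-bit literals reinterpreted as __m64, and mm64_bswap_32 uses one
// such literal as its shuffle control. This standalone check uses
// _mm_cvtsi64_m64 / _mm_cvtm64_si64 for the scalar<->__m64 moves instead of the
// header's direct cast; assumes gcc/clang on x86-64 with -mssse3.
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
   uint64_t x = 0x1122334455667788ULL;
   __m64 v    = _mm_cvtsi64_m64( (long long)x );
   __m64 ctl  = _mm_cvtsi64_m64( 0x0405060700010203LL );  // same control as mm64_bswap_32
   uint64_t r = (uint64_t)_mm_cvtm64_si64( _mm_shuffle_pi8( v, ctl ) );
   _mm_empty();   // leave the MMX state clean before any later x87 use
   // each 32 bit half byte swapped: expect 4433221188776655
   printf( "%016llx -> %016llx\n", (unsigned long long)x, (unsigned long long)r );
   return 0;
}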
- -#define mm256_const_64( x3, x2, x1, x0 ) {{ x3, x2, x1, x0 }} -#define mm256_const1_64( x ) {{ x,x,x,x }} - -#define mm256_const_32( x7, x6, x5, x4, x3, x2, x1, x0 ) \ - {{ x7, x6, x5, x4, x3, x2, x1, x0 }} -#define mm256_const1_32( x ) {{ x,x,x,x, x,x,x,x }} - -#define mm256_const_16( x15, x14, x13, x12, x11, x10, x09, x08, \ - x07, x06, x05, x04, x03, x02, x01, x00 ) \ - {{ x15, x14, x13, x12, x11, x10, x09, x08, \ - x07, x06, x05, x04, x03, x02, x01, x00 }} -#define mm256_const1_16( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }} - -#define mm256_const_8( x31, x30, x29, x28, x27, x26, x25, x24, \ - x23, x22, x21, x20, x19, x18, x17, x16, \ - x15, x14, x13, x12, x11, x10, x09, x08, \ - x07, x06, x05, x04, x03, x02, x01, x00 ) \ - {{ x31, x30, x29, x28, x27, x26, x25, x24, \ - x23, x22, x21, x20, x19, x18, x17, x16, \ - x15, x14, x13, x12, x11, x10, x09, x08, \ - x07, x06, x05, x04, x03, x02, x01, x00 }} -#define mm256_const1_8( x ) {{ x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x, \ - x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x }} - -// Predefined compile time constant vectors. -// Use Pseudo constants at run time for all simple constant vectors. -#define c256_zero mm256_const1_64( 0ULL ) -#define c256_one_256 mm256_const_64( 0ULL, 0ULL, 0ULL, 1ULL ) -#define c256_one_128 mm256_const_64( 0ULL, 1ULL, 0ULL, 1ULL ) -#define c256_one_64 mm256_const1_64( 1ULL ) -#define c256_one_32 mm256_const1_32( 1UL ) -#define c256_one_16 mm256_const1_16( 1U ) -#define c256_one_8 mm256_const1_8( 1U ) -#define c256_neg1 mm256_const1_64( 0xFFFFFFFFFFFFFFFFULL ) -#define c256_neg1_64 mm256_const1_64( 0xFFFFFFFFFFFFFFFFULL ) -#define c256_neg1_32 mm256_const1_32( 0xFFFFFFFFUL ) -#define c256_neg1_16 mm256_const1_16( 0xFFFFU ) -#define c256_neg1_8 mm256_const1_8( 0xFFU ) - -// -// Pseudo constants. -// These can't be used for compile time initialization but are preferable -// for simple constant vectors at run time. - -#define m256_zero _mm256_setzero_si256() -#define m256_one_256 _mm256_set_epi64x( 0ULL, 0ULL, 0ULL, 1ULL ) -#define m256_one_128 _mm256_set_epi64x( 0ULL, 1ULL, 0ULL, 1ULL ) -#define m256_one_64 _mm256_set1_epi64x( 1ULL ) -#define m256_one_32 _mm256_set1_epi32( 1UL ) -#define m256_one_16 _mm256_set1_epi16( 1U ) -#define m256_one_8 _mm256_set1_epi8( 1U ) -#define m256_neg1 _mm256_set1_epi64x( 0xFFFFFFFFFFFFFFFFULL ) - -// -// Vector size conversion. -// -// Allows operations on either or both halves of a 256 bit vector serially. -// Handy for parallel AES. -// Caveats: -// _mm256_castsi256_si128 is free and without side effects. -// _mm256_castsi128_si256 is also free but leaves the high half -// undefined. That's ok if the hi half will be subseqnently assigned. -// If assigning both, do lo first, If assigning only 1, use -// _mm256_inserti128_si256. -// -// What to do about extractf128 (AVX) and extracti128 (AVX2)? -#define mm128_extr_lo128_256( a ) _mm256_castsi256_si128( a ) -#define mm128_extr_hi128_256( a ) _mm256_extractf128_si256( a, 1 ) - -// Extract 4 u64 from 256 bit vector. 
-#define mm256_extr_4x64( a0, a1, a2, a3, src ) \ -do { \ - __m128i hi = _mm256_extractf128_si256( src, 1 ); \ - a0 = _mm_extract_epi64( _mm256_castsi256_si128( src ), 0 ); \ - a1 = _mm_extract_epi64( _mm256_castsi256_si128( src ), 1 ); \ - a2 = _mm_extract_epi64( hi, 0 ); \ - a3 = _mm_extract_epi64( hi, 1 ); \ -} while(0) - -#define mm256_extr_8x32( a0, a1, a2, a3, a4, a5, a6, a7, src ) \ -do { \ - __m128i hi = _mm256_extractf128_si256( src, 1 ); \ - a0 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 0 ); \ - a1 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 1 ); \ - a2 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 2 ); \ - a3 = _mm_extract_epi32( _mm256_castsi256_si128( src ), 3 ); \ - a4 = _mm_extract_epi32( hi, 0 ); \ - a5 = _mm_extract_epi32( hi, 1 ); \ - a6 = _mm_extract_epi32( hi, 2 ); \ - a7 = _mm_extract_epi32( hi, 3 ); \ -} while(0) - -// input __m128i, returns __m256i -// To build a 256 bit vector from 2 128 bit vectors lo must be done first. -// lo alone leaves hi undefined, hi alone leaves lo unchanged. -// Both cost one clock while preserving the other half.. -// Insert b into specified half of a leaving other half of a unchanged. -#define mm256_ins_lo128_256( a, b ) _mm256_insertf128_si256( a, b, 0 ) -#define mm256_ins_hi128_256( a, b ) _mm256_insertf128_si256( a, b, 1 ) - -// concatenate two 128 bit vectors into one 256 bit vector: { hi, lo } -#define mm256_concat_128( hi, lo ) \ - mm256_ins_hi128_256( _mm256_castsi128_si256( lo ), hi ) - -// Horizontal vector testing - -// Needs int128 support -// Bit-wise test of entire vector, useful to test results of cmp. -#define mm256_anybits0( a ) \ - ( (uint128_t)mm128_extr_hi128_256( a ) \ - | (uint128_t)mm128_extr_lo128_256( a ) ) - -#define mm256_anybits1( a ) \ - ( ( (uint128_t)mm128_extr_hi128_256( a ) + 1 ) \ - | ( (uint128_t)mm128_extr_lo128_256( a ) + 1 ) ) - -#define mm256_allbits0_256( a ) ( !mm256_anybits1(a) ) -#define mm256_allbits1_256( a ) ( !mm256_anybits0(a) ) - -// Parallel AES, for when x is expected to be in a 256 bit register. -#define mm256_aesenc_2x128( x ) \ - mm256_concat_128( \ - _mm_aesenc_si128( mm128_extr_hi128_256( x ), m128_zero ), \ - _mm_aesenc_si128( mm128_extr_lo128_256( x ), m128_zero ) ) - -#define mm256_aesenckey_2x128( x, k ) \ - mm256_concat_128( \ - _mm_aesenc_si128( mm128_extr_hi128_256( x ), \ - mm128_extr_lo128_256( k ) ), \ - _mm_aesenc_si128( mm128_extr_hi128_256( x ), \ - mm128_extr_lo128_256( k ) ) ) - -#define mm256_paesenc_2x128( y, x ) do \ -{ \ - __m256i *X = (__m256i*)x; \ - __m256i *Y = (__m256i*)y; \ - y[0] = _mm_aesenc_si128( x[0], m128_zero ); \ - y[1] = _mm_aesenc_si128( x[1], m128_zero ); \ -} while(0); - -// With pointers. 
-#define mm256_paesenckey_2x128( y, x, k ) do \ -{ \ - __m256i *X = (__m256i*)x; \ - __m256i *Y = (__m256i*)y; \ - __m256i *K = (__m256i*)ky; \ - y[0] = _mm_aesenc_si128( x[0], K[0] ); \ - y[1] = _mm_aesenc_si128( x[1], K[1] ); \ -} while(0); - -// -// Pointer casting - -// p = any aligned pointer -// returns p as pointer to vector type, not very useful -#define castp_m256i(p) ((__m256i*)(p)) - -// p = any aligned pointer -// returns *p, watch your pointer arithmetic -#define cast_m256i(p) (*((__m256i*)(p))) - -// p = any aligned pointer, i = scaled array index -// returns value p[i] -#define casti_m256i(p,i) (((__m256i*)(p))[(i)]) - -// p = any aligned pointer, o = scaled offset -// returns pointer p+o -#define casto_m256i(p,o) (((__m256i*)(p))+(o)) - - -// Gather scatter - -#define mm256_gather_64( d, s0, s1, s2, s3 ) \ - ((uint64_t*)(d))[0] = (uint64_t)(s0); \ - ((uint64_t*)(d))[1] = (uint64_t)(s1); \ - ((uint64_t*)(d))[2] = (uint64_t)(s2); \ - ((uint64_t*)(d))[3] = (uint64_t)(s3); - -#define mm256_gather_32( d, s0, s1, s2, s3, s4, s5, s6, s7 ) \ - ((uint32_t*)(d))[0] = (uint32_t)(s0); \ - ((uint32_t*)(d))[1] = (uint32_t)(s1); \ - ((uint32_t*)(d))[2] = (uint32_t)(s2); \ - ((uint32_t*)(d))[3] = (uint32_t)(s3); \ - ((uint32_t*)(d))[4] = (uint32_t)(s4); \ - ((uint32_t*)(d))[5] = (uint32_t)(s5); \ - ((uint32_t*)(d))[6] = (uint32_t)(s6); \ - ((uint32_t*)(d))[7] = (uint32_t)(s7); - - -// Scatter data from contiguous memory. -// All arguments are pointers -#define mm256_scatter_64( d0, d1, d2, d3, s ) \ - *((uint64_t*)(d0)) = ((uint64_t*)(s))[0]; \ - *((uint64_t*)(d1)) = ((uint64_t*)(s))[1]; \ - *((uint64_t*)(d2)) = ((uint64_t*)(s))[2]; \ - *((uint64_t*)(d3)) = ((uint64_t*)(s))[3]; - -#define mm256_scatter_32( d0, d1, d2, d3, d4, d5, d6, d7, s ) \ - *((uint32_t*)(d0)) = ((uint32_t*)(s))[0]; \ - *((uint32_t*)(d1)) = ((uint32_t*)(s))[1]; \ - *((uint32_t*)(d2)) = ((uint32_t*)(s))[2]; \ - *((uint32_t*)(d3)) = ((uint32_t*)(s))[3]; \ - *((uint32_t*)(d4)) = ((uint32_t*)(s))[4]; \ - *((uint32_t*)(d5)) = ((uint32_t*)(s))[5]; \ - *((uint32_t*)(d6)) = ((uint32_t*)(s))[6]; \ - *((uint32_t*)(d7)) = ((uint32_t*)(s))[7]; - - -// -// Memory functions -// n = number of 256 bit (32 byte) vectors - -static inline void memset_zero_256( __m256i *dst, int n ) -{ for ( int i = 0; i < n; i++ ) dst[i] = m256_zero; } - -static inline void memset_256( __m256i *dst, const __m256i a, int n ) -{ for ( int i = 0; i < n; i++ ) dst[i] = a; } - -static inline void memcpy_256( __m256i *dst, const __m256i *src, int n ) -{ for ( int i = 0; i < n; i ++ ) dst[i] = src[i]; } - - -#endif // __AVX__ -#endif // SIMD_AVX_H__ - diff --git a/simd-utils/simd-int.h b/simd-utils/simd-int.h index 7ef7833..1268214 100644 --- a/simd-utils/simd-int.h +++ b/simd-utils/simd-int.h @@ -62,10 +62,16 @@ static inline void memset_64( uint64_t *dst, const uint64_t a, int n ) // // 128 bit integers // +// 128 bit integers are inneficient and not a shortcut for __m128i. // No real need or use. //#define u128_neg1 ((uint128_t)(-1)) +// usefull for making constants. +#define mk_uint128( hi, lo ) \ + ( ( (uint128_t)(hi) << 64 ) | ( (uint128_t)(lo) ) ) + + // Extracting the low bits is a trivial cast. // These specialized functions are optimized while providing a // consistent interface.
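// Illustrative usage sketch, not part of the patch: mk_uint128 just glues two
// 64-bit halves into one 128-bit integer. Assumes a compiler with unsigned
// __int128 (gcc/clang); the uint128_t typedef below is a local stand-in for the
// header's type.
#include <stdint.h>
#include <stdio.h>

typedef unsigned __int128 uint128_t;

#define mk_uint128( hi, lo ) \
   ( ( (uint128_t)(hi) << 64 ) | ( (uint128_t)(lo) ) )

int main(void)
{
   uint128_t x = mk_uint128( 0x0123456789abcdefULL, 0xfedcba9876543210ULL );
   printf( "hi=%016llx lo=%016llx\n",
           (unsigned long long)( x >> 64 ), (unsigned long long)x );
   return 0;
}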