mirror of
https://github.com/JayDDee/cpuminer-opt.git
synced 2025-09-17 23:44:27 +00:00
v3.10.4
@@ -2075,9 +2075,6 @@ static inline void dintrlv_2x256( void *dst0, void *dst1,
   d0[3] = s[6];   d1[3] = s[7];
}

#endif // AVX

///////////////////////////
@@ -2225,7 +2222,6 @@ static inline void rintrlv_4x32_4x64( void *dst,

// 2x128 -> 4x64

static inline void rintrlv_2x128_4x64( void *dst, const void *src0,
                                       const void *src1, const int bit_len )
{
@@ -2268,7 +2264,6 @@ static inline void rintrlv_2x128_4x64( void *dst, const void *src0,
   d[31] = _mm_unpackhi_epi64( s1[14], s1[15] );
}

/*
#define RLEAVE_2x128_4x64( i ) do \
{ \
@@ -2339,7 +2334,6 @@ static inline void rintrlv_4x64_2x128( void *dst0, void *dst1,
   d1[15] = _mm_unpackhi_epi64( s[29], s[31] );
}

/*
#define RLEAVE_4x64_2x128( i ) do \
{ \
@@ -2364,6 +2358,354 @@ static inline void rintrlv_4x64_2x128( void *dst0, void *dst1,
}
*/

// 4x128 -> 8x64

static inline void rintrlv_4x128_8x64( void *dst, const void *src0,
|
||||
const void *src1, const int bit_len )
|
||||
{
|
||||
__m128i *d = (__m128i*)dst;
|
||||
const __m128i *s0 = (const __m128i*)src0;
|
||||
const __m128i *s1 = (const __m128i*)src1;
|
||||
|
||||
d[ 0] = _mm_unpacklo_epi64( s0[ 0], s0[ 1] );
|
||||
d[ 1] = _mm_unpacklo_epi64( s0[ 2], s0[ 3] );
|
||||
d[ 2] = _mm_unpacklo_epi64( s1[ 0], s1[ 1] );
|
||||
d[ 3] = _mm_unpacklo_epi64( s1[ 2], s1[ 3] );
|
||||
d[ 4] = _mm_unpackhi_epi64( s0[ 0], s0[ 1] );
|
||||
d[ 5] = _mm_unpackhi_epi64( s0[ 2], s0[ 3] );
|
||||
d[ 6] = _mm_unpackhi_epi64( s1[ 0], s1[ 1] );
|
||||
d[ 7] = _mm_unpackhi_epi64( s1[ 2], s1[ 3] );
|
||||
|
||||
d[ 8] = _mm_unpacklo_epi64( s0[ 4], s0[ 5] );
|
||||
d[ 9] = _mm_unpacklo_epi64( s0[ 6], s0[ 7] );
|
||||
d[10] = _mm_unpacklo_epi64( s1[ 4], s1[ 5] );
|
||||
d[11] = _mm_unpacklo_epi64( s1[ 6], s1[ 7] );
|
||||
d[12] = _mm_unpackhi_epi64( s0[ 4], s0[ 5] );
|
||||
d[13] = _mm_unpackhi_epi64( s0[ 6], s0[ 7] );
|
||||
d[14] = _mm_unpackhi_epi64( s1[ 4], s1[ 5] );
|
||||
d[15] = _mm_unpackhi_epi64( s1[ 6], s1[ 7] );
|
||||
|
||||
if ( bit_len <= 256 ) return;
|
||||
|
||||
d[16] = _mm_unpacklo_epi64( s0[ 8], s0[ 9] );
|
||||
d[17] = _mm_unpacklo_epi64( s0[10], s0[11] );
|
||||
d[18] = _mm_unpacklo_epi64( s1[ 8], s1[ 9] );
|
||||
d[19] = _mm_unpacklo_epi64( s1[10], s1[11] );
|
||||
d[20] = _mm_unpackhi_epi64( s0[ 8], s0[ 9] );
|
||||
d[21] = _mm_unpackhi_epi64( s0[10], s0[11] );
|
||||
d[22] = _mm_unpackhi_epi64( s1[ 8], s1[ 9] );
|
||||
d[23] = _mm_unpackhi_epi64( s1[10], s1[11] );
|
||||
|
||||
d[24] = _mm_unpacklo_epi64( s0[12], s0[13] );
|
||||
d[25] = _mm_unpacklo_epi64( s0[14], s0[15] );
|
||||
d[26] = _mm_unpacklo_epi64( s1[12], s1[13] );
|
||||
d[27] = _mm_unpacklo_epi64( s1[14], s1[15] );
|
||||
d[28] = _mm_unpackhi_epi64( s0[12], s0[13] );
|
||||
d[29] = _mm_unpackhi_epi64( s0[14], s0[15] );
|
||||
d[30] = _mm_unpackhi_epi64( s1[12], s1[13] );
|
||||
d[31] = _mm_unpackhi_epi64( s1[14], s1[15] );
|
||||
|
||||
if ( bit_len <= 512 ) return;
|
||||
|
||||
d[32] = _mm_unpacklo_epi64( s0[16], s0[17] );
|
||||
d[33] = _mm_unpacklo_epi64( s0[18], s0[19] );
|
||||
d[34] = _mm_unpacklo_epi64( s1[16], s1[17] );
|
||||
d[35] = _mm_unpacklo_epi64( s1[18], s1[19] );
|
||||
d[36] = _mm_unpackhi_epi64( s0[16], s0[17] );
|
||||
d[37] = _mm_unpackhi_epi64( s0[18], s0[19] );
|
||||
d[38] = _mm_unpackhi_epi64( s1[16], s1[17] );
|
||||
d[39] = _mm_unpackhi_epi64( s1[18], s1[19] );
|
||||
|
||||
d[40] = _mm_unpacklo_epi64( s0[20], s0[21] );
|
||||
d[41] = _mm_unpacklo_epi64( s0[22], s0[23] );
|
||||
d[42] = _mm_unpacklo_epi64( s1[20], s1[21] );
|
||||
d[43] = _mm_unpacklo_epi64( s1[22], s1[23] );
|
||||
d[44] = _mm_unpackhi_epi64( s0[20], s0[21] );
|
||||
d[45] = _mm_unpackhi_epi64( s0[22], s0[23] );
|
||||
d[46] = _mm_unpackhi_epi64( s1[20], s1[21] );
|
||||
d[47] = _mm_unpackhi_epi64( s1[22], s1[23] );
|
||||
|
||||
d[48] = _mm_unpacklo_epi64( s0[24], s0[25] );
|
||||
d[49] = _mm_unpacklo_epi64( s0[26], s0[27] );
|
||||
d[50] = _mm_unpacklo_epi64( s1[24], s1[25] );
|
||||
d[51] = _mm_unpacklo_epi64( s1[26], s1[27] );
|
||||
d[52] = _mm_unpackhi_epi64( s0[24], s0[25] );
|
||||
d[53] = _mm_unpackhi_epi64( s0[26], s0[27] );
|
||||
d[54] = _mm_unpackhi_epi64( s1[24], s1[25] );
|
||||
d[55] = _mm_unpackhi_epi64( s1[26], s1[27] );
|
||||
|
||||
d[56] = _mm_unpacklo_epi64( s0[28], s0[29] );
|
||||
d[57] = _mm_unpacklo_epi64( s0[30], s0[31] );
|
||||
d[58] = _mm_unpacklo_epi64( s1[28], s1[29] );
|
||||
d[59] = _mm_unpacklo_epi64( s1[30], s1[31] );
|
||||
d[60] = _mm_unpackhi_epi64( s0[28], s0[29] );
|
||||
d[61] = _mm_unpackhi_epi64( s0[30], s0[31] );
|
||||
d[62] = _mm_unpackhi_epi64( s1[28], s1[29] );
|
||||
d[63] = _mm_unpackhi_epi64( s1[30], s1[31] );
|
||||
}
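
// Illustrative usage sketch (assumed names, not from the original file):
// regroup two 4-way 128-bit interleaved buffers holding 512-bit states for
// lanes 0-3 and lanes 4-7 into a single 8-way 64-bit interleaved buffer.
static inline void example_rintrlv_4x128_to_8x64( uint64_t *vhash,
                     const uint64_t *vhashA, const uint64_t *vhashB )
{
   // vhashA and vhashB each hold 8*4 uint64_t (4 lanes of a 512 bit state),
   // vhash holds 8*8 uint64_t (8 lanes); all buffers are 64 byte aligned.
   rintrlv_4x128_8x64( vhash, vhashA, vhashB, 512 );
}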

// 8x64 -> 4x128

static inline void rintrlv_8x64_4x128( void *dst0, void *dst1,
|
||||
const void *src, const int bit_len )
|
||||
{
|
||||
__m128i *d0 = (__m128i*)dst0;
|
||||
__m128i *d1 = (__m128i*)dst1;
|
||||
const __m128i* s = (const __m128i*)src;
|
||||
|
||||
d0[ 0] = _mm_unpacklo_epi64( s[ 0], s[ 4] );
|
||||
d0[ 1] = _mm_unpackhi_epi64( s[ 0], s[ 4] );
|
||||
d1[ 0] = _mm_unpacklo_epi64( s[ 2], s[ 6] );
|
||||
d1[ 1] = _mm_unpackhi_epi64( s[ 2], s[ 6] );
|
||||
d0[ 2] = _mm_unpacklo_epi64( s[ 1], s[ 5] );
|
||||
d0[ 3] = _mm_unpackhi_epi64( s[ 1], s[ 5] );
|
||||
d1[ 2] = _mm_unpacklo_epi64( s[ 3], s[ 7] );
|
||||
d1[ 3] = _mm_unpackhi_epi64( s[ 3], s[ 7] );
|
||||
|
||||
d0[ 4] = _mm_unpacklo_epi64( s[ 8], s[12] );
|
||||
d0[ 5] = _mm_unpackhi_epi64( s[ 8], s[12] );
|
||||
d1[ 4] = _mm_unpacklo_epi64( s[10], s[14] );
|
||||
d1[ 5] = _mm_unpackhi_epi64( s[10], s[14] );
|
||||
d0[ 6] = _mm_unpacklo_epi64( s[ 9], s[13] );
|
||||
d0[ 7] = _mm_unpackhi_epi64( s[ 9], s[13] );
|
||||
d1[ 6] = _mm_unpacklo_epi64( s[11], s[15] );
|
||||
d1[ 7] = _mm_unpackhi_epi64( s[11], s[15] );
|
||||
|
||||
if ( bit_len <= 256 ) return;
|
||||
|
||||
d0[ 8] = _mm_unpacklo_epi64( s[16], s[20] );
|
||||
d0[ 9] = _mm_unpackhi_epi64( s[16], s[20] );
|
||||
d1[ 8] = _mm_unpacklo_epi64( s[18], s[22] );
|
||||
d1[ 9] = _mm_unpackhi_epi64( s[18], s[22] );
|
||||
d0[10] = _mm_unpacklo_epi64( s[17], s[21] );
|
||||
d0[11] = _mm_unpackhi_epi64( s[17], s[21] );
|
||||
d1[10] = _mm_unpacklo_epi64( s[19], s[23] );
|
||||
d1[11] = _mm_unpackhi_epi64( s[19], s[23] );
|
||||
|
||||
d0[12] = _mm_unpacklo_epi64( s[24], s[28] );
|
||||
d0[13] = _mm_unpackhi_epi64( s[24], s[28] );
|
||||
d1[12] = _mm_unpacklo_epi64( s[26], s[30] );
|
||||
d1[13] = _mm_unpackhi_epi64( s[26], s[30] );
|
||||
d0[14] = _mm_unpacklo_epi64( s[25], s[29] );
|
||||
d0[15] = _mm_unpackhi_epi64( s[25], s[29] );
|
||||
d1[14] = _mm_unpacklo_epi64( s[27], s[31] );
|
||||
d1[15] = _mm_unpackhi_epi64( s[27], s[31] );
|
||||
|
||||
if ( bit_len <= 512 ) return;
|
||||
|
||||
d0[16] = _mm_unpacklo_epi64( s[32], s[36] );
|
||||
d0[17] = _mm_unpackhi_epi64( s[32], s[36] );
|
||||
d1[16] = _mm_unpacklo_epi64( s[34], s[38] );
|
||||
d1[17] = _mm_unpackhi_epi64( s[34], s[38] );
|
||||
d0[18] = _mm_unpacklo_epi64( s[33], s[37] );
|
||||
d0[19] = _mm_unpackhi_epi64( s[33], s[37] );
|
||||
d1[18] = _mm_unpacklo_epi64( s[35], s[39] );
|
||||
d1[19] = _mm_unpackhi_epi64( s[35], s[39] );
|
||||
|
||||
d0[20] = _mm_unpacklo_epi64( s[40], s[44] );
|
||||
d0[21] = _mm_unpackhi_epi64( s[40], s[44] );
|
||||
d1[20] = _mm_unpacklo_epi64( s[42], s[46] );
|
||||
d1[21] = _mm_unpackhi_epi64( s[42], s[46] );
|
||||
d0[22] = _mm_unpacklo_epi64( s[41], s[45] );
|
||||
d0[23] = _mm_unpackhi_epi64( s[41], s[45] );
|
||||
d1[22] = _mm_unpacklo_epi64( s[43], s[47] );
|
||||
d1[23] = _mm_unpackhi_epi64( s[43], s[47] );
|
||||
|
||||
d0[24] = _mm_unpacklo_epi64( s[48], s[52] );
|
||||
d0[25] = _mm_unpackhi_epi64( s[48], s[52] );
|
||||
d1[24] = _mm_unpacklo_epi64( s[50], s[54] );
|
||||
d1[25] = _mm_unpackhi_epi64( s[50], s[54] );
|
||||
d0[26] = _mm_unpacklo_epi64( s[49], s[53] );
|
||||
d0[27] = _mm_unpackhi_epi64( s[49], s[53] );
|
||||
d1[26] = _mm_unpacklo_epi64( s[51], s[55] );
|
||||
d1[27] = _mm_unpackhi_epi64( s[51], s[55] );
|
||||
|
||||
d0[28] = _mm_unpacklo_epi64( s[56], s[60] );
|
||||
d0[29] = _mm_unpackhi_epi64( s[56], s[60] );
|
||||
d1[28] = _mm_unpacklo_epi64( s[58], s[62] );
|
||||
d1[29] = _mm_unpackhi_epi64( s[58], s[62] );
|
||||
d0[30] = _mm_unpacklo_epi64( s[57], s[61] );
|
||||
d0[31] = _mm_unpackhi_epi64( s[57], s[61] );
|
||||
d1[30] = _mm_unpacklo_epi64( s[59], s[63] );
|
||||
d1[31] = _mm_unpackhi_epi64( s[59], s[63] );
|
||||
}
|
||||
|
||||
// 8x64 -> 2x256
|
||||
|
||||
static inline void rintrlv_8x64_2x256( void *dst0, void *dst1, void *dst2,
|
||||
void *dst3, const void *src, const int bit_len )
|
||||
{
|
||||
__m128i *d0 = (__m128i*)dst0;
|
||||
__m128i *d1 = (__m128i*)dst1;
|
||||
__m128i *d2 = (__m128i*)dst2;
|
||||
__m128i *d3 = (__m128i*)dst3;
|
||||
const __m128i* s = (const __m128i*)src;
|
||||
|
||||
d0[ 0] = _mm_unpacklo_epi64( s[ 0], s[ 4] );
|
||||
d1[ 0] = _mm_unpackhi_epi64( s[ 0], s[ 4] );
|
||||
d2[ 0] = _mm_unpacklo_epi64( s[ 1], s[ 5] );
|
||||
d3[ 0] = _mm_unpackhi_epi64( s[ 1], s[ 5] );
|
||||
d0[ 1] = _mm_unpacklo_epi64( s[ 2], s[ 6] );
|
||||
d1[ 1] = _mm_unpackhi_epi64( s[ 2], s[ 6] );
|
||||
d2[ 1] = _mm_unpacklo_epi64( s[ 3], s[ 7] );
|
||||
d3[ 1] = _mm_unpackhi_epi64( s[ 3], s[ 7] );
|
||||
|
||||
d0[ 2] = _mm_unpacklo_epi64( s[ 8], s[12] );
|
||||
d1[ 2] = _mm_unpackhi_epi64( s[ 8], s[12] );
|
||||
d2[ 2] = _mm_unpacklo_epi64( s[ 9], s[13] );
|
||||
d3[ 2] = _mm_unpackhi_epi64( s[ 9], s[13] );
|
||||
d0[ 3] = _mm_unpacklo_epi64( s[10], s[14] );
|
||||
d1[ 3] = _mm_unpackhi_epi64( s[10], s[14] );
|
||||
d2[ 3] = _mm_unpacklo_epi64( s[11], s[15] );
|
||||
d3[ 3] = _mm_unpackhi_epi64( s[11], s[15] );
|
||||
|
||||
if ( bit_len <= 256 ) return;
|
||||
|
||||
d0[ 4] = _mm_unpacklo_epi64( s[16], s[20] );
|
||||
d1[ 4] = _mm_unpackhi_epi64( s[16], s[20] );
|
||||
d2[ 4] = _mm_unpacklo_epi64( s[17], s[21] );
|
||||
d3[ 4] = _mm_unpackhi_epi64( s[17], s[21] );
|
||||
d0[ 5] = _mm_unpacklo_epi64( s[18], s[22] );
|
||||
d1[ 5] = _mm_unpackhi_epi64( s[18], s[22] );
|
||||
d2[ 5] = _mm_unpacklo_epi64( s[19], s[23] );
|
||||
d3[ 5] = _mm_unpackhi_epi64( s[19], s[23] );
|
||||
|
||||
d0[ 6] = _mm_unpacklo_epi64( s[24], s[28] );
|
||||
d1[ 6] = _mm_unpackhi_epi64( s[24], s[28] );
|
||||
d2[ 6] = _mm_unpacklo_epi64( s[25], s[29] );
|
||||
d3[ 6] = _mm_unpackhi_epi64( s[25], s[29] );
|
||||
d0[ 7] = _mm_unpacklo_epi64( s[26], s[30] );
|
||||
d1[ 7] = _mm_unpackhi_epi64( s[26], s[30] );
|
||||
d2[ 7] = _mm_unpacklo_epi64( s[27], s[31] );
|
||||
d3[ 7] = _mm_unpackhi_epi64( s[27], s[31] );
|
||||
|
||||
if ( bit_len <= 512 ) return;
|
||||
|
||||
d0[ 8] = _mm_unpacklo_epi64( s[32], s[36] );
|
||||
d1[ 8] = _mm_unpackhi_epi64( s[32], s[36] );
|
||||
d2[ 8] = _mm_unpacklo_epi64( s[33], s[37] );
|
||||
d3[ 8] = _mm_unpackhi_epi64( s[33], s[37] );
|
||||
d0[ 9] = _mm_unpacklo_epi64( s[34], s[38] );
|
||||
d1[ 9] = _mm_unpackhi_epi64( s[34], s[38] );
|
||||
d2[ 9] = _mm_unpacklo_epi64( s[35], s[39] );
|
||||
d3[ 9] = _mm_unpackhi_epi64( s[35], s[39] );
|
||||
|
||||
d0[10] = _mm_unpacklo_epi64( s[40], s[44] );
|
||||
d1[10] = _mm_unpackhi_epi64( s[40], s[44] );
|
||||
d2[10] = _mm_unpacklo_epi64( s[41], s[45] );
|
||||
d3[10] = _mm_unpackhi_epi64( s[41], s[45] );
|
||||
d0[11] = _mm_unpacklo_epi64( s[42], s[46] );
|
||||
d1[11] = _mm_unpackhi_epi64( s[42], s[46] );
|
||||
d2[11] = _mm_unpacklo_epi64( s[43], s[47] );
|
||||
d3[11] = _mm_unpackhi_epi64( s[43], s[47] );
|
||||
|
||||
d0[12] = _mm_unpacklo_epi64( s[48], s[52] );
|
||||
d1[12] = _mm_unpackhi_epi64( s[48], s[52] );
|
||||
d2[12] = _mm_unpacklo_epi64( s[49], s[53] );
|
||||
d3[12] = _mm_unpackhi_epi64( s[49], s[53] );
|
||||
d0[13] = _mm_unpacklo_epi64( s[50], s[54] );
|
||||
d1[13] = _mm_unpackhi_epi64( s[50], s[54] );
|
||||
d2[13] = _mm_unpacklo_epi64( s[51], s[55] );
|
||||
d3[13] = _mm_unpackhi_epi64( s[51], s[55] );
|
||||
|
||||
d0[14] = _mm_unpacklo_epi64( s[56], s[60] );
|
||||
d1[14] = _mm_unpackhi_epi64( s[56], s[60] );
|
||||
d2[14] = _mm_unpacklo_epi64( s[57], s[61] );
|
||||
d3[14] = _mm_unpackhi_epi64( s[57], s[61] );
|
||||
d0[15] = _mm_unpacklo_epi64( s[58], s[62] );
|
||||
d1[15] = _mm_unpackhi_epi64( s[58], s[62] );
|
||||
d2[15] = _mm_unpacklo_epi64( s[59], s[63] );
|
||||
d3[15] = _mm_unpackhi_epi64( s[59], s[63] );
|
||||
}
|
||||
|
||||
// 2x256 -> 8x64
|
||||
|
||||
static inline void rintrlv_2x256_8x64( void *dst, const void *src0,
|
||||
const void *src1, const void *src2, const void *src3, const int bit_len )
|
||||
{
|
||||
__m128i *d = (__m128i*)dst;
|
||||
   const __m128i *s0 = (const __m128i*)src0;
   const __m128i *s1 = (const __m128i*)src1;
   const __m128i *s2 = (const __m128i*)src2;
   const __m128i *s3 = (const __m128i*)src3;
|
||||
|
||||
d[ 0] = _mm_unpacklo_epi64( s0[0], s0[2] );
|
||||
d[ 1] = _mm_unpacklo_epi64( s1[0], s1[2] );
|
||||
d[ 2] = _mm_unpacklo_epi64( s2[0], s2[2] );
|
||||
d[ 3] = _mm_unpacklo_epi64( s3[0], s3[2] );
|
||||
d[ 4] = _mm_unpackhi_epi64( s0[0], s0[2] );
|
||||
d[ 5] = _mm_unpackhi_epi64( s1[0], s1[2] );
|
||||
d[ 6] = _mm_unpackhi_epi64( s2[0], s2[2] );
|
||||
d[ 7] = _mm_unpackhi_epi64( s3[0], s3[2] );
|
||||
|
||||
d[ 8] = _mm_unpacklo_epi64( s0[1], s0[3] );
|
||||
d[ 9] = _mm_unpacklo_epi64( s1[1], s1[3] );
|
||||
d[10] = _mm_unpacklo_epi64( s2[1], s2[3] );
|
||||
d[11] = _mm_unpacklo_epi64( s3[1], s3[3] );
|
||||
d[12] = _mm_unpackhi_epi64( s0[1], s0[3] );
|
||||
d[13] = _mm_unpackhi_epi64( s1[1], s1[3] );
|
||||
d[14] = _mm_unpackhi_epi64( s2[1], s2[3] );
|
||||
d[15] = _mm_unpackhi_epi64( s3[1], s3[3] );
|
||||
|
||||
if ( bit_len <= 256 ) return;
|
||||
|
||||
d[16] = _mm_unpacklo_epi64( s0[4], s0[6] );
|
||||
d[17] = _mm_unpacklo_epi64( s1[4], s1[6] );
|
||||
d[18] = _mm_unpacklo_epi64( s2[4], s2[6] );
|
||||
d[19] = _mm_unpacklo_epi64( s3[4], s3[6] );
|
||||
d[20] = _mm_unpackhi_epi64( s0[4], s0[6] );
|
||||
d[21] = _mm_unpackhi_epi64( s1[4], s1[6] );
|
||||
d[22] = _mm_unpackhi_epi64( s2[4], s2[6] );
|
||||
d[23] = _mm_unpackhi_epi64( s3[4], s3[6] );
|
||||
|
||||
d[24] = _mm_unpacklo_epi64( s0[5], s0[7] );
|
||||
d[25] = _mm_unpacklo_epi64( s1[5], s1[7] );
|
||||
d[26] = _mm_unpacklo_epi64( s2[5], s2[7] );
|
||||
d[27] = _mm_unpacklo_epi64( s3[5], s3[7] );
|
||||
d[28] = _mm_unpackhi_epi64( s0[5], s0[7] );
|
||||
d[29] = _mm_unpackhi_epi64( s1[5], s1[7] );
|
||||
d[30] = _mm_unpackhi_epi64( s2[5], s2[7] );
|
||||
d[31] = _mm_unpackhi_epi64( s3[5], s3[7] );
|
||||
|
||||
if ( bit_len <= 512 ) return;
|
||||
|
||||
d[32] = _mm_unpacklo_epi64( s0[8], s0[10] );
|
||||
d[33] = _mm_unpacklo_epi64( s1[8], s1[10] );
|
||||
d[34] = _mm_unpacklo_epi64( s2[8], s2[10] );
|
||||
d[35] = _mm_unpacklo_epi64( s3[8], s3[10] );
|
||||
d[36] = _mm_unpackhi_epi64( s0[8], s0[10] );
|
||||
d[37] = _mm_unpackhi_epi64( s1[8], s1[10] );
|
||||
d[38] = _mm_unpackhi_epi64( s2[8], s2[10] );
|
||||
d[39] = _mm_unpackhi_epi64( s3[8], s3[10] );
|
||||
|
||||
d[40] = _mm_unpacklo_epi64( s0[9], s0[11] );
|
||||
d[41] = _mm_unpacklo_epi64( s1[9], s1[11] );
|
||||
d[42] = _mm_unpacklo_epi64( s2[9], s2[11] );
|
||||
d[43] = _mm_unpacklo_epi64( s3[9], s3[11] );
|
||||
d[44] = _mm_unpackhi_epi64( s0[9], s0[11] );
|
||||
d[45] = _mm_unpackhi_epi64( s1[9], s1[11] );
|
||||
d[46] = _mm_unpackhi_epi64( s2[9], s2[11] );
|
||||
d[47] = _mm_unpackhi_epi64( s3[9], s3[11] );
|
||||
|
||||
d[48] = _mm_unpacklo_epi64( s0[12], s0[14] );
|
||||
d[49] = _mm_unpacklo_epi64( s1[12], s1[14] );
|
||||
d[50] = _mm_unpacklo_epi64( s2[12], s2[14] );
|
||||
d[51] = _mm_unpacklo_epi64( s3[12], s3[14] );
|
||||
d[52] = _mm_unpackhi_epi64( s0[12], s0[14] );
|
||||
d[53] = _mm_unpackhi_epi64( s1[12], s1[14] );
|
||||
d[54] = _mm_unpackhi_epi64( s2[12], s2[14] );
|
||||
d[55] = _mm_unpackhi_epi64( s3[12], s3[14] );
|
||||
|
||||
d[56] = _mm_unpacklo_epi64( s0[13], s0[15] );
|
||||
d[57] = _mm_unpacklo_epi64( s1[13], s1[15] );
|
||||
d[58] = _mm_unpacklo_epi64( s2[13], s2[15] );
|
||||
d[59] = _mm_unpacklo_epi64( s3[13], s3[15] );
|
||||
d[60] = _mm_unpackhi_epi64( s0[13], s0[15] );
|
||||
d[61] = _mm_unpackhi_epi64( s1[13], s1[15] );
|
||||
d[62] = _mm_unpackhi_epi64( s2[13], s2[15] );
|
||||
d[63] = _mm_unpackhi_epi64( s3[13], s3[15] );
|
||||
}
//
// Some functions customized for mining.

@@ -252,7 +252,6 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
|
||||
|
||||
#else
|
||||
|
||||
|
||||
#define mm128_ror_64 mm128_ror_var_64
|
||||
#define mm128_rol_64 mm128_rol_var_64
|
||||
#define mm128_ror_32 mm128_ror_var_32
|
||||
@@ -274,6 +273,15 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
|
||||
#define mm128_ror_1x32( v ) _mm_shuffle_epi32( v, 0x39 )
|
||||
#define mm128_rol_1x32( v ) _mm_shuffle_epi32( v, 0x93 )
|
||||
|
||||
// Rotate 16 byte (128 bit) vector by c bytes.
|
||||
// Less efficient using shift but more versatile. Use only for odd number
|
||||
// byte rotations. Use shuffle above whenever possible.
|
||||
#define mm128_ror_x8( v, c ) \
|
||||
_mm_or_si128( _mm_srli_si128( v, c ), _mm_slli_si128( v, 16-(c) ) )
|
||||
|
||||
#define mm128_rol_x8( v, c ) \
|
||||
_mm_or_si128( _mm_slli_si128( v, c ), _mm_srli_si128( v, 16-(c) ) )
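
// Illustrative example (assumed usage): the byte-rotate macros above need a
// compile time constant count because _mm_srli_si128 / _mm_slli_si128 take
// an immediate operand.
static inline __m128i example_ror_3_bytes( const __m128i v )
{
   return mm128_ror_x8( v, 3 );   // rotate the 128 bit vector right 3 bytes
}
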
#if defined (__SSE3__)
|
||||
// no SSE2 implementation, no current users
|
||||
|
||||
@@ -289,17 +297,21 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
|
||||
#define mm128_rol_1x8( v ) \
|
||||
_mm_shuffle_epi8( v, m128_const_64( 0x0e0d0c0b0a090807, \
|
||||
0x060504030201000f ) )
|
||||
#endif // SSE3
|
||||
#else // SSE2
|
||||
|
||||
// Rotate 16 byte (128 bit) vector by c bytes.
|
||||
// Less efficient using shift but more versatile. Use only for odd number
|
||||
// byte rotations. Use shuffle above whenever possible.
|
||||
#define mm128_bror( v, c ) \
|
||||
_mm_or_si128( _mm_srli_si128( v, c ), _mm_slli_si128( v, 16-(c) ) )
|
||||
#define mm128_ror_1x16( v ) \
|
||||
_mm_or_si128( _mm_srli_si128( v, 2 ), _mm_slli_si128( v, 14 ) )
|
||||
|
||||
#define mm128_brol( v, c ) \
|
||||
_mm_or_si128( _mm_slli_si128( v, c ), _mm_srli_si128( v, 16-(c) ) )
|
||||
#define mm128_rol_1x16( v ) \
|
||||
_mm_or_si128( _mm_slli_si128( v, 2 ), _mm_srli_si128( v, 14 ) )
|
||||
|
||||
#define mm128_ror_1x8( v ) \
|
||||
_mm_or_si128( _mm_srli_si128( v, 1 ), _mm_slli_si128( v, 15 ) )
|
||||
|
||||
#define mm128_rol_1x8( v ) \
|
||||
_mm_or_si128( _mm_slli_si128( v, 1 ), _mm_srli_si128( v, 15 ) )
|
||||
|
||||
#endif // SSE3 else SSE2
|
||||
|
||||
// Invert vector: {3,2,1,0} -> {0,1,2,3}
|
||||
#define mm128_invert_32( v ) _mm_shuffle_epi32( v, 0x1b )
|
||||
@@ -319,19 +331,24 @@ static inline void memcpy_128( __m128i *dst, const __m128i *src, const int n )
|
||||
//
|
||||
// Rotate elements within lanes.
|
||||
|
||||
#define mm128_swap32_64( v ) _mm_shuffle_epi32( v, 0xb1 )
|
||||
#define mm128_swap_64_32( v ) _mm_shuffle_epi32( v, 0xb1 )
|
||||
|
||||
#define mm128_ror16_64( v ) \
   _mm_shuffle_epi8( v, m128_const_64( 0x09080f0e0d0c0b0a, \
                                       0x0100070605040302 ) )
#define mm128_rol64_8( v, c ) \
   _mm_or_si128( _mm_slli_epi64( v, ( (c)<<3 ) ), \
                 _mm_srli_epi64( v, 64 - ( (c)<<3 ) ) )

#define mm128_rol16_64( v ) \
   _mm_shuffle_epi8( v, m128_const_64( 0x0d0c0b0a09080f0e, \
                                       0x0504030201000706 ) )
#define mm128_ror64_8( v, c ) \
   _mm_or_si128( _mm_srli_epi64( v, ( (c)<<3 ) ), \
                 _mm_slli_epi64( v, 64 - ( (c)<<3 ) ) )

#define mm128_swap16_32( v ) \
   _mm_shuffle_epi8( v, m128_const_64( 0x0d0c0f0e09080b0a, \
                                       0x0504070601000302 ) )
#define mm128_rol32_8( v, c ) \
   _mm_or_si128( _mm_slli_epi32( v, ( (c)<<3 ) ), \
                 _mm_srli_epi32( v, 32 - ( (c)<<3 ) ) )

#define mm128_ror32_8( v, c ) \
   _mm_or_si128( _mm_srli_epi32( v, ( (c)<<3 ) ), \
                 _mm_slli_epi32( v, 32 - ( (c)<<3 ) ) )
|
||||
|
||||
|
||||
//
|
||||
// Endian byte swap.
|
||||
@@ -431,64 +448,65 @@ static inline void mm128_block_bswap_32( __m128i *d, const __m128i *s )
|
||||
|
||||
// Swap 128 bit vectors.
|
||||
|
||||
#define mm128_swap128_256( v1, v2 ) \
|
||||
#define mm128_swap256_128( v1, v2 ) \
|
||||
v1 = _mm_xor_si128( v1, v2 ); \
|
||||
v2 = _mm_xor_si128( v1, v2 ); \
|
||||
v1 = _mm_xor_si128( v1, v2 );
|
||||
|
||||
|
||||
// Concatenate v1 & v2 and rotate as one 256 bit vector.
|
||||
#if defined(__SSE4_1__)
|
||||
|
||||
#define mm128_ror1x64_256( v1, v2 ) \
|
||||
#define mm128_ror256_64( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_alignr_epi8( v1, v2, 8 ); \
|
||||
v1 = _mm_alignr_epi8( v2, v1, 8 ); \
|
||||
v2 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm128_rol1x64_256( v1, v2 ) \
|
||||
#define mm128_rol256_64( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_alignr_epi8( v1, v2, 8 ); \
|
||||
v2 = _mm_alignr_epi8( v2, v1, 8 ); \
|
||||
v1 = t; \
|
||||
} while(0)
|
|
||||
#define mm128_ror256_32( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_alignr_epi8( v1, v2, 4 ); \
|
||||
v1 = _mm_alignr_epi8( v2, v1, 4 ); \
|
||||
v2 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm128_rol1x32_256( v1, v2 ) \
|
||||
#define mm128_rol256_32( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_alignr_epi8( v1, v2, 12 ); \
|
||||
v2 = _mm_alignr_epi8( v2, v1, 12 ); \
|
||||
v1 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm128_ror1x16_256( v1, v2 ) \
|
||||
#define mm128_ror256_16( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_alignr_epi8( v1, v2, 2 ); \
|
||||
v1 = _mm_alignr_epi8( v2, v1, 2 ); \
|
||||
v2 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm128_rol1x16_256( v1, v2 ) \
|
||||
#define mm128_rol256_16( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_alignr_epi8( v1, v2, 14 ); \
|
||||
v2 = _mm_alignr_epi8( v2, v1, 14 ); \
|
||||
v1 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm128_ror1x8_256( v1, v2 ) \
|
||||
#define mm128_ror256_8( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_alignr_epi8( v1, v2, 1 ); \
|
||||
v1 = _mm_alignr_epi8( v2, v1, 1 ); \
|
||||
v2 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm128_rol1x8_256( v1, v2 ) \
|
||||
#define mm128_rol256_8( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_alignr_epi8( v1, v2, 15 ); \
|
||||
v2 = _mm_alignr_epi8( v2, v1, 15 ); \
|
||||
@@ -497,7 +515,7 @@ do { \
|
||||
|
||||
#else // SSE2
|
||||
|
||||
#define mm128_ror1x64_256( v1, v2 ) \
|
||||
#define mm128_ror256_64( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_or_si128( _mm_srli_si128( v1, 8 ), \
|
||||
_mm_slli_si128( v2, 8 ) ); \
|
||||
@@ -506,7 +524,7 @@ do { \
|
||||
v1 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm128_rol1x64_256( v1, v2 ) \
|
||||
#define mm128_rol256_64( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_or_si128( _mm_slli_si128( v1, 8 ), \
|
||||
_mm_srli_si128( v2, 8 ) ); \
|
||||
@@ -515,7 +533,7 @@ do { \
|
||||
v1 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm128_ror1x32_256( v1, v2 ) \
|
||||
#define mm128_ror256_32( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_or_si128( _mm_srli_si128( v1, 4 ), \
|
||||
_mm_slli_si128( v2, 12 ) ); \
|
||||
@@ -524,7 +542,7 @@ do { \
|
||||
v1 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm128_rol1x32_256( v1, v2 ) \
|
||||
#define mm128_rol256_32( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_or_si128( _mm_slli_si128( v1, 4 ), \
|
||||
_mm_srli_si128( v2, 12 ) ); \
|
||||
@@ -533,7 +551,7 @@ do { \
|
||||
v1 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm128_ror1x16_256( v1, v2 ) \
|
||||
#define mm128_ror256_16( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_or_si128( _mm_srli_si128( v1, 2 ), \
|
||||
_mm_slli_si128( v2, 14 ) ); \
|
||||
@@ -542,7 +560,7 @@ do { \
|
||||
v1 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm128_rol1x16_256( v1, v2 ) \
|
||||
#define mm128_rol256_16( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_or_si128( _mm_slli_si128( v1, 2 ), \
|
||||
_mm_srli_si128( v2, 14 ) ); \
|
||||
@@ -551,7 +569,7 @@ do { \
|
||||
v1 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm128_ror1x8_256( v1, v2 ) \
|
||||
#define mm128_ror256_8( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_or_si128( _mm_srli_si128( v1, 1 ), \
|
||||
_mm_slli_si128( v2, 15 ) ); \
|
||||
@@ -560,7 +578,7 @@ do { \
|
||||
v1 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm128_rol1x8_256( v1, v2 ) \
|
||||
#define mm128_rol256_8( v1, v2 ) \
|
||||
do { \
|
||||
__m128i t = _mm_or_si128( _mm_slli_si128( v1, 1 ), \
|
||||
_mm_srli_si128( v2, 15 ) ); \
|
||||
|
@@ -414,99 +414,71 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
|
||||
|
||||
|
||||
//
|
||||
// Rotate elements within lanes of 256 bit vector.
|
||||
// Rotate elements within each 128 bit lane of 256 bit vector.
|
||||
|
||||
// Swap 64 bit elements in each 128 bit lane.
|
||||
#define mm256_swap64_128( v ) _mm256_shuffle_epi32( v, 0x4e )
|
||||
#define mm256_swap128_64( v ) _mm256_shuffle_epi32( v, 0x4e )
|
||||
|
||||
// Rotate each 128 bit lane by one 32 bit element.
|
||||
#define mm256_ror1x32_128( v ) _mm256_shuffle_epi32( v, 0x39 )
|
||||
#define mm256_rol1x32_128( v ) _mm256_shuffle_epi32( v, 0x93 )
|
||||
#define mm256_ror128_32( v ) _mm256_shuffle_epi32( v, 0x39 )
|
||||
|
||||
#define mm256_ror1x16_128( v ) \
|
||||
_mm256_shuffle_epi8( v, \
|
||||
m256_const_64( 0x11101f1e1d1c1b1a, 0x1918171615141312, \
|
||||
0x01000f0e0d0c0b0a, 0x0908070605040302 ) )
|
||||
#define mm256_rol128_1x32( v ) _mm256_shuffle_epi32( v, 0x93 )
|
||||
|
||||
#define mm256_rol1x16_128( v ) \
|
||||
_mm256_shuffle_epi8( v, \
|
||||
m256_const_64( 0x1d1c1b1a19181716, 0x1514131211101f1e, \
|
||||
0x0d0c0b0a09080706, 0x0504030201000f0e ) )
|
||||
|
||||
#define mm256_ror1x8_128( v ) \
|
||||
_mm256_shuffle_epi8( v, \
|
||||
m256_const_64( 0x101f1e1d1c1b1a19, 0x1817161514131211, \
|
||||
0x000f0e0d0c0b0a09, 0x0807060504030201 ) )
|
||||
|
||||
#define mm256_rol1x8_128( v ) \
|
||||
_mm256_shuffle_epi8( v, \
|
||||
m256_const_64( 0x1d1c1b1a19181f1e, 0x1514131211101716, \
|
||||
0x0d0c0b0a09080f0e, 0x0504030201000706 ) )
|
||||
|
||||
// Rotate each 128 bit lane by c bytes.
|
||||
#define mm256_bror_128( v, c ) \
|
||||
// Rotate each 128 bit lane by c elements.
|
||||
#define mm256_ror128_8( v, c ) \
|
||||
_mm256_or_si256( _mm256_bsrli_epi128( v, c ), \
|
||||
_mm256_bslli_epi128( v, 16-(c) ) )
|
||||
#define mm256_brol_128( v, c ) \
|
||||
#define mm256_rol128_8( v, c ) \
|
||||
_mm256_or_si256( _mm256_bslli_epi128( v, c ), \
|
||||
_mm256_bsrli_epi128( v, 16-(c) ) )
|
||||
|
||||
// Swap 32 bit elements in each 64 bit lane
|
||||
#define mm256_swap32_64( v ) _mm256_shuffle_epi32( v, 0xb1 )
|
||||
|
||||
// Rotate elements in each 64 bit lane
|
||||
|
||||
#define mm256_swap64_32( v ) _mm256_shuffle_epi32( v, 0xb1 )
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
|
||||
#define mm256_rol1x16_64( v ) _mm256_rol_epi64( v, 16 )
|
||||
#define mm256_ror1x16_64( v ) _mm256_ror_epi64( v, 16 )
|
||||
#define mm256_rol64_8( v, c ) _mm256_rol_epi64( v, ((c)<<3) )
|
||||
#define mm256_ror64_8( v, c ) _mm256_ror_epi64( v, ((c)<<3) )
|
||||
|
||||
#else
|
||||
|
||||
#define mm256_ror1x16_64( v ) \
|
||||
_mm256_shuffle_epi8( v, \
|
||||
m256_const_64( 0x19181f1e1d1c1b1a, 0x1110171615141312, \
|
||||
0x09080f0e0d0c0b0a, 0x0100070605040302 ) )
|
||||
#define mm256_rol64_8( v, c ) \
   _mm256_or_si256( _mm256_slli_epi64( v, ( (c)<<3 ) ), \
                    _mm256_srli_epi64( v, 64 - ( (c)<<3 ) ) )

#define mm256_ror64_8( v, c ) \
   _mm256_or_si256( _mm256_srli_epi64( v, ( (c)<<3 ) ), \
                    _mm256_slli_epi64( v, 64 - ( (c)<<3 ) ) )
|
||||
|
||||
#define mm256_rol1x16_64( v ) \
|
||||
_mm256_shuffle_epi8( v, \
|
||||
m256_const_64( 0x1d1c1b1a19181f1e, 0x1514131211101716, \
|
||||
0x0d0c0b0a09080f0e, 0x0504030201000706 ) )
|
||||
#endif
|
||||
|
||||
#define mm256_ror1x8_64( v ) \
|
||||
_mm256_shuffle_epi8( v, \
|
||||
m256_const_64( 0x181f1e1d1c1b1a19, 0x1017161514131211, \
|
||||
0x080f0e0d0c0b0a09, 0x0007060504030201 ) )
|
||||
|
||||
#define mm256_rol1x8_64( v ) \
|
||||
_mm256_shuffle_epi8( v, \
|
||||
m256_const_64( 0x1e1d1c1b1a19181f, 0x1615141312111017, \
|
||||
0x0e0d0c0b0a09080f, 0x0605040302010007 ) )
|
||||
|
||||
#define mm256_ror3x8_64( v ) \
|
||||
_mm256_shuffle_epi8( v, \
|
||||
m256_const_64( 0x1a19181f1e1d1c1b, 0x1211101716151413, \
|
||||
0x0a09080f0e0d0c0b, 0x0201000706050403 ) )
|
||||
|
||||
#define mm256_rol3x8_64( v ) \
|
||||
_mm256_shuffle_epi8( v, \
|
||||
m256_const_64( 0x1c1b1a19181f1e1d, 0x1413121110171615, \
|
||||
0x0c0b0a09080f0e0d, 0x0403020100070605 ) )
|
||||
|
||||
|
||||
// Swap 16 bit elements in each 32 bit lane
|
||||
// Rotate elements in each 32 bit lane
|
||||
|
||||
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512BW__)
|
||||
|
||||
#define mm256_swap16_32( v ) _mm256_rol_epi32( v, 16 )
|
||||
#define mm256_swap32_16( v ) _mm256_rol_epi32( v, 16 )
|
||||
|
||||
#define mm256_rol32_8( v ) _mm256_rol_epi32( v, 8 )
|
||||
#define mm256_ror32_8( v ) _mm256_ror_epi32( v, 8 )
|
||||
|
||||
#else
|
||||
|
||||
#define mm256_swap16_32( v ) \
|
||||
_mm256_shuffle_epi8( v, \
|
||||
m256_const_64( 0x1b1a19181f1e1d1c, 0x1312111017161514, \
|
||||
0x0b0a09080f0e0d0c, 0x0302010007060504 ) )
|
||||
#define mm256_swap32_16( v ) \
|
||||
_mm256_or_si256( _mm256_slli_epi32( v, 16 ), \
|
||||
_mm256_srli_epi32( v, 16 ) )
|
||||
|
||||
#define mm256_rol32_8( v ) \
|
||||
_mm256_or_si256( _mm256_slli_epi32( v, 8 ), \
|
||||
_mm256_srli_epi32( v, 8 ) )
|
||||
|
||||
#define mm256_ror32_8( v ) \
|
||||
_mm256_or_si256( _mm256_srli_epi32( v, 8 ), \
|
||||
_mm256_slli_epi32( v, 8 ) )
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
//
|
||||
// Swap bytes in vector elements, endian bswap.
|
||||
#define mm256_bswap_64( v ) \
|
||||
@@ -565,19 +537,19 @@ static inline void memcpy_256( __m256i *dst, const __m256i *src, const int n )
|
||||
// _mm256_alignr_epi64/32 are only available with AVX512 but AVX512 also
|
||||
// makes these macros unnecessary.
|
||||
|
||||
#define mm256_swap256_512 (v1, v2) \
|
||||
v1 = _mm256_xor_si256(v1, v2); \
|
||||
v2 = _mm256_xor_si256(v1, v2); \
|
||||
v1 = _mm256_xor_si256(v1, v2);
|
||||
#define mm256_swap512_256( v1, v2 ) \
|
||||
v1 = _mm256_xor_si256( v1, v2 ); \
|
||||
v2 = _mm256_xor_si256( v1, v2 ); \
|
||||
v1 = _mm256_xor_si256( v1, v2 );
|
||||
|
||||
#define mm256_ror1x128_512( v1, v2 ) \
|
||||
#define mm256_ror512_128( v1, v2 ) \
|
||||
do { \
|
||||
   __m256i t = _mm256_permute2x128_si256( v1, v2, 0x03 ); \
   v1 = _mm256_permute2x128_si256( v2, v1, 0x21 ); \
|
||||
v2 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm256_rol1x128_512( v1, v2 ) \
|
||||
#define mm256_rol512_128( v1, v2 ) \
|
||||
do { \
|
||||
   __m256i t = _mm256_permute2x128_si256( v1, v2, 0x03 ); \
   v2 = _mm256_permute2x128_si256( v2, v1, 0x21 ); \
|
||||
|
@@ -15,13 +15,13 @@
|
||||
|
||||
// AVX512 intrinsics have a few changes from previous conventions.
|
||||
//
|
||||
// Some instructions like cmp and blend use the mask registers now instead
// of a vector mask.
// cmp instruction now returns a bitmask instead of a vector mask.
|
||||
// This eliminates the need for the blendv instruction.
|
||||
//
|
||||
// The new rotate instructions require the count to be only an 8 bit
|
||||
// immediate value. The documentation is the same as for shift and
|
||||
// it allows variables. Suspect a compiler issue but it still happens
|
||||
// in GCC9.
|
||||
// The new rotate instructions require the count to be an 8 bit
|
||||
// immediate value only. Compilation fails if a variable is used.
|
||||
// The documentation is the same as for shift and it works with
|
||||
// variables.
|
||||
//
|
||||
// _mm512_permutex_epi64 only shuffles within 256 bit lanes. Permute
// usually shuffles across all lanes.
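
// Illustrative example (assumed usage): with the rotate count given as an
// immediate constant, the AVX512 rotate instructions can be used directly,
// in line with the note above.
static inline __m512i example_ror64_by_8bits( const __m512i v )
{
   return _mm512_ror_epi64( v, 8 );   // rotate each 64 bit lane right 8 bits
}
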
@@ -109,6 +109,11 @@ static inline __m512i m512_const_64( const uint64_t i7, const uint64_t i6,
|
||||
#define m512_const2_64( i1, i0 ) \
|
||||
m512_const1_128( m128_const_64( i1, i0 ) )
|
||||
|
||||
#define m512_const2_32( i1, i0 ) \
|
||||
m512_const1_64( ( ( ( (uint64_t)(i1) << 32 ) ) \
|
||||
| ( (uint64_t)(i0) & 0xffffffff ) ) )
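
// Illustrative example (arbitrary values): build a vector with the 64 bit
// pattern { i1, i0 } repeated in every 64 bit element.
static inline __m512i example_const2_32( void )
{
   return m512_const2_32( 0x66778899, 0xaabbccdd );
}
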
static inline __m512i m512_const4_64( const uint64_t i3, const uint64_t i2,
|
||||
const uint64_t i1, const uint64_t i0 )
|
||||
{
|
||||
@@ -265,7 +270,7 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
|
||||
m512_const_64( 0x38393a3b3c3d3e3f, 0x3031323334353637, \
|
||||
0x28292a2b2c2d2e2f, 0x2021222324252627, \
|
||||
0x18191a1b1c1d1e1f, 0x1011121314151617, \
|
||||
0x08090a0b0c0d0e0f, 0x0001020304050607 ))
|
||||
0x08090a0b0c0d0e0f, 0x0001020304050607 ) )
|
||||
|
||||
#define mm512_bswap_32( v ) \
|
||||
_mm512_shuffle_epi8( v, \
|
||||
@@ -304,8 +309,8 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
|
||||
{ \
|
||||
__m512i ctl = m512_const_64( 0x3c3d3e3f38393a3b, 0x3435363730313233, \
|
||||
0x2c2d2e2f28292a2b, 0x2425262720212223, \
|
||||
0x0c0d0e0f08090a0b, 0x0405060700010203, \
|
||||
0x1c1d1e1f18191a1b, 0x1415161710111213 ); \
|
||||
0x1c1d1e1f18191a1b, 0x1415161710111213, \
|
||||
0x0c0d0e0f08090a0b, 0x0405060700010203 ); \
|
||||
casti_m512i( d, 0 ) = _mm512_shuffle_epi8( casti_m512i( s, 0 ), ctl ); \
|
||||
casti_m512i( d, 1 ) = _mm512_shuffle_epi8( casti_m512i( s, 1 ), ctl ); \
|
||||
casti_m512i( d, 2 ) = _mm512_shuffle_epi8( casti_m512i( s, 2 ), ctl ); \
|
||||
@@ -320,8 +325,10 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
|
||||
//
|
||||
// Rotate elements in 512 bit vector.
|
||||
|
||||
|
||||
#define mm512_swap_256( v ) _mm512_alignr_epi64( v, v, 4 )
|
||||
|
||||
// 1x64 notation used to distinguish from bit rotation.
|
||||
#define mm512_ror_1x128( v ) _mm512_alignr_epi64( v, v, 2 )
|
||||
#define mm512_rol_1x128( v ) _mm512_alignr_epi64( v, v, 6 )
|
||||
|
||||
@@ -401,51 +408,58 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
|
||||
//
|
||||
// Rotate elements within 256 bit lanes of 512 bit vector.
|
||||
|
||||
// Rename these for consistency. Element size is always last.
|
||||
// mm<vectorsize>_<op><lanesize>_<elementsize>
|
||||
|
||||
|
||||
// Swap hi & lo 128 bits in each 256 bit lane
|
||||
#define mm512_swap128_256( v ) _mm512_permutex_epi64( v, 0x4e )
|
||||
|
||||
#define mm512_swap256_128( v ) _mm512_permutex_epi64( v, 0x4e )
|
||||
|
||||
// Rotate 256 bit lanes by one 64 bit element
|
||||
#define mm512_ror1x64_256( v ) _mm512_permutex_epi64( v, 0x39 )
|
||||
#define mm512_rol1x64_256( v ) _mm512_permutex_epi64( v, 0x93 )
|
||||
|
||||
#define mm512_ror256_64( v ) _mm512_permutex_epi64( v, 0x39 )
|
||||
#define mm512_rol256_64( v ) _mm512_permutex_epi64( v, 0x93 )
|
||||
|
||||
|
||||
// Rotate 256 bit lanes by one 32 bit element
|
||||
#define mm512_ror1x32_256( v ) \
|
||||
|
||||
#define mm512_ror256_32( v ) \
|
||||
_mm512_permutexvar_epi32( m512_const_64( \
|
||||
0x000000080000000f, 0x0000000e0000000d, \
|
||||
0x0000000c0000000b, 0x0000000a00000009, \
|
||||
0x0000000000000007, 0x0000000600000005, \
|
||||
0x0000000400000003, 0x0000000200000001 ), v )
|
||||
|
||||
#define mm512_rol1x32_256( v ) \
|
||||
#define mm512_rol256_32( v ) \
|
||||
_mm512_permutexvar_epi32( m512_const_64( \
|
||||
0x0000000e0000000d, 0x0000000c0000000b, \
|
||||
0x0000000a00000009, 0x000000080000000f, \
|
||||
0x0000000600000005, 0x0000000400000003, \
|
||||
0x0000000200000001, 0x0000000000000007 ), v )
|
||||
|
||||
#define mm512_ror1x16_256( v ) \
|
||||
#define mm512_ror256_16( v ) \
|
||||
_mm512_permutexvar_epi16( m512_const_64( \
|
||||
0x0010001f001e001d, 0x001c001b001a0019, \
|
||||
0x0018001700160015, 0x0014001300120011, \
|
||||
0x0000000f000e000d, 0x000c000b000a0009, \
|
||||
0x0008000700060005, 0x0004000300020001 ), v )
|
||||
|
||||
#define mm512_rol1x16_256( v ) \
|
||||
#define mm512_rol256_16( v ) \
|
||||
_mm512_permutexvar_epi16( m512_const_64( \
|
||||
0x001e001d001c001b, 0x001a001900180017, \
|
||||
0x0016001500140013, 0x001200110010001f, \
|
||||
0x000e000d000c000b, 0x000a000900080007, \
|
||||
0x0006000500040003, 0x000200010000000f ), v )
|
||||
|
||||
#define mm512_ror1x8_256( v ) \
|
||||
#define mm512_ror256_8( v ) \
|
||||
_mm512_shuffle_epi8( v, m512_const_64( \
|
||||
0x203f3e3d3c3b3a39, 0x3837363534333231, \
|
||||
0x302f2e2d2c2b2a29, 0x2827262524232221, \
|
||||
0x001f1e1d1c1b1a19, 0x1817161514131211, \
|
||||
0x100f0e0d0c0b0a09, 0x0807060504030201 ), v )
|
||||
|
||||
#define mm512_rol1x8_256( v ) \
|
||||
#define mm512_rol256_8( v ) \
|
||||
_mm512_shuffle_epi8( v, m512_const_64( \
|
||||
0x3e3d3c3b3a393837, 0x363534333231302f, \
|
||||
0x2e2d2c2b2a292827, 0x262524232221203f, \
|
||||
@@ -456,45 +470,19 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
|
||||
// Rotate elements within 128 bit lanes of 512 bit vector.
|
||||
|
||||
// Swap hi & lo 64 bits in each 128 bit lane
|
||||
#define mm512_swap64_128( v ) _mm512_shuffle_epi32( v, 0x4e )
|
||||
#define mm512_swap128_64( v ) _mm512_shuffle_epi32( v, 0x4e )
|
||||
|
||||
// Rotate 128 bit lanes by one 32 bit element
|
||||
#define mm512_ror1x32_128( v ) _mm512_shuffle_epi32( v, 0x39 )
|
||||
#define mm512_rol1x32_128( v ) _mm512_shuffle_epi32( v, 0x93 )
|
||||
#define mm512_ror128_32( v ) _mm512_shuffle_epi32( v, 0x39 )
|
||||
#define mm512_rol128_32( v ) _mm512_shuffle_epi32( v, 0x93 )
|
||||
|
||||
#define mm512_ror1x16_128( v ) \
|
||||
_mm512_permutexvar_epi16( m512_const_64( \
|
||||
0x0018001f001e001d, 0x001c001b001a0019, \
|
||||
0x0010001700160015, 0x0014001300120011, \
|
||||
0x0008000f000e000d, 0x000c000b000a0009, \
|
||||
0x0000000700060005, 0x0004000300020001 ), v )
|
||||
|
||||
#define mm512_rol1x16_128( v ) \
|
||||
_mm512_permutexvar_epi16( m512_const_64( \
|
||||
0x001e001d001c001b, 0x001a00190018001f, \
|
||||
0x0016001500140013, 0x0012001100100017, \
|
||||
0x000e000d000c000b, 0x000a00090008000f, \
|
||||
0x0006000500040003, 0x0002000100000007 ), v )
|
||||
|
||||
#define mm512_ror1x8_128( v ) \
|
||||
_mm512_shuffle_epi8( v, m512_const_64( \
|
||||
0x303f3e3d3c3b3a39, 0x3837363534333231, \
|
||||
0x202f2e2d2c2b2a29, 0x2827262524232221, \
|
||||
0x101f1e1d1c1b1a19, 0x1817161514131211, \
|
||||
0x000f0e0d0c0b0a09, 0x0807060504030201 ) )
|
||||
|
||||
#define mm512_rol1x8_128( v ) \
|
||||
_mm512_shuffle_epi8( v, m512_const_64( \
|
||||
0x3e3d3c3b3a393837, 0x363534333231303f, \
|
||||
0x2e2d2c2b2a292827, 0x262524232221202f, \
|
||||
0x1e1d1c1b1a191817, 0x161514131211101f, \
|
||||
0x0e0d0c0b0a090807, 0x060504030201000f ) )
|
||||
|
||||
// Rotate 128 bit lanes by c bytes.
|
||||
#define mm512_bror_128( v, c ) \
|
||||
// Rotate 128 bit lanes by c bytes, faster than building that monstrous
|
||||
// constant above.
|
||||
#define mm512_ror128_8( v, c ) \
|
||||
_mm512_or_si512( _mm512_bsrli_epi128( v, c ), \
|
||||
_mm512_bslli_epi128( v, 16-(c) ) )
|
||||
#define mm512_brol_128( v, c ) \
|
||||
#define mm512_rol128_8( v, c ) \
|
||||
_mm512_or_si512( _mm512_bslli_epi128( v, c ), \
|
||||
_mm512_bsrli_epi128( v, 16-(c) ) )
|
||||
|
||||
@@ -502,75 +490,23 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
|
||||
//
|
||||
// Rotate elements within 64 bit lanes.
|
||||
|
||||
#define mm512_rol64_x8( v, c ) _mm512_rol_epi64( v, ((c)<<3) )
|
||||
#define mm512_ror64_x8( v, c ) _mm512_ror_epi64( v, ((c)<<3) )
|
||||
|
||||
// Swap 32 bit elements in each 64 bit lane
|
||||
#define mm512_swap32_64( v ) _mm512_shuffle_epi32( v, 0xb1 )
|
||||
#define mm512_swap64_32( v ) _mm512_shuffle_epi32( v, 0xb1 )
|
||||
|
||||
// Rotate each 64 bit lane by one 16 bit element.
|
||||
#define mm512_ror1x16_64( v ) _mm512_ror_epi64( v, 16 )
|
||||
#define mm512_rol1x16_64( v ) _mm512_rol_epi64( v, 16 )
|
||||
#define mm512_ror1x8_64( v ) _mm512_ror_epi64( v, 8 )
|
||||
#define mm512_rol1x8_64( v ) _mm512_rol_epi64( v, 8 )
|
||||
|
||||
/*
|
||||
#define mm512_ror1x16_64( v ) \
|
||||
_mm512_permutexvar_epi16( m512_const_64( \
|
||||
0x001c001f001e001d, 0x0018001b001a0019, \
|
||||
0x0014001700160015, 0x0010001300120011, \
|
||||
0x000c000f000e000d, 0x0008000b000a0009, \
|
||||
0x0004000700060005, 0x0000000300020001, v )
|
||||
|
||||
#define mm512_rol1x16_64( v ) \
|
||||
_mm512_permutexvar_epi16( m512_const_64( \
|
||||
0x001e001d001c001f, 0x001a00190018001b, \
|
||||
0x0016001500140017, 0x0012001100100013, \
|
||||
0x000e000d000c000f, 0x000a00090008000b, \
|
||||
0x0006000500040007, 0x0002000100000003, v )
|
||||
|
||||
// Rotate each 64 bit lane by one byte.
|
||||
#define mm512_ror1x8_64( v ) \
|
||||
_mm512_shuffle_epi8( v, m512_const_64( \
|
||||
0x383F3E3D3C3B3A39, 0x3037363534333231, \
|
||||
0x282F2E2D2C2B2A29, 0x2027262524232221, \
|
||||
0x181F1E1D1C1B1A19, 0x1017161514131211, \
|
||||
0x080F0E0D0C0B0A09, 0x0007060504030201 ) )
|
||||
#define mm512_rol1x8_64( v ) \
|
||||
_mm512_shuffle( v, m512_const_64( \
|
||||
0x3E3D3C3B3A39383F, 0x3635343332313037, \
|
||||
0x2E2D2C2B2A29282F, 0x2625242322212027, \
|
||||
0x1E1D1C1B1A19181F, 0x1615141312111017, \
|
||||
0x0E0D0C0B0A09080F, 0x0605040302010007 ) )
|
||||
*/
|
||||
#define mm512_ror64_16( v ) _mm512_ror_epi64( v, 16 )
|
||||
#define mm512_rol64_16( v ) _mm512_rol_epi64( v, 16 )
|
||||
#define mm512_ror64_8( v ) _mm512_ror_epi64( v, 8 )
|
||||
#define mm512_rol64_8( v ) _mm512_rol_epi64( v, 8 )
|
||||
|
||||
//
|
||||
// Rotate elements within 32 bit lanes.
|
||||
|
||||
#define mm512_swap16_32( v ) _mm512_ror_epi32( v, 16 )
|
||||
#define mm512_ror1x8_32( v ) _mm512_ror_epi32( v, 8 )
|
||||
#define mm512_rol1x8_32( v ) _mm512_rol_epi32( v, 8 )
|
||||
|
||||
/*
|
||||
#define mm512_swap16_32( v ) \
|
||||
_mm512_permutexvar_epi16( m512_const_64( \
|
||||
0x001e001f001c001d, 0x001a001b00180019, \
|
||||
0x0016001700140015, 0x0012001300100011, \
|
||||
0x000e000f000c000d, 0x000a000b00080009, \
|
||||
0x0006000700040005, 0x0002000300000001 ), v )
|
||||
|
||||
#define mm512_ror1x8_32( v ) \
|
||||
_mm512_shuffle_epi8( v, m512_const_64( \
|
||||
0x3C3F3E3D383B3A39, 0x3437363530333231, \
|
||||
0x2C2F2E2D282B2A29, 0x2427262520232221, \
|
||||
0x1C1F1E1D181B1A19, 0x1417161510131211, \
|
||||
0x0C0F0E0D080B0A09, 0x0407060500030201 ))
|
||||
|
||||
#define mm512_rol1x8_32( v ) \
|
||||
_mm512_shuffle_epi8( v, m512_const_64( \
|
||||
0x3E3D3C3F3A39383B, 0x3635343732313033, \
|
||||
0x2E2D2C2F2A29282B, 0x2625242722212023, \
|
||||
0x1E1D1C1F1A19181B, 0x1615141712111013, \
|
||||
0x0E0D0C0F0A09080B, 0x0605040702010003 ) )
|
||||
*/
|
||||
|
||||
#define mm512_rol32_x8( v, c ) _mm512_rol_epi32( v, ((c)<<3) )
#define mm512_ror32_x8( v, c ) _mm512_ror_epi32( v, ((c)<<3) )
|
||||
|
||||
|
||||
//
|
||||
@@ -579,61 +515,61 @@ static inline void memcpy_512( __m512i *dst, const __m512i *src, const int n )
|
||||
// These can all be done with 2 permutex2var instructions but they are
|
||||
// slower than either xor or alignr and require AVX512VBMI.
|
||||
|
||||
#define mm512_swap512_1024(v1, v2) \
|
||||
#define mm512_swap1024_512(v1, v2) \
|
||||
v1 = _mm512_xor_si512(v1, v2); \
|
||||
v2 = _mm512_xor_si512(v1, v2); \
|
||||
v1 = _mm512_xor_si512(v1, v2);
|
||||
|
||||
#define mm512_ror1x256_1024( v1, v2 ) \
|
||||
#define mm512_ror1024_256( v1, v2 ) \
|
||||
do { \
|
||||
__m512i t = _mm512_alignr_epi64( v1, v2, 4 ); \
|
||||
v1 = _mm512_alignr_epi64( v2, v1, 4 ); \
|
||||
v2 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm512_rol1x256_1024( v1, v2 ) \
|
||||
#define mm512_rol1024_256( v1, v2 ) \
|
||||
do { \
|
||||
__m512i t = _mm512_alignr_epi64( v1, v2, 4 ); \
|
||||
v2 = _mm512_alignr_epi64( v2, v1, 4 ); \
|
||||
v1 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm512_ror1x128_1024( v1, v2 ) \
|
||||
#define mm512_ror1024_128( v1, v2 ) \
|
||||
do { \
|
||||
__m512i t = _mm512_alignr_epi64( v1, v2, 2 ); \
|
||||
v1 = _mm512_alignr_epi64( v2, v1, 2 ); \
|
||||
v2 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm512_rol1x128_1024( v1, v2 ) \
|
||||
#define mm512_rol1024_128( v1, v2 ) \
|
||||
do { \
|
||||
__m512i t = _mm512_alignr_epi64( v1, v2, 6 ); \
|
||||
v2 = _mm512_alignr_epi64( v2, v1, 6 ); \
|
||||
v1 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm512_ror1x64_1024( v1, v2 ) \
|
||||
#define mm512_ror1024_64( v1, v2 ) \
|
||||
do { \
|
||||
__m512i t = _mm512_alignr_epi64( v1, v2, 1 ); \
|
||||
v1 = _mm512_alignr_epi64( v2, v1, 1 ); \
|
||||
v2 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm512_rol1x64_1024( v1, v2 ) \
|
||||
#define mm512_rol1024_64( v1, v2 ) \
|
||||
do { \
|
||||
__m512i t = _mm512_alignr_epi64( v1, v2, 7 ); \
|
||||
v2 = _mm512_alignr_epi64( v2, v1, 7 ); \
|
||||
v1 = t; \
|
||||
} while(0)
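
// Illustrative example (arbitrary test values): rotate a 1024 bit quantity
// held in two __m512i registers right by one 64 bit element using the
// macro defined above.
static inline void example_ror1024_64( void )
{
   __m512i v1 = _mm512_set1_epi64( 0x1111111111111111 );
   __m512i v2 = _mm512_set1_epi64( 0x2222222222222222 );
   mm512_ror1024_64( v1, v2 );   // v1 and v2 are modified in place
   (void)v1; (void)v2;
}
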
#define mm512_ror1x32_1024( v1, v2 ) \
|
||||
#define mm512_ror1024_32( v1, v2 ) \
|
||||
do { \
|
||||
__m512i t = _mm512_alignr_epi32( v1, v2, 1 ); \
|
||||
v1 = _mm512_alignr_epi32( v2, v1, 1 ); \
|
||||
v2 = t; \
|
||||
} while(0)
|
||||
|
||||
#define mm512_rol1x32_1024( v1, v2 ) \
|
||||
#define mm512_rol1024_32( v1, v2 ) \
|
||||
do { \
|
||||
__m512i t = _mm512_alignr_epi32( v1, v2, 15 ); \
|
||||
v2 = _mm512_alignr_epi32( v2, v1, 15 ); \