This commit is contained in:
Jay D Dee
2017-02-22 22:47:44 -05:00
parent 4521b324e3
commit 33b1bb5cd4
31 changed files with 1597 additions and 559 deletions

avxdefs.h

@@ -1,4 +1,5 @@
// Some tools to help using AVX and AVX2
// AVX support is required to include this header file, AVX2 optional.
#include <inttypes.h>
#include <immintrin.h>
@@ -15,7 +16,7 @@
typedef union
{
#if defined __AVX2__
#if defined (__AVX2__)
__m256i v256;
#endif
__m128i v128[ 2];
@@ -36,7 +37,8 @@ uint8_t v8 [16];
#if defined (__AVX2__)
// Replacements for vectorized data
// AVX2 replacements for vectorized data
// n = number of __m256i (32 bytes)
inline void memset_zero_m256i( __m256i *dst, int n )
{
@@ -48,68 +50,65 @@ inline void memset_m256i( __m256i *dst, const __m256i a, int n )
for ( int i = 0; i < n; i++ ) dst[i] = a;
}
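
For illustration, a hedged usage sketch of these fill helpers, assuming avxdefs.h is included; the buffer name and size below are made up, not part of this header:

// Hypothetical: clear, then fill, a 256 byte 32-byte-aligned buffer.
static __m256i work_buf[8] __attribute__ ((aligned (32)));

void example_fill( void )
{
   memset_zero_m256i( work_buf, 8 );                      // 8 x 32 bytes = 256 bytes
   memset_m256i( work_buf, _mm256_set1_epi64x( 1 ), 8 );  // every 64 bit lane = 1
}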
// optimized copying, first fit is usually best. If none of these works there
// are __m128i versions or plain memcpy.
// Optimized copying using vectors. For misaligned data or more granularity
// use __m128i versions or plain memcpy as appropriate.
// Fixed size
// Copying fixed size
// multi buffered copy for 64 bytes, the size of a cache line.
// minimum alignment is 32 bytes, optimum for cache is 64.
// Multi buffered copy using __m256i.
// minimum alignment is 32 bytes (__m256i), optimum 64 (cache line).
// src & dst are __m256i*
inline void mcpy64_m256i( __m256i* dst, const __m256i* src )
// Copy 64 bytes (2x__m256i, one cache line), double buffered
inline void mcpy64_m256i( __m256i* dest, const __m256i* srce )
{
const __m256i* dest = dst;
const __m256i* srce = src;
__m256i a = _mm256_load_si256( srce );
__m256i b = _mm256_load_si256( srce + 1 );
_mm256_store_si256( dest, a );
_mm256_store_si256( dest + 1, b );
}
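
A quick sketch of calling the fixed size copy (buffer names are illustrative); both pointers must satisfy the 32 byte minimum alignment:

// Hypothetical: copy one cache line (2 x __m256i = 64 bytes).
static __m256i line_src[2] __attribute__ ((aligned (64)));
static __m256i line_dst[2] __attribute__ ((aligned (64)));

void example_copy64( void )
{
   mcpy64_m256i( line_dst, line_src );
}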
inline void mcpy96_m256i( __m256i* dst, const __m256i* src )
// Copy 96 bytes (3x__m256i), triple buffered
inline void mcpy96_m256i( __m256i* dest, const __m256i* srce )
{
const __m256i* dest = dst;
const __m256i* srce = src;
__m256i a = _mm256_load_si256( srce );
__m256i b = _mm256_load_si256( srce + 1 );
_mm256_store_si256( dest, a );
__m256i c = _mm256_load_si256( srce + 2 );
_mm256_store_si256( dest + 1, b );
_mm256_store_si256( dest + 2, c );
_mm256_store_si256( dest, a );
_mm256_store_si256( dest + 1, b );
_mm256_store_si256( dest + 2, c );
}
inline void mcpy128_m256i( __m256i* dst, const __m256i* src )
// Copy 128 bytes (4x__m256i), quad buffered
inline void mcpy128_m256i( __m256i* dest, const __m256i* srce )
{
const __m256i* dest = dst;
const __m256i* srce = src;
__m256i a = _mm256_load_si256( srce );
__m256i b = _mm256_load_si256( srce + 1 );
__m256i c = _mm256_load_si256( srce + 2 );
_mm256_store_si256( dest , a );
__m256i d = _mm256_load_si256( srce + 3 );
_mm256_store_si256( dest + 1, b );
a = _mm256_load_si256( srce + 4 );
_mm256_store_si256( dest + 2, c );
b = _mm256_load_si256( srce + 5 );
_mm256_store_si256( dest + 3, d );
c = _mm256_load_si256( srce + 6 );
_mm256_store_si256( dest + 4, a );
d = _mm256_load_si256( srce + 7 );
_mm256_store_si256( dest + 5, b );
_mm256_store_si256( dest + 6, c );
_mm256_store_si256( dest + 7, d );
_mm256_store_si256( dest , a );
a = _mm256_load_si256( srce + 4 );
_mm256_store_si256( dest + 1, b );
b = _mm256_load_si256( srce + 5 );
_mm256_store_si256( dest + 2, c );
c = _mm256_load_si256( srce + 6 );
_mm256_store_si256( dest + 3, d );
d = _mm256_load_si256( srce + 7 );
_mm256_store_si256( dest + 4, a );
_mm256_store_si256( dest + 5, b );
_mm256_store_si256( dest + 6, c );
_mm256_store_si256( dest + 7, d );
}
// Variable size
// Copy variable size
//
// copy multiples of 64 bytes using quad buffering with interleave
// of first read of next line with last write of current line.
// n is a multiple of 32 bytes (__m256i size)
// minimum alignment: 32 bytes
// optimum alignment: 64 bytes (cache line size)
// minimum size.....: 128 bytes (4*n)
// recommended size.: 256+ bytes
// recommended size.: 256+ bytes (8*n)
// minimum increment: 128 bytes
// Only the first load or store in a cache line triggers a memory access.
// the subsequent actions are trivial because they benefit from data
@@ -120,17 +119,16 @@ inline void mcpy128_m256i( __m256i* dst, const __m256i* src )
inline void mcpy_m256i_x4( __m256i *dst, const __m256i *src, const int n )
{
const __m256i* dest = dst;
const __m256i* srce = src;
__m256i* end = dst + n;
// preload 1 cache line to absorb startup latency
__m256i a = _mm256_load_si256( srce );
__m256i b = _mm256_load_si256( srce + 1 );
// start loading second line, queue while waiting
__m256i c = _mm256_load_si256( srce + 2 );
__m256i a = _mm256_load_si256( src );
__m256i b = _mm256_load_si256( src + 1 );
// start loading second line, queued while waiting for 1st line.
__m256i c = _mm256_load_si256( src + 2 );
// start writing first line, as soon as data available,
// second line read will have priority on the bus
_mm256_store_si256( dest, a );
_mm256_store_si256( dst, a );
__m256i d;
int i;
@@ -139,39 +137,37 @@ inline void mcpy_m256i_x4( __m256i *dst, const __m256i *src, const int n )
for ( i = 0; i < loops; i++ )
{
const int i4 = i*4;
const __m256i* si4 = (__m256i*)(srce + i4);
const __m256i* di4 = (__m256i*)(dest + i4);
const __m256i* si4 = src + i4;
__m256i* di4 = dst + i4;
d = _mm256_load_si256( si4 + 3 );
_mm256_store_si256( di4 + 1, b );
// start loading next line
a = _mm256_load_si256( si4 + 4 );
_mm256_store_si256( di4 + 2, c );
b = _mm256_load_si256( si4 + 5 );
_mm256_store_si256( di4 + 3, d );
c = _mm256_load_si256( si4 + 6 );
// start writing next line
_mm256_store_si256( di4 + 4, a );
}
// finish last line
d = _mm256_load_si256( srce + n - 4 );
_mm256_store_si256( dest + n - 3, b );
_mm256_store_si256( dest + n - 2, c );
_mm256_store_si256( dest + n - 1, d );
d = _mm256_load_si256( src + n - 1 );   // final source element
_mm256_store_si256( end - 3, b );
_mm256_store_si256( end - 2, c );
_mm256_store_si256( end - 1, d );
}
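
A usage sketch for the variable size quad buffered copy, assuming 64 byte aligned buffers; note that n is passed in __m256i units, not bytes, and the names below are made up:

// Hypothetical: copy 256 bytes = 8 x __m256i, so n = 8 (a multiple of 4).
static __m256i big_src[8] __attribute__ ((aligned (64)));
static __m256i big_dst[8] __attribute__ ((aligned (64)));

void example_copy_x4( void )
{
   mcpy_m256i_x4( big_dst, big_src, 8 );
}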
// basic __m256i memcpy
// basic aligned __m256i memcpy
inline void memcpy_m256i( __m256i *dst, const __m256i *src, int n )
{
for ( int i = 0; i < n; i ++ ) dst[i] = src[i];
}
// For cheating with pointer types
// p = any aligned pointer
// returns p as pointer to vector type
// returns p as pointer to vector type, not very useful
#define castp_m256i(p) ((__m256i*)(p))
#define castp_m128i(p) ((__m128i*)(p))
@@ -198,7 +194,7 @@ inline void memcpy_m256i( __m256i *dst, const __m256i *src, int n )
//__m256i _mm256_inserti128_si256(__m256i a, __m128i b, const int mask);
// Rotate bits in 4 uint64 (3 instructions)
// __m256i mm256_rotr_64( __m256i, int )
// w = packed 64 bit data, c = number of bits to rotate
#define mm256_rotr_64( w, c ) \
_mm256_or_si256( _mm256_srli_epi64(w, c), _mm256_slli_epi64(w, 64 - c) )
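
A small sketch of the rotate macro in use; the shift count of 32 is only an example:

// Hypothetical: rotate each of the four 64 bit lanes right by 32 bits,
// which swaps the high and low 32 bit halves of every lane.
inline __m256i example_rotr64_by32( __m256i w )
{
   return mm256_rotr_64( w, 32 );
}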
@@ -219,6 +215,7 @@ inline void memcpy_m256i( __m256i *dst, const __m256i *src, int n )
_mm256_permute4x64_epi64( w, 0x93 )
// shift 256 bits by n*64 bits (4 uint64 by n uint64)
// mm256_slli256_nx64( w )
#define mm256_slli256_1x64( w ) \
_mm256_and_si256( mm256_rotl256_1x64( w ), \
_mm256_set_epi64x( 0, \
@@ -231,6 +228,7 @@ inline void memcpy_m256i( __m256i *dst, const __m256i *src, int n )
0 ) )
*/
// these ones probably are backward
#define mm256_slli256_2x64( w ) \
_mm256_and_si256( mm256_swap128( w ), \
_mm256_set_epi64x( 0xffffffffffffffffull, \
@@ -271,9 +269,26 @@ inline void memcpy_m256i( __m256i *dst, const __m256i *src, int n )
0xffffffffffffffffull ) )
*/
// vectored version of BYTES_SWAP32
inline __m256i mm256_byteswap_epi32( __m256i x )
{
__m256i x1 = _mm256_and_si256( x,
_mm256_set_epi32( 0x0000ff00, 0x0000ff00, 0x0000ff00, 0x0000ff00,
0x0000ff00, 0x0000ff00, 0x0000ff00, 0x0000ff00 ) );
__m256i x2 = _mm256_and_si256( x,
_mm256_set_epi32( 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000,
0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 ) );
__m256i x0 = _mm256_slli_epi32( x, 24 ); // x0 = x << 24
x1 = _mm256_slli_epi32( x1, 8 ); // x1 = mask(x) << 8
x2 = _mm256_srli_epi32( x2, 8 ); // x2 = mask(x) >> 8
__m256i x3 = _mm256_srli_epi32( x, 24 ); // x3 = x >> 24
return _mm256_or_si256( _mm256_or_si256( x0, x1 ),
_mm256_or_si256( x2, x3 ) );
}
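
A hedged sketch of the vectored byte swap, e.g. converting a 32 byte chunk of big endian 32 bit words in place; the pointer name is illustrative and must be 32 byte aligned:

// Hypothetical: byte swap 8 packed 32 bit words in place.
inline void example_bswap32_256( uint32_t *buf )
{
   __m256i v = _mm256_load_si256( castp_m256i( buf ) );
   _mm256_store_si256( castp_m256i( buf ), mm256_byteswap_epi32( v ) );
}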
#endif // AVX2
// Replacements for vectorized data
// AVX replacements for vectorized data
inline void memset_zero_m128i( __m128i *dst, int n )
{
@@ -287,52 +302,80 @@ inline void memset_m128i( __m128i *dst, const __m128i a, int n )
// __m128i versions of optimized copying
inline void mcpy32_m128i( __m128i* dst, const __m128i* src )
// Copy 32 bytes (2x__m128i), double buffered
inline void mcpy32_m128i( __m128i* dest, const __m128i* srce )
{
const __m128i* dest = dst;
const __m128i* srce = src;
// 2 loads, half a cache line
__m128i a = _mm_load_si128( srce );
__m128i b = _mm_load_si128( srce + 1 );
_mm_store_si128( dest, a );
_mm_store_si128( dest + 1, b );
}
inline void mcpy64_m128i( __m128i* dst, const __m128i* src )
// Copy 64 Bytes (4x__m128i), quad buffered
inline void mcpy64_m128i( __m128i* dest, const __m128i* srce )
{
const __m128i* dest = dst;
const __m128i* srce = src;
// 4 loads fill a cache line
__m128i a = _mm_load_si128( srce );
__m128i b = _mm_load_si128( srce + 1 );
__m128i c = _mm_load_si128( srce + 2 );
__m128i d = _mm_load_si128( srce + 3 );
// need to store a before overwriting it
_mm_store_si128( dest, a );
a = _mm_load_si128( srce + 4 );
_mm_store_si128( dest + 1, b );
b = _mm_load_si128( srce + 5 );
_mm_store_si128( dest + 2, c );
c = _mm_load_si128( srce + 6 );
_mm_store_si128( dest + 3, d );
d = _mm_load_si128( srce + 7 );
_mm_store_si128( dest + 4, a );
d = _mm_load_si128( srce + 7 );
_mm_store_si128( dest + 5, b );
_mm_store_si128( dest + 6, c );
_mm_store_si128( dest + 7, d );
_mm_store_si128( dest, a );
a = _mm_load_si128( srce + 4 );
_mm_store_si128( dest + 1, b );
b = _mm_load_si128( srce + 5 );
_mm_store_si128( dest + 2, c );
c = _mm_load_si128( srce + 6 );
_mm_store_si128( dest + 3, d );
d = _mm_load_si128( srce + 7 );
_mm_store_si128( dest + 4, a );
_mm_store_si128( dest + 5, b );
_mm_store_si128( dest + 6, c );
_mm_store_si128( dest + 7, d );
}
// Copy 96 Bytes (6x__m128i), quad buffered
inline void mcpy96_m128i( __m128i* dest, const __m128i* srce )
{
// 4 loads fill a cache line
__m128i a = _mm_load_si128( srce );
__m128i b = _mm_load_si128( srce + 1 );
__m128i c = _mm_load_si128( srce + 2 );
__m128i d = _mm_load_si128( srce + 3 );
// need to store a before overwriting it
_mm_store_si128( dest, a );
a = _mm_load_si128( srce + 4 );
_mm_store_si128( dest + 1, b );
b = _mm_load_si128( srce + 5 );
_mm_store_si128( dest + 2, c );
c = _mm_load_si128( srce + 6 );
_mm_store_si128( dest + 3, d );
d = _mm_load_si128( srce + 7 );
_mm_store_si128( dest + 4, a );
a = _mm_load_si128( srce + 8 );
_mm_store_si128( dest + 5, b );
b = _mm_load_si128( srce + 9 );
_mm_store_si128( dest + 6, c );
c = _mm_load_si128( srce + 10 );
_mm_store_si128( dest + 7, d );
d = _mm_load_si128( srce + 11 );
_mm_store_si128( dest + 8, a );
_mm_store_si128( dest + 9, b );
_mm_store_si128( dest + 10, c );
_mm_store_si128( dest + 11, d );
}
// Variable length
// copy multiples of 16 bytes using quad buffering.
//
// Copy multiples of 16 bytes (__m128i) using quad buffering.
// n is a multiple of 16 bytes (__m128i size)
// minimum alignment: 16 bytes
// optimum alignment: 64 bytes (cache line size)
// minimum size.....: 32 bytes (4*n)
// recommended size.: 96+ bytes
// minimum increment: 32 bytes
inline void memcpy_m128i_x4( __m128i *dst, const __m128i *src, const int n )
// minimum size.....: 64 bytes (4*n)
// recommended size.: 128+ bytes (8*n)
// minimum increment: 64 bytes
inline void mcpy_m128i_x4( __m128i *dst, const __m128i *src, const int n )
{
// preload 1 cache line to absorb startup latency
__m128i a = _mm_load_si128( src );
@@ -342,36 +385,92 @@ inline void memcpy_m128i_x4( __m128i *dst, const __m128i *src, const int n )
int i;
const int loops = n/4 - 1;
const __m128i* dst_n = (__m128i*)(dst + n);
__m128i* end = dst + n;
for ( i = 0; i < loops; i++ )
{
const int i4 = i*4;
const __m128i* si4 = (__m128i*)(src + i4);
const __m128i* di4 = (__m128i*)(dst + i4);
const __m128i* si4 = src + i4;
__m128i* di4 = dst + i4;
// need to free a before overwriting it
_mm_store_si128( di4, a );
a = _mm_load_si128( di4 + 4 );
_mm_store_si128( di4 + 1, b );
b = _mm_load_si128( di4 + 5 );
_mm_store_si128( di4 + 2, c );
c = _mm_load_si128( di4 + 6 );
_mm_store_si128( di4 + 3, d );
d = _mm_load_si128( di4 + 7 );
_mm_store_si128( di4, a );
a = _mm_load_si128( si4 + 4 );
_mm_store_si128( di4 + 1, b );
b = _mm_load_si128( si4 + 5 );
_mm_store_si128( di4 + 2, c );
c = _mm_load_si128( si4 + 6 );
_mm_store_si128( di4 + 3, d );
d = _mm_load_si128( si4 + 7 );
}
_mm_store_si128( dst_n - 4, a );
_mm_store_si128( dst_n - 3, b );
_mm_store_si128( dst_n - 2, c );
_mm_store_si128( dst_n - 1, d );
_mm_store_si128( end - 4, a );
_mm_store_si128( end - 3, b );
_mm_store_si128( end - 2, c );
_mm_store_si128( end - 1, d );
}
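
The __m128i quad buffered copy follows the same convention, with n in __m128i units; a sketch with made up buffers:

// Hypothetical: copy 128 bytes = 8 x __m128i, so n = 8 (a multiple of 4).
static __m128i sse_src[8] __attribute__ ((aligned (64)));
static __m128i sse_dst[8] __attribute__ ((aligned (64)));

void example_copy_x4_sse( void )
{
   mcpy_m128i_x4( sse_dst, sse_src, 8 );
}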
// basic __m128i copy
// basic aligned __m128i copy
inline void memcpy_m128i( __m128i *dst, const __m128i *src, int n )
{
for ( int i = 0; i < n; i ++ ) dst[i] = src[i];
}
inline void memcpy_64( uint64_t* dst, const uint64_t* src, int n )
{
for ( int i = 0; i < n; i++ )
dst[i] = src[i];
}
// Smart generic mem copy optimized for copying large data, n = bytes.
// Most efficient with 256 bit aligned data and size a multiple of 4*256,
// but flexible enough to handle any alignment, any size with performance
// considerations. For common fixed sizes use the appropriate functions above.
inline void mcpy( void* dst, const void* src, int n )
{
// enforce alignment and minimum size for quad buffered vector copy
#if defined (__AVX2__)
// Try 256 bit copy
if ( ( (uint64_t)dst % 32 == 0 ) && ( (const uint64_t)src % 32 == 0 ) )
{
if ( n % 128 == 0 )
{
mcpy_m256i_x4( (__m256i*)dst, (const __m256i*)src, n/32 );
return;
}
else
{
memcpy_m256i( (__m256i*)dst, (const __m256i*)src, n/32 );
return;
}
}
else
#endif
// Try 128 bit copy
if ( ( (uint64_t)dst % 16 == 0 ) && ( (const uint64_t)src % 16 == 0 ) )
{
if ( n % 64 == 0 )
{
mcpy_m128i_x4( (__m128i*)dst, (const __m128i*)src, n/16 );
return;
}
else
{
memcpy_m128i( (__m128i*)dst, (const __m128i*)src, n/16 );
return;
}
}
// Try 64 bit copy
else if ( ( (uint64_t)dst % 8 == 0 ) && ( (const uint64_t)src % 8 == 0 )
&& ( n % 8 == 0 ) )
{
memcpy_64( (uint64_t*)dst, (const uint64_t*)src, n/8 );
return;
}
// slow copy
memcpy( dst, src, n );
}
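
A sketch of the generic dispatcher; unlike the vector copies it takes plain pointers and a byte count, so it can stand in for memcpy on large work buffers (names and sizes are illustrative):

// Hypothetical: 256 bytes, 32 byte aligned, size a multiple of 128 bytes,
// so with AVX2 this resolves to the 256 bit quad buffered path.
static uint64_t work_a[32] __attribute__ ((aligned (32)));
static uint64_t work_b[32] __attribute__ ((aligned (32)));

void example_generic_copy( void )
{
   mcpy( work_b, work_a, sizeof(work_a) );
}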
// For cheating with pointer types
// p = any aligned pointer
@@ -408,14 +507,16 @@ inline void memcpy_m128i( __m128i *dst, const __m128i *src, int n )
// mm256_rotl256_1x64 when avx2 is not available or data is already in __m128i
// format. Uses one local.
//void mm128_rotl256_1x64( __m128i, __m128i )
#define mm128_rotl256_1x64(s0, s1) do { \
#define mm128_rotl256_1x64(s0,s1) do { \
__m128i t; \
s0 = mm128_swap64( s0); \
s1 = mm128_swap64( s1); \
t = _mm_or_si128( _mm_and_si128( s0, _mm_set_epi64x(0ull,0xffffffffffffffffull) ), \
_mm_and_si128( s1, _mm_set_epi64x(0xffffffffffffffffull,0ull) ) ); \
s1 = _mm_or_si128( _mm_and_si128( s0, _mm_set_epi64x(0xffffffffffffffffull,0ull) ), \
_mm_and_si128( s1, _mm_set_epi64x(0ull,0xffffffffffffffffull) ) ); \
s0 = mm128_swap64(s0); \
s1 = mm128_swap64(s1); \
t = _mm_or_si128( \
_mm_and_si128( s0, _mm_set_epi64x(0ull,0xffffffffffffffffull) ), \
_mm_and_si128( s1, _mm_set_epi64x(0xffffffffffffffffull,0ull) ) ); \
s1 = _mm_or_si128( \
_mm_and_si128( s0, _mm_set_epi64x(0xffffffffffffffffull,0ull) ), \
_mm_and_si128( s1, _mm_set_epi64x(0ull,0xffffffffffffffffull) ) ); \
s0 = t; \
} while(0)
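
A hedged sketch of the macro in use; it relies on mm128_swap64 from elsewhere in this header and updates both __m128i halves in place:

// Hypothetical: rotate a 256 bit value, held as two __m128i halves,
// left by one 64 bit word.
inline void example_rotl256_1x64( __m128i *half0, __m128i *half1 )
{
   __m128i s0 = *half0;
   __m128i s1 = *half1;
   mm128_rotl256_1x64( s0, s1 );
   *half0 = s0;
   *half1 = s1;
}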
@@ -423,10 +524,26 @@ inline void memcpy_m128i( __m128i *dst, const __m128i *src, int n )
__m128i t; \
s0 = mm128_swap64( s0); \
s1 = mm128_swap64( s1); \
t = _mm_or_si128( _mm_and_si128( s0, _mm_set_epi64x(0xffffffffffffffffull,0ull) ), \
_mm_and_si128( s1, _mm_set_epi64x(0ull,0xffffffffffffffffull) ) ); \
s1 = _mm_or_si128( _mm_and_si128( s0, _mm_set_epi64x(0ull,0xffffffffffffffffull) ), \
_mm_and_si128( s1, _mm_set_epi64x(0xffffffffffffffffull,0ull) ) ); \
t = _mm_or_si128( \
_mm_and_si128( s0, _mm_set_epi64x(0xffffffffffffffffull,0ull) ), \
_mm_and_si128( s1, _mm_set_epi64x(0ull,0xffffffffffffffffull) ) ); \
s1 = _mm_or_si128( \
_mm_and_si128( s0, _mm_set_epi64x(0ull,0xffffffffffffffffull) ), \
_mm_and_si128( s1, _mm_set_epi64x(0xffffffffffffffffull,0ull) ) ); \
s0 = t; \
} while(0)
// vectored version of BYTES_SWAP32
inline __m128i mm_byteswap_epi32( __m128i x )
{
__m128i x1 = _mm_and_si128( x, _mm_set_epi32( 0x0000ff00, 0x0000ff00,
0x0000ff00, 0x0000ff00 ) );
__m128i x2 = _mm_and_si128( x, _mm_set_epi32( 0x00ff0000, 0x00ff0000,
0x00ff0000, 0x00ff0000 ) );
__m128i x0 = _mm_slli_epi32( x, 24 ); // x0 = x << 24
x1 = _mm_slli_epi32( x1, 8 ); // x1 = mask(x) << 8
x2 = _mm_srli_epi32( x2, 8 ); // x2 = mask(x) >> 8
__m128i x3 = _mm_srli_epi32( x, 24 ); // x3 = x >> 24
return _mm_or_si128( _mm_or_si128( x0, x1 ), _mm_or_si128( x2, x3 ) );
}
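
And a matching sketch for the SSE byte swap, usable when AVX2 is not available; the pointer is illustrative and must be 16 byte aligned:

// Hypothetical: byte swap 4 packed 32 bit words in place.
inline void example_bswap32_128( uint32_t *buf )
{
   __m128i v = _mm_load_si128( (__m128i*)buf );
   _mm_store_si128( (__m128i*)buf, mm_byteswap_epi32( v ) );
}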