mirror of
https://github.com/JayDDee/cpuminer-opt.git
synced 2025-09-17 23:44:27 +00:00
Initial upload v3.4.7
This commit is contained in:
1173
algo/x2.hide/scrypt-arm.S
Normal file
1173
algo/x2.hide/scrypt-arm.S
Normal file
File diff suppressed because it is too large
Load Diff
2879
algo/x2.hide/scrypt-x64.S
Normal file
2879
algo/x2.hide/scrypt-x64.S
Normal file
File diff suppressed because it is too large
Load Diff
821
algo/x2.hide/scrypt-x86.S
Normal file
821
algo/x2.hide/scrypt-x86.S
Normal file
@@ -0,0 +1,821 @@
|
||||
/*
|
||||
* Copyright 2011-2012 pooler@litecoinpool.org
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "cpuminer-config.h"
|
||||
|
||||
#if defined(__linux__) && defined(__ELF__)
|
||||
.section .note.GNU-stack,"",%progbits
|
||||
#endif
|
||||
|
||||
#if defined(__i386__)
|
||||
|
||||
/*
 * scrypt_shuffle src, so, dest, do
 * Copy one 64-byte (16-word) block from \so(\src) to \do(\dest),
 * permuting word order between the linear layout used by the generic
 * scalar code and the diagonal layout expected by the SSE2 Salsa20
 * core.  The permutation is an involution (it is its own inverse), so
 * the same macro converts in both directions.
 * Clobbers: %eax, %ebx, %ecx, %edx.
 */
.macro scrypt_shuffle src, so, dest, do
	movl	\so+60(\src), %eax
	movl	\so+44(\src), %ebx
	movl	\so+28(\src), %ecx
	movl	\so+12(\src), %edx
	movl	%eax, \do+12(\dest)
	movl	%ebx, \do+28(\dest)
	movl	%ecx, \do+44(\dest)
	movl	%edx, \do+60(\dest)
	movl	\so+40(\src), %eax
	movl	\so+8(\src), %ebx
	movl	\so+48(\src), %ecx
	movl	\so+16(\src), %edx
	movl	%eax, \do+8(\dest)
	movl	%ebx, \do+40(\dest)
	movl	%ecx, \do+16(\dest)
	movl	%edx, \do+48(\dest)
	movl	\so+20(\src), %eax
	movl	\so+4(\src), %ebx
	movl	\so+52(\src), %ecx
	movl	\so+36(\src), %edx
	movl	%eax, \do+4(\dest)
	movl	%ebx, \do+20(\dest)
	movl	%ecx, \do+36(\dest)
	movl	%edx, \do+52(\dest)
	/* Words 0, 24, 32, 56 are fixed points of the permutation. */
	movl	\so+0(\src), %eax
	movl	\so+24(\src), %ebx
	movl	\so+32(\src), %ecx
	movl	\so+56(\src), %edx
	movl	%eax, \do+0(\dest)
	movl	%ebx, \do+24(\dest)
	movl	%ecx, \do+32(\dest)
	movl	%edx, \do+56(\dest)
.endm
|
||||
|
||||
/*
 * salsa8_core_gen_quadround
 * Four Salsa20 rounds (two column/row double-rounds) on the 16-word
 * state held in the caller's stack frame at 4(%esp)..64(%esp) — the
 * offsets are +4 relative to the caller's frame because salsa8_core_gen
 * is reached via `call`, which pushes a return address.  The quarter-
 * rounds (rotates by 7, 9, 13, 18) are hand-interleaved for pipelining;
 * do not reorder.  Clobbers %eax-less GP set: %ebx, %ecx, %edx, %esi,
 * %edi, %ebp, and flags.
 */
.macro salsa8_core_gen_quadround
	movl	52(%esp), %ecx
	movl	4(%esp), %edx
	movl	20(%esp), %ebx
	movl	8(%esp), %esi
	leal	(%ecx, %edx), %edi		# quarter-round: x += y; rol 7; xor
	roll	$7, %edi
	xorl	%edi, %ebx
	movl	%ebx, 4(%esp)
	movl	36(%esp), %edi
	leal	(%edx, %ebx), %ebp
	roll	$9, %ebp
	xorl	%ebp, %edi
	movl	24(%esp), %ebp
	movl	%edi, 8(%esp)
	addl	%edi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %ecx
	movl	40(%esp), %ebx
	movl	%ecx, 20(%esp)
	addl	%edi, %ecx
	roll	$18, %ecx
	leal	(%esi, %ebp), %edi
	roll	$7, %edi
	xorl	%edi, %ebx
	movl	%ebx, 24(%esp)
	movl	56(%esp), %edi
	xorl	%ecx, %edx
	leal	(%ebp, %ebx), %ecx
	roll	$9, %ecx
	xorl	%ecx, %edi
	movl	%edi, 36(%esp)
	movl	28(%esp), %ecx
	movl	%edx, 28(%esp)
	movl	44(%esp), %edx
	addl	%edi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %esi
	movl	60(%esp), %ebx
	movl	%esi, 40(%esp)
	addl	%edi, %esi
	roll	$18, %esi
	leal	(%ecx, %edx), %edi
	roll	$7, %edi
	xorl	%edi, %ebx
	movl	%ebx, 44(%esp)
	movl	12(%esp), %edi
	xorl	%esi, %ebp
	leal	(%edx, %ebx), %esi
	roll	$9, %esi
	xorl	%esi, %edi
	movl	%edi, 12(%esp)
	movl	48(%esp), %esi
	movl	%ebp, 48(%esp)
	movl	64(%esp), %ebp
	addl	%edi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %ecx
	movl	16(%esp), %ebx
	movl	%ecx, 16(%esp)
	addl	%edi, %ecx
	roll	$18, %ecx
	leal	(%esi, %ebp), %edi
	roll	$7, %edi
	xorl	%edi, %ebx
	movl	32(%esp), %edi
	xorl	%ecx, %edx
	leal	(%ebp, %ebx), %ecx
	roll	$9, %ecx
	xorl	%ecx, %edi
	movl	%edi, 32(%esp)
	movl	%ebx, %ecx
	movl	%edx, 52(%esp)
	movl	28(%esp), %edx
	addl	%edi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %esi
	movl	40(%esp), %ebx
	movl	%esi, 28(%esp)
	addl	%edi, %esi
	roll	$18, %esi
	leal	(%ecx, %edx), %edi		# second round (row round) begins interleaving here
	roll	$7, %edi
	xorl	%edi, %ebx
	movl	%ebx, 40(%esp)
	movl	12(%esp), %edi
	xorl	%esi, %ebp
	leal	(%edx, %ebx), %esi
	roll	$9, %esi
	xorl	%esi, %edi
	movl	%edi, 12(%esp)
	movl	4(%esp), %esi
	movl	%ebp, 4(%esp)
	movl	48(%esp), %ebp
	addl	%edi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %ecx
	movl	16(%esp), %ebx
	movl	%ecx, 16(%esp)
	addl	%edi, %ecx
	roll	$18, %ecx
	leal	(%esi, %ebp), %edi
	roll	$7, %edi
	xorl	%edi, %ebx
	movl	%ebx, 48(%esp)
	movl	32(%esp), %edi
	xorl	%ecx, %edx
	leal	(%ebp, %ebx), %ecx
	roll	$9, %ecx
	xorl	%ecx, %edi
	movl	%edi, 32(%esp)
	movl	24(%esp), %ecx
	movl	%edx, 24(%esp)
	movl	52(%esp), %edx
	addl	%edi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %esi
	movl	28(%esp), %ebx
	movl	%esi, 28(%esp)
	addl	%edi, %esi
	roll	$18, %esi
	leal	(%ecx, %edx), %edi
	roll	$7, %edi
	xorl	%edi, %ebx
	movl	%ebx, 52(%esp)
	movl	8(%esp), %edi
	xorl	%esi, %ebp
	leal	(%edx, %ebx), %esi
	roll	$9, %esi
	xorl	%esi, %edi
	movl	%edi, 8(%esp)
	movl	44(%esp), %esi
	movl	%ebp, 44(%esp)
	movl	4(%esp), %ebp
	addl	%edi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %ecx
	movl	20(%esp), %ebx
	movl	%ecx, 4(%esp)
	addl	%edi, %ecx
	roll	$18, %ecx
	leal	(%esi, %ebp), %edi
	roll	$7, %edi
	xorl	%edi, %ebx
	movl	36(%esp), %edi
	xorl	%ecx, %edx
	leal	(%ebp, %ebx), %ecx
	roll	$9, %ecx
	xorl	%ecx, %edi
	movl	%edi, 20(%esp)
	movl	%ebx, %ecx
	movl	%edx, 36(%esp)
	movl	24(%esp), %edx
	addl	%edi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %esi
	movl	28(%esp), %ebx
	movl	%esi, 24(%esp)
	addl	%edi, %esi
	roll	$18, %esi
	leal	(%ecx, %edx), %edi		# third round (column round of second double-round)
	roll	$7, %edi
	xorl	%edi, %ebx
	movl	%ebx, 28(%esp)
	xorl	%esi, %ebp
	movl	8(%esp), %esi
	leal	(%edx, %ebx), %edi
	roll	$9, %edi
	xorl	%edi, %esi
	movl	40(%esp), %edi
	movl	%ebp, 8(%esp)
	movl	44(%esp), %ebp
	movl	%esi, 40(%esp)
	addl	%esi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %ecx
	movl	4(%esp), %ebx
	movl	%ecx, 44(%esp)
	addl	%esi, %ecx
	roll	$18, %ecx
	leal	(%edi, %ebp), %esi
	roll	$7, %esi
	xorl	%esi, %ebx
	movl	%ebx, 4(%esp)
	movl	20(%esp), %esi
	xorl	%ecx, %edx
	leal	(%ebp, %ebx), %ecx
	roll	$9, %ecx
	xorl	%ecx, %esi
	movl	%esi, 56(%esp)
	movl	48(%esp), %ecx
	movl	%edx, 20(%esp)
	movl	36(%esp), %edx
	addl	%esi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %edi
	movl	24(%esp), %ebx
	movl	%edi, 24(%esp)
	addl	%esi, %edi
	roll	$18, %edi
	leal	(%ecx, %edx), %esi
	roll	$7, %esi
	xorl	%esi, %ebx
	movl	%ebx, 60(%esp)
	movl	12(%esp), %esi
	xorl	%edi, %ebp
	leal	(%edx, %ebx), %edi
	roll	$9, %edi
	xorl	%edi, %esi
	movl	%esi, 12(%esp)
	movl	52(%esp), %edi
	movl	%ebp, 36(%esp)
	movl	8(%esp), %ebp
	addl	%esi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %ecx
	movl	16(%esp), %ebx
	movl	%ecx, 16(%esp)
	addl	%esi, %ecx
	roll	$18, %ecx
	leal	(%edi, %ebp), %esi
	roll	$7, %esi
	xorl	%esi, %ebx
	movl	32(%esp), %esi
	xorl	%ecx, %edx
	leal	(%ebp, %ebx), %ecx
	roll	$9, %ecx
	xorl	%ecx, %esi
	movl	%esi, 32(%esp)
	movl	%ebx, %ecx
	movl	%edx, 48(%esp)
	movl	20(%esp), %edx
	addl	%esi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %edi
	movl	24(%esp), %ebx
	movl	%edi, 20(%esp)
	addl	%esi, %edi
	roll	$18, %edi
	leal	(%ecx, %edx), %esi		# fourth round (row round of second double-round)
	roll	$7, %esi
	xorl	%esi, %ebx
	movl	%ebx, 8(%esp)
	movl	12(%esp), %esi
	xorl	%edi, %ebp
	leal	(%edx, %ebx), %edi
	roll	$9, %edi
	xorl	%edi, %esi
	movl	%esi, 12(%esp)
	movl	28(%esp), %edi
	movl	%ebp, 52(%esp)
	movl	36(%esp), %ebp
	addl	%esi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %ecx
	movl	16(%esp), %ebx
	movl	%ecx, 16(%esp)
	addl	%esi, %ecx
	roll	$18, %ecx
	leal	(%edi, %ebp), %esi
	roll	$7, %esi
	xorl	%esi, %ebx
	movl	%ebx, 28(%esp)
	movl	32(%esp), %esi
	xorl	%ecx, %edx
	leal	(%ebp, %ebx), %ecx
	roll	$9, %ecx
	xorl	%ecx, %esi
	movl	%esi, 32(%esp)
	movl	4(%esp), %ecx
	movl	%edx, 4(%esp)
	movl	48(%esp), %edx
	addl	%esi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %edi
	movl	20(%esp), %ebx
	movl	%edi, 20(%esp)
	addl	%esi, %edi
	roll	$18, %edi
	leal	(%ecx, %edx), %esi
	roll	$7, %esi
	xorl	%esi, %ebx
	movl	%ebx, 48(%esp)
	movl	40(%esp), %esi
	xorl	%edi, %ebp
	leal	(%edx, %ebx), %edi
	roll	$9, %edi
	xorl	%edi, %esi
	movl	%esi, 36(%esp)
	movl	60(%esp), %edi
	movl	%ebp, 24(%esp)
	movl	52(%esp), %ebp
	addl	%esi, %ebx
	roll	$13, %ebx
	xorl	%ebx, %ecx
	movl	44(%esp), %ebx
	movl	%ecx, 40(%esp)
	addl	%esi, %ecx
	roll	$18, %ecx
	leal	(%edi, %ebp), %esi
	roll	$7, %esi
	xorl	%esi, %ebx
	movl	%ebx, 52(%esp)
	movl	56(%esp), %esi
	xorl	%ecx, %edx
	leal	(%ebp, %ebx), %ecx
	roll	$9, %ecx
	xorl	%ecx, %esi
	movl	%esi, 56(%esp)
	addl	%esi, %ebx
	movl	%edx, 44(%esp)
	roll	$13, %ebx
	xorl	%ebx, %edi
	movl	%edi, 60(%esp)
	addl	%esi, %edi
	roll	$18, %edi
	xorl	%edi, %ebp
	movl	%ebp, 64(%esp)
.endm
|
||||
|
||||
.text
	.p2align 5
/*
 * salsa8_core_gen — Salsa20/8 core (8 rounds) over the 16-word state in
 * the caller's stack frame.  Private helper for scrypt_core_gen; uses
 * the caller's stack slots directly (offsets shifted by the pushed
 * return address), so it must only be reached via `call` from the
 * generic scrypt loop.  Clobbers %ebx, %ecx, %edx, %esi, %edi, %ebp.
 */
salsa8_core_gen:
	salsa8_core_gen_quadround	# rounds 1-4
	salsa8_core_gen_quadround	# rounds 5-8
	ret
|
||||
|
||||
|
||||
.text
	.p2align 5
	.globl scrypt_core
	.globl _scrypt_core
/*
 * void scrypt_core(uint32_t X[32], uint32_t V[32 * 1024])
 * cdecl i386.  X = 128-byte working block, V = 128 KiB scratchpad.
 * N is fixed at 1024.  Dispatches to the SSE2 path when CPUID reports
 * SSE2 (EDX bit 26); otherwise falls through to the generic path.
 */
scrypt_core:
_scrypt_core:
	pushl	%ebx
	pushl	%ebp
	pushl	%edi
	pushl	%esi

	/* Check for SSE2 availability (CPUID.1:EDX bit 26).
	   cpuid clobbers %ebx/%ecx/%edx; %ebx was saved above. */
	movl	$1, %eax
	cpuid
	andl	$0x04000000, %edx
	jnz	scrypt_core_sse2

scrypt_core_gen:
	movl	20(%esp), %edi		# %edi = X (arg 1, after 4 pushes)
	movl	24(%esp), %esi		# %esi = V (arg 2)
	subl	$72, %esp		# local frame: 16-word salsa state + saved ptrs

/* macro1a: V[i] = X; X[0..15] ^= X[16..31]; stash the xor in the frame. */
.macro scrypt_core_macro1a p, q
	movl	\p(%edi), %eax
	movl	\q(%edi), %edx
	movl	%eax, \p(%esi)
	movl	%edx, \q(%esi)
	xorl	%edx, %eax
	movl	%eax, \p(%edi)
	movl	%eax, \p(%esp)
.endm

/* macro1b: X ^= V[j] (byte offset in %edx); then same mixing as 1a. */
.macro scrypt_core_macro1b p, q
	movl	\p(%edi), %eax
	xorl	\p(%esi, %edx), %eax
	movl	\q(%edi), %ebx
	xorl	\q(%esi, %edx), %ebx
	movl	%ebx, \q(%edi)
	xorl	%ebx, %eax
	movl	%eax, \p(%edi)
	movl	%eax, \p(%esp)
.endm

/* macro2: X[0..15] += salsa output; X[16..31] ^= new X[0..15]. */
.macro scrypt_core_macro2 p, q
	movl	\p(%esp), %eax
	addl	\p(%edi), %eax
	movl	%eax, \p(%edi)
	xorl	\q(%edi), %eax
	movl	%eax, \q(%edi)
	movl	%eax, \p(%esp)
.endm

/* macro3: X[16..31] += salsa output (second half finalization). */
.macro scrypt_core_macro3 p, q
	movl	\p(%esp), %eax
	addl	\q(%edi), %eax
	movl	%eax, \q(%edi)
.endm

	leal	131072(%esi), %ecx	# end of V = V + 1024 * 128 bytes
scrypt_core_gen_loop1:			# fill phase: V[i] = X; X = blockmix(X)
	movl	%esi, 64(%esp)		# save current V slot
	movl	%ecx, 68(%esp)		# save end pointer

	scrypt_core_macro1a 0, 64
	scrypt_core_macro1a 4, 68
	scrypt_core_macro1a 8, 72
	scrypt_core_macro1a 12, 76
	scrypt_core_macro1a 16, 80
	scrypt_core_macro1a 20, 84
	scrypt_core_macro1a 24, 88
	scrypt_core_macro1a 28, 92
	scrypt_core_macro1a 32, 96
	scrypt_core_macro1a 36, 100
	scrypt_core_macro1a 40, 104
	scrypt_core_macro1a 44, 108
	scrypt_core_macro1a 48, 112
	scrypt_core_macro1a 52, 116
	scrypt_core_macro1a 56, 120
	scrypt_core_macro1a 60, 124

	call	salsa8_core_gen

	movl	92(%esp), %edi		# reload X (20 + 72 bytes of frame)
	scrypt_core_macro2 0, 64
	scrypt_core_macro2 4, 68
	scrypt_core_macro2 8, 72
	scrypt_core_macro2 12, 76
	scrypt_core_macro2 16, 80
	scrypt_core_macro2 20, 84
	scrypt_core_macro2 24, 88
	scrypt_core_macro2 28, 92
	scrypt_core_macro2 32, 96
	scrypt_core_macro2 36, 100
	scrypt_core_macro2 40, 104
	scrypt_core_macro2 44, 108
	scrypt_core_macro2 48, 112
	scrypt_core_macro2 52, 116
	scrypt_core_macro2 56, 120
	scrypt_core_macro2 60, 124

	call	salsa8_core_gen

	movl	92(%esp), %edi
	scrypt_core_macro3 0, 64
	scrypt_core_macro3 4, 68
	scrypt_core_macro3 8, 72
	scrypt_core_macro3 12, 76
	scrypt_core_macro3 16, 80
	scrypt_core_macro3 20, 84
	scrypt_core_macro3 24, 88
	scrypt_core_macro3 28, 92
	scrypt_core_macro3 32, 96
	scrypt_core_macro3 36, 100
	scrypt_core_macro3 40, 104
	scrypt_core_macro3 44, 108
	scrypt_core_macro3 48, 112
	scrypt_core_macro3 52, 116
	scrypt_core_macro3 56, 120
	scrypt_core_macro3 60, 124

	movl	64(%esp), %esi
	movl	68(%esp), %ecx
	addl	$128, %esi		# next 128-byte V slot
	cmpl	%ecx, %esi
	jne	scrypt_core_gen_loop1

	movl	96(%esp), %esi		# %esi = V again (24 + 72)
	movl	$1024, %ecx		# N iterations of the mixing phase
scrypt_core_gen_loop2:			# mix phase: X ^= V[X[16] mod N]; X = blockmix(X)
	movl	%ecx, 68(%esp)

	movl	64(%edi), %edx		# j = X[16]
	andl	$1023, %edx		# j mod N
	shll	$7, %edx		# byte offset = j * 128

	scrypt_core_macro1b 0, 64
	scrypt_core_macro1b 4, 68
	scrypt_core_macro1b 8, 72
	scrypt_core_macro1b 12, 76
	scrypt_core_macro1b 16, 80
	scrypt_core_macro1b 20, 84
	scrypt_core_macro1b 24, 88
	scrypt_core_macro1b 28, 92
	scrypt_core_macro1b 32, 96
	scrypt_core_macro1b 36, 100
	scrypt_core_macro1b 40, 104
	scrypt_core_macro1b 44, 108
	scrypt_core_macro1b 48, 112
	scrypt_core_macro1b 52, 116
	scrypt_core_macro1b 56, 120
	scrypt_core_macro1b 60, 124

	call	salsa8_core_gen

	movl	92(%esp), %edi
	scrypt_core_macro2 0, 64
	scrypt_core_macro2 4, 68
	scrypt_core_macro2 8, 72
	scrypt_core_macro2 12, 76
	scrypt_core_macro2 16, 80
	scrypt_core_macro2 20, 84
	scrypt_core_macro2 24, 88
	scrypt_core_macro2 28, 92
	scrypt_core_macro2 32, 96
	scrypt_core_macro2 36, 100
	scrypt_core_macro2 40, 104
	scrypt_core_macro2 44, 108
	scrypt_core_macro2 48, 112
	scrypt_core_macro2 52, 116
	scrypt_core_macro2 56, 120
	scrypt_core_macro2 60, 124

	call	salsa8_core_gen

	movl	92(%esp), %edi
	movl	96(%esp), %esi
	scrypt_core_macro3 0, 64
	scrypt_core_macro3 4, 68
	scrypt_core_macro3 8, 72
	scrypt_core_macro3 12, 76
	scrypt_core_macro3 16, 80
	scrypt_core_macro3 20, 84
	scrypt_core_macro3 24, 88
	scrypt_core_macro3 28, 92
	scrypt_core_macro3 32, 96
	scrypt_core_macro3 36, 100
	scrypt_core_macro3 40, 104
	scrypt_core_macro3 44, 108
	scrypt_core_macro3 48, 112
	scrypt_core_macro3 52, 116
	scrypt_core_macro3 56, 120
	scrypt_core_macro3 60, 124

	movl	68(%esp), %ecx
	subl	$1, %ecx
	ja	scrypt_core_gen_loop2

	addl	$72, %esp
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%ebx
	ret
|
||||
|
||||
|
||||
/*
 * salsa8_core_sse2_doubleround
 * One Salsa20 double-round (column round then row round) on the state
 * held column-wise in %xmm0-%xmm3.  Each 32-bit rotate is emitted as a
 * shift-left/shift-right pair; pshufd rotates rows to realign the
 * diagonals between rounds.  %xmm4/%xmm5 are scratch.
 */
.macro salsa8_core_sse2_doubleround
	movdqa	%xmm1, %xmm4
	paddd	%xmm0, %xmm4
	movdqa	%xmm4, %xmm5
	pslld	$7, %xmm4		# rotate left by 7 = shl 7 | shr 25
	psrld	$25, %xmm5
	pxor	%xmm4, %xmm3
	movdqa	%xmm0, %xmm4
	pxor	%xmm5, %xmm3

	paddd	%xmm3, %xmm4
	movdqa	%xmm4, %xmm5
	pslld	$9, %xmm4
	psrld	$23, %xmm5
	pxor	%xmm4, %xmm2
	movdqa	%xmm3, %xmm4
	pxor	%xmm5, %xmm2
	pshufd	$0x93, %xmm3, %xmm3

	paddd	%xmm2, %xmm4
	movdqa	%xmm4, %xmm5
	pslld	$13, %xmm4
	psrld	$19, %xmm5
	pxor	%xmm4, %xmm1
	movdqa	%xmm2, %xmm4
	pxor	%xmm5, %xmm1
	pshufd	$0x4e, %xmm2, %xmm2

	paddd	%xmm1, %xmm4
	movdqa	%xmm4, %xmm5
	pslld	$18, %xmm4
	psrld	$14, %xmm5
	pxor	%xmm4, %xmm0
	movdqa	%xmm3, %xmm4
	pxor	%xmm5, %xmm0
	pshufd	$0x39, %xmm1, %xmm1

	paddd	%xmm0, %xmm4
	movdqa	%xmm4, %xmm5
	pslld	$7, %xmm4
	psrld	$25, %xmm5
	pxor	%xmm4, %xmm1
	movdqa	%xmm0, %xmm4
	pxor	%xmm5, %xmm1

	paddd	%xmm1, %xmm4
	movdqa	%xmm4, %xmm5
	pslld	$9, %xmm4
	psrld	$23, %xmm5
	pxor	%xmm4, %xmm2
	movdqa	%xmm1, %xmm4
	pxor	%xmm5, %xmm2
	pshufd	$0x93, %xmm1, %xmm1

	paddd	%xmm2, %xmm4
	movdqa	%xmm4, %xmm5
	pslld	$13, %xmm4
	psrld	$19, %xmm5
	pxor	%xmm4, %xmm3
	movdqa	%xmm2, %xmm4
	pxor	%xmm5, %xmm3
	pshufd	$0x4e, %xmm2, %xmm2

	paddd	%xmm3, %xmm4
	movdqa	%xmm4, %xmm5
	pslld	$18, %xmm4
	psrld	$14, %xmm5
	pxor	%xmm4, %xmm0
	pshufd	$0x39, %xmm3, %xmm3
	pxor	%xmm5, %xmm0
.endm
|
||||
|
||||
/*
 * salsa8_core_sse2 — Salsa20/8 core (4 double-rounds = 8 rounds) on
 * the state in %xmm0-%xmm3; clobbers %xmm4/%xmm5.
 */
.macro salsa8_core_sse2
	salsa8_core_sse2_doubleround
	salsa8_core_sse2_doubleround
	salsa8_core_sse2_doubleround
	salsa8_core_sse2_doubleround
.endm
|
||||
|
||||
.p2align 5
/*
 * scrypt_core_sse2 — SSE2 path of scrypt_core (N = 1024).
 * Entered from scrypt_core with %ebx/%ebp/%edi/%esi already pushed, so
 * the args are still at 20(%esp)/24(%esp).  Builds a 16-byte-aligned
 * 128-byte state area on the stack (old %esp kept in %ebp), converts X
 * into the SSE2 word order with scrypt_shuffle, runs the fill and mix
 * phases with the second 64-byte half of X cached partly in registers
 * (%xmm4/%xmm5 = words 16-23 during loop2, %xmm6/%xmm7 = words 24-31
 * throughout), then converts back and restores registers.
 */
scrypt_core_sse2:
	movl	20(%esp), %edi		# %edi = X
	movl	24(%esp), %esi		# %esi = V
	movl	%esp, %ebp		# save %esp; realign below
	subl	$128, %esp
	andl	$-16, %esp		# movdqa requires 16-byte alignment

	scrypt_shuffle %edi, 0, %esp, 0
	scrypt_shuffle %edi, 64, %esp, 64

	movdqa	96(%esp), %xmm6		# keep X words 24-31 in registers
	movdqa	112(%esp), %xmm7

	movl	%esi, %edx
	leal	131072(%esi), %ecx	# end of V = V + 1024 * 128
scrypt_core_sse2_loop1:			# fill phase: V[i] = X; X = blockmix(X)
	movdqa	0(%esp), %xmm0
	movdqa	16(%esp), %xmm1
	movdqa	32(%esp), %xmm2
	movdqa	48(%esp), %xmm3
	movdqa	64(%esp), %xmm4
	movdqa	80(%esp), %xmm5
	pxor	%xmm4, %xmm0		# first half ^= second half
	pxor	%xmm5, %xmm1
	movdqa	%xmm0, 0(%edx)		# store pre-mix X into V[i]
	movdqa	%xmm1, 16(%edx)
	pxor	%xmm6, %xmm2
	pxor	%xmm7, %xmm3
	movdqa	%xmm2, 32(%edx)
	movdqa	%xmm3, 48(%edx)
	movdqa	%xmm4, 64(%edx)
	movdqa	%xmm5, 80(%edx)
	movdqa	%xmm6, 96(%edx)
	movdqa	%xmm7, 112(%edx)

	salsa8_core_sse2
	paddd	0(%edx), %xmm0		# X[0..15] = salsa(..) + input
	paddd	16(%edx), %xmm1
	paddd	32(%edx), %xmm2
	paddd	48(%edx), %xmm3
	movdqa	%xmm0, 0(%esp)
	movdqa	%xmm1, 16(%esp)
	movdqa	%xmm2, 32(%esp)
	movdqa	%xmm3, 48(%esp)

	pxor	64(%esp), %xmm0		# second half ^= new first half
	pxor	80(%esp), %xmm1
	pxor	%xmm6, %xmm2
	pxor	%xmm7, %xmm3
	movdqa	%xmm0, 64(%esp)
	movdqa	%xmm1, 80(%esp)
	movdqa	%xmm2, %xmm6
	movdqa	%xmm3, %xmm7
	salsa8_core_sse2
	paddd	64(%esp), %xmm0		# X[16..31] = salsa(..) + input
	paddd	80(%esp), %xmm1
	paddd	%xmm2, %xmm6
	paddd	%xmm3, %xmm7
	movdqa	%xmm0, 64(%esp)
	movdqa	%xmm1, 80(%esp)

	addl	$128, %edx		# next V slot
	cmpl	%ecx, %edx
	jne	scrypt_core_sse2_loop1

	movdqa	64(%esp), %xmm4		# cache X words 16-23 for the j lookup
	movdqa	80(%esp), %xmm5

	movl	$1024, %ecx		# N mixing iterations
scrypt_core_sse2_loop2:			# mix phase: X ^= V[X[16] mod N]; X = blockmix(X)
	movd	%xmm4, %edx		# j = X[16]
	movdqa	0(%esp), %xmm0
	movdqa	16(%esp), %xmm1
	movdqa	32(%esp), %xmm2
	movdqa	48(%esp), %xmm3
	andl	$1023, %edx		# j mod N
	shll	$7, %edx		# byte offset = j * 128
	pxor	0(%esi, %edx), %xmm0
	pxor	16(%esi, %edx), %xmm1
	pxor	32(%esi, %edx), %xmm2
	pxor	48(%esi, %edx), %xmm3

	pxor	%xmm4, %xmm0
	pxor	%xmm5, %xmm1
	movdqa	%xmm0, 0(%esp)
	movdqa	%xmm1, 16(%esp)
	pxor	%xmm6, %xmm2
	pxor	%xmm7, %xmm3
	movdqa	%xmm2, 32(%esp)
	movdqa	%xmm3, 48(%esp)
	salsa8_core_sse2
	paddd	0(%esp), %xmm0
	paddd	16(%esp), %xmm1
	paddd	32(%esp), %xmm2
	paddd	48(%esp), %xmm3
	movdqa	%xmm0, 0(%esp)
	movdqa	%xmm1, 16(%esp)
	movdqa	%xmm2, 32(%esp)
	movdqa	%xmm3, 48(%esp)

	pxor	64(%esi, %edx), %xmm0	# second half ^= V[j][16..31]
	pxor	80(%esi, %edx), %xmm1
	pxor	96(%esi, %edx), %xmm2
	pxor	112(%esi, %edx), %xmm3
	pxor	64(%esp), %xmm0
	pxor	80(%esp), %xmm1
	pxor	%xmm6, %xmm2
	pxor	%xmm7, %xmm3
	movdqa	%xmm0, 64(%esp)
	movdqa	%xmm1, 80(%esp)
	movdqa	%xmm2, %xmm6
	movdqa	%xmm3, %xmm7
	salsa8_core_sse2
	paddd	64(%esp), %xmm0
	paddd	80(%esp), %xmm1
	paddd	%xmm2, %xmm6
	paddd	%xmm3, %xmm7
	movdqa	%xmm0, %xmm4		# refresh cached words 16-23
	movdqa	%xmm1, %xmm5
	movdqa	%xmm0, 64(%esp)
	movdqa	%xmm1, 80(%esp)

	subl	$1, %ecx
	ja	scrypt_core_sse2_loop2

	movdqa	%xmm6, 96(%esp)		# spill cached words 24-31 back
	movdqa	%xmm7, 112(%esp)

	scrypt_shuffle %esp, 0, %edi, 0	# convert back to linear word order
	scrypt_shuffle %esp, 64, %edi, 64

	movl	%ebp, %esp		# undo alignment frame
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%ebx
	ret
|
||||
|
||||
#endif
|
||||
767
algo/x2.hide/scrypt.c
Normal file
767
algo/x2.hide/scrypt.c
Normal file
@@ -0,0 +1,767 @@
|
||||
/*
|
||||
* Copyright 2009 Colin Percival, 2011 ArtForz, 2011-2013 pooler
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* This file was originally written by Colin Percival as part of the Tarsnap
|
||||
* online backup system.
|
||||
*/
|
||||
|
||||
#include "../cpuminer-config.h"
|
||||
#include "../miner.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
|
||||
/* SHA-256 padding for the tail of the 80-byte block header when it is
 * hashed as the HMAC key: length field 0x280 = 640 bits = 80 bytes. */
static const uint32_t keypad[12] = {
	0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x00000280
};
/* Padding for the PBKDF2 inner hash: 64-byte ipad block + 80-byte salt
 * + 4-byte block index = 148 bytes = 0x4a0 bits. */
static const uint32_t innerpad[11] = {
	0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x000004a0
};
/* Padding for the HMAC outer hash: 64-byte opad block + 32-byte inner
 * digest = 96 bytes = 0x300 bits. */
static const uint32_t outerpad[8] = {
	0x80000000, 0, 0, 0, 0, 0, 0, 0x00000300
};
/* Complete final block for PBKDF2_SHA256_128_32: block index 1 followed
 * by padding; total message = 64 + 128 + 4 = 196 bytes = 0x620 bits. */
static const uint32_t finalblk[16] = {
	0x00000001, 0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x00000620
};
|
||||
|
||||
static inline void HMAC_SHA256_80_init(const uint32_t *key,
|
||||
uint32_t *tstate, uint32_t *ostate)
|
||||
{
|
||||
uint32_t ihash[8];
|
||||
uint32_t pad[16];
|
||||
int i;
|
||||
|
||||
/* tstate is assumed to contain the midstate of key */
|
||||
memcpy(pad, key + 16, 16);
|
||||
memcpy(pad + 4, keypad, 48);
|
||||
sha256_transform(tstate, pad, 0);
|
||||
memcpy(ihash, tstate, 32);
|
||||
|
||||
sha256_init(ostate);
|
||||
for (i = 0; i < 8; i++)
|
||||
pad[i] = ihash[i] ^ 0x5c5c5c5c;
|
||||
for (; i < 16; i++)
|
||||
pad[i] = 0x5c5c5c5c;
|
||||
sha256_transform(ostate, pad, 0);
|
||||
|
||||
sha256_init(tstate);
|
||||
for (i = 0; i < 8; i++)
|
||||
pad[i] = ihash[i] ^ 0x36363636;
|
||||
for (; i < 16; i++)
|
||||
pad[i] = 0x36363636;
|
||||
sha256_transform(tstate, pad, 0);
|
||||
}
|
||||
|
||||
static inline void PBKDF2_SHA256_80_128(const uint32_t *tstate,
|
||||
const uint32_t *ostate, const uint32_t *salt, uint32_t *output)
|
||||
{
|
||||
uint32_t istate[8], ostate2[8];
|
||||
uint32_t ibuf[16], obuf[16];
|
||||
int i, j;
|
||||
|
||||
memcpy(istate, tstate, 32);
|
||||
sha256_transform(istate, salt, 0);
|
||||
|
||||
memcpy(ibuf, salt + 16, 16);
|
||||
memcpy(ibuf + 5, innerpad, 44);
|
||||
memcpy(obuf + 8, outerpad, 32);
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
memcpy(obuf, istate, 32);
|
||||
ibuf[4] = i + 1;
|
||||
sha256_transform(obuf, ibuf, 0);
|
||||
|
||||
memcpy(ostate2, ostate, 32);
|
||||
sha256_transform(ostate2, obuf, 0);
|
||||
for (j = 0; j < 8; j++)
|
||||
output[8 * i + j] = swab32(ostate2[j]);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void PBKDF2_SHA256_128_32(uint32_t *tstate, uint32_t *ostate,
|
||||
const uint32_t *salt, uint32_t *output)
|
||||
{
|
||||
uint32_t buf[16];
|
||||
int i;
|
||||
|
||||
sha256_transform(tstate, salt, 1);
|
||||
sha256_transform(tstate, salt + 16, 1);
|
||||
sha256_transform(tstate, finalblk, 0);
|
||||
memcpy(buf, tstate, 32);
|
||||
memcpy(buf + 8, outerpad, 32);
|
||||
|
||||
sha256_transform(ostate, buf, 0);
|
||||
for (i = 0; i < 8; i++)
|
||||
output[i] = swab32(ostate[i]);
|
||||
}
|
||||
|
||||
|
||||
#ifdef HAVE_SHA256_4WAY
|
||||
|
||||
/* 4-way interleaved copies of keypad/innerpad/outerpad/finalblk: each
 * scalar word is replicated across 4 consecutive lanes so the 4-way
 * SHA-256 code can process four independent hashes at once.  Length
 * fields match the scalar constants (0x280 / 0x4a0 / 0x300 / 0x620 bits). */
static const uint32_t keypad_4way[4 * 12] = {
	0x80000000, 0x80000000, 0x80000000, 0x80000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000280, 0x00000280, 0x00000280, 0x00000280
};
static const uint32_t innerpad_4way[4 * 11] = {
	0x80000000, 0x80000000, 0x80000000, 0x80000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x000004a0, 0x000004a0, 0x000004a0, 0x000004a0
};
static const uint32_t outerpad_4way[4 * 8] = {
	0x80000000, 0x80000000, 0x80000000, 0x80000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000300, 0x00000300, 0x00000300, 0x00000300
};
/* Aligned for direct use as a 4-way SIMD message block. */
static const uint32_t finalblk_4way[4 * 16] __attribute__((aligned(16))) = {
	0x00000001, 0x00000001, 0x00000001, 0x00000001,
	0x80000000, 0x80000000, 0x80000000, 0x80000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000620, 0x00000620, 0x00000620, 0x00000620
};
|
||||
|
||||
static inline void HMAC_SHA256_80_init_4way(const uint32_t *key,
|
||||
uint32_t *tstate, uint32_t *ostate)
|
||||
{
|
||||
uint32_t ihash[4 * 8] __attribute__((aligned(16)));
|
||||
uint32_t pad[4 * 16] __attribute__((aligned(16)));
|
||||
int i;
|
||||
|
||||
/* tstate is assumed to contain the midstate of key */
|
||||
memcpy(pad, key + 4 * 16, 4 * 16);
|
||||
memcpy(pad + 4 * 4, keypad_4way, 4 * 48);
|
||||
sha256_transform_4way(tstate, pad, 0);
|
||||
memcpy(ihash, tstate, 4 * 32);
|
||||
|
||||
sha256_init_4way(ostate);
|
||||
for (i = 0; i < 4 * 8; i++)
|
||||
pad[i] = ihash[i] ^ 0x5c5c5c5c;
|
||||
for (; i < 4 * 16; i++)
|
||||
pad[i] = 0x5c5c5c5c;
|
||||
sha256_transform_4way(ostate, pad, 0);
|
||||
|
||||
sha256_init_4way(tstate);
|
||||
for (i = 0; i < 4 * 8; i++)
|
||||
pad[i] = ihash[i] ^ 0x36363636;
|
||||
for (; i < 4 * 16; i++)
|
||||
pad[i] = 0x36363636;
|
||||
sha256_transform_4way(tstate, pad, 0);
|
||||
}
|
||||
|
||||
static inline void PBKDF2_SHA256_80_128_4way(const uint32_t *tstate,
|
||||
const uint32_t *ostate, const uint32_t *salt, uint32_t *output)
|
||||
{
|
||||
uint32_t istate[4 * 8] __attribute__((aligned(16)));
|
||||
uint32_t ostate2[4 * 8] __attribute__((aligned(16)));
|
||||
uint32_t ibuf[4 * 16] __attribute__((aligned(16)));
|
||||
uint32_t obuf[4 * 16] __attribute__((aligned(16)));
|
||||
int i, j;
|
||||
|
||||
memcpy(istate, tstate, 4 * 32);
|
||||
sha256_transform_4way(istate, salt, 0);
|
||||
|
||||
memcpy(ibuf, salt + 4 * 16, 4 * 16);
|
||||
memcpy(ibuf + 4 * 5, innerpad_4way, 4 * 44);
|
||||
memcpy(obuf + 4 * 8, outerpad_4way, 4 * 32);
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
memcpy(obuf, istate, 4 * 32);
|
||||
ibuf[4 * 4 + 0] = i + 1;
|
||||
ibuf[4 * 4 + 1] = i + 1;
|
||||
ibuf[4 * 4 + 2] = i + 1;
|
||||
ibuf[4 * 4 + 3] = i + 1;
|
||||
sha256_transform_4way(obuf, ibuf, 0);
|
||||
|
||||
memcpy(ostate2, ostate, 4 * 32);
|
||||
sha256_transform_4way(ostate2, obuf, 0);
|
||||
for (j = 0; j < 4 * 8; j++)
|
||||
output[4 * 8 * i + j] = swab32(ostate2[j]);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * 4-way PBKDF2-HMAC-SHA256, 128-byte salt -> 32-byte output (scrypt's
 * final PBKDF2 phase, c=1, dkLen=32).  Lanes are word-interleaved as in
 * PBKDF2_SHA256_80_128_4way.  tstate holds the inner (ipad) midstate and
 * is consumed; ostate holds the outer (opad) midstate and is overwritten
 * with the outer hash.  output gets 8 big-endian words per lane.
 */
static inline void PBKDF2_SHA256_128_32_4way(uint32_t *tstate,
	uint32_t *ostate, const uint32_t *salt, uint32_t *output)
{
	uint32_t buf[4 * 16] __attribute__((aligned(16)));
	int i;

	/* Inner hash: two 64-byte salt blocks (byte-swapped on the fly),
	   then the fixed final block (counter INT(1) + padding). */
	sha256_transform_4way(tstate, salt, 1);
	sha256_transform_4way(tstate, salt + 4 * 16, 1);
	sha256_transform_4way(tstate, finalblk_4way, 0);
	/* Outer message = inner digest | opad-side padding (outerpad_4way). */
	memcpy(buf, tstate, 4 * 32);
	memcpy(buf + 4 * 8, outerpad_4way, 4 * 32);

	sha256_transform_4way(ostate, buf, 0);
	for (i = 0; i < 4 * 8; i++)
		output[i] = swab32(ostate[i]);
}
|
||||
|
||||
#endif /* HAVE_SHA256_4WAY */
|
||||
|
||||
|
||||
#ifdef HAVE_SHA256_8WAY
|
||||
|
||||
/*
 * Fixed final SHA-256 message block for the 8-way PBKDF2_SHA256_128_32
 * inner hash, eight lanes interleaved word-by-word.  Per lane:
 * word 0 = big-endian PBKDF2 block counter INT(1), word 1 = 0x80 padding
 * bit, word 15 = message length in bits (0x620 = 1568 bits = 64-byte ipad
 * block + 128-byte salt + 4-byte counter = 196 bytes).
 */
static const uint32_t finalblk_8way[8 * 16] __attribute__((aligned(32))) = {
	0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001,
	0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000620, 0x00000620, 0x00000620, 0x00000620, 0x00000620, 0x00000620, 0x00000620, 0x00000620
};
|
||||
|
||||
/*
 * 8-way HMAC-SHA256 key setup for an 80-byte key (the block header).
 * Buffers are 8 lanes interleaved word-by-word ([8*i + k] = word i of
 * lane k).  On entry tstate must hold the SHA-256 midstate of the first
 * 64 key bytes per lane; on return tstate/ostate hold the inner (ipad)
 * and outer (opad) HMAC midstates.
 */
static inline void HMAC_SHA256_80_init_8way(const uint32_t *key,
	uint32_t *tstate, uint32_t *ostate)
{
	uint32_t ihash[8 * 8] __attribute__((aligned(32)));
	uint32_t pad[8 * 16] __attribute__((aligned(32)));
	int i;

	/* tstate is assumed to contain the midstate of key */
	/* Second key block: key words 16..19, 0x80 padding bit at byte 80,
	   zeros, then the bit length 0x280 = 640 bits = 80 bytes. */
	memcpy(pad, key + 8 * 16, 8 * 16);
	for (i = 0; i < 8; i++)
		pad[8 * 4 + i] = 0x80000000;
	memset(pad + 8 * 5, 0x00, 8 * 40);
	for (i = 0; i < 8; i++)
		pad[8 * 15 + i] = 0x00000280;
	sha256_transform_8way(tstate, pad, 0);
	/* ihash = SHA256(key), used as the effective HMAC key (key > 64 B). */
	memcpy(ihash, tstate, 8 * 32);

	/* Outer midstate: SHA256 state after absorbing ihash ^ opad. */
	sha256_init_8way(ostate);
	for (i = 0; i < 8 * 8; i++)
		pad[i] = ihash[i] ^ 0x5c5c5c5c;
	for (; i < 8 * 16; i++)
		pad[i] = 0x5c5c5c5c;
	sha256_transform_8way(ostate, pad, 0);

	/* Inner midstate: SHA256 state after absorbing ihash ^ ipad. */
	sha256_init_8way(tstate);
	for (i = 0; i < 8 * 8; i++)
		pad[i] = ihash[i] ^ 0x36363636;
	for (; i < 8 * 16; i++)
		pad[i] = 0x36363636;
	sha256_transform_8way(tstate, pad, 0);
}
|
||||
|
||||
/*
 * 8-way PBKDF2-HMAC-SHA256, 80-byte password/salt -> 128-byte output
 * (scrypt's first PBKDF2 phase).  Lane layout as in the other *_8way
 * helpers.  Unlike the 4-way version, the padding constants are built
 * inline rather than copied from static tables.
 */
static inline void PBKDF2_SHA256_80_128_8way(const uint32_t *tstate,
	const uint32_t *ostate, const uint32_t *salt, uint32_t *output)
{
	uint32_t istate[8 * 8] __attribute__((aligned(32)));
	uint32_t ostate2[8 * 8] __attribute__((aligned(32)));
	uint32_t ibuf[8 * 16] __attribute__((aligned(32)));
	uint32_t obuf[8 * 16] __attribute__((aligned(32)));
	int i, j;

	/* Shared inner midstate over the first 64 salt bytes. */
	memcpy(istate, tstate, 8 * 32);
	sha256_transform_8way(istate, salt, 0);

	/* Inner final block: salt words 16..19 | INT(i) (set per block) |
	   0x80 bit | zeros | 0x4a0 = 1184 bits = 64 + 80 + 4 bytes. */
	memcpy(ibuf, salt + 8 * 16, 8 * 16);
	for (i = 0; i < 8; i++)
		ibuf[8 * 5 + i] = 0x80000000;
	memset(ibuf + 8 * 6, 0x00, 8 * 36);
	for (i = 0; i < 8; i++)
		ibuf[8 * 15 + i] = 0x000004a0;

	/* Outer final-block padding: 0x80 bit after the 32-byte inner
	   digest, zeros, 0x300 = 768 bits = 64 + 32 bytes. */
	for (i = 0; i < 8; i++)
		obuf[8 * 8 + i] = 0x80000000;
	memset(obuf + 8 * 9, 0x00, 8 * 24);
	for (i = 0; i < 8; i++)
		obuf[8 * 15 + i] = 0x00000300;

	/* PBKDF2 blocks T_1..T_4 (4 * 32 = 128 output bytes per lane). */
	for (i = 0; i < 4; i++) {
		memcpy(obuf, istate, 8 * 32);
		/* Big-endian block counter INT(i+1) in every lane. */
		ibuf[8 * 4 + 0] = i + 1;
		ibuf[8 * 4 + 1] = i + 1;
		ibuf[8 * 4 + 2] = i + 1;
		ibuf[8 * 4 + 3] = i + 1;
		ibuf[8 * 4 + 4] = i + 1;
		ibuf[8 * 4 + 5] = i + 1;
		ibuf[8 * 4 + 6] = i + 1;
		ibuf[8 * 4 + 7] = i + 1;
		sha256_transform_8way(obuf, ibuf, 0);

		memcpy(ostate2, ostate, 8 * 32);
		sha256_transform_8way(ostate2, obuf, 0);
		for (j = 0; j < 8 * 8; j++)
			output[8 * 8 * i + j] = swab32(ostate2[j]);
	}
}
|
||||
|
||||
/*
 * 8-way PBKDF2-HMAC-SHA256, 128-byte salt -> 32-byte output (scrypt's
 * final PBKDF2 phase).  tstate (inner midstate) is consumed; ostate
 * (outer midstate) is overwritten with the final outer hash; output
 * receives 8 big-endian words per lane.
 */
static inline void PBKDF2_SHA256_128_32_8way(uint32_t *tstate,
	uint32_t *ostate, const uint32_t *salt, uint32_t *output)
{
	uint32_t buf[8 * 16] __attribute__((aligned(32)));
	int i;

	/* Inner hash: two byte-swapped 64-byte salt blocks, then the fixed
	   final block (counter + padding + 1568-bit length). */
	sha256_transform_8way(tstate, salt, 1);
	sha256_transform_8way(tstate, salt + 8 * 16, 1);
	sha256_transform_8way(tstate, finalblk_8way, 0);

	/* Outer message: inner digest | 0x80 bit | zeros | 0x300 bits. */
	memcpy(buf, tstate, 8 * 32);
	for (i = 0; i < 8; i++)
		buf[8 * 8 + i] = 0x80000000;
	memset(buf + 8 * 9, 0x00, 8 * 24);
	for (i = 0; i < 8; i++)
		buf[8 * 15 + i] = 0x00000300;
	sha256_transform_8way(ostate, buf, 0);

	for (i = 0; i < 8 * 8; i++)
		output[i] = swab32(ostate[i]);
}
|
||||
|
||||
#endif /* HAVE_SHA256_8WAY */
|
||||
|
||||
|
||||
#if defined(__x86_64__)
|
||||
|
||||
#define SCRYPT_MAX_WAYS 12
|
||||
#define HAVE_SCRYPT_3WAY 1
|
||||
int scrypt_best_throughput();
|
||||
void scrypt_core(uint32_t *X, uint32_t *V);
|
||||
void scrypt_core_3way(uint32_t *X, uint32_t *V);
|
||||
#if defined(USE_AVX2)
|
||||
#undef SCRYPT_MAX_WAYS
|
||||
#define SCRYPT_MAX_WAYS 24
|
||||
#define HAVE_SCRYPT_6WAY 1
|
||||
void scrypt_core_6way(uint32_t *X, uint32_t *V);
|
||||
#endif
|
||||
|
||||
#elif defined(__i386__)
|
||||
|
||||
#define SCRYPT_MAX_WAYS 4
|
||||
#define scrypt_best_throughput() 1
|
||||
void scrypt_core(uint32_t *X, uint32_t *V);
|
||||
|
||||
#elif defined(__arm__) && defined(__APCS_32__)
|
||||
|
||||
void scrypt_core(uint32_t *X, uint32_t *V);
|
||||
#if defined(__ARM_NEON__)
|
||||
#undef HAVE_SHA256_4WAY
|
||||
#define SCRYPT_MAX_WAYS 3
|
||||
#define HAVE_SCRYPT_3WAY 1
|
||||
#define scrypt_best_throughput() 3
|
||||
void scrypt_core_3way(uint32_t *X, uint32_t *V);
|
||||
#endif
|
||||
|
||||
#else
|
||||
|
||||
/*
 * B ^= Bx, then B += Salsa20/8 core of the XOR result (the scrypt
 * BlockMix primitive).  Four double-rounds; ROTL is a 32-bit rotate.
 */
static inline void xor_salsa8(uint32_t B[16], const uint32_t Bx[16])
{
	uint32_t x[16];
	int i;

	for (i = 0; i < 16; i++)
		x[i] = (B[i] ^= Bx[i]);

	for (i = 0; i < 8; i += 2) {
#define ROTL(a, b) (((a) << (b)) | ((a) >> (32 - (b))))
		/* Column round. */
		x[ 4] ^= ROTL(x[ 0] + x[12],  7);  x[ 9] ^= ROTL(x[ 5] + x[ 1],  7);
		x[14] ^= ROTL(x[10] + x[ 6],  7);  x[ 3] ^= ROTL(x[15] + x[11],  7);
		x[ 8] ^= ROTL(x[ 4] + x[ 0],  9);  x[13] ^= ROTL(x[ 9] + x[ 5],  9);
		x[ 2] ^= ROTL(x[14] + x[10],  9);  x[ 7] ^= ROTL(x[ 3] + x[15],  9);
		x[12] ^= ROTL(x[ 8] + x[ 4], 13);  x[ 1] ^= ROTL(x[13] + x[ 9], 13);
		x[ 6] ^= ROTL(x[ 2] + x[14], 13);  x[11] ^= ROTL(x[ 7] + x[ 3], 13);
		x[ 0] ^= ROTL(x[12] + x[ 8], 18);  x[ 5] ^= ROTL(x[ 1] + x[13], 18);
		x[10] ^= ROTL(x[ 6] + x[ 2], 18);  x[15] ^= ROTL(x[11] + x[ 7], 18);

		/* Row round. */
		x[ 1] ^= ROTL(x[ 0] + x[ 3],  7);  x[ 6] ^= ROTL(x[ 5] + x[ 4],  7);
		x[11] ^= ROTL(x[10] + x[ 9],  7);  x[12] ^= ROTL(x[15] + x[14],  7);
		x[ 2] ^= ROTL(x[ 1] + x[ 0],  9);  x[ 7] ^= ROTL(x[ 6] + x[ 5],  9);
		x[ 8] ^= ROTL(x[11] + x[10],  9);  x[13] ^= ROTL(x[12] + x[15],  9);
		x[ 3] ^= ROTL(x[ 2] + x[ 1], 13);  x[ 4] ^= ROTL(x[ 7] + x[ 6], 13);
		x[ 9] ^= ROTL(x[ 8] + x[11], 13);  x[14] ^= ROTL(x[13] + x[12], 13);
		x[ 0] ^= ROTL(x[ 3] + x[ 2], 18);  x[ 5] ^= ROTL(x[ 4] + x[ 7], 18);
		x[10] ^= ROTL(x[ 9] + x[ 8], 18);  x[15] ^= ROTL(x[14] + x[13], 18);
#undef ROTL
	}

	/* Feed-forward: add the permuted words back into B. */
	for (i = 0; i < 16; i++)
		B[i] += x[i];
}
|
||||
|
||||
/*
 * Scalar scrypt ROMix core, N = 1024, r = 1.  X is the 128-byte working
 * state (two 64-byte Salsa blocks); V is the 128 KiB scratchpad.
 */
static inline void scrypt_core(uint32_t *X, uint32_t *V)
{
	uint32_t n, k;
	uint32_t *v;

	/* Pass 1: store 1024 successive states into V, mixing after each. */
	for (n = 0, v = V; n < 1024; n++, v += 32) {
		memcpy(v, X, 128);
		xor_salsa8(&X[0], &X[16]);
		xor_salsa8(&X[16], &X[0]);
	}
	/* Pass 2: 1024 data-dependent reads; X[16] is the first word of the
	   second Salsa block and selects which stored state to fold in. */
	for (n = 0; n < 1024; n++) {
		const uint32_t *w = &V[32 * (X[16] & 1023)];
		for (k = 0; k < 32; k++)
			X[k] ^= w[k];
		xor_salsa8(&X[0], &X[16]);
		xor_salsa8(&X[16], &X[0]);
	}
}
|
||||
|
||||
#endif
|
||||
|
||||
/* Fallback for builds with no SIMD/multi-way scrypt core. */
#ifndef SCRYPT_MAX_WAYS
#define SCRYPT_MAX_WAYS 1
#define scrypt_best_throughput() 1
#endif

/* One 128 KiB ROMix scratchpad per way (N=1024 * 128 bytes), plus 63
 * spare bytes so the base pointer can be rounded up to a 64-byte
 * boundary by the callers. */
#define SCRYPT_BUFFER_SIZE (SCRYPT_MAX_WAYS * 131072 + 63)

/* Allocate the shared scrypt scratchpad; may return NULL on OOM. */
unsigned char *scrypt_buffer_alloc()
{
	unsigned char *buf = malloc(SCRYPT_BUFFER_SIZE);
	return buf;
}
|
||||
|
||||
/*
 * Full scalar scrypt(N=1024, r=1, p=1) of one 80-byte block header.
 * input: 20 words; output: 8 words; midstate: SHA-256 state of the first
 * 64 header bytes; scratchpad: buffer from scrypt_buffer_alloc.
 */
static void scrypt_1024_1_1_256(const uint32_t *input, uint32_t *output,
	uint32_t *midstate, unsigned char *scratchpad)
{
	uint32_t tstate[8], ostate[8];
	uint32_t X[32];
	uint32_t *V;

	/* Round the scratchpad base up to a 64-byte boundary. */
	V = (uint32_t *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63));

	memcpy(tstate, midstate, 32);
	HMAC_SHA256_80_init(input, tstate, ostate);
	PBKDF2_SHA256_80_128(tstate, ostate, input, X);

	scrypt_core(X, V);

	PBKDF2_SHA256_128_32(tstate, ostate, X, output);
}
|
||||
|
||||
#ifdef HAVE_SHA256_4WAY
|
||||
/*
 * 4-way scrypt: hashes four 80-byte headers at once.  The SHA-256/PBKDF2
 * phases run SIMD with lanes word-interleaved in W; the memory-hard core
 * runs the scalar scrypt_core per lane, so X holds the lanes contiguous
 * (32 words each) and the scratchpad V is reused sequentially.
 */
static void scrypt_1024_1_1_256_4way(const uint32_t *input,
	uint32_t *output, uint32_t *midstate, unsigned char *scratchpad)
{
	uint32_t tstate[4 * 8] __attribute__((aligned(128)));
	uint32_t ostate[4 * 8] __attribute__((aligned(128)));
	uint32_t W[4 * 32] __attribute__((aligned(128)));
	uint32_t X[4 * 32] __attribute__((aligned(128)));
	uint32_t *V;
	int i, k;

	V = (uint32_t *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63));

	/* Interleave the four 20-word inputs and replicate the midstate. */
	for (i = 0; i < 20; i++)
		for (k = 0; k < 4; k++)
			W[4 * i + k] = input[k * 20 + i];
	for (i = 0; i < 8; i++)
		for (k = 0; k < 4; k++)
			tstate[4 * i + k] = midstate[i];
	HMAC_SHA256_80_init_4way(W, tstate, ostate);
	PBKDF2_SHA256_80_128_4way(tstate, ostate, W, W);
	/* De-interleave for the scalar cores. */
	for (i = 0; i < 32; i++)
		for (k = 0; k < 4; k++)
			X[k * 32 + i] = W[4 * i + k];
	scrypt_core(X + 0 * 32, V);
	scrypt_core(X + 1 * 32, V);
	scrypt_core(X + 2 * 32, V);
	scrypt_core(X + 3 * 32, V);
	/* Re-interleave for the final SIMD PBKDF2, then split the output. */
	for (i = 0; i < 32; i++)
		for (k = 0; k < 4; k++)
			W[4 * i + k] = X[k * 32 + i];
	PBKDF2_SHA256_128_32_4way(tstate, ostate, W, W);
	for (i = 0; i < 8; i++)
		for (k = 0; k < 4; k++)
			output[k * 8 + i] = W[4 * i + k];
}
|
||||
#endif /* HAVE_SHA256_4WAY */
|
||||
|
||||
#ifdef HAVE_SCRYPT_3WAY
|
||||
|
||||
/*
 * 3-way scrypt: scalar SHA-256/PBKDF2 per lane, then one call into the
 * assembly scrypt_core_3way which interleaves three ROMix instances for
 * better memory-latency hiding.  input: 3 x 20 words; output: 3 x 8.
 */
static void scrypt_1024_1_1_256_3way(const uint32_t *input,
	uint32_t *output, uint32_t *midstate, unsigned char *scratchpad)
{
	uint32_t tstate[3 * 8], ostate[3 * 8];
	uint32_t X[3 * 32] __attribute__((aligned(64)));
	uint32_t *V;

	V = (uint32_t *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63));

	memcpy(tstate + 0, midstate, 32);
	memcpy(tstate + 8, midstate, 32);
	memcpy(tstate + 16, midstate, 32);
	HMAC_SHA256_80_init(input + 0, tstate + 0, ostate + 0);
	HMAC_SHA256_80_init(input + 20, tstate + 8, ostate + 8);
	HMAC_SHA256_80_init(input + 40, tstate + 16, ostate + 16);
	PBKDF2_SHA256_80_128(tstate + 0, ostate + 0, input + 0, X + 0);
	PBKDF2_SHA256_80_128(tstate + 8, ostate + 8, input + 20, X + 32);
	PBKDF2_SHA256_80_128(tstate + 16, ostate + 16, input + 40, X + 64);

	scrypt_core_3way(X, V);

	PBKDF2_SHA256_128_32(tstate + 0, ostate + 0, X + 0, output + 0);
	PBKDF2_SHA256_128_32(tstate + 8, ostate + 8, X + 32, output + 8);
	PBKDF2_SHA256_128_32(tstate + 16, ostate + 16, X + 64, output + 16);
}
|
||||
|
||||
#ifdef HAVE_SHA256_4WAY
|
||||
/*
 * 12-way scrypt = 3 groups x 4-way SIMD SHA-256 for the PBKDF2 phases,
 * then 4 calls into the 3-way assembly ROMix core.  Within each group j,
 * W is word-interleaved across 4 lanes; X holds 12 contiguous 32-word
 * lane states (the 3-way core consumes 96 words per call).
 */
static void scrypt_1024_1_1_256_12way(const uint32_t *input,
	uint32_t *output, uint32_t *midstate, unsigned char *scratchpad)
{
	uint32_t tstate[12 * 8] __attribute__((aligned(128)));
	uint32_t ostate[12 * 8] __attribute__((aligned(128)));
	uint32_t W[12 * 32] __attribute__((aligned(128)));
	uint32_t X[12 * 32] __attribute__((aligned(128)));
	uint32_t *V;
	int i, j, k;

	V = (uint32_t *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63));

	/* Interleave 12 inputs into 3 four-lane groups of 128 words. */
	for (j = 0; j < 3; j++)
		for (i = 0; i < 20; i++)
			for (k = 0; k < 4; k++)
				W[128 * j + 4 * i + k] = input[80 * j + k * 20 + i];
	for (j = 0; j < 3; j++)
		for (i = 0; i < 8; i++)
			for (k = 0; k < 4; k++)
				tstate[32 * j + 4 * i + k] = midstate[i];
	HMAC_SHA256_80_init_4way(W + 0, tstate + 0, ostate + 0);
	HMAC_SHA256_80_init_4way(W + 128, tstate + 32, ostate + 32);
	HMAC_SHA256_80_init_4way(W + 256, tstate + 64, ostate + 64);
	PBKDF2_SHA256_80_128_4way(tstate + 0, ostate + 0, W + 0, W + 0);
	PBKDF2_SHA256_80_128_4way(tstate + 32, ostate + 32, W + 128, W + 128);
	PBKDF2_SHA256_80_128_4way(tstate + 64, ostate + 64, W + 256, W + 256);
	/* De-interleave each group into contiguous per-lane states. */
	for (j = 0; j < 3; j++)
		for (i = 0; i < 32; i++)
			for (k = 0; k < 4; k++)
				X[128 * j + k * 32 + i] = W[128 * j + 4 * i + k];
	scrypt_core_3way(X + 0 * 96, V);
	scrypt_core_3way(X + 1 * 96, V);
	scrypt_core_3way(X + 2 * 96, V);
	scrypt_core_3way(X + 3 * 96, V);
	/* Re-interleave, run the final SIMD PBKDF2, split the outputs. */
	for (j = 0; j < 3; j++)
		for (i = 0; i < 32; i++)
			for (k = 0; k < 4; k++)
				W[128 * j + 4 * i + k] = X[128 * j + k * 32 + i];
	PBKDF2_SHA256_128_32_4way(tstate + 0, ostate + 0, W + 0, W + 0);
	PBKDF2_SHA256_128_32_4way(tstate + 32, ostate + 32, W + 128, W + 128);
	PBKDF2_SHA256_128_32_4way(tstate + 64, ostate + 64, W + 256, W + 256);
	for (j = 0; j < 3; j++)
		for (i = 0; i < 8; i++)
			for (k = 0; k < 4; k++)
				output[32 * j + k * 8 + i] = W[128 * j + 4 * i + k];
}
|
||||
#endif /* HAVE_SHA256_4WAY */
|
||||
|
||||
#endif /* HAVE_SCRYPT_3WAY */
|
||||
|
||||
#ifdef HAVE_SCRYPT_6WAY
|
||||
/*
 * 24-way scrypt = 3 groups x 8-way (AVX2) SIMD SHA-256 for the PBKDF2
 * phases, then 4 calls into the 6-way assembly ROMix core.  Same
 * interleave/de-interleave scheme as the 12-way path, with 8 lanes per
 * group (256 words) and 6 contiguous lane states (192 words) per core
 * call.
 */
static void scrypt_1024_1_1_256_24way(const uint32_t *input,
	uint32_t *output, uint32_t *midstate, unsigned char *scratchpad)
{
	uint32_t tstate[24 * 8] __attribute__((aligned(128)));
	uint32_t ostate[24 * 8] __attribute__((aligned(128)));
	uint32_t W[24 * 32] __attribute__((aligned(128)));
	uint32_t X[24 * 32] __attribute__((aligned(128)));
	uint32_t *V;
	int i, j, k;

	V = (uint32_t *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63));

	/* Interleave 24 inputs into 3 eight-lane groups. */
	for (j = 0; j < 3; j++)
		for (i = 0; i < 20; i++)
			for (k = 0; k < 8; k++)
				W[8 * 32 * j + 8 * i + k] = input[8 * 20 * j + k * 20 + i];
	for (j = 0; j < 3; j++)
		for (i = 0; i < 8; i++)
			for (k = 0; k < 8; k++)
				tstate[8 * 8 * j + 8 * i + k] = midstate[i];
	HMAC_SHA256_80_init_8way(W + 0, tstate + 0, ostate + 0);
	HMAC_SHA256_80_init_8way(W + 256, tstate + 64, ostate + 64);
	HMAC_SHA256_80_init_8way(W + 512, tstate + 128, ostate + 128);
	PBKDF2_SHA256_80_128_8way(tstate + 0, ostate + 0, W + 0, W + 0);
	PBKDF2_SHA256_80_128_8way(tstate + 64, ostate + 64, W + 256, W + 256);
	PBKDF2_SHA256_80_128_8way(tstate + 128, ostate + 128, W + 512, W + 512);
	/* De-interleave into 24 contiguous 32-word lane states. */
	for (j = 0; j < 3; j++)
		for (i = 0; i < 32; i++)
			for (k = 0; k < 8; k++)
				X[8 * 32 * j + k * 32 + i] = W[8 * 32 * j + 8 * i + k];
	scrypt_core_6way(X + 0 * 32, V);
	scrypt_core_6way(X + 6 * 32, V);
	scrypt_core_6way(X + 12 * 32, V);
	scrypt_core_6way(X + 18 * 32, V);
	/* Re-interleave, final PBKDF2, split the 8-word outputs per lane. */
	for (j = 0; j < 3; j++)
		for (i = 0; i < 32; i++)
			for (k = 0; k < 8; k++)
				W[8 * 32 * j + 8 * i + k] = X[8 * 32 * j + k * 32 + i];
	PBKDF2_SHA256_128_32_8way(tstate + 0, ostate + 0, W + 0, W + 0);
	PBKDF2_SHA256_128_32_8way(tstate + 64, ostate + 64, W + 256, W + 256);
	PBKDF2_SHA256_128_32_8way(tstate + 128, ostate + 128, W + 512, W + 512);
	for (j = 0; j < 3; j++)
		for (i = 0; i < 8; i++)
			for (k = 0; k < 8; k++)
				output[8 * 8 * j + k * 8 + i] = W[8 * 32 * j + 8 * i + k];
}
|
||||
#endif /* HAVE_SCRYPT_6WAY */
|
||||
|
||||
/*
 * Scrypt nonce scanner.  Iterates nonces from pdata[19]+1 up to
 * max_nonce, hashing `throughput` headers per pass with the widest
 * available kernel (1/3/4/12/24-way).  Returns 1 with pdata[19] set to
 * the winning nonce when a hash meets ptarget, else 0 with pdata[19]
 * advanced to the last nonce tried.  *hashes_done is updated either way.
 */
int scanhash_scrypt(int thr_id, uint32_t *pdata,
	unsigned char *scratchbuf, const uint32_t *ptarget,
	uint32_t max_nonce, unsigned long *hashes_done)
{
	uint32_t data[SCRYPT_MAX_WAYS * 20], hash[SCRYPT_MAX_WAYS * 8];
	uint32_t midstate[8];
	uint32_t n = pdata[19] - 1;             /* pre-decrement; ++n below */
	const uint32_t Htarg = ptarget[7];      /* high word for cheap pre-test */
	int throughput = scrypt_best_throughput();
	int i;

#ifdef HAVE_SHA256_4WAY
	if (sha256_use_4way())
		throughput *= 4;
#endif

	/* One header copy per lane; only word 19 (the nonce) will differ. */
	for (i = 0; i < throughput; i++)
		memcpy(data + i * 20, pdata, 80);

	/* Midstate of the first 64 header bytes, shared by all lanes. */
	sha256_init(midstate);
	sha256_transform(midstate, data, 0);

	do {
		/* Give each lane its own consecutive nonce. */
		for (i = 0; i < throughput; i++)
			data[i * 20 + 19] = ++n;

		/* Dispatch on throughput; falls through to the scalar path. */
#if defined(HAVE_SHA256_4WAY)
		if (throughput == 4)
			scrypt_1024_1_1_256_4way(data, hash, midstate, scratchbuf);
		else
#endif
#if defined(HAVE_SCRYPT_3WAY) && defined(HAVE_SHA256_4WAY)
		if (throughput == 12)
			scrypt_1024_1_1_256_12way(data, hash, midstate, scratchbuf);
		else
#endif
#if defined(HAVE_SCRYPT_6WAY)
		if (throughput == 24)
			scrypt_1024_1_1_256_24way(data, hash, midstate, scratchbuf);
		else
#endif
#if defined(HAVE_SCRYPT_3WAY)
		if (throughput == 3)
			scrypt_1024_1_1_256_3way(data, hash, midstate, scratchbuf);
		else
#endif
		scrypt_1024_1_1_256(data, hash, midstate, scratchbuf);

		/* Cheap high-word test first, full target compare on hit. */
		for (i = 0; i < throughput; i++) {
			if (hash[i * 8 + 7] <= Htarg && fulltest(hash + i * 8, ptarget)) {
				*hashes_done = n - pdata[19] + 1;
				pdata[19] = data[i * 20 + 19];
				return 1;
			}
		}
	} while (n < max_nonce && !work_restart[thr_id].restart);

	*hashes_done = n - pdata[19] + 1;
	pdata[19] = n;
	return 0;
}
|
||||
|
||||
/*
 * Register the scrypt algorithm with the algo gate: wires up the
 * scanhash and hash entry points.  Always succeeds.
 */
bool register_scrypt_algo( algo_gate_t* gate )
{
  gate->scanhash = &scanhash_scrypt;
  gate->hash = &scrypt_hash;
//  gate->get_max64 = scrypt_get_max64;
  return true;
}
|
||||
|
||||
1583
algo/x2.hide/sha2-arm.S
Normal file
1583
algo/x2.hide/sha2-arm.S
Normal file
File diff suppressed because it is too large
Load Diff
3661
algo/x2.hide/sha2-x64.S
Normal file
3661
algo/x2.hide/sha2-x64.S
Normal file
File diff suppressed because it is too large
Load Diff
1193
algo/x2.hide/sha2-x86.S
Normal file
1193
algo/x2.hide/sha2-x86.S
Normal file
File diff suppressed because it is too large
Load Diff
630
algo/x2.hide/sha2.c
Normal file
630
algo/x2.hide/sha2.c
Normal file
@@ -0,0 +1,630 @@
|
||||
/*
|
||||
* Copyright 2011 ArtForz
|
||||
* Copyright 2011-2013 pooler
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version. See COPYING for more details.
|
||||
*/
|
||||
|
||||
#include "../cpuminer-config.h"
|
||||
#include "../miner.h"
|
||||
|
||||
#include <string.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#if defined(__arm__) && defined(__APCS_32__)
|
||||
#define EXTERN_SHA256
|
||||
#endif
|
||||
|
||||
/* SHA-256 initial hash value H(0), per FIPS 180-4 section 5.3.3. */
static const uint32_t sha256_h[8] = {
	0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
	0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
};
|
||||
|
||||
/* SHA-256 round constants K0..K63, per FIPS 180-4 section 4.2.2. */
static const uint32_t sha256_k[64] = {
	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
	0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
	0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
	0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
	0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
	0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
	0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
	0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
	0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
	0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
	0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
	0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
	0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
|
||||
|
||||
/* Load the standard SHA-256 initial state into state[0..7] (32 bytes). */
void sha256_init(uint32_t *state)
{
	memcpy(state, sha256_h, 32);
}
|
||||
|
||||
/* Elementary functions used by SHA256 */
/* Ch = choose, Maj = majority, S0/S1 = big sigmas, s0/s1 = small sigmas
 * (FIPS 180-4 section 4.1.2).  Ch and Maj use the reduced-operation
 * equivalents of the standard formulas. */
#define Ch(x, y, z) ((x & (y ^ z)) ^ z)
#define Maj(x, y, z) ((x & (y | z)) | (y & z))
#define ROTR(x, n) ((x >> n) | (x << (32 - n)))
#define S0(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
#define S1(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
#define s0(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ (x >> 3))
#define s1(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ (x >> 10))

/* SHA256 round function */
/* Updates d and h in place; caller must declare uint32_t t0, t1. */
#define RND(a, b, c, d, e, f, g, h, k) \
	do { \
		t0 = h + S1(e) + Ch(e, f, g) + k; \
		t1 = S0(a) + Maj(a, b, c); \
		d += t0; \
		h = t0 + t1; \
	} while (0)

/* Adjusted round function for rotating state */
/* Maps round index i onto the rotating a..h register assignment so the
 * state array never needs to be physically shifted. */
#define RNDr(S, W, i) \
	RND(S[(64 - i) % 8], S[(65 - i) % 8], \
	    S[(66 - i) % 8], S[(67 - i) % 8], \
	    S[(68 - i) % 8], S[(69 - i) % 8], \
	    S[(70 - i) % 8], S[(71 - i) % 8], \
	    W[i] + sha256_k[i])
|
||||
|
||||
#ifndef EXTERN_SHA256
|
||||
|
||||
/*
|
||||
* SHA256 block compression function. The 256-bit state is transformed via
|
||||
* the 512-bit input block to produce a new state.
|
||||
*/
|
||||
/*
 * SHA-256 block compression function: folds one 512-bit block into the
 * 256-bit state.  When swap is nonzero the 16 input words are
 * byte-swapped (big-endian input) before use.
 */
void sha256_transform(uint32_t *state, const uint32_t *block, int swap)
{
	uint32_t W[64];
	uint32_t S[8];
	uint32_t t0, t1;
	int i;

	/* 1. Prepare the message schedule W. */
	if (swap) {
		for (i = 0; i < 16; i++)
			W[i] = swab32(block[i]);
	} else {
		memcpy(W, block, 64);
	}
	for (i = 16; i < 64; i++)
		W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];

	/* 2. Initialize working variables from the current state. */
	memcpy(S, state, 32);

	/* 3. 64 rounds; RNDr rotates the a..h roles by index, and the
	 * compiler unrolls/constant-folds the (64-i)%8 selectors. */
	for (i = 0; i < 64; i++)
		RNDr(S, W, i);

	/* 4. Feed-forward into the caller's state. */
	for (i = 0; i < 8; i++)
		state[i] += S[i];
}
|
||||
|
||||
#endif /* EXTERN_SHA256 */
|
||||
|
||||
|
||||
/*
 * Message block template for the second hash of sha256d: words 0..7 are
 * overwritten with the first-pass digest; words 8..15 are the fixed
 * padding (0x80 bit, zeros, 0x100 = 256-bit message length).
 */
static const uint32_t sha256d_hash1[16] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x80000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000100
};
|
||||
|
||||
/*
 * Double SHA-256 of an 80-byte header (data, 20 words, already in the
 * hashing byte order); writes 8 byte-swapped words to hash.  Used to
 * recompute a candidate's full hash for target checking.
 */
static void sha256d_80_swap(uint32_t *hash, const uint32_t *data)
{
	uint32_t S[16];
	int i;

	/* First pass: two blocks of the 80-byte message... */
	sha256_init(S);
	sha256_transform(S, data, 0);
	sha256_transform(S, data + 16, 0);
	/* ...with padding for the second-pass 32-byte message appended. */
	memcpy(S + 8, sha256d_hash1 + 8, 32);
	sha256_init(hash);
	sha256_transform(hash, S, 0);
	for (i = 0; i < 8; i++)
		hash[i] = swab32(hash[i]);
}
|
||||
|
||||
/*
 * General double SHA-256: hash = SHA256(SHA256(data[0..len-1])), output
 * as 32 big-endian bytes.  Handles arbitrary len, including the case
 * where the 0x80/length padding spills into an extra block (the loop
 * runs while r > -9 so a final all-padding block is still processed).
 */
void sha256d(unsigned char *hash, const unsigned char *data, int len)
{
	uint32_t S[16], T[16];
	int i, r;

	sha256_init(S);
	for (r = len; r > -9; r -= 64) {
		/* r = bytes of message not yet consumed (may go negative). */
		if (r < 64)
			memset(T, 0, 64);
		memcpy(T, data + len - r, r > 64 ? 64 : (r < 0 ? 0 : r));
		if (r >= 0 && r < 64)
			((unsigned char *)T)[r] = 0x80;  /* padding bit */
		for (i = 0; i < 16; i++)
			T[i] = be32dec(T + i);
		if (r < 56)
			T[15] = 8 * len;                 /* bit length */
		sha256_transform(S, T, 0);
	}
	/* Second pass over the 32-byte digest plus fixed padding. */
	memcpy(S + 8, sha256d_hash1 + 8, 32);
	sha256_init(T);
	sha256_transform(T, S, 0);
	for (i = 0; i < 8; i++)
		be32enc((uint32_t *)hash + i, T[i]);
}
|
||||
|
||||
/*
 * Partially extends the message schedule W[16..31] for the second
 * 64-byte header block, computing every term that does NOT depend on
 * W[3] (the nonce word).  sha256d_ms later adds the nonce-dependent
 * terms per nonce, so this work is done once per header instead of once
 * per hash.  Note several entries are deliberately incomplete sums.
 */
static inline void sha256d_preextend(uint32_t *W)
{
	W[16] = s1(W[14]) + W[ 9] + s0(W[ 1]) + W[ 0];
	W[17] = s1(W[15]) + W[10] + s0(W[ 2]) + W[ 1];
	W[18] = s1(W[16]) + W[11] + W[ 2];          /* missing s0(W[3]) */
	W[19] = s1(W[17]) + W[12] + s0(W[ 4]);      /* missing W[3] */
	W[20] = W[13] + s0(W[ 5]) + W[ 4];          /* missing s1(W[18]) */
	W[21] = W[14] + s0(W[ 6]) + W[ 5];          /* missing s1(W[19]) */
	W[22] = W[15] + s0(W[ 7]) + W[ 6];          /* missing s1(W[20]) */
	W[23] = W[16] + s0(W[ 8]) + W[ 7];          /* missing s1(W[21]) */
	W[24] = W[17] + s0(W[ 9]) + W[ 8];          /* missing s1(W[22]) */
	W[25] = s0(W[10]) + W[ 9];                  /* missing s1+W terms */
	W[26] = s0(W[11]) + W[10];
	W[27] = s0(W[12]) + W[11];
	W[28] = s0(W[13]) + W[12];
	W[29] = s0(W[14]) + W[13];
	W[30] = s0(W[15]) + W[14];                  /* partial */
	W[31] = s0(W[16]) + W[15];                  /* partial */
}
|
||||
|
||||
/*
 * Runs the first three rounds of the second-block compression, which use
 * only W[0..2] and therefore never depend on the nonce (W[3]).  S is the
 * midstate copy that sha256d_ms resumes from round 3.
 */
static inline void sha256d_prehash(uint32_t *S, const uint32_t *W)
{
	uint32_t t0, t1;
	RNDr(S, W, 0);
	RNDr(S, W, 1);
	RNDr(S, W, 2);
}
|
||||
|
||||
#ifdef EXTERN_SHA256
|
||||
|
||||
void sha256d_ms(uint32_t *hash, uint32_t *W,
|
||||
const uint32_t *midstate, const uint32_t *prehash);
|
||||
|
||||
#else
|
||||
|
||||
static inline void sha256d_ms(uint32_t *hash, uint32_t *W,
|
||||
const uint32_t *midstate, const uint32_t *prehash)
|
||||
{
|
||||
uint32_t S[64];
|
||||
uint32_t t0, t1;
|
||||
int i;
|
||||
|
||||
S[18] = W[18];
|
||||
S[19] = W[19];
|
||||
S[20] = W[20];
|
||||
S[22] = W[22];
|
||||
S[23] = W[23];
|
||||
S[24] = W[24];
|
||||
S[30] = W[30];
|
||||
S[31] = W[31];
|
||||
|
||||
W[18] += s0(W[3]);
|
||||
W[19] += W[3];
|
||||
W[20] += s1(W[18]);
|
||||
W[21] = s1(W[19]);
|
||||
W[22] += s1(W[20]);
|
||||
W[23] += s1(W[21]);
|
||||
W[24] += s1(W[22]);
|
||||
W[25] = s1(W[23]) + W[18];
|
||||
W[26] = s1(W[24]) + W[19];
|
||||
W[27] = s1(W[25]) + W[20];
|
||||
W[28] = s1(W[26]) + W[21];
|
||||
W[29] = s1(W[27]) + W[22];
|
||||
W[30] += s1(W[28]) + W[23];
|
||||
W[31] += s1(W[29]) + W[24];
|
||||
for (i = 32; i < 64; i += 2) {
|
||||
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
|
||||
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15];
|
||||
}
|
||||
|
||||
memcpy(S, prehash, 32);
|
||||
|
||||
RNDr(S, W, 3);
|
||||
RNDr(S, W, 4);
|
||||
RNDr(S, W, 5);
|
||||
RNDr(S, W, 6);
|
||||
RNDr(S, W, 7);
|
||||
RNDr(S, W, 8);
|
||||
RNDr(S, W, 9);
|
||||
RNDr(S, W, 10);
|
||||
RNDr(S, W, 11);
|
||||
RNDr(S, W, 12);
|
||||
RNDr(S, W, 13);
|
||||
RNDr(S, W, 14);
|
||||
RNDr(S, W, 15);
|
||||
RNDr(S, W, 16);
|
||||
RNDr(S, W, 17);
|
||||
RNDr(S, W, 18);
|
||||
RNDr(S, W, 19);
|
||||
RNDr(S, W, 20);
|
||||
RNDr(S, W, 21);
|
||||
RNDr(S, W, 22);
|
||||
RNDr(S, W, 23);
|
||||
RNDr(S, W, 24);
|
||||
RNDr(S, W, 25);
|
||||
RNDr(S, W, 26);
|
||||
RNDr(S, W, 27);
|
||||
RNDr(S, W, 28);
|
||||
RNDr(S, W, 29);
|
||||
RNDr(S, W, 30);
|
||||
RNDr(S, W, 31);
|
||||
RNDr(S, W, 32);
|
||||
RNDr(S, W, 33);
|
||||
RNDr(S, W, 34);
|
||||
RNDr(S, W, 35);
|
||||
RNDr(S, W, 36);
|
||||
RNDr(S, W, 37);
|
||||
RNDr(S, W, 38);
|
||||
RNDr(S, W, 39);
|
||||
RNDr(S, W, 40);
|
||||
RNDr(S, W, 41);
|
||||
RNDr(S, W, 42);
|
||||
RNDr(S, W, 43);
|
||||
RNDr(S, W, 44);
|
||||
RNDr(S, W, 45);
|
||||
RNDr(S, W, 46);
|
||||
RNDr(S, W, 47);
|
||||
RNDr(S, W, 48);
|
||||
RNDr(S, W, 49);
|
||||
RNDr(S, W, 50);
|
||||
RNDr(S, W, 51);
|
||||
RNDr(S, W, 52);
|
||||
RNDr(S, W, 53);
|
||||
RNDr(S, W, 54);
|
||||
RNDr(S, W, 55);
|
||||
RNDr(S, W, 56);
|
||||
RNDr(S, W, 57);
|
||||
RNDr(S, W, 58);
|
||||
RNDr(S, W, 59);
|
||||
RNDr(S, W, 60);
|
||||
RNDr(S, W, 61);
|
||||
RNDr(S, W, 62);
|
||||
RNDr(S, W, 63);
|
||||
|
||||
for (i = 0; i < 8; i++)
|
||||
S[i] += midstate[i];
|
||||
|
||||
W[18] = S[18];
|
||||
W[19] = S[19];
|
||||
W[20] = S[20];
|
||||
W[22] = S[22];
|
||||
W[23] = S[23];
|
||||
W[24] = S[24];
|
||||
W[30] = S[30];
|
||||
W[31] = S[31];
|
||||
|
||||
memcpy(S + 8, sha256d_hash1 + 8, 32);
|
||||
S[16] = s1(sha256d_hash1[14]) + sha256d_hash1[ 9] + s0(S[ 1]) + S[ 0];
|
||||
S[17] = s1(sha256d_hash1[15]) + sha256d_hash1[10] + s0(S[ 2]) + S[ 1];
|
||||
S[18] = s1(S[16]) + sha256d_hash1[11] + s0(S[ 3]) + S[ 2];
|
||||
S[19] = s1(S[17]) + sha256d_hash1[12] + s0(S[ 4]) + S[ 3];
|
||||
S[20] = s1(S[18]) + sha256d_hash1[13] + s0(S[ 5]) + S[ 4];
|
||||
S[21] = s1(S[19]) + sha256d_hash1[14] + s0(S[ 6]) + S[ 5];
|
||||
S[22] = s1(S[20]) + sha256d_hash1[15] + s0(S[ 7]) + S[ 6];
|
||||
S[23] = s1(S[21]) + S[16] + s0(sha256d_hash1[ 8]) + S[ 7];
|
||||
S[24] = s1(S[22]) + S[17] + s0(sha256d_hash1[ 9]) + sha256d_hash1[ 8];
|
||||
S[25] = s1(S[23]) + S[18] + s0(sha256d_hash1[10]) + sha256d_hash1[ 9];
|
||||
S[26] = s1(S[24]) + S[19] + s0(sha256d_hash1[11]) + sha256d_hash1[10];
|
||||
S[27] = s1(S[25]) + S[20] + s0(sha256d_hash1[12]) + sha256d_hash1[11];
|
||||
S[28] = s1(S[26]) + S[21] + s0(sha256d_hash1[13]) + sha256d_hash1[12];
|
||||
S[29] = s1(S[27]) + S[22] + s0(sha256d_hash1[14]) + sha256d_hash1[13];
|
||||
S[30] = s1(S[28]) + S[23] + s0(sha256d_hash1[15]) + sha256d_hash1[14];
|
||||
S[31] = s1(S[29]) + S[24] + s0(S[16]) + sha256d_hash1[15];
|
||||
for (i = 32; i < 60; i += 2) {
|
||||
S[i] = s1(S[i - 2]) + S[i - 7] + s0(S[i - 15]) + S[i - 16];
|
||||
S[i+1] = s1(S[i - 1]) + S[i - 6] + s0(S[i - 14]) + S[i - 15];
|
||||
}
|
||||
S[60] = s1(S[58]) + S[53] + s0(S[45]) + S[44];
|
||||
|
||||
sha256_init(hash);
|
||||
|
||||
RNDr(hash, S, 0);
|
||||
RNDr(hash, S, 1);
|
||||
RNDr(hash, S, 2);
|
||||
RNDr(hash, S, 3);
|
||||
RNDr(hash, S, 4);
|
||||
RNDr(hash, S, 5);
|
||||
RNDr(hash, S, 6);
|
||||
RNDr(hash, S, 7);
|
||||
RNDr(hash, S, 8);
|
||||
RNDr(hash, S, 9);
|
||||
RNDr(hash, S, 10);
|
||||
RNDr(hash, S, 11);
|
||||
RNDr(hash, S, 12);
|
||||
RNDr(hash, S, 13);
|
||||
RNDr(hash, S, 14);
|
||||
RNDr(hash, S, 15);
|
||||
RNDr(hash, S, 16);
|
||||
RNDr(hash, S, 17);
|
||||
RNDr(hash, S, 18);
|
||||
RNDr(hash, S, 19);
|
||||
RNDr(hash, S, 20);
|
||||
RNDr(hash, S, 21);
|
||||
RNDr(hash, S, 22);
|
||||
RNDr(hash, S, 23);
|
||||
RNDr(hash, S, 24);
|
||||
RNDr(hash, S, 25);
|
||||
RNDr(hash, S, 26);
|
||||
RNDr(hash, S, 27);
|
||||
RNDr(hash, S, 28);
|
||||
RNDr(hash, S, 29);
|
||||
RNDr(hash, S, 30);
|
||||
RNDr(hash, S, 31);
|
||||
RNDr(hash, S, 32);
|
||||
RNDr(hash, S, 33);
|
||||
RNDr(hash, S, 34);
|
||||
RNDr(hash, S, 35);
|
||||
RNDr(hash, S, 36);
|
||||
RNDr(hash, S, 37);
|
||||
RNDr(hash, S, 38);
|
||||
RNDr(hash, S, 39);
|
||||
RNDr(hash, S, 40);
|
||||
RNDr(hash, S, 41);
|
||||
RNDr(hash, S, 42);
|
||||
RNDr(hash, S, 43);
|
||||
RNDr(hash, S, 44);
|
||||
RNDr(hash, S, 45);
|
||||
RNDr(hash, S, 46);
|
||||
RNDr(hash, S, 47);
|
||||
RNDr(hash, S, 48);
|
||||
RNDr(hash, S, 49);
|
||||
RNDr(hash, S, 50);
|
||||
RNDr(hash, S, 51);
|
||||
RNDr(hash, S, 52);
|
||||
RNDr(hash, S, 53);
|
||||
RNDr(hash, S, 54);
|
||||
RNDr(hash, S, 55);
|
||||
RNDr(hash, S, 56);
|
||||
|
||||
hash[2] += hash[6] + S1(hash[3]) + Ch(hash[3], hash[4], hash[5])
|
||||
+ S[57] + sha256_k[57];
|
||||
hash[1] += hash[5] + S1(hash[2]) + Ch(hash[2], hash[3], hash[4])
|
||||
+ S[58] + sha256_k[58];
|
||||
hash[0] += hash[4] + S1(hash[1]) + Ch(hash[1], hash[2], hash[3])
|
||||
+ S[59] + sha256_k[59];
|
||||
hash[7] += hash[3] + S1(hash[0]) + Ch(hash[0], hash[1], hash[2])
|
||||
+ S[60] + sha256_k[60]
|
||||
+ sha256_h[7];
|
||||
}
|
||||
|
||||
#endif /* EXTERN_SHA256 */
|
||||
|
||||
#ifdef HAVE_SHA256_4WAY
|
||||
|
||||
void sha256d_ms_4way(uint32_t *hash, uint32_t *data,
|
||||
const uint32_t *midstate, const uint32_t *prehash);
|
||||
|
||||
static inline int scanhash_sha256d_4way(int thr_id, uint32_t *pdata,
|
||||
const uint32_t *ptarget, uint32_t max_nonce, unsigned long *hashes_done)
|
||||
{
|
||||
uint32_t data[4 * 64] __attribute__((aligned(128)));
|
||||
uint32_t hash[4 * 8] __attribute__((aligned(32)));
|
||||
uint32_t midstate[4 * 8] __attribute__((aligned(32)));
|
||||
uint32_t prehash[4 * 8] __attribute__((aligned(32)));
|
||||
uint32_t n = pdata[19] - 1;
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t Htarg = ptarget[7];
|
||||
int i, j;
|
||||
|
||||
memcpy(data, pdata + 16, 64);
|
||||
sha256d_preextend(data);
|
||||
for (i = 31; i >= 0; i--)
|
||||
for (j = 0; j < 4; j++)
|
||||
data[i * 4 + j] = data[i];
|
||||
|
||||
sha256_init(midstate);
|
||||
sha256_transform(midstate, pdata, 0);
|
||||
memcpy(prehash, midstate, 32);
|
||||
sha256d_prehash(prehash, pdata + 16);
|
||||
for (i = 7; i >= 0; i--) {
|
||||
for (j = 0; j < 4; j++) {
|
||||
midstate[i * 4 + j] = midstate[i];
|
||||
prehash[i * 4 + j] = prehash[i];
|
||||
}
|
||||
}
|
||||
|
||||
do {
|
||||
for (i = 0; i < 4; i++)
|
||||
data[4 * 3 + i] = ++n;
|
||||
|
||||
sha256d_ms_4way(hash, data, midstate, prehash);
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
if (swab32(hash[4 * 7 + i]) <= Htarg) {
|
||||
pdata[19] = data[4 * 3 + i];
|
||||
sha256d_80_swap(hash, pdata);
|
||||
if (fulltest(hash, ptarget)) {
|
||||
*hashes_done = n - first_nonce + 1;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
} while (n < max_nonce && !work_restart[thr_id].restart);
|
||||
|
||||
*hashes_done = n - first_nonce + 1;
|
||||
pdata[19] = n;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* HAVE_SHA256_4WAY */
|
||||
|
||||
#ifdef HAVE_SHA256_8WAY
|
||||
|
||||
void sha256d_ms_8way(uint32_t *hash, uint32_t *data,
|
||||
const uint32_t *midstate, const uint32_t *prehash);
|
||||
|
||||
static inline int scanhash_sha256d_8way(int thr_id, uint32_t *pdata,
|
||||
const uint32_t *ptarget, uint32_t max_nonce, unsigned long *hashes_done)
|
||||
{
|
||||
uint32_t data[8 * 64] __attribute__((aligned(128)));
|
||||
uint32_t hash[8 * 8] __attribute__((aligned(32)));
|
||||
uint32_t midstate[8 * 8] __attribute__((aligned(32)));
|
||||
uint32_t prehash[8 * 8] __attribute__((aligned(32)));
|
||||
uint32_t n = pdata[19] - 1;
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t Htarg = ptarget[7];
|
||||
int i, j;
|
||||
|
||||
memcpy(data, pdata + 16, 64);
|
||||
sha256d_preextend(data);
|
||||
for (i = 31; i >= 0; i--)
|
||||
for (j = 0; j < 8; j++)
|
||||
data[i * 8 + j] = data[i];
|
||||
|
||||
sha256_init(midstate);
|
||||
sha256_transform(midstate, pdata, 0);
|
||||
memcpy(prehash, midstate, 32);
|
||||
sha256d_prehash(prehash, pdata + 16);
|
||||
for (i = 7; i >= 0; i--) {
|
||||
for (j = 0; j < 8; j++) {
|
||||
midstate[i * 8 + j] = midstate[i];
|
||||
prehash[i * 8 + j] = prehash[i];
|
||||
}
|
||||
}
|
||||
|
||||
do {
|
||||
for (i = 0; i < 8; i++)
|
||||
data[8 * 3 + i] = ++n;
|
||||
|
||||
sha256d_ms_8way(hash, data, midstate, prehash);
|
||||
|
||||
for (i = 0; i < 8; i++) {
|
||||
if (swab32(hash[8 * 7 + i]) <= Htarg) {
|
||||
pdata[19] = data[8 * 3 + i];
|
||||
sha256d_80_swap(hash, pdata);
|
||||
if (fulltest(hash, ptarget)) {
|
||||
*hashes_done = n - first_nonce + 1;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
} while (n < max_nonce && !work_restart[thr_id].restart);
|
||||
|
||||
*hashes_done = n - first_nonce + 1;
|
||||
pdata[19] = n;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* HAVE_SHA256_8WAY */
|
||||
|
||||
int scanhash_sha256d(int thr_id, uint32_t *pdata, const uint32_t *ptarget,
|
||||
uint32_t max_nonce, unsigned long *hashes_done)
|
||||
{
|
||||
uint32_t data[64] __attribute__((aligned(128)));
|
||||
uint32_t hash[8] __attribute__((aligned(32)));
|
||||
uint32_t midstate[8] __attribute__((aligned(32)));
|
||||
uint32_t prehash[8] __attribute__((aligned(32)));
|
||||
uint32_t n = pdata[19] - 1;
|
||||
const uint32_t first_nonce = pdata[19];
|
||||
const uint32_t Htarg = ptarget[7];
|
||||
|
||||
#ifdef HAVE_SHA256_8WAY
|
||||
if (sha256_use_8way())
|
||||
return scanhash_sha256d_8way(thr_id, pdata, ptarget,
|
||||
max_nonce, hashes_done);
|
||||
#endif
|
||||
#ifdef HAVE_SHA256_4WAY
|
||||
if (sha256_use_4way())
|
||||
return scanhash_sha256d_4way(thr_id, pdata, ptarget,
|
||||
max_nonce, hashes_done);
|
||||
#endif
|
||||
|
||||
memcpy(data, pdata + 16, 64);
|
||||
sha256d_preextend(data);
|
||||
|
||||
sha256_init(midstate);
|
||||
sha256_transform(midstate, pdata, 0);
|
||||
memcpy(prehash, midstate, 32);
|
||||
sha256d_prehash(prehash, pdata + 16);
|
||||
|
||||
do {
|
||||
data[3] = ++n;
|
||||
sha256d_ms(hash, data, midstate, prehash);
|
||||
if (swab32(hash[7]) <= Htarg) {
|
||||
pdata[19] = data[3];
|
||||
sha256d_80_swap(hash, pdata);
|
||||
if (fulltest(hash, ptarget)) {
|
||||
*hashes_done = n - first_nonce + 1;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
} while (n < max_nonce && !work_restart[thr_id].restart);
|
||||
|
||||
*hashes_done = n - first_nonce + 1;
|
||||
pdata[19] = n;
|
||||
return 0;
|
||||
}
|
||||
Reference in New Issue
Block a user