; * Copyright (C) 2011 - Neil Kettle <neil@digit-labs.org>
; *
; * This file is part of cpuminer-ng.
; *
; * cpuminer-ng is free software: you can redistribute it and/or modify
; * it under the terms of the GNU General Public License as published by
; * the Free Software Foundation, either version 3 of the License, or
; * (at your option) any later version.
; *
; * cpuminer-ng is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
; * GNU General Public License for more details.
; *
; * You should have received a copy of the GNU General Public License
; * along with cpuminer-ng.  If not, see <http://www.gnu.org/licenses/>.
; %rbp, %rbx, and %r12-%r15 - callee save
; 0 = (1024 - 256) (mod (LAB_CALC_UNROLL*LAB_CALC_PARA*16))
%define SHA_CALC_W_PARA       2     ; W-expansion: two 16-byte W slots written per macro call
%define SHA_CALC_W_UNROLL     8     ; W-expansion: macro calls per loop iteration

%define SHA_ROUND_LOOP_UNROLL 16    ; SHA-256 rounds performed per pass of the round loop
; Mach-O (macOS) prefixes C-visible symbols with an underscore; ELF does not.
%ifidn __YASM_OBJFMT__, macho64
extern _sha256_consts_m128i
%else
extern sha256_consts_m128i
%endif

%ifidn __YASM_OBJFMT__, macho64
global _sha256_sse2_64_new
%else
global sha256_sse2_64_new
%endif
;-----------------------------------------------------------------------
; sha_round_blk — one SHA-256 round over 4 independent lanes (SSE2).
; In:  rax = byte offset of the current round's W entry / round constant,
;      data = base of the expanded message schedule W,
;      rA..rH = working state a..h (one 32-bit word per lane),
;      sr1..sr3 = scratch.
; Out: rA..rH rotated/updated per the SHA-256 round function.
; Clobbers sr1-sr3 and flags-free SSE state only.
;-----------------------------------------------------------------------
%macro sha_round_blk 0
    movdqa  sr1, [data+rax]             ; T1 = w
    movdqa  sr2, rE                     ; sr2 = e
    pandn   sr2, rG                     ; sr2 = ~e & g
    movdqa  sr3, rF                     ; sr3 = f
    paddd   sr1, rH                     ; T1 = h + w
    movdqa  rH, rG                      ; h = g
    pand    sr3, rE                     ; sr3 = e & f
    movdqa  rG, rF                      ; g = f
%ifidn __YASM_OBJFMT__, macho64
    paddd   sr1, [rcx+rax]              ; T1 += K[i]  (rcx -> _sha256_consts_m128i)
%else
    paddd   sr1, sha256_consts_m128i[rax] ; T1 += K[i]
%endif
    pxor    sr2, sr3                    ; sr2 = (e & f) ^ (~e & g) = Ch(e,f,g)
    movdqa  rF, rE                      ; f = e
    paddd   sr1, sr2                    ; T1 = h + Ch(e,f,g) + K[i] + w

    ; Sigma1(e) = (e ror 6) ^ (e ror 11) ^ (e ror 25), built from shift pairs
    movdqa  sr2, rE                     ; sr2 = e (feeds the left shifts)
    psrld   rE, 6                       ; e >> 6
    movdqa  sr3, rE                     ; sr3 = e >> 6 (feeds the right shifts)
    pslld   sr2, 7                      ; e << 7
    psrld   sr3, 5                      ; e >> 11
    pxor    rE, sr2                     ; e >> 6 ^ e << 7
    pslld   sr2, 14                     ; e << 21
    pxor    rE, sr3                     ; ... ^ e >> 11
    psrld   sr3, 14                     ; e >> 25
    pxor    rE, sr2                     ; ... ^ e << 21
    pslld   sr2, 5                      ; e << 26
    pxor    rE, sr3                     ; ... ^ e >> 25
    pxor    rE, sr2                     ; ... ^ e << 26 = Sigma1(e)

    movdqa  sr2, rB                     ; sr2 = b
    paddd   sr1, rE                     ; T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + w
    movdqa  rE, rD                      ; new e starts from d
    movdqa  rD, rC                      ; d = c
    paddd   rE, sr1                     ; e = d + T1

    ; Maj(a,b,c) = (a & b) ^ (a & c) ^ (b & c)
    movdqa  sr3, rC                     ; sr3 = c
    pand    rC, rA                      ; rC  = c & a
    pand    sr3, rB                     ; sr3 = b & c
    pand    sr2, rA                     ; sr2 = b & a
    pxor    sr2, rC                     ; (b & a) ^ (c & a)
    movdqa  rC, rB                      ; c = b
    pxor    sr2, sr3                    ; ... ^ (b & c) = Maj(a,b,c)
    movdqa  rB, rA                      ; b = a
    paddd   sr1, sr2                    ; sr1 = T1 + Maj(a,b,c)

    ; Sigma0(a) = (a ror 2) ^ (a ror 13) ^ (a ror 22)
    movdqa  sr3, rA                     ; sr3 = a (feeds the left shifts)
    psrld   rA, 2                       ; a >> 2
    pslld   sr3, 10                     ; a << 10
    movdqa  sr2, rA                     ; sr2 = a >> 2 (feeds the right shifts)
    pxor    rA, sr3                     ; a >> 2 ^ a << 10
    psrld   sr2, 11                     ; a >> 13
    pxor    rA, sr2                     ; ... ^ a >> 13
    pslld   sr3, 9                      ; a << 19
    pxor    rA, sr3                     ; ... ^ a << 19
    psrld   sr2, 9                      ; a >> 22 (13+9; original comment said 21)
    pxor    rA, sr2                     ; ... ^ a >> 22
    pslld   sr3, 11                     ; a << 30
    pxor    rA, sr3                     ; ... ^ a << 30 = Sigma0(a)
    paddd   rA, sr1                     ; a = T1 + Sigma0(a) + Maj(a,b,c)
%endmacro
;-----------------------------------------------------------------------
; sha_calc_w_blk %1 — message-schedule expansion, two W slots at a time:
;   W[i]   = s1(W[i-2]) + W[i-7] + s0(W[i-15]) + W[i-16]
; computed for slots %1 and %1+1 across 4 SSE2 lanes.
; In:  r11 = address of W[i] for %1 == 0 (negative offsets reach back
;      into the already-expanded schedule).
; Clobbers xmm0-xmm7; the two even/odd instruction streams are
; interleaved for pairing, xmm0-3 handle slot %1 and xmm4-7 slot %1+1.
;-----------------------------------------------------------------------
%macro sha_calc_w_blk 1
    ; s0(x) = (x ror 7) ^ (x ror 18) ^ (x >> 3)
    movdqa  xmm0, [r11-(15-%1)*16]      ; xmm0 = W[i-15]
    movdqa  xmm4, [r11-(15-(%1+1))*16]  ; xmm4 = W[i-14]
    movdqa  xmm2, xmm0
    movdqa  xmm6, xmm4
    psrld   xmm0, 3                     ; W[i-15] >> 3
    psrld   xmm4, 3
    movdqa  xmm1, xmm0
    movdqa  xmm5, xmm4
    pslld   xmm2, 14                    ; W[i-15] << 14
    pslld   xmm6, 14
    psrld   xmm1, 4                     ; W[i-15] >> 7
    psrld   xmm5, 4
    pxor    xmm0, xmm1
    pxor    xmm4, xmm5
    psrld   xmm1, 11                    ; W[i-15] >> 18
    psrld   xmm5, 11
    pxor    xmm0, xmm2
    pxor    xmm4, xmm6
    pslld   xmm2, 11                    ; W[i-15] << 25
    pslld   xmm6, 11
    pxor    xmm0, xmm1
    pxor    xmm4, xmm5
    pxor    xmm0, xmm2                  ; xmm0 = s0(W[i-15])
    pxor    xmm4, xmm6                  ; xmm4 = s0(W[i-14])

    movdqa  xmm3, [r11-(2-%1)*16]       ; xmm3 = W[i-2]
    movdqa  xmm7, [r11-(2-(%1+1))*16]   ; xmm7 = W[i-1]
    paddd   xmm0, [r11-(16-%1)*16]      ; xmm0 += W[i-16]
    paddd   xmm4, [r11-(16-(%1+1))*16]  ; xmm4 += W[i-15]

    ; s1(x) = (x ror 17) ^ (x ror 19) ^ (x >> 10)
    movdqa  xmm2, xmm3
    movdqa  xmm6, xmm7
    psrld   xmm3, 10                    ; W[i-2] >> 10
    psrld   xmm7, 10
    movdqa  xmm1, xmm3
    movdqa  xmm5, xmm7
    paddd   xmm0, [r11-(7-%1)*16]       ; xmm0 += W[i-7]
    pslld   xmm2, 13                    ; W[i-2] << 13
    pslld   xmm6, 13
    psrld   xmm1, 7                     ; W[i-2] >> 17
    psrld   xmm5, 7
    paddd   xmm4, [r11-(7-(%1+1))*16]   ; xmm4 += W[i-6]
    pxor    xmm3, xmm1
    pxor    xmm7, xmm5
    psrld   xmm1, 2                     ; W[i-2] >> 19
    psrld   xmm5, 2
    pxor    xmm3, xmm2
    pxor    xmm7, xmm6
    pslld   xmm2, 2                     ; W[i-2] << 15
    pslld   xmm6, 2
    pxor    xmm3, xmm1
    pxor    xmm7, xmm5
    pxor    xmm3, xmm2                  ; xmm3 = s1(W[i-2])
    pxor    xmm7, xmm6                  ; xmm7 = s1(W[i-1])

    paddd   xmm0, xmm3                  ; W[i]   complete
    paddd   xmm4, xmm7                  ; W[i+1] complete
    movdqa  [r11+(%1*16)], xmm0
    movdqa  [r11+((%1+1)*16)], xmm4
%endmacro
; _sha256_sse2_64_new hash(rdi), hash1(rsi), data(rdx), init(rcx),
223
%ifidn __YASM_OBJFMT__, macho64
232
mov rbx, 64*4 ; rbx is # of SHA-2 rounds
233
mov rax, 16*4 ; rax is where we expand to
236
lea rbx, qword [data+rbx*4]
237
lea r11, qword [data+rax*4]
241
%rep SHA_CALC_W_UNROLL
243
%assign i i+SHA_CALC_W_PARA
245
add r11, SHA_CALC_W_UNROLL*SHA_CALC_W_PARA*16
254
pshufd rB, rA, 0x55 ; rB == B
255
pshufd rC, rA, 0xAA ; rC == C
256
pshufd rD, rA, 0xFF ; rD == D
257
pshufd rA, rA, 0 ; rA == A
259
movdqa rE, [init+4*4]
260
pshufd rF, rE, 0x55 ; rF == F
261
pshufd rG, rE, 0xAA ; rG == G
262
pshufd rH, rE, 0xFF ; rH == H
263
pshufd rE, rE, 0 ; rE == E
265
%ifidn __YASM_OBJFMT__, macho64
266
lea rcx, [_sha256_consts_m128i wrt rip]
271
%rep SHA_ROUND_LOOP_UNROLL
278
; Finished the 64 rounds, calculate hash and save
281
pshufd sr2, sr1, 0x55
282
pshufd sr3, sr1, 0xAA
283
pshufd sr4, sr1, 0xFF
291
movdqa sr1, [init+4*4]
292
pshufd sr2, sr1, 0x55
293
pshufd sr3, sr1, 0xAA
294
pshufd sr4, sr1, 0xFF
304
movdqa [hash1+0*16], rA
305
movdqa [hash1+1*16], rB
306
movdqa [hash1+2*16], rC
307
movdqa [hash1+3*16], rD
308
movdqa [hash1+4*16], rE
309
movdqa [hash1+5*16], rF
310
movdqa [hash1+6*16], rG
311
movdqa [hash1+7*16], rH
314
mov init, sha256_init
318
movdqa [hash+7*16], rH