# MD5 optimized for AMD64.
# Author: Marc Bevand <bevand_m (at) epita.fr>
# Licence: I hereby disclaim the copyright on this code and place it
# in the public domain.
# round1_step() emits the assembly for one MD5 round-1 step:
#   dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s)
# where F(x,y,z) = (x & y) | (~x & z), computed as ((y ^ z) & x) ^ z
# so that only one temporary register is needed.
# It also preloads values for the NEXT step:
#   %r10d = X[k_next]
#   %r11d = z' (copy of z for the next step)
# Each round1_step() takes about 5.71 clocks (9 instructions, 1.58 IPC)
#
# Arguments: ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s)
#   $pos   — -1 for the first step of the round (emit the priming loads),
#            0 for middle steps, 1 for the last step of the round
#   $dst,$x,$y,$z — 32-bit register names holding the rotating A/B/C/D state
#   $k_next — index of the message word to prefetch for the next step
#   $T_i   — the additive constant (floor(2^32*abs(sin(i)))), as hex text
#   $s     — left-rotate amount
# Appends the generated text to the global $code; returns nothing useful.
sub round1_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # First step of the round: prime %r10d with X[0] and %r11d with z.
    $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
    $code .= " mov %edx, %r11d /* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= <<EOF;
	xor	$y,		%r11d		/* y ^ ... */
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	and	$x,		%r11d		/* x & ... */
	xor	$z,		%r11d		/* z ^ ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	add	%r11d,		$dst		/* dst += ... */
	rol	\$$s,		$dst		/* dst <<< s */
	mov	$y,		%r11d		/* (NEXT STEP) z' = $y */
	add	$x,		$dst		/* dst += x */
EOF
}
# round2_step() emits the assembly for one MD5 round-2 step:
#   dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s)
# where G(x,y,z) = (x & z) | (y & ~z), computed as ((x ^ y) & z) ^ y.
# It also preloads values for the NEXT step:
#   %r10d = X[k_next]
#   %r11d = y' (copy of y for the next step)
# Each round2_step() takes about 6.22 clocks (9 instructions, 1.45 IPC)
#
# Arguments: ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) — same meaning as
# in round1_step(); $pos == -1 marks the first step of the round.
# Appends the generated text to the global $code.
sub round2_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # First step of the round: prime %r10d with X[1] and %r11d with y.
    $code .= " mov 1*4(%rsi), %r10d /* (NEXT STEP) X[1] */\n" if ($pos == -1);
    $code .= " mov %ecx, %r11d /* (NEXT STEP) y' = %ecx */\n" if ($pos == -1);
    $code .= <<EOF;
	xor	$x,		%r11d		/* x ^ ... */
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	and	$z,		%r11d		/* z & ... */
	xor	$y,		%r11d		/* y ^ ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	add	%r11d,		$dst		/* dst += ... */
	rol	\$$s,		$dst		/* dst <<< s */
	mov	$x,		%r11d		/* (NEXT STEP) y' = $x */
	add	$x,		$dst		/* dst += x */
EOF
}
# round3_step() emits the assembly for one MD5 round-3 step:
#   dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s)
# where H(x,y,z) = x ^ y ^ z.  %r11d enters each step already holding the
# previous step's y, so only two XORs are needed here.
# It also preloads values for the NEXT step:
#   %r10d = X[k_next]
#   %r11d = y' (copy of y for the next step)
# Each round3_step() takes about 4.26 clocks (8 instructions, 1.88 IPC)
#
# Arguments: ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) — same meaning as
# in round1_step(); $pos == -1 marks the first step of the round.
# Appends the generated text to the global $code.
sub round3_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # First step of the round: prime %r10d with X[5] and %r11d with y.
    $code .= " mov 5*4(%rsi), %r10d /* (NEXT STEP) X[5] */\n" if ($pos == -1);
    $code .= " mov %ecx, %r11d /* (NEXT STEP) y' = %ecx */\n" if ($pos == -1);
    $code .= <<EOF;
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	xor	$z,		%r11d		/* z ^ ... */
	xor	$x,		%r11d		/* x ^ ... */
	add	%r11d,		$dst		/* dst += ... */
	rol	\$$s,		$dst		/* dst <<< s */
	mov	$x,		%r11d		/* (NEXT STEP) y' = $x */
	add	$x,		$dst		/* dst += x */
EOF
}
# round4_step() emits the assembly for one MD5 round-4 step:
#   dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s)
# where I(x,y,z) = y ^ (x | ~z).  %r11d enters each step already holding
# ~z (NOT of the previous step's z), built with a 0xffffffff load + XOR.
# It also preloads values for the NEXT step:
#   %r10d = X[k_next]
#   %r11d = not z' (copy of not z for the next step)
# Each round4_step() takes about 5.27 clocks (9 instructions, 1.71 IPC)
#
# Arguments: ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) — same meaning as
# in round1_step(); $pos == -1 marks the first step of the round.
# Appends the generated text to the global $code.
sub round4_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # First step of the round: prime %r10d with X[0] and %r11d with ~z.
    $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
    $code .= " mov \$0xffffffff, %r11d\n" if ($pos == -1);
    $code .= " xor %edx, %r11d /* (NEXT STEP) not z' = not %edx*/\n"
             if ($pos == -1);
    $code .= <<EOF;
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	or	$x,		%r11d		/* x | ... */
	xor	$y,		%r11d		/* y ^ ... */
	add	%r11d,		$dst		/* dst += ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	mov	\$0xffffffff,	%r11d
	rol	\$$s,		$dst		/* dst <<< s */
	xor	$y,		%r11d		/* (NEXT STEP) not z' = not $y */
	add	$x,		$dst		/* dst += x */
EOF
}
# NOTE(review): this chunk is a sampled extract of a perlasm generator and
# is not valid Perl as-is — the bare-number lines (e.g. "108") look like
# original line numbers fused in by the extraction, and the $code/heredoc
# scaffolding that wrapped the assembly text below is not visible here.
# Reconstruct against the upstream file before editing — TODO confirm.
#
# The generated code is piped through the perlasm translator so one source
# can target multiple assembler flavours; $output is presumably taken from
# @ARGV elsewhere in the file — verify against the missing lines.
#
# md5_block_asm_host_order(ctx, ptr, nbr) entry: per the comments below,
# rbp <- ctx, the A..D state words are loaded into eax/ebx/ecx/edx, and
# rdi is set to ptr + nbr*64 (end of data) for the block loop.
108
open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output";
114
.globl md5_block_asm_host_order
115
.type md5_block_asm_host_order,\@function,3
116
md5_block_asm_host_order:
122
# rdi = arg #1 (ctx, MD5_CTX pointer)
123
# rsi = arg #2 (ptr, data pointer)
124
# rdx = arg #3 (nbr, number of 16-word blocks to process)
125
mov %rdi, %rbp # rbp = ctx
126
shl \$6, %rdx # rdx = nbr in bytes
127
lea (%rsi,%rdx), %rdi # rdi = end
128
mov 0*4(%rbp), %eax # eax = ctx->A
129
mov 1*4(%rbp), %ebx # ebx = ctx->B
130
mov 2*4(%rbp), %ecx # ecx = ctx->C
131
mov 3*4(%rbp), %edx # edx = ctx->D
139
cmp %rdi, %rsi # cmp end with ptr
140
je .Lend # jmp if ptr == end
142
# BEGIN of loop over 16-word blocks
143
.Lloop: # save old values of A, B, C, D
# Round 1: the 16 F-function steps.  Arguments per call:
# (pos, dst, x, y, z, k_next, T_i, s); pos == -1 / 1 mark the first / last
# step of the round, dst rotates D<-C<-B<-A, k_next is the message word to
# prefetch, T_i the RFC 1321 sine-table constant, s the rotate amount.
# (Stray extraction-artifact number lines removed from this span.)
round1_step(-1,'%eax','%ebx','%ecx','%edx', '1','0xd76aa478', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xe8c7b756','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '3','0x242070db','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '4','0xc1bdceee','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '5','0xf57c0faf', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '6','0x4787c62a','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '7','0xa8304613','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '8','0xfd469501','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '9','0x698098d8', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8b44f7af','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','11','0xffff5bb1','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax','12','0x895cd7be','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx','13','0x6b901122', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','14','0xfd987193','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','15','0xa679438e','17');
round1_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x49b40821','22');
# Round 2: the 16 G-function steps (message order X[(1+5i) mod 16],
# rotate amounts 5/9/14/20).
# (Stray extraction-artifact number lines removed from this span.)
round2_step(-1,'%eax','%ebx','%ecx','%edx', '6','0xf61e2562', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','11','0xc040b340', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '0','0x265e5a51','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '5','0xe9b6c7aa','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','10','0xd62f105d', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','15', '0x2441453', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '4','0xd8a1e681','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '9','0xe7d3fbc8','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','14','0x21e1cde6', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xc33707d6', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '8','0xf4d50d87','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax','13','0x455a14ed','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx', '2','0xa9e3e905', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '7','0xfcefa3f8', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx','12','0x676f02d9','14');
round2_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x8d2a4c8a','20');
# Round 3: the 16 H-function steps (message order X[(5+3i) mod 16],
# rotate amounts 4/11/16/23).
# (Stray extraction-artifact number lines removed from this span.)
round3_step(-1,'%eax','%ebx','%ecx','%edx', '8','0xfffa3942', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','11','0x8771f681','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','14','0x6d9d6122','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '1','0xfde5380c','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '4','0xa4beea44', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '7','0x4bdecfa9','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','10','0xf6bb4b60','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax','13','0xbebfbc70','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '0','0x289b7ec6', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xeaa127fa','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '6','0xd4ef3085','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '9', '0x4881d05','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx','12','0xd9d4d039', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','15','0xe6db99e5','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '2','0x1fa27cf8','16');
round3_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xc4ac5665','23');
# Round 4: the 16 I-function steps (message order X[7i mod 16],
# rotate amounts 6/10/15/21).
# (Stray extraction-artifact number lines removed from this span.)
round4_step(-1,'%eax','%ebx','%ecx','%edx', '7','0xf4292244', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','14','0x432aff97','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '5','0xab9423a7','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax','12','0xfc93a039','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx', '3','0x655b59c3', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8f0ccc92','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '1','0xffeff47d','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '8','0x85845dd1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','15','0x6fa87e4f', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '6','0xfe2ce6e0','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx','13','0xa3014314','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '4','0x4e0811a1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','11','0xf7537e82', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xbd3af235','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '9','0x2ad7d2bb','15');
round4_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xeb86d391','21');
# NOTE(review): sampled extract — the lines that re-add the saved old
# A..D values (the instructions announced by the comment below), the
# .Lend label targeted by the earlier je, the stack/register epilogue,
# and the Perl statements that print $code and close STDOUT fell out of
# this extract; the bare-number lines are extraction artifacts.  Restore
# from the upstream file before editing — TODO confirm.
# What is visible: advance ptr by one 64-byte block, loop while ptr < end,
# then store the final A..D state back into the context structure.
217
# add old values of A, B, C, D
224
add \$64, %rsi # ptr += 64
225
cmp %rdi, %rsi # cmp end with ptr
226
jb .Lloop # jmp if ptr < end
227
# END of loop over 16-word blocks
230
mov %eax, 0*4(%rbp) # ctx->A = A
231
mov %ebx, 1*4(%rbp) # ctx->B = B
232
mov %ecx, 2*4(%rbp) # ctx->C = C
233
mov %edx, 3*4(%rbp) # ctx->D = D
240
.size md5_block_asm_host_order,.-md5_block_asm_host_order