~ubuntu-branches/ubuntu/lucid/openssl/lucid-proposed

« back to all changes in this revision

Viewing changes to crypto/sha/asm/sha1-ia64.pl

  • Committer: Bazaar Package Importer
  • Author(s): Kurt Roeckx
  • Date: 2009-06-13 18:15:46 UTC
  • mto: (11.1.5 squeeze)
  • mto: This revision was merged to the branch mainline in revision 34.
  • Revision ID: james.westby@ubuntu.com-20090613181546-vbfntai3b009dl1u
Tags: upstream-0.9.8k
Import upstream version 0.9.8k

Show diffs side-by-side

added added

removed removed

Lines of Context:
2
2
#
3
3
# ====================================================================
4
4
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5
 
# project. Rights for redistribution and usage in source and binary
6
 
# forms are granted according to the OpenSSL license.
 
5
# project. The module is, however, dual licensed under OpenSSL and
 
6
# CRYPTOGAMS licenses depending on where you obtain it. For further
 
7
# details see http://www.openssl.org/~appro/cryptogams/.
7
8
# ====================================================================
8
9
#
9
10
# Eternal question is what's wrong with compiler generated code? The
11
12
# to perform rotations by maintaining copy of 32-bit value in upper
12
13
# bits of 64-bit register. Just follow mux2 and shrp instructions...
13
14
# Performance under big-endian OS such as HP-UX is 179MBps*1GHz, which
14
 
# is >50% better than HP C and >2x better than gcc. As of this moment
15
 
# performance under little-endian OS such as Linux and Windows will be
16
 
# a bit lower, because data has to be picked in reverse byte-order.
17
 
# It's possible to resolve this issue by implementing third function,
18
 
# sha1_block_asm_data_order_aligned, which would temporarily flip
19
 
# BE field in User Mask register...
 
15
# is >50% better than HP C and >2x better than gcc.
20
16
 
21
17
$code=<<___;
22
 
.ident  \"sha1-ia64.s, version 1.0\"
 
18
.ident  \"sha1-ia64.s, version 1.2\"
23
19
.ident  \"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\"
24
20
.explicit
25
21
 
55
51
 
56
52
sub BODY_00_15 {
57
53
local   *code=shift;
58
 
local   ($i,$a,$b,$c,$d,$e,$f,$unaligned)=@_;
 
54
local   ($i,$a,$b,$c,$d,$e,$f)=@_;
59
55
 
60
 
if ($unaligned) {
61
 
        $code.=<<___;
62
 
{ .mmi; ld1     tmp0=[inp],2                // MSB
63
 
        ld1     tmp1=[tmp3],2           };;
64
 
{ .mmi; ld1     tmp2=[inp],2
65
 
        ld1     $X[$i&0xf]=[tmp3],2         // LSB
66
 
        dep     tmp1=tmp0,tmp1,8,8      };;
67
 
{ .mii; cmp.ne  p16,p0=r0,r0                // no misaligned prefetch
68
 
        dep     $X[$i&0xf]=tmp2,$X[$i&0xf],8,8;;
69
 
        dep     $X[$i&0xf]=tmp1,$X[$i&0xf],16,16        };;
70
 
{ .mmi; nop.m   0
71
 
___
72
 
        }
73
 
elsif ($i<15) {
74
 
        $code.=<<___;
75
 
{ .mmi; ld4     $X[($i+1)&0xf]=[inp],4  // prefetch
76
 
___
77
 
        }
78
 
else    {
79
 
        $code.=<<___;
80
 
{ .mmi; nop.m   0
81
 
___
82
 
        }
 
56
$code.=<<___ if ($i==0);
 
57
{ .mmi; ld1     $X[$i&0xf]=[inp],2          // MSB
 
58
        ld1     tmp2=[tmp3],2           };;
 
59
{ .mmi; ld1     tmp0=[inp],2
 
60
        ld1     tmp4=[tmp3],2               // LSB
 
61
        dep     $X[$i&0xf]=$X[$i&0xf],tmp2,8,8  };;
 
62
___
83
63
if ($i<15) {
84
64
        $code.=<<___;
85
 
        and     tmp0=$c,$b
86
 
        dep.z   tmp5=$a,5,27            }   // a<<5
 
65
{ .mmi; ld1     $X[($i+1)&0xf]=[inp],2      // +1
 
66
        dep     tmp1=tmp0,tmp4,8,8      };;
 
67
{ .mmi; ld1     tmp2=[tmp3],2               // +1
 
68
        and     tmp4=$c,$b
 
69
        dep     $X[$i&0xf]=$X[$i&0xf],tmp1,16,16        } //;;
87
70
{ .mmi; andcm   tmp1=$d,$b
88
 
        add     tmp4=$e,$K_00_19        };;
89
 
{ .mmi; or      tmp0=tmp0,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
90
 
        add     $f=tmp4,$X[$i&0xf]          // f=xi+e+K_00_19
 
71
        add     tmp0=$e,$K_00_19
 
72
        dep.z   tmp5=$a,5,27            };; // a<<5
 
73
{ .mmi; or      tmp4=tmp4,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
 
74
        add     $f=tmp0,$X[$i&0xf]          // f=xi+e+K_00_19
91
75
        extr.u  tmp1=$a,27,5            };; // a>>27
92
 
{ .mib; add     $f=$f,tmp0                  // f+=F_00_19(b,c,d)
 
76
{ .mmi; ld1     tmp0=[inp],2                // +1
 
77
        add     $f=$f,tmp4                  // f+=F_00_19(b,c,d)
93
78
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
94
 
{ .mib; or      tmp1=tmp1,tmp5              // ROTATE(a,5)
 
79
{ .mmi; ld1     tmp4=[tmp3],2               // +1
 
80
        or      tmp5=tmp1,tmp5              // ROTATE(a,5)
95
81
        mux2    tmp6=$a,0x44            };; // see b in next iteration
96
 
{ .mii; add     $f=$f,tmp1                  // f+=ROTATE(a,5)
97
 
        mux2    $X[$i&0xf]=$X[$i&0xf],0x44
98
 
        nop.i   0                       };;
 
82
{ .mii; add     $f=$f,tmp5                  // f+=ROTATE(a,5)
 
83
        dep     $X[($i+1)&0xf]=$X[($i+1)&0xf],tmp2,8,8  // +1
 
84
        mux2    $X[$i&0xf]=$X[$i&0xf],0x44      } //;;
99
85
 
100
86
___
101
87
        }
102
88
else    {
103
89
        $code.=<<___;
104
 
        and     tmp0=$c,$b
105
 
        dep.z   tmp5=$a,5,27            }   // a<<5 ;;?
 
90
{ .mii; and     tmp3=$c,$b
 
91
        dep     tmp1=tmp0,tmp4,8,8;;
 
92
        dep     $X[$i&0xf]=$X[$i&0xf],tmp1,16,16        } //;;
106
93
{ .mmi; andcm   tmp1=$d,$b
107
 
        add     tmp4=$e,$K_00_19        };;
108
 
{ .mmi; or      tmp0=tmp0,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
109
 
        add     $f=tmp4,$X[$i&0xf]          // f=xi+e+K_00_19
 
94
        add     tmp0=$e,$K_00_19
 
95
        dep.z   tmp5=$a,5,27            };; // a<<5
 
96
{ .mmi; or      tmp4=tmp3,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
 
97
        add     $f=tmp0,$X[$i&0xf]          // f=xi+e+K_00_19
110
98
        extr.u  tmp1=$a,27,5            }   // a>>27
111
99
{ .mmi; xor     tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]  // +1
112
100
        xor     tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1
113
101
        nop.i   0                       };;
114
 
{ .mmi; add     $f=$f,tmp0                  // f+=F_00_19(b,c,d)
 
102
{ .mmi; add     $f=$f,tmp4                  // f+=F_00_19(b,c,d)
115
103
        xor     tmp2=tmp2,tmp3              // +1
116
104
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
117
105
{ .mmi; or      tmp1=tmp1,tmp5              // ROTATE(a,5)
190
178
        extr.u  tmp1=$a,27,5            }   // a>>27
191
179
{ .mib; add     $f=$f,tmp4                  // f+=e+K_20_39
192
180
        add     $h1=$h1,$a              };; // wrap up
193
 
{ .mmi;
194
 
(p16)   ld4.s   $X[0]=[inp],4               // non-faulting prefetch
195
 
        add     $f=$f,tmp0                  // f+=F_20_39(b,c,d)
 
181
{ .mmi; add     $f=$f,tmp0                  // f+=F_20_39(b,c,d)
196
182
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30) ;;?
197
183
{ .mmi; or      tmp1=tmp1,tmp5              // ROTATE(a,5)
198
184
        add     $h3=$h3,$c              };; // wrap up
245
231
ctx=r32;        // in0
246
232
inp=r33;        // in1
247
233
 
248
 
// void sha1_block_asm_host_order(SHA_CTX *c,const void *p,size_t num);
249
 
.global sha1_block_asm_host_order#
250
 
.proc   sha1_block_asm_host_order#
251
 
.align  32
252
 
sha1_block_asm_host_order:
253
 
        .prologue
254
 
{ .mmi; alloc   tmp1=ar.pfs,3,15,0,0
255
 
        $ADDP   tmp0=4,ctx
256
 
        .save   ar.lc,r3
257
 
        mov     r3=ar.lc                }
258
 
{ .mmi; $ADDP   ctx=0,ctx
259
 
        $ADDP   inp=0,inp
260
 
        mov     r2=pr                   };;
261
 
tmp4=in2;
262
 
tmp5=loc13;
263
 
tmp6=loc14;
264
 
        .body
265
 
{ .mlx; ld4     $h0=[ctx],8
266
 
        movl    $K_00_19=0x5a827999     }
267
 
{ .mlx; ld4     $h1=[tmp0],8
268
 
        movl    $K_20_39=0x6ed9eba1     };;
269
 
{ .mlx; ld4     $h2=[ctx],8
270
 
        movl    $K_40_59=0x8f1bbcdc     }
271
 
{ .mlx; ld4     $h3=[tmp0]
272
 
        movl    $K_60_79=0xca62c1d6     };;
273
 
{ .mmi; ld4     $h4=[ctx],-16
274
 
        add     in2=-1,in2                  // adjust num for ar.lc
275
 
        mov     ar.ec=1                 };;
276
 
{ .mmi; ld4     $X[0]=[inp],4               // prefetch
277
 
        cmp.ne  p16,p0=r0,in2               // prefecth at loop end
278
 
        mov     ar.lc=in2               };; // brp.loop.imp: too far
279
 
 
280
 
.Lhtop:
281
 
{ .mmi; mov     $A=$h0
282
 
        mov     $B=$h1
283
 
        mux2    tmp6=$h1,0x44           }
284
 
{ .mmi; mov     $C=$h2
285
 
        mov     $D=$h3
286
 
        mov     $E=$h4                  };;
287
 
 
288
 
___
289
 
 
290
 
        &BODY_00_15(\$code, 0,$A,$B,$C,$D,$E,$T);
291
 
        &BODY_00_15(\$code, 1,$T,$A,$B,$C,$D,$E);
292
 
        &BODY_00_15(\$code, 2,$E,$T,$A,$B,$C,$D);
293
 
        &BODY_00_15(\$code, 3,$D,$E,$T,$A,$B,$C);
294
 
        &BODY_00_15(\$code, 4,$C,$D,$E,$T,$A,$B);
295
 
        &BODY_00_15(\$code, 5,$B,$C,$D,$E,$T,$A);
296
 
        &BODY_00_15(\$code, 6,$A,$B,$C,$D,$E,$T);
297
 
        &BODY_00_15(\$code, 7,$T,$A,$B,$C,$D,$E);
298
 
        &BODY_00_15(\$code, 8,$E,$T,$A,$B,$C,$D);
299
 
        &BODY_00_15(\$code, 9,$D,$E,$T,$A,$B,$C);
300
 
        &BODY_00_15(\$code,10,$C,$D,$E,$T,$A,$B);
301
 
        &BODY_00_15(\$code,11,$B,$C,$D,$E,$T,$A);
302
 
        &BODY_00_15(\$code,12,$A,$B,$C,$D,$E,$T);
303
 
        &BODY_00_15(\$code,13,$T,$A,$B,$C,$D,$E);
304
 
        &BODY_00_15(\$code,14,$E,$T,$A,$B,$C,$D);
305
 
        &BODY_00_15(\$code,15,$D,$E,$T,$A,$B,$C);
306
 
 
307
 
        &BODY_16_19(\$code,16,$C,$D,$E,$T,$A,$B);
308
 
        &BODY_16_19(\$code,17,$B,$C,$D,$E,$T,$A);
309
 
        &BODY_16_19(\$code,18,$A,$B,$C,$D,$E,$T);
310
 
        &BODY_16_19(\$code,19,$T,$A,$B,$C,$D,$E);
311
 
 
312
 
        &BODY_20_39(\$code,20,$E,$T,$A,$B,$C,$D);
313
 
        &BODY_20_39(\$code,21,$D,$E,$T,$A,$B,$C);
314
 
        &BODY_20_39(\$code,22,$C,$D,$E,$T,$A,$B);
315
 
        &BODY_20_39(\$code,23,$B,$C,$D,$E,$T,$A);
316
 
        &BODY_20_39(\$code,24,$A,$B,$C,$D,$E,$T);
317
 
        &BODY_20_39(\$code,25,$T,$A,$B,$C,$D,$E);
318
 
        &BODY_20_39(\$code,26,$E,$T,$A,$B,$C,$D);
319
 
        &BODY_20_39(\$code,27,$D,$E,$T,$A,$B,$C);
320
 
        &BODY_20_39(\$code,28,$C,$D,$E,$T,$A,$B);
321
 
        &BODY_20_39(\$code,29,$B,$C,$D,$E,$T,$A);
322
 
        &BODY_20_39(\$code,30,$A,$B,$C,$D,$E,$T);
323
 
        &BODY_20_39(\$code,31,$T,$A,$B,$C,$D,$E);
324
 
        &BODY_20_39(\$code,32,$E,$T,$A,$B,$C,$D);
325
 
        &BODY_20_39(\$code,33,$D,$E,$T,$A,$B,$C);
326
 
        &BODY_20_39(\$code,34,$C,$D,$E,$T,$A,$B);
327
 
        &BODY_20_39(\$code,35,$B,$C,$D,$E,$T,$A);
328
 
        &BODY_20_39(\$code,36,$A,$B,$C,$D,$E,$T);
329
 
        &BODY_20_39(\$code,37,$T,$A,$B,$C,$D,$E);
330
 
        &BODY_20_39(\$code,38,$E,$T,$A,$B,$C,$D);
331
 
        &BODY_20_39(\$code,39,$D,$E,$T,$A,$B,$C);
332
 
 
333
 
        &BODY_40_59(\$code,40,$C,$D,$E,$T,$A,$B);
334
 
        &BODY_40_59(\$code,41,$B,$C,$D,$E,$T,$A);
335
 
        &BODY_40_59(\$code,42,$A,$B,$C,$D,$E,$T);
336
 
        &BODY_40_59(\$code,43,$T,$A,$B,$C,$D,$E);
337
 
        &BODY_40_59(\$code,44,$E,$T,$A,$B,$C,$D);
338
 
        &BODY_40_59(\$code,45,$D,$E,$T,$A,$B,$C);
339
 
        &BODY_40_59(\$code,46,$C,$D,$E,$T,$A,$B);
340
 
        &BODY_40_59(\$code,47,$B,$C,$D,$E,$T,$A);
341
 
        &BODY_40_59(\$code,48,$A,$B,$C,$D,$E,$T);
342
 
        &BODY_40_59(\$code,49,$T,$A,$B,$C,$D,$E);
343
 
        &BODY_40_59(\$code,50,$E,$T,$A,$B,$C,$D);
344
 
        &BODY_40_59(\$code,51,$D,$E,$T,$A,$B,$C);
345
 
        &BODY_40_59(\$code,52,$C,$D,$E,$T,$A,$B);
346
 
        &BODY_40_59(\$code,53,$B,$C,$D,$E,$T,$A);
347
 
        &BODY_40_59(\$code,54,$A,$B,$C,$D,$E,$T);
348
 
        &BODY_40_59(\$code,55,$T,$A,$B,$C,$D,$E);
349
 
        &BODY_40_59(\$code,56,$E,$T,$A,$B,$C,$D);
350
 
        &BODY_40_59(\$code,57,$D,$E,$T,$A,$B,$C);
351
 
        &BODY_40_59(\$code,58,$C,$D,$E,$T,$A,$B);
352
 
        &BODY_40_59(\$code,59,$B,$C,$D,$E,$T,$A);
353
 
 
354
 
        &BODY_60_79(\$code,60,$A,$B,$C,$D,$E,$T);
355
 
        &BODY_60_79(\$code,61,$T,$A,$B,$C,$D,$E);
356
 
        &BODY_60_79(\$code,62,$E,$T,$A,$B,$C,$D);
357
 
        &BODY_60_79(\$code,63,$D,$E,$T,$A,$B,$C);
358
 
        &BODY_60_79(\$code,64,$C,$D,$E,$T,$A,$B);
359
 
        &BODY_60_79(\$code,65,$B,$C,$D,$E,$T,$A);
360
 
        &BODY_60_79(\$code,66,$A,$B,$C,$D,$E,$T);
361
 
        &BODY_60_79(\$code,67,$T,$A,$B,$C,$D,$E);
362
 
        &BODY_60_79(\$code,68,$E,$T,$A,$B,$C,$D);
363
 
        &BODY_60_79(\$code,69,$D,$E,$T,$A,$B,$C);
364
 
        &BODY_60_79(\$code,70,$C,$D,$E,$T,$A,$B);
365
 
        &BODY_60_79(\$code,71,$B,$C,$D,$E,$T,$A);
366
 
        &BODY_60_79(\$code,72,$A,$B,$C,$D,$E,$T);
367
 
        &BODY_60_79(\$code,73,$T,$A,$B,$C,$D,$E);
368
 
        &BODY_60_79(\$code,74,$E,$T,$A,$B,$C,$D);
369
 
        &BODY_60_79(\$code,75,$D,$E,$T,$A,$B,$C);
370
 
        &BODY_60_79(\$code,76,$C,$D,$E,$T,$A,$B);
371
 
        &BODY_60_79(\$code,77,$B,$C,$D,$E,$T,$A);
372
 
        &BODY_60_79(\$code,78,$A,$B,$C,$D,$E,$T);
373
 
        &BODY_60_79(\$code,79,$T,$A,$B,$C,$D,$E);
374
 
 
375
 
$code.=<<___;
376
 
{ .mmb; add     $h0=$h0,$E
377
 
        nop.m   0
378
 
        br.ctop.dptk.many       .Lhtop  };;
379
 
.Lhend:
380
 
{ .mmi; add     tmp0=4,ctx
381
 
        mov     ar.lc=r3                };;
382
 
{ .mmi; st4     [ctx]=$h0,8
383
 
        st4     [tmp0]=$h1,8            };;
384
 
{ .mmi; st4     [ctx]=$h2,8
385
 
        st4     [tmp0]=$h3              };;
386
 
{ .mib; st4     [ctx]=$h4,-16
387
 
        mov     pr=r2,0x1ffff
388
 
        br.ret.sptk.many        b0      };;
389
 
.endp   sha1_block_asm_host_order#
390
 
___
391
 
 
392
 
 
393
 
$code.=<<___;
394
 
// void sha1_block_asm_data_order(SHA_CTX *c,const void *p,size_t num);
395
 
.global sha1_block_asm_data_order#
396
 
.proc   sha1_block_asm_data_order#
397
 
.align  32
398
 
sha1_block_asm_data_order:
399
 
___
400
 
$code.=<<___ if ($big_endian);
401
 
{ .mmi; and     r2=3,inp                                };;
402
 
{ .mib; cmp.eq  p6,p0=r0,r2
403
 
(p6)    br.dptk.many    sha1_block_asm_host_order       };;
404
 
___
405
 
$code.=<<___;
 
234
// void sha1_block_data_order(SHA_CTX *c,const void *p,size_t num);
 
235
.global sha1_block_data_order#
 
236
.proc   sha1_block_data_order#
 
237
.align  32
 
238
sha1_block_data_order:
406
239
        .prologue
407
240
{ .mmi; alloc   tmp1=ar.pfs,3,15,0,0
408
241
        $ADDP   tmp0=4,ctx
440
273
 
441
274
___
442
275
 
443
 
        &BODY_00_15(\$code, 0,$A,$B,$C,$D,$E,$T,1);
444
 
        &BODY_00_15(\$code, 1,$T,$A,$B,$C,$D,$E,1);
445
 
        &BODY_00_15(\$code, 2,$E,$T,$A,$B,$C,$D,1);
446
 
        &BODY_00_15(\$code, 3,$D,$E,$T,$A,$B,$C,1);
447
 
        &BODY_00_15(\$code, 4,$C,$D,$E,$T,$A,$B,1);
448
 
        &BODY_00_15(\$code, 5,$B,$C,$D,$E,$T,$A,1);
449
 
        &BODY_00_15(\$code, 6,$A,$B,$C,$D,$E,$T,1);
450
 
        &BODY_00_15(\$code, 7,$T,$A,$B,$C,$D,$E,1);
451
 
        &BODY_00_15(\$code, 8,$E,$T,$A,$B,$C,$D,1);
452
 
        &BODY_00_15(\$code, 9,$D,$E,$T,$A,$B,$C,1);
453
 
        &BODY_00_15(\$code,10,$C,$D,$E,$T,$A,$B,1);
454
 
        &BODY_00_15(\$code,11,$B,$C,$D,$E,$T,$A,1);
455
 
        &BODY_00_15(\$code,12,$A,$B,$C,$D,$E,$T,1);
456
 
        &BODY_00_15(\$code,13,$T,$A,$B,$C,$D,$E,1);
457
 
        &BODY_00_15(\$code,14,$E,$T,$A,$B,$C,$D,1);
458
 
        &BODY_00_15(\$code,15,$D,$E,$T,$A,$B,$C,1);
459
 
 
460
 
        &BODY_16_19(\$code,16,$C,$D,$E,$T,$A,$B);
461
 
        &BODY_16_19(\$code,17,$B,$C,$D,$E,$T,$A);
462
 
        &BODY_16_19(\$code,18,$A,$B,$C,$D,$E,$T);
463
 
        &BODY_16_19(\$code,19,$T,$A,$B,$C,$D,$E);
464
 
 
465
 
        &BODY_20_39(\$code,20,$E,$T,$A,$B,$C,$D);
466
 
        &BODY_20_39(\$code,21,$D,$E,$T,$A,$B,$C);
467
 
        &BODY_20_39(\$code,22,$C,$D,$E,$T,$A,$B);
468
 
        &BODY_20_39(\$code,23,$B,$C,$D,$E,$T,$A);
469
 
        &BODY_20_39(\$code,24,$A,$B,$C,$D,$E,$T);
470
 
        &BODY_20_39(\$code,25,$T,$A,$B,$C,$D,$E);
471
 
        &BODY_20_39(\$code,26,$E,$T,$A,$B,$C,$D);
472
 
        &BODY_20_39(\$code,27,$D,$E,$T,$A,$B,$C);
473
 
        &BODY_20_39(\$code,28,$C,$D,$E,$T,$A,$B);
474
 
        &BODY_20_39(\$code,29,$B,$C,$D,$E,$T,$A);
475
 
        &BODY_20_39(\$code,30,$A,$B,$C,$D,$E,$T);
476
 
        &BODY_20_39(\$code,31,$T,$A,$B,$C,$D,$E);
477
 
        &BODY_20_39(\$code,32,$E,$T,$A,$B,$C,$D);
478
 
        &BODY_20_39(\$code,33,$D,$E,$T,$A,$B,$C);
479
 
        &BODY_20_39(\$code,34,$C,$D,$E,$T,$A,$B);
480
 
        &BODY_20_39(\$code,35,$B,$C,$D,$E,$T,$A);
481
 
        &BODY_20_39(\$code,36,$A,$B,$C,$D,$E,$T);
482
 
        &BODY_20_39(\$code,37,$T,$A,$B,$C,$D,$E);
483
 
        &BODY_20_39(\$code,38,$E,$T,$A,$B,$C,$D);
484
 
        &BODY_20_39(\$code,39,$D,$E,$T,$A,$B,$C);
485
 
 
486
 
        &BODY_40_59(\$code,40,$C,$D,$E,$T,$A,$B);
487
 
        &BODY_40_59(\$code,41,$B,$C,$D,$E,$T,$A);
488
 
        &BODY_40_59(\$code,42,$A,$B,$C,$D,$E,$T);
489
 
        &BODY_40_59(\$code,43,$T,$A,$B,$C,$D,$E);
490
 
        &BODY_40_59(\$code,44,$E,$T,$A,$B,$C,$D);
491
 
        &BODY_40_59(\$code,45,$D,$E,$T,$A,$B,$C);
492
 
        &BODY_40_59(\$code,46,$C,$D,$E,$T,$A,$B);
493
 
        &BODY_40_59(\$code,47,$B,$C,$D,$E,$T,$A);
494
 
        &BODY_40_59(\$code,48,$A,$B,$C,$D,$E,$T);
495
 
        &BODY_40_59(\$code,49,$T,$A,$B,$C,$D,$E);
496
 
        &BODY_40_59(\$code,50,$E,$T,$A,$B,$C,$D);
497
 
        &BODY_40_59(\$code,51,$D,$E,$T,$A,$B,$C);
498
 
        &BODY_40_59(\$code,52,$C,$D,$E,$T,$A,$B);
499
 
        &BODY_40_59(\$code,53,$B,$C,$D,$E,$T,$A);
500
 
        &BODY_40_59(\$code,54,$A,$B,$C,$D,$E,$T);
501
 
        &BODY_40_59(\$code,55,$T,$A,$B,$C,$D,$E);
502
 
        &BODY_40_59(\$code,56,$E,$T,$A,$B,$C,$D);
503
 
        &BODY_40_59(\$code,57,$D,$E,$T,$A,$B,$C);
504
 
        &BODY_40_59(\$code,58,$C,$D,$E,$T,$A,$B);
505
 
        &BODY_40_59(\$code,59,$B,$C,$D,$E,$T,$A);
506
 
 
507
 
        &BODY_60_79(\$code,60,$A,$B,$C,$D,$E,$T);
508
 
        &BODY_60_79(\$code,61,$T,$A,$B,$C,$D,$E);
509
 
        &BODY_60_79(\$code,62,$E,$T,$A,$B,$C,$D);
510
 
        &BODY_60_79(\$code,63,$D,$E,$T,$A,$B,$C);
511
 
        &BODY_60_79(\$code,64,$C,$D,$E,$T,$A,$B);
512
 
        &BODY_60_79(\$code,65,$B,$C,$D,$E,$T,$A);
513
 
        &BODY_60_79(\$code,66,$A,$B,$C,$D,$E,$T);
514
 
        &BODY_60_79(\$code,67,$T,$A,$B,$C,$D,$E);
515
 
        &BODY_60_79(\$code,68,$E,$T,$A,$B,$C,$D);
516
 
        &BODY_60_79(\$code,69,$D,$E,$T,$A,$B,$C);
517
 
        &BODY_60_79(\$code,70,$C,$D,$E,$T,$A,$B);
518
 
        &BODY_60_79(\$code,71,$B,$C,$D,$E,$T,$A);
519
 
        &BODY_60_79(\$code,72,$A,$B,$C,$D,$E,$T);
520
 
        &BODY_60_79(\$code,73,$T,$A,$B,$C,$D,$E);
521
 
        &BODY_60_79(\$code,74,$E,$T,$A,$B,$C,$D);
522
 
        &BODY_60_79(\$code,75,$D,$E,$T,$A,$B,$C);
523
 
        &BODY_60_79(\$code,76,$C,$D,$E,$T,$A,$B);
524
 
        &BODY_60_79(\$code,77,$B,$C,$D,$E,$T,$A);
525
 
        &BODY_60_79(\$code,78,$A,$B,$C,$D,$E,$T);
526
 
        &BODY_60_79(\$code,79,$T,$A,$B,$C,$D,$E);
 
276
{ my $i,@V=($A,$B,$C,$D,$E,$T);
 
277
 
 
278
        for($i=0;$i<16;$i++)    { &BODY_00_15(\$code,$i,@V); unshift(@V,pop(@V)); }
 
279
        for(;$i<20;$i++)        { &BODY_16_19(\$code,$i,@V); unshift(@V,pop(@V)); }
 
280
        for(;$i<40;$i++)        { &BODY_20_39(\$code,$i,@V); unshift(@V,pop(@V)); }
 
281
        for(;$i<60;$i++)        { &BODY_40_59(\$code,$i,@V); unshift(@V,pop(@V)); }
 
282
        for(;$i<80;$i++)        { &BODY_60_79(\$code,$i,@V); unshift(@V,pop(@V)); }
 
283
 
 
284
        (($V[5] eq $D) and ($V[0] eq $E)) or die;       # double-check
 
285
}
527
286
 
528
287
$code.=<<___;
529
288
{ .mmb; add     $h0=$h0,$E
539
298
{ .mib; st4     [ctx]=$h4,-16
540
299
        mov     pr=r2,0x1ffff
541
300
        br.ret.sptk.many        b0      };;
542
 
.endp   sha1_block_asm_data_order#
 
301
.endp   sha1_block_data_order#
 
302
stringz "SHA1 block transform for IA64, CRYPTOGAMS by <appro\@openssl.org>"
543
303
___
544
304
 
 
305
$output=shift and open STDOUT,">$output";
545
306
print $code;