/* $Id: mul.S,v 1.1 2001/04/27 21:55:43 bencollins Exp $
 * mul.S:	This routine was taken from glibc-1.09 and is covered
 *		by the GNU Library General Public License Version 2.
 */

/*
 * Signed multiply, from Appendix E of the Sparc Version 8
 * Architecture Manual.
 */

/*
 * Returns %o0 * %o1 in %o1%o0 (i.e., %o1 holds the upper 32 bits of
 * the 64-bit product).
 *
 * This code optimizes short (less than 13-bit) multiplies.
 */
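
/*
 * A rough C model of this routine's contract (a sketch for
 * illustration; it is not part of the original source):
 *
 *	long long mul(int a, int b) { return (long long) a * b; }
 *
 * The low 32 bits of the product are returned in %o0 and the high
 * 32 bits in %o1.
 */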

	.globl	.mul
.mul:
	mov	%o0, %y		! multiplier -> Y
	andncc	%o0, 0xfff, %g0	! test bits 12..31
	be	Lmul_shortway	! if zero, can do it the short way
	 andcc	%g0, %g0, %o4	! zero the partial product and clear N and V
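
	! Note: the andcc above sits in the branch delay slot, so it
	! executes whether or not the branch is taken.  It zeroes %o4,
	! the partial-product accumulator, and clears the N and V
	! condition codes that the first mulscc step consults.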

	/*
	 * Long multiply.  32 steps, followed by a final shift step.
	 */
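	! Roughly, each mulscc step examines the low bit of %y (the
	! multiplier): if it is set, %o1 is added into the running sum,
	! and the 64-bit pair %o4:%y is shifted right one bit.  This is
	! a sketch of the V8 semantics; see the Architecture Manual for
	! the exact condition-code handling.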
	mulscc	%o4, %o1, %o4	! 1
	mulscc	%o4, %o1, %o4	! 2
	mulscc	%o4, %o1, %o4	! 3
	mulscc	%o4, %o1, %o4	! 4
	mulscc	%o4, %o1, %o4	! 5
	mulscc	%o4, %o1, %o4	! 6
	mulscc	%o4, %o1, %o4	! 7
	mulscc	%o4, %o1, %o4	! 8
	mulscc	%o4, %o1, %o4	! 9
	mulscc	%o4, %o1, %o4	! 10
	mulscc	%o4, %o1, %o4	! 11
	mulscc	%o4, %o1, %o4	! 12
	mulscc	%o4, %o1, %o4	! 13
	mulscc	%o4, %o1, %o4	! 14
	mulscc	%o4, %o1, %o4	! 15
	mulscc	%o4, %o1, %o4	! 16
	mulscc	%o4, %o1, %o4	! 17
	mulscc	%o4, %o1, %o4	! 18
	mulscc	%o4, %o1, %o4	! 19
	mulscc	%o4, %o1, %o4	! 20
	mulscc	%o4, %o1, %o4	! 21
	mulscc	%o4, %o1, %o4	! 22
	mulscc	%o4, %o1, %o4	! 23
	mulscc	%o4, %o1, %o4	! 24
	mulscc	%o4, %o1, %o4	! 25
	mulscc	%o4, %o1, %o4	! 26
	mulscc	%o4, %o1, %o4	! 27
	mulscc	%o4, %o1, %o4	! 28
	mulscc	%o4, %o1, %o4	! 29
	mulscc	%o4, %o1, %o4	! 30
	mulscc	%o4, %o1, %o4	! 31
	mulscc	%o4, %o1, %o4	! 32
	mulscc	%o4, %g0, %o4	! final shift
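
	! The final step adds %g0 rather than %o1, so it only shifts
	! %o4:%y right once more, aligning the product bits without
	! accumulating another partial product.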

	! If %o0 was negative, the value computed so far is
	!	(%o0 * %o1) + (%o1 << 32),
	! so we must fix the upper 32 bits of the result by subtracting
	! %o1 when %o0 was negative (i.e., return %o4 - %o1 in %o1).
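	! Why: mulscc consumes %y (= %o0) as an unsigned multiplier, so
	! a negative %o0 is read as %o0 + 2^32.  The computed product is
	! therefore too large by %o1 * 2^32, which is exactly %o1 in the
	! upper word.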

	/* Faster code adapted from tege@sics.se's code for umul.S.  */
	sra	%o0, 31, %o2	! make mask from sign bit
	and	%o1, %o2, %o2	! %o2 = 0 or %o1, depending on sign of %o0
	rd	%y, %o0		! get lower half of product
	retl
	 sub	%o4, %o2, %o1	! subtract compensation
				!  and put upper half in place
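
	! Design note: sra by 31 yields 0 for nonnegative %o0 and all
	! ones for negative %o0, so the and/sub pair applies the fix
	! without a branch.  The sub executes in the retl delay slot.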

Lmul_shortway:
	/*
	 * Short multiply.  12 steps, followed by a final shift step.
	 * The resulting bits are off by 12 and (32-12) = 20 bit positions,
	 * but there is no problem with %o0 being negative (unlike above):
	 * a multiplier that reaches this path has bits 12..31 clear, so
	 * it is necessarily nonnegative.
	 */
	mulscc	%o4, %o1, %o4	! 1
	mulscc	%o4, %o1, %o4	! 2
	mulscc	%o4, %o1, %o4	! 3
	mulscc	%o4, %o1, %o4	! 4
	mulscc	%o4, %o1, %o4	! 5
	mulscc	%o4, %o1, %o4	! 6
	mulscc	%o4, %o1, %o4	! 7
	mulscc	%o4, %o1, %o4	! 8
	mulscc	%o4, %o1, %o4	! 9
	mulscc	%o4, %o1, %o4	! 10
	mulscc	%o4, %o1, %o4	! 11
	mulscc	%o4, %o1, %o4	! 12
	mulscc	%o4, %g0, %o4	! final shift

	/*
	 * %o4 has 20 of the bits that should be in the low part of the
	 * result; %y has the bottom 12 (as %y's top 12).  That is:
	 *
	 *	  %o4		    %y
	 * +----------------+----------------+
	 * | -12- |   -20-  | -12- |   -20-  |
	 * +------(---------+------)---------+
	 *	   --hi-- ----low-part----
	 *
	 * The upper 12 bits of %o4 should be sign-extended to form the
	 * high part of the product (i.e., highpart = %o4 >> 20).
	 */
	rd	%y, %o5		! get the low 12 product bits (%y's top 12)
	sll	%o4, 12, %o0	! shift middle bits left 12
	srl	%o5, 20, %o5	! shift low bits right 20, zero fill at left
	or	%o5, %o0, %o0	! construct low part of result
	retl
	 sra	%o4, 20, %o1	! ... and extract high part of result