/* UltraSPARC 64 mpn_modexact_1c_odd -- mpn by limb exact style remainder.

   THE FUNCTIONS IN THIS FILE ARE FOR INTERNAL USE ONLY.  THEY'RE ALMOST
   CERTAIN TO BE SUBJECT TO INCOMPATIBLE CHANGES OR DISAPPEAR COMPLETELY IN
   FUTURE GNU MP RELEASES.

Copyright 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"

#include "mpn/sparc64/sparc64.h"
/*                 64-bit divisor   32-bit divisor
                    cycles/limb      cycles/limb
*/
/* This implementation reduces the number of multiplies done, knowing that
   on ultrasparc 1 and 2 the mulx instruction stalls the whole chip.

   The key idea is to use the fact that the low limb of q*d equals l, this
   being the whole purpose of the q calculated.  It means there's no need
   to calculate the lowest 32x32->64 part of q*d; instead it can be
   inferred from l and the other three 32x32->64 parts.  See sparc64.h for
   details.

   When d is 32 bits, the same applies, but in this case there's only one
   other 32x32->64 part (ie. HIGH(q)*d).

   The net effect is that for a 64-bit divisor each limb is 4 mulx, or for
   a 32-bit divisor each is 2 mulx.

   No doubt this could be done in assembler, if that helped the scheduling,
   or perhaps guaranteed good code irrespective of the compiler.

   It might be possible to use floating point.  The loop is dominated by
   multiply latency, so it's not clear floats would improve that.  One
   possibility would be to take two limbs at a time, with a 128-bit
   inverse, if there are enough registers, which could effectively use
   float throughput to reduce total latency across two limbs.  */
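/* Below is an added illustrative sketch, not part of the original file:
   plain C for how the high half of q*d can be recovered from the known low
   half l plus three 32x32->64 cross products, i.e. the technique the
   comment above attributes to umul_ppmm_lowequal in sparc64.h.  The
   function name is hypothetical and the block is compiled out.  */
#if 0
static mp_limb_t
high_of_qd_sketch (mp_limb_t q, mp_limb_t d, mp_limb_t l)
{
  mp_limb_t qh = HIGH32 (q), ql = LOW32 (q);
  mp_limb_t dh = HIGH32 (d), dl = LOW32 (d);

  /* the three 32x32->64 parts; LOW(q)*LOW(d) is never formed */
  mp_limb_t m1  = qh * dl;
  mp_limb_t m2  = ql * dh;
  mp_limb_t mid = m1 + m2;                      /* may wrap past 2^64 */
  mp_limb_t h   = qh * dh + (mid >> 32)
                  + ((mp_limb_t) (mid < m1) << 32);

  /* the carry out of the low 64 bits is deduced by comparing l against
     the middle sum, instead of multiplying LOW(q)*LOW(d) */
  h += (HIGH32 (l) < LOW32 (mid));
  return h;
}
#endif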
#define ASSERT_RETVAL(r) \
  ASSERT (orig_c < d ? r < d : r <= d)
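/* Note (added): the two-sided bound matches the function's return
   convention: with orig_c < d the result is fully reduced (r < d), while
   orig_c == d can legitimately yield r == d, for instance when s == 0 in
   the subtract-and-addback step below.  */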
mp_limb_t
mpn_modexact_1c_odd (mp_srcptr src, mp_size_t size, mp_limb_t d, mp_limb_t orig_c)
{
  mp_limb_t  c = orig_c;
  mp_limb_t  s, l, q, h, inverse;

  ASSERT (size >= 1);
  ASSERT (d & 1);
  ASSERT_MPN (src, size);
  /* udivx is faster than 10 or 12 mulx's for one limb via an inverse */
  if (size == 1)
    {
      s = src[0];
      if (s > c)
        {
          l = s - c;
          h = l % d;
          if (h != 0)
            h = d - h;        /* negate, since the remainder of c-s is wanted */
        }
      else
        {
          l = c - s;
          h = l % d;
        }
      return h;
    }

  modlimb_invert (inverse, d);
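  /* Added note: inverse satisfies d * inverse == 1 (mod 2^64), so
     q = l * inverse gives q * d == l (mod 2^64) by construction.  For
     example, with d == 3 the inverse is 0xAAAAAAAAAAAAAAAB, and
     3 * 0xAAAAAAAAAAAAAAAB wraps around to 1 modulo 2^64.  */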
  if (d <= 0xFFFFFFFF)
    {
      s = *src++;
      size--;
      do
        {
          SUBC_LIMB (c, l, s, c);    /* l = s - c, recording the borrow in c */
          s = *src++;
          q = l * inverse;           /* so that the low limb of q*d == l */
          umul_ppmm_half_lowequal (h, q, d, l);
          c += h;
          size--;
        }
      while (size > 0);
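      /* Added note: in this d <= 0xFFFFFFFF branch each limb costs only
         2 mulx - the q = l * inverse multiply plus the single HIGH32(q)*d
         part inside umul_ppmm_half_lowequal, per the count given in the
         comment at the top of the file.  */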
      if (s <= d)
        {
          /* With high s <= d the final step can be a subtract and addback.
             If c==0 then the addback will restore to l>=0.  If c==d then
             will get l==d if s==0, but that's ok per the function
             definition.  */
          l = c - s;
          l += (l > c ? d : 0);
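          /* Added worked example: with c == 2, s == 5, d == 7 the subtract
             wraps to l == 2^64 - 3 > c, so d is added back and l wraps
             again to 4, which is (2 - 5) mod 7 as required.  */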
          ASSERT_RETVAL (l);
          return l;
        }
      else
        {
          /* Can't skip a divide, just do the loop code once more. */
          SUBC_LIMB (c, l, s, c);
          q = l * inverse;
          umul_ppmm_half_lowequal (h, q, d, l);
          c += h;

          ASSERT_RETVAL (c);
          return c;
        }
    }
  else
    {
      mp_limb_t  dl = LOW32 (d);
      mp_limb_t  dh = HIGH32 (d);

      s = *src++;
      size--;
      do
        {
          SUBC_LIMB (c, l, s, c);
          s = *src++;
          q = l * inverse;
          umul_ppmm_lowequal (h, q, d, dh, dl, l);
          c += h;
          size--;
        }
      while (size > 0);
      if (s <= d)
        {
          /* With high s <= d the final step can be a subtract and addback.
             If c==0 then the addback will restore to l>=0.  If c==d then
             will get l==d if s==0, but that's ok per the function
             definition.  */
          l = c - s;
          l += (l > c ? d : 0);

          ASSERT_RETVAL (l);
          return l;
        }
      else
        {
          /* Can't skip a divide, just do the loop code once more. */
          SUBC_LIMB (c, l, s, c);
          q = l * inverse;
          umul_ppmm_lowequal (h, q, d, dh, dl, l);
          c += h;

          ASSERT_RETVAL (c);
          return c;
        }
    }
}
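/* Added usage sketch (compiled out, name hypothetical): a divisibility
   test in the style of mpn_divisible_p.  With an initial carry of 0,
   {src,size} is divisible by the odd limb d exactly when the returned
   remainder is 0.  */
#if 0
static int
divisible_by_odd_limb_sketch (mp_srcptr src, mp_size_t size, mp_limb_t d)
{
  ASSERT (d & 1);
  return mpn_modexact_1c_odd (src, size, d, CNST_LIMB (0)) == 0;
}
#endif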