/* mpf_mul_ui -- Multiply a float and an unsigned integer.

Copyright 1993, 1994, 1996, 2001, 2003, 2004 Free Software Foundation, Inc.
5
5
This file is part of the GNU MP Library.
17
17
You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
23
23
#include "gmp-impl.h"
27
/* The core operation is a multiply of PREC(r) limbs from u by v, producing
28
either PREC(r) or PREC(r)+1 result limbs. If u is shorter than PREC(r),
29
then we take only as much as it has. If u is longer we incorporate a
30
carry from the lower limbs.
32
If u has just 1 extra limb, then the carry to add is high(up[0]*v).  That
is of course what mpn_mul_1 would do if it was called with PREC(r)+1
limbs of input.
36
If u has more than 1 extra limb, then there can be a further carry bit
37
out of lower uncalculated limbs (the way the low of one product adds to
38
the high of the product below it). This is of course what an mpn_mul_1
39
would do if it was called with the full u operand. But we instead work
40
downwards explicitly, until a carry occurs or until a value other than
GMP_NUMB_MAX occurs (that being the only value a carry bit can propagate
across).
44
The carry determination normally requires two umul_ppmm's, only rarely
45
will GMP_NUMB_MAX occur and require further products.
47
The carry limb is conveniently added into the mul_1 using mpn_mul_1c when
48
that function exists, otherwise a subsequent mpn_add_1 is needed.
50
Clearly when mpn_mul_1c is used the carry must be calculated first. But
51
this is also the case when add_1 is used, since if r==u and ABSIZ(r) >
52
PREC(r) then the mpn_mul_1 overwrites the low part of the input.
54
A reuse r==u with size > prec can occur from a size PREC(r)+1 in the
55
usual way, or it can occur from an mpf_set_prec_raw leaving a bigger
56
sized value. In both cases we can end up calling mpn_mul_1 with
57
overlapping src and dst regions, but this will be with dst < src and such
58
an overlap is permitted.
62
No attempt is made to determine in advance whether the result will be
63
PREC(r) or PREC(r)+1 limbs. If it's going to be PREC(r)+1 then we could
64
take one less limb from u and generate just PREC(r), that of course
65
satisfying application requested precision. But any test counting bits
66
or forming the high product would almost certainly take longer than the
67
incremental cost of an extra limb in mpn_mul_1.
71
Repeated mpf_mul_ui's with an even v will accumulate low zero bits on the
72
result, leaving low zero limbs after a while, which it might be nice to
73
strip to save work in subsequent operations. Calculating the low limb
74
explicitly would let us direct mpn_mul_1 to put the balance at rp when
75
the low is zero (instead of normally rp+1). But it's not clear whether
76
this would be worthwhile. Explicit code for the low limb will probably
77
be slower than having it done in mpn_mul_1, so we need to consider how
78
often a zero will be stripped and how much that's likely to save.  */
26
82
mpf_mul_ui (mpf_ptr r, mpf_srcptr u, unsigned long int v)
87
mp_size_t prec, excess;
88
mp_limb_t cy_limb, vl, cbit, cin;
35
91
usize = u->_mp_size;
36
if (usize == 0 || v == 0)
92
if (UNLIKELY (v == 0) || UNLIKELY (usize == 0))
99
#if BITS_PER_ULONG > GMP_NUMB_BITS /* avoid warnings about shift amount */
100
if (v > GMP_NUMB_MAX)
104
vp[0] = v & GMP_NUMB_MASK;
105
vp[1] = v >> GMP_NUMB_BITS;
108
ASSERT_CODE (PREC(vf) = 2);
43
115
size = ABS (usize);
44
116
prec = r->_mp_prec;
119
excess = size - prec;
124
/* up is bigger than desired rp, shorten it to prec limbs and
125
determine a carry-in */
127
mp_limb_t vl_shifted = vl << GMP_NAIL_BITS;
128
mp_limb_t hi, lo, next_lo, sum;
131
/* high limb of top product */
133
umul_ppmm (cin, lo, up[i], vl_shifted);
135
/* and carry bit out of products below that, if any */
142
umul_ppmm (hi, next_lo, up[i], vl_shifted);
143
lo >>= GMP_NAIL_BITS;
144
ADDC_LIMB (cbit, sum, hi, lo);
148
/* Continue only if the sum is GMP_NUMB_MAX. GMP_NUMB_MAX is the
149
only value a carry from below can propagate across. If we've
150
just seen the carry out (ie. cbit!=0) then sum!=GMP_NUMB_MAX,
151
so this test stops us for that case too. */
152
if (LIKELY (sum != GMP_NUMB_MAX))
53
/* Since we can do it at almost no cost, remove zero limbs at low end of
60
cy_limb = mpn_mul_1 (rp, up, size, (mp_limb_t) v);
161
#if HAVE_NATIVE_mpn_mul_1c
162
cy_limb = mpn_mul_1c (rp, up, size, vl, cin);
164
cy_limb = mpn_mul_1 (rp, up, size, vl);
165
__GMPN_ADD_1 (cbit, rp, rp, size, cin);
61
168
rp[size] = cy_limb;
62
169
cy_limb = cy_limb != 0;
63
170
r->_mp_exp = u->_mp_exp + cy_limb;