/* mpfr_round_raw_generic, mpfr_round_raw2, mpfr_round_raw, mpfr_round_prec,
   mpfr_can_round, mpfr_can_round_raw -- various rounding functions

Copyright 1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of the MPFR Library.

The MPFR Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

The MPFR Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the MPFR Library; see the file COPYING.LIB. If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA. */
#include "mpfr-impl.h"

#if (BITS_PER_MP_LIMB & (BITS_PER_MP_LIMB - 1))
#error "BITS_PER_MP_LIMB must be a power of 2"
#endif
/* If flag = 0, puts in y the value of xp (with precision xprec and
 * sign 1 if negative=0, -1 otherwise) rounded to precision yprec and
 * direction rnd_mode. Assumes x is neither zero, NaN nor +/- Infinity
 * (i.e. *xp != 0). If inexp != NULL, computes the inexact flag of the
 * rounding: 0 when exact, otherwise its sign gives the direction of
 * the rounding error.
 * In case of even rounding when rnd = GMP_RNDN, puts 2 or -2 in *inexp.
 * If flag = 1, just returns whether one should add 1 or not for rounding.
 *
 * Note: yprec may be < MPFR_PREC_MIN; in particular, it may be equal
 * to 1. In this case, the even rounding is done away from 0, which is
 * a natural generalization. Indeed, a number with 1-bit precision can
 * be seen as a denormalized number with more precision.
 */
int
mpfr_round_raw_generic (mp_limb_t *yp, mp_limb_t *xp, mp_prec_t xprec,
                        int neg, mp_prec_t yprec, mp_rnd_t rnd_mode,
                        int *inexp, int flag)
{
  mp_size_t xsize, nw;
  mp_limb_t himask, lomask, sb;
  int rw;
  int carry = 0;

  xsize = (xprec - 1) / BITS_PER_MP_LIMB + 1;

  nw = yprec / BITS_PER_MP_LIMB;
  rw = yprec & (BITS_PER_MP_LIMB - 1);

  if (flag && !inexp && (rnd_mode == GMP_RNDZ || xprec <= yprec
                         || (rnd_mode == GMP_RNDU && neg)
                         || (rnd_mode == GMP_RNDD && neg == 0)))
    return 0; /* rounding is a truncation or exact: nothing to add */

  if (rw)
    {
      nw++;
      lomask = ((MP_LIMB_T_ONE << (BITS_PER_MP_LIMB - rw)) - MP_LIMB_T_ONE);
      himask = ~lomask;
    }
  else
    {
      lomask = ~(mp_limb_t) 0;
      himask = ~(mp_limb_t) 0;
    }
  MPFR_ASSERTN(nw >= 1);

  if (xprec <= yprec)
    { /* No rounding is necessary. */
      /* if yp=xp, maybe an overlap: MPN_COPY_DECR is ok when src <= dst */
      MPFR_ASSERTN(nw >= xsize);
      if (inexp)
        *inexp = 0;
      if (flag)
        return 0;
      MPN_COPY_DECR(yp + (nw - xsize), xp, xsize);
      MPN_ZERO(yp, nw - xsize);
    }
  else
    {
      mp_size_t k;

      if ((rnd_mode == GMP_RNDU && neg) ||
          (rnd_mode == GMP_RNDD && !neg))
        rnd_mode = GMP_RNDZ; /* both cases round towards zero */

      if (inexp || rnd_mode != GMP_RNDZ)
        {
          k = xsize - nw; /* limb holding the first non-significant bits */
          if (rw == 0)
            k--;
          MPFR_ASSERTN(k >= 0);
          sb = xp[k] & lomask; /* First non-significant bits */
          if (rnd_mode == GMP_RNDN)
            {
              mp_limb_t rbmask = MP_LIMB_T_ONE << (BITS_PER_MP_LIMB - rw - 1);
              if (sb & rbmask) /* rounding bit */
                sb &= ~rbmask; /* it is 1, clear it */
              else
                rnd_mode = GMP_RNDZ; /* it is 0, behave like rounding to 0 */
            }
          while (sb == 0 && k > 0)
            sb = xp[--k]; /* gather the sticky bits */
          if (rnd_mode == GMP_RNDN)
            { /* rounding to nearest, with rounding bit = 1 */
              if (sb == 0) /* Even rounding. */
                {
                  sb = xp[xsize - nw] & (himask ^ (himask << 1));
                  if (inexp)
                    *inexp = ((neg != 0) ^ (sb != 0))
                      ? MPFR_EVEN_INEX : -MPFR_EVEN_INEX;
                }
              else /* sb != 0 */
                {
                  if (inexp)
                    *inexp = (neg == 0) ? 1 : -1;
                }
            }
          else if (inexp)
            *inexp = (sb == 0) ? 0
              : (((neg != 0) ^ (rnd_mode != GMP_RNDZ)) ? 1 : -1);
        }
      else
        sb = 0;

      if (flag)
        return sb != 0 && rnd_mode != GMP_RNDZ;

      if (sb != 0 && rnd_mode != GMP_RNDZ)
        carry = mpn_add_1(yp, xp + xsize - nw, nw,
                          rw ? MP_LIMB_T_ONE << (BITS_PER_MP_LIMB - rw) : 1);
      else
        MPN_COPY_INCR(yp, xp + xsize - nw, nw);

      yp[0] &= himask; /* clear the non-significant bits of the low limb */
    }

  return carry;
}
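
/* Illustrative sketch (added commentary, not part of the original file):
   rounding a two-limb significand to one limb, to nearest, through
   mpfr_round_raw, assumed here to be the flag = 0 variant of
   mpfr_round_raw_generic used by mpfr_round_prec below.  The bit
   patterns are chosen for the example only. */
#if 0
static void
example_mpfr_round_raw (void)
{
  mp_limb_t x[2], y[1];
  int inexact;

  x[1] = GMP_LIMB_HIGHBIT | 1; /* normalized high limb, odd last bit */
  x[0] = GMP_LIMB_HIGHBIT;     /* exactly half an ulp of the target */
  (void) mpfr_round_raw (y, x, 2 * BITS_PER_MP_LIMB, 0,
                         BITS_PER_MP_LIMB, GMP_RNDN, &inexact);
  /* tie case: even rounding rounds the odd last bit up, so
     y[0] = GMP_LIMB_HIGHBIT + 2 and inexact = MPFR_EVEN_INEX */
}
#endif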

int
mpfr_round_prec (mpfr_ptr x, mp_rnd_t rnd_mode, mp_prec_t prec)
{
  mp_limb_t *tmp, *xp;
  int carry, inexact;
  mp_size_t nw;
  TMP_DECL(marker);

  MPFR_ASSERTN(prec >= MPFR_PREC_MIN && prec <= MPFR_PREC_MAX);

  nw = 1 + (prec - 1) / BITS_PER_MP_LIMB; /* needed allocated limbs */

  /* check if x has enough allocated space for the mantissa */
  if (nw > MPFR_ABSSIZE(x))
    {
      MPFR_MANT(x) = (mp_ptr) (*__gmp_reallocate_func)
        (MPFR_MANT(x), (size_t) MPFR_ABSSIZE(x) * BYTES_PER_MP_LIMB,
         (size_t) nw * BYTES_PER_MP_LIMB);
      MPFR_SET_ABSSIZE(x, nw); /* new number of allocated limbs */
    }

  if (MPFR_IS_NAN(x))
    MPFR_RET_NAN;
  if (MPFR_IS_INF(x))
    return 0; /* infinity is exact */

  /* x is a real number */
  TMP_MARK(marker);
  tmp = TMP_ALLOC (nw * BYTES_PER_MP_LIMB);
  xp = MPFR_MANT(x);
  carry = mpfr_round_raw (tmp, xp, MPFR_PREC(x), MPFR_SIGN(x) < 0,
                          prec, rnd_mode, &inexact);
  MPFR_PREC(x) = prec;

  if (carry)
    { /* the significand overflowed into the next power of 2 */
      mp_exp_t exp = MPFR_EXP(x);

      if (exp == __mpfr_emax)
        (void) mpfr_set_overflow(x, rnd_mode, MPFR_SIGN(x));
      else
        {
          MPFR_EXP(x)++;
          xp[nw - 1] = GMP_LIMB_HIGHBIT;
          MPN_ZERO(xp, nw - 1);
        }
    }
  else
    MPN_COPY(xp, tmp, nw);

  TMP_FREE(marker);
  return inexact;
}
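
/* Illustrative sketch (added commentary, not part of the original file):
   re-rounding an mpfr_t in place to a smaller precision with
   mpfr_round_prec.  All other functions used here are standard MPFR
   calls. */
#if 0
static void
example_mpfr_round_prec (void)
{
  mpfr_t z;
  int inexact;

  mpfr_init2 (z, 100);
  mpfr_set_ui (z, 7, GMP_RNDN);
  mpfr_div_ui (z, z, 3, GMP_RNDN);             /* 7/3 with 100 bits */
  inexact = mpfr_round_prec (z, GMP_RNDN, 53); /* round to 53 bits */
  /* inexact is 0 if the 53-bit result equals the 100-bit value,
     otherwise its sign gives the direction of the rounding */
  mpfr_clear (z);
}
#endif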

/* assumption: BITS_PER_MP_LIMB is a power of 2 */

/* Assuming b is an approximation of x in direction rnd1
   with error at most 2^(MPFR_EXP(b)-err), returns 1 if one is
   able to round x exactly to precision prec with direction rnd2,
   and 0 otherwise. */

int
mpfr_can_round (mpfr_ptr b, mp_exp_t err, mp_rnd_t rnd1,
                mp_rnd_t rnd2, mp_prec_t prec)
{
  return MPFR_IS_ZERO(b) ? 0 : /* we cannot round if b=0 */
    mpfr_can_round_raw (MPFR_MANT(b),
                        (MPFR_PREC(b) - 1) / BITS_PER_MP_LIMB + 1,
                        MPFR_SIGN(b), err, rnd1, rnd2, prec);
}
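
/* Illustrative sketch (added commentary, not part of the original file):
   the classical "Ziv loop" that mpfr_can_round supports.  f_approx is a
   hypothetical routine computing an approximation of the desired value
   into t with error at most 2^(MPFR_EXP(t)-(prec-2)). */
#if 0
static int
example_ziv_loop (mpfr_ptr y, mpfr_srcptr x, mp_rnd_t rnd)
{
  mpfr_t t;
  mp_prec_t prec = MPFR_PREC(y) + 10; /* some extra working precision */
  int inexact;

  mpfr_init2 (t, prec);
  while (1)
    {
      /* f_approx (t, x); -- hypothetical approximation step */
      if (mpfr_can_round (t, prec - 2, GMP_RNDN, rnd, MPFR_PREC(y)))
        break; /* rounding t to MPFR_PREC(y) bits is now safe */
      prec += BITS_PER_MP_LIMB; /* not accurate enough: retry */
      mpfr_set_prec (t, prec);
    }
  inexact = mpfr_set (y, t, rnd);
  mpfr_clear (t);
  return inexact;
}
#endif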

int
mpfr_can_round_raw (mp_limb_t *bp, mp_size_t bn, int neg, mp_exp_t err0,
                    mp_rnd_t rnd1, mp_rnd_t rnd2, mp_prec_t prec)
{
  int k, k1, s, s1;
  int cc, cc2;
  mp_size_t tn;
  mp_prec_t err;
  mp_limb_t *tmp;
  TMP_DECL(marker);

  neg = neg <= 0; /* the sign is transmitted as +1/-1: normalize it */

  if (err0 < 0 || (mp_exp_unsigned_t) err0 <= prec)
    return 0; /* can't round */

  /* if the error is smaller than ulp(b), then anyway it will propagate
     up to ulp(b) */
  err = ((mp_exp_unsigned_t) err0 > (mp_prec_t) bn * BITS_PER_MP_LIMB) ?
    (mp_prec_t) bn * BITS_PER_MP_LIMB : err0;

  /* warning: if k = m*BITS_PER_MP_LIMB, consider limb m-1 and not m */
  k = (err - 1) / BITS_PER_MP_LIMB;
  s = err % BITS_PER_MP_LIMB;
  if (s != 0)
    s = BITS_PER_MP_LIMB - s;
  /* the error corresponds to bit s in limb k, the most significant
     limb being limb 0 (in memory, limb k is bp[bn-1-k]) */
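
  /* Worked example (added for clarity), assuming BITS_PER_MP_LIMB = 32:
     err = 70 gives k = (70 - 1) / 32 = 2 and s = 32 - (70 % 32) = 26,
     i.e. the error bit is bit 26 (counting from the least significant
     bit) of limb 2, limbs being numbered from the most significant one. */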

  k1 = (prec - 1) / BITS_PER_MP_LIMB;
  s1 = prec % BITS_PER_MP_LIMB;
  if (s1 != 0)
    s1 = BITS_PER_MP_LIMB - s1;

  /* the last significant bit is bit s1 in limb k1 */

  /* don't need to consider the k1 most significant limbs */
  k -= k1;
  bn -= k1;
  prec -= (mp_prec_t) k1 * BITS_PER_MP_LIMB;

  /* if adding or subtracting (1 << s) in bp[bn-1-k] does not
     change bp[bn-1] >> s1, then we can round */

  if (rnd1 == GMP_RNDU)
    if (neg)
      rnd1 = GMP_RNDZ;
  if (rnd1 == GMP_RNDD)
    rnd1 = neg ? GMP_RNDU : GMP_RNDZ;

  /* in the sequel, RNDU = towards infinity, RNDZ = towards zero */

  TMP_MARK(marker);
  tn = bn;
  k++; /* since we work with k+1 everywhere */
  tmp = TMP_ALLOC(tn * BYTES_PER_MP_LIMB);
  if (bn > k)
    MPN_COPY (tmp, bp, bn - k);

  if (rnd1 != GMP_RNDN)
    { /* GMP_RNDZ or GMP_RNDU */
      cc = (bp[bn - 1] >> s1) & 1;
      cc ^= mpfr_round_raw2(bp, bn, neg, rnd2, prec);

      /* now round b +/- 2^(MPFR_EXP(b)-err) */
      cc2 = rnd1 == GMP_RNDZ ?
        mpn_add_1 (tmp + bn - k, bp + bn - k, k, MP_LIMB_T_ONE << s) :
        mpn_sub_1 (tmp + bn - k, bp + bn - k, k, MP_LIMB_T_ONE << s);
    }
  else
    { /* GMP_RNDN */
      /* first round b+2^(MPFR_EXP(b)-err) */
      cc = mpn_add_1 (tmp + bn - k, bp + bn - k, k, MP_LIMB_T_ONE << s);
      cc = (tmp[bn - 1] >> s1) & 1; /* gives 0 when cc=1 */
      cc ^= mpfr_round_raw2 (tmp, bn, neg, rnd2, prec);

      /* now round b-2^(MPFR_EXP(b)-err) */
      cc2 = mpn_sub_1 (tmp + bn - k, bp + bn - k, k, MP_LIMB_T_ONE << s);
    }

  if (cc2 && cc)
    { /* the carry or borrow propagated out of the k perturbed limbs,
         so the two roundings cannot agree */
      TMP_FREE(marker);
      return 0;
    }

  cc2 = (tmp[bn - 1] >> s1) & 1;
  cc2 ^= mpfr_round_raw2 (tmp, bn, neg, rnd2, prec);

  TMP_FREE(marker);
  return cc == cc2;
}
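
/* Note on the parity test above (added commentary): mpfr_round_raw2,
   per the description of flag = 1 above, only reports whether rounding
   to prec bits in direction rnd2 would add one ulp.  XORing that report
   with the current last kept bit gives the last bit of each rounded
   perturbed value; since the perturbation 2^(MPFR_EXP(b)-err) is smaller
   than one ulp of the result (err > prec), the two roundings agree
   exactly when those last bits match, whence `return cc == cc2'. */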