1
/* This Source Code Form is subject to the terms of the Mozilla Public
2
* License, v. 2.0. If a copy of the MPL was not distributed with this
3
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4
/* $Id: mpmontg.c,v 1.25 2012/11/14 01:14:11 wtc%google.com Exp $ */
6
/* This file implements modular exponentiation using Montgomery's
7
* method for modular reduction. This file implements the method
8
 * described as "Improvement 2" in the paper "A Cryptographic Library for
9
* the Motorola DSP56000" by Stephen R. Dusse' and Burton S. Kaliski Jr.
10
* published in "Advances in Cryptology: Proceedings of EUROCRYPT '90"
11
* "Lecture Notes in Computer Science" volume 473, 1991, pg 230-244,
12
* published by Springer Verlag.
15
#define MP_USING_CACHE_SAFE_MOD_EXP 1
20
#ifdef MP_USING_MONT_MULF
23
#include <stddef.h> /* ptrdiff_t */
25
/* if MP_CHAR_STORE_SLOW is defined, we */
26
/* need to know endianness of this platform. */
27
#ifdef MP_CHAR_STORE_SLOW
28
#if !defined(MP_IS_BIG_ENDIAN) && !defined(MP_IS_LITTLE_ENDIAN)
29
#error "You must define MP_IS_BIG_ENDIAN or MP_IS_LITTLE_ENDIAN\n" \
30
" if you define MP_CHAR_STORE_SLOW."
36
#define MAX_ODD_INTS 32 /* 2 ** (WINDOW_BITS - 1) */
38
/*! computes T = REDC(T), 2^b == R
41
mp_err s_mp_redc(mp_int *T, mp_mont_modulus *mmm)
46
i = (MP_USED(&mmm->N) << 1) + 1;
47
MP_CHECKOK( s_mp_pad(T, i) );
48
for (i = 0; i < MP_USED(&mmm->N); ++i ) {
49
mp_digit m_i = MP_DIGIT(T, i) * mmm->n0prime;
50
/* T += N * m_i * (MP_RADIX ** i); */
51
MP_CHECKOK( s_mp_mul_d_add_offset(&mmm->N, m_i, T, i) );
56
s_mp_rshd( T, MP_USED(&mmm->N) );
58
if ((res = s_mp_cmp(T, &mmm->N)) >= 0) {
60
MP_CHECKOK( s_mp_sub(T, &mmm->N) );
62
if ((res = mp_cmp(T, &mmm->N)) >= 0) {
73
#if !defined(MP_MONT_USE_MP_MUL)
75
/*! c <- REDC( a * b ) mod N
76
\param a < N i.e. "reduced"
77
\param b < N i.e. "reduced"
78
\param mmm modulus N and n0' of N
80
mp_err s_mp_mul_mont(const mp_int *a, const mp_int *b, mp_int *c,
86
mp_size ib; /* "index b": index of current digit of B */
89
ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG);
91
if (MP_USED(a) < MP_USED(b)) {
92
const mp_int *xch = b; /* switch a and b, to do fewer outer loops */
97
MP_USED(c) = 1; MP_DIGIT(c, 0) = 0;
98
ib = (MP_USED(&mmm->N) << 1) + 1;
99
if((res = s_mp_pad(c, ib)) != MP_OKAY)
104
s_mpv_mul_d(MP_DIGITS(a), useda, *pb++, MP_DIGITS(c));
105
s_mp_setz(MP_DIGITS(c) + useda + 1, ib - (useda + 1));
106
m_i = MP_DIGIT(c, 0) * mmm->n0prime;
107
s_mp_mul_d_add_offset(&mmm->N, m_i, c, 0);
109
/* Outer loop: Digits of b */
111
for (ib = 1; ib < usedb; ib++) {
112
mp_digit b_i = *pb++;
114
/* Inner product: Digits of a */
116
s_mpv_mul_d_add_prop(MP_DIGITS(a), useda, b_i, MP_DIGITS(c) + ib);
117
m_i = MP_DIGIT(c, ib) * mmm->n0prime;
118
s_mp_mul_d_add_offset(&mmm->N, m_i, c, ib);
120
if (usedb < MP_USED(&mmm->N)) {
121
for (usedb = MP_USED(&mmm->N); ib < usedb; ++ib ) {
122
m_i = MP_DIGIT(c, ib) * mmm->n0prime;
123
s_mp_mul_d_add_offset(&mmm->N, m_i, c, ib);
127
s_mp_rshd( c, MP_USED(&mmm->N) ); /* c /= R */
128
if (s_mp_cmp(c, &mmm->N) >= 0) {
129
MP_CHECKOK( s_mp_sub(c, &mmm->N) );
139
mp_err s_mp_to_mont(const mp_int *x, mp_mont_modulus *mmm, mp_int *xMont)
143
/* xMont = x * R mod N where N is modulus */
144
MP_CHECKOK( mp_copy( x, xMont ) );
145
MP_CHECKOK( s_mp_lshd( xMont, MP_USED(&mmm->N) ) ); /* xMont = x << b */
146
MP_CHECKOK( mp_div(xMont, &mmm->N, 0, xMont) ); /* mod N */
151
#ifdef MP_USING_MONT_MULF
153
/* the floating point multiply is already cache safe,
154
* don't turn on cache safe unless we specifically
156
#ifndef MP_FORCE_CACHE_SAFE
157
#undef MP_USING_CACHE_SAFE_MOD_EXP
160
unsigned int mp_using_mont_mulf = 1;
162
/* computes montgomery square of the integer in mResult */
164
conv_i32_to_d32_and_d16(dm1, d16Tmp, mResult, nLen); \
165
mont_mulf_noconv(mResult, dm1, d16Tmp, \
166
dTmp, dn, MP_DIGITS(modulus), nLen, dn0)
168
/* computes montgomery product of x and the integer in mResult */
170
conv_i32_to_d32(dm1, mResult, nLen); \
171
mont_mulf_noconv(mResult, dm1, oddPowers[x], \
172
dTmp, dn, MP_DIGITS(modulus), nLen, dn0)
174
/* Do modular exponentiation using floating point multiply code. */
175
mp_err mp_exptmod_f(const mp_int * montBase,
176
const mp_int * exponent,
177
const mp_int * modulus,
179
mp_mont_modulus *mmm,
181
mp_size bits_in_exponent,
186
double *dBuf = 0, *dm1, *dn, *dSqr, *d16Tmp, *dTmp;
191
int dSize = 0, oddPowSize, dTmpSize;
193
double *oddPowers[MAX_ODD_INTS];
195
/* function for computing n0prime only works if n0 is odd */
197
MP_DIGITS(&accum1) = 0;
199
for (i = 0; i < MAX_ODD_INTS; ++i)
202
MP_CHECKOK( mp_init_size(&accum1, 3 * nLen + 2) );
205
MP_CHECKOK( s_mp_to_mont(&accum1, mmm, &accum1) );
206
MP_CHECKOK( s_mp_pad(&accum1, nLen) );
208
oddPowSize = 2 * nLen + 1;
209
dTmpSize = 2 * oddPowSize;
210
dSize = sizeof(double) * (nLen * 4 + 1 +
211
((odd_ints + 1) * oddPowSize) + dTmpSize);
212
dBuf = (double *)malloc(dSize);
213
dm1 = dBuf; /* array of d32 */
214
dn = dBuf + nLen; /* array of d32 */
215
dSqr = dn + nLen; /* array of d32 */
216
d16Tmp = dSqr + nLen; /* array of d16 */
217
dTmp = d16Tmp + oddPowSize;
219
for (i = 0; i < odd_ints; ++i) {
223
mResult = (mp_digit *)(dTmp + dTmpSize); /* size is nLen + 1 */
225
/* Make dn and dn0 */
226
conv_i32_to_d32(dn, MP_DIGITS(modulus), nLen);
227
dn0 = (double)(mmm->n0prime & 0xffff);
230
conv_i32_to_d32_and_d16(dm1, oddPowers[0], MP_DIGITS(montBase), nLen);
231
mont_mulf_noconv(mResult, dm1, oddPowers[0],
232
dTmp, dn, MP_DIGITS(modulus), nLen, dn0);
233
conv_i32_to_d32(dSqr, mResult, nLen);
235
for (i = 1; i < odd_ints; ++i) {
236
mont_mulf_noconv(mResult, dSqr, oddPowers[i - 1],
237
dTmp, dn, MP_DIGITS(modulus), nLen, dn0);
238
conv_i32_to_d16(oddPowers[i], mResult, nLen);
241
s_mp_copy(MP_DIGITS(&accum1), mResult, nLen); /* from, to, len */
243
for (expOff = bits_in_exponent - window_bits; expOff >= 0; expOff -= window_bits) {
245
MP_CHECKOK( mpl_get_bits(exponent, expOff, window_bits) );
246
smallExp = (mp_size)res;
248
if (window_bits == 1) {
251
} else if (smallExp & 1) {
256
} else if (window_bits == 4) {
259
} else if (smallExp & 1) {
260
SQR; SQR; SQR; SQR; MUL(smallExp/2);
261
} else if (smallExp & 2) {
262
SQR; SQR; SQR; MUL(smallExp/4); SQR;
263
} else if (smallExp & 4) {
264
SQR; SQR; MUL(smallExp/8); SQR; SQR;
265
} else if (smallExp & 8) {
266
SQR; MUL(smallExp/16); SQR; SQR; SQR;
270
} else if (window_bits == 5) {
272
SQR; SQR; SQR; SQR; SQR;
273
} else if (smallExp & 1) {
274
SQR; SQR; SQR; SQR; SQR; MUL(smallExp/2);
275
} else if (smallExp & 2) {
276
SQR; SQR; SQR; SQR; MUL(smallExp/4); SQR;
277
} else if (smallExp & 4) {
278
SQR; SQR; SQR; MUL(smallExp/8); SQR; SQR;
279
} else if (smallExp & 8) {
280
SQR; SQR; MUL(smallExp/16); SQR; SQR; SQR;
281
} else if (smallExp & 0x10) {
282
SQR; MUL(smallExp/32); SQR; SQR; SQR; SQR;
286
} else if (window_bits == 6) {
288
SQR; SQR; SQR; SQR; SQR; SQR;
289
} else if (smallExp & 1) {
290
SQR; SQR; SQR; SQR; SQR; SQR; MUL(smallExp/2);
291
} else if (smallExp & 2) {
292
SQR; SQR; SQR; SQR; SQR; MUL(smallExp/4); SQR;
293
} else if (smallExp & 4) {
294
SQR; SQR; SQR; SQR; MUL(smallExp/8); SQR; SQR;
295
} else if (smallExp & 8) {
296
SQR; SQR; SQR; MUL(smallExp/16); SQR; SQR; SQR;
297
} else if (smallExp & 0x10) {
298
SQR; SQR; MUL(smallExp/32); SQR; SQR; SQR; SQR;
299
} else if (smallExp & 0x20) {
300
SQR; MUL(smallExp/64); SQR; SQR; SQR; SQR; SQR;
309
s_mp_copy(mResult, MP_DIGITS(&accum1), nLen); /* from, to, len */
311
res = s_mp_redc(&accum1, mmm);
312
mp_exch(&accum1, result);
318
memset(dBuf, 0, dSize);
329
MP_CHECKOK( mp_sqr(a, b) );\
330
MP_CHECKOK( s_mp_redc(b, mmm) )
332
#if defined(MP_MONT_USE_MP_MUL)
334
MP_CHECKOK( mp_mul(a, oddPowers + (x), b) ); \
335
MP_CHECKOK( s_mp_redc(b, mmm) )
338
MP_CHECKOK( s_mp_mul_mont(a, oddPowers + (x), b, mmm) )
341
#define SWAPPA ptmp = pa1; pa1 = pa2; pa2 = ptmp
343
/* Do modular exponentiation using integer multiply code. */
344
mp_err mp_exptmod_i(const mp_int * montBase,
345
const mp_int * exponent,
346
const mp_int * modulus,
348
mp_mont_modulus *mmm,
350
mp_size bits_in_exponent,
354
mp_int *pa1, *pa2, *ptmp;
358
mp_int accum1, accum2, power2, oddPowers[MAX_ODD_INTS];
360
/* power2 = base ** 2; oddPowers[i] = base ** (2*i + 1); */
361
/* oddPowers[i] = base ** (2*i + 1); */
363
MP_DIGITS(&accum1) = 0;
364
MP_DIGITS(&accum2) = 0;
365
MP_DIGITS(&power2) = 0;
366
for (i = 0; i < MAX_ODD_INTS; ++i) {
367
MP_DIGITS(oddPowers + i) = 0;
370
MP_CHECKOK( mp_init_size(&accum1, 3 * nLen + 2) );
371
MP_CHECKOK( mp_init_size(&accum2, 3 * nLen + 2) );
373
MP_CHECKOK( mp_init_copy(&oddPowers[0], montBase) );
375
mp_init_size(&power2, nLen + 2 * MP_USED(montBase) + 2);
376
MP_CHECKOK( mp_sqr(montBase, &power2) ); /* power2 = montBase ** 2 */
377
MP_CHECKOK( s_mp_redc(&power2, mmm) );
379
for (i = 1; i < odd_ints; ++i) {
380
mp_init_size(oddPowers + i, nLen + 2 * MP_USED(&power2) + 2);
381
MP_CHECKOK( mp_mul(oddPowers + (i - 1), &power2, oddPowers + i) );
382
MP_CHECKOK( s_mp_redc(oddPowers + i, mmm) );
385
/* set accumulator to montgomery residue of 1 */
387
MP_CHECKOK( s_mp_to_mont(&accum1, mmm, &accum1) );
391
for (expOff = bits_in_exponent - window_bits; expOff >= 0; expOff -= window_bits) {
393
MP_CHECKOK( mpl_get_bits(exponent, expOff, window_bits) );
394
smallExp = (mp_size)res;
396
if (window_bits == 1) {
398
SQR(pa1,pa2); SWAPPA;
399
} else if (smallExp & 1) {
400
SQR(pa1,pa2); MUL(0,pa2,pa1);
404
} else if (window_bits == 4) {
406
SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
407
} else if (smallExp & 1) {
408
SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
409
MUL(smallExp/2, pa1,pa2); SWAPPA;
410
} else if (smallExp & 2) {
411
SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2);
412
MUL(smallExp/4,pa2,pa1); SQR(pa1,pa2); SWAPPA;
413
} else if (smallExp & 4) {
414
SQR(pa1,pa2); SQR(pa2,pa1); MUL(smallExp/8,pa1,pa2);
415
SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA;
416
} else if (smallExp & 8) {
417
SQR(pa1,pa2); MUL(smallExp/16,pa2,pa1); SQR(pa1,pa2);
418
SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA;
422
} else if (window_bits == 5) {
424
SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
425
SQR(pa1,pa2); SWAPPA;
426
} else if (smallExp & 1) {
427
SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
428
SQR(pa1,pa2); MUL(smallExp/2,pa2,pa1);
429
} else if (smallExp & 2) {
430
SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
431
MUL(smallExp/4,pa1,pa2); SQR(pa2,pa1);
432
} else if (smallExp & 4) {
433
SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2);
434
MUL(smallExp/8,pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
435
} else if (smallExp & 8) {
436
SQR(pa1,pa2); SQR(pa2,pa1); MUL(smallExp/16,pa1,pa2);
437
SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
438
} else if (smallExp & 0x10) {
439
SQR(pa1,pa2); MUL(smallExp/32,pa2,pa1); SQR(pa1,pa2);
440
SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
444
} else if (window_bits == 6) {
446
SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
447
SQR(pa1,pa2); SQR(pa2,pa1);
448
} else if (smallExp & 1) {
449
SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
450
SQR(pa1,pa2); SQR(pa2,pa1); MUL(smallExp/2,pa1,pa2); SWAPPA;
451
} else if (smallExp & 2) {
452
SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
453
SQR(pa1,pa2); MUL(smallExp/4,pa2,pa1); SQR(pa1,pa2); SWAPPA;
454
} else if (smallExp & 4) {
455
SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
456
MUL(smallExp/8,pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA;
457
} else if (smallExp & 8) {
458
SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2);
459
MUL(smallExp/16,pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
460
SQR(pa1,pa2); SWAPPA;
461
} else if (smallExp & 0x10) {
462
SQR(pa1,pa2); SQR(pa2,pa1); MUL(smallExp/32,pa1,pa2);
463
SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA;
464
} else if (smallExp & 0x20) {
465
SQR(pa1,pa2); MUL(smallExp/64,pa2,pa1); SQR(pa1,pa2);
466
SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA;
475
res = s_mp_redc(pa1, mmm);
476
mp_exch(pa1, result);
482
for (i = 0; i < odd_ints; ++i) {
483
mp_clear(oddPowers + i);
490
#ifdef MP_USING_CACHE_SAFE_MOD_EXP
491
unsigned int mp_using_cache_safe_exp = 1;
494
mp_err mp_set_safe_modexp(int value)
496
#ifdef MP_USING_CACHE_SAFE_MOD_EXP
497
mp_using_cache_safe_exp = value;
507
#ifdef MP_USING_CACHE_SAFE_MOD_EXP
508
#define WEAVE_WORD_SIZE 4
510
#ifndef MP_CHAR_STORE_SLOW
512
* mpi_to_weave takes an array of bignums, a matrix in which each bignum
513
* occupies all the columns of a row, and transposes it into a matrix in
514
* which each bignum occupies a column of every row. The first row of the
515
* input matrix becomes the first column of the output matrix. The n'th
516
* row of input becomes the n'th column of output. The input data is said
517
* to be "interleaved" or "woven" into the output matrix.
519
* The array of bignums is left in this woven form. Each time a single
520
* bignum value is needed, it is recreated by fetching the n'th column,
521
* forming a single row which is the new bignum.
523
 * The purpose of this interleaving is to make it impossible to determine which
524
* of the bignums is being used in any one operation by examining the pattern
527
* The weaving function does not transpose the entire input matrix in one call.
528
* It transposes 4 rows of mp_ints into their respective columns of output.
530
* There are two different implementations of the weaving and unweaving code
531
* in this file. One uses byte loads and stores. The second uses loads and
532
* stores of mp_weave_word size values. The weaved forms of these two
533
* implementations differ. Consequently, each one has its own explanation.
535
* Here is the explanation for the byte-at-a-time implementation.
537
* This implementation treats each mp_int bignum as an array of bytes,
538
* rather than as an array of mp_digits. It stores those bytes as a
539
* column of bytes in the output matrix. It doesn't care if the machine
540
* uses big-endian or little-endian byte ordering within mp_digits.
541
* The first byte of the mp_digit array becomes the first byte in the output
542
* column, regardless of whether that byte is the MSB or LSB of the mp_digit.
544
* "bignums" is an array of mp_ints.
545
* It points to four rows, four mp_ints, a subset of a larger array of mp_ints.
547
* "weaved" is the weaved output matrix.
548
* The first byte of bignums[0] is stored in weaved[0].
550
* "nBignums" is the total number of bignums in the array of which "bignums"
553
* "nDigits" is the size in mp_digits of each mp_int in the "bignums" array.
554
* mp_ints that use less than nDigits digits are logically padded with zeros
555
* while being stored in the weaved array.
557
mp_err mpi_to_weave(const mp_int *bignums,
558
unsigned char *weaved,
559
mp_size nDigits, /* in each mp_int of input */
560
mp_size nBignums) /* in the entire source array */
563
unsigned char * endDest = weaved + (nDigits * nBignums * sizeof(mp_digit));
565
for (i=0; i < WEAVE_WORD_SIZE; i++) {
566
mp_size used = MP_USED(&bignums[i]);
567
unsigned char *pSrc = (unsigned char *)MP_DIGITS(&bignums[i]);
568
unsigned char *endSrc = pSrc + (used * sizeof(mp_digit));
569
unsigned char *pDest = weaved + i;
571
ARGCHK(MP_SIGN(&bignums[i]) == MP_ZPOS, MP_BADARG);
572
ARGCHK(used <= nDigits, MP_BADARG);
574
for (; pSrc < endSrc; pSrc++) {
578
while (pDest < endDest) {
587
/* Reverse the operation above for one mp_int.
588
* Reconstruct one mp_int from its column in the weaved array.
589
* "pSrc" points to the offset into the weave array of the bignum we
590
* are going to reconstruct.
592
mp_err weave_to_mpi(mp_int *a, /* output, result */
593
const unsigned char *pSrc, /* input, byte matrix */
594
mp_size nDigits, /* per mp_int output */
595
mp_size nBignums) /* bignums in weaved matrix */
597
unsigned char *pDest = (unsigned char *)MP_DIGITS(a);
598
unsigned char *endDest = pDest + (nDigits * sizeof(mp_digit));
600
MP_SIGN(a) = MP_ZPOS;
601
MP_USED(a) = nDigits;
603
for (; pDest < endDest; pSrc += nBignums, pDest++) {
612
/* Need a primitive that we know is 32 bits long... */
613
/* this is true on all modern processors we know of today*/
614
typedef unsigned int mp_weave_word;
617
 * on some platforms character stores into memory are very expensive since they
618
* generate a read/modify/write operation on the bus. On those platforms
619
* we need to do integer writes to the bus. Because of some unrolled code,
620
* in this current code the size of mp_weave_word must be four. The code that
621
 * makes this assumption explicitly is called out. (on some platforms a write
622
 * of 4 bytes still requires a single read-modify-write operation.)
624
 * This function takes the identical parameters as the function above,
625
* however it lays out the final array differently. Where the previous function
626
* treats the mpi_int as an byte array, this function treats it as an array of
627
* mp_digits where each digit is stored in big endian order.
629
* since we need to interleave on a byte by byte basis, we need to collect
630
* several mpi structures together into a single uint32 before we write. We
631
* also need to make sure the uint32 is arranged so that the first value of
632
* the first array winds up in b[0]. This means construction of that uint32
633
* is endian specific (even though the layout of the mp_digits in the array
634
* is always big endian).
636
* The final data is stored as follows :
638
* Our same logical array p array, m is sizeof(mp_digit),
639
* N is still count and n is now b_size. If we define p[i].digit[j]0 as the
640
* most significant byte of the word p[i].digit[j], p[i].digit[j]1 as
641
* the next most significant byte of p[i].digit[j], ... and p[i].digit[j]m-1
642
* is the least significant byte.
643
* Our array would look like:
644
* p[0].digit[0]0 p[1].digit[0]0 ... p[N-2].digit[0]0 p[N-1].digit[0]0
645
* p[0].digit[0]1 p[1].digit[0]1 ... p[N-2].digit[0]1 p[N-1].digit[0]1
647
* p[0].digit[0]m-1 p[1].digit[0]m-1 ... p[N-2].digit[0]m-1 p[N-1].digit[0]m-1
648
* p[0].digit[1]0 p[1].digit[1]0 ... p[N-2].digit[1]0 p[N-1].digit[1]0
651
* p[0].digit[n-1]m-2 p[1].digit[n-1]m-2 ... p[N-2].digit[n-1]m-2 p[N-1].digit[n-1]m-2
652
* p[0].digit[n-1]m-1 p[1].digit[n-1]m-1 ... p[N-2].digit[n-1]m-1 p[N-1].digit[n-1]m-1
655
mp_err mpi_to_weave(const mp_int *a, unsigned char *b,
656
mp_size b_size, mp_size count)
667
mp_weave_word *weaved = (mp_weave_word *)b;
669
count = count/sizeof(mp_weave_word);
671
/* this code pretty much depends on this ! */
673
assert(WEAVE_WORD_SIZE == 4);
674
assert(sizeof(mp_weave_word) == 4);
677
digitsa0 = MP_DIGITS(&a[0]);
678
digitsa1 = MP_DIGITS(&a[1]);
679
digitsa2 = MP_DIGITS(&a[2]);
680
digitsa3 = MP_DIGITS(&a[3]);
681
useda0 = MP_USED(&a[0]);
682
useda1 = MP_USED(&a[1]);
683
useda2 = MP_USED(&a[2]);
684
useda3 = MP_USED(&a[3]);
686
ARGCHK(MP_SIGN(&a[0]) == MP_ZPOS, MP_BADARG);
687
ARGCHK(MP_SIGN(&a[1]) == MP_ZPOS, MP_BADARG);
688
ARGCHK(MP_SIGN(&a[2]) == MP_ZPOS, MP_BADARG);
689
ARGCHK(MP_SIGN(&a[3]) == MP_ZPOS, MP_BADARG);
690
ARGCHK(useda0 <= b_size, MP_BADARG);
691
ARGCHK(useda1 <= b_size, MP_BADARG);
692
ARGCHK(useda2 <= b_size, MP_BADARG);
693
ARGCHK(useda3 <= b_size, MP_BADARG);
695
#define SAFE_FETCH(digit, used, word) ((word) < (used) ? (digit[word]) : 0)
697
for (i=0; i < b_size; i++) {
698
mp_digit d0 = SAFE_FETCH(digitsa0,useda0,i);
699
mp_digit d1 = SAFE_FETCH(digitsa1,useda1,i);
700
mp_digit d2 = SAFE_FETCH(digitsa2,useda2,i);
701
mp_digit d3 = SAFE_FETCH(digitsa3,useda3,i);
702
register mp_weave_word acc;
705
* ONE_STEP takes the MSB of each of our current digits and places that
706
* byte in the appropriate position for writing to the weaved array.
711
* When the data is written it would always wind up:
717
* Once we've written the MSB, we shift the whole digit up left one
718
* byte, putting the Next Most Significant Byte in the MSB position,
719
 * so when we repeat the next one step that byte will be written.
720
* NOTE: This code assumes sizeof(mp_weave_word) and MP_WEAVE_WORD_SIZE
723
#ifdef MP_IS_LITTLE_ENDIAN
724
#define MPI_WEAVE_ONE_STEP \
725
acc = (d0 >> (MP_DIGIT_BIT-8)) & 0x000000ff; d0 <<= 8; /*b0*/ \
726
acc |= (d1 >> (MP_DIGIT_BIT-16)) & 0x0000ff00; d1 <<= 8; /*b1*/ \
727
acc |= (d2 >> (MP_DIGIT_BIT-24)) & 0x00ff0000; d2 <<= 8; /*b2*/ \
728
acc |= (d3 >> (MP_DIGIT_BIT-32)) & 0xff000000; d3 <<= 8; /*b3*/ \
729
*weaved = acc; weaved += count;
731
#define MPI_WEAVE_ONE_STEP \
732
acc = (d0 >> (MP_DIGIT_BIT-32)) & 0xff000000; d0 <<= 8; /*b0*/ \
733
acc |= (d1 >> (MP_DIGIT_BIT-24)) & 0x00ff0000; d1 <<= 8; /*b1*/ \
734
acc |= (d2 >> (MP_DIGIT_BIT-16)) & 0x0000ff00; d2 <<= 8; /*b2*/ \
735
acc |= (d3 >> (MP_DIGIT_BIT-8)) & 0x000000ff; d3 <<= 8; /*b3*/ \
736
*weaved = acc; weaved += count;
738
switch (sizeof(mp_digit)) {
784
/* reverse the operation above for one entry.
785
* b points to the offset into the weave array of the power we are
787
mp_err weave_to_mpi(mp_int *a, const unsigned char *b,
788
mp_size b_size, mp_size count)
790
mp_digit *pb = MP_DIGITS(a);
791
mp_digit *end = &pb[b_size];
793
MP_SIGN(a) = MP_ZPOS;
796
for (; pb < end; pb++) {
797
register mp_digit digit;
799
digit = *b << 8; b += count;
800
#define MPI_UNWEAVE_ONE_STEP digit |= *b; b += count; digit = digit << 8;
801
switch (sizeof(mp_digit)) {
839
digit |= *b; b += count;
850
MP_CHECKOK( mp_sqr(a, b) );\
851
MP_CHECKOK( s_mp_redc(b, mmm) )
853
#if defined(MP_MONT_USE_MP_MUL)
854
#define MUL_NOWEAVE(x,a,b) \
855
MP_CHECKOK( mp_mul(a, x, b) ); \
856
MP_CHECKOK( s_mp_redc(b, mmm) )
858
#define MUL_NOWEAVE(x,a,b) \
859
MP_CHECKOK( s_mp_mul_mont(a, x, b, mmm) )
863
MP_CHECKOK( weave_to_mpi(&tmp, powers + (x), nLen, num_powers) ); \
864
MUL_NOWEAVE(&tmp,a,b)
866
#define SWAPPA ptmp = pa1; pa1 = pa2; pa2 = ptmp
867
#define MP_ALIGN(x,y) ((((ptrdiff_t)(x))+((y)-1))&(((ptrdiff_t)0)-(y)))
869
/* Do modular exponentiation using integer multiply code. */
870
mp_err mp_exptmod_safe_i(const mp_int * montBase,
871
const mp_int * exponent,
872
const mp_int * modulus,
874
mp_mont_modulus *mmm,
876
mp_size bits_in_exponent,
880
mp_int *pa1, *pa2, *ptmp;
882
mp_size first_window;
885
mp_int accum1, accum2, accum[WEAVE_WORD_SIZE];
887
unsigned char *powersArray;
888
unsigned char *powers;
890
MP_DIGITS(&accum1) = 0;
891
MP_DIGITS(&accum2) = 0;
892
MP_DIGITS(&accum[0]) = 0;
893
MP_DIGITS(&accum[1]) = 0;
894
MP_DIGITS(&accum[2]) = 0;
895
MP_DIGITS(&accum[3]) = 0;
898
powersArray = (unsigned char *)malloc(num_powers*(nLen*sizeof(mp_digit)+1));
899
if (powersArray == NULL) {
904
/* powers[i] = base ** (i); */
905
powers = (unsigned char *)MP_ALIGN(powersArray,num_powers);
907
/* grab the first window value. This allows us to preload accumulator1
908
 * and save a conversion, some squares and a multiply */
909
MP_CHECKOK( mpl_get_bits(exponent,
910
bits_in_exponent-window_bits, window_bits) );
911
first_window = (mp_size)res;
913
MP_CHECKOK( mp_init_size(&accum1, 3 * nLen + 2) );
914
MP_CHECKOK( mp_init_size(&accum2, 3 * nLen + 2) );
915
MP_CHECKOK( mp_init_size(&tmp, 3 * nLen + 2) );
917
/* build the first WEAVE_WORD powers inline */
918
/* if WEAVE_WORD_SIZE is not 4, this code will have to change */
919
if (num_powers > 2) {
920
MP_CHECKOK( mp_init_size(&accum[0], 3 * nLen + 2) );
921
MP_CHECKOK( mp_init_size(&accum[1], 3 * nLen + 2) );
922
MP_CHECKOK( mp_init_size(&accum[2], 3 * nLen + 2) );
923
MP_CHECKOK( mp_init_size(&accum[3], 3 * nLen + 2) );
924
mp_set(&accum[0], 1);
925
MP_CHECKOK( s_mp_to_mont(&accum[0], mmm, &accum[0]) );
926
MP_CHECKOK( mp_copy(montBase, &accum[1]) );
927
SQR(montBase, &accum[2]);
928
MUL_NOWEAVE(montBase, &accum[2], &accum[3]);
929
MP_CHECKOK( mpi_to_weave(accum, powers, nLen, num_powers) );
930
if (first_window < 4) {
931
MP_CHECKOK( mp_copy(&accum[first_window], &accum1) );
932
first_window = num_powers;
935
if (first_window == 0) {
937
MP_CHECKOK( s_mp_to_mont(&accum1, mmm, &accum1) );
939
/* assert first_window == 1? */
940
MP_CHECKOK( mp_copy(montBase, &accum1) );
945
* calculate all the powers in the powers array.
946
* this adds 2**(k-1)-2 square operations over just calculating the
947
* odd powers where k is the window size in the two other mp_modexpt
948
* implementations in this file. We will get some of that
949
* back by not needing the first 'k' squares and one multiply for the
951
for (i = WEAVE_WORD_SIZE; i < num_powers; i++) {
952
int acc_index = i & (WEAVE_WORD_SIZE-1); /* i % WEAVE_WORD_SIZE */
954
MUL_NOWEAVE(montBase, &accum[acc_index-1] , &accum[acc_index]);
955
/* we've filled the array do our 'per array' processing */
956
if (acc_index == (WEAVE_WORD_SIZE-1)) {
957
MP_CHECKOK( mpi_to_weave(accum, powers + i - (WEAVE_WORD_SIZE-1),
960
if (first_window <= i) {
961
MP_CHECKOK( mp_copy(&accum[first_window & (WEAVE_WORD_SIZE-1)],
963
first_window = num_powers;
967
/* up to 8 we can find 2^i-1 in the accum array, but at 8 our source
968
* and target are the same so we need to copy.. After that, the
969
* value is overwritten, so we need to fetch it from the stored
971
if (i > 2* WEAVE_WORD_SIZE) {
972
MP_CHECKOK(weave_to_mpi(&accum2, powers+i/2, nLen, num_powers));
973
SQR(&accum2, &accum[acc_index]);
975
int half_power_index = (i/2) & (WEAVE_WORD_SIZE-1);
976
if (half_power_index == acc_index) {
977
/* copy is cheaper than weave_to_mpi */
978
MP_CHECKOK(mp_copy(&accum[half_power_index], &accum2));
979
SQR(&accum2,&accum[acc_index]);
981
SQR(&accum[half_power_index],&accum[acc_index]);
986
/* if the accum1 isn't set, Then there is something wrong with our logic
987
* above and is an internal programming error.
990
assert(MP_USED(&accum1) != 0);
993
/* set accumulator to montgomery residue of 1 */
997
for (expOff = bits_in_exponent - window_bits*2; expOff >= 0; expOff -= window_bits) {
999
MP_CHECKOK( mpl_get_bits(exponent, expOff, window_bits) );
1000
smallExp = (mp_size)res;
1002
/* handle unroll the loops */
1003
switch (window_bits) {
1006
SQR(pa1,pa2); SWAPPA;
1007
} else if (smallExp & 1) {
1008
SQR(pa1,pa2); MUL_NOWEAVE(montBase,pa2,pa1);
1014
SQR(pa1,pa2); SQR(pa2,pa1);
1017
SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
1018
MUL(smallExp, pa1,pa2); SWAPPA;
1021
SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
1022
SQR(pa1,pa2); MUL(smallExp,pa2,pa1);
1025
abort(); /* could do a loop? */
1029
res = s_mp_redc(pa1, mmm);
1030
mp_exch(pa1, result);
1035
mp_clear(&accum[0]);
1036
mp_clear(&accum[1]);
1037
mp_clear(&accum[2]);
1038
mp_clear(&accum[3]);
1040
/* PORT_Memset(powers,0,num_powers*nLen*sizeof(mp_digit)); */
1048
mp_err mp_exptmod(const mp_int *inBase, const mp_int *exponent,
1049
const mp_int *modulus, mp_int *result)
1052
mp_size bits_in_exponent, i, window_bits, odd_ints;
1055
mp_int montBase, goodBase;
1056
mp_mont_modulus mmm;
1057
#ifdef MP_USING_CACHE_SAFE_MOD_EXP
1058
static unsigned int max_window_bits;
1061
/* function for computing n0prime only works if n0 is odd */
1062
if (!mp_isodd(modulus))
1063
return s_mp_exptmod(inBase, exponent, modulus, result);
1065
MP_DIGITS(&montBase) = 0;
1066
MP_DIGITS(&goodBase) = 0;
1068
if (mp_cmp(inBase, modulus) < 0) {
1071
MP_CHECKOK( mp_init(&goodBase) );
1073
MP_CHECKOK( mp_mod(inBase, modulus, &goodBase) );
1076
nLen = MP_USED(modulus);
1077
MP_CHECKOK( mp_init_size(&montBase, 2 * nLen + 2) );
1079
mmm.N = *modulus; /* a copy of the mp_int struct */
1081
/* compute n0', given n0, n0' = -(n0 ** -1) mod MP_RADIX
1082
** where n0 = least significant mp_digit of N, the modulus.
1084
mmm.n0prime = 0 - s_mp_invmod_radix( MP_DIGIT(modulus, 0) );
1086
MP_CHECKOK( s_mp_to_mont(base, &mmm, &montBase) );
1088
bits_in_exponent = mpl_significant_bits(exponent);
1089
#ifdef MP_USING_CACHE_SAFE_MOD_EXP
1090
if (mp_using_cache_safe_exp) {
1091
if (bits_in_exponent > 780)
1093
else if (bits_in_exponent > 256)
1095
else if (bits_in_exponent > 20)
1097
/* RSA public key exponents are typically under 20 bits (common values
1098
* are: 3, 17, 65537) and a 4-bit window is inefficient
1104
if (bits_in_exponent > 480)
1106
else if (bits_in_exponent > 160)
1108
else if (bits_in_exponent > 20)
1110
/* RSA public key exponents are typically under 20 bits (common values
1111
* are: 3, 17, 65537) and a 4-bit window is inefficient
1116
#ifdef MP_USING_CACHE_SAFE_MOD_EXP
1118
* clamp the window size based on
1119
* the cache line size.
1121
if (!max_window_bits) {
1122
unsigned long cache_size = s_mpi_getProcessorLineSize();
1123
/* processor has no cache, use 'fast' code always */
1124
if (cache_size == 0) {
1125
mp_using_cache_safe_exp = 0;
1127
if ((cache_size == 0) || (cache_size >= 64)) {
1128
max_window_bits = 6;
1129
} else if (cache_size >= 32) {
1130
max_window_bits = 5;
1131
} else if (cache_size >= 16) {
1132
max_window_bits = 4;
1133
} else max_window_bits = 1; /* should this be an assert? */
1136
/* clamp the window size down before we calculate bits_in_exponent */
1137
if (mp_using_cache_safe_exp) {
1138
if (window_bits > max_window_bits) {
1139
window_bits = max_window_bits;
1144
odd_ints = 1 << (window_bits - 1);
1145
i = bits_in_exponent % window_bits;
1147
bits_in_exponent += window_bits - i;
1150
#ifdef MP_USING_MONT_MULF
1151
if (mp_using_mont_mulf) {
1152
MP_CHECKOK( s_mp_pad(&montBase, nLen) );
1153
res = mp_exptmod_f(&montBase, exponent, modulus, result, &mmm, nLen,
1154
bits_in_exponent, window_bits, odd_ints);
1157
#ifdef MP_USING_CACHE_SAFE_MOD_EXP
1158
if (mp_using_cache_safe_exp) {
1159
res = mp_exptmod_safe_i(&montBase, exponent, modulus, result, &mmm, nLen,
1160
bits_in_exponent, window_bits, 1 << window_bits);
1163
res = mp_exptmod_i(&montBase, exponent, modulus, result, &mmm, nLen,
1164
bits_in_exponent, window_bits, odd_ints);
1167
mp_clear(&montBase);
1168
mp_clear(&goodBase);
1169
/* Don't mp_clear mmm.N because it is merely a copy of modulus.
1172
memset(&mmm, 0, sizeof mmm);