#define BLOCKSIZE (128/8)

/* Helper macro to force alignment to 16 bytes.  */
#ifdef HAVE_GCC_ATTRIBUTE_ALIGNED
# define ATTR_ALIGNED_16  __attribute__ ((aligned (16)))
#else
# define ATTR_ALIGNED_16
#endif

/* USE_PADLOCK indicates whether to compile the padlock specific
   code.  */
#ifdef ENABLE_PADLOCK_SUPPORT
# if defined (__i386__) && SIZEOF_UNSIGNED_LONG == 4 && defined (__GNUC__)
#  define USE_PADLOCK 1
# endif
#endif /*ENABLE_PADLOCK_SUPPORT*/

/* USE_AESNI indicates whether to compile with Intel AES-NI code.  We
   need the vector-size attribute which seems to be available since
   gcc 3.  However, to be on the safe side we require at least gcc 4.  */
#ifdef ENABLE_AESNI_SUPPORT
# if defined (__i386__) && SIZEOF_UNSIGNED_LONG == 4 && __GNUC__ >= 4
#  define USE_AESNI 1
# endif
#endif /* ENABLE_AESNI_SUPPORT */

#ifdef USE_AESNI
typedef int m128i_t __attribute__ ((__vector_size__ (16)));
#endif /*USE_AESNI*/

/* Define a u32 variant for the sake of gcc 4.4's strict aliasing.  */
#if __GNUC__ > 4 || ( __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )
typedef u32 __attribute__ ((__may_alias__)) u32_a_t;
#else
typedef u32 u32_a_t;
#endif
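
/* Illustrative sketch (an addition, not part of the original file):
   why may_alias matters.  Under strict-aliasing rules a u32 access
   through a cast byte pointer is undefined behaviour which gcc 4.4+
   may exploit when reordering accesses; the may_alias variant makes
   such loads well-defined.  The helper name is hypothetical.  */
#if 0
static u32
load_u32_example (const unsigned char *p)
{
  /* u32_a_t may alias any object, so reading through it is fine even
     though P really points into a byte array.  */
  return *(const u32_a_t *)p;
}
#endif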

/* Our context object.  */
typedef struct
{
  /* The first fields are the keyschedule arrays.  This is so that
     they are aligned on a 16 byte boundary if using gcc.  This
     alignment is required for the AES-NI code and a good idea in any
     case.  The alignment is guaranteed due to the way cipher.c
     allocates the space for the context.  The PROPERLY_ALIGNED_TYPE
     hack is used to force a minimal alignment if not using gcc or if
     the alignment requirement is higher than 16 bytes.  */
  union
  {
    PROPERLY_ALIGNED_TYPE dummy;
    byte keyschedule[MAXROUNDS+1][4][4];
#ifdef USE_PADLOCK
    /* The key as passed to the padlock engine.  It is only used if
       the padlock engine is used (USE_PADLOCK, below).  */
    unsigned char padlock_key[16] __attribute__ ((aligned (16)));
#endif /*USE_PADLOCK*/
  } u1;
  union
  {
    PROPERLY_ALIGNED_TYPE dummy;
    byte keyschedule[MAXROUNDS+1][4][4];
  } u2;
  int rounds;               /* Key-length-dependent number of rounds.  */
  int decryption_prepared;  /* The decryption key schedule is available.  */
#ifdef USE_PADLOCK
  int use_padlock;          /* Padlock shall be used.  */
#endif /*USE_PADLOCK*/
#ifdef USE_AESNI
  int use_aesni;            /* AES-NI shall be used.  */
#endif /*USE_AESNI*/
} RIJNDAEL_context ATTR_ALIGNED_16;

/* Macros defining aliases for the keyschedules.  */
#define keyschenc  u1.keyschedule
#define keyschdec  u2.keyschedule
#define padlockkey u1.padlock_key
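
/* Illustrative check (an assumption, not from the original file): the
   AES-NI code loads round keys with movdqa, so the key schedule of a
   context allocated by cipher.c must start on a 16 byte boundary.  */
#if 0
#include <assert.h>
#include <stdint.h>
static void
check_ctx_alignment_example (const RIJNDAEL_context *ctx)
{
  assert ((((uintptr_t) ctx->keyschenc) & 15) == 0);
}
#endif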

/* Two macros to be called before and after the use of AESNI
   instructions.  There should be no external function calls between
   the use of these macros.  Their purpose is to make sure that the
   SSE registers are cleared and won't reveal any information about
   the key or the data.  */
#ifdef USE_AESNI
# define aesni_prepare() do { } while (0)
# define aesni_cleanup()                                                \
  do { asm volatile ("pxor %%xmm0, %%xmm0\n\t"                          \
                     "pxor %%xmm1, %%xmm1\n" :: );                      \
  } while (0)
# define aesni_cleanup_2_4()                                            \
  do { asm volatile ("pxor %%xmm2, %%xmm2\n\t"                          \
                     "pxor %%xmm3, %%xmm3\n\t"                          \
                     "pxor %%xmm4, %%xmm4\n":: );                       \
  } while (0)
#else
# define aesni_prepare() do { } while (0)
# define aesni_cleanup() do { } while (0)
#endif
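
/* Usage sketch (illustrative, not from the original code): the macros
   are meant to bracket any code that loads key material into the XMM
   registers, e.g.

       aesni_prepare ();
       do_aesni (ctx, 0, outbuf, inbuf);
       aesni_cleanup ();
       aesni_cleanup_2_4 ();   -- only needed if xmm2..xmm4 were used

   so that no key-dependent state is left behind in the SSE registers.  */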
/* All the numbers. */
#include "rijndael-tables.h"

/* Function prototypes.  */
#ifdef USE_AESNI
/* We don't want to inline these functions to help gcc allocate enough
   registers.  */
static void do_aesni_ctr (const RIJNDAEL_context *ctx, unsigned char *ctr,
                          unsigned char *b, const unsigned char *a)
  __attribute__ ((__noinline__));
static void do_aesni_ctr_4 (const RIJNDAEL_context *ctx, unsigned char *ctr,
                            unsigned char *b, const unsigned char *a)
  __attribute__ ((__noinline__));
#endif /*USE_AESNI*/

static const char *selftest(void);

/* Perform the key setup.  */
static gcry_err_code_t
do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen)
{
  static int initialized = 0;
  static const char *selftest_failed = 0;
  int rounds;
  int i,j, r, t, rconpointer = 0;
#ifdef USE_PADLOCK
  ctx->use_padlock = 0;
#endif /*USE_PADLOCK*/
#ifdef USE_AESNI
  ctx->use_aesni = 0;
#endif /*USE_AESNI*/

if( keylen == 128/8 )
    {
      rounds = 10;
      KC = 4;

      if (0)
        ;
#ifdef USE_PADLOCK
else if ((_gcry_get_hw_features () & HWF_PADLOCK_AES))
        {
ctx->use_padlock = 1;
memcpy (ctx->padlockkey, key, keylen);
        }
#endif /*USE_PADLOCK*/
#ifdef USE_AESNI
else if ((_gcry_get_hw_features () & HWF_INTEL_AESNI))
        {
          ctx->use_aesni = 1;
        }
#endif /*USE_AESNI*/
    }
else if ( keylen == 192/8 )
    {
      rounds = 12;
      KC = 6;

      if (0)
        ;
#ifdef USE_AESNI
else if ((_gcry_get_hw_features () & HWF_INTEL_AESNI))
        {
          ctx->use_aesni = 1;
        }
#endif /*USE_AESNI*/
    }
else if ( keylen == 256/8 )
    {
      rounds = 14;
      KC = 8;

      if (0)
        ;
#ifdef USE_AESNI
else if ((_gcry_get_hw_features () & HWF_INTEL_AESNI))
        {
          ctx->use_aesni = 1;
        }
#endif /*USE_AESNI*/
    }
  else
return GPG_ERR_INV_KEYLEN;
ctx->rounds = rounds;
/* NB: We don't yet support Padlock hardware key generation. */

  if (0)
    ;
#ifdef USE_AESNI_is_disabled_here
else if (ctx->use_aesni && ctx->rounds == 10)
    {
      /* Note: This code works for AES-128 but it is not much better
         than using the standard key schedule.  We disable it for
         now and don't put any effort into implementing this for
         AES-192 and AES-256.  */
asm volatile ("movl %[key], %%esi\n\t"
294
"movdqu (%%esi), %%xmm1\n\t" /* xmm1 := key */
295
"movl %[ksch], %%esi\n\t"
296
"movdqa %%xmm1, (%%esi)\n\t" /* ksch[0] := xmm1 */
297
"aeskeygenassist $0x01, %%xmm1, %%xmm2\n\t"
298
"call .Lexpand128_%=\n\t"
299
"movdqa %%xmm1, 0x10(%%esi)\n\t" /* ksch[1] := xmm1 */
300
"aeskeygenassist $0x02, %%xmm1, %%xmm2\n\t"
301
"call .Lexpand128_%=\n\t"
302
"movdqa %%xmm1, 0x20(%%esi)\n\t" /* ksch[2] := xmm1 */
303
"aeskeygenassist $0x04, %%xmm1, %%xmm2\n\t"
304
"call .Lexpand128_%=\n\t"
305
"movdqa %%xmm1, 0x30(%%esi)\n\t" /* ksch[3] := xmm1 */
306
"aeskeygenassist $0x08, %%xmm1, %%xmm2\n\t"
307
"call .Lexpand128_%=\n\t"
308
"movdqa %%xmm1, 0x40(%%esi)\n\t" /* ksch[4] := xmm1 */
309
"aeskeygenassist $0x10, %%xmm1, %%xmm2\n\t"
310
"call .Lexpand128_%=\n\t"
311
"movdqa %%xmm1, 0x50(%%esi)\n\t" /* ksch[5] := xmm1 */
312
"aeskeygenassist $0x20, %%xmm1, %%xmm2\n\t"
313
"call .Lexpand128_%=\n\t"
314
"movdqa %%xmm1, 0x60(%%esi)\n\t" /* ksch[6] := xmm1 */
315
"aeskeygenassist $0x40, %%xmm1, %%xmm2\n\t"
316
"call .Lexpand128_%=\n\t"
317
"movdqa %%xmm1, 0x70(%%esi)\n\t" /* ksch[7] := xmm1 */
318
"aeskeygenassist $0x80, %%xmm1, %%xmm2\n\t"
319
"call .Lexpand128_%=\n\t"
320
"movdqa %%xmm1, 0x80(%%esi)\n\t" /* ksch[8] := xmm1 */
321
"aeskeygenassist $0x1b, %%xmm1, %%xmm2\n\t"
322
"call .Lexpand128_%=\n\t"
323
"movdqa %%xmm1, 0x90(%%esi)\n\t" /* ksch[9] := xmm1 */
324
"aeskeygenassist $0x36, %%xmm1, %%xmm2\n\t"
325
"call .Lexpand128_%=\n\t"
326
"movdqa %%xmm1, 0xa0(%%esi)\n\t" /* ksch[10] := xmm1 */
329
".Lexpand128_%=:\n\t"
330
"pshufd $0xff, %%xmm2, %%xmm2\n\t"
331
"movdqa %%xmm1, %%xmm3\n\t"
332
"pslldq $4, %%xmm3\n\t"
333
"pxor %%xmm3, %%xmm1\n\t"
334
"pslldq $4, %%xmm3\n\t"
335
"pxor %%xmm3, %%xmm1\n\t"
336
"pslldq $4, %%xmm3\n\t"
337
"pxor %%xmm3, %%xmm2\n\t"
338
"pxor %%xmm2, %%xmm1\n\t"
342
"pxor %%xmm1, %%xmm1\n\t"
343
"pxor %%xmm2, %%xmm2\n\t"
344
"pxor %%xmm3, %%xmm3\n"
346
: [key] "g" (key), [ksch] "g" (ctx->keyschenc)
347
: "%esi", "cc", "memory" );
    }
#endif /*USE_AESNI_is_disabled_here*/
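
/* Plain-C model (an illustrative sketch, not part of the original) of
   one ".Lexpand128" step above.  W[0..3] is the previous round key as
   little-endian words and ASSIST is the dword produced by
   aeskeygenassist/pshufd, i.e. SubWord(RotWord(W[3])) ^ Rcon; the
   pslldq/pxor ladder then computes the FIPS-197 recurrence:  */
#if 0
static void
expand128_step_example (u32 w[4], u32 assist)
{
  w[0] ^= assist;  /* w0' = w0 ^ t   */
  w[1] ^= w[0];    /* w1' = w1 ^ w0' */
  w[2] ^= w[1];    /* w2' = w2 ^ w1' */
  w[3] ^= w[2];    /* w3' = w3 ^ w2' */
}
#endif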
  else
    {
#define W (ctx->keyschenc)
      for (i = 0; i < keylen; i++)
        {
          k[i >> 2][i & 3] = key[i];
        }

      for (j = KC-1; j >= 0; j--)
        {
          *((u32_a_t*)tk[j]) = *((u32_a_t*)k[j]);
        }

      r = 0;
      t = 0;
      /* Copy values into round key array.  */
      for (j = 0; (j < KC) && (r < rounds + 1); )
        {
          for (; (j < KC) && (t < 4); j++, t++)
            {
              *((u32_a_t*)W[r][t]) = *((u32_a_t*)tk[j]);
            }
  *((u32_a_t*)u.temp[0]) = *((u32_a_t*)(a   )) ^ *((u32_a_t*)rk[0][0]);
  *((u32_a_t*)u.temp[1]) = *((u32_a_t*)(a+ 4)) ^ *((u32_a_t*)rk[0][1]);
  *((u32_a_t*)u.temp[2]) = *((u32_a_t*)(a+ 8)) ^ *((u32_a_t*)rk[0][2]);
  *((u32_a_t*)u.temp[3]) = *((u32_a_t*)(a+12)) ^ *((u32_a_t*)rk[0][3]);
  *((u32_a_t*)(b    )) = (*((u32_a_t*)T1[u.temp[0][0]])
                          ^ *((u32_a_t*)T2[u.temp[1][1]])
                          ^ *((u32_a_t*)T3[u.temp[2][2]])
                          ^ *((u32_a_t*)T4[u.temp[3][3]]));
  *((u32_a_t*)(b + 4)) = (*((u32_a_t*)T1[u.temp[1][0]])
                          ^ *((u32_a_t*)T2[u.temp[2][1]])
                          ^ *((u32_a_t*)T3[u.temp[3][2]])
                          ^ *((u32_a_t*)T4[u.temp[0][3]]));
  *((u32_a_t*)(b + 8)) = (*((u32_a_t*)T1[u.temp[2][0]])
                          ^ *((u32_a_t*)T2[u.temp[3][1]])
                          ^ *((u32_a_t*)T3[u.temp[0][2]])
                          ^ *((u32_a_t*)T4[u.temp[1][3]]));
  *((u32_a_t*)(b +12)) = (*((u32_a_t*)T1[u.temp[3][0]])
                          ^ *((u32_a_t*)T2[u.temp[0][1]])
                          ^ *((u32_a_t*)T3[u.temp[1][2]])
                          ^ *((u32_a_t*)T4[u.temp[2][3]]));
for (r = 1; r < rounds-1; r++)
    {
      *((u32_a_t*)u.temp[0]) = *((u32_a_t*)(b   )) ^ *((u32_a_t*)rk[r][0]);
      *((u32_a_t*)u.temp[1]) = *((u32_a_t*)(b+ 4)) ^ *((u32_a_t*)rk[r][1]);
      *((u32_a_t*)u.temp[2]) = *((u32_a_t*)(b+ 8)) ^ *((u32_a_t*)rk[r][2]);
      *((u32_a_t*)u.temp[3]) = *((u32_a_t*)(b+12)) ^ *((u32_a_t*)rk[r][3]);
      *((u32_a_t*)(b    )) = (*((u32_a_t*)T1[u.temp[0][0]])
                              ^ *((u32_a_t*)T2[u.temp[1][1]])
                              ^ *((u32_a_t*)T3[u.temp[2][2]])
                              ^ *((u32_a_t*)T4[u.temp[3][3]]));
      *((u32_a_t*)(b + 4)) = (*((u32_a_t*)T1[u.temp[1][0]])
                              ^ *((u32_a_t*)T2[u.temp[2][1]])
                              ^ *((u32_a_t*)T3[u.temp[3][2]])
                              ^ *((u32_a_t*)T4[u.temp[0][3]]));
      *((u32_a_t*)(b + 8)) = (*((u32_a_t*)T1[u.temp[2][0]])
                              ^ *((u32_a_t*)T2[u.temp[3][1]])
                              ^ *((u32_a_t*)T3[u.temp[0][2]])
                              ^ *((u32_a_t*)T4[u.temp[1][3]]));
      *((u32_a_t*)(b +12)) = (*((u32_a_t*)T1[u.temp[3][0]])
                              ^ *((u32_a_t*)T2[u.temp[0][1]])
                              ^ *((u32_a_t*)T3[u.temp[1][2]])
                              ^ *((u32_a_t*)T4[u.temp[2][3]]));
    }

/* Last round is special. */
  *((u32_a_t*)u.temp[0]) = *((u32_a_t*)(b   )) ^ *((u32_a_t*)rk[rounds-1][0]);
  *((u32_a_t*)u.temp[1]) = *((u32_a_t*)(b+ 4)) ^ *((u32_a_t*)rk[rounds-1][1]);
  *((u32_a_t*)u.temp[2]) = *((u32_a_t*)(b+ 8)) ^ *((u32_a_t*)rk[rounds-1][2]);
  *((u32_a_t*)u.temp[3]) = *((u32_a_t*)(b+12)) ^ *((u32_a_t*)rk[rounds-1][3]);
  b[ 0] = T1[u.temp[0][0]][1];
  b[ 1] = T1[u.temp[1][1]][1];
  b[ 2] = T1[u.temp[2][2]][1];
#endif /*USE_PADLOCK*/

/* Encrypt one block using the Intel AES-NI instructions.  A and B may
   be the same; they need to be properly aligned to 16 bytes.

   Our problem here is that gcc does not allow the "x" constraint for
   SSE registers in asm unless you compile with -msse.  The common
   wisdom is to use a separate file for SSE instructions and build it
   separately.  This would require a lot of extra build system stuff,
   similar to what we do in mpi/ for the asm stuff.  What we do
   instead is to use standard registers and a bit more of plain asm
   which copies the data and key stuff to the SSE registers and later
   back.  If we decide to implement some block modes with parallelized
   AES instructions, it might indeed be better to use plain asm a la
   mpi/.  */
static void
do_aesni_enc_aligned (const RIJNDAEL_context *ctx,
unsigned char *b, const unsigned char *a)
{
#define aesenc_xmm1_xmm0      ".byte 0x66, 0x0f, 0x38, 0xdc, 0xc1\n\t"
#define aesenclast_xmm1_xmm0  ".byte 0x66, 0x0f, 0x38, 0xdd, 0xc1\n\t"
  /* Note: For now we relax the alignment requirement for A and B: It
     does not make much difference because in many cases we would need
     to memcpy them to an extra buffer; using movdqu is much faster
     than memcpy and movdqa.  For CFB we know that the IV is properly
     aligned but that is a special case.  We should better implement
     CFB directly in asm.  */
  asm volatile ("movdqu %[src], %%xmm0\n\t"     /* xmm0 := *a     */
                "movl   %[key], %%esi\n\t"      /* esi  := keyschenc */
                "movdqa (%%esi), %%xmm1\n\t"    /* xmm1 := key[0] */
                "pxor   %%xmm1, %%xmm0\n\t"     /* xmm0 ^= key[0] */
                "movdqa 0x10(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x20(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x30(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x40(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x50(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x60(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x70(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x80(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x90(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0xa0(%%esi), %%xmm1\n\t"
                "cmp $10, %[rounds]\n\t"
                "jz .Lenclast%=\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0xb0(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0xc0(%%esi), %%xmm1\n\t"
                "cmp $12, %[rounds]\n\t"
                "jz .Lenclast%=\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0xd0(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0xe0(%%esi), %%xmm1\n"

                ".Lenclast%=:\n\t"
                aesenclast_xmm1_xmm0
                "movdqu %%xmm0, %[dst]\n"
                : [dst] "=m" (*b)
                : [src] "m" (*a),
                  [key] "r" (ctx->keyschenc),
                  [rounds] "r" (ctx->rounds)
                : "%esi", "cc", "memory");
#undef aesenc_xmm1_xmm0
#undef aesenclast_xmm1_xmm0
}
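
/* In C terms (an illustrative model, not from the original), the
   cmp/jz ladder above implements

       state ^= rk[0];
       for (r = 1; r < rounds; r++)
         state = AESENC (state, rk[r]);
       state = AESENCLAST (state, rk[rounds]);

   with rounds being 10, 12 or 14; AESENC/AESENCLAST stand for the
   instructions emitted via the byte-coded macros.  */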
static void
do_aesni_dec_aligned (const RIJNDAEL_context *ctx,
unsigned char *b, const unsigned char *a)
{
#define aesdec_xmm1_xmm0      ".byte 0x66, 0x0f, 0x38, 0xde, 0xc1\n\t"
#define aesdeclast_xmm1_xmm0  ".byte 0x66, 0x0f, 0x38, 0xdf, 0xc1\n\t"
asm volatile ("movdqu %[src], %%xmm0\n\t" /* xmm0 := *a */
761
"movl %[key], %%esi\n\t"
762
"movdqa (%%esi), %%xmm1\n\t"
763
"pxor %%xmm1, %%xmm0\n\t" /* xmm0 ^= key[0] */
764
"movdqa 0x10(%%esi), %%xmm1\n\t"
766
"movdqa 0x20(%%esi), %%xmm1\n\t"
768
"movdqa 0x30(%%esi), %%xmm1\n\t"
770
"movdqa 0x40(%%esi), %%xmm1\n\t"
772
"movdqa 0x50(%%esi), %%xmm1\n\t"
774
"movdqa 0x60(%%esi), %%xmm1\n\t"
776
"movdqa 0x70(%%esi), %%xmm1\n\t"
778
"movdqa 0x80(%%esi), %%xmm1\n\t"
780
"movdqa 0x90(%%esi), %%xmm1\n\t"
782
"movdqa 0xa0(%%esi), %%xmm1\n\t"
783
"cmp $10, %[rounds]\n\t"
786
"movdqa 0xb0(%%esi), %%xmm1\n\t"
788
"movdqa 0xc0(%%esi), %%xmm1\n\t"
789
"cmp $12, %[rounds]\n\t"
792
"movdqa 0xd0(%%esi), %%xmm1\n\t"
794
"movdqa 0xe0(%%esi), %%xmm1\n"
798
"movdqu %%xmm0, %[dst]\n"
801
[key] "r" (ctx->keyschdec),
802
[rounds] "r" (ctx->rounds)
803
: "%esi", "cc", "memory");
#undef aesdec_xmm1_xmm0
#undef aesdeclast_xmm1_xmm0
}

/* Perform a CFB encryption or decryption round using the
   initialization vector IV and the input block A.  Write the result
   to the output block B and update IV.  IV needs to be 16 byte
   aligned.  */
static void
do_aesni_cfb (const RIJNDAEL_context *ctx, int decrypt_flag,
unsigned char *iv, unsigned char *b, const unsigned char *a)
{
#define aesenc_xmm1_xmm0      ".byte 0x66, 0x0f, 0x38, 0xdc, 0xc1\n\t"
#define aesenclast_xmm1_xmm0  ".byte 0x66, 0x0f, 0x38, 0xdd, 0xc1\n\t"
asm volatile ("movdqa %[iv], %%xmm0\n\t" /* xmm0 := IV */
820
"movl %[key], %%esi\n\t" /* esi := keyschenc */
821
"movdqa (%%esi), %%xmm1\n\t" /* xmm1 := key[0] */
822
"pxor %%xmm1, %%xmm0\n\t" /* xmm0 ^= key[0] */
823
"movdqa 0x10(%%esi), %%xmm1\n\t"
825
"movdqa 0x20(%%esi), %%xmm1\n\t"
827
"movdqa 0x30(%%esi), %%xmm1\n\t"
829
"movdqa 0x40(%%esi), %%xmm1\n\t"
831
"movdqa 0x50(%%esi), %%xmm1\n\t"
833
"movdqa 0x60(%%esi), %%xmm1\n\t"
835
"movdqa 0x70(%%esi), %%xmm1\n\t"
837
"movdqa 0x80(%%esi), %%xmm1\n\t"
839
"movdqa 0x90(%%esi), %%xmm1\n\t"
841
"movdqa 0xa0(%%esi), %%xmm1\n\t"
842
"cmp $10, %[rounds]\n\t"
845
"movdqa 0xb0(%%esi), %%xmm1\n\t"
847
"movdqa 0xc0(%%esi), %%xmm1\n\t"
848
"cmp $12, %[rounds]\n\t"
851
"movdqa 0xd0(%%esi), %%xmm1\n\t"
853
"movdqa 0xe0(%%esi), %%xmm1\n"
857
"movdqu %[src], %%xmm1\n\t" /* Save input. */
858
"pxor %%xmm1, %%xmm0\n\t" /* xmm0 = input ^ IV */
860
"cmp $1, %[decrypt]\n\t"
861
"jz .Ldecrypt_%=\n\t"
862
"movdqa %%xmm0, %[iv]\n\t" /* [encrypt] Store IV. */
865
"movdqa %%xmm1, %[iv]\n" /* [decrypt] Store IV. */
867
"movdqu %%xmm0, %[dst]\n" /* Store output. */
868
: [iv] "+m" (*iv), [dst] "=m" (*b)
870
[key] "g" (ctx->keyschenc),
871
[rounds] "g" (ctx->rounds),
872
[decrypt] "m" (decrypt_flag)
873
: "%esi", "cc", "memory");
#undef aesenc_xmm1_xmm0
#undef aesenclast_xmm1_xmm0
}
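
/* Plain-C model (an illustrative sketch, not part of the original) of
   the CFB round above: B = E(IV) ^ A; the new IV is the ciphertext,
   i.e. B when encrypting and A when decrypting.  The function pointer
   type is hypothetical.  */
#if 0
#include <string.h>
typedef void (*blockcipher_fn_t)(unsigned char *out, const unsigned char *in);
static void
cfb_round_example (blockcipher_fn_t enc, int decrypt_flag,
                   unsigned char *iv, unsigned char *b,
                   const unsigned char *a)
{
  unsigned char eiv[16], in[16];
  int i;

  memcpy (in, a, 16);          /* A and B may be the same buffer.  */
  enc (eiv, iv);               /* eiv = E(IV)                      */
  for (i = 0; i < 16; i++)
    b[i] = eiv[i] ^ in[i];     /* output = E(IV) ^ input           */
  memcpy (iv, decrypt_flag ? in : b, 16);
}
#endif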

/* Perform a CTR encryption round using the counter CTR and the input
   block A.  Write the result to the output block B and update CTR.
   CTR needs to be a 16 byte aligned little-endian value.  */
static void
do_aesni_ctr (const RIJNDAEL_context *ctx,
unsigned char *ctr, unsigned char *b, const unsigned char *a)
{
#define aesenc_xmm1_xmm0      ".byte 0x66, 0x0f, 0x38, 0xdc, 0xc1\n\t"
#define aesenclast_xmm1_xmm0  ".byte 0x66, 0x0f, 0x38, 0xdd, 0xc1\n\t"
static unsigned char be_mask[16] __attribute__ ((aligned (16))) =
{ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
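
  /* Note (an addition, not in the original): pshufb with BE_MASK
     reverses the 16 bytes of an XMM register, turning the big-endian
     counter into a little-endian value that paddq can increment, and
     back again afterwards.  */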
asm volatile ("movdqa %[ctr], %%xmm0\n\t" /* xmm0, xmm2 := CTR */
891
"movaps %%xmm0, %%xmm2\n\t"
892
"mov $1, %%esi\n\t" /* xmm2++ (big-endian) */
893
"movd %%esi, %%xmm1\n\t"
894
"pshufb %[mask], %%xmm2\n\t"
895
"paddq %%xmm1, %%xmm2\n\t"
896
"pshufb %[mask], %%xmm2\n\t"
897
"movdqa %%xmm2, %[ctr]\n" /* Update CTR. */
899
"movl %[key], %%esi\n\t" /* esi := keyschenc */
900
"movdqa (%%esi), %%xmm1\n\t" /* xmm1 := key[0] */
901
"pxor %%xmm1, %%xmm0\n\t" /* xmm0 ^= key[0] */
902
"movdqa 0x10(%%esi), %%xmm1\n\t"
904
"movdqa 0x20(%%esi), %%xmm1\n\t"
906
"movdqa 0x30(%%esi), %%xmm1\n\t"
908
"movdqa 0x40(%%esi), %%xmm1\n\t"
910
"movdqa 0x50(%%esi), %%xmm1\n\t"
912
"movdqa 0x60(%%esi), %%xmm1\n\t"
914
"movdqa 0x70(%%esi), %%xmm1\n\t"
916
"movdqa 0x80(%%esi), %%xmm1\n\t"
918
"movdqa 0x90(%%esi), %%xmm1\n\t"
920
"movdqa 0xa0(%%esi), %%xmm1\n\t"
921
"cmp $10, %[rounds]\n\t"
924
"movdqa 0xb0(%%esi), %%xmm1\n\t"
926
"movdqa 0xc0(%%esi), %%xmm1\n\t"
927
"cmp $12, %[rounds]\n\t"
930
"movdqa 0xd0(%%esi), %%xmm1\n\t"
932
"movdqa 0xe0(%%esi), %%xmm1\n"
936
"movdqu %[src], %%xmm1\n\t" /* xmm1 := input */
937
"pxor %%xmm1, %%xmm0\n\t" /* EncCTR ^= input */
938
"movdqu %%xmm0, %[dst]" /* Store EncCTR. */
940
: [ctr] "+m" (*ctr), [dst] "=m" (*b)
942
[key] "g" (ctx->keyschenc),
943
[rounds] "g" (ctx->rounds),
944
[mask] "m" (*be_mask)
945
: "%esi", "cc", "memory");
#undef aesenc_xmm1_xmm0
#undef aesenclast_xmm1_xmm0
}
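
/* Plain-C model (an illustrative sketch, not part of the original) of
   the CTR round above: B = E(CTR) ^ A, then CTR is incremented as a
   16-byte big-endian integer.  The function pointer type is
   hypothetical.  */
#if 0
typedef void (*ctr_blockcipher_fn_t)(unsigned char *out,
                                     const unsigned char *in);
static void
ctr_round_example (ctr_blockcipher_fn_t enc, unsigned char *ctr,
                   unsigned char *b, const unsigned char *a)
{
  unsigned char ectr[16];
  int i;

  enc (ectr, ctr);             /* ectr = E(CTR)            */
  for (i = 0; i < 16; i++)
    b[i] = ectr[i] ^ a[i];     /* output = E(CTR) ^ input  */
  for (i = 15; i >= 0; i--)    /* CTR++ (big-endian)       */
    if (++ctr[i])
      break;
}
#endif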
/* Four blocks at a time variant of do_aesni_ctr. */
static void
do_aesni_ctr_4 (const RIJNDAEL_context *ctx,
unsigned char *ctr, unsigned char *b, const unsigned char *a)
{
#define aesenc_xmm1_xmm0      ".byte 0x66, 0x0f, 0x38, 0xdc, 0xc1\n\t"
#define aesenc_xmm1_xmm2      ".byte 0x66, 0x0f, 0x38, 0xdc, 0xd1\n\t"
#define aesenc_xmm1_xmm3      ".byte 0x66, 0x0f, 0x38, 0xdc, 0xd9\n\t"
#define aesenc_xmm1_xmm4      ".byte 0x66, 0x0f, 0x38, 0xdc, 0xe1\n\t"
#define aesenclast_xmm1_xmm0  ".byte 0x66, 0x0f, 0x38, 0xdd, 0xc1\n\t"
#define aesenclast_xmm1_xmm2  ".byte 0x66, 0x0f, 0x38, 0xdd, 0xd1\n\t"
#define aesenclast_xmm1_xmm3  ".byte 0x66, 0x0f, 0x38, 0xdd, 0xd9\n\t"
#define aesenclast_xmm1_xmm4  ".byte 0x66, 0x0f, 0x38, 0xdd, 0xe1\n\t"
static unsigned char be_mask[16] __attribute__ ((aligned (16))) =
{ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };

  /* Register usage:
      esi   keyschedule
      xmm0  CTR-0
      xmm1  temp / round key
      xmm2  CTR-1
      xmm3  CTR-2
      xmm4  CTR-3
      xmm5  temp
   */
  asm volatile ("movdqa %[ctr], %%xmm0\n\t"     /* xmm0, xmm2 := CTR  */
                "movaps %%xmm0, %%xmm2\n\t"
                "mov    $1, %%esi\n\t"          /* xmm1 := 1          */
                "movd   %%esi, %%xmm1\n\t"
                "pshufb %[mask], %%xmm2\n\t"    /* xmm2 := le(xmm2)   */
                "paddq  %%xmm1, %%xmm2\n\t"     /* xmm2++             */
                "movaps %%xmm2, %%xmm3\n\t"     /* xmm3 := xmm2       */
                "paddq  %%xmm1, %%xmm3\n\t"     /* xmm3++             */
                "movaps %%xmm3, %%xmm4\n\t"     /* xmm4 := xmm3       */
                "paddq  %%xmm1, %%xmm4\n\t"     /* xmm4++             */
                "movaps %%xmm4, %%xmm5\n\t"     /* xmm5 := xmm4       */
                "paddq  %%xmm1, %%xmm5\n\t"     /* xmm5++             */
                "pshufb %[mask], %%xmm2\n\t"    /* xmm2 := be(xmm2)   */
                "pshufb %[mask], %%xmm3\n\t"    /* xmm3 := be(xmm3)   */
                "pshufb %[mask], %%xmm4\n\t"    /* xmm4 := be(xmm4)   */
                "pshufb %[mask], %%xmm5\n\t"    /* xmm5 := be(xmm5)   */
                "movdqa %%xmm5, %[ctr]\n"       /* Update CTR.        */

                "movl   %[key], %%esi\n\t"      /* esi := keyschenc   */
                "movdqa (%%esi), %%xmm1\n\t"    /* xmm1 := key[0]     */
                "pxor   %%xmm1, %%xmm0\n\t"     /* xmm0 ^= key[0]     */
                "pxor   %%xmm1, %%xmm2\n\t"     /* xmm2 ^= key[0]     */
                "pxor   %%xmm1, %%xmm3\n\t"     /* xmm3 ^= key[0]     */
                "pxor   %%xmm1, %%xmm4\n\t"     /* xmm4 ^= key[0]     */
                "movdqa 0x10(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                aesenc_xmm1_xmm2
                aesenc_xmm1_xmm3
                aesenc_xmm1_xmm4
                "movdqa 0x20(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                aesenc_xmm1_xmm2
                aesenc_xmm1_xmm3
                aesenc_xmm1_xmm4
                "movdqa 0x30(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                aesenc_xmm1_xmm2
                aesenc_xmm1_xmm3
                aesenc_xmm1_xmm4
                "movdqa 0x40(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                aesenc_xmm1_xmm2
                aesenc_xmm1_xmm3
                aesenc_xmm1_xmm4
                "movdqa 0x50(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                aesenc_xmm1_xmm2
                aesenc_xmm1_xmm3
                aesenc_xmm1_xmm4
                "movdqa 0x60(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                aesenc_xmm1_xmm2
                aesenc_xmm1_xmm3
                aesenc_xmm1_xmm4
                "movdqa 0x70(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                aesenc_xmm1_xmm2
                aesenc_xmm1_xmm3
                aesenc_xmm1_xmm4
                "movdqa 0x80(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                aesenc_xmm1_xmm2
                aesenc_xmm1_xmm3
                aesenc_xmm1_xmm4
                "movdqa 0x90(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                aesenc_xmm1_xmm2
                aesenc_xmm1_xmm3
                aesenc_xmm1_xmm4
                "movdqa 0xa0(%%esi), %%xmm1\n\t"
                "cmp $10, %[rounds]\n\t"
                "jz .Lenclast%=\n\t"
                aesenc_xmm1_xmm0
                aesenc_xmm1_xmm2
                aesenc_xmm1_xmm3
                aesenc_xmm1_xmm4
                "movdqa 0xb0(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                aesenc_xmm1_xmm2
                aesenc_xmm1_xmm3
                aesenc_xmm1_xmm4
                "movdqa 0xc0(%%esi), %%xmm1\n\t"
                "cmp $12, %[rounds]\n\t"
                "jz .Lenclast%=\n\t"
                aesenc_xmm1_xmm0
                aesenc_xmm1_xmm2
                aesenc_xmm1_xmm3
                aesenc_xmm1_xmm4
                "movdqa 0xd0(%%esi), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                aesenc_xmm1_xmm2
                aesenc_xmm1_xmm3
                aesenc_xmm1_xmm4
                "movdqa 0xe0(%%esi), %%xmm1\n"

                ".Lenclast%=:\n\t"
                aesenclast_xmm1_xmm0
                aesenclast_xmm1_xmm2
                aesenclast_xmm1_xmm3
                aesenclast_xmm1_xmm4

                "movdqu %[src], %%xmm1\n\t"      /* Get block 1.      */
                "pxor %%xmm1, %%xmm0\n\t"        /* EncCTR-1 ^= input */
                "movdqu %%xmm0, %[dst]\n\t"      /* Store block 1     */

                "movdqu (16)%[src], %%xmm1\n\t"  /* Get block 2.      */
                "pxor %%xmm1, %%xmm2\n\t"        /* EncCTR-2 ^= input */
                "movdqu %%xmm2, (16)%[dst]\n\t"  /* Store block 2.    */

                "movdqu (32)%[src], %%xmm1\n\t"  /* Get block 3.      */
                "pxor %%xmm1, %%xmm3\n\t"        /* EncCTR-3 ^= input */
                "movdqu %%xmm3, (32)%[dst]\n\t"  /* Store block 3.    */

                "movdqu (48)%[src], %%xmm1\n\t"  /* Get block 4.      */
                "pxor %%xmm1, %%xmm4\n\t"        /* EncCTR-4 ^= input */
                "movdqu %%xmm4, (48)%[dst]"      /* Store block 4.    */
                : [ctr] "+m" (*ctr), [dst] "=m" (*b)
                : [src] "m" (*a),
                  [key] "g" (ctx->keyschenc),
                  [rounds] "g" (ctx->rounds),
                  [mask] "m" (*be_mask)
                : "%esi", "cc", "memory");
#undef aesenc_xmm1_xmm0
#undef aesenc_xmm1_xmm2
#undef aesenc_xmm1_xmm3
#undef aesenc_xmm1_xmm4
#undef aesenclast_xmm1_xmm0
#undef aesenclast_xmm1_xmm2
#undef aesenclast_xmm1_xmm3
#undef aesenclast_xmm1_xmm4
}


static void
do_aesni (RIJNDAEL_context *ctx, int decrypt_flag,
unsigned char *bx, const unsigned char *ax)
{
  if (decrypt_flag)
    {
if (!ctx->decryption_prepared )
        {
prepare_decryption ( ctx );
ctx->decryption_prepared = 1;
        }
do_aesni_dec_aligned (ctx, bx, ax);
    }
  else
do_aesni_enc_aligned (ctx, bx, ax);
}
#endif /*USE_AESNI*/


static void
rijndael_encrypt (void *context, byte *b, const byte *a)
{
RIJNDAEL_context *ctx = context;

  if (0)
    ;
#ifdef USE_PADLOCK
else if (ctx->use_padlock)
    {
do_padlock (ctx, 0, b, a);
_gcry_burn_stack (48 + 15 /* possible padding for alignment */);
    }
#endif /*USE_PADLOCK*/
#ifdef USE_AESNI
else if (ctx->use_aesni)
    {
      aesni_prepare ();
do_aesni (ctx, 0, b, a);
      aesni_cleanup ();
    }
#endif /*USE_AESNI*/
  else
    {
do_encrypt (ctx, b, a);
_gcry_burn_stack (56 + 2*sizeof(int));
    }
}


/* Decrypt one block.  A and B need to be aligned on a 16 byte boundary
   and the decryption must have been prepared.  A and B may be the
   same.  */
static void
do_decrypt_aligned (RIJNDAEL_context *ctx,
unsigned char *b, const unsigned char *a)
{
#define rk (ctx->keyschdec)
int rounds = ctx->rounds;
  int r;
  union
  {
u32 tempu32[4]; /* Force correct alignment. */
byte temp[4][4];
  } u;

  *((u32_a_t*)u.temp[0]) = *((u32_a_t*)(a   )) ^ *((u32_a_t*)rk[rounds][0]);
  *((u32_a_t*)u.temp[1]) = *((u32_a_t*)(a+ 4)) ^ *((u32_a_t*)rk[rounds][1]);
  *((u32_a_t*)u.temp[2]) = *((u32_a_t*)(a+ 8)) ^ *((u32_a_t*)rk[rounds][2]);
  *((u32_a_t*)u.temp[3]) = *((u32_a_t*)(a+12)) ^ *((u32_a_t*)rk[rounds][3]);

  *((u32_a_t*)(b   )) = (*((u32_a_t*)T5[u.temp[0][0]])
                         ^ *((u32_a_t*)T6[u.temp[3][1]])
                         ^ *((u32_a_t*)T7[u.temp[2][2]])
                         ^ *((u32_a_t*)T8[u.temp[1][3]]));
  *((u32_a_t*)(b+ 4)) = (*((u32_a_t*)T5[u.temp[1][0]])
                         ^ *((u32_a_t*)T6[u.temp[0][1]])
                         ^ *((u32_a_t*)T7[u.temp[3][2]])
                         ^ *((u32_a_t*)T8[u.temp[2][3]]));
  *((u32_a_t*)(b+ 8)) = (*((u32_a_t*)T5[u.temp[2][0]])
                         ^ *((u32_a_t*)T6[u.temp[1][1]])
                         ^ *((u32_a_t*)T7[u.temp[0][2]])
                         ^ *((u32_a_t*)T8[u.temp[3][3]]));
  *((u32_a_t*)(b+12)) = (*((u32_a_t*)T5[u.temp[3][0]])
                         ^ *((u32_a_t*)T6[u.temp[2][1]])
                         ^ *((u32_a_t*)T7[u.temp[1][2]])
                         ^ *((u32_a_t*)T8[u.temp[0][3]]));
for (r = rounds-1; r > 1; r--)
    {
      *((u32_a_t*)u.temp[0]) = *((u32_a_t*)(b   )) ^ *((u32_a_t*)rk[r][0]);
      *((u32_a_t*)u.temp[1]) = *((u32_a_t*)(b+ 4)) ^ *((u32_a_t*)rk[r][1]);
      *((u32_a_t*)u.temp[2]) = *((u32_a_t*)(b+ 8)) ^ *((u32_a_t*)rk[r][2]);
      *((u32_a_t*)u.temp[3]) = *((u32_a_t*)(b+12)) ^ *((u32_a_t*)rk[r][3]);
      *((u32_a_t*)(b   )) = (*((u32_a_t*)T5[u.temp[0][0]])
                             ^ *((u32_a_t*)T6[u.temp[3][1]])
                             ^ *((u32_a_t*)T7[u.temp[2][2]])
                             ^ *((u32_a_t*)T8[u.temp[1][3]]));
      *((u32_a_t*)(b+ 4)) = (*((u32_a_t*)T5[u.temp[1][0]])
                             ^ *((u32_a_t*)T6[u.temp[0][1]])
                             ^ *((u32_a_t*)T7[u.temp[3][2]])
                             ^ *((u32_a_t*)T8[u.temp[2][3]]));
      *((u32_a_t*)(b+ 8)) = (*((u32_a_t*)T5[u.temp[2][0]])
                             ^ *((u32_a_t*)T6[u.temp[1][1]])
                             ^ *((u32_a_t*)T7[u.temp[0][2]])
                             ^ *((u32_a_t*)T8[u.temp[3][3]]));
      *((u32_a_t*)(b+12)) = (*((u32_a_t*)T5[u.temp[3][0]])
                             ^ *((u32_a_t*)T6[u.temp[2][1]])
                             ^ *((u32_a_t*)T7[u.temp[1][2]])
                             ^ *((u32_a_t*)T8[u.temp[0][3]]));
    }

/* Last round is special. */
  *((u32_a_t*)u.temp[0]) = *((u32_a_t*)(b   )) ^ *((u32_a_t*)rk[1][0]);
  *((u32_a_t*)u.temp[1]) = *((u32_a_t*)(b+ 4)) ^ *((u32_a_t*)rk[1][1]);
  *((u32_a_t*)u.temp[2]) = *((u32_a_t*)(b+ 8)) ^ *((u32_a_t*)rk[1][2]);
  *((u32_a_t*)u.temp[3]) = *((u32_a_t*)(b+12)) ^ *((u32_a_t*)rk[1][3]);
  b[ 0] = S5[u.temp[0][0]];
  b[ 1] = S5[u.temp[3][1]];
  b[ 2] = S5[u.temp[2][2]];
    GCRY_CIPHER_MODE_CFB,  /* F.3.13, CFB128-AES128 */
    { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
      0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c },
    { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
      0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
    { { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
        0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a },
      { 0x3b, 0x3f, 0xd9, 0x2e, 0xb7, 0x2d, 0xad, 0x20,
        0x33, 0x34, 0x49, 0xf8, 0xe8, 0x3c, 0xfb, 0x4a } },
    { { 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
        0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51 },
      { 0xc8, 0xa6, 0x45, 0x37, 0xa0, 0xb3, 0xa9, 0x3f,
        0xcd, 0xe3, 0xcd, 0xad, 0x9f, 0x1c, 0xe5, 0x8b } },
    { { 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
        0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef },
      { 0x26, 0x75, 0x1f, 0x67, 0xa3, 0xcb, 0xb1, 0x40,
        0xb1, 0x80, 0x8c, 0xf1, 0x87, 0xa4, 0xf4, 0xdf } },
    { { 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
        0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
      { 0xc0, 0x4b, 0x05, 0x35, 0x7c, 0x5d, 0x1c, 0x0e,