/* cipher.c  -  cipher dispatcher
 * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003
 *               2005, 2007, 2008 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define MAX_BLOCKSIZE 16
33
#define CTX_MAGIC_NORMAL 0x24091964
34
#define CTX_MAGIC_SECURE 0x46919042
36
#undef NEED_16BYTE_ALIGNED_CONTEXT
37
#if defined (__i386__) && SIZEOF_UNSIGNED_LONG == 4 && defined (__GNUC__)
38
#define NEED_16BYTE_ALIGNED_CONTEXT 1
41
/* A dummy extraspec so that we do not need to tests the extraspec
42
field from the module specification against NULL and instead
43
directly test the respective fields of extraspecs. */
44
static cipher_extra_spec_t dummy_extra_spec;
46
/* This is the list of the default ciphers, which are included in
48
static struct cipher_table_entry
50
gcry_cipher_spec_t *cipher;
51
cipher_extra_spec_t *extraspec;
52
unsigned int algorithm;
57
{ &_gcry_cipher_spec_blowfish,
58
&dummy_extra_spec, GCRY_CIPHER_BLOWFISH },
61
{ &_gcry_cipher_spec_des,
62
&dummy_extra_spec, GCRY_CIPHER_DES },
63
{ &_gcry_cipher_spec_tripledes,
64
&_gcry_cipher_extraspec_tripledes, GCRY_CIPHER_3DES, 1 },
67
{ &_gcry_cipher_spec_arcfour,
68
&dummy_extra_spec, GCRY_CIPHER_ARCFOUR },
71
{ &_gcry_cipher_spec_cast5,
72
&dummy_extra_spec, GCRY_CIPHER_CAST5 },
75
{ &_gcry_cipher_spec_aes,
76
&_gcry_cipher_extraspec_aes, GCRY_CIPHER_AES, 1 },
77
{ &_gcry_cipher_spec_aes192,
78
&_gcry_cipher_extraspec_aes192, GCRY_CIPHER_AES192, 1 },
79
{ &_gcry_cipher_spec_aes256,
80
&_gcry_cipher_extraspec_aes256, GCRY_CIPHER_AES256, 1 },
83
{ &_gcry_cipher_spec_twofish,
84
&dummy_extra_spec, GCRY_CIPHER_TWOFISH },
85
{ &_gcry_cipher_spec_twofish128,
86
&dummy_extra_spec, GCRY_CIPHER_TWOFISH128 },
89
{ &_gcry_cipher_spec_serpent128,
90
&dummy_extra_spec, GCRY_CIPHER_SERPENT128 },
91
{ &_gcry_cipher_spec_serpent192,
92
&dummy_extra_spec, GCRY_CIPHER_SERPENT192 },
93
{ &_gcry_cipher_spec_serpent256,
94
&dummy_extra_spec, GCRY_CIPHER_SERPENT256 },
97
{ &_gcry_cipher_spec_rfc2268_40,
98
&dummy_extra_spec, GCRY_CIPHER_RFC2268_40 },
101
{ &_gcry_cipher_spec_seed,
102
&dummy_extra_spec, GCRY_CIPHER_SEED },
105
{ &_gcry_cipher_spec_camellia128,
106
&dummy_extra_spec, GCRY_CIPHER_CAMELLIA128 },
107
{ &_gcry_cipher_spec_camellia192,
108
&dummy_extra_spec, GCRY_CIPHER_CAMELLIA192 },
109
{ &_gcry_cipher_spec_camellia256,
110
&dummy_extra_spec, GCRY_CIPHER_CAMELLIA256 },
115
/* List of registered ciphers. */
116
static gcry_module_t ciphers_registered;
118
/* This is the lock protecting CIPHERS_REGISTERED. */
119
static ath_mutex_t ciphers_registered_lock = ATH_MUTEX_INITIALIZER;
121
/* Flag to check wether the default ciphers have already been
123
static int default_ciphers_registered;
125
/* Convenient macro for registering the default ciphers.  Wrapped in
   do/while(0) so it behaves as a single statement; the mutex guards
   both the check and the one-time registration.  */
#define REGISTER_DEFAULT_CIPHERS                   \
  do                                               \
    {                                              \
      ath_mutex_lock (&ciphers_registered_lock);   \
      if (! default_ciphers_registered)            \
        {                                          \
          cipher_register_default ();              \
          default_ciphers_registered = 1;          \
        }                                          \
      ath_mutex_unlock (&ciphers_registered_lock); \
    }                                              \
  while (0)
140
/* A VIA processor with the Padlock engine requires an alignment of
141
most data on a 16 byte boundary. Because we trick out the compiler
142
while allocating the context, the align attribute as used in
143
rijndael.c does not work on its own. Thus we need to make sure
144
that the entire context structure is a aligned on that boundary.
145
We achieve this by defining a new type and use that instead of our
146
usual alignment type. */
149
PROPERLY_ALIGNED_TYPE foo;
150
#ifdef NEED_16BYTE_ALIGNED_CONTEXT
151
char bar[16] __attribute__ ((aligned (16)));
154
} cipher_context_alignment_t;
157
/* The handle structure. */
158
struct gcry_cipher_handle
161
size_t actual_handle_size; /* Allocated size of this handle. */
162
size_t handle_offset; /* Offset to the malloced block. */
163
gcry_cipher_spec_t *cipher;
164
cipher_extra_spec_t *extraspec;
165
gcry_module_t module;
167
/* The algorithm id. This is a hack required because the module
168
interface does not easily allow to retrieve this value. */
171
/* A structure with function pointers for bulk operations. Due to
172
limitations of the module system (we don't want to change the
173
API) we need to keep these function pointers here. The cipher
174
open function intializes them and the actual encryption routines
175
use them if they are not NULL. */
177
void (*cfb_enc)(void *context, unsigned char *iv,
178
void *outbuf_arg, const void *inbuf_arg,
179
unsigned int nblocks);
180
void (*cfb_dec)(void *context, unsigned char *iv,
181
void *outbuf_arg, const void *inbuf_arg,
182
unsigned int nblocks);
183
void (*cbc_enc)(void *context, unsigned char *iv,
184
void *outbuf_arg, const void *inbuf_arg,
185
unsigned int nblocks, int cbc_mac);
186
void (*cbc_dec)(void *context, unsigned char *iv,
187
void *outbuf_arg, const void *inbuf_arg,
188
unsigned int nblocks);
195
/* The initialization vector. To help code optimization we make
196
sure that it is aligned on an unsigned long and u32 boundary. */
198
unsigned long dummy_iv;
200
unsigned char iv[MAX_BLOCKSIZE];
203
unsigned char lastiv[MAX_BLOCKSIZE];
204
int unused; /* Number of unused bytes in the IV. */
206
unsigned char ctr[MAX_BLOCKSIZE]; /* For Counter (CTR) mode. */
209
/* What follows are two contexts of the cipher in use. The first
210
one needs to be aligned well enough for the cipher operation
211
whereas the second one is a copy created by cipher_setkey and
212
used by cipher_reset. That second copy has no need for proper
213
aligment because it is only accessed by memcpy. */
214
cipher_context_alignment_t context;
219
/* These dummy functions are used in case a cipher implementation
220
refuses to provide it's own functions. */
222
static gcry_err_code_t
223
dummy_setkey (void *c, const unsigned char *key, unsigned int keylen)
228
return GPG_ERR_NO_ERROR;
232
dummy_encrypt_block (void *c,
233
unsigned char *outbuf, const unsigned char *inbuf)
242
dummy_decrypt_block (void *c,
243
unsigned char *outbuf, const unsigned char *inbuf)
252
dummy_encrypt_stream (void *c,
253
unsigned char *outbuf, const unsigned char *inbuf,
264
dummy_decrypt_stream (void *c,
265
unsigned char *outbuf, const unsigned char *inbuf,
276
/* Internal function. Register all the ciphers included in
277
CIPHER_TABLE. Note, that this function gets only used by the macro
278
REGISTER_DEFAULT_CIPHERS which protects it using a mutex. */
280
cipher_register_default (void)
282
gcry_err_code_t err = GPG_ERR_NO_ERROR;
285
for (i = 0; !err && cipher_table[i].cipher; i++)
287
if (! cipher_table[i].cipher->setkey)
288
cipher_table[i].cipher->setkey = dummy_setkey;
289
if (! cipher_table[i].cipher->encrypt)
290
cipher_table[i].cipher->encrypt = dummy_encrypt_block;
291
if (! cipher_table[i].cipher->decrypt)
292
cipher_table[i].cipher->decrypt = dummy_decrypt_block;
293
if (! cipher_table[i].cipher->stencrypt)
294
cipher_table[i].cipher->stencrypt = dummy_encrypt_stream;
295
if (! cipher_table[i].cipher->stdecrypt)
296
cipher_table[i].cipher->stdecrypt = dummy_decrypt_stream;
298
if ( fips_mode () && !cipher_table[i].fips_allowed )
301
err = _gcry_module_add (&ciphers_registered,
302
cipher_table[i].algorithm,
303
(void *) cipher_table[i].cipher,
304
(void *) cipher_table[i].extraspec,
312
/* Internal callback function. Used via _gcry_module_lookup. */
314
gcry_cipher_lookup_func_name (void *spec, void *data)
316
gcry_cipher_spec_t *cipher = (gcry_cipher_spec_t *) spec;
317
char *name = (char *) data;
318
const char **aliases = cipher->aliases;
319
int i, ret = ! stricmp (name, cipher->name);
322
for (i = 0; aliases[i] && (! ret); i++)
323
ret = ! stricmp (name, aliases[i]);
328
/* Internal callback function. Used via _gcry_module_lookup. */
330
gcry_cipher_lookup_func_oid (void *spec, void *data)
332
gcry_cipher_spec_t *cipher = (gcry_cipher_spec_t *) spec;
333
char *oid = (char *) data;
334
gcry_cipher_oid_spec_t *oid_specs = cipher->oids;
338
for (i = 0; oid_specs[i].oid && (! ret); i++)
339
if (! stricmp (oid, oid_specs[i].oid))
345
/* Internal function. Lookup a cipher entry by it's name. */
347
gcry_cipher_lookup_name (const char *name)
349
gcry_module_t cipher;
351
cipher = _gcry_module_lookup (ciphers_registered, (void *) name,
352
gcry_cipher_lookup_func_name);
357
/* Internal function. Lookup a cipher entry by it's oid. */
359
gcry_cipher_lookup_oid (const char *oid)
361
gcry_module_t cipher;
363
cipher = _gcry_module_lookup (ciphers_registered, (void *) oid,
364
gcry_cipher_lookup_func_oid);
369
/* Register a new cipher module whose specification can be found in
370
CIPHER. On success, a new algorithm ID is stored in ALGORITHM_ID
371
and a pointer representhing this module is stored in MODULE. */
373
_gcry_cipher_register (gcry_cipher_spec_t *cipher,
374
cipher_extra_spec_t *extraspec,
376
gcry_module_t *module)
378
gcry_err_code_t err = 0;
381
/* We do not support module loading in fips mode. */
383
return gpg_error (GPG_ERR_NOT_SUPPORTED);
385
ath_mutex_lock (&ciphers_registered_lock);
386
err = _gcry_module_add (&ciphers_registered, 0,
388
(void *)(extraspec? extraspec : &dummy_extra_spec),
390
ath_mutex_unlock (&ciphers_registered_lock);
395
*algorithm_id = mod->mod_id;
398
return gcry_error (err);
401
/* Unregister the cipher identified by MODULE, which must have been
402
registered with gcry_cipher_register. */
404
gcry_cipher_unregister (gcry_module_t module)
406
ath_mutex_lock (&ciphers_registered_lock);
407
_gcry_module_release (module);
408
ath_mutex_unlock (&ciphers_registered_lock);
411
/* Locate the OID in the oid table and return the index or -1 when not
412
found. An opitonal "oid." or "OID." prefix in OID is ignored, the
413
OID is expected to be in standard IETF dotted notation. The
414
internal algorithm number is returned in ALGORITHM unless it
415
ispassed as NULL. A pointer to the specification of the module
416
implementing this algorithm is return in OID_SPEC unless passed as
419
search_oid (const char *oid, int *algorithm, gcry_cipher_oid_spec_t *oid_spec)
421
gcry_module_t module;
424
if (oid && ((! strncmp (oid, "oid.", 4))
425
|| (! strncmp (oid, "OID.", 4))))
428
module = gcry_cipher_lookup_oid (oid);
431
gcry_cipher_spec_t *cipher = module->spec;
434
for (i = 0; cipher->oids[i].oid && !ret; i++)
435
if (! stricmp (oid, cipher->oids[i].oid))
438
*algorithm = module->mod_id;
440
*oid_spec = cipher->oids[i];
443
_gcry_module_release (module);
449
/* Map STRING to the cipher algorithm identifier. Returns the
450
algorithm ID of the cipher for the given name or 0 if the name is
451
not known. It is valid to pass NULL for STRING which results in a
452
return value of 0. */
454
gcry_cipher_map_name (const char *string)
456
gcry_module_t cipher;
457
int ret, algorithm = 0;
462
REGISTER_DEFAULT_CIPHERS;
464
/* If the string starts with a digit (optionally prefixed with
465
either "OID." or "oid."), we first look into our table of ASN.1
466
object identifiers to figure out the algorithm */
468
ath_mutex_lock (&ciphers_registered_lock);
470
ret = search_oid (string, &algorithm, NULL);
473
cipher = gcry_cipher_lookup_name (string);
476
algorithm = cipher->mod_id;
477
_gcry_module_release (cipher);
481
ath_mutex_unlock (&ciphers_registered_lock);
487
/* Given a STRING with an OID in dotted decimal notation, this
488
function returns the cipher mode (GCRY_CIPHER_MODE_*) associated
489
with that OID or 0 if no mode is known. Passing NULL for string
490
yields a return value of 0. */
492
gcry_cipher_mode_from_oid (const char *string)
494
gcry_cipher_oid_spec_t oid_spec;
495
int ret = 0, mode = 0;
500
ath_mutex_lock (&ciphers_registered_lock);
501
ret = search_oid (string, NULL, &oid_spec);
503
mode = oid_spec.mode;
504
ath_mutex_unlock (&ciphers_registered_lock);
510
/* Map the cipher algorithm whose ID is contained in ALGORITHM to a
511
string representation of the algorithm name. For unknown algorithm
512
IDs this function returns "?". */
514
cipher_algo_to_string (int algorithm)
516
gcry_module_t cipher;
519
REGISTER_DEFAULT_CIPHERS;
521
ath_mutex_lock (&ciphers_registered_lock);
522
cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
525
name = ((gcry_cipher_spec_t *) cipher->spec)->name;
526
_gcry_module_release (cipher);
530
ath_mutex_unlock (&ciphers_registered_lock);
535
/* Map the cipher algorithm identifier ALGORITHM to a string
   representing this algorithm.  This string is the default name as
   used by Libgcrypt.  A pointer to an empty string is returned for
   an unknown algorithm.  NULL is never returned. */
const char *
gcry_cipher_algo_name (int algorithm)
{
  return cipher_algo_to_string (algorithm);
}
546
/* Flag the cipher algorithm with the identifier ALGORITHM as
547
disabled. There is no error return, the function does nothing for
548
unknown algorithms. Disabled algorithms are vitually not available
551
disable_cipher_algo (int algorithm)
553
gcry_module_t cipher;
555
REGISTER_DEFAULT_CIPHERS;
557
ath_mutex_lock (&ciphers_registered_lock);
558
cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
561
if (! (cipher->flags & FLAG_MODULE_DISABLED))
562
cipher->flags |= FLAG_MODULE_DISABLED;
563
_gcry_module_release (cipher);
565
ath_mutex_unlock (&ciphers_registered_lock);
569
/* Return 0 if the cipher algorithm with identifier ALGORITHM is
570
available. Returns a basic error code value if it is not
572
static gcry_err_code_t
573
check_cipher_algo (int algorithm)
575
gcry_err_code_t err = GPG_ERR_NO_ERROR;
576
gcry_module_t cipher;
578
REGISTER_DEFAULT_CIPHERS;
580
ath_mutex_lock (&ciphers_registered_lock);
581
cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
584
if (cipher->flags & FLAG_MODULE_DISABLED)
585
err = GPG_ERR_CIPHER_ALGO;
586
_gcry_module_release (cipher);
589
err = GPG_ERR_CIPHER_ALGO;
590
ath_mutex_unlock (&ciphers_registered_lock);
596
/* Return the standard length of the key for the cipher algorithm with
597
the identifier ALGORITHM. This function expects a valid algorithm
598
and will abort if the algorithm is not available or the length of
599
the key is not known. */
601
cipher_get_keylen (int algorithm)
603
gcry_module_t cipher;
606
REGISTER_DEFAULT_CIPHERS;
608
ath_mutex_lock (&ciphers_registered_lock);
609
cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
612
len = ((gcry_cipher_spec_t *) cipher->spec)->keylen;
614
log_bug ("cipher %d w/o key length\n", algorithm);
615
_gcry_module_release (cipher);
618
log_bug ("cipher %d not found\n", algorithm);
619
ath_mutex_unlock (&ciphers_registered_lock);
624
/* Return the block length of the cipher algorithm with the identifier
625
ALGORITHM. This function expects a valid algorithm and will abort
626
if the algorithm is not available or the length of the key is not
629
cipher_get_blocksize (int algorithm)
631
gcry_module_t cipher;
634
REGISTER_DEFAULT_CIPHERS;
636
ath_mutex_lock (&ciphers_registered_lock);
637
cipher = _gcry_module_lookup_id (ciphers_registered, algorithm);
640
len = ((gcry_cipher_spec_t *) cipher->spec)->blocksize;
642
log_bug ("cipher %d w/o blocksize\n", algorithm);
643
_gcry_module_release (cipher);
646
log_bug ("cipher %d not found\n", algorithm);
647
ath_mutex_unlock (&ciphers_registered_lock);
654
Open a cipher handle for use with cipher algorithm ALGORITHM, using
655
the cipher mode MODE (one of the GCRY_CIPHER_MODE_*) and return a
656
handle in HANDLE. Put NULL into HANDLE and return an error code if
657
something goes wrong. FLAGS may be used to modify the
658
operation. The defined flags are:
660
GCRY_CIPHER_SECURE: allocate all internal buffers in secure memory.
661
GCRY_CIPHER_ENABLE_SYNC: Enable the sync operation as used in OpenPGP.
662
GCRY_CIPHER_CBC_CTS: Enable CTS mode.
663
GCRY_CIPHER_CBC_MAC: Enable MAC mode.
665
Values for these flags may be combined using OR.
668
gcry_cipher_open (gcry_cipher_hd_t *handle,
669
int algo, int mode, unsigned int flags)
671
int secure = (flags & GCRY_CIPHER_SECURE);
672
gcry_cipher_spec_t *cipher = NULL;
673
cipher_extra_spec_t *extraspec = NULL;
674
gcry_module_t module = NULL;
675
gcry_cipher_hd_t h = NULL;
676
gcry_err_code_t err = 0;
678
/* If the application missed to call the random poll function, we do
679
it here to ensure that it is used once in a while. */
680
_gcry_fast_random_poll ();
682
REGISTER_DEFAULT_CIPHERS;
684
/* Fetch the according module and check wether the cipher is marked
685
available for use. */
686
ath_mutex_lock (&ciphers_registered_lock);
687
module = _gcry_module_lookup_id (ciphers_registered, algo);
692
if (module->flags & FLAG_MODULE_DISABLED)
694
/* Not available for use. */
695
err = GPG_ERR_CIPHER_ALGO;
696
_gcry_module_release (module);
700
cipher = (gcry_cipher_spec_t *) module->spec;
701
extraspec = module->extraspec;
705
err = GPG_ERR_CIPHER_ALGO;
706
ath_mutex_unlock (&ciphers_registered_lock);
712
| GCRY_CIPHER_ENABLE_SYNC
713
| GCRY_CIPHER_CBC_CTS
714
| GCRY_CIPHER_CBC_MAC))
715
|| (flags & GCRY_CIPHER_CBC_CTS & GCRY_CIPHER_CBC_MAC)))
716
err = GPG_ERR_CIPHER_ALGO;
718
/* check that a valid mode has been requested */
722
case GCRY_CIPHER_MODE_ECB:
723
case GCRY_CIPHER_MODE_CBC:
724
case GCRY_CIPHER_MODE_CFB:
725
case GCRY_CIPHER_MODE_OFB:
726
case GCRY_CIPHER_MODE_CTR:
727
if ((cipher->encrypt == dummy_encrypt_block)
728
|| (cipher->decrypt == dummy_decrypt_block))
729
err = GPG_ERR_INV_CIPHER_MODE;
732
case GCRY_CIPHER_MODE_STREAM:
733
if ((cipher->stencrypt == dummy_encrypt_stream)
734
|| (cipher->stdecrypt == dummy_decrypt_stream))
735
err = GPG_ERR_INV_CIPHER_MODE;
738
case GCRY_CIPHER_MODE_NONE:
739
/* This mode may be used for debugging. It copies the main
740
text verbatim to the ciphertext. We do not allow this in
741
fips mode or if no debug flag has been set. */
742
if (fips_mode () || !_gcry_get_debug_flag (0))
743
err = GPG_ERR_INV_CIPHER_MODE;
747
err = GPG_ERR_INV_CIPHER_MODE;
750
/* Perform selftest here and mark this with a flag in cipher_table?
751
No, we should not do this as it takes too long. Further it does
752
not make sense to exclude algorithms with failing selftests at
753
runtime: If a selftest fails there is something seriously wrong
754
with the system and thus we better die immediately. */
758
size_t size = (sizeof (*h)
759
+ 2 * cipher->contextsize
760
- sizeof (cipher_context_alignment_t)
761
#ifdef NEED_16BYTE_ALIGNED_CONTEXT
762
+ 15 /* Space for leading alignment gap. */
763
#endif /*NEED_16BYTE_ALIGNED_CONTEXT*/
767
h = gcry_calloc_secure (1, size);
769
h = gcry_calloc (1, size);
772
err = gpg_err_code_from_errno (errno);
777
#ifdef NEED_16BYTE_ALIGNED_CONTEXT
778
if ( ((unsigned long)h & 0x0f) )
780
/* The malloced block is not aligned on a 16 byte
781
boundary. Correct for this. */
782
off = 16 - ((unsigned long)h & 0x0f);
783
h = (void*)((char*)h + off);
785
#endif /*NEED_16BYTE_ALIGNED_CONTEXT*/
787
h->magic = secure ? CTX_MAGIC_SECURE : CTX_MAGIC_NORMAL;
788
h->actual_handle_size = size - off;
789
h->handle_offset = off;
791
h->extraspec = extraspec;
797
/* Setup bulk encryption routines. */
801
case GCRY_CIPHER_AES128:
802
case GCRY_CIPHER_AES192:
803
case GCRY_CIPHER_AES256:
804
h->bulk.cfb_enc = _gcry_aes_cfb_enc;
805
h->bulk.cfb_dec = _gcry_aes_cfb_dec;
806
h->bulk.cbc_enc = _gcry_aes_cbc_enc;
807
h->bulk.cbc_dec = _gcry_aes_cbc_dec;
823
/* Release module. */
824
ath_mutex_lock (&ciphers_registered_lock);
825
_gcry_module_release (module);
826
ath_mutex_unlock (&ciphers_registered_lock);
830
*handle = err ? NULL : h;
832
return gcry_error (err);
836
/* Release all resources associated with the cipher handle H. H may be
837
NULL in which case this is a no-operation. */
839
gcry_cipher_close (gcry_cipher_hd_t h)
846
if ((h->magic != CTX_MAGIC_SECURE)
847
&& (h->magic != CTX_MAGIC_NORMAL))
848
_gcry_fatal_error(GPG_ERR_INTERNAL,
849
"gcry_cipher_close: already closed/invalid handle");
853
/* Release module. */
854
ath_mutex_lock (&ciphers_registered_lock);
855
_gcry_module_release (h->module);
856
ath_mutex_unlock (&ciphers_registered_lock);
858
/* We always want to wipe out the memory even when the context has
859
been allocated in secure memory. The user might have disabled
860
secure memory or is using his own implementation which does not
861
do the wiping. To accomplish this we need to keep track of the
862
actual size of this structure because we have no way to known
863
how large the allocated area was when using a standard malloc. */
864
off = h->handle_offset;
865
wipememory (h, h->actual_handle_size);
867
gcry_free ((char*)h - off);
871
/* Set the key to be used for the encryption context C to KEY with
872
length KEYLEN. The length should match the required length. */
874
cipher_setkey (gcry_cipher_hd_t c, byte *key, unsigned int keylen)
878
ret = (*c->cipher->setkey) (&c->context.c, key, keylen);
881
/* Duplicate initial context. */
882
memcpy ((void *) ((char *) &c->context.c + c->cipher->contextsize),
883
(void *) &c->context.c,
884
c->cipher->contextsize);
887
return gcry_error (ret);
891
/* Set the IV to be used for the encryption context C to IV with
892
length IVLEN. The length should match the required length. */
894
cipher_setiv( gcry_cipher_hd_t c, const byte *iv, unsigned ivlen )
896
memset (c->u_iv.iv, 0, c->cipher->blocksize);
899
if (ivlen != c->cipher->blocksize)
901
log_info ("WARNING: cipher_setiv: ivlen=%u blklen=%u\n",
902
ivlen, (unsigned int)c->cipher->blocksize);
903
fips_signal_error ("IV length does not match blocklength");
905
if (ivlen > c->cipher->blocksize)
906
ivlen = c->cipher->blocksize;
907
memcpy (c->u_iv.iv, iv, ivlen);
913
/* Reset the cipher context to the initial context. This is basically
914
the same as an release followed by a new. */
916
cipher_reset (gcry_cipher_hd_t c)
918
memcpy (&c->context.c,
919
(char *) &c->context.c + c->cipher->contextsize,
920
c->cipher->contextsize);
921
memset (c->u_iv.iv, 0, c->cipher->blocksize);
922
memset (c->lastiv, 0, c->cipher->blocksize);
923
memset (c->ctr, 0, c->cipher->blocksize);
928
/* ECB encrypt NBLOCKS full blocks from INBUF to OUTBUF.  Caller
   guarantees the byte count is a multiple of the block size.  */
static void
do_ecb_encrypt( gcry_cipher_hd_t c, byte *outbuf, const byte *inbuf,
                unsigned int nblocks )
{
  unsigned int n;

  for (n=0; n < nblocks; n++ )
    {
      c->cipher->encrypt ( &c->context.c, outbuf, (byte*)/*arggg*/inbuf );
      inbuf  += c->cipher->blocksize;
      outbuf += c->cipher->blocksize;
    }
}
942
/* ECB decrypt NBLOCKS full blocks from INBUF to OUTBUF.  Caller
   guarantees the byte count is a multiple of the block size.  */
static void
do_ecb_decrypt( gcry_cipher_hd_t c, byte *outbuf, const byte *inbuf,
                unsigned int nblocks )
{
  unsigned int n;

  for (n=0; n < nblocks; n++ )
    {
      c->cipher->decrypt ( &c->context.c, outbuf, (byte*)/*arggg*/inbuf );
      inbuf  += c->cipher->blocksize;
      outbuf += c->cipher->blocksize;
    }
}
957
/* CBC encrypt NBYTES from INBUF to OUTBUF.  Supports ciphertext
   stealing (GCRY_CIPHER_CBC_CTS) and MAC mode (GCRY_CIPHER_CBC_MAC,
   where only the last block is kept).  Uses the bulk routine when the
   handle provides one.  NOTE(review): reconstructed from a garbled
   extraction — verify CTS block accounting against the repository.  */
static void
do_cbc_encrypt (gcry_cipher_hd_t c, unsigned char *outbuf,
                const unsigned char *inbuf, unsigned int nbytes )
{
  unsigned int n;
  unsigned char *ivp;
  int i;
  size_t blocksize = c->cipher->blocksize;
  unsigned nblocks = nbytes / blocksize;

  if ((c->flags & GCRY_CIPHER_CBC_CTS) && nbytes > blocksize)
    {
      /* Leave the last (possibly full) block for the stealing step.  */
      if ((nbytes % blocksize) == 0)
	nblocks--;
    }

  if (c->bulk.cbc_enc)
    {
      c->bulk.cbc_enc (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks,
                       (c->flags & GCRY_CIPHER_CBC_MAC));
      inbuf  += nblocks * blocksize;
      if (!(c->flags & GCRY_CIPHER_CBC_MAC))
        outbuf += nblocks * blocksize;
    }
  else
    {
      for (n=0; n < nblocks; n++ )
        {
          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
            outbuf[i] = inbuf[i] ^ *ivp++;
          c->cipher->encrypt ( &c->context.c, outbuf, outbuf );
          memcpy (c->u_iv.iv, outbuf, blocksize );
          inbuf  += blocksize;
          if (!(c->flags & GCRY_CIPHER_CBC_MAC))
            outbuf += blocksize;
        }
    }

  if ((c->flags & GCRY_CIPHER_CBC_CTS) && nbytes > blocksize)
    {
      /* We have to be careful here, since outbuf might be equal to
         inbuf.  */
      int restbytes;
      unsigned char b;

      if ((nbytes % blocksize) == 0)
        restbytes = blocksize;
      else
        restbytes = nbytes % blocksize;

      outbuf -= blocksize;
      for (ivp = c->u_iv.iv, i = 0; i < restbytes; i++)
        {
          b = inbuf[i];
          outbuf[blocksize + i] = outbuf[i];
          outbuf[i] = b ^ *ivp++;
        }
      for (; i < blocksize; i++)
        outbuf[i] = 0 ^ *ivp++;

      c->cipher->encrypt (&c->context.c, outbuf, outbuf);
      memcpy (c->u_iv.iv, outbuf, blocksize);
    }
}
1023
/* CBC decrypt NBYTES from INBUF to OUTBUF.  Supports ciphertext
   stealing (GCRY_CIPHER_CBC_CTS).  Uses the bulk routine when the
   handle provides one.  NOTE(review): reconstructed from a garbled
   extraction — verify CTS block accounting against the repository.  */
static void
do_cbc_decrypt (gcry_cipher_hd_t c, unsigned char *outbuf,
                const unsigned char *inbuf, unsigned int nbytes)
{
  unsigned int n;
  unsigned char *ivp;
  int i;
  size_t blocksize = c->cipher->blocksize;
  unsigned int nblocks = nbytes / blocksize;

  if ((c->flags & GCRY_CIPHER_CBC_CTS) && nbytes > blocksize)
    {
      /* Keep the last two blocks for the stealing step.  */
      nblocks--;
      if ((nbytes % blocksize) == 0)
	nblocks--;
      memcpy (c->lastiv, c->u_iv.iv, blocksize);
    }

  if (c->bulk.cbc_dec)
    {
      c->bulk.cbc_dec (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
      inbuf  += nblocks * blocksize;
      outbuf += nblocks * blocksize;
    }
  else
    {
      for (n=0; n < nblocks; n++ )
        {
          /* Because outbuf and inbuf might be the same, we have to
           * save the original ciphertext block.  We use LASTIV for
           * this here because it is not used otherwise. */
          memcpy (c->lastiv, inbuf, blocksize);
          c->cipher->decrypt ( &c->context.c, outbuf, inbuf );
          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
	    outbuf[i] ^= *ivp++;
          memcpy(c->u_iv.iv, c->lastiv, blocksize );
          inbuf  += c->cipher->blocksize;
          outbuf += c->cipher->blocksize;
        }
    }

  if ((c->flags & GCRY_CIPHER_CBC_CTS) && nbytes > blocksize)
    {
      int restbytes;

      if ((nbytes % blocksize) == 0)
        restbytes = blocksize;
      else
        restbytes = nbytes % blocksize;

      memcpy (c->lastiv, c->u_iv.iv, blocksize );         /* Save Cn-2. */
      memcpy (c->u_iv.iv, inbuf + blocksize, restbytes ); /* Save Cn. */

      c->cipher->decrypt ( &c->context.c, outbuf, inbuf );
      for (ivp=c->u_iv.iv,i=0; i < restbytes; i++ )
        outbuf[i] ^= *ivp++;

      memcpy(outbuf + blocksize, outbuf, restbytes);
      for(i=restbytes; i < blocksize; i++)
        c->u_iv.iv[i] = outbuf[i];
      c->cipher->decrypt (&c->context.c, outbuf, c->u_iv.iv);
      for(ivp=c->lastiv,i=0; i < blocksize; i++ )
        outbuf[i] ^= *ivp++;
      /* c->lastiv is now really lastlastiv, does this matter? */
    }
}
1091
/* CFB encrypt NBYTES from INBUF to OUTBUF.  Leftover keystream bytes
   from a previous partial-block call are tracked in C->unused so the
   mode can be driven byte-wise.  NOTE(review): reconstructed from a
   garbled extraction — verify against the repository.  */
static void
do_cfb_encrypt( gcry_cipher_hd_t c, unsigned char *outbuf,
                const unsigned char *inbuf, unsigned int nbytes )
{
  unsigned char *ivp;
  int i;
  size_t blocksize = c->cipher->blocksize;
  size_t blocksize_x_2 = blocksize + blocksize;

  if ( nbytes <= c->unused )
    {
      /* Short enough to be encoded by the remaining XOR mask. */
      /* XOR the input with the IV and store input into IV. */
      for (ivp=c->u_iv.iv+c->cipher->blocksize - c->unused;
           nbytes;
           nbytes--, c->unused-- )
        *outbuf++ = (*ivp++ ^= *inbuf++);
      return;
    }

  if ( c->unused )
    {
      /* XOR the input with the IV and store input into IV */
      nbytes -= c->unused;
      for(ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
        *outbuf++ = (*ivp++ ^= *inbuf++);
    }

  /* Now we can process complete blocks.  We use a loop as long as we
     have at least 2 blocks and use conditions for the rest.  This
     also allows to use a bulk encryption function if available.  */
  if (nbytes >= blocksize_x_2 && c->bulk.cfb_enc)
    {
      unsigned int nblocks = nbytes / blocksize;
      c->bulk.cfb_enc (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
      outbuf += nblocks * blocksize;
      inbuf  += nblocks * blocksize;
      nbytes -= nblocks * blocksize;
    }
  else
    {
      while ( nbytes >= blocksize_x_2 )
        {
          /* Encrypt the IV. */
          c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
          /* XOR the input with the IV and store input into IV. */
          for(ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
            *outbuf++ = (*ivp++ ^= *inbuf++);
          nbytes -= blocksize;
        }
    }

  if ( nbytes >= blocksize )
    {
      /* Save the current IV and then encrypt the IV. */
      memcpy( c->lastiv, c->u_iv.iv, blocksize );
      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      /* XOR the input with the IV and store input into IV */
      for(ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
        *outbuf++ = (*ivp++ ^= *inbuf++);
      nbytes -= blocksize;
    }

  if ( nbytes ) /* Process the remaining bytes.  */
    {
      /* Save the current IV and then encrypt the IV. */
      memcpy( c->lastiv, c->u_iv.iv, blocksize );
      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      c->unused = blocksize;
      /* Apply the XOR. */
      c->unused -= nbytes;
      for(ivp=c->u_iv.iv; nbytes; nbytes-- )
        *outbuf++ = (*ivp++ ^= *inbuf++);
    }
}
1168
/* CFB decrypt NBYTES from INBUF to OUTBUF.  The ciphertext byte must
   be saved (TEMP) before the XOR because outbuf may alias inbuf; the
   ciphertext then becomes the next IV material.  NOTE(review):
   reconstructed from a garbled extraction — verify against the
   repository.  */
static void
do_cfb_decrypt( gcry_cipher_hd_t c, unsigned char *outbuf,
                const unsigned char *inbuf, unsigned int nbytes )
{
  unsigned char *ivp;
  unsigned char temp;
  int i;
  size_t blocksize = c->cipher->blocksize;
  size_t blocksize_x_2 = blocksize + blocksize;

  if (nbytes <= c->unused)
    {
      /* Short enough to be encoded by the remaining XOR mask. */
      /* XOR the input with the IV and store input into IV. */
      for (ivp=c->u_iv.iv+blocksize - c->unused;
           nbytes;
           nbytes--, c->unused--)
        {
          temp = *inbuf++;
          *outbuf++ = *ivp ^ temp;
          *ivp++ = temp;
        }
      return;
    }

  if (c->unused)
    {
      /* XOR the input with the IV and store input into IV. */
      nbytes -= c->unused;
      for (ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
        {
          temp = *inbuf++;
          *outbuf++ = *ivp ^ temp;
          *ivp++ = temp;
        }
    }

  /* Now we can process complete blocks.  We use a loop as long as we
     have at least 2 blocks and use conditions for the rest.  This
     also allows to use a bulk encryption function if available.  */
  if (nbytes >= blocksize_x_2 && c->bulk.cfb_dec)
    {
      unsigned int nblocks = nbytes / blocksize;
      c->bulk.cfb_dec (&c->context.c, c->u_iv.iv, outbuf, inbuf, nblocks);
      outbuf += nblocks * blocksize;
      inbuf  += nblocks * blocksize;
      nbytes -= nblocks * blocksize;
    }
  else
    {
      while (nbytes >= blocksize_x_2 )
        {
          /* Encrypt the IV. */
          c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
          /* XOR the input with the IV and store input into IV. */
          for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
            {
              temp = *inbuf++;
              *outbuf++ = *ivp ^ temp;
              *ivp++ = temp;
            }
          nbytes -= blocksize;
        }
    }

  if (nbytes >= blocksize )
    {
      /* Save the current IV and then encrypt the IV. */
      memcpy ( c->lastiv, c->u_iv.iv, blocksize);
      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      /* XOR the input with the IV and store input into IV */
      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
        {
          temp = *inbuf++;
          *outbuf++ = *ivp ^ temp;
          *ivp++ = temp;
        }
      nbytes -= blocksize;
    }

  if (nbytes) /* Process the remaining bytes.  */
    {
      /* Save the current IV and then encrypt the IV. */
      memcpy ( c->lastiv, c->u_iv.iv, blocksize );
      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      c->unused = blocksize;
      /* Apply the XOR. */
      c->unused -= nbytes;
      for (ivp=c->u_iv.iv; nbytes; nbytes-- )
        {
          temp = *inbuf++;
          *outbuf++ = *ivp ^ temp;
          *ivp++ = temp;
        }
    }
}
1266
/* OFB encrypt NBYTES from INBUF to OUTBUF.  Unlike CFB the IV is not
   overwritten with ciphertext, so the XOR is non-destructive
   (`*ivp++ ^ *inbuf++`).  Leftover keystream bytes are tracked in
   C->unused.  */
static void
do_ofb_encrypt( gcry_cipher_hd_t c,
                byte *outbuf, const byte *inbuf, unsigned nbytes )
{
  byte *ivp;
  int i;
  size_t blocksize = c->cipher->blocksize;

  if ( nbytes <= c->unused )
    {
      /* Short enough to be encoded by the remaining XOR mask. */
      /* XOR the input with the IV */
      for (ivp=c->u_iv.iv+c->cipher->blocksize - c->unused;
           nbytes;
           nbytes--, c->unused-- )
        *outbuf++ = (*ivp++ ^ *inbuf++);
      return;
    }

  if ( c->unused )
    {
      nbytes -= c->unused;
      for(ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
        *outbuf++ = (*ivp++ ^ *inbuf++);
    }

  /* Now we can process complete blocks. */
  while ( nbytes >= blocksize )
    {
      /* Encrypt the IV (and save the current one). */
      memcpy( c->lastiv, c->u_iv.iv, blocksize );
      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );

      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
        *outbuf++ = (*ivp++ ^ *inbuf++);
      nbytes -= blocksize;
    }

  if ( nbytes )
    { /* process the remaining bytes */
      memcpy( c->lastiv, c->u_iv.iv, blocksize );
      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      c->unused = blocksize;
      c->unused -= nbytes;
      for(ivp=c->u_iv.iv; nbytes; nbytes-- )
        *outbuf++ = (*ivp++ ^ *inbuf++);
    }
}
1314
/* OFB decrypt NBYTES from INBUF to OUTBUF.  OFB is symmetric, so this
   mirrors do_ofb_encrypt exactly.  */
static void
do_ofb_decrypt( gcry_cipher_hd_t c,
                byte *outbuf, const byte *inbuf, unsigned int nbytes )
{
  byte *ivp;
  int i;
  size_t blocksize = c->cipher->blocksize;

  if( nbytes <= c->unused )
    {
      /* Short enough to be encoded by the remaining XOR mask. */
      for (ivp=c->u_iv.iv+blocksize - c->unused; nbytes; nbytes--,c->unused--)
        *outbuf++ = *ivp++ ^ *inbuf++;
      return;
    }

  if ( c->unused )
    {
      nbytes -= c->unused;
      for (ivp=c->u_iv.iv+blocksize - c->unused; c->unused; c->unused-- )
        *outbuf++ = *ivp++ ^ *inbuf++;
    }

  /* Now we can process complete blocks. */
  while ( nbytes >= blocksize )
    {
      /* Encrypt the IV (and save the current one). */
      memcpy( c->lastiv, c->u_iv.iv, blocksize );
      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      for (ivp=c->u_iv.iv,i=0; i < blocksize; i++ )
        *outbuf++ = *ivp++ ^ *inbuf++;
      nbytes -= blocksize;
    }

  if ( nbytes )
    { /* Process the remaining bytes. */
      /* Encrypt the IV (and save the current one). */
      memcpy( c->lastiv, c->u_iv.iv, blocksize );
      c->cipher->encrypt ( &c->context.c, c->u_iv.iv, c->u_iv.iv );
      c->unused = blocksize;
      c->unused -= nbytes;
      for (ivp=c->u_iv.iv; nbytes; nbytes-- )
        *outbuf++ = *ivp++ ^ *inbuf++;
    }
}
1360
/* CTR encrypt NBYTES from INBUF to OUTBUF.  A fresh keystream block
   is produced from C->ctr whenever a block boundary is crossed; the
   counter is then incremented big-endian with carry.  */
static void
do_ctr_encrypt( gcry_cipher_hd_t c, byte *outbuf, const byte *inbuf,
                unsigned int nbytes )
{
  unsigned int n;
  byte tmp[MAX_BLOCKSIZE];
  int i;

  for(n=0; n < nbytes; n++)
    {
      if ((n % c->cipher->blocksize) == 0)
	{
	  c->cipher->encrypt (&c->context.c, tmp, c->ctr);

	  /* Increment the big-endian counter; stop at the first byte
	     that does not wrap to zero.  */
	  for (i = c->cipher->blocksize; i > 0; i--)
	    {
	      c->ctr[i-1]++;
	      if (c->ctr[i-1] != 0)
		break;
	    }
	}

      /* XOR input with encrypted counter and store in output. */
      outbuf[n] = inbuf[n] ^ tmp[n % c->cipher->blocksize];
    }
}
1387
/* CTR mode decryption is identical to encryption, as the cipher is
   only used to generate the XOR key stream.  */
static void
do_ctr_decrypt( gcry_cipher_hd_t c, byte *outbuf, const byte *inbuf,
                unsigned int nbytes )
{
  do_ctr_encrypt (c, outbuf, inbuf, nbytes);
}
1395
* Encrypt INBUF to OUTBUF with the mode selected at open.
1396
* inbuf and outbuf may overlap or be the same.
1397
* Depending on the mode some contraints apply to NBYTES.
1399
static gcry_err_code_t
1400
cipher_encrypt (gcry_cipher_hd_t c, byte *outbuf,
1401
const byte *inbuf, unsigned int nbytes)
1403
gcry_err_code_t rc = GPG_ERR_NO_ERROR;
1406
case GCRY_CIPHER_MODE_ECB:
1407
if (!(nbytes%c->cipher->blocksize))
1408
do_ecb_encrypt(c, outbuf, inbuf, nbytes/c->cipher->blocksize );
1410
rc = GPG_ERR_INV_ARG;
1412
case GCRY_CIPHER_MODE_CBC:
1413
if (!(nbytes%c->cipher->blocksize)
1414
|| (nbytes > c->cipher->blocksize
1415
&& (c->flags & GCRY_CIPHER_CBC_CTS)))
1416
do_cbc_encrypt(c, outbuf, inbuf, nbytes );
1418
rc = GPG_ERR_INV_ARG;
1420
case GCRY_CIPHER_MODE_CFB:
1421
do_cfb_encrypt(c, outbuf, inbuf, nbytes );
1423
case GCRY_CIPHER_MODE_OFB:
1424
do_ofb_encrypt(c, outbuf, inbuf, nbytes );
1426
case GCRY_CIPHER_MODE_CTR:
1427
do_ctr_encrypt(c, outbuf, inbuf, nbytes );
1429
case GCRY_CIPHER_MODE_STREAM:
1430
c->cipher->stencrypt ( &c->context.c,
1431
outbuf, (byte*)/*arggg*/inbuf, nbytes );
1433
case GCRY_CIPHER_MODE_NONE:
1434
if (fips_mode () || !_gcry_get_debug_flag (0))
1436
fips_signal_error ("cipher mode NONE used");
1437
rc = GPG_ERR_INV_CIPHER_MODE;
1441
if ( inbuf != outbuf )
1442
memmove (outbuf, inbuf, nbytes);
1446
log_fatal("cipher_encrypt: invalid mode %d\n", c->mode );
1447
rc = GPG_ERR_INV_CIPHER_MODE;
1455
* Encrypt IN and write it to OUT. If IN is NULL, in-place encryption has
1459
gcry_cipher_encrypt (gcry_cipher_hd_t h, void *out, size_t outsize,
1460
const void *in, size_t inlen)
1462
gcry_err_code_t err;
1466
/* Caller requested in-place encryption. */
1467
/* Actually cipher_encrypt() does not need to know about it, but
1468
* we may change it in the future to get better performance. */
1469
err = cipher_encrypt (h, out, out, outsize);
1471
else if (outsize < ((h->flags & GCRY_CIPHER_CBC_MAC) ?
1472
h->cipher->blocksize : inlen))
1473
err = GPG_ERR_TOO_SHORT;
1474
else if ((h->mode == GCRY_CIPHER_MODE_ECB
1475
|| (h->mode == GCRY_CIPHER_MODE_CBC
1476
&& (! ((h->flags & GCRY_CIPHER_CBC_CTS)
1477
&& (inlen > h->cipher->blocksize)))))
1478
&& (inlen % h->cipher->blocksize))
1479
err = GPG_ERR_INV_ARG;
1481
err = cipher_encrypt (h, out, in, inlen);
1484
memset (out, 0x42, outsize); /* Failsafe: Make sure that the
1485
plaintext will never make it into
1488
return gcry_error (err);
1494
* Decrypt INBUF to OUTBUF with the mode selected at open.
1495
* inbuf and outbuf may overlap or be the same.
1496
* Depending on the mode some some contraints apply to NBYTES.
1498
static gcry_err_code_t
1499
cipher_decrypt (gcry_cipher_hd_t c, byte *outbuf, const byte *inbuf,
1500
unsigned int nbytes)
1502
gcry_err_code_t rc = GPG_ERR_NO_ERROR;
1505
case GCRY_CIPHER_MODE_ECB:
1506
if (!(nbytes%c->cipher->blocksize))
1507
do_ecb_decrypt(c, outbuf, inbuf, nbytes/c->cipher->blocksize );
1509
rc = GPG_ERR_INV_ARG;
1511
case GCRY_CIPHER_MODE_CBC:
1512
if (!(nbytes%c->cipher->blocksize)
1513
|| (nbytes > c->cipher->blocksize
1514
&& (c->flags & GCRY_CIPHER_CBC_CTS)))
1515
do_cbc_decrypt(c, outbuf, inbuf, nbytes );
1517
rc = GPG_ERR_INV_ARG;
1519
case GCRY_CIPHER_MODE_CFB:
1520
do_cfb_decrypt(c, outbuf, inbuf, nbytes );
1522
case GCRY_CIPHER_MODE_OFB:
1523
do_ofb_decrypt(c, outbuf, inbuf, nbytes );
1525
case GCRY_CIPHER_MODE_CTR:
1526
do_ctr_decrypt(c, outbuf, inbuf, nbytes );
1528
case GCRY_CIPHER_MODE_STREAM:
1529
c->cipher->stdecrypt ( &c->context.c,
1530
outbuf, (byte*)/*arggg*/inbuf, nbytes );
1532
case GCRY_CIPHER_MODE_NONE:
1533
if (fips_mode () || !_gcry_get_debug_flag (0))
1535
fips_signal_error ("cipher mode NONE used");
1536
rc = GPG_ERR_INV_CIPHER_MODE;
1540
if (inbuf != outbuf)
1541
memmove (outbuf, inbuf, nbytes);
1545
log_fatal ("cipher_decrypt: invalid mode %d\n", c->mode );
1546
rc = GPG_ERR_INV_CIPHER_MODE;
1554
gcry_cipher_decrypt (gcry_cipher_hd_t h, void *out, size_t outsize,
1555
const void *in, size_t inlen)
1557
gcry_err_code_t err = 0;
1561
/* Caller requested in-place encryption. */
1562
/* Actually cipher_encrypt() does not need to know about it, but
1563
* we may change it in the future to get better performance. */
1564
err = cipher_decrypt (h, out, out, outsize);
1566
else if (outsize < inlen)
1567
err = GPG_ERR_TOO_SHORT;
1568
else if (((h->mode == GCRY_CIPHER_MODE_ECB)
1569
|| ((h->mode == GCRY_CIPHER_MODE_CBC)
1570
&& (! ((h->flags & GCRY_CIPHER_CBC_CTS)
1571
&& (inlen > h->cipher->blocksize)))))
1572
&& (inlen % h->cipher->blocksize) != 0)
1573
err = GPG_ERR_INV_ARG;
1575
err = cipher_decrypt (h, out, in, inlen);
1577
return gcry_error (err);
1583
* Used for PGP's somewhat strange CFB mode. Only works if
1584
* the corresponding flag is set.
1587
cipher_sync (gcry_cipher_hd_t c)
1589
if ((c->flags & GCRY_CIPHER_ENABLE_SYNC) && c->unused)
1591
memmove (c->u_iv.iv + c->unused,
1592
c->u_iv.iv, c->cipher->blocksize - c->unused);
1594
c->lastiv + c->cipher->blocksize - c->unused, c->unused);
1601
_gcry_cipher_setkey (gcry_cipher_hd_t hd, const void *key, size_t keylen)
1603
return cipher_setkey (hd, (void*)key, keylen);
1608
_gcry_cipher_setiv (gcry_cipher_hd_t hd, const void *iv, size_t ivlen)
1610
cipher_setiv (hd, iv, ivlen);
1614
/* Set counter for CTR mode. (CTR,CTRLEN) must denote a buffer of
1615
block size length, or (NULL,0) to set the CTR to the all-zero
1618
_gcry_cipher_setctr (gcry_cipher_hd_t hd, const void *ctr, size_t ctrlen)
1620
if (ctr && ctrlen == hd->cipher->blocksize)
1621
memcpy (hd->ctr, ctr, hd->cipher->blocksize);
1622
else if (!ctr || !ctrlen)
1623
memset (hd->ctr, 0, hd->cipher->blocksize);
1625
return gpg_error (GPG_ERR_INV_ARG);
1631
gcry_cipher_ctl( gcry_cipher_hd_t h, int cmd, void *buffer, size_t buflen)
1633
gcry_err_code_t rc = GPG_ERR_NO_ERROR;
1637
case GCRYCTL_SET_KEY: /* Deprecated; use gcry_cipher_setkey. */
1638
rc = cipher_setkey( h, buffer, buflen );
1641
case GCRYCTL_SET_IV: /* Deprecated; use gcry_cipher_setiv. */
1642
cipher_setiv( h, buffer, buflen );
1649
case GCRYCTL_CFB_SYNC:
1653
case GCRYCTL_SET_CBC_CTS:
1655
if (h->flags & GCRY_CIPHER_CBC_MAC)
1656
rc = GPG_ERR_INV_FLAG;
1658
h->flags |= GCRY_CIPHER_CBC_CTS;
1660
h->flags &= ~GCRY_CIPHER_CBC_CTS;
1663
case GCRYCTL_SET_CBC_MAC:
1665
if (h->flags & GCRY_CIPHER_CBC_CTS)
1666
rc = GPG_ERR_INV_FLAG;
1668
h->flags |= GCRY_CIPHER_CBC_MAC;
1670
h->flags &= ~GCRY_CIPHER_CBC_MAC;
1673
case GCRYCTL_DISABLE_ALGO:
1674
/* This command expects NULL for H and BUFFER to point to an
1675
integer with the algo number. */
1676
if( h || !buffer || buflen != sizeof(int) )
1677
return gcry_error (GPG_ERR_CIPHER_ALGO);
1678
disable_cipher_algo( *(int*)buffer );
1681
case GCRYCTL_SET_CTR: /* Deprecated; use gcry_cipher_setctr. */
1682
if (buffer && buflen == h->cipher->blocksize)
1683
memcpy (h->ctr, buffer, h->cipher->blocksize);
1684
else if (buffer == NULL || buflen == 0)
1685
memset (h->ctr, 0, h->cipher->blocksize);
1687
rc = GPG_ERR_INV_ARG;
1690
case 61: /* Disable weak key detection (private). */
1691
if (h->extraspec->set_extra_info)
1692
rc = h->extraspec->set_extra_info
1693
(&h->context.c, CIPHER_INFO_NO_WEAK_KEY, NULL, 0);
1695
rc = GPG_ERR_NOT_SUPPORTED;
1698
case 62: /* Return current input vector (private). */
1699
/* This is the input block as used in CFB and OFB mode which has
1700
initially been set as IV. The returned format is:
1701
1 byte Actual length of the block in bytes.
1703
If the provided buffer is too short, an error is returned. */
1704
if (buflen < (1 + h->cipher->blocksize))
1705
rc = GPG_ERR_TOO_SHORT;
1709
unsigned char *dst = buffer;
1713
n = h->cipher->blocksize;
1714
gcry_assert (n <= h->cipher->blocksize);
1716
ivp = h->u_iv.iv + h->cipher->blocksize - n;
1723
rc = GPG_ERR_INV_OP;
1726
return gcry_error (rc);
1730
/* Return information about the cipher handle H. CMD is the kind of
1731
information requested. BUFFER and NBYTES are reserved for now.
1733
There are no values for CMD yet defined.
1735
The fucntion always returns GPG_ERR_INV_OP.
1739
gcry_cipher_info (gcry_cipher_hd_t h, int cmd, void *buffer, size_t *nbytes)
1741
gcry_err_code_t err = GPG_ERR_NO_ERROR;
1750
err = GPG_ERR_INV_OP;
1753
return gcry_error (err);
1756
/* Return information about the given cipher algorithm ALGO.
1758
WHAT select the kind of information returned:
1761
Return the length of the key. If the algorithm ALGO
1762
supports multiple key lengths, the maximum supported key length
1763
is returned. The key length is returned as number of octets.
1764
BUFFER and NBYTES must be zero.
1767
Return the blocklength of the algorithm ALGO counted in octets.
1768
BUFFER and NBYTES must be zero.
1771
Returns 0 if the specified algorithm ALGO is available for use.
1772
BUFFER and NBYTES must be zero.
1774
Note: Because this function is in most cases used to return an
1775
integer value, we can make it easier for the caller to just look at
1776
the return value. The caller will in all cases consult the value
1777
and thereby detecting whether a error occured or not (i.e. while
1778
checking the block size)
1781
gcry_cipher_algo_info (int algo, int what, void *buffer, size_t *nbytes)
1783
gcry_err_code_t err = GPG_ERR_NO_ERROR;
1788
case GCRYCTL_GET_KEYLEN:
1789
if (buffer || (! nbytes))
1790
err = GPG_ERR_CIPHER_ALGO;
1793
ui = cipher_get_keylen (algo);
1794
if ((ui > 0) && (ui <= 512))
1795
*nbytes = (size_t) ui / 8;
1797
/* The only reason is an invalid algo or a strange
1799
err = GPG_ERR_CIPHER_ALGO;
1803
case GCRYCTL_GET_BLKLEN:
1804
if (buffer || (! nbytes))
1805
err = GPG_ERR_CIPHER_ALGO;
1808
ui = cipher_get_blocksize (algo);
1809
if ((ui > 0) && (ui < 10000))
1812
/* The only reason is an invalid algo or a strange
1814
err = GPG_ERR_CIPHER_ALGO;
1818
case GCRYCTL_TEST_ALGO:
1819
if (buffer || nbytes)
1820
err = GPG_ERR_INV_ARG;
1822
err = check_cipher_algo (algo);
1826
err = GPG_ERR_INV_OP;
1829
return gcry_error (err);
1833
/* This function returns length of the key for algorithm ALGO. If the
1834
algorithm supports multiple key lengths, the maximum supported key
1835
length is returned. On error 0 is returned. The key length is
1836
returned as number of octets.
1838
This is a convenience functions which should be preferred over
1839
gcry_cipher_algo_info because it allows for proper type
1842
gcry_cipher_get_algo_keylen (int algo)
1846
if (gcry_cipher_algo_info (algo, GCRYCTL_GET_KEYLEN, NULL, &n))
1851
/* This functions returns the blocklength of the algorithm ALGO
1852
counted in octets. On error 0 is returned.
1854
This is a convenience functions which should be preferred over
1855
gcry_cipher_algo_info because it allows for proper type
1858
gcry_cipher_get_algo_blklen (int algo)
1862
if (gcry_cipher_algo_info( algo, GCRYCTL_GET_BLKLEN, NULL, &n))
1867
/* Explicitly initialize this module. */
1869
_gcry_cipher_init (void)
1871
gcry_err_code_t err = GPG_ERR_NO_ERROR;
1873
REGISTER_DEFAULT_CIPHERS;
1878
/* Get a list consisting of the IDs of the loaded cipher modules. If
1879
LIST is zero, write the number of loaded cipher modules to
1880
LIST_LENGTH and return. If LIST is non-zero, the first
1881
*LIST_LENGTH algorithm IDs are stored in LIST, which must be of
1882
according size. In case there are less cipher modules than
1883
*LIST_LENGTH, *LIST_LENGTH is updated to the correct number. */
1885
gcry_cipher_list (int *list, int *list_length)
1887
gcry_err_code_t err = GPG_ERR_NO_ERROR;
1889
ath_mutex_lock (&ciphers_registered_lock);
1890
err = _gcry_module_list (ciphers_registered, list, list_length);
1891
ath_mutex_unlock (&ciphers_registered_lock);
1897
/* Run the selftests for cipher algorithm ALGO with optional reporting
1900
_gcry_cipher_selftest (int algo, int extended, selftest_report_func_t report)
1902
gcry_module_t module = NULL;
1903
cipher_extra_spec_t *extraspec = NULL;
1904
gcry_err_code_t ec = 0;
1906
REGISTER_DEFAULT_CIPHERS;
1908
ath_mutex_lock (&ciphers_registered_lock);
1909
module = _gcry_module_lookup_id (ciphers_registered, algo);
1910
if (module && !(module->flags & FLAG_MODULE_DISABLED))
1911
extraspec = module->extraspec;
1912
ath_mutex_unlock (&ciphers_registered_lock);
1913
if (extraspec && extraspec->selftest)
1914
ec = extraspec->selftest (algo, extended, report);
1917
ec = GPG_ERR_CIPHER_ALGO;
1919
report ("cipher", algo, "module",
1920
module && !(module->flags & FLAG_MODULE_DISABLED)?
1921
"no selftest available" :
1922
module? "algorithm disabled" : "algorithm not found");
1927
ath_mutex_lock (&ciphers_registered_lock);
1928
_gcry_module_release (module);
1929
ath_mutex_unlock (&ciphers_registered_lock);
1931
return gpg_error (ec);