~ubuntu-branches/ubuntu/lucid/openssl/lucid-security

« back to all changes in this revision

Viewing changes to crypto/engine/eng_padlock.c

  • Committer: Bazaar Package Importer
  • Author(s): Kurt Roeckx
  • Date: 2006-05-15 16:00:58 UTC
  • mto: (11.1.1 lenny)
  • mto: This revision was merged to the branch mainline in revision 6.
  • Revision ID: james.westby@ubuntu.com-20060515160058-pg6lnbkkpkwpdj2e
Tags: upstream-0.9.8b
Import upstream version 0.9.8b

Show diffs side-by-side

added added

removed removed

Lines of Context:
257
257
        union { unsigned int pad[4];
258
258
                struct {
259
259
                        int rounds:4;
260
 
                        int algo:3;
261
 
                        int keygen:1;
 
260
                        int dgst:1;     /* n/a in C3 */
 
261
                        int align:1;    /* n/a in C3 */
 
262
                        int ciphr:1;    /* n/a in C3 */
 
263
                        unsigned int keygen:1;
262
264
                        int interm:1;
263
 
                        int encdec:1;
 
265
                        unsigned int encdec:1;
264
266
                        int ksize:2;
265
267
                } b;
266
268
        } cword;                /* Control word */
650
652
 
651
653
        NID_aes_192_ecb,
652
654
        NID_aes_192_cbc,
653
 
#if 0
654
 
        NID_aes_192_cfb,        /* FIXME: AES192/256 CFB/OFB don't work. */
 
655
        NID_aes_192_cfb,
655
656
        NID_aes_192_ofb,
656
 
#endif
657
657
 
658
658
        NID_aes_256_ecb,
659
659
        NID_aes_256_cbc,
660
 
#if 0
661
660
        NID_aes_256_cfb,
662
661
        NID_aes_256_ofb,
663
 
#endif
664
662
};
665
663
static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids)/
666
664
                                      sizeof(padlock_cipher_nids[0]));
676
674
#define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)\
677
675
        NEAREST_ALIGNED(ctx->cipher_data))
678
676
 
 
677
#define EVP_CIPHER_block_size_ECB       AES_BLOCK_SIZE
 
678
#define EVP_CIPHER_block_size_CBC       AES_BLOCK_SIZE
 
679
#define EVP_CIPHER_block_size_OFB       1
 
680
#define EVP_CIPHER_block_size_CFB       1
 
681
 
679
682
/* Declaring so many ciphers by hand would be a pain.
680
683
   Instead introduce a bit of preprocessor magic :-) */
681
684
#define DECLARE_AES_EVP(ksize,lmode,umode)      \
682
685
static const EVP_CIPHER padlock_aes_##ksize##_##lmode = {       \
683
686
        NID_aes_##ksize##_##lmode,              \
684
 
        AES_BLOCK_SIZE,                 \
 
687
        EVP_CIPHER_block_size_##umode,  \
685
688
        AES_KEY_SIZE_##ksize,           \
686
689
        AES_BLOCK_SIZE,                 \
687
690
        0 | EVP_CIPH_##umode##_MODE,    \
783
786
        memset(cdata, 0, sizeof(struct padlock_cipher_data));
784
787
 
785
788
        /* Prepare Control word. */
786
 
        cdata->cword.b.encdec = (ctx->encrypt == 0);
 
789
        if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE)
 
790
                cdata->cword.b.encdec = 0;
 
791
        else
 
792
                cdata->cword.b.encdec = (ctx->encrypt == 0);
787
793
        cdata->cword.b.rounds = 10 + (key_len - 128) / 32;
788
794
        cdata->cword.b.ksize = (key_len - 128) / 64;
789
795
 
803
809
                           and is listed as hardware errata. They most
804
810
                           likely will fix it at some point and then
805
811
                           a check for stepping would be due here. */
806
 
                        if (enc)
 
812
                        if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_CFB_MODE ||
 
813
                            EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE ||
 
814
                            enc)
807
815
                                AES_set_encrypt_key(key, key_len, &cdata->ks);
808
816
                        else
809
817
                                AES_set_decrypt_key(key, key_len, &cdata->ks);
878
886
}
879
887
 
880
888
#ifndef  PADLOCK_CHUNK
881
 
# define PADLOCK_CHUNK  4096    /* Must be a power of 2 larger than 16 */
 
889
# define PADLOCK_CHUNK  512     /* Must be a power of 2 larger than 16 */
882
890
#endif
883
891
#if PADLOCK_CHUNK<16 || PADLOCK_CHUNK&(PADLOCK_CHUNK-1)
884
892
# error "insane PADLOCK_CHUNK..."
897
905
        int    inp_misaligned, out_misaligned, realign_in_loop;
898
906
        size_t chunk, allocated=0;
899
907
 
 
908
        /* ctx->num is maintained in byte-oriented modes,
 
909
           such as CFB and OFB... */
 
910
        if ((chunk = ctx->num)) { /* borrow chunk variable */
 
911
                unsigned char *ivp=ctx->iv;
 
912
 
 
913
                switch (EVP_CIPHER_CTX_mode(ctx)) {
 
914
                case EVP_CIPH_CFB_MODE:
 
915
                        if (chunk >= AES_BLOCK_SIZE)
 
916
                                return 0; /* bogus value */
 
917
 
 
918
                        if (ctx->encrypt)
 
919
                                while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
 
920
                                        ivp[chunk] = *(out_arg++) = *(in_arg++) ^ ivp[chunk];
 
921
                                        chunk++, nbytes--;
 
922
                                }
 
923
                        else    while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
 
924
                                        unsigned char c = *(in_arg++);
 
925
                                        *(out_arg++) = c ^ ivp[chunk];
 
926
                                        ivp[chunk++] = c, nbytes--;
 
927
                                }
 
928
 
 
929
                        ctx->num = chunk%AES_BLOCK_SIZE;
 
930
                        break;
 
931
                case EVP_CIPH_OFB_MODE:
 
932
                        if (chunk >= AES_BLOCK_SIZE)
 
933
                                return 0; /* bogus value */
 
934
 
 
935
                        while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
 
936
                                *(out_arg++) = *(in_arg++) ^ ivp[chunk];
 
937
                                chunk++, nbytes--;
 
938
                        }
 
939
 
 
940
                        ctx->num = chunk%AES_BLOCK_SIZE;
 
941
                        break;
 
942
                }
 
943
        }
 
944
 
900
945
        if (nbytes == 0)
901
946
                return 1;
 
947
#if 0
902
948
        if (nbytes % AES_BLOCK_SIZE)
903
949
                return 0; /* are we expected to do tail processing? */
 
950
#else
 
951
        /* nbytes is always multiple of AES_BLOCK_SIZE in ECB and CBC
 
952
           modes and arbitrary value in byte-oriented modes, such as
 
953
           CFB and OFB... */
 
954
#endif
904
955
 
905
956
        /* VIA promises CPUs that won't require alignment in the future.
906
957
           For now padlock_aes_align_required is initialized to 1 and
907
958
           the condition is never met... */
908
 
        if (!padlock_aes_align_required)
 
959
        /* C7 core is capable to manage unaligned input in non-ECB[!]
 
960
           mode, but performance penalties appear to be approximately
 
961
           same as for software alignment below or ~3x. They promise to
 
962
           improve it in the future, but for now we can just as well
 
963
           pretend that it can only handle aligned input... */
 
964
        if (!padlock_aes_align_required && (nbytes%AES_BLOCK_SIZE)==0)
909
965
                return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg, nbytes);
910
966
 
911
967
        inp_misaligned = (((size_t)in_arg) & 0x0F);
917
973
         * in order to improve L1 cache utilization... */
918
974
        realign_in_loop = out_misaligned|inp_misaligned;
919
975
 
920
 
        if (!realign_in_loop)
 
976
        if (!realign_in_loop && (nbytes%AES_BLOCK_SIZE)==0)
921
977
                return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg, nbytes);
922
978
 
923
979
        /* this takes one "if" out of the loops */
984
1040
                break;
985
1041
 
986
1042
        case EVP_CIPH_CFB_MODE:
987
 
                memcpy (cdata->iv, ctx->iv, AES_BLOCK_SIZE);
988
 
                goto cfb_shortcut;
 
1043
                memcpy (iv = cdata->iv, ctx->iv, AES_BLOCK_SIZE);
 
1044
                chunk &= ~(AES_BLOCK_SIZE-1);
 
1045
                if (chunk)      goto cfb_shortcut;
 
1046
                else            goto cfb_skiploop;
989
1047
                do      {
990
1048
                        if (iv != cdata->iv)
991
1049
                                memcpy(cdata->iv, iv, AES_BLOCK_SIZE);
1004
1062
                        else
1005
1063
                                out     = out_arg+=chunk;
1006
1064
 
1007
 
                } while (nbytes -= chunk);
 
1065
                        nbytes -= chunk;
 
1066
                } while (nbytes >= AES_BLOCK_SIZE);
 
1067
 
 
1068
                cfb_skiploop:
 
1069
                if (nbytes) {
 
1070
                        unsigned char *ivp = cdata->iv;
 
1071
 
 
1072
                        if (iv != ivp) {
 
1073
                                memcpy(ivp, iv, AES_BLOCK_SIZE);
 
1074
                                iv = ivp;
 
1075
                        }
 
1076
                        ctx->num = nbytes;
 
1077
                        if (cdata->cword.b.encdec) {
 
1078
                                cdata->cword.b.encdec=0;
 
1079
                                padlock_reload_key();
 
1080
                                padlock_xcrypt_ecb(1,cdata,ivp,ivp);
 
1081
                                cdata->cword.b.encdec=1;
 
1082
                                padlock_reload_key();
 
1083
                                while(nbytes) {
 
1084
                                        unsigned char c = *(in_arg++);
 
1085
                                        *(out_arg++) = c ^ *ivp;
 
1086
                                        *(ivp++) = c, nbytes--;
 
1087
                                }
 
1088
                        }
 
1089
                        else {  padlock_reload_key();
 
1090
                                padlock_xcrypt_ecb(1,cdata,ivp,ivp);
 
1091
                                padlock_reload_key();
 
1092
                                while (nbytes) {
 
1093
                                        *ivp = *(out_arg++) = *(in_arg++) ^ *ivp;
 
1094
                                        ivp++, nbytes--;
 
1095
                                }
 
1096
                        }
 
1097
                }
 
1098
 
1008
1099
                memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
1009
1100
                break;
1010
1101
 
1011
1102
        case EVP_CIPH_OFB_MODE:
1012
1103
                memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
1013
 
                do      {
 
1104
                chunk &= ~(AES_BLOCK_SIZE-1);
 
1105
                if (chunk) do   {
1014
1106
                        if (inp_misaligned)
1015
1107
                                inp = padlock_memcpy(out, in_arg, chunk);
1016
1108
                        else
1026
1118
 
1027
1119
                        nbytes -= chunk;
1028
1120
                        chunk   = PADLOCK_CHUNK;
1029
 
                } while (nbytes);
 
1121
                } while (nbytes >= AES_BLOCK_SIZE);
 
1122
 
 
1123
                if (nbytes) {
 
1124
                        unsigned char *ivp = cdata->iv;
 
1125
 
 
1126
                        ctx->num = nbytes;
 
1127
                        padlock_reload_key();   /* empirically found */
 
1128
                        padlock_xcrypt_ecb(1,cdata,ivp,ivp);
 
1129
                        padlock_reload_key();   /* empirically found */
 
1130
                        while (nbytes) {
 
1131
                                *(out_arg++) = *(in_arg++) ^ *ivp;
 
1132
                                ivp++, nbytes--;
 
1133
                        }
 
1134
                }
 
1135
 
1030
1136
                memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE);
1031
1137
                break;
1032
1138