#if CONFIG_SMALL
static uint32_t enc_multbl[1][256];
static uint32_t dec_multbl[1][256];
#else
static uint32_t enc_multbl[4][256];
static uint32_t dec_multbl[4][256];
#endif
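/* Rotate a packed 32-bit column. The direction depends on byte order so
 * that the same table layout works on big- and little-endian hosts. */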
#if HAVE_BIGENDIAN
#   define ROT(x, s) ((x >> s) | (x << (32-s)))
#else
#   define ROT(x, s) ((x << s) | (x >> (32-s)))
#endif
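
/* AddRoundKey: XOR a 16-byte block with a round key. addkey() works on
 * aligned av_aes_block operands; the _s/_d variants below read from and
 * write to possibly unaligned byte buffers via AV_RN64/AV_WN64. */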
static inline void addkey(av_aes_block *dst, const av_aes_block *src,
                          const av_aes_block *round_key)
{
    dst->u64[0] = src->u64[0] ^ round_key->u64[0];
    dst->u64[1] = src->u64[1] ^ round_key->u64[1];
}
static inline void addkey_s(av_aes_block *dst, const uint8_t *src,
                            const av_aes_block *round_key)
{
    dst->u64[0] = AV_RN64(src)     ^ round_key->u64[0];
    dst->u64[1] = AV_RN64(src + 8) ^ round_key->u64[1];
}
static inline void addkey_d(uint8_t *dst, const av_aes_block *src,
                            const av_aes_block *round_key)
{
    AV_WN64(dst,     src->u64[0] ^ round_key->u64[0]);
    AV_WN64(dst + 8, src->u64[1] ^ round_key->u64[1]);
}
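
/* Combined SubBytes + ShiftRows: substitute state[1] through box while
 * rotating the rows, writing the result into state[0]. The offset s1/s3
 * aliases implement the row shifts; s selects the direction (2 for
 * encryption, 0 for decryption). */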
static void subshift(av_aes_block s0[2], int s, const uint8_t *box)
{
    av_aes_block *s1 = (av_aes_block *) (s0[0].u8 - s);
    av_aes_block *s3 = (av_aes_block *) (s0[0].u8 + s);

    s0[0].u8[ 0] = box[s0[1].u8[ 0]];
    s0[0].u8[ 4] = box[s0[1].u8[ 4]];
    s0[0].u8[ 8] = box[s0[1].u8[ 8]];
    s0[0].u8[12] = box[s0[1].u8[12]];
    s1[0].u8[ 3] = box[s1[1].u8[ 7]];
    s1[0].u8[ 7] = box[s1[1].u8[11]];
    s1[0].u8[11] = box[s1[1].u8[15]];
    s1[0].u8[15] = box[s1[1].u8[ 3]];
    s0[0].u8[ 2] = box[s0[1].u8[10]];
    s0[0].u8[10] = box[s0[1].u8[ 2]];
    s0[0].u8[ 6] = box[s0[1].u8[14]];
    s0[0].u8[14] = box[s0[1].u8[ 6]];
    s3[0].u8[ 1] = box[s3[1].u8[13]];
    s3[0].u8[13] = box[s3[1].u8[ 9]];
    s3[0].u8[ 9] = box[s3[1].u8[ 5]];
    s3[0].u8[ 5] = box[s3[1].u8[ 1]];
}
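
/* One table lookup per state byte: each entry packs the S-box output
 * multiplied by the four (Inv)MixColumns coefficients into one 32-bit
 * word. With CONFIG_SMALL only table 0 is stored and the other three
 * columns are derived by rotation. */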
static inline int mix_core(uint32_t multbl[][256], int a, int b, int c, int d)
{
#if CONFIG_SMALL
    return multbl[0][a] ^ ROT(multbl[0][b], 8) ^ ROT(multbl[0][c], 16) ^ ROT(multbl[0][d], 24);
#else
    return multbl[0][a] ^ multbl[1][b] ^ multbl[2][c] ^ multbl[3][d];
#endif
}
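
/* Apply mix_core() to all four output words; s1 and s3 carry the
 * ShiftRows offsets (their roles swap between encryption and
 * decryption). */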
static inline void mix(av_aes_block state[2], uint32_t multbl[][256], int s1,
                       int s3)
{
    uint8_t (*src)[4] = state[1].u8x4;

    state[0].u32[0] = mix_core(multbl, src[0][0], src[s1    ][1], src[2][2], src[s3    ][3]);
    state[0].u32[1] = mix_core(multbl, src[1][0], src[s3 - 1][1], src[3][2], src[s1 - 1][3]);
    state[0].u32[2] = mix_core(multbl, src[2][0], src[s3    ][1], src[0][2], src[s1    ][3]);
    state[0].u32[3] = mix_core(multbl, src[3][0], src[s1 - 1][1], src[1][2], src[s3 - 1][3]);
}
static inline void crypt(AVAES *a, int s, const uint8_t *sbox,
                         uint32_t multbl[][256])
{
    int r;

    for (r = a->rounds - 1; r > 0; r--) {
        mix(a->state, multbl, 3 - s, 1 + s);
        addkey(&a->state[1], &a->state[0], &a->round_key[r]);
    }

    subshift(&a->state[0], s, sbox);
}
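/* En/decrypt count 16-byte blocks from src into dst. A non-NULL iv
 * enables CBC chaining and is updated in place; iv == NULL gives ECB. */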
void av_aes_crypt(AVAES *a, uint8_t *dst, const uint8_t *src,
                  int count, uint8_t *iv, int decrypt)
{
    while (count--) {
        addkey_s(&a->state[1], src, &a->round_key[a->rounds]);
        if (decrypt) {
            crypt(a, 0, inv_sbox, dec_multbl);
            if (iv) {
                addkey_s(&a->state[0], iv, &a->state[0]);
                memcpy(iv, src, 16);
            }
            addkey_d(dst, &a->state[0], &a->round_key[0]);
        } else {
            if (iv)
                addkey_s(&a->state[1], iv, &a->state[1]);
            crypt(a, 2, sbox, enc_multbl);
            addkey_d(dst, &a->state[0], &a->round_key[0]);
            if (iv)
                memcpy(iv, dst, 16);
        }
        src += 16;
        dst += 16;
    }
}
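
/*
 * Minimal usage sketch (illustration only, not part of the file); key,
 * src and dst are caller-provided buffers. Encrypt two 16-byte blocks
 * in CBC mode with a 128-bit key:
 *
 *     AVAES ctx;
 *     uint8_t iv[16] = { 0 };
 *     av_aes_init(&ctx, key, 128, 0);         // 0 = prepare for encryption
 *     av_aes_crypt(&ctx, dst, src, 2, iv, 0); // 2 blocks, chained via iv
 *
 * Passing iv == NULL processes each block independently (ECB).
 */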
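/* Fill one set of round tables: c[] holds the (Inv)MixColumns column
 * coefficients, multiplied in GF(2^8) via the log/antilog tables. */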
static void init_multbl2(uint32_t tbl[][256], const int c[4],
                         const uint8_t *log8, const uint8_t *alog8,
                         const uint8_t *sbox)
{
    int i;

    for (i = 0; i < 256; i++) {
        int x = sbox[i];
        if (x) {
            int k, l, m, n;
            x = log8[x];
            k = alog8[x + log8[c[0]]];
            l = alog8[x + log8[c[1]]];
            m = alog8[x + log8[c[2]]];
            n = alog8[x + log8[c[3]]];
            tbl[0][i] = AV_NE(MKBETAG(k, l, m, n), MKTAG(k, l, m, n));
#if !CONFIG_SMALL
            tbl[1][i] = ROT(tbl[0][i], 8);
            tbl[2][i] = ROT(tbl[0][i], 16);
            tbl[3][i] = ROT(tbl[0][i], 24);
#endif
        }
    }
}

// this is based on the reference AES code by Paulo Barreto and Vincent Rijmen
int av_aes_init(AVAES *a, const uint8_t *key, int key_bits, int decrypt)
{
    int i, j, t, rconpointer = 0;
    uint8_t tk[8][4];
    int KC = key_bits >> 5;
    int rounds = KC + 6;
    uint8_t log8[256];
    uint8_t alog8[512];

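    /* Build the GF(2^8) log/antilog tables, both S-boxes and the round
     * tables once, on the first call. */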
    if (!enc_multbl[FF_ARRAY_ELEMS(enc_multbl) - 1][FF_ARRAY_ELEMS(enc_multbl[0]) - 1]) {
        j = 1;
        for (i = 0; i < 255; i++) {
            alog8[i] = alog8[i + 255] = j;
            log8[j]  = i;
            j       ^= j + j;
            if (j > 255)
                j ^= 0x11B;
        }
        for (i = 0; i < 256; i++) {
            j  = i ? alog8[255 - log8[i]] : 0;
            j ^= (j << 1) ^ (j << 2) ^ (j << 3) ^ (j << 4);
            j  = (j ^ (j >> 8) ^ 99) & 255;
            inv_sbox[j] = i;
            sbox[i]     = j;
        }
        init_multbl2(dec_multbl, (const int[4]) { 0xe, 0x9, 0xd, 0xb },
                     log8, alog8, inv_sbox);
        init_multbl2(enc_multbl, (const int[4]) { 0x2, 0x1, 0x1, 0x3 },
                     log8, alog8, sbox);
    }

    if (key_bits != 128 && key_bits != 192 && key_bits != 256)
        return -1;

    a->rounds = rounds;

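    /* Rijndael key schedule: expand the cipher key into one 16-byte
     * round key per round, KC 32-bit words at a time. */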
    memcpy(tk, key, KC * 4);
    memcpy(a->round_key[0].u8, key, KC * 4);

    for (t = KC * 4; t < (rounds + 1) * 16; t += KC * 4) {
        for (i = 0; i < 4; i++)
            tk[0][i] ^= sbox[tk[KC - 1][(i + 1) & 3]];
        tk[0][0] ^= rcon[rconpointer++];

        for (j = 1; j < KC; j++) {
            if (KC != 8 || j != KC >> 1)
                for (i = 0; i < 4; i++)
                    tk[j][i] ^= tk[j - 1][i];
            else
                for (i = 0; i < 4; i++)
                    tk[j][i] ^= sbox[tk[j - 1][i]];
        }

        memcpy(a->round_key[0].u8 + t, tk, KC * 4);
    }

    if (decrypt) {
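        /* Transform the middle round keys with InvMixColumns (via the
         * subshift and mix helpers) so decryption can reuse the same
         * table-driven round core as encryption. */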
        for (i = 1; i < rounds; i++) {
            av_aes_block tmp[3];

            tmp[2] = a->round_key[i];
            subshift(&tmp[1], 0, sbox);
            mix(tmp, dec_multbl, 1, 3);
            a->round_key[i] = tmp[0];
        }
    } else {
        for (i = 0; i < (rounds + 1) >> 1; i++) {
            FFSWAP(av_aes_block, a->round_key[i], a->round_key[rounds - i]);
        }
    }

    return 0;
}

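/* Self-test: a known-answer decryption check, plus (with -t) a 10000
 * iteration random encrypt/decrypt round trip. */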
#ifdef TEST
#include <string.h>

#include "lfg.h"
#include "log.h"

int main(int argc, char **argv)
{
    int i, j;
    int err = 0;
    AVAES ae, ad, b;
    AVLFG prng;
    uint8_t rkey[2][16] = {
        { 0 },
        { 0x10, 0xa5, 0x88, 0x69, 0xd7, 0x4b, 0xe5, 0xa3,
          0x74, 0xcf, 0x86, 0x7c, 0xfb, 0x47, 0x38, 0x59 }
    };
    uint8_t pt[16], rpt[2][16] = {
        { 0x6a, 0x84, 0x86, 0x7c, 0xd7, 0x7e, 0x12, 0xad,
          0x07, 0xea, 0x1b, 0xe8, 0x95, 0xc5, 0x3f, 0xa3 },
        { 0 }
    };
    uint8_t rct[2][16] = {
        { 0x73, 0x22, 0x81, 0xc0, 0xa0, 0xaa, 0xb8, 0xf7,
          0xa5, 0x4a, 0x0c, 0x67, 0xa0, 0xc4, 0x5e, 0xcf },
        { 0x6d, 0x25, 0x1e, 0x69, 0x44, 0xb0, 0x51, 0xe0,
          0x4e, 0xaa, 0x6f, 0xb4, 0xdb, 0xf7, 0x84, 0x65 }
    };
    uint8_t temp[16];

    av_log_set_level(AV_LOG_DEBUG);

    for (i = 0; i < 2; i++) {
        av_aes_init(&b, rkey[i], 128, 1);
        av_aes_crypt(&b, temp, rct[i], 1, NULL, 1);
        for (j = 0; j < 16; j++) {
            if (rpt[i][j] != temp[j]) {
                av_log(NULL, AV_LOG_ERROR, "%d %02X %02X\n",
                       j, rpt[i][j], temp[j]);
                err = 1;
            }
        }
    }

    if (argc > 1 && !strcmp(argv[1], "-t")) {
        av_aes_init(&ae, "PI=3.141592654..", 128, 0);
        av_aes_init(&ad, "PI=3.141592654..", 128, 1);
        av_lfg_init(&prng, 1);

        for (i = 0; i < 10000; i++) {
            for (j = 0; j < 16; j++) {
                pt[j] = av_lfg_get(&prng);
            }
            av_aes_crypt(&ae, temp, pt, 1, NULL, 0);
            if (!(i & (i - 1))) /* log only at powers of two to keep output short */
                av_log(NULL, AV_LOG_ERROR, "%02X %02X %02X %02X\n",
                       temp[0], temp[5], temp[10], temp[15]);
            av_aes_crypt(&ad, temp, temp, 1, NULL, 1);

            for (j = 0; j < 16; j++) {
                if (pt[j] != temp[j]) {
                    av_log(NULL, AV_LOG_ERROR, "%d %d %02X %02X\n",
                           i, j, pt[j], temp[j]);
                }
            }
        }
    }

    return err;
}
#endif /* TEST */