1/*
2 * FIPS-197 compliant AES implementation
3 *
4 * Copyright (C) 2006-2014, Brainspark B.V.
5 *
6 * This file is part of PolarSSL (http://www.polarssl.org)
7 * Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org>
8 *
9 * All rights reserved.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with this program; if not, write to the Free Software Foundation, Inc.,
23 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 */
25/*
26 * The AES block cipher was designed by Vincent Rijmen and Joan Daemen.
27 *
28 * http://csrc.nist.gov/encryption/aes/rijndael/Rijndael.pdf
29 * http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf
30 */
31
32#if !defined(POLARSSL_CONFIG_FILE)
33#include "polarssl_config.h"
34#else
35#include POLARSSL_CONFIG_FILE
36#endif
37
38#if defined(POLARSSL_AES_C)
39
40#include "aes.h"
41#if defined(POLARSSL_PADLOCK_C)
42#include "polarssl/padlock.h"
43#endif
44#if defined(POLARSSL_AESNI_C)
45#include "polarssl/aesni.h"
46#endif
47
48#if defined(POLARSSL_PLATFORM_C)
49#include "polarssl/platform.h"
50#else
51#define polarssl_printf printf
52#endif
53
54#if !defined(POLARSSL_AES_ALT)
55
56/* Implementation that should never be optimized out by the compiler */
57static void polarssl_zeroize( void *v, size_t n ) {
58 volatile unsigned char *p = v; while( n-- ) *p++ = 0;
59}
60
61/*
62 * 32-bit integer manipulation macros (little endian)
63 */
64#ifndef GET_UINT32_LE
65#define GET_UINT32_LE(n,b,i) \
66{ \
67 (n) = ( (uint32_t) (b)[(i) ] ) \
68 | ( (uint32_t) (b)[(i) + 1] << 8 ) \
69 | ( (uint32_t) (b)[(i) + 2] << 16 ) \
70 | ( (uint32_t) (b)[(i) + 3] << 24 ); \
71}
72#endif
73
74#ifndef PUT_UINT32_LE
75#define PUT_UINT32_LE(n,b,i) \
76{ \
77 (b)[(i) ] = (unsigned char) ( (n) ); \
78 (b)[(i) + 1] = (unsigned char) ( (n) >> 8 ); \
79 (b)[(i) + 2] = (unsigned char) ( (n) >> 16 ); \
80 (b)[(i) + 3] = (unsigned char) ( (n) >> 24 ); \
81}
82#endif
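/*
 * Example: with b[] = { 0x01, 0x02, 0x03, 0x04 }, GET_UINT32_LE( n, b, 0 )
 * yields n == 0x04030201, and PUT_UINT32_LE( 0x04030201, b, 0 ) writes the
 * same four bytes back, i.e. state words are loaded and stored least
 * significant byte first.
 */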
83
84#if defined(POLARSSL_PADLOCK_C) && \
85 ( defined(POLARSSL_HAVE_X86) || defined(PADLOCK_ALIGN16) )
86static int aes_padlock_ace = -1;
87#endif
88
89#if defined(POLARSSL_AES_ROM_TABLES)
90/*
91 * Forward S-box
92 */
93static const unsigned char FSb[256] =
94{
95 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5,
96 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
97 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0,
98 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
99 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC,
100 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
101 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A,
102 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
103 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0,
104 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
105 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B,
106 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
107 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85,
108 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
109 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5,
110 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
111 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17,
112 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
113 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88,
114 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
115 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C,
116 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
117 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9,
118 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
119 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6,
120 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
121 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E,
122 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
123 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94,
124 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
125 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68,
126 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
127};
128
129/*
130 * Forward tables
131 */
132#define FT \
133\
134 V(A5,63,63,C6), V(84,7C,7C,F8), V(99,77,77,EE), V(8D,7B,7B,F6), \
135 V(0D,F2,F2,FF), V(BD,6B,6B,D6), V(B1,6F,6F,DE), V(54,C5,C5,91), \
136 V(50,30,30,60), V(03,01,01,02), V(A9,67,67,CE), V(7D,2B,2B,56), \
137 V(19,FE,FE,E7), V(62,D7,D7,B5), V(E6,AB,AB,4D), V(9A,76,76,EC), \
138 V(45,CA,CA,8F), V(9D,82,82,1F), V(40,C9,C9,89), V(87,7D,7D,FA), \
139 V(15,FA,FA,EF), V(EB,59,59,B2), V(C9,47,47,8E), V(0B,F0,F0,FB), \
140 V(EC,AD,AD,41), V(67,D4,D4,B3), V(FD,A2,A2,5F), V(EA,AF,AF,45), \
141 V(BF,9C,9C,23), V(F7,A4,A4,53), V(96,72,72,E4), V(5B,C0,C0,9B), \
142 V(C2,B7,B7,75), V(1C,FD,FD,E1), V(AE,93,93,3D), V(6A,26,26,4C), \
143 V(5A,36,36,6C), V(41,3F,3F,7E), V(02,F7,F7,F5), V(4F,CC,CC,83), \
144 V(5C,34,34,68), V(F4,A5,A5,51), V(34,E5,E5,D1), V(08,F1,F1,F9), \
145 V(93,71,71,E2), V(73,D8,D8,AB), V(53,31,31,62), V(3F,15,15,2A), \
146 V(0C,04,04,08), V(52,C7,C7,95), V(65,23,23,46), V(5E,C3,C3,9D), \
147 V(28,18,18,30), V(A1,96,96,37), V(0F,05,05,0A), V(B5,9A,9A,2F), \
148 V(09,07,07,0E), V(36,12,12,24), V(9B,80,80,1B), V(3D,E2,E2,DF), \
149 V(26,EB,EB,CD), V(69,27,27,4E), V(CD,B2,B2,7F), V(9F,75,75,EA), \
150 V(1B,09,09,12), V(9E,83,83,1D), V(74,2C,2C,58), V(2E,1A,1A,34), \
151 V(2D,1B,1B,36), V(B2,6E,6E,DC), V(EE,5A,5A,B4), V(FB,A0,A0,5B), \
152 V(F6,52,52,A4), V(4D,3B,3B,76), V(61,D6,D6,B7), V(CE,B3,B3,7D), \
153 V(7B,29,29,52), V(3E,E3,E3,DD), V(71,2F,2F,5E), V(97,84,84,13), \
154 V(F5,53,53,A6), V(68,D1,D1,B9), V(00,00,00,00), V(2C,ED,ED,C1), \
155 V(60,20,20,40), V(1F,FC,FC,E3), V(C8,B1,B1,79), V(ED,5B,5B,B6), \
156 V(BE,6A,6A,D4), V(46,CB,CB,8D), V(D9,BE,BE,67), V(4B,39,39,72), \
157 V(DE,4A,4A,94), V(D4,4C,4C,98), V(E8,58,58,B0), V(4A,CF,CF,85), \
158 V(6B,D0,D0,BB), V(2A,EF,EF,C5), V(E5,AA,AA,4F), V(16,FB,FB,ED), \
159 V(C5,43,43,86), V(D7,4D,4D,9A), V(55,33,33,66), V(94,85,85,11), \
160 V(CF,45,45,8A), V(10,F9,F9,E9), V(06,02,02,04), V(81,7F,7F,FE), \
161 V(F0,50,50,A0), V(44,3C,3C,78), V(BA,9F,9F,25), V(E3,A8,A8,4B), \
162 V(F3,51,51,A2), V(FE,A3,A3,5D), V(C0,40,40,80), V(8A,8F,8F,05), \
163 V(AD,92,92,3F), V(BC,9D,9D,21), V(48,38,38,70), V(04,F5,F5,F1), \
164 V(DF,BC,BC,63), V(C1,B6,B6,77), V(75,DA,DA,AF), V(63,21,21,42), \
165 V(30,10,10,20), V(1A,FF,FF,E5), V(0E,F3,F3,FD), V(6D,D2,D2,BF), \
166 V(4C,CD,CD,81), V(14,0C,0C,18), V(35,13,13,26), V(2F,EC,EC,C3), \
167 V(E1,5F,5F,BE), V(A2,97,97,35), V(CC,44,44,88), V(39,17,17,2E), \
168 V(57,C4,C4,93), V(F2,A7,A7,55), V(82,7E,7E,FC), V(47,3D,3D,7A), \
169 V(AC,64,64,C8), V(E7,5D,5D,BA), V(2B,19,19,32), V(95,73,73,E6), \
170 V(A0,60,60,C0), V(98,81,81,19), V(D1,4F,4F,9E), V(7F,DC,DC,A3), \
171 V(66,22,22,44), V(7E,2A,2A,54), V(AB,90,90,3B), V(83,88,88,0B), \
172 V(CA,46,46,8C), V(29,EE,EE,C7), V(D3,B8,B8,6B), V(3C,14,14,28), \
173 V(79,DE,DE,A7), V(E2,5E,5E,BC), V(1D,0B,0B,16), V(76,DB,DB,AD), \
174 V(3B,E0,E0,DB), V(56,32,32,64), V(4E,3A,3A,74), V(1E,0A,0A,14), \
175 V(DB,49,49,92), V(0A,06,06,0C), V(6C,24,24,48), V(E4,5C,5C,B8), \
176 V(5D,C2,C2,9F), V(6E,D3,D3,BD), V(EF,AC,AC,43), V(A6,62,62,C4), \
177 V(A8,91,91,39), V(A4,95,95,31), V(37,E4,E4,D3), V(8B,79,79,F2), \
178 V(32,E7,E7,D5), V(43,C8,C8,8B), V(59,37,37,6E), V(B7,6D,6D,DA), \
179 V(8C,8D,8D,01), V(64,D5,D5,B1), V(D2,4E,4E,9C), V(E0,A9,A9,49), \
180 V(B4,6C,6C,D8), V(FA,56,56,AC), V(07,F4,F4,F3), V(25,EA,EA,CF), \
181 V(AF,65,65,CA), V(8E,7A,7A,F4), V(E9,AE,AE,47), V(18,08,08,10), \
182 V(D5,BA,BA,6F), V(88,78,78,F0), V(6F,25,25,4A), V(72,2E,2E,5C), \
183 V(24,1C,1C,38), V(F1,A6,A6,57), V(C7,B4,B4,73), V(51,C6,C6,97), \
184 V(23,E8,E8,CB), V(7C,DD,DD,A1), V(9C,74,74,E8), V(21,1F,1F,3E), \
185 V(DD,4B,4B,96), V(DC,BD,BD,61), V(86,8B,8B,0D), V(85,8A,8A,0F), \
186 V(90,70,70,E0), V(42,3E,3E,7C), V(C4,B5,B5,71), V(AA,66,66,CC), \
187 V(D8,48,48,90), V(05,03,03,06), V(01,F6,F6,F7), V(12,0E,0E,1C), \
188 V(A3,61,61,C2), V(5F,35,35,6A), V(F9,57,57,AE), V(D0,B9,B9,69), \
189 V(91,86,86,17), V(58,C1,C1,99), V(27,1D,1D,3A), V(B9,9E,9E,27), \
190 V(38,E1,E1,D9), V(13,F8,F8,EB), V(B3,98,98,2B), V(33,11,11,22), \
191 V(BB,69,69,D2), V(70,D9,D9,A9), V(89,8E,8E,07), V(A7,94,94,33), \
192 V(B6,9B,9B,2D), V(22,1E,1E,3C), V(92,87,87,15), V(20,E9,E9,C9), \
193 V(49,CE,CE,87), V(FF,55,55,AA), V(78,28,28,50), V(7A,DF,DF,A5), \
194 V(8F,8C,8C,03), V(F8,A1,A1,59), V(80,89,89,09), V(17,0D,0D,1A), \
195 V(DA,BF,BF,65), V(31,E6,E6,D7), V(C6,42,42,84), V(B8,68,68,D0), \
196 V(C3,41,41,82), V(B0,99,99,29), V(77,2D,2D,5A), V(11,0F,0F,1E), \
197 V(CB,B0,B0,7B), V(FC,54,54,A8), V(D6,BB,BB,6D), V(3A,16,16,2C)
198
199#define V(a,b,c,d) 0x##a##b##c##d
200static const uint32_t FT0[256] = { FT };
201#undef V
202
203#define V(a,b,c,d) 0x##b##c##d##a
204static const uint32_t FT1[256] = { FT };
205#undef V
206
207#define V(a,b,c,d) 0x##c##d##a##b
208static const uint32_t FT2[256] = { FT };
209#undef V
210
211#define V(a,b,c,d) 0x##d##a##b##c
212static const uint32_t FT3[256] = { FT };
213#undef V
214
215#undef FT
216
217/*
218 * Reverse S-box
219 */
220static const unsigned char RSb[256] =
221{
222 0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38,
223 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
224 0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87,
225 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
226 0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D,
227 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
228 0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2,
229 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
230 0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16,
231 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
232 0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA,
233 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
234 0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A,
235 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
236 0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02,
237 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
238 0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA,
239 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
240 0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85,
241 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
242 0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89,
243 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
244 0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20,
245 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
246 0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31,
247 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
248 0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D,
249 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
250 0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0,
251 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
252 0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26,
253 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D
254};
255
256/*
257 * Reverse tables
258 */
259#define RT \
260\
261 V(50,A7,F4,51), V(53,65,41,7E), V(C3,A4,17,1A), V(96,5E,27,3A), \
262 V(CB,6B,AB,3B), V(F1,45,9D,1F), V(AB,58,FA,AC), V(93,03,E3,4B), \
263 V(55,FA,30,20), V(F6,6D,76,AD), V(91,76,CC,88), V(25,4C,02,F5), \
264 V(FC,D7,E5,4F), V(D7,CB,2A,C5), V(80,44,35,26), V(8F,A3,62,B5), \
265 V(49,5A,B1,DE), V(67,1B,BA,25), V(98,0E,EA,45), V(E1,C0,FE,5D), \
266 V(02,75,2F,C3), V(12,F0,4C,81), V(A3,97,46,8D), V(C6,F9,D3,6B), \
267 V(E7,5F,8F,03), V(95,9C,92,15), V(EB,7A,6D,BF), V(DA,59,52,95), \
268 V(2D,83,BE,D4), V(D3,21,74,58), V(29,69,E0,49), V(44,C8,C9,8E), \
269 V(6A,89,C2,75), V(78,79,8E,F4), V(6B,3E,58,99), V(DD,71,B9,27), \
270 V(B6,4F,E1,BE), V(17,AD,88,F0), V(66,AC,20,C9), V(B4,3A,CE,7D), \
271 V(18,4A,DF,63), V(82,31,1A,E5), V(60,33,51,97), V(45,7F,53,62), \
272 V(E0,77,64,B1), V(84,AE,6B,BB), V(1C,A0,81,FE), V(94,2B,08,F9), \
273 V(58,68,48,70), V(19,FD,45,8F), V(87,6C,DE,94), V(B7,F8,7B,52), \
274 V(23,D3,73,AB), V(E2,02,4B,72), V(57,8F,1F,E3), V(2A,AB,55,66), \
275 V(07,28,EB,B2), V(03,C2,B5,2F), V(9A,7B,C5,86), V(A5,08,37,D3), \
276 V(F2,87,28,30), V(B2,A5,BF,23), V(BA,6A,03,02), V(5C,82,16,ED), \
277 V(2B,1C,CF,8A), V(92,B4,79,A7), V(F0,F2,07,F3), V(A1,E2,69,4E), \
278 V(CD,F4,DA,65), V(D5,BE,05,06), V(1F,62,34,D1), V(8A,FE,A6,C4), \
279 V(9D,53,2E,34), V(A0,55,F3,A2), V(32,E1,8A,05), V(75,EB,F6,A4), \
280 V(39,EC,83,0B), V(AA,EF,60,40), V(06,9F,71,5E), V(51,10,6E,BD), \
281 V(F9,8A,21,3E), V(3D,06,DD,96), V(AE,05,3E,DD), V(46,BD,E6,4D), \
282 V(B5,8D,54,91), V(05,5D,C4,71), V(6F,D4,06,04), V(FF,15,50,60), \
283 V(24,FB,98,19), V(97,E9,BD,D6), V(CC,43,40,89), V(77,9E,D9,67), \
284 V(BD,42,E8,B0), V(88,8B,89,07), V(38,5B,19,E7), V(DB,EE,C8,79), \
285 V(47,0A,7C,A1), V(E9,0F,42,7C), V(C9,1E,84,F8), V(00,00,00,00), \
286 V(83,86,80,09), V(48,ED,2B,32), V(AC,70,11,1E), V(4E,72,5A,6C), \
287 V(FB,FF,0E,FD), V(56,38,85,0F), V(1E,D5,AE,3D), V(27,39,2D,36), \
288 V(64,D9,0F,0A), V(21,A6,5C,68), V(D1,54,5B,9B), V(3A,2E,36,24), \
289 V(B1,67,0A,0C), V(0F,E7,57,93), V(D2,96,EE,B4), V(9E,91,9B,1B), \
290 V(4F,C5,C0,80), V(A2,20,DC,61), V(69,4B,77,5A), V(16,1A,12,1C), \
291 V(0A,BA,93,E2), V(E5,2A,A0,C0), V(43,E0,22,3C), V(1D,17,1B,12), \
292 V(0B,0D,09,0E), V(AD,C7,8B,F2), V(B9,A8,B6,2D), V(C8,A9,1E,14), \
293 V(85,19,F1,57), V(4C,07,75,AF), V(BB,DD,99,EE), V(FD,60,7F,A3), \
294 V(9F,26,01,F7), V(BC,F5,72,5C), V(C5,3B,66,44), V(34,7E,FB,5B), \
295 V(76,29,43,8B), V(DC,C6,23,CB), V(68,FC,ED,B6), V(63,F1,E4,B8), \
296 V(CA,DC,31,D7), V(10,85,63,42), V(40,22,97,13), V(20,11,C6,84), \
297 V(7D,24,4A,85), V(F8,3D,BB,D2), V(11,32,F9,AE), V(6D,A1,29,C7), \
298 V(4B,2F,9E,1D), V(F3,30,B2,DC), V(EC,52,86,0D), V(D0,E3,C1,77), \
299 V(6C,16,B3,2B), V(99,B9,70,A9), V(FA,48,94,11), V(22,64,E9,47), \
300 V(C4,8C,FC,A8), V(1A,3F,F0,A0), V(D8,2C,7D,56), V(EF,90,33,22), \
301 V(C7,4E,49,87), V(C1,D1,38,D9), V(FE,A2,CA,8C), V(36,0B,D4,98), \
302 V(CF,81,F5,A6), V(28,DE,7A,A5), V(26,8E,B7,DA), V(A4,BF,AD,3F), \
303 V(E4,9D,3A,2C), V(0D,92,78,50), V(9B,CC,5F,6A), V(62,46,7E,54), \
304 V(C2,13,8D,F6), V(E8,B8,D8,90), V(5E,F7,39,2E), V(F5,AF,C3,82), \
305 V(BE,80,5D,9F), V(7C,93,D0,69), V(A9,2D,D5,6F), V(B3,12,25,CF), \
306 V(3B,99,AC,C8), V(A7,7D,18,10), V(6E,63,9C,E8), V(7B,BB,3B,DB), \
307 V(09,78,26,CD), V(F4,18,59,6E), V(01,B7,9A,EC), V(A8,9A,4F,83), \
308 V(65,6E,95,E6), V(7E,E6,FF,AA), V(08,CF,BC,21), V(E6,E8,15,EF), \
309 V(D9,9B,E7,BA), V(CE,36,6F,4A), V(D4,09,9F,EA), V(D6,7C,B0,29), \
310 V(AF,B2,A4,31), V(31,23,3F,2A), V(30,94,A5,C6), V(C0,66,A2,35), \
311 V(37,BC,4E,74), V(A6,CA,82,FC), V(B0,D0,90,E0), V(15,D8,A7,33), \
312 V(4A,98,04,F1), V(F7,DA,EC,41), V(0E,50,CD,7F), V(2F,F6,91,17), \
313 V(8D,D6,4D,76), V(4D,B0,EF,43), V(54,4D,AA,CC), V(DF,04,96,E4), \
314 V(E3,B5,D1,9E), V(1B,88,6A,4C), V(B8,1F,2C,C1), V(7F,51,65,46), \
315 V(04,EA,5E,9D), V(5D,35,8C,01), V(73,74,87,FA), V(2E,41,0B,FB), \
316 V(5A,1D,67,B3), V(52,D2,DB,92), V(33,56,10,E9), V(13,47,D6,6D), \
317 V(8C,61,D7,9A), V(7A,0C,A1,37), V(8E,14,F8,59), V(89,3C,13,EB), \
318 V(EE,27,A9,CE), V(35,C9,61,B7), V(ED,E5,1C,E1), V(3C,B1,47,7A), \
319 V(59,DF,D2,9C), V(3F,73,F2,55), V(79,CE,14,18), V(BF,37,C7,73), \
320 V(EA,CD,F7,53), V(5B,AA,FD,5F), V(14,6F,3D,DF), V(86,DB,44,78), \
321 V(81,F3,AF,CA), V(3E,C4,68,B9), V(2C,34,24,38), V(5F,40,A3,C2), \
322 V(72,C3,1D,16), V(0C,25,E2,BC), V(8B,49,3C,28), V(41,95,0D,FF), \
323 V(71,01,A8,39), V(DE,B3,0C,08), V(9C,E4,B4,D8), V(90,C1,56,64), \
324 V(61,84,CB,7B), V(70,B6,32,D5), V(74,5C,6C,48), V(42,57,B8,D0)
325
326#define V(a,b,c,d) 0x##a##b##c##d
327static const uint32_t RT0[256] = { RT };
328#undef V
329
330#define V(a,b,c,d) 0x##b##c##d##a
331static const uint32_t RT1[256] = { RT };
332#undef V
333
334#define V(a,b,c,d) 0x##c##d##a##b
335static const uint32_t RT2[256] = { RT };
336#undef V
337
338#define V(a,b,c,d) 0x##d##a##b##c
339static const uint32_t RT3[256] = { RT };
340#undef V
341
342#undef RT
343
344/*
345 * Round constants
346 */
347static const uint32_t RCON[10] =
348{
349 0x00000001, 0x00000002, 0x00000004, 0x00000008,
350 0x00000010, 0x00000020, 0x00000040, 0x00000080,
351 0x0000001B, 0x00000036
352};
353
354#else /* POLARSSL_AES_ROM_TABLES */
355
356/*
357 * Forward S-box & tables
358 */
359static unsigned char FSb[256];
360static uint32_t FT0[256];
361static uint32_t FT1[256];
362static uint32_t FT2[256];
363static uint32_t FT3[256];
364
365/*
366 * Reverse S-box & tables
367 */
368static unsigned char RSb[256];
369static uint32_t RT0[256];
370static uint32_t RT1[256];
371static uint32_t RT2[256];
372static uint32_t RT3[256];
373
374/*
375 * Round constants
376 */
377static uint32_t RCON[10];
378
379/*
380 * Tables generation code
381 */
382#define ROTL8(x) ( ( x << 8 ) & 0xFFFFFFFF ) | ( x >> 24 )
383#define XTIME(x) ( ( x << 1 ) ^ ( ( x & 0x80 ) ? 0x1B : 0x00 ) )
384#define MUL(x,y) ( ( x && y ) ? pow[(log[x]+log[y]) % 255] : 0 )
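/*
 * These macros implement GF(2^8) arithmetic. Worked example from FIPS-197:
 * XTIME(0x57) = 0xAE, XTIME(0xAE) & 0xFF = 0x47, XTIME(0x47) = 0x8E,
 * XTIME(0x8E) & 0xFF = 0x07, hence
 * {57} x {13} = {57} ^ {AE} ^ {07} = {FE}.
 * MUL() computes the same products through the pow/log tables filled in
 * by aes_gen_tables() below.
 */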
385
386static int aes_init_done = 0;
387
388static void aes_gen_tables( void )
389{
390 int i, x, y, z;
391 int pow[256];
392 int log[256];
393
394 /*
395 * compute pow and log tables over GF(2^8)
396 */
397 for( i = 0, x = 1; i < 256; i++ )
398 {
399 pow[i] = x;
400 log[x] = i;
401 x = ( x ^ XTIME( x ) ) & 0xFF;
402 }
403
404 /*
405 * calculate the round constants
406 */
407 for( i = 0, x = 1; i < 10; i++ )
408 {
409 RCON[i] = (uint32_t) x;
410 x = XTIME( x ) & 0xFF;
411 }
412
413 /*
414 * generate the forward and reverse S-boxes
415 */
416 FSb[0x00] = 0x63;
417 RSb[0x63] = 0x00;
418
419 for( i = 1; i < 256; i++ )
420 {
421 x = pow[255 - log[i]];
422
423 y = x; y = ( ( y << 1 ) | ( y >> 7 ) ) & 0xFF;
424 x ^= y; y = ( ( y << 1 ) | ( y >> 7 ) ) & 0xFF;
425 x ^= y; y = ( ( y << 1 ) | ( y >> 7 ) ) & 0xFF;
426 x ^= y; y = ( ( y << 1 ) | ( y >> 7 ) ) & 0xFF;
427 x ^= y ^ 0x63;
428
429 FSb[i] = (unsigned char) x;
430 RSb[x] = (unsigned char) i;
431 }
432
433 /*
434 * generate the forward and reverse tables
435 */
436 for( i = 0; i < 256; i++ )
437 {
438 x = FSb[i];
439 y = XTIME( x ) & 0xFF;
440 z = ( y ^ x ) & 0xFF;
441
442 FT0[i] = ( (uint32_t) y ) ^
443 ( (uint32_t) x << 8 ) ^
444 ( (uint32_t) x << 16 ) ^
445 ( (uint32_t) z << 24 );
446
447 FT1[i] = ROTL8( FT0[i] );
448 FT2[i] = ROTL8( FT1[i] );
449 FT3[i] = ROTL8( FT2[i] );
450
451 x = RSb[i];
452
453 RT0[i] = ( (uint32_t) MUL( 0x0E, x ) ) ^
454 ( (uint32_t) MUL( 0x09, x ) << 8 ) ^
455 ( (uint32_t) MUL( 0x0D, x ) << 16 ) ^
456 ( (uint32_t) MUL( 0x0B, x ) << 24 );
457
458 RT1[i] = ROTL8( RT0[i] );
459 RT2[i] = ROTL8( RT1[i] );
460 RT3[i] = ROTL8( RT2[i] );
461 }
462}
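/*
 * The tables generated here are bit for bit identical to the ROM tables in
 * the POLARSSL_AES_ROM_TABLES branch above: the affine map gives
 * FSb[0x00] == 0x63 and FSb[0x01] == 0x7C, matching the first entries of the
 * precomputed S-box, and FT1..FT3 / RT1..RT3 are byte rotations of FT0 / RT0,
 * exactly as the V() macros encode them.
 */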
463
464#endif /* POLARSSL_AES_ROM_TABLES */
465
466void aes_init( aes_context *ctx )
467{
468 memset( ctx, 0, sizeof( aes_context ) );
469}
470
471void aes_free( aes_context *ctx )
472{
473 if( ctx == NULL )
474 return;
475
476 polarssl_zeroize( ctx, sizeof( aes_context ) );
477}
478
479/*
480 * AES key schedule (encryption)
481 */
482int aes_setkey_enc( aes_context *ctx, const unsigned char *key,
483 unsigned int keysize )
484{
485 unsigned int i;
486 uint32_t *RK;
487
488#if !defined(POLARSSL_AES_ROM_TABLES)
489 if( aes_init_done == 0 )
490 {
491 aes_gen_tables();
492 aes_init_done = 1;
493
494 }
495#endif
496
497 switch( keysize )
498 {
499 case 128: ctx->nr = 10; break;
500 case 192: ctx->nr = 12; break;
501 case 256: ctx->nr = 14; break;
502 default : return( POLARSSL_ERR_AES_INVALID_KEY_LENGTH );
503 }
504
505#if defined(POLARSSL_PADLOCK_C) && defined(PADLOCK_ALIGN16)
506 if( aes_padlock_ace == -1 )
507 aes_padlock_ace = padlock_supports( PADLOCK_ACE );
508
509 if( aes_padlock_ace )
510 ctx->rk = RK = PADLOCK_ALIGN16( ctx->buf );
511 else
512#endif
513 ctx->rk = RK = ctx->buf;
514
515#if defined(POLARSSL_AESNI_C) && defined(POLARSSL_HAVE_X86_64)
516 if( aesni_supports( POLARSSL_AESNI_AES ) )
517 return( aesni_setkey_enc( (unsigned char *) ctx->rk, key, keysize ) );
518#endif
519
520 for( i = 0; i < ( keysize >> 5 ); i++ )
521 {
522 GET_UINT32_LE( RK[i], key, i << 2 );
523 }
524
525 switch( ctx->nr )
526 {
527 case 10:
528
529 for( i = 0; i < 10; i++, RK += 4 )
530 {
531 RK[4] = RK[0] ^ RCON[i] ^
532 ( (uint32_t) FSb[ ( RK[3] >> 8 ) & 0xFF ] ) ^
533 ( (uint32_t) FSb[ ( RK[3] >> 16 ) & 0xFF ] << 8 ) ^
534 ( (uint32_t) FSb[ ( RK[3] >> 24 ) & 0xFF ] << 16 ) ^
535 ( (uint32_t) FSb[ ( RK[3] ) & 0xFF ] << 24 );
536
537 RK[5] = RK[1] ^ RK[4];
538 RK[6] = RK[2] ^ RK[5];
539 RK[7] = RK[3] ^ RK[6];
540 }
541 break;
542
543 case 12:
544
545 for( i = 0; i < 8; i++, RK += 6 )
546 {
547 RK[6] = RK[0] ^ RCON[i] ^
548 ( (uint32_t) FSb[ ( RK[5] >> 8 ) & 0xFF ] ) ^
549 ( (uint32_t) FSb[ ( RK[5] >> 16 ) & 0xFF ] << 8 ) ^
550 ( (uint32_t) FSb[ ( RK[5] >> 24 ) & 0xFF ] << 16 ) ^
551 ( (uint32_t) FSb[ ( RK[5] ) & 0xFF ] << 24 );
552
553 RK[7] = RK[1] ^ RK[6];
554 RK[8] = RK[2] ^ RK[7];
555 RK[9] = RK[3] ^ RK[8];
556 RK[10] = RK[4] ^ RK[9];
557 RK[11] = RK[5] ^ RK[10];
558 }
559 break;
560
561 case 14:
562
563 for( i = 0; i < 7; i++, RK += 8 )
564 {
565 RK[8] = RK[0] ^ RCON[i] ^
566 ( (uint32_t) FSb[ ( RK[7] >> 8 ) & 0xFF ] ) ^
567 ( (uint32_t) FSb[ ( RK[7] >> 16 ) & 0xFF ] << 8 ) ^
568 ( (uint32_t) FSb[ ( RK[7] >> 24 ) & 0xFF ] << 16 ) ^
569 ( (uint32_t) FSb[ ( RK[7] ) & 0xFF ] << 24 );
570
571 RK[9] = RK[1] ^ RK[8];
572 RK[10] = RK[2] ^ RK[9];
573 RK[11] = RK[3] ^ RK[10];
574
575 RK[12] = RK[4] ^
576 ( (uint32_t) FSb[ ( RK[11] ) & 0xFF ] ) ^
577 ( (uint32_t) FSb[ ( RK[11] >> 8 ) & 0xFF ] << 8 ) ^
578 ( (uint32_t) FSb[ ( RK[11] >> 16 ) & 0xFF ] << 16 ) ^
579 ( (uint32_t) FSb[ ( RK[11] >> 24 ) & 0xFF ] << 24 );
580
581 RK[13] = RK[5] ^ RK[12];
582 RK[14] = RK[6] ^ RK[13];
583 RK[15] = RK[7] ^ RK[14];
584 }
585 break;
586 }
587
588 return( 0 );
589}
590
591/*
592 * AES key schedule (decryption)
593 */
594int aes_setkey_dec( aes_context *ctx, const unsigned char *key,
595 unsigned int keysize )
596{
597 int i, j, ret;
598 aes_context cty;
599 uint32_t *RK;
600 uint32_t *SK;
601
602 aes_init( &cty );
603
604#if defined(POLARSSL_PADLOCK_C) && defined(PADLOCK_ALIGN16)
605 if( aes_padlock_ace == -1 )
606 aes_padlock_ace = padlock_supports( PADLOCK_ACE );
607
608 if( aes_padlock_ace )
609 ctx->rk = RK = PADLOCK_ALIGN16( ctx->buf );
610 else
611#endif
612 ctx->rk = RK = ctx->buf;
613
614 /* Also checks keysize */
615 if( ( ret = aes_setkey_enc( &cty, key, keysize ) ) != 0 )
616 goto exit;
617
618 ctx->nr = cty.nr;
619
620#if defined(POLARSSL_AESNI_C) && defined(POLARSSL_HAVE_X86_64)
621 if( aesni_supports( POLARSSL_AESNI_AES ) )
622 {
623 aesni_inverse_key( (unsigned char *) ctx->rk,
624 (const unsigned char *) cty.rk, ctx->nr );
625 goto exit;
626 }
627#endif
628
629 SK = cty.rk + cty.nr * 4;
630
631 *RK++ = *SK++;
632 *RK++ = *SK++;
633 *RK++ = *SK++;
634 *RK++ = *SK++;
635
636 for( i = ctx->nr - 1, SK -= 8; i > 0; i--, SK -= 8 )
637 {
638 for( j = 0; j < 4; j++, SK++ )
639 {
640 *RK++ = RT0[ FSb[ ( *SK ) & 0xFF ] ] ^
641 RT1[ FSb[ ( *SK >> 8 ) & 0xFF ] ] ^
642 RT2[ FSb[ ( *SK >> 16 ) & 0xFF ] ] ^
643 RT3[ FSb[ ( *SK >> 24 ) & 0xFF ] ];
644 }
645 }
646
647 *RK++ = *SK++;
648 *RK++ = *SK++;
649 *RK++ = *SK++;
650 *RK++ = *SK++;
651
652exit:
653 aes_free( &cty );
654
655 return( ret );
656}
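/*
 * Note on the loop above: the RTn tables combine InvSubBytes with
 * InvMixColumns, so indexing them through FSb[] cancels the InvSubBytes part.
 * The net effect is InvMixColumns applied to each inner encryption round key,
 * which is exactly the key transformation required by the equivalent inverse
 * cipher used in aes_crypt_ecb().
 */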
657
658#define AES_FROUND(X0,X1,X2,X3,Y0,Y1,Y2,Y3) \
659{ \
660 X0 = *RK++ ^ FT0[ ( Y0 ) & 0xFF ] ^ \
661 FT1[ ( Y1 >> 8 ) & 0xFF ] ^ \
662 FT2[ ( Y2 >> 16 ) & 0xFF ] ^ \
663 FT3[ ( Y3 >> 24 ) & 0xFF ]; \
664 \
665 X1 = *RK++ ^ FT0[ ( Y1 ) & 0xFF ] ^ \
666 FT1[ ( Y2 >> 8 ) & 0xFF ] ^ \
667 FT2[ ( Y3 >> 16 ) & 0xFF ] ^ \
668 FT3[ ( Y0 >> 24 ) & 0xFF ]; \
669 \
670 X2 = *RK++ ^ FT0[ ( Y2 ) & 0xFF ] ^ \
671 FT1[ ( Y3 >> 8 ) & 0xFF ] ^ \
672 FT2[ ( Y0 >> 16 ) & 0xFF ] ^ \
673 FT3[ ( Y1 >> 24 ) & 0xFF ]; \
674 \
675 X3 = *RK++ ^ FT0[ ( Y3 ) & 0xFF ] ^ \
676 FT1[ ( Y0 >> 8 ) & 0xFF ] ^ \
677 FT2[ ( Y1 >> 16 ) & 0xFF ] ^ \
678 FT3[ ( Y2 >> 24 ) & 0xFF ]; \
679}
680
681#define AES_RROUND(X0,X1,X2,X3,Y0,Y1,Y2,Y3) \
682{ \
683 X0 = *RK++ ^ RT0[ ( Y0 ) & 0xFF ] ^ \
684 RT1[ ( Y3 >> 8 ) & 0xFF ] ^ \
685 RT2[ ( Y2 >> 16 ) & 0xFF ] ^ \
686 RT3[ ( Y1 >> 24 ) & 0xFF ]; \
687 \
688 X1 = *RK++ ^ RT0[ ( Y1 ) & 0xFF ] ^ \
689 RT1[ ( Y0 >> 8 ) & 0xFF ] ^ \
690 RT2[ ( Y3 >> 16 ) & 0xFF ] ^ \
691 RT3[ ( Y2 >> 24 ) & 0xFF ]; \
692 \
693 X2 = *RK++ ^ RT0[ ( Y2 ) & 0xFF ] ^ \
694 RT1[ ( Y1 >> 8 ) & 0xFF ] ^ \
695 RT2[ ( Y0 >> 16 ) & 0xFF ] ^ \
696 RT3[ ( Y3 >> 24 ) & 0xFF ]; \
697 \
698 X3 = *RK++ ^ RT0[ ( Y3 ) & 0xFF ] ^ \
699 RT1[ ( Y2 >> 8 ) & 0xFF ] ^ \
700 RT2[ ( Y1 >> 16 ) & 0xFF ] ^ \
701 RT3[ ( Y0 >> 24 ) & 0xFF ]; \
702}
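/*
 * AES_FROUND reads its source words in the pattern Y0, Y1, Y2, Y3 (each row
 * taken one column further to the left), which is ShiftRows folded into the
 * table lookups; AES_RROUND uses Y0, Y3, Y2, Y1 for InvShiftRows. SubBytes/
 * InvSubBytes and MixColumns/InvMixColumns are already baked into FTn/RTn.
 */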
703
704/*
705 * AES-ECB block encryption/decryption
706 */
707int aes_crypt_ecb( aes_context *ctx,
708 int mode,
709 const unsigned char input[16],
710 unsigned char output[16] )
711{
712 int i;
713 uint32_t *RK, X0, X1, X2, X3, Y0, Y1, Y2, Y3;
714
715#if defined(POLARSSL_AESNI_C) && defined(POLARSSL_HAVE_X86_64)
716 if( aesni_supports( POLARSSL_AESNI_AES ) )
717 return( aesni_crypt_ecb( ctx, mode, input, output ) );
718#endif
719
720#if defined(POLARSSL_PADLOCK_C) && defined(POLARSSL_HAVE_X86)
721 if( aes_padlock_ace )
722 {
723 if( padlock_xcryptecb( ctx, mode, input, output ) == 0 )
724 return( 0 );
725
726 // If the padlock data is misaligned, we just fall back to
727 // the unaccelerated mode
728 //
729 }
730#endif
731
732 RK = ctx->rk;
733
734 GET_UINT32_LE( X0, input, 0 ); X0 ^= *RK++;
735 GET_UINT32_LE( X1, input, 4 ); X1 ^= *RK++;
736 GET_UINT32_LE( X2, input, 8 ); X2 ^= *RK++;
737 GET_UINT32_LE( X3, input, 12 ); X3 ^= *RK++;
738
739 if( mode == AES_DECRYPT )
740 {
741 for( i = ( ctx->nr >> 1 ) - 1; i > 0; i-- )
742 {
743 AES_RROUND( Y0, Y1, Y2, Y3, X0, X1, X2, X3 );
744 AES_RROUND( X0, X1, X2, X3, Y0, Y1, Y2, Y3 );
745 }
746
747 AES_RROUND( Y0, Y1, Y2, Y3, X0, X1, X2, X3 );
748
749 X0 = *RK++ ^ \
750 ( (uint32_t) RSb[ ( Y0 ) & 0xFF ] ) ^
751 ( (uint32_t) RSb[ ( Y3 >> 8 ) & 0xFF ] << 8 ) ^
752 ( (uint32_t) RSb[ ( Y2 >> 16 ) & 0xFF ] << 16 ) ^
753 ( (uint32_t) RSb[ ( Y1 >> 24 ) & 0xFF ] << 24 );
754
755 X1 = *RK++ ^ \
756 ( (uint32_t) RSb[ ( Y1 ) & 0xFF ] ) ^
757 ( (uint32_t) RSb[ ( Y0 >> 8 ) & 0xFF ] << 8 ) ^
758 ( (uint32_t) RSb[ ( Y3 >> 16 ) & 0xFF ] << 16 ) ^
759 ( (uint32_t) RSb[ ( Y2 >> 24 ) & 0xFF ] << 24 );
760
761 X2 = *RK++ ^ \
762 ( (uint32_t) RSb[ ( Y2 ) & 0xFF ] ) ^
763 ( (uint32_t) RSb[ ( Y1 >> 8 ) & 0xFF ] << 8 ) ^
764 ( (uint32_t) RSb[ ( Y0 >> 16 ) & 0xFF ] << 16 ) ^
765 ( (uint32_t) RSb[ ( Y3 >> 24 ) & 0xFF ] << 24 );
766
767 X3 = *RK++ ^ \
768 ( (uint32_t) RSb[ ( Y3 ) & 0xFF ] ) ^
769 ( (uint32_t) RSb[ ( Y2 >> 8 ) & 0xFF ] << 8 ) ^
770 ( (uint32_t) RSb[ ( Y1 >> 16 ) & 0xFF ] << 16 ) ^
771 ( (uint32_t) RSb[ ( Y0 >> 24 ) & 0xFF ] << 24 );
772 }
773 else /* AES_ENCRYPT */
774 {
775 for( i = ( ctx->nr >> 1 ) - 1; i > 0; i-- )
776 {
777 AES_FROUND( Y0, Y1, Y2, Y3, X0, X1, X2, X3 );
778 AES_FROUND( X0, X1, X2, X3, Y0, Y1, Y2, Y3 );
779 }
780
781 AES_FROUND( Y0, Y1, Y2, Y3, X0, X1, X2, X3 );
782
783 X0 = *RK++ ^ \
784 ( (uint32_t) FSb[ ( Y0 ) & 0xFF ] ) ^
785 ( (uint32_t) FSb[ ( Y1 >> 8 ) & 0xFF ] << 8 ) ^
786 ( (uint32_t) FSb[ ( Y2 >> 16 ) & 0xFF ] << 16 ) ^
787 ( (uint32_t) FSb[ ( Y3 >> 24 ) & 0xFF ] << 24 );
788
789 X1 = *RK++ ^ \
790 ( (uint32_t) FSb[ ( Y1 ) & 0xFF ] ) ^
791 ( (uint32_t) FSb[ ( Y2 >> 8 ) & 0xFF ] << 8 ) ^
792 ( (uint32_t) FSb[ ( Y3 >> 16 ) & 0xFF ] << 16 ) ^
793 ( (uint32_t) FSb[ ( Y0 >> 24 ) & 0xFF ] << 24 );
794
795 X2 = *RK++ ^ \
796 ( (uint32_t) FSb[ ( Y2 ) & 0xFF ] ) ^
797 ( (uint32_t) FSb[ ( Y3 >> 8 ) & 0xFF ] << 8 ) ^
798 ( (uint32_t) FSb[ ( Y0 >> 16 ) & 0xFF ] << 16 ) ^
799 ( (uint32_t) FSb[ ( Y1 >> 24 ) & 0xFF ] << 24 );
800
801 X3 = *RK++ ^ \
802 ( (uint32_t) FSb[ ( Y3 ) & 0xFF ] ) ^
803 ( (uint32_t) FSb[ ( Y0 >> 8 ) & 0xFF ] << 8 ) ^
804 ( (uint32_t) FSb[ ( Y1 >> 16 ) & 0xFF ] << 16 ) ^
805 ( (uint32_t) FSb[ ( Y2 >> 24 ) & 0xFF ] << 24 );
806 }
807
808 PUT_UINT32_LE( X0, output, 0 );
809 PUT_UINT32_LE( X1, output, 4 );
810 PUT_UINT32_LE( X2, output, 8 );
811 PUT_UINT32_LE( X3, output, 12 );
812
813 return( 0 );
814}
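/*
 * Minimal single-block usage sketch (illustrative all-zero key and data,
 * mirroring the calls made by aes_self_test() below):
 *
 *     unsigned char key[16] = { 0 };
 *     unsigned char in[16]  = { 0 };
 *     unsigned char out[16];
 *     aes_context ctx;
 *
 *     aes_init( &ctx );
 *     aes_setkey_enc( &ctx, key, 128 );             // keysize is in bits
 *     aes_crypt_ecb( &ctx, AES_ENCRYPT, in, out );
 *     aes_free( &ctx );
 *
 * Decryption works the same way with aes_setkey_dec() and AES_DECRYPT.
 */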
815
816#if defined(POLARSSL_CIPHER_MODE_CBC)
817/*
818 * AES-CBC buffer encryption/decryption
819 */
820int aes_crypt_cbc( aes_context *ctx,
821 int mode,
822 size_t length,
823 unsigned char iv[16],
824 const unsigned char *input,
825 unsigned char *output )
826{
827 int i;
828 unsigned char temp[16];
829
830 if( length % 16 )
831 return( POLARSSL_ERR_AES_INVALID_INPUT_LENGTH );
832
833#if defined(POLARSSL_PADLOCK_C) && defined(POLARSSL_HAVE_X86)
834 if( aes_padlock_ace )
835 {
836 if( padlock_xcryptcbc( ctx, mode, length, iv, input, output ) == 0 )
837 return( 0 );
838
839 // If the padlock data is misaligned, we just fall back to
840 // the unaccelerated mode
841 //
842 }
843#endif
844
845 if( mode == AES_DECRYPT )
846 {
847 while( length > 0 )
848 {
849 memcpy( temp, input, 16 );
850 aes_crypt_ecb( ctx, mode, input, output );
851
852 for( i = 0; i < 16; i++ )
853 output[i] = (unsigned char)( output[i] ^ iv[i] );
854
855 memcpy( iv, temp, 16 );
856
857 input += 16;
858 output += 16;
859 length -= 16;
860 }
861 }
862 else
863 {
864 while( length > 0 )
865 {
866 for( i = 0; i < 16; i++ )
867 output[i] = (unsigned char)( input[i] ^ iv[i] );
868
869 aes_crypt_ecb( ctx, mode, output, output );
870 memcpy( iv, output, 16 );
871
872 input += 16;
873 output += 16;
874 length -= 16;
875 }
876 }
877
878 return( 0 );
879}
880#endif /* POLARSSL_CIPHER_MODE_CBC */
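/*
 * CBC usage sketch (illustrative all-zero key, IV and data): length must be a
 * multiple of 16, and iv[] is updated in place so a long stream can be fed in
 * several consecutive calls:
 *
 *     unsigned char key[16] = { 0 };
 *     unsigned char iv[16]  = { 0 };
 *     unsigned char buf[32] = { 0 };                // two blocks
 *     aes_context ctx;
 *
 *     aes_init( &ctx );
 *     aes_setkey_enc( &ctx, key, 128 );
 *     aes_crypt_cbc( &ctx, AES_ENCRYPT, 32, iv, buf, buf );
 *     aes_free( &ctx );
 *
 * For decryption use aes_setkey_dec() and AES_DECRYPT with the original IV.
 */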
881
882#if defined(POLARSSL_CIPHER_MODE_CFB)
883/*
884 * AES-CFB128 buffer encryption/decryption
885 */
886int aes_crypt_cfb128( aes_context *ctx,
887 int mode,
888 size_t length,
889 size_t *iv_off,
890 unsigned char iv[16],
891 const unsigned char *input,
892 unsigned char *output )
893{
894 int c;
895 size_t n = *iv_off;
896
897 if( mode == AES_DECRYPT )
898 {
899 while( length-- )
900 {
901 if( n == 0 )
902 aes_crypt_ecb( ctx, AES_ENCRYPT, iv, iv );
903
904 c = *input++;
905 *output++ = (unsigned char)( c ^ iv[n] );
906 iv[n] = (unsigned char) c;
907
908 n = ( n + 1 ) & 0x0F;
909 }
910 }
911 else
912 {
913 while( length-- )
914 {
915 if( n == 0 )
916 aes_crypt_ecb( ctx, AES_ENCRYPT, iv, iv );
917
918 iv[n] = *output++ = (unsigned char)( iv[n] ^ *input++ );
919
920 n = ( n + 1 ) & 0x0F;
921 }
922 }
923
924 *iv_off = n;
925
926 return( 0 );
927}
928
929/*
930 * AES-CFB8 buffer encryption/decryption
931 */
932#include <stdio.h>
933int aes_crypt_cfb8( aes_context *ctx,
934 int mode,
935 size_t length,
936 unsigned char iv[16],
937 const unsigned char *input,
938 unsigned char *output )
939{
940 unsigned char c;
941 unsigned char ov[17];
942
943 while( length-- )
944 {
945 memcpy( ov, iv, 16 );
946 aes_crypt_ecb( ctx, AES_ENCRYPT, iv, iv );
947
948 if( mode == AES_DECRYPT )
949 ov[16] = *input;
950
951 c = *output++ = (unsigned char)( iv[0] ^ *input++ );
952
953 if( mode == AES_ENCRYPT )
954 ov[16] = c;
955
956 memcpy( iv, ov + 1, 16 );
957 }
958
959 return( 0 );
960}
961#endif /* POLARSSL_CIPHER_MODE_CFB */
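/*
 * CFB usage sketch: both directions run the block cipher forwards, so the key
 * is always scheduled with aes_setkey_enc(), even when mode is AES_DECRYPT
 * (this is how aes_self_test() below drives it). With ctx and key set up as
 * in the earlier sketches, and msg/msg_len standing for the caller's buffer:
 *
 *     size_t iv_off = 0;
 *     unsigned char iv[16] = { 0 };
 *
 *     aes_crypt_cfb128( &ctx, AES_ENCRYPT, msg_len, &iv_off, iv, msg, msg );
 *
 * iv_off keeps the position inside the current keystream block between calls,
 * so arbitrary (non block aligned) lengths can be streamed.
 */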
962
963#if defined(POLARSSL_CIPHER_MODE_CTR)
964/*
965 * AES-CTR buffer encryption/decryption
966 */
967int aes_crypt_ctr( aes_context *ctx,
968 size_t length,
969 size_t *nc_off,
970 unsigned char nonce_counter[16],
971 unsigned char stream_block[16],
972 const unsigned char *input,
973 unsigned char *output )
974{
975 int c, i;
976 size_t n = *nc_off;
977
978 while( length-- )
979 {
980 if( n == 0 ) {
981 aes_crypt_ecb( ctx, AES_ENCRYPT, nonce_counter, stream_block );
982
983 for( i = 16; i > 0; i-- )
984 if( ++nonce_counter[i - 1] != 0 )
985 break;
986 }
987 c = *input++;
988 *output++ = (unsigned char)( c ^ stream_block[n] );
989
990 n = ( n + 1 ) & 0x0F;
991 }
992
993 *nc_off = n;
994
995 return( 0 );
996}
997#endif /* POLARSSL_CIPHER_MODE_CTR */
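/*
 * CTR usage sketch: encryption and decryption are the same call, and the key
 * is always scheduled with aes_setkey_enc(). nonce_counter is incremented as
 * a big-endian counter (last byte first), while stream_block and nc_off carry
 * the current keystream position between calls. With ctx, key, msg and
 * msg_len as in the sketches above:
 *
 *     size_t nc_off = 0;
 *     unsigned char nonce_counter[16] = { 0 };
 *     unsigned char stream_block[16];
 *
 *     aes_crypt_ctr( &ctx, msg_len, &nc_off, nonce_counter, stream_block,
 *                    msg, msg );
 */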
998
999#endif /* !POLARSSL_AES_ALT */
1000
1001#if defined(POLARSSL_SELF_TEST)
1002
1003#include <stdio.h>
1004
1005/*
1006 * AES test vectors from:
1007 *
1008 * http://csrc.nist.gov/archive/aes/rijndael/rijndael-vals.zip
1009 */
1010static const unsigned char aes_test_ecb_dec[3][16] =
1011{
1012 { 0x44, 0x41, 0x6A, 0xC2, 0xD1, 0xF5, 0x3C, 0x58,
1013 0x33, 0x03, 0x91, 0x7E, 0x6B, 0xE9, 0xEB, 0xE0 },
1014 { 0x48, 0xE3, 0x1E, 0x9E, 0x25, 0x67, 0x18, 0xF2,
1015 0x92, 0x29, 0x31, 0x9C, 0x19, 0xF1, 0x5B, 0xA4 },
1016 { 0x05, 0x8C, 0xCF, 0xFD, 0xBB, 0xCB, 0x38, 0x2D,
1017 0x1F, 0x6F, 0x56, 0x58, 0x5D, 0x8A, 0x4A, 0xDE }
1018};
1019
1020static const unsigned char aes_test_ecb_enc[3][16] =
1021{
1022 { 0xC3, 0x4C, 0x05, 0x2C, 0xC0, 0xDA, 0x8D, 0x73,
1023 0x45, 0x1A, 0xFE, 0x5F, 0x03, 0xBE, 0x29, 0x7F },
1024 { 0xF3, 0xF6, 0x75, 0x2A, 0xE8, 0xD7, 0x83, 0x11,
1025 0x38, 0xF0, 0x41, 0x56, 0x06, 0x31, 0xB1, 0x14 },
1026 { 0x8B, 0x79, 0xEE, 0xCC, 0x93, 0xA0, 0xEE, 0x5D,
1027 0xFF, 0x30, 0xB4, 0xEA, 0x21, 0x63, 0x6D, 0xA4 }
1028};
1029
1030#if defined(POLARSSL_CIPHER_MODE_CBC)
1031static const unsigned char aes_test_cbc_dec[3][16] =
1032{
1033 { 0xFA, 0xCA, 0x37, 0xE0, 0xB0, 0xC8, 0x53, 0x73,
1034 0xDF, 0x70, 0x6E, 0x73, 0xF7, 0xC9, 0xAF, 0x86 },
1035 { 0x5D, 0xF6, 0x78, 0xDD, 0x17, 0xBA, 0x4E, 0x75,
1036 0xB6, 0x17, 0x68, 0xC6, 0xAD, 0xEF, 0x7C, 0x7B },
1037 { 0x48, 0x04, 0xE1, 0x81, 0x8F, 0xE6, 0x29, 0x75,
1038 0x19, 0xA3, 0xE8, 0x8C, 0x57, 0x31, 0x04, 0x13 }
1039};
1040
1041static const unsigned char aes_test_cbc_enc[3][16] =
1042{
1043 { 0x8A, 0x05, 0xFC, 0x5E, 0x09, 0x5A, 0xF4, 0x84,
1044 0x8A, 0x08, 0xD3, 0x28, 0xD3, 0x68, 0x8E, 0x3D },
1045 { 0x7B, 0xD9, 0x66, 0xD5, 0x3A, 0xD8, 0xC1, 0xBB,
1046 0x85, 0xD2, 0xAD, 0xFA, 0xE8, 0x7B, 0xB1, 0x04 },
1047 { 0xFE, 0x3C, 0x53, 0x65, 0x3E, 0x2F, 0x45, 0xB5,
1048 0x6F, 0xCD, 0x88, 0xB2, 0xCC, 0x89, 0x8F, 0xF0 }
1049};
1050#endif /* POLARSSL_CIPHER_MODE_CBC */
1051
1052#if defined(POLARSSL_CIPHER_MODE_CFB)
1053/*
1054 * AES-CFB128 test vectors from:
1055 *
1056 * http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
1057 */
1058static const unsigned char aes_test_cfb128_key[3][32] =
1059{
1060 { 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6,
1061 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C },
1062 { 0x8E, 0x73, 0xB0, 0xF7, 0xDA, 0x0E, 0x64, 0x52,
1063 0xC8, 0x10, 0xF3, 0x2B, 0x80, 0x90, 0x79, 0xE5,
1064 0x62, 0xF8, 0xEA, 0xD2, 0x52, 0x2C, 0x6B, 0x7B },
1065 { 0x60, 0x3D, 0xEB, 0x10, 0x15, 0xCA, 0x71, 0xBE,
1066 0x2B, 0x73, 0xAE, 0xF0, 0x85, 0x7D, 0x77, 0x81,
1067 0x1F, 0x35, 0x2C, 0x07, 0x3B, 0x61, 0x08, 0xD7,
1068 0x2D, 0x98, 0x10, 0xA3, 0x09, 0x14, 0xDF, 0xF4 }
1069};
1070
1071static const unsigned char aes_test_cfb128_iv[16] =
1072{
1073 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
1074 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
1075};
1076
1077static const unsigned char aes_test_cfb128_pt[64] =
1078{
1079 0x6B, 0xC1, 0xBE, 0xE2, 0x2E, 0x40, 0x9F, 0x96,
1080 0xE9, 0x3D, 0x7E, 0x11, 0x73, 0x93, 0x17, 0x2A,
1081 0xAE, 0x2D, 0x8A, 0x57, 0x1E, 0x03, 0xAC, 0x9C,
1082 0x9E, 0xB7, 0x6F, 0xAC, 0x45, 0xAF, 0x8E, 0x51,
1083 0x30, 0xC8, 0x1C, 0x46, 0xA3, 0x5C, 0xE4, 0x11,
1084 0xE5, 0xFB, 0xC1, 0x19, 0x1A, 0x0A, 0x52, 0xEF,
1085 0xF6, 0x9F, 0x24, 0x45, 0xDF, 0x4F, 0x9B, 0x17,
1086 0xAD, 0x2B, 0x41, 0x7B, 0xE6, 0x6C, 0x37, 0x10
1087};
1088
1089static const unsigned char aes_test_cfb128_ct[3][64] =
1090{
1091 { 0x3B, 0x3F, 0xD9, 0x2E, 0xB7, 0x2D, 0xAD, 0x20,
1092 0x33, 0x34, 0x49, 0xF8, 0xE8, 0x3C, 0xFB, 0x4A,
1093 0xC8, 0xA6, 0x45, 0x37, 0xA0, 0xB3, 0xA9, 0x3F,
1094 0xCD, 0xE3, 0xCD, 0xAD, 0x9F, 0x1C, 0xE5, 0x8B,
1095 0x26, 0x75, 0x1F, 0x67, 0xA3, 0xCB, 0xB1, 0x40,
1096 0xB1, 0x80, 0x8C, 0xF1, 0x87, 0xA4, 0xF4, 0xDF,
1097 0xC0, 0x4B, 0x05, 0x35, 0x7C, 0x5D, 0x1C, 0x0E,
1098 0xEA, 0xC4, 0xC6, 0x6F, 0x9F, 0xF7, 0xF2, 0xE6 },
1099 { 0xCD, 0xC8, 0x0D, 0x6F, 0xDD, 0xF1, 0x8C, 0xAB,
1100 0x34, 0xC2, 0x59, 0x09, 0xC9, 0x9A, 0x41, 0x74,
1101 0x67, 0xCE, 0x7F, 0x7F, 0x81, 0x17, 0x36, 0x21,
1102 0x96, 0x1A, 0x2B, 0x70, 0x17, 0x1D, 0x3D, 0x7A,
1103 0x2E, 0x1E, 0x8A, 0x1D, 0xD5, 0x9B, 0x88, 0xB1,
1104 0xC8, 0xE6, 0x0F, 0xED, 0x1E, 0xFA, 0xC4, 0xC9,
1105 0xC0, 0x5F, 0x9F, 0x9C, 0xA9, 0x83, 0x4F, 0xA0,
1106 0x42, 0xAE, 0x8F, 0xBA, 0x58, 0x4B, 0x09, 0xFF },
1107 { 0xDC, 0x7E, 0x84, 0xBF, 0xDA, 0x79, 0x16, 0x4B,
1108 0x7E, 0xCD, 0x84, 0x86, 0x98, 0x5D, 0x38, 0x60,
1109 0x39, 0xFF, 0xED, 0x14, 0x3B, 0x28, 0xB1, 0xC8,
1110 0x32, 0x11, 0x3C, 0x63, 0x31, 0xE5, 0x40, 0x7B,
1111 0xDF, 0x10, 0x13, 0x24, 0x15, 0xE5, 0x4B, 0x92,
1112 0xA1, 0x3E, 0xD0, 0xA8, 0x26, 0x7A, 0xE2, 0xF9,
1113 0x75, 0xA3, 0x85, 0x74, 0x1A, 0xB9, 0xCE, 0xF8,
1114 0x20, 0x31, 0x62, 0x3D, 0x55, 0xB1, 0xE4, 0x71 }
1115};
1116#endif /* POLARSSL_CIPHER_MODE_CFB */
1117
1118#if defined(POLARSSL_CIPHER_MODE_CTR)
1119/*
1120 * AES-CTR test vectors from:
1121 *
1122 * http://www.faqs.org/rfcs/rfc3686.html
1123 */
1124
1125static const unsigned char aes_test_ctr_key[3][16] =
1126{
1127 { 0xAE, 0x68, 0x52, 0xF8, 0x12, 0x10, 0x67, 0xCC,
1128 0x4B, 0xF7, 0xA5, 0x76, 0x55, 0x77, 0xF3, 0x9E },
1129 { 0x7E, 0x24, 0x06, 0x78, 0x17, 0xFA, 0xE0, 0xD7,
1130 0x43, 0xD6, 0xCE, 0x1F, 0x32, 0x53, 0x91, 0x63 },
1131 { 0x76, 0x91, 0xBE, 0x03, 0x5E, 0x50, 0x20, 0xA8,
1132 0xAC, 0x6E, 0x61, 0x85, 0x29, 0xF9, 0xA0, 0xDC }
1133};
1134
1135static const unsigned char aes_test_ctr_nonce_counter[3][16] =
1136{
1137 { 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00,
1138 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
1139 { 0x00, 0x6C, 0xB6, 0xDB, 0xC0, 0x54, 0x3B, 0x59,
1140 0xDA, 0x48, 0xD9, 0x0B, 0x00, 0x00, 0x00, 0x01 },
1141 { 0x00, 0xE0, 0x01, 0x7B, 0x27, 0x77, 0x7F, 0x3F,
1142 0x4A, 0x17, 0x86, 0xF0, 0x00, 0x00, 0x00, 0x01 }
1143};
1144
1145static const unsigned char aes_test_ctr_pt[3][48] =
1146{
1147 { 0x53, 0x69, 0x6E, 0x67, 0x6C, 0x65, 0x20, 0x62,
1148 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x6D, 0x73, 0x67 },
1149
1150 { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
1151 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
1152 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
1153 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F },
1154
1155 { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
1156 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
1157 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
1158 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
1159 0x20, 0x21, 0x22, 0x23 }
1160};
1161
1162static const unsigned char aes_test_ctr_ct[3][48] =
1163{
1164 { 0xE4, 0x09, 0x5D, 0x4F, 0xB7, 0xA7, 0xB3, 0x79,
1165 0x2D, 0x61, 0x75, 0xA3, 0x26, 0x13, 0x11, 0xB8 },
1166 { 0x51, 0x04, 0xA1, 0x06, 0x16, 0x8A, 0x72, 0xD9,
1167 0x79, 0x0D, 0x41, 0xEE, 0x8E, 0xDA, 0xD3, 0x88,
1168 0xEB, 0x2E, 0x1E, 0xFC, 0x46, 0xDA, 0x57, 0xC8,
1169 0xFC, 0xE6, 0x30, 0xDF, 0x91, 0x41, 0xBE, 0x28 },
1170 { 0xC1, 0xCF, 0x48, 0xA8, 0x9F, 0x2F, 0xFD, 0xD9,
1171 0xCF, 0x46, 0x52, 0xE9, 0xEF, 0xDB, 0x72, 0xD7,
1172 0x45, 0x40, 0xA4, 0x2B, 0xDE, 0x6D, 0x78, 0x36,
1173 0xD5, 0x9A, 0x5C, 0xEA, 0xAE, 0xF3, 0x10, 0x53,
1174 0x25, 0xB2, 0x07, 0x2F }
1175};
1176
1177static const int aes_test_ctr_len[3] =
1178 { 16, 32, 36 };
1179#endif /* POLARSSL_CIPHER_MODE_CTR */
1180
1181/*
1182 * Checkup routine
1183 */
1184int aes_self_test( int verbose )
1185{
1186 int ret = 0, i, j, u, v;
1187 unsigned char key[32];
1188 unsigned char buf[64];
1189 unsigned char iv[16];
1190#if defined(POLARSSL_CIPHER_MODE_CBC)
1191 unsigned char prv[16];
1192#endif
1193#if defined(POLARSSL_CIPHER_MODE_CTR) || defined(POLARSSL_CIPHER_MODE_CFB)
1194 size_t offset;
1195#endif
1196#if defined(POLARSSL_CIPHER_MODE_CTR)
1197 int len;
1198 unsigned char nonce_counter[16];
1199 unsigned char stream_block[16];
1200#endif
1201 aes_context ctx;
1202
1203 memset( key, 0, 32 );
1204 aes_init( &ctx );
1205
1206 /*
1207 * ECB mode
1208 */
1209 for( i = 0; i < 6; i++ )
1210 {
1211 u = i >> 1;
1212 v = i & 1;
1213
1214 if( verbose != 0 )
1215 polarssl_printf( " AES-ECB-%3d (%s): ", 128 + u * 64,
1216 ( v == AES_DECRYPT ) ? "dec" : "enc" );
1217
1218 memset( buf, 0, 16 );
1219
1220 if( v == AES_DECRYPT )
1221 {
1222 aes_setkey_dec( &ctx, key, 128 + u * 64 );
1223
1224 for( j = 0; j < 10000; j++ )
1225 aes_crypt_ecb( &ctx, v, buf, buf );
1226
1227 if( memcmp( buf, aes_test_ecb_dec[u], 16 ) != 0 )
1228 {
1229 if( verbose != 0 )
1230 polarssl_printf( "failed\n" );
1231
1232 ret = 1;
1233 goto exit;
1234 }
1235 }
1236 else
1237 {
1238 aes_setkey_enc( &ctx, key, 128 + u * 64 );
1239
1240 for( j = 0; j < 10000; j++ )
1241 aes_crypt_ecb( &ctx, v, buf, buf );
1242
1243 if( memcmp( buf, aes_test_ecb_enc[u], 16 ) != 0 )
1244 {
1245 if( verbose != 0 )
1246 polarssl_printf( "failed\n" );
1247
1248 ret = 1;
1249 goto exit;
1250 }
1251 }
1252
1253 if( verbose != 0 )
1254 polarssl_printf( "passed\n" );
1255 }
1256
1257 if( verbose != 0 )
1258 polarssl_printf( "\n" );
1259
1260#if defined(POLARSSL_CIPHER_MODE_CBC)
1261 /*
1262 * CBC mode
1263 */
1264 for( i = 0; i < 6; i++ )
1265 {
1266 u = i >> 1;
1267 v = i & 1;
1268
1269 if( verbose != 0 )
1270 polarssl_printf( " AES-CBC-%3d (%s): ", 128 + u * 64,
1271 ( v == AES_DECRYPT ) ? "dec" : "enc" );
1272
1273 memset( iv , 0, 16 );
1274 memset( prv, 0, 16 );
1275 memset( buf, 0, 16 );
1276
1277 if( v == AES_DECRYPT )
1278 {
1279 aes_setkey_dec( &ctx, key, 128 + u * 64 );
1280
1281 for( j = 0; j < 10000; j++ )
1282 aes_crypt_cbc( &ctx, v, 16, iv, buf, buf );
1283
1284 if( memcmp( buf, aes_test_cbc_dec[u], 16 ) != 0 )
1285 {
1286 if( verbose != 0 )
1287 polarssl_printf( "failed\n" );
1288
1289 ret = 1;
1290 goto exit;
1291 }
1292 }
1293 else
1294 {
1295 aes_setkey_enc( &ctx, key, 128 + u * 64 );
1296
1297 for( j = 0; j < 10000; j++ )
1298 {
1299 unsigned char tmp[16];
1300
1301 aes_crypt_cbc( &ctx, v, 16, iv, buf, buf );
1302
1303 memcpy( tmp, prv, 16 );
1304 memcpy( prv, buf, 16 );
1305 memcpy( buf, tmp, 16 );
1306 }
1307
1308 if( memcmp( prv, aes_test_cbc_enc[u], 16 ) != 0 )
1309 {
1310 if( verbose != 0 )
1311 polarssl_printf( "failed\n" );
1312
1313 ret = 1;
1314 goto exit;
1315 }
1316 }
1317
1318 if( verbose != 0 )
1319 polarssl_printf( "passed\n" );
1320 }
1321
1322 if( verbose != 0 )
1323 polarssl_printf( "\n" );
1324#endif /* POLARSSL_CIPHER_MODE_CBC */
1325
1326#if defined(POLARSSL_CIPHER_MODE_CFB)
1327 /*
1328 * CFB128 mode
1329 */
1330 for( i = 0; i < 6; i++ )
1331 {
1332 u = i >> 1;
1333 v = i & 1;
1334
1335 if( verbose != 0 )
1336 polarssl_printf( " AES-CFB128-%3d (%s): ", 128 + u * 64,
1337 ( v == AES_DECRYPT ) ? "dec" : "enc" );
1338
1339 memcpy( iv, aes_test_cfb128_iv, 16 );
1340 memcpy( key, aes_test_cfb128_key[u], 16 + u * 8 );
1341
1342 offset = 0;
1343 aes_setkey_enc( &ctx, key, 128 + u * 64 );
1344
1345 if( v == AES_DECRYPT )
1346 {
1347 memcpy( buf, aes_test_cfb128_ct[u], 64 );
1348 aes_crypt_cfb128( &ctx, v, 64, &offset, iv, buf, buf );
1349
1350 if( memcmp( buf, aes_test_cfb128_pt, 64 ) != 0 )
1351 {
1352 if( verbose != 0 )
1353 polarssl_printf( "failed\n" );
1354
1355 ret = 1;
1356 goto exit;
1357 }
1358 }
1359 else
1360 {
1361 memcpy( buf, aes_test_cfb128_pt, 64 );
1362 aes_crypt_cfb128( &ctx, v, 64, &offset, iv, buf, buf );
1363
1364 if( memcmp( buf, aes_test_cfb128_ct[u], 64 ) != 0 )
1365 {
1366 if( verbose != 0 )
1367 polarssl_printf( "failed\n" );
1368
1369 ret = 1;
1370 goto exit;
1371 }
1372 }
1373
1374 if( verbose != 0 )
1375 polarssl_printf( "passed\n" );
1376 }
1377
1378 if( verbose != 0 )
1379 polarssl_printf( "\n" );
1380#endif /* POLARSSL_CIPHER_MODE_CFB */
1381
1382#if defined(POLARSSL_CIPHER_MODE_CTR)
1383 /*
1384 * CTR mode
1385 */
1386 for( i = 0; i < 6; i++ )
1387 {
1388 u = i >> 1;
1389 v = i & 1;
1390
1391 if( verbose != 0 )
1392 polarssl_printf( " AES-CTR-128 (%s): ",
1393 ( v == AES_DECRYPT ) ? "dec" : "enc" );
1394
1395 memcpy( nonce_counter, aes_test_ctr_nonce_counter[u], 16 );
1396 memcpy( key, aes_test_ctr_key[u], 16 );
1397
1398 offset = 0;
1399 aes_setkey_enc( &ctx, key, 128 );
1400
1401 if( v == AES_DECRYPT )
1402 {
1403 len = aes_test_ctr_len[u];
1404 memcpy( buf, aes_test_ctr_ct[u], len );
1405
1406 aes_crypt_ctr( &ctx, len, &offset, nonce_counter, stream_block,
1407 buf, buf );
1408
1409 if( memcmp( buf, aes_test_ctr_pt[u], len ) != 0 )
1410 {
1411 if( verbose != 0 )
1412 polarssl_printf( "failed\n" );
1413
1414 ret = 1;
1415 goto exit;
1416 }
1417 }
1418 else
1419 {
1420 len = aes_test_ctr_len[u];
1421 memcpy( buf, aes_test_ctr_pt[u], len );
1422
1423 aes_crypt_ctr( &ctx, len, &offset, nonce_counter, stream_block,
1424 buf, buf );
1425
1426 if( memcmp( buf, aes_test_ctr_ct[u], len ) != 0 )
1427 {
1428 if( verbose != 0 )
1429 polarssl_printf( "failed\n" );
1430
1431 ret = 1;
1432 goto exit;
1433 }
1434 }
1435
1436 if( verbose != 0 )
1437 polarssl_printf( "passed\n" );
1438 }
1439
1440 if( verbose != 0 )
1441 polarssl_printf( "\n" );
1442#endif /* POLARSSL_CIPHER_MODE_CTR */
1443
1444 ret = 0;
1445
1446exit:
1447 aes_free( &ctx );
1448
1449 return( ret );
1450}
1451
1452#endif /* POLARSSL_SELF_TEST */
1453
1454#endif /* POLARSSL_AES_C */