/*
 * linux/arch/arm64/crypto/aes-glue.c - wrapper code for ARMv8 AES
 *
 * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/neon.h>
#include <asm/hwcap.h>
#include <crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/xts.h>

#include "aes-ce-setkey.h"

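/*
 * This glue code is built twice: once with USE_V8_CRYPTO_EXTENSIONS
 * defined, producing the Crypto Extensions ("ce") variant, and once
 * without, producing the pure NEON ("neon") fallback. The macros below
 * select the matching asm entry points, driver name suffix and
 * priority for each build.
 */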
#ifdef USE_V8_CRYPTO_EXTENSIONS
#define MODE			"ce"
#define PRIO			300
#define aes_setkey		ce_aes_setkey
#define aes_expandkey		ce_aes_expandkey
#define aes_ecb_encrypt		ce_aes_ecb_encrypt
#define aes_ecb_decrypt		ce_aes_ecb_decrypt
#define aes_cbc_encrypt		ce_aes_cbc_encrypt
#define aes_cbc_decrypt		ce_aes_cbc_decrypt
#define aes_ctr_encrypt		ce_aes_ctr_encrypt
#define aes_xts_encrypt		ce_aes_xts_encrypt
#define aes_xts_decrypt		ce_aes_xts_decrypt
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
#else
#define MODE			"neon"
#define PRIO			200
#define aes_setkey		crypto_aes_set_key
#define aes_expandkey		crypto_aes_expand_key
#define aes_ecb_encrypt		neon_aes_ecb_encrypt
#define aes_ecb_decrypt		neon_aes_ecb_decrypt
#define aes_cbc_encrypt		neon_aes_cbc_encrypt
#define aes_cbc_decrypt		neon_aes_cbc_decrypt
#define aes_ctr_encrypt		neon_aes_ctr_encrypt
#define aes_xts_encrypt		neon_aes_xts_encrypt
#define aes_xts_decrypt		neon_aes_xts_decrypt
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");
#endif

MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

/*
 * Defined in aes-modes.S. The 'first' argument is nonzero on the first
 * call of a sequence; it tells the asm code to load the round keys (and
 * the IV, where applicable) into NEON registers, so that subsequent
 * calls within the same kernel_neon_begin()/kernel_neon_end() section
 * can reuse them without reloading.
 */
asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
				int rounds, int blocks, int first);
asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
				int rounds, int blocks, int first);

asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
				int rounds, int blocks, u8 iv[], int first);
asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
				int rounds, int blocks, u8 iv[], int first);

asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
				int rounds, int blocks, u8 ctr[], int first);

asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
				int rounds, int blocks, u8 const rk2[], u8 iv[],
				int first);
asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[],
				int rounds, int blocks, u8 const rk2[], u8 iv[],
				int first);

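/*
 * XTS uses two independent AES keys: key1 en/decrypts the data blocks
 * and key2 encrypts the sector tweak. Both expanded key schedules are
 * kept in the same tfm context.
 */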
struct crypto_aes_xts_ctx {
	struct crypto_aes_ctx key1;
	struct crypto_aes_ctx __aligned(8) key2;
};

static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = xts_check_key(tfm, in_key, key_len);
	if (ret)
		return ret;

	ret = aes_expandkey(&ctx->key1, in_key, key_len / 2);
	if (!ret)
		ret = aes_expandkey(&ctx->key2, &in_key[key_len / 2],
				    key_len / 2);
	if (!ret)
		return 0;

	tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
	return -EINVAL;
}

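/*
 * ECB: walk the scatterlists and process as many full blocks per
 * iteration as the walk yields. The round count follows from the AES
 * key size: 6 + key_length / 4 gives 10 rounds for AES-128 (16-byte
 * keys), 12 for AES-192 and 14 for AES-256.
 */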
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	int err, first, rounds = 6 + ctx->key_length / 4;
	struct blkcipher_walk walk;
	unsigned int blocks;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_neon_begin();
	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key_enc, rounds, blocks, first);
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	int err, first, rounds = 6 + ctx->key_length / 4;
	struct blkcipher_walk walk;
	unsigned int blocks;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_neon_begin();
	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key_dec, rounds, blocks, first);
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}

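/*
 * CBC: same walk structure as ECB, but the IV is chained through
 * walk.iv across iterations.
 */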
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	int err, first, rounds = 6 + ctx->key_length / 4;
	struct blkcipher_walk walk;
	unsigned int blocks;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_neon_begin();
	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
				first);
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	int err, first, rounds = 6 + ctx->key_length / 4;
	struct blkcipher_walk walk;
	unsigned int blocks;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_neon_begin();
	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
				first);
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}

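/*
 * CTR is a stream cipher (cra_blocksize is 1 below), so a final partial
 * block is handled here rather than rejected: the keystream block is
 * generated into an aligned stack buffer and only 'nbytes' of it are
 * copied out. CTR decryption is the same operation as encryption, which
 * is why both .encrypt and .decrypt of the ctr algs point at this
 * routine.
 */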
static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	int err, first, rounds = 6 + ctx->key_length / 4;
	struct blkcipher_walk walk;
	int blocks;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);

	first = 1;
	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
				first);
		first = 0;
		nbytes -= blocks * AES_BLOCK_SIZE;
		if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
			break;
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	if (walk.nbytes % AES_BLOCK_SIZE) {
		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
		u8 __aligned(8) tail[AES_BLOCK_SIZE];

		/*
		 * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
		 * to tell aes_ctr_encrypt() to only read half a block.
		 */
		blocks = (nbytes <= 8) ? -1 : 1;

		aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds,
				blocks, walk.iv, first);
		memcpy(tdst, tail, nbytes);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_neon_end();

	return err;
}

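/*
 * XTS: key1 en/decrypts the data; key2 always *encrypts* the tweak,
 * even on the decrypt path, which is why xts_decrypt() below passes
 * key2.key_enc.
 */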
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	int err, first, rounds = 6 + ctx->key1.key_length / 4;
	struct blkcipher_walk walk;
	unsigned int blocks;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_neon_begin();
	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key1.key_enc, rounds, blocks,
				(u8 *)ctx->key2.key_enc, walk.iv, first);
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	int err, first, rounds = 6 + ctx->key1.key_length / 4;
	struct blkcipher_walk walk;
	unsigned int blocks;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_neon_begin();
	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key1.key_dec, rounds, blocks,
				(u8 *)ctx->key2.key_enc, walk.iv, first);
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

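/*
 * The first four entries are the synchronous "__driver" implementations,
 * marked CRYPTO_ALG_INTERNAL so general users cannot request them (they
 * may only run where NEON use is permitted). The remaining four are the
 * public async wrappers built on ablk_helper, which call the sync code
 * directly when NEON is usable and otherwise defer to a cryptd worker.
 */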
static struct crypto_alg aes_algs[] = { {
	.cra_name		= "__ecb-aes-" MODE,
	.cra_driver_name	= "__driver-ecb-aes-" MODE,
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_blkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= 0,
		.setkey		= aes_setkey,
		.encrypt	= ecb_encrypt,
		.decrypt	= ecb_decrypt,
	},
}, {
	.cra_name		= "__cbc-aes-" MODE,
	.cra_driver_name	= "__driver-cbc-aes-" MODE,
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_blkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= aes_setkey,
		.encrypt	= cbc_encrypt,
		.decrypt	= cbc_decrypt,
	},
}, {
	.cra_name		= "__ctr-aes-" MODE,
	.cra_driver_name	= "__driver-ctr-aes-" MODE,
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_blkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= aes_setkey,
		.encrypt	= ctr_encrypt,
		.decrypt	= ctr_encrypt,
	},
}, {
	.cra_name		= "__xts-aes-" MODE,
	.cra_driver_name	= "__driver-xts-aes-" MODE,
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_blkcipher = {
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= xts_set_key,
		.encrypt	= xts_encrypt,
		.decrypt	= xts_decrypt,
	},
}, {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-" MODE,
	.cra_priority		= PRIO,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= 0,
		.setkey		= ablk_set_key,
		.encrypt	= ablk_encrypt,
		.decrypt	= ablk_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-" MODE,
	.cra_priority		= PRIO,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ablk_set_key,
		.encrypt	= ablk_encrypt,
		.decrypt	= ablk_decrypt,
	}
}, {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-" MODE,
	.cra_priority		= PRIO,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ablk_set_key,
		.encrypt	= ablk_encrypt,
		.decrypt	= ablk_decrypt,
	}
}, {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-" MODE,
	.cra_priority		= PRIO,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_ablkcipher = {
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ablk_set_key,
		.encrypt	= ablk_encrypt,
		.decrypt	= ablk_decrypt,
	}
} };

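/*
 * A minimal sketch of how a kernel-side caller might drive the public
 * "ctr(aes)" algorithm registered above via the generic skcipher API.
 * This is illustrative only and not part of this driver: 'key', 'buf'
 * and 'buflen' are assumed to be supplied by the caller, error handling
 * is abbreviated, and a real caller would also wait for asynchronous
 * completion (-EINPROGRESS).
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	struct scatterlist sg;
 *	u8 iv[AES_BLOCK_SIZE] = { 0 };
 *
 *	tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, buflen);
 *	skcipher_request_set_crypt(req, &sg, &sg, buflen, iv);
 *	crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */
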
static int __init aes_init(void)
{
	return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
}

static void __exit aes_exit(void)
{
	crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
}

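/*
 * The "ce" build only loads if the CPU advertises the AES instructions
 * via the hwcaps; the NEON build runs on any ARMv8 CPU and registers
 * unconditionally.
 */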
#ifdef USE_V8_CRYPTO_EXTENSIONS
module_cpu_feature_match(AES, aes_init);
#else
module_init(aes_init);
#endif
module_exit(aes_exit);