// SPDX-License-Identifier: GPL-2.0-only
/*
 * Bit sliced AES using NEON instructions
 *
 * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/cbc.h>
#include <crypto/ctr.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <linux/module.h>

MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");

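/*
 * These routines are implemented by the accompanying bit-sliced NEON
 * assembly (aes-neonbs-core.S), which processes up to eight AES blocks
 * in parallel.
 */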
asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);

asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks);
asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks);

asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);

asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 ctr[], u8 final[]);

asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);
asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);

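/*
 * The expanded round keys are kept in the bit-sliced format expected by
 * the NEON code; the rk[] buffer below is sized for the AES-256 worst
 * case.
 */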
struct aesbs_ctx {
	int	rounds;
	u8	rk[13 * (8 * AES_BLOCK_SIZE) + 32] __aligned(AES_BLOCK_SIZE);
};

struct aesbs_cbc_ctx {
	struct aesbs_ctx	key;
	struct crypto_cipher	*enc_tfm;
};

struct aesbs_xts_ctx {
	struct aesbs_ctx	key;
	struct crypto_cipher	*tweak_tfm;
};

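/*
 * The CTR context carries a scalar fallback key schedule so that requests
 * can still be handled when the NEON unit is unavailable (see
 * ctr_encrypt_sync() below).
 */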
struct aesbs_ctr_ctx {
	struct aesbs_ctx	key;		/* must be first member */
	struct crypto_aes_ctx	fallback;
};

static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			unsigned int key_len)
{
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_aes_ctx rk;
	int err;

	err = aes_expandkey(&rk, in_key, key_len);
	if (err)
		return err;

	ctx->rounds = 6 + key_len / 4;

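	/* convert the key schedule into the bit-sliced NEON layout */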
	kernel_neon_begin();
	aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds);
	kernel_neon_end();

	return 0;
}

static int __ecb_crypt(struct skcipher_request *req,
		       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

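		/*
		 * If this is not the last step of the walk, only handle a
		 * multiple of the eight-block stride and leave the tail
		 * for the next iteration.
		 */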
		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
		   ctx->rounds, blocks);
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static int ecb_encrypt(struct skcipher_request *req)
{
	return __ecb_crypt(req, aesbs_ecb_encrypt);
}

static int ecb_decrypt(struct skcipher_request *req)
{
	return __ecb_crypt(req, aesbs_ecb_decrypt);
}

static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_aes_ctx rk;
	int err;

	err = aes_expandkey(&rk, in_key, key_len);
	if (err)
		return err;

	ctx->key.rounds = 6 + key_len / 4;

	kernel_neon_begin();
	aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
	kernel_neon_end();

	return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len);
}

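/*
 * CBC encryption is inherently sequential, so it is performed one block
 * at a time with a plain AES cipher; CBC decryption is parallelizable
 * and uses the bit-sliced NEON code instead.
 */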
static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_cipher_encrypt_one(ctx->enc_tfm, dst, src);
}

static int cbc_encrypt(struct skcipher_request *req)
{
	return crypto_cbc_encrypt_walk(req, cbc_encrypt_one);
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				  ctx->key.rk, ctx->key.rounds, blocks,
				  walk.iv);
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static int cbc_init(struct crypto_tfm *tfm)
{
	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);

	return PTR_ERR_OR_ZERO(ctx->enc_tfm);
}

static void cbc_exit(struct crypto_tfm *tfm)
{
	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(ctx->enc_tfm);
}

static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
				 unsigned int key_len)
{
	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = aes_expandkey(&ctx->fallback, in_key, key_len);
	if (err)
		return err;

	ctx->key.rounds = 6 + key_len / 4;

	kernel_neon_begin();
	aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
	kernel_neon_end();

	return 0;
}

static int ctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	u8 buf[AES_BLOCK_SIZE];
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while (walk.nbytes > 0) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
		u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;

		if (walk.nbytes < walk.total) {
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);
			final = NULL;
		}

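		/*
		 * When 'final' is non-NULL, the NEON code also emits the
		 * keystream for a trailing partial block into 'buf' so it
		 * can be XORed with the remaining input below.
		 */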
		aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				  ctx->rk, ctx->rounds, blocks, walk.iv, final);

		if (final) {
			u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
			u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;

			crypto_xor_cpy(dst, src, final,
				       walk.total % AES_BLOCK_SIZE);

			err = skcipher_walk_done(&walk, 0);
			break;
		}
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned long flags;

	/*
	 * Temporarily disable interrupts to avoid races where
	 * cachelines are evicted when the CPU is interrupted
	 * to do something else.
	 */
	local_irq_save(flags);
	aes_encrypt(&ctx->fallback, dst, src);
	local_irq_restore(flags);
}

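/*
 * Fall back to the scalar cipher when the NEON unit may not be used,
 * e.g. when the request is issued from interrupt context.
 */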
static int ctr_encrypt_sync(struct skcipher_request *req)
{
	if (!crypto_simd_usable())
		return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);

	return ctr_encrypt(req);
}

static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, in_key, key_len);
	if (err)
		return err;

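	/* the second half of the XTS key is the tweak key */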
	key_len /= 2;
	err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len);
	if (err)
		return err;

	return aesbs_setkey(tfm, in_key, key_len);
}

static int xts_init(struct crypto_tfm *tfm)
{
	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);

	return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
}

static void xts_exit(struct crypto_tfm *tfm)
{
	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(ctx->tweak_tfm);
}

static int __xts_crypt(struct skcipher_request *req,
		       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, true);
	if (err)
		return err;

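	/* encrypt the IV with the tweak key to produce the initial tweak */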
	crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);

	kernel_neon_begin();
	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
		   ctx->key.rounds, blocks, walk.iv);
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static int xts_encrypt(struct skcipher_request *req)
{
	return __xts_crypt(req, aesbs_xts_encrypt);
}

static int xts_decrypt(struct skcipher_request *req)
{
	return __xts_crypt(req, aesbs_xts_decrypt);
}

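/*
 * The "__" prefixed algorithms are marked CRYPTO_ALG_INTERNAL: they are
 * only reachable through the simd wrappers created in aes_init(), which
 * defer to a cryptd worker whenever the NEON unit cannot be used
 * directly.
 */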
static struct skcipher_alg aes_algs[] = { {
	.base.cra_name		= "__ecb(aes)",
	.base.cra_driver_name	= "__ecb-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.setkey			= aesbs_setkey,
	.encrypt		= ecb_encrypt,
	.decrypt		= ecb_decrypt,
}, {
	.base.cra_name		= "__cbc(aes)",
	.base.cra_driver_name	= "__cbc-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_cbc_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
	.base.cra_init		= cbc_init,
	.base.cra_exit		= cbc_exit,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_cbc_setkey,
	.encrypt		= cbc_encrypt,
	.decrypt		= cbc_decrypt,
}, {
	.base.cra_name		= "__ctr(aes)",
	.base.cra_driver_name	= "__ctr-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct aesbs_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.chunksize		= AES_BLOCK_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_setkey,
	.encrypt		= ctr_encrypt,
	.decrypt		= ctr_encrypt,
}, {
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-neonbs-sync",
	.base.cra_priority	= 250 - 1,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct aesbs_ctr_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.chunksize		= AES_BLOCK_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_ctr_setkey_sync,
	.encrypt		= ctr_encrypt_sync,
	.decrypt		= ctr_encrypt_sync,
}, {
	.base.cra_name		= "__xts(aes)",
	.base.cra_driver_name	= "__xts-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_xts_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
	.base.cra_init		= xts_init,
	.base.cra_exit		= xts_exit,

	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_xts_setkey,
	.encrypt		= xts_encrypt,
	.decrypt		= xts_decrypt,
} };

static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];

static void aes_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++)
		if (aes_simd_algs[i])
			simd_skcipher_free(aes_simd_algs[i]);

	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
	struct simd_skcipher_alg *simd;
	const char *basename;
	const char *algname;
	const char *drvname;
	int err;
	int i;

	if (!(elf_hwcap & HWCAP_NEON))
		return -ENODEV;

	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
			continue;

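		/* wrap the internal alg, dropping the "__" prefix from its names */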
		algname = aes_algs[i].base.cra_name + 2;
		drvname = aes_algs[i].base.cra_driver_name + 2;
		basename = aes_algs[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto unregister_simds;

		aes_simd_algs[i] = simd;
	}
	return 0;

unregister_simds:
	aes_exit();
	return err;
}

late_initcall(aes_init);
module_exit(aes_exit);