/*
 * aes-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

#include "aes-ce-setkey.h"

static int num_rounds(struct crypto_aes_ctx *ctx)
{
	/*
	 * # of rounds specified by AES:
	 * 128 bit key		10 rounds
	 * 192 bit key		12 rounds
	 * 256 bit key		14 rounds
	 * => n byte key	=> 6 + (n/4) rounds
	 */
	return 6 + ctx->key_length / 4;
}

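/*
 * AES-CCM primitives implemented in assembly using the ARMv8 Crypto
 * Extensions (in the accompanying aes-ce-ccm-core.S), along with the
 * generic scalar AES block cipher that serves as the non-SIMD fallback.
 */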
asmlinkage void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
				     u32 *macp, u32 const rk[], u32 rounds);

asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[]);

asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[]);

asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
				 u32 rounds);

asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);

static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
		      unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	ret = ce_aes_expandkey(ctx, in_key, key_len);
	if (!ret)
		return 0;

	tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
	return -EINVAL;
}

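/*
 * CCM allows tag sizes of 4, 6, 8, 10, 12, 14 or 16 bytes, i.e., any even
 * value in [4, 16]; the upper bound is enforced by the crypto API based on
 * .maxauthsize below.
 */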
static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	if ((authsize & 1) || authsize < 4)
		return -EINVAL;
	return 0;
}

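/*
 * Construct the CCM B0 block in maciv[]: the flags byte, 15 - L bytes of
 * nonce taken from the IV, and the message length in the trailing L bytes.
 * On return, req->iv has been converted into the initial counter block A0
 * by zeroing its length field.
 */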
static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	__be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
	u32 l = req->iv[0] + 1;

	/* verify that the CCM dimension 'L' is set correctly in the IV */
	if (l < 2 || l > 8)
		return -EINVAL;

	/* verify that msglen can in fact be represented in L bytes */
	if (l < 4 && msglen >> (8 * l))
		return -EOVERFLOW;

	/*
	 * Even though the CCM spec allows L values of up to 8, the Linux
	 * cryptoapi uses a u32 type to represent msglen, so the top 4 bytes
	 * are always 0.
	 */
	n[0] = 0;
	n[1] = cpu_to_be32(msglen);

	memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);

	/*
	 * Meaning of byte 0 according to the CCM spec (RFC 3610/NIST 800-38C)
	 * - bits 0..2	: max # of bytes required to represent msglen, minus 1
	 *                (already set by caller)
	 * - bits 3..5	: size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
	 * - bit 6	: indicates presence of authenticate-only data
	 */
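	/* e.g. an 8 byte tag is encoded as (8 - 2) / 2 = 3 in bits 3..5 */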
	maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
	if (req->assoclen)
		maciv[0] |= 0x40;

	memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
	return 0;
}

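/*
 * Feed abytes of input into the CBC-MAC. *macp tracks how many bytes of
 * the current MAC block are occupied, so the authenticated data may arrive
 * in arbitrary chunks; the scalar path mirrors the behaviour of the
 * ce_aes_ccm_auth_data() assembly.
 */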
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
			   u32 abytes, u32 *macp, bool use_neon)
{
	if (likely(use_neon)) {
		ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
				     num_rounds(key));
	} else {
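		/* fill up any partially occupied MAC block first */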
		if (*macp > 0 && *macp < AES_BLOCK_SIZE) {
			int added = min(abytes, AES_BLOCK_SIZE - *macp);

			crypto_xor(&mac[*macp], in, added);

			*macp += added;
			in += added;
			abytes -= added;
		}

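		/*
		 * Absorb whole blocks. Note the '>' rather than '>=': a
		 * trailing block that is exactly full is left to the code
		 * below, so that *macp reflects the pending data.
		 */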
		while (abytes > AES_BLOCK_SIZE) {
			__aes_arm64_encrypt(key->key_enc, mac, mac,
					    num_rounds(key));
			crypto_xor(mac, in, AES_BLOCK_SIZE);

			in += AES_BLOCK_SIZE;
			abytes -= AES_BLOCK_SIZE;
		}

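		/* absorb the remainder, if any, and record how full the MAC block is */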
		if (abytes > 0) {
			__aes_arm64_encrypt(key->key_enc, mac, mac,
					    num_rounds(key));
			crypto_xor(mac, in, abytes);
			*macp = abytes;
		} else {
			*macp = 0;
		}
	}
}

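/*
 * MAC the associated data. RFC 3610 prefixes the AAD with an encoding of
 * its length: two big-endian bytes for lengths below 0xff00, or the marker
 * 0xfffe followed by a four-byte length otherwise. Lengths needing more
 * than 32 bits cannot occur here, since assoclen is an unsigned int.
 */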
static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[],
				   bool use_neon)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct __packed { __be16 l; __be32 h; u16 len; } ltag;
	struct scatter_walk walk;
	u32 len = req->assoclen;
	u32 macp = 0;

	/* prepend the AAD with a length tag */
	if (len < 0xff00) {
		ltag.l = cpu_to_be16(len);
		ltag.len = 2;
	} else {
		ltag.l = cpu_to_be16(0xfffe);
		put_unaligned_be32(len, &ltag.h);
		ltag.len = 6;
	}

	ccm_update_mac(ctx, mac, (u8 *)&ltag, ltag.len, &macp, use_neon);
	scatterwalk_start(&walk, req->src);

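	/* walk the scatterlist and MAC each contiguous chunk of AAD */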
	do {
		u32 n = scatterwalk_clamp(&walk, len);
		u8 *p;

		if (!n) {
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		p = scatterwalk_map(&walk);
		ccm_update_mac(ctx, mac, p, n, &macp, use_neon);
		len -= n;

		scatterwalk_unmap(p);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
	} while (len);
}

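/*
 * Scalar fallback for contexts where the SIMD unit may not be used (see
 * may_use_simd()). CCM combines CTR encryption with a CBC-MAC: on
 * encryption the MAC absorbs the plaintext before it is xored with the
 * keystream, on decryption the recovered plaintext afterwards.
 */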
static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
			      struct crypto_aes_ctx *ctx, bool enc)
{
	u8 buf[AES_BLOCK_SIZE];
	int err = 0;

	while (walk->nbytes) {
		int blocks = walk->nbytes / AES_BLOCK_SIZE;
		u32 tail = walk->nbytes % AES_BLOCK_SIZE;
		u8 *dst = walk->dst.virt.addr;
		u8 *src = walk->src.virt.addr;
		u32 nbytes = walk->nbytes;

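		/* last chunk of the walk: absorb the partial tail block here too */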
		if (nbytes == walk->total && tail > 0) {
			blocks++;
			tail = 0;
		}

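		/* per block: bump the counter, derive the keystream, chain the MAC */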
		do {
			u32 bsize = AES_BLOCK_SIZE;

			if (nbytes < AES_BLOCK_SIZE)
				bsize = nbytes;

			crypto_inc(walk->iv, AES_BLOCK_SIZE);
			__aes_arm64_encrypt(ctx->key_enc, buf, walk->iv,
					    num_rounds(ctx));
			__aes_arm64_encrypt(ctx->key_enc, mac, mac,
					    num_rounds(ctx));
			if (enc)
				crypto_xor(mac, src, bsize);
			crypto_xor_cpy(dst, src, buf, bsize);
			if (!enc)
				crypto_xor(mac, dst, bsize);
			dst += bsize;
			src += bsize;
			nbytes -= bsize;
		} while (--blocks);

		err = skcipher_walk_done(walk, tail);
	}

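	/* encrypt the final MAC block and xor in S0 = E(key, A0) to create the tag */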
	if (!err) {
		__aes_arm64_encrypt(ctx->key_enc, buf, iv0, num_rounds(ctx));
		__aes_arm64_encrypt(ctx->key_enc, mac, mac, num_rounds(ctx));
		crypto_xor(mac, buf, AES_BLOCK_SIZE);
	}
	return err;
}

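/*
 * AEAD encryption: MAC the AAD, CTR-encrypt the plaintext while folding it
 * into the MAC, then append the tag (truncated to authsize) to the
 * ciphertext.
 */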
static int ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen;
	bool use_neon = may_use_simd();
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	if (likely(use_neon))
		kernel_neon_begin();

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac, use_neon);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_encrypt(&walk, req, true);

	if (likely(use_neon)) {
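		/*
		 * Hold back a partial tail block except on the final chunk
		 * of the walk; the CE assembly handles the last partial
		 * block itself.
		 */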
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			if (walk.nbytes == walk.total)
				tail = 0;

			ce_aes_ccm_encrypt(walk.dst.virt.addr,
					   walk.src.virt.addr,
					   walk.nbytes - tail, ctx->key_enc,
					   num_rounds(ctx), mac, walk.iv);

			err = skcipher_walk_done(&walk, tail);
		}
		if (!err)
			ce_aes_ccm_final(mac, buf, ctx->key_enc,
					 num_rounds(ctx));

		kernel_neon_end();
	} else {
		err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
	}
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}

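/*
 * AEAD decryption: MAC the AAD, CTR-decrypt the ciphertext while folding
 * the recovered plaintext into the MAC, then compare the computed tag in
 * constant time with the one stored at the end of the source.
 */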
static int ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen - authsize;
	bool use_neon = may_use_simd();
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	if (likely(use_neon))
		kernel_neon_begin();

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac, use_neon);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_decrypt(&walk, req, true);

	if (likely(use_neon)) {
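		/* as in ccm_encrypt(), defer a partial tail to the last chunk */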
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			if (walk.nbytes == walk.total)
				tail = 0;

			ce_aes_ccm_decrypt(walk.dst.virt.addr,
					   walk.src.virt.addr,
					   walk.nbytes - tail, ctx->key_enc,
					   num_rounds(ctx), mac, walk.iv);

			err = skcipher_walk_done(&walk, tail);
		}
		if (!err)
			ce_aes_ccm_final(mac, buf, ctx->key_enc,
					 num_rounds(ctx));

		kernel_neon_end();
	} else {
		err = ccm_crypt_fallback(&walk, mac, buf, ctx, false);
	}

	if (err)
		return err;

	/* compare calculated auth tag with the stored one */
	scatterwalk_map_and_copy(buf, req->src,
				 req->assoclen + req->cryptlen - authsize,
				 authsize, 0);

	if (crypto_memneq(mac, buf, authsize))
		return -EBADMSG;
	return 0;
}

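/*
 * cra_blocksize is 1: CCM is a stream mode, so inputs need not be a
 * multiple of the AES block size.
 */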
static struct aead_alg ccm_aes_alg = {
	.base = {
		.cra_name		= "ccm(aes)",
		.cra_driver_name	= "ccm-aes-ce",
		.cra_priority		= 300,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,
	.setkey		= ccm_setkey,
	.setauthsize	= ccm_setauthsize,
	.encrypt	= ccm_encrypt,
	.decrypt	= ccm_decrypt,
};

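/* register only if the CPU advertises the AES instructions */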
static int __init aes_mod_init(void)
{
	if (!(elf_hwcap & HWCAP_AES))
		return -ENODEV;
	return crypto_register_aead(&ccm_aes_alg);
}

static void __exit aes_mod_exit(void)
{
	crypto_unregister_aead(&ccm_aes_alg);
}

module_init(aes_mod_init);
module_exit(aes_mod_exit);

MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ccm(aes)");