/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif


#define AESNI_ALIGN	16
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16

/* This data is stored at the end of the crypto_tfm struct.
 * It is a per-session data storage location and needs to be
 * 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct crypto_aes_ctx aes_key_expanded
		__attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 nonce[4];
};

struct aesni_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
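/*
 * Request-size thresholds used by the AVX/AVX2 GCM dispatchers below:
 * inputs shorter than AVX_GEN2_OPTSIZE (or keys other than 128 bits)
 * take the SSE aesni_gcm_enc/dec path, inputs between the two limits
 * take the AVX (gen2) path, and larger inputs take the AVX2 (gen4)
 * path.  The values are presumably the measured crossover points.
 */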

#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *          is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 * to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 * Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);


#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}

static void aesni_gcm_dec_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}
#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}

static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
				aad, aad_len, auth_tag, auth_tag_len);
	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}
#endif

static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

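/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for a tfm
 * context, which may be weaker than the 16-byte alignment the AES-NI
 * code expects.  The raw context buffers are therefore over-allocated
 * by AESNI_ALIGN - 1 bytes and re-aligned by hand before use.
 */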
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

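/*
 * All AES-NI entry points touch SSE state, so they may only run when
 * the FPU is usable in the current context.  Each wrapper below checks
 * irq_fpu_usable() and falls back to the non-AES-NI implementation
 * when it is not, otherwise it brackets the asm call with
 * kernel_fpu_begin()/kernel_fpu_end().
 */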
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

#ifdef CONFIG_X86_64
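/*
 * CTR mode needs no padding: the final partial block is handled by
 * encrypting the counter block and XORing only the remaining nbytes
 * of keystream into the data.
 */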
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * Based on the key length, dispatch to the by8 version of CTR
	 * mode encryption/decryption for improved performance.
	 * aes_set_key_common() ensures that the key length is one of
	 * {128,192,256} bits.
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}
#endif

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}

#endif

#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif

static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_enc(ctx, blks, blks, nbytes);
}

static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_dec(ctx, blks, blks, nbytes);
}

static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
				 keylen - AES_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}

static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = xts_check_key(tfm, key, keylen);
	if (err)
		return err;

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
				  keylen / 2);
}


static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

#ifdef CONFIG_X86_64

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

#else

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}

static void rfc4106_exit(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}

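/*
 * Derive the GHASH key H used by the GCM asm routines: per the GCM
 * specification, H is the block cipher applied to the all-zero block.
 * The result is kept in the context as hash_subkey and handed to
 * aesni_gcm_enc/dec with every request.
 */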
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_free_cipher;

	/*
	 * Zero the hash sub key container: ciphering an all-zero block
	 * produces the hash sub key.
	 */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
	crypto_free_cipher(tfm);
	return ret;
}

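/*
 * An rfc4106(gcm(aes)) key is the AES key with a 4-byte salt appended
 * (see RFC 4106).  The salt is stashed as ctx->nonce and later forms
 * the first four bytes of the per-request IV.
 */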
static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}

static int helper_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/*
	 * Assuming we support RFC4106 64-bit extended sequence numbers,
	 * the AAD length must be 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Build the IV: 4 byte salt, 8 byte explicit IV, then 0x00000001 */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  dst + req->cryptlen, auth_tag_len);
	kernel_fpu_end();

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 req->cryptlen + auth_tag_len, 1);
		kfree(assoc);
	}
	return 0;
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 authTag[16];
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/*
	 * Assuming we support RFC4106 64-bit extended sequence numbers,
	 * the AAD length must be 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* Build the IV: 4 byte salt, 8 byte explicit IV, then 0x00000001 */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!assoc)
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  authTag, auth_tag_len);
	kernel_fpu_end();

	/* Compare generated tag with passed in tag. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 tempCipherLen, 1);
		kfree(assoc);
	}
	return retval;
}

static int rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;

	tfm = &cryptd_tfm->base;
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		tfm = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, tfm);

	return crypto_aead_encrypt(req);
}

static int rfc4106_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;

	tfm = &cryptd_tfm->base;
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		tfm = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, tfm);

	return crypto_aead_decrypt(req);
}
#endif

static struct crypto_alg aesni_algs[] = { {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
}, {
	.cra_name		= "__aes-aesni",
	.cra_driver_name	= "__driver-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= __aes_encrypt,
			.cia_decrypt		= __aes_decrypt
		}
	}
}, {
	.cra_name		= "__ecb-aes-aesni",
	.cra_driver_name	= "__driver-ecb-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-aes-aesni",
	.cra_driver_name	= "__driver-cbc-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_ecb_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_cbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
#ifdef CONFIG_X86_64
}, {
	.cra_name		= "__ctr-aes-aesni",
	.cra_driver_name	= "__driver-ctr-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_ctr_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
	.cra_name		= "pcbc(aes)",
	.cra_driver_name	= "pcbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_pcbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
#endif
}, {
	.cra_name		= "__lrw-aes-aesni",
	.cra_driver_name	= "__driver-lrw-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aesni_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_exit		= lrw_aesni_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= lrw_aesni_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-aes-aesni",
	.cra_driver_name	= "__driver-xts-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aesni_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= xts_aesni_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
}, {
	.cra_name		= "lrw(aes)",
	.cra_driver_name	= "lrw-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
} };
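
/*
 * The "__driver-*" entries above are synchronous, FPU-dependent
 * implementations marked CRYPTO_ALG_INTERNAL; the user-visible async
 * entries wrap them through the ablk_helper/cryptd machinery so that
 * requests issued from contexts where the FPU cannot be used are
 * deferred to cryptd instead.
 */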

#ifdef CONFIG_X86_64
static struct aead_alg aesni_aead_algs[] = { {
	.setkey			= common_rfc4106_set_key,
	.setauthsize		= common_rfc4106_set_authsize,
	.encrypt		= helper_rfc4106_encrypt,
	.decrypt		= helper_rfc4106_decrypt,
	.ivsize			= 8,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__gcm-aes-aesni",
		.cra_driver_name	= "__driver-gcm-aes-aesni",
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
}, {
	.init			= rfc4106_init,
	.exit			= rfc4106_exit,
	.setkey			= rfc4106_set_key,
	.setauthsize		= rfc4106_set_authsize,
	.encrypt		= rfc4106_encrypt,
	.decrypt		= rfc4106_decrypt,
	.ivsize			= 8,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "rfc4106(gcm(aes))",
		.cra_driver_name	= "rfc4106-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct cryptd_aead *),
		.cra_module		= THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif
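
/*
 * Minimal usage sketch (illustrative only, not part of this driver):
 * another kernel user reaches the RFC4106 AEAD registered above through
 * the generic crypto API roughly as follows.  key/sg/plen/iv are
 * caller-provided, error handling and the completion callback are
 * omitted; the key is the 16-byte AES key followed by the 4-byte salt,
 * the IV is the 8-byte explicit part, and assoclen must be 16 or 20.
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *
 *	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, key, 20);
 *	crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_ad(req, 16);
 *	aead_request_set_crypt(req, sg, sg, plen, iv);
 *	crypto_aead_encrypt(req);
 */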

static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
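
/*
 * The device table above lets userspace autoload this module on any CPU
 * that advertises the AES feature flag; aesni_init() below additionally
 * refuses to load when the flag is absent.
 */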

static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
	} else
#endif
	{
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc;
		aesni_gcm_dec_tfm = aesni_gcm_dec;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif

	err = crypto_fpu_init();
	if (err)
		return err;

	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	if (err)
		goto fpu_exit;

	err = crypto_register_aeads(aesni_aead_algs,
				    ARRAY_SIZE(aesni_aead_algs));
	if (err)
		goto unregister_algs;

	return err;

unregister_algs:
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
	crypto_fpu_exit();
	return err;
}

static void __exit aesni_exit(void)
{
	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}

late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");