// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif


#define AESNI_ALIGN	16
#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
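/*
 * Pad the context size so that the AES key schedule inside it can be
 * realigned to AESNI_ALIGN at runtime (see aes_ctx()), even when the
 * crypto API only guarantees CRYPTO_MINALIGN.
 */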
#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)

/*
 * This data is stored at the end of the crypto_tfm struct.
 * It serves as per-"session" storage and must be 16-byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
	u8 nonce[4];
};

struct generic_gcmaes_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
};

struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};

#define GCM_BLOCK_LEN 16

struct gcm_context_data {
	/* init, update and finalize context data */
	u8 aad_hash[GCM_BLOCK_LEN];
	u64 aad_length;
	u64 in_length;
	u8 partial_block_enc_key[GCM_BLOCK_LEN];
	u8 orig_IV[GCM_BLOCK_LEN];
	u8 current_counter[GCM_BLOCK_LEN];
	u64 partial_block_len;
	u64 unused;
	u8 hash_keys[GCM_BLOCK_LEN * 16];
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

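/*
 * Request sizes below which the AVX/AVX2 GCM routines are not worth their
 * overhead; gcmaes_crypt_by_sg() falls back to a simpler variant for
 * shorter inputs.
 */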
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, le128 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 * struct gcm_context_data.  May be uninitialized.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
 *         16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes.
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx,
			struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * struct gcm_context_data.  May be uninitialized.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
 *         16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 * to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 * Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx,
			struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* Scatter / Gather routines, with args similar to above */
asmlinkage void aesni_gcm_init(void *ctx,
			       struct gcm_context_data *gdata,
			       u8 *iv,
			       u8 *hash_subkey, const u8 *aad,
			       unsigned long aad_len);
asmlinkage void aesni_gcm_enc_update(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

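/*
 * Dispatch table for the GCM implementations above and below.  aesni_init()
 * points aesni_gcm_tfm at the fastest variant the CPU supports (SSE, AVX
 * gen2 or AVX2 gen4).  Callers drive it roughly as follows (sketch only;
 * see gcmaes_crypt_by_sg() for the real scatterlist-walking version):
 *
 *	kernel_fpu_begin();
 *	gcm_tfm->init(aes_ctx, &data, iv, hash_subkey, assoc, assoclen);
 *	while (bytes left)
 *		gcm_tfm->enc_update(aes_ctx, &data, dst, src, len);
 *	gcm_tfm->finalize(aes_ctx, &data, auth_tag, auth_tag_len);
 *	kernel_fpu_end();
 */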
static const struct aesni_gcm_tfm_s {
	void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
		     u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
	void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
			   const u8 *in, unsigned long plaintext_len);
	void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
			   const u8 *in, unsigned long ciphertext_len);
	void (*finalize)(void *ctx, struct gcm_context_data *gdata,
			 u8 *auth_tag, unsigned long auth_tag_len);
} *aesni_gcm_tfm;

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
	.init = &aesni_gcm_init,
	.enc_update = &aesni_gcm_enc_update,
	.dec_update = &aesni_gcm_dec_update,
	.finalize = &aesni_gcm_finalize,
};

#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_init_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
					struct gcm_context_data *gdata,
					u8 *iv,
					u8 *hash_subkey,
					const u8 *aad,
					unsigned long aad_len);

asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx,
				struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx,
				struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
	.init = &aesni_gcm_init_avx_gen2,
	.enc_update = &aesni_gcm_enc_update_avx_gen2,
	.dec_update = &aesni_gcm_dec_update_avx_gen2,
	.finalize = &aesni_gcm_finalize_avx_gen2,
};

#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_init_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
					struct gcm_context_data *gdata,
					u8 *iv,
					u8 *hash_subkey,
					const u8 *aad,
					unsigned long aad_len);

asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx,
				struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx,
				struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
	.init = &aesni_gcm_init_avx_gen4,
	.enc_update = &aesni_gcm_enc_update_avx_gen4,
	.dec_update = &aesni_gcm_dec_update_avx_gen4,
	.finalize = &aesni_gcm_finalize_avx_gen4,
};

#endif

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}

static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

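/*
 * Return a pointer to the AES key schedule inside @raw_ctx, aligned up to
 * AESNI_ALIGN unless the crypto API's own context alignment already
 * guarantees it.
 */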
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

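/*
 * Expand the AES key, using the AES-NI instructions when the FPU is usable
 * and falling back to the generic library implementation otherwise.
 */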
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256)
		return -EINVAL;

	if (!crypto_simd_usable())
		err = aes_expandkey(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!crypto_simd_usable()) {
		aes_encrypt(ctx, dst, src);
	} else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!crypto_simd_usable()) {
		aes_decrypt(ctx, dst, src);
	} else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			         unsigned int len)
{
	return aes_set_key_common(crypto_skcipher_tfm(tfm),
				  crypto_skcipher_ctx(tfm), key, len);
}

static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

#ifdef CONFIG_X86_64
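/*
 * Handle the final partial block of a CTR request: encrypt the counter
 * block and XOR the keystream into the remaining bytes.
 */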
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct skcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor_cpy(dst, keystream, src, nbytes);

	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * Based on the key length, use the by8 version of CTR mode
	 * encryption/decryption for improved performance.
	 * aes_set_key_common() ensures that the key length is one of
	 * {128, 192, 256} bits.
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

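/*
 * CTR mode: full blocks go through aesni_ctr_enc_tfm() (plain AES-NI or the
 * AVX by8 variant chosen at init time); a trailing partial block is handled
 * by ctr_crypt_final().
 */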
static int ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			              nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = skcipher_walk_done(&walk, 0);
	}
	kernel_fpu_end();

	return err;
}

static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
				 key, keylen);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
				  key + keylen, keylen);
}


static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc);
}

static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec);
}

static void aesni_xts_enc8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, dst, src, true, iv);
}

static void aesni_xts_dec8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, dst, src, false, iv);
}

static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = aesni_xts_enc8 }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = aesni_xts_enc }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = aesni_xts_dec8 }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = aesni_xts_dec }
	} }
};

static int xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc,
				   aes_ctx(ctx->raw_tweak_ctx),
				   aes_ctx(ctx->raw_crypt_ctx),
				   false);
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc,
				   aes_ctx(ctx->raw_tweak_ctx),
				   aes_ctx(ctx->raw_crypt_ctx),
				   true);
}

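/*
 * Derive the GHASH subkey by encrypting an all-zero block with the
 * supplied key, as required by the GCM specification.
 */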
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_aes_ctx ctx;
	int ret;

	ret = aes_expandkey(&ctx, key, key_len);
	if (ret)
		return ret;

	/*
	 * Zero the hash subkey container, then encrypt the all-zero block
	 * in place to derive the hash subkey.
	 */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	aes_encrypt(&ctx, hash_subkey, hash_subkey);

	memzero_explicit(&ctx, sizeof(ctx));
	return 0;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4)
		return -EINVAL;

	/* Account for the 4-byte nonce at the end. */
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

/* This is the Integrity Check Value (aka the authentication tag) length and can
 * be 8, 12 or 16 bytes long. */
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
				       unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

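/*
 * Core GCM path: walk the source/destination scatterlists and feed each
 * segment to the selected gcm_tfm implementation, so only the AAD may need
 * to be linearized into a bounce buffer.  On decryption the computed tag is
 * compared with the one at the end of the ciphertext and -EBADMSG is
 * returned on mismatch; on encryption the tag is appended to the output.
 */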
static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
			      unsigned int assoclen, u8 *hash_subkey,
			      u8 *iv, void *aes_ctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
	struct gcm_context_data data AESNI_ALIGN_ATTR;
	struct scatter_walk dst_sg_walk = {};
	unsigned long left = req->cryptlen;
	unsigned long len, srclen, dstlen;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk src_sg_walk;
	struct scatterlist src_start[2];
	struct scatterlist dst_start[2];
	struct scatterlist *src_sg;
	struct scatterlist *dst_sg;
	u8 *src, *dst, *assoc;
	u8 *assocmem = NULL;
	u8 authTag[16];

	if (!enc)
		left -= auth_tag_len;

#ifdef CONFIG_AS_AVX2
	if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
		gcm_tfm = &aesni_gcm_tfm_avx_gen2;
#endif
#ifdef CONFIG_AS_AVX
	if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
		gcm_tfm = &aesni_gcm_tfm_sse;
#endif

	/* Linearize assoc, if not already linear */
	if (req->src->length >= assoclen && req->src->length &&
		(!PageHighMem(sg_page(req->src)) ||
			req->src->offset + req->src->length <= PAGE_SIZE)) {
		scatterwalk_start(&assoc_sg_walk, req->src);
		assoc = scatterwalk_map(&assoc_sg_walk);
	} else {
		/* assoc can be any length, so must be on heap */
		assocmem = kmalloc(assoclen, GFP_ATOMIC);
		if (unlikely(!assocmem))
			return -ENOMEM;
		assoc = assocmem;

		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
	}

	if (left) {
		src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
		scatterwalk_start(&src_sg_walk, src_sg);
		if (req->src != req->dst) {
			dst_sg = scatterwalk_ffwd(dst_start, req->dst,
						  req->assoclen);
			scatterwalk_start(&dst_sg_walk, dst_sg);
		}
	}

	kernel_fpu_begin();
	gcm_tfm->init(aes_ctx, &data, iv,
		hash_subkey, assoc, assoclen);
	if (req->src != req->dst) {
		while (left) {
			src = scatterwalk_map(&src_sg_walk);
			dst = scatterwalk_map(&dst_sg_walk);
			srclen = scatterwalk_clamp(&src_sg_walk, left);
			dstlen = scatterwalk_clamp(&dst_sg_walk, left);
			len = min(srclen, dstlen);
			if (len) {
				if (enc)
					gcm_tfm->enc_update(aes_ctx, &data,
							     dst, src, len);
				else
					gcm_tfm->dec_update(aes_ctx, &data,
							     dst, src, len);
			}
			left -= len;

			scatterwalk_unmap(src);
			scatterwalk_unmap(dst);
			scatterwalk_advance(&src_sg_walk, len);
			scatterwalk_advance(&dst_sg_walk, len);
			scatterwalk_done(&src_sg_walk, 0, left);
			scatterwalk_done(&dst_sg_walk, 1, left);
		}
	} else {
		while (left) {
			dst = src = scatterwalk_map(&src_sg_walk);
			len = scatterwalk_clamp(&src_sg_walk, left);
			if (len) {
				if (enc)
					gcm_tfm->enc_update(aes_ctx, &data,
							     src, src, len);
				else
					gcm_tfm->dec_update(aes_ctx, &data,
							     src, src, len);
			}
			left -= len;
			scatterwalk_unmap(src);
			scatterwalk_advance(&src_sg_walk, len);
			scatterwalk_done(&src_sg_walk, 1, left);
		}
	}
	gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len);
	kernel_fpu_end();

	if (!assocmem)
		scatterwalk_unmap(assoc);
	else
		kfree(assocmem);

	if (!enc) {
		u8 authTagMsg[16];

		/* Copy out original authTag */
		scatterwalk_map_and_copy(authTagMsg, req->src,
					 req->assoclen + req->cryptlen -
					 auth_tag_len,
					 auth_tag_len, 0);

		/* Compare generated tag with passed in tag. */
		return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
			-EBADMSG : 0;
	}

	/* Copy in the authTag */
	scatterwalk_map_and_copy(authTag, req->dst,
				 req->assoclen + req->cryptlen,
				 auth_tag_len, 1);

	return 0;
}

static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
				aes_ctx);
}

static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
				aes_ctx);
}

static int helper_rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	unsigned int i;
	__be32 counter = cpu_to_be32(1);

	/*
	 * Assuming support for rfc4106 64-bit extended sequence numbers,
	 * the AAD length must be 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Build the IV: 4-byte nonce, 8-byte explicit IV, then the initial counter. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	unsigned int i;

	/*
	 * Assuming support for rfc4106 64-bit extended sequence numbers,
	 * the AAD length must be 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Build the IV: 4-byte nonce, 8-byte explicit IV, then the initial counter. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
			      aes_ctx);
}
#endif

static struct crypto_alg aesni_cipher_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aesni_encrypt,
			.cia_decrypt		= aesni_decrypt
		}
	}
};

static struct skcipher_alg aesni_skciphers[] = {
	{
		.base = {
			.cra_name		= "__ecb(aes)",
			.cra_driver_name	= "__ecb-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ecb_encrypt,
		.decrypt	= ecb_decrypt,
	}, {
		.base = {
			.cra_name		= "__cbc(aes)",
			.cra_driver_name	= "__cbc-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= cbc_encrypt,
		.decrypt	= cbc_decrypt,
#ifdef CONFIG_X86_64
	}, {
		.base = {
			.cra_name		= "__ctr(aes)",
			.cra_driver_name	= "__ctr-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= 1,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.chunksize	= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ctr_crypt,
		.decrypt	= ctr_crypt,
	}, {
		.base = {
			.cra_name		= "__xts(aes)",
			.cra_driver_name	= "__xts-aes-aesni",
			.cra_priority		= 401,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= XTS_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= xts_aesni_setkey,
		.encrypt	= xts_encrypt,
		.decrypt	= xts_decrypt,
#endif
	}
};

static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

#ifdef CONFIG_X86_64
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

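/*
 * Plain gcm(aes): the full 12-byte IV comes from the request; the initial
 * counter value of 1 is appended to form the pre-counter block J0.
 */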
static int generic_gcmaes_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	__be32 counter = cpu_to_be32(1);

	memcpy(iv, req->iv, 12);
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int generic_gcmaes_decrypt(struct aead_request *req)
{
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));

	memcpy(iv, req->iv, 12);
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
			      aes_ctx);
}

static struct aead_alg aesni_aeads[] = { {
	.setkey			= common_rfc4106_set_key,
	.setauthsize		= common_rfc4106_set_authsize,
	.encrypt		= helper_rfc4106_encrypt,
	.decrypt		= helper_rfc4106_decrypt,
	.ivsize			= GCM_RFC4106_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__rfc4106(gcm(aes))",
		.cra_driver_name	= "__rfc4106-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
}, {
	.setkey			= generic_gcmaes_set_key,
	.setauthsize		= generic_gcmaes_set_authsize,
	.encrypt		= generic_gcmaes_encrypt,
	.decrypt		= generic_gcmaes_decrypt,
	.ivsize			= GCM_AES_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__gcm(aes)",
		.cra_driver_name	= "__generic-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aeads[0];
#endif

static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];

static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
	} else
#endif
	{
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_sse;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif

	err = crypto_register_alg(&aesni_cipher_alg);
	if (err)
		return err;

	err = simd_register_skciphers_compat(aesni_skciphers,
					     ARRAY_SIZE(aesni_skciphers),
					     aesni_simd_skciphers);
	if (err)
		goto unregister_cipher;

	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
					 aesni_simd_aeads);
	if (err)
		goto unregister_skciphers;

	return 0;

unregister_skciphers:
	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
unregister_cipher:
	crypto_unregister_alg(&aesni_cipher_alg);
	return err;
}

static void __exit aesni_exit(void)
{
	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
			      aesni_simd_aeads);
	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
	crypto_unregister_alg(&aesni_cipher_alg);
}

late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");