1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Support for Intel AES-NI instructions. This file contains glue 4 * code, the real AES implementation is in intel-aes_asm.S. 5 * 6 * Copyright (C) 2008, Intel Corp. 7 * Author: Huang Ying <ying.huang@intel.com> 8 * 9 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD 10 * interface for 64-bit kernels. 11 * Authors: Adrian Hoban <adrian.hoban@intel.com> 12 * Gabriele Paoloni <gabriele.paoloni@intel.com> 13 * Tadeusz Struk (tadeusz.struk@intel.com) 14 * Aidan O'Mahony (aidan.o.mahony@intel.com) 15 * Copyright (c) 2010, Intel Corporation. 16 */ 17 18 #include <linux/hardirq.h> 19 #include <linux/types.h> 20 #include <linux/module.h> 21 #include <linux/err.h> 22 #include <crypto/algapi.h> 23 #include <crypto/aes.h> 24 #include <crypto/ctr.h> 25 #include <crypto/b128ops.h> 26 #include <crypto/gcm.h> 27 #include <crypto/xts.h> 28 #include <asm/cpu_device_id.h> 29 #include <asm/simd.h> 30 #include <crypto/scatterwalk.h> 31 #include <crypto/internal/aead.h> 32 #include <crypto/internal/simd.h> 33 #include <crypto/internal/skcipher.h> 34 #include <linux/workqueue.h> 35 #include <linux/spinlock.h> 36 #ifdef CONFIG_X86_64 37 #include <asm/crypto/glue_helper.h> 38 #endif 39 40 41 #define AESNI_ALIGN 16 42 #define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN))) 43 #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE - 1)) 44 #define RFC4106_HASH_SUBKEY_SIZE 16 45 #define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1)) 46 #define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA) 47 #define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA) 48 49 /* This data is stored at the end of the crypto_tfm struct. 50 * It's a type of per "session" data storage location. 51 * This needs to be 16 byte aligned. 52 */ 53 struct aesni_rfc4106_gcm_ctx { 54 u8 hash_subkey[16] AESNI_ALIGN_ATTR; 55 struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR; 56 u8 nonce[4]; 57 }; 58 59 struct generic_gcmaes_ctx { 60 u8 hash_subkey[16] AESNI_ALIGN_ATTR; 61 struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR; 62 }; 63 64 struct aesni_xts_ctx { 65 u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR; 66 u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR; 67 }; 68 69 #define GCM_BLOCK_LEN 16 70 71 struct gcm_context_data { 72 /* init, update and finalize context data */ 73 u8 aad_hash[GCM_BLOCK_LEN]; 74 u64 aad_length; 75 u64 in_length; 76 u8 partial_block_enc_key[GCM_BLOCK_LEN]; 77 u8 orig_IV[GCM_BLOCK_LEN]; 78 u8 current_counter[GCM_BLOCK_LEN]; 79 u64 partial_block_len; 80 u64 unused; 81 u8 hash_keys[GCM_BLOCK_LEN * 16]; 82 }; 83 84 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, 85 unsigned int key_len); 86 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out, 87 const u8 *in); 88 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out, 89 const u8 *in); 90 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out, 91 const u8 *in, unsigned int len); 92 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out, 93 const u8 *in, unsigned int len); 94 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out, 95 const u8 *in, unsigned int len, u8 *iv); 96 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, 97 const u8 *in, unsigned int len, u8 *iv); 98 99 #define AVX_GEN2_OPTSIZE 640 100 #define AVX_GEN4_OPTSIZE 4096 101 102 #ifdef CONFIG_X86_64 103 104 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out, 105 const u8 *in, unsigned int len, u8 *iv); 106 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out, 107 const u8 *in, unsigned int len, u8 *iv); 108 109 asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out, 110 const u8 *in, bool enc, u8 *iv); 111 112 /* asmlinkage void aesni_gcm_enc() 113 * void *ctx, AES Key schedule. Starts on a 16 byte boundary. 114 * struct gcm_context_data. May be uninitialized. 115 * u8 *out, Ciphertext output. Encrypt in-place is allowed. 116 * const u8 *in, Plaintext input 117 * unsigned long plaintext_len, Length of data in bytes for encryption. 118 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001. 119 * 16-byte aligned pointer. 120 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. 121 * const u8 *aad, Additional Authentication Data (AAD) 122 * unsigned long aad_len, Length of AAD in bytes. 123 * u8 *auth_tag, Authenticated Tag output. 124 * unsigned long auth_tag_len), Authenticated Tag Length in bytes. 125 * Valid values are 16 (most likely), 12 or 8. 126 */ 127 asmlinkage void aesni_gcm_enc(void *ctx, 128 struct gcm_context_data *gdata, u8 *out, 129 const u8 *in, unsigned long plaintext_len, u8 *iv, 130 u8 *hash_subkey, const u8 *aad, unsigned long aad_len, 131 u8 *auth_tag, unsigned long auth_tag_len); 132 133 /* asmlinkage void aesni_gcm_dec() 134 * void *ctx, AES Key schedule. Starts on a 16 byte boundary. 135 * struct gcm_context_data. May be uninitialized. 136 * u8 *out, Plaintext output. Decrypt in-place is allowed. 137 * const u8 *in, Ciphertext input 138 * unsigned long ciphertext_len, Length of data in bytes for decryption. 139 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001. 140 * 16-byte aligned pointer. 141 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. 142 * const u8 *aad, Additional Authentication Data (AAD) 143 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going 144 * to be 8 or 12 bytes 145 * u8 *auth_tag, Authenticated Tag output. 146 * unsigned long auth_tag_len) Authenticated Tag Length in bytes. 147 * Valid values are 16 (most likely), 12 or 8. 148 */ 149 asmlinkage void aesni_gcm_dec(void *ctx, 150 struct gcm_context_data *gdata, u8 *out, 151 const u8 *in, unsigned long ciphertext_len, u8 *iv, 152 u8 *hash_subkey, const u8 *aad, unsigned long aad_len, 153 u8 *auth_tag, unsigned long auth_tag_len); 154 155 /* Scatter / Gather routines, with args similar to above */ 156 asmlinkage void aesni_gcm_init(void *ctx, 157 struct gcm_context_data *gdata, 158 u8 *iv, 159 u8 *hash_subkey, const u8 *aad, 160 unsigned long aad_len); 161 asmlinkage void aesni_gcm_enc_update(void *ctx, 162 struct gcm_context_data *gdata, u8 *out, 163 const u8 *in, unsigned long plaintext_len); 164 asmlinkage void aesni_gcm_dec_update(void *ctx, 165 struct gcm_context_data *gdata, u8 *out, 166 const u8 *in, 167 unsigned long ciphertext_len); 168 asmlinkage void aesni_gcm_finalize(void *ctx, 169 struct gcm_context_data *gdata, 170 u8 *auth_tag, unsigned long auth_tag_len); 171 172 static const struct aesni_gcm_tfm_s { 173 void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv, 174 u8 *hash_subkey, const u8 *aad, unsigned long aad_len); 175 void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out, 176 const u8 *in, unsigned long plaintext_len); 177 void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out, 178 const u8 *in, unsigned long ciphertext_len); 179 void (*finalize)(void *ctx, struct gcm_context_data *gdata, 180 u8 *auth_tag, unsigned long auth_tag_len); 181 } *aesni_gcm_tfm; 182 183 static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = { 184 .init = &aesni_gcm_init, 185 .enc_update = &aesni_gcm_enc_update, 186 .dec_update = &aesni_gcm_dec_update, 187 .finalize = &aesni_gcm_finalize, 188 }; 189 190 #ifdef CONFIG_AS_AVX 191 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv, 192 void *keys, u8 *out, unsigned int num_bytes); 193 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv, 194 void *keys, u8 *out, unsigned int num_bytes); 195 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv, 196 void *keys, u8 *out, unsigned int num_bytes); 197 /* 198 * asmlinkage void aesni_gcm_init_avx_gen2() 199 * gcm_data *my_ctx_data, context data 200 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. 201 */ 202 asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data, 203 struct gcm_context_data *gdata, 204 u8 *iv, 205 u8 *hash_subkey, 206 const u8 *aad, 207 unsigned long aad_len); 208 209 asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx, 210 struct gcm_context_data *gdata, u8 *out, 211 const u8 *in, unsigned long plaintext_len); 212 asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx, 213 struct gcm_context_data *gdata, u8 *out, 214 const u8 *in, 215 unsigned long ciphertext_len); 216 asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx, 217 struct gcm_context_data *gdata, 218 u8 *auth_tag, unsigned long auth_tag_len); 219 220 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, 221 struct gcm_context_data *gdata, u8 *out, 222 const u8 *in, unsigned long plaintext_len, u8 *iv, 223 const u8 *aad, unsigned long aad_len, 224 u8 *auth_tag, unsigned long auth_tag_len); 225 226 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, 227 struct gcm_context_data *gdata, u8 *out, 228 const u8 *in, unsigned long ciphertext_len, u8 *iv, 229 const u8 *aad, unsigned long aad_len, 230 u8 *auth_tag, unsigned long auth_tag_len); 231 232 static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = { 233 .init = &aesni_gcm_init_avx_gen2, 234 .enc_update = &aesni_gcm_enc_update_avx_gen2, 235 .dec_update = &aesni_gcm_dec_update_avx_gen2, 236 .finalize = &aesni_gcm_finalize_avx_gen2, 237 }; 238 239 #endif 240 241 #ifdef CONFIG_AS_AVX2 242 /* 243 * asmlinkage void aesni_gcm_init_avx_gen4() 244 * gcm_data *my_ctx_data, context data 245 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. 246 */ 247 asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data, 248 struct gcm_context_data *gdata, 249 u8 *iv, 250 u8 *hash_subkey, 251 const u8 *aad, 252 unsigned long aad_len); 253 254 asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx, 255 struct gcm_context_data *gdata, u8 *out, 256 const u8 *in, unsigned long plaintext_len); 257 asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx, 258 struct gcm_context_data *gdata, u8 *out, 259 const u8 *in, 260 unsigned long ciphertext_len); 261 asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx, 262 struct gcm_context_data *gdata, 263 u8 *auth_tag, unsigned long auth_tag_len); 264 265 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, 266 struct gcm_context_data *gdata, u8 *out, 267 const u8 *in, unsigned long plaintext_len, u8 *iv, 268 const u8 *aad, unsigned long aad_len, 269 u8 *auth_tag, unsigned long auth_tag_len); 270 271 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, 272 struct gcm_context_data *gdata, u8 *out, 273 const u8 *in, unsigned long ciphertext_len, u8 *iv, 274 const u8 *aad, unsigned long aad_len, 275 u8 *auth_tag, unsigned long auth_tag_len); 276 277 static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = { 278 .init = &aesni_gcm_init_avx_gen4, 279 .enc_update = &aesni_gcm_enc_update_avx_gen4, 280 .dec_update = &aesni_gcm_dec_update_avx_gen4, 281 .finalize = &aesni_gcm_finalize_avx_gen4, 282 }; 283 284 #endif 285 286 static inline struct 287 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm) 288 { 289 unsigned long align = AESNI_ALIGN; 290 291 if (align <= crypto_tfm_ctx_alignment()) 292 align = 1; 293 return PTR_ALIGN(crypto_aead_ctx(tfm), align); 294 } 295 296 static inline struct 297 generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm) 298 { 299 unsigned long align = AESNI_ALIGN; 300 301 if (align <= crypto_tfm_ctx_alignment()) 302 align = 1; 303 return PTR_ALIGN(crypto_aead_ctx(tfm), align); 304 } 305 #endif 306 307 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx) 308 { 309 unsigned long addr = (unsigned long)raw_ctx; 310 unsigned long align = AESNI_ALIGN; 311 312 if (align <= crypto_tfm_ctx_alignment()) 313 align = 1; 314 return (struct crypto_aes_ctx *)ALIGN(addr, align); 315 } 316 317 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx, 318 const u8 *in_key, unsigned int key_len) 319 { 320 struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx); 321 u32 *flags = &tfm->crt_flags; 322 int err; 323 324 if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 && 325 key_len != AES_KEYSIZE_256) { 326 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 327 return -EINVAL; 328 } 329 330 if (!crypto_simd_usable()) 331 err = aes_expandkey(ctx, in_key, key_len); 332 else { 333 kernel_fpu_begin(); 334 err = aesni_set_key(ctx, in_key, key_len); 335 kernel_fpu_end(); 336 } 337 338 return err; 339 } 340 341 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, 342 unsigned int key_len) 343 { 344 return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len); 345 } 346 347 static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 348 { 349 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); 350 351 if (!crypto_simd_usable()) { 352 aes_encrypt(ctx, dst, src); 353 } else { 354 kernel_fpu_begin(); 355 aesni_enc(ctx, dst, src); 356 kernel_fpu_end(); 357 } 358 } 359 360 static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 361 { 362 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); 363 364 if (!crypto_simd_usable()) { 365 aes_decrypt(ctx, dst, src); 366 } else { 367 kernel_fpu_begin(); 368 aesni_dec(ctx, dst, src); 369 kernel_fpu_end(); 370 } 371 } 372 373 static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, 374 unsigned int len) 375 { 376 return aes_set_key_common(crypto_skcipher_tfm(tfm), 377 crypto_skcipher_ctx(tfm), key, len); 378 } 379 380 static int ecb_encrypt(struct skcipher_request *req) 381 { 382 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 383 struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); 384 struct skcipher_walk walk; 385 unsigned int nbytes; 386 int err; 387 388 err = skcipher_walk_virt(&walk, req, true); 389 390 kernel_fpu_begin(); 391 while ((nbytes = walk.nbytes)) { 392 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, 393 nbytes & AES_BLOCK_MASK); 394 nbytes &= AES_BLOCK_SIZE - 1; 395 err = skcipher_walk_done(&walk, nbytes); 396 } 397 kernel_fpu_end(); 398 399 return err; 400 } 401 402 static int ecb_decrypt(struct skcipher_request *req) 403 { 404 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 405 struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); 406 struct skcipher_walk walk; 407 unsigned int nbytes; 408 int err; 409 410 err = skcipher_walk_virt(&walk, req, true); 411 412 kernel_fpu_begin(); 413 while ((nbytes = walk.nbytes)) { 414 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, 415 nbytes & AES_BLOCK_MASK); 416 nbytes &= AES_BLOCK_SIZE - 1; 417 err = skcipher_walk_done(&walk, nbytes); 418 } 419 kernel_fpu_end(); 420 421 return err; 422 } 423 424 static int cbc_encrypt(struct skcipher_request *req) 425 { 426 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 427 struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); 428 struct skcipher_walk walk; 429 unsigned int nbytes; 430 int err; 431 432 err = skcipher_walk_virt(&walk, req, true); 433 434 kernel_fpu_begin(); 435 while ((nbytes = walk.nbytes)) { 436 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, 437 nbytes & AES_BLOCK_MASK, walk.iv); 438 nbytes &= AES_BLOCK_SIZE - 1; 439 err = skcipher_walk_done(&walk, nbytes); 440 } 441 kernel_fpu_end(); 442 443 return err; 444 } 445 446 static int cbc_decrypt(struct skcipher_request *req) 447 { 448 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 449 struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); 450 struct skcipher_walk walk; 451 unsigned int nbytes; 452 int err; 453 454 err = skcipher_walk_virt(&walk, req, true); 455 456 kernel_fpu_begin(); 457 while ((nbytes = walk.nbytes)) { 458 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, 459 nbytes & AES_BLOCK_MASK, walk.iv); 460 nbytes &= AES_BLOCK_SIZE - 1; 461 err = skcipher_walk_done(&walk, nbytes); 462 } 463 kernel_fpu_end(); 464 465 return err; 466 } 467 468 #ifdef CONFIG_X86_64 469 static void ctr_crypt_final(struct crypto_aes_ctx *ctx, 470 struct skcipher_walk *walk) 471 { 472 u8 *ctrblk = walk->iv; 473 u8 keystream[AES_BLOCK_SIZE]; 474 u8 *src = walk->src.virt.addr; 475 u8 *dst = walk->dst.virt.addr; 476 unsigned int nbytes = walk->nbytes; 477 478 aesni_enc(ctx, keystream, ctrblk); 479 crypto_xor_cpy(dst, keystream, src, nbytes); 480 481 crypto_inc(ctrblk, AES_BLOCK_SIZE); 482 } 483 484 #ifdef CONFIG_AS_AVX 485 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, 486 const u8 *in, unsigned int len, u8 *iv) 487 { 488 /* 489 * based on key length, override with the by8 version 490 * of ctr mode encryption/decryption for improved performance 491 * aes_set_key_common() ensures that key length is one of 492 * {128,192,256} 493 */ 494 if (ctx->key_length == AES_KEYSIZE_128) 495 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len); 496 else if (ctx->key_length == AES_KEYSIZE_192) 497 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len); 498 else 499 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len); 500 } 501 #endif 502 503 static int ctr_crypt(struct skcipher_request *req) 504 { 505 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 506 struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); 507 struct skcipher_walk walk; 508 unsigned int nbytes; 509 int err; 510 511 err = skcipher_walk_virt(&walk, req, true); 512 513 kernel_fpu_begin(); 514 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { 515 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, 516 nbytes & AES_BLOCK_MASK, walk.iv); 517 nbytes &= AES_BLOCK_SIZE - 1; 518 err = skcipher_walk_done(&walk, nbytes); 519 } 520 if (walk.nbytes) { 521 ctr_crypt_final(ctx, &walk); 522 err = skcipher_walk_done(&walk, 0); 523 } 524 kernel_fpu_end(); 525 526 return err; 527 } 528 529 static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key, 530 unsigned int keylen) 531 { 532 struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 533 int err; 534 535 err = xts_verify_key(tfm, key, keylen); 536 if (err) 537 return err; 538 539 keylen /= 2; 540 541 /* first half of xts-key is for crypt */ 542 err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx, 543 key, keylen); 544 if (err) 545 return err; 546 547 /* second half of xts-key is for tweak */ 548 return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx, 549 key + keylen, keylen); 550 } 551 552 553 static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in) 554 { 555 aesni_enc(ctx, out, in); 556 } 557 558 static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) 559 { 560 glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc)); 561 } 562 563 static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) 564 { 565 glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec)); 566 } 567 568 static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv) 569 { 570 aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv); 571 } 572 573 static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv) 574 { 575 aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv); 576 } 577 578 static const struct common_glue_ctx aesni_enc_xts = { 579 .num_funcs = 2, 580 .fpu_blocks_limit = 1, 581 582 .funcs = { { 583 .num_blocks = 8, 584 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) } 585 }, { 586 .num_blocks = 1, 587 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) } 588 } } 589 }; 590 591 static const struct common_glue_ctx aesni_dec_xts = { 592 .num_funcs = 2, 593 .fpu_blocks_limit = 1, 594 595 .funcs = { { 596 .num_blocks = 8, 597 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) } 598 }, { 599 .num_blocks = 1, 600 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) } 601 } } 602 }; 603 604 static int xts_encrypt(struct skcipher_request *req) 605 { 606 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 607 struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 608 609 return glue_xts_req_128bit(&aesni_enc_xts, req, 610 XTS_TWEAK_CAST(aesni_xts_tweak), 611 aes_ctx(ctx->raw_tweak_ctx), 612 aes_ctx(ctx->raw_crypt_ctx), 613 false); 614 } 615 616 static int xts_decrypt(struct skcipher_request *req) 617 { 618 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 619 struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 620 621 return glue_xts_req_128bit(&aesni_dec_xts, req, 622 XTS_TWEAK_CAST(aesni_xts_tweak), 623 aes_ctx(ctx->raw_tweak_ctx), 624 aes_ctx(ctx->raw_crypt_ctx), 625 true); 626 } 627 628 static int 629 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) 630 { 631 struct crypto_cipher *tfm; 632 int ret; 633 634 tfm = crypto_alloc_cipher("aes", 0, 0); 635 if (IS_ERR(tfm)) 636 return PTR_ERR(tfm); 637 638 ret = crypto_cipher_setkey(tfm, key, key_len); 639 if (ret) 640 goto out_free_cipher; 641 642 /* Clear the data in the hash sub key container to zero.*/ 643 /* We want to cipher all zeros to create the hash sub key. */ 644 memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE); 645 646 crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey); 647 648 out_free_cipher: 649 crypto_free_cipher(tfm); 650 return ret; 651 } 652 653 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key, 654 unsigned int key_len) 655 { 656 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead); 657 658 if (key_len < 4) { 659 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 660 return -EINVAL; 661 } 662 /*Account for 4 byte nonce at the end.*/ 663 key_len -= 4; 664 665 memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce)); 666 667 return aes_set_key_common(crypto_aead_tfm(aead), 668 &ctx->aes_key_expanded, key, key_len) ?: 669 rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); 670 } 671 672 /* This is the Integrity Check Value (aka the authentication tag) length and can 673 * be 8, 12 or 16 bytes long. */ 674 static int common_rfc4106_set_authsize(struct crypto_aead *aead, 675 unsigned int authsize) 676 { 677 switch (authsize) { 678 case 8: 679 case 12: 680 case 16: 681 break; 682 default: 683 return -EINVAL; 684 } 685 686 return 0; 687 } 688 689 static int generic_gcmaes_set_authsize(struct crypto_aead *tfm, 690 unsigned int authsize) 691 { 692 switch (authsize) { 693 case 4: 694 case 8: 695 case 12: 696 case 13: 697 case 14: 698 case 15: 699 case 16: 700 break; 701 default: 702 return -EINVAL; 703 } 704 705 return 0; 706 } 707 708 static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, 709 unsigned int assoclen, u8 *hash_subkey, 710 u8 *iv, void *aes_ctx) 711 { 712 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 713 unsigned long auth_tag_len = crypto_aead_authsize(tfm); 714 const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm; 715 struct gcm_context_data data AESNI_ALIGN_ATTR; 716 struct scatter_walk dst_sg_walk = {}; 717 unsigned long left = req->cryptlen; 718 unsigned long len, srclen, dstlen; 719 struct scatter_walk assoc_sg_walk; 720 struct scatter_walk src_sg_walk; 721 struct scatterlist src_start[2]; 722 struct scatterlist dst_start[2]; 723 struct scatterlist *src_sg; 724 struct scatterlist *dst_sg; 725 u8 *src, *dst, *assoc; 726 u8 *assocmem = NULL; 727 u8 authTag[16]; 728 729 if (!enc) 730 left -= auth_tag_len; 731 732 #ifdef CONFIG_AS_AVX2 733 if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4) 734 gcm_tfm = &aesni_gcm_tfm_avx_gen2; 735 #endif 736 #ifdef CONFIG_AS_AVX 737 if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2) 738 gcm_tfm = &aesni_gcm_tfm_sse; 739 #endif 740 741 /* Linearize assoc, if not already linear */ 742 if (req->src->length >= assoclen && req->src->length && 743 (!PageHighMem(sg_page(req->src)) || 744 req->src->offset + req->src->length <= PAGE_SIZE)) { 745 scatterwalk_start(&assoc_sg_walk, req->src); 746 assoc = scatterwalk_map(&assoc_sg_walk); 747 } else { 748 /* assoc can be any length, so must be on heap */ 749 assocmem = kmalloc(assoclen, GFP_ATOMIC); 750 if (unlikely(!assocmem)) 751 return -ENOMEM; 752 assoc = assocmem; 753 754 scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0); 755 } 756 757 if (left) { 758 src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen); 759 scatterwalk_start(&src_sg_walk, src_sg); 760 if (req->src != req->dst) { 761 dst_sg = scatterwalk_ffwd(dst_start, req->dst, 762 req->assoclen); 763 scatterwalk_start(&dst_sg_walk, dst_sg); 764 } 765 } 766 767 kernel_fpu_begin(); 768 gcm_tfm->init(aes_ctx, &data, iv, 769 hash_subkey, assoc, assoclen); 770 if (req->src != req->dst) { 771 while (left) { 772 src = scatterwalk_map(&src_sg_walk); 773 dst = scatterwalk_map(&dst_sg_walk); 774 srclen = scatterwalk_clamp(&src_sg_walk, left); 775 dstlen = scatterwalk_clamp(&dst_sg_walk, left); 776 len = min(srclen, dstlen); 777 if (len) { 778 if (enc) 779 gcm_tfm->enc_update(aes_ctx, &data, 780 dst, src, len); 781 else 782 gcm_tfm->dec_update(aes_ctx, &data, 783 dst, src, len); 784 } 785 left -= len; 786 787 scatterwalk_unmap(src); 788 scatterwalk_unmap(dst); 789 scatterwalk_advance(&src_sg_walk, len); 790 scatterwalk_advance(&dst_sg_walk, len); 791 scatterwalk_done(&src_sg_walk, 0, left); 792 scatterwalk_done(&dst_sg_walk, 1, left); 793 } 794 } else { 795 while (left) { 796 dst = src = scatterwalk_map(&src_sg_walk); 797 len = scatterwalk_clamp(&src_sg_walk, left); 798 if (len) { 799 if (enc) 800 gcm_tfm->enc_update(aes_ctx, &data, 801 src, src, len); 802 else 803 gcm_tfm->dec_update(aes_ctx, &data, 804 src, src, len); 805 } 806 left -= len; 807 scatterwalk_unmap(src); 808 scatterwalk_advance(&src_sg_walk, len); 809 scatterwalk_done(&src_sg_walk, 1, left); 810 } 811 } 812 gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len); 813 kernel_fpu_end(); 814 815 if (!assocmem) 816 scatterwalk_unmap(assoc); 817 else 818 kfree(assocmem); 819 820 if (!enc) { 821 u8 authTagMsg[16]; 822 823 /* Copy out original authTag */ 824 scatterwalk_map_and_copy(authTagMsg, req->src, 825 req->assoclen + req->cryptlen - 826 auth_tag_len, 827 auth_tag_len, 0); 828 829 /* Compare generated tag with passed in tag. */ 830 return crypto_memneq(authTagMsg, authTag, auth_tag_len) ? 831 -EBADMSG : 0; 832 } 833 834 /* Copy in the authTag */ 835 scatterwalk_map_and_copy(authTag, req->dst, 836 req->assoclen + req->cryptlen, 837 auth_tag_len, 1); 838 839 return 0; 840 } 841 842 static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen, 843 u8 *hash_subkey, u8 *iv, void *aes_ctx) 844 { 845 return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, 846 aes_ctx); 847 } 848 849 static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen, 850 u8 *hash_subkey, u8 *iv, void *aes_ctx) 851 { 852 return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, 853 aes_ctx); 854 } 855 856 static int helper_rfc4106_encrypt(struct aead_request *req) 857 { 858 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 859 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); 860 void *aes_ctx = &(ctx->aes_key_expanded); 861 u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); 862 unsigned int i; 863 __be32 counter = cpu_to_be32(1); 864 865 /* Assuming we are supporting rfc4106 64-bit extended */ 866 /* sequence numbers We need to have the AAD length equal */ 867 /* to 16 or 20 bytes */ 868 if (unlikely(req->assoclen != 16 && req->assoclen != 20)) 869 return -EINVAL; 870 871 /* IV below built */ 872 for (i = 0; i < 4; i++) 873 *(iv+i) = ctx->nonce[i]; 874 for (i = 0; i < 8; i++) 875 *(iv+4+i) = req->iv[i]; 876 *((__be32 *)(iv+12)) = counter; 877 878 return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv, 879 aes_ctx); 880 } 881 882 static int helper_rfc4106_decrypt(struct aead_request *req) 883 { 884 __be32 counter = cpu_to_be32(1); 885 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 886 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); 887 void *aes_ctx = &(ctx->aes_key_expanded); 888 u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); 889 unsigned int i; 890 891 if (unlikely(req->assoclen != 16 && req->assoclen != 20)) 892 return -EINVAL; 893 894 /* Assuming we are supporting rfc4106 64-bit extended */ 895 /* sequence numbers We need to have the AAD length */ 896 /* equal to 16 or 20 bytes */ 897 898 /* IV below built */ 899 for (i = 0; i < 4; i++) 900 *(iv+i) = ctx->nonce[i]; 901 for (i = 0; i < 8; i++) 902 *(iv+4+i) = req->iv[i]; 903 *((__be32 *)(iv+12)) = counter; 904 905 return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv, 906 aes_ctx); 907 } 908 #endif 909 910 static struct crypto_alg aesni_cipher_alg = { 911 .cra_name = "aes", 912 .cra_driver_name = "aes-aesni", 913 .cra_priority = 300, 914 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 915 .cra_blocksize = AES_BLOCK_SIZE, 916 .cra_ctxsize = CRYPTO_AES_CTX_SIZE, 917 .cra_module = THIS_MODULE, 918 .cra_u = { 919 .cipher = { 920 .cia_min_keysize = AES_MIN_KEY_SIZE, 921 .cia_max_keysize = AES_MAX_KEY_SIZE, 922 .cia_setkey = aes_set_key, 923 .cia_encrypt = aesni_encrypt, 924 .cia_decrypt = aesni_decrypt 925 } 926 } 927 }; 928 929 static struct skcipher_alg aesni_skciphers[] = { 930 { 931 .base = { 932 .cra_name = "__ecb(aes)", 933 .cra_driver_name = "__ecb-aes-aesni", 934 .cra_priority = 400, 935 .cra_flags = CRYPTO_ALG_INTERNAL, 936 .cra_blocksize = AES_BLOCK_SIZE, 937 .cra_ctxsize = CRYPTO_AES_CTX_SIZE, 938 .cra_module = THIS_MODULE, 939 }, 940 .min_keysize = AES_MIN_KEY_SIZE, 941 .max_keysize = AES_MAX_KEY_SIZE, 942 .setkey = aesni_skcipher_setkey, 943 .encrypt = ecb_encrypt, 944 .decrypt = ecb_decrypt, 945 }, { 946 .base = { 947 .cra_name = "__cbc(aes)", 948 .cra_driver_name = "__cbc-aes-aesni", 949 .cra_priority = 400, 950 .cra_flags = CRYPTO_ALG_INTERNAL, 951 .cra_blocksize = AES_BLOCK_SIZE, 952 .cra_ctxsize = CRYPTO_AES_CTX_SIZE, 953 .cra_module = THIS_MODULE, 954 }, 955 .min_keysize = AES_MIN_KEY_SIZE, 956 .max_keysize = AES_MAX_KEY_SIZE, 957 .ivsize = AES_BLOCK_SIZE, 958 .setkey = aesni_skcipher_setkey, 959 .encrypt = cbc_encrypt, 960 .decrypt = cbc_decrypt, 961 #ifdef CONFIG_X86_64 962 }, { 963 .base = { 964 .cra_name = "__ctr(aes)", 965 .cra_driver_name = "__ctr-aes-aesni", 966 .cra_priority = 400, 967 .cra_flags = CRYPTO_ALG_INTERNAL, 968 .cra_blocksize = 1, 969 .cra_ctxsize = CRYPTO_AES_CTX_SIZE, 970 .cra_module = THIS_MODULE, 971 }, 972 .min_keysize = AES_MIN_KEY_SIZE, 973 .max_keysize = AES_MAX_KEY_SIZE, 974 .ivsize = AES_BLOCK_SIZE, 975 .chunksize = AES_BLOCK_SIZE, 976 .setkey = aesni_skcipher_setkey, 977 .encrypt = ctr_crypt, 978 .decrypt = ctr_crypt, 979 }, { 980 .base = { 981 .cra_name = "__xts(aes)", 982 .cra_driver_name = "__xts-aes-aesni", 983 .cra_priority = 401, 984 .cra_flags = CRYPTO_ALG_INTERNAL, 985 .cra_blocksize = AES_BLOCK_SIZE, 986 .cra_ctxsize = XTS_AES_CTX_SIZE, 987 .cra_module = THIS_MODULE, 988 }, 989 .min_keysize = 2 * AES_MIN_KEY_SIZE, 990 .max_keysize = 2 * AES_MAX_KEY_SIZE, 991 .ivsize = AES_BLOCK_SIZE, 992 .setkey = xts_aesni_setkey, 993 .encrypt = xts_encrypt, 994 .decrypt = xts_decrypt, 995 #endif 996 } 997 }; 998 999 static 1000 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)]; 1001 1002 #ifdef CONFIG_X86_64 1003 static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key, 1004 unsigned int key_len) 1005 { 1006 struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead); 1007 1008 return aes_set_key_common(crypto_aead_tfm(aead), 1009 &ctx->aes_key_expanded, key, key_len) ?: 1010 rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); 1011 } 1012 1013 static int generic_gcmaes_encrypt(struct aead_request *req) 1014 { 1015 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1016 struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm); 1017 void *aes_ctx = &(ctx->aes_key_expanded); 1018 u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); 1019 __be32 counter = cpu_to_be32(1); 1020 1021 memcpy(iv, req->iv, 12); 1022 *((__be32 *)(iv+12)) = counter; 1023 1024 return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv, 1025 aes_ctx); 1026 } 1027 1028 static int generic_gcmaes_decrypt(struct aead_request *req) 1029 { 1030 __be32 counter = cpu_to_be32(1); 1031 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1032 struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm); 1033 void *aes_ctx = &(ctx->aes_key_expanded); 1034 u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); 1035 1036 memcpy(iv, req->iv, 12); 1037 *((__be32 *)(iv+12)) = counter; 1038 1039 return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv, 1040 aes_ctx); 1041 } 1042 1043 static struct aead_alg aesni_aeads[] = { { 1044 .setkey = common_rfc4106_set_key, 1045 .setauthsize = common_rfc4106_set_authsize, 1046 .encrypt = helper_rfc4106_encrypt, 1047 .decrypt = helper_rfc4106_decrypt, 1048 .ivsize = GCM_RFC4106_IV_SIZE, 1049 .maxauthsize = 16, 1050 .base = { 1051 .cra_name = "__rfc4106(gcm(aes))", 1052 .cra_driver_name = "__rfc4106-gcm-aesni", 1053 .cra_priority = 400, 1054 .cra_flags = CRYPTO_ALG_INTERNAL, 1055 .cra_blocksize = 1, 1056 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx), 1057 .cra_alignmask = AESNI_ALIGN - 1, 1058 .cra_module = THIS_MODULE, 1059 }, 1060 }, { 1061 .setkey = generic_gcmaes_set_key, 1062 .setauthsize = generic_gcmaes_set_authsize, 1063 .encrypt = generic_gcmaes_encrypt, 1064 .decrypt = generic_gcmaes_decrypt, 1065 .ivsize = GCM_AES_IV_SIZE, 1066 .maxauthsize = 16, 1067 .base = { 1068 .cra_name = "__gcm(aes)", 1069 .cra_driver_name = "__generic-gcm-aesni", 1070 .cra_priority = 400, 1071 .cra_flags = CRYPTO_ALG_INTERNAL, 1072 .cra_blocksize = 1, 1073 .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), 1074 .cra_alignmask = AESNI_ALIGN - 1, 1075 .cra_module = THIS_MODULE, 1076 }, 1077 } }; 1078 #else 1079 static struct aead_alg aesni_aeads[0]; 1080 #endif 1081 1082 static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)]; 1083 1084 static const struct x86_cpu_id aesni_cpu_id[] = { 1085 X86_FEATURE_MATCH(X86_FEATURE_AES), 1086 {} 1087 }; 1088 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id); 1089 1090 static int __init aesni_init(void) 1091 { 1092 int err; 1093 1094 if (!x86_match_cpu(aesni_cpu_id)) 1095 return -ENODEV; 1096 #ifdef CONFIG_X86_64 1097 #ifdef CONFIG_AS_AVX2 1098 if (boot_cpu_has(X86_FEATURE_AVX2)) { 1099 pr_info("AVX2 version of gcm_enc/dec engaged.\n"); 1100 aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4; 1101 } else 1102 #endif 1103 #ifdef CONFIG_AS_AVX 1104 if (boot_cpu_has(X86_FEATURE_AVX)) { 1105 pr_info("AVX version of gcm_enc/dec engaged.\n"); 1106 aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2; 1107 } else 1108 #endif 1109 { 1110 pr_info("SSE version of gcm_enc/dec engaged.\n"); 1111 aesni_gcm_tfm = &aesni_gcm_tfm_sse; 1112 } 1113 aesni_ctr_enc_tfm = aesni_ctr_enc; 1114 #ifdef CONFIG_AS_AVX 1115 if (boot_cpu_has(X86_FEATURE_AVX)) { 1116 /* optimize performance of ctr mode encryption transform */ 1117 aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm; 1118 pr_info("AES CTR mode by8 optimization enabled\n"); 1119 } 1120 #endif 1121 #endif 1122 1123 err = crypto_register_alg(&aesni_cipher_alg); 1124 if (err) 1125 return err; 1126 1127 err = simd_register_skciphers_compat(aesni_skciphers, 1128 ARRAY_SIZE(aesni_skciphers), 1129 aesni_simd_skciphers); 1130 if (err) 1131 goto unregister_cipher; 1132 1133 err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads), 1134 aesni_simd_aeads); 1135 if (err) 1136 goto unregister_skciphers; 1137 1138 return 0; 1139 1140 unregister_skciphers: 1141 simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers), 1142 aesni_simd_skciphers); 1143 unregister_cipher: 1144 crypto_unregister_alg(&aesni_cipher_alg); 1145 return err; 1146 } 1147 1148 static void __exit aesni_exit(void) 1149 { 1150 simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads), 1151 aesni_simd_aeads); 1152 simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers), 1153 aesni_simd_skciphers); 1154 crypto_unregister_alg(&aesni_cipher_alg); 1155 } 1156 1157 late_initcall(aesni_init); 1158 module_exit(aesni_exit); 1159 1160 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized"); 1161 MODULE_LICENSE("GPL"); 1162 MODULE_ALIAS_CRYPTO("aes"); 1163