/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/crypto/aes.h>
#include <asm/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif


#define AESNI_ALIGN	16
#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)

/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
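 * (The AES-NI assembly routines load the expanded key and hash subkey with
 * aligned SSE accesses, which is why the 16-byte alignment is required.)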
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
	u8 nonce[4];
};

struct generic_gcmaes_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
};

struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};

#define GCM_BLOCK_LEN 16

struct gcm_context_data {
	/* init, update and finalize context data */
	u8 aad_hash[GCM_BLOCK_LEN];
	u64 aad_length;
	u64 in_length;
	u8 partial_block_enc_key[GCM_BLOCK_LEN];
	u8 orig_IV[GCM_BLOCK_LEN];
	u8 current_counter[GCM_BLOCK_LEN];
	u64 partial_block_len;
	u64 unused;
	u8 hash_keys[GCM_BLOCK_LEN * 16];
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * struct gcm_context_data. May be uninitialized.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
 *         16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes.
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx,
			struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * struct gcm_context_data. May be uninitialized.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
 *         16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx,
			struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* Scatter / Gather routines, with args similar to above */
asmlinkage void aesni_gcm_init(void *ctx,
			       struct gcm_context_data *gdata,
			       u8 *iv,
			       u8 *hash_subkey, const u8 *aad,
			       unsigned long aad_len);
asmlinkage void aesni_gcm_enc_update(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s {
	void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
		     u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
	void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
			   const u8 *in, unsigned long plaintext_len);
	void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
			   const u8 *in, unsigned long ciphertext_len);
	void (*finalize)(void *ctx, struct gcm_context_data *gdata,
			 u8 *auth_tag, unsigned long auth_tag_len);
} *aesni_gcm_tfm;

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
	.init = &aesni_gcm_init,
	.enc_update = &aesni_gcm_enc_update,
	.dec_update = &aesni_gcm_dec_update,
	.finalize = &aesni_gcm_finalize,
};

#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_init_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
					struct gcm_context_data *gdata,
					u8 *iv,
					u8 *hash_subkey,
					const u8 *aad,
					unsigned long aad_len);

asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx,
				struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx,
				struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
	.init = &aesni_gcm_init_avx_gen2,
	.enc_update = &aesni_gcm_enc_update_avx_gen2,
	.dec_update = &aesni_gcm_dec_update_avx_gen2,
	.finalize = &aesni_gcm_finalize_avx_gen2,
};

#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_init_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
					struct gcm_context_data *gdata,
					u8 *iv,
					u8 *hash_subkey,
					const u8 *aad,
					unsigned long aad_len);

asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx,
				struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx,
				struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
	.init = &aesni_gcm_init_avx_gen4,
	.enc_update = &aesni_gcm_enc_update_avx_gen4,
	.dec_update = &aesni_gcm_dec_update_avx_gen4,
	.finalize = &aesni_gcm_finalize_avx_gen4,
};

#endif

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}

static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!crypto_simd_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!crypto_simd_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!crypto_simd_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	return aes_set_key_common(crypto_skcipher_tfm(tfm),
				  crypto_skcipher_ctx(tfm), key, len);
}

static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

#ifdef CONFIG_X86_64
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct skcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor_cpy(dst, keystream, src, nbytes);

	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * Based on key length, override with the by8 version
	 * of ctr mode encryption/decryption for improved performance.
	 * aes_set_key_common() ensures that key length is one of
	 * {128,192,256}.
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

static int ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = skcipher_walk_done(&walk, 0);
	}
	kernel_fpu_end();

	return err;
}

static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
				 key, keylen);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
				  key + keylen, keylen);
}


static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};

static int xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return glue_xts_req_128bit(&aesni_enc_xts, req,
				   XTS_TWEAK_CAST(aesni_xts_tweak),
				   aes_ctx(ctx->raw_tweak_ctx),
				   aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return glue_xts_req_128bit(&aesni_dec_xts, req,
				   XTS_TWEAK_CAST(aesni_xts_tweak),
				   aes_ctx(ctx->raw_tweak_ctx),
				   aes_ctx(ctx->raw_crypt_ctx));
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_free_cipher;

	/* Clear the data in the hash sub key container to zero. */
	/* We want to cipher all zeros to create the hash sub key.
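	 * In GCM terms this computes the hash subkey H = E_K(0^128) that the
	 * GHASH code consumes.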
	 */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
	crypto_free_cipher(tfm);
	return ret;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for 4 byte nonce at the end. */
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

/* This is the Integrity Check Value (aka the authentication tag) length and can
 * be 8, 12 or 16 bytes long. */
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
				       unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
			      unsigned int assoclen, u8 *hash_subkey,
			      u8 *iv, void *aes_ctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
	struct gcm_context_data data AESNI_ALIGN_ATTR;
	struct scatter_walk dst_sg_walk = {};
	unsigned long left = req->cryptlen;
	unsigned long len, srclen, dstlen;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk src_sg_walk;
	struct scatterlist src_start[2];
	struct scatterlist dst_start[2];
	struct scatterlist *src_sg;
	struct scatterlist *dst_sg;
	u8 *src, *dst, *assoc;
	u8 *assocmem = NULL;
	u8 authTag[16];

	if (!enc)
		left -= auth_tag_len;

#ifdef CONFIG_AS_AVX2
	if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
		gcm_tfm = &aesni_gcm_tfm_avx_gen2;
#endif
#ifdef CONFIG_AS_AVX
	if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
		gcm_tfm = &aesni_gcm_tfm_sse;
#endif

	/* Linearize assoc, if not already linear */
	if (req->src->length >= assoclen && req->src->length &&
	    (!PageHighMem(sg_page(req->src)) ||
	     req->src->offset + req->src->length <= PAGE_SIZE)) {
		scatterwalk_start(&assoc_sg_walk, req->src);
		assoc = scatterwalk_map(&assoc_sg_walk);
	} else {
		/* assoc can be any length, so must be on heap */
		assocmem = kmalloc(assoclen, GFP_ATOMIC);
		if (unlikely(!assocmem))
			return -ENOMEM;
		assoc = assocmem;

		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
	}

	if (left) {
		src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
		scatterwalk_start(&src_sg_walk, src_sg);
		if (req->src != req->dst) {
			dst_sg = scatterwalk_ffwd(dst_start, req->dst,
						  req->assoclen);
			scatterwalk_start(&dst_sg_walk, dst_sg);
		}
	}

	kernel_fpu_begin();
	gcm_tfm->init(aes_ctx, &data, iv,
		      hash_subkey, assoc, assoclen);
	if (req->src != req->dst) {
		while (left) {
			src = scatterwalk_map(&src_sg_walk);
			dst = scatterwalk_map(&dst_sg_walk);
			srclen = scatterwalk_clamp(&src_sg_walk, left);
			dstlen = scatterwalk_clamp(&dst_sg_walk, left);
			len = min(srclen, dstlen);
			if (len) {
				if (enc)
					gcm_tfm->enc_update(aes_ctx, &data,
							    dst, src, len);
				else
					gcm_tfm->dec_update(aes_ctx, &data,
							    dst, src, len);
			}
			left -= len;

			scatterwalk_unmap(src);
			scatterwalk_unmap(dst);
			scatterwalk_advance(&src_sg_walk, len);
			scatterwalk_advance(&dst_sg_walk, len);
			scatterwalk_done(&src_sg_walk, 0, left);
			scatterwalk_done(&dst_sg_walk, 1, left);
		}
	} else {
		while (left) {
			dst = src = scatterwalk_map(&src_sg_walk);
			len = scatterwalk_clamp(&src_sg_walk, left);
			if (len) {
				if (enc)
					gcm_tfm->enc_update(aes_ctx, &data,
							    src, src, len);
				else
					gcm_tfm->dec_update(aes_ctx, &data,
							    src, src, len);
			}
			left -= len;
			scatterwalk_unmap(src);
			scatterwalk_advance(&src_sg_walk, len);
			scatterwalk_done(&src_sg_walk, 1, left);
		}
	}
	gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len);
	kernel_fpu_end();

	if (!assocmem)
		scatterwalk_unmap(assoc);
	else
		kfree(assocmem);

	if (!enc) {
		u8 authTagMsg[16];

		/* Copy out original authTag */
		scatterwalk_map_and_copy(authTagMsg, req->src,
					 req->assoclen + req->cryptlen -
					 auth_tag_len,
					 auth_tag_len, 0);

		/* Compare generated tag with passed in tag. */
		return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
			-EBADMSG : 0;
	}

	/* Copy in the authTag */
	scatterwalk_map_and_copy(authTag, req->dst,
				 req->assoclen + req->cryptlen,
				 auth_tag_len, 1);

	return 0;
}

static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
				  aes_ctx);
}

static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
				  aes_ctx);
}

static int helper_rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	unsigned int i;
	__be32 counter = cpu_to_be32(1);

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers. We need to have the AAD length equal */
	/* to 16 or 20 bytes. */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	unsigned int i;

	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

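	/* As in the encrypt path: build the pre-counter block from the 4 byte
	 * nonce taken from the key, the 8 byte explicit IV from the request
	 * and a counter of 1, and drop the 8 explicit-IV bytes from the AAD
	 * length passed down to GCM. */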
	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers. We need to have the AAD length */
	/* equal to 16 or 20 bytes. */

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
			      aes_ctx);
}
#endif

static struct crypto_alg aesni_algs[] = { {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
}, {
	.cra_name		= "__aes",
	.cra_driver_name	= "__aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= __aes_encrypt,
			.cia_decrypt		= __aes_decrypt
		}
	}
} };

static struct skcipher_alg aesni_skciphers[] = {
	{
		.base = {
			.cra_name		= "__ecb(aes)",
			.cra_driver_name	= "__ecb-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ecb_encrypt,
		.decrypt	= ecb_decrypt,
	}, {
		.base = {
			.cra_name		= "__cbc(aes)",
			.cra_driver_name	= "__cbc-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= cbc_encrypt,
		.decrypt	= cbc_decrypt,
#ifdef CONFIG_X86_64
	}, {
		.base = {
			.cra_name		= "__ctr(aes)",
			.cra_driver_name	= "__ctr-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= 1,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.chunksize	= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ctr_crypt,
		.decrypt	= ctr_crypt,
	}, {
		.base = {
			.cra_name		= "__xts(aes)",
			.cra_driver_name	= "__xts-aes-aesni",
			.cra_priority		= 401,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= XTS_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= xts_aesni_setkey,
		.encrypt	= xts_encrypt,
		.decrypt	= xts_decrypt,
#endif
	}
};

static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

#ifdef CONFIG_X86_64
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int generic_gcmaes_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	__be32 counter = cpu_to_be32(1);

	memcpy(iv, req->iv, 12);
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int generic_gcmaes_decrypt(struct aead_request *req)
{
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));

	memcpy(iv, req->iv, 12);
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
			      aes_ctx);
}

static struct aead_alg aesni_aeads[] = { {
	.setkey			= common_rfc4106_set_key,
	.setauthsize		= common_rfc4106_set_authsize,
	.encrypt		= helper_rfc4106_encrypt,
	.decrypt		= helper_rfc4106_decrypt,
	.ivsize			= GCM_RFC4106_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__rfc4106(gcm(aes))",
		.cra_driver_name	= "__rfc4106-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
}, {
	.setkey			= generic_gcmaes_set_key,
	.setauthsize		= generic_gcmaes_set_authsize,
	.encrypt		= generic_gcmaes_encrypt,
	.decrypt		= generic_gcmaes_decrypt,
	.ivsize			= GCM_AES_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__gcm(aes)",
		.cra_driver_name	= "__generic-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aeads[0];
#endif

static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];

static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
	} else
#endif
	{
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_sse;
	}
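	/* Default to the plain AES-NI CTR routine; the AVX by8 variant
	 * below replaces it when AVX is available. */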
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif

	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	if (err)
		return err;

	err = simd_register_skciphers_compat(aesni_skciphers,
					     ARRAY_SIZE(aesni_skciphers),
					     aesni_simd_skciphers);
	if (err)
		goto unregister_algs;

	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
					 aesni_simd_aeads);
	if (err)
		goto unregister_skciphers;

	return 0;

unregister_skciphers:
	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
unregister_algs:
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	return err;
}

static void __exit aesni_exit(void)
{
	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
			      aesni_simd_aeads);
	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}

late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");