/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif


#define AESNI_ALIGN	16
#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
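
/*
 * CRYPTO_AES_CTX_SIZE and XTS_AES_CTX_SIZE reserve AESNI_ALIGN_EXTRA bytes
 * on top of the raw context so that aes_ctx() below can realign the
 * crypto API's (only CRYPTO_MINALIGN-aligned) tfm context to the 16-byte
 * boundary the AES-NI assembly expects.
 */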

/*
 * This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
	u8 nonce[4];
};

struct generic_gcmaes_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
};

struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};

#define GCM_BLOCK_LEN 16

struct gcm_context_data {
	/* init, update and finalize context data */
	u8 aad_hash[GCM_BLOCK_LEN];
	u64 aad_length;
	u64 in_length;
	u8 partial_block_enc_key[GCM_BLOCK_LEN];
	u8 orig_IV[GCM_BLOCK_LEN];
	u8 current_counter[GCM_BLOCK_LEN];
	u64 partial_block_len;
	u64 unused;
	u8 hash_keys[GCM_BLOCK_LEN * 8];
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * struct gcm_context_data. May be uninitialized.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
 *         16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes.
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx,
			struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * struct gcm_context_data. May be uninitialized.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
 *         16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx,
			struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
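
/*
 * The one-shot aesni_gcm_enc()/aesni_gcm_dec() entry points above operate on
 * contiguous buffers and are used by gcmaes_encrypt()/gcmaes_decrypt() once
 * a request has been linearized.  The init/update/finalize routines that
 * follow process data incrementally and back gcmaes_crypt_by_sg().
 */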

/* Scatter / Gather routines, with args similar to above */
asmlinkage void aesni_gcm_init(void *ctx,
			       struct gcm_context_data *gdata,
			       u8 *iv,
			       u8 *hash_subkey, const u8 *aad,
			       unsigned long aad_len);
asmlinkage void aesni_gcm_enc_update(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);
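
/*
 * Expected call sequence for the scatter/gather interface, as used by
 * gcmaes_crypt_by_sg() below (sketch):
 *
 *	kernel_fpu_begin();
 *	aesni_gcm_init(ctx, &data, iv, hash_subkey, assoc, assoclen);
 *	while (data left)
 *		aesni_gcm_enc_update(ctx, &data, dst, src, len); // or _dec_update
 *	aesni_gcm_finalize(ctx, &data, auth_tag, auth_tag_len);
 *	kernel_fpu_end();
 */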

#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx(void *ctx,
			struct gcm_context_data *data, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, data, out, in,
			      plaintext_len, iv, hash_subkey, aad,
			      aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}

static void aesni_gcm_dec_avx(void *ctx,
			struct gcm_context_data *data, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, data, out, in,
			      ciphertext_len, iv, hash_subkey, aad,
			      aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}
#endif
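
/*
 * Dispatch heuristic used by the wrappers above and below: requests shorter
 * than AVX_GEN2_OPTSIZE bytes, or using keys other than 128 bits, fall back
 * to the SSE aesni_gcm_enc()/aesni_gcm_dec(); AVX2 builds additionally
 * switch from the gen2 to the gen4 routines once the length reaches
 * AVX_GEN4_OPTSIZE.
 */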

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx2(void *ctx,
			struct gcm_context_data *data, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, data, out, in,
			      plaintext_len, iv, hash_subkey, aad,
			      aad_len, auth_tag, auth_tag_len);
	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}

static void aesni_gcm_dec_avx2(void *ctx,
			struct gcm_context_data *data, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, data, out, in,
			      ciphertext_len, iv, hash_subkey,
			      aad, aad_len, auth_tag, auth_tag_len);
	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}
#endif

static void (*aesni_gcm_enc_tfm)(void *ctx,
				 struct gcm_context_data *data, u8 *out,
				 const u8 *in, unsigned long plaintext_len,
				 u8 *iv, u8 *hash_subkey, const u8 *aad,
				 unsigned long aad_len, u8 *auth_tag,
				 unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx,
				 struct gcm_context_data *data, u8 *out,
				 const u8 *in, unsigned long ciphertext_len,
				 u8 *iv, u8 *hash_subkey, const u8 *aad,
				 unsigned long aad_len, u8 *auth_tag,
				 unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}

static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif
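
/*
 * Return the crypto_aes_ctx embedded in a raw tfm context, realigned to
 * AESNI_ALIGN.  The extra AESNI_ALIGN_EXTRA bytes reserved by
 * CRYPTO_AES_CTX_SIZE/XTS_AES_CTX_SIZE make this shift safe.
 */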
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	return aes_set_key_common(crypto_skcipher_tfm(tfm),
				  crypto_skcipher_ctx(tfm), key, len);
}
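
/*
 * The ECB/CBC/CTR handlers below share one pattern: map the request with
 * skcipher_walk_virt(), wrap the whole walk in kernel_fpu_begin()/
 * kernel_fpu_end(), hand full AES blocks to the assembly and return any
 * remainder to skcipher_walk_done().  ctr_crypt() additionally finishes a
 * trailing partial block in ctr_crypt_final() by XORing the data with an
 * encrypted counter block.
 */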
static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

#ifdef CONFIG_X86_64
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct skcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor_cpy(dst, keystream, src, nbytes);

	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * based on key length, override with the by8 version
	 * of ctr mode encryption/decryption for improved performance
	 * aes_set_key_common() ensures that key length is one of
	 * {128,192,256}
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

static int ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = skcipher_walk_done(&walk, 0);
	}
	kernel_fpu_end();

	return err;
}

static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
				 key, keylen);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
				  key + keylen, keylen);
}
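
/*
 * XTS is implemented through the glue_helper framework: aesni_xts_crypt8()
 * handles eight blocks at a time and aesni_enc()/aesni_dec() serve as the
 * single-block fallback, with the tweak derived by encrypting the IV in
 * aesni_xts_tweak().
 */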
static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};

static int xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return glue_xts_req_128bit(&aesni_enc_xts, req,
				   XTS_TWEAK_CAST(aesni_xts_tweak),
				   aes_ctx(ctx->raw_tweak_ctx),
				   aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return glue_xts_req_128bit(&aesni_dec_xts, req,
				   XTS_TWEAK_CAST(aesni_xts_tweak),
				   aes_ctx(ctx->raw_tweak_ctx),
				   aes_ctx(ctx->raw_crypt_ctx));
}

static int rfc4106_init(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}

static void rfc4106_exit(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}
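
/*
 * Derive the GHASH subkey H by encrypting an all-zero block with the AES
 * key.  A temporary "aes" cipher tfm is allocated for this one-off
 * operation rather than going through the AES-NI key schedule directly.
 */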
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_free_cipher;

	/* Clear the data in the hash sub key container to zero. */
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
	crypto_free_cipher(tfm);
	return ret;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
				  unsigned int key_len)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* This is the Integrity Check Value (aka the authentication tag) length,
 * which can be 8, 12 or 16 bytes long. */
static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
				       unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}

static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
				       unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
			      unsigned int assoclen, u8 *hash_subkey,
			      u8 *iv, void *aes_ctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	struct gcm_context_data data AESNI_ALIGN_ATTR;
	struct scatter_walk dst_sg_walk = {};
	unsigned long left = req->cryptlen;
	unsigned long len, srclen, dstlen;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk src_sg_walk;
	struct scatterlist src_start[2];
	struct scatterlist dst_start[2];
	struct scatterlist *src_sg;
	struct scatterlist *dst_sg;
	u8 *src, *dst, *assoc;
	u8 *assocmem = NULL;
	u8 authTag[16];

	if (!enc)
		left -= auth_tag_len;

	/* Linearize assoc, if not already linear */
	if (req->src->length >= assoclen && req->src->length &&
	    (!PageHighMem(sg_page(req->src)) ||
	     req->src->offset + req->src->length < PAGE_SIZE)) {
		scatterwalk_start(&assoc_sg_walk, req->src);
		assoc = scatterwalk_map(&assoc_sg_walk);
	} else {
		/* assoc can be any length, so must be on heap */
		assocmem = kmalloc(assoclen, GFP_ATOMIC);
		if (unlikely(!assocmem))
			return -ENOMEM;
		assoc = assocmem;

		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
	}

	src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
	scatterwalk_start(&src_sg_walk, src_sg);
	if (req->src != req->dst) {
		dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
		scatterwalk_start(&dst_sg_walk, dst_sg);
	}
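
	/*
	 * Feed the data to the SG-aware assembly under one FPU section:
	 * hash the AAD in aesni_gcm_init(), en/decrypt each scatterlist
	 * segment with the _update helpers, then compute the tag in
	 * aesni_gcm_finalize().
	 */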
	kernel_fpu_begin();
	aesni_gcm_init(aes_ctx, &data, iv,
		       hash_subkey, assoc, assoclen);
	if (req->src != req->dst) {
		while (left) {
			src = scatterwalk_map(&src_sg_walk);
			dst = scatterwalk_map(&dst_sg_walk);
			srclen = scatterwalk_clamp(&src_sg_walk, left);
			dstlen = scatterwalk_clamp(&dst_sg_walk, left);
			len = min(srclen, dstlen);
			if (len) {
				if (enc)
					aesni_gcm_enc_update(aes_ctx, &data,
							     dst, src, len);
				else
					aesni_gcm_dec_update(aes_ctx, &data,
							     dst, src, len);
			}
			left -= len;

			scatterwalk_unmap(src);
			scatterwalk_unmap(dst);
			scatterwalk_advance(&src_sg_walk, len);
			scatterwalk_advance(&dst_sg_walk, len);
			scatterwalk_done(&src_sg_walk, 0, left);
			scatterwalk_done(&dst_sg_walk, 1, left);
		}
	} else {
		while (left) {
			dst = src = scatterwalk_map(&src_sg_walk);
			len = scatterwalk_clamp(&src_sg_walk, left);
			if (len) {
				if (enc)
					aesni_gcm_enc_update(aes_ctx, &data,
							     src, src, len);
				else
					aesni_gcm_dec_update(aes_ctx, &data,
							     src, src, len);
			}
			left -= len;
			scatterwalk_unmap(src);
			scatterwalk_advance(&src_sg_walk, len);
			scatterwalk_done(&src_sg_walk, 1, left);
		}
	}
	aesni_gcm_finalize(aes_ctx, &data, authTag, auth_tag_len);
	kernel_fpu_end();

	if (!assocmem)
		scatterwalk_unmap(assoc);
	else
		kfree(assocmem);

	if (!enc) {
		u8 authTagMsg[16];

		/* Copy out original authTag */
		scatterwalk_map_and_copy(authTagMsg, req->src,
					 req->assoclen + req->cryptlen -
					 auth_tag_len,
					 auth_tag_len, 0);

		/* Compare generated tag with passed in tag. */
		return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
			-EBADMSG : 0;
	}

	/* Copy in the authTag */
	scatterwalk_map_and_copy(authTag, req->dst,
				 req->assoclen + req->cryptlen,
				 auth_tag_len, 1);

	return 0;
}
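
/*
 * gcmaes_encrypt()/gcmaes_decrypt() below are the one-shot paths.  Requests
 * with non-128-bit keys, builds where only the SSE routines are available,
 * or data shorter than AVX_GEN2_OPTSIZE are redirected to
 * gcmaes_crypt_by_sg(); otherwise the request is linearized (mapped directly
 * when src/dst are single scatterlist entries, or bounced through a heap
 * buffer) and handed to the selected one-shot routine.
 */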
static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	struct gcm_context_data data AESNI_ALIGN_ATTR;

	if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
	    aesni_gcm_enc_tfm == aesni_gcm_enc ||
	    req->cryptlen < AVX_GEN2_OPTSIZE) {
		return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
					  aes_ctx);
	}
	if (sg_is_last(req->src) &&
	    (!PageHighMem(sg_page(req->src)) ||
	     req->src->offset + req->src->length <= PAGE_SIZE) &&
	    sg_is_last(req->dst) &&
	    (!PageHighMem(sg_page(req->dst)) ||
	     req->dst->offset + req->dst->length <= PAGE_SIZE)) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
				GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_enc_tfm(aes_ctx, &data, dst, src, req->cryptlen, iv,
			  hash_subkey, assoc, assoclen,
			  dst + req->cryptlen, auth_tag_len);
	kernel_fpu_end();

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 req->cryptlen + auth_tag_len, 1);
		kfree(assoc);
	}
	return 0;
}

static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 authTag[16];
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	struct gcm_context_data data AESNI_ALIGN_ATTR;
	int retval = 0;

	if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
	    aesni_gcm_enc_tfm == aesni_gcm_enc ||
	    req->cryptlen < AVX_GEN2_OPTSIZE) {
		return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
					  aes_ctx);
	}
	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);

	if (sg_is_last(req->src) &&
	    (!PageHighMem(sg_page(req->src)) ||
	     req->src->offset + req->src->length <= PAGE_SIZE) &&
	    sg_is_last(req->dst) && req->dst->length &&
	    (!PageHighMem(sg_page(req->dst)) ||
	     req->dst->offset + req->dst->length <= PAGE_SIZE)) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!assoc)
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}


	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, &data, dst, src, tempCipherLen, iv,
			  hash_subkey, assoc, assoclen,
			  authTag, auth_tag_len);
	kernel_fpu_end();

	/* Compare generated tag with passed in tag. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 tempCipherLen, 1);
		kfree(assoc);
	}
	return retval;
}
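
/*
 * RFC4106 IV layout used by the helpers below: the 4-byte nonce (salt)
 * stored at setkey time, followed by the 8-byte explicit IV from the
 * request, followed by the 32-bit counter starting at 1.  The AEAD API
 * counts the explicit IV as part of the AAD, which is why assoclen must be
 * 16 or 20 bytes and 8 is subtracted before calling into the GCM code.
 */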
static int helper_rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	unsigned int i;
	__be32 counter = cpu_to_be32(1);

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, we need to have the AAD length equal
	 * to 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Build the IV */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, we need to have the AAD length equal
	 * to 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Build the IV */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int gcmaes_wrapper_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;

	tfm = &cryptd_tfm->base;
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		tfm = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, tfm);

	return crypto_aead_encrypt(req);
}

static int gcmaes_wrapper_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;

	tfm = &cryptd_tfm->base;
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		tfm = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, tfm);

	return crypto_aead_decrypt(req);
}
#endif
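
/*
 * Example (sketch, not part of this driver): kernel users reach these
 * implementations through the regular crypto API by algorithm name, e.g.
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
 *	struct crypto_aead *aead = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
 *
 * and the "*-aesni" drivers registered below are picked by priority.
 */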

static struct crypto_alg aesni_algs[] = { {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
}, {
	.cra_name		= "__aes",
	.cra_driver_name	= "__aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= __aes_encrypt,
			.cia_decrypt		= __aes_decrypt
		}
	}
} };

static struct skcipher_alg aesni_skciphers[] = {
	{
		.base = {
			.cra_name		= "__ecb(aes)",
			.cra_driver_name	= "__ecb-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ecb_encrypt,
		.decrypt	= ecb_decrypt,
	}, {
		.base = {
			.cra_name		= "__cbc(aes)",
			.cra_driver_name	= "__cbc-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= cbc_encrypt,
		.decrypt	= cbc_decrypt,
#ifdef CONFIG_X86_64
	}, {
		.base = {
			.cra_name		= "__ctr(aes)",
			.cra_driver_name	= "__ctr-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= 1,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.chunksize	= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ctr_crypt,
		.decrypt	= ctr_crypt,
	}, {
		.base = {
			.cra_name		= "__xts(aes)",
			.cra_driver_name	= "__xts-aes-aesni",
			.cra_priority		= 401,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= XTS_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= xts_aesni_setkey,
		.encrypt	= xts_encrypt,
		.decrypt	= xts_decrypt,
#endif
	}
};

static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

static struct {
	const char *algname;
	const char *drvname;
	const char *basename;
	struct simd_skcipher_alg *simd;
} aesni_simd_skciphers2[] = {
#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
    IS_BUILTIN(CONFIG_CRYPTO_PCBC)
	{
		.algname	= "pcbc(aes)",
		.drvname	= "pcbc-aes-aesni",
		.basename	= "fpu(pcbc(__aes-aesni))",
	},
#endif
};
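
/*
 * aesni_simd_skciphers[] holds the simd wrappers created at init time for
 * each internal "__" skcipher above; those wrappers are what other kernel
 * code actually allocates.  aesni_simd_skciphers2[] lists optional extras
 * (currently pcbc) whose creation is allowed to fail.
 */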

#ifdef CONFIG_X86_64
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int generic_gcmaes_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	__be32 counter = cpu_to_be32(1);

	memcpy(iv, req->iv, 12);
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int generic_gcmaes_decrypt(struct aead_request *req)
{
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));

	memcpy(iv, req->iv, 12);
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int generic_gcmaes_init(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));

	return 0;
}

static void generic_gcmaes_exit(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}
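
/*
 * Four AEADs are registered below: the internal "__driver-gcm-aes-aesni"
 * and "__driver-generic-gcm-aes-aesni" helpers that do the actual work,
 * and the async "rfc4106(gcm(aes))" and "gcm(aes)" front-ends that wrap
 * them through cryptd, calling the child directly whenever the FPU is
 * usable and cryptd has no backlog.
 */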
static struct aead_alg aesni_aead_algs[] = { {
	.setkey			= common_rfc4106_set_key,
	.setauthsize		= common_rfc4106_set_authsize,
	.encrypt		= helper_rfc4106_encrypt,
	.decrypt		= helper_rfc4106_decrypt,
	.ivsize			= GCM_RFC4106_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__gcm-aes-aesni",
		.cra_driver_name	= "__driver-gcm-aes-aesni",
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
}, {
	.init			= rfc4106_init,
	.exit			= rfc4106_exit,
	.setkey			= gcmaes_wrapper_set_key,
	.setauthsize		= gcmaes_wrapper_set_authsize,
	.encrypt		= gcmaes_wrapper_encrypt,
	.decrypt		= gcmaes_wrapper_decrypt,
	.ivsize			= GCM_RFC4106_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "rfc4106(gcm(aes))",
		.cra_driver_name	= "rfc4106-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct cryptd_aead *),
		.cra_module		= THIS_MODULE,
	},
}, {
	.setkey			= generic_gcmaes_set_key,
	.setauthsize		= generic_gcmaes_set_authsize,
	.encrypt		= generic_gcmaes_encrypt,
	.decrypt		= generic_gcmaes_decrypt,
	.ivsize			= GCM_AES_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__generic-gcm-aes-aesni",
		.cra_driver_name	= "__driver-generic-gcm-aes-aesni",
		.cra_priority		= 0,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
}, {
	.init			= generic_gcmaes_init,
	.exit			= generic_gcmaes_exit,
	.setkey			= gcmaes_wrapper_set_key,
	.setauthsize		= gcmaes_wrapper_set_authsize,
	.encrypt		= gcmaes_wrapper_encrypt,
	.decrypt		= gcmaes_wrapper_decrypt,
	.ivsize			= GCM_AES_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "generic-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct cryptd_aead *),
		.cra_module		= THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif


static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static void aesni_free_simds(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
		    aesni_simd_skciphers[i]; i++)
		simd_skcipher_free(aesni_simd_skciphers[i]);

	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
		if (aesni_simd_skciphers2[i].simd)
			simd_skcipher_free(aesni_simd_skciphers2[i].simd);
}

static int __init aesni_init(void)
{
	struct simd_skcipher_alg *simd;
	const char *basename;
	const char *algname;
	const char *drvname;
	int err;
	int i;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
	} else
#endif
	{
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc;
		aesni_gcm_dec_tfm = aesni_gcm_dec;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif

	err = crypto_fpu_init();
	if (err)
		return err;

	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	if (err)
		goto fpu_exit;

	err = crypto_register_skciphers(aesni_skciphers,
					ARRAY_SIZE(aesni_skciphers));
	if (err)
		goto unregister_algs;

	err = crypto_register_aeads(aesni_aead_algs,
				    ARRAY_SIZE(aesni_aead_algs));
	if (err)
		goto unregister_skciphers;

	for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
		algname = aesni_skciphers[i].base.cra_name + 2;
		drvname = aesni_skciphers[i].base.cra_driver_name + 2;
		basename = aesni_skciphers[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto unregister_simds;

		aesni_simd_skciphers[i] = simd;
	}
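
	/*
	 * The optional wrappers (e.g. pcbc) are best effort: failing to
	 * create one is not fatal, the entry is simply skipped.
	 */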
	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
		algname = aesni_simd_skciphers2[i].algname;
		drvname = aesni_simd_skciphers2[i].drvname;
		basename = aesni_simd_skciphers2[i].basename;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			continue;

		aesni_simd_skciphers2[i].simd = simd;
	}

	return 0;

unregister_simds:
	aesni_free_simds();
	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
unregister_skciphers:
	crypto_unregister_skciphers(aesni_skciphers,
				    ARRAY_SIZE(aesni_skciphers));
unregister_algs:
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
	crypto_fpu_exit();
	return err;
}

static void __exit aesni_exit(void)
{
	aesni_free_simds();
	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
	crypto_unregister_skciphers(aesni_skciphers,
				    ARRAY_SIZE(aesni_skciphers));
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}

late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");