/*
 * linux/arch/arm64/crypto/aes-glue.c - wrapper code for ARMv8 AES
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/neon.h>
#include <asm/hwcap.h>
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/xts.h>

#include "aes-ce-setkey.h"
#include "aes-ctr-fallback.h"

#ifdef USE_V8_CRYPTO_EXTENSIONS
#define MODE			"ce"
#define PRIO			300
#define aes_setkey		ce_aes_setkey
#define aes_expandkey		ce_aes_expandkey
#define aes_ecb_encrypt		ce_aes_ecb_encrypt
#define aes_ecb_decrypt		ce_aes_ecb_decrypt
#define aes_cbc_encrypt		ce_aes_cbc_encrypt
#define aes_cbc_decrypt		ce_aes_cbc_decrypt
#define aes_ctr_encrypt		ce_aes_ctr_encrypt
#define aes_xts_encrypt		ce_aes_xts_encrypt
#define aes_xts_decrypt		ce_aes_xts_decrypt
#define aes_mac_update		ce_aes_mac_update
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
#else
#define MODE			"neon"
#define PRIO			200
#define aes_setkey		crypto_aes_set_key
#define aes_expandkey		crypto_aes_expand_key
#define aes_ecb_encrypt		neon_aes_ecb_encrypt
#define aes_ecb_decrypt		neon_aes_ecb_decrypt
#define aes_cbc_encrypt		neon_aes_cbc_encrypt
#define aes_cbc_decrypt		neon_aes_cbc_decrypt
#define aes_ctr_encrypt		neon_aes_ctr_encrypt
#define aes_xts_encrypt		neon_aes_xts_encrypt
#define aes_xts_decrypt		neon_aes_xts_decrypt
#define aes_mac_update		neon_aes_mac_update
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");
MODULE_ALIAS_CRYPTO("cmac(aes)");
MODULE_ALIAS_CRYPTO("xcbc(aes)");
MODULE_ALIAS_CRYPTO("cbcmac(aes)");
#endif

MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

/* defined in aes-modes.S */
asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
				int rounds, int blocks);
asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
				int rounds, int blocks);

asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
				int rounds, int blocks, u8 iv[]);
asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
				int rounds, int blocks, u8 iv[]);

asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
				int rounds, int blocks, u8 ctr[]);

asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
				int rounds, int blocks, u8 const rk2[], u8 iv[],
				int first);
asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[],
				int rounds, int blocks, u8 const rk2[], u8 iv[],
				int first);

asmlinkage void aes_mac_update(u8 const in[], u32 const rk[], int rounds,
			       int blocks, u8 dg[], int enc_before,
			       int enc_after);

struct crypto_aes_xts_ctx {
	struct crypto_aes_ctx key1;
	struct crypto_aes_ctx __aligned(8) key2;
};

struct mac_tfm_ctx {
	struct crypto_aes_ctx key;
	u8 __aligned(8) consts[];
};

struct mac_desc_ctx {
	unsigned int len;
	u8 dg[AES_BLOCK_SIZE];
};

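/*
 * Note: the mode handlers below derive the AES round count from the key
 * length in bytes: 6 + key_length / 4 gives 10, 12 or 14 rounds for
 * AES-128/192/256 respectively.
 */
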
static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	return aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len);
}

static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, in_key, key_len);
	if (ret)
		return ret;

	ret = aes_expandkey(&ctx->key1, in_key, key_len / 2);
	if (!ret)
		ret = aes_expandkey(&ctx->key2, &in_key[key_len / 2],
				    key_len / 2);
	if (!ret)
		return 0;

	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key_enc, rounds, blocks);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key_dec, rounds, blocks);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key_enc, rounds, blocks, walk.iv);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key_dec, rounds, blocks, walk.iv);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

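/*
 * CTR: whole blocks are handled by the asm routine; any trailing partial
 * block is handled by requesting one extra keystream block from
 * aes_ctr_encrypt() (blocks == -1) and XOR'ing only the remaining bytes.
 */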
static int ctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;
	int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key_enc, rounds, blocks, walk.iv);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	if (walk.nbytes) {
		u8 __aligned(8) tail[AES_BLOCK_SIZE];
		unsigned int nbytes = walk.nbytes;
		u8 *tdst = walk.dst.virt.addr;
		u8 *tsrc = walk.src.virt.addr;

		/*
		 * Tell aes_ctr_encrypt() to process a tail block.
		 */
		blocks = -1;

		kernel_neon_begin();
		aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds,
				blocks, walk.iv);
		kernel_neon_end();
		crypto_xor_cpy(tdst, tsrc, tail, nbytes);
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}

static int ctr_encrypt_sync(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (!may_use_simd())
		return aes_ctr_encrypt_fallback(ctx, req);

	return ctr_encrypt(req);
}

static int xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = 6 + ctx->key1.key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		kernel_neon_begin();
		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key1.key_enc, rounds, blocks,
				(u8 *)ctx->key2.key_enc, walk.iv, first);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}

	return err;
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = 6 + ctx->key1.key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		kernel_neon_begin();
		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key1.key_dec, rounds, blocks,
				(u8 *)ctx->key2.key_enc, walk.iv, first);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}

	return err;
}

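/*
 * The "__" prefixed variants are marked CRYPTO_ALG_INTERNAL and are only
 * reachable through the simd skcipher wrappers registered in aes_init(),
 * which hand requests off to cryptd when SIMD is not usable in the calling
 * context.
 */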
static struct skcipher_alg aes_algs[] = { {
	.base = {
		.cra_name		= "__ecb(aes)",
		.cra_driver_name	= "__ecb-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.setkey		= skcipher_aes_setkey,
	.encrypt	= ecb_encrypt,
	.decrypt	= ecb_decrypt,
}, {
	.base = {
		.cra_name		= "__cbc(aes)",
		.cra_driver_name	= "__cbc-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.setkey		= skcipher_aes_setkey,
	.encrypt	= cbc_encrypt,
	.decrypt	= cbc_decrypt,
}, {
	.base = {
		.cra_name		= "__ctr(aes)",
		.cra_driver_name	= "__ctr-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.setkey		= skcipher_aes_setkey,
	.encrypt	= ctr_encrypt,
	.decrypt	= ctr_encrypt,
}, {
	.base = {
		.cra_name		= "ctr(aes)",
		.cra_driver_name	= "ctr-aes-" MODE,
		.cra_priority		= PRIO - 1,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.setkey		= skcipher_aes_setkey,
	.encrypt	= ctr_encrypt_sync,
	.decrypt	= ctr_encrypt_sync,
}, {
	.base = {
		.cra_name		= "__xts(aes)",
		.cra_driver_name	= "__xts-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= 2 * AES_MIN_KEY_SIZE,
	.max_keysize	= 2 * AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.setkey		= xts_set_key,
	.encrypt	= xts_encrypt,
	.decrypt	= xts_decrypt,
} };

static int cbcmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
			 unsigned int key_len)
{
	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
	int err;

	err = aes_expandkey(&ctx->key, in_key, key_len);
	if (err)
		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

	return err;
}

static void cmac_gf128_mul_by_x(be128 *y, const be128 *x)
{
	u64 a = be64_to_cpu(x->a);
	u64 b = be64_to_cpu(x->b);

	y->a = cpu_to_be64((a << 1) | (b >> 63));
	y->b = cpu_to_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0));
}

static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
	be128 *consts = (be128 *)ctx->consts;
	u8 *rk = (u8 *)ctx->key.key_enc;
	int rounds = 6 + key_len / 4;
	int err;

	err = cbcmac_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* encrypt the zero vector */
	kernel_neon_begin();
	aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1);
	kernel_neon_end();

	cmac_gf128_mul_by_x(consts, consts);
	cmac_gf128_mul_by_x(consts + 1, consts);

	return 0;
}

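/*
 * XCBC key derivation: encrypt the constant blocks 0x01..01, 0x02..02 and
 * 0x03..03 under the user key; the first result replaces the CBC-MAC key,
 * the other two end up in ctx->consts and are folded in by cmac_final().
 */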
static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	static u8 const ks[3][AES_BLOCK_SIZE] = {
		{ [0 ... AES_BLOCK_SIZE - 1] = 0x1 },
		{ [0 ... AES_BLOCK_SIZE - 1] = 0x2 },
		{ [0 ... AES_BLOCK_SIZE - 1] = 0x3 },
	};

	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
	u8 *rk = (u8 *)ctx->key.key_enc;
	int rounds = 6 + key_len / 4;
	u8 key[AES_BLOCK_SIZE];
	int err;

	err = cbcmac_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	kernel_neon_begin();
	aes_ecb_encrypt(key, ks[0], rk, rounds, 1);
	aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2);
	kernel_neon_end();

	return cbcmac_setkey(tfm, key, sizeof(key));
}

static int mac_init(struct shash_desc *desc)
{
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);

	memset(ctx->dg, 0, AES_BLOCK_SIZE);
	ctx->len = 0;

	return 0;
}

static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
			  u8 dg[], int enc_before, int enc_after)
{
	int rounds = 6 + ctx->key_length / 4;

	if (may_use_simd()) {
		kernel_neon_begin();
		aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
			       enc_after);
		kernel_neon_end();
	} else {
		if (enc_before)
			__aes_arm64_encrypt(ctx->key_enc, dg, dg, rounds);

		while (blocks--) {
			crypto_xor(dg, in, AES_BLOCK_SIZE);
			in += AES_BLOCK_SIZE;

			if (blocks || enc_after)
				__aes_arm64_encrypt(ctx->key_enc, dg, dg,
						    rounds);
		}
	}
}

static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);

	while (len > 0) {
		unsigned int l;

		if ((ctx->len % AES_BLOCK_SIZE) == 0 &&
		    (ctx->len + len) > AES_BLOCK_SIZE) {

			int blocks = len / AES_BLOCK_SIZE;

			len %= AES_BLOCK_SIZE;

			mac_do_update(&tctx->key, p, blocks, ctx->dg,
				      (ctx->len != 0), (len != 0));

			p += blocks * AES_BLOCK_SIZE;

			if (!len) {
				ctx->len = AES_BLOCK_SIZE;
				break;
			}
			ctx->len = 0;
		}

		l = min(len, AES_BLOCK_SIZE - ctx->len);

		if (l <= AES_BLOCK_SIZE) {
			crypto_xor(ctx->dg + ctx->len, p, l);
			ctx->len += l;
			len -= l;
			p += l;
		}
	}

	return 0;
}

static int cbcmac_final(struct shash_desc *desc, u8 *out)
{
	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);

	mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1, 0);

	memcpy(out, ctx->dg, AES_BLOCK_SIZE);

	return 0;
}

static int cmac_final(struct shash_desc *desc, u8 *out)
{
	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
	u8 *consts = tctx->consts;

	if (ctx->len != AES_BLOCK_SIZE) {
		ctx->dg[ctx->len] ^= 0x80;
		consts += AES_BLOCK_SIZE;
	}

	mac_do_update(&tctx->key, consts, 1, ctx->dg, 0, 1);

	memcpy(out, ctx->dg, AES_BLOCK_SIZE);

	return 0;
}

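/*
 * cmac(aes) and xcbc(aes) keep two derived constants after the expanded key
 * (mac_tfm_ctx::consts), hence the extra 2 * AES_BLOCK_SIZE in their
 * cra_ctxsize; plain cbcmac(aes) needs no constants.
 */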
static struct shash_alg mac_algs[] = { {
	.base.cra_name		= "cmac(aes)",
	.base.cra_driver_name	= "cmac-aes-" MODE,
	.base.cra_priority	= PRIO,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx) +
				  2 * AES_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,

	.digestsize		= AES_BLOCK_SIZE,
	.init			= mac_init,
	.update			= mac_update,
	.final			= cmac_final,
	.setkey			= cmac_setkey,
	.descsize		= sizeof(struct mac_desc_ctx),
}, {
	.base.cra_name		= "xcbc(aes)",
	.base.cra_driver_name	= "xcbc-aes-" MODE,
	.base.cra_priority	= PRIO,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx) +
				  2 * AES_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,

	.digestsize		= AES_BLOCK_SIZE,
	.init			= mac_init,
	.update			= mac_update,
	.final			= cmac_final,
	.setkey			= xcbc_setkey,
	.descsize		= sizeof(struct mac_desc_ctx),
}, {
	.base.cra_name		= "cbcmac(aes)",
	.base.cra_driver_name	= "cbcmac-aes-" MODE,
	.base.cra_priority	= PRIO,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx),
	.base.cra_module	= THIS_MODULE,

	.digestsize		= AES_BLOCK_SIZE,
	.init			= mac_init,
	.update			= mac_update,
	.final			= cbcmac_final,
	.setkey			= cbcmac_setkey,
	.descsize		= sizeof(struct mac_desc_ctx),
} };

static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];

static void aes_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++)
		if (aes_simd_algs[i])
			simd_skcipher_free(aes_simd_algs[i]);

	crypto_unregister_shashes(mac_algs, ARRAY_SIZE(mac_algs));
	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
	struct simd_skcipher_alg *simd;
	const char *basename;
	const char *algname;
	const char *drvname;
	int err;
	int i;

	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	if (err)
		return err;

	err = crypto_register_shashes(mac_algs, ARRAY_SIZE(mac_algs));
	if (err)
		goto unregister_ciphers;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
			continue;

		algname = aes_algs[i].base.cra_name + 2;
		drvname = aes_algs[i].base.cra_driver_name + 2;
		basename = aes_algs[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto unregister_simds;

		aes_simd_algs[i] = simd;
	}

	return 0;

unregister_simds:
	aes_exit();
	return err;
unregister_ciphers:
	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	return err;
}

#ifdef USE_V8_CRYPTO_EXTENSIONS
module_cpu_feature_match(AES, aes_init);
#else
module_init(aes_init);
EXPORT_SYMBOL(neon_aes_ecb_encrypt);
EXPORT_SYMBOL(neon_aes_cbc_encrypt);
#endif
module_exit(aes_exit);