// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 */

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
};

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

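	/*
	 * desc_inline_query() reports via inl_mask which keys fit inline in
	 * the shared descriptor: bit i corresponds to data_len[i], i.e.
	 * bit 0 to the (split) authentication key and bit 1 to the
	 * encryption key; keys that don't fit are referenced by DMA address.
	 */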
	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
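	/*
	 * CAAM era >= 6 implements the Derived Key Protocol (DKP): the
	 * split key is computed inside the shared descriptor itself, so
	 * only the raw authentication key has to be copied here. Older
	 * eras fall through to gen_split_key() below.
	 */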
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->adata.keylen_pad + keys.enckeylen,
				   ctx->dir);

	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		return err;

	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
	      aead_setkey(aead, key, keylen);

	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
				   ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);

	ret = rfc4106_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
			     skcipher);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	const bool is_rfc3686 = alg->caam.rfc3686;
	int ret = 0;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		dev_err(jrdev, "key size mismatch\n");
		goto badkey;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

/*
 * skcipher_edesc - s/w-extended skcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct skcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

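/*
 * get_drv_ctx - lazily create the per-direction CAAM/QI driver context
 *
 * A driver context (and its frame queues) is created only on first use for
 * a given session and direction. Double-checked locking keeps the fast path
 * lock-free; ctx->lock only serializes the one-time initialization.
 */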
static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core init drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else /* (type == DECRYPT) */
				desc = ctx->sh_desc_dec;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (!IS_ERR_OR_NULL(drv_ctx))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status))
		ecode = caam_jr_strstatus(qidev, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR_OR_NULL(drv_ctx))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(qidev, req->dst,
						      dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(qidev, "unable to map destination\n");
				dma_unmap_sg(qidev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *      pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *      overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *      pad input S/G, if needed
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
				 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
	else
		qm_sg_ents = pad_sg_nents(qm_sg_ents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_ents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents <= 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}

static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
									 true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
									 false);
}

static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct skcipher_edesc *edesc;
	struct skcipher_request *req = drv_req->app_ctx;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ecode = 0;

	dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		ecode = caam_jr_strstatus(qidev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(qidev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
						   bool encrypt)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR_OR_NULL(drv_ctx))
		return (struct skcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = skcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
				  ivsize + req->cryptlen, 0);

	if (req->src == req->dst)
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
				     sizeof(*sg_table), req->cryptlen + ivsize,
				     0);
	else
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->cryptlen + ivsize,
				     0);

	return edesc;
}

static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	int ret;

	if (!req->cryptlen)
		return 0;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		skcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

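/*
 * crypto API entry points: thin wrappers that pick the direction for the
 * common skcipher_crypt() helper above.
 */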
static int skcipher_encrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, true);
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, false);
}

static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = ctr_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc3686_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};

static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		}
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
};

static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	struct caam_drv_private *priv;
	struct device *dev;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	dev = ctx->jrdev->parent;
	priv = dev_get_drvdata(dev);
	if (priv->era >= 6 && uses_dkp)
		ctx->dir = DMA_BIDIRECTIONAL;
	else
		ctx->dir = DMA_TO_DEVICE;

	ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
				      ctx->dir);
	if (dma_mapping_error(dev, ctx->key_dma)) {
		dev_err(dev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->qidev = dev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;

	return 0;
}
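
/*
 * Note on the mapping direction chosen above: on Era 6+ parts, algorithms
 * that use the Derived Key Protocol (DKP) generate the split key inside the
 * shared descriptor, which makes CAAM *write* the derived key back into
 * ctx->key - hence DMA_BIDIRECTIONAL. On older parts, or without DKP, key
 * material only flows to the device, so DMA_TO_DEVICE suffices.
 */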

static int caam_cra_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);

	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
				false);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);

	dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key),
			 ctx->dir);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	caam_exit_common(crypto_skcipher_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

void caam_qi_algapi_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}
}

static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init;
	alg->exit = caam_cra_exit;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
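
/*
 * Both template initializers above advertise CRYPTO_ALG_ASYNC (completion is
 * signalled from the QI done path, not synchronously from the request call)
 * and CRYPTO_ALG_KERN_DRIVER_ONLY (a hardware-backed implementation with no
 * userspace-visible instruction-set equivalent). A hypothetical new algorithm
 * family would plug in with its own caam_*_alg_init() helper of this shape.
 */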

int caam_qi_algapi_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int i = 0, err = 0;
	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	/* Make sure this runs only on (DPAA 1.x) QI */
	if (!priv->qi_present || caam_dpaa2)
		return 0;

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	if (priv->era < 10) {
		u32 cha_vid, cha_inst;

		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
			   CHA_ID_LS_DES_SHIFT;
		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 aesa, mdha;

		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
		mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;

		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
		aes_inst = aesa & CHA_VER_NUM_MASK;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->skcipher.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(ctrldev, "algorithms registered in /proc/crypto\n");

	return err;
}
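
/*
 * Lifecycle sketch (an assumption about the surrounding QI glue, not code
 * taken from it): the controller side is expected to call
 * caam_qi_algapi_init(ctrldev) once the DPAA 1.x QI backend is up, and
 * caam_qi_algapi_exit() on teardown, e.g.:
 *
 *	err = caam_qi_algapi_init(ctrldev);
 *	if (err)
 *		dev_err(ctrldev, "caam algorithms registration failed\n");
 *	...
 *	caam_qi_algapi_exit();
 */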